diff (string, 41–2.03M chars) | msg (string, 1–1.5k chars, nullable ⌀) | repo (string, 5–40 chars) | sha (string, 40 chars) | time (string, 20 chars) |
---|---|---|---|---|
mmm a / src / heap / gc - tracer . cc <nl> ppp b / src / heap / gc - tracer . cc <nl> void GCTracer : : PrintNVP ( ) const { <nl> " mark_weakrefs = % . 1f " <nl> " mark_globalhandles = % . 1f " <nl> " mark_codeflush = % . 1f " <nl> + " mark_optimizedcodemaps = % . 1f " <nl> " store_buffer_clear = % . 1f " <nl> " slots_buffer_clear = % . 1f " <nl> " sweep = % . 2f " <nl> void GCTracer : : PrintNVP ( ) const { <nl> current_ . scopes [ Scope : : MC_MARK_WEAK_REFERENCES ] , <nl> current_ . scopes [ Scope : : MC_MARK_GLOBAL_HANDLES ] , <nl> current_ . scopes [ Scope : : MC_MARK_CODE_FLUSH ] , <nl> + current_ . scopes [ Scope : : MC_MARK_OPTIMIZED_CODE_MAPS ] , <nl> current_ . scopes [ Scope : : MC_STORE_BUFFER_CLEAR ] , <nl> current_ . scopes [ Scope : : MC_SLOTS_BUFFER_CLEAR ] , <nl> current_ . scopes [ Scope : : MC_SWEEP ] , <nl> mmm a / src / heap / gc - tracer . h <nl> ppp b / src / heap / gc - tracer . h <nl> class GCTracer { <nl> MC_MARK_WEAK_REFERENCES , <nl> MC_MARK_GLOBAL_HANDLES , <nl> MC_MARK_CODE_FLUSH , <nl> + MC_MARK_OPTIMIZED_CODE_MAPS , <nl> MC_STORE_BUFFER_CLEAR , <nl> MC_SLOTS_BUFFER_CLEAR , <nl> MC_SWEEP , <nl> mmm a / src / heap / mark - compact - inl . h <nl> ppp b / src / heap / mark - compact - inl . h <nl> void CodeFlusher : : AddCandidate ( JSFunction * function ) { <nl> } <nl> <nl> <nl> - void CodeFlusher : : AddOptimizedCodeMap ( SharedFunctionInfo * code_map_holder ) { <nl> - if ( GetNextCodeMap ( code_map_holder ) - > IsUndefined ( ) ) { <nl> - SetNextCodeMap ( code_map_holder , optimized_code_map_holder_head_ ) ; <nl> - optimized_code_map_holder_head_ = code_map_holder ; <nl> - } <nl> - } <nl> - <nl> - <nl> JSFunction * * CodeFlusher : : GetNextCandidateSlot ( JSFunction * candidate ) { <nl> return reinterpret_cast < JSFunction * * > ( <nl> HeapObject : : RawField ( candidate , JSFunction : : kNextFunctionLinkOffset ) ) ; <nl> void CodeFlusher : : ClearNextCandidate ( SharedFunctionInfo * candidate ) { <nl> candidate - > code ( ) - > set_gc_metadata ( NULL , SKIP_WRITE_BARRIER ) ; <nl> } <nl> <nl> - <nl> - SharedFunctionInfo * CodeFlusher : : GetNextCodeMap ( SharedFunctionInfo * holder ) { <nl> - FixedArray * code_map = FixedArray : : cast ( holder - > optimized_code_map ( ) ) ; <nl> - Object * next_map = code_map - > get ( SharedFunctionInfo : : kNextMapIndex ) ; <nl> - return reinterpret_cast < SharedFunctionInfo * > ( next_map ) ; <nl> - } <nl> - <nl> - <nl> - void CodeFlusher : : SetNextCodeMap ( SharedFunctionInfo * holder , <nl> - SharedFunctionInfo * next_holder ) { <nl> - FixedArray * code_map = FixedArray : : cast ( holder - > optimized_code_map ( ) ) ; <nl> - code_map - > set ( SharedFunctionInfo : : kNextMapIndex , next_holder ) ; <nl> - } <nl> - <nl> - <nl> - void CodeFlusher : : ClearNextCodeMap ( SharedFunctionInfo * holder ) { <nl> - FixedArray * code_map = FixedArray : : cast ( holder - > optimized_code_map ( ) ) ; <nl> - code_map - > set_undefined ( SharedFunctionInfo : : kNextMapIndex ) ; <nl> - } <nl> - <nl> } / / namespace internal <nl> } / / namespace v8 <nl> <nl> mmm a / src / heap / mark - compact . cc <nl> ppp b / src / heap / mark - compact . cc <nl> void CodeFlusher : : ProcessSharedFunctionInfoCandidates ( ) { <nl> } <nl> <nl> <nl> - void CodeFlusher : : ProcessOptimizedCodeMaps ( ) { <nl> - STATIC_ASSERT ( SharedFunctionInfo : : kEntryLength = = 4 ) ; <nl> - <nl> - SharedFunctionInfo * holder = optimized_code_map_holder_head_ ; <nl> - SharedFunctionInfo * next_holder ; <nl> - <nl> - while ( holder ! 
= NULL ) { <nl> - next_holder = GetNextCodeMap ( holder ) ; <nl> - ClearNextCodeMap ( holder ) ; <nl> - <nl> - / / Process context - dependent entries in the optimized code map . <nl> - FixedArray * code_map = FixedArray : : cast ( holder - > optimized_code_map ( ) ) ; <nl> - int new_length = SharedFunctionInfo : : kEntriesStart ; <nl> - int old_length = code_map - > length ( ) ; <nl> - for ( int i = SharedFunctionInfo : : kEntriesStart ; i < old_length ; <nl> - i + = SharedFunctionInfo : : kEntryLength ) { <nl> - / / Each entry contains [ context , code , literals , ast - id ] as fields . <nl> - STATIC_ASSERT ( SharedFunctionInfo : : kEntryLength = = 4 ) ; <nl> - Context * context = <nl> - Context : : cast ( code_map - > get ( i + SharedFunctionInfo : : kContextOffset ) ) ; <nl> - HeapObject * code = HeapObject : : cast ( <nl> - code_map - > get ( i + SharedFunctionInfo : : kCachedCodeOffset ) ) ; <nl> - FixedArray * literals = FixedArray : : cast ( <nl> - code_map - > get ( i + SharedFunctionInfo : : kLiteralsOffset ) ) ; <nl> - Smi * ast_id = <nl> - Smi : : cast ( code_map - > get ( i + SharedFunctionInfo : : kOsrAstIdOffset ) ) ; <nl> - if ( Marking : : IsWhite ( Marking : : MarkBitFrom ( context ) ) ) continue ; <nl> - DCHECK ( Marking : : IsBlack ( Marking : : MarkBitFrom ( context ) ) ) ; <nl> - if ( Marking : : IsWhite ( Marking : : MarkBitFrom ( code ) ) ) continue ; <nl> - DCHECK ( Marking : : IsBlack ( Marking : : MarkBitFrom ( code ) ) ) ; <nl> - if ( Marking : : IsWhite ( Marking : : MarkBitFrom ( literals ) ) ) continue ; <nl> - DCHECK ( Marking : : IsBlack ( Marking : : MarkBitFrom ( literals ) ) ) ; <nl> - / / Move every slot in the entry and record slots when needed . <nl> - code_map - > set ( new_length + SharedFunctionInfo : : kCachedCodeOffset , code ) ; <nl> - code_map - > set ( new_length + SharedFunctionInfo : : kContextOffset , context ) ; <nl> - code_map - > set ( new_length + SharedFunctionInfo : : kLiteralsOffset , literals ) ; <nl> - code_map - > set ( new_length + SharedFunctionInfo : : kOsrAstIdOffset , ast_id ) ; <nl> - Object * * code_slot = code_map - > RawFieldOfElementAt ( <nl> - new_length + SharedFunctionInfo : : kCachedCodeOffset ) ; <nl> - isolate_ - > heap ( ) - > mark_compact_collector ( ) - > RecordSlot ( <nl> - code_map , code_slot , * code_slot ) ; <nl> - Object * * context_slot = code_map - > RawFieldOfElementAt ( <nl> - new_length + SharedFunctionInfo : : kContextOffset ) ; <nl> - isolate_ - > heap ( ) - > mark_compact_collector ( ) - > RecordSlot ( <nl> - code_map , context_slot , * context_slot ) ; <nl> - Object * * literals_slot = code_map - > RawFieldOfElementAt ( <nl> - new_length + SharedFunctionInfo : : kLiteralsOffset ) ; <nl> - isolate_ - > heap ( ) - > mark_compact_collector ( ) - > RecordSlot ( <nl> - code_map , literals_slot , * literals_slot ) ; <nl> - new_length + = SharedFunctionInfo : : kEntryLength ; <nl> - } <nl> - <nl> - / / Process context - independent entry in the optimized code map . 
<nl> - Object * shared_object = code_map - > get ( SharedFunctionInfo : : kSharedCodeIndex ) ; <nl> - if ( shared_object - > IsCode ( ) ) { <nl> - Code * shared_code = Code : : cast ( shared_object ) ; <nl> - if ( Marking : : IsWhite ( Marking : : MarkBitFrom ( shared_code ) ) ) { <nl> - code_map - > set_undefined ( SharedFunctionInfo : : kSharedCodeIndex ) ; <nl> - } else { <nl> - DCHECK ( Marking : : IsBlack ( Marking : : MarkBitFrom ( shared_code ) ) ) ; <nl> - Object * * slot = <nl> - code_map - > RawFieldOfElementAt ( SharedFunctionInfo : : kSharedCodeIndex ) ; <nl> - isolate_ - > heap ( ) - > mark_compact_collector ( ) - > RecordSlot ( code_map , slot , <nl> - * slot ) ; <nl> - } <nl> - } <nl> - <nl> - / / Trim the optimized code map if entries have been removed . <nl> - if ( new_length < old_length ) { <nl> - holder - > TrimOptimizedCodeMap ( old_length - new_length ) ; <nl> - } <nl> - <nl> - holder = next_holder ; <nl> - } <nl> - <nl> - optimized_code_map_holder_head_ = NULL ; <nl> - } <nl> - <nl> - <nl> void CodeFlusher : : EvictCandidate ( SharedFunctionInfo * shared_info ) { <nl> / / Make sure previous flushing decisions are revisited . <nl> isolate_ - > heap ( ) - > incremental_marking ( ) - > RecordWrites ( shared_info ) ; <nl> void CodeFlusher : : EvictCandidate ( JSFunction * function ) { <nl> } <nl> <nl> <nl> - void CodeFlusher : : EvictOptimizedCodeMap ( SharedFunctionInfo * code_map_holder ) { <nl> - FixedArray * code_map = <nl> - FixedArray : : cast ( code_map_holder - > optimized_code_map ( ) ) ; <nl> - DCHECK ( ! code_map - > get ( SharedFunctionInfo : : kNextMapIndex ) - > IsUndefined ( ) ) ; <nl> - <nl> - / / Make sure previous flushing decisions are revisited . <nl> - isolate_ - > heap ( ) - > incremental_marking ( ) - > RecordWrites ( code_map ) ; <nl> - isolate_ - > heap ( ) - > incremental_marking ( ) - > RecordWrites ( code_map_holder ) ; <nl> - <nl> - if ( FLAG_trace_code_flushing ) { <nl> - PrintF ( " [ code - flushing abandons code - map : " ) ; <nl> - code_map_holder - > ShortPrint ( ) ; <nl> - PrintF ( " ] \ n " ) ; <nl> - } <nl> - <nl> - SharedFunctionInfo * holder = optimized_code_map_holder_head_ ; <nl> - SharedFunctionInfo * next_holder ; <nl> - if ( holder = = code_map_holder ) { <nl> - next_holder = GetNextCodeMap ( code_map_holder ) ; <nl> - optimized_code_map_holder_head_ = next_holder ; <nl> - ClearNextCodeMap ( code_map_holder ) ; <nl> - } else { <nl> - while ( holder ! = NULL ) { <nl> - next_holder = GetNextCodeMap ( holder ) ; <nl> - <nl> - if ( next_holder = = code_map_holder ) { <nl> - next_holder = GetNextCodeMap ( code_map_holder ) ; <nl> - SetNextCodeMap ( holder , next_holder ) ; <nl> - ClearNextCodeMap ( code_map_holder ) ; <nl> - break ; <nl> - } <nl> - <nl> - holder = next_holder ; <nl> - } <nl> - } <nl> - } <nl> - <nl> - <nl> void CodeFlusher : : EvictJSFunctionCandidates ( ) { <nl> JSFunction * candidate = jsfunction_candidates_head_ ; <nl> JSFunction * next_candidate ; <nl> void CodeFlusher : : EvictSharedFunctionInfoCandidates ( ) { <nl> } <nl> <nl> <nl> - void CodeFlusher : : EvictOptimizedCodeMaps ( ) { <nl> - SharedFunctionInfo * holder = optimized_code_map_holder_head_ ; <nl> - SharedFunctionInfo * next_holder ; <nl> - while ( holder ! 
= NULL ) { <nl> - next_holder = GetNextCodeMap ( holder ) ; <nl> - EvictOptimizedCodeMap ( holder ) ; <nl> - holder = next_holder ; <nl> - } <nl> - DCHECK ( optimized_code_map_holder_head_ = = NULL ) ; <nl> - } <nl> - <nl> - <nl> void CodeFlusher : : IteratePointersToFromSpace ( ObjectVisitor * v ) { <nl> Heap * heap = isolate_ - > heap ( ) ; <nl> <nl> void MarkCompactCollector : : AfterMarking ( ) { <nl> code_flusher_ - > ProcessCandidates ( ) ; <nl> } <nl> <nl> + / / Process and clear all optimized code maps . <nl> + if ( ! FLAG_flush_optimized_code_cache ) { <nl> + GCTracer : : Scope gc_scope ( heap ( ) - > tracer ( ) , <nl> + GCTracer : : Scope : : MC_MARK_OPTIMIZED_CODE_MAPS ) ; <nl> + ProcessAndClearOptimizedCodeMaps ( ) ; <nl> + } <nl> + <nl> if ( FLAG_track_gc_object_stats ) { <nl> if ( FLAG_trace_gc_object_stats ) { <nl> heap ( ) - > object_stats_ - > TraceObjectStats ( ) ; <nl> void MarkCompactCollector : : AfterMarking ( ) { <nl> } <nl> <nl> <nl> + void MarkCompactCollector : : ProcessAndClearOptimizedCodeMaps ( ) { <nl> + SharedFunctionInfo : : Iterator iterator ( isolate ( ) ) ; <nl> + while ( SharedFunctionInfo * shared = iterator . Next ( ) ) { <nl> + if ( shared - > optimized_code_map ( ) - > IsSmi ( ) ) continue ; <nl> + <nl> + / / Process context - dependent entries in the optimized code map . <nl> + FixedArray * code_map = FixedArray : : cast ( shared - > optimized_code_map ( ) ) ; <nl> + int new_length = SharedFunctionInfo : : kEntriesStart ; <nl> + int old_length = code_map - > length ( ) ; <nl> + for ( int i = SharedFunctionInfo : : kEntriesStart ; i < old_length ; <nl> + i + = SharedFunctionInfo : : kEntryLength ) { <nl> + / / Each entry contains [ context , code , literals , ast - id ] as fields . <nl> + STATIC_ASSERT ( SharedFunctionInfo : : kEntryLength = = 4 ) ; <nl> + Context * context = <nl> + Context : : cast ( code_map - > get ( i + SharedFunctionInfo : : kContextOffset ) ) ; <nl> + HeapObject * code = HeapObject : : cast ( <nl> + code_map - > get ( i + SharedFunctionInfo : : kCachedCodeOffset ) ) ; <nl> + FixedArray * literals = FixedArray : : cast ( <nl> + code_map - > get ( i + SharedFunctionInfo : : kLiteralsOffset ) ) ; <nl> + Smi * ast_id = <nl> + Smi : : cast ( code_map - > get ( i + SharedFunctionInfo : : kOsrAstIdOffset ) ) ; <nl> + if ( Marking : : IsWhite ( Marking : : MarkBitFrom ( context ) ) ) continue ; <nl> + DCHECK ( Marking : : IsBlack ( Marking : : MarkBitFrom ( context ) ) ) ; <nl> + if ( Marking : : IsWhite ( Marking : : MarkBitFrom ( code ) ) ) continue ; <nl> + DCHECK ( Marking : : IsBlack ( Marking : : MarkBitFrom ( code ) ) ) ; <nl> + if ( Marking : : IsWhite ( Marking : : MarkBitFrom ( literals ) ) ) continue ; <nl> + DCHECK ( Marking : : IsBlack ( Marking : : MarkBitFrom ( literals ) ) ) ; <nl> + / / Move every slot in the entry and record slots when needed . 
<nl> + code_map - > set ( new_length + SharedFunctionInfo : : kCachedCodeOffset , code ) ; <nl> + code_map - > set ( new_length + SharedFunctionInfo : : kContextOffset , context ) ; <nl> + code_map - > set ( new_length + SharedFunctionInfo : : kLiteralsOffset , literals ) ; <nl> + code_map - > set ( new_length + SharedFunctionInfo : : kOsrAstIdOffset , ast_id ) ; <nl> + Object * * code_slot = code_map - > RawFieldOfElementAt ( <nl> + new_length + SharedFunctionInfo : : kCachedCodeOffset ) ; <nl> + RecordSlot ( code_map , code_slot , * code_slot ) ; <nl> + Object * * context_slot = code_map - > RawFieldOfElementAt ( <nl> + new_length + SharedFunctionInfo : : kContextOffset ) ; <nl> + RecordSlot ( code_map , context_slot , * context_slot ) ; <nl> + Object * * literals_slot = code_map - > RawFieldOfElementAt ( <nl> + new_length + SharedFunctionInfo : : kLiteralsOffset ) ; <nl> + RecordSlot ( code_map , literals_slot , * literals_slot ) ; <nl> + new_length + = SharedFunctionInfo : : kEntryLength ; <nl> + } <nl> + <nl> + / / Process context - independent entry in the optimized code map . <nl> + Object * shared_object = code_map - > get ( SharedFunctionInfo : : kSharedCodeIndex ) ; <nl> + if ( shared_object - > IsCode ( ) ) { <nl> + Code * shared_code = Code : : cast ( shared_object ) ; <nl> + if ( Marking : : IsWhite ( Marking : : MarkBitFrom ( shared_code ) ) ) { <nl> + code_map - > set_undefined ( SharedFunctionInfo : : kSharedCodeIndex ) ; <nl> + } else { <nl> + DCHECK ( Marking : : IsBlack ( Marking : : MarkBitFrom ( shared_code ) ) ) ; <nl> + Object * * slot = <nl> + code_map - > RawFieldOfElementAt ( SharedFunctionInfo : : kSharedCodeIndex ) ; <nl> + RecordSlot ( code_map , slot , * slot ) ; <nl> + } <nl> + } <nl> + <nl> + / / Trim the optimized code map if entries have been removed . <nl> + if ( new_length < old_length ) { <nl> + shared - > TrimOptimizedCodeMap ( old_length - new_length ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + <nl> void MarkCompactCollector : : ClearNonLiveReferences ( ) { <nl> GCTracer : : Scope gc_scope ( heap ( ) - > tracer ( ) , <nl> GCTracer : : Scope : : MC_NONLIVEREFERENCES ) ; <nl> mmm a / src / heap / mark - compact . h <nl> ppp b / src / heap / mark - compact . h <nl> class MarkingDeque { <nl> / / CodeFlusher collects candidates for code flushing during marking and <nl> / / processes those candidates after marking has completed in order to <nl> / / reset those functions referencing code objects that would otherwise <nl> - / / be unreachable . Code objects can be referenced in three ways : <nl> + / / be unreachable . Code objects can be referenced in two ways : <nl> / / - SharedFunctionInfo references unoptimized code . <nl> / / - JSFunction references either unoptimized or optimized code . <nl> - / / - OptimizedCodeMap references optimized code . <nl> / / We are not allowed to flush unoptimized code for functions that got <nl> / / optimized or inlined into optimized code , because we might bailout <nl> / / into the unoptimized code again during deoptimization . 
<nl> class CodeFlusher { <nl> public : <nl> explicit CodeFlusher ( Isolate * isolate ) <nl> : isolate_ ( isolate ) , <nl> - jsfunction_candidates_head_ ( NULL ) , <nl> - shared_function_info_candidates_head_ ( NULL ) , <nl> - optimized_code_map_holder_head_ ( NULL ) { } <nl> + jsfunction_candidates_head_ ( nullptr ) , <nl> + shared_function_info_candidates_head_ ( nullptr ) { } <nl> <nl> inline void AddCandidate ( SharedFunctionInfo * shared_info ) ; <nl> inline void AddCandidate ( JSFunction * function ) ; <nl> - inline void AddOptimizedCodeMap ( SharedFunctionInfo * code_map_holder ) ; <nl> <nl> - void EvictOptimizedCodeMap ( SharedFunctionInfo * code_map_holder ) ; <nl> void EvictCandidate ( SharedFunctionInfo * shared_info ) ; <nl> void EvictCandidate ( JSFunction * function ) ; <nl> <nl> void ProcessCandidates ( ) { <nl> - ProcessOptimizedCodeMaps ( ) ; <nl> ProcessSharedFunctionInfoCandidates ( ) ; <nl> ProcessJSFunctionCandidates ( ) ; <nl> } <nl> <nl> void EvictAllCandidates ( ) { <nl> - EvictOptimizedCodeMaps ( ) ; <nl> EvictJSFunctionCandidates ( ) ; <nl> EvictSharedFunctionInfoCandidates ( ) ; <nl> } <nl> class CodeFlusher { <nl> void IteratePointersToFromSpace ( ObjectVisitor * v ) ; <nl> <nl> private : <nl> - void ProcessOptimizedCodeMaps ( ) ; <nl> void ProcessJSFunctionCandidates ( ) ; <nl> void ProcessSharedFunctionInfoCandidates ( ) ; <nl> - void EvictOptimizedCodeMaps ( ) ; <nl> void EvictJSFunctionCandidates ( ) ; <nl> void EvictSharedFunctionInfoCandidates ( ) ; <nl> <nl> class CodeFlusher { <nl> SharedFunctionInfo * next_candidate ) ; <nl> static inline void ClearNextCandidate ( SharedFunctionInfo * candidate ) ; <nl> <nl> - static inline SharedFunctionInfo * GetNextCodeMap ( SharedFunctionInfo * holder ) ; <nl> - static inline void SetNextCodeMap ( SharedFunctionInfo * holder , <nl> - SharedFunctionInfo * next_holder ) ; <nl> - static inline void ClearNextCodeMap ( SharedFunctionInfo * holder ) ; <nl> - <nl> Isolate * isolate_ ; <nl> JSFunction * jsfunction_candidates_head_ ; <nl> SharedFunctionInfo * shared_function_info_candidates_head_ ; <nl> - SharedFunctionInfo * optimized_code_map_holder_head_ ; <nl> <nl> DISALLOW_COPY_AND_ASSIGN ( CodeFlusher ) ; <nl> } ; <nl> class MarkCompactCollector { <nl> / / collections when incremental marking is aborted . <nl> void AbortWeakCollections ( ) ; <nl> <nl> - <nl> void ProcessAndClearWeakCells ( ) ; <nl> void AbortWeakCells ( ) ; <nl> <nl> + / / After all reachable objects have been marked , those entries within <nl> + / / optimized code maps that became unreachable are removed , potentially <nl> + / / trimming or clearing out the entire optimized code map . <nl> + void ProcessAndClearOptimizedCodeMaps ( ) ; <nl> + <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / Phase 2 : Sweeping to clear mark bits and free non - live objects for <nl> / / a non - compacting collection . <nl> mmm a / src / heap / objects - visiting - inl . h <nl> ppp b / src / heap / objects - visiting - inl . h <nl> void StaticMarkingVisitor < StaticVisitor > : : VisitSharedFunctionInfo ( <nl> if ( FLAG_cleanup_code_caches_at_gc ) { <nl> shared - > ClearTypeFeedbackInfoAtGCTime ( ) ; <nl> } <nl> - if ( FLAG_flush_optimized_code_cache & & <nl> - ! shared - > optimized_code_map ( ) - > IsSmi ( ) ) { <nl> - / / Always flush the optimized code map if requested by flag . 
<nl> - shared - > ClearOptimizedCodeMap ( ) ; <nl> - } <nl> - MarkCompactCollector * collector = heap - > mark_compact_collector ( ) ; <nl> - if ( collector - > is_code_flushing_enabled ( ) ) { <nl> + if ( FLAG_flush_optimized_code_cache ) { <nl> + if ( ! shared - > optimized_code_map ( ) - > IsSmi ( ) ) { <nl> + / / Always flush the optimized code map if requested by flag . <nl> + shared - > ClearOptimizedCodeMap ( ) ; <nl> + } <nl> + } else { <nl> if ( ! shared - > optimized_code_map ( ) - > IsSmi ( ) ) { <nl> - / / Add the shared function info holding an optimized code map to <nl> - / / the code flusher for processing of code maps after marking . <nl> - collector - > code_flusher ( ) - > AddOptimizedCodeMap ( shared ) ; <nl> / / Treat some references within the code map weakly by marking the <nl> - / / code map itself but not pushing it onto the marking deque . <nl> + / / code map itself but not pushing it onto the marking deque . The <nl> + / / map will be processed after marking . <nl> FixedArray * code_map = FixedArray : : cast ( shared - > optimized_code_map ( ) ) ; <nl> MarkOptimizedCodeMap ( heap , code_map ) ; <nl> } <nl> + } <nl> + MarkCompactCollector * collector = heap - > mark_compact_collector ( ) ; <nl> + if ( collector - > is_code_flushing_enabled ( ) ) { <nl> if ( IsFlushable ( heap , shared ) ) { <nl> / / This function ' s code looks flushable . But we have to postpone <nl> / / the decision until we see all functions that point to the same <nl> void StaticMarkingVisitor < StaticVisitor > : : VisitSharedFunctionInfo ( <nl> return ; <nl> } <nl> } else { <nl> + / / TODO ( mstarzinger ) : Drop this case , it shouldn ' t be done here ! <nl> if ( ! shared - > optimized_code_map ( ) - > IsSmi ( ) ) { <nl> / / Flush optimized code map on major GCs without code flushing , <nl> / / needed because cached code doesn ' t contain breakpoints . <nl> mmm a / src / objects . cc <nl> ppp b / src / objects . cc <nl> void SharedFunctionInfo : : AddToOptimizedCodeMap ( <nl> / / Copy old optimized code map and append one new entry . <nl> new_code_map = isolate - > factory ( ) - > CopyFixedArrayAndGrow ( <nl> old_code_map , kEntryLength , TENURED ) ; <nl> + / / TODO ( mstarzinger ) : Temporary workaround . The allocation above might have <nl> + / / flushed the optimized code map and the copy we created is full of holes . <nl> + / / For now we just give up on adding the entry and pretend it got flushed . <nl> + if ( shared - > optimized_code_map ( ) - > IsSmi ( ) ) return ; <nl> int old_length = old_code_map - > length ( ) ; <nl> / / Zap the old map to avoid any stale entries . Note that this is required <nl> / / for correctness because entries are being treated weakly by the GC . <nl> void SharedFunctionInfo : : AddToOptimizedCodeMap ( <nl> DCHECK ( new_code_map - > get ( i + kOsrAstIdOffset ) - > IsSmi ( ) ) ; <nl> } <nl> # endif <nl> + <nl> + if ( Heap : : ShouldZapGarbage ( ) ) { <nl> + / / Zap any old optimized code map for heap - verifier . <nl> + if ( ! 
shared - > optimized_code_map ( ) - > IsSmi ( ) ) { <nl> + FixedArray * old_code_map = FixedArray : : cast ( shared - > optimized_code_map ( ) ) ; <nl> + old_code_map - > FillWithHoles ( 0 , old_code_map - > length ( ) ) ; <nl> + } <nl> + } <nl> + <nl> shared - > set_optimized_code_map ( * new_code_map ) ; <nl> } <nl> <nl> <nl> void SharedFunctionInfo : : ClearOptimizedCodeMap ( ) { <nl> - FixedArray * code_map = FixedArray : : cast ( optimized_code_map ( ) ) ; <nl> - <nl> - / / If the next map link slot is already used then the function was <nl> - / / enqueued with code flushing and we remove it now . <nl> - if ( ! code_map - > get ( kNextMapIndex ) - > IsUndefined ( ) ) { <nl> - CodeFlusher * flusher = GetHeap ( ) - > mark_compact_collector ( ) - > code_flusher ( ) ; <nl> - flusher - > EvictOptimizedCodeMap ( this ) ; <nl> + if ( Heap : : ShouldZapGarbage ( ) ) { <nl> + / / Zap any old optimized code map for heap - verifier . <nl> + if ( ! optimized_code_map ( ) - > IsSmi ( ) ) { <nl> + FixedArray * old_code_map = FixedArray : : cast ( optimized_code_map ( ) ) ; <nl> + old_code_map - > FillWithHoles ( 0 , old_code_map - > length ( ) ) ; <nl> + } <nl> } <nl> <nl> - DCHECK ( code_map - > get ( kNextMapIndex ) - > IsUndefined ( ) ) ; <nl> set_optimized_code_map ( Smi : : FromInt ( 0 ) ) ; <nl> } <nl> <nl> mmm a / src / objects . h <nl> ppp b / src / objects . h <nl> class SharedFunctionInfo : public HeapObject { <nl> Handle < Object > script_object ) ; <nl> <nl> / / Layout description of the optimized code map . <nl> - static const int kNextMapIndex = 0 ; <nl> - static const int kSharedCodeIndex = 1 ; <nl> - static const int kEntriesStart = 2 ; <nl> + static const int kSharedCodeIndex = 0 ; <nl> + static const int kEntriesStart = 1 ; <nl> static const int kContextOffset = 0 ; <nl> static const int kCachedCodeOffset = 1 ; <nl> static const int kLiteralsOffset = 2 ; <nl>
|
[ heap ] Separate out optimized code map processing .
|
v8/v8
|
087513d6d4a710f7360d2a71abb4e324168ad571
|
2015-11-09T11:05:51Z
|
mmm a / addons / webinterface . default / js / MediaLibrary . js <nl> ppp b / addons / webinterface . default / js / MediaLibrary . js <nl> MediaLibrary . prototype = { <nl> ' studio ' , <nl> ' mpaa ' , <nl> ' premiered ' <nl> - ] <nl> + ] , <nl> + ' sort ' : { <nl> + ' method ' : ' sorttitle ' , <nl> + ' ignorearticle ' : true <nl> + } <nl> } , <nl> ' success ' : function ( data ) { <nl> if ( data & & data . result & & data . result . tvshows ) { <nl>
|
[ webinterface . default ] tvshows : sort results by sorttitle
|
xbmc/xbmc
|
4ddfff5e581aefc438a67d18f9daf3db6d3b48c2
|
2016-05-22T08:57:25Z
|
mmm a / stdlib / private / StdlibUnittest / CheckCollectionType . swift <nl> ppp b / stdlib / private / StdlibUnittest / CheckCollectionType . swift <nl> self . test ( " \ ( testNamePrefix ) . removeFirst ( n : Int ) / slice / removeTooMany / semantics " ) <nl> slice . removeFirst ( 3 ) / / Should trap . <nl> } <nl> <nl> + / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> + / / popFirst ( ) / slice <nl> + / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> + <nl> + self . test ( " \ ( testNamePrefix ) . popFirst ( ) / slice / semantics " ) { <nl> + / / This can just reuse the test data for removeFirst ( ) <nl> + for test in removeFirstTests . filter ( { $ 0 . numberToRemove = = 1 } ) { <nl> + let c = makeWrappedCollection ( test . collection . map ( OpaqueValue . init ) ) <nl> + var slice = c [ c . startIndex . . < c . endIndex ] <nl> + let survivingIndices = Array ( slice . startIndex . successor ( ) . . < slice . endIndex ) <nl> + let removedElement = slice . popFirst ( ) ! <nl> + expectEqual ( test . collection . first , extractValue ( removedElement ) . value ) <nl> + expectEqualSequence ( <nl> + test . expectedCollection , <nl> + slice . map { extractValue ( $ 0 ) . value } , <nl> + " popFirst ( ) shouldn ' t mutate the tail of the slice " , <nl> + stackTrace : SourceLocStack ( ) . with ( test . loc ) <nl> + ) <nl> + expectEqualSequence ( <nl> + test . expectedCollection , <nl> + survivingIndices . map { extractValue ( slice [ $ 0 ] ) . value } , <nl> + " popFirst ( ) shouldn ' t invalidate indices " , <nl> + stackTrace : SourceLocStack ( ) . with ( test . loc ) <nl> + ) <nl> + expectEqualSequence ( <nl> + test . collection , <nl> + c . map { extractValue ( $ 0 ) . value } , <nl> + " popFirst ( ) shouldn ' t mutate the collection that was sliced " , <nl> + stackTrace : SourceLocStack ( ) . with ( test . loc ) ) <nl> + } <nl> + } <nl> + <nl> + self . test ( " \ ( testNamePrefix ) . popFirst ( ) / slice / empty / semantics " ) { <nl> + let c = makeWrappedCollection ( Array < OpaqueValue < Int > > ( ) ) <nl> + var slice = c [ c . startIndex . . < c . startIndex ] <nl> + expectEmpty ( slice . popFirst ( ) ) <nl> + } <nl> + <nl> / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> <nl> } / / addForwardCollectionTests <nl> self . test ( " \ ( testNamePrefix ) . removeLast ( n : Int ) / slice / removeTooMany / semantics " ) <nl> slice . removeLast ( 3 ) / / Should trap . <nl> } <nl> <nl> + / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> + / / popLast ( ) / slice <nl> + / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> + <nl> + self . test ( " \ ( testNamePrefix ) . popLast ( ) / slice / semantics " ) { <nl> + / / This can just reuse the test data for removeLast ( ) <nl> + for test in removeLastTests . filter ( { $ 0 . numberToRemove = = 1 } ) { <nl> + let c = makeWrappedCollection ( test . collection ) <nl> + var slice = c [ c . startIndex . . < c . endIndex ] <nl> + let survivingIndices = <nl> + Array ( <nl> + slice . startIndex . . < <nl> + slice . endIndex . advancedBy ( numericCast ( - test . numberToRemove ) ) <nl> + ) <nl> + let removedElement = slice . popLast ( ) ! <nl> + expectEqual ( <nl> + test . collection . last ! . value , <nl> + extractValue ( removedElement ) . value ) <nl> + expectEqualSequence ( <nl> + test . expectedCollection , <nl> + slice . 
map { extractValue ( $ 0 ) . value } , <nl> + " popLast ( ) shouldn ' t mutate the head of the slice " , <nl> + stackTrace : SourceLocStack ( ) . with ( test . loc ) <nl> + ) <nl> + expectEqualSequence ( <nl> + test . expectedCollection , <nl> + survivingIndices . map { extractValue ( slice [ $ 0 ] ) . value } , <nl> + " popLast ( ) shouldn ' t invalidate indices " , <nl> + stackTrace : SourceLocStack ( ) . with ( test . loc ) <nl> + ) <nl> + expectEqualSequence ( <nl> + test . collection . map { $ 0 . value } , <nl> + c . map { extractValue ( $ 0 ) . value } , <nl> + " popLast ( ) shouldn ' t mutate the collection that was sliced " , <nl> + stackTrace : SourceLocStack ( ) . with ( test . loc ) ) <nl> + } <nl> + } <nl> + <nl> + self . test ( " \ ( testNamePrefix ) . popLast ( ) / slice / empty / semantics " ) { <nl> + let c = makeWrappedCollection ( Array < OpaqueValue < Int > > ( ) ) <nl> + var slice = c [ c . startIndex . . < c . startIndex ] <nl> + expectEmpty ( slice . popLast ( ) ) <nl> + } <nl> + <nl> + <nl> / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> / / Index <nl> / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> mmm a / stdlib / public / core / Collection . swift <nl> ppp b / stdlib / public / core / Collection . swift <nl> extension CollectionType where SubSequence = = Self { <nl> / / / If ` ! self . isEmpty ` , remove the first element and return it , otherwise <nl> / / / return ` nil ` . <nl> / / / <nl> - / / / - Complexity : O ( ` self . count ` ) <nl> + / / / - Complexity : O ( 1 ) <nl> @ warn_unused_result <nl> public mutating func popFirst ( ) - > Generator . Element ? { <nl> guard ! isEmpty else { return nil } <nl> extension CollectionType where SubSequence = = Self { <nl> self = self [ startIndex . successor ( ) . . < endIndex ] <nl> return element <nl> } <nl> + } <nl> <nl> + extension CollectionType where <nl> + SubSequence = = Self , Index : BidirectionalIndexType { <nl> / / / If ` ! self . isEmpty ` , remove the last element and return it , otherwise <nl> / / / return ` nil ` . <nl> / / / <nl> - / / / - Complexity : O ( ` self . count ` ) <nl> + / / / - Complexity : O ( 1 ) <nl> @ warn_unused_result <nl> public mutating func popLast ( ) - > Generator . Element ? { <nl> guard ! isEmpty else { return nil } <nl> - let lastElementIndex = startIndex . advancedBy ( numericCast ( count ) - 1 ) <nl> - let element = self [ lastElementIndex ] <nl> - self = self [ startIndex . . < lastElementIndex ] <nl> + let element = last ! <nl> + self = self [ startIndex . . < endIndex . predecessor ( ) ] <nl> return element <nl> } <nl> } <nl>
|
Merge pull request from tanadeau / sr - 538
|
apple/swift
|
fd8739105c8d91363ee76461bca5cf101055ab17
|
2016-01-24T05:20:06Z
|
new file mode 100644 <nl> index 00000000000 . . 312e70ef3e9 <nl> mmm / dev / null <nl> ppp b / ports / grpc / 00006 - crypt32 . patch <nl> <nl> + diff - - git a / CMakeLists . txt b / CMakeLists . txt <nl> + index b3ca615 . . 21b5aaf 100644 <nl> + mmm a / CMakeLists . txt <nl> ppp + b / CMakeLists . txt <nl> + elseif ( UNIX ) <nl> + endif ( ) <nl> + <nl> + if ( WIN32 AND MSVC ) <nl> + - set ( _gRPC_BASELIB_LIBRARIES wsock32 ws2_32 gdi32 ) <nl> + + set ( _gRPC_BASELIB_LIBRARIES wsock32 ws2_32 gdi32 crypt32 ) <nl> + endif ( ) <nl> + <nl> + # Create directory for generated . proto files <nl> new file mode 100644 <nl> index 00000000000 . . 336077290a0 <nl> mmm / dev / null <nl> ppp b / ports / grpc / 00007 - disable_grpcpp_channelz . patch <nl> <nl> + diff - - git a / CMakeLists . txt b / CMakeLists . txt <nl> + index 1961995 . . d721654 100644 <nl> + mmm a / CMakeLists . txt <nl> ppp + b / CMakeLists . txt <nl> + if ( gRPC_INSTALL ) <nl> + endif ( ) <nl> + <nl> + <nl> + - if ( gRPC_BUILD_CODEGEN ) <nl> + + if ( gRPC_BUILD_CODEGEN AND NOT gRPC_USE_PROTO_LITE ) <nl> + add_library ( grpcpp_channelz <nl> + src / cpp / server / channelz / channelz_service . cc <nl> + src / cpp / server / channelz / channelz_service_plugin . cc <nl> + foreach ( _hdr <nl> + endforeach ( ) <nl> + <nl> + <nl> + - if ( gRPC_BUILD_CODEGEN ) <nl> + + if ( gRPC_BUILD_CODEGEN AND NOT gRPC_USE_PROTO_LITE ) <nl> + <nl> + if ( gRPC_INSTALL ) <nl> + install ( TARGETS grpcpp_channelz EXPORT gRPCTargets <nl> + if ( gRPC_INSTALL ) <nl> + ARCHIVE DESTINATION $ { gRPC_INSTALL_LIBDIR } <nl> + ) <nl> + endif ( ) <nl> + - endif ( gRPC_BUILD_CODEGEN ) <nl> + + endif ( gRPC_BUILD_CODEGEN AND NOT gRPC_USE_PROTO_LITE ) <nl> + <nl> + - endif ( gRPC_BUILD_CODEGEN ) <nl> + + endif ( gRPC_BUILD_CODEGEN AND NOT gRPC_USE_PROTO_LITE ) <nl> + if ( gRPC_BUILD_TESTS ) <nl> + <nl> + if ( gRPC_BUILD_CODEGEN ) <nl> mmm a / ports / grpc / CONTROL <nl> ppp b / ports / grpc / CONTROL <nl> <nl> Source : grpc <nl> - Version : 1 . 22 . 0 <nl> + Version : 1 . 23 . 0 <nl> Build - Depends : zlib , openssl , protobuf , c - ares ( ! uwp ) <nl> Homepage : https : / / github . com / grpc / grpc <nl> Description : An RPC library and framework <nl> mmm a / ports / grpc / portfile . cmake <nl> ppp b / ports / grpc / portfile . cmake <nl> endif ( ) <nl> vcpkg_from_github ( <nl> OUT_SOURCE_PATH SOURCE_PATH <nl> REPO grpc / grpc <nl> - REF 08fd59f039c7cf62614ab7741b3f34527af103c7 <nl> - SHA512 a1200e0df981f69f9831425a5187dcc843d9eadee1663150cd3aa10d388ae0693ac6aa57760d7047ce1e6c55a15339d1db2f1b6e25f84c76ad97e0b48706e2c4 <nl> + REF v1 . 23 . 0 <nl> + SHA512 4770651bd73b71735152155327c44d59125442b405157ffea8fb80b6f16406dd388474394e44ef620e2a777018121988a3a7a552a42141650d7cac263ab499a0 <nl> HEAD_REF master <nl> PATCHES <nl> 00001 - fix - uwp . patch <nl> vcpkg_from_github ( <nl> 00003 - undef - base64 - macro . patch <nl> 00004 - link - gdi32 - on - windows . patch <nl> 00005 - fix - uwp - error . patch <nl> + 00006 - crypt32 . patch <nl> + 00007 - disable_grpcpp_channelz . patch <nl> ) <nl> <nl> if ( VCPKG_CMAKE_SYSTEM_NAME STREQUAL " WindowsStore " OR VCPKG_TARGET_ARCHITECTURE STREQUAL " arm " OR VCPKG_TARGET_ARCHITECTURE STREQUAL " arm64 " ) <nl>
|
[ grpc ] Update grpc to 1 . 23 . 0 ( )
|
microsoft/vcpkg
|
135474a7302ea6043d5eecef6e27daf7fe36b24b
|
2019-09-09T15:59:28Z
|
mmm a / tensorflow / python / autograph / operators / control_flow . py <nl> ppp b / tensorflow / python / autograph / operators / control_flow . py <nl> def loop_body ( self_x ) : <nl> from tensorflow . python . autograph . operators import py_builtins <nl> from tensorflow . python . autograph . operators import special_values <nl> from tensorflow . python . autograph . utils import ag_logging <nl> + from tensorflow . python . autograph . utils import misc <nl> from tensorflow . python . autograph . utils import tensors <nl> from tensorflow . python . data . experimental . ops import scan_ops <nl> from tensorflow . python . data . experimental . ops import take_while_ops <nl> from tensorflow . python . data . ops import dataset_ops <nl> from tensorflow . python . data . ops import iterator_ops <nl> from tensorflow . python . framework import constant_op <nl> + from tensorflow . python . framework import dtypes <nl> from tensorflow . python . framework import func_graph <nl> from tensorflow . python . framework import ops <nl> from tensorflow . python . framework import tensor_util <nl> from tensorflow . python . ops import control_flow_ops <nl> + from tensorflow . python . ops import math_ops <nl> from tensorflow . python . ops import tensor_array_ops <nl> <nl> LIMIT_PYTHON_ITERATIONS = True <nl> def for_stmt ( iter_ , extra_test , body , get_state , set_state , init_vars ) : <nl> Tuple containing the final state . <nl> " " " <nl> if tensor_util . is_tensor ( iter_ ) : <nl> - return _known_len_tf_for_stmt ( iter_ , extra_test , body , get_state , set_state , <nl> - init_vars ) <nl> + if tensors . is_range_tensor ( iter_ ) : <nl> + return _tf_range_for_stmt ( iter_ , extra_test , body , get_state , set_state , <nl> + init_vars ) <nl> + else : <nl> + return _known_len_tf_for_stmt ( iter_ , extra_test , body , get_state , <nl> + set_state , init_vars ) <nl> <nl> if isinstance ( iter_ , dataset_ops . DatasetV2 ) : <nl> return _tf_dataset_for_stmt ( iter_ , extra_test , body , get_state , set_state , <nl> def while_cond ( iterate_index , * loop_vars ) : <nl> init_vars = ( 0 , ) + init_vars , <nl> opts = dict ( maximum_iterations = n ) ) <nl> <nl> - # Dropping the iteration index because it ' s not syntactically visible . <nl> - # TODO ( mdan ) : Don ' t . <nl> + # Note : the iteration index is not returned by the while loop , however <nl> + # if a symbol with the same name exists outside the loop , it will be captured <nl> + # by the loop variables and ultimately updated correctly . <nl> + if isinstance ( results , ( tuple , list ) ) : <nl> + assert len ( results ) > = 1 # Has at least the iterate . <nl> + if len ( results ) > 1 : <nl> + results = results [ 1 : ] <nl> + else : <nl> + results = ( ) <nl> + <nl> + return results <nl> + <nl> + <nl> + def _tf_range_for_stmt ( iter_ , extra_test , body , get_state , set_state , <nl> + init_vars ) : <nl> + " " " Overload of for_stmt that iterates over a TF range ( and elides it ) . " " " <nl> + _disallow_undefs_into_loop ( * init_vars ) <nl> + <nl> + start , limit , delta = iter_ . op . inputs <nl> + <nl> + def while_body ( iterate , * loop_vars ) : <nl> + new_vars = body ( iterate , * loop_vars ) <nl> + <nl> + loop_vars = ( iterate + delta , ) <nl> + if new_vars : <nl> + loop_vars + = new_vars <nl> + <nl> + return loop_vars <nl> + <nl> + def while_cond ( iterate , * loop_vars ) : <nl> + main_test = math_ops . logical_or ( <nl> + math_ops . logical_and ( delta > = 0 , iterate < limit ) , <nl> + math_ops . 
logical_and ( delta < 0 , iterate > limit ) ) <nl> + if extra_test is not None : <nl> + return control_flow_ops . cond ( <nl> + main_test , lambda : extra_test ( * loop_vars ) , lambda : False ) <nl> + return main_test <nl> + <nl> + # This specific dtype is required by while_loop . <nl> + maximum_iterations = math_ops . cast ( <nl> + misc . get_range_len ( start , limit , delta ) , dtypes . int32 ) <nl> + <nl> + results = _tf_while_stmt ( <nl> + while_cond , <nl> + while_body , <nl> + get_state , <nl> + set_state , <nl> + init_vars = ( start , ) + init_vars , <nl> + opts = dict ( maximum_iterations = maximum_iterations ) ) <nl> + <nl> + # Note : the iteration index is not returned by the while loop , however <nl> + # if a symbol with the same name exists outside the loop , it will be captured <nl> + # by the loop variables and ultimately updated correctly . <nl> if isinstance ( results , ( tuple , list ) ) : <nl> assert len ( results ) > = 1 # Has at least the iterate . <nl> if len ( results ) > 1 : <nl> mmm a / tensorflow / python / autograph / operators / control_flow_test . py <nl> ppp b / tensorflow / python / autograph / operators / control_flow_test . py <nl> <nl> from tensorflow . python . framework import test_util <nl> from tensorflow . python . ops import control_flow_ops <nl> from tensorflow . python . ops import gen_math_ops <nl> + from tensorflow . python . ops import math_ops <nl> from tensorflow . python . ops import variables <nl> from tensorflow . python . platform import test <nl> <nl> def test_tensor ( self ) : <nl> init_vars = ( 0 , ) ) <nl> self . assertEqual ( self . evaluate ( s ) , ( 1234 , ) ) <nl> <nl> + def test_range_tensor ( self ) : <nl> + with ops . Graph ( ) . as_default ( ) : <nl> + s = control_flow . for_stmt ( <nl> + math_ops . range ( 5 ) , <nl> + extra_test = lambda s : True , <nl> + body = lambda i , s : ( s * 10 + i , ) , <nl> + get_state = lambda : ( ) , <nl> + set_state = lambda _ : None , <nl> + init_vars = ( 0 , ) ) <nl> + self . assertEqual ( self . evaluate ( s ) , ( 1234 , ) ) <nl> + <nl> + def test_range_tensor_explicit_limit_delta ( self ) : <nl> + with ops . Graph ( ) . as_default ( ) : <nl> + s = control_flow . for_stmt ( <nl> + math_ops . range ( - 17 , - 3 , 5 ) , <nl> + extra_test = lambda s : True , <nl> + body = lambda i , s : ( s * 100 + i , ) , <nl> + get_state = lambda : ( ) , <nl> + set_state = lambda _ : None , <nl> + init_vars = ( 0 , ) ) <nl> + self . assertEqual ( self . evaluate ( s ) , ( - 171207 , ) ) <nl> + <nl> + def test_range_tensor_negative_delta ( self ) : <nl> + with ops . Graph ( ) . as_default ( ) : <nl> + s = control_flow . for_stmt ( <nl> + math_ops . range ( 17 , 3 , - 5 ) , <nl> + extra_test = lambda s : True , <nl> + body = lambda i , s : ( s * 100 + i , ) , <nl> + get_state = lambda : ( ) , <nl> + set_state = lambda _ : None , <nl> + init_vars = ( 0 , ) ) <nl> + self . assertEqual ( self . evaluate ( s ) , ( 171207 , ) ) <nl> + <nl> def test_tensor_with_extra_test_only_python_state ( self ) : <nl> class MutableObject ( object ) : <nl> field_1 = constant_op . constant ( 0 , dtype = dtypes . int32 ) <nl> mmm a / tensorflow / python / autograph / utils / misc . py <nl> ppp b / tensorflow / python / autograph / utils / misc . py <nl> <nl> <nl> from tensorflow . python . framework import ops <nl> from tensorflow . python . ops import array_ops <nl> + from tensorflow . python . ops import gen_math_ops <nl> + from tensorflow . python . 
ops import math_ops <nl> <nl> <nl> def alias_tensors ( * args ) : <nl> def capitalize_initial ( s ) : <nl> if s : <nl> return s [ 0 ] . upper ( ) + s [ 1 : ] <nl> return s <nl> + <nl> + <nl> + def get_range_len ( start , limit , delta ) : <nl> + dist = ops . convert_to_tensor ( limit - start ) <nl> + unadjusted_len = dist / / delta <nl> + adjustment = math_ops . cast ( <nl> + gen_math_ops . not_equal ( dist % delta , <nl> + array_ops . zeros_like ( unadjusted_len ) ) , dist . dtype ) <nl> + final_len = unadjusted_len + adjustment <nl> + return gen_math_ops . maximum ( final_len , array_ops . zeros_like ( final_len ) ) <nl> mmm a / tensorflow / python / autograph / utils / misc_test . py <nl> ppp b / tensorflow / python / autograph / utils / misc_test . py <nl> <nl> from __future__ import print_function <nl> <nl> from tensorflow . python . autograph . utils import misc <nl> + from tensorflow . python . eager import def_function <nl> + from tensorflow . python . framework import constant_op <nl> from tensorflow . python . framework import test_util <nl> from tensorflow . python . framework . constant_op import constant <nl> from tensorflow . python . ops . variables import Variable <nl> def test_alias_tensors ( self ) : <nl> with self . cached_session ( ) as sess : <nl> self . assertEqual ( 1 , self . evaluate ( new_a ) ) <nl> <nl> + def test_get_range_len ( self ) : <nl> + get_range_as_graph = def_function . function ( misc . get_range_len ) <nl> + test_range = [ ( i , constant_op . constant ( i ) ) for i in range ( - 3 , 3 ) ] <nl> + results = [ ] <nl> + for i , ti in test_range : <nl> + for j , tj in test_range : <nl> + for k , tk in test_range : <nl> + if k = = 0 : <nl> + continue <nl> + results . append ( ( ( i , j , k ) , get_range_as_graph ( ti , tj , tk ) ) ) <nl> + <nl> + for ( i , j , k ) , result_tensor in results : <nl> + self . assertEqual ( <nl> + len ( list ( range ( i , j , k ) ) ) , self . evaluate ( result_tensor ) ) <nl> + <nl> <nl> if __name__ = = ' __main__ ' : <nl> test . main ( ) <nl> mmm a / tensorflow / python / autograph / utils / tensors . py <nl> ppp b / tensorflow / python / autograph / utils / tensors . py <nl> def is_tensor_list ( t ) : <nl> # construct . <nl> return ( tensor_util . is_tensor ( t ) and t . dtype = = dtypes . variant and <nl> not t . shape . ndims ) <nl> + <nl> + <nl> + def is_range_tensor ( t ) : <nl> + " " " Returns True if a tensor is the result of a tf . range op . Best effort . " " " <nl> + return tensor_util . is_tensor ( t ) and hasattr ( t , ' op ' ) and t . op . type = = ' Range ' <nl> mmm a / tensorflow / python / autograph / utils / tensors_test . py <nl> ppp b / tensorflow / python / autograph / utils / tensors_test . py <nl> <nl> from tensorflow . python . framework import constant_op <nl> from tensorflow . python . framework import dtypes <nl> from tensorflow . python . ops import list_ops <nl> + from tensorflow . python . ops import math_ops <nl> from tensorflow . python . ops import tensor_array_ops <nl> from tensorflow . python . platform import test <nl> <nl> def test_is_tensor_list ( self ) : <nl> self . assertFalse ( tensors . is_tensor_list ( self . _simple_list_of_tensors ( ) ) ) <nl> self . assertFalse ( tensors . is_tensor_list ( None ) ) <nl> <nl> + def is_range_tensor ( self ) : <nl> + self . assertTrue ( tensors . is_range_tensor ( math_ops . range ( 1 ) ) ) <nl> + self . assertTrue ( tensors . is_range_tensor ( math_ops . range ( 1 , 2 ) ) ) <nl> + self . assertTrue ( tensors . is_range_tensor ( math_ops . 
range ( 1 , 2 , 3 ) ) ) <nl> + self . assertFalse ( tensors . is_range_tensor ( None ) ) <nl> + self . assertFalse ( tensors . is_range_tensor ( constant_op . constant ( range ( 1 ) ) ) ) <nl> + <nl> <nl> if __name__ = = ' __main__ ' : <nl> test . main ( ) <nl>
|
Optimize away the calculation of a range tensor for the pattern ` for i in tf . range ` . Along with the performance improvement , this is more compatible with XLA because it avoids generating dynamically - shaped tensors . Fixes .
|
tensorflow/tensorflow
|
dd3ec9260259a5dba183f9271f517c06d7c69c82
|
2019-07-10T13:53:29Z
|
mmm a / src / flag - definitions . h <nl> ppp b / src / flag - definitions . h <nl> DEFINE_BOOL ( turbo_store_elimination , true , <nl> DEFINE_BOOL ( trace_store_elimination , false , " trace store elimination " ) <nl> DEFINE_BOOL ( turbo_rewrite_far_jumps , true , <nl> " rewrite far to near jumps ( ia32 , x64 ) " ) <nl> - DEFINE_BOOL ( experimental_inline_promise_constructor , false , <nl> + DEFINE_BOOL ( experimental_inline_promise_constructor , true , <nl> " inline the Promise constructor in TurboFan " ) <nl> <nl> # ifdef DISABLE_UNTRUSTED_CODE_MITIGATIONS <nl>
|
Reland " [ turbofan ] Enable Promise constructor inlining by default "
|
v8/v8
|
b70dd880f2a0698b33c25693c37b5f270c9ad984
|
2018-04-17T09:35:44Z
|
mmm a / atom / browser / api / atom_api_protocol . cc <nl> ppp b / atom / browser / api / atom_api_protocol . cc <nl> mate : : ObjectTemplateBuilder Protocol : : GetObjectTemplateBuilder ( <nl> . SetMethod ( " registerHttpProtocol " , <nl> & Protocol : : RegisterProtocol < URLRequestFetchJob > ) <nl> . SetMethod ( " unregisterProtocol " , & Protocol : : UnregisterProtocol ) <nl> - . SetMethod ( " isHandledProtocol " , & Protocol : : IsHandledProtocol ) ; <nl> + . SetMethod ( " isHandledProtocol " , & Protocol : : IsHandledProtocol ) <nl> + . SetMethod ( " interceptStringProtocol " , <nl> + & Protocol : : InterceptProtocol < URLRequestStringJob > ) <nl> + . SetMethod ( " interceptBufferProtocol " , <nl> + & Protocol : : InterceptProtocol < URLRequestBufferJob > ) <nl> + . SetMethod ( " interceptFileProtocol " , <nl> + & Protocol : : InterceptProtocol < UrlRequestAsyncAsarJob > ) <nl> + . SetMethod ( " interceptHttpProtocol " , <nl> + & Protocol : : InterceptProtocol < URLRequestFetchJob > ) ; <nl> } <nl> <nl> void Protocol : : RegisterStandardSchemes ( <nl> mmm a / atom / browser / api / atom_api_protocol . h <nl> ppp b / atom / browser / api / atom_api_protocol . h <nl> <nl> <nl> # include " atom / browser / net / atom_url_request_job_factory . h " <nl> # include " base / callback . h " <nl> + # include " base / containers / scoped_ptr_hash_map . h " <nl> # include " content / public / browser / browser_thread . h " <nl> # include " native_mate / arguments . h " <nl> # include " native_mate / dictionary . h " <nl> class Protocol : public mate : : Wrappable { <nl> const BooleanCallback & callback ) ; <nl> bool IsHandledProtocolInIO ( const std : : string & scheme ) ; <nl> <nl> + / / Replace the protocol handler with a new one . <nl> + template < typename RequestJob > <nl> + void InterceptProtocol ( const std : : string & scheme , <nl> + const Handler & handler , <nl> + mate : : Arguments * args ) { <nl> + CompletionCallback callback ; <nl> + args - > GetNext ( & callback ) ; <nl> + content : : BrowserThread : : PostTaskAndReplyWithResult ( <nl> + content : : BrowserThread : : IO , FROM_HERE , <nl> + base : : Bind ( & Protocol : : InterceptProtocolInIO < RequestJob > , <nl> + base : : Unretained ( this ) , scheme , handler ) , <nl> + base : : Bind ( & Protocol : : OnIOCompleted , <nl> + base : : Unretained ( this ) , callback ) ) ; <nl> + } <nl> + template < typename RequestJob > <nl> + ProtocolError InterceptProtocolInIO ( const std : : string & scheme , <nl> + const Handler & handler ) { <nl> + if ( ! job_factory_ - > IsHandledProtocol ( scheme ) ) <nl> + return PROTOCOL_NOT_REGISTERED ; <nl> + / / It is possible a protocol is handled but can not be intercepted . <nl> + if ( ! job_factory_ - > HasProtocolHandler ( scheme ) ) <nl> + return PROTOCOL_FAIL ; <nl> + if ( ContainsKey ( original_protocols_ , scheme ) ) <nl> + return PROTOCOL_INTERCEPTED ; <nl> + scoped_ptr < CustomProtocolHandler < RequestJob > > protocol_handler ( <nl> + new CustomProtocolHandler < RequestJob > ( <nl> + isolate ( ) , request_context_getter_ , handler ) ) ; <nl> + original_protocols_ . set ( <nl> + scheme , <nl> + job_factory_ - > ReplaceProtocol ( scheme , protocol_handler . Pass ( ) ) ) ; <nl> + return PROTOCOL_OK ; <nl> + } <nl> + <nl> / / Convert error code to JS exception and call the callback . 
<nl> void OnIOCompleted ( const CompletionCallback & callback , ProtocolError error ) ; <nl> <nl> class Protocol : public mate : : Wrappable { <nl> <nl> scoped_refptr < net : : URLRequestContextGetter > request_context_getter_ ; <nl> <nl> + / / Map that stores the original protocols of schemes . <nl> + using OriginalProtocolsMap = base : : ScopedPtrHashMap < <nl> + std : : string , <nl> + scoped_ptr < net : : URLRequestJobFactory : : ProtocolHandler > > ; <nl> + OriginalProtocolsMap original_protocols_ ; <nl> + <nl> AtomURLRequestJobFactory * job_factory_ ; / / weak ref <nl> <nl> DISALLOW_COPY_AND_ASSIGN ( Protocol ) ; <nl>
|
Implement protocol . interceptProtocol
|
electron/electron
|
773e932e987bfafc2fa6d00ac8748c3b07b29023
|
2015-08-13T12:10:05Z
|
mmm a / docs / en / sql - reference / statements / create . md <nl> ppp b / docs / en / sql - reference / statements / create . md <nl> There can be other clauses after the ` ENGINE ` clause in the query . See detailed <nl> <nl> # # # Default Values { # create - default - values } <nl> <nl> - The column description can specify an expression for a default value , in one of the following ways : ` DEFAULT expr ` , ` MATERIALIZED expr ` , ` ALIAS expr ` . <nl> + The column description can specify an expression for a default value , in one of the following ways : ` DEFAULT expr ` , ` MATERIALIZED expr ` , ` ALIAS expr ` . <nl> Example : ` URLDomain String DEFAULT domain ( URL ) ` . <nl> <nl> If an expression for the default value is not defined , the default values will be set to zeros for numbers , empty strings for strings , empty arrays for arrays , and ` 0000 - 00 - 00 ` for dates or ` 0000 - 00 - 00 00 : 00 : 00 ` for dates with time . NULLs are not supported . <nl>
|
Update create . md
|
ClickHouse/ClickHouse
|
8139ce598542154b80b3ac972ad3fdb3e1b388e3
|
2020-06-20T07:32:33Z
|
mmm a / Telegram / SourceFiles / window / window_controller . cpp <nl> ppp b / Telegram / SourceFiles / window / window_controller . cpp <nl> void Controller : : showAccount ( not_null < Main : : Account * > account ) { <nl> setupIntro ( ) ; <nl> _widget . updateGlobalMenu ( ) ; <nl> } <nl> - if ( was ) { <nl> - was - > session ( ) . updates ( ) . updateOnline ( ) ; <nl> - } <nl> } , _accountLifetime ) ; <nl> } <nl> <nl>
|
Fix crash in update online on logout .
|
telegramdesktop/tdesktop
|
2ef47222f45d95b021f67475ddc7916f5b779557
|
2020-06-30T13:17:07Z
|
mmm a / tensorflow / core / distributed_runtime / rpc / rpc_rendezvous_mgr . cc <nl> ppp b / tensorflow / core / distributed_runtime / rpc / rpc_rendezvous_mgr . cc <nl> class RpcRecvTensorCall : public BaseRecvTensorCall { <nl> / / RpcRecvTensorCall , and it always sets this - > wi_ to null when <nl> / / a call object is released to it , we can assert that this - > wi_ is <nl> / / always null at the point of deletion . <nl> - CHECK_EQ ( nullptr , wi_ ) <nl> + CHECK_EQ ( static_cast < WorkerInterface * > ( nullptr ) , wi_ ) <nl> < < " Leaking WorkerInterface in RpcRecvTensorCall destructor . " ; <nl> } <nl> <nl>
|
Merge pull request from mrry / fix_win_build
|
tensorflow/tensorflow
|
2852ae057b6b0ef2a0b30b34df43a562cc00acf4
|
2016-12-27T19:16:31Z
|
mmm a / tensorflow / compiler / mlir / xla / transforms / hlo_legalize_to_lhlo . cc <nl> ppp b / tensorflow / compiler / mlir / xla / transforms / hlo_legalize_to_lhlo . cc <nl> limitations under the License . <nl> # include " mlir / Transforms / DialectConversion . h " / / TF : local_config_mlir <nl> # include " tensorflow / compiler / mlir / xla / ir / hlo_ops . h " <nl> # include " tensorflow / compiler / mlir / xla / ir / lhlo_ops . h " <nl> + # include " tensorflow / compiler / mlir / xla / transforms / passes . h " <nl> + # include " tensorflow / compiler / mlir / xla / transforms / rewriters . h " <nl> <nl> namespace mlir { <nl> namespace xla_hlo { <nl> class HloToLhloTensorStoreConverter : public ConversionPattern { <nl> } <nl> } ; <nl> <nl> - void populateHLOToLHLOConversionPattern ( MLIRContext * context , <nl> - OwningRewritePatternList * patterns ) { <nl> - patterns <nl> - - > insert < HloToLhloOpConverter < xla_hlo : : AddOp , xla_lhlo : : AddOp > , <nl> - HloToLhloOpConverter < xla_hlo : : AndOp , xla_lhlo : : AndOp > , <nl> - HloToLhloOpConverter < xla_hlo : : BroadcastInDimOp , <nl> - xla_lhlo : : BroadcastInDimOp > , <nl> - HloToLhloOpConverter < xla_hlo : : CompareOp , xla_lhlo : : CompareOp > , <nl> - HloToLhloOpConverter < xla_hlo : : DivOp , xla_lhlo : : DivOp > , <nl> - HloToLhloOpConverter < xla_hlo : : ExpOp , xla_lhlo : : ExpOp > , <nl> - HloToLhloOpConverter < xla_hlo : : IotaOp , xla_lhlo : : IotaOp > , <nl> - HloToLhloOpConverter < xla_hlo : : MaxOp , xla_lhlo : : MaxOp > , <nl> - HloToLhloOpConverter < xla_hlo : : MinOp , xla_lhlo : : MinOp > , <nl> - HloToLhloOpConverter < xla_hlo : : MulOp , xla_lhlo : : MulOp > , <nl> - HloToLhloOpConverter < xla_hlo : : SelectOp , xla_lhlo : : SelectOp > , <nl> - HloToLhloOpConverter < xla_hlo : : SubOp , xla_lhlo : : SubOp > , <nl> - HloToLhloTensorLoadConverter , HloToLhloTensorStoreConverter > ( <nl> - context ) ; <nl> - } <nl> - <nl> / / Lowers from HLO dialect to LHLO dialect allocating / deallocating temporary <nl> / / buffers if necessary . 
<nl> / / <nl> struct HloLegalizeToLhlo : public FunctionPass < HloLegalizeToLhlo > { <nl> <nl> } / / namespace <nl> <nl> + void populateHLOToLHLOConversionPattern ( MLIRContext * context , <nl> + OwningRewritePatternList * patterns ) { <nl> + / / clang - format off <nl> + patterns - > insert < <nl> + HloToLhloOpConverter < xla_hlo : : AddOp , xla_lhlo : : AddOp > , <nl> + HloToLhloOpConverter < xla_hlo : : AndOp , xla_lhlo : : AndOp > , <nl> + HloToLhloOpConverter < xla_hlo : : BroadcastInDimOp , <nl> + xla_lhlo : : BroadcastInDimOp > , <nl> + HloToLhloOpConverter < xla_hlo : : CompareOp , xla_lhlo : : CompareOp > , <nl> + HloToLhloOpConverter < xla_hlo : : DivOp , xla_lhlo : : DivOp > , <nl> + HloToLhloOpConverter < xla_hlo : : ExpOp , xla_lhlo : : ExpOp > , <nl> + HloToLhloOpConverter < xla_hlo : : IotaOp , xla_lhlo : : IotaOp > , <nl> + HloToLhloOpConverter < xla_hlo : : MaxOp , xla_lhlo : : MaxOp > , <nl> + HloToLhloOpConverter < xla_hlo : : MinOp , xla_lhlo : : MinOp > , <nl> + HloToLhloOpConverter < xla_hlo : : MulOp , xla_lhlo : : MulOp > , <nl> + HloToLhloOpConverter < xla_hlo : : SelectOp , xla_lhlo : : SelectOp > , <nl> + HloToLhloOpConverter < xla_hlo : : SubOp , xla_lhlo : : SubOp > , <nl> + HloToLhloTensorLoadConverter , HloToLhloTensorStoreConverter <nl> + > ( context ) ; <nl> + / / clang - format on <nl> + } <nl> + <nl> std : : unique_ptr < OpPassBase < FuncOp > > createLegalizeToLhloPass ( ) { <nl> return absl : : make_unique < HloLegalizeToLhlo > ( ) ; <nl> } <nl> mmm a / tensorflow / compiler / mlir / xla / transforms / rewriters . h <nl> ppp b / tensorflow / compiler / mlir / xla / transforms / rewriters . h <nl> void PopulateGeneralDotOpLoweringPatterns ( OwningRewritePatternList * patterns , <nl> void PopulateXlaToStdPatterns ( OwningRewritePatternList * patterns , <nl> MLIRContext * ctx ) ; <nl> <nl> + / / Collection of rewrite patterns for lowering of HLO to LHLO dialect . <nl> + void populateHLOToLHLOConversionPattern ( MLIRContext * context , <nl> + OwningRewritePatternList * patterns ) ; <nl> + <nl> } / / namespace xla_hlo <nl> } / / namespace mlir <nl> <nl>
|
Expose HLO to LHLO legalization patterns in a header file .
|
tensorflow/tensorflow
|
641ebb45527b210e091467a7f6d0d4085d4beefa
|
2019-11-12T13:13:17Z
|
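Editorial note (not part of the dataset row above): a minimal C++ sketch of how a downstream pass could reuse the pattern list now that the commit exposes populateHLOToLHLOConversionPattern in rewriters.h. The function name lowerHloToLhlo is invented for illustration, and the conversion-target setup is deliberately omitted.

#include "mlir/Transforms/DialectConversion.h"  // TF:local_config_mlir
#include "tensorflow/compiler/mlir/xla/transforms/rewriters.h"

// Hypothetical consumer: collect the exported HLO->LHLO patterns and hand
// them to a dialect conversion, instead of re-listing every converter.
void lowerHloToLhlo(mlir::FuncOp func) {
  mlir::MLIRContext *context = func.getContext();
  mlir::OwningRewritePatternList patterns;
  mlir::xla_hlo::populateHLOToLHLOConversionPattern(context, &patterns);
  // A ConversionTarget and an applyPartialConversion(...) call would follow,
  // as in the HloLegalizeToLhlo pass itself; omitted in this sketch.
}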
mmm a / CMakeLists . txt <nl> ppp b / CMakeLists . txt <nl> if ( INTERN_BUILD_MOBILE AND NOT BUILD_CAFFE2_MOBILE ) <nl> set ( USE_STATIC_DISPATCH ON ) <nl> set ( INTERN_DISABLE_ONNX ON ) <nl> set ( INTERN_DISABLE_AUTOGRAD ON ) <nl> + set ( INTERN_USE_EIGEN_BLAS ON ) <nl> endif ( ) <nl> <nl> # mmm [ Utils <nl> mmm a / android / pytorch_android / CMakeLists . txt <nl> ppp b / android / pytorch_android / CMakeLists . txt <nl> import_static_lib ( libc10 ) <nl> import_static_lib ( libnnpack ) <nl> import_static_lib ( libqnnpack ) <nl> import_static_lib ( libpytorch_qnnpack ) <nl> + import_static_lib ( libeigen_blas ) <nl> import_static_lib ( libcpuinfo ) <nl> import_static_lib ( libclog ) <nl> <nl> target_link_libraries ( pytorch <nl> libnnpack <nl> libqnnpack <nl> libpytorch_qnnpack <nl> + libeigen_blas <nl> libcpuinfo <nl> libclog <nl> ) <nl> mmm a / cmake / Dependencies . cmake <nl> ppp b / cmake / Dependencies . cmake <nl> set ( CONFU_DEPENDENCIES_SOURCE_DIR $ { PROJECT_BINARY_DIR } / confu - srcs <nl> set ( CONFU_DEPENDENCIES_BINARY_DIR $ { PROJECT_BINARY_DIR } / confu - deps <nl> CACHE PATH " Confu - style dependencies binary directory " ) <nl> <nl> + # mmm [ Eigen BLAS for Mobile <nl> + if ( INTERN_BUILD_MOBILE AND INTERN_USE_EIGEN_BLAS ) <nl> + set ( USE_BLAS 1 ) <nl> + include ( $ { CMAKE_CURRENT_LIST_DIR } / External / EigenBLAS . cmake ) <nl> + list ( APPEND Caffe2_DEPENDENCY_LIBS eigen_blas ) <nl> + endif ( ) <nl> + <nl> # mmm [ pthreadpool <nl> # QNNPACK and NNPACK both depend on pthreadpool , but when building with libtorch <nl> # they should use the pthreadpool implementation under caffe2 / utils / threadpool <nl> new file mode 100644 <nl> index 000000000000 . . e24ff5c66d97 <nl> mmm / dev / null <nl> ppp b / cmake / External / EigenBLAS . cmake <nl> <nl> + if ( __EIGEN_BLAS_INCLUDED ) <nl> + return ( ) <nl> + endif ( ) <nl> + set ( __EIGEN_BLAS_INCLUDED TRUE ) <nl> + <nl> + if ( NOT INTERN_BUILD_MOBILE OR NOT INTERN_USE_EIGEN_BLAS ) <nl> + return ( ) <nl> + endif ( ) <nl> + <nl> + # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> + # Eigen BLAS is built together with Libtorch mobile . <nl> + # By default , it builds code from third - party / eigen / blas submodule . <nl> + # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> + <nl> + set ( CAFFE2_THIRD_PARTY_ROOT $ { PROJECT_SOURCE_DIR } / third_party ) <nl> + set ( EIGEN_BLAS_SRC_DIR " $ { CAFFE2_THIRD_PARTY_ROOT } / eigen / blas " CACHE STRING " Eigen BLAS source directory " ) <nl> + <nl> + set ( EigenBlas_SRCS <nl> + $ { EIGEN_BLAS_SRC_DIR } / single . cpp <nl> + $ { EIGEN_BLAS_SRC_DIR } / double . cpp <nl> + $ { EIGEN_BLAS_SRC_DIR } / complex_single . cpp <nl> + $ { EIGEN_BLAS_SRC_DIR } / complex_double . cpp <nl> + $ { EIGEN_BLAS_SRC_DIR } / xerbla . cpp <nl> + $ { EIGEN_BLAS_SRC_DIR } / f2c / srotm . c <nl> + $ { EIGEN_BLAS_SRC_DIR } / f2c / srotmg . c <nl> + $ { EIGEN_BLAS_SRC_DIR } / f2c / drotm . c <nl> + $ { EIGEN_BLAS_SRC_DIR } / f2c / drotmg . c <nl> + $ { EIGEN_BLAS_SRC_DIR } / f2c / lsame . c <nl> + $ { EIGEN_BLAS_SRC_DIR } / f2c / dspmv . c <nl> + $ { EIGEN_BLAS_SRC_DIR } / f2c / ssbmv . c <nl> + $ { EIGEN_BLAS_SRC_DIR } / f2c / chbmv . c <nl> + $ { EIGEN_BLAS_SRC_DIR } / f2c / sspmv . c <nl> + $ { EIGEN_BLAS_SRC_DIR } / f2c / zhbmv . c <nl> + $ { EIGEN_BLAS_SRC_DIR } / f2c / chpmv . 
c <nl> + $ { EIGEN_BLAS_SRC_DIR } / f2c / dsbmv . c <nl> + $ { EIGEN_BLAS_SRC_DIR } / f2c / zhpmv . c <nl> + $ { EIGEN_BLAS_SRC_DIR } / f2c / dtbmv . c <nl> + $ { EIGEN_BLAS_SRC_DIR } / f2c / stbmv . c <nl> + $ { EIGEN_BLAS_SRC_DIR } / f2c / ctbmv . c <nl> + $ { EIGEN_BLAS_SRC_DIR } / f2c / ztbmv . c <nl> + $ { EIGEN_BLAS_SRC_DIR } / f2c / d_cnjg . c <nl> + $ { EIGEN_BLAS_SRC_DIR } / f2c / r_cnjg . c <nl> + $ { EIGEN_BLAS_SRC_DIR } / f2c / complexdots . c <nl> + ) <nl> + <nl> + add_library ( eigen_blas $ { EigenBlas_SRCS } ) <nl> + <nl> + # We build static versions of eigen blas but link into a shared library , so they need PIC . <nl> + set_property ( TARGET eigen_blas PROPERTY POSITION_INDEPENDENT_CODE ON ) <nl> + <nl> + install ( TARGETS eigen_blas <nl> + LIBRARY DESTINATION lib <nl> + ARCHIVE DESTINATION lib ) <nl>
|
add eigen blas for mobile build ( )
|
pytorch/pytorch
|
d6e3aed03223b01c5ea40da4a159fbc24b466deb
|
2019-09-20T22:45:11Z
|
mmm a / ios / sdk / WeexSDK / Sources / Component / WXScrollerComponent . mm <nl> ppp b / ios / sdk / WeexSDK / Sources / Component / WXScrollerComponent . mm <nl> - ( void ) _layoutPlatform <nl> _flexCssNode - > calculateLayout ( renderPageSize ) ; <nl> _flexCssNode - > setParent ( parent , _flexCssNode ) ; <nl> <nl> + / * We must clear BFCs because we have set parent of _flexCSSNode to nullptr and <nl> manually called its calculateLayout method . This will cause a non - bfc layout node <nl> to have items in its BFCs vector . Later , a wild pointer may cause a crash . * / <nl> + _flexCssNode - > clearBFCs ( ) ; <nl> <nl> / / set origin and size back <nl> _flexCssNode - > rewriteLayoutResult ( left , top , width , height ) ; <nl> } <nl> mmm a / weex_core / Source / core / layout / layout . h <nl> ppp b / weex_core / Source / core / layout / layout . h <nl> namespace WeexCore { <nl> <nl> markDirty ( ) ; <nl> } <nl> + <nl> + inline void clearBFCs ( ) { <nl> + BFCs . clear ( ) ; <nl> + } <nl> <nl> inline void addChildAt ( WXCoreLayoutNode * const child , Index index ) { <nl> mChildList . insert ( mChildList . begin ( ) + index , child ) ; <nl>
|
Merge pull request from apache / fix - rtl - crash
|
apache/incubator-weex
|
542b319e4fd49c3b7318790f82e28174fbfa8430
|
2019-10-09T12:12:47Z
|
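Editorial note (not part of the dataset row above): a self-contained C++ sketch of the failure mode the new comment describes, i.e. stale raw pointers left in a BFC cache after a manual layout pass on a temporarily detached node. Every name except clearBFCs is invented for this example.

#include <vector>

struct LayoutNode {
  LayoutNode* parent = nullptr;
  std::vector<LayoutNode*> BFCs;  // raw pointers recorded during layout

  void calculateLayout() {
    // A node laid out with parent == nullptr acts as a layout root and may
    // record entries in BFCs even though it is not a real BFC node.
    if (parent == nullptr) BFCs.push_back(this);
  }
  void clearBFCs() { BFCs.clear(); }  // drop stale pointers
};

void manualLayout(LayoutNode& node, LayoutNode* realParent) {
  node.parent = nullptr;     // detach for a standalone layout pass
  node.calculateLayout();
  node.parent = realParent;  // reattach
  node.clearBFCs();          // otherwise BFCs may later hold wild pointers
}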
mmm a / hphp / runtime / base / thread - safe - setlocale . cpp <nl> ppp b / hphp / runtime / base / thread - safe - setlocale . cpp <nl> IMPLEMENT_THREAD_LOCAL ( struct lconv , g_thread_safe_localeconv_data ) ; <nl> static const locale_t s_null_locale = ( locale_t ) 0 ; <nl> <nl> ThreadSafeLocaleHandler : : ThreadSafeLocaleHandler ( ) { <nl> - m_category_locale_map = { <nl> # define FILL_IN_CATEGORY_LOCALE_MAP ( category ) \ <nl> { category , category # # _MASK , # category , " " } <nl> + m_category_locale_map = { <nl> FILL_IN_CATEGORY_LOCALE_MAP ( LC_CTYPE ) , <nl> FILL_IN_CATEGORY_LOCALE_MAP ( LC_NUMERIC ) , <nl> FILL_IN_CATEGORY_LOCALE_MAP ( LC_TIME ) , <nl> FILL_IN_CATEGORY_LOCALE_MAP ( LC_COLLATE ) , <nl> FILL_IN_CATEGORY_LOCALE_MAP ( LC_MONETARY ) , <nl> + # ifndef _MSC_VER <nl> FILL_IN_CATEGORY_LOCALE_MAP ( LC_MESSAGES ) , <nl> + # endif <nl> FILL_IN_CATEGORY_LOCALE_MAP ( LC_ALL ) , <nl> - # ifndef __APPLE__ <nl> + # if ! defined ( __APPLE__ ) & & ! defined ( _MSC_VER ) <nl> FILL_IN_CATEGORY_LOCALE_MAP ( LC_PAPER ) , <nl> FILL_IN_CATEGORY_LOCALE_MAP ( LC_NAME ) , <nl> FILL_IN_CATEGORY_LOCALE_MAP ( LC_ADDRESS ) , <nl> ThreadSafeLocaleHandler : : ThreadSafeLocaleHandler ( ) { <nl> FILL_IN_CATEGORY_LOCALE_MAP ( LC_MEASUREMENT ) , <nl> FILL_IN_CATEGORY_LOCALE_MAP ( LC_IDENTIFICATION ) , <nl> # endif <nl> - # undef FILL_IN_CATEGORY_LOCALE_MAP <nl> } ; <nl> + # undef FILL_IN_CATEGORY_LOCALE_MAP <nl> <nl> + # ifdef _MSC_VER <nl> + _configthreadlocale ( _ENABLE_PER_THREAD_LOCALE ) ; <nl> + : : setlocale ( LC_ALL , " C " ) ; <nl> + # else <nl> m_locale = s_null_locale ; <nl> + # endif <nl> + <nl> reset ( ) ; <nl> } <nl> <nl> ThreadSafeLocaleHandler : : ~ ThreadSafeLocaleHandler ( ) { <nl> } <nl> <nl> void ThreadSafeLocaleHandler : : reset ( ) { <nl> + # ifdef _MSC_VER <nl> + : : setlocale ( LC_ALL , " C " ) ; <nl> + # else <nl> if ( m_locale ! = s_null_locale ) { <nl> freelocale ( m_locale ) ; <nl> m_locale = s_null_locale ; <nl> } <nl> <nl> uselocale ( LC_GLOBAL_LOCALE ) ; <nl> + # endif <nl> } <nl> <nl> const char * ThreadSafeLocaleHandler : : actuallySetLocale ( <nl> const char * ThreadSafeLocaleHandler : : actuallySetLocale ( <nl> return nullptr ; <nl> } <nl> <nl> + # ifdef _MSC_VER <nl> + / / Windows doesn ' t accept POSIX as a valid <nl> + / / locale , use C instead . <nl> + if ( ! strcmp ( locale_cstr , " POSIX " ) ) <nl> + locale_cstr = " C " ; <nl> + <nl> + if ( : : setlocale ( category , locale_cstr ) = = nullptr ) <nl> + return nullptr ; <nl> + # else <nl> locale_t new_locale = newlocale ( <nl> m_category_locale_map [ category ] . category_mask , <nl> locale_cstr , <nl> const char * ThreadSafeLocaleHandler : : actuallySetLocale ( <nl> <nl> m_locale = new_locale ; <nl> uselocale ( m_locale ) ; <nl> + # endif <nl> <nl> if ( category = = LC_ALL ) { <nl> if ( strchr ( locale_cstr , ' ; ' ) ! = nullptr ) { <nl> const char * ThreadSafeLocaleHandler : : actuallySetLocale ( <nl> return locale_cstr ; <nl> } <nl> <nl> - # ifdef __APPLE__ <nl> + # ifdef _MSC_VER <nl> + struct lconv * ThreadSafeLocaleHandler : : localeconv ( ) { <nl> + / / We ' ve setup locales to be thread local , so this is no <nl> + / / problem at all . <nl> + struct lconv * ptr = g_thread_safe_localeconv_data . 
get ( ) ; <nl> + struct lconv * l = : : localeconv ( ) ; <nl> + memcpy ( ptr , l , sizeof ( struct lconv ) ) ; <nl> + return ptr ; <nl> + } <nl> + # elif defined ( __APPLE__ ) <nl> struct lconv * ThreadSafeLocaleHandler : : localeconv ( ) { <nl> / / BSD / OS X has localeconv_l , which actually returns data held onto by the <nl> / / locale itself - - and since that ' s thread - local ( since this object instance <nl> mmm a / hphp / runtime / base / thread - safe - setlocale . h <nl> ppp b / hphp / runtime / base / thread - safe - setlocale . h <nl> class ThreadSafeLocaleHandler { <nl> void generate_LC_ALL_String ( ) ; <nl> <nl> std : : vector < CategoryAndLocaleMap > m_category_locale_map ; <nl> + # ifndef _MSC_VER <nl> locale_t m_locale ; <nl> + # endif <nl> } ; <nl> <nl> extern DECLARE_THREAD_LOCAL ( ThreadSafeLocaleHandler , <nl>
|
Add MSVC support to thread - safe - setlocale
|
facebook/hhvm
|
419a35854e1f7d5497040447d4aec8430cc62ab0
|
2015-08-06T02:32:34Z
|
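Editorial note (not part of the dataset row above): a minimal sketch of the per-thread locale pattern the commit adds for MSVC, using only CRT/POSIX calls that exist on the respective platforms (_configthreadlocale, setlocale, newlocale, uselocale). The wrapper name is invented, and cleanup (freelocale) is left out for brevity.

#include <locale.h>

void useThreadLocalCLocale() {
#ifdef _MSC_VER
  // MSVC has no newlocale/uselocale; switch the CRT to per-thread locales,
  // after which plain setlocale only affects the calling thread.
  _configthreadlocale(_ENABLE_PER_THREAD_LOCALE);
  setlocale(LC_ALL, "C");
#else
  // POSIX: build a locale object and install it for this thread only.
  // A real implementation would freelocale() any previously installed object.
  locale_t loc = newlocale(LC_ALL_MASK, "C", (locale_t)0);
  if (loc != (locale_t)0) {
    uselocale(loc);
  }
#endif
}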
mmm a / ChangeLog <nl> ppp b / ChangeLog <nl> <nl> + 2013 - 10 - 16 : Version 3 . 22 . 13 <nl> + <nl> + Do not look up ArrayBuffer on global object in typed array constructor . <nl> + ( issue 2931 ) <nl> + <nl> + Performance and stability improvements on all platforms . <nl> + <nl> + <nl> 2013 - 10 - 15 : Version 3 . 22 . 12 <nl> <nl> Added histograms to track fraction of heap spaces and percentage of <nl> mmm a / src / version . cc <nl> ppp b / src / version . cc <nl> <nl> / / system so their names cannot be changed without changing the scripts . <nl> # define MAJOR_VERSION 3 <nl> # define MINOR_VERSION 22 <nl> - # define BUILD_NUMBER 13 <nl> + # define BUILD_NUMBER 14 <nl> # define PATCH_LEVEL 0 <nl> / / Use 1 for candidates and 0 otherwise . <nl> / / ( Boolean macro values are not supported by all preprocessors . ) <nl>
|
Prepare push to trunk . Now working on version 3 . 22 . 14 .
|
v8/v8
|
0bd3e179e599bc1ed4e862690f8e4fdfab4ef538
|
2013-10-16T08:58:53Z
|
similarity index 100 % <nl> rename from code / artificial_intelligence / src / neural_network / neuralnetwork . py <nl> rename to code / artificial_intelligence / src / neural_network / neural_network . py <nl>
|
Merge pull request from rbjoshi1309 / neural_network
|
OpenGenus/cosmos
|
518f16f21103997bd7dc9e97948b39e0f435c127
|
2020-03-28T15:32:24Z
|
mmm a / tensorflow / contrib / layers / python / layers / optimizers . py <nl> ppp b / tensorflow / contrib / layers / python / layers / optimizers . py <nl> <nl> from tensorflow . python . framework import constant_op <nl> from tensorflow . python . framework import dtypes <nl> from tensorflow . python . framework import ops <nl> + from tensorflow . python . ops import array_ops <nl> from tensorflow . python . ops import clip_ops <nl> from tensorflow . python . ops import control_flow_ops <nl> from tensorflow . python . ops import init_ops <nl> from tensorflow . python . ops import logging_ops <nl> + from tensorflow . python . ops import math_ops <nl> from tensorflow . python . ops import random_ops <nl> from tensorflow . python . ops import variable_scope as vs <nl> from tensorflow . python . ops import variables as vars_ <nl> + from tensorflow . python . training import moving_averages <nl> from tensorflow . python . training import optimizer as optimizer_ <nl> from tensorflow . python . training import training as train <nl> <nl> + <nl> OPTIMIZER_CLS_NAMES = { <nl> " Adagrad " : train . AdagradOptimizer , <nl> " Adam " : train . AdamOptimizer , <nl> class should be sub - class of ` tf . Optimizer ` that implements <nl> gradient_multipliers : dict of variables or variable names to floats . <nl> If present , gradients for specified <nl> variables will be multiplied by given constant . <nl> - clip_gradients : float or ` None ` , clips gradients by this value . <nl> + clip_gradients : float , callable or ` None ` . If float , is provided , a global <nl> + clipping is applied to prevent the norm of the gradient to exceed this <nl> + value . Alternatively , a callable can be provided e . g . : adaptive_clipping . <nl> + This callable takes a ` list ` of ` ( gradients , variables ) ` ` tuple ` s and <nl> + returns the same thing with the gradients modified . <nl> learning_rate_decay_fn : function , takes ` learning_rate ` and ` global_step ` <nl> ` Tensor ` s , returns ` Tensor ` . <nl> Can be used to implement any learning rate decay <nl> class should be sub - class of ` tf . Optimizer ` that implements <nl> * ` global_step ` is an invalid type or shape . <nl> * ` learning_rate ` is an invalid type or value . <nl> * ` optimizer ` is wrong type . <nl> + * ` clip_gradients ' is not float or callable . <nl> * ` learning_rate ` and ` learning_rate_decay_fn ` are supplied , but no <nl> ` global_step ` is available . <nl> " " " <nl> class should be sub - class of ` tf . Optimizer ` that implements <nl> if gradient_multipliers is not None : <nl> gradients = _multiply_gradients ( gradients , gradient_multipliers ) <nl> <nl> + if " gradient_norm " in summaries : <nl> + logging_ops . scalar_summary ( " global_norm / gradient_norm " , <nl> + clip_ops . global_norm ( zip ( * gradients ) [ 0 ] ) ) <nl> + <nl> # Optionally clip gradients by global norm . <nl> - if clip_gradients is not None : <nl> + if isinstance ( clip_gradients , float ) : <nl> gradients = _clip_gradients_by_norm ( gradients , clip_gradients ) <nl> + elif callable ( clip_gradients ) : <nl> + gradients = clip_gradients ( gradients ) <nl> + elif clip_gradients is not None : <nl> + raise ValueError ( <nl> + " Unknown type % s for clip_gradients " % type ( clip_gradients ) ) <nl> <nl> # Add scalar summary for loss . <nl> if " loss " in summaries : <nl> class should be sub - class of ` tf . Optimizer ` that implements <nl> grad_values ) <nl> if " gradient_norm " in summaries : <nl> logging_ops . 
scalar_summary ( " gradient_norm / " + variable . name , <nl> - clip_ops . global_norm ( [ grad_values ] ) ) <nl> + clip_ops . global_norm ( [ grad_values ] ) ) <nl> + <nl> + if clip_gradients is not None and " gradient_norm " in summaries : <nl> + logging_ops . scalar_summary ( " global_norm / clipped_gradient_norm " , <nl> + clip_ops . global_norm ( zip ( * gradients ) [ 0 ] ) ) <nl> <nl> # Create gradient updates . <nl> grad_updates = opt . apply_gradients ( gradients , <nl> def _clip_gradients_by_norm ( grads_and_vars , clip_gradients ) : <nl> return list ( zip ( clipped_gradients , variables ) ) <nl> <nl> <nl> + def _adaptive_max_norm ( norm , std_factor , decay , global_step , epsilon , name ) : <nl> + " " " Find max_norm given norm and previous average . " " " <nl> + with vs . variable_scope ( name , " AdaptiveMaxNorm " , [ norm ] ) : <nl> + log_norm = math_ops . log ( norm + epsilon ) <nl> + <nl> + def moving_average ( name , value , decay ) : <nl> + moving_average_variable = vs . get_variable ( <nl> + name , shape = value . get_shape ( ) , dtype = value . dtype , <nl> + initializer = init_ops . zeros_initializer , trainable = False ) <nl> + return moving_averages . assign_moving_average ( <nl> + moving_average_variable , value , decay ) <nl> + <nl> + # quicker adaptation at the beginning <nl> + if global_step is not None : <nl> + n = math_ops . to_float ( global_step ) <nl> + decay = math_ops . minimum ( decay , n / ( n + 1 . ) ) <nl> + <nl> + # update averages <nl> + mean = moving_average ( " mean " , log_norm , decay ) <nl> + sq_mean = moving_average ( " sq_mean " , math_ops . square ( log_norm ) , decay ) <nl> + <nl> + variance = sq_mean - math_ops . square ( mean ) <nl> + std = math_ops . sqrt ( math_ops . maximum ( epsilon , variance ) ) <nl> + max_norms = math_ops . exp ( mean + std_factor * std ) <nl> + return max_norms , mean <nl> + <nl> + <nl> + def adaptive_clipping_fn ( std_factor = 2 . , <nl> + decay = 0 . 95 , <nl> + static_max_norm = None , <nl> + global_step = None , <nl> + report_summary = False , <nl> + epsilon = 1e - 8 , <nl> + name = None ) : <nl> + " " " Adapt the clipping value using statistics on the norms . <nl> + <nl> + Implement adaptive gradient as presented in section 3 . 2 . 1 of <nl> + https : / / arxiv . org / abs / 1412 . 1602 . <nl> + <nl> + Keeps a moving average of the mean and std of the log ( norm ) of the gradient . <nl> + if the norm exceeds ` exp ( mean + std_factor * std ) ` , all gradients are rescaled <nl> + such that the global norm becomes ` exp ( mean ) ` . <nl> + <nl> + Args : <nl> + std_factor : Python scaler ( or tensor ) . <nl> + ` max_norm = exp ( mean + std_factor * std ) ` <nl> + decay : The smoothing factor of the moving averages . <nl> + static_max_norm : If provided , will threshold the norm to this value as an <nl> + extra safety . <nl> + global_step : Optional global_step . If provided , ` decay = decay * n / ( n + 1 ) ` . <nl> + This provides a quicker adaptation of the mean for the first steps . <nl> + report_summary : If ` True ` , will add histogram summaries of the ` max_norm ` . <nl> + epsilon : Small value chosen to avoid zero variance . <nl> + name : The name for this operation is used to scope operations and summaries . <nl> + <nl> + Returns : <nl> + A function for applying gradient clipping . <nl> + " " " <nl> + def gradient_clipping ( grads_and_vars ) : <nl> + " " " Internal function for adaptive clipping . " " " <nl> + grads , variables = zip ( * grads_and_vars ) <nl> + <nl> + norm = clip_ops . 
global_norm ( grads ) <nl> + <nl> + max_norm , log_mean = _adaptive_max_norm ( <nl> + norm , std_factor , decay , global_step , epsilon , name ) <nl> + <nl> + # reports the max gradient norm for debugging <nl> + if report_summary : <nl> + logging_ops . scalar_summary ( <nl> + " global_norm / adaptive_max_gradient_norm " , max_norm ) <nl> + <nl> + # factor will be 1 . if norm is smaller than max_norm <nl> + factor = math_ops . select ( norm < max_norm , <nl> + array_ops . ones_like ( norm ) , <nl> + math_ops . exp ( log_mean ) / norm ) <nl> + <nl> + if static_max_norm is not None : <nl> + factor = math_ops . minimum ( static_max_norm / norm , factor ) <nl> + <nl> + # apply factor <nl> + clipped_grads = [ ] <nl> + for grad in grads : <nl> + if grad is None : <nl> + clipped_grads . append ( None ) <nl> + elif isinstance ( grad , ops . IndexedSlices ) : <nl> + clipped_grads . append ( ops . IndexedSlices ( <nl> + grad . values * factor , grad . indices , grad . dense_shape ) ) <nl> + else : <nl> + clipped_grads . append ( grad * factor ) <nl> + <nl> + return list ( zip ( clipped_grads , variables ) ) <nl> + return gradient_clipping <nl> + <nl> + <nl> def _add_scaled_noise_to_gradients ( grads_and_vars , gradient_noise_scale ) : <nl> " " " Adds scaled noise from a 0 - mean normal distribution to gradients . " " " <nl> gradients , variables = zip ( * grads_and_vars ) <nl> mmm a / tensorflow / contrib / layers / python / layers / optimizers_test . py <nl> ppp b / tensorflow / contrib / layers / python / layers / optimizers_test . py <nl> <nl> from __future__ import division <nl> from __future__ import print_function <nl> <nl> + import numpy as np <nl> import tensorflow as tf <nl> <nl> <nl> def testGradientClip ( self ) : <nl> self . assertAlmostEqual ( var_value , 9 . 98999 , 4 ) <nl> self . assertEqual ( global_step_value , 1 ) <nl> <nl> + def testAdaptiveGradientClip ( self ) : <nl> + with self . test_session ( ) as session : <nl> + x , var , loss , global_step = _setup_model ( ) <nl> + clip_gradients = tf . contrib . layers . adaptive_clipping_fn ( ) <nl> + train = tf . contrib . layers . optimize_loss ( loss , <nl> + global_step , <nl> + learning_rate = 0 . 1 , <nl> + optimizer = " SGD " , <nl> + clip_gradients = clip_gradients ) <nl> + tf . initialize_all_variables ( ) . run ( ) <nl> + session . run ( train , feed_dict = { x : 5 } ) <nl> + var_value , global_step_value = session . run ( [ var , global_step ] ) <nl> + self . assertAlmostEqual ( var_value , 9 . 8916 , 4 ) <nl> + self . assertEqual ( global_step_value , 1 ) <nl> + var_count = 0 <nl> + for var in tf . all_variables ( ) : <nl> + if var . name . startswith ( " OptimizeLoss / AdaptiveMaxNorm " ) : <nl> + var_count + = 1 <nl> + self . assertEqual ( 2 , var_count ) <nl> + <nl> def testGradientMultiply ( self ) : <nl> with self . test_session ( ) as session : <nl> x , var , loss , global_step = _setup_model ( ) <nl> def testUpdateOpFromCollection ( self ) : <nl> self . assertEqual ( update_var_value , 20 ) <nl> self . assertEqual ( global_step_value , 1 ) <nl> <nl> + <nl> + class AdaptiveClipping ( tf . test . TestCase ) : <nl> + <nl> + def testAverages ( self ) : <nl> + with self . test_session ( ) as session : <nl> + scale = 2 . <nl> + grad = tf . ones ( [ 3 , 4 ] ) * scale <nl> + log_norm = np . log ( np . sqrt ( scale * * 2 * grad . get_shape ( ) . num_elements ( ) ) ) <nl> + grads_and_vars = [ ( grad , grad ) ] <nl> + grads_and_vars = tf . contrib . layers . adaptive_clipping_fn ( <nl> + decay = 0 . 
5 ) ( grads_and_vars ) <nl> + <nl> + var_dict = { } <nl> + for var in tf . all_variables ( ) : <nl> + if var . name . startswith ( " AdaptiveMaxNorm " ) : <nl> + var_dict [ var . name . split ( " : " ) [ 0 ] ] = var <nl> + self . assertEqual ( 2 , len ( var_dict ) ) <nl> + moving_mean = var_dict [ " AdaptiveMaxNorm / mean " ] <nl> + moving_sq_mean = var_dict [ " AdaptiveMaxNorm / sq_mean " ] <nl> + tf . initialize_all_variables ( ) . run ( ) <nl> + mean , sq_mean = session . run ( [ moving_mean , moving_sq_mean ] ) <nl> + self . assertEqual ( [ 0 ] , mean ) <nl> + self . assertEqual ( [ 0 ] , sq_mean ) <nl> + for i in range ( 20 ) : <nl> + mean , sq_mean , _ = session . run ( <nl> + [ moving_mean , moving_sq_mean , grads_and_vars [ 0 ] [ 0 ] ] ) <nl> + if i = = 0 : <nl> + self . assertLess ( mean , 0 . 9 * log_norm ) <nl> + self . assertLess ( sq_mean , 0 . 9 * log_norm * * 2 ) <nl> + <nl> + self . assertAlmostEqual ( float ( mean ) , log_norm , places = 4 ) <nl> + self . assertAlmostEqual ( float ( sq_mean ) , log_norm * * 2 , places = 4 ) <nl> + <nl> + def testClip ( self ) : <nl> + with self . test_session ( ) as session : <nl> + spike = 1000 . <nl> + multiplier = tf . placeholder ( tf . float32 , [ ] , " multiplier " ) <nl> + step = tf . placeholder ( tf . int32 , [ ] , " step " ) <nl> + <nl> + grad = tf . ones ( [ 3 , 4 ] ) * multiplier <nl> + grads_and_vars = [ ( grad , grad ) ] <nl> + grads_and_vars = tf . contrib . layers . adaptive_clipping_fn ( <nl> + decay = 0 . 9 , global_step = step ) ( grads_and_vars ) <nl> + <nl> + tf . initialize_all_variables ( ) . run ( ) <nl> + def run ( scale , i ) : <nl> + return session . run ( grads_and_vars [ 0 ] [ 0 ] , <nl> + feed_dict = { multiplier : scale , step : i } ) <nl> + <nl> + for i in range ( 20 ) : <nl> + scale = [ 1 . , - 2 . ] [ i % 2 ] <nl> + clipped_grad = run ( scale , i ) <nl> + if i > 3 : <nl> + self . assertAllClose ( np . ones ( clipped_grad . shape ) * scale , clipped_grad ) <nl> + <nl> + # assert that the spike will have low influence . <nl> + clipped_grad = run ( spike , 20 ) <nl> + self . assertTrue ( ( clipped_grad < 25 . ) . all ( ) ) <nl> + <nl> + # assert that a repeated spike will converge to this new value . <nl> + for i in range ( 10 ) : <nl> + clipped_grad = run ( spike , i + 21 ) <nl> + <nl> + self . assertAllClose ( np . ones ( clipped_grad . shape ) * spike , clipped_grad ) <nl> + <nl> if __name__ = = " __main__ " : <nl> tf . test . main ( ) <nl>
|
Implements adaptive gradient clipping .
|
tensorflow/tensorflow
|
1d5819366e96aa750bfbe6885a93e43daf7835a1
|
2016-10-27T17:36:28Z
|
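Editorial note (not part of the dataset row above): the rule implemented by adaptive_clipping_fn in the diff, restated as math. Here EMA_decay is the exponential moving average with the given decay, k is std_factor, g is the full gradient, and the small epsilon terms are omitted for clarity:

\[
\mu = \mathrm{EMA}_{\mathrm{decay}}\big[\log\lVert g\rVert\big], \qquad
\sigma^2 = \mathrm{EMA}_{\mathrm{decay}}\big[(\log\lVert g\rVert)^2\big] - \mu^2, \qquad
\mathrm{max\_norm} = e^{\mu + k\sigma}
\]
\[
g' =
\begin{cases}
g, & \lVert g\rVert < \mathrm{max\_norm},\\[4pt]
\dfrac{e^{\mu}}{\lVert g\rVert}\, g, & \text{otherwise,}
\end{cases}
\]

with the scaling factor additionally capped by static_max_norm / ||g|| when static_max_norm is supplied.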
mmm a / lib / SILOptimizer / PassManager / PassPipeline . cpp <nl> ppp b / lib / SILOptimizer / PassManager / PassPipeline . cpp <nl> static void addLateLoopOptPassPipeline ( SILPassPipelinePlan & P ) { <nl> P . addAccessEnforcementReleaseSinking ( ) ; <nl> P . addAccessEnforcementOpts ( ) ; <nl> <nl> + / / Sometimes stack promotion can catch cases only at this late stage of the <nl> + / / pipeline , after FunctionSignatureOpts . <nl> + P . addStackPromotion ( ) ; <nl> + <nl> / / Optimize overflow checks . <nl> P . addRedundantOverflowCheckRemoval ( ) ; <nl> P . addMergeCondFails ( ) ; <nl> mmm a / test / SILOptimizer / devirt_speculative_init . swift <nl> ppp b / test / SILOptimizer / devirt_speculative_init . swift <nl> public func make ( type : Cat . Type , cats : Int ) { <nl> / / CHECK : bb1 : <nl> / / CHECK : return <nl> / / CHECK : bb2 ( { { % . * } } : $ @ thick Cat . Type ) : <nl> - / / CHECK : alloc_ref $ Cat <nl> + / / CHECK : alloc_ref [ stack ] $ Cat <nl> / / CHECK : br bb1 <nl> / / CHECK : bb3 : <nl> - / / CHECK : alloc_ref $ BigCat <nl> + / / CHECK : alloc_ref [ stack ] $ BigCat <nl> / / CHECK : br bb1 <nl> new file mode 100644 <nl> index 000000000000 . . 011e6c8962db <nl> mmm / dev / null <nl> ppp b / test / SILOptimizer / stack_promotion_2_modules . swift <nl> <nl> + / / RUN : % empty - directory ( % t ) <nl> + / / RUN : % target - swift - frontend - parse - as - library - emit - module - emit - module - path = % t / Module . swiftmodule - module - name = Module - DMODULE % s - O - emit - sil - o % t / module . sil <nl> + / / RUN : % target - swift - frontend - module - name = main - DMAIN % s - I % t - O - emit - sil | % FileCheck % s <nl> + <nl> + / / REQUIRES : swift_stdlib_no_asserts , optimized_stdlib <nl> + <nl> + # if MODULE <nl> + <nl> + public struct Foo : Equatable { <nl> + @ usableFromInline var optionA : Bool <nl> + @ usableFromInline var optionB : Optional < Int > <nl> + <nl> + public typealias ArrayLiteralElement = FooElement <nl> + <nl> + public struct FooElement : Equatable { <nl> + @ usableFromInline enum Backing : Equatable { <nl> + case a <nl> + case b ( Int ) <nl> + } <nl> + <nl> + @ usableFromInline var backing : Backing <nl> + <nl> + @ inlinable internal init ( backing : Backing ) { <nl> + self . backing = backing <nl> + } <nl> + <nl> + public static let optionA = FooElement ( backing : . a ) <nl> + <nl> + @ inlinable <nl> + public static func getOptionA ( ) - > FooElement { <nl> + return FooElement ( backing : . a ) <nl> + } <nl> + @ inlinable <nl> + public static func optionB ( _ x : Int ) - > FooElement { <nl> + return FooElement ( backing : . b ( x ) ) <nl> + } <nl> + } <nl> + } <nl> + <nl> + extension Foo : ExpressibleByArrayLiteral { <nl> + @ inlinable <nl> + @ inline ( never ) <nl> + public init ( arrayLiteral things : FooElement . . . ) { <nl> + self . optionA = false <nl> + self . optionB = nil <nl> + for thing in things { <nl> + switch thing . backing { <nl> + case . a : <nl> + self . optionA = true <nl> + case . b ( let x ) : <nl> + self . optionB = x <nl> + } <nl> + } <nl> + } <nl> + } <nl> + <nl> + # endif <nl> + <nl> + <nl> + # if MAIN <nl> + <nl> + import Module <nl> + <nl> + / / Check if the array literal can be stack promoted . <nl> + <nl> + / / CHECK - LABEL : sil @ $ s4main6testit6Module3FooVyF <nl> + / / CHECK : alloc_ref [ stack ] [ tail_elems $ Foo . FooElement <nl> + / / CHECK : } / / end sil function ' $ s4main6testit6Module3FooVyF ' <nl> + <nl> + public func testit ( ) - > Foo { <nl> + return [ . optionA , . 
optionB ( 0xbeef ) , . optionA ] <nl> + } <nl> + <nl> + <nl> + # endif <nl> + <nl> + <nl>
|
SIL optimizer : add an additional stack promotion pass to the late pipeline
|
apple/swift
|
6569c98332d7a060adcb3d2f8805eae10c1bd8fc
|
2020-05-28T08:23:40Z
|
mmm a / fdbrpc / BlobStore . actor . cpp <nl> ppp b / fdbrpc / BlobStore . actor . cpp <nl> Reference < BlobStoreEndpoint > BlobStoreEndpoint : : fromString ( std : : string const & ur <nl> } catch ( std : : string & err ) { <nl> if ( error ! = nullptr ) <nl> * error = err ; <nl> - TraceEvent ( SevWarnAlways , " BlobStoreEndpoint " ) . detail ( " Description " , err ) . detail ( " Format " , getURLFormat ( ) ) . detail ( " URL " , url ) ; <nl> + TraceEvent ( SevWarnAlways , " BlobStoreEndpointBadURL " ) . detail ( " Description " , err ) . detail ( " Format " , getURLFormat ( ) ) . detail ( " URL " , url ) ; <nl> throw file_not_found ( ) ; <nl> } <nl> } <nl> ACTOR Future < Reference < HTTP : : Response > > doRequest_impl ( Reference < BlobStoreEndpoi <nl> / / But only if our previous attempt was not the last allowable try . <nl> retryable = retryable & & ( thisTry < maxTries ) ; <nl> <nl> - TraceEvent event ( retryable ? SevWarn : SevWarnAlways , retryable ? " BlobStoreEndpointRequestFailedRetryable " : " BlobStoreEndpointRequestFailed " ) ; <nl> + TraceEvent event ( SevWarn , retryable ? " BlobStoreEndpointRequestFailedRetryable " : " BlobStoreEndpointRequestFailed " ) ; <nl> <nl> event . detail ( " RemoteEndpoint " , address ) <nl> . detail ( " Verb " , verb ) <nl>
|
Renamed an error , changed blob request failure to Warn severity .
|
apple/foundationdb
|
41f80bf7edea561753965311568cb3f04613d602
|
2017-12-06T23:58:54Z
|
mmm a / dbms / include / DB / Dictionaries / DictionaryStructure . h <nl> ppp b / dbms / include / DB / Dictionaries / DictionaryStructure . h <nl> struct DictionaryStructure <nl> <nl> const auto null_value_string = config . getString ( prefix + " null_value " ) ; <nl> Field null_value ; <nl> - ReadBufferFromString null_value_buffer { null_value_string } ; <nl> - type - > deserializeText ( null_value , null_value_buffer ) ; <nl> + try <nl> + { <nl> + ReadBufferFromString null_value_buffer { null_value_string } ; <nl> + type - > deserializeText ( null_value , null_value_buffer ) ; <nl> + } <nl> + catch ( const std : : exception & e ) <nl> + { <nl> + throw Exception { <nl> + std : : string { " Error parsing null_value : " } + e . what ( ) , <nl> + ErrorCodes : : BAD_ARGUMENTS <nl> + } ; <nl> + } <nl> <nl> const auto hierarchical = config . getBool ( prefix + " hierarchical " , false ) ; <nl> const auto injective = config . getBool ( prefix + " injective " , false ) ; <nl>
|
dbms : dictionaries : more meaningful exception message on null_value parsing error . [ # METR - 13298 ]
|
ClickHouse/ClickHouse
|
006360b75ac6e934b4ffe731036541db79d5d785
|
2015-03-23T14:30:43Z
|
mmm a / modules / dnn / src / layers / detection_output_layer . cpp <nl> ppp b / modules / dnn / src / layers / detection_output_layer . cpp <nl> class DetectionOutputLayerImpl : public DetectionOutputLayer <nl> <nl> if ( numKept = = 0 ) <nl> { <nl> - CV_ErrorNoReturn ( Error : : StsError , " Couldn ' t find any detections " ) ; <nl> + return ; <nl> } <nl> int outputShape [ ] = { 1 , 1 , ( int ) numKept , 7 } ; <nl> outputs [ 0 ] . create ( 4 , outputShape , CV_32F ) ; <nl> new file mode 100644 <nl> index 00000000000 . . def19412b66 <nl> mmm / dev / null <nl> ppp b / samples / data / dnn / MobileNetSSD_300x300 . prototxt <nl> <nl> + name : " MobileNet - SSD " <nl> + input : " data " <nl> + input_shape { <nl> + dim : 1 <nl> + dim : 3 <nl> + dim : 300 <nl> + dim : 300 <nl> + } <nl> + layer { <nl> + name : " conv0 " <nl> + type : " Convolution " <nl> + bottom : " data " <nl> + top : " conv0 " <nl> + param { <nl> + lr_mult : 1 . 0 <nl> + decay_mult : 1 . 0 <nl> + } <nl> + convolution_param { <nl> + num_output : 32 <nl> + bias_term : false <nl> + pad : 1 <nl> + kernel_size : 3 <nl> + stride : 2 <nl> + weight_filler { <nl> + type : " msra " <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv0 / bn " <nl> + type : " BatchNorm " <nl> + bottom : " conv0 " <nl> + top : " conv0 " <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv0 / scale " <nl> + type : " Scale " <nl> + bottom : " conv0 " <nl> + top : " conv0 " <nl> + param { <nl> + lr_mult : 0 . 1 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 . 2 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + scale_param { <nl> + filler { <nl> + value : 1 <nl> + } <nl> + bias_term : true <nl> + bias_filler { <nl> + value : 0 <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv0 / relu " <nl> + type : " ReLU " <nl> + bottom : " conv0 " <nl> + top : " conv0 " <nl> + } <nl> + layer { <nl> + name : " conv1 / dw " <nl> + type : " Convolution " <nl> + bottom : " conv0 " <nl> + top : " conv1 / dw " <nl> + param { <nl> + lr_mult : 1 . 0 <nl> + decay_mult : 1 . 0 <nl> + } <nl> + convolution_param { <nl> + num_output : 32 <nl> + bias_term : false <nl> + pad : 1 <nl> + kernel_size : 3 <nl> + group : 32 <nl> + engine : CAFFE <nl> + weight_filler { <nl> + type : " msra " <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv1 / dw / bn " <nl> + type : " BatchNorm " <nl> + bottom : " conv1 / dw " <nl> + top : " conv1 / dw " <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv1 / dw / scale " <nl> + type : " Scale " <nl> + bottom : " conv1 / dw " <nl> + top : " conv1 / dw " <nl> + param { <nl> + lr_mult : 0 . 1 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 . 2 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + scale_param { <nl> + filler { <nl> + value : 1 <nl> + } <nl> + bias_term : true <nl> + bias_filler { <nl> + value : 0 <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv1 / dw / relu " <nl> + type : " ReLU " <nl> + bottom : " conv1 / dw " <nl> + top : " conv1 / dw " <nl> + } <nl> + layer { <nl> + name : " conv1 " <nl> + type : " Convolution " <nl> + bottom : " conv1 / dw " <nl> + top : " conv1 " <nl> + param { <nl> + lr_mult : 1 . 
0 <nl> + decay_mult : 1 . 0 <nl> + } <nl> + convolution_param { <nl> + num_output : 64 <nl> + bias_term : false <nl> + kernel_size : 1 <nl> + weight_filler { <nl> + type : " msra " <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv1 / bn " <nl> + type : " BatchNorm " <nl> + bottom : " conv1 " <nl> + top : " conv1 " <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv1 / scale " <nl> + type : " Scale " <nl> + bottom : " conv1 " <nl> + top : " conv1 " <nl> + param { <nl> + lr_mult : 0 . 1 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 . 2 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + scale_param { <nl> + filler { <nl> + value : 1 <nl> + } <nl> + bias_term : true <nl> + bias_filler { <nl> + value : 0 <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv1 / relu " <nl> + type : " ReLU " <nl> + bottom : " conv1 " <nl> + top : " conv1 " <nl> + } <nl> + layer { <nl> + name : " conv2 / dw " <nl> + type : " Convolution " <nl> + bottom : " conv1 " <nl> + top : " conv2 / dw " <nl> + param { <nl> + lr_mult : 1 . 0 <nl> + decay_mult : 1 . 0 <nl> + } <nl> + convolution_param { <nl> + num_output : 64 <nl> + bias_term : false <nl> + pad : 1 <nl> + kernel_size : 3 <nl> + stride : 2 <nl> + group : 64 <nl> + engine : CAFFE <nl> + weight_filler { <nl> + type : " msra " <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv2 / dw / bn " <nl> + type : " BatchNorm " <nl> + bottom : " conv2 / dw " <nl> + top : " conv2 / dw " <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv2 / dw / scale " <nl> + type : " Scale " <nl> + bottom : " conv2 / dw " <nl> + top : " conv2 / dw " <nl> + param { <nl> + lr_mult : 0 . 1 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 . 2 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + scale_param { <nl> + filler { <nl> + value : 1 <nl> + } <nl> + bias_term : true <nl> + bias_filler { <nl> + value : 0 <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv2 / dw / relu " <nl> + type : " ReLU " <nl> + bottom : " conv2 / dw " <nl> + top : " conv2 / dw " <nl> + } <nl> + layer { <nl> + name : " conv2 " <nl> + type : " Convolution " <nl> + bottom : " conv2 / dw " <nl> + top : " conv2 " <nl> + param { <nl> + lr_mult : 1 . 0 <nl> + decay_mult : 1 . 0 <nl> + } <nl> + convolution_param { <nl> + num_output : 128 <nl> + bias_term : false <nl> + kernel_size : 1 <nl> + weight_filler { <nl> + type : " msra " <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv2 / bn " <nl> + type : " BatchNorm " <nl> + bottom : " conv2 " <nl> + top : " conv2 " <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv2 / scale " <nl> + type : " Scale " <nl> + bottom : " conv2 " <nl> + top : " conv2 " <nl> + param { <nl> + lr_mult : 0 . 1 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 . 2 <nl> + decay_mult : 0 . 
0 <nl> + } <nl> + scale_param { <nl> + filler { <nl> + value : 1 <nl> + } <nl> + bias_term : true <nl> + bias_filler { <nl> + value : 0 <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv2 / relu " <nl> + type : " ReLU " <nl> + bottom : " conv2 " <nl> + top : " conv2 " <nl> + } <nl> + layer { <nl> + name : " conv3 / dw " <nl> + type : " Convolution " <nl> + bottom : " conv2 " <nl> + top : " conv3 / dw " <nl> + param { <nl> + lr_mult : 1 . 0 <nl> + decay_mult : 1 . 0 <nl> + } <nl> + convolution_param { <nl> + num_output : 128 <nl> + bias_term : false <nl> + pad : 1 <nl> + kernel_size : 3 <nl> + group : 128 <nl> + engine : CAFFE <nl> + weight_filler { <nl> + type : " msra " <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv3 / dw / bn " <nl> + type : " BatchNorm " <nl> + bottom : " conv3 / dw " <nl> + top : " conv3 / dw " <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv3 / dw / scale " <nl> + type : " Scale " <nl> + bottom : " conv3 / dw " <nl> + top : " conv3 / dw " <nl> + param { <nl> + lr_mult : 0 . 1 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 . 2 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + scale_param { <nl> + filler { <nl> + value : 1 <nl> + } <nl> + bias_term : true <nl> + bias_filler { <nl> + value : 0 <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv3 / dw / relu " <nl> + type : " ReLU " <nl> + bottom : " conv3 / dw " <nl> + top : " conv3 / dw " <nl> + } <nl> + layer { <nl> + name : " conv3 " <nl> + type : " Convolution " <nl> + bottom : " conv3 / dw " <nl> + top : " conv3 " <nl> + param { <nl> + lr_mult : 1 . 0 <nl> + decay_mult : 1 . 0 <nl> + } <nl> + convolution_param { <nl> + num_output : 128 <nl> + bias_term : false <nl> + kernel_size : 1 <nl> + weight_filler { <nl> + type : " msra " <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv3 / bn " <nl> + type : " BatchNorm " <nl> + bottom : " conv3 " <nl> + top : " conv3 " <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv3 / scale " <nl> + type : " Scale " <nl> + bottom : " conv3 " <nl> + top : " conv3 " <nl> + param { <nl> + lr_mult : 0 . 1 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 . 2 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + scale_param { <nl> + filler { <nl> + value : 1 <nl> + } <nl> + bias_term : true <nl> + bias_filler { <nl> + value : 0 <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv3 / relu " <nl> + type : " ReLU " <nl> + bottom : " conv3 " <nl> + top : " conv3 " <nl> + } <nl> + layer { <nl> + name : " conv4 / dw " <nl> + type : " Convolution " <nl> + bottom : " conv3 " <nl> + top : " conv4 / dw " <nl> + param { <nl> + lr_mult : 1 . 0 <nl> + decay_mult : 1 . 
0 <nl> + } <nl> + convolution_param { <nl> + num_output : 128 <nl> + bias_term : false <nl> + pad : 1 <nl> + kernel_size : 3 <nl> + stride : 2 <nl> + group : 128 <nl> + engine : CAFFE <nl> + weight_filler { <nl> + type : " msra " <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv4 / dw / bn " <nl> + type : " BatchNorm " <nl> + bottom : " conv4 / dw " <nl> + top : " conv4 / dw " <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv4 / dw / scale " <nl> + type : " Scale " <nl> + bottom : " conv4 / dw " <nl> + top : " conv4 / dw " <nl> + param { <nl> + lr_mult : 0 . 1 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 . 2 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + scale_param { <nl> + filler { <nl> + value : 1 <nl> + } <nl> + bias_term : true <nl> + bias_filler { <nl> + value : 0 <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv4 / dw / relu " <nl> + type : " ReLU " <nl> + bottom : " conv4 / dw " <nl> + top : " conv4 / dw " <nl> + } <nl> + layer { <nl> + name : " conv4 " <nl> + type : " Convolution " <nl> + bottom : " conv4 / dw " <nl> + top : " conv4 " <nl> + param { <nl> + lr_mult : 1 . 0 <nl> + decay_mult : 1 . 0 <nl> + } <nl> + convolution_param { <nl> + num_output : 256 <nl> + bias_term : false <nl> + kernel_size : 1 <nl> + weight_filler { <nl> + type : " msra " <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv4 / bn " <nl> + type : " BatchNorm " <nl> + bottom : " conv4 " <nl> + top : " conv4 " <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv4 / scale " <nl> + type : " Scale " <nl> + bottom : " conv4 " <nl> + top : " conv4 " <nl> + param { <nl> + lr_mult : 0 . 1 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 . 2 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + scale_param { <nl> + filler { <nl> + value : 1 <nl> + } <nl> + bias_term : true <nl> + bias_filler { <nl> + value : 0 <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv4 / relu " <nl> + type : " ReLU " <nl> + bottom : " conv4 " <nl> + top : " conv4 " <nl> + } <nl> + layer { <nl> + name : " conv5 / dw " <nl> + type : " Convolution " <nl> + bottom : " conv4 " <nl> + top : " conv5 / dw " <nl> + param { <nl> + lr_mult : 1 . 0 <nl> + decay_mult : 1 . 0 <nl> + } <nl> + convolution_param { <nl> + num_output : 256 <nl> + bias_term : false <nl> + pad : 1 <nl> + kernel_size : 3 <nl> + group : 256 <nl> + engine : CAFFE <nl> + weight_filler { <nl> + type : " msra " <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv5 / dw / bn " <nl> + type : " BatchNorm " <nl> + bottom : " conv5 / dw " <nl> + top : " conv5 / dw " <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv5 / dw / scale " <nl> + type : " Scale " <nl> + bottom : " conv5 / dw " <nl> + top : " conv5 / dw " <nl> + param { <nl> + lr_mult : 0 . 1 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 . 2 <nl> + decay_mult : 0 . 
0 <nl> + } <nl> + scale_param { <nl> + filler { <nl> + value : 1 <nl> + } <nl> + bias_term : true <nl> + bias_filler { <nl> + value : 0 <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv5 / dw / relu " <nl> + type : " ReLU " <nl> + bottom : " conv5 / dw " <nl> + top : " conv5 / dw " <nl> + } <nl> + layer { <nl> + name : " conv5 " <nl> + type : " Convolution " <nl> + bottom : " conv5 / dw " <nl> + top : " conv5 " <nl> + param { <nl> + lr_mult : 1 . 0 <nl> + decay_mult : 1 . 0 <nl> + } <nl> + convolution_param { <nl> + num_output : 256 <nl> + bias_term : false <nl> + kernel_size : 1 <nl> + weight_filler { <nl> + type : " msra " <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv5 / bn " <nl> + type : " BatchNorm " <nl> + bottom : " conv5 " <nl> + top : " conv5 " <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv5 / scale " <nl> + type : " Scale " <nl> + bottom : " conv5 " <nl> + top : " conv5 " <nl> + param { <nl> + lr_mult : 0 . 1 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 . 2 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + scale_param { <nl> + filler { <nl> + value : 1 <nl> + } <nl> + bias_term : true <nl> + bias_filler { <nl> + value : 0 <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv5 / relu " <nl> + type : " ReLU " <nl> + bottom : " conv5 " <nl> + top : " conv5 " <nl> + } <nl> + layer { <nl> + name : " conv6 / dw " <nl> + type : " Convolution " <nl> + bottom : " conv5 " <nl> + top : " conv6 / dw " <nl> + param { <nl> + lr_mult : 1 . 0 <nl> + decay_mult : 1 . 0 <nl> + } <nl> + convolution_param { <nl> + num_output : 256 <nl> + bias_term : false <nl> + pad : 1 <nl> + kernel_size : 3 <nl> + stride : 2 <nl> + group : 256 <nl> + engine : CAFFE <nl> + weight_filler { <nl> + type : " msra " <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv6 / dw / bn " <nl> + type : " BatchNorm " <nl> + bottom : " conv6 / dw " <nl> + top : " conv6 / dw " <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv6 / dw / scale " <nl> + type : " Scale " <nl> + bottom : " conv6 / dw " <nl> + top : " conv6 / dw " <nl> + param { <nl> + lr_mult : 0 . 1 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 . 2 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + scale_param { <nl> + filler { <nl> + value : 1 <nl> + } <nl> + bias_term : true <nl> + bias_filler { <nl> + value : 0 <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv6 / dw / relu " <nl> + type : " ReLU " <nl> + bottom : " conv6 / dw " <nl> + top : " conv6 / dw " <nl> + } <nl> + layer { <nl> + name : " conv6 " <nl> + type : " Convolution " <nl> + bottom : " conv6 / dw " <nl> + top : " conv6 " <nl> + param { <nl> + lr_mult : 1 . 0 <nl> + decay_mult : 1 . 
0 <nl> + } <nl> + convolution_param { <nl> + num_output : 512 <nl> + bias_term : false <nl> + kernel_size : 1 <nl> + weight_filler { <nl> + type : " msra " <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv6 / bn " <nl> + type : " BatchNorm " <nl> + bottom : " conv6 " <nl> + top : " conv6 " <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv6 / scale " <nl> + type : " Scale " <nl> + bottom : " conv6 " <nl> + top : " conv6 " <nl> + param { <nl> + lr_mult : 0 . 1 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 . 2 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + scale_param { <nl> + filler { <nl> + value : 1 <nl> + } <nl> + bias_term : true <nl> + bias_filler { <nl> + value : 0 <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv6 / relu " <nl> + type : " ReLU " <nl> + bottom : " conv6 " <nl> + top : " conv6 " <nl> + } <nl> + layer { <nl> + name : " conv7 / dw " <nl> + type : " Convolution " <nl> + bottom : " conv6 " <nl> + top : " conv7 / dw " <nl> + param { <nl> + lr_mult : 1 . 0 <nl> + decay_mult : 1 . 0 <nl> + } <nl> + convolution_param { <nl> + num_output : 512 <nl> + bias_term : false <nl> + pad : 1 <nl> + kernel_size : 3 <nl> + group : 512 <nl> + engine : CAFFE <nl> + weight_filler { <nl> + type : " msra " <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv7 / dw / bn " <nl> + type : " BatchNorm " <nl> + bottom : " conv7 / dw " <nl> + top : " conv7 / dw " <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv7 / dw / scale " <nl> + type : " Scale " <nl> + bottom : " conv7 / dw " <nl> + top : " conv7 / dw " <nl> + param { <nl> + lr_mult : 0 . 1 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 . 2 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + scale_param { <nl> + filler { <nl> + value : 1 <nl> + } <nl> + bias_term : true <nl> + bias_filler { <nl> + value : 0 <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv7 / dw / relu " <nl> + type : " ReLU " <nl> + bottom : " conv7 / dw " <nl> + top : " conv7 / dw " <nl> + } <nl> + layer { <nl> + name : " conv7 " <nl> + type : " Convolution " <nl> + bottom : " conv7 / dw " <nl> + top : " conv7 " <nl> + param { <nl> + lr_mult : 1 . 0 <nl> + decay_mult : 1 . 0 <nl> + } <nl> + convolution_param { <nl> + num_output : 512 <nl> + bias_term : false <nl> + kernel_size : 1 <nl> + weight_filler { <nl> + type : " msra " <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv7 / bn " <nl> + type : " BatchNorm " <nl> + bottom : " conv7 " <nl> + top : " conv7 " <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv7 / scale " <nl> + type : " Scale " <nl> + bottom : " conv7 " <nl> + top : " conv7 " <nl> + param { <nl> + lr_mult : 0 . 1 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 . 2 <nl> + decay_mult : 0 . 
0 <nl> + } <nl> + scale_param { <nl> + filler { <nl> + value : 1 <nl> + } <nl> + bias_term : true <nl> + bias_filler { <nl> + value : 0 <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv7 / relu " <nl> + type : " ReLU " <nl> + bottom : " conv7 " <nl> + top : " conv7 " <nl> + } <nl> + layer { <nl> + name : " conv8 / dw " <nl> + type : " Convolution " <nl> + bottom : " conv7 " <nl> + top : " conv8 / dw " <nl> + param { <nl> + lr_mult : 1 . 0 <nl> + decay_mult : 1 . 0 <nl> + } <nl> + convolution_param { <nl> + num_output : 512 <nl> + bias_term : false <nl> + pad : 1 <nl> + kernel_size : 3 <nl> + group : 512 <nl> + engine : CAFFE <nl> + weight_filler { <nl> + type : " msra " <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv8 / dw / bn " <nl> + type : " BatchNorm " <nl> + bottom : " conv8 / dw " <nl> + top : " conv8 / dw " <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv8 / dw / scale " <nl> + type : " Scale " <nl> + bottom : " conv8 / dw " <nl> + top : " conv8 / dw " <nl> + param { <nl> + lr_mult : 0 . 1 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 . 2 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + scale_param { <nl> + filler { <nl> + value : 1 <nl> + } <nl> + bias_term : true <nl> + bias_filler { <nl> + value : 0 <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv8 / dw / relu " <nl> + type : " ReLU " <nl> + bottom : " conv8 / dw " <nl> + top : " conv8 / dw " <nl> + } <nl> + layer { <nl> + name : " conv8 " <nl> + type : " Convolution " <nl> + bottom : " conv8 / dw " <nl> + top : " conv8 " <nl> + param { <nl> + lr_mult : 1 . 0 <nl> + decay_mult : 1 . 0 <nl> + } <nl> + convolution_param { <nl> + num_output : 512 <nl> + bias_term : false <nl> + kernel_size : 1 <nl> + weight_filler { <nl> + type : " msra " <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv8 / bn " <nl> + type : " BatchNorm " <nl> + bottom : " conv8 " <nl> + top : " conv8 " <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv8 / scale " <nl> + type : " Scale " <nl> + bottom : " conv8 " <nl> + top : " conv8 " <nl> + param { <nl> + lr_mult : 0 . 1 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 . 2 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + scale_param { <nl> + filler { <nl> + value : 1 <nl> + } <nl> + bias_term : true <nl> + bias_filler { <nl> + value : 0 <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv8 / relu " <nl> + type : " ReLU " <nl> + bottom : " conv8 " <nl> + top : " conv8 " <nl> + } <nl> + layer { <nl> + name : " conv9 / dw " <nl> + type : " Convolution " <nl> + bottom : " conv8 " <nl> + top : " conv9 / dw " <nl> + param { <nl> + lr_mult : 1 . 0 <nl> + decay_mult : 1 . 
0 <nl> + } <nl> + convolution_param { <nl> + num_output : 512 <nl> + bias_term : false <nl> + pad : 1 <nl> + kernel_size : 3 <nl> + group : 512 <nl> + engine : CAFFE <nl> + weight_filler { <nl> + type : " msra " <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv9 / dw / bn " <nl> + type : " BatchNorm " <nl> + bottom : " conv9 / dw " <nl> + top : " conv9 / dw " <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv9 / dw / scale " <nl> + type : " Scale " <nl> + bottom : " conv9 / dw " <nl> + top : " conv9 / dw " <nl> + param { <nl> + lr_mult : 0 . 1 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 . 2 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + scale_param { <nl> + filler { <nl> + value : 1 <nl> + } <nl> + bias_term : true <nl> + bias_filler { <nl> + value : 0 <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv9 / dw / relu " <nl> + type : " ReLU " <nl> + bottom : " conv9 / dw " <nl> + top : " conv9 / dw " <nl> + } <nl> + layer { <nl> + name : " conv9 " <nl> + type : " Convolution " <nl> + bottom : " conv9 / dw " <nl> + top : " conv9 " <nl> + param { <nl> + lr_mult : 1 . 0 <nl> + decay_mult : 1 . 0 <nl> + } <nl> + convolution_param { <nl> + num_output : 512 <nl> + bias_term : false <nl> + kernel_size : 1 <nl> + weight_filler { <nl> + type : " msra " <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv9 / bn " <nl> + type : " BatchNorm " <nl> + bottom : " conv9 " <nl> + top : " conv9 " <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv9 / scale " <nl> + type : " Scale " <nl> + bottom : " conv9 " <nl> + top : " conv9 " <nl> + param { <nl> + lr_mult : 0 . 1 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 . 2 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + scale_param { <nl> + filler { <nl> + value : 1 <nl> + } <nl> + bias_term : true <nl> + bias_filler { <nl> + value : 0 <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv9 / relu " <nl> + type : " ReLU " <nl> + bottom : " conv9 " <nl> + top : " conv9 " <nl> + } <nl> + layer { <nl> + name : " conv10 / dw " <nl> + type : " Convolution " <nl> + bottom : " conv9 " <nl> + top : " conv10 / dw " <nl> + param { <nl> + lr_mult : 1 . 0 <nl> + decay_mult : 1 . 0 <nl> + } <nl> + convolution_param { <nl> + num_output : 512 <nl> + bias_term : false <nl> + pad : 1 <nl> + kernel_size : 3 <nl> + group : 512 <nl> + engine : CAFFE <nl> + weight_filler { <nl> + type : " msra " <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv10 / dw / bn " <nl> + type : " BatchNorm " <nl> + bottom : " conv10 / dw " <nl> + top : " conv10 / dw " <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv10 / dw / scale " <nl> + type : " Scale " <nl> + bottom : " conv10 / dw " <nl> + top : " conv10 / dw " <nl> + param { <nl> + lr_mult : 0 . 1 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 . 2 <nl> + decay_mult : 0 . 
0 <nl> + } <nl> + scale_param { <nl> + filler { <nl> + value : 1 <nl> + } <nl> + bias_term : true <nl> + bias_filler { <nl> + value : 0 <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv10 / dw / relu " <nl> + type : " ReLU " <nl> + bottom : " conv10 / dw " <nl> + top : " conv10 / dw " <nl> + } <nl> + layer { <nl> + name : " conv10 " <nl> + type : " Convolution " <nl> + bottom : " conv10 / dw " <nl> + top : " conv10 " <nl> + param { <nl> + lr_mult : 1 . 0 <nl> + decay_mult : 1 . 0 <nl> + } <nl> + convolution_param { <nl> + num_output : 512 <nl> + bias_term : false <nl> + kernel_size : 1 <nl> + weight_filler { <nl> + type : " msra " <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv10 / bn " <nl> + type : " BatchNorm " <nl> + bottom : " conv10 " <nl> + top : " conv10 " <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv10 / scale " <nl> + type : " Scale " <nl> + bottom : " conv10 " <nl> + top : " conv10 " <nl> + param { <nl> + lr_mult : 0 . 1 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 . 2 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + scale_param { <nl> + filler { <nl> + value : 1 <nl> + } <nl> + bias_term : true <nl> + bias_filler { <nl> + value : 0 <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv10 / relu " <nl> + type : " ReLU " <nl> + bottom : " conv10 " <nl> + top : " conv10 " <nl> + } <nl> + layer { <nl> + name : " conv11 / dw " <nl> + type : " Convolution " <nl> + bottom : " conv10 " <nl> + top : " conv11 / dw " <nl> + param { <nl> + lr_mult : 1 . 0 <nl> + decay_mult : 1 . 0 <nl> + } <nl> + convolution_param { <nl> + num_output : 512 <nl> + bias_term : false <nl> + pad : 1 <nl> + kernel_size : 3 <nl> + group : 512 <nl> + engine : CAFFE <nl> + weight_filler { <nl> + type : " msra " <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv11 / dw / bn " <nl> + type : " BatchNorm " <nl> + bottom : " conv11 / dw " <nl> + top : " conv11 / dw " <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv11 / dw / scale " <nl> + type : " Scale " <nl> + bottom : " conv11 / dw " <nl> + top : " conv11 / dw " <nl> + param { <nl> + lr_mult : 0 . 1 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 . 2 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + scale_param { <nl> + filler { <nl> + value : 1 <nl> + } <nl> + bias_term : true <nl> + bias_filler { <nl> + value : 0 <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv11 / dw / relu " <nl> + type : " ReLU " <nl> + bottom : " conv11 / dw " <nl> + top : " conv11 / dw " <nl> + } <nl> + layer { <nl> + name : " conv11 " <nl> + type : " Convolution " <nl> + bottom : " conv11 / dw " <nl> + top : " conv11 " <nl> + param { <nl> + lr_mult : 1 . 0 <nl> + decay_mult : 1 . 
0 <nl> + } <nl> + convolution_param { <nl> + num_output : 512 <nl> + bias_term : false <nl> + kernel_size : 1 <nl> + weight_filler { <nl> + type : " msra " <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv11 / bn " <nl> + type : " BatchNorm " <nl> + bottom : " conv11 " <nl> + top : " conv11 " <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv11 / scale " <nl> + type : " Scale " <nl> + bottom : " conv11 " <nl> + top : " conv11 " <nl> + param { <nl> + lr_mult : 0 . 1 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 . 2 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + scale_param { <nl> + filler { <nl> + value : 1 <nl> + } <nl> + bias_term : true <nl> + bias_filler { <nl> + value : 0 <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv11 / relu " <nl> + type : " ReLU " <nl> + bottom : " conv11 " <nl> + top : " conv11 " <nl> + } <nl> + layer { <nl> + name : " conv12 / dw " <nl> + type : " Convolution " <nl> + bottom : " conv11 " <nl> + top : " conv12 / dw " <nl> + param { <nl> + lr_mult : 1 . 0 <nl> + decay_mult : 1 . 0 <nl> + } <nl> + convolution_param { <nl> + num_output : 512 <nl> + bias_term : false <nl> + pad : 1 <nl> + kernel_size : 3 <nl> + stride : 2 <nl> + group : 512 <nl> + engine : CAFFE <nl> + weight_filler { <nl> + type : " msra " <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv12 / dw / bn " <nl> + type : " BatchNorm " <nl> + bottom : " conv12 / dw " <nl> + top : " conv12 / dw " <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv12 / dw / scale " <nl> + type : " Scale " <nl> + bottom : " conv12 / dw " <nl> + top : " conv12 / dw " <nl> + param { <nl> + lr_mult : 0 . 1 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 . 2 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + scale_param { <nl> + filler { <nl> + value : 1 <nl> + } <nl> + bias_term : true <nl> + bias_filler { <nl> + value : 0 <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv12 / dw / relu " <nl> + type : " ReLU " <nl> + bottom : " conv12 / dw " <nl> + top : " conv12 / dw " <nl> + } <nl> + layer { <nl> + name : " conv12 " <nl> + type : " Convolution " <nl> + bottom : " conv12 / dw " <nl> + top : " conv12 " <nl> + param { <nl> + lr_mult : 1 . 0 <nl> + decay_mult : 1 . 0 <nl> + } <nl> + convolution_param { <nl> + num_output : 1024 <nl> + bias_term : false <nl> + kernel_size : 1 <nl> + weight_filler { <nl> + type : " msra " <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv12 / bn " <nl> + type : " BatchNorm " <nl> + bottom : " conv12 " <nl> + top : " conv12 " <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv12 / scale " <nl> + type : " Scale " <nl> + bottom : " conv12 " <nl> + top : " conv12 " <nl> + param { <nl> + lr_mult : 0 . 1 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 . 2 <nl> + decay_mult : 0 . 
0 <nl> + } <nl> + scale_param { <nl> + filler { <nl> + value : 1 <nl> + } <nl> + bias_term : true <nl> + bias_filler { <nl> + value : 0 <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv12 / relu " <nl> + type : " ReLU " <nl> + bottom : " conv12 " <nl> + top : " conv12 " <nl> + } <nl> + layer { <nl> + name : " conv13 / dw " <nl> + type : " Convolution " <nl> + bottom : " conv12 " <nl> + top : " conv13 / dw " <nl> + param { <nl> + lr_mult : 1 . 0 <nl> + decay_mult : 1 . 0 <nl> + } <nl> + convolution_param { <nl> + num_output : 1024 <nl> + bias_term : false <nl> + pad : 1 <nl> + kernel_size : 3 <nl> + group : 1024 <nl> + engine : CAFFE <nl> + weight_filler { <nl> + type : " msra " <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv13 / dw / bn " <nl> + type : " BatchNorm " <nl> + bottom : " conv13 / dw " <nl> + top : " conv13 / dw " <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv13 / dw / scale " <nl> + type : " Scale " <nl> + bottom : " conv13 / dw " <nl> + top : " conv13 / dw " <nl> + param { <nl> + lr_mult : 0 . 1 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 . 2 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + scale_param { <nl> + filler { <nl> + value : 1 <nl> + } <nl> + bias_term : true <nl> + bias_filler { <nl> + value : 0 <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv13 / dw / relu " <nl> + type : " ReLU " <nl> + bottom : " conv13 / dw " <nl> + top : " conv13 / dw " <nl> + } <nl> + layer { <nl> + name : " conv13 " <nl> + type : " Convolution " <nl> + bottom : " conv13 / dw " <nl> + top : " conv13 " <nl> + param { <nl> + lr_mult : 1 . 0 <nl> + decay_mult : 1 . 0 <nl> + } <nl> + convolution_param { <nl> + num_output : 1024 <nl> + bias_term : false <nl> + kernel_size : 1 <nl> + weight_filler { <nl> + type : " msra " <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv13 / bn " <nl> + type : " BatchNorm " <nl> + bottom : " conv13 " <nl> + top : " conv13 " <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv13 / scale " <nl> + type : " Scale " <nl> + bottom : " conv13 " <nl> + top : " conv13 " <nl> + param { <nl> + lr_mult : 0 . 1 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 . 2 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + scale_param { <nl> + filler { <nl> + value : 1 <nl> + } <nl> + bias_term : true <nl> + bias_filler { <nl> + value : 0 <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv13 / relu " <nl> + type : " ReLU " <nl> + bottom : " conv13 " <nl> + top : " conv13 " <nl> + } <nl> + layer { <nl> + name : " conv14_1 " <nl> + type : " Convolution " <nl> + bottom : " conv13 " <nl> + top : " conv14_1 " <nl> + param { <nl> + lr_mult : 1 . 0 <nl> + decay_mult : 1 . 
0 <nl> + } <nl> + convolution_param { <nl> + num_output : 256 <nl> + bias_term : false <nl> + kernel_size : 1 <nl> + weight_filler { <nl> + type : " msra " <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv14_1 / bn " <nl> + type : " BatchNorm " <nl> + bottom : " conv14_1 " <nl> + top : " conv14_1 " <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv14_1 / scale " <nl> + type : " Scale " <nl> + bottom : " conv14_1 " <nl> + top : " conv14_1 " <nl> + param { <nl> + lr_mult : 0 . 1 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 . 2 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + scale_param { <nl> + filler { <nl> + value : 1 <nl> + } <nl> + bias_term : true <nl> + bias_filler { <nl> + value : 0 <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv14_1 / relu " <nl> + type : " ReLU " <nl> + bottom : " conv14_1 " <nl> + top : " conv14_1 " <nl> + } <nl> + layer { <nl> + name : " conv14_2 " <nl> + type : " Convolution " <nl> + bottom : " conv14_1 " <nl> + top : " conv14_2 " <nl> + param { <nl> + lr_mult : 1 . 0 <nl> + decay_mult : 1 . 0 <nl> + } <nl> + convolution_param { <nl> + num_output : 512 <nl> + bias_term : false <nl> + pad : 1 <nl> + kernel_size : 3 <nl> + stride : 2 <nl> + weight_filler { <nl> + type : " msra " <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv14_2 / bn " <nl> + type : " BatchNorm " <nl> + bottom : " conv14_2 " <nl> + top : " conv14_2 " <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv14_2 / scale " <nl> + type : " Scale " <nl> + bottom : " conv14_2 " <nl> + top : " conv14_2 " <nl> + param { <nl> + lr_mult : 0 . 1 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 . 2 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + scale_param { <nl> + filler { <nl> + value : 1 <nl> + } <nl> + bias_term : true <nl> + bias_filler { <nl> + value : 0 <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv14_2 / relu " <nl> + type : " ReLU " <nl> + bottom : " conv14_2 " <nl> + top : " conv14_2 " <nl> + } <nl> + layer { <nl> + name : " conv15_1 " <nl> + type : " Convolution " <nl> + bottom : " conv14_2 " <nl> + top : " conv15_1 " <nl> + param { <nl> + lr_mult : 1 . 0 <nl> + decay_mult : 1 . 0 <nl> + } <nl> + convolution_param { <nl> + num_output : 128 <nl> + bias_term : false <nl> + kernel_size : 1 <nl> + weight_filler { <nl> + type : " msra " <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv15_1 / bn " <nl> + type : " BatchNorm " <nl> + bottom : " conv15_1 " <nl> + top : " conv15_1 " <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv15_1 / scale " <nl> + type : " Scale " <nl> + bottom : " conv15_1 " <nl> + top : " conv15_1 " <nl> + param { <nl> + lr_mult : 0 . 1 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 . 2 <nl> + decay_mult : 0 . 
0 <nl> + } <nl> + scale_param { <nl> + filler { <nl> + value : 1 <nl> + } <nl> + bias_term : true <nl> + bias_filler { <nl> + value : 0 <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv15_1 / relu " <nl> + type : " ReLU " <nl> + bottom : " conv15_1 " <nl> + top : " conv15_1 " <nl> + } <nl> + layer { <nl> + name : " conv15_2 " <nl> + type : " Convolution " <nl> + bottom : " conv15_1 " <nl> + top : " conv15_2 " <nl> + param { <nl> + lr_mult : 1 . 0 <nl> + decay_mult : 1 . 0 <nl> + } <nl> + convolution_param { <nl> + num_output : 256 <nl> + bias_term : false <nl> + pad : 1 <nl> + kernel_size : 3 <nl> + stride : 2 <nl> + weight_filler { <nl> + type : " msra " <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv15_2 / bn " <nl> + type : " BatchNorm " <nl> + bottom : " conv15_2 " <nl> + top : " conv15_2 " <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv15_2 / scale " <nl> + type : " Scale " <nl> + bottom : " conv15_2 " <nl> + top : " conv15_2 " <nl> + param { <nl> + lr_mult : 0 . 1 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 . 2 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + scale_param { <nl> + filler { <nl> + value : 1 <nl> + } <nl> + bias_term : true <nl> + bias_filler { <nl> + value : 0 <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv15_2 / relu " <nl> + type : " ReLU " <nl> + bottom : " conv15_2 " <nl> + top : " conv15_2 " <nl> + } <nl> + layer { <nl> + name : " conv16_1 " <nl> + type : " Convolution " <nl> + bottom : " conv15_2 " <nl> + top : " conv16_1 " <nl> + param { <nl> + lr_mult : 1 . 0 <nl> + decay_mult : 1 . 0 <nl> + } <nl> + convolution_param { <nl> + num_output : 128 <nl> + bias_term : false <nl> + kernel_size : 1 <nl> + weight_filler { <nl> + type : " msra " <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv16_1 / bn " <nl> + type : " BatchNorm " <nl> + bottom : " conv16_1 " <nl> + top : " conv16_1 " <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv16_1 / scale " <nl> + type : " Scale " <nl> + bottom : " conv16_1 " <nl> + top : " conv16_1 " <nl> + param { <nl> + lr_mult : 0 . 1 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 . 2 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + scale_param { <nl> + filler { <nl> + value : 1 <nl> + } <nl> + bias_term : true <nl> + bias_filler { <nl> + value : 0 <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv16_1 / relu " <nl> + type : " ReLU " <nl> + bottom : " conv16_1 " <nl> + top : " conv16_1 " <nl> + } <nl> + layer { <nl> + name : " conv16_2 " <nl> + type : " Convolution " <nl> + bottom : " conv16_1 " <nl> + top : " conv16_2 " <nl> + param { <nl> + lr_mult : 1 . 0 <nl> + decay_mult : 1 . 
0 <nl> + } <nl> + convolution_param { <nl> + num_output : 256 <nl> + bias_term : false <nl> + pad : 1 <nl> + kernel_size : 3 <nl> + stride : 2 <nl> + weight_filler { <nl> + type : " msra " <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv16_2 / bn " <nl> + type : " BatchNorm " <nl> + bottom : " conv16_2 " <nl> + top : " conv16_2 " <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv16_2 / scale " <nl> + type : " Scale " <nl> + bottom : " conv16_2 " <nl> + top : " conv16_2 " <nl> + param { <nl> + lr_mult : 0 . 1 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 . 2 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + scale_param { <nl> + filler { <nl> + value : 1 <nl> + } <nl> + bias_term : true <nl> + bias_filler { <nl> + value : 0 <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv16_2 / relu " <nl> + type : " ReLU " <nl> + bottom : " conv16_2 " <nl> + top : " conv16_2 " <nl> + } <nl> + layer { <nl> + name : " conv17_1 " <nl> + type : " Convolution " <nl> + bottom : " conv16_2 " <nl> + top : " conv17_1 " <nl> + param { <nl> + lr_mult : 1 . 0 <nl> + decay_mult : 1 . 0 <nl> + } <nl> + convolution_param { <nl> + num_output : 64 <nl> + bias_term : false <nl> + kernel_size : 1 <nl> + weight_filler { <nl> + type : " msra " <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv17_1 / bn " <nl> + type : " BatchNorm " <nl> + bottom : " conv17_1 " <nl> + top : " conv17_1 " <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv17_1 / scale " <nl> + type : " Scale " <nl> + bottom : " conv17_1 " <nl> + top : " conv17_1 " <nl> + param { <nl> + lr_mult : 0 . 1 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 . 2 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + scale_param { <nl> + filler { <nl> + value : 1 <nl> + } <nl> + bias_term : true <nl> + bias_filler { <nl> + value : 0 <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv17_1 / relu " <nl> + type : " ReLU " <nl> + bottom : " conv17_1 " <nl> + top : " conv17_1 " <nl> + } <nl> + layer { <nl> + name : " conv17_2 " <nl> + type : " Convolution " <nl> + bottom : " conv17_1 " <nl> + top : " conv17_2 " <nl> + param { <nl> + lr_mult : 1 . 0 <nl> + decay_mult : 1 . 0 <nl> + } <nl> + convolution_param { <nl> + num_output : 128 <nl> + bias_term : false <nl> + pad : 1 <nl> + kernel_size : 3 <nl> + stride : 2 <nl> + weight_filler { <nl> + type : " msra " <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv17_2 / bn " <nl> + type : " BatchNorm " <nl> + bottom : " conv17_2 " <nl> + top : " conv17_2 " <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 <nl> + decay_mult : 0 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv17_2 / scale " <nl> + type : " Scale " <nl> + bottom : " conv17_2 " <nl> + top : " conv17_2 " <nl> + param { <nl> + lr_mult : 0 . 1 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + param { <nl> + lr_mult : 0 . 2 <nl> + decay_mult : 0 . 
0 <nl> + } <nl> + scale_param { <nl> + filler { <nl> + value : 1 <nl> + } <nl> + bias_term : true <nl> + bias_filler { <nl> + value : 0 <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv17_2 / relu " <nl> + type : " ReLU " <nl> + bottom : " conv17_2 " <nl> + top : " conv17_2 " <nl> + } <nl> + layer { <nl> + name : " conv11_mbox_loc " <nl> + type : " Convolution " <nl> + bottom : " conv11 " <nl> + top : " conv11_mbox_loc " <nl> + param { <nl> + lr_mult : 1 . 0 <nl> + decay_mult : 1 . 0 <nl> + } <nl> + param { <nl> + lr_mult : 2 . 0 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + convolution_param { <nl> + num_output : 12 <nl> + kernel_size : 1 <nl> + weight_filler { <nl> + type : " msra " <nl> + } <nl> + bias_filler { <nl> + type : " constant " <nl> + value : 0 . 0 <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv11_mbox_loc_perm " <nl> + type : " Permute " <nl> + bottom : " conv11_mbox_loc " <nl> + top : " conv11_mbox_loc_perm " <nl> + permute_param { <nl> + order : 0 <nl> + order : 2 <nl> + order : 3 <nl> + order : 1 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv11_mbox_loc_flat " <nl> + type : " Flatten " <nl> + bottom : " conv11_mbox_loc_perm " <nl> + top : " conv11_mbox_loc_flat " <nl> + flatten_param { <nl> + axis : 1 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv11_mbox_conf " <nl> + type : " Convolution " <nl> + bottom : " conv11 " <nl> + top : " conv11_mbox_conf " <nl> + param { <nl> + lr_mult : 1 . 0 <nl> + decay_mult : 1 . 0 <nl> + } <nl> + param { <nl> + lr_mult : 2 . 0 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + convolution_param { <nl> + num_output : 63 <nl> + kernel_size : 1 <nl> + weight_filler { <nl> + type : " msra " <nl> + } <nl> + bias_filler { <nl> + type : " constant " <nl> + value : 0 . 0 <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv11_mbox_conf_perm " <nl> + type : " Permute " <nl> + bottom : " conv11_mbox_conf " <nl> + top : " conv11_mbox_conf_perm " <nl> + permute_param { <nl> + order : 0 <nl> + order : 2 <nl> + order : 3 <nl> + order : 1 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv11_mbox_conf_flat " <nl> + type : " Flatten " <nl> + bottom : " conv11_mbox_conf_perm " <nl> + top : " conv11_mbox_conf_flat " <nl> + flatten_param { <nl> + axis : 1 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv11_mbox_priorbox " <nl> + type : " PriorBox " <nl> + bottom : " conv11 " <nl> + bottom : " data " <nl> + top : " conv11_mbox_priorbox " <nl> + prior_box_param { <nl> + min_size : 60 . 0 <nl> + aspect_ratio : 2 . 0 <nl> + flip : true <nl> + clip : false <nl> + variance : 0 . 1 <nl> + variance : 0 . 1 <nl> + variance : 0 . 2 <nl> + variance : 0 . 2 <nl> + offset : 0 . 5 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv13_mbox_loc " <nl> + type : " Convolution " <nl> + bottom : " conv13 " <nl> + top : " conv13_mbox_loc " <nl> + param { <nl> + lr_mult : 1 . 0 <nl> + decay_mult : 1 . 0 <nl> + } <nl> + param { <nl> + lr_mult : 2 . 0 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + convolution_param { <nl> + num_output : 24 <nl> + kernel_size : 1 <nl> + weight_filler { <nl> + type : " msra " <nl> + } <nl> + bias_filler { <nl> + type : " constant " <nl> + value : 0 . 
0 <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv13_mbox_loc_perm " <nl> + type : " Permute " <nl> + bottom : " conv13_mbox_loc " <nl> + top : " conv13_mbox_loc_perm " <nl> + permute_param { <nl> + order : 0 <nl> + order : 2 <nl> + order : 3 <nl> + order : 1 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv13_mbox_loc_flat " <nl> + type : " Flatten " <nl> + bottom : " conv13_mbox_loc_perm " <nl> + top : " conv13_mbox_loc_flat " <nl> + flatten_param { <nl> + axis : 1 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv13_mbox_conf " <nl> + type : " Convolution " <nl> + bottom : " conv13 " <nl> + top : " conv13_mbox_conf " <nl> + param { <nl> + lr_mult : 1 . 0 <nl> + decay_mult : 1 . 0 <nl> + } <nl> + param { <nl> + lr_mult : 2 . 0 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + convolution_param { <nl> + num_output : 126 <nl> + kernel_size : 1 <nl> + weight_filler { <nl> + type : " msra " <nl> + } <nl> + bias_filler { <nl> + type : " constant " <nl> + value : 0 . 0 <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv13_mbox_conf_perm " <nl> + type : " Permute " <nl> + bottom : " conv13_mbox_conf " <nl> + top : " conv13_mbox_conf_perm " <nl> + permute_param { <nl> + order : 0 <nl> + order : 2 <nl> + order : 3 <nl> + order : 1 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv13_mbox_conf_flat " <nl> + type : " Flatten " <nl> + bottom : " conv13_mbox_conf_perm " <nl> + top : " conv13_mbox_conf_flat " <nl> + flatten_param { <nl> + axis : 1 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv13_mbox_priorbox " <nl> + type : " PriorBox " <nl> + bottom : " conv13 " <nl> + bottom : " data " <nl> + top : " conv13_mbox_priorbox " <nl> + prior_box_param { <nl> + min_size : 105 . 0 <nl> + max_size : 150 . 0 <nl> + aspect_ratio : 2 . 0 <nl> + aspect_ratio : 3 . 0 <nl> + flip : true <nl> + clip : false <nl> + variance : 0 . 1 <nl> + variance : 0 . 1 <nl> + variance : 0 . 2 <nl> + variance : 0 . 2 <nl> + offset : 0 . 5 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv14_2_mbox_loc " <nl> + type : " Convolution " <nl> + bottom : " conv14_2 " <nl> + top : " conv14_2_mbox_loc " <nl> + param { <nl> + lr_mult : 1 . 0 <nl> + decay_mult : 1 . 0 <nl> + } <nl> + param { <nl> + lr_mult : 2 . 0 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + convolution_param { <nl> + num_output : 24 <nl> + kernel_size : 1 <nl> + weight_filler { <nl> + type : " msra " <nl> + } <nl> + bias_filler { <nl> + type : " constant " <nl> + value : 0 . 0 <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv14_2_mbox_loc_perm " <nl> + type : " Permute " <nl> + bottom : " conv14_2_mbox_loc " <nl> + top : " conv14_2_mbox_loc_perm " <nl> + permute_param { <nl> + order : 0 <nl> + order : 2 <nl> + order : 3 <nl> + order : 1 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv14_2_mbox_loc_flat " <nl> + type : " Flatten " <nl> + bottom : " conv14_2_mbox_loc_perm " <nl> + top : " conv14_2_mbox_loc_flat " <nl> + flatten_param { <nl> + axis : 1 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv14_2_mbox_conf " <nl> + type : " Convolution " <nl> + bottom : " conv14_2 " <nl> + top : " conv14_2_mbox_conf " <nl> + param { <nl> + lr_mult : 1 . 0 <nl> + decay_mult : 1 . 0 <nl> + } <nl> + param { <nl> + lr_mult : 2 . 0 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + convolution_param { <nl> + num_output : 126 <nl> + kernel_size : 1 <nl> + weight_filler { <nl> + type : " msra " <nl> + } <nl> + bias_filler { <nl> + type : " constant " <nl> + value : 0 . 
0 <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv14_2_mbox_conf_perm " <nl> + type : " Permute " <nl> + bottom : " conv14_2_mbox_conf " <nl> + top : " conv14_2_mbox_conf_perm " <nl> + permute_param { <nl> + order : 0 <nl> + order : 2 <nl> + order : 3 <nl> + order : 1 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv14_2_mbox_conf_flat " <nl> + type : " Flatten " <nl> + bottom : " conv14_2_mbox_conf_perm " <nl> + top : " conv14_2_mbox_conf_flat " <nl> + flatten_param { <nl> + axis : 1 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv14_2_mbox_priorbox " <nl> + type : " PriorBox " <nl> + bottom : " conv14_2 " <nl> + bottom : " data " <nl> + top : " conv14_2_mbox_priorbox " <nl> + prior_box_param { <nl> + min_size : 150 . 0 <nl> + max_size : 195 . 0 <nl> + aspect_ratio : 2 . 0 <nl> + aspect_ratio : 3 . 0 <nl> + flip : true <nl> + clip : false <nl> + variance : 0 . 1 <nl> + variance : 0 . 1 <nl> + variance : 0 . 2 <nl> + variance : 0 . 2 <nl> + offset : 0 . 5 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv15_2_mbox_loc " <nl> + type : " Convolution " <nl> + bottom : " conv15_2 " <nl> + top : " conv15_2_mbox_loc " <nl> + param { <nl> + lr_mult : 1 . 0 <nl> + decay_mult : 1 . 0 <nl> + } <nl> + param { <nl> + lr_mult : 2 . 0 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + convolution_param { <nl> + num_output : 24 <nl> + kernel_size : 1 <nl> + weight_filler { <nl> + type : " msra " <nl> + } <nl> + bias_filler { <nl> + type : " constant " <nl> + value : 0 . 0 <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv15_2_mbox_loc_perm " <nl> + type : " Permute " <nl> + bottom : " conv15_2_mbox_loc " <nl> + top : " conv15_2_mbox_loc_perm " <nl> + permute_param { <nl> + order : 0 <nl> + order : 2 <nl> + order : 3 <nl> + order : 1 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv15_2_mbox_loc_flat " <nl> + type : " Flatten " <nl> + bottom : " conv15_2_mbox_loc_perm " <nl> + top : " conv15_2_mbox_loc_flat " <nl> + flatten_param { <nl> + axis : 1 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv15_2_mbox_conf " <nl> + type : " Convolution " <nl> + bottom : " conv15_2 " <nl> + top : " conv15_2_mbox_conf " <nl> + param { <nl> + lr_mult : 1 . 0 <nl> + decay_mult : 1 . 0 <nl> + } <nl> + param { <nl> + lr_mult : 2 . 0 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + convolution_param { <nl> + num_output : 126 <nl> + kernel_size : 1 <nl> + weight_filler { <nl> + type : " msra " <nl> + } <nl> + bias_filler { <nl> + type : " constant " <nl> + value : 0 . 0 <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv15_2_mbox_conf_perm " <nl> + type : " Permute " <nl> + bottom : " conv15_2_mbox_conf " <nl> + top : " conv15_2_mbox_conf_perm " <nl> + permute_param { <nl> + order : 0 <nl> + order : 2 <nl> + order : 3 <nl> + order : 1 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv15_2_mbox_conf_flat " <nl> + type : " Flatten " <nl> + bottom : " conv15_2_mbox_conf_perm " <nl> + top : " conv15_2_mbox_conf_flat " <nl> + flatten_param { <nl> + axis : 1 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv15_2_mbox_priorbox " <nl> + type : " PriorBox " <nl> + bottom : " conv15_2 " <nl> + bottom : " data " <nl> + top : " conv15_2_mbox_priorbox " <nl> + prior_box_param { <nl> + min_size : 195 . 0 <nl> + max_size : 240 . 0 <nl> + aspect_ratio : 2 . 0 <nl> + aspect_ratio : 3 . 0 <nl> + flip : true <nl> + clip : false <nl> + variance : 0 . 1 <nl> + variance : 0 . 1 <nl> + variance : 0 . 2 <nl> + variance : 0 . 2 <nl> + offset : 0 . 
5 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv16_2_mbox_loc " <nl> + type : " Convolution " <nl> + bottom : " conv16_2 " <nl> + top : " conv16_2_mbox_loc " <nl> + param { <nl> + lr_mult : 1 . 0 <nl> + decay_mult : 1 . 0 <nl> + } <nl> + param { <nl> + lr_mult : 2 . 0 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + convolution_param { <nl> + num_output : 24 <nl> + kernel_size : 1 <nl> + weight_filler { <nl> + type : " msra " <nl> + } <nl> + bias_filler { <nl> + type : " constant " <nl> + value : 0 . 0 <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv16_2_mbox_loc_perm " <nl> + type : " Permute " <nl> + bottom : " conv16_2_mbox_loc " <nl> + top : " conv16_2_mbox_loc_perm " <nl> + permute_param { <nl> + order : 0 <nl> + order : 2 <nl> + order : 3 <nl> + order : 1 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv16_2_mbox_loc_flat " <nl> + type : " Flatten " <nl> + bottom : " conv16_2_mbox_loc_perm " <nl> + top : " conv16_2_mbox_loc_flat " <nl> + flatten_param { <nl> + axis : 1 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv16_2_mbox_conf " <nl> + type : " Convolution " <nl> + bottom : " conv16_2 " <nl> + top : " conv16_2_mbox_conf " <nl> + param { <nl> + lr_mult : 1 . 0 <nl> + decay_mult : 1 . 0 <nl> + } <nl> + param { <nl> + lr_mult : 2 . 0 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + convolution_param { <nl> + num_output : 126 <nl> + kernel_size : 1 <nl> + weight_filler { <nl> + type : " msra " <nl> + } <nl> + bias_filler { <nl> + type : " constant " <nl> + value : 0 . 0 <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv16_2_mbox_conf_perm " <nl> + type : " Permute " <nl> + bottom : " conv16_2_mbox_conf " <nl> + top : " conv16_2_mbox_conf_perm " <nl> + permute_param { <nl> + order : 0 <nl> + order : 2 <nl> + order : 3 <nl> + order : 1 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv16_2_mbox_conf_flat " <nl> + type : " Flatten " <nl> + bottom : " conv16_2_mbox_conf_perm " <nl> + top : " conv16_2_mbox_conf_flat " <nl> + flatten_param { <nl> + axis : 1 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv16_2_mbox_priorbox " <nl> + type : " PriorBox " <nl> + bottom : " conv16_2 " <nl> + bottom : " data " <nl> + top : " conv16_2_mbox_priorbox " <nl> + prior_box_param { <nl> + min_size : 240 . 0 <nl> + max_size : 285 . 0 <nl> + aspect_ratio : 2 . 0 <nl> + aspect_ratio : 3 . 0 <nl> + flip : true <nl> + clip : false <nl> + variance : 0 . 1 <nl> + variance : 0 . 1 <nl> + variance : 0 . 2 <nl> + variance : 0 . 2 <nl> + offset : 0 . 5 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv17_2_mbox_loc " <nl> + type : " Convolution " <nl> + bottom : " conv17_2 " <nl> + top : " conv17_2_mbox_loc " <nl> + param { <nl> + lr_mult : 1 . 0 <nl> + decay_mult : 1 . 0 <nl> + } <nl> + param { <nl> + lr_mult : 2 . 0 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + convolution_param { <nl> + num_output : 24 <nl> + kernel_size : 1 <nl> + weight_filler { <nl> + type : " msra " <nl> + } <nl> + bias_filler { <nl> + type : " constant " <nl> + value : 0 . 
0 <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv17_2_mbox_loc_perm " <nl> + type : " Permute " <nl> + bottom : " conv17_2_mbox_loc " <nl> + top : " conv17_2_mbox_loc_perm " <nl> + permute_param { <nl> + order : 0 <nl> + order : 2 <nl> + order : 3 <nl> + order : 1 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv17_2_mbox_loc_flat " <nl> + type : " Flatten " <nl> + bottom : " conv17_2_mbox_loc_perm " <nl> + top : " conv17_2_mbox_loc_flat " <nl> + flatten_param { <nl> + axis : 1 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv17_2_mbox_conf " <nl> + type : " Convolution " <nl> + bottom : " conv17_2 " <nl> + top : " conv17_2_mbox_conf " <nl> + param { <nl> + lr_mult : 1 . 0 <nl> + decay_mult : 1 . 0 <nl> + } <nl> + param { <nl> + lr_mult : 2 . 0 <nl> + decay_mult : 0 . 0 <nl> + } <nl> + convolution_param { <nl> + num_output : 126 <nl> + kernel_size : 1 <nl> + weight_filler { <nl> + type : " msra " <nl> + } <nl> + bias_filler { <nl> + type : " constant " <nl> + value : 0 . 0 <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv17_2_mbox_conf_perm " <nl> + type : " Permute " <nl> + bottom : " conv17_2_mbox_conf " <nl> + top : " conv17_2_mbox_conf_perm " <nl> + permute_param { <nl> + order : 0 <nl> + order : 2 <nl> + order : 3 <nl> + order : 1 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv17_2_mbox_conf_flat " <nl> + type : " Flatten " <nl> + bottom : " conv17_2_mbox_conf_perm " <nl> + top : " conv17_2_mbox_conf_flat " <nl> + flatten_param { <nl> + axis : 1 <nl> + } <nl> + } <nl> + layer { <nl> + name : " conv17_2_mbox_priorbox " <nl> + type : " PriorBox " <nl> + bottom : " conv17_2 " <nl> + bottom : " data " <nl> + top : " conv17_2_mbox_priorbox " <nl> + prior_box_param { <nl> + min_size : 285 . 0 <nl> + max_size : 300 . 0 <nl> + aspect_ratio : 2 . 0 <nl> + aspect_ratio : 3 . 0 <nl> + flip : true <nl> + clip : false <nl> + variance : 0 . 1 <nl> + variance : 0 . 1 <nl> + variance : 0 . 2 <nl> + variance : 0 . 2 <nl> + offset : 0 . 
5 <nl> + } <nl> + } <nl> + layer { <nl> + name : " mbox_loc " <nl> + type : " Concat " <nl> + bottom : " conv11_mbox_loc_flat " <nl> + bottom : " conv13_mbox_loc_flat " <nl> + bottom : " conv14_2_mbox_loc_flat " <nl> + bottom : " conv15_2_mbox_loc_flat " <nl> + bottom : " conv16_2_mbox_loc_flat " <nl> + bottom : " conv17_2_mbox_loc_flat " <nl> + top : " mbox_loc " <nl> + concat_param { <nl> + axis : 1 <nl> + } <nl> + } <nl> + layer { <nl> + name : " mbox_conf " <nl> + type : " Concat " <nl> + bottom : " conv11_mbox_conf_flat " <nl> + bottom : " conv13_mbox_conf_flat " <nl> + bottom : " conv14_2_mbox_conf_flat " <nl> + bottom : " conv15_2_mbox_conf_flat " <nl> + bottom : " conv16_2_mbox_conf_flat " <nl> + bottom : " conv17_2_mbox_conf_flat " <nl> + top : " mbox_conf " <nl> + concat_param { <nl> + axis : 1 <nl> + } <nl> + } <nl> + layer { <nl> + name : " mbox_priorbox " <nl> + type : " Concat " <nl> + bottom : " conv11_mbox_priorbox " <nl> + bottom : " conv13_mbox_priorbox " <nl> + bottom : " conv14_2_mbox_priorbox " <nl> + bottom : " conv15_2_mbox_priorbox " <nl> + bottom : " conv16_2_mbox_priorbox " <nl> + bottom : " conv17_2_mbox_priorbox " <nl> + top : " mbox_priorbox " <nl> + concat_param { <nl> + axis : 2 <nl> + } <nl> + } <nl> + layer { <nl> + name : " mbox_conf_reshape " <nl> + type : " Reshape " <nl> + bottom : " mbox_conf " <nl> + top : " mbox_conf_reshape " <nl> + reshape_param { <nl> + shape { <nl> + dim : 0 <nl> + dim : - 1 <nl> + dim : 21 <nl> + } <nl> + } <nl> + } <nl> + layer { <nl> + name : " mbox_conf_softmax " <nl> + type : " Softmax " <nl> + bottom : " mbox_conf_reshape " <nl> + top : " mbox_conf_softmax " <nl> + softmax_param { <nl> + axis : 2 <nl> + } <nl> + } <nl> + layer { <nl> + name : " mbox_conf_flatten " <nl> + type : " Flatten " <nl> + bottom : " mbox_conf_softmax " <nl> + top : " mbox_conf_flatten " <nl> + flatten_param { <nl> + axis : 1 <nl> + } <nl> + } <nl> + layer { <nl> + name : " detection_out " <nl> + type : " DetectionOutput " <nl> + bottom : " mbox_loc " <nl> + bottom : " mbox_conf_flatten " <nl> + bottom : " mbox_priorbox " <nl> + top : " detection_out " <nl> + include { <nl> + phase : TEST <nl> + } <nl> + detection_output_param { <nl> + num_classes : 21 <nl> + share_location : true <nl> + background_label_id : 0 <nl> + nms_param { <nl> + nms_threshold : 0 . 45 <nl> + top_k : 100 <nl> + } <nl> + code_type : CENTER_SIZE <nl> + keep_top_k : 100 <nl> + confidence_threshold : 0 . 25 <nl> + } <nl> + } <nl> \ No newline at end of file <nl> new file mode 100644 <nl> index 00000000000 . . 44b5b6000a3 <nl> mmm / dev / null <nl> ppp b / samples / dnn / mobilenet_ssd_python . py <nl> <nl> + import numpy as np <nl> + import argparse <nl> + <nl> + try : <nl> + import cv2 as cv <nl> + except ImportError : <nl> + raise ImportError ( ' Can \ ' t find OpenCV Python module . If you \ ' ve built it from sources without installation , ' <nl> + ' configure environemnt variable PYTHONPATH to " opencv_build_dir / lib " directory ( with " python3 " subdirectory if required ) ' ) <nl> + <nl> + inWidth = 300 <nl> + inHeight = 300 <nl> + WHRatio = inWidth / float ( inHeight ) <nl> + inScaleFactor = 0 . 007843 <nl> + meanVal = 127 . 
5 <nl> + <nl> + classNames = ( ' background ' , <nl> + ' aeroplane ' , ' bicycle ' , ' bird ' , ' boat ' , <nl> + ' bottle ' , ' bus ' , ' car ' , ' cat ' , ' chair ' , <nl> + ' cow ' , ' diningtable ' , ' dog ' , ' horse ' , <nl> + ' motorbike ' , ' person ' , ' pottedplant ' , <nl> + ' sheep ' , ' sofa ' , ' train ' , ' tvmonitor ' ) <nl> + <nl> + if __name__ = = " __main__ " : <nl> + parser = argparse . ArgumentParser ( ) <nl> + parser . add_argument ( " - - video " , help = " path to video file . If empty , camera ' s stream will be used " ) <nl> + parser . add_argument ( " - - prototxt " , default = " MobileNetSSD_300x300 . prototxt " , <nl> + help = " path to caffe prototxt " ) <nl> + parser . add_argument ( " - c " , " - - caffemodel " , help = " path to caffemodel file , download it here : " <nl> + " https : / / github . com / chuanqi305 / MobileNet - SSD / blob / master / MobileNetSSD_train . caffemodel " ) <nl> + parser . add_argument ( " - - thr " , default = 0 . 2 , help = " confidence threshold to filter out weak detections " ) <nl> + args = parser . parse_args ( ) <nl> + <nl> + net = dnn . readNetFromCaffe ( args . prototxt , args . caffemodel ) <nl> + <nl> + if len ( args . video ) : <nl> + cap = cv2 . VideoCapture ( args . video ) <nl> + else : <nl> + cap = cv2 . VideoCapture ( 0 ) <nl> + <nl> + while True : <nl> + # Capture frame - by - frame <nl> + ret , frame = cap . read ( ) <nl> + blob = dnn . blobFromImage ( frame , inScaleFactor , ( inWidth , inHeight ) , meanVal ) <nl> + net . setInput ( blob ) <nl> + detections = net . forward ( ) <nl> + <nl> + cols = frame . shape [ 1 ] <nl> + rows = frame . shape [ 0 ] <nl> + <nl> + if cols / float ( rows ) > WHRatio : <nl> + cropSize = ( int ( rows * WHRatio ) , rows ) <nl> + else : <nl> + cropSize = ( cols , int ( cols / WHRatio ) ) <nl> + <nl> + y1 = ( rows - cropSize [ 1 ] ) / 2 <nl> + y2 = y1 + cropSize [ 1 ] <nl> + x1 = ( cols - cropSize [ 0 ] ) / 2 <nl> + x2 = x1 + cropSize [ 0 ] <nl> + frame = frame [ y1 : y2 , x1 : x2 ] <nl> + <nl> + cols = frame . shape [ 1 ] <nl> + rows = frame . shape [ 0 ] <nl> + <nl> + for i in range ( detections . shape [ 2 ] ) : <nl> + confidence = detections [ 0 , 0 , i , 2 ] <nl> + if confidence > args . thr : <nl> + class_id = int ( detections [ 0 , 0 , i , 1 ] ) <nl> + <nl> + xLeftBottom = int ( detections [ 0 , 0 , i , 3 ] * cols ) <nl> + yLeftBottom = int ( detections [ 0 , 0 , i , 4 ] * rows ) <nl> + xRightTop = int ( detections [ 0 , 0 , i , 5 ] * cols ) <nl> + yRightTop = int ( detections [ 0 , 0 , i , 6 ] * rows ) <nl> + <nl> + cv2 . rectangle ( frame , ( xLeftBottom , yLeftBottom ) , ( xRightTop , yRightTop ) , <nl> + ( 0 , 255 , 0 ) ) <nl> + label = classNames [ class_id ] + " : " + str ( confidence ) <nl> + labelSize , baseLine = cv2 . getTextSize ( label , cv2 . FONT_HERSHEY_SIMPLEX , 0 . 5 , 1 ) <nl> + <nl> + cv2 . rectangle ( frame , ( xLeftBottom , yLeftBottom - labelSize [ 1 ] ) , <nl> + ( xLeftBottom + labelSize [ 0 ] , yLeftBottom + baseLine ) , <nl> + ( 255 , 255 , 255 ) , cv2 . FILLED ) <nl> + cv2 . putText ( frame , label , ( xLeftBottom , yLeftBottom ) , <nl> + cv2 . FONT_HERSHEY_SIMPLEX , 0 . 5 , ( 0 , 0 , 0 ) ) <nl> + <nl> + cv2 . imshow ( " detections " , frame ) <nl> + if cv2 . waitKey ( 1 ) > = 0 : <nl> + break <nl> new file mode 100644 <nl> index 00000000000 . . 423b34dbcf9 <nl> mmm / dev / null <nl> ppp b / samples / dnn / ssd_mobilenet_object_detection . cpp <nl> <nl> + # include < opencv2 / dnn . hpp > <nl> + # include < opencv2 / dnn / shape_utils . 
hpp > <nl> + # include < opencv2 / imgproc . hpp > <nl> + # include < opencv2 / highgui . hpp > <nl> + <nl> + using namespace cv ; <nl> + using namespace cv : : dnn ; <nl> + <nl> + # include < fstream > <nl> + # include < iostream > <nl> + # include < cstdlib > <nl> + using namespace std ; <nl> + <nl> + const size_t inWidth = 300 ; <nl> + const size_t inHeight = 300 ; <nl> + const float WHRatio = inWidth / ( float ) inHeight ; <nl> + const float inScaleFactor = 0 . 007843f ; <nl> + const float meanVal = 127 . 5 ; <nl> + const char * classNames [ ] = { " background " , <nl> + " aeroplane " , " bicycle " , " bird " , " boat " , <nl> + " bottle " , " bus " , " car " , " cat " , " chair " , <nl> + " cow " , " diningtable " , " dog " , " horse " , <nl> + " motorbike " , " person " , " pottedplant " , <nl> + " sheep " , " sofa " , " train " , " tvmonitor " } ; <nl> + <nl> + const char * about = " This sample uses Single - Shot Detector " <nl> + " ( https : / / arxiv . org / abs / 1512 . 02325 ) " <nl> + " to detect objects on image . \ n " <nl> + " . caffemodel model ' s file is avaliable here : " <nl> + " https : / / github . com / chuanqi305 / MobileNet - SSD / blob / master / MobileNetSSD_train . caffemodel \ n " ; <nl> + <nl> + const char * params <nl> + = " { help | false | print usage } " <nl> + " { proto | MobileNetSSD_300x300 . prototxt | model configuration } " <nl> + " { model | | model weights } " <nl> + " { video | | video for detection } " <nl> + " { out | | path to output video file } " <nl> + " { min_confidence | 0 . 2 | min confidence } " ; <nl> + <nl> + int main ( int argc , char * * argv ) <nl> + { <nl> + cv : : CommandLineParser parser ( argc , argv , params ) ; <nl> + <nl> + if ( parser . get < bool > ( " help " ) ) <nl> + { <nl> + cout < < about < < endl ; <nl> + parser . printMessage ( ) ; <nl> + return 0 ; <nl> + } <nl> + <nl> + String modelConfiguration = parser . get < string > ( " proto " ) ; <nl> + String modelBinary = parser . get < string > ( " model " ) ; <nl> + <nl> + / / ! [ Initialize network ] <nl> + dnn : : Net net = readNetFromCaffe ( modelConfiguration , modelBinary ) ; <nl> + / / ! [ Initialize network ] <nl> + <nl> + VideoCapture cap ( parser . get < String > ( " video " ) ) ; <nl> + if ( ! cap . isOpened ( ) ) / / check if we succeeded <nl> + { <nl> + cap = VideoCapture ( 0 ) ; <nl> + if ( ! cap . isOpened ( ) ) <nl> + { <nl> + cout < < " Couldn ' t find camera " < < endl ; <nl> + return - 1 ; <nl> + } <nl> + } <nl> + <nl> + Size inVideoSize = Size ( ( int ) cap . get ( CV_CAP_PROP_FRAME_WIDTH ) , / / Acquire input size <nl> + ( int ) cap . get ( CV_CAP_PROP_FRAME_HEIGHT ) ) ; <nl> + <nl> + Size cropSize ; <nl> + if ( inVideoSize . width / ( float ) inVideoSize . height > WHRatio ) <nl> + { <nl> + cropSize = Size ( static_cast < int > ( inVideoSize . height * WHRatio ) , <nl> + inVideoSize . height ) ; <nl> + } <nl> + else <nl> + { <nl> + cropSize = Size ( inVideoSize . width , <nl> + static_cast < int > ( inVideoSize . width / WHRatio ) ) ; <nl> + } <nl> + <nl> + Rect crop ( Point ( ( inVideoSize . width - cropSize . width ) / 2 , <nl> + ( inVideoSize . height - cropSize . height ) / 2 ) , <nl> + cropSize ) ; <nl> + <nl> + VideoWriter outputVideo ; <nl> + outputVideo . open ( parser . get < String > ( " out " ) , <nl> + static_cast < int > ( cap . get ( CV_CAP_PROP_FOURCC ) ) , <nl> + cap . get ( CV_CAP_PROP_FPS ) , cropSize , true ) ; <nl> + <nl> + for ( ; ; ) <nl> + { <nl> + Mat frame ; <nl> + cap > > frame ; / / get a new frame from camera <nl> + / / ! 
[ Prepare blob ] <nl> + <nl> + Mat inputBlob = blobFromImage ( frame , inScaleFactor , <nl> + Size ( inWidth , inHeight ) , meanVal ) ; / / Convert Mat to batch of images <nl> + / / ! [ Prepare blob ] <nl> + <nl> + / / ! [ Set input blob ] <nl> + net . setInput ( inputBlob , " data " ) ; / / set the network input <nl> + / / ! [ Set input blob ] <nl> + <nl> + TickMeter tm ; <nl> + tm . start ( ) ; <nl> + / / ! [ Make forward pass ] <nl> + Mat detection = net . forward ( " detection_out " ) ; / / compute output <nl> + tm . stop ( ) ; <nl> + cout < < " Inference time , ms : " < < tm . getTimeMilli ( ) < < endl ; <nl> + / / ! [ Make forward pass ] <nl> + <nl> + Mat detectionMat ( detection . size [ 2 ] , detection . size [ 3 ] , CV_32F , detection . ptr < float > ( ) ) ; <nl> + <nl> + frame = frame ( crop ) ; <nl> + <nl> + float confidenceThreshold = parser . get < float > ( " min_confidence " ) ; <nl> + for ( int i = 0 ; i < detectionMat . rows ; i + + ) <nl> + { <nl> + float confidence = detectionMat . at < float > ( i , 2 ) ; <nl> + <nl> + if ( confidence > confidenceThreshold ) <nl> + { <nl> + size_t objectClass = ( size_t ) ( detectionMat . at < float > ( i , 1 ) ) ; <nl> + <nl> + int xLeftBottom = static_cast < int > ( detectionMat . at < float > ( i , 3 ) * frame . cols ) ; <nl> + int yLeftBottom = static_cast < int > ( detectionMat . at < float > ( i , 4 ) * frame . rows ) ; <nl> + int xRightTop = static_cast < int > ( detectionMat . at < float > ( i , 5 ) * frame . cols ) ; <nl> + int yRightTop = static_cast < int > ( detectionMat . at < float > ( i , 6 ) * frame . rows ) ; <nl> + <nl> + ostringstream ss ; <nl> + ss < < confidence ; <nl> + String conf ( ss . str ( ) ) ; <nl> + <nl> + Rect object ( ( int ) xLeftBottom , ( int ) yLeftBottom , <nl> + ( int ) ( xRightTop - xLeftBottom ) , <nl> + ( int ) ( yRightTop - yLeftBottom ) ) ; <nl> + <nl> + rectangle ( frame , object , Scalar ( 0 , 255 , 0 ) ) ; <nl> + String label = String ( classNames [ objectClass ] ) + " : " + conf ; <nl> + int baseLine = 0 ; <nl> + Size labelSize = getTextSize ( label , FONT_HERSHEY_SIMPLEX , 0 . 5 , 1 , & baseLine ) ; <nl> + rectangle ( frame , Rect ( Point ( xLeftBottom , yLeftBottom - labelSize . height ) , <nl> + Size ( labelSize . width , labelSize . height + baseLine ) ) , <nl> + Scalar ( 255 , 255 , 255 ) , CV_FILLED ) ; <nl> + putText ( frame , label , Point ( xLeftBottom , yLeftBottom ) , <nl> + FONT_HERSHEY_SIMPLEX , 0 . 5 , Scalar ( 0 , 0 , 0 ) ) ; <nl> + } <nl> + } <nl> + <nl> + if ( outputVideo . isOpened ( ) ) <nl> + outputVideo < < frame ; <nl> + <nl> + imshow ( " detections " , frame ) ; <nl> + if ( waitKey ( 1 ) > = 0 ) break ; <nl> + } <nl> + <nl> + return 0 ; <nl> + } / / main <nl>
|
Merge pull request from arrybn:mobilenet_ssd_sample
|
opencv/opencv
|
2959e7aba9267b7c71dd3ff98aa7c9ebb9751a20
|
2017-08-01T11:12:54Z
|
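The commit above ships the MobileNet-SSD deploy prototxt together with a Python and a C++ sample. As a minimal sketch (not part of the commit) of how that exported network would be run on a single image with OpenCV's dnn module, assuming local file names and a test image path, and reusing the sample's own preprocessing constants (1/127.5 scale, 300x300 input, mean 127.5, 0.2 confidence threshold):

import cv2 as cv

# Assumed local file names; the .caffemodel is the one linked from the sample's help text.
net = cv.dnn.readNetFromCaffe("MobileNetSSD_300x300.prototxt",
                              "MobileNetSSD_train.caffemodel")

img = cv.imread("example.jpg")                       # any BGR test image (assumed path)
blob = cv.dnn.blobFromImage(img, 0.007843, (300, 300), 127.5)
net.setInput(blob)
detections = net.forward()                           # 1 x 1 x N x 7 detection matrix

h, w = img.shape[:2]
for i in range(detections.shape[2]):
    confidence = float(detections[0, 0, i, 2])
    if confidence > 0.2:
        class_id = int(detections[0, 0, i, 1])
        x1, y1, x2, y2 = (detections[0, 0, i, 3:7] * [w, h, w, h]).astype(int)
        print(class_id, confidence, (x1, y1, x2, y2))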
mmm a / db / namespace - inl . h <nl> ppp b / db / namespace - inl . h <nl> namespace mongo { <nl> / / if profiling indicates this method is a significant bottleneck , we could have a version we <nl> / / use for reads which does not fill with zeroes , and keep the zeroing behavior on writes . <nl> / / <nl> - int i = 0 ; <nl> - while ( ns [ i ] ) { <nl> - buf [ i ] = ns [ i ] ; <nl> - if ( + + i > = MaxNsLen - 1 ) <nl> - uasserted ( 10080 , " ns name too long , max size is 128 " ) ; <nl> - } <nl> - do { <nl> - buf [ i + + ] = 0 ; <nl> - } while ( i < MaxNsLen ) ; <nl> + unsigned len = strlen ( ns ) ; <nl> + uassert ( 10080 , " ns name too long , max size is 128 " , len < MaxNsLen ) ; <nl> + memset ( buf , 0 , MaxNsLen ) ; <nl> + memcpy ( buf , ns , len ) ; <nl> return * this ; <nl> } <nl> <nl>
|
faster
|
mongodb/mongo
|
8f98101ecb5293a1c28f09d7be4540b2f67bd920
|
2010-12-14T01:45:16Z
|
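The "faster" commit above replaces a character-by-character copy loop with a strlen bound check followed by memset and memcpy into the fixed 128-byte namespace buffer. Purely as an illustration of the same bounded, zero-padded copy semantics (a sketch, not MongoDB code; the names below are made up):

MAX_NS_LEN = 128  # mirrors MaxNsLen from the diff

def copy_namespace(ns: str) -> bytes:
    # Reject overlong names, same bound as uassert(10080, "ns name too long, max size is 128").
    data = ns.encode("utf-8")
    if len(data) >= MAX_NS_LEN:
        raise ValueError("ns name too long, max size is 128")
    # memset(buf, 0, MaxNsLen) followed by memcpy(buf, ns, len), collapsed into one expression.
    return data + b"\x00" * (MAX_NS_LEN - len(data))

buf = copy_namespace("test.collection")
assert len(buf) == MAX_NS_LEN and buf.rstrip(b"\x00") == b"test.collection"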
mmm a / example / redis_c + + / redis_server . cpp <nl> ppp b / example / redis_c + + / redis_server . cpp <nl> <nl> class RedisServiceImpl : public brpc : : RedisService { <nl> public : <nl> bool Set ( const std : : string & key , const std : : string & value ) { <nl> - int slot = butil : : crc32c : : Value ( key . c_str ( ) , key . size ( ) ) % HashSlotNum ; <nl> + int slot = butil : : crc32c : : Value ( key . c_str ( ) , key . size ( ) ) % kHashSlotNum ; <nl> _mutex [ slot ] . lock ( ) ; <nl> _map [ slot ] [ key ] = value ; <nl> _mutex [ slot ] . unlock ( ) ; <nl> class RedisServiceImpl : public brpc : : RedisService { <nl> } <nl> <nl> bool Get ( const std : : string & key , std : : string * value ) { <nl> - int slot = butil : : crc32c : : Value ( key . c_str ( ) , key . size ( ) ) % HashSlotNum ; <nl> + int slot = butil : : crc32c : : Value ( key . c_str ( ) , key . size ( ) ) % kHashSlotNum ; <nl> _mutex [ slot ] . lock ( ) ; <nl> auto it = _map [ slot ] . find ( key ) ; <nl> if ( it = = _map [ slot ] . end ( ) ) { <nl> class RedisServiceImpl : public brpc : : RedisService { <nl> } <nl> <nl> private : <nl> - const static int HashSlotNum = 32 ; <nl> - std : : unordered_map < std : : string , std : : string > _map [ HashSlotNum ] ; <nl> - butil : : Mutex _mutex [ HashSlotNum ] ; <nl> + const static int kHashSlotNum = 32 ; <nl> + std : : unordered_map < std : : string , std : : string > _map [ kHashSlotNum ] ; <nl> + butil : : Mutex _mutex [ kHashSlotNum ] ; <nl> } ; <nl> <nl> class GetCommandHandler : public brpc : : RedisCommandHandler { <nl> mmm a / src / brpc / policy / redis_protocol . cpp <nl> ppp b / src / brpc / policy / redis_protocol . cpp <nl> struct InputResponse : public InputMessageBase { <nl> } <nl> } ; <nl> <nl> + / / This struct is pushed into ExecutionQueue of each connection . <nl> + struct CommandInfo { <nl> + std : : string command ; <nl> + } ; <nl> + <nl> / / This class is as parsing_context in socket . <nl> class RedisConnContext : public Destroyable { <nl> public : <nl> class RedisConnContext : public Destroyable { <nl> / / first handler pointer that triggers the transaction . <nl> RedisCommandHandler * handler_continue ; <nl> / / The redis command are parsed and pushed into this queue <nl> - bthread : : ExecutionQueueId < std : : string * > queue ; <nl> + bthread : : ExecutionQueueId < CommandInfo * > queue ; <nl> <nl> RedisCommandParser parser ; <nl> + std : : string command ; <nl> } ; <nl> <nl> - int ConsumeTask ( RedisConnContext * ctx , std : : string * command , butil : : IOBuf * sendbuf ) { <nl> + int ConsumeTask ( RedisConnContext * ctx , const std : : string & command , butil : : IOBuf * sendbuf ) { <nl> butil : : Arena arena ; <nl> RedisReply output ( & arena ) ; <nl> if ( ctx - > handler_continue ) { <nl> RedisCommandHandler : : Result result = <nl> - ctx - > handler_continue - > Run ( command - > c_str ( ) , & output ) ; <nl> + ctx - > handler_continue - > Run ( command . c_str ( ) , & output ) ; <nl> if ( result = = RedisCommandHandler : : OK ) { <nl> ctx - > handler_continue = NULL ; <nl> } <nl> } else { <nl> std : : string comm ; <nl> comm . reserve ( 8 ) ; <nl> - for ( int i = 0 ; i < ( int ) command - > size ( ) & & ( * command ) [ i ] ! = ' ' ; + + i ) { <nl> - comm . push_back ( std : : tolower ( ( * command ) [ i ] ) ) ; <nl> + for ( int i = 0 ; i < ( int ) command . size ( ) & & command [ i ] ! = ' ' ; + + i ) { <nl> + comm . 
push_back ( std : : tolower ( command [ i ] ) ) ; <nl> } <nl> RedisCommandHandler * ch = ctx - > redis_service - > FindCommandHandler ( comm ) ; <nl> if ( ! ch ) { <nl> int ConsumeTask ( RedisConnContext * ctx , std : : string * command , butil : : IOBuf * sendb <nl> snprintf ( buf , sizeof ( buf ) , " ERR unknown command ` % s ` " , comm . c_str ( ) ) ; <nl> output . SetError ( buf ) ; <nl> } else { <nl> - RedisCommandHandler : : Result result = ch - > Run ( command - > c_str ( ) , & output ) ; <nl> + RedisCommandHandler : : Result result = ch - > Run ( command . c_str ( ) , & output ) ; <nl> if ( result = = RedisCommandHandler : : CONTINUE ) { <nl> ctx - > handler_continue = ch ; <nl> } <nl> } <nl> } <nl> - output . SerializeToIOBuf ( sendbuf ) ; <nl> + output . SerializeTo ( sendbuf ) ; <nl> return 0 ; <nl> } <nl> <nl> - int Consume ( void * ctx , bthread : : TaskIterator < std : : string * > & iter ) { <nl> + int Consume ( void * ctx , bthread : : TaskIterator < CommandInfo * > & iter ) { <nl> RedisConnContext * qctx = static_cast < RedisConnContext * > ( ctx ) ; <nl> if ( iter . is_queue_stopped ( ) ) { <nl> delete qctx ; <nl> int Consume ( void * ctx , bthread : : TaskIterator < std : : string * > & iter ) { <nl> wopt . ignore_eovercrowded = true ; <nl> butil : : IOBuf sendbuf ; <nl> for ( ; iter ; + + iter ) { <nl> - std : : unique_ptr < std : : string > guard ( * iter ) ; <nl> + std : : unique_ptr < CommandInfo > guard ( * iter ) ; <nl> if ( has_err ) { <nl> continue ; <nl> } <nl> - ConsumeTask ( qctx , * iter , & sendbuf ) ; <nl> + ConsumeTask ( qctx , ( * iter ) - > command , & sendbuf ) ; <nl> / / If there are too many tasks to execute , latency of the front <nl> / / responses will be increased by waiting the following tasks to <nl> / / be completed . To prevent this , if the current buf size is greater <nl> ParseResult ParseRedisMessage ( butil : : IOBuf * source , Socket * socket , <nl> } <nl> socket - > reset_parsing_context ( ctx ) ; <nl> } <nl> - ParseError err = ctx - > parser . Parse ( * source ) ; <nl> + ParseError err = ctx - > parser . Consume ( * source , & ctx - > command ) ; <nl> if ( err ! = PARSE_OK ) { <nl> return MakeParseError ( err ) ; <nl> } <nl> - std : : unique_ptr < std : : string > command ( new std : : string ) ; <nl> - ctx - > parser . SwapCommandTo ( command . get ( ) ) ; <nl> - if ( bthread : : execution_queue_execute ( ctx - > queue , command . get ( ) ) ! = 0 ) { <nl> + std : : unique_ptr < CommandInfo > info ( new CommandInfo ) ; <nl> + info - > command . swap ( ctx - > command ) ; <nl> + if ( bthread : : execution_queue_execute ( ctx - > queue , info . get ( ) ) ! = 0 ) { <nl> LOG ( ERROR ) < < " Fail to push execution queue " ; <nl> return MakeParseError ( PARSE_ERROR_NO_RESOURCE ) ; <nl> } <nl> - command . release ( ) ; <nl> + info . release ( ) ; <nl> return MakeMessage ( NULL ) ; <nl> } else { <nl> / / NOTE ( gejun ) : PopPipelinedInfo ( ) is actually more contended than what <nl> mmm a / src / brpc / policy / redis_protocol . h <nl> ppp b / src / brpc / policy / redis_protocol . h <nl> void ProcessRedisResponse ( InputMessageBase * msg ) ; <nl> / / Actions to a redis request , which is left unimplemented . <nl> / / All requests are processed in execution queue pushed in <nl> / / the parsing process . This function must be declared since <nl> - / / server side will enable redis as a server side protocol <nl> - / / when this function is declared . 
<nl> + / / server only enables redis as a server - side protocol when <nl> + / / this function is declared . <nl> void ProcessRedisRequest ( InputMessageBase * msg ) ; <nl> <nl> / / Serialize a redis request . <nl> mmm a / src / brpc / redis . cpp <nl> ppp b / src / brpc / redis . cpp <nl> std : : ostream & operator < < ( std : : ostream & os , const RedisResponse & response ) { <nl> return os ; <nl> } <nl> <nl> - bool RedisReply : : SetArray ( int size ) { <nl> - if ( ! _arena | | _has_set ) { <nl> - return false ; <nl> - } <nl> - _type = REDIS_REPLY_ARRAY ; <nl> - if ( size < 0 ) { <nl> - _length = npos ; <nl> - return true ; <nl> - } else if ( size = = 0 ) { <nl> - _length = 0 ; <nl> - return true ; <nl> - } <nl> - RedisReply * subs = ( RedisReply * ) _arena - > allocate ( sizeof ( RedisReply ) * size ) ; <nl> - if ( ! subs ) { <nl> - LOG ( FATAL ) < < " Fail to allocate RedisReply [ " < < size < < " ] " ; <nl> - return false ; <nl> - } <nl> - for ( int i = 0 ; i < size ; + + i ) { <nl> - new ( & subs [ i ] ) RedisReply ( _arena ) ; <nl> - } <nl> - _length = size ; <nl> - _data . array . replies = subs ; <nl> - _has_set = true ; <nl> - return true ; <nl> - } <nl> - <nl> - bool RedisReply : : SetBasicString ( const std : : string & str , RedisReplyType type ) { <nl> - if ( ! _arena | | _has_set ) { <nl> - return false ; <nl> - } <nl> - const size_t size = str . size ( ) ; <nl> - if ( size < sizeof ( _data . short_str ) ) { <nl> - memcpy ( _data . short_str , str . c_str ( ) , size ) ; <nl> - _data . short_str [ size ] = ' \ 0 ' ; <nl> - } else { <nl> - char * d = ( char * ) _arena - > allocate ( ( size / 8 + 1 ) * 8 ) ; <nl> - if ( ! d ) { <nl> - LOG ( FATAL ) < < " Fail to allocate string [ " < < size < < " ] " ; <nl> - return false ; <nl> - } <nl> - memcpy ( d , str . c_str ( ) , size ) ; <nl> - d [ size ] = ' \ 0 ' ; <nl> - _data . long_str = d ; <nl> - } <nl> - _type = type ; <nl> - _length = size ; <nl> - _has_set = true ; <nl> - return true ; <nl> - } <nl> - <nl> - <nl> bool RedisService : : AddCommandHandler ( const std : : string & name , RedisCommandHandler * handler ) { <nl> std : : string lcname ; <nl> lcname . resize ( name . size ( ) ) ; <nl> bool RedisService : : AddCommandHandler ( const std : : string & name , RedisCommandHandle <nl> LOG ( ERROR ) < < " redis command name = " < < name < < " exist " ; <nl> return false ; <nl> } <nl> - _command_map [ lcname ] . reset ( handler ) ; <nl> + _command_map [ lcname ] = handler ; <nl> return true ; <nl> } <nl> <nl> RedisCommandHandler * RedisService : : FindCommandHandler ( const std : : string & name ) { <nl> auto it = _command_map . find ( name ) ; <nl> if ( it ! = _command_map . end ( ) ) { <nl> - return it - > second . get ( ) ; <nl> + return it - > second ; <nl> } <nl> return NULL ; <nl> } <nl> mmm a / src / brpc / redis . h <nl> ppp b / src / brpc / redis . h <nl> class RedisCommandHandler ; <nl> / / to enable redis support . <nl> class RedisService { <nl> public : <nl> - typedef std : : unordered_map < std : : string , std : : shared_ptr < RedisCommandHandler > > CommandMap ; <nl> + typedef std : : unordered_map < std : : string , RedisCommandHandler * > CommandMap ; <nl> virtual ~ RedisService ( ) { } <nl> <nl> / / Call this function to register ` handler ` that can handle command ` name ` . <nl> bool AddCommandHandler ( const std : : string & name , RedisCommandHandler * handler ) ; <nl> <nl> - / / This function should be touched by user and used by brpc deverloper only . 
<nl> + / / This function should not be touched by user and used by brpc deverloper only . <nl> RedisCommandHandler * FindCommandHandler ( const std : : string & name ) ; <nl> private : <nl> CommandMap _command_map ; <nl> mmm a / src / brpc / redis_command . cpp <nl> ppp b / src / brpc / redis_command . cpp <nl> RedisCommandParser : : RedisCommandParser ( ) { <nl> Reset ( ) ; <nl> } <nl> <nl> - ParseError RedisCommandParser : : Parse ( butil : : IOBuf & buf ) { <nl> + ParseError RedisCommandParser : : Consume ( butil : : IOBuf & buf , std : : string * command ) { <nl> const char * pfc = ( const char * ) buf . fetch1 ( ) ; <nl> if ( pfc = = NULL ) { <nl> return PARSE_ERROR_NOT_ENOUGH_DATA ; <nl> ParseError RedisCommandParser : : Parse ( butil : : IOBuf & buf ) { <nl> _length = value ; <nl> _index = 0 ; <nl> _command . clear ( ) ; <nl> - return Parse ( buf ) ; <nl> + return Consume ( buf , command ) ; <nl> } <nl> CHECK ( _index < _length ) < < " a complete command has been parsed . " <nl> " impl of RedisCommandParser : : Parse is buggy " ; <nl> ParseError RedisCommandParser : : Parse ( butil : : IOBuf & buf ) { <nl> return PARSE_ERROR_ABSOLUTELY_WRONG ; <nl> } <nl> if ( + + _index < _length ) { <nl> - return Parse ( buf ) ; <nl> + return Consume ( buf , command ) ; <nl> } <nl> + command - > clear ( ) ; <nl> + command - > swap ( _command ) ; <nl> Reset ( ) ; <nl> return PARSE_OK ; <nl> } <nl> <nl> - void RedisCommandParser : : SwapCommandTo ( std : : string * out ) { <nl> - out - > clear ( ) ; <nl> - out - > swap ( _command ) ; <nl> - } <nl> - <nl> void RedisCommandParser : : Reset ( ) { <nl> _parsing_array = false ; <nl> _length = 0 ; <nl> mmm a / src / brpc / redis_command . h <nl> ppp b / src / brpc / redis_command . h <nl> class RedisCommandParser { <nl> public : <nl> RedisCommandParser ( ) ; <nl> <nl> - / / Parse raw message from ` buf ' . Return PARSE_OK if successful . <nl> - ParseError Parse ( butil : : IOBuf & buf ) ; <nl> - <nl> - / / After Parse returns PARSE_OK , call this function to swap <nl> - / / the parsed command string to ` out ' . <nl> - void SwapCommandTo ( std : : string * out ) ; <nl> + / / Parse raw message from ` buf ' . Return PARSE_OK and set the parsed command <nl> + / / to ` command ' if successful . <nl> + ParseError Consume ( butil : : IOBuf & buf , std : : string * command ) ; <nl> <nl> private : <nl> / / Reset parser to the initial state . <nl> mmm a / src / brpc / redis_reply . cpp <nl> ppp b / src / brpc / redis_reply . cpp <nl> const char * RedisReplyTypeToString ( RedisReplyType type ) { <nl> } <nl> } <nl> <nl> - bool RedisReply : : SerializeToIOBuf ( butil : : IOBuf * buf ) { <nl> + bool RedisReply : : SerializeTo ( butil : : IOBuf * buf ) { <nl> butil : : IOBufBuilder builder ; <nl> switch ( _type ) { <nl> case REDIS_REPLY_ERROR : <nl> bool RedisReply : : SerializeToIOBuf ( butil : : IOBuf * buf ) { <nl> break ; <nl> } <nl> for ( size_t i = 0 ; i < _length ; + + i ) { <nl> - if ( ! _data . array . replies [ i ] . SerializeToIOBuf ( buf ) ) { <nl> + if ( ! _data . array . replies [ i ] . SerializeTo ( buf ) ) { <nl> return false ; <nl> } <nl> } <nl> ParseError RedisReply : : ConsumePartialIOBuf ( butil : : IOBuf & buf , butil : : Arena * aren <nl> return PARSE_ERROR_ABSOLUTELY_WRONG ; <nl> } <nl> for ( int64_t i = 0 ; i < count ; + + i ) { <nl> - new ( & subs [ i ] ) RedisReply ( NULL ) ; <nl> + new ( & subs [ i ] ) RedisReply ; <nl> } <nl> buf . 
pop_front ( crlf_pos + 2 / * CRLF * / ) ; <nl> _type = REDIS_REPLY_ARRAY ; <nl> void RedisReply : : CopyFromDifferentArena ( const RedisReply & other , <nl> } <nl> } <nl> <nl> + bool RedisReply : : SetArray ( int size ) { <nl> + if ( ! _arena | | _type ! = REDIS_REPLY_NIL ) { <nl> + return false ; <nl> + } <nl> + _type = REDIS_REPLY_ARRAY ; <nl> + if ( size < 0 ) { <nl> + _length = npos ; <nl> + return true ; <nl> + } else if ( size = = 0 ) { <nl> + _length = 0 ; <nl> + return true ; <nl> + } <nl> + RedisReply * subs = ( RedisReply * ) _arena - > allocate ( sizeof ( RedisReply ) * size ) ; <nl> + if ( ! subs ) { <nl> + LOG ( FATAL ) < < " Fail to allocate RedisReply [ " < < size < < " ] " ; <nl> + return false ; <nl> + } <nl> + for ( int i = 0 ; i < size ; + + i ) { <nl> + new ( & subs [ i ] ) RedisReply ( _arena ) ; <nl> + } <nl> + _length = size ; <nl> + _data . array . replies = subs ; <nl> + return true ; <nl> + } <nl> + <nl> + bool RedisReply : : SetBasicString ( const std : : string & str , RedisReplyType type ) { <nl> + if ( ! _arena | | _type ! = REDIS_REPLY_NIL ) { <nl> + return false ; <nl> + } <nl> + const size_t size = str . size ( ) ; <nl> + if ( size < sizeof ( _data . short_str ) ) { <nl> + memcpy ( _data . short_str , str . c_str ( ) , size ) ; <nl> + _data . short_str [ size ] = ' \ 0 ' ; <nl> + } else { <nl> + char * d = ( char * ) _arena - > allocate ( ( size / 8 + 1 ) * 8 ) ; <nl> + if ( ! d ) { <nl> + LOG ( FATAL ) < < " Fail to allocate string [ " < < size < < " ] " ; <nl> + return false ; <nl> + } <nl> + memcpy ( d , str . c_str ( ) , size ) ; <nl> + d [ size ] = ' \ 0 ' ; <nl> + _data . long_str = d ; <nl> + } <nl> + _type = type ; <nl> + _length = size ; <nl> + return true ; <nl> + } <nl> + <nl> } / / namespace brpc <nl> mmm a / src / brpc / redis_reply . h <nl> ppp b / src / brpc / redis_reply . h <nl> class RedisReply { <nl> ParseError ConsumePartialIOBuf ( butil : : IOBuf & buf , butil : : Arena * arena ) ; <nl> <nl> / / Serialize to buf using redis protocol <nl> - bool SerializeToIOBuf ( butil : : IOBuf * buf ) ; <nl> + bool SerializeTo ( butil : : IOBuf * buf ) ; <nl> <nl> / / Swap internal fields with another reply . <nl> void Swap ( RedisReply & other ) ; <nl> class RedisReply { <nl> uint64_t padding [ 2 ] ; / / For swapping , must cover all bytes . <nl> } _data ; <nl> butil : : Arena * _arena ; <nl> - bool _has_set ; <nl> } ; <nl> <nl> / / = = = = = = = = = = = inline impl . = = = = = = = = = = = = = = <nl> inline RedisReply : : RedisReply ( butil : : Arena * arena ) <nl> inline RedisReply : : RedisReply ( ) <nl> : _type ( REDIS_REPLY_NIL ) <nl> , _length ( 0 ) <nl> - , _arena ( NULL ) <nl> - , _has_set ( false ) { <nl> + , _arena ( NULL ) { <nl> _data . array . last_index = - 1 ; <nl> _data . array . replies = NULL ; <nl> } <nl> inline int64_t RedisReply : : integer ( ) const { <nl> } <nl> <nl> inline bool RedisReply : : SetNilString ( ) { <nl> - if ( ! _arena | | _has_set ) return false ; <nl> + if ( ! _arena | | _type ! = REDIS_REPLY_NIL ) { <nl> + return false ; <nl> + } <nl> _type = REDIS_REPLY_STRING ; <nl> _length = npos ; <nl> - _has_set = true ; <nl> return true ; <nl> } <nl> <nl> inline bool RedisReply : : SetError ( const std : : string & str ) { <nl> } <nl> <nl> inline bool RedisReply : : SetInteger ( int64_t value ) { <nl> - if ( ! _arena | | _has_set ) { <nl> + if ( ! _arena | | _type ! = REDIS_REPLY_NIL ) { <nl> return false ; <nl> } <nl> _type = REDIS_REPLY_INTEGER ; <nl> _length = 0 ; <nl> _data . 
integer = value ; <nl> - _has_set = true ; <nl> return true ; <nl> } <nl> <nl> inline void RedisReply : : Clear ( ) { <nl> _length = 0 ; <nl> _data . array . last_index = - 1 ; <nl> _data . array . replies = NULL ; <nl> - _has_set = false ; <nl> } <nl> <nl> inline void RedisReply : : CopyFromSameArena ( const RedisReply & other ) { <nl> mmm a / test / CMakeLists . txt <nl> ppp b / test / CMakeLists . txt <nl> endif ( ) <nl> <nl> set ( CMAKE_CPP_FLAGS " $ { DEFINE_CLOCK_GETTIME } - DBRPC_WITH_GLOG = $ { WITH_GLOG_VAL } - DGFLAGS_NS = $ { GFLAGS_NS } " ) <nl> set ( CMAKE_CPP_FLAGS " $ { CMAKE_CPP_FLAGS } - DBTHREAD_USE_FAST_PTHREAD_MUTEX - D__const__ = - D_GNU_SOURCE - DUSE_SYMBOLIZE - DNO_TCMALLOC - D__STDC_FORMAT_MACROS - D__STDC_LIMIT_MACROS - D__STDC_CONSTANT_MACROS - DUNIT_TEST - Dprivate = public - Dprotected = public - DBVAR_NOT_LINK_DEFAULT_VARIABLES - D__STRICT_ANSI__ - include $ { PROJECT_SOURCE_DIR } / test / sstream_workaround . h " ) <nl> - set ( CMAKE_CXX_FLAGS " $ { CMAKE_CPP_FLAGS } - O2 - pipe - Wall - W - fPIC - fstrict - aliasing - Wno - invalid - offsetof - Wno - unused - parameter - fno - omit - frame - pointer " ) <nl> + set ( CMAKE_CXX_FLAGS " $ { CMAKE_CPP_FLAGS } - g - O2 - pipe - Wall - W - fPIC - fstrict - aliasing - Wno - invalid - offsetof - Wno - unused - parameter - fno - omit - frame - pointer " ) <nl> use_cxx11 ( ) <nl> <nl> if ( CMAKE_CXX_COMPILER_ID STREQUAL " GNU " ) <nl> mmm a / test / brpc_redis_unittest . cpp <nl> ppp b / test / brpc_redis_unittest . cpp <nl> TEST_F ( RedisTest , quote_and_escape ) { <nl> TEST_F ( RedisTest , command_parser ) { <nl> brpc : : RedisCommandParser parser ; <nl> butil : : IOBuf buf ; <nl> + std : : string command_out ; <nl> { <nl> / / parse from whole command <nl> std : : string command = " set abc edc " ; <nl> ASSERT_TRUE ( brpc : : RedisCommandNoFormat ( & buf , command . c_str ( ) ) . ok ( ) ) ; <nl> - ASSERT_EQ ( brpc : : PARSE_OK , parser . Parse ( buf ) ) ; <nl> + ASSERT_EQ ( brpc : : PARSE_OK , parser . Consume ( buf , & command_out ) ) ; <nl> ASSERT_TRUE ( buf . empty ( ) ) ; <nl> - std : : string command_out ; <nl> - parser . SwapCommandTo ( & command_out ) ; <nl> ASSERT_STREQ ( command . c_str ( ) , command_out . c_str ( ) ) ; <nl> } <nl> { <nl> TEST_F ( RedisTest , command_parser ) { <nl> for ( int i = 0 ; i < size ; + + i ) { <nl> buf . push_back ( raw_string [ i ] ) ; <nl> if ( i = = size - 1 ) { <nl> - ASSERT_EQ ( brpc : : PARSE_OK , parser . Parse ( buf ) ) ; <nl> + ASSERT_EQ ( brpc : : PARSE_OK , parser . Consume ( buf , & command_out ) ) ; <nl> } else { <nl> if ( butil : : fast_rand_less_than ( 2 ) = = 0 ) { <nl> ASSERT_EQ ( brpc : : PARSE_ERROR_NOT_ENOUGH_DATA , <nl> - parser . Parse ( buf ) ) ; <nl> + parser . Consume ( buf , & command_out ) ) ; <nl> } <nl> } <nl> } <nl> ASSERT_TRUE ( buf . empty ( ) ) ; <nl> - std : : string command_out ; <nl> - parser . SwapCommandTo ( & command_out ) ; <nl> ASSERT_STREQ ( command_out . c_str ( ) , " set abc def " ) ; <nl> } <nl> } <nl> { <nl> / / there is a non - string message in command and parse should fail <nl> buf . append ( " * 3 \ r \ n $ 3 " ) ; <nl> - ASSERT_EQ ( brpc : : PARSE_ERROR_NOT_ENOUGH_DATA , parser . Parse ( buf ) ) ; <nl> + ASSERT_EQ ( brpc : : PARSE_ERROR_NOT_ENOUGH_DATA , parser . Consume ( buf , & command_out ) ) ; <nl> ASSERT_EQ ( ( int ) buf . size ( ) , 2 ) ; / / left " $ 3 " <nl> buf . append ( " \ r \ nset \ r \ n : 123 \ r \ n $ 3 \ r \ ndef \ r \ n " ) ; <nl> - ASSERT_EQ ( brpc : : PARSE_ERROR_ABSOLUTELY_WRONG , parser . 
Parse ( buf ) ) ; <nl> + ASSERT_EQ ( brpc : : PARSE_ERROR_ABSOLUTELY_WRONG , parser . Consume ( buf , & command_out ) ) ; <nl> parser . Reset ( ) ; <nl> } <nl> { <nl> / / not array <nl> buf . append ( " : 123456 \ r \ n " ) ; <nl> - ASSERT_EQ ( brpc : : PARSE_ERROR_TRY_OTHERS , parser . Parse ( buf ) ) ; <nl> + ASSERT_EQ ( brpc : : PARSE_ERROR_TRY_OTHERS , parser . Consume ( buf , & command_out ) ) ; <nl> parser . Reset ( ) ; <nl> } <nl> { <nl> / / not array <nl> buf . append ( " + Error \ r \ n " ) ; <nl> - ASSERT_EQ ( brpc : : PARSE_ERROR_TRY_OTHERS , parser . Parse ( buf ) ) ; <nl> + ASSERT_EQ ( brpc : : PARSE_ERROR_TRY_OTHERS , parser . Consume ( buf , & command_out ) ) ; <nl> parser . Reset ( ) ; <nl> } <nl> { <nl> / / not array <nl> buf . append ( " + OK \ r \ n " ) ; <nl> - ASSERT_EQ ( brpc : : PARSE_ERROR_TRY_OTHERS , parser . Parse ( buf ) ) ; <nl> + ASSERT_EQ ( brpc : : PARSE_ERROR_TRY_OTHERS , parser . Consume ( buf , & command_out ) ) ; <nl> parser . Reset ( ) ; <nl> } <nl> { <nl> / / not array <nl> buf . append ( " $ 5 \ r \ nhello \ r \ n " ) ; <nl> - ASSERT_EQ ( brpc : : PARSE_ERROR_TRY_OTHERS , parser . Parse ( buf ) ) ; <nl> + ASSERT_EQ ( brpc : : PARSE_ERROR_TRY_OTHERS , parser . Consume ( buf , & command_out ) ) ; <nl> parser . Reset ( ) ; <nl> } <nl> } <nl> TEST_F ( RedisTest , redis_reply_codec ) { <nl> brpc : : RedisReply r ( & arena ) ; <nl> butil : : IOBuf buf ; <nl> ASSERT_TRUE ( r . SetStatus ( " OK " ) ) ; <nl> - ASSERT_TRUE ( r . SerializeToIOBuf ( & buf ) ) ; <nl> + ASSERT_TRUE ( r . SerializeTo ( & buf ) ) ; <nl> ASSERT_STREQ ( buf . to_string ( ) . c_str ( ) , " + OK \ r \ n " ) ; <nl> ASSERT_STREQ ( r . c_str ( ) , " OK " ) ; <nl> r . Clear ( ) ; <nl> TEST_F ( RedisTest , redis_reply_codec ) { <nl> brpc : : RedisReply r ( & arena ) ; <nl> butil : : IOBuf buf ; <nl> ASSERT_TRUE ( r . SetError ( " not exist \ ' key \ ' " ) ) ; <nl> - ASSERT_TRUE ( r . SerializeToIOBuf ( & buf ) ) ; <nl> + ASSERT_TRUE ( r . SerializeTo ( & buf ) ) ; <nl> ASSERT_STREQ ( buf . to_string ( ) . c_str ( ) , " - not exist \ ' key \ ' \ r \ n " ) ; <nl> r . Clear ( ) ; <nl> brpc : : ParseError err = r . ConsumePartialIOBuf ( buf , & arena ) ; <nl> TEST_F ( RedisTest , redis_reply_codec ) { <nl> brpc : : RedisReply r ( & arena ) ; <nl> butil : : IOBuf buf ; <nl> ASSERT_TRUE ( r . SetNilString ( ) ) ; <nl> - ASSERT_TRUE ( r . SerializeToIOBuf ( & buf ) ) ; <nl> + ASSERT_TRUE ( r . SerializeTo ( & buf ) ) ; <nl> ASSERT_STREQ ( buf . to_string ( ) . c_str ( ) , " $ - 1 \ r \ n " ) ; <nl> r . Clear ( ) ; <nl> brpc : : ParseError err = r . ConsumePartialIOBuf ( buf , & arena ) ; <nl> TEST_F ( RedisTest , redis_reply_codec ) { <nl> <nl> r . Clear ( ) ; <nl> ASSERT_TRUE ( r . SetString ( " abcde ' hello world " ) ) ; <nl> - ASSERT_TRUE ( r . SerializeToIOBuf ( & buf ) ) ; <nl> + ASSERT_TRUE ( r . SerializeTo ( & buf ) ) ; <nl> ASSERT_STREQ ( buf . to_string ( ) . c_str ( ) , " $ 17 \ r \ nabcde ' hello world \ r \ n " ) ; <nl> ASSERT_STREQ ( r . c_str ( ) , " abcde ' hello world " ) ; <nl> r . Clear ( ) ; <nl> TEST_F ( RedisTest , redis_reply_codec ) { <nl> for ( int i = 0 ; i < t ; + + i ) { <nl> r . Clear ( ) ; <nl> ASSERT_TRUE ( r . SetInteger ( input [ i ] ) ) ; <nl> - ASSERT_TRUE ( r . SerializeToIOBuf ( & buf ) ) ; <nl> + ASSERT_TRUE ( r . SerializeTo ( & buf ) ) ; <nl> ASSERT_STREQ ( buf . to_string ( ) . c_str ( ) , output [ i ] ) ; <nl> r . Clear ( ) ; <nl> brpc : : ParseError err = r . 
ConsumePartialIOBuf ( buf , & arena ) ; <nl> TEST_F ( RedisTest , redis_reply_codec ) { <nl> r [ 1 ] . SetString ( " To go over everything " ) ; <nl> r [ 2 ] . SetInteger ( 1 ) ; <nl> ASSERT_TRUE ( r [ 3 ] . is_nil ( ) ) ; <nl> - ASSERT_TRUE ( r . SerializeToIOBuf ( & buf ) ) ; <nl> + ASSERT_TRUE ( r . SerializeTo ( & buf ) ) ; <nl> ASSERT_STREQ ( buf . to_string ( ) . c_str ( ) , <nl> " * 3 \ r \ n * 2 \ r \ n $ 14 \ r \ nhello , it ' s me \ r \ n : 422 \ r \ n $ 21 \ r \ n " <nl> " To go over everything \ r \ n : 1 \ r \ n " ) ; <nl> TEST_F ( RedisTest , redis_reply_codec ) { <nl> r . Clear ( ) ; <nl> / / nil array <nl> ASSERT_TRUE ( r . SetArray ( - 1 ) ) ; <nl> - ASSERT_TRUE ( r . SerializeToIOBuf ( & buf ) ) ; <nl> + ASSERT_TRUE ( r . SerializeTo ( & buf ) ) ; <nl> ASSERT_STREQ ( buf . to_string ( ) . c_str ( ) , " * - 1 \ r \ n " ) ; <nl> ASSERT_EQ ( r . ConsumePartialIOBuf ( buf , & arena ) , brpc : : PARSE_OK ) ; <nl> ASSERT_TRUE ( r . is_nil ( ) ) ; <nl>
|
redis_server_protocol : refine code
|
apache/incubator-brpc
|
520858cfc56d8f2a3d0847e77d4e2bb258c4d249
|
2019-12-12T09:24:36Z
|
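The brpc change above routes every parsed command through a lower-cased name lookup in `_command_map` (now holding raw `RedisCommandHandler*` pointers) and replies with `ERR unknown command` when no handler is registered. The standalone C++ sketch below models that dispatch pattern only; the names (`CommandService`, `CommandHandler`, `Dispatch`, `PingHandler`) are invented for illustration and are not the actual brpc API.

```cpp
// Standalone sketch (not the real brpc API): commands are lower-cased, looked
// up in an unordered_map of raw handler pointers, and unknown commands yield
// an "ERR unknown command" reply, mirroring the dispatch logic in the diff.
#include <algorithm>
#include <cctype>
#include <iostream>
#include <string>
#include <unordered_map>

class CommandHandler {
public:
    virtual ~CommandHandler() {}
    // Returns the textual reply for a full command line such as "set k v".
    virtual std::string Run(const std::string& command) = 0;
};

class CommandService {
public:
    bool AddCommandHandler(const std::string& name, CommandHandler* handler) {
        const std::string lcname = ToLower(name);
        if (_command_map.count(lcname)) {
            return false;  // mirrors the diff: duplicate names are rejected
        }
        _command_map[lcname] = handler;  // raw pointer, as in the updated code
        return true;
    }

    CommandHandler* FindCommandHandler(const std::string& name) {
        auto it = _command_map.find(ToLower(name));
        return it != _command_map.end() ? it->second : nullptr;
    }

    std::string Dispatch(const std::string& command) {
        const std::string name = command.substr(0, command.find(' '));
        CommandHandler* ch = FindCommandHandler(name);
        if (!ch) {
            return "ERR unknown command `" + name + "`";
        }
        return ch->Run(command);
    }

private:
    static std::string ToLower(const std::string& s) {
        std::string out(s);
        std::transform(out.begin(), out.end(), out.begin(),
                       [](unsigned char c) { return static_cast<char>(std::tolower(c)); });
        return out;
    }
    std::unordered_map<std::string, CommandHandler*> _command_map;
};

class PingHandler : public CommandHandler {
public:
    std::string Run(const std::string&) override { return "PONG"; }
};

int main() {
    CommandService service;
    PingHandler ping;
    service.AddCommandHandler("PING", &ping);
    std::cout << service.Dispatch("ping") << "\n";     // PONG
    std::cout << service.Dispatch("get foo") << "\n";  // ERR unknown command `get`
    return 0;
}
```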
mmm a / tests / testflows / rbac / docker - compose / clickhouse - service . yml <nl> ppp b / tests / testflows / rbac / docker - compose / clickhouse - service . yml <nl> services : <nl> - " 8123 " <nl> volumes : <nl> - " $ { CLICKHOUSE_TESTS_DIR } / configs / clickhouse / config . d : / etc / clickhouse - server / config . d " <nl> - - " $ { CLICKHOUSE_TESTS_DIR } / configs / clickhouse / users . d / : / etc / clickhouse - server / users . d " <nl> + - " $ { CLICKHOUSE_TESTS_DIR } / configs / clickhouse / users . d : / etc / clickhouse - server / users . d " <nl> - " $ { CLICKHOUSE_TESTS_DIR } / configs / clickhouse / ssl : / etc / clickhouse - server / ssl " <nl> - " $ { CLICKHOUSE_TESTS_DIR } / configs / clickhouse / config . xml : / etc / clickhouse - server / config . xml " <nl> - " $ { CLICKHOUSE_TESTS_DIR } / configs / clickhouse / users . xml : / etc / clickhouse - server / users . xml " <nl> mmm a / tests / testflows / rbac / docker - compose / docker - compose . yml <nl> ppp b / tests / testflows / rbac / docker - compose / docker - compose . yml <nl> services : <nl> volumes : <nl> - " $ { CLICKHOUSE_TESTS_DIR } / _instances / clickhouse1 / database / : / var / lib / clickhouse / " <nl> - " $ { CLICKHOUSE_TESTS_DIR } / _instances / clickhouse1 / logs / : / var / log / clickhouse - server / " <nl> - - " $ { CLICKHOUSE_TESTS_DIR } / configs / clickhouse1 / config . d : / etc / clickhouse - server / config . d " <nl> - - " $ { CLICKHOUSE_TESTS_DIR } / configs / clickhouse1 / users . d : / etc / clickhouse - server / users . d " <nl> + - " $ { CLICKHOUSE_TESTS_DIR } / configs / clickhouse1 / config . d / macros . xml : / etc / clickhouse - server / config . d / macros . xml " <nl> depends_on : <nl> zookeeper : <nl> condition : service_healthy <nl> services : <nl> volumes : <nl> - " $ { CLICKHOUSE_TESTS_DIR } / _instances / clickhouse2 / database / : / var / lib / clickhouse / " <nl> - " $ { CLICKHOUSE_TESTS_DIR } / _instances / clickhouse2 / logs / : / var / log / clickhouse - server / " <nl> - - " $ { CLICKHOUSE_TESTS_DIR } / configs / clickhouse2 / config . d : / etc / clickhouse - server / config . d " <nl> - - " $ { CLICKHOUSE_TESTS_DIR } / configs / clickhouse2 / users . d : / etc / clickhouse - server / users . d " <nl> + - " $ { CLICKHOUSE_TESTS_DIR } / configs / clickhouse2 / config . d / macros . xml : / etc / clickhouse - server / config . d / macros . xml " <nl> depends_on : <nl> zookeeper : <nl> condition : service_healthy <nl> services : <nl> volumes : <nl> - " $ { CLICKHOUSE_TESTS_DIR } / _instances / clickhouse3 / database / : / var / lib / clickhouse / " <nl> - " $ { CLICKHOUSE_TESTS_DIR } / _instances / clickhouse3 / logs / : / var / log / clickhouse - server / " <nl> - - " $ { CLICKHOUSE_TESTS_DIR } / configs / clickhouse3 / config . d : / etc / clickhouse - server / config . d " <nl> - - " $ { CLICKHOUSE_TESTS_DIR } / configs / clickhouse3 / users . d : / etc / clickhouse - server / users . d " <nl> + - " $ { CLICKHOUSE_TESTS_DIR } / configs / clickhouse3 / config . d / macros . xml : / etc / clickhouse - server / config . d / macros . xml " <nl> depends_on : <nl> zookeeper : <nl> condition : service_healthy <nl> mmm a / tests / testflows / rbac / tests / syntax / alter_quota . py <nl> ppp b / tests / testflows / rbac / tests / syntax / alter_quota . py <nl> <nl> from testflows . core import * <nl> <nl> from rbac . requirements import * <nl> - import rbac . tests . errors <nl> + import rbac . tests . 
errors as errors <nl> <nl> @ TestFeature <nl> @ Name ( " alter quota " ) <nl> def cleanup_quota ( quota ) : <nl> RQ_SRS_006_RBAC_Quota_Alter_Assignment ( " 1 . 0 " ) ] ) : <nl> with When ( " I alter quota to a role and a user " ) : <nl> node . query ( " ALTER QUOTA quota0 TO role0 , user0 " ) <nl> - <nl> + <nl> with Scenario ( " I alter quota assigned to none " , flags = TE , requirements = [ <nl> RQ_SRS_006_RBAC_Quota_Alter_Assignment_None ( " 1 . 0 " ) ] ) : <nl> with When ( " I alter quota to none " ) : <nl>
|
* Fixing issue with configs missing in the config . d folder
|
ClickHouse/ClickHouse
|
086d133df7431c7d01184aacf22a2992c676ca96
|
2020-07-21T18:40:43Z
|
mmm a / include / swift / AST / ArchetypeBuilder . h <nl> ppp b / include / swift / AST / ArchetypeBuilder . h <nl> class ArchetypeBuilder { <nl> class InferRequirementsWalker ; <nl> friend class InferRequirementsWalker ; <nl> <nl> - ModuleDecl & Mod ; <nl> ASTContext & Context ; <nl> DiagnosticEngine & Diags ; <nl> struct Implementation ; <nl> class ArchetypeBuilder { <nl> public : <nl> / / / Construct a new archetype builder . <nl> / / / <nl> - / / / \ param mod The module in which the builder will create archetypes . <nl> - explicit ArchetypeBuilder ( ModuleDecl & mod ) ; <nl> + / / / \ param lookupConformance Conformance - lookup routine that will be used <nl> + / / / to satisfy conformance requirements for concrete types . <nl> + explicit ArchetypeBuilder ( ASTContext & ctx , <nl> + std : : function < GenericFunction > lookupConformance ) ; <nl> <nl> ArchetypeBuilder ( ArchetypeBuilder & & ) ; <nl> ~ ArchetypeBuilder ( ) ; <nl> class ArchetypeBuilder { <nl> / / / Retrieve the AST context . <nl> ASTContext & getASTContext ( ) const { return Context ; } <nl> <nl> - / / / Retrieve the module . <nl> - ModuleDecl & getModule ( ) const { return Mod ; } <nl> + / / / Retrieve the conformance - lookup function used by this archetype builder . <nl> + std : : function < GenericFunction > getLookupConformanceFn ( ) const ; <nl> <nl> / / / Retrieve the lazy resolver , if there is one . <nl> LazyResolver * getLazyResolver ( ) const ; <nl> mmm a / include / swift / AST / GenericEnvironment . h <nl> ppp b / include / swift / AST / GenericEnvironment . h <nl> class alignas ( 1 < < DeclAlignInBits ) GenericEnvironment final <nl> / / / Map an interface type to a contextual type . <nl> Type mapTypeIntoContext ( ModuleDecl * M , Type type ) const ; <nl> <nl> + / / / Map an interface type to a contextual type . <nl> + Type mapTypeIntoContext ( Type type , <nl> + LookupConformanceFn lookupConformance ) const ; <nl> + <nl> / / / Map a generic parameter type to a contextual type . <nl> Type mapTypeIntoContext ( GenericTypeParamType * type ) const ; <nl> <nl> mmm a / include / swift / AST / Types . h <nl> ppp b / include / swift / AST / Types . h <nl> class DependentMemberType : public TypeBase { <nl> Type base , <nl> LazyResolver * resolver = nullptr ) ; <nl> <nl> + / / / Substitute the base type , looking up our associated type in it if it is <nl> + / / / non - dependent . Returns null if the member could not be found in the new <nl> + / / / base . <nl> + Type substBaseType ( Type base , LookupConformanceFn lookupConformance ) ; <nl> + <nl> / / Implement isa / cast / dyncast / etc . <nl> static bool classof ( const TypeBase * T ) { <nl> return T - > getKind ( ) = = TypeKind : : DependentMember ; <nl> mmm a / lib / AST / ASTContext . cpp <nl> ppp b / lib / AST / ASTContext . cpp <nl> ArchetypeBuilder * ASTContext : : getOrCreateArchetypeBuilder ( <nl> return known - > second . get ( ) ; <nl> <nl> / / Create a new archetype builder with the given signature . <nl> - auto builder = new ArchetypeBuilder ( * mod ) ; <nl> + auto builder = new ArchetypeBuilder ( * this , LookUpConformanceInModule ( mod ) ) ; <nl> builder - > addGenericSignature ( sig ) ; <nl> <nl> / / Store this archetype builder ( no generic environment yet ) . <nl> mmm a / lib / AST / ArchetypeBuilder . cpp <nl> ppp b / lib / AST / ArchetypeBuilder . cpp <nl> static void updateRequirementSource ( RequirementSource & source , <nl> } <nl> <nl> struct ArchetypeBuilder : : Implementation { <nl> + / / / Function used to look up conformances . 
<nl> + std : : function < GenericFunction > LookupConformance ; <nl> + <nl> / / / The generic parameters that this archetype builder is working with . <nl> SmallVector < GenericTypeParamType * , 4 > GenericParams ; <nl> <nl> static ProtocolConformance * getSuperConformance ( <nl> <nl> / / Lookup the conformance of the superclass to this protocol . <nl> auto conformance = <nl> - builder . getModule ( ) . lookupConformance ( superclass , proto , <nl> - builder . getLazyResolver ( ) ) ; <nl> + builder . getLookupConformanceFn ( ) ( pa - > getDependentType ( <nl> + { } , / * allowUnresolved = * / true ) <nl> + - > getCanonicalType ( ) , <nl> + superclass , <nl> + proto - > getDeclaredInterfaceType ( ) <nl> + - > castTo < ProtocolType > ( ) ) ; <nl> if ( ! conformance ) return nullptr ; <nl> <nl> / / Conformance to this protocol is redundant ; update the requirement source <nl> Type ArchetypeBuilder : : PotentialArchetype : : getTypeInContext ( <nl> builder . Impl - > ConcreteSubs . erase ( { genericEnv , representative } ) ; <nl> } ; <nl> <nl> - return genericEnv - > mapTypeIntoContext ( & builder . getModule ( ) , concreteType ) ; <nl> + return genericEnv - > mapTypeIntoContext ( concreteType , <nl> + builder . getLookupConformanceFn ( ) ) ; <nl> } <nl> <nl> / / Check that we haven ' t referenced this type while substituting into the <nl> Type ArchetypeBuilder : : PotentialArchetype : : getTypeInContext ( <nl> if ( auto parent = getParent ( ) ) { <nl> / / For nested types , first substitute into the parent so we can form the <nl> / / proper nested type . <nl> - auto & mod = builder . getModule ( ) ; <nl> - <nl> auto parentTy = parent - > getTypeInContext ( builder , genericEnv ) ; <nl> if ( ! parentTy ) <nl> return ErrorType : : get ( getDependentType ( genericParams , <nl> Type ArchetypeBuilder : : PotentialArchetype : : getTypeInContext ( <nl> return type ; <nl> <nl> auto depMemberType = type - > castTo < DependentMemberType > ( ) ; <nl> - Type memberType = depMemberType - > substBaseType ( & mod , parentTy , resolver ) ; <nl> + Type memberType = <nl> + depMemberType - > substBaseType ( parentTy , <nl> + builder . getLookupConformanceFn ( ) ) ; <nl> <nl> / / If the member type maps to an archetype , resolve that archetype . <nl> if ( auto memberPA = builder . resolveArchetype ( memberType ) ) { <nl> Type ArchetypeBuilder : : PotentialArchetype : : getTypeInContext ( <nl> / / that a same - type constraint affects this so late in the game . <nl> representative - > SameTypeSource = parent - > SameTypeSource ; <nl> <nl> - return genericEnv - > mapTypeIntoContext ( & builder . getModule ( ) , memberType ) ; <nl> + return genericEnv - > mapTypeIntoContext ( memberType , <nl> + builder . getLookupConformanceFn ( ) ) ; <nl> } <nl> <nl> / / Check whether the parent already has a nested type with this name . If <nl> Type ArchetypeBuilder : : PotentialArchetype : : getTypeInContext ( <nl> SWIFT_DEFER { <nl> builder . Impl - > SuperclassSubs . erase ( { genericEnv , representative } ) ; <nl> } ; <nl> - superclass = genericEnv - > mapTypeIntoContext ( & builder . getModule ( ) , <nl> - superclass ) ; <nl> + superclass = genericEnv - > mapTypeIntoContext ( superclass , <nl> + builder . getLookupConformanceFn ( ) ) ; <nl> <nl> / / We might have recursively recorded the archetype ; if so , return early . <nl> / / FIXME : This should be detectable before we end up building archetypes . 
<nl> void ArchetypeBuilder : : PotentialArchetype : : dump ( llvm : : raw_ostream & Out , <nl> } <nl> } <nl> <nl> - ArchetypeBuilder : : ArchetypeBuilder ( ModuleDecl & mod ) <nl> - : Mod ( mod ) , Context ( mod . getASTContext ( ) ) , Diags ( Context . Diags ) , <nl> - Impl ( new Implementation ) <nl> - { <nl> + ArchetypeBuilder : : ArchetypeBuilder ( <nl> + ASTContext & ctx , <nl> + std : : function < GenericFunction > lookupConformance ) <nl> + : Context ( ctx ) , Diags ( Context . Diags ) , Impl ( new Implementation ) { <nl> + Impl - > LookupConformance = std : : move ( lookupConformance ) ; <nl> } <nl> <nl> ArchetypeBuilder : : ArchetypeBuilder ( ArchetypeBuilder & & ) = default ; <nl> ArchetypeBuilder : : ~ ArchetypeBuilder ( ) { <nl> delete PA ; <nl> } <nl> <nl> + std : : function < GenericFunction > <nl> + ArchetypeBuilder : : getLookupConformanceFn ( ) const { <nl> + return Impl - > LookupConformance ; <nl> + } <nl> + <nl> LazyResolver * ArchetypeBuilder : : getLazyResolver ( ) const { <nl> return Context . getLazyResolver ( ) ; <nl> } <nl> bool ArchetypeBuilder : : addSameTypeRequirementToConcrete ( <nl> / / Make sure the concrete type fulfills the requirements on the archetype . <nl> DenseMap < ProtocolDecl * , ProtocolConformanceRef > conformances ; <nl> if ( ! Concrete - > is < ArchetypeType > ( ) ) { <nl> + CanType depTy = T - > getDependentType ( { } , / * allowUnresolved = * / true ) <nl> + - > getCanonicalType ( ) ; <nl> for ( auto conforms : T - > getConformsTo ( ) ) { <nl> auto protocol = conforms . first ; <nl> - auto conformance = Mod . lookupConformance ( Concrete , protocol , <nl> - getLazyResolver ( ) ) ; <nl> + auto conformance = <nl> + getLookupConformanceFn ( ) ( depTy , Concrete , <nl> + protocol - > getDeclaredInterfaceType ( ) <nl> + - > castTo < ProtocolType > ( ) ) ; <nl> if ( ! conformance ) { <nl> Diags . diagnose ( Source . getLoc ( ) , <nl> diag : : requires_generic_param_same_type_does_not_conform , <nl> bool ArchetypeBuilder : : addAbstractTypeParamRequirements ( <nl> - > getGenericEnvironmentOfContext ( ) ; <nl> if ( isa < AssociatedTypeDecl > ( decl ) & & genericEnv ! = nullptr ) { <nl> auto * archetype = genericEnv - > mapTypeIntoContext ( <nl> - & Mod , <nl> - decl - > getDeclaredInterfaceType ( ) ) <nl> + decl - > getDeclaredInterfaceType ( ) , <nl> + getLookupConformanceFn ( ) ) <nl> - > getAs < ArchetypeType > ( ) ; <nl> <nl> if ( archetype ) { <nl> class ArchetypeBuilder : : InferRequirementsWalker : public TypeWalker { <nl> switch ( req . getKind ( ) ) { <nl> case RequirementKind : : SameType : { <nl> auto firstType = req . getFirstType ( ) . subst ( <nl> - & Builder . getModule ( ) , <nl> - substitutions ) ; <nl> + QueryTypeSubstitutionMap { substitutions } , <nl> + Builder . getLookupConformanceFn ( ) ) ; <nl> if ( ! firstType ) <nl> break ; <nl> <nl> class ArchetypeBuilder : : InferRequirementsWalker : public TypeWalker { <nl> return Action : : Continue ; <nl> <nl> auto secondType = req . getSecondType ( ) . subst ( <nl> - & Builder . getModule ( ) , <nl> - substitutions ) ; <nl> + QueryTypeSubstitutionMap { substitutions } , <nl> + Builder . getLookupConformanceFn ( ) ) ; <nl> if ( ! secondType ) <nl> break ; <nl> auto secondPA = Builder . resolveArchetype ( secondType ) ; <nl> class ArchetypeBuilder : : InferRequirementsWalker : public TypeWalker { <nl> case RequirementKind : : Superclass : <nl> case RequirementKind : : Conformance : { <nl> auto subjectType = req . getFirstType ( ) . subst ( <nl> - & Builder . 
getModule ( ) , <nl> - substitutions ) ; <nl> + QueryTypeSubstitutionMap { substitutions } , <nl> + Builder . getLookupConformanceFn ( ) ) ; <nl> if ( ! subjectType ) <nl> break ; <nl> <nl> GenericEnvironment * ArchetypeBuilder : : getGenericEnvironment ( <nl> visitPotentialArchetypes ( [ & ] ( PotentialArchetype * pa ) { <nl> if ( auto archetype = <nl> genericEnv - > mapTypeIntoContext ( <nl> - & getModule ( ) , <nl> pa - > getDependentType ( signature - > getGenericParams ( ) , <nl> - / * allowUnresolved = * / false ) ) <nl> + / * allowUnresolved = * / false ) , <nl> + getLookupConformanceFn ( ) ) <nl> - > getAs < ArchetypeType > ( ) ) <nl> ( void ) archetype - > getAllNestedTypes ( ) ; <nl> } ) ; <nl> GenericEnvironment * ArchetypeBuilder : : getGenericEnvironment ( <nl> <nl> auto depTy = pa - > getDependentType ( genericParams , <nl> / * allowUnresolved = * / false ) ; <nl> - auto inContext = genericEnv - > mapTypeIntoContext ( & getModule ( ) , depTy ) ; <nl> + auto inContext = genericEnv - > mapTypeIntoContext ( depTy , <nl> + getLookupConformanceFn ( ) ) ; <nl> <nl> auto repDepTy = pa - > getRepresentative ( ) - > getDependentType ( <nl> genericParams , <nl> / * allowUnresolved = * / false ) ; <nl> - auto repInContext = genericEnv - > mapTypeIntoContext ( & getModule ( ) , repDepTy ) ; <nl> + auto repInContext = <nl> + genericEnv - > mapTypeIntoContext ( repDepTy , getLookupConformanceFn ( ) ) ; <nl> assert ( ( inContext - > isEqual ( repInContext ) | | <nl> inContext - > hasError ( ) | | <nl> repInContext - > hasError ( ) ) & & <nl> mmm a / lib / AST / Builtins . cpp <nl> ppp b / lib / AST / Builtins . cpp <nl> namespace { <nl> TheGenericParamList = getGenericParams ( ctx , numGenericParams , <nl> GenericTypeParams ) ; <nl> <nl> - ArchetypeBuilder Builder ( * ctx . TheBuiltinModule ) ; <nl> + ArchetypeBuilder Builder ( ctx , <nl> + LookUpConformanceInModule ( ctx . TheBuiltinModule ) ) ; <nl> for ( auto gp : GenericTypeParams ) <nl> Builder . addGenericParameter ( gp ) ; <nl> <nl> mmm a / lib / AST / GenericEnvironment . cpp <nl> ppp b / lib / AST / GenericEnvironment . cpp <nl> Type GenericEnvironment : : QueryArchetypeToInterfaceSubstitutions : : operator ( ) ( <nl> return Type ( ) ; <nl> } <nl> <nl> - Type GenericEnvironment : : mapTypeIntoContext ( ModuleDecl * M , Type type ) const { <nl> + Type GenericEnvironment : : mapTypeIntoContext ( <nl> + Type type , <nl> + LookupConformanceFn lookupConformance ) const { <nl> Type result = type . subst ( QueryInterfaceTypeSubstitutions ( this ) , <nl> - LookUpConformanceInModule ( M ) , <nl> + lookupConformance , <nl> ( SubstFlags : : AllowLoweredTypes | <nl> SubstFlags : : UseErrorType ) ) ; <nl> assert ( ( ! result - > hasTypeParameter ( ) | | result - > hasError ( ) ) & & <nl> " not fully substituted " ) ; <nl> return result ; <nl> + <nl> + } <nl> + <nl> + Type GenericEnvironment : : mapTypeIntoContext ( ModuleDecl * M , Type type ) const { <nl> + return mapTypeIntoContext ( type , LookUpConformanceInModule ( M ) ) ; <nl> } <nl> <nl> Type GenericEnvironment : : mapTypeIntoContext ( GenericTypeParamType * type ) const { <nl> mmm a / lib / AST / Type . cpp <nl> ppp b / lib / AST / Type . 
cpp <nl> MakeAbstractConformanceForGenericType : : operator ( ) ( CanType dependentType , <nl> Type DependentMemberType : : substBaseType ( ModuleDecl * module , <nl> Type substBase , <nl> LazyResolver * resolver ) { <nl> + return substBaseType ( substBase , LookUpConformanceInModule ( module ) ) ; <nl> + } <nl> + <nl> + Type DependentMemberType : : substBaseType ( Type substBase , <nl> + LookupConformanceFn lookupConformance ) { <nl> if ( substBase . getPointer ( ) = = getBase ( ) . getPointer ( ) & & <nl> substBase - > hasTypeParameter ( ) ) <nl> return this ; <nl> <nl> - return getMemberForBaseType ( LookUpConformanceInModule ( module ) , <nl> - Type ( ) , substBase , <nl> - getAssocType ( ) , getName ( ) , <nl> - None ) ; <nl> + return getMemberForBaseType ( lookupConformance , Type ( ) , substBase , <nl> + getAssocType ( ) , getName ( ) , None ) ; <nl> } <nl> <nl> static Type substType ( Type derivedType , <nl> mmm a / lib / ClangImporter / ImportDecl . cpp <nl> ppp b / lib / ClangImporter / ImportDecl . cpp <nl> DeclContext * ClangImporter : : Implementation : : importDeclContextImpl ( <nl> / / Calculate the generic environment from an imported generic param list . <nl> GenericEnvironment * ClangImporter : : Implementation : : buildGenericEnvironment ( <nl> GenericParamList * genericParams , DeclContext * dc ) { <nl> - ArchetypeBuilder builder ( * dc - > getParentModule ( ) ) ; <nl> + ArchetypeBuilder builder ( SwiftContext , <nl> + LookUpConformanceInModule ( dc - > getParentModule ( ) ) ) ; <nl> for ( auto param : * genericParams ) <nl> builder . addGenericParameter ( param ) ; <nl> for ( auto param : * genericParams ) { <nl> mmm a / lib / SILGen / SILGenPoly . cpp <nl> ppp b / lib / SILGen / SILGenPoly . cpp <nl> buildThunkSignature ( SILGenFunction & gen , <nl> return genericSig ; <nl> } <nl> <nl> - ArchetypeBuilder builder ( * mod ) ; <nl> + ArchetypeBuilder builder ( ctx , LookUpConformanceInModule ( mod ) ) ; <nl> <nl> / / Add the existing generic signature . <nl> int depth = 0 ; <nl> mmm a / lib / Sema / TypeCheckDecl . cpp <nl> ppp b / lib / Sema / TypeCheckDecl . cpp <nl> static void setBoundVarsTypeError ( Pattern * pattern , ASTContext & ctx ) { <nl> <nl> / / / Create a fresh archetype builder . <nl> ArchetypeBuilder TypeChecker : : createArchetypeBuilder ( ModuleDecl * mod ) { <nl> - return ArchetypeBuilder ( * mod ) ; <nl> + return ArchetypeBuilder ( Context , LookUpConformanceInModule ( mod ) ) ; <nl> } <nl> <nl> / / / Expose TypeChecker ' s handling of GenericParamList to SIL parsing . <nl> mmm a / lib / Sema / TypeCheckProtocol . cpp <nl> ppp b / lib / Sema / TypeCheckProtocol . cpp <nl> RequirementEnvironment : : RequirementEnvironment ( <nl> / / Construct an archetype builder by collecting the constraints from the <nl> / / requirement and the context of the conformance together , because both <nl> / / define the capabilities of the requirement . <nl> - ArchetypeBuilder builder ( * conformanceDC - > getParentModule ( ) ) ; <nl> + ArchetypeBuilder builder ( <nl> + ctx , <nl> + LookUpConformanceInModule ( conformanceDC - > getParentModule ( ) ) ) ; <nl> SmallVector < GenericTypeParamType * , 4 > allGenericParams ; <nl> <nl> / / Add the generic signature of the context of the conformance . This includes <nl> mmm a / lib / Serialization / Deserialization . cpp <nl> ppp b / lib / Serialization / Deserialization . 
cpp <nl> void ModuleFile : : finishNormalConformance ( NormalProtocolConformance * conformance , <nl> <nl> / / Create an archetype builder , which will help us create the <nl> / / synthetic environment . <nl> - ArchetypeBuilder builder ( * getAssociatedModule ( ) ) ; <nl> + ArchetypeBuilder builder ( <nl> + getContext ( ) , <nl> + LookUpConformanceInModule ( getAssociatedModule ( ) ) ) ; <nl> builder . addGenericSignature ( syntheticSig ) ; <nl> builder . finalize ( SourceLoc ( ) ) ; <nl> syntheticEnv = builder . getGenericEnvironment ( syntheticSig ) ; <nl>
|
[ Archetype builder ] Use a LookupConformanceFn to resolve protocol conformances .
|
apple/swift
|
ac5e74601ba9439422c617d91b4113c8a73e0fd5
|
2017-01-11T00:40:38Z
|
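The Swift commit above replaces the `ModuleDecl &` held by `ArchetypeBuilder` with a `std::function<GenericFunction>` conformance-lookup callback, so callers inject how conformances are resolved (for example via `LookUpConformanceInModule(mod)`). The self-contained C++ sketch below illustrates that callback-injection idea with made-up types (`Builder`, `Conformance`, the lambda); it is not the Swift compiler's real code, only a picture of the design choice.

```cpp
// Minimal sketch: instead of holding a reference to a module and calling into
// it directly, the builder stores a std::function callback, so the caller
// decides where conformance answers come from (a module, a cache, a stub).
#include <functional>
#include <iostream>
#include <string>

struct Conformance { std::string protocolName; };

// The builder only knows the callback's shape, not its implementation.
using LookupConformanceFn =
    std::function<bool(const std::string& type, const std::string& protocol,
                       Conformance* out)>;

class Builder {
public:
    explicit Builder(LookupConformanceFn lookup) : lookup_(std::move(lookup)) {}

    bool requireConformance(const std::string& type, const std::string& protocol) {
        Conformance c;
        if (!lookup_(type, protocol, &c)) {
            std::cout << type << " does not conform to " << protocol << "\n";
            return false;
        }
        std::cout << type << " conforms to " << c.protocolName << "\n";
        return true;
    }

private:
    LookupConformanceFn lookup_;
};

int main() {
    // A stand-in for LookUpConformanceInModule(mod): here just a fixed table.
    LookupConformanceFn inModule = [](const std::string& type,
                                      const std::string& protocol,
                                      Conformance* out) {
        if (type == "Int" && protocol == "Equatable") {
            out->protocolName = protocol;
            return true;
        }
        return false;
    };

    Builder builder(inModule);
    builder.requireConformance("Int", "Equatable");
    builder.requireConformance("Foo", "Equatable");
    return 0;
}
```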
new file mode 100644 <nl> index 0000000000 . . a156e08e25 <nl> mmm / dev / null <nl> ppp b / code / data_structures / linked_list / linked_list / linked_list . cs <nl> <nl> + using System ; <nl> + <nl> + namespace LinkedList <nl> + { <nl> + class Node < T > <nl> + { <nl> + / / properties <nl> + private T value ; <nl> + private Node < T > nextNode ; <nl> + <nl> + <nl> + / / constructors <nl> + public Node ( T value , Node < T > nextNode ) <nl> + { <nl> + this . value = value ; <nl> + this . nextNode = nextNode ; <nl> + } <nl> + <nl> + public Node ( T value ) <nl> + { <nl> + this . value = value ; <nl> + this . nextNode = null ; <nl> + } <nl> + <nl> + public Node ( ) <nl> + { <nl> + this . value = default ( T ) ; <nl> + this . nextNode = null ; <nl> + } <nl> + <nl> + <nl> + / / getters <nl> + public T getValue ( ) <nl> + { <nl> + return this . value ; <nl> + } <nl> + <nl> + public Node < T > getNextNode ( ) <nl> + { <nl> + return nextNode ; <nl> + } <nl> + <nl> + <nl> + / / setters <nl> + public void setValue ( T value ) <nl> + { <nl> + this . value = value ; <nl> + } <nl> + <nl> + public void setNextNode ( Node < T > nextNode ) <nl> + { <nl> + this . nextNode = nextNode ; <nl> + } <nl> + <nl> + <nl> + / / sets the current object to the next node <nl> + / / in the linked list <nl> + public void setNext ( ) <nl> + { <nl> + if ( this . nextNode = = null ) <nl> + throw new Exception ( " Trying to move to null node " ) ; <nl> + else <nl> + { <nl> + this . value = nextNode . value ; <nl> + this . nextNode = nextNode . nextNode ; <nl> + } <nl> + } <nl> + <nl> + public override string ToString ( ) <nl> + { <nl> + return value . ToString ( ) ; <nl> + } <nl> + } <nl> + <nl> + class LinkedList < T > <nl> + { <nl> + / / properties <nl> + private Node < T > head = null ; <nl> + <nl> + / / constructors <nl> + public LinkedList ( ) <nl> + { <nl> + / / empty <nl> + } <nl> + <nl> + / / random access <nl> + public T getValue ( int index ) <nl> + { <nl> + Node < T > node = head ; <nl> + <nl> + for ( int i = 0 ; i < index ; i + + ) <nl> + { <nl> + node . setNext ( ) ; <nl> + } <nl> + <nl> + return node . getValue ( ) ; <nl> + } <nl> + <nl> + / / overriding the index operator <nl> + public T this [ int index ] <nl> + { <nl> + get { return getValue ( index ) ; } <nl> + } <nl> + <nl> + / / the amount of items in the list <nl> + public int count ( ) <nl> + { <nl> + int counter = 0 ; <nl> + Node < T > node = head ; <nl> + <nl> + while ( node . getNextNode ( ) ! = null ) <nl> + { <nl> + node . setNext ( ) ; <nl> + counter + + ; <nl> + } <nl> + <nl> + return counter ; <nl> + <nl> + } <nl> + <nl> + / / removes a node from the list <nl> + public void remove ( int index ) <nl> + { <nl> + Node < T > node = head ; <nl> + <nl> + for ( int i = 0 ; i < index - 1 ; i + + ) <nl> + { <nl> + node . setNext ( ) ; <nl> + } <nl> + <nl> + node . setNextNode ( node . getNextNode ( ) . getNextNode ( ) ) ; <nl> + } <nl> + <nl> + / / ToString method <nl> + public override string ToString ( ) <nl> + { <nl> + string s = " " ; <nl> + Node < T > node = head ; <nl> + <nl> + while ( node ! = null | | node . getNextNode ( ) ! = null ) <nl> + { <nl> + s + = node . getValue ( ) . ToString ( ) + ' \ n ' ; <nl> + node . 
setNext ( ) ; <nl> + } <nl> + <nl> + return s ; <nl> + } <nl> + <nl> + / / returns the head node <nl> + public Node < T > getHeadNode ( ) <nl> + { <nl> + return head ; <nl> + } <nl> + <nl> + <nl> + / / add a value to the end <nl> + public void append ( T value ) <nl> + { <nl> + Node < T > node = head ; <nl> + Node < T > newNode = new Node < T > ( value ) ; <nl> + <nl> + if ( head = = null ) <nl> + { <nl> + head = newNode ; <nl> + } <nl> + else <nl> + { <nl> + while ( node . getNextNode ( ) ! = null ) <nl> + node . setNext ( ) ; <nl> + node . setNextNode ( node ) ; <nl> + } <nl> + } <nl> + <nl> + / / insert a node in the middle <nl> + public void insert ( int index , T value ) <nl> + { <nl> + Node < T > node = head ; <nl> + Node < T > newNode = new Node < T > ( value ) ; <nl> + <nl> + for ( int i = 0 ; i < index - 1 ; i + + ) <nl> + { <nl> + node . setNext ( ) ; <nl> + } <nl> + <nl> + newNode . setNextNode ( node . getNextNode ( ) ) ; <nl> + node . setNextNode ( newNode ) ; <nl> + } <nl> + } <nl> + <nl> + class debug <nl> + { <nl> + public static void Main ( ) <nl> + { <nl> + LinkedList < int > list = new LinkedList < int > ( ) ; <nl> + <nl> + Console . WriteLine ( " Check 1 " ) ; <nl> + <nl> + list . append ( 2 ) ; <nl> + Console . WriteLine ( " Check 11 " ) ; <nl> + list . append ( 3 ) ; <nl> + Console . WriteLine ( " Check 12 " ) ; <nl> + list . append ( 4 ) ; <nl> + Console . WriteLine ( " Check 13 " ) ; <nl> + list . append ( 5 ) ; <nl> + <nl> + Console . WriteLine ( " Check 2 " ) ; <nl> + <nl> + Console . WriteLine ( list . ToString ( ) ) ; <nl> + <nl> + Console . WriteLine ( " Check 3 " ) ; <nl> + <nl> + list . remove ( 1 ) ; <nl> + <nl> + Console . WriteLine ( " Check 4 " ) ; <nl> + <nl> + Console . WriteLine ( list . ToString ( ) ) ; <nl> + } <nl> + } <nl> + } <nl>
|
Linked list in C #
|
OpenGenus/cosmos
|
b0a7c179cdebf7681047482d709321b8510dd4d0
|
2017-10-21T20:52:04Z
|
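In the C# `append` added above, once the list is non-empty the tail appears to be linked to itself (`node . setNextNode ( node )`) rather than to `newNode`, and `ToString` can dereference a null `node` when the list is empty. As a corrected reference, the sketch below (written in C++ rather than C# to keep the inserted examples in one language) walks to the tail and attaches the new node; it is an illustrative rewrite, not part of the committed code.

```cpp
// Minimal singly linked list append: walk to the current tail, then attach
// the freshly allocated node (instead of linking the tail back to itself).
#include <iostream>

template <typename T>
struct Node {
    T value;
    Node* next = nullptr;
    explicit Node(T v) : value(v) {}
};

template <typename T>
class LinkedList {
public:
    ~LinkedList() {
        while (head_) {
            Node<T>* next = head_->next;
            delete head_;
            head_ = next;
        }
    }

    void append(T value) {
        Node<T>* node = new Node<T>(value);
        if (!head_) {
            head_ = node;
            return;
        }
        Node<T>* tail = head_;
        while (tail->next) {
            tail = tail->next;   // walk to the current tail
        }
        tail->next = node;       // attach the new node, not the tail itself
    }

    void print() const {
        for (Node<T>* n = head_; n; n = n->next) {
            std::cout << n->value << '\n';
        }
    }

private:
    Node<T>* head_ = nullptr;
};

int main() {
    LinkedList<int> list;
    list.append(2);
    list.append(3);
    list.append(4);
    list.print();  // prints 2, 3, 4 on separate lines
    return 0;
}
```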
mmm a / xbmc / music / MusicDatabase . cpp <nl> ppp b / xbmc / music / MusicDatabase . cpp <nl> int CMusicDatabase : : GetSongsCount ( const Filter & filter ) <nl> return 0 ; <nl> } <nl> <nl> - bool CMusicDatabase : : GetAlbumPath ( int idAlbum , std : : string & basePath ) <nl> + bool CMusicDatabase : : GetAlbumPath ( int idAlbum , std : : string & basePath ) <nl> { <nl> + basePath . clear ( ) ; <nl> + std : : vector < std : : pair < std : : string , int > > paths ; <nl> + if ( ! GetAlbumPaths ( idAlbum , paths ) ) <nl> + return false ; <nl> + <nl> + for ( auto pathpair : paths ) <nl> + { <nl> + if ( basePath . empty ( ) ) <nl> + basePath = pathpair . first . c_str ( ) ; <nl> + else <nl> + URIUtils : : GetCommonPath ( basePath , pathpair . first . c_str ( ) ) ; <nl> + } <nl> + return true ; <nl> + } <nl> + <nl> + bool CMusicDatabase : : GetAlbumPaths ( int idAlbum , std : : vector < std : : pair < std : : string , int > > & paths ) <nl> + { <nl> + paths . clear ( ) ; <nl> std : : string strSQL ; <nl> try <nl> { <nl> if ( NULL = = m_pDB . get ( ) ) return false ; <nl> if ( NULL = = m_pDS2 . get ( ) ) return false ; <nl> <nl> - basePath . clear ( ) ; <nl> - <nl> / / Get the unique paths of songs on the album , providing there are no songs from <nl> / / other albums with the same path . This returns <nl> / / a ) < album > if is contains all the songs and no others , or <nl> bool CMusicDatabase : : GetAlbumPath ( int idAlbum , std : : string & basePath ) <nl> / / but does * not * return any path when albums are mixed together . That could be because of <nl> / / deliberate file organisation , or ( more likely ) because of a tagging error in album name <nl> / / or Musicbrainzalbumid . Thus it avoids finding somme generic music path . <nl> - strSQL = PrepareSQL ( " SELECT DISTINCT strPath FROM song " <nl> + strSQL = PrepareSQL ( " SELECT DISTINCT strPath , song . idPath FROM song " <nl> " JOIN path ON song . idPath = path . idPath " <nl> " WHERE song . idAlbum = % ld " <nl> " AND ( SELECT COUNT ( DISTINCT ( idAlbum ) ) FROM song AS song2 " <nl> " WHERE idPath = song . idPath ) = 1 " , idAlbum ) ; <nl> <nl> - if ( ! m_pDS2 - > query ( strSQL ) ) return false ; <nl> - int iRowsFound = m_pDS2 - > num_rows ( ) ; <nl> - <nl> - if ( iRowsFound = = 0 ) <nl> + if ( ! m_pDS2 - > query ( strSQL ) ) <nl> + return false ; <nl> + if ( m_pDS2 - > num_rows ( ) = = 0 ) <nl> { <nl> / / Album does not have a unique path , files are mixed <nl> m_pDS2 - > close ( ) ; <nl> return false ; <nl> } <nl> - else if ( iRowsFound = = 1 ) <nl> - { <nl> - / / Path contains all the songs and no others <nl> - basePath = m_pDS2 - > fv ( " strPath " ) . get_asString ( ) ; <nl> - } <nl> - else <nl> - { <nl> - / / e . g . < album > / cd1 , < album > / cd2 etc . for disc sets <nl> - / / Find the common path <nl> - while ( ! m_pDS2 - > eof ( ) ) <nl> - { <nl> - std : : string path = m_pDS2 - > fv ( " strPath " ) . get_asString ( ) ; <nl> - if ( basePath . empty ( ) ) <nl> - basePath = path ; <nl> - else <nl> - URIUtils : : GetCommonPath ( basePath , path ) ; <nl> <nl> - m_pDS2 - > next ( ) ; <nl> - } <nl> + while ( ! m_pDS2 - > eof ( ) ) <nl> + { <nl> + paths . emplace_back ( m_pDS2 - > fv ( " strPath " ) . get_asString ( ) , m_pDS2 - > fv ( " song . idPath " ) . get_asInt ( ) ) ; <nl> + m_pDS2 - > next ( ) ; <nl> } <nl> / / Cleanup recordset data <nl> - m_pDS2 - > close ( ) ; <nl> + m_pDS2 - > close ( ) ; <nl> return true ; <nl> } <nl> catch ( . . . 
) <nl> { <nl> - CLog : : Log ( LOGERROR , " CMusicDatabase : : % s - failed to execute % s " , __FUNCTION__ , strSQL . c_str ( ) ) ; <nl> + CLog : : Log ( LOGERROR , " CMusicDatabase : : % s - failed to execute % s " , __FUNCTION__ , strSQL . c_str ( ) ) ; <nl> } <nl> <nl> return false ; <nl> } <nl> <nl> + int CMusicDatabase : : GetDiscnumberForPathID ( int idPath ) <nl> + { <nl> + std : : string strSQL ; <nl> + int result = - 1 ; <nl> + try <nl> + { <nl> + if ( NULL = = m_pDB . get ( ) ) return - 1 ; <nl> + if ( NULL = = m_pDS2 . get ( ) ) return - 1 ; <nl> + <nl> + strSQL = PrepareSQL ( " SELECT DISTINCT ( song . iTrack > > 16 ) AS discnum FROM song " <nl> + " WHERE idPath = % i " , idPath ) ; <nl> + <nl> + if ( ! m_pDS2 - > query ( strSQL ) ) <nl> + return - 1 ; <nl> + if ( m_pDS2 - > num_rows ( ) = = 1 ) <nl> + { / / Songs with this path have a unique disc number <nl> + result = m_pDS2 - > fv ( " discnum " ) . get_asInt ( ) ; <nl> + } <nl> + / / Cleanup recordset data <nl> + m_pDS2 - > close ( ) ; <nl> + } <nl> + catch ( . . . ) <nl> + { <nl> + CLog : : Log ( LOGERROR , " CMusicDatabase : : % s - failed to execute % s " , __FUNCTION__ , strSQL . c_str ( ) ) ; <nl> + } <nl> + return result ; <nl> + } <nl> + <nl> / / Get old " artist path " - where artist . nfo and art was located v17 and below . <nl> / / It is the path common to all albums by an ( album ) artist , but ensure it is unique <nl> / / to that artist and not shared with other artists . Previously this caused incorrect nfo <nl> mmm a / xbmc / music / MusicDatabase . h <nl> ppp b / xbmc / music / MusicDatabase . h <nl> class CMusicDatabase : public CDatabase <nl> bool GetPaths ( std : : set < std : : string > & paths ) ; <nl> bool SetPathHash ( const std : : string & path , const std : : string & hash ) ; <nl> bool GetPathHash ( const std : : string & path , std : : string & hash ) ; <nl> + bool GetAlbumPaths ( int idAlbum , std : : vector < std : : pair < std : : string , int > > & paths ) ; <nl> bool GetAlbumPath ( int idAlbum , std : : string & basePath ) ; <nl> + int GetDiscnumberForPathID ( int idPath ) ; <nl> bool GetOldArtistPath ( int idArtist , std : : string & path ) ; <nl> bool GetArtistPath ( const CArtist & artist , std : : string & path ) ; <nl> bool GetAlbumFolder ( const CAlbum & album , const std : : string & strAlbumPath , std : : string & strFolder ) ; <nl> mmm a / xbmc / music / MusicThumbLoader . cpp <nl> ppp b / xbmc / music / MusicThumbLoader . cpp <nl> bool CMusicThumbLoader : : FillLibraryArt ( CFileItem & item ) <nl> if ( artfound ) <nl> { <nl> std : : string fanartfallback ; <nl> + bool bDiscSetThumbSet = false ; <nl> std : : map < std : : string , std : : string > artmap ; <nl> for ( auto artitem : art ) <nl> { <nl> bool CMusicThumbLoader : : FillLibraryArt ( CFileItem & item ) <nl> <nl> / / Add fallback art for " thumb " and " fanart " art types only <nl> / / Set album thumb as the fallback used when song thumb is missing <nl> - if ( tag . GetType ( ) = = MediaTypeSong & & artitem . mediaType = = MediaTypeAlbum & & artitem . artType = = " thumb " ) <nl> - item . SetArtFallback ( artitem . artType , artname ) ; <nl> + / / or use extra album thumb when part of disc set <nl> + if ( tag . GetType ( ) = = MediaTypeSong & & artitem . mediaType = = MediaTypeAlbum ) <nl> + { <nl> + if ( artitem . artType = = " thumb " & & ! bDiscSetThumbSet ) <nl> + item . SetArtFallback ( artitem . artType , artname ) ; <nl> + else if ( StringUtils : : StartsWith ( artitem . 
artType , " thumb " ) ) <nl> + { <nl> + int number = atoi ( artitem . artType . substr ( 5 ) . c_str ( ) ) ; <nl> + if ( number > 0 & & tag . GetDiscNumber ( ) = = number ) <nl> + { <nl> + item . SetArtFallback ( " thumb " , artname ) ; <nl> + bDiscSetThumbSet = true ; <nl> + } <nl> + } <nl> + } <nl> <nl> / / For albums and songs set fallback fanart from the artist . <nl> / / For songs prefer primary song artist over primary albumartist fanart as fallback fanart <nl> mmm a / xbmc / music / infoscanner / MusicInfoScanner . cpp <nl> ppp b / xbmc / music / infoscanner / MusicInfoScanner . cpp <nl> void CMusicInfoScanner : : Process ( ) <nl> continue ; <nl> } <nl> <nl> + / / Clear list of albums added by this scan <nl> + m_albumsAdded . clear ( ) ; <nl> bool scancomplete = DoScan ( * it ) ; <nl> if ( scancomplete ) <nl> { <nl> if ( m_albumsAdded . size ( ) > 0 ) <nl> { <nl> + / / Set local art for added album disc sets and primary album artists <nl> + RetrieveLocalArt ( ) ; <nl> + <nl> if ( m_flags & SCAN_ONLINE ) <nl> / / Download additional album and artist information for the recently added albums . <nl> - / / This also identifies any local artist thumb and fanart if it exitsts , and gives it priority , <nl> + / / This also identifies any local artist thumb and fanart if it exists , and gives it priority , <nl> / / otherwise it is set to the first available from the remote thumbs and fanart that was scraped . <nl> ScrapeInfoAddedAlbums ( ) ; <nl> } <nl> void CMusicInfoScanner : : Start ( const std : : string & strDirectory , int flags ) <nl> m_pathsToScan . clear ( ) ; <nl> m_seenPaths . clear ( ) ; <nl> m_albumsAdded . clear ( ) ; <nl> - m_artistsArt . clear ( ) ; <nl> m_flags = flags ; <nl> <nl> if ( strDirectory . empty ( ) ) <nl> int CMusicInfoScanner : : RetrieveMusicInfo ( const std : : string & strDirectory , CFileI <nl> <nl> VECALBUMS albums ; <nl> FileItemsToAlbums ( scannedItems , albums , & songsMap ) ; <nl> + <nl> + / * <nl> + Set thumb for songs and , if only one album in folder , store the thumb for <nl> + the album ( music db ) and the folder path ( in Textures db ) too . <nl> + The album and path thumb is either set to the folder art , or failing that to <nl> + the art embedded in the first music file . <nl> + Song thumb is only set when it varies , otherwise it is cleared so that it will <nl> + fallback to the album art ( that may be from the first file , or that of the <nl> + folder or set later by scraping from NFO files or remote sources ) . Clearing <nl> + saves caching repeats of the same image . <nl> + <nl> + However even if all songs are from one album this may not be the album <nl> + folder . It could be just a subfolder containing some of the songs from a disc <nl> + set e . g . CD1 , CD2 etc . , or the album could spread across many folders . In <nl> + this case the album art gets reset every time a folder with songs from just <nl> + that album is processed , and needs to be corrected later once all the parts <nl> + of the album have been scanned . <nl> + * / <nl> FindArtForAlbums ( albums , items . GetPath ( ) ) ; <nl> <nl> / * Strategy : Having scanned tags and made a list of albums , add them to the library . Only then try <nl> int CMusicInfoScanner : : RetrieveMusicInfo ( const std : : string & strDirectory , CFileI <nl> <nl> album - > strPath = strDirectory ; <nl> m_musicDatabase . AddAlbum ( * album ) ; <nl> - m_albumsAdded . 
emplace_back ( album - > idAlbum ) ; <nl> - <nl> - / * <nl> - Make the first attempt ( during scanning ) to get local album artist art looking for thumbs and <nl> - fanart in the folder immediately above the album folder . This is for backwards compatibility . <nl> - It can only do this if the folder being processed contains only one album , and can only do so for <nl> - the first album artist if the album is a collaboration e . g . composer , conductor , orchestra , or by <nl> - several pop artists in their own right . <nl> - It avoids repeatedly processing the same artist by maintaining a set . Adding the album may have added <nl> - new artists , or provide art for an existing ( song ) artist , but does not replace any artwork already set . <nl> - Hence once art has been found for an album artist , art is not searched for in other folders . <nl> - <nl> - It will find art for " various artists " , if artwork is located above the folder containing compilatons . <nl> - * / <nl> - if ( albums . size ( ) = = 1 & & ! album - > artistCredits . empty ( ) ) <nl> - { <nl> - if ( m_artistsArt . find ( album - > artistCredits [ 0 ] . GetArtistId ( ) ) = = m_artistsArt . end ( ) ) <nl> - { <nl> - m_artistsArt . insert ( album - > artistCredits [ 0 ] . GetArtistId ( ) ) ; / / Artist processed <nl> - std : : map < std : : string , std : : string > art ; <nl> - if ( ! m_musicDatabase . GetArtForItem ( album - > artistCredits [ 0 ] . GetArtistId ( ) , MediaTypeArtist , art ) ) <nl> - { <nl> - / / Artist does not already have art , so try to find some . <nl> - / / Do not have URL of other available art before scraping , so only ID and path needed <nl> - CArtist artist ; <nl> - artist . idArtist = album - > artistCredits [ 0 ] . GetArtistId ( ) ; <nl> - artist . strPath = URIUtils : : GetParentPath ( album - > strPath ) ; <nl> - m_musicDatabase . SetArtForItem ( album - > artistCredits [ 0 ] . GetArtistId ( ) , MediaTypeArtist , GetArtistArtwork ( artist , 1 ) ) ; <nl> - } <nl> - } <nl> - } <nl> - <nl> + m_albumsAdded . insert ( album - > idAlbum ) ; <nl> + <nl> numAdded + = album - > songs . size ( ) ; <nl> } <nl> return numAdded ; <nl> void MUSIC_INFO : : CMusicInfoScanner : : ScrapeInfoAddedAlbums ( ) <nl> if ( ! albumScraper | | ! artistScraper ) <nl> return ; <nl> <nl> + int i = 0 ; <nl> std : : set < int > artists ; <nl> - for ( auto i = 0u ; i < m_albumsAdded . size ( ) ; + + i ) <nl> + for ( auto albumId : m_albumsAdded ) <nl> { <nl> + i + + ; <nl> if ( m_bStop ) <nl> break ; <nl> / / Scrape album data <nl> - int albumId = m_albumsAdded [ i ] ; <nl> CAlbum album ; <nl> if ( ! m_musicDatabase . HasAlbumBeenScraped ( albumId ) ) <nl> { <nl> if ( m_handle ) <nl> { <nl> - float percentage = static_cast < float > ( i * 100 ) / static_cast < float > ( m_albumsAdded . size ( ) ) ; <nl> m_handle - > SetText ( album . GetAlbumArtistString ( ) + " - " + album . strAlbum ) ; <nl> - m_handle - > SetPercentage ( percentage ) ; <nl> + m_handle - > SetProgress ( i , m_albumsAdded . size ( ) ) ; <nl> } <nl> <nl> / / Fetch any artist mbids for album artist ( s ) and song artists when scraping those too . <nl> void MUSIC_INFO : : CMusicInfoScanner : : ScrapeInfoAddedAlbums ( ) <nl> } <nl> } <nl> } <nl> - / / Clear list of albums added to prevent them being scraped again <nl> - m_albumsAdded . clear ( ) ; <nl> } <nl> <nl> void MUSIC_INFO : : CMusicInfoScanner : : RetrieveArtistArt ( ) <nl> { <nl> bool albumartistsonly = ! CServiceBroker : : GetSettings ( ) . 
GetBool ( CSettings : : SETTING_MUSICLIBRARY_SHOWCOMPILATIONARTISTS ) ; <nl> + int i = 0 ; <nl> std : : set < int > artists ; <nl> - for ( auto i = 0u ; i < m_albumsAdded . size ( ) ; + + i ) <nl> + for ( auto albumId : m_albumsAdded ) <nl> { <nl> + i + + ; <nl> if ( m_bStop ) <nl> break ; <nl> - int albumId = m_albumsAdded [ i ] ; <nl> CAlbum album ; <nl> / / Fetch album artist ( s ) ids <nl> - m_musicDatabase . GetAlbum ( albumId , album , false ) ; <nl> + m_musicDatabase . GetAlbum ( albumId , album , ! albumartistsonly ) ; <nl> if ( m_handle ) <nl> { <nl> - float percentage = static_cast < float > ( i * 100 ) / static_cast < float > ( m_albumsAdded . size ( ) ) ; <nl> m_handle - > SetText ( album . GetAlbumArtistString ( ) + " - " + album . strAlbum ) ; <nl> - m_handle - > SetPercentage ( percentage ) ; <nl> + m_handle - > SetProgress ( i , m_albumsAdded . size ( ) ) ; <nl> } <nl> <nl> / / Set art for album artists that have not been processed before , avoiding repeating <nl> void MUSIC_INFO : : CMusicInfoScanner : : RetrieveArtistArt ( ) <nl> } <nl> } <nl> <nl> + / * <nl> + Set thumb for songs and the album ( if only one album in folder ) . <nl> + The album thumb is either set to the folder art , or failing that to the art <nl> + embedded in the first music file . However this does not allow for there being <nl> + other folders with more songs from the album e . g . this was a subfolder CD1 <nl> + and there is CD2 etc . yet to be processed <nl> + Song thumb is only set when it varies , otherwise it is cleared so that it will <nl> + fallback to the album art ( that may be from the first file , or that of the <nl> + folder or set later by scraping from NFO files or remote sources ) . Clearing <nl> + saves caching repeats of the same image . <nl> + * / <nl> void CMusicInfoScanner : : FindArtForAlbums ( VECALBUMS & albums , const std : : string & path ) <nl> { <nl> / * <nl> void CMusicInfoScanner : : FindArtForAlbums ( VECALBUMS & albums , const std : : string & p <nl> } <nl> } <nl> <nl> + void MUSIC_INFO : : CMusicInfoScanner : : RetrieveLocalArt ( ) <nl> + { <nl> + if ( m_handle ) <nl> + { <nl> + m_handle - > SetTitle ( g_localizeStrings . Get ( 506 ) ) ; / / " Checking media files . . . " <nl> + / / ! @ todo : title = Checking for local art <nl> + } <nl> + <nl> + std : : set < int > artistsArtDone ; / / artists processed to avoid unsuccessful repeats <nl> + int count = 0 ; <nl> + for ( auto albumId : m_albumsAdded ) <nl> + { <nl> + count + + ; <nl> + if ( m_bStop ) <nl> + break ; <nl> + CAlbum album ; <nl> + m_musicDatabase . GetAlbum ( albumId , album , false ) ; <nl> + if ( m_handle ) <nl> + { <nl> + m_handle - > SetText ( album . GetAlbumArtistString ( ) + " - " + album . strAlbum ) ; <nl> + m_handle - > SetProgress ( count , m_albumsAdded . size ( ) ) ; <nl> + } <nl> + <nl> + / * <nl> + Adjust album art for disc sets <nl> + <nl> + When songs from an album are are all under a unique common folder ( no songs <nl> + from other albums ) but spread over multiple subfolders , then adjust the <nl> + album art by looking for local art in the ( common ) album folder . <nl> + It has already been during set by FindArtForAlbums ( ) to either the art of <nl> + the last subfolder processed ( if there is any ) , or to the first song in <nl> + that subfolder with embedded art ( if there is any ) . <nl> + Not when songs from different albums are in one folder , no paths are returned . <nl> + * / <nl> + <nl> + std : : vector < std : : pair < std : : string , int > > paths ; <nl> + m_musicDatabase . 
GetAlbumPaths ( albumId , paths ) ; <nl> + / / Get album path , the common path when more than one <nl> + for ( auto pathpair : paths ) <nl> + { <nl> + if ( album . strPath . empty ( ) ) <nl> + album . strPath = pathpair . first . c_str ( ) ; <nl> + else <nl> + URIUtils : : GetCommonPath ( album . strPath , pathpair . first . c_str ( ) ) ; <nl> + } <nl> + if ( paths . size ( ) > 1 ) <nl> + { <nl> + / / Get art from any local files in album folder . <nl> + / / This has not been done during scan <nl> + CFileItem albumItem ( album . strPath , true ) ; <nl> + std : : string albumArt = albumItem . GetUserMusicThumb ( true ) ; <nl> + <nl> + / * <nl> + When we have a true disc set - subfolders AND songs tagged with same <nl> + unique discnumber in in each subfolder - save the disc cover art , and if <nl> + we don ' t have album folder art then use the first disc in set rather <nl> + than the last processed . <nl> + * / <nl> + CMusicThumbLoader loader ; <nl> + for ( auto pathpair : paths ) <nl> + { <nl> + int discnum = m_musicDatabase . GetDiscnumberForPathID ( pathpair . second ) ; <nl> + if ( discnum > 0 ) <nl> + { <nl> + / / Get art for path from textures db ( could be embedded or local file ) <nl> + CFileItem discItem ( pathpair . first . c_str ( ) , true ) ; <nl> + std : : string artURL = loader . GetCachedImage ( discItem , " thumb " ) ; <nl> + if ( ! artURL . empty ( ) ) <nl> + { <nl> + / / Save the disc set cover art as album " thumb < disc number > " <nl> + std : : string strArtType = StringUtils : : Format ( " thumb % i " , discnum ) ; <nl> + m_musicDatabase . SetArtForItem ( album . idAlbum , MediaTypeAlbum , strArtType , artURL ) ; <nl> + <nl> + if ( albumArt . empty ( ) & & discnum = = 1 ) <nl> + { / / Use art for first disc in set as album art <nl> + albumArt = artURL ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + / / Save Album thumb <nl> + if ( ! albumArt . empty ( ) ) <nl> + { <nl> + m_musicDatabase . SetArtForItem ( album . idAlbum , MediaTypeAlbum , " thumb " , albumArt ) ; <nl> + / / Assign art as folder thumb ( in textures db ) as well <nl> + loader . SetCachedImage ( albumItem , " thumb " , albumArt ) ; <nl> + } <nl> + } <nl> + <nl> + / * <nl> + Local album artist art <nl> + <nl> + Look in the nominated " Artist Information Folder " for thumbs and fanart . <nl> + Failing that , for backward compatibility , fallback to the folder immediately <nl> + above the album folder . <nl> + It can only fallback if the album has a unique folder , and can only do so <nl> + for the first album artist if the album is a collaboration e . g . composer , <nl> + conductor , orchestra , or by several pop artists in their own right . <nl> + Avoids repeatedly processing the same artist by maintaining a set . <nl> + <nl> + Adding the album may have added new artists , or provide art for an existing <nl> + ( song ) artist , but does not replace any artwork already set . Hence once art <nl> + has been found for an album artist , art is not searched for in other folders . <nl> + <nl> + It will find art for " various artists " , if artwork is located above the <nl> + folder containing compilatons . <nl> + * / <nl> + for ( auto artistCredit = album . artistCredits . begin ( ) ; artistCredit ! = album . artistCredits . end ( ) ; + + artistCredit ) <nl> + { <nl> + if ( m_bStop ) <nl> + break ; <nl> + int idArtist = artistCredit - > GetArtistId ( ) ; <nl> + if ( artistsArtDone . find ( idArtist ) = = artistsArtDone . end ( ) ) <nl> + { <nl> + artistsArtDone . 
insert ( idArtist ) ; / / Artist processed <nl> + std : : map < std : : string , std : : string > art ; <nl> + if ( ! m_musicDatabase . GetArtForItem ( idArtist , MediaTypeArtist , art ) ) <nl> + { <nl> + CArtist artist ; <nl> + / / Get artist and path for artist in the Artists Info folder <nl> + m_musicDatabase . GetArtist ( idArtist , artist ) ; <nl> + m_musicDatabase . GetArtistPath ( artist , artist . strPath ) ; <nl> + art = GetArtistArtwork ( artist , 1 ) ; <nl> + / / If no art has been found in the Artists Info folder , for primary <nl> + / / album artist look in the folder immediately above the album folder <nl> + if ( art . empty ( ) & & ! album . strPath . empty ( ) & & artistCredit = = album . artistCredits . begin ( ) ) <nl> + { <nl> + artist . strPath = URIUtils : : GetParentPath ( album . strPath ) ; <nl> + art = GetArtistArtwork ( artist , 1 ) ; <nl> + } <nl> + m_musicDatabase . SetArtForItem ( artist . idArtist , MediaTypeArtist , art ) ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + } <nl> + <nl> int CMusicInfoScanner : : GetPathHash ( const CFileItemList & items , std : : string & hash ) <nl> { <nl> / / Create a hash based on the filenames , filesize and filedate . Also count the number of files <nl> CMusicInfoScanner : : UpdateDatabaseAlbumInfo ( CAlbum & album , <nl> { <nl> bool overridetags = CServiceBroker : : GetSettings ( ) . GetBool ( CSettings : : SETTING_MUSICLIBRARY_OVERRIDETAGS ) ; <nl> album . MergeScrapedAlbum ( albumInfo . GetAlbum ( ) , overridetags ) ; <nl> - m_musicDatabase . Open ( ) ; <nl> m_musicDatabase . UpdateAlbum ( album ) ; <nl> GetAlbumArtwork ( album . idAlbum , album ) ; <nl> - m_musicDatabase . Close ( ) ; <nl> albumInfo . SetLoaded ( true ) ; <nl> } <nl> return albumDownloadStatus ; <nl> CMusicInfoScanner : : UpdateDatabaseArtistInfo ( CArtist & artist , <nl> else if ( artistDownloadStatus = = INFO_ADDED ) <nl> { <nl> artist . MergeScrapedArtist ( artistInfo . GetArtist ( ) , CServiceBroker : : GetSettings ( ) . GetBool ( CSettings : : SETTING_MUSICLIBRARY_OVERRIDETAGS ) ) ; <nl> - m_musicDatabase . Open ( ) ; <nl> m_musicDatabase . UpdateArtist ( artist ) ; <nl> - / / If artist art has not been set from < art > tag then look in path or use first available from scraped list <nl> - if ( artist . art . empty ( ) ) <nl> - { <nl> - m_musicDatabase . GetArtistPath ( artist , artist . strPath ) ; <nl> - m_musicDatabase . SetArtForItem ( artist . idArtist , MediaTypeArtist , GetArtistArtwork ( artist , 1 ) ) ; <nl> - } <nl> - m_musicDatabase . Close ( ) ; <nl> artistInfo . SetLoaded ( ) ; <nl> } <nl> + <nl> + / / When artist still has no art look in Artists Info folder ( there may be art <nl> + / / files , but no NFO ) or use first available from scraped list when it has <nl> + / / been scraped <nl> + if ( artist . art . empty ( ) ) <nl> + { <nl> + m_musicDatabase . GetArtistPath ( artist , artist . strPath ) ; <nl> + m_musicDatabase . SetArtForItem ( artist . idArtist , MediaTypeArtist , GetArtistArtwork ( artist , 1 ) ) ; <nl> + } <nl> + <nl> return artistDownloadStatus ; <nl> } <nl> <nl> mmm a / xbmc / music / infoscanner / MusicInfoScanner . h <nl> ppp b / xbmc / music / infoscanner / MusicInfoScanner . 
h <nl> class CMusicInfoScanner : public IRunnable , public CInfoScanner <nl> * / <nl> int RetrieveMusicInfo ( const std : : string & strDirectory , CFileItemList & items ) ; <nl> <nl> + void RetrieveLocalArt ( ) ; <nl> void ScrapeInfoAddedAlbums ( ) ; <nl> void RetrieveArtistArt ( ) ; <nl> <nl> class CMusicInfoScanner : public IRunnable , public CInfoScanner <nl> int m_scanType ; / / 0 - load from files , 1 - albums , 2 - artists <nl> CMusicDatabase m_musicDatabase ; <nl> <nl> - std : : vector < int > m_albumsAdded ; <nl> - std : : set < int > m_artistsArt ; <nl> - <nl> + std : : set < int > m_albumsAdded ; <nl> + <nl> std : : set < std : : string > m_seenPaths ; <nl> int m_flags ; <nl> CThread m_fileCountReader ; <nl>
|
Fix fetching album and album artist art from local image files when album split across multiple subfolders e.g. disc sets. Handle local disc subfolder art as extra thumbs for the album, and use as fallback song thumbs.
|
xbmc/xbmc
|
7b1a4fc62e79bc9b1ef7e3a5e6fc82ca1cd774d9
|
2018-04-15T07:21:56Z
|
mmm a / xbmc / cores / VideoPlayer / VideoRenderers / WinRenderer . cpp <nl> ppp b / xbmc / cores / VideoPlayer / VideoRenderers / WinRenderer . cpp <nl> bool CWinRenderer : : Configure ( unsigned int width , unsigned int height , unsigned i <nl> ManageDisplay ( ) ; <nl> <nl> SelectRenderMethod ( ) ; <nl> + ManageTextures ( ) ; <nl> m_bConfigured = true ; <nl> <nl> return true ; <nl>
|
VideoPlayer: WinRenderer - create textures on configure
|
xbmc/xbmc
|
29e3ed89085f83ac2edb1dcba8c95c904bf2336d
|
2016-01-17T15:24:41Z
|
mmm a / src / flag - definitions . h <nl> ppp b / src / flag - definitions . h <nl> DEFINE_BOOL ( optimize_ephemerons , true , <nl> DEFINE_NEG_NEG_IMPLICATION ( optimize_ephemerons , parallel_ephemeron_marking ) <nl> DEFINE_NEG_NEG_IMPLICATION ( optimize_ephemerons , parallel_ephemeron_visiting ) <nl> <nl> - DEFINE_BOOL ( young_generation_large_objects , false , <nl> - " allocates large objects by default in the young generation large " <nl> - " object space " ) <nl> - <nl> / / assembler - ia32 . cc / assembler - arm . cc / assembler - x64 . cc <nl> DEFINE_BOOL ( debug_code , DEBUG_BOOL , <nl> " generate extra code ( assertions ) for debugging " ) <nl> mmm a / src / globals . h <nl> ppp b / src / globals . h <nl> class MapSpace ; <nl> class MarkCompactCollector ; <nl> class MaybeObject ; <nl> class NewSpace ; <nl> - class NewLargeObjectSpace ; <nl> class Object ; <nl> class OldSpace ; <nl> class ParameterCount ; <nl> typedef bool ( * WeakSlotCallbackWithHeap ) ( Heap * heap , Object * * pointer ) ; <nl> enum AllocationSpace { <nl> / / TODO ( v8 : 7464 ) : Actually map this space ' s memory as read - only . <nl> RO_SPACE , / / Immortal , immovable and immutable objects , <nl> - NEW_SPACE , / / Young generation semispaces for regular objects collected with <nl> - / / Scavenger . <nl> - OLD_SPACE , / / Old generation regular object space . <nl> - CODE_SPACE , / / Old generation code object space , marked executable . <nl> - MAP_SPACE , / / Old generation map object space , non - movable . <nl> - LO_SPACE , / / Old generation large object space . <nl> - NEW_LO_SPACE , / / Young generation large object space . <nl> + NEW_SPACE , / / Semispaces collected with copying collector . <nl> + OLD_SPACE , / / May contain pointers to new space . <nl> + CODE_SPACE , / / No pointers to new space , marked executable . <nl> + MAP_SPACE , / / Only and all map objects . <nl> + LO_SPACE , / / Promoted large objects . <nl> <nl> FIRST_SPACE = RO_SPACE , <nl> - LAST_SPACE = NEW_LO_SPACE , <nl> + LAST_SPACE = LO_SPACE , <nl> FIRST_GROWABLE_PAGED_SPACE = OLD_SPACE , <nl> LAST_GROWABLE_PAGED_SPACE = MAP_SPACE <nl> } ; <nl> mmm a / src / heap / heap - inl . h <nl> ppp b / src / heap / heap - inl . h <nl> AllocationResult Heap : : AllocateRaw ( int size_in_bytes , AllocationSpace space , <nl> } <nl> } else if ( LO_SPACE = = space ) { <nl> DCHECK ( large_object ) ; <nl> - if ( FLAG_young_generation_large_objects ) { <nl> - allocation = new_lo_space_ - > AllocateRaw ( size_in_bytes , NOT_EXECUTABLE ) ; <nl> - } else { <nl> - allocation = lo_space_ - > AllocateRaw ( size_in_bytes , NOT_EXECUTABLE ) ; <nl> - } <nl> + allocation = lo_space_ - > AllocateRaw ( size_in_bytes , NOT_EXECUTABLE ) ; <nl> } else if ( MAP_SPACE = = space ) { <nl> allocation = map_space_ - > AllocateRawUnaligned ( size_in_bytes ) ; <nl> } else if ( RO_SPACE = = space ) { <nl> mmm a / src / heap / heap . cc <nl> ppp b / src / heap / heap . 
cc <nl> Heap : : Heap ( ) <nl> code_space_ ( nullptr ) , <nl> map_space_ ( nullptr ) , <nl> lo_space_ ( nullptr ) , <nl> - new_lo_space_ ( nullptr ) , <nl> read_only_space_ ( nullptr ) , <nl> write_protect_code_memory_ ( false ) , <nl> code_space_memory_modification_scope_depth_ ( 0 ) , <nl> const char * Heap : : GetSpaceName ( int idx ) { <nl> return " code_space " ; <nl> case LO_SPACE : <nl> return " large_object_space " ; <nl> - case NEW_LO_SPACE : <nl> - return " new_large_object_space " ; <nl> case RO_SPACE : <nl> return " read_only_space " ; <nl> default : <nl> bool Heap : : InSpace ( HeapObject * value , AllocationSpace space ) { <nl> return map_space_ - > Contains ( value ) ; <nl> case LO_SPACE : <nl> return lo_space_ - > Contains ( value ) ; <nl> - case NEW_LO_SPACE : <nl> - return new_lo_space_ - > Contains ( value ) ; <nl> case RO_SPACE : <nl> return read_only_space_ - > Contains ( value ) ; <nl> } <nl> bool Heap : : InSpaceSlow ( Address addr , AllocationSpace space ) { <nl> return map_space_ - > ContainsSlow ( addr ) ; <nl> case LO_SPACE : <nl> return lo_space_ - > ContainsSlow ( addr ) ; <nl> - case NEW_LO_SPACE : <nl> - return new_lo_space_ - > ContainsSlow ( addr ) ; <nl> case RO_SPACE : <nl> return read_only_space_ - > ContainsSlow ( addr ) ; <nl> } <nl> UNREACHABLE ( ) ; <nl> } <nl> <nl> + <nl> bool Heap : : IsValidAllocationSpace ( AllocationSpace space ) { <nl> switch ( space ) { <nl> case NEW_SPACE : <nl> bool Heap : : IsValidAllocationSpace ( AllocationSpace space ) { <nl> case CODE_SPACE : <nl> case MAP_SPACE : <nl> case LO_SPACE : <nl> - case NEW_LO_SPACE : <nl> case RO_SPACE : <nl> return true ; <nl> default : <nl> void Heap : : SetUp ( ) { <nl> space_ [ CODE_SPACE ] = code_space_ = new CodeSpace ( this ) ; <nl> space_ [ MAP_SPACE ] = map_space_ = new MapSpace ( this ) ; <nl> space_ [ LO_SPACE ] = lo_space_ = new LargeObjectSpace ( this ) ; <nl> - space_ [ NEW_LO_SPACE ] = new_lo_space_ = new NewLargeObjectSpace ( this ) ; <nl> <nl> / / Set up the seed that is used to randomize the string hash function . <nl> DCHECK_EQ ( Smi : : kZero , hash_seed ( ) ) ; <nl> const char * AllocationSpaceName ( AllocationSpace space ) { <nl> return " MAP_SPACE " ; <nl> case LO_SPACE : <nl> return " LO_SPACE " ; <nl> - case NEW_LO_SPACE : <nl> - return " NEW_LO_SPACE " ; <nl> case RO_SPACE : <nl> return " RO_SPACE " ; <nl> default : <nl> bool Heap : : AllowedToBeMigrated ( HeapObject * obj , AllocationSpace dst ) { <nl> return dst = = CODE_SPACE & & type = = CODE_TYPE ; <nl> case MAP_SPACE : <nl> case LO_SPACE : <nl> - case NEW_LO_SPACE : <nl> case RO_SPACE : <nl> return false ; <nl> } <nl> mmm a / src / heap / heap . h <nl> ppp b / src / heap / heap . h <nl> class Heap { <nl> CodeSpace * code_space ( ) { return code_space_ ; } <nl> MapSpace * map_space ( ) { return map_space_ ; } <nl> LargeObjectSpace * lo_space ( ) { return lo_space_ ; } <nl> - NewLargeObjectSpace * new_lo_space ( ) { return new_lo_space_ ; } <nl> ReadOnlySpace * read_only_space ( ) { return read_only_space_ ; } <nl> <nl> inline PagedSpace * paged_space ( int idx ) ; <nl> class Heap { <nl> CodeSpace * code_space_ ; <nl> MapSpace * map_space_ ; <nl> LargeObjectSpace * lo_space_ ; <nl> - NewLargeObjectSpace * new_lo_space_ ; <nl> ReadOnlySpace * read_only_space_ ; <nl> / / Map from the space id to the space . <nl> Space * space_ [ LAST_SPACE + 1 ] ; <nl> mmm a / src / heap / spaces . cc <nl> ppp b / src / heap / spaces . 
cc <nl> HeapObject * LargeObjectIterator : : Next ( ) { <nl> / / LargeObjectSpace <nl> <nl> LargeObjectSpace : : LargeObjectSpace ( Heap * heap ) <nl> - : LargeObjectSpace ( heap , LO_SPACE ) { } <nl> - <nl> - LargeObjectSpace : : LargeObjectSpace ( Heap * heap , AllocationSpace id ) <nl> - : Space ( heap , id ) , <nl> + : Space ( heap , LO_SPACE ) , / / Managed on a per - allocation basis <nl> size_ ( 0 ) , <nl> page_count_ ( 0 ) , <nl> objects_size_ ( 0 ) , <nl> void Page : : Print ( ) { <nl> } <nl> <nl> # endif / / DEBUG <nl> - <nl> - NewLargeObjectSpace : : NewLargeObjectSpace ( Heap * heap ) <nl> - : LargeObjectSpace ( heap , NEW_LO_SPACE ) { } <nl> - <nl> - size_t NewLargeObjectSpace : : Available ( ) { <nl> - / / TODO ( hpayer ) : Update as soon as we have a growing strategy . <nl> - return 0 ; <nl> - } <nl> } / / namespace internal <nl> } / / namespace v8 <nl> mmm a / src / heap / spaces . h <nl> ppp b / src / heap / spaces . h <nl> class LargeObjectSpace : public Space { <nl> typedef LargePageIterator iterator ; <nl> <nl> explicit LargeObjectSpace ( Heap * heap ) ; <nl> - LargeObjectSpace ( Heap * heap , AllocationSpace id ) ; <nl> - <nl> ~ LargeObjectSpace ( ) override { TearDown ( ) ; } <nl> <nl> / / Releases internal resources , frees objects in this space . <nl> class LargeObjectSpace : public Space { <nl> friend class LargeObjectIterator ; <nl> } ; <nl> <nl> - class NewLargeObjectSpace : public LargeObjectSpace { <nl> - public : <nl> - explicit NewLargeObjectSpace ( Heap * heap ) ; <nl> - <nl> - / / Available bytes for objects in this space . <nl> - size_t Available ( ) override ; <nl> - } ; <nl> <nl> class LargeObjectIterator : public ObjectIterator { <nl> public : <nl> mmm a / src / snapshot / serializer - common . h <nl> ppp b / src / snapshot / serializer - common . h <nl> class SerializerDeserializer : public RootVisitor { <nl> / / No reservation for large object space necessary . <nl> / / We also handle map space differenly . <nl> STATIC_ASSERT ( MAP_SPACE = = CODE_SPACE + 1 ) ; <nl> - <nl> - / / We do not support young generation large objects . <nl> - STATIC_ASSERT ( LAST_SPACE = = NEW_LO_SPACE ) ; <nl> - STATIC_ASSERT ( LAST_SPACE - 1 = = LO_SPACE ) ; <nl> static const int kNumberOfPreallocatedSpaces = CODE_SPACE + 1 ; <nl> - static const int kNumberOfSpaces = LO_SPACE + 1 ; <nl> + static const int kNumberOfSpaces = LAST_SPACE + 1 ; <nl> <nl> protected : <nl> static bool CanBeDeferred ( HeapObject * o ) ; <nl> mmm a / src / snapshot / serializer . cc <nl> ppp b / src / snapshot / serializer . cc <nl> void Serializer < AllocatorT > : : ObjectSerializer : : SerializeObject ( ) { <nl> Map * map = object_ - > map ( ) ; <nl> AllocationSpace space = <nl> MemoryChunk : : FromAddress ( object_ - > address ( ) ) - > owner ( ) - > identity ( ) ; <nl> - DCHECK ( space ! = NEW_LO_SPACE ) ; <nl> SerializePrologue ( space , size , map ) ; <nl> <nl> / / Serialize the rest of the object . <nl> mmm a / test / cctest / heap / test - heap . cc <nl> ppp b / test / cctest / heap / test - heap . cc <nl> TEST ( Regress618958 ) { <nl> ! 
heap - > incremental_marking ( ) - > IsStopped ( ) ) ) ; <nl> } <nl> <nl> - TEST ( YoungGenerationLargeObjectAllocation ) { <nl> - FLAG_young_generation_large_objects = true ; <nl> - CcTest : : InitializeVM ( ) ; <nl> - v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> - Heap * heap = CcTest : : heap ( ) ; <nl> - Isolate * isolate = heap - > isolate ( ) ; <nl> - <nl> - Handle < FixedArray > array = isolate - > factory ( ) - > NewFixedArray ( 200000 ) ; <nl> - MemoryChunk * chunk = MemoryChunk : : FromAddress ( array - > address ( ) ) ; <nl> - CHECK ( chunk - > owner ( ) - > identity ( ) = = NEW_LO_SPACE ) ; <nl> - } <nl> - <nl> TEST ( UncommitUnusedLargeObjectMemory ) { <nl> CcTest : : InitializeVM ( ) ; <nl> v8 : : HandleScope scope ( CcTest : : isolate ( ) ) ; <nl> mmm a / test / cctest / test - api . cc <nl> ppp b / test / cctest / test - api . cc <nl> TEST ( GetHeapSpaceStatistics ) { <nl> v8 : : HeapSpaceStatistics space_statistics ; <nl> isolate - > GetHeapSpaceStatistics ( & space_statistics , i ) ; <nl> CHECK_NOT_NULL ( space_statistics . space_name ( ) ) ; <nl> - if ( strcmp ( space_statistics . space_name ( ) , " new_large_object_space " ) = = 0 ) { <nl> - continue ; <nl> - } <nl> CHECK_GT ( space_statistics . space_size ( ) , 0u ) ; <nl> total_size + = space_statistics . space_size ( ) ; <nl> CHECK_GT ( space_statistics . space_used_size ( ) , 0u ) ; <nl>
|
Revert "[heap] Adds a young generation large object space"
|
v8/v8
|
e5416386e615cca88b1f70aa7d1b6e7161ed1ba6
|
2018-06-28T06:18:35Z
|
mmm a / Marlin / Configuration_adv . h <nl> ppp b / Marlin / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / Marlin / src / gcode / motion / G2_G3 . cpp <nl> ppp b / Marlin / src / gcode / motion / G2_G3 . cpp <nl> void plan_arc ( <nl> mm_of_travel = linear_travel ? HYPOT ( flat_mm , linear_travel ) : ABS ( flat_mm ) ; <nl> if ( mm_of_travel < 0 . 001f ) return ; <nl> <nl> - uint16_t segments = FLOOR ( mm_of_travel / ( MM_PER_ARC_SEGMENT ) ) ; <nl> + const feedRate_t scaled_fr_mm_s = MMS_SCALED ( feedrate_mm_s ) ; <nl> + <nl> + # ifdef ARC_SEGMENTS_PER_SEC <nl> + float seg_length = scaled_fr_mm_s * _RECIP ( ARC_SEGMENTS_PER_SEC ) ; <nl> + NOLESS ( seg_length , MM_PER_ARC_SEGMENT ) ; <nl> + # else <nl> + constexpr float seg_length = MM_PER_ARC_SEGMENT ; <nl> + # endif <nl> + uint16_t segments = FLOOR ( mm_of_travel / seg_length ) ; <nl> NOLESS ( segments , min_segments ) ; <nl> <nl> / * * <nl> void plan_arc ( <nl> / / Initialize the extruder axis <nl> raw . e = current_position . e ; <nl> <nl> - const feedRate_t scaled_fr_mm_s = MMS_SCALED ( feedrate_mm_s ) ; <nl> <nl> # if ENABLED ( SCARA_FEEDRATE_SCALING ) <nl> - const float inv_duration = scaled_fr_mm_s / MM_PER_ARC_SEGMENT ; <nl> + const float inv_duration = scaled_fr_mm_s / seg_length ; <nl> # endif <nl> <nl> millis_t next_idle_ms = millis ( ) + 200UL ; <nl> void plan_arc ( <nl> planner . apply_leveling ( raw ) ; <nl> # endif <nl> <nl> - if ( ! planner . buffer_line ( raw , scaled_fr_mm_s , active_extruder , MM_PER_ARC_SEGMENT <nl> + if ( ! planner . buffer_line ( raw , scaled_fr_mm_s , active_extruder , seg_length <nl> # if ENABLED ( SCARA_FEEDRATE_SCALING ) <nl> , inv_duration <nl> # endif <nl> void plan_arc ( <nl> planner . apply_leveling ( raw ) ; <nl> # endif <nl> <nl> - planner . buffer_line ( raw , scaled_fr_mm_s , active_extruder , MM_PER_ARC_SEGMENT <nl> + planner . buffer_line ( raw , scaled_fr_mm_s , active_extruder , seg_length <nl> # if ENABLED ( SCARA_FEEDRATE_SCALING ) <nl> , inv_duration <nl> # endif <nl> mmm a / config / default / Configuration_adv . h <nl> ppp b / config / default / Configuration_adv . 
h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / 3DFabXYZ / Migbot / Configuration_adv . h <nl> ppp b / config / examples / 3DFabXYZ / Migbot / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> diff - - git a / config / examples / ADIMLab / Gantry v1 / Configuration_adv . h b / config / examples / ADIMLab / Gantry v1 / Configuration_adv . h <nl> mmm a / config / examples / ADIMLab / Gantry v1 / Configuration_adv . h <nl> ppp b / config / examples / ADIMLab / Gantry v1 / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> diff - - git a / config / examples / ADIMLab / Gantry v2 / Configuration_adv . h b / config / examples / ADIMLab / Gantry v2 / Configuration_adv . 
h <nl> mmm a / config / examples / ADIMLab / Gantry v2 / Configuration_adv . h <nl> ppp b / config / examples / ADIMLab / Gantry v2 / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / AlephObjects / TAZ4 / Configuration_adv . h <nl> ppp b / config / examples / AlephObjects / TAZ4 / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / Alfawise / U20 - bltouch / Configuration_adv . h <nl> ppp b / config / examples / Alfawise / U20 - bltouch / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / Alfawise / U20 / Configuration_adv . h <nl> ppp b / config / examples / Alfawise / U20 / Configuration_adv . 
h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / AliExpress / UM2pExt / Configuration_adv . h <nl> ppp b / config / examples / AliExpress / UM2pExt / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / Anet / A2 / Configuration_adv . h <nl> ppp b / config / examples / Anet / A2 / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / Anet / A2plus / Configuration_adv . h <nl> ppp b / config / examples / Anet / A2plus / Configuration_adv . 
h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / Anet / A6 / Configuration_adv . h <nl> ppp b / config / examples / Anet / A6 / Configuration_adv . h <nl> <nl> / / <nl> / / # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / Anet / A8 / Configuration_adv . h <nl> ppp b / config / examples / Anet / A8 / Configuration_adv . h <nl> <nl> / / <nl> / / # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / Anet / A8plus / Configuration_adv . h <nl> ppp b / config / examples / Anet / A8plus / Configuration_adv . 
h <nl> <nl> / / <nl> / / # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / Anet / E10 / Configuration_adv . h <nl> ppp b / config / examples / Anet / E10 / Configuration_adv . h <nl> <nl> / / <nl> / / # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / Anet / E16 / Configuration_adv . h <nl> ppp b / config / examples / Anet / E16 / Configuration_adv . h <nl> <nl> / / <nl> / / # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / AnyCubic / i3 / Configuration_adv . h <nl> ppp b / config / examples / AnyCubic / i3 / Configuration_adv . 
h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / ArmEd / Configuration_adv . h <nl> ppp b / config / examples / ArmEd / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / Artillery / Genius / Configuration_adv . h <nl> ppp b / config / examples / Artillery / Genius / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> diff - - git a / config / examples / Artillery / Sidewinder X1 / Configuration_adv . h b / config / examples / Artillery / Sidewinder X1 / Configuration_adv . h <nl> mmm a / config / examples / Artillery / Sidewinder X1 / Configuration_adv . h <nl> ppp b / config / examples / Artillery / Sidewinder X1 / Configuration_adv . 
h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / BIBO / TouchX / cyclops / Configuration_adv . h <nl> ppp b / config / examples / BIBO / TouchX / cyclops / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / BIBO / TouchX / default / Configuration_adv . h <nl> ppp b / config / examples / BIBO / TouchX / default / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / BQ / Hephestos / Configuration_adv . h <nl> ppp b / config / examples / BQ / Hephestos / Configuration_adv . 
h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / BQ / Hephestos_2 / Configuration_adv . h <nl> ppp b / config / examples / BQ / Hephestos_2 / Configuration_adv . h <nl> <nl> / / <nl> / / # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / BQ / WITBOX / Configuration_adv . h <nl> ppp b / config / examples / BQ / WITBOX / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> diff - - git a / config / examples / BigTreeTech / SKR Mini E3 1 . 0 / Configuration_adv . h b / config / examples / BigTreeTech / SKR Mini E3 1 . 0 / Configuration_adv . h <nl> mmm a / config / examples / BigTreeTech / SKR Mini E3 1 . 0 / Configuration_adv . h <nl> ppp b / config / examples / BigTreeTech / SKR Mini E3 1 . 
0 / Configuration_adv . h <nl> <nl> / / <nl> / / # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> diff - - git a / config / examples / BigTreeTech / SKR Mini E3 1 . 2 / Configuration_adv . h b / config / examples / BigTreeTech / SKR Mini E3 1 . 2 / Configuration_adv . h <nl> mmm a / config / examples / BigTreeTech / SKR Mini E3 1 . 2 / Configuration_adv . h <nl> ppp b / config / examples / BigTreeTech / SKR Mini E3 1 . 2 / Configuration_adv . h <nl> <nl> / / <nl> / / # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / Cartesio / Configuration_adv . h <nl> ppp b / config / examples / Cartesio / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / Creality / CR - 10 / Configuration_adv . 
h <nl> ppp b / config / examples / Creality / CR - 10 / Configuration_adv . h <nl> <nl> / / <nl> / / # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / Creality / CR - 10S / Configuration_adv . h <nl> ppp b / config / examples / Creality / CR - 10S / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / Creality / CR - 10_5S / Configuration_adv . h <nl> ppp b / config / examples / Creality / CR - 10_5S / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / Creality / CR - 10mini / Configuration_adv . h <nl> ppp b / config / examples / Creality / CR - 10mini / Configuration_adv . 
h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> diff - - git a / config / examples / Creality / CR - 20 Pro / Configuration_adv . h b / config / examples / Creality / CR - 20 Pro / Configuration_adv . h <nl> mmm a / config / examples / Creality / CR - 20 Pro / Configuration_adv . h <nl> ppp b / config / examples / Creality / CR - 20 Pro / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / Creality / CR - 20 / Configuration_adv . h <nl> ppp b / config / examples / Creality / CR - 20 / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / Creality / CR - 8 / Configuration_adv . h <nl> ppp b / config / examples / Creality / CR - 8 / Configuration_adv . 
h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / Creality / Ender - 2 / Configuration_adv . h <nl> ppp b / config / examples / Creality / Ender - 2 / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / Creality / Ender - 3 / Configuration_adv . h <nl> ppp b / config / examples / Creality / Ender - 3 / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / Creality / Ender - 4 / Configuration_adv . h <nl> ppp b / config / examples / Creality / Ender - 4 / Configuration_adv . 
h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> diff - - git a / config / examples / Creality / Ender - 5 Pro / Configuration_adv . h b / config / examples / Creality / Ender - 5 Pro / Configuration_adv . h <nl> mmm a / config / examples / Creality / Ender - 5 Pro / Configuration_adv . h <nl> ppp b / config / examples / Creality / Ender - 5 Pro / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / Creality / Ender - 5 / Configuration_adv . h <nl> ppp b / config / examples / Creality / Ender - 5 / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> diff - - git a / config / examples / Dagoma / Disco Ultimate / Configuration_adv . 
h b / config / examples / Dagoma / Disco Ultimate / Configuration_adv . h <nl> mmm a / config / examples / Dagoma / Disco Ultimate / Configuration_adv . h <nl> ppp b / config / examples / Dagoma / Disco Ultimate / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> diff - - git a / config / examples / EXP3D / Imprimante multifonction / Configuration_adv . h b / config / examples / EXP3D / Imprimante multifonction / Configuration_adv . h <nl> mmm a / config / examples / EXP3D / Imprimante multifonction / Configuration_adv . h <nl> ppp b / config / examples / EXP3D / Imprimante multifonction / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> - # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> - # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> + # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> <nl> / / Support for G5 with XYZE destination and IJPQ offsets . Requires ~ 2666 bytes . <nl> mmm a / config / examples / Einstart - S / Configuration_adv . h <nl> ppp b / config / examples / Einstart - S / Configuration_adv . 
h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / FYSETC / AIO_II / Configuration_adv . h <nl> ppp b / config / examples / FYSETC / AIO_II / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> diff - - git a / config / examples / FYSETC / Cheetah 1 . 2 / BLTouch / Configuration_adv . h b / config / examples / FYSETC / Cheetah 1 . 2 / BLTouch / Configuration_adv . h <nl> mmm a / config / examples / FYSETC / Cheetah 1 . 2 / BLTouch / Configuration_adv . h <nl> ppp b / config / examples / FYSETC / Cheetah 1 . 2 / BLTouch / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> diff - - git a / config / examples / FYSETC / Cheetah 1 . 2 / base / Configuration_adv . 
h b / config / examples / FYSETC / Cheetah 1 . 2 / base / Configuration_adv . h <nl> mmm a / config / examples / FYSETC / Cheetah 1 . 2 / base / Configuration_adv . h <nl> ppp b / config / examples / FYSETC / Cheetah 1 . 2 / base / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / FYSETC / Cheetah / BLTouch / Configuration_adv . h <nl> ppp b / config / examples / FYSETC / Cheetah / BLTouch / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / FYSETC / Cheetah / base / Configuration_adv . h <nl> ppp b / config / examples / FYSETC / Cheetah / base / Configuration_adv . 
h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / FYSETC / F6_13 / Configuration_adv . h <nl> ppp b / config / examples / FYSETC / F6_13 / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / FYSETC / S6 / Configuration_adv . h <nl> ppp b / config / examples / FYSETC / S6 / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / Felix / DUAL / Configuration_adv . h <nl> ppp b / config / examples / Felix / DUAL / Configuration_adv . 
h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / Felix / Single / Configuration_adv . h <nl> ppp b / config / examples / Felix / Single / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / FlashForge / CreatorPro / Configuration_adv . h <nl> ppp b / config / examples / FlashForge / CreatorPro / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / FolgerTech / i3 - 2020 / Configuration_adv . h <nl> ppp b / config / examples / FolgerTech / i3 - 2020 / Configuration_adv . 
h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / Formbot / Raptor / Configuration_adv . h <nl> ppp b / config / examples / Formbot / Raptor / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / Formbot / T_Rex_2 + / Configuration_adv . h <nl> ppp b / config / examples / Formbot / T_Rex_2 + / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / Formbot / T_Rex_3 / Configuration_adv . h <nl> ppp b / config / examples / Formbot / T_Rex_3 / Configuration_adv . 
h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / Geeetech / A10 / Configuration_adv . h <nl> ppp b / config / examples / Geeetech / A10 / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / Geeetech / A10D / Configuration_adv . h <nl> ppp b / config / examples / Geeetech / A10D / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / Geeetech / A10M / Configuration_adv . h <nl> ppp b / config / examples / Geeetech / A10M / Configuration_adv . 
h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / Geeetech / A10T / Configuration_adv . h <nl> ppp b / config / examples / Geeetech / A10T / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / Geeetech / A20 / Configuration_adv . h <nl> ppp b / config / examples / Geeetech / A20 / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / Geeetech / A20M / Configuration_adv . h <nl> ppp b / config / examples / Geeetech / A20M / Configuration_adv . 
h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / Geeetech / A20T / Configuration_adv . h <nl> ppp b / config / examples / Geeetech / A20T / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / Geeetech / A30 / Configuration_adv . h <nl> ppp b / config / examples / Geeetech / A30 / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> - # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> - # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> + # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> <nl> / / Support for G5 with XYZE destination and IJPQ offsets . Requires ~ 2666 bytes . 
- unused by most slicers <nl> mmm a / config / examples / Geeetech / D200 / Configuration_adv . h <nl> ppp b / config / examples / Geeetech / D200 / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> - # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> - # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> + # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> <nl> / / Support for G5 with XYZE destination and IJPQ offsets . Requires ~ 2666 bytes . - unused by most slicers <nl> mmm a / config / examples / Geeetech / E180 / Configuration_adv . h <nl> ppp b / config / examples / Geeetech / E180 / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> - # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> - # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> + # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> <nl> / / Support for G5 with XYZE destination and IJPQ offsets . Requires ~ 2666 bytes . - unused by most slicers <nl> mmm a / config / examples / Geeetech / M201 / Configuration_adv . h <nl> ppp b / config / examples / Geeetech / M201 / Configuration_adv . 
h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> - # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> - # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> + # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> <nl> / / Support for G5 with XYZE destination and IJPQ offsets . Requires ~ 2666 bytes . - unused by most slicers <nl> mmm a / config / examples / Geeetech / MeCreator2 / Configuration_adv . h <nl> ppp b / config / examples / Geeetech / MeCreator2 / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> diff - - git a / config / examples / Geeetech / PI3A PRO / Configuration_adv . h b / config / examples / Geeetech / PI3A PRO / Configuration_adv . h <nl> mmm a / config / examples / Geeetech / PI3A PRO / Configuration_adv . h <nl> ppp b / config / examples / Geeetech / PI3A PRO / Configuration_adv . 
h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> diff - - git a / config / examples / Geeetech / Prusa i3 Pro C / Configuration_adv . h b / config / examples / Geeetech / Prusa i3 Pro C / Configuration_adv . h <nl> mmm a / config / examples / Geeetech / Prusa i3 Pro C / Configuration_adv . h <nl> ppp b / config / examples / Geeetech / Prusa i3 Pro C / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> diff - - git a / config / examples / Geeetech / Prusa i3 Pro W / Configuration_adv . h b / config / examples / Geeetech / Prusa i3 Pro W / Configuration_adv . h <nl> mmm a / config / examples / Geeetech / Prusa i3 Pro W / Configuration_adv . h <nl> ppp b / config / examples / Geeetech / Prusa i3 Pro W / Configuration_adv . 
h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / HMS434 / Configuration_adv . h <nl> ppp b / config / examples / HMS434 / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / Infitary / i3 - M508 / Configuration_adv . h <nl> ppp b / config / examples / Infitary / i3 - M508 / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / JGAurora / A1 / Configuration_adv . h <nl> ppp b / config / examples / JGAurora / A1 / Configuration_adv . 
h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / JGAurora / A5 / Configuration_adv . h <nl> ppp b / config / examples / JGAurora / A5 / Configuration_adv . h <nl> <nl> / / <nl> / / # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / JGAurora / A5S / Configuration_adv . h <nl> ppp b / config / examples / JGAurora / A5S / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / JGAurora / Magic / Configuration_adv . h <nl> ppp b / config / examples / JGAurora / Magic / Configuration_adv . 
h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / MakerFarm / Pegasus_12 / Configuration_adv . h <nl> ppp b / config / examples / MakerFarm / Pegasus_12 / Configuration_adv . h <nl> <nl> / / <nl> / / # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / MakerParts / Configuration_adv . h <nl> ppp b / config / examples / MakerParts / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / Malyan / M150 / Configuration_adv . h <nl> ppp b / config / examples / Malyan / M150 / Configuration_adv . 
h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / Malyan / M200 / Configuration_adv . h <nl> ppp b / config / examples / Malyan / M200 / Configuration_adv . h <nl> <nl> / / <nl> / / # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / Micromake / C1 / enhanced / Configuration_adv . h <nl> ppp b / config / examples / Micromake / C1 / enhanced / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / Mks / Robin / Configuration_adv . h <nl> ppp b / config / examples / Mks / Robin / Configuration_adv . 
h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / Mks / Sbase / Configuration_adv . h <nl> ppp b / config / examples / Mks / Sbase / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / Modix / Big60 / Configuration_adv . h <nl> ppp b / config / examples / Modix / Big60 / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / RapideLite / RL200 / Configuration_adv . h <nl> ppp b / config / examples / RapideLite / RL200 / Configuration_adv . 
h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / Renkforce / RF100 / Configuration_adv . h <nl> ppp b / config / examples / Renkforce / RF100 / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / Renkforce / RF100XL / Configuration_adv . h <nl> ppp b / config / examples / Renkforce / RF100XL / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / Renkforce / RF100v2 / Configuration_adv . h <nl> ppp b / config / examples / Renkforce / RF100v2 / Configuration_adv . 
h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / RigidBot / Configuration_adv . h <nl> ppp b / config / examples / RigidBot / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / SCARA / MP_SCARA / Configuration_adv . h <nl> ppp b / config / examples / SCARA / MP_SCARA / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / SCARA / Morgan / Configuration_adv . h <nl> ppp b / config / examples / SCARA / Morgan / Configuration_adv . 
h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / STM32 / Black_STM32F407VET6 / Configuration_adv . h <nl> ppp b / config / examples / STM32 / Black_STM32F407VET6 / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / Sanguinololu / Configuration_adv . h <nl> ppp b / config / examples / Sanguinololu / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / Tevo / Michelangelo / Configuration_adv . h <nl> ppp b / config / examples / Tevo / Michelangelo / Configuration_adv . 
h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / Tevo / Nereus / Configuration_adv . h <nl> ppp b / config / examples / Tevo / Nereus / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> diff - - git a / config / examples / Tevo / Tarantula Pro / Configuration_adv . h b / config / examples / Tevo / Tarantula Pro / Configuration_adv . h <nl> mmm a / config / examples / Tevo / Tarantula Pro / Configuration_adv . h <nl> ppp b / config / examples / Tevo / Tarantula Pro / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> diff - - git a / config / examples / Tevo / Tornado / V1 ( MKS Base ) / Configuration_adv . h b / config / examples / Tevo / Tornado / V1 ( MKS Base ) / Configuration_adv . 
h <nl> mmm a / config / examples / Tevo / Tornado / V1 ( MKS Base ) / Configuration_adv . h <nl> ppp b / config / examples / Tevo / Tornado / V1 ( MKS Base ) / Configuration_adv . h <nl> <nl> / / <nl> / / # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> diff - - git a / config / examples / Tevo / Tornado / V2 ( MKS GEN - L ) / Configuration_adv . h b / config / examples / Tevo / Tornado / V2 ( MKS GEN - L ) / Configuration_adv . h <nl> mmm a / config / examples / Tevo / Tornado / V2 ( MKS GEN - L ) / Configuration_adv . h <nl> ppp b / config / examples / Tevo / Tornado / V2 ( MKS GEN - L ) / Configuration_adv . h <nl> <nl> / / <nl> / / # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / TheBorg / Configuration_adv . h <nl> ppp b / config / examples / TheBorg / Configuration_adv . 
h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / TinyBoy2 / Configuration_adv . h <nl> ppp b / config / examples / TinyBoy2 / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / Tronxy / X3A / Configuration_adv . h <nl> ppp b / config / examples / Tronxy / X3A / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / Tronxy / X5S - 2E / Configuration_adv . h <nl> ppp b / config / examples / Tronxy / X5S - 2E / Configuration_adv . 
h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / UltiMachine / Archim1 / Configuration_adv . h <nl> ppp b / config / examples / UltiMachine / Archim1 / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / UltiMachine / Archim2 / Configuration_adv . h <nl> ppp b / config / examples / UltiMachine / Archim2 / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / VORONDesign / Configuration_adv . h <nl> ppp b / config / examples / VORONDesign / Configuration_adv . 
h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / Velleman / K8200 / Configuration_adv . h <nl> ppp b / config / examples / Velleman / K8200 / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / Velleman / K8400 / Dual - head / Configuration_adv . h <nl> ppp b / config / examples / Velleman / K8400 / Dual - head / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / Velleman / K8400 / Single - head / Configuration_adv . h <nl> ppp b / config / examples / Velleman / K8400 / Single - head / Configuration_adv . 
h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / WASP / PowerWASP / Configuration_adv . h <nl> ppp b / config / examples / WASP / PowerWASP / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> diff - - git a / config / examples / Wanhao / Duplicator 6 / Configuration_adv . h b / config / examples / Wanhao / Duplicator 6 / Configuration_adv . h <nl> mmm a / config / examples / Wanhao / Duplicator 6 / Configuration_adv . h <nl> ppp b / config / examples / Wanhao / Duplicator 6 / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> diff - - git a / config / examples / Wanhao / Duplicator i3 2 . 1 / Configuration_adv . h b / config / examples / Wanhao / Duplicator i3 2 . 1 / Configuration_adv . 
h <nl> mmm a / config / examples / Wanhao / Duplicator i3 2 . 1 / Configuration_adv . h <nl> ppp b / config / examples / Wanhao / Duplicator i3 2 . 1 / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> diff - - git a / config / examples / Wanhao / Duplicator i3 Mini / Configuration_adv . h b / config / examples / Wanhao / Duplicator i3 Mini / Configuration_adv . h <nl> mmm a / config / examples / Wanhao / Duplicator i3 Mini / Configuration_adv . h <nl> ppp b / config / examples / Wanhao / Duplicator i3 Mini / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / delta / Anycubic / Kossel / Configuration_adv . h <nl> ppp b / config / examples / delta / Anycubic / Kossel / Configuration_adv . 
h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / delta / Dreammaker / Overlord / Configuration_adv . h <nl> ppp b / config / examples / delta / Dreammaker / Overlord / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / delta / Dreammaker / Overlord_Pro / Configuration_adv . h <nl> ppp b / config / examples / delta / Dreammaker / Overlord_Pro / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / delta / FLSUN / QQ - S / Configuration_adv . h <nl> ppp b / config / examples / delta / FLSUN / QQ - S / Configuration_adv . 
h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / delta / FLSUN / auto_calibrate / Configuration_adv . h <nl> ppp b / config / examples / delta / FLSUN / auto_calibrate / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / delta / FLSUN / kossel / Configuration_adv . h <nl> ppp b / config / examples / delta / FLSUN / kossel / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / delta / FLSUN / kossel_mini / Configuration_adv . h <nl> ppp b / config / examples / delta / FLSUN / kossel_mini / Configuration_adv . 
h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> diff - - git a / config / examples / delta / Geeetech / Rostock 301 / Configuration_adv . h b / config / examples / delta / Geeetech / Rostock 301 / Configuration_adv . h <nl> mmm a / config / examples / delta / Geeetech / Rostock 301 / Configuration_adv . h <nl> ppp b / config / examples / delta / Geeetech / Rostock 301 / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / delta / MKS / SBASE / Configuration_adv . h <nl> ppp b / config / examples / delta / MKS / SBASE / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> diff - - git a / config / examples / delta / Tevo Little Monster / Configuration_adv . 
h b / config / examples / delta / Tevo Little Monster / Configuration_adv . h <nl> mmm a / config / examples / delta / Tevo Little Monster / Configuration_adv . h <nl> ppp b / config / examples / delta / Tevo Little Monster / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / delta / generic / Configuration_adv . h <nl> ppp b / config / examples / delta / generic / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / delta / kossel_clear / Configuration_adv . h <nl> ppp b / config / examples / delta / kossel_clear / Configuration_adv . h <nl> <nl> / / <nl> / / # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / delta / kossel_mini / Configuration_adv . 
h <nl> ppp b / config / examples / delta / kossel_mini / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / delta / kossel_xl / Configuration_adv . h <nl> ppp b / config / examples / delta / kossel_xl / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / gCreate / gMax1 . 5 + / Configuration_adv . h <nl> ppp b / config / examples / gCreate / gMax1 . 5 + / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / makibox / Configuration_adv . h <nl> ppp b / config / examples / makibox / Configuration_adv . 
h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / tvrrug / Round2 / Configuration_adv . h <nl> ppp b / config / examples / tvrrug / Round2 / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl> mmm a / config / examples / wt150 / Configuration_adv . h <nl> ppp b / config / examples / wt150 / Configuration_adv . h <nl> <nl> / / <nl> # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> # if ENABLED ( ARC_SUPPORT ) <nl> - # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> - # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> - # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> + # define MM_PER_ARC_SEGMENT 1 / / ( mm ) Length ( or minimum length ) of each arc segment <nl> + # define MIN_ARC_SEGMENTS 24 / / Minimum number of segments in a complete circle <nl> + / / # define ARC_SEGMENTS_PER_SEC 50 / / Use feedrate to choose segment length ( with MM_PER_ARC_SEGMENT as the minimum ) <nl> + # define N_ARC_CORRECTION 25 / / Number of interpolated segments between corrections <nl> / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> # endif <nl>
|
Add ARC_SEGMENTS_PER_SEC for finer G2 / G3 arcs ( )
|
MarlinFirmware/Marlin
|
36d08f8ad354005dcb45e2907fec5ece45f309a7
|
2020-01-09T10:48:00Z
|
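The record above introduces ARC_SEGMENTS_PER_SEC alongside the existing MM_PER_ARC_SEGMENT and MIN_ARC_SEGMENTS options in the example configurations. As a rough illustration of how a feedrate-based segment density can coexist with a fixed minimum segment length, here is a minimal standalone C++ sketch; the helper names (arc_segment_length_mm, arc_segment_count) and the exact scaling are assumptions made for this example and are not the actual Marlin planner implementation.

#include <algorithm>
#include <cmath>
#include <cstdint>

// Config values mirroring the options touched by the commit (assumed defaults).
constexpr float MM_PER_ARC_SEGMENT   = 1.0f;          // (mm) minimum length of each arc segment
constexpr float MIN_ARC_SEGMENTS     = 24.0f;         // minimum segments in a complete circle
constexpr float ARC_SEGMENTS_PER_SEC = 50.0f;         // optional feedrate-based segment density
constexpr float TWO_PI               = 6.28318530718f;

// Choose a segment length from the commanded feedrate (mm/s): at 50 segments per
// second a 50 mm/s move gets 1 mm segments, faster moves get longer segments, and
// MM_PER_ARC_SEGMENT acts as the lower bound.
inline float arc_segment_length_mm(float feedrate_mm_s) {
  return std::max(feedrate_mm_s / ARC_SEGMENTS_PER_SEC, MM_PER_ARC_SEGMENT);
}

// Number of linear segments used to approximate an arc of the given radius (mm)
// swept through the given angle (radians).
inline std::uint16_t arc_segment_count(float radius_mm, float angle_rad, float feedrate_mm_s) {
  const float arc_len  = std::fabs(angle_rad) * radius_mm;
  const float seg_len  = arc_segment_length_mm(feedrate_mm_s);
  // MIN_ARC_SEGMENTS is defined per full circle, so scale it by the swept fraction.
  const float min_segs = std::max(1.0f, MIN_ARC_SEGMENTS * std::fabs(angle_rad) / TWO_PI);
  return static_cast<std::uint16_t>(std::floor(std::max(arc_len / seg_len, min_segs)));
}

With ARC_SEGMENTS_PER_SEC left commented out (as it is in these example configs), a fixed MM_PER_ARC_SEGMENT would apply instead; enabling it trades segment count against feedrate, so slow, small arcs stay smooth without flooding the planner with tiny segments on fast moves.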
mmm a / contracts / eosiolib / db . h <nl> ppp b / contracts / eosiolib / db . h <nl> int32_t db_get_i64 ( int32_t iterator , const void * data , uint32_t len ) ; <nl> int32_t db_next_i64 ( int32_t iterator , uint64_t * primary ) ; <nl> int32_t db_previous_i64 ( int32_t iterator , uint64_t * primary ) ; <nl> int32_t db_find_i64 ( account_name code , account_name scope , table_name table , uint64_t id ) ; <nl> - int32_t db_lowerbound_i64 ( account_name code , account_name scope , table_name table , uint64_t id ) ; <nl> - int32_t db_upperbound_i64 ( account_name code , account_name scope , table_name table , uint64_t id ) ; <nl> + int32_t db_lowerbound_i64 ( account_name code , account_name scope , table_name table , uint64_t id ) ; <nl> + int32_t db_upperbound_i64 ( account_name code , account_name scope , table_name table , uint64_t id ) ; <nl> + int32_t db_end_i64 ( account_name code , account_name scope , table_name table ) ; <nl> <nl> int32_t db_idx64_store ( account_name scope , table_name table , account_name payer , uint64_t id , const uint64_t * secondary ) ; <nl> void db_idx64_update ( int32_t iterator , account_name payer , const uint64_t * secondary ) ; <nl> int32_t db_idx64_find_primary ( account_name code , account_name scope , table_name <nl> int32_t db_idx64_find_secondary ( account_name code , account_name scope , table_name table , const uint64_t * secondary , uint64_t * primary ) ; <nl> int32_t db_idx64_lowerbound ( account_name code , account_name scope , table_name table , uint64_t * secondary , uint64_t * primary ) ; <nl> int32_t db_idx64_upperbound ( account_name code , account_name scope , table_name table , uint64_t * secondary , uint64_t * primary ) ; <nl> + int32_t db_idx64_end ( account_name code , account_name scope , table_name table ) ; <nl> <nl> int32_t db_idx128_store ( account_name scope , table_name table , account_name payer , uint64_t id , const uint128_t * secondary ) ; <nl> void db_idx128_update ( int32_t iterator , account_name payer , const uint128_t * secondary ) ; <nl> int32_t db_idx128_find_primary ( account_name code , account_name scope , table_name <nl> int32_t db_idx128_find_secondary ( account_name code , account_name scope , table_name table , const uint128_t * secondary , uint64_t * primary ) ; <nl> int32_t db_idx128_lowerbound ( account_name code , account_name scope , table_name table , uint128_t * secondary , uint64_t * primary ) ; <nl> int32_t db_idx128_upperbound ( account_name code , account_name scope , table_name table , uint128_t * secondary , uint64_t * primary ) ; <nl> + int32_t db_idx128_end ( account_name code , account_name scope , table_name table ) ; <nl> <nl> int32_t db_idx256_store ( account_name scope , table_name table , account_name payer , uint64_t id , const uint256 * secondary ) ; <nl> void db_idx256_update ( int32_t iterator , account_name payer , const uint256 * secondary ) ; <nl> int32_t db_idx256_find_primary ( account_name code , account_name scope , table_name <nl> int32_t db_idx256_find_secondary ( account_name code , account_name scope , table_name table , const uint256 * secondary , uint64_t * primary ) ; <nl> int32_t db_idx256_lowerbound ( account_name code , account_name scope , table_name table , uint256 * secondary , uint64_t * primary ) ; <nl> int32_t db_idx256_upperbound ( account_name code , account_name scope , table_name table , uint256 * secondary , uint64_t * primary ) ; <nl> + int32_t db_idx256_end ( account_name code , account_name scope , table_name table ) ; <nl> <nl> } <nl> mmm a / contracts / 
eosiolib / multi_index . hpp <nl> ppp b / contracts / eosiolib / multi_index . hpp <nl> struct secondary_iterator ; <nl> template < > \ <nl> struct secondary_iterator < TYPE > { \ <nl> static int db_idx_next ( int iterator , uint64_t * primary ) { return db_ # # IDX # # _next ( iterator , primary ) ; } \ <nl> - static int db_idx_prev ( int iterator , uint64_t * primary ) { return db_ # # IDX # # _previous ( iterator , primary ) ; } \ <nl> + static int db_idx_previous ( int iterator , uint64_t * primary ) { return db_ # # IDX # # _previous ( iterator , primary ) ; } \ <nl> static void db_idx_remove ( int iterator ) { db_ # # IDX # # _remove ( iterator ) ; } \ <nl> + static int db_idx_end ( uint64_t code , uint64_t scope , uint64_t table ) { return db_ # # IDX # # _end ( code , scope , table ) ; } \ <nl> } ; \ <nl> int db_idx_store ( uint64_t scope , uint64_t table , uint64_t payer , uint64_t id , const TYPE & secondary ) { \ <nl> return db_ # # IDX # # _store ( scope , table , payer , id , & secondary ) ; \ <nl> struct index_by { <nl> int lower_bound ( uint64_t code , uint64_t scope , secondary_type & secondary , uint64_t & primary ) const { <nl> return db_idx_lowerbound ( code , scope , TableName , secondary , primary ) ; <nl> } <nl> + <nl> int upper_bound ( uint64_t code , uint64_t scope , secondary_type & secondary , uint64_t & primary ) const { <nl> return db_idx_upperbound ( code , scope , TableName , secondary , primary ) ; <nl> } <nl> class multi_index <nl> public : <nl> <nl> struct const_iterator { <nl> - private : <nl> - <nl> public : <nl> friend bool operator = = ( const const_iterator & a , const const_iterator & b ) { <nl> return a . _item = = b . _item ; <nl> class multi_index <nl> } <nl> <nl> const_iterator & operator + + ( ) { <nl> + if ( ! _item ) return * this ; <nl> + <nl> uint64_t next_pk = 0 ; <nl> auto next_itr = secondary_iterator < secondary_key_type > : : db_idx_next ( _item - > __iters [ IndexType : : index_number ] , & next_pk ) ; <nl> - if ( next_itr = = - 1 ) { <nl> + if ( next_itr < 0 ) { <nl> _item = nullptr ; <nl> return * this ; <nl> } <nl> class multi_index <nl> <nl> const_iterator & operator - - ( ) { <nl> uint64_t prev_pk = 0 ; <nl> - auto prev_itr = secondary_iterator < secondary_key_type > : : db_idx_prev ( _item - > __iters [ IndexType : : index_number ] , & prev_pk ) ; <nl> - if ( prev_itr = = - 1 ) { <nl> + int prev_itr = - 1 ; <nl> + <nl> + if ( ! _item ) { <nl> + auto ei = secondary_iterator < secondary_key_type > : : db_idx_end ( _idx . _multidx . _code , _idx . _multidx . _scope , TableName ) ; <nl> + prev_itr = secondary_iterator < secondary_key_type > : : db_idx_previous ( ei , & prev_pk ) ; <nl> + } <nl> + else <nl> + prev_itr = secondary_iterator < secondary_key_type > : : db_idx_previous ( _item - > __iters [ IndexType : : index_number ] , & prev_pk ) ; <nl> + <nl> + if ( prev_itr < 0 ) { <nl> _item = nullptr ; <nl> return * this ; <nl> } <nl> class multi_index <nl> uint64_t primary = 0 ; <nl> typename IndexType : : secondary_type secondary_copy ( secondary ) ; <nl> auto itr = _idx . lower_bound ( _multidx . _code , _multidx . _scope , secondary_copy , primary ) ; <nl> - if ( itr = = - 1 ) return end ( ) ; <nl> + if ( itr < 0 ) return end ( ) ; <nl> <nl> const T & obj = * _multidx . find ( primary ) ; <nl> auto & mi = const_cast < item_type & > ( static_cast < const item_type & > ( obj ) ) ; <nl> class multi_index <nl> uint64_t primary = 0 ; <nl> typename IndexType : : secondary_type secondary_copy ( secondary ) ; <nl> auto itr = _idx . 
upper_bound ( _multidx . _code , _multidx . _scope , secondary_copy , primary ) ; <nl> - if ( itr = = - 1 ) return end ( ) ; <nl> + if ( itr < 0 ) return end ( ) ; <nl> <nl> const T & obj = * _multidx . find ( primary ) ; <nl> auto & mi = const_cast < item_type & > ( static_cast < const item_type & > ( obj ) ) ; <nl> class multi_index <nl> } <nl> <nl> private : <nl> + friend class const_iterator ; <nl> friend class multi_index ; <nl> index ( const MultiIndexType & midx , const IndexType & idx ) <nl> : _multidx ( midx ) , _idx ( idx ) { } <nl> class multi_index <nl> <nl> const_iterator & operator + + ( ) { <nl> / / eosio_assert ( _item , " null ptr " ) ; <nl> - uint64_t pk ; <nl> - auto next_itr = db_next_i64 ( _item - > __primary_itr , & pk ) ; <nl> - if ( next_itr = = - 1 ) { <nl> + if ( ! _item ) return * this ; <nl> + <nl> + uint64_t next_pk ; <nl> + auto next_itr = db_next_i64 ( _item - > __primary_itr , & next_pk ) ; <nl> + if ( next_itr < 0 ) { <nl> _item = nullptr ; <nl> return * this ; <nl> } <nl> class multi_index <nl> return * this ; <nl> } <nl> const_iterator & operator - - ( ) { <nl> + uint64_t prev_pk ; <nl> + int prev_itr = - 1 ; <nl> + <nl> / / eosio_assert ( _item , " null ptr " ) ; <nl> - uint64_t pk ; <nl> - auto next_itr = db_previous_i64 ( _item - > __primary_itr , & pk ) ; <nl> - if ( next_itr = = - 1 ) { <nl> + if ( ! _item ) { <nl> + auto ei = db_end_i64 ( _multidx . _code , _multidx . _scope , TableName ) ; <nl> + prev_itr = db_previous_i64 ( ei , & prev_pk ) ; <nl> + } <nl> + else <nl> + prev_itr = db_previous_i64 ( _item - > __primary_itr , & prev_pk ) ; <nl> + <nl> + if ( prev_itr < 0 ) { <nl> _item = nullptr ; <nl> return * this ; <nl> } <nl> - _item = & _multidx . load_object_by_primary_iterator ( next_itr ) ; <nl> + _item = & _multidx . load_object_by_primary_iterator ( prev_itr ) ; <nl> return * this ; <nl> } <nl> <nl> class multi_index <nl> <nl> const_iterator lower_bound ( uint64_t primary = 0 ) const { <nl> auto itr = db_lowerbound_i64 ( _code , _scope , TableName , primary ) ; <nl> - if ( itr = = - 1 ) return end ( ) ; <nl> + if ( itr < 0 ) return end ( ) ; <nl> auto & obj = load_object_by_primary_iterator ( itr ) ; <nl> return const_iterator ( * this , & obj ) ; <nl> } <nl> <nl> const_iterator upper_bound ( uint64_t primary = 0 ) const { <nl> auto itr = db_upperbound_i64 ( _code , _scope , TableName , primary ) ; <nl> - if ( itr = = - 1 ) return end ( ) ; <nl> + if ( itr < 0 ) return end ( ) ; <nl> auto & obj = load_object_by_primary_iterator ( itr ) ; <nl> return const_iterator ( * this , & obj ) ; <nl> } <nl> <nl> - void new_id ( uint64_t payer ) const { <nl> - uint64_t val = 1 ; <nl> - auto itr = db_find_i64 ( _code , _scope , TableName + 1 , 0 ) ; <nl> - if ( itr ! = - 1 ) { <nl> - auto s = db_get_i64 ( itr , ( char * ) & val , sizeof ( val ) ) ; <nl> - + + val ; <nl> - db_update_i64 ( itr , 0 , ( const char * ) & val , sizeof ( val ) ) ; <nl> - } <nl> - else { <nl> - db_store_i64 ( _scope , TableName + 1 , payer , 0 , ( char * ) & val , sizeof ( val ) ) ; <nl> - } <nl> - } <nl> + <nl> template < uint64_t IndexName > <nl> auto get_index ( ) const { <nl> const auto & idx = boost : : hana : : find_if ( _indices , [ ] ( auto x ) { <nl> class multi_index <nl> if ( boost : : hana : : at_c < std : : decay < decltype ( idx ) > : : type : : index_number > ( secondary_keys ) ! = secondary ) { <nl> auto indexitr = mutableitem . __iters [ idx . index_number ] ; <nl> <nl> - if ( indexitr = = - 1 ) <nl> + if ( indexitr < 0 ) <nl> indexitr = mutableitem . 
__iters [ idx . index_number ] = idx . find_primary ( _code , _scope , pk , secondary ) ; <nl> <nl> idx . update ( indexitr , payer , secondary ) ; <nl> class multi_index <nl> return & * cacheitr ; <nl> <nl> int itr = db_find_i64 ( _code , _scope , TableName , primary ) ; <nl> - if ( itr = = - 1 ) return nullptr ; <nl> + if ( itr < 0 ) return nullptr ; <nl> <nl> const item & i = load_object_by_primary_iterator ( itr ) ; <nl> return & static_cast < const T & > ( i ) ; <nl> class multi_index <nl> <nl> boost : : hana : : for_each ( _indices , [ & ] ( auto & idx ) { <nl> auto i = objitem . __iters [ idx . index_number ] ; <nl> - if ( i = = - 1 ) { <nl> + if ( i < 0 ) { <nl> typename std : : decay < decltype ( idx ) > : : type : : secondary_type second ; <nl> i = idx . find_primary ( _code , _scope , objitem . primary_key ( ) , second ) ; <nl> } <nl> - if ( i ! = - 1 ) <nl> + if ( i > = 0 ) <nl> idx . remove ( i ) ; <nl> } ) ; <nl> <nl> mmm a / contracts / test_api / test_db . cpp <nl> ppp b / contracts / test_api / test_db . cpp <nl> unsigned int test_db : : key_str_table ( ) { <nl> <nl> const char * atr [ ] = { " atr " , " atr " , " atr " , " atr " } ; <nl> const char * ztr [ ] = { " ztr " , " ztr " , " ztr " , " ztr " } ; <nl> - <nl> + <nl> eosio : : var_table < N ( tester ) , N ( tester ) , N ( atr ) , N ( tester ) , char * > StringTableAtr ; <nl> eosio : : var_table < N ( tester ) , N ( tester ) , N ( ztr ) , N ( tester ) , char * > StringTableZtr ; <nl> eosio : : var_table < N ( tester ) , N ( tester ) , N ( str ) , N ( tester ) , char * > StringTableStr ; <nl> unsigned int test_db : : key_str_table ( ) { <nl> for ( int ii = 0 ; ii < 4 ; + + ii ) { <nl> res = StringTableAtr . store ( ( char * ) keys [ ii ] , STRLEN ( keys [ ii ] ) , ( char * ) atr [ ii ] , STRLEN ( atr [ ii ] ) ) ; <nl> eos_assert ( res ! = 0 , " atr " ) ; <nl> - <nl> + <nl> res = StringTableZtr . store ( ( char * ) keys [ ii ] , STRLEN ( keys [ ii ] ) , ( char * ) ztr [ ii ] , STRLEN ( ztr [ ii ] ) ) ; <nl> eos_assert ( res ! = 0 , " ztr " ) ; <nl> } <nl> <nl> char tmp [ 64 ] ; <nl> - <nl> + <nl> res = StringTableStr . store ( ( char * ) keys [ 0 ] , STRLEN ( keys [ 0 ] ) , ( char * ) vals [ 0 ] , STRLEN ( vals [ 0 ] ) ) ; <nl> eos_assert ( res ! = 0 , " store alice " ) ; <nl> <nl> unsigned int test_db : : key_i64_general ( ) { <nl> <nl> res = previous_i64 ( current_receiver ( ) , current_receiver ( ) , N ( test_table ) , & tmp , sizeof ( test_model ) ) ; <nl> eos_assert ( res = = sizeof ( test_model ) & & tmp . name = = N ( carol ) & & tmp . age = = 30 & & tmp . phone = = 545342453 , " carol previous " ) ; <nl> - <nl> + <nl> res = previous_i64 ( current_receiver ( ) , current_receiver ( ) , N ( test_table ) , & tmp , sizeof ( test_model ) ) ; <nl> eos_assert ( res = = sizeof ( test_model ) & & tmp . name = = N ( bob ) & & tmp . age = = 15 & & tmp . phone = = 11932435 , " bob previous " ) ; <nl> <nl> unsigned int test_db : : key_i64_general ( ) { <nl> <nl> alice . age = 21 ; <nl> alice . phone = 1234 ; <nl> - <nl> + <nl> res = store_i64 ( current_receiver ( ) , N ( test_table ) , & alice , sizeof ( test_model ) ) ; <nl> eos_assert ( res = = 0 , " store alice 2 " ) ; <nl> <nl> my_memset ( & alice , 0 , sizeof ( test_model ) ) ; <nl> alice . name = N ( alice ) ; <nl> - <nl> + <nl> res = load_i64 ( current_receiver ( ) , current_receiver ( ) , N ( test_table ) , & alice , sizeof ( test_model ) ) ; <nl> eos_assert ( res = = sizeof ( test_model ) & & alice . age = = 21 & & alice . 
phone = = 1234 , " alice error 2 " ) ; <nl> <nl> unsigned int test_db : : key_i64_general ( ) { <nl> <nl> res = load_i64 ( current_receiver ( ) , current_receiver ( ) , N ( test_table ) , & tmp2 , sizeof ( test_model_v3 ) ) ; <nl> eos_assert ( res = = sizeof ( test_model_v2 ) & & <nl> - tmp2 . age = = 21 & & <nl> + tmp2 . age = = 21 & & <nl> tmp2 . phone = = 1234 & & <nl> tmp2 . new_field = = 66655444 , <nl> " load4update " ) ; <nl> unsigned int test_db : : key_i64_general ( ) { <nl> <nl> res = load_i64 ( current_receiver ( ) , current_receiver ( ) , N ( test_table ) , & tmp2 , sizeof ( test_model_v3 ) ) ; <nl> eos_assert ( res = = sizeof ( test_model_v3 ) & & <nl> - tmp2 . age = = 21 & & <nl> + tmp2 . age = = 21 & & <nl> tmp2 . phone = = 1234 & & <nl> tmp2 . new_field = = 66655444 & & <nl> tmp2 . another_field = = 221122 , <nl> unsigned int test_db : : key_i64_general ( ) { <nl> <nl> res = load_i64 ( current_receiver ( ) , current_receiver ( ) , N ( test_table ) , & tmp2 , sizeof ( test_model_v3 ) ) ; <nl> eos_assert ( res = = sizeof ( test_model_v3 ) & & <nl> - tmp2 . age = = 11 & & <nl> + tmp2 . age = = 11 & & <nl> tmp2 . phone = = 1234 & & <nl> tmp2 . new_field = = 66655444 & & <nl> tmp2 . another_field = = 221122 , <nl> unsigned int test_db : : key_i64_general ( ) { <nl> # if 0 <nl> <nl> unsigned int test_db : : key_i64_remove_all ( ) { <nl> - <nl> + <nl> uint32_t res = 0 ; <nl> uint64_t key ; <nl> <nl> key = N ( alice ) ; <nl> res = remove_i64 ( current_receiver ( ) , N ( test_table ) , & key ) ; <nl> eos_assert ( res = = 1 , " remove alice " ) ; <nl> - <nl> + <nl> key = N ( bob ) ; <nl> res = remove_i64 ( current_receiver ( ) , N ( test_table ) , & key ) ; <nl> eos_assert ( res = = 1 , " remove bob " ) ; <nl> - <nl> + <nl> key = N ( carol ) ; <nl> res = remove_i64 ( current_receiver ( ) , N ( test_table ) , & key ) ; <nl> eos_assert ( res = = 1 , " remove carol " ) ; <nl> - <nl> + <nl> key = N ( dave ) ; <nl> res = remove_i64 ( current_receiver ( ) , N ( test_table ) , & key ) ; <nl> eos_assert ( res = = 1 , " remove dave " ) ; <nl> unsigned int test_db : : key_i64_remove_all ( ) { <nl> <nl> res = back_i64 ( current_receiver ( ) , current_receiver ( ) , N ( test_table ) , & tmp , sizeof ( test_model ) ) ; <nl> eos_assert ( res = = - 1 , " back_i64_i64 remove " ) ; <nl> - <nl> + <nl> key = N ( alice ) ; <nl> res = remove_i64 ( current_receiver ( ) , N ( test_table ) , & key ) ; <nl> eos_assert ( res = = 0 , " remove alice 1 " ) ; <nl> - <nl> + <nl> key = N ( bob ) ; <nl> res = remove_i64 ( current_receiver ( ) , N ( test_table ) , & key ) ; <nl> eos_assert ( res = = 0 , " remove bob 1 " ) ; <nl> - <nl> + <nl> key = N ( carol ) ; <nl> res = remove_i64 ( current_receiver ( ) , N ( test_table ) , & key ) ; <nl> eos_assert ( res = = 0 , " remove carol 1 " ) ; <nl> - <nl> + <nl> key = N ( dave ) ; <nl> res = remove_i64 ( current_receiver ( ) , N ( test_table ) , & key ) ; <nl> eos_assert ( res = = 0 , " remove dave 1 " ) ; <nl> <nl> - <nl> + <nl> return WASM_TEST_PASS ; <nl> } <nl> <nl> unsigned int test_db : : key_i64_not_found ( ) { <nl> } <nl> <nl> unsigned int test_db : : key_i64_front_back ( ) { <nl> - <nl> + <nl> uint32_t res = 0 ; <nl> <nl> test_model dave { N ( dave ) , 46 , 6535354 } ; <nl> unsigned int test_db : : key_i64_front_back ( ) { <nl> <nl> key = N ( dave ) ; <nl> remove_i64 ( current_receiver ( ) , N ( b ) , & key ) ; <nl> - <nl> + <nl> res = front_i64 ( current_receiver ( ) , current_receiver ( ) , N ( b ) , & tmp , sizeof ( test_model ) ) ; <nl> eos_assert ( res = = - 
1 , " key_i64_front 9 " ) ; <nl> res = back_i64 ( current_receiver ( ) , current_receiver ( ) , N ( b ) , & tmp , sizeof ( test_model ) ) ; <nl> unsigned int store_set_in_table ( uint64_t table_name ) <nl> { <nl> <nl> uint32_t res = 0 ; <nl> - <nl> + <nl> TestModel128x2 alice0 { 0 , 500 , N ( alice0 ) , table_name } ; <nl> TestModel128x2 alice1 { 1 , 400 , N ( alice1 ) , table_name } ; <nl> TestModel128x2 alice2 { 2 , 300 , N ( alice2 ) , table_name } ; <nl> unsigned int store_set_in_table ( TestModel3xi64 * records , int len , uint64_t table <nl> # if 0 <nl> <nl> unsigned int test_db : : key_i64i64i64_general ( ) { <nl> - <nl> + <nl> uint32_t res = 0 ; <nl> <nl> TestModel3xi64 records [ ] = { <nl> unsigned int test_db : : key_i64i64i64_general ( ) { <nl> V = { 4 } ; LOAD_OK ( primary , i64i64i64 , N ( table2 ) , 7 , " i64x3 LOAD primary 4 " ) ; <nl> V = { 5 } ; LOAD_OK ( primary , i64i64i64 , N ( table2 ) , 9 , " i64x3 LOAD primary 5 " ) ; <nl> V = { 6 } ; LOAD_ER ( primary , i64i64i64 , N ( table2 ) , " i64x3 LOAD primary fail 6 " ) ; <nl> - <nl> + <nl> V = { 11 , 0 } ; LOAD_OK ( secondary , i64i64i64 , N ( table2 ) , 7 , " i64x3 LOAD secondary 0 " ) ; <nl> V = { 11 , 1 } ; LOAD_OK ( secondary , i64i64i64 , N ( table2 ) , 0 , " i64x3 LOAD secondary 1 " ) ; <nl> V = { 11 , 2 } ; LOAD_OK ( secondary , i64i64i64 , N ( table2 ) , 10 , " i64x3 LOAD secondary 2 " ) ; <nl> unsigned int test_db : : key_i64i64i64_general ( ) { <nl> v2 . new_field = 555 ; <nl> <nl> res = update_i64i64i64 ( current_receiver ( ) , N ( table2 ) , & v2 , sizeof ( TestModel3xi64_V2 ) ) ; <nl> - eos_assert ( res = = 1 , " store v2 " ) ; <nl> + eos_assert ( res = = 1 , " store v2 " ) ; <nl> <nl> res = LOAD ( primary , i64i64i64 , N ( table2 ) , v2 ) ; <nl> eos_assert ( res = = sizeof ( TestModel3xi64_V2 ) , " load v2 updated " ) ; <nl> unsigned int test_db : : key_i128i128_general ( ) { <nl> <nl> my_memset ( & tmp , 0 , sizeof ( TestModel128x2 ) ) ; <nl> tmp . price = 4 ; <nl> - <nl> + <nl> res = load_secondary_i128i128 ( current_receiver ( ) , current_receiver ( ) , N ( table5 ) , & tmp , sizeof ( TestModel128x2 ) ) ; <nl> eos_assert ( res = = sizeof ( TestModel128x2 ) & & <nl> tmp . number = = 13 & & <nl> unsigned int test_db : : key_i128i128_general ( ) { <nl> tmp . extra = = N ( alice0 ) & & <nl> tmp . table_name = = N ( table5 ) , <nl> " front primary load " ) ; <nl> - <nl> + <nl> res = previous_primary_i128i128 ( current_receiver ( ) , current_receiver ( ) , N ( table5 ) , & tmp , sizeof ( TestModel128x2 ) ) ; <nl> eos_assert ( res = = - 1 , " previous primary fail " ) ; <nl> <nl> unsigned int test_db : : key_i128i128_general ( ) { <nl> tmp . extra = = N ( bob0 ) & & <nl> tmp . table_name = = N ( table5 ) , <nl> " front secondary ok " ) ; <nl> - <nl> + <nl> res = previous_secondary_i128i128 ( current_receiver ( ) , current_receiver ( ) , N ( table5 ) , & tmp , sizeof ( TestModel128x2 ) ) ; <nl> eos_assert ( res = = - 1 , " previous secondary fail " ) ; <nl> <nl> unsigned int test_db : : key_i128i128_general ( ) { <nl> tmp . extra = = N ( dave3 ) & & <nl> tmp . table_name = = N ( table5 ) , <nl> " back primary ok " ) ; <nl> - <nl> + <nl> res = next_primary_i128i128 ( current_receiver ( ) , current_receiver ( ) , N ( table5 ) , & tmp , sizeof ( TestModel128x2 ) ) ; <nl> eos_assert ( res = = - 1 , " next primary fail " ) ; <nl> <nl> unsigned int test_db : : key_i128i128_general ( ) { <nl> tmp . extra = = N ( carol0 ) & & <nl> tmp . 
table_name = = N ( table5 ) , <nl> " back secondary ok " ) ; <nl> - <nl> + <nl> res = next_secondary_i128i128 ( current_receiver ( ) , current_receiver ( ) , N ( table5 ) , & tmp , sizeof ( TestModel128x2 ) ) ; <nl> eos_assert ( res = = - 1 , " next secondary fail " ) ; <nl> <nl> res = previous_secondary_i128i128 ( current_receiver ( ) , current_receiver ( ) , N ( table5 ) , & tmp , sizeof ( TestModel128x2 ) ) ; <nl> - <nl> + <nl> eos_assert ( res = = sizeof ( TestModel128x2 ) & & <nl> tmp . number = = 21 & & <nl> tmp . price = = 800 & & <nl> unsigned int test_db : : key_i128i128_general ( ) { <nl> tmp2 . extra = = N ( carol0 ) & & <nl> tmp2 . table_name = = N ( table5 ) , <nl> " ub secondary ok " ) ; <nl> - <nl> + <nl> tmp2 . new_field = 123456 ; <nl> res = update_i128i128 ( current_receiver ( ) , N ( table5 ) , & tmp2 , sizeof ( TestModel128x2_V2 ) ) ; <nl> eos_assert ( res = = 1 , " update_i128i128 ok " ) ; <nl> void test_db : : primary_i64_general ( ) <nl> / / nothing after charlie <nl> uint64_t prim = 0 ; <nl> int end_itr = db_next_i64 ( charlie_itr , & prim ) ; <nl> - eosio_assert ( end_itr = = - 1 , " primary_i64_general - db_next_i64 " ) ; <nl> + eosio_assert ( end_itr < 0 , " primary_i64_general - db_next_i64 " ) ; <nl> / / prim didn ' t change <nl> eosio_assert ( prim = = 0 , " primary_i64_general - db_next_i64 " ) ; <nl> } <nl> void test_db : : primary_i64_general ( ) <nl> eosio_assert ( itr_prev = = itr_prev_expected & & prim = = N ( alice ) , " primary_i64_general - db_previous_i64 " ) ; <nl> <nl> itr_prev = db_previous_i64 ( itr_prev , & prim ) ; <nl> - itr_prev_expected = - 1 ; <nl> - eosio_assert ( itr_prev = = itr_prev_expected & & prim = = N ( alice ) , " primary_i64_general - db_previous_i64 " ) ; <nl> + eosio_assert ( itr_prev < 0 & & prim = = N ( alice ) , " primary_i64_general - db_previous_i64 " ) ; <nl> } <nl> <nl> / / remove <nl> { <nl> int itr = db_find_i64 ( current_receiver ( ) , current_receiver ( ) , table1 , N ( alice ) ) ; <nl> - eosio_assert ( itr ! = - 1 , " primary_i64_general - db_find_i64 " ) ; <nl> + eosio_assert ( itr > = 0 , " primary_i64_general - db_find_i64 " ) ; <nl> db_remove_i64 ( itr ) ; <nl> itr = db_find_i64 ( current_receiver ( ) , current_receiver ( ) , table1 , N ( alice ) ) ; <nl> - eosio_assert ( itr = = - 1 , " primary_i64_general - db_find_i64 " ) ; <nl> + eosio_assert ( itr < 0 , " primary_i64_general - db_find_i64 " ) ; <nl> } <nl> <nl> / / get <nl> { <nl> int itr = db_find_i64 ( current_receiver ( ) , current_receiver ( ) , table1 , N ( bob ) ) ; <nl> - eosio_assert ( itr ! = - 1 , " " ) ; <nl> + eosio_assert ( itr > = 0 , " " ) ; <nl> int buffer_len = 5 ; <nl> char value [ 50 ] ; <nl> auto len = db_get_i64 ( itr , value , buffer_len ) ; <nl> void test_db : : primary_i64_general ( ) <nl> std : : string s ( value ) ; <nl> eosio_assert ( len = = strlen ( " bob ' s info " ) , " primary_i64_general - db_get_i64 " ) ; <nl> eosio_assert ( s = = " bob ' s " , " primary_i64_general - db_get_i64 " ) ; <nl> - <nl> + <nl> buffer_len = 20 ; <nl> db_get_i64 ( itr , value , buffer_len ) ; <nl> value [ buffer_len ] = ' \ 0 ' ; <nl> void test_db : : primary_i64_general ( ) <nl> / / update <nl> { <nl> int itr = db_find_i64 ( current_receiver ( ) , current_receiver ( ) , table1 , N ( bob ) ) ; <nl> - eosio_assert ( itr ! 
= - 1 , " " ) ; <nl> + eosio_assert ( itr > = 0 , " " ) ; <nl> const char * new_value = " bob ' s new info " ; <nl> - int new_value_len = strlen ( new_value ) ; <nl> + int new_value_len = strlen ( new_value ) ; <nl> db_update_i64 ( itr , current_receiver ( ) , new_value , new_value_len ) ; <nl> char ret_value [ 50 ] ; <nl> auto len = db_get_i64 ( itr , ret_value , new_value_len ) ; <nl> void test_db : : primary_i64_lowerbound ( ) <nl> } <nl> { <nl> int lb = db_lowerbound_i64 ( current_receiver ( ) , current_receiver ( ) , table , N ( kevin ) ) ; <nl> - eosio_assert ( lb = = - 1 , err . c_str ( ) ) ; <nl> + eosio_assert ( lb < 0 , err . c_str ( ) ) ; <nl> } <nl> } <nl> <nl> void test_db : : primary_i64_upperbound ( ) <nl> } <nl> { <nl> int ub = db_upperbound_i64 ( current_receiver ( ) , current_receiver ( ) , table , N ( joe ) ) ; <nl> - eosio_assert ( ub = = - 1 , err . c_str ( ) ) ; <nl> + eosio_assert ( ub < 0 , err . c_str ( ) ) ; <nl> } <nl> { <nl> int ub = db_upperbound_i64 ( current_receiver ( ) , current_receiver ( ) , table , N ( kevin ) ) ; <nl> - eosio_assert ( ub = = - 1 , err . c_str ( ) ) ; <nl> + eosio_assert ( ub < 0 , err . c_str ( ) ) ; <nl> } <nl> } <nl> <nl> void test_db : : idx64_general ( ) <nl> { <nl> secondary_type sec = 0 ; <nl> int itr = db_idx64_find_primary ( current_receiver ( ) , current_receiver ( ) , table , & sec , 999 ) ; <nl> - eosio_assert ( itr = = - 1 & & sec = = 0 , " idx64_general - db_idx64_find_primary " ) ; <nl> + eosio_assert ( itr < 0 & & sec = = 0 , " idx64_general - db_idx64_find_primary " ) ; <nl> itr = db_idx64_find_primary ( current_receiver ( ) , current_receiver ( ) , table , & sec , 110 ) ; <nl> - eosio_assert ( itr ! = - 1 & & sec = = N ( joe ) , " idx64_general - db_idx64_find_primary " ) ; <nl> + eosio_assert ( itr > = 0 & & sec = = N ( joe ) , " idx64_general - db_idx64_find_primary " ) ; <nl> uint64_t prim_next = 0 ; <nl> int itr_next = db_idx64_next ( itr , & prim_next ) ; <nl> - eosio_assert ( itr_next = = - 1 & & prim_next = = 0 , " idx64_general - db_idx64_find_primary " ) ; <nl> + eosio_assert ( itr_next < 0 & & prim_next = = 0 , " idx64_general - db_idx64_find_primary " ) ; <nl> } <nl> <nl> / / iterate forward starting with charlie <nl> { <nl> secondary_type sec = 0 ; <nl> int itr = db_idx64_find_primary ( current_receiver ( ) , current_receiver ( ) , table , & sec , 234 ) ; <nl> - eosio_assert ( itr ! = - 1 & & sec = = N ( charlie ) , " idx64_general - db_idx64_find_primary " ) ; <nl> + eosio_assert ( itr > = 0 & & sec = = N ( charlie ) , " idx64_general - db_idx64_find_primary " ) ; <nl> <nl> uint64_t prim_next = 0 ; <nl> int itr_next = db_idx64_next ( itr , & prim_next ) ; <nl> - eosio_assert ( itr_next ! = - 1 & & prim_next = = 976 , " idx64_general - db_idx64_next " ) ; <nl> + eosio_assert ( itr_next > = 0 & & prim_next = = 976 , " idx64_general - db_idx64_next " ) ; <nl> secondary_type sec_next = 0 ; <nl> int itr_next_expected = db_idx64_find_primary ( current_receiver ( ) , current_receiver ( ) , table , & sec_next , prim_next ) ; <nl> eosio_assert ( itr_next = = itr_next_expected & & sec_next = = N ( emily ) , " idx64_general - db_idx64_next " ) ; <nl> <nl> itr_next = db_idx64_next ( itr_next , & prim_next ) ; <nl> - eosio_assert ( itr_next ! 
= - 1 & & prim_next = = 110 , " idx64_general - db_idx64_next " ) ; <nl> + eosio_assert ( itr_next > = 0 & & prim_next = = 110 , " idx64_general - db_idx64_next " ) ; <nl> itr_next_expected = db_idx64_find_primary ( current_receiver ( ) , current_receiver ( ) , table , & sec_next , prim_next ) ; <nl> eosio_assert ( itr_next = = itr_next_expected & & sec_next = = N ( joe ) , " idx64_general - db_idx64_next " ) ; <nl> <nl> itr_next = db_idx64_next ( itr_next , & prim_next ) ; <nl> - eosio_assert ( itr_next = = - 1 & & prim_next = = 110 , " idx64_general - db_idx64_next " ) ; <nl> + eosio_assert ( itr_next < 0 & & prim_next = = 110 , " idx64_general - db_idx64_next " ) ; <nl> } <nl> <nl> / / iterate backward staring with second bob <nl> { <nl> secondary_type sec = 0 ; <nl> int itr = db_idx64_find_primary ( current_receiver ( ) , current_receiver ( ) , table , & sec , 781 ) ; <nl> - eosio_assert ( itr ! = - 1 & & sec = = N ( bob ) , " idx64_general - db_idx64_find_primary " ) ; <nl> + eosio_assert ( itr > = 0 & & sec = = N ( bob ) , " idx64_general - db_idx64_find_primary " ) ; <nl> <nl> uint64_t prim_prev = 0 ; <nl> int itr_prev = db_idx64_previous ( itr , & prim_prev ) ; <nl> - eosio_assert ( itr_prev ! = - 1 & & prim_prev = = 540 , " idx64_general - db_idx64_previous " ) ; <nl> + eosio_assert ( itr_prev > = 0 & & prim_prev = = 540 , " idx64_general - db_idx64_previous " ) ; <nl> <nl> secondary_type sec_prev = 0 ; <nl> int itr_prev_expected = db_idx64_find_primary ( current_receiver ( ) , current_receiver ( ) , table , & sec_prev , prim_prev ) ; <nl> eosio_assert ( itr_prev = = itr_prev_expected & & sec_prev = = N ( bob ) , " idx64_general - db_idx64_previous " ) ; <nl> <nl> itr_prev = db_idx64_previous ( itr_prev , & prim_prev ) ; <nl> - eosio_assert ( itr_prev ! = - 1 & & prim_prev = = 650 , " idx64_general - db_idx64_previous " ) ; <nl> + eosio_assert ( itr_prev > = 0 & & prim_prev = = 650 , " idx64_general - db_idx64_previous " ) ; <nl> itr_prev_expected = db_idx64_find_primary ( current_receiver ( ) , current_receiver ( ) , table , & sec_prev , prim_prev ) ; <nl> eosio_assert ( itr_prev = = itr_prev_expected & & sec_prev = = N ( allyson ) , " idx64_general - db_idx64_previous " ) ; <nl> <nl> itr_prev = db_idx64_previous ( itr_prev , & prim_prev ) ; <nl> - eosio_assert ( itr_prev ! = - 1 & & prim_prev = = 265 , " idx64_general - db_idx64_previous " ) ; <nl> + eosio_assert ( itr_prev > = 0 & & prim_prev = = 265 , " idx64_general - db_idx64_previous " ) ; <nl> itr_prev_expected = db_idx64_find_primary ( current_receiver ( ) , current_receiver ( ) , table , & sec_prev , prim_prev ) ; <nl> eosio_assert ( itr_prev = = itr_prev_expected & & sec_prev = = N ( alice ) , " idx64_general - db_idx64_previous " ) ; <nl> <nl> itr_prev = db_idx64_previous ( itr_prev , & prim_prev ) ; <nl> - eosio_assert ( itr_prev = = - 1 & & prim_prev = = 265 , " idx64_general - db_idx64_previous " ) ; <nl> + eosio_assert ( itr_prev < 0 & & prim_prev = = 265 , " idx64_general - db_idx64_previous " ) ; <nl> } <nl> <nl> / / find_secondary <nl> void test_db : : idx64_general ( ) <nl> uint64_t prim = 0 ; <nl> auto sec = N ( bob ) ; <nl> int itr = db_idx64_find_secondary ( current_receiver ( ) , current_receiver ( ) , table , & sec , & prim ) ; <nl> - eosio_assert ( itr ! 
= - 1 & & prim = = 540 , " idx64_general - db_idx64_find_secondary " ) ; <nl> + eosio_assert ( itr > = 0 & & prim = = 540 , " idx64_general - db_idx64_find_secondary " ) ; <nl> <nl> sec = N ( emily ) ; <nl> itr = db_idx64_find_secondary ( current_receiver ( ) , current_receiver ( ) , table , & sec , & prim ) ; <nl> - eosio_assert ( itr ! = - 1 & & prim = = 976 , " idx64_general - db_idx64_find_secondary " ) ; <nl> + eosio_assert ( itr > = 0 & & prim = = 976 , " idx64_general - db_idx64_find_secondary " ) ; <nl> <nl> sec = N ( frank ) ; <nl> itr = db_idx64_find_secondary ( current_receiver ( ) , current_receiver ( ) , table , & sec , & prim ) ; <nl> - eosio_assert ( itr = = - 1 & & prim = = 976 , " idx64_general - db_idx64_find_secondary " ) ; <nl> + eosio_assert ( itr < 0 & & prim = = 976 , " idx64_general - db_idx64_find_secondary " ) ; <nl> } <nl> <nl> / / update and remove <nl> void test_db : : idx64_general ( ) <nl> eosio_assert ( sec_itr = = itr & & sec = = new_name , " idx64_general - db_idx64_update " ) ; <nl> db_idx64_remove ( itr ) ; <nl> int itrf = db_idx64_find_primary ( current_receiver ( ) , current_receiver ( ) , table , & sec , ssn ) ; <nl> - eosio_assert ( itrf = = - 1 , " idx64_general - db_idx64_remove " ) ; <nl> + eosio_assert ( itrf < 0 , " idx64_general - db_idx64_remove " ) ; <nl> } <nl> } <nl> <nl> void test_db : : idx64_lowerbound ( ) <nl> uint64_t lb_prim = 0 ; <nl> int lb = db_idx64_lowerbound ( current_receiver ( ) , current_receiver ( ) , table , & lb_sec , & lb_prim ) ; <nl> eosio_assert ( lb_prim = = 0 & & lb_sec = = N ( kevin ) , err . c_str ( ) ) ; <nl> - eosio_assert ( lb = = - 1 , " " ) ; <nl> + eosio_assert ( lb < 0 , " " ) ; <nl> } <nl> } <nl> <nl> void test_db : : idx64_upperbound ( ) <nl> const uint64_t ssn = 110 ; <nl> int ub = db_idx64_upperbound ( current_receiver ( ) , current_receiver ( ) , table , & ub_sec , & ub_prim ) ; <nl> eosio_assert ( ub_prim = = 0 & & ub_sec = = N ( joe ) , err . c_str ( ) ) ; <nl> - eosio_assert ( ub = = - 1 , err . c_str ( ) ) ; <nl> + eosio_assert ( ub < 0 , err . c_str ( ) ) ; <nl> } <nl> { <nl> secondary_type ub_sec = N ( kevin ) ; <nl> uint64_t ub_prim = 0 ; <nl> int ub = db_idx64_upperbound ( current_receiver ( ) , current_receiver ( ) , table , & ub_sec , & ub_prim ) ; <nl> eosio_assert ( ub_prim = = 0 & & ub_sec = = N ( kevin ) , err . c_str ( ) ) ; <nl> - eosio_assert ( ub = = - 1 , err . c_str ( ) ) ; <nl> + eosio_assert ( ub < 0 , err . c_str ( ) ) ; <nl> } <nl> } <nl> mmm a / libraries / chain / apply_context . cpp <nl> ppp b / libraries / chain / apply_context . cpp <nl> void apply_context : : db_remove_i64 ( int iterator ) { <nl> int apply_context : : db_get_i64 ( int iterator , char * buffer , size_t buffer_size ) { <nl> const key_value_object & obj = keyval_cache . get ( iterator ) ; <nl> memcpy ( buffer , obj . value . data ( ) , std : : min ( obj . value . size ( ) , buffer_size ) ) ; <nl> - <nl> + <nl> return obj . value . size ( ) ; <nl> } <nl> <nl> int apply_context : : db_next_i64 ( int iterator , uint64_t & primary ) { <nl> + if ( iterator < - 1 ) / / is end iterator <nl> + return iterator ; / / Is + + end ( ) = = end ( ) desired behavior ? <nl> + <nl> const auto & obj = keyval_cache . get ( iterator ) ; <nl> const auto & idx = db . get_index < contracts : : key_value_index , contracts : : by_scope_primary > ( ) ; <nl> <nl> auto itr = idx . iterator_to ( obj ) ; <nl> + + itr ; <nl> <nl> - if ( itr = = idx . end ( ) ) return - 1 ; <nl> - if ( itr - > t_id ! = obj . 
t_id ) return - 1 ; <nl> + if ( itr = = idx . end ( ) | | itr - > t_id ! = obj . t_id ) return keyval_cache . get_end_iterator_by_table_id ( obj . t_id ) ; <nl> <nl> primary = itr - > primary_key ; <nl> return keyval_cache . add ( * itr ) ; <nl> } <nl> <nl> int apply_context : : db_previous_i64 ( int iterator , uint64_t & primary ) { <nl> - const auto & obj = keyval_cache . get ( iterator ) ; <nl> const auto & idx = db . get_index < contracts : : key_value_index , contracts : : by_scope_primary > ( ) ; <nl> <nl> + if ( iterator < - 1 ) / / is end iterator <nl> + { <nl> + auto tab = keyval_cache . find_table_by_end_iterator ( iterator ) ; <nl> + FC_ASSERT ( tab , " not a valid end iterator " ) ; <nl> + <nl> + auto itr = idx . upper_bound ( tab - > id ) ; <nl> + if ( itr = = idx . begin ( ) ) return iterator ; / / Empty table <nl> + <nl> + - - itr ; <nl> + <nl> + if ( itr - > t_id ! = tab - > id ) return iterator ; / / Empty table <nl> + <nl> + primary = itr - > primary_key ; <nl> + return keyval_cache . add ( * itr ) ; <nl> + } <nl> + <nl> + const auto & obj = keyval_cache . get ( iterator ) ; <nl> + <nl> auto itr = idx . iterator_to ( obj ) ; <nl> - if ( itr = = idx . end ( ) | | itr = = idx . begin ( ) ) return - 1 ; <nl> + if ( itr = = idx . begin ( ) ) return keyval_cache . get_end_iterator_by_table_id ( obj . t_id ) ; <nl> <nl> - - itr ; <nl> <nl> - if ( itr - > t_id ! = obj . t_id ) return - 1 ; <nl> + if ( itr - > t_id ! = obj . t_id ) return keyval_cache . get_end_iterator_by_table_id ( obj . t_id ) ; <nl> <nl> primary = itr - > primary_key ; <nl> return keyval_cache . add ( * itr ) ; <nl> } <nl> <nl> int apply_context : : db_find_i64 ( uint64_t code , uint64_t scope , uint64_t table , uint64_t id ) { <nl> - require_read_lock ( code , scope ) ; <nl> + require_read_lock ( code , scope ) ; / / redundant ? <nl> <nl> const auto * tab = find_table ( code , scope , table ) ; <nl> if ( ! tab ) return - 1 ; <nl> validate_table_key ( * tab , contracts : : table_key_type : : type_i64 ) ; <nl> <nl> + auto table_end_itr = keyval_cache . cache_table ( * tab ) ; <nl> <nl> const key_value_object * obj = db . find < key_value_object , contracts : : by_scope_primary > ( boost : : make_tuple ( tab - > id , id ) ) ; <nl> - if ( ! obj ) return - 1 ; <nl> + if ( ! obj ) return table_end_itr ; <nl> <nl> - keyval_cache . cache_table ( * tab ) ; <nl> return keyval_cache . add ( * obj ) ; <nl> } <nl> <nl> int apply_context : : db_lowerbound_i64 ( uint64_t code , uint64_t scope , uint64_t table , uint64_t id ) { <nl> - require_read_lock ( code , scope ) ; <nl> + require_read_lock ( code , scope ) ; / / redundant ? <nl> <nl> const auto * tab = find_table ( code , scope , table ) ; <nl> if ( ! tab ) return - 1 ; <nl> validate_table_key ( * tab , contracts : : table_key_type : : type_i64 ) ; <nl> <nl> + auto table_end_itr = keyval_cache . cache_table ( * tab ) ; <nl> <nl> const auto & idx = db . get_index < contracts : : key_value_index , contracts : : by_scope_primary > ( ) ; <nl> auto itr = idx . lower_bound ( boost : : make_tuple ( tab - > id , id ) ) ; <nl> - if ( itr = = idx . end ( ) ) return - 1 ; <nl> - if ( itr - > t_id ! = tab - > id ) return - 1 ; <nl> + if ( itr = = idx . end ( ) ) return table_end_itr ; <nl> + if ( itr - > t_id ! = tab - > id ) return table_end_itr ; <nl> <nl> - keyval_cache . cache_table ( * tab ) ; <nl> return keyval_cache . 
add ( * itr ) ; <nl> } <nl> <nl> int apply_context : : db_upperbound_i64 ( uint64_t code , uint64_t scope , uint64_t table , uint64_t id ) { <nl> - require_read_lock ( code , scope ) ; <nl> + require_read_lock ( code , scope ) ; / / redundant ? <nl> <nl> const auto * tab = find_table ( code , scope , table ) ; <nl> if ( ! tab ) return - 1 ; <nl> validate_table_key ( * tab , contracts : : table_key_type : : type_i64 ) ; <nl> <nl> + auto table_end_itr = keyval_cache . cache_table ( * tab ) ; <nl> <nl> const auto & idx = db . get_index < contracts : : key_value_index , contracts : : by_scope_primary > ( ) ; <nl> auto itr = idx . upper_bound ( boost : : make_tuple ( tab - > id , id ) ) ; <nl> - if ( itr = = idx . end ( ) ) return - 1 ; <nl> - if ( itr - > t_id ! = tab - > id ) return - 1 ; <nl> + if ( itr = = idx . end ( ) ) return table_end_itr ; <nl> + if ( itr - > t_id ! = tab - > id ) return table_end_itr ; <nl> <nl> - keyval_cache . cache_table ( * tab ) ; <nl> return keyval_cache . add ( * itr ) ; <nl> } <nl> <nl> + int apply_context : : db_end_i64 ( uint64_t code , uint64_t scope , uint64_t table ) { <nl> + require_read_lock ( code , scope ) ; / / redundant ? <nl> + <nl> + const auto * tab = find_table ( code , scope , table ) ; <nl> + if ( ! tab ) return - 1 ; <nl> + validate_table_key ( * tab , contracts : : table_key_type : : type_i64 ) ; <nl> + <nl> + return keyval_cache . cache_table ( * tab ) ; <nl> + } <nl> + <nl> template < > <nl> contracts : : table_key_type apply_context : : get_key_type < contracts : : key_value_object > ( ) { <nl> return contracts : : table_key_type : : type_i64 ; <nl> mmm a / libraries / chain / include / eosio / chain / apply_context . hpp <nl> ppp b / libraries / chain / include / eosio / chain / apply_context . hpp <nl> class apply_context { <nl> typedef contracts : : table_id_object table_id_object ; <nl> <nl> iterator_cache ( ) { <nl> + _end_iterator_to_table . reserve ( 8 ) ; <nl> _iterator_to_object . reserve ( 32 ) ; <nl> } <nl> <nl> - void cache_table ( const table_id_object & tobj ) { <nl> - _table_cache [ tobj . id ] = & tobj ; <nl> + / / / Returns end iterator of the table . <nl> + int cache_table ( const table_id_object & tobj ) { <nl> + auto itr = _table_cache . find ( tobj . id ) ; <nl> + if ( itr ! = _table_cache . end ( ) ) <nl> + return itr - > second . second ; <nl> + <nl> + auto ei = index_to_end_iterator ( _end_iterator_to_table . size ( ) ) ; <nl> + _end_iterator_to_table . push_back ( & tobj ) ; <nl> + _table_cache . emplace ( tobj . id , make_pair ( & tobj , ei ) ) ; <nl> + return ei ; <nl> + } <nl> + <nl> + const table_id_object & get_table ( table_id_object : : id_type i ) const { <nl> + auto itr = _table_cache . find ( i ) ; <nl> + FC_ASSERT ( itr ! = _table_cache . end ( ) , " an invariant was broken , table should be in cache " ) ; <nl> + return * itr - > second . first ; <nl> } <nl> <nl> - const table_id_object & get_table ( table_id_object : : id_type i ) { <nl> + int get_end_iterator_by_table_id ( table_id_object : : id_type i ) const { <nl> auto itr = _table_cache . find ( i ) ; <nl> FC_ASSERT ( itr ! = _table_cache . end ( ) , " an invariant was broken , table should be in cache " ) ; <nl> - return * itr - > second ; <nl> + return itr - > second . second ; <nl> + } <nl> + <nl> + const table_id_object * find_table_by_end_iterator ( int ei ) const { <nl> + FC_ASSERT ( ei < - 1 , " not an end iterator " ) ; <nl> + auto indx = end_iterator_to_index ( ei ) ; <nl> + if ( indx > = _end_iterator_to_table . 
size ( ) ) return nullptr ; <nl> + return _end_iterator_to_table [ indx ] ; <nl> } <nl> <nl> const T & get ( int iterator ) { <nl> - FC_ASSERT ( iterator > = 0 , " invalid iterator " ) ; <nl> + FC_ASSERT ( iterator ! = - 1 , " invalid iterator " ) ; <nl> + FC_ASSERT ( iterator > = 0 , " reference of end iterator " ) ; <nl> FC_ASSERT ( iterator < _iterator_to_object . size ( ) , " iterator out of range " ) ; <nl> auto result = _iterator_to_object [ iterator ] ; <nl> FC_ASSERT ( result , " reference of deleted object " ) ; <nl> class apply_context { <nl> } <nl> <nl> void remove ( int iterator ) { <nl> + FC_ASSERT ( iterator ! = - 1 , " invalid iterator " ) ; <nl> + FC_ASSERT ( iterator > = 0 , " cannot call remove on end iterators " ) ; <nl> + FC_ASSERT ( iterator < _iterator_to_object . size ( ) , " iterator out of range " ) ; <nl> auto obj_ptr = _iterator_to_object [ iterator ] ; <nl> if ( ! obj_ptr ) return ; <nl> _iterator_to_object [ iterator ] = nullptr ; <nl> class apply_context { <nl> } <nl> <nl> private : <nl> - map < table_id_object : : id_type , const table_id_object * > _table_cache ; <nl> + map < table_id_object : : id_type , pair < const table_id_object * , int > > _table_cache ; <nl> + vector < const table_id_object * > _end_iterator_to_table ; <nl> vector < const T * > _iterator_to_object ; <nl> map < const T * , int > _object_to_iterator ; <nl> + <nl> + / / / Precondition : std : : numeric_limits < int > : : min ( ) < ei < - 1 <nl> + / / / Iterator of - 1 is reserved for invalid iterators ( i . e . when the appropriate table has not yet been created ) . <nl> + inline size_t end_iterator_to_index ( int ei ) const { return ( - ei - 2 ) ; } <nl> + / / / Precondition : indx < _end_iterator_to_table . size ( ) < = std : : numeric_limits < int > : : max ( ) <nl> + inline int index_to_end_iterator ( size_t indx ) const { return - ( indx + 2 ) ; } <nl> } ; <nl> <nl> public : <nl> class apply_context { <nl> void update ( int iterator , account_name payer , const secondary_key_type & secondary ) { <nl> const auto & obj = itr_cache . get ( iterator ) ; <nl> <nl> + context . require_write_lock ( itr_cache . get_table ( obj . t_id ) . scope ) ; <nl> + <nl> if ( payer = = account_name ( ) ) payer = obj . payer ; <nl> <nl> if ( obj . payer ! = payer ) { <nl> class apply_context { <nl> } <nl> <nl> int find_secondary ( uint64_t code , uint64_t scope , uint64_t table , const secondary_key_type & secondary , uint64_t & primary ) { <nl> - auto tab = context . find_table ( context . receiver , scope , table ) ; <nl> + auto tab = context . find_table ( code , scope , table ) ; <nl> if ( ! tab ) return - 1 ; <nl> <nl> + auto table_end_itr = itr_cache . cache_table ( * tab ) ; <nl> + <nl> const auto * obj = context . db . find < ObjectType , contracts : : by_secondary > ( boost : : make_tuple ( tab - > id , secondary ) ) ; <nl> - if ( ! obj ) return - 1 ; <nl> + if ( ! obj ) return table_end_itr ; <nl> <nl> primary = obj - > primary_key ; <nl> <nl> - itr_cache . cache_table ( * tab ) ; <nl> return itr_cache . add ( * obj ) ; <nl> } <nl> <nl> int lowerbound_secondary ( uint64_t code , uint64_t scope , uint64_t table , secondary_key_type & secondary , uint64_t & primary ) { <nl> - auto tab = context . find_table ( context . receiver , scope , table ) ; <nl> + auto tab = context . find_table ( code , scope , table ) ; <nl> if ( ! tab ) return - 1 ; <nl> <nl> + auto table_end_itr = itr_cache . cache_table ( * tab ) ; <nl> + <nl> const auto & idx = context . db . 
get_index < typename chainbase : : get_index_type < ObjectType > : : type , contracts : : by_secondary > ( ) ; <nl> auto itr = idx . lower_bound ( boost : : make_tuple ( tab - > id , secondary ) ) ; <nl> - if ( itr = = idx . end ( ) ) return - 1 ; <nl> - if ( itr - > t_id ! = tab - > id ) return - 1 ; <nl> + if ( itr = = idx . end ( ) ) return table_end_itr ; <nl> + if ( itr - > t_id ! = tab - > id ) return table_end_itr ; <nl> <nl> primary = itr - > primary_key ; <nl> secondary = itr - > secondary_key ; <nl> <nl> - itr_cache . cache_table ( * tab ) ; <nl> return itr_cache . add ( * itr ) ; <nl> } <nl> <nl> int upperbound_secondary ( uint64_t code , uint64_t scope , uint64_t table , secondary_key_type & secondary , uint64_t & primary ) { <nl> - auto tab = context . find_table ( context . receiver , scope , table ) ; <nl> + auto tab = context . find_table ( code , scope , table ) ; <nl> if ( ! tab ) return - 1 ; <nl> <nl> + auto table_end_itr = itr_cache . cache_table ( * tab ) ; <nl> + <nl> const auto & idx = context . db . get_index < typename chainbase : : get_index_type < ObjectType > : : type , contracts : : by_secondary > ( ) ; <nl> auto itr = idx . upper_bound ( boost : : make_tuple ( tab - > id , secondary ) ) ; <nl> - if ( itr = = idx . end ( ) ) return - 1 ; <nl> - if ( itr - > t_id ! = tab - > id ) return - 1 ; <nl> + if ( itr = = idx . end ( ) ) return table_end_itr ; <nl> + if ( itr - > t_id ! = tab - > id ) return table_end_itr ; <nl> <nl> primary = itr - > primary_key ; <nl> secondary = itr - > secondary_key ; <nl> <nl> - itr_cache . cache_table ( * tab ) ; <nl> return itr_cache . add ( * itr ) ; <nl> } <nl> <nl> + int end_secondary ( uint64_t code , uint64_t scope , uint64_t table ) { <nl> + auto tab = context . find_table ( code , scope , table ) ; <nl> + if ( ! tab ) return - 1 ; <nl> + <nl> + return itr_cache . cache_table ( * tab ) ; <nl> + } <nl> + <nl> int next_secondary ( int iterator , uint64_t & primary ) { <nl> + if ( iterator < - 1 ) / / is end iterator <nl> + return iterator ; <nl> + <nl> const auto & obj = itr_cache . get ( iterator ) ; <nl> const auto & idx = context . db . get_index < typename chainbase : : get_index_type < ObjectType > : : type , contracts : : by_secondary > ( ) ; <nl> <nl> auto itr = idx . iterator_to ( obj ) ; <nl> - if ( itr = = idx . end ( ) ) return - 1 ; <nl> - <nl> + + itr ; <nl> <nl> - if ( itr = = idx . end ( ) | | itr - > t_id ! = obj . t_id ) return - 1 ; <nl> + if ( itr = = idx . end ( ) | | itr - > t_id ! = obj . t_id ) return itr_cache . get_end_iterator_by_table_id ( obj . t_id ) ; <nl> <nl> primary = itr - > primary_key ; <nl> return itr_cache . add ( * itr ) ; <nl> } <nl> <nl> int previous_secondary ( int iterator , uint64_t & primary ) { <nl> - const auto & obj = itr_cache . get ( iterator ) ; <nl> const auto & idx = context . db . get_index < typename chainbase : : get_index_type < ObjectType > : : type , contracts : : by_secondary > ( ) ; <nl> <nl> + if ( iterator < - 1 ) / / is end iterator <nl> + { <nl> + auto tab = itr_cache . find_table_by_end_iterator ( iterator ) ; <nl> + FC_ASSERT ( tab , " not a valid end iterator " ) ; <nl> + <nl> + auto itr = idx . upper_bound ( tab - > id ) ; <nl> + if ( itr = = idx . begin ( ) ) return iterator ; / / Empty table <nl> + <nl> + - - itr ; <nl> + <nl> + if ( itr - > t_id ! = tab - > id ) return iterator ; / / Empty table <nl> + <nl> + primary = itr - > primary_key ; <nl> + return itr_cache . add ( * itr ) ; <nl> + } <nl> + <nl> + const auto & obj = itr_cache . 
get ( iterator ) ; <nl> + <nl> auto itr = idx . iterator_to ( obj ) ; <nl> - if ( itr = = idx . end ( ) | | itr = = idx . begin ( ) ) return - 1 ; <nl> + if ( itr = = idx . begin ( ) ) return itr_cache . get_end_iterator_by_table_id ( obj . t_id ) ; <nl> <nl> - - itr ; <nl> <nl> - if ( itr - > t_id ! = obj . t_id ) return - 1 ; <nl> + if ( itr - > t_id ! = obj . t_id ) return itr_cache . get_end_iterator_by_table_id ( obj . t_id ) ; <nl> <nl> primary = itr - > primary_key ; <nl> return itr_cache . add ( * itr ) ; <nl> - } <nl> + } <nl> <nl> int find_primary ( uint64_t code , uint64_t scope , uint64_t table , secondary_key_type & secondary , uint64_t primary ) { <nl> - auto tab = context . find_table ( context . receiver , scope , table ) ; <nl> + auto tab = context . find_table ( code , scope , table ) ; <nl> if ( ! tab ) return - 1 ; <nl> <nl> + auto table_end_itr = itr_cache . cache_table ( * tab ) ; <nl> + <nl> const auto * obj = context . db . find < ObjectType , contracts : : by_primary > ( boost : : make_tuple ( tab - > id , primary ) ) ; <nl> - if ( ! obj ) return - 1 ; <nl> + if ( ! obj ) return table_end_itr ; <nl> secondary = obj - > secondary_key ; <nl> <nl> - itr_cache . cache_table ( * tab ) ; <nl> return itr_cache . add ( * obj ) ; <nl> } <nl> <nl> int lowerbound_primary ( uint64_t code , uint64_t scope , uint64_t table , uint64_t primary ) { <nl> - auto tab = context . find_table ( context . receiver , scope , table ) ; <nl> + auto tab = context . find_table ( code , scope , table ) ; <nl> if ( ! tab ) return - 1 ; <nl> <nl> + auto table_end_itr = itr_cache . cache_table ( * tab ) ; <nl> + <nl> const auto & idx = context . db . get_index < typename chainbase : : get_index_type < ObjectType > : : type , contracts : : by_primary > ( ) ; <nl> auto itr = idx . lower_bound ( boost : : make_tuple ( tab - > id , primary ) ) ; <nl> - if ( itr = = idx . end ( ) ) return - 1 ; <nl> - if ( itr - > t_id ! = tab - > id ) return - 1 ; <nl> + if ( itr = = idx . end ( ) ) return table_end_itr ; <nl> + if ( itr - > t_id ! = tab - > id ) return table_end_itr ; <nl> <nl> - itr_cache . cache_table ( * tab ) ; <nl> return itr_cache . add ( * itr ) ; <nl> } <nl> <nl> int upperbound_primary ( uint64_t code , uint64_t scope , uint64_t table , uint64_t primary ) { <nl> - auto tab = context . find_table ( context . receiver , scope , table ) ; <nl> + auto tab = context . find_table ( code , scope , table ) ; <nl> if ( ! tab ) return - 1 ; <nl> <nl> + auto table_end_itr = itr_cache . cache_table ( * tab ) ; <nl> + <nl> const auto & idx = context . db . get_index < typename chainbase : : get_index_type < ObjectType > : : type , contracts : : by_primary > ( ) ; <nl> auto itr = idx . upper_bound ( boost : : make_tuple ( tab - > id , primary ) ) ; <nl> - if ( itr = = idx . end ( ) ) return - 1 ; <nl> - if ( itr - > t_id ! = tab - > id ) return - 1 ; <nl> + if ( itr = = idx . end ( ) ) return table_end_itr ; <nl> + if ( itr - > t_id ! = tab - > id ) return table_end_itr ; <nl> <nl> itr_cache . cache_table ( * tab ) ; <nl> return itr_cache ( * itr ) ; <nl> } <nl> <nl> int next_primary ( int iterator , uint64_t & primary ) { <nl> + if ( iterator < - 1 ) / / is end iterator <nl> + return iterator ; <nl> + <nl> const auto & obj = itr_cache . get ( iterator ) ; <nl> const auto & idx = context . db . get_index < typename chainbase : : get_index_type < ObjectType > : : type , contracts : : by_primary > ( ) ; <nl> <nl> auto itr = idx . iterator_to ( obj ) ; <nl> - if ( itr = = idx . 
end ( ) ) return - 1 ; <nl> - <nl> + + itr ; <nl> <nl> - if ( itr = = idx . end ( ) | | itr - > t_id ! = obj . t_id ) return - 1 ; <nl> + if ( itr = = idx . end ( ) | | itr - > t_id ! = obj . t_id ) return itr_cache . get_end_iterator_by_table_id ( obj . t_id ) ; <nl> <nl> primary = itr - > primary_key ; <nl> return itr_cache . add ( * itr ) ; <nl> } <nl> <nl> int previous_primary ( int iterator , uint64_t & primary ) { <nl> - const auto & obj = itr_cache . get ( iterator ) ; <nl> const auto & idx = context . db . get_index < typename chainbase : : get_index_type < ObjectType > : : type , contracts : : by_primary > ( ) ; <nl> <nl> + if ( iterator < - 1 ) / / is end iterator <nl> + { <nl> + auto tab = itr_cache . find_table_by_end_iterator ( iterator ) ; <nl> + FC_ASSERT ( tab , " not a valid end iterator " ) ; <nl> + <nl> + auto itr = idx . upper_bound ( tab - > id ) ; <nl> + if ( itr = = idx . begin ( ) ) return iterator ; / / Empty table <nl> + <nl> + - - itr ; <nl> + <nl> + if ( itr - > t_id ! = tab - > id ) return iterator ; / / Empty table <nl> + <nl> + primary = itr - > primary_key ; <nl> + return itr_cache . add ( * itr ) ; <nl> + } <nl> + <nl> + const auto & obj = itr_cache . get ( iterator ) ; <nl> + <nl> auto itr = idx . iterator_to ( obj ) ; <nl> - if ( itr = = idx . end ( ) | | itr = = idx . begin ( ) ) return - 1 ; <nl> + if ( itr = = idx . begin ( ) ) return itr_cache . get_end_iterator_by_table_id ( obj . t_id ) ; <nl> <nl> - - itr ; <nl> <nl> - if ( itr - > t_id ! = obj . t_id ) return - 1 ; <nl> + if ( itr - > t_id ! = obj . t_id ) return itr_cache . get_end_iterator_by_table_id ( obj . t_id ) ; <nl> <nl> primary = itr - > primary_key ; <nl> return itr_cache . add ( * itr ) ; <nl> class apply_context { <nl> int db_find_i64 ( uint64_t code , uint64_t scope , uint64_t table , uint64_t id ) ; <nl> int db_lowerbound_i64 ( uint64_t code , uint64_t scope , uint64_t table , uint64_t id ) ; <nl> int db_upperbound_i64 ( uint64_t code , uint64_t scope , uint64_t table , uint64_t id ) ; <nl> + int db_end_i64 ( uint64_t code , uint64_t scope , uint64_t table ) ; <nl> <nl> generic_index < contracts : : index64_object > idx64 ; <nl> generic_index < contracts : : index128_object > idx128 ; <nl> mmm a / libraries / chain / wasm_interface . cpp <nl> ppp b / libraries / chain / wasm_interface . cpp <nl> class console_api : public context_aware_api { <nl> int db_ # # IDX # # _upperbound ( uint64_t code , uint64_t scope , uint64_t table , TYPE & secondary , uint64_t & primary ) { \ <nl> return context . IDX . upperbound_secondary ( code , scope , table , secondary , primary ) ; \ <nl> } \ <nl> + int db_ # # IDX # # _end ( uint64_t code , uint64_t scope , uint64_t table ) { \ <nl> + return context . IDX . end_secondary ( code , scope , table ) ; \ <nl> + } \ <nl> int db_ # # IDX # # _next ( int iterator , uint64_t & primary ) { \ <nl> return context . IDX . next_secondary ( iterator , primary ) ; \ <nl> } \ <nl> class database_api : public context_aware_api { <nl> int db_upperbound_i64 ( uint64_t code , uint64_t scope , uint64_t table , uint64_t id ) { <nl> return context . db_upperbound_i64 ( code , scope , table , id ) ; <nl> } <nl> + int db_end_i64 ( uint64_t code , uint64_t scope , uint64_t table ) { <nl> + return context . 
db_end_i64 ( code , scope , table ) ; <nl> + } <nl> <nl> DB_API_METHOD_WRAPPERS ( idx64 , uint64_t ) <nl> DB_API_METHOD_WRAPPERS ( idx128 , uint128_t ) <nl> REGISTER_INTRINSICS ( producer_api , <nl> ( db_ # # IDX # # _find_secondary , int ( int64_t , int64_t , int64_t , int , int ) ) \ <nl> ( db_ # # IDX # # _lowerbound , int ( int64_t , int64_t , int64_t , int , int ) ) \ <nl> ( db_ # # IDX # # _upperbound , int ( int64_t , int64_t , int64_t , int , int ) ) \ <nl> + ( db_ # # IDX # # _end , int ( int64_t , int64_t , int64_t ) ) \ <nl> ( db_ # # IDX # # _next , int ( int , int ) ) \ <nl> ( db_ # # IDX # # _previous , int ( int , int ) ) <nl> <nl> REGISTER_INTRINSICS ( database_api , <nl> ( db_find_i64 , int ( int64_t , int64_t , int64_t , int64_t ) ) <nl> ( db_lowerbound_i64 , int ( int64_t , int64_t , int64_t , int64_t ) ) <nl> ( db_upperbound_i64 , int ( int64_t , int64_t , int64_t , int64_t ) ) <nl> + ( db_end_i64 , int ( int64_t , int64_t , int64_t ) ) <nl> <nl> DB_SECONDARY_INDEX_METHOD_SEQ ( idx64 ) <nl> DB_SECONDARY_INDEX_METHOD_SEQ ( idx128 ) <nl>
|
change the way iterators work to enable access to the last item in a table
|
EOSIO/eos
|
15cca1bd7ae8bca0a4017d058963a86b19fd28d8
|
2018-02-16T05:28:10Z
|
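Note on the commit above: it replaces the flat -1 return value with per-table "end iterators" so that db_previous_i64 / db_idx*_previous can step backwards from the end of a table. The encoding lives in the iterator_cache of apply_context.hpp: -1 stays reserved for an invalid iterator (table not found), while each cached table is handed a unique end iterator of -2, -3, ... via index_to_end_iterator, recoverable through end_iterator_to_index and find_table_by_end_iterator. The sketch below is a minimal, self-contained illustration of that encoding only; the table_stub type, the standalone main, and the omission of the rest of the cache bookkeeping are simplifying assumptions for demonstration, not part of the actual EOSIO code.

#include <cassert>
#include <cstddef>
#include <vector>

// Simplified stand-in for contracts::table_id_object (assumption for this sketch).
struct table_stub { int id; };

// -1 is reserved for "invalid" (table does not exist); end iterators for cached
// tables are -2, -3, ... so they never collide with ordinary iterators >= 0.
inline int    index_to_end_iterator(std::size_t indx) { return -static_cast<int>(indx) - 2; }
inline std::size_t end_iterator_to_index(int ei)      { assert(ei < -1); return static_cast<std::size_t>(-ei - 2); }

int main() {
   std::vector<const table_stub*> end_iterator_to_table;  // mirrors _end_iterator_to_table

   table_stub t0{10}, t1{11};
   // cache_table(): caching a table hands out that table's end iterator.
   end_iterator_to_table.push_back(&t0);                   // end iterator -2
   end_iterator_to_table.push_back(&t1);                   // end iterator -3

   int ei0 = index_to_end_iterator(0);
   int ei1 = index_to_end_iterator(1);
   assert(ei0 == -2 && ei1 == -3);

   // find_table_by_end_iterator() style lookup: recover the table from the code.
   assert(end_iterator_to_table[end_iterator_to_index(ei1)]->id == 11);

   // Contract-side code now branches on "itr < 0" rather than "itr == -1", and
   // db_previous_i64(db_end_i64(code, scope, table), &pk) yields the last row.
   return 0;
}

The design choice behind keeping -1 as its own sentinel is backwards compatibility: callers that only checked for a missing table keep working, while any value below -1 unambiguously names one table's end, which is what lets multi_index.hpp implement operator-- on end() by calling db_end_i64 / db_idx*_end followed by the corresponding previous call.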
new file mode 100644 <nl> index 000000000000 . . bb94f9a93f67 <nl> mmm / dev / null <nl> ppp b / . bazelrc <nl> <nl> + build - - copt = - - std = c + + 14 <nl> + build - - copt = - I . <nl> + build - - copt = - isystem - - copt bazel - out / k8 - fastbuild / bin <nl> new file mode 100644 <nl> index 000000000000 . . ccbccc3dc626 <nl> mmm / dev / null <nl> ppp b / . bazelversion <nl> @ @ - 0 , 0 + 1 @ @ <nl> + 2 . 2 . 0 <nl> mmm a / . circleci / cimodel / data / pytorch_build_definitions . py <nl> ppp b / . circleci / cimodel / data / pytorch_build_definitions . py <nl> def instantiate_configs ( ) : <nl> <nl> root = get_root ( ) <nl> found_configs = conf_tree . dfs ( root ) <nl> - restrict_phases = None <nl> for fc in found_configs : <nl> <nl> + restrict_phases = None <nl> distro_name = fc . find_prop ( " distro_name " ) <nl> compiler_name = fc . find_prop ( " compiler_name " ) <nl> compiler_version = fc . find_prop ( " compiler_version " ) <nl> mmm a / . circleci / config . yml <nl> ppp b / . circleci / config . yml <nl> jobs : <nl> cd $ { PROJ_ROOT } / ios / TestApp <nl> instruments - s - devices <nl> fastlane scan <nl> + pytorch_linux_bazel_build : <nl> + < < : * pytorch_params <nl> + machine : <nl> + image : ubuntu - 1604 : 201903 - 01 <nl> + steps : <nl> + # See Note [ Workspace for CircleCI scripts ] in job - specs - setup . yml <nl> + - attach_scripts <nl> + - setup_linux_system_environment <nl> + - checkout <nl> + - setup_ci_environment <nl> + - run : <nl> + name : Bazel Build <nl> + no_output_timeout : " 1h " <nl> + command : | <nl> + set - e <nl> + # Pull Docker image and run build <nl> + echo " DOCKER_IMAGE : " $ { DOCKER_IMAGE } <nl> + time docker pull $ { DOCKER_IMAGE } > / dev / null <nl> + export id = $ ( docker run - - cap - add = SYS_PTRACE - - security - opt seccomp = unconfined - - cap - add = SYS_PTRACE - - security - opt seccomp = unconfined - t - d - w / var / lib / jenkins $ { DOCKER_IMAGE } ) <nl> + <nl> + echo " Do NOT merge master branch into $ CIRCLE_BRANCH in environment $ BUILD_ENVIRONMENT " <nl> + <nl> + git submodule sync & & git submodule update - q - - init - - recursive <nl> + <nl> + docker cp / home / circleci / project / . $ id : / var / lib / jenkins / workspace <nl> + <nl> + export COMMAND = ' ( ( echo " export BUILD_ENVIRONMENT = $ { BUILD_ENVIRONMENT } " & & echo " source . / workspace / env " & & echo " sudo chown - R jenkins workspace & & cd workspace & & . jenkins / pytorch / build . sh " ) | docker exec - u jenkins - i " $ id " bash ) 2 > & 1 ' <nl> + <nl> + echo $ { COMMAND } > . / command . sh & & unbuffer bash . / command . sh | ts <nl> + <nl> + # Push intermediate Docker image for next phase to use <nl> + if [ - z " $ { BUILD_ONLY } " ] ; then <nl> + # Augment our output image name with bazel to avoid collisions <nl> + output_image = $ { DOCKER_IMAGE } - bazel - $ { CIRCLE_SHA1 } <nl> + export COMMIT_DOCKER_IMAGE = $ output_image <nl> + docker commit " $ id " $ { COMMIT_DOCKER_IMAGE } <nl> + time docker push $ { COMMIT_DOCKER_IMAGE } <nl> + fi <nl> + <nl> + pytorch_linux_bazel_test : <nl> + < < : * pytorch_params <nl> + machine : <nl> + image : ubuntu - 1604 : 201903 - 01 <nl> + steps : <nl> + # See Note [ Workspace for CircleCI scripts ] in job - specs - setup . 
yml <nl> + - attach_scripts <nl> + - setup_linux_system_environment <nl> + - setup_ci_environment <nl> + - run : <nl> + name : Test <nl> + no_output_timeout : " 90m " <nl> + command : | <nl> + set - e <nl> + output_image = $ { DOCKER_IMAGE } - bazel - $ { CIRCLE_SHA1 } <nl> + export COMMIT_DOCKER_IMAGE = $ output_image <nl> + echo " DOCKER_IMAGE : " $ { COMMIT_DOCKER_IMAGE } <nl> + <nl> + time docker pull $ { COMMIT_DOCKER_IMAGE } > / dev / null <nl> + <nl> + if [ - n " $ { USE_CUDA_DOCKER_RUNTIME } " ] ; then <nl> + export id = $ ( docker run - - cap - add = SYS_PTRACE - - security - opt seccomp = unconfined - - runtime = nvidia - t - d - w / var / lib / jenkins $ { COMMIT_DOCKER_IMAGE } ) <nl> + else <nl> + export id = $ ( docker run - - cap - add = SYS_PTRACE - - security - opt seccomp = unconfined - t - d - w / var / lib / jenkins $ { COMMIT_DOCKER_IMAGE } ) <nl> + fi <nl> + <nl> + if [ [ $ { BUILD_ENVIRONMENT } = = * " multigpu " * ] ] ; then <nl> + export COMMAND = ' ( ( echo " export BUILD_ENVIRONMENT = $ { BUILD_ENVIRONMENT } " & & echo " source . / workspace / env " & & echo " sudo chown - R jenkins workspace & & cd workspace & & . jenkins / pytorch / multigpu - test . sh " ) | docker exec - u jenkins - i " $ id " bash ) 2 > & 1 ' <nl> + else <nl> + export COMMAND = ' ( ( echo " export BUILD_ENVIRONMENT = $ { BUILD_ENVIRONMENT } " & & echo " export CIRCLE_PULL_REQUEST = $ { CIRCLE_PULL_REQUEST } " & & echo " source . / workspace / env " & & echo " sudo chown - R jenkins workspace & & cd workspace & & . jenkins / pytorch / test . sh " ) | docker exec - u jenkins - i " $ id " bash ) 2 > & 1 ' <nl> + fi <nl> + echo $ { COMMAND } > . / command . sh & & unbuffer bash . / command . sh | ts <nl> <nl> promote_s3 : <nl> < < : * promote_common <nl> workflows : <nl> docker_image : " 308535385114 . dkr . ecr . us - east - 1 . amazonaws . com / pytorch / pytorch - linux - xenial - cuda10 . 2 - cudnn7 - py3 - gcc7 : f990c76a - a798 - 42bb - 852f - 5be5006f8026 " <nl> use_cuda_docker_runtime : " 1 " <nl> resource_class : gpu . medium <nl> + - pytorch_linux_bazel_build : <nl> + name : pytorch_bazel_build <nl> + requires : <nl> + - setup <nl> + build_environment : " pytorch - linux - xenial - py3 . 6 - gcc7 - bazel - build " <nl> + docker_image : " 308535385114 . dkr . ecr . us - east - 1 . amazonaws . com / pytorch / pytorch - linux - xenial - py3 . 6 - gcc7 : f990c76a - a798 - 42bb - 852f - 5be5006f8026 " <nl> + resource_class : large <nl> + - pytorch_linux_bazel_test : <nl> + name : pytorch_bazel_test <nl> + requires : <nl> + - setup <nl> + - pytorch_bazel_build <nl> + build_environment : " pytorch - linux - xenial - py3 . 6 - gcc7 - bazel - test " <nl> + docker_image : " 308535385114 . dkr . ecr . us - east - 1 . amazonaws . com / pytorch / pytorch - linux - xenial - py3 . 6 - gcc7 : f990c76a - a798 - 42bb - 852f - 5be5006f8026 " <nl> - caffe2_linux_build : <nl> name : caffe2_onnx_main_py3_6_clang7_ubuntu16_04_build <nl> requires : <nl> mmm a / . circleci / generate_config_yml . py <nl> ppp b / . circleci / generate_config_yml . py <nl> def write ( self , output_filehandle ) : <nl> File ( " workflows - pytorch - ios - builds . yml " ) , <nl> File ( " workflows - pytorch - mobile - builds . yml " ) , <nl> File ( " workflows - pytorch - ge - config - tests . yml " ) , <nl> + File ( " workflows - pytorch - bazel - builds . yml " ) , <nl> Listgen ( caffe2_build_definitions . get_workflow_jobs , 3 ) , <nl> File ( " workflows - binary - builds - smoke - subset . 
yml " ) , <nl> Listgen ( binary_build_definitions . get_binary_smoke_test_jobs , 3 ) , <nl> mmm a / . circleci / verbatim - sources / job - specs - custom . yml <nl> ppp b / . circleci / verbatim - sources / job - specs - custom . yml <nl> <nl> cd $ { PROJ_ROOT } / ios / TestApp <nl> instruments - s - devices <nl> fastlane scan <nl> + pytorch_linux_bazel_build : <nl> + < < : * pytorch_params <nl> + machine : <nl> + image : ubuntu - 1604 : 201903 - 01 <nl> + steps : <nl> + # See Note [ Workspace for CircleCI scripts ] in job - specs - setup . yml <nl> + - attach_scripts <nl> + - setup_linux_system_environment <nl> + - checkout <nl> + - setup_ci_environment <nl> + - run : <nl> + name : Bazel Build <nl> + no_output_timeout : " 1h " <nl> + command : | <nl> + set - e <nl> + # Pull Docker image and run build <nl> + echo " DOCKER_IMAGE : " $ { DOCKER_IMAGE } <nl> + time docker pull $ { DOCKER_IMAGE } > / dev / null <nl> + export id = $ ( docker run - - cap - add = SYS_PTRACE - - security - opt seccomp = unconfined - - cap - add = SYS_PTRACE - - security - opt seccomp = unconfined - t - d - w / var / lib / jenkins $ { DOCKER_IMAGE } ) <nl> + <nl> + echo " Do NOT merge master branch into $ CIRCLE_BRANCH in environment $ BUILD_ENVIRONMENT " <nl> + <nl> + git submodule sync & & git submodule update - q - - init - - recursive <nl> + <nl> + docker cp / home / circleci / project / . $ id : / var / lib / jenkins / workspace <nl> + <nl> + export COMMAND = ' ( ( echo " export BUILD_ENVIRONMENT = $ { BUILD_ENVIRONMENT } " & & echo " source . / workspace / env " & & echo " sudo chown - R jenkins workspace & & cd workspace & & . jenkins / pytorch / build . sh " ) | docker exec - u jenkins - i " $ id " bash ) 2 > & 1 ' <nl> + <nl> + echo $ { COMMAND } > . / command . sh & & unbuffer bash . / command . sh | ts <nl> + <nl> + # Push intermediate Docker image for next phase to use <nl> + if [ - z " $ { BUILD_ONLY } " ] ; then <nl> + # Augment our output image name with bazel to avoid collisions <nl> + output_image = $ { DOCKER_IMAGE } - bazel - $ { CIRCLE_SHA1 } <nl> + export COMMIT_DOCKER_IMAGE = $ output_image <nl> + docker commit " $ id " $ { COMMIT_DOCKER_IMAGE } <nl> + time docker push $ { COMMIT_DOCKER_IMAGE } <nl> + fi <nl> + <nl> + pytorch_linux_bazel_test : <nl> + < < : * pytorch_params <nl> + machine : <nl> + image : ubuntu - 1604 : 201903 - 01 <nl> + steps : <nl> + # See Note [ Workspace for CircleCI scripts ] in job - specs - setup . yml <nl> + - attach_scripts <nl> + - setup_linux_system_environment <nl> + - setup_ci_environment <nl> + - run : <nl> + name : Test <nl> + no_output_timeout : " 90m " <nl> + command : | <nl> + set - e <nl> + output_image = $ { DOCKER_IMAGE } - bazel - $ { CIRCLE_SHA1 } <nl> + export COMMIT_DOCKER_IMAGE = $ output_image <nl> + echo " DOCKER_IMAGE : " $ { COMMIT_DOCKER_IMAGE } <nl> + <nl> + time docker pull $ { COMMIT_DOCKER_IMAGE } > / dev / null <nl> + <nl> + if [ - n " $ { USE_CUDA_DOCKER_RUNTIME } " ] ; then <nl> + export id = $ ( docker run - - cap - add = SYS_PTRACE - - security - opt seccomp = unconfined - - runtime = nvidia - t - d - w / var / lib / jenkins $ { COMMIT_DOCKER_IMAGE } ) <nl> + else <nl> + export id = $ ( docker run - - cap - add = SYS_PTRACE - - security - opt seccomp = unconfined - t - d - w / var / lib / jenkins $ { COMMIT_DOCKER_IMAGE } ) <nl> + fi <nl> + <nl> + if [ [ $ { BUILD_ENVIRONMENT } = = * " multigpu " * ] ] ; then <nl> + export COMMAND = ' ( ( echo " export BUILD_ENVIRONMENT = $ { BUILD_ENVIRONMENT } " & & echo " source . 
/ workspace / env " & & echo " sudo chown - R jenkins workspace & & cd workspace & & . jenkins / pytorch / multigpu - test . sh " ) | docker exec - u jenkins - i " $ id " bash ) 2 > & 1 ' <nl> + else <nl> + export COMMAND = ' ( ( echo " export BUILD_ENVIRONMENT = $ { BUILD_ENVIRONMENT } " & & echo " export CIRCLE_PULL_REQUEST = $ { CIRCLE_PULL_REQUEST } " & & echo " source . / workspace / env " & & echo " sudo chown - R jenkins workspace & & cd workspace & & . jenkins / pytorch / test . sh " ) | docker exec - u jenkins - i " $ id " bash ) 2 > & 1 ' <nl> + fi <nl> + echo $ { COMMAND } > . / command . sh & & unbuffer bash . / command . sh | ts <nl> new file mode 100644 <nl> index 000000000000 . . 825ea500ad1d <nl> mmm / dev / null <nl> ppp b / . circleci / verbatim - sources / workflows - pytorch - bazel - builds . yml <nl> <nl> + - pytorch_linux_bazel_build : <nl> + name : pytorch_bazel_build <nl> + requires : <nl> + - setup <nl> + build_environment : " pytorch - linux - xenial - py3 . 6 - gcc7 - bazel - build " <nl> + docker_image : " 308535385114 . dkr . ecr . us - east - 1 . amazonaws . com / pytorch / pytorch - linux - xenial - py3 . 6 - gcc7 : f990c76a - a798 - 42bb - 852f - 5be5006f8026 " <nl> + resource_class : large <nl> + - pytorch_linux_bazel_test : <nl> + name : pytorch_bazel_test <nl> + requires : <nl> + - setup <nl> + - pytorch_bazel_build <nl> + build_environment : " pytorch - linux - xenial - py3 . 6 - gcc7 - bazel - test " <nl> + docker_image : " 308535385114 . dkr . ecr . us - east - 1 . amazonaws . com / pytorch / pytorch - linux - xenial - py3 . 6 - gcc7 : f990c76a - a798 - 42bb - 852f - 5be5006f8026 " <nl> mmm a / . gitignore <nl> ppp b / . gitignore <nl> TAGS <nl> <nl> # clangd background index <nl> . clangd / <nl> + <nl> + # bazel symlinks <nl> + bazel - * <nl> mmm a / . jenkins / pytorch / build . sh <nl> ppp b / . jenkins / pytorch / build . sh <nl> if [ [ " $ { BUILD_ENVIRONMENT } " = = * clang * ] ] ; then <nl> export CXX = clang + + <nl> fi <nl> <nl> + if [ [ " $ BUILD_ENVIRONMENT " = = * - bazel - * ] ] ; then <nl> + set - e <nl> <nl> - # check that setup . py would fail with bad arguments <nl> - echo " The next three invocations are expected to fail with invalid command error messages . " <nl> - ( ! get_exit_code python setup . py bad_argument ) <nl> - ( ! get_exit_code python setup . py clean ] ) <nl> - ( ! get_exit_code python setup . py clean bad_argument ) <nl> + get_bazel <nl> <nl> - if [ [ " $ BUILD_ENVIRONMENT " ! = * libtorch * ] ] ; then <nl> - <nl> - # ppc64le build fails when WERROR = 1 <nl> - # set only when building other architectures <nl> - # only use for " python setup . py install " line <nl> - if [ [ " $ BUILD_ENVIRONMENT " ! = * ppc64le * & & " $ BUILD_ENVIRONMENT " ! = * clang * ] ] ; then <nl> - WERROR = 1 python setup . py install <nl> - else <nl> - python setup . py install <nl> - fi <nl> + tools / bazel build : torch <nl> + else <nl> + # check that setup . py would fail with bad arguments <nl> + echo " The next three invocations are expected to fail with invalid command error messages . " <nl> + ( ! get_exit_code python setup . py bad_argument ) <nl> + ( ! get_exit_code python setup . py clean ] ) <nl> + ( ! get_exit_code python setup . py clean bad_argument ) <nl> + <nl> + if [ [ " $ BUILD_ENVIRONMENT " ! = * libtorch * ] ] ; then <nl> + <nl> + # ppc64le build fails when WERROR = 1 <nl> + # set only when building other architectures <nl> + # only use for " python setup . py install " line <nl> + if [ [ " $ BUILD_ENVIRONMENT " ! 
= * ppc64le * & & " $ BUILD_ENVIRONMENT " ! = * clang * ] ] ; then <nl> + WERROR = 1 python setup . py install <nl> + else <nl> + python setup . py install <nl> + fi <nl> <nl> - # TODO : I ' m not sure why , but somehow we lose verbose commands <nl> - set - x <nl> + # TODO : I ' m not sure why , but somehow we lose verbose commands <nl> + set - x <nl> <nl> - if which sccache > / dev / null ; then <nl> - echo ' PyTorch Build Statistics ' <nl> - sccache - - show - stats <nl> - fi <nl> + if which sccache > / dev / null ; then <nl> + echo ' PyTorch Build Statistics ' <nl> + sccache - - show - stats <nl> + fi <nl> <nl> - assert_git_not_dirty <nl> + assert_git_not_dirty <nl> <nl> - # Build custom operator tests . <nl> - CUSTOM_OP_BUILD = " $ PWD / . . / custom - op - build " <nl> - CUSTOM_OP_TEST = " $ PWD / test / custom_operator " <nl> - python - - version <nl> - SITE_PACKAGES = " $ ( python - c ' from distutils . sysconfig import get_python_lib ; print ( get_python_lib ( ) ) ' ) " <nl> - mkdir " $ CUSTOM_OP_BUILD " <nl> - pushd " $ CUSTOM_OP_BUILD " <nl> - cmake " $ CUSTOM_OP_TEST " - DCMAKE_PREFIX_PATH = " $ SITE_PACKAGES / torch " - DPYTHON_EXECUTABLE = " $ ( which python ) " <nl> - make VERBOSE = 1 <nl> - popd <nl> - assert_git_not_dirty <nl> - else <nl> - # Test standalone c10 build <nl> - if [ [ " $ BUILD_ENVIRONMENT " = = * xenial - cuda10 . 1 - cudnn7 - py3 * ] ] ; then <nl> - mkdir - p c10 / build <nl> - pushd c10 / build <nl> - cmake . . <nl> - make - j <nl> + # Build custom operator tests . <nl> + CUSTOM_OP_BUILD = " $ PWD / . . / custom - op - build " <nl> + CUSTOM_OP_TEST = " $ PWD / test / custom_operator " <nl> + python - - version <nl> + SITE_PACKAGES = " $ ( python - c ' from distutils . sysconfig import get_python_lib ; print ( get_python_lib ( ) ) ' ) " <nl> + mkdir " $ CUSTOM_OP_BUILD " <nl> + pushd " $ CUSTOM_OP_BUILD " <nl> + cmake " $ CUSTOM_OP_TEST " - DCMAKE_PREFIX_PATH = " $ SITE_PACKAGES / torch " - DPYTHON_EXECUTABLE = " $ ( which python ) " <nl> + make VERBOSE = 1 <nl> popd <nl> assert_git_not_dirty <nl> - fi <nl> + else <nl> + # Test standalone c10 build <nl> + if [ [ " $ BUILD_ENVIRONMENT " = = * xenial - cuda10 . 1 - cudnn7 - py3 * ] ] ; then <nl> + mkdir - p c10 / build <nl> + pushd c10 / build <nl> + cmake . . <nl> + make - j <nl> + popd <nl> + assert_git_not_dirty <nl> + fi <nl> <nl> - # Test no - Python build <nl> - echo " Building libtorch " <nl> - # NB : Install outside of source directory ( at the same level as the root <nl> - # pytorch folder ) so that it doesn ' t get cleaned away prior to docker push . <nl> - BUILD_LIBTORCH_PY = $ PWD / tools / build_libtorch . py <nl> - mkdir - p . . / cpp - build / caffe2 <nl> - pushd . . / cpp - build / caffe2 <nl> - WERROR = 1 VERBOSE = 1 DEBUG = 1 python $ BUILD_LIBTORCH_PY <nl> - popd <nl> + # Test no - Python build <nl> + echo " Building libtorch " <nl> + # NB : Install outside of source directory ( at the same level as the root <nl> + # pytorch folder ) so that it doesn ' t get cleaned away prior to docker push . <nl> + BUILD_LIBTORCH_PY = $ PWD / tools / build_libtorch . py <nl> + mkdir - p . . / cpp - build / caffe2 <nl> + pushd . . / cpp - build / caffe2 <nl> + WERROR = 1 VERBOSE = 1 DEBUG = 1 python $ BUILD_LIBTORCH_PY <nl> + popd <nl> + fi <nl> fi <nl> <nl> # Test XLA build <nl> mmm a / . jenkins / pytorch / common . sh <nl> ppp b / . jenkins / pytorch / common . 
sh <nl> function file_diff_from_base ( ) { <nl> set - e <nl> git diff - - name - only " $ ( git merge - base origin master HEAD ) " > " $ 1 " <nl> } <nl> + <nl> + function get_bazel ( ) { <nl> + # download bazel version <nl> + wget https : / / github . com / bazelbuild / bazel / releases / download / 2 . 2 . 0 / bazel - 2 . 2 . 0 - linux - x86_64 - O tools / bazel <nl> + # verify content <nl> + echo ' b2f002ea0e6194a181af6ac84cd94bd8dc797722eb2354690bebac92dda233ff tools / bazel ' | sha256sum - - quiet - c <nl> + <nl> + chmod + x tools / bazel <nl> + } <nl> mmm a / . jenkins / pytorch / test . sh <nl> ppp b / . jenkins / pytorch / test . sh <nl> if [ [ " $ BUILD_ENVIRONMENT " = = * rocm * ] ] ; then <nl> fi <nl> <nl> # - - user breaks ppc64le builds and these packages are already in ppc64le docker <nl> - if [ [ " $ BUILD_ENVIRONMENT " ! = * ppc64le * ] ] ; then <nl> + if [ [ " $ BUILD_ENVIRONMENT " ! = * ppc64le * ] ] & & [ [ " $ BUILD_ENVIRONMENT " ! = * - bazel - * ] ] ; then <nl> # JIT C + + extensions require ninja . <nl> pip_install - - user ninja <nl> # ninja is installed in / var / lib / jenkins / . local / bin <nl> test_backward_compatibility ( ) { <nl> assert_git_not_dirty <nl> } <nl> <nl> - if ! [ [ " $ { BUILD_ENVIRONMENT } " = = * libtorch * ] ] ; then <nl> + test_bazel ( ) { <nl> + set - e <nl> + <nl> + get_bazel <nl> + <nl> + tools / bazel test - - test_tag_filters = - gpu - required - - test_filter = - * _CUDA : all_tests <nl> + } <nl> + <nl> + if ! [ [ " $ { BUILD_ENVIRONMENT } " = = * libtorch * | | " $ { BUILD_ENVIRONMENT } " = = * - bazel - * ] ] ; then <nl> ( cd test & & python - c " import torch ; print ( torch . __config__ . show ( ) ) " ) <nl> ( cd test & & python - c " import torch ; print ( torch . __config__ . parallel_info ( ) ) " ) <nl> fi <nl> elif [ [ " $ { BUILD_ENVIRONMENT } " = = * - test2 | | " $ { JOB_BASE_NAME } " = = * - test2 ] ] ; t <nl> test_aten <nl> test_libtorch <nl> test_custom_script_ops <nl> + elif [ [ " $ { BUILD_ENVIRONMENT } " = = * - bazel - * ] ] ; then <nl> + test_bazel <nl> else <nl> test_torchvision <nl> test_python_nn <nl> new file mode 100644 <nl> index 000000000000 . . 3556452d45ae <nl> mmm / dev / null <nl> ppp b / BUILD . bazel <nl> <nl> + load ( " @ rules_proto / / proto : defs . bzl " , " proto_library " ) <nl> + load ( " @ rules_cc / / cc : defs . bzl " , " cc_binary " , " cc_library " , " cc_proto_library " , " cc_test " ) <nl> + load ( " / / third_party : substitution . bzl " , " template_rule " ) <nl> + load ( " / / tools / rules : cu . bzl " , " cu_library " ) <nl> + load ( " / / tools / config : defs . bzl " , " if_cuda " ) <nl> + load ( " / / : aten . bzl " , " intern_build_aten_ops " ) <nl> + <nl> + COMMON_COPTS = [ <nl> + " - DHAVE_MALLOC_USABLE_SIZE = 1 " , <nl> + " - DHAVE_MMAP = 1 " , <nl> + " - DHAVE_SHM_OPEN = 1 " , <nl> + " - DHAVE_SHM_UNLINK = 1 " , <nl> + " - D_FILE_OFFSET_BITS = 64 " , <nl> + " - DHAVE_GCC_GET_CPUID " , <nl> + " - DUSE_GCC_GET_CPUID " , <nl> + " - DTH_HAVE_THREAD " , <nl> + " - DUSE_FBGEMM " , <nl> + " - DUSE_DISTRIBUTED " , <nl> + " - DAT_PARALLEL_NATIVE = 1 " , <nl> + " - DATEN_THREADING = NATIVE " , <nl> + " - DNO_CUDNN_DESTROY_HANDLE " , <nl> + ] + if_cuda ( [ <nl> + " - DUSE_CUDA " , <nl> + " - DUSE_CUDNN " , <nl> + ] ) <nl> + <nl> + # c10 <nl> + template_rule ( <nl> + name = " cmake_macros_h " , <nl> + src = " c10 / macros / cmake_macros . h . in " , <nl> + out = " c10 / macros / cmake_macros . 
h " , <nl> + substitutions = { <nl> + " cmakedefine " : " define " , <nl> + " # define FEATURE_TORCH_MOBILE " : " / * # undef FEATURE_TORCH_MOBILE * / " , <nl> + " # define USE_STATIC_DISPATCH " : " / * # undef USE_STATIC_DISPATCH * / " , <nl> + " # define C10_USE_NUMA " : " / * # undef C10_USE_NUMA * / " , <nl> + } , <nl> + ) <nl> + <nl> + template_rule ( <nl> + name = " cuda_cmake_macros_h " , <nl> + src = " c10 / cuda / impl / cuda_cmake_macros . h . in " , <nl> + out = " c10 / cuda / impl / cuda_cmake_macros . h " , <nl> + substitutions = { <nl> + " cmakedefine " : " define " , <nl> + } , <nl> + ) <nl> + <nl> + cc_library ( <nl> + name = " c10_headers " , <nl> + hdrs = glob ( [ <nl> + " c10 / core / * . h " , <nl> + " c10 / core / impl / * . h " , <nl> + " c10 / cuda / * . h " , <nl> + " c10 / cuda / impl / * . h " , <nl> + " c10 / macros / * . h " , <nl> + " c10 / util / * . h " , <nl> + ] ) + [ <nl> + " c10 / macros / cmake_macros . h " , <nl> + " c10 / cuda / impl / cuda_cmake_macros . h " , <nl> + ] , <nl> + deps = [ <nl> + " @ com_github_gflags_gflags / / : gflags " , <nl> + " @ com_github_glog / / : glog " , <nl> + ] , <nl> + ) <nl> + <nl> + cc_library ( <nl> + name = " c10 " , <nl> + srcs = glob ( [ <nl> + " c10 / core / * . cpp " , <nl> + " c10 / core / impl / * . cpp " , <nl> + " c10 / util / * . cpp " , <nl> + ] ) + if_cuda ( <nl> + glob ( [ <nl> + " c10 / cuda / * . cpp " , <nl> + " c10 / cuda / impl / * . cpp " , <nl> + ] ) , <nl> + [ ] , <nl> + ) , <nl> + copts = [ " - DCAFFE2_BUILD_MAIN_LIB " ] , <nl> + deps = [ <nl> + " : c10_headers " , <nl> + ] + if_cuda ( <nl> + [ " @ cuda " ] , <nl> + [ ] , <nl> + ) , <nl> + alwayslink = True , <nl> + ) <nl> + <nl> + cc_test ( <nl> + name = " c10_tests " , <nl> + size = " small " , <nl> + srcs = glob ( [ <nl> + " c10 / test / util / * . cpp " , <nl> + " c10 / test / util / * . h " , <nl> + " c10 / test / core / * . cpp " , <nl> + " c10 / test / core / impl / * . cpp " , <nl> + ] ) , <nl> + copts = [ " - Wno - deprecated - declarations " ] , <nl> + deps = [ <nl> + " : c10 " , <nl> + " : c10_headers " , <nl> + " @ com_google_googletest / / : gtest_main " , <nl> + ] , <nl> + ) <nl> + <nl> + py_binary ( <nl> + name = " gen " , <nl> + srcs = [ " aten / src / ATen / gen . py " ] , <nl> + ) <nl> + <nl> + genrule ( <nl> + name = " generated_cpp " , <nl> + srcs = [ <nl> + " aten / src / ATen / Declarations . cwrap " , <nl> + " aten / src / THCUNN / generic / THCUNN . h " , <nl> + " aten / src / ATen / nn . yaml " , <nl> + " aten / src / ATen / native / native_functions . yaml " , <nl> + ] + glob ( [ " aten / src / ATen / templates / * * " ] ) , <nl> + outs = [ <nl> + " aten / src / ATen / Declarations . yaml " , <nl> + " aten / src / ATen / CPUType . h " , <nl> + " aten / src / ATen / CPUType . cpp " , <nl> + " aten / src / ATen / Functions . h " , <nl> + " aten / src / ATen / LegacyTHFunctionsCPU . h " , <nl> + " aten / src / ATen / LegacyTHFunctionsCPU . cpp " , <nl> + " aten / src / ATen / NativeFunctions . h " , <nl> + " aten / src / ATen / MkldnnCPUType . h " , <nl> + " aten / src / ATen / MkldnnCPUType . cpp " , <nl> + " aten / src / ATen / QuantizedCPUType . h " , <nl> + " aten / src / ATen / QuantizedCPUType . cpp " , <nl> + " aten / src / ATen / SparseCPUType . h " , <nl> + " aten / src / ATen / SparseCPUType . cpp " , <nl> + " aten / src / ATen / TypeDefault . h " , <nl> + " aten / src / ATen / TypeDefault . cpp " , <nl> + " aten / src / ATen / core / TensorBody . h " , <nl> + " aten / src / ATen / core / TensorMethods . 
h " , <nl> + " aten / src / ATen / core / OpsAlreadyMovedToC10 . cpp " , <nl> + ] , <nl> + cmd = " $ ( location : gen ) - - source - path aten / src / ATen - - install_dir ` dirname $ ( location aten / src / ATen / Declarations . yaml ) ` aten / src / ATen / Declarations . cwrap aten / src / THCUNN / generic / THCUNN . h aten / src / ATen / nn . yaml aten / src / ATen / native / native_functions . yaml " , <nl> + tools = [ " : gen " ] , <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " code_template " , <nl> + srcs = [ " aten / src / ATen / code_template . py " ] , <nl> + imports = [ " aten " ] , <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " tools_autograd " , <nl> + srcs = glob ( [ " tools / autograd / * . py " ] ) , <nl> + data = glob ( [ <nl> + " tools / autograd / * . yaml " , <nl> + " tools / autograd / templates / * " , <nl> + ] ) , <nl> + deps = [ " : code_template " ] , <nl> + ) <nl> + <nl> + py_library ( <nl> + name = " tools_jit " , <nl> + srcs = glob ( [ " tools / jit / * . py " ] ) , <nl> + data = glob ( [ " tools / jit / templates / * " ] ) , <nl> + ) <nl> + <nl> + py_binary ( <nl> + name = " generate_code " , <nl> + srcs = [ " tools / setup_helpers / generate_code . py " ] , <nl> + deps = [ <nl> + " : tools_autograd " , <nl> + " : tools_jit " , <nl> + ] , <nl> + ) <nl> + <nl> + genrule ( <nl> + name = " generated_code " , <nl> + srcs = [ <nl> + " aten / src / ATen / Declarations . yaml " , <nl> + ] , <nl> + outs = [ <nl> + " torch / csrc / autograd / generated / python_functions . h " , <nl> + " torch / csrc / autograd / generated / python_functions . cpp " , <nl> + " torch / csrc / autograd / generated / python_variable_methods . cpp " , <nl> + " torch / csrc / autograd / generated / python_torch_functions . cpp " , <nl> + " torch / csrc / autograd / generated / python_nn_functions . cpp " , <nl> + " torch / csrc / autograd / generated / VariableType . h " , <nl> + " torch / csrc / autograd / generated / VariableType_0 . cpp " , <nl> + " torch / csrc / autograd / generated / VariableType_1 . cpp " , <nl> + " torch / csrc / autograd / generated / VariableType_2 . cpp " , <nl> + " torch / csrc / autograd / generated / VariableType_3 . cpp " , <nl> + " torch / csrc / autograd / generated / VariableType_4 . cpp " , <nl> + # " torch / csrc / autograd / generated / VariableTypeEverything . cpp " , <nl> + " torch / csrc / autograd / generated / RegistrationDeclarations . h " , <nl> + " torch / csrc / autograd / generated / Functions . h " , <nl> + " torch / csrc / autograd / generated / Functions . cpp " , <nl> + " torch / csrc / autograd / generated / variable_factories . h " , <nl> + " torch / csrc / jit / generated / register_aten_ops_0 . cpp " , <nl> + " torch / csrc / jit / generated / register_aten_ops_1 . cpp " , <nl> + " torch / csrc / jit / generated / register_aten_ops_2 . cpp " , <nl> + ] , <nl> + cmd = " $ ( location : generate_code ) - - install_dir ` dirname $ ( location torch / csrc / autograd / generated / variable_factories . h ) ` / . . / . . - - declarations - path $ ( location aten / src / ATen / Declarations . yaml ) - - nn - path aten / src " , <nl> + tools = [ " : generate_code " ] , <nl> + ) <nl> + <nl> + exports_files ( <nl> + srcs = [ " aten / src / ATen / cpu / tbb / extra / version_string . ver . in " ] , <nl> + ) <nl> + <nl> + # ATen <nl> + filegroup ( <nl> + name = " aten_base_cpp " , <nl> + srcs = glob ( [ <nl> + " aten / src / ATen / * . cpp " , <nl> + " aten / src / ATen / detail / * . cpp " , <nl> + " aten / src / ATen / cpu / * . 
cpp " , <nl> + ] ) , <nl> + ) <nl> + <nl> + filegroup ( <nl> + name = " ATen_CORE_SRCS " , <nl> + srcs = glob ( <nl> + [ <nl> + " aten / src / ATen / core / * * / * . cpp " , <nl> + ] , <nl> + exclude = [ <nl> + " aten / src / ATen / core / * * / * _test . cpp " , <nl> + ] , <nl> + ) , <nl> + ) <nl> + <nl> + filegroup ( <nl> + name = " aten_native_cpp " , <nl> + srcs = glob ( [ " aten / src / ATen / native / * . cpp " ] ) , <nl> + ) <nl> + <nl> + filegroup ( <nl> + name = " aten_native_sparse_cpp " , <nl> + srcs = glob ( [ " aten / src / ATen / native / sparse / * . cpp " ] ) , <nl> + ) <nl> + <nl> + filegroup ( <nl> + name = " aten_native_quantized_cpp " , <nl> + srcs = glob ( <nl> + [ <nl> + " aten / src / ATen / native / quantized / * . cpp " , <nl> + " aten / src / ATen / native / quantized / cpu / * . cpp " , <nl> + ] , <nl> + ) , <nl> + ) <nl> + <nl> + filegroup ( <nl> + name = " aten_native_mkl_cpp " , <nl> + srcs = glob ( [ " aten / src / ATen / native / mkl / * . cpp " ] ) , <nl> + ) <nl> + <nl> + filegroup ( <nl> + name = " aten_native_mkldnn_cpp " , <nl> + srcs = glob ( [ " aten / src / ATen / native / mkldnn / * . cpp " ] ) , <nl> + ) <nl> + <nl> + filegroup ( <nl> + name = " aten_native_xnnpack " , <nl> + srcs = glob ( [ " aten / src / ATen / native / xnnpack / * . cpp " ] ) , <nl> + ) <nl> + <nl> + filegroup ( <nl> + name = " ATen_QUANTIZED_SRCS " , <nl> + srcs = glob ( <nl> + [ <nl> + " aten / src / ATen / quantized / * * / * . cpp " , <nl> + ] , <nl> + exclude = [ <nl> + " aten / src / ATen / quantized / * * / * _test . cpp " , <nl> + ] , <nl> + ) , <nl> + ) <nl> + <nl> + filegroup ( <nl> + name = " th_srcs " , <nl> + srcs = [ <nl> + " aten / src / TH / THAllocator . cpp " , <nl> + " aten / src / TH / THBlas . cpp " , <nl> + " aten / src / TH / THDiskFile . cpp " , <nl> + " aten / src / TH / THFile . cpp " , <nl> + " aten / src / TH / THGeneral . cpp " , <nl> + " aten / src / TH / THLapack . cpp " , <nl> + " aten / src / TH / THMemoryFile . cpp " , <nl> + " aten / src / TH / THStorageFunctions . cpp " , <nl> + " aten / src / TH / THTensor . cpp " , <nl> + " aten / src / TH / THTensorEvenMoreMath . cpp " , <nl> + " aten / src / TH / THTensorFill . cpp " , <nl> + " aten / src / TH / THTensorLapack . cpp " , <nl> + " aten / src / TH / THTensorMath . cpp " , <nl> + " aten / src / TH / THTensorMoreMath . cpp " , <nl> + " aten / src / TH / THTensorRandom . cpp " , <nl> + " aten / src / TH / THVector . cpp " , <nl> + " aten / src / TH / vector / AVX . cpp " , <nl> + ] , <nl> + ) <nl> + <nl> + filegroup ( <nl> + name = " aten_cuda_srcs " , <nl> + srcs = [ <nl> + " aten / src / ATen / cuda / CUDABlas . cpp " , <nl> + " aten / src / ATen / cuda / CUDAContext . cpp " , <nl> + " aten / src / ATen / cuda / CUDAGenerator . cpp " , <nl> + " aten / src / ATen / cuda / CuSparseHandlePool . cpp " , <nl> + " aten / src / ATen / cuda / CublasHandlePool . cpp " , <nl> + " aten / src / ATen / cuda / PinnedMemoryAllocator . cpp " , <nl> + " aten / src / ATen / cuda / detail / CUDAHooks . cpp " , <nl> + " aten / src / ATen / cudnn / Descriptors . cpp " , <nl> + " aten / src / ATen / cudnn / Handle . cpp " , <nl> + " aten / src / ATen / cudnn / Types . cpp " , <nl> + " aten / src / ATen / native / cuda / CUDAUnaryOps . cpp " , <nl> + " aten / src / ATen / native / cuda / LegacyDefinitions . cpp " , <nl> + " aten / src / ATen / native / cuda / TensorShapeCUDA . cpp " , <nl> + " aten / src / ATen / native / cudnn / AffineGridGenerator . 
cpp " , <nl> + " aten / src / ATen / native / cudnn / BatchNorm . cpp " , <nl> + " aten / src / ATen / native / cudnn / Conv . cpp " , <nl> + " aten / src / ATen / native / cudnn / GridSampler . cpp " , <nl> + " aten / src / ATen / native / cudnn / LossCTC . cpp " , <nl> + " aten / src / ATen / native / cudnn / RNN . cpp " , <nl> + " aten / src / ATen / native / miopen / BatchNorm_miopen . cpp " , <nl> + " aten / src / ATen / native / miopen / Conv_miopen . cpp " , <nl> + " aten / src / ATen / native / miopen / RNN_miopen . cpp " , <nl> + " aten / src / ATen / native / sparse / cuda / SparseCUDATensor . cpp " , <nl> + " aten / src / THC / THCCachingHostAllocator . cpp " , <nl> + " aten / src / THC / THCGeneral . cpp " , <nl> + " aten / src / THC / THCStorageCopy . cpp " , <nl> + " aten / src / THC / THCTensor . cpp " , <nl> + ] , <nl> + ) <nl> + <nl> + filegroup ( <nl> + name = " thc_srcs_cu " , <nl> + srcs = [ <nl> + " aten / src / THC / THCBlas . cu . cc " , <nl> + " aten / src / THC / THCReduceApplyUtils . cu . cc " , <nl> + " aten / src / THC / THCSleep . cu . cc " , <nl> + " aten / src / THC / THCSortUtils . cu . cc " , <nl> + " aten / src / THC / THCStorage . cu . cc " , <nl> + " aten / src / THC / THCStorageCopy . cu . cc " , <nl> + " aten / src / THC / THCTensor . cu . cc " , <nl> + " aten / src / THC / THCTensorCopy . cu . cc " , <nl> + " aten / src / THC / THCTensorIndex . cu . cc " , <nl> + " aten / src / THC / THCTensorMath . cu . cc " , <nl> + " aten / src / THC / THCTensorMathBlas . cu . cc " , <nl> + " aten / src / THC / THCTensorMathMagma . cu . cc " , <nl> + " aten / src / THC / THCTensorMathPairwise . cu . cc " , <nl> + " aten / src / THC / THCTensorMathReduce . cu . cc " , <nl> + " aten / src / THC / THCTensorMathScan . cu . cc " , <nl> + " aten / src / THC / THCTensorMode . cu . cc " , <nl> + " aten / src / THC / THCTensorRandom . cu . cc " , <nl> + " aten / src / THC / THCTensorScatterGather . cu . cc " , <nl> + " aten / src / THC / THCTensorSort . cu . cc " , <nl> + " aten / src / THC / THCTensorTopK . cu . cc " , <nl> + " aten / src / THC / generated / THCTensorMaskedBFloat16 . cu . cc " , <nl> + " aten / src / THC / generated / THCTensorMaskedBool . cu . cc " , <nl> + " aten / src / THC / generated / THCTensorMaskedByte . cu . cc " , <nl> + " aten / src / THC / generated / THCTensorMaskedChar . cu . cc " , <nl> + " aten / src / THC / generated / THCTensorMaskedDouble . cu . cc " , <nl> + " aten / src / THC / generated / THCTensorMaskedFloat . cu . cc " , <nl> + " aten / src / THC / generated / THCTensorMaskedHalf . cu . cc " , <nl> + " aten / src / THC / generated / THCTensorMaskedInt . cu . cc " , <nl> + " aten / src / THC / generated / THCTensorMaskedLong . cu . cc " , <nl> + " aten / src / THC / generated / THCTensorMaskedShort . cu . cc " , <nl> + " aten / src / THC / generated / THCTensorMathPointwiseBool . cu . cc " , <nl> + " aten / src / THC / generated / THCTensorMathPointwiseByte . cu . cc " , <nl> + " aten / src / THC / generated / THCTensorMathPointwiseChar . cu . cc " , <nl> + " aten / src / THC / generated / THCTensorMathPointwiseDouble . cu . cc " , <nl> + " aten / src / THC / generated / THCTensorMathPointwiseFloat . cu . cc " , <nl> + " aten / src / THC / generated / THCTensorMathPointwiseHalf . cu . cc " , <nl> + " aten / src / THC / generated / THCTensorMathPointwiseInt . cu . cc " , <nl> + " aten / src / THC / generated / THCTensorMathPointwiseLong . cu . cc " , <nl> + " aten / src / THC / generated / THCTensorMathPointwiseShort . cu . 
cc " , <nl> + " aten / src / THC / generated / THCTensorMathReduceBFloat16 . cu . cc " , <nl> + " aten / src / THC / generated / THCTensorMathReduceBool . cu . cc " , <nl> + " aten / src / THC / generated / THCTensorMathReduceByte . cu . cc " , <nl> + " aten / src / THC / generated / THCTensorMathReduceChar . cu . cc " , <nl> + " aten / src / THC / generated / THCTensorMathReduceDouble . cu . cc " , <nl> + " aten / src / THC / generated / THCTensorMathReduceFloat . cu . cc " , <nl> + " aten / src / THC / generated / THCTensorMathReduceHalf . cu . cc " , <nl> + " aten / src / THC / generated / THCTensorMathReduceInt . cu . cc " , <nl> + " aten / src / THC / generated / THCTensorMathReduceLong . cu . cc " , <nl> + " aten / src / THC / generated / THCTensorMathReduceShort . cu . cc " , <nl> + " aten / src / THC / generated / THCTensorSortByte . cu . cc " , <nl> + " aten / src / THC / generated / THCTensorSortChar . cu . cc " , <nl> + " aten / src / THC / generated / THCTensorSortDouble . cu . cc " , <nl> + " aten / src / THC / generated / THCTensorSortFloat . cu . cc " , <nl> + " aten / src / THC / generated / THCTensorSortHalf . cu . cc " , <nl> + " aten / src / THC / generated / THCTensorSortInt . cu . cc " , <nl> + " aten / src / THC / generated / THCTensorSortLong . cu . cc " , <nl> + " aten / src / THC / generated / THCTensorSortShort . cu . cc " , <nl> + ] , <nl> + ) <nl> + <nl> + filegroup ( <nl> + name = " thcunn_srcs_cu " , <nl> + srcs = [ <nl> + " aten / src / THCUNN / BCECriterion . cu . cc " , <nl> + " aten / src / THCUNN / ClassNLLCriterion . cu . cc " , <nl> + " aten / src / THCUNN / ELU . cu . cc " , <nl> + " aten / src / THCUNN / GatedLinearUnit . cu . cc " , <nl> + " aten / src / THCUNN / HardTanh . cu . cc " , <nl> + " aten / src / THCUNN / LeakyReLU . cu . cc " , <nl> + " aten / src / THCUNN / LogSigmoid . cu . cc " , <nl> + " aten / src / THCUNN / MultiLabelMarginCriterion . cu . cc " , <nl> + " aten / src / THCUNN / MultiMarginCriterion . cu . cc " , <nl> + " aten / src / THCUNN / RReLU . cu . cc " , <nl> + " aten / src / THCUNN / SoftMarginCriterion . cu . cc " , <nl> + " aten / src / THCUNN / SoftPlus . cu . cc " , <nl> + " aten / src / THCUNN / SoftShrink . cu . cc " , <nl> + " aten / src / THCUNN / SpatialClassNLLCriterion . cu . cc " , <nl> + " aten / src / THCUNN / SpatialConvolutionMM . cu . cc " , <nl> + " aten / src / THCUNN / SpatialDepthwiseConvolution . cu . cc " , <nl> + " aten / src / THCUNN / Tanh . cu . cc " , <nl> + ] , <nl> + ) <nl> + <nl> + filegroup ( <nl> + name = " aten_srcs_cu " , <nl> + srcs = [ <nl> + " aten / src / ATen / cuda / detail / IndexUtils . cu . cc " , <nl> + " aten / src / ATen / native / cuda / Activation . cu . cc " , <nl> + " aten / src / ATen / native / cuda / AdaptiveAveragePooling . cu . cc " , <nl> + " aten / src / ATen / native / cuda / AdaptiveAveragePooling3d . cu . cc " , <nl> + " aten / src / ATen / native / cuda / AdaptiveMaxPooling2d . cu . cc " , <nl> + " aten / src / ATen / native / cuda / AdaptiveMaxPooling3d . cu . cc " , <nl> + " aten / src / ATen / native / cuda / AveragePool2d . cu . cc " , <nl> + " aten / src / ATen / native / cuda / AveragePool3d . cu . cc " , <nl> + " aten / src / ATen / native / cuda / BatchLinearAlgebra . cu . cc " , <nl> + " aten / src / ATen / native / cuda / BinaryArithmeticKernel . cu . cc " , <nl> + " aten / src / ATen / native / cuda / BinaryCompareKernel . cu . cc " , <nl> + " aten / src / ATen / native / cuda / BinaryMiscOpsKernels . cu . 
cc " , <nl> + " aten / src / ATen / native / cuda / CUDAScalar . cu . cc " , <nl> + " aten / src / ATen / native / cuda / Col2Im . cu . cc " , <nl> + " aten / src / ATen / native / cuda / Copy . cu . cc " , <nl> + " aten / src / ATen / native / cuda / CrossKernel . cu . cc " , <nl> + " aten / src / ATen / native / cuda / DilatedMaxPool2d . cu . cc " , <nl> + " aten / src / ATen / native / cuda / DilatedMaxPool3d . cu . cc " , <nl> + " aten / src / ATen / native / cuda / DistanceKernel . cu . cc " , <nl> + " aten / src / ATen / native / cuda / Distributions . cu . cc " , <nl> + " aten / src / ATen / native / cuda / Dropout . cu . cc " , <nl> + " aten / src / ATen / native / cuda / Embedding . cu . cc " , <nl> + " aten / src / ATen / native / cuda / EmbeddingBackwardKernel . cu . cc " , <nl> + " aten / src / ATen / native / cuda / EmbeddingBag . cu . cc " , <nl> + " aten / src / ATen / native / cuda / FillKernel . cu . cc " , <nl> + " aten / src / ATen / native / cuda / FractionalMaxPool2d . cu . cc " , <nl> + " aten / src / ATen / native / cuda / FractionalMaxPool3d . cu . cc " , <nl> + " aten / src / ATen / native / cuda / GridSampler . cu . cc " , <nl> + " aten / src / ATen / native / cuda / Im2Col . cu . cc " , <nl> + " aten / src / ATen / native / cuda / IndexKernel . cu . cc " , <nl> + " aten / src / ATen / native / cuda / Indexing . cu . cc " , <nl> + " aten / src / ATen / native / cuda / Lerp . cu . cc " , <nl> + " aten / src / ATen / native / cuda / LinearAlgebra . cu . cc " , <nl> + " aten / src / ATen / native / cuda / Loss . cu . cc " , <nl> + " aten / src / ATen / native / cuda / LossCTC . cu . cc " , <nl> + " aten / src / ATen / native / cuda / MaxUnpooling . cu . cc " , <nl> + " aten / src / ATen / native / cuda / MultinomialKernel . cu . cc " , <nl> + " aten / src / ATen / native / cuda / NaiveConvolutionTranspose2d . cu . cc " , <nl> + " aten / src / ATen / native / cuda / NaiveConvolutionTranspose3d . cu . cc " , <nl> + " aten / src / ATen / native / cuda / NaiveDilatedConvolution . cu . cc " , <nl> + " aten / src / ATen / native / cuda / Normalization . cu . cc " , <nl> + " aten / src / ATen / native / cuda / PointwiseOpsKernel . cu . cc " , <nl> + " aten / src / ATen / native / cuda / PowKernel . cu . cc " , <nl> + " aten / src / ATen / native / cuda / RNN . cu . cc " , <nl> + " aten / src / ATen / native / cuda / RangeFactories . cu . cc " , <nl> + " aten / src / ATen / native / cuda / Reduce . cu . cc " , <nl> + " aten / src / ATen / native / cuda / ReduceOpsKernel . cu . cc " , <nl> + " aten / src / ATen / native / cuda / ReflectionPad . cu . cc " , <nl> + " aten / src / ATen / native / cuda / Repeat . cu . cc " , <nl> + " aten / src / ATen / native / cuda / ReplicationPadding . cu . cc " , <nl> + " aten / src / ATen / native / cuda / Resize . cu . cc " , <nl> + " aten / src / ATen / native / cuda / SoftMax . cu . cc " , <nl> + " aten / src / ATen / native / cuda / SortingKthValue . cu . cc " , <nl> + " aten / src / ATen / native / cuda / SparseMM . cu . cc " , <nl> + " aten / src / ATen / native / cuda / SpectralOps . cu . cc " , <nl> + " aten / src / ATen / native / cuda / SummaryOps . cu . cc " , <nl> + " aten / src / ATen / native / cuda / TensorCompare . cu . cc " , <nl> + " aten / src / ATen / native / cuda / TensorFactories . cu . cc " , <nl> + " aten / src / ATen / native / cuda / TensorTransformations . cu . cc " , <nl> + " aten / src / ATen / native / cuda / TriangularOps . cu . cc " , <nl> + " aten / src / ATen / native / cuda / UnaryOpsKernel . cu . 
cc " , <nl> + " aten / src / ATen / native / cuda / Unique . cu . cc " , <nl> + " aten / src / ATen / native / cuda / UpSampleBicubic2d . cu . cc " , <nl> + " aten / src / ATen / native / cuda / UpSampleBilinear2d . cu . cc " , <nl> + " aten / src / ATen / native / cuda / UpSampleLinear1d . cu . cc " , <nl> + " aten / src / ATen / native / cuda / UpSampleNearest1d . cu . cc " , <nl> + " aten / src / ATen / native / cuda / UpSampleNearest2d . cu . cc " , <nl> + " aten / src / ATen / native / cuda / UpSampleNearest3d . cu . cc " , <nl> + " aten / src / ATen / native / cuda / UpSampleTrilinear3d . cu . cc " , <nl> + " aten / src / ATen / native / cuda / WeightNorm . cu . cc " , <nl> + " aten / src / ATen / native / cuda / layer_norm_kernel . cu . cc " , <nl> + " aten / src / ATen / native / quantized / cuda / fake_quantize_core . cu . cc " , <nl> + " aten / src / ATen / native / sparse / cuda / SparseCUDABlas . cu . cc " , <nl> + " aten / src / ATen / native / sparse / cuda / SparseCUDATensor . cu . cc " , <nl> + " aten / src / ATen / native / sparse / cuda / SparseCUDATensorMath . cu . cc " , <nl> + ] , <nl> + ) <nl> + <nl> + template_rule ( <nl> + name = " aten_src_ATen_config " , <nl> + src = " aten / src / ATen / Config . h . in " , <nl> + out = " aten / src / ATen / Config . h " , <nl> + substitutions = { <nl> + " @ AT_MKLDNN_ENABLED @ " : " 1 " , <nl> + " @ AT_MKL_ENABLED @ " : " 0 " , <nl> + " @ AT_NNPACK_ENABLED @ " : " 0 " , <nl> + " @ CAFFE2_STATIC_LINK_CUDA_INT @ " : " 0 " , <nl> + } , <nl> + ) <nl> + <nl> + template_rule ( <nl> + name = " aten_src_ATen_cuda_config " , <nl> + src = " aten / src / ATen / cuda / CUDAConfig . h . in " , <nl> + out = " aten / src / ATen / cuda / CUDAConfig . h " , <nl> + substitutions = { <nl> + " @ AT_CUDNN_ENABLED @ " : " 1 " , <nl> + " @ AT_ROCM_ENABLED @ " : " 0 " , <nl> + " @ NVCC_FLAGS_EXTRA @ " : " " , <nl> + } , <nl> + ) <nl> + <nl> + template_rule ( <nl> + name = " aten_src_TH_THGeneral " , <nl> + src = " aten / src / TH / THGeneral . h . in " , <nl> + out = " aten / src / TH / THGeneral . h " , <nl> + substitutions = { <nl> + " cmakedefine " : " define " , <nl> + } , <nl> + ) <nl> + <nl> + template_rule ( <nl> + name = " aten_src_THC_THCGeneral " , <nl> + src = " aten / src / THC / THCGeneral . h . in " , <nl> + out = " aten / src / THC / THCGeneral . h " , <nl> + substitutions = { <nl> + " # cmakedefine USE_MAGMA " : " " , <nl> + } , <nl> + ) <nl> + <nl> + cc_library ( <nl> + name = " aten_headers " , <nl> + hdrs = [ <nl> + " aten / src / TH / THGeneral . h " , <nl> + " aten / src / THC / THCGeneral . h " , <nl> + " torch / csrc / WindowsTorchApiMacro . h " , <nl> + " torch / csrc / jit / frontend / function_schema_parser . h " , <nl> + " aten / src / ATen / templates / TensorBody . h " , <nl> + " aten / src / ATen / templates / TensorMethods . h " , <nl> + ] + glob ( [ <nl> + " aten / src / ATen / * . h " , <nl> + " aten / src / ATen / core / * . h " , <nl> + " aten / src / ATen / core / boxing / * . h " , <nl> + " aten / src / ATen / core / dispatch / * . h " , <nl> + " aten / src / ATen / core / op_registration / * . h " , <nl> + " aten / src / ATen / cpu / * . h " , <nl> + " aten / src / ATen / cpu / vec256 / * . h " , <nl> + " aten / src / ATen / cuda / * . cuh " , <nl> + " aten / src / ATen / cuda / * . h " , <nl> + " aten / src / ATen / cuda / detail / * . cuh " , <nl> + " aten / src / ATen / cuda / detail / * . h " , <nl> + " aten / src / ATen / cuda / nvrtc_stub / * . h " , <nl> + " aten / src / ATen / cudnn / * . 
h " , <nl> + " aten / src / ATen / detail / * . h " , <nl> + " aten / src / ATen / mkl / * . h " , <nl> + " aten / src / ATen / mkldnn / * . h " , <nl> + " aten / src / ATen / native / * . h " , <nl> + " aten / src / ATen / native / cpu / * . h " , <nl> + " aten / src / ATen / native / cuda / * . cuh " , <nl> + " aten / src / ATen / native / cuda / * . h " , <nl> + " aten / src / ATen / native / mkldnn / * . h " , <nl> + " aten / src / ATen / native / quantized / * . h " , <nl> + " aten / src / ATen / native / quantized / cuda / * . h " , <nl> + " aten / src / ATen / native / quantized / cpu / * . h " , <nl> + " aten / src / ATen / native / sparse / * . h " , <nl> + " aten / src / ATen / native / sparse / cuda / * . cuh " , <nl> + " aten / src / ATen / native / utils / * . h " , <nl> + " aten / src / ATen / native / xnnpack / * . h " , <nl> + " aten / src / ATen / quantized / * . h " , <nl> + " aten / src / TH / * . hpp " , <nl> + " aten / src / TH / * . h " , <nl> + " aten / src / TH / vector / * . h " , <nl> + " aten / src / TH / generic / * . hpp " , <nl> + " aten / src / TH / generic / * . h " , <nl> + " aten / src / TH / generic / * . cpp " , <nl> + " aten / src / TH / generic / * . c " , <nl> + " aten / src / THC / * . cpp " , <nl> + " aten / src / THC / * . cuh " , <nl> + " aten / src / THC / * . h " , <nl> + " aten / src / THC / * . hpp " , <nl> + " aten / src / THC / generic / * . cpp " , <nl> + " aten / src / THC / generic / * . cu . cc " , <nl> + " aten / src / THC / generic / * . h " , <nl> + " aten / src / THC / generic / * . hpp " , <nl> + " aten / src / THCUNN / * . h " , <nl> + " aten / src / THCUNN / * . cuh " , <nl> + " aten / src / THCUNN / generic / * . h " , <nl> + " aten / src / THCUNN / generic / * . cu . cc " , <nl> + ] ) + [ <nl> + " : generated_cpp " , <nl> + " : aten_src_ATen_config " , <nl> + ] , <nl> + includes = [ <nl> + " aten / src " , <nl> + " aten / src / TH " , <nl> + ] , <nl> + deps = [ <nl> + " : c10_headers " , <nl> + ] , <nl> + ) <nl> + <nl> + ATEN_COPTS = COMMON_COPTS + [ <nl> + " - DUSE_AVX " , <nl> + " - DUSE_AVX2 " , <nl> + " - DCAFFE2_BUILD_MAIN_LIBS " , <nl> + " - DHAVE_AVX_CPU_DEFINITION " , <nl> + " - DHAVE_AVX2_CPU_DEFINITION " , <nl> + " - fvisibility - inlines - hidden " , <nl> + " - fno - math - errno " , <nl> + " - fno - trapping - math " , <nl> + ] <nl> + <nl> + intern_build_aten_ops ( <nl> + copts = ATEN_COPTS , <nl> + deps = [ <nl> + " : aten_headers " , <nl> + " @ fbgemm " , <nl> + ] , <nl> + ) <nl> + <nl> + cc_library ( <nl> + name = " th " , <nl> + srcs = [ <nl> + " : th_srcs " , <nl> + ] , <nl> + copts = ATEN_COPTS + [ <nl> + " - mavx " , <nl> + ] , <nl> + deps = [ <nl> + " : aten_headers " , <nl> + " @ fbgemm " , <nl> + ] , <nl> + ) <nl> + <nl> + cc_library ( <nl> + name = " aten " , <nl> + srcs = [ <nl> + " : ATen_CORE_SRCS " , <nl> + " : ATen_QUANTIZED_SRCS " , <nl> + " : aten_base_cpp " , <nl> + " : aten_native_cpp " , <nl> + " : aten_native_mkl_cpp " , <nl> + " : aten_native_mkldnn_cpp " , <nl> + " : aten_native_quantized_cpp " , <nl> + " : aten_native_sparse_cpp " , <nl> + " : aten_native_xnnpack " , <nl> + " : aten_src_ATen_config " , <nl> + " : generated_cpp " , <nl> + ] , <nl> + copts = ATEN_COPTS , <nl> + data = if_cuda ( <nl> + [ " : libcaffe2_nvrtc . 
so " ] , <nl> + [ ] , <nl> + ) , <nl> + visibility = [ " / / visibility : public " ] , <nl> + deps = [ <nl> + " : ATen_CPU " , <nl> + " : aten_headers " , <nl> + " : caffe2_for_aten_headers " , <nl> + " : th " , <nl> + " : torch_headers " , <nl> + " @ fbgemm " , <nl> + " @ ideep " , <nl> + ] , <nl> + alwayslink = True , <nl> + ) <nl> + <nl> + cc_library ( <nl> + name = " aten_nvrtc " , <nl> + srcs = glob ( [ <nl> + " aten / src / ATen / cuda / nvrtc_stub / * . cpp " , <nl> + ] ) , <nl> + copts = ATEN_COPTS , <nl> + linkstatic = True , <nl> + visibility = [ " / / visibility : public " ] , <nl> + deps = [ <nl> + " : aten_headers " , <nl> + " : c10_headers " , <nl> + " @ cuda " , <nl> + " @ cuda / / : cuda_driver " , <nl> + " @ cuda / / : nvrtc " , <nl> + ] , <nl> + alwayslink = True , <nl> + ) <nl> + <nl> + cc_binary ( <nl> + name = " libcaffe2_nvrtc . so " , <nl> + linkshared = True , <nl> + visibility = [ " / / visibility : public " ] , <nl> + deps = [ <nl> + " : aten_nvrtc " , <nl> + ] , <nl> + ) <nl> + <nl> + cc_library ( <nl> + name = " aten_cuda_cpp " , <nl> + srcs = [ " : aten_cuda_srcs " ] , <nl> + copts = ATEN_COPTS , <nl> + visibility = [ " / / visibility : public " ] , <nl> + deps = [ <nl> + " : aten " , <nl> + " @ cuda " , <nl> + " @ cuda / / : nvrtc " , <nl> + " @ cudnn " , <nl> + ] , <nl> + alwayslink = True , <nl> + ) <nl> + <nl> + torch_cuda_half_options = [ <nl> + " - DCUDA_HAS_FP16 = 1 " , <nl> + " - D__CUDA_NO_HALF_OPERATORS__ " , <nl> + " - D__CUDA_NO_HALF_CONVERSIONS__ " , <nl> + " - D__CUDA_NO_HALF2_OPERATORS__ " , <nl> + ] <nl> + <nl> + cu_library ( <nl> + name = " aten_cuda " , <nl> + srcs = [ <nl> + " : aten_srcs_cu " , <nl> + " : thc_srcs_cu " , <nl> + " : thcunn_srcs_cu " , <nl> + ] , <nl> + copts = ATEN_COPTS + torch_cuda_half_options , <nl> + visibility = [ " / / visibility : public " ] , <nl> + deps = [ <nl> + " : aten_cuda_cpp " , <nl> + " @ cuda / / : cublas " , <nl> + " @ cuda / / : cufft " , <nl> + " @ cuda / / : cusparse " , <nl> + ] , <nl> + alwayslink = True , <nl> + ) <nl> + <nl> + # caffe2 <nl> + CAFFE2_COPTS = COMMON_COPTS + [ <nl> + " - Dcaffe2_EXPORTS " , <nl> + " - DCAFFE2_USE_GLOO " , <nl> + " - DCAFFE2_USE_CUDNN " , <nl> + " - DCAFFE2_BUILD_MAIN_LIB " , <nl> + " - fvisibility - inlines - hidden " , <nl> + " - fno - math - errno " , <nl> + " - fno - trapping - math " , <nl> + ] <nl> + <nl> + proto_library ( <nl> + name = " caffe2_proto_source " , <nl> + srcs = glob ( [ <nl> + " caffe2 / proto / * . proto " , <nl> + ] ) , <nl> + visibility = [ " / / visibility : public " ] , <nl> + ) <nl> + <nl> + cc_proto_library ( <nl> + name = " caffe2_protos " , <nl> + deps = [ " : caffe2_proto_source " ] , <nl> + ) <nl> + <nl> + template_rule ( <nl> + name = " caffe2_core_macros_h " , <nl> + src = " caffe2 / core / macros . h . in " , <nl> + out = " caffe2 / core / macros . 
h " , <nl> + substitutions = { <nl> + " @ CAFFE2_VERSION_MAJOR @ " : " 1 " , <nl> + " @ CAFFE2_VERSION_MINOR @ " : " 3 " , <nl> + " @ CAFFE2_VERSION_PATCH @ " : " 0 " , <nl> + " cmakedefine " : " define " , <nl> + " # define CAFFE2_FORCE_FALLBACK_CUDA_MPI " : " / * # undef CAFFE2_FORCE_FALLBACK_CUDA_MPI * / " , <nl> + " # define CAFFE2_HAS_MKL_DNN " : " / * # undef CAFFE2_HAS_MKL_DNN * / " , <nl> + " # define CAFFE2_HAS_MKL_SGEMM_PACK " : " / * # undef CAFFE2_HAS_MKL_SGEMM_PACK * / " , <nl> + " # define CAFFE2_THREADPOOL_MAIN_IMBALANCE " : " / * # undef CAFFE2_THREADPOOL_MAIN_IMBALANCE * / " , <nl> + " # define CAFFE2_THREADPOOL_STATS " : " / * # undef CAFFE2_THREADPOOL_STATS * / " , <nl> + " # define CAFFE2_USE_ACCELERATE " : " / * # undef CAFFE2_USE_ACCELERATE * / " , <nl> + " # define CAFFE2_USE_EIGEN_FOR_BLAS " : " / * # undef CAFFE2_USE_EIGEN_FOR_BLAS * / " , <nl> + " # define CAFFE2_USE_FBCODE " : " / * # undef CAFFE2_USE_FBCODE * / " , <nl> + " # define CAFFE2_USE_GOOGLE_GLOG " : " / * # undef CAFFE2_USE_GOOGLE_GLOG * / " , <nl> + " # define CAFFE2_USE_LITE_PROTO " : " / * # undef CAFFE2_USE_LITE_PROTO * / " , <nl> + " # define CAFFE2_USE_MKL \ n " : " / * # undef CAFFE2_USE_MKL * / \ n " , <nl> + " # define CAFFE2_USE_NVTX " : " / * # undef CAFFE2_USE_NVTX * / " , <nl> + " # define CAFFE2_USE_TRT " : " / * # undef CAFFE2_USE_TRT * / " , <nl> + } , <nl> + ) <nl> + <nl> + filegroup ( <nl> + name = " caffe2_contrib_srcs " , <nl> + srcs = [ <nl> + " caffe2 / contrib / gloo / allgather_ops . cc " , <nl> + " caffe2 / contrib / gloo / allreduce_ops . cc " , <nl> + " caffe2 / contrib / gloo / barrier_ops . cc " , <nl> + " caffe2 / contrib / gloo / broadcast_ops . cc " , <nl> + " caffe2 / contrib / gloo / common . cc " , <nl> + " caffe2 / contrib / gloo / common_world_ops . cc " , <nl> + " caffe2 / contrib / gloo / context . cc " , <nl> + " caffe2 / contrib / gloo / reduce_scatter_ops . cc " , <nl> + " caffe2 / contrib / gloo / store_handler . cc " , <nl> + ] , <nl> + ) <nl> + <nl> + filegroup ( <nl> + name = " caffe2_core_srcs " , <nl> + srcs = [ <nl> + " caffe2 / core / allocator . cc " , <nl> + " caffe2 / core / blob_serialization . cc " , <nl> + " caffe2 / core / blob_stats . cc " , <nl> + " caffe2 / core / common . cc " , <nl> + " caffe2 / core / context . cc " , <nl> + " caffe2 / core / context_base . cc " , <nl> + " caffe2 / core / db . cc " , <nl> + " caffe2 / core / event . cc " , <nl> + " caffe2 / core / export_c10_op_to_caffe2 . cc " , <nl> + " caffe2 / core / graph . cc " , <nl> + " caffe2 / core / init . cc " , <nl> + " caffe2 / core / init_denormals . cc " , <nl> + " caffe2 / core / init_intrinsics_check . cc " , <nl> + " caffe2 / core / init_omp . cc " , <nl> + " caffe2 / core / int8_serialization . cc " , <nl> + " caffe2 / core / memonger . cc " , <nl> + " caffe2 / core / module . cc " , <nl> + " caffe2 / core / net . cc " , <nl> + " caffe2 / core / net_async_base . cc " , <nl> + " caffe2 / core / net_async_scheduling . cc " , <nl> + " caffe2 / core / net_async_task . cc " , <nl> + " caffe2 / core / net_async_task_future . cc " , <nl> + " caffe2 / core / net_async_task_graph . cc " , <nl> + " caffe2 / core / net_async_tracing . cc " , <nl> + " caffe2 / core / net_dag_utils . cc " , <nl> + " caffe2 / core / net_parallel . cc " , <nl> + " caffe2 / core / net_simple . cc " , <nl> + " caffe2 / core / net_simple_refcount . cc " , <nl> + " caffe2 / core / nomnigraph / Representations / NeuralNet . cc " , <nl> + " caffe2 / core / nomnigraph / tests / test_util . 
cc " , <nl> + " caffe2 / core / numa . cc " , <nl> + " caffe2 / core / operator . cc " , <nl> + " caffe2 / core / operator_schema . cc " , <nl> + " caffe2 / core / plan_executor . cc " , <nl> + " caffe2 / core / prof_dag_counters . cc " , <nl> + " caffe2 / core / qtensor . cc " , <nl> + " caffe2 / core / qtensor_serialization . cc " , <nl> + " caffe2 / core / stats . cc " , <nl> + " caffe2 / core / tensor . cc " , <nl> + " caffe2 / core / tensor_int8 . cc " , <nl> + " caffe2 / core / test_utils . cc " , <nl> + " caffe2 / core / transform . cc " , <nl> + " caffe2 / core / types . cc " , <nl> + " caffe2 / core / workspace . cc " , <nl> + ] , <nl> + ) <nl> + <nl> + filegroup ( <nl> + name = " caffe2_distributed_srcs " , <nl> + srcs = [ <nl> + " caffe2 / distributed / file_store_handler . cc " , <nl> + " caffe2 / distributed / file_store_handler_op . cc " , <nl> + " caffe2 / distributed / store_handler . cc " , <nl> + " caffe2 / distributed / store_ops . cc " , <nl> + ] , <nl> + ) <nl> + <nl> + filegroup ( <nl> + name = " caffe2_ideep_srcs " , <nl> + srcs = [ <nl> + " caffe2 / ideep / operators / adam_op . cc " , <nl> + " caffe2 / ideep / operators / channel_shuffle_op . cc " , <nl> + " caffe2 / ideep / operators / concat_split_op . cc " , <nl> + " caffe2 / ideep / operators / conv_op . cc " , <nl> + " caffe2 / ideep / operators / conv_transpose_op . cc " , <nl> + " caffe2 / ideep / operators / dropout_op . cc " , <nl> + " caffe2 / ideep / operators / elementwise_sum_op . cc " , <nl> + " caffe2 / ideep / operators / expand_squeeze_dims_op . cc " , <nl> + " caffe2 / ideep / operators / fully_connected_op . cc " , <nl> + " caffe2 / ideep / operators / local_response_normalization_op . cc " , <nl> + " caffe2 / ideep / operators / momentum_sgd_op . cc " , <nl> + " caffe2 / ideep / operators / operator_fallback_ideep . cc " , <nl> + " caffe2 / ideep / operators / order_switch_ops . cc " , <nl> + " caffe2 / ideep / operators / pool_op . cc " , <nl> + " caffe2 / ideep / operators / quantization / int8_add_op . cc " , <nl> + " caffe2 / ideep / operators / quantization / int8_conv_op . cc " , <nl> + " caffe2 / ideep / operators / quantization / int8_dequantize_op . cc " , <nl> + " caffe2 / ideep / operators / quantization / int8_fully_connected_op . cc " , <nl> + " caffe2 / ideep / operators / quantization / int8_given_tensor_fill_op . cc " , <nl> + " caffe2 / ideep / operators / quantization / int8_pool_op . cc " , <nl> + " caffe2 / ideep / operators / quantization / int8_quantize_op . cc " , <nl> + " caffe2 / ideep / operators / quantization / int8_relu_op . cc " , <nl> + " caffe2 / ideep / operators / queue_ops . cc " , <nl> + " caffe2 / ideep / operators / relu_op . cc " , <nl> + " caffe2 / ideep / operators / reshape_op . cc " , <nl> + " caffe2 / ideep / operators / shape_op . cc " , <nl> + " caffe2 / ideep / operators / sigmoid_op . cc " , <nl> + " caffe2 / ideep / operators / spatial_batch_norm_op . cc " , <nl> + " caffe2 / ideep / operators / transpose_op . cc " , <nl> + " caffe2 / ideep / operators / utility_ops . cc " , <nl> + " caffe2 / ideep / utils / ideep_register . cc " , <nl> + ] , <nl> + ) <nl> + <nl> + filegroup ( <nl> + name = " caffe2_onnx_srcs " , <nl> + srcs = [ <nl> + " caffe2 / onnx / backend . cc " , <nl> + " caffe2 / onnx / backend_rep . cc " , <nl> + " caffe2 / onnx / device . cc " , <nl> + " caffe2 / onnx / helper . cc " , <nl> + " caffe2 / onnx / offline_tensor . cc " , <nl> + " caffe2 / onnx / onnx_exporter . cc " , <nl> + " caffe2 / onnx / onnxifi_graph_info . 
cc " , <nl> + " caffe2 / onnx / onnxifi_init . cc " , <nl> + ] , <nl> + ) <nl> + <nl> + filegroup ( <nl> + name = " caffe2_operators_srcs " , <nl> + srcs = [ <nl> + " caffe2 / operators / abs_op . cc " , <nl> + " caffe2 / operators / accumulate_op . cc " , <nl> + " caffe2 / operators / accuracy_op . cc " , <nl> + " caffe2 / operators / acos_op . cc " , <nl> + " caffe2 / operators / affine_channel_op . cc " , <nl> + " caffe2 / operators / alias_with_name . cc " , <nl> + " caffe2 / operators / apmeter_op . cc " , <nl> + " caffe2 / operators / arg_ops . cc " , <nl> + " caffe2 / operators / asin_op . cc " , <nl> + " caffe2 / operators / assert_op . cc " , <nl> + " caffe2 / operators / atan_op . cc " , <nl> + " caffe2 / operators / atomic_ops . cc " , <nl> + " caffe2 / operators / batch_box_cox_op . cc " , <nl> + " caffe2 / operators / batch_bucketize_op . cc " , <nl> + " caffe2 / operators / batch_gather_ops . cc " , <nl> + " caffe2 / operators / batch_matmul_op . cc " , <nl> + " caffe2 / operators / batch_moments_op . cc " , <nl> + " caffe2 / operators / batch_permutation_op . cc " , <nl> + " caffe2 / operators / batch_sparse_to_dense_op . cc " , <nl> + " caffe2 / operators / bbox_transform_op . cc " , <nl> + " caffe2 / operators / bisect_percentile_op . cc " , <nl> + " caffe2 / operators / boolean_mask_ops . cc " , <nl> + " caffe2 / operators / boolean_unmask_ops . cc " , <nl> + " caffe2 / operators / box_with_nms_limit_op . cc " , <nl> + " caffe2 / operators / bucketize_op . cc " , <nl> + " caffe2 / operators / byte_weight_dequant_op . cc " , <nl> + " caffe2 / operators / cast_op . cc " , <nl> + " caffe2 / operators / cbrt_op . cc " , <nl> + " caffe2 / operators / cc_bmm_bg_op . cc " , <nl> + " caffe2 / operators / ceil_op . cc " , <nl> + " caffe2 / operators / channel_backprop_stats_op . cc " , <nl> + " caffe2 / operators / channel_shuffle_op . cc " , <nl> + " caffe2 / operators / channel_stats_op . cc " , <nl> + " caffe2 / operators / clip_op . cc " , <nl> + " caffe2 / operators / collect_and_distribute_fpn_rpn_proposals_op . cc " , <nl> + " caffe2 / operators / communicator_op . cc " , <nl> + " caffe2 / operators / concat_split_op . cc " , <nl> + " caffe2 / operators / conditional_op . cc " , <nl> + " caffe2 / operators / conv_gradient_op . cc " , <nl> + " caffe2 / operators / conv_op . cc " , <nl> + " caffe2 / operators / conv_op_eigen . cc " , <nl> + " caffe2 / operators / conv_op_shared . cc " , <nl> + " caffe2 / operators / conv_transpose_gradient_op . cc " , <nl> + " caffe2 / operators / conv_transpose_op_mobile . cc " , <nl> + " caffe2 / operators / copy_op . cc " , <nl> + " caffe2 / operators / copy_rows_to_tensor_op . cc " , <nl> + " caffe2 / operators / cos_op . cc " , <nl> + " caffe2 / operators / cosh_op . cc " , <nl> + " caffe2 / operators / cosine_embedding_criterion_op . cc " , <nl> + " caffe2 / operators / counter_ops . cc " , <nl> + " caffe2 / operators / crash_op . cc " , <nl> + " caffe2 / operators / create_scope_op . cc " , <nl> + " caffe2 / operators / crf_viterbi_op . cc " , <nl> + " caffe2 / operators / cross_entropy_op . cc " , <nl> + " caffe2 / operators / ctc_beam_search_decoder_op . cc " , <nl> + " caffe2 / operators / ctc_greedy_decoder_op . cc " , <nl> + " caffe2 / operators / cube_op . cc " , <nl> + " caffe2 / operators / data_couple . cc " , <nl> + " caffe2 / operators / dataset_ops . cc " , <nl> + " caffe2 / operators / deform_conv_gradient_op . cc " , <nl> + " caffe2 / operators / deform_conv_op . 
cc " , <nl> + " caffe2 / operators / dense_vector_to_id_list_op . cc " , <nl> + " caffe2 / operators / distance_op . cc " , <nl> + " caffe2 / operators / do_op . cc " , <nl> + " caffe2 / operators / dropout_op . cc " , <nl> + " caffe2 / operators / elementwise_add_gradient_op . cc " , <nl> + " caffe2 / operators / elementwise_add_op . cc " , <nl> + " caffe2 / operators / elementwise_div_gradient_op . cc " , <nl> + " caffe2 / operators / elementwise_div_op . cc " , <nl> + " caffe2 / operators / elementwise_linear_op . cc " , <nl> + " caffe2 / operators / elementwise_logical_ops . cc " , <nl> + " caffe2 / operators / elementwise_mul_gradient_op . cc " , <nl> + " caffe2 / operators / elementwise_mul_op . cc " , <nl> + " caffe2 / operators / elementwise_ops . cc " , <nl> + " caffe2 / operators / elementwise_ops_schema . cc " , <nl> + " caffe2 / operators / elementwise_ops_utils . cc " , <nl> + " caffe2 / operators / elementwise_sub_gradient_op . cc " , <nl> + " caffe2 / operators / elementwise_sub_op . cc " , <nl> + " caffe2 / operators / elementwise_sum_op . cc " , <nl> + " caffe2 / operators / elu_op . cc " , <nl> + " caffe2 / operators / enforce_finite_op . cc " , <nl> + " caffe2 / operators / ensure_clipped_op . cc " , <nl> + " caffe2 / operators / ensure_cpu_output_op . cc " , <nl> + " caffe2 / operators / erf_op . cc " , <nl> + " caffe2 / operators / exp_op . cc " , <nl> + " caffe2 / operators / expand_op . cc " , <nl> + " caffe2 / operators / expand_squeeze_dims_op . cc " , <nl> + " caffe2 / operators / fc_inference . cc " , <nl> + " caffe2 / operators / feature_maps_ops . cc " , <nl> + " caffe2 / operators / feed_blob_op . cc " , <nl> + " caffe2 / operators / filler_op . cc " , <nl> + " caffe2 / operators / find_duplicate_elements_op . cc " , <nl> + " caffe2 / operators / find_op . cc " , <nl> + " caffe2 / operators / flatten_op . cc " , <nl> + " caffe2 / operators / flexible_top_k . cc " , <nl> + " caffe2 / operators / floor_op . cc " , <nl> + " caffe2 / operators / free_op . cc " , <nl> + " caffe2 / operators / fully_connected_op . cc " , <nl> + " caffe2 / operators / fused_rowwise_8bit_conversion_ops . cc " , <nl> + " caffe2 / operators / fused_rowwise_random_quantization_ops . cc " , <nl> + " caffe2 / operators / gather_fused_8bit_rowwise_op . cc " , <nl> + " caffe2 / operators / gather_op . cc " , <nl> + " caffe2 / operators / gather_ranges_to_dense_op . cc " , <nl> + " caffe2 / operators / gelu_op . cc " , <nl> + " caffe2 / operators / generate_proposals_op . cc " , <nl> + " caffe2 / operators / given_tensor_byte_string_to_uint8_fill_op . cc " , <nl> + " caffe2 / operators / given_tensor_fill_op . cc " , <nl> + " caffe2 / operators / glu_op . cc " , <nl> + " caffe2 / operators / group_norm_op . cc " , <nl> + " caffe2 / operators / gru_unit_op . cc " , <nl> + " caffe2 / operators / h_softmax_op . cc " , <nl> + " caffe2 / operators / half_float_ops . cc " , <nl> + " caffe2 / operators / hard_sigmoid_op . cc " , <nl> + " caffe2 / operators / heatmap_max_keypoint_op . cc " , <nl> + " caffe2 / operators / if_op . cc " , <nl> + " caffe2 / operators / im2col_op . cc " , <nl> + " caffe2 / operators / index_hash_ops . cc " , <nl> + " caffe2 / operators / index_ops . cc " , <nl> + " caffe2 / operators / inference_lstm_op . cc " , <nl> + " caffe2 / operators / instance_norm_gradient_op . cc " , <nl> + " caffe2 / operators / instance_norm_op . cc " , <nl> + " caffe2 / operators / integral_image_op . cc " , <nl> + " caffe2 / operators / is_empty_op . 
cc " , <nl> + " caffe2 / operators / jsd_op . cc " , <nl> + " caffe2 / operators / key_split_ops . cc " , <nl> + " caffe2 / operators / last_n_window_collector . cc " , <nl> + " caffe2 / operators / layer_norm_op . cc " , <nl> + " caffe2 / operators / leaky_relu_op . cc " , <nl> + " caffe2 / operators / length_split_op . cc " , <nl> + " caffe2 / operators / lengths_pad_op . cc " , <nl> + " caffe2 / operators / lengths_reducer_fused_8bit_rowwise_ops . cc " , <nl> + " caffe2 / operators / lengths_reducer_ops . cc " , <nl> + " caffe2 / operators / lengths_reducer_rowwise_8bit_ops . cc " , <nl> + " caffe2 / operators / lengths_tile_op . cc " , <nl> + " caffe2 / operators / lengths_top_k_op . cc " , <nl> + " caffe2 / operators / listwise_l2r_op . cc " , <nl> + " caffe2 / operators / load_save_op . cc " , <nl> + " caffe2 / operators / load_save_op_util . cc " , <nl> + " caffe2 / operators / local_response_normalization_op . cc " , <nl> + " caffe2 / operators / locally_connected_op . cc " , <nl> + " caffe2 / operators / locally_connected_op_util . cc " , <nl> + " caffe2 / operators / log_op . cc " , <nl> + " caffe2 / operators / logit_op . cc " , <nl> + " caffe2 / operators / loss_op . cc " , <nl> + " caffe2 / operators / lp_pool_op . cc " , <nl> + " caffe2 / operators / lpnorm_op . cc " , <nl> + " caffe2 / operators / lstm_unit_op . cc " , <nl> + " caffe2 / operators / map_ops . cc " , <nl> + " caffe2 / operators / margin_ranking_criterion_op . cc " , <nl> + " caffe2 / operators / matmul_op . cc " , <nl> + " caffe2 / operators / mean_op . cc " , <nl> + " caffe2 / operators / merge_id_lists_op . cc " , <nl> + " caffe2 / operators / minmax_gradient_ops . cc " , <nl> + " caffe2 / operators / minmax_ops . cc " , <nl> + " caffe2 / operators / mod_op . cc " , <nl> + " caffe2 / operators / moments_op . cc " , <nl> + " caffe2 / operators / multi_class_accuracy_op . cc " , <nl> + " caffe2 / operators / negate_gradient_op . cc " , <nl> + " caffe2 / operators / negative_op . cc " , <nl> + " caffe2 / operators / ngram_ops . cc " , <nl> + " caffe2 / operators / norm_planar_yuv_op . cc " , <nl> + " caffe2 / operators / normalize_l1_op . cc " , <nl> + " caffe2 / operators / normalize_op . cc " , <nl> + " caffe2 / operators / numpy_tile_op . cc " , <nl> + " caffe2 / operators / one_hot_ops . cc " , <nl> + " caffe2 / operators / onnx_while_op . cc " , <nl> + " caffe2 / operators / order_switch_ops . cc " , <nl> + " caffe2 / operators / pack_rnn_sequence_op . cc " , <nl> + " caffe2 / operators / pack_segments . cc " , <nl> + " caffe2 / operators / pad_op . cc " , <nl> + " caffe2 / operators / partition_ops . cc " , <nl> + " caffe2 / operators / percentile_op . cc " , <nl> + " caffe2 / operators / perplexity_op . cc " , <nl> + " caffe2 / operators / piecewise_linear_transform_op . cc " , <nl> + " caffe2 / operators / pool_gradient_op . cc " , <nl> + " caffe2 / operators / pool_op . cc " , <nl> + " caffe2 / operators / pool_op_util . cc " , <nl> + " caffe2 / operators / pow_op . cc " , <nl> + " caffe2 / operators / prelu_op . cc " , <nl> + " caffe2 / operators / prepend_dim_op . cc " , <nl> + " caffe2 / operators / quant_decode_op . cc " , <nl> + " caffe2 / operators / rank_loss_op . cc " , <nl> + " caffe2 / operators / reciprocal_gradient_op . cc " , <nl> + " caffe2 / operators / reciprocal_op . cc " , <nl> + " caffe2 / operators / reduce_front_back_max_ops . cc " , <nl> + " caffe2 / operators / reduce_front_back_mean_ops . cc " , <nl> + " caffe2 / operators / reduce_front_back_sum_ops . 
cc " , <nl> + " caffe2 / operators / reduce_ops . cc " , <nl> + " caffe2 / operators / reduction_ops . cc " , <nl> + " caffe2 / operators / relu_n_op . cc " , <nl> + " caffe2 / operators / relu_op . cc " , <nl> + " caffe2 / operators / remove_data_blocks_op . cc " , <nl> + " caffe2 / operators / replace_nan_op . cc " , <nl> + " caffe2 / operators / reservoir_sampling . cc " , <nl> + " caffe2 / operators / reshape_op . cc " , <nl> + " caffe2 / operators / resize_3d_op . cc " , <nl> + " caffe2 / operators / resize_op . cc " , <nl> + " caffe2 / operators / reverse_packed_segs_op . cc " , <nl> + " caffe2 / operators / rmac_regions_op . cc " , <nl> + " caffe2 / operators / rnn / recurrent_network_blob_fetcher_op . cc " , <nl> + " caffe2 / operators / rnn / recurrent_network_executor . cc " , <nl> + " caffe2 / operators / rnn / recurrent_network_op . cc " , <nl> + " caffe2 / operators / roi_align_gradient_op . cc " , <nl> + " caffe2 / operators / roi_align_op . cc " , <nl> + " caffe2 / operators / roi_align_rotated_gradient_op . cc " , <nl> + " caffe2 / operators / roi_align_rotated_op . cc " , <nl> + " caffe2 / operators / roi_pool_op . cc " , <nl> + " caffe2 / operators / rowmul_op . cc " , <nl> + " caffe2 / operators / rsqrt_op . cc " , <nl> + " caffe2 / operators / scale_blobs_op . cc " , <nl> + " caffe2 / operators / scale_op . cc " , <nl> + " caffe2 / operators / segment_reduction_op . cc " , <nl> + " caffe2 / operators / selu_op . cc " , <nl> + " caffe2 / operators / sequence_ops . cc " , <nl> + " caffe2 / operators / shape_op . cc " , <nl> + " caffe2 / operators / sigmoid_gradient_op . cc " , <nl> + " caffe2 / operators / sigmoid_op . cc " , <nl> + " caffe2 / operators / sin_op . cc " , <nl> + " caffe2 / operators / sinh_op . cc " , <nl> + " caffe2 / operators / sinusoid_position_encoding_op . cc " , <nl> + " caffe2 / operators / slice_op . cc " , <nl> + " caffe2 / operators / softmax_op . cc " , <nl> + " caffe2 / operators / softmax_utils . cc " , <nl> + " caffe2 / operators / softmax_with_loss_op . cc " , <nl> + " caffe2 / operators / softplus_op . cc " , <nl> + " caffe2 / operators / softsign_op . cc " , <nl> + " caffe2 / operators / space_batch_op . cc " , <nl> + " caffe2 / operators / sparse_dropout_with_replacement_op . cc " , <nl> + " caffe2 / operators / sparse_normalize_op . cc " , <nl> + " caffe2 / operators / sparse_to_dense_mask_op . cc " , <nl> + " caffe2 / operators / sparse_to_dense_op . cc " , <nl> + " caffe2 / operators / spatial_batch_norm_gradient_op . cc " , <nl> + " caffe2 / operators / spatial_batch_norm_op . cc " , <nl> + " caffe2 / operators / spatial_softmax_with_loss_op . cc " , <nl> + " caffe2 / operators / sqr_op . cc " , <nl> + " caffe2 / operators / sqrt_op . cc " , <nl> + " caffe2 / operators / square_root_divide_op . cc " , <nl> + " caffe2 / operators / stats_ops . cc " , <nl> + " caffe2 / operators / stats_put_ops . cc " , <nl> + " caffe2 / operators / stop_gradient . cc " , <nl> + " caffe2 / operators / string_ops . cc " , <nl> + " caffe2 / operators / stump_func_op . cc " , <nl> + " caffe2 / operators / stylizer_ops . cc " , <nl> + " caffe2 / operators / summarize_op . cc " , <nl> + " caffe2 / operators / swish_op . cc " , <nl> + " caffe2 / operators / tan_op . cc " , <nl> + " caffe2 / operators / tanh_gradient_op . cc " , <nl> + " caffe2 / operators / tanh_op . cc " , <nl> + " caffe2 / operators / tensor_protos_db_input . cc " , <nl> + " caffe2 / operators / text_file_reader . cc " , <nl> + " caffe2 / operators / text_file_reader_utils . 
cc " , <nl> + " caffe2 / operators / thresholded_relu_op . cc " , <nl> + " caffe2 / operators / tile_op . cc " , <nl> + " caffe2 / operators / top_k . cc " , <nl> + " caffe2 / operators / transpose_op . cc " , <nl> + " caffe2 / operators / tt_linear_op . cc " , <nl> + " caffe2 / operators / unique_ops . cc " , <nl> + " caffe2 / operators / upsample_op . cc " , <nl> + " caffe2 / operators / utility_ops . cc " , <nl> + " caffe2 / operators / variable_length_sequence_padding . cc " , <nl> + " caffe2 / operators / weighted_multi_sampling_op . cc " , <nl> + " caffe2 / operators / weighted_sample_op . cc " , <nl> + " caffe2 / operators / while_op . cc " , <nl> + " caffe2 / operators / workspace_ops . cc " , <nl> + " caffe2 / operators / zero_gradient_op . cc " , <nl> + ] , <nl> + ) <nl> + <nl> + filegroup ( <nl> + name = " caffe2_opt_srcs " , <nl> + srcs = [ <nl> + " caffe2 / opt / annotations . cc " , <nl> + " caffe2 / opt / backend_cutting . cc " , <nl> + " caffe2 / opt / backend_transformer_base . cc " , <nl> + " caffe2 / opt / bound_shape_inferencer . cc " , <nl> + " caffe2 / opt / converter . cc " , <nl> + " caffe2 / opt / dead_code_elim . cc " , <nl> + " caffe2 / opt / device . cc " , <nl> + " caffe2 / opt / distributed . cc " , <nl> + " caffe2 / opt / distributed_converter . cc " , <nl> + " caffe2 / opt / fusion . cc " , <nl> + " caffe2 / opt / mobile . cc " , <nl> + " caffe2 / opt / onnxifi_op . cc " , <nl> + " caffe2 / opt / onnxifi_transformer . cc " , <nl> + " caffe2 / opt / optimize_ideep . cc " , <nl> + " caffe2 / opt / optimizer . cc " , <nl> + " caffe2 / opt / passes . cc " , <nl> + " caffe2 / opt / shape_info . cc " , <nl> + " caffe2 / opt / tvm_transformer . cc " , <nl> + ] , <nl> + ) <nl> + <nl> + filegroup ( <nl> + name = " caffe2_perfkernels_srcs " , <nl> + srcs = [ <nl> + " caffe2 / perfkernels / adagrad . cc " , <nl> + " caffe2 / perfkernels / embedding_lookup . cc " , <nl> + " caffe2 / perfkernels / embedding_lookup_idx . cc " , <nl> + " caffe2 / perfkernels / fused_8bit_rowwise_conversion . cc " , <nl> + " caffe2 / perfkernels / fused_8bit_rowwise_embedding_lookup . cc " , <nl> + " caffe2 / perfkernels / fused_8bit_rowwise_embedding_lookup_idx . cc " , <nl> + " caffe2 / perfkernels / math_cpu_base . cc " , <nl> + " caffe2 / perfkernels / typed_axpy . cc " , <nl> + ] , <nl> + ) <nl> + <nl> + filegroup ( <nl> + name = " caffe2_predictor_srcs " , <nl> + srcs = [ <nl> + " caffe2 / predictor / emulator / data_filler . cc " , <nl> + " caffe2 / predictor / emulator / data_filler . h " , <nl> + " caffe2 / predictor / predictor . cc " , <nl> + " caffe2 / predictor / predictor_config . cc " , <nl> + " caffe2 / predictor / predictor_utils . cc " , <nl> + ] , <nl> + ) <nl> + <nl> + filegroup ( <nl> + name = " caffe2_quantization_srcs " , <nl> + srcs = [ <nl> + " caffe2 / quantization / server / activation_distribution_observer . cc " , <nl> + " caffe2 / quantization / server / batch_matmul_dnnlowp_op . cc " , <nl> + " caffe2 / quantization / server / caffe2_dnnlowp_utils . cc " , <nl> + " caffe2 / quantization / server / channel_shuffle_dnnlowp_op . cc " , <nl> + " caffe2 / quantization / server / concat_dnnlowp_op . cc " , <nl> + " caffe2 / quantization / server / conv_dnnlowp_acc16_op . cc " , <nl> + " caffe2 / quantization / server / conv_dnnlowp_op . cc " , <nl> + " caffe2 / quantization / server / conv_relu_op . cc " , <nl> + " caffe2 / quantization / server / dequantize_dnnlowp_op . cc " , <nl> + " caffe2 / quantization / server / dnnlowp . 
cc " , <nl> + " caffe2 / quantization / server / dnnlowp_partition . cc " , <nl> + " caffe2 / quantization / server / dynamic_histogram . cc " , <nl> + " caffe2 / quantization / server / elementwise_add_dnnlowp_op . cc " , <nl> + " caffe2 / quantization / server / elementwise_linear_dnnlowp_op . cc " , <nl> + " caffe2 / quantization / server / elementwise_mul_dnnlowp_op . cc " , <nl> + " caffe2 / quantization / server / elementwise_sum_dnnlowp_op . cc " , <nl> + " caffe2 / quantization / server / elementwise_sum_relu_op . cc " , <nl> + " caffe2 / quantization / server / fbgemm_pack_matrix_cache . cc " , <nl> + " caffe2 / quantization / server / fbgemm_pack_op . cc " , <nl> + " caffe2 / quantization / server / fully_connected_dnnlowp_acc16_op . cc " , <nl> + " caffe2 / quantization / server / fully_connected_dnnlowp_op . cc " , <nl> + " caffe2 / quantization / server / fully_connected_fake_lowp_op . cc " , <nl> + " caffe2 / quantization / server / group_norm_dnnlowp_op . cc " , <nl> + " caffe2 / quantization / server / kl_minimization . cc " , <nl> + " caffe2 / quantization / server / lstm_unit_dnnlowp_op . cc " , <nl> + " caffe2 / quantization / server / norm_minimization . cc " , <nl> + " caffe2 / quantization / server / p99 . cc " , <nl> + " caffe2 / quantization / server / pool_dnnlowp_op . cc " , <nl> + " caffe2 / quantization / server / quantize_dnnlowp_op . cc " , <nl> + " caffe2 / quantization / server / relu_dnnlowp_op . cc " , <nl> + " caffe2 / quantization / server / sigmoid . cc " , <nl> + " caffe2 / quantization / server / sigmoid_dnnlowp_op . cc " , <nl> + " caffe2 / quantization / server / spatial_batch_norm_dnnlowp_op . cc " , <nl> + " caffe2 / quantization / server / tanh . cc " , <nl> + " caffe2 / quantization / server / tanh_dnnlowp_op . cc " , <nl> + " caffe2 / quantization / server / utility_dnnlowp_ops . cc " , <nl> + ] , <nl> + ) <nl> + <nl> + filegroup ( <nl> + name = " caffe2_queue_srcs " , <nl> + srcs = [ <nl> + " caffe2 / queue / blobs_queue . cc " , <nl> + " caffe2 / queue / blobs_queue_db . cc " , <nl> + " caffe2 / queue / queue_ops . cc " , <nl> + " caffe2 / queue / rebatching_queue . cc " , <nl> + " caffe2 / queue / rebatching_queue_ops . cc " , <nl> + ] , <nl> + ) <nl> + <nl> + filegroup ( <nl> + name = " caffe2_serialize_srcs " , <nl> + srcs = [ <nl> + " caffe2 / serialize / file_adapter . cc " , <nl> + " caffe2 / serialize / inline_container . cc " , <nl> + " caffe2 / serialize / istream_adapter . cc " , <nl> + " caffe2 / serialize / read_adapter_interface . cc " , <nl> + ] , <nl> + ) <nl> + <nl> + filegroup ( <nl> + name = " caffe2_sgd_srcs " , <nl> + srcs = [ <nl> + " caffe2 / sgd / adadelta_op . cc " , <nl> + " caffe2 / sgd / adagrad_op . cc " , <nl> + " caffe2 / sgd / adam_op . cc " , <nl> + " caffe2 / sgd / clip_tensor_op . cc " , <nl> + " caffe2 / sgd / ftrl_op . cc " , <nl> + " caffe2 / sgd / gftrl_op . cc " , <nl> + " caffe2 / sgd / iter_op . cc " , <nl> + " caffe2 / sgd / lars_op . cc " , <nl> + " caffe2 / sgd / learning_rate_adaption_op . cc " , <nl> + " caffe2 / sgd / learning_rate_op . cc " , <nl> + " caffe2 / sgd / momentum_sgd_op . cc " , <nl> + " caffe2 / sgd / rmsprop_op . cc " , <nl> + " caffe2 / sgd / wngrad_op . cc " , <nl> + " caffe2 / sgd / yellowfin_op . cc " , <nl> + ] , <nl> + ) <nl> + <nl> + filegroup ( <nl> + name = " caffe2_transforms_srcs " , <nl> + srcs = [ <nl> + " caffe2 / transforms / common_subexpression_elimination . cc " , <nl> + " caffe2 / transforms / conv_to_nnpack_transform . 
cc " , <nl> + " caffe2 / transforms / pattern_net_transform . cc " , <nl> + " caffe2 / transforms / single_op_transform . cc " , <nl> + ] , <nl> + ) <nl> + <nl> + filegroup ( <nl> + name = " caffe2_utils_srcs " , <nl> + srcs = [ <nl> + " caffe2 / utils / bench_utils . cc " , <nl> + " caffe2 / utils / cpuid . cc " , <nl> + " caffe2 / utils / math / broadcast . cc " , <nl> + " caffe2 / utils / math / elementwise . cc " , <nl> + " caffe2 / utils / math / reduce . cc " , <nl> + " caffe2 / utils / math / transpose . cc " , <nl> + " caffe2 / utils / math / utils . cc " , <nl> + " caffe2 / utils / math_cpu . cc " , <nl> + " caffe2 / utils / murmur_hash3 . cc " , <nl> + " caffe2 / utils / proto_convert . cc " , <nl> + " caffe2 / utils / proto_utils . cc " , <nl> + " caffe2 / utils / proto_wrap . cc " , <nl> + " caffe2 / utils / signal_handler . cc " , <nl> + " caffe2 / utils / smart_tensor_printer . cc " , <nl> + " caffe2 / utils / string_utils . cc " , <nl> + " caffe2 / utils / threadpool / ThreadPool . cc " , <nl> + " caffe2 / utils / threadpool / ThreadPoolMobile . cc " , <nl> + " caffe2 / utils / threadpool / pthreadpool . cc " , <nl> + " caffe2 / utils / threadpool / pthreadpool_impl . cc " , <nl> + ] , <nl> + ) <nl> + <nl> + filegroup ( <nl> + name = " caffe2_cuda_srcs " , <nl> + srcs = [ <nl> + " caffe2 / contrib / aten / aten_op_gpu . cc " , <nl> + " caffe2 / contrib / gloo / allreduce_ops_gpu . cc " , <nl> + " caffe2 / contrib / gloo / broadcast_ops_gpu . cc " , <nl> + " caffe2 / contrib / gloo / common_world_ops_gpu . cc " , <nl> + " caffe2 / core / blob_serialization_gpu . cc " , <nl> + " caffe2 / core / common_cudnn . cc " , <nl> + " caffe2 / core / common_gpu . cc " , <nl> + " caffe2 / core / event_gpu . cc " , <nl> + " caffe2 / db / create_db_op_gpu . cc " , <nl> + " caffe2 / distributed / file_store_handler_op_gpu . cc " , <nl> + " caffe2 / operators / communicator_op_gpu . cc " , <nl> + " caffe2 / operators / concat_split_op_gpu . cc " , <nl> + " caffe2 / operators / conv_op_cache_cudnn . cc " , <nl> + " caffe2 / operators / conv_op_cudnn . cc " , <nl> + " caffe2 / operators / conv_op_gpu . cc " , <nl> + " caffe2 / operators / conv_op_shared_gpu . cc " , <nl> + " caffe2 / operators / conv_transpose_op_cudnn . cc " , <nl> + " caffe2 / operators / conv_transpose_op_gpu . cc " , <nl> + " caffe2 / operators / counter_ops_gpu . cc " , <nl> + " caffe2 / operators / do_op_gpu . cc " , <nl> + " caffe2 / operators / dropout_op_cudnn . cc " , <nl> + " caffe2 / operators / elementwise_add_op_gpu . cc " , <nl> + " caffe2 / operators / elementwise_sub_op_gpu . cc " , <nl> + " caffe2 / operators / elu_op_cudnn . cc " , <nl> + " caffe2 / operators / exp_op_gpu . cc " , <nl> + " caffe2 / operators / expand_op_gpu . cc " , <nl> + " caffe2 / operators / expand_squeeze_dims_op_gpu . cc " , <nl> + " caffe2 / operators / free_op_gpu . cc " , <nl> + " caffe2 / operators / fully_connected_op_gpu . cc " , <nl> + " caffe2 / operators / if_op_gpu . cc " , <nl> + " caffe2 / operators / im2col_op_gpu . cc " , <nl> + " caffe2 / operators / load_save_op_gpu . cc " , <nl> + " caffe2 / operators / local_response_normalization_op_cudnn . cc " , <nl> + " caffe2 / operators / locally_connected_op_gpu . cc " , <nl> + " caffe2 / operators / log_op_gpu . cc " , <nl> + " caffe2 / operators / matmul_op_gpu . cc " , <nl> + " caffe2 / operators / negate_gradient_op_gpu . cc " , <nl> + " caffe2 / operators / negative_op_gpu . cc " , <nl> + " caffe2 / operators / order_switch_ops_cudnn . 
cc " , <nl> + " caffe2 / operators / order_switch_ops_gpu . cc " , <nl> + " caffe2 / operators / pool_op_cudnn . cc " , <nl> + " caffe2 / operators / prepend_dim_op_gpu . cc " , <nl> + " caffe2 / operators / reshape_op_gpu . cc " , <nl> + " caffe2 / operators / rnn / recurrent_network_blob_fetcher_op_gpu . cc " , <nl> + " caffe2 / operators / rnn / recurrent_network_executor_gpu . cc " , <nl> + " caffe2 / operators / rnn / recurrent_op_cudnn . cc " , <nl> + " caffe2 / operators / scale_op_gpu . cc " , <nl> + " caffe2 / operators / shape_op_gpu . cc " , <nl> + " caffe2 / operators / sigmoid_op_cudnn . cc " , <nl> + " caffe2 / operators / softmax_op_cudnn . cc " , <nl> + " caffe2 / operators / sqr_op_gpu . cc " , <nl> + " caffe2 / operators / sqrt_op_gpu . cc " , <nl> + " caffe2 / operators / stop_gradient_gpu . cc " , <nl> + " caffe2 / operators / tanh_op_cudnn . cc " , <nl> + " caffe2 / operators / tensor_protos_db_input_gpu . cc " , <nl> + " caffe2 / operators / transpose_op_cudnn . cc " , <nl> + " caffe2 / operators / while_op_gpu . cc " , <nl> + " caffe2 / operators / zero_gradient_op_gpu . cc " , <nl> + " caffe2 / queue / queue_ops_gpu . cc " , <nl> + " caffe2 / sgd / iter_op_gpu . cc " , <nl> + " caffe2 / sgd / learning_rate_op_gpu . cc " , <nl> + ] , <nl> + ) <nl> + <nl> + filegroup ( <nl> + name = " caffe2_cu_srcs " , <nl> + srcs = [ <nl> + " caffe2 / core / context_gpu . cu . cc " , <nl> + " caffe2 / operators / abs_op . cu . cc " , <nl> + " caffe2 / operators / accumulate_op . cu . cc " , <nl> + " caffe2 / operators / accuracy_op . cu . cc " , <nl> + " caffe2 / operators / acos_op . cu . cc " , <nl> + " caffe2 / operators / affine_channel_op . cu . cc " , <nl> + " caffe2 / operators / alias_with_name . cu . cc " , <nl> + " caffe2 / operators / arg_ops . cu . cc " , <nl> + " caffe2 / operators / asin_op . cu . cc " , <nl> + " caffe2 / operators / assert_op . cu . cc " , <nl> + " caffe2 / operators / atan_op . cu . cc " , <nl> + " caffe2 / operators / batch_gather_ops . cu . cc " , <nl> + " caffe2 / operators / batch_matmul_op . cu . cc " , <nl> + " caffe2 / operators / batch_moments_op . cu . cc " , <nl> + " caffe2 / operators / batch_permutation_op . cu . cc " , <nl> + " caffe2 / operators / batch_sparse_to_dense_op . cu . cc " , <nl> + " caffe2 / operators / boolean_mask_ops . cu . cc " , <nl> + " caffe2 / operators / boolean_unmask_ops . cu . cc " , <nl> + " caffe2 / operators / bucketize_op . cu . cc " , <nl> + " caffe2 / operators / cast_op . cu . cc " , <nl> + " caffe2 / operators / cbrt_op . cu . cc " , <nl> + " caffe2 / operators / ceil_op . cu . cc " , <nl> + " caffe2 / operators / channel_backprop_stats_op . cu . cc " , <nl> + " caffe2 / operators / channel_shuffle_op . cu . cc " , <nl> + " caffe2 / operators / channel_stats_op . cu . cc " , <nl> + " caffe2 / operators / channelwise_conv3d_op_cudnn . cu . cc " , <nl> + " caffe2 / operators / clip_op . cu . cc " , <nl> + " caffe2 / operators / copy_op . cu . cc " , <nl> + " caffe2 / operators / cos_op . cu . cc " , <nl> + " caffe2 / operators / cosh_op . cu . cc " , <nl> + " caffe2 / operators / cosine_embedding_criterion_op . cu . cc " , <nl> + " caffe2 / operators / cross_entropy_op . cu . cc " , <nl> + " caffe2 / operators / cube_op . cu . cc " , <nl> + " caffe2 / operators / data_couple_gpu . cu . cc " , <nl> + " caffe2 / operators / deform_conv_op . cu . cc " , <nl> + " caffe2 / operators / depthwise_3x3_conv_op_cudnn . cu . cc " , <nl> + " caffe2 / operators / distance_op . cu . 
cc " , <nl> + " caffe2 / operators / dropout_op . cu . cc " , <nl> + " caffe2 / operators / elementwise_div_op . cu . cc " , <nl> + " caffe2 / operators / elementwise_linear_op . cu . cc " , <nl> + " caffe2 / operators / elementwise_mul_op . cu . cc " , <nl> + " caffe2 / operators / elementwise_ops . cu . cc " , <nl> + " caffe2 / operators / elu_op . cu . cc " , <nl> + " caffe2 / operators / enforce_finite_op . cu . cc " , <nl> + " caffe2 / operators / ensure_cpu_output_op . cu . cc " , <nl> + " caffe2 / operators / erf_op . cu . cc " , <nl> + " caffe2 / operators / filler_op . cu . cc " , <nl> + " caffe2 / operators / find_op . cu . cc " , <nl> + " caffe2 / operators / floor_op . cu . cc " , <nl> + " caffe2 / operators / gather_op . cu . cc " , <nl> + " caffe2 / operators / gelu_op . cu . cc " , <nl> + " caffe2 / operators / generate_proposals_op . cu . cc " , <nl> + " caffe2 / operators / generate_proposals_op_util_nms_gpu . cu . cc " , <nl> + " caffe2 / operators / given_tensor_byte_string_to_uint8_fill_op . cu . cc " , <nl> + " caffe2 / operators / given_tensor_fill_op . cu . cc " , <nl> + " caffe2 / operators / glu_op . cu . cc " , <nl> + " caffe2 / operators / group_norm_op . cu . cc " , <nl> + " caffe2 / operators / gru_unit_op_gpu . cu . cc " , <nl> + " caffe2 / operators / half_float_ops . cu . cc " , <nl> + " caffe2 / operators / hard_sigmoid_op . cu . cc " , <nl> + " caffe2 / operators / instance_norm_op . cu . cc " , <nl> + " caffe2 / operators / integral_image_op . cu . cc " , <nl> + " caffe2 / operators / layer_norm_op . cu . cc " , <nl> + " caffe2 / operators / leaky_relu_op . cu . cc " , <nl> + " caffe2 / operators / lengths_pad_op . cu . cc " , <nl> + " caffe2 / operators / lengths_tile_op . cu . cc " , <nl> + " caffe2 / operators / local_response_normalization_op . cu . cc " , <nl> + " caffe2 / operators / logit_op . cu . cc " , <nl> + " caffe2 / operators / loss_op . cu . cc " , <nl> + " caffe2 / operators / lp_pool_op . cu . cc " , <nl> + " caffe2 / operators / lstm_unit_op_gpu . cu . cc " , <nl> + " caffe2 / operators / margin_ranking_criterion_op . cu . cc " , <nl> + " caffe2 / operators / max_pool_with_index . cu . cc " , <nl> + " caffe2 / operators / mean_op . cu . cc " , <nl> + " caffe2 / operators / mem_query_op . cu . cc " , <nl> + " caffe2 / operators / minmax_ops . cu . cc " , <nl> + " caffe2 / operators / moments_op . cu . cc " , <nl> + " caffe2 / operators / multi_class_accuracy_op . cu . cc " , <nl> + " caffe2 / operators / normalize_ops . cu . cc " , <nl> + " caffe2 / operators / one_hot_ops . cu . cc " , <nl> + " caffe2 / operators / pack_segments . cu . cc " , <nl> + " caffe2 / operators / pad_op_gpu . cu . cc " , <nl> + " caffe2 / operators / perplexity_op . cu . cc " , <nl> + " caffe2 / operators / piecewise_linear_transform_op . cu . cc " , <nl> + " caffe2 / operators / pool_op . cu . cc " , <nl> + " caffe2 / operators / pow_op . cu . cc " , <nl> + " caffe2 / operators / prelu_op . cu . cc " , <nl> + " caffe2 / operators / reciprocal_op . cu . cc " , <nl> + " caffe2 / operators / reduce_front_back_max_ops . cu . cc " , <nl> + " caffe2 / operators / reduce_front_back_sum_mean_ops . cu . cc " , <nl> + " caffe2 / operators / reduce_ops . cu . cc " , <nl> + " caffe2 / operators / reduction_ops . cu . cc " , <nl> + " caffe2 / operators / relu_n_op . cu . cc " , <nl> + " caffe2 / operators / relu_op . cu . cc " , <nl> + " caffe2 / operators / replace_nan_op . cu . cc " , <nl> + " caffe2 / operators / resize_3d_op . cu . 
cc " , <nl> + " caffe2 / operators / resize_op . cu . cc " , <nl> + " caffe2 / operators / reverse_packed_segs_op . cu . cc " , <nl> + " caffe2 / operators / rmac_regions_op . cu . cc " , <nl> + " caffe2 / operators / rnn / recurrent_network_op_gpu . cu . cc " , <nl> + " caffe2 / operators / roi_align_gradient_op . cu . cc " , <nl> + " caffe2 / operators / roi_align_op . cu . cc " , <nl> + " caffe2 / operators / roi_align_rotated_gradient_op . cu . cc " , <nl> + " caffe2 / operators / roi_align_rotated_op . cu . cc " , <nl> + " caffe2 / operators / roi_pool_op . cu . cc " , <nl> + " caffe2 / operators / rsqrt_op . cu . cc " , <nl> + " caffe2 / operators / scale_blobs_op . cu . cc " , <nl> + " caffe2 / operators / segment_reduction_op_gpu . cu . cc " , <nl> + " caffe2 / operators / selu_op . cu . cc " , <nl> + " caffe2 / operators / sequence_ops . cu . cc " , <nl> + " caffe2 / operators / sigmoid_op . cu . cc " , <nl> + " caffe2 / operators / sin_op . cu . cc " , <nl> + " caffe2 / operators / sinh_op . cu . cc " , <nl> + " caffe2 / operators / slice_op . cu . cc " , <nl> + " caffe2 / operators / softmax_ops . cu . cc " , <nl> + " caffe2 / operators / softplus_op . cu . cc " , <nl> + " caffe2 / operators / softsign_op . cu . cc " , <nl> + " caffe2 / operators / space_batch_op_gpu . cu . cc " , <nl> + " caffe2 / operators / sparse_normalize_op_gpu . cu . cc " , <nl> + " caffe2 / operators / sparse_to_dense_op . cu . cc " , <nl> + " caffe2 / operators / spatial_batch_norm_op . cu . cc " , <nl> + " caffe2 / operators / spatial_batch_norm_op_cudnn . cu . cc " , <nl> + " caffe2 / operators / stump_func_op . cu . cc " , <nl> + " caffe2 / operators / summarize_op . cu . cc " , <nl> + " caffe2 / operators / swish_op . cu . cc " , <nl> + " caffe2 / operators / tan_op . cu . cc " , <nl> + " caffe2 / operators / tanh_op . cu . cc " , <nl> + " caffe2 / operators / thresholded_relu_op . cu . cc " , <nl> + " caffe2 / operators / tile_op . cu . cc " , <nl> + " caffe2 / operators / top_k . cu . cc " , <nl> + " caffe2 / operators / transpose_op . cu . cc " , <nl> + " caffe2 / operators / unique_ops . cu . cc " , <nl> + " caffe2 / operators / upsample_op . cu . cc " , <nl> + " caffe2 / operators / utility_ops . cu . cc " , <nl> + " caffe2 / operators / weighted_sample_op . cu . cc " , <nl> + " caffe2 / sgd / adadelta_op_gpu . cu . cc " , <nl> + " caffe2 / sgd / adagrad_op_gpu . cu . cc " , <nl> + " caffe2 / sgd / adam_op_gpu . cu . cc " , <nl> + " caffe2 / sgd / fp16_momentum_sgd_op . cu . cc " , <nl> + " caffe2 / sgd / fp32_momentum_sgd_op . cu . cc " , <nl> + " caffe2 / sgd / lars_op_gpu . cu . cc " , <nl> + " caffe2 / sgd / momentum_sgd_op_gpu . cu . cc " , <nl> + " caffe2 / sgd / rmsprop_op_gpu . cu . cc " , <nl> + " caffe2 / sgd / yellowfin_op_gpu . cu . cc " , <nl> + " caffe2 / utils / math / broadcast . cu . cc " , <nl> + " caffe2 / utils / math / elementwise . cu . cc " , <nl> + " caffe2 / utils / math / reduce . cu . cc " , <nl> + " caffe2 / utils / math / transpose . cu . cc " , <nl> + " caffe2 / utils / math_gpu . cu . cc " , <nl> + ] , <nl> + ) <nl> + <nl> + # To achieve finer granularity and make debug easier , caffe2 is split into three libraries : <nl> + # ATen , caffe2 and caffe2_for_aten_headers . ATen lib group up source codes under <nl> + # aten / directory and caffe2 contains most files under ` caffe2 / ` directory . Since the <nl> + # ATen lib and the caffe2 lib would depend on each other , ` caffe2_for_aten_headers ` is splitted <nl> + # out from ` caffe2 ` to avoid dependency cycle . 
<nl> + cc_library ( <nl> + name = " caffe2_for_aten_headers " , <nl> + hdrs = [ <nl> + " caffe2 / core / macros . h " , <nl> + " caffe2 / core / common . h " , <nl> + " caffe2 / core / logging . h " , <nl> + " caffe2 / core / types . h " , <nl> + " caffe2 / perfkernels / common . h " , <nl> + " caffe2 / perfkernels / embedding_lookup . h " , <nl> + " caffe2 / perfkernels / embedding_lookup_idx . h " , <nl> + " caffe2 / utils / fixed_divisor . h " , <nl> + " caffe2 / utils / cpuid . h " , <nl> + ] + glob ( [ <nl> + " caffe2 / utils / threadpool / * . h " , <nl> + " caffe2 / proto / * . h " , <nl> + ] ) , <nl> + copts = CAFFE2_COPTS , <nl> + visibility = [ " / / visibility : public " ] , <nl> + deps = [ <nl> + " : c10_headers " , <nl> + " : caffe2_protos " , <nl> + ] , <nl> + ) <nl> + <nl> + cc_library ( <nl> + name = " caffe2_headers " , <nl> + hdrs = glob ( [ <nl> + " caffe2 / contrib / aten / * . h " , <nl> + " caffe2 / contrib / gloo / * . h " , <nl> + " caffe2 / core / * . h " , <nl> + " caffe2 / core / nomnigraph / include / nomnigraph / Converters / * . h " , <nl> + " caffe2 / core / nomnigraph / include / nomnigraph / Generated / * . h " , <nl> + " caffe2 / core / nomnigraph / include / nomnigraph / Graph / * . h " , <nl> + " caffe2 / core / nomnigraph / include / nomnigraph / Representations / * . h " , <nl> + " caffe2 / core / nomnigraph / include / nomnigraph / Support / * . h " , <nl> + " caffe2 / core / nomnigraph / include / nomnigraph / Transformations / * . h " , <nl> + " caffe2 / core / nomnigraph / tests / * . h " , <nl> + " caffe2 / db / * . h " , <nl> + " caffe2 / distributed / * . h " , <nl> + " caffe2 / ideep / * . h " , <nl> + " caffe2 / ideep / operators / * . h " , <nl> + " caffe2 / ideep / operators / quantization / * . h " , <nl> + " caffe2 / ideep / utils / * . h " , <nl> + " caffe2 / onnx / * . h " , <nl> + " caffe2 / operators / * . h " , <nl> + " caffe2 / operators / experimental / c10 / cpu / * . h " , <nl> + " caffe2 / operators / rnn / * . h " , <nl> + " caffe2 / opt / * . h " , <nl> + " caffe2 / perfkernels / * . h " , <nl> + " caffe2 / predictor / * . h " , <nl> + " caffe2 / predictor / emulator / * . h " , <nl> + " caffe2 / proto / * . h " , <nl> + " caffe2 / quantization / server / * . h " , <nl> + " caffe2 / queue / * . h " , <nl> + " caffe2 / serialize / * . h " , <nl> + " caffe2 / sgd / * . h " , <nl> + " caffe2 / share / contrib / depthwise / * . h " , <nl> + " caffe2 / transforms / * . h " , <nl> + " caffe2 / utils / * . h " , <nl> + " caffe2 / utils / math / * . h " , <nl> + " caffe2 / utils / threadpool / * . h " , <nl> + " modules / * * / * . h " , <nl> + ] ) + if_cuda ( glob ( [ <nl> + " caffe2 / * * / * . cuh " , <nl> + " caffe2 / image / * . h " , <nl> + ] ) ) , <nl> + copts = CAFFE2_COPTS , <nl> + includes = [ <nl> + " caffe2 / contrib / aten " , <nl> + " caffe2 / core / nomnigraph / include " , <nl> + " third_party / miniz - 2 . 0 . 8 " , <nl> + ] , <nl> + visibility = [ " / / visibility : public " ] , <nl> + deps = [ <nl> + " : caffe2_for_aten_headers " , <nl> + " : caffe2_protos " , <nl> + ] , <nl> + ) <nl> + <nl> + cc_library ( <nl> + name = " caffe2_dnnlowp_avx2_ops " , <nl> + srcs = [ <nl> + " caffe2 / quantization / server / elementwise_sum_dnnlowp_op_avx2 . cc " , <nl> + " caffe2 / quantization / server / fully_connected_fake_lowp_op_avx2 . cc " , <nl> + " caffe2 / quantization / server / group_norm_dnnlowp_op_avx2 . cc " , <nl> + " caffe2 / quantization / server / norm_minimization_avx2 . 
cc " , <nl> + " caffe2 / quantization / server / pool_dnnlowp_op_avx2 . cc " , <nl> + " caffe2 / quantization / server / relu_dnnlowp_op_avx2 . cc " , <nl> + " caffe2 / quantization / server / spatial_batch_norm_dnnlowp_op_avx2 . cc " , <nl> + " caffe2 / quantization / server / transpose . cc " , <nl> + ] , <nl> + copts = CAFFE2_COPTS + [ <nl> + " - mf16c " , <nl> + " - mavx2 " , <nl> + " - mfma " , <nl> + " - mxsave " , <nl> + ] , <nl> + visibility = [ " / / visibility : public " ] , <nl> + deps = [ <nl> + " : caffe2_headers " , <nl> + " @ fbgemm " , <nl> + ] , <nl> + alwayslink = True , <nl> + ) <nl> + <nl> + cc_library ( <nl> + name = " caffe2 " , <nl> + srcs = [ <nl> + " caffe2 / db / create_db_op . cc " , <nl> + " caffe2 / db / protodb . cc " , <nl> + " caffe2 / share / contrib / depthwise / depthwise3x3_conv_op . cc " , <nl> + " : caffe2_contrib_srcs " , <nl> + " : caffe2_core_srcs " , <nl> + " : caffe2_distributed_srcs " , <nl> + " : caffe2_ideep_srcs " , <nl> + " : caffe2_onnx_srcs " , <nl> + " : caffe2_operators_srcs " , <nl> + " : caffe2_opt_srcs " , <nl> + " : caffe2_perfkernels_srcs " , <nl> + " : caffe2_predictor_srcs " , <nl> + " : caffe2_quantization_srcs " , <nl> + " : caffe2_queue_srcs " , <nl> + " : caffe2_serialize_srcs " , <nl> + " : caffe2_sgd_srcs " , <nl> + " : caffe2_transforms_srcs " , <nl> + " : caffe2_utils_srcs " , <nl> + ] , <nl> + copts = CAFFE2_COPTS + [ " - mf16c " ] , <nl> + linkstatic = 1 , <nl> + visibility = [ " / / visibility : public " ] , <nl> + deps = [ <nl> + " : caffe2_headers " , <nl> + " : caffe2_dnnlowp_avx2_ops " , <nl> + " : caffe2_perfkernels_avx " , <nl> + " : caffe2_perfkernels_avx2 " , <nl> + " : caffe2_perfkernels_avx512 " , <nl> + " : caffe2_protos " , <nl> + " / / third_party / miniz - 2 . 0 . 8 : miniz " , <nl> + " @ com_google_protobuf / / : protobuf " , <nl> + " @ eigen " , <nl> + " @ foxi " , <nl> + " @ gloo " , <nl> + " @ onnx " , <nl> + ] + if_cuda ( <nl> + [ <nl> + " : caffe2_cpp_cuda " , <nl> + " : aten_cuda " , <nl> + ] , <nl> + [ " : aten " ] , <nl> + ) , <nl> + alwayslink = True , <nl> + ) <nl> + <nl> + cc_library ( <nl> + name = " caffe2_cpp_cuda " , <nl> + srcs = [ " : caffe2_cuda_srcs " ] , <nl> + copts = CAFFE2_COPTS , <nl> + visibility = [ " / / visibility : public " ] , <nl> + deps = [ <nl> + " : caffe2_cuda " , <nl> + " : caffe2_headers " , <nl> + ] , <nl> + alwayslink = True , <nl> + ) <nl> + <nl> + cu_library ( <nl> + name = " caffe2_cuda " , <nl> + srcs = [ " : caffe2_cu_srcs " ] , <nl> + copts = CAFFE2_COPTS + torch_cuda_half_options , <nl> + visibility = [ " / / visibility : public " ] , <nl> + deps = [ <nl> + " : aten " , <nl> + " : caffe2_headers " , <nl> + " @ cub " , <nl> + " @ cuda / / : cublas " , <nl> + " @ cuda / / : curand " , <nl> + " @ cudnn " , <nl> + " @ eigen " , <nl> + " @ gloo " , <nl> + ] , <nl> + alwayslink = True , <nl> + ) <nl> + <nl> + PERF_COPTS = [ <nl> + " - DHAVE_GCC_GET_CPUID " , <nl> + " - DUSE_AVX " , <nl> + " - DUSE_AVX2 " , <nl> + " - DTH_HAVE_THREAD " , <nl> + " - DHAVE_AVX_CPU_DEFINITION " , <nl> + " - DHAVE_AVX2_CPU_DEFINITION " , <nl> + " - DENABLE_ALIAS = 1 " , <nl> + " - DHAVE_MALLOC_USABLE_SIZE = 1 " , <nl> + " - DHAVE_MMAP = 1 " , <nl> + " - DHAVE_SHM_OPEN = 1 " , <nl> + " - DHAVE_SHM_UNLINK = 1 " , <nl> + " - DSLEEF_STATIC_LIBS = 1 " , <nl> + " - D_FILE_OFFSET_BITS = 64 " , <nl> + " - DUSE_FBGEMM " , <nl> + " - fvisibility - inlines - hidden " , <nl> + " - Wunused - parameter " , <nl> + " - fno - math - errno " , <nl> + " - fno - trapping - math " , <nl> + " - mf16c " , <nl> 
+ ] <nl> + <nl> + PERF_HEADERS = glob ( [ <nl> + " caffe2 / perfkernels / * . h " , <nl> + " caffe2 / core / * . h " , <nl> + ] ) <nl> + <nl> + cc_library ( <nl> + name = " caffe2_perfkernels_avx " , <nl> + srcs = glob ( [ <nl> + " caffe2 / perfkernels / * _avx . cc " , <nl> + ] ) , <nl> + hdrs = PERF_HEADERS , <nl> + copts = PERF_COPTS + [ <nl> + " - mavx " , <nl> + ] , <nl> + visibility = [ " / / visibility : public " ] , <nl> + deps = [ <nl> + " : c10 " , <nl> + ] , <nl> + alwayslink = True , <nl> + ) <nl> + <nl> + cc_library ( <nl> + name = " caffe2_perfkernels_avx2 " , <nl> + srcs = glob ( [ <nl> + " caffe2 / perfkernels / * _avx2 . cc " , <nl> + ] ) , <nl> + hdrs = PERF_HEADERS , <nl> + copts = PERF_COPTS + [ <nl> + " - mavx2 " , <nl> + " - mfma " , <nl> + " - mavx " , <nl> + ] , <nl> + visibility = [ " / / visibility : public " ] , <nl> + deps = [ <nl> + " : c10 " , <nl> + ] , <nl> + alwayslink = True , <nl> + ) <nl> + <nl> + cc_library ( <nl> + name = " caffe2_perfkernels_avx512 " , <nl> + srcs = [ <nl> + " caffe2 / perfkernels / common_avx512 . cc " , <nl> + ] , <nl> + hdrs = PERF_HEADERS , <nl> + copts = PERF_COPTS + [ <nl> + " - mavx512f " , <nl> + " - mavx512dq " , <nl> + " - mavx512vl " , <nl> + " - mavx2 " , <nl> + " - mfma " , <nl> + " - mavx " , <nl> + ] , <nl> + visibility = [ " / / visibility : public " ] , <nl> + deps = [ <nl> + " : c10 " , <nl> + ] , <nl> + alwayslink = True , <nl> + ) <nl> + <nl> + # torch <nl> + cc_library ( <nl> + name = " torch_headers " , <nl> + hdrs = if_cuda ( <nl> + glob ( <nl> + [ <nl> + " torch / csrc / cuda / * . h " , <nl> + ] , <nl> + ) , <nl> + ) + glob ( <nl> + [ <nl> + " torch / * . h " , <nl> + " torch / csrc / * . h " , <nl> + " torch / csrc / api / include / torch / * . h " , <nl> + " torch / csrc / api / include / torch / data / * . h " , <nl> + " torch / csrc / api / include / torch / data / dataloader / * . h " , <nl> + " torch / csrc / api / include / torch / data / datasets / * . h " , <nl> + " torch / csrc / api / include / torch / data / detail / * . h " , <nl> + " torch / csrc / api / include / torch / data / samplers / * . h " , <nl> + " torch / csrc / api / include / torch / data / transforms / * . h " , <nl> + " torch / csrc / api / include / torch / detail / * . h " , <nl> + " torch / csrc / api / include / torch / nn / * . h " , <nl> + " torch / csrc / api / include / torch / nn / functional / * . h " , <nl> + " torch / csrc / api / include / torch / nn / modules / * . h " , <nl> + " torch / csrc / api / include / torch / nn / modules / container / * . h " , <nl> + " torch / csrc / api / include / torch / nn / options / * . h " , <nl> + " torch / csrc / api / include / torch / nn / parallel / * . h " , <nl> + " torch / csrc / api / include / torch / nn / utils / * . h " , <nl> + " torch / csrc / api / include / torch / optim / * . h " , <nl> + " torch / csrc / api / include / torch / python / * . h " , <nl> + " torch / csrc / api / include / torch / serialize / * . h " , <nl> + " torch / csrc / autograd / * . h " , <nl> + " torch / csrc / autograd / functions / * . h " , <nl> + " torch / csrc / autograd / utils / * . h " , <nl> + " torch / csrc / distributed / autograd / functions / * . h " , <nl> + " torch / csrc / distributed / autograd / context / * . h " , <nl> + " torch / csrc / distributed / autograd / engine / * . h " , <nl> + " torch / csrc / distributed / autograd / rpc_messages / * . h " , <nl> + " torch / csrc / distributed / autograd / * . h " , <nl> + " torch / csrc / distributed / c10d / * . 
h " , <nl> + " torch / csrc / distributed / rpc / * . h " , <nl> + " torch / csrc / generic / * . h " , <nl> + " torch / csrc / generic / * . cpp " , <nl> + " torch / csrc / jit / * . h " , <nl> + " torch / csrc / jit / api / * . h " , <nl> + " torch / csrc / jit / codegen / fuser / * . h " , <nl> + " torch / csrc / jit / codegen / fuser / cpu / * . h " , <nl> + " torch / csrc / jit / codegen / fuser / cuda / * . h " , <nl> + " torch / csrc / jit / ir / * . h " , <nl> + " torch / csrc / jit / fuser / * . h " , <nl> + " torch / csrc / jit / fuser / cpu / * . h " , <nl> + " torch / csrc / jit / fuser / cuda / * . h " , <nl> + " torch / csrc / jit / passes / * . h " , <nl> + " torch / csrc / jit / passes / onnx / * . h " , <nl> + " torch / csrc / jit / passes / utils / * . h " , <nl> + " torch / csrc / jit / python / * . h " , <nl> + " torch / csrc / jit / runtime / * . h " , <nl> + " torch / csrc / jit / frontend / * . h " , <nl> + " torch / csrc / jit / mobile / * . h " , <nl> + " torch / csrc / jit / serialization / * . h " , <nl> + " torch / csrc / jit / tensorexpr / * . h " , <nl> + " torch / csrc / jit / testing / * . h " , <nl> + " torch / csrc / multiprocessing / * . h " , <nl> + " torch / csrc / onnx / * . h " , <nl> + " torch / csrc / tensor / * . h " , <nl> + " torch / csrc / utils / * . h " , <nl> + " torch / lib / libshm / * . h " , <nl> + " torch / lib / c10d / * . hpp " , <nl> + ] , <nl> + exclude = [ <nl> + " torch / lib / c10d / ProcessGroupMPI . hpp " , <nl> + " torch / lib / c10d / ProcessGroupNCCL . hpp " , <nl> + ] , <nl> + ) + [ " : generated_code " ] , <nl> + includes = [ <nl> + " torch / csrc " , <nl> + " torch / csrc / api / include " , <nl> + " torch / lib " , <nl> + " torch / lib / libshm " , <nl> + ] , <nl> + visibility = [ " / / visibility : public " ] , <nl> + deps = [ <nl> + " : aten_headers " , <nl> + " : c10_headers " , <nl> + " : caffe2_headers " , <nl> + " @ onnx " , <nl> + ] , <nl> + alwayslink = True , <nl> + ) <nl> + <nl> + TORCH_COPTS = COMMON_COPTS + [ <nl> + " - Dtorch_EXPORTS " , <nl> + " - DHAVE_AVX_CPU_DEFINITION " , <nl> + " - DHAVE_AVX2_CPU_DEFINITION " , <nl> + " - DCAFFE2_USE_GLOO " , <nl> + " - fvisibility - inlines - hidden " , <nl> + " - fno - math - errno " , <nl> + " - fno - trapping - math " , <nl> + ] <nl> + <nl> + filegroup ( <nl> + name = " torch_srcs " , <nl> + srcs = [ <nl> + " torch / csrc / jit / tensorexpr / codegen . cpp " , <nl> + " torch / csrc / jit / tensorexpr / eval . cpp " , <nl> + " torch / csrc / jit / tensorexpr / expr . cpp " , <nl> + " torch / csrc / jit / tensorexpr / function . cpp " , <nl> + " torch / csrc / jit / tensorexpr / hash_provider . cpp " , <nl> + " torch / csrc / jit / tensorexpr / ir . cpp " , <nl> + " torch / csrc / jit / tensorexpr / ir_mutator . cpp " , <nl> + " torch / csrc / jit / tensorexpr / ir_printer . cpp " , <nl> + " torch / csrc / jit / tensorexpr / ir_simplifier . cpp " , <nl> + " torch / csrc / jit / tensorexpr / ir_visitor . cpp " , <nl> + " torch / csrc / jit / tensorexpr / kernel . cpp " , <nl> + " torch / csrc / jit / tensorexpr / llvm_codegen . cpp " , <nl> + " torch / csrc / jit / tensorexpr / llvm_jit . cpp " , <nl> + " torch / csrc / jit / tensorexpr / loopnest . cpp " , <nl> + " torch / csrc / jit / tensorexpr / mem_arena . cpp " , <nl> + " torch / csrc / jit / tensorexpr / tensor . cpp " , <nl> + " torch / csrc / jit / tensorexpr / types . cpp " , <nl> + " torch / csrc / jit / tensorexpr / unique_name_manager . 
cpp " , <nl> + ] , <nl> + ) <nl> + <nl> + cc_library ( <nl> + name = " torch " , <nl> + srcs = if_cuda ( glob ( <nl> + [ <nl> + " torch / csrc / cuda / * . cpp " , <nl> + " torch / csrc / autograd / functions / comm . cpp " , <nl> + " torch / csrc / jit / tensorexpr / * . cpp " , <nl> + ] , <nl> + exclude = [ <nl> + " torch / csrc / cuda / python_nccl . cpp " , <nl> + " torch / csrc / cuda / nccl . cpp " , <nl> + ] , <nl> + ) ) + glob ( <nl> + [ <nl> + " torch / csrc / * . cpp " , <nl> + " torch / csrc / api / src / * . cpp " , <nl> + " torch / csrc / api / src / data / datasets / * . cpp " , <nl> + " torch / csrc / api / src / data / samplers / * . cpp " , <nl> + " torch / csrc / api / src / nn / * . cpp " , <nl> + " torch / csrc / api / src / nn / modules / * . cpp " , <nl> + " torch / csrc / api / src / nn / modules / container / * . cpp " , <nl> + " torch / csrc / api / src / nn / options / * . cpp " , <nl> + " torch / csrc / api / src / optim / * . cpp " , <nl> + " torch / csrc / api / src / python / * . cpp " , <nl> + " torch / csrc / api / src / serialize / * . cpp " , <nl> + " torch / csrc / autograd / * . cpp " , <nl> + " torch / csrc / autograd / functions / * . cpp " , <nl> + " torch / csrc / autograd / generated / * . cpp " , <nl> + " torch / csrc / distributed / autograd / * . cpp " , <nl> + " torch / csrc / distributed / autograd / context / * . cpp " , <nl> + " torch / csrc / distributed / autograd / functions / * . cpp " , <nl> + " torch / csrc / distributed / autograd / engine / * . cpp " , <nl> + " torch / csrc / distributed / autograd / rpc_messages / * . cpp " , <nl> + " torch / csrc / distributed / rpc / * . cpp " , <nl> + " torch / csrc / jit / * . cpp " , <nl> + " torch / csrc / jit / api / * . cpp " , <nl> + " torch / csrc / jit / codegen / fuser / * . cpp " , <nl> + " torch / csrc / jit / frontend / * . cpp " , <nl> + " torch / csrc / jit / fuser / * . cpp " , <nl> + " torch / csrc / jit / fuser / cpu / * . cpp " , <nl> + " torch / csrc / jit / ir / * . cpp " , <nl> + " torch / csrc / jit / generated / * . cpp " , <nl> + " torch / csrc / jit / passes / * . cpp " , <nl> + " torch / csrc / jit / passes / onnx / * . cpp " , <nl> + " torch / csrc / jit / passes / utils / * . cpp " , <nl> + " torch / csrc / jit / mobile / * . cpp " , <nl> + " torch / csrc / jit / python / * . cpp " , <nl> + " torch / csrc / jit / runtime / * . cpp " , <nl> + " torch / csrc / jit / serialization / * . cpp " , <nl> + " torch / csrc / jit / testing / * . cpp " , <nl> + " torch / csrc / multiprocessing / * . cpp " , <nl> + " torch / csrc / onnx / * . cpp " , <nl> + " torch / csrc / tensor / * . cpp " , <nl> + " torch / csrc / utils / * . cpp " , <nl> + " torch / lib / libshm / * . cpp " , <nl> + " torch / lib / c10d / * . cpp " , <nl> + ] , <nl> + exclude = glob ( [ <nl> + " torch / csrc / autograd / * _cuda . cpp " , <nl> + ] ) + [ <nl> + " torch / csrc / autograd / functions / comm . cpp " , <nl> + " torch / csrc / autograd / generated / VariableTypeEverything . cpp " , <nl> + " torch / lib / libshm / manager . cpp " , <nl> + " torch / lib / c10d / NCCLUtils . cpp " , <nl> + " torch / lib / c10d / ProcessGroupMPI . cpp " , <nl> + " torch / lib / c10d / ProcessGroupNCCL . 
cpp " , <nl> + ] , <nl> + ) + [ <nl> + " : torch_srcs " , <nl> + " : generated_code " , <nl> + ] , <nl> + copts = TORCH_COPTS + if_cuda ( [ " - DUSE_CUDA = 1 " ] ) , <nl> + defines = [ <nl> + " CAFFE2_NIGHTLY_VERSION = 20200115 " , <nl> + ] , <nl> + linkopts = [ <nl> + " - Wl , - - rpath " , <nl> + " - Wl , / opt / conda / lib " , <nl> + " - L / opt / conda / lib " , <nl> + " - lpython3 . 6m " , <nl> + ] , <nl> + visibility = [ " / / visibility : public " ] , <nl> + deps = [ <nl> + " : caffe2 " , <nl> + " : torch_headers " , <nl> + " @ local_config_python / / : python_headers " , <nl> + " @ pybind11 " , <nl> + ] , <nl> + alwayslink = True , <nl> + ) <nl> + <nl> + cc_library ( <nl> + name = " libtorch_headers " , <nl> + hdrs = glob ( [ <nl> + " * * / * . h " , <nl> + " * * / * . cuh " , <nl> + ] ) + [ <nl> + " : generated_code " , <nl> + ] , <nl> + includes = [ <nl> + " . " , <nl> + " torch / csrc / api / include " , <nl> + " torch / lib " , <nl> + " torch / lib / libshm " , <nl> + ] , <nl> + visibility = [ " / / visibility : public " ] , <nl> + deps = [ <nl> + " : aten_headers " , <nl> + " : c10_headers " , <nl> + " : caffe2_headers " , <nl> + ] , <nl> + ) <nl> + <nl> + # cpp api tests <nl> + cc_library ( <nl> + name = " test_support " , <nl> + testonly = True , <nl> + srcs = [ <nl> + " test / cpp / api / support . cpp " , <nl> + ] , <nl> + hdrs = [ <nl> + " test / cpp / api / support . h " , <nl> + " test / cpp / common / support . h " , <nl> + ] , <nl> + deps = [ <nl> + " : torch " , <nl> + " @ com_google_googletest / / : gtest_main " , <nl> + ] , <nl> + ) <nl> + <nl> + cc_test ( <nl> + name = " any_test " , <nl> + srcs = [ " test / cpp / api / any . cpp " ] , <nl> + deps = [ <nl> + " : test_support " , <nl> + " : torch " , <nl> + " @ com_google_googletest / / : gtest_main " , <nl> + ] , <nl> + ) <nl> + <nl> + cc_test ( <nl> + name = " autograd_test " , <nl> + srcs = [ " test / cpp / api / autograd . cpp " ] , <nl> + deps = [ <nl> + " : test_support " , <nl> + " : torch " , <nl> + " @ com_google_googletest / / : gtest_main " , <nl> + ] , <nl> + ) <nl> + <nl> + cc_test ( <nl> + name = " dataloader_test " , <nl> + srcs = [ " test / cpp / api / dataloader . cpp " ] , <nl> + deps = [ <nl> + " : test_support " , <nl> + " : torch " , <nl> + " @ com_google_googletest / / : gtest_main " , <nl> + ] , <nl> + ) <nl> + <nl> + cc_test ( <nl> + name = " enum_test " , <nl> + srcs = [ " test / cpp / api / enum . cpp " ] , <nl> + deps = [ <nl> + " : test_support " , <nl> + " : torch " , <nl> + " @ com_google_googletest / / : gtest_main " , <nl> + ] , <nl> + ) <nl> + <nl> + cc_test ( <nl> + name = " expanding_array_test " , <nl> + srcs = [ " test / cpp / api / expanding - array . cpp " ] , <nl> + deps = [ <nl> + " : test_support " , <nl> + " : torch " , <nl> + " @ com_google_googletest / / : gtest_main " , <nl> + ] , <nl> + ) <nl> + <nl> + cc_test ( <nl> + name = " functional_test " , <nl> + srcs = [ " test / cpp / api / functional . cpp " ] , <nl> + deps = [ <nl> + " : test_support " , <nl> + " : torch " , <nl> + " @ com_google_googletest / / : gtest_main " , <nl> + ] , <nl> + ) <nl> + <nl> + cc_test ( <nl> + name = " init_test " , <nl> + srcs = [ <nl> + " test / cpp / api / init . cpp " , <nl> + " test / cpp / api / init_baseline . h " , <nl> + ] , <nl> + deps = [ <nl> + " : test_support " , <nl> + " : torch " , <nl> + " @ com_google_googletest / / : gtest_main " , <nl> + ] , <nl> + ) <nl> + <nl> + # Torch integration tests rely on a labeled data set from the MNIST database . <nl> + # http : / / yann . 
lecun . com / exdb / mnist / <nl> + cc_test ( <nl> + name = " integration_test " , <nl> + srcs = [ " test / cpp / api / integration . cpp " ] , <nl> + deps = [ <nl> + " : test_support " , <nl> + " : torch " , <nl> + " @ com_google_googletest / / : gtest_main " , <nl> + ] , <nl> + ) <nl> + <nl> + cc_test ( <nl> + name = " jit_test " , <nl> + srcs = [ " test / cpp / api / jit . cpp " ] , <nl> + deps = [ <nl> + " : test_support " , <nl> + " : torch " , <nl> + " @ com_google_googletest / / : gtest_main " , <nl> + ] , <nl> + ) <nl> + <nl> + cc_test ( <nl> + name = " memory_test " , <nl> + srcs = [ " test / cpp / api / memory . cpp " ] , <nl> + deps = [ <nl> + " : test_support " , <nl> + " : torch " , <nl> + " @ com_google_googletest / / : gtest_main " , <nl> + ] , <nl> + ) <nl> + <nl> + cc_test ( <nl> + name = " misc_test " , <nl> + srcs = [ " test / cpp / api / misc . cpp " ] , <nl> + tags = [ <nl> + " exclusive " , <nl> + " gpu - required " , <nl> + ] , <nl> + deps = [ <nl> + " : test_support " , <nl> + " : torch " , <nl> + " @ com_google_googletest / / : gtest_main " , <nl> + ] , <nl> + ) <nl> + <nl> + cc_test ( <nl> + name = " module_test " , <nl> + srcs = [ " test / cpp / api / module . cpp " ] , <nl> + tags = [ <nl> + " exclusive " , <nl> + " gpu - required " , <nl> + ] , <nl> + deps = [ <nl> + " : test_support " , <nl> + " : torch " , <nl> + " @ com_google_googletest / / : gtest_main " , <nl> + ] , <nl> + ) <nl> + <nl> + cc_test ( <nl> + name = " modulelist_test " , <nl> + srcs = [ " test / cpp / api / modulelist . cpp " ] , <nl> + deps = [ <nl> + " : test_support " , <nl> + " : torch " , <nl> + " @ com_google_googletest / / : gtest_main " , <nl> + ] , <nl> + ) <nl> + <nl> + cc_test ( <nl> + name = " modules_test " , <nl> + srcs = [ " test / cpp / api / modules . cpp " ] , <nl> + deps = [ <nl> + " : test_support " , <nl> + " : torch " , <nl> + " @ com_google_googletest / / : gtest_main " , <nl> + ] , <nl> + ) <nl> + <nl> + cc_test ( <nl> + name = " nn_utils_test " , <nl> + srcs = [ " test / cpp / api / nn_utils . cpp " ] , <nl> + deps = [ <nl> + " : test_support " , <nl> + " : torch " , <nl> + " @ com_google_googletest / / : gtest_main " , <nl> + ] , <nl> + ) <nl> + <nl> + cc_test ( <nl> + name = " optim_test " , <nl> + srcs = [ <nl> + " test / cpp / api / optim . cpp " , <nl> + " test / cpp / api / optim_baseline . h " , <nl> + ] , <nl> + deps = [ <nl> + " : test_support " , <nl> + " : torch " , <nl> + " @ com_google_googletest / / : gtest_main " , <nl> + ] , <nl> + ) <nl> + <nl> + cc_test ( <nl> + name = " ordered_dict_test " , <nl> + srcs = [ " test / cpp / api / ordered_dict . cpp " ] , <nl> + deps = [ <nl> + " : test_support " , <nl> + " : torch " , <nl> + " @ com_google_googletest / / : gtest_main " , <nl> + ] , <nl> + ) <nl> + <nl> + cc_test ( <nl> + name = " parallel_test " , <nl> + srcs = [ " test / cpp / api / parallel . cpp " ] , <nl> + copts = COMMON_COPTS , <nl> + tags = [ <nl> + " exclusive " , <nl> + " gpu - required " , <nl> + ] , <nl> + deps = [ <nl> + " : test_support " , <nl> + " : torch " , <nl> + " @ com_google_googletest / / : gtest_main " , <nl> + ] , <nl> + ) <nl> + <nl> + cc_test ( <nl> + name = " rnn_test " , <nl> + size = " small " , <nl> + srcs = [ " test / cpp / api / rnn . 
cpp " ] , <nl> + tags = [ <nl> + " exclusive " , <nl> + " gpu - required " , <nl> + ] , <nl> + deps = [ <nl> + " : test_support " , <nl> + " : torch " , <nl> + " @ com_google_googletest / / : gtest_main " , <nl> + ] , <nl> + ) <nl> + <nl> + cc_test ( <nl> + name = " sequential_test " , <nl> + size = " small " , <nl> + srcs = [ " test / cpp / api / sequential . cpp " ] , <nl> + copts = [ " - Wno - deprecated - declarations " ] , <nl> + deps = [ <nl> + " : test_support " , <nl> + " : torch " , <nl> + " @ com_google_googletest / / : gtest_main " , <nl> + ] , <nl> + ) <nl> + <nl> + cc_test ( <nl> + name = " serialize_test " , <nl> + size = " small " , <nl> + srcs = [ " test / cpp / api / serialize . cpp " ] , <nl> + deps = [ <nl> + " : test_support " , <nl> + " : torch " , <nl> + " @ com_google_googletest / / : gtest_main " , <nl> + ] , <nl> + ) <nl> + <nl> + cc_test ( <nl> + name = " static_test " , <nl> + size = " small " , <nl> + srcs = [ " test / cpp / api / static . cpp " ] , <nl> + deps = [ <nl> + " : torch " , <nl> + " @ com_google_googletest / / : gtest_main " , <nl> + ] , <nl> + ) <nl> + <nl> + cc_test ( <nl> + name = " tensor_test " , <nl> + size = " small " , <nl> + srcs = [ " test / cpp / api / tensor . cpp " ] , <nl> + copts = [ " - Wno - deprecated - declarations " ] , <nl> + deps = [ <nl> + " : test_support " , <nl> + " : torch " , <nl> + " @ com_google_googletest / / : gtest_main " , <nl> + ] , <nl> + ) <nl> + <nl> + cc_test ( <nl> + name = " tensor_cuda_test " , <nl> + size = " small " , <nl> + srcs = [ " test / cpp / api / tensor_cuda . cpp " ] , <nl> + tags = [ <nl> + " exclusive " , <nl> + " gpu - required " , <nl> + ] , <nl> + deps = [ <nl> + " : torch " , <nl> + " @ com_google_googletest / / : gtest_main " , <nl> + ] , <nl> + ) <nl> + <nl> + cc_test ( <nl> + name = " tensor_options_test " , <nl> + size = " small " , <nl> + srcs = [ " test / cpp / api / tensor_options . cpp " ] , <nl> + deps = [ <nl> + " : test_support " , <nl> + " : torch " , <nl> + " @ com_google_googletest / / : gtest_main " , <nl> + ] , <nl> + ) <nl> + <nl> + cc_test ( <nl> + name = " tensor_options_cuda_test " , <nl> + size = " small " , <nl> + srcs = [ " test / cpp / api / tensor_options_cuda . cpp " ] , <nl> + tags = [ <nl> + " exclusive " , <nl> + " gpu - required " , <nl> + ] , <nl> + deps = [ <nl> + " : torch " , <nl> + " @ com_google_googletest / / : gtest_main " , <nl> + ] , <nl> + ) <nl> + <nl> + cc_test ( <nl> + name = " torch_include_test " , <nl> + size = " small " , <nl> + srcs = [ " test / cpp / api / torch_include . 
cpp " ] , <nl> + deps = [ <nl> + " : torch " , <nl> + " @ com_google_googletest / / : gtest_main " , <nl> + ] , <nl> + ) <nl> + <nl> + test_suite ( <nl> + name = " api_tests " , <nl> + tests = [ <nl> + " any_test " , <nl> + " autograd_test " , <nl> + " dataloader_test " , <nl> + " enum_test " , <nl> + " expanding_array_test " , <nl> + " functional_test " , <nl> + " init_test " , <nl> + " integration_test " , <nl> + " jit_test " , <nl> + " memory_test " , <nl> + " misc_test " , <nl> + " module_test " , <nl> + " modulelist_test " , <nl> + " modules_test " , <nl> + " nn_utils_test " , <nl> + " optim_test " , <nl> + " ordered_dict_test " , <nl> + " rnn_test " , <nl> + " sequential_test " , <nl> + " serialize_test " , <nl> + " static_test " , <nl> + " tensor_options_test " , <nl> + " tensor_test " , <nl> + " torch_include_test " , <nl> + ] , <nl> + ) <nl> + <nl> + # dist autograd tests <nl> + cc_test ( <nl> + name = " torch_dist_autograd_test " , <nl> + size = " small " , <nl> + srcs = [ " test / cpp / dist_autograd / test_dist_autograd . cpp " ] , <nl> + tags = [ <nl> + " exclusive " , <nl> + " gpu - required " , <nl> + ] , <nl> + deps = [ <nl> + " : torch " , <nl> + " @ com_google_googletest / / : gtest_main " , <nl> + ] , <nl> + ) <nl> + <nl> + # jit tests <nl> + # Because these individual unit tests require custom registering , <nl> + # it is easier to mimic the cmake build by globing together a single test . <nl> + cc_test ( <nl> + name = " jit_tests " , <nl> + size = " small " , <nl> + srcs = glob ( [ <nl> + " test / cpp / jit / * . cpp " , <nl> + " test / cpp / jit / * . h " , <nl> + " test / cpp / tensorexpr / * . cpp " , <nl> + " test / cpp / tensorexpr / * . h " , <nl> + ] ) , <nl> + linkstatic = True , <nl> + tags = [ <nl> + " exclusive " , <nl> + " gpu - required " , <nl> + ] , <nl> + deps = [ <nl> + " : torch " , <nl> + " @ com_google_googletest / / : gtest_main " , <nl> + ] , <nl> + ) <nl> + <nl> + # all tests <nl> + test_suite ( <nl> + name = " all_tests " , <nl> + tests = [ <nl> + " api_tests " , <nl> + " c10_tests " , <nl> + " jit_tests " , <nl> + " torch_dist_autograd_test " , <nl> + ] , <nl> + ) <nl> new file mode 100644 <nl> index 000000000000 . . 53696dc87f30 <nl> mmm / dev / null <nl> ppp b / WORKSPACE <nl> <nl> + workspace ( name = " pytorch " ) <nl> + <nl> + load ( " @ bazel_tools / / tools / build_defs / repo : http . bzl " , " http_archive " ) <nl> + load ( " / / tools / rules : workspace . bzl " , " new_patched_local_repository " ) <nl> + <nl> + http_archive ( <nl> + name = " bazel_skylib " , <nl> + urls = [ <nl> + " https : / / github . com / bazelbuild / bazel - skylib / releases / download / 1 . 0 . 2 / bazel - skylib - 1 . 0 . 2 . tar . gz " , <nl> + ] , <nl> + ) <nl> + <nl> + http_archive ( <nl> + name = " com_google_googletest " , <nl> + strip_prefix = " googletest - cd6b9ae3243985d4dc725abd513a874ab4161f3e " , <nl> + urls = [ <nl> + " https : / / github . com / google / googletest / archive / cd6b9ae3243985d4dc725abd513a874ab4161f3e . tar . gz " , <nl> + ] , <nl> + ) <nl> + <nl> + http_archive ( <nl> + name = " pybind11_bazel " , <nl> + strip_prefix = " pybind11_bazel - 7f397b5d2cc2434bbd651e096548f7b40c128044 " , <nl> + urls = [ " https : / / github . com / pybind / pybind11_bazel / archive / 7f397b5d2cc2434bbd651e096548f7b40c128044 . zip " ] , <nl> + sha256 = " e4a9536f49d4a88e3c5a09954de49c4a18d6b1632c457a62d6ec4878c27f1b5b " , <nl> + ) <nl> + <nl> + new_local_repository ( <nl> + name = " pybind11 " , <nl> + build_file = " @ pybind11_bazel / / : pybind11 . 
BUILD " , <nl> + path = " third_party / pybind11 " , <nl> + ) <nl> + <nl> + http_archive ( <nl> + name = " com_github_glog " , <nl> + strip_prefix = " glog - 0 . 4 . 0 " , <nl> + urls = [ <nl> + " https : / / github . com / google / glog / archive / v0 . 4 . 0 . tar . gz " , <nl> + ] , <nl> + ) <nl> + <nl> + http_archive ( <nl> + name = " com_github_gflags_gflags " , <nl> + strip_prefix = " gflags - 2 . 2 . 2 " , <nl> + urls = [ <nl> + " https : / / github . com / gflags / gflags / archive / v2 . 2 . 2 . tar . gz " , <nl> + ] , <nl> + sha256 = " 34af2f15cf7367513b352bdcd2493ab14ce43692d2dcd9dfc499492966c64dcf " , <nl> + ) <nl> + <nl> + new_local_repository ( <nl> + name = " gloo " , <nl> + build_file = " / / third_party : gloo . BUILD " , <nl> + path = " third_party / gloo " , <nl> + ) <nl> + <nl> + new_local_repository ( <nl> + name = " onnx " , <nl> + build_file = " / / third_party : onnx . BUILD " , <nl> + path = " third_party / onnx " , <nl> + ) <nl> + <nl> + new_local_repository ( <nl> + name = " foxi " , <nl> + build_file = " / / third_party : foxi . BUILD " , <nl> + path = " third_party / foxi " , <nl> + ) <nl> + <nl> + local_repository ( <nl> + name = " com_google_protobuf " , <nl> + path = " third_party / protobuf " , <nl> + ) <nl> + <nl> + new_local_repository ( <nl> + name = " eigen " , <nl> + build_file = " / / third_party : eigen . BUILD " , <nl> + path = " third_party / eigen " , <nl> + ) <nl> + <nl> + new_local_repository ( <nl> + name = " fbgemm " , <nl> + build_file = " / / third_party : fbgemm . BUILD " , <nl> + path = " third_party / fbgemm " , <nl> + ) <nl> + <nl> + new_local_repository ( <nl> + name = " ideep " , <nl> + build_file = " / / third_party : ideep . BUILD " , <nl> + path = " third_party / ideep " , <nl> + ) <nl> + <nl> + new_local_repository ( <nl> + name = " mkl_dnn " , <nl> + build_file = " / / third_party : mkl - dnn . BUILD " , <nl> + path = " third_party / ideep / mkl - dnn " , <nl> + ) <nl> + <nl> + new_local_repository ( <nl> + name = " cpuinfo " , <nl> + build_file = " / / third_party : cpuinfo . BUILD " , <nl> + path = " third_party / cpuinfo " , <nl> + ) <nl> + <nl> + new_local_repository ( <nl> + name = " asmjit " , <nl> + build_file = " / / third_party : asmjit . BUILD " , <nl> + path = " third_party / fbgemm / third_party / asmjit " , <nl> + ) <nl> + <nl> + new_local_repository ( <nl> + name = " sleef " , <nl> + build_file = " / / third_party : sleef . BUILD " , <nl> + path = " third_party / sleef " , <nl> + ) <nl> + <nl> + new_patched_local_repository ( <nl> + name = " tbb " , <nl> + patches = [ <nl> + " @ / / third_party : tbb . patch " , <nl> + ] , <nl> + patch_strip = 1 , <nl> + build_file = " / / third_party : tbb . BUILD " , <nl> + path = " third_party / tbb " , <nl> + ) <nl> + <nl> + http_archive ( <nl> + name = " mkl " , <nl> + build_file = " / / third_party : mkl . BUILD " , <nl> + strip_prefix = " lib " , <nl> + sha256 = " 59154b30dd74561e90d547f9a3af26c75b6f4546210888f09c9d4db8f4bf9d4c " , <nl> + urls = [ <nl> + " https : / / anaconda . org / anaconda / mkl / 2020 . 0 / download / linux - 64 / mkl - 2020 . 0 - 166 . tar . bz2 " , <nl> + ] , <nl> + ) <nl> + <nl> + http_archive ( <nl> + name = " mkl_headers " , <nl> + build_file = " / / third_party : mkl_headers . BUILD " , <nl> + sha256 = " 2af3494a4bebe5ddccfdc43bacc80fcd78d14c1954b81d2c8e3d73b55527af90 " , <nl> + urls = [ <nl> + " https : / / anaconda . org / anaconda / mkl - include / 2020 . 0 / download / linux - 64 / mkl - include - 2020 . 0 - 166 . tar . 
bz2 " , <nl> + ] , <nl> + ) <nl> + <nl> + http_archive ( <nl> + name = " rules_python " , <nl> + url = " https : / / github . com / bazelbuild / rules_python / releases / download / 0 . 0 . 1 / rules_python - 0 . 0 . 1 . tar . gz " , <nl> + sha256 = " aa96a691d3a8177f3215b14b0edc9641787abaaa30363a080165d06ab65e1161 " , <nl> + ) <nl> + <nl> + load ( " @ pybind11_bazel / / : python_configure . bzl " , " python_configure " ) <nl> + python_configure ( name = " local_config_python " ) <nl> + <nl> + load ( " @ com_google_protobuf / / : protobuf_deps . bzl " , " protobuf_deps " ) <nl> + <nl> + protobuf_deps ( ) <nl> + <nl> + load ( " @ rules_python / / python : repositories . bzl " , " py_repositories " ) <nl> + <nl> + py_repositories ( ) <nl> new file mode 100644 <nl> index 000000000000 . . e69de29bb2d1 <nl> new file mode 100644 <nl> index 000000000000 . . 13fadb2e8716 <nl> mmm / dev / null <nl> ppp b / third_party / asmjit . BUILD <nl> <nl> + load ( " @ rules_cc / / cc : defs . bzl " , " cc_library " ) <nl> + <nl> + cc_library ( <nl> + name = " asmjit " , <nl> + srcs = glob ( [ <nl> + " src / asmjit / core / * . cpp " , <nl> + " src / asmjit / x86 / * . cpp " , <nl> + ] ) , <nl> + hdrs = glob ( [ <nl> + " src / asmjit / x86 / * . h " , <nl> + " src / asmjit / core / * . h " , <nl> + " src / asmjit / * . h " , <nl> + ] ) , <nl> + copts = [ <nl> + " - DASMJIT_STATIC " , <nl> + " - fno - tree - vectorize " , <nl> + " - std = c + + 17 " , <nl> + " - fmerge - all - constants " , <nl> + " - std = gnu + + 11 " , <nl> + " - DTH_BLAS_MKL " , <nl> + ] , <nl> + includes = [ <nl> + " asmjit / " , <nl> + " src / " , <nl> + ] , <nl> + linkstatic = True , <nl> + visibility = [ " / / visibility : public " ] , <nl> + ) <nl> new file mode 100644 <nl> index 000000000000 . . c44895034ee2 <nl> mmm / dev / null <nl> ppp b / third_party / cpuinfo . BUILD <nl> <nl> + load ( " @ rules_cc / / cc : defs . bzl " , " cc_library " ) <nl> + <nl> + cc_library ( <nl> + name = " clog " , <nl> + srcs = [ <nl> + " deps / clog / src / clog . c " , <nl> + ] , <nl> + hdrs = glob ( [ <nl> + " deps / clog / include / * . h " , <nl> + ] ) , <nl> + includes = [ <nl> + " deps / clog / include / " , <nl> + ] , <nl> + linkstatic = True , <nl> + visibility = [ " / / visibility : public " ] , <nl> + ) <nl> + <nl> + cc_library ( <nl> + name = " cpuinfo " , <nl> + srcs = glob ( <nl> + [ <nl> + " src / * . c " , <nl> + " src / linux / * . c " , <nl> + " src / x86 / * . c " , <nl> + " src / x86 / cache / * . c " , <nl> + " src / x86 / linux / * . c " , <nl> + ] , <nl> + exclude = [ <nl> + " src / x86 / mockcpuid . c " , <nl> + " src / linux / mockfile . c " , <nl> + ] , <nl> + ) , <nl> + hdrs = glob ( [ <nl> + " include / * . h " , <nl> + " src / * . h " , <nl> + " src / cpuinfo / * . h " , <nl> + " src / include / * . h " , <nl> + " src / x86 / * . h " , <nl> + " src / x86 / linux / * . h " , <nl> + " src / linux / * . h " , <nl> + ] ) , <nl> + copts = [ <nl> + " - DCPUINFO_LOG_LEVEL = 2 " , <nl> + " - DTH_BLAS_MKL " , <nl> + " - D_GNU_SOURCE = 1 " , <nl> + ] , <nl> + includes = [ <nl> + " include " , <nl> + " src " , <nl> + ] , <nl> + linkstatic = True , <nl> + visibility = [ " / / visibility : public " ] , <nl> + deps = [ <nl> + " : clog " , <nl> + ] , <nl> + ) <nl> new file mode 100644 <nl> index 000000000000 . . a6a735360633 <nl> mmm / dev / null <nl> ppp b / third_party / eigen . BUILD <nl> <nl> + # This is BUILD file is derived from https : / / github . com / tensorflow / tensorflow / blob / master / third_party / eigen . 
BUILD <nl> + <nl> + # Description : <nl> + # Eigen is a C + + template library for linear algebra : vectors , <nl> + # matrices , and related algorithms . <nl> + <nl> + load ( " @ rules_cc / / cc : defs . bzl " , " cc_library " ) <nl> + <nl> + licenses ( [ <nl> + # Note : Eigen is an MPL2 library that includes GPL v3 and LGPL v2 . 1 + code . <nl> + # We ' ve taken special care to not reference any restricted code . <nl> + " reciprocal " , # MPL2 <nl> + " notice " , # Portions BSD <nl> + ] ) <nl> + <nl> + exports_files ( [ " COPYING . MPL2 " ] ) <nl> + <nl> + # License - restricted ( i . e . not reciprocal or notice ) files inside Eigen / . . . <nl> + EIGEN_RESTRICTED_FILES = [ <nl> + " Eigen / src / OrderingMethods / Amd . h " , <nl> + " Eigen / src / SparseCholesky / * * " , <nl> + ] <nl> + <nl> + # Notable transitive dependencies of restricted files inside Eigen / . . . <nl> + EIGEN_RESTRICTED_DEPS = [ <nl> + " Eigen / Eigen " , <nl> + " Eigen / IterativeLinearSolvers " , <nl> + " Eigen / MetisSupport " , <nl> + " Eigen / Sparse " , <nl> + " Eigen / SparseCholesky " , <nl> + " Eigen / SparseLU " , <nl> + ] <nl> + <nl> + EIGEN_FILES = [ <nl> + " Eigen / * * " , <nl> + " unsupported / Eigen / CXX11 / * * " , <nl> + " unsupported / Eigen / FFT " , <nl> + " unsupported / Eigen / KroneckerProduct " , <nl> + " unsupported / Eigen / src / FFT / * * " , <nl> + " unsupported / Eigen / src / KroneckerProduct / * * " , <nl> + " unsupported / Eigen / MatrixFunctions " , <nl> + " unsupported / Eigen / SpecialFunctions " , <nl> + " unsupported / Eigen / Splines " , <nl> + " unsupported / Eigen / src / MatrixFunctions / * * " , <nl> + " unsupported / Eigen / src / SpecialFunctions / * * " , <nl> + " unsupported / Eigen / src / Splines / * * " , <nl> + " unsupported / Eigen / NonLinearOptimization " , <nl> + " unsupported / Eigen / NumericalDiff " , <nl> + " unsupported / Eigen / src / * * " , <nl> + " unsupported / Eigen / Polynomials " , <nl> + ] <nl> + <nl> + # List of files picked up by glob but actually part of another target . <nl> + EIGEN_EXCLUDE_FILES = [ " Eigen / src / Core / arch / AVX / PacketMathGoogleTest . cc " ] <nl> + <nl> + # Disallowed eigen modules / files in rNA : <nl> + # * Using the custom STL and memory support , it is not needed and should <nl> + # not be used with c + + 17 . <nl> + # * We will only support the EulerAnglesZYX provided by / / atg / geometry so <nl> + # just don ' t allow people to access the unsupported eigen module . <nl> + EIGEN_DISALLOW_FILES = [ <nl> + " Eigen / StlSupport / * . h " , <nl> + " unsupported / Eigen / EulerAngles " , <nl> + " unsupported / Eigen / src / EulerAngles / * * " , <nl> + ] <nl> + <nl> + # Files known to be under MPL2 license . <nl> + EIGEN_MPL2_HEADER_FILES = glob ( <nl> + EIGEN_FILES , <nl> + exclude = EIGEN_EXCLUDE_FILES + <nl> + EIGEN_RESTRICTED_FILES + <nl> + EIGEN_DISALLOW_FILES + <nl> + EIGEN_RESTRICTED_DEPS + [ <nl> + # Guarantees any file missed by excludes above will not compile . <nl> + " Eigen / src / Core / util / NonMPL2 . h " , <nl> + " Eigen / * * / CMakeLists . txt " , <nl> + ] , <nl> + ) <nl> + <nl> + cc_library ( <nl> + name = " eigen " , <nl> + hdrs = EIGEN_MPL2_HEADER_FILES , <nl> + defines = [ <nl> + # This define ( mostly ) guarantees we don ' t link any problematic <nl> + # code . We use it , but we do not rely on it , as evidenced above . <nl> + " EIGEN_MPL2_ONLY " , <nl> + " EIGEN_MAX_ALIGN_BYTES = 64 " , <nl> + ] , <nl> + includes = [ " . 
" ] , <nl> + visibility = [ " / / visibility : public " ] , <nl> + ) <nl> new file mode 100644 <nl> index 000000000000 . . 1769179baee7 <nl> mmm / dev / null <nl> ppp b / third_party / fbgemm . BUILD <nl> <nl> + load ( " @ rules_cc / / cc : defs . bzl " , " cc_library " ) <nl> + <nl> + cc_library ( <nl> + name = " fbgemm_src_headers " , <nl> + hdrs = [ <nl> + " src / RefImplementations . h " , <nl> + ] , <nl> + include_prefix = " fbgemm " , <nl> + ) <nl> + <nl> + cc_library ( <nl> + name = " fbgemm_base " , <nl> + srcs = [ <nl> + " src / EmbeddingSpMDM . cc " , <nl> + " src / EmbeddingSpMDMNBit . cc " , <nl> + " src / ExecuteKernel . cc " , <nl> + " src / ExecuteKernelU8S8 . cc " , <nl> + " src / Fbgemm . cc " , <nl> + " src / FbgemmBfloat16Convert . cc " , <nl> + " src / FbgemmConv . cc " , <nl> + " src / FbgemmFP16 . cc " , <nl> + " src / FbgemmFloat16Convert . cc " , <nl> + " src / FbgemmI64 . cc " , <nl> + " src / FbgemmI8Spmdm . cc " , <nl> + " src / GenerateKernelU8S8S32ACC16 . cc " , <nl> + " src / GenerateKernelU8S8S32ACC16Avx512 . cc " , <nl> + " src / GenerateKernelU8S8S32ACC16Avx512VNNI . cc " , <nl> + " src / GenerateKernelU8S8S32ACC32 . cc " , <nl> + " src / GenerateKernelU8S8S32ACC32Avx512 . cc " , <nl> + " src / GenerateKernelU8S8S32ACC32Avx512VNNI . cc " , <nl> + " src / GroupwiseConvAcc32Avx2 . cc " , <nl> + " src / PackAMatrix . cc " , <nl> + " src / PackAWithIm2Col . cc " , <nl> + " src / PackBMatrix . cc " , <nl> + " src / PackMatrix . cc " , <nl> + " src / PackAWithQuantRowOffset . cc " , <nl> + " src / PackAWithRowOffset . cc " , <nl> + " src / PackWeightMatrixForGConv . cc " , <nl> + " src / PackWeightsForConv . cc " , <nl> + " src / QuantUtils . cc " , <nl> + " src / RefImplementations . cc " , <nl> + " src / RowWiseSparseAdagradFused . cc " , <nl> + " src / SparseAdagrad . cc " , <nl> + " src / Utils . cc " , <nl> + # Private headers <nl> + " src / CodeCache . h " , <nl> + " src / CodeGenHelpers . h " , <nl> + " src / ExecuteKernel . h " , <nl> + " src / ExecuteKernelGeneric . h " , <nl> + " src / ExecuteKernelU8S8 . h " , <nl> + " src / FbgemmFP16Common . h " , <nl> + " src / GenerateKernel . h " , <nl> + " src / GroupwiseConv . h " , <nl> + " src / RefImplementations . h " , <nl> + " src / TransposeUtils . h " , <nl> + ] , <nl> + hdrs = [ <nl> + " include / fbgemm / FbgemmConvert . h " , <nl> + " include / fbgemm / FbgemmI64 . h " , <nl> + ] , <nl> + includes = [ <nl> + " . " , <nl> + " src " , <nl> + ] , <nl> + deps = [ <nl> + " : fbgemm_avx2 " , <nl> + " : fbgemm_avx512 " , <nl> + " : fbgemm_headers " , <nl> + " : fbgemm_src_headers " , <nl> + " @ asmjit " , <nl> + " @ cpuinfo " , <nl> + ] , <nl> + linkstatic = 1 , <nl> + ) <nl> + <nl> + cc_library ( <nl> + name = " fbgemm_avx2_circular " , <nl> + srcs = [ <nl> + " src / FbgemmFloat16ConvertAvx2 . cc " , <nl> + ] , <nl> + copts = [ <nl> + " - mavx2 " , <nl> + " - mf16c " , <nl> + ] , <nl> + deps = [ <nl> + " : fbgemm_base " , <nl> + ] , <nl> + linkstatic = 1 , <nl> + ) <nl> + <nl> + cc_library ( <nl> + name = " fbgemm " , <nl> + visibility = [ " / / visibility : public " ] , <nl> + deps = [ <nl> + " : fbgemm_base " , <nl> + " : fbgemm_avx2_circular " , <nl> + ] , <nl> + linkstatic = 1 , <nl> + ) <nl> + <nl> + cc_library ( <nl> + name = " fbgemm_avx2 " , <nl> + srcs = [ <nl> + " src / EmbeddingSpMDMAvx2 . cc " , <nl> + " src / FbgemmBfloat16ConvertAvx2 . cc " , <nl> + # " src / FbgemmFloat16ConvertAvx2 . cc " , <nl> + " src / FbgemmI8Depthwise3DAvx2 . cc " , <nl> + " src / FbgemmI8Depthwise3x3Avx2 . 
cc " , <nl> + " src / FbgemmI8DepthwiseAvx2 . cc " , <nl> + " src / FbgemmI8DepthwisePerChannelQuantAvx2 . cc " , <nl> + " src / OptimizedKernelsAvx2 . cc " , <nl> + " src / PackDepthwiseConvMatrixAvx2 . cc " , <nl> + " src / QuantUtilsAvx2 . cc " , <nl> + " src / UtilsAvx2 . cc " , <nl> + # Inline Assembly sources <nl> + " src / FbgemmFP16UKernelsAvx2 . cc " , <nl> + # Private headers <nl> + " src / FbgemmFP16Common . h " , <nl> + " src / FbgemmFP16UKernelsAvx2 . h " , <nl> + " src / FbgemmI8Depthwise2DAvx2 - inl . h " , <nl> + " src / FbgemmI8DepthwiseAvx2 - inl . h " , <nl> + " src / MaskAvx2 . h " , <nl> + " src / OptimizedKernelsAvx2 . h " , <nl> + " src / TransposeUtils . h " , <nl> + " src / TransposeUtilsAvx2 . h " , <nl> + ] , <nl> + copts = [ <nl> + " - m64 " , <nl> + " - mavx2 " , <nl> + " - mfma " , <nl> + " - mf16c " , <nl> + " - masm = intel " , <nl> + ] , <nl> + deps = [ <nl> + " : fbgemm_headers " , <nl> + ] , <nl> + linkstatic = 1 , <nl> + ) <nl> + <nl> + cc_library ( <nl> + name = " fbgemm_avx2_headers " , <nl> + includes = [ <nl> + " src " , <nl> + ] , <nl> + hdrs = [ <nl> + " src / FbgemmFP16UKernelsAvx2 . h " , <nl> + " src / MaskAvx2 . h " , <nl> + " src / OptimizedKernelsAvx2 . h " , <nl> + ] , <nl> + ) <nl> + <nl> + cc_library ( <nl> + name = " fbgemm_avx512 " , <nl> + srcs = [ <nl> + " src / FbgemmBfloat16ConvertAvx512 . cc " , <nl> + " src / FbgemmFloat16ConvertAvx512 . cc " , <nl> + " src / UtilsAvx512 . cc " , <nl> + # Inline Assembly sources <nl> + " src / FbgemmFP16UKernelsAvx512 . cc " , <nl> + " src / FbgemmFP16UKernelsAvx512_256 . cc " , <nl> + # Private headers <nl> + " src / FbgemmFP16UKernelsAvx512 . h " , <nl> + " src / FbgemmFP16Common . h " , <nl> + " src / MaskAvx2 . h " , <nl> + " src / TransposeUtils . h " , <nl> + " src / TransposeUtilsAvx2 . h " , <nl> + ] , <nl> + hdrs = [ <nl> + " src / FbgemmFP16UKernelsAvx512_256 . h " , <nl> + ] , <nl> + copts = [ <nl> + " - m64 " , <nl> + " - mfma " , <nl> + " - mavx512f " , <nl> + " - mavx512bw " , <nl> + " - mavx512dq " , <nl> + " - mavx512vl " , <nl> + " - masm = intel " , <nl> + ] , <nl> + deps = [ <nl> + " : fbgemm_headers " , <nl> + ] , <nl> + linkstatic = 1 , <nl> + ) <nl> + <nl> + cc_library ( <nl> + name = " fbgemm_avx512_headers " , <nl> + includes = [ <nl> + " src " , <nl> + ] , <nl> + hdrs = [ <nl> + " src / FbgemmFP16UKernelsAvx512 . h " , <nl> + " src / FbgemmFP16UKernelsAvx512_256 . h " , <nl> + ] , <nl> + ) <nl> + <nl> + cc_library ( <nl> + name = " fbgemm_headers " , <nl> + hdrs = [ <nl> + " include / fbgemm / ConvUtils . h " , <nl> + " include / fbgemm / Fbgemm . h " , <nl> + " include / fbgemm / FbgemmBuild . h " , <nl> + " include / fbgemm / FbgemmConvert . h " , <nl> + " include / fbgemm / FbgemmEmbedding . h " , <nl> + " include / fbgemm / FbgemmFP16 . h " , <nl> + " include / fbgemm / FbgemmI64 . h " , <nl> + " include / fbgemm / FbgemmI8DepthwiseAvx2 . h " , <nl> + " include / fbgemm / FbgemmI8Spmdm . h " , <nl> + " include / fbgemm / OutputProcessing - inl . h " , <nl> + " include / fbgemm / PackingTraits - inl . h " , <nl> + " include / fbgemm / QuantUtils . h " , <nl> + " include / fbgemm / QuantUtilsAvx2 . h " , <nl> + " include / fbgemm / Types . h " , <nl> + " include / fbgemm / Utils . h " , <nl> + " include / fbgemm / UtilsAvx2 . h " , <nl> + ] , <nl> + includes = [ <nl> + " include " , <nl> + ] , <nl> + visibility = [ " / / visibility : public " ] , <nl> + ) <nl> new file mode 100644 <nl> index 000000000000 . . 
6e19fa004bb3 <nl> mmm / dev / null <nl> ppp b / third_party / foxi . BUILD <nl> <nl> + load ( " @ rules_cc / / cc : defs . bzl " , " cc_library " ) <nl> + <nl> + cc_library ( <nl> + name = " foxi " , <nl> + srcs = [ <nl> + " foxi / onnxifi_loader . c " , <nl> + ] , <nl> + hdrs = glob ( [ <nl> + " foxi / * . h " , <nl> + ] ) , <nl> + includes = [ <nl> + " . " , <nl> + ] , <nl> + linkstatic = 1 , <nl> + visibility = [ " / / visibility : public " ] , <nl> + ) <nl> new file mode 100644 <nl> index 000000000000 . . e73b97738cf8 <nl> mmm / dev / null <nl> ppp b / third_party / gloo . BUILD <nl> <nl> + load ( " @ rules_cc / / cc : defs . bzl " , " cc_library " ) <nl> + load ( " @ / / tools / rules : cu . bzl " , " cu_library " ) <nl> + load ( " @ / / third_party : substitution . bzl " , " template_rule " ) <nl> + load ( " @ / / tools / config : defs . bzl " , " if_cuda " ) <nl> + <nl> + template_rule ( <nl> + name = " gloo_config_cmake_macros " , <nl> + src = " gloo / config . h . in " , <nl> + out = " gloo / config . h " , <nl> + substitutions = { <nl> + " @ GLOO_VERSION_MAJOR @ " : " 0 " , <nl> + " @ GLOO_VERSION_MINOR @ " : " 5 " , <nl> + " @ GLOO_VERSION_PATCH @ " : " 0 " , <nl> + " cmakedefine01 GLOO_USE_CUDA " : " define GLOO_USE_CUDA 1 " , <nl> + " cmakedefine01 GLOO_USE_NCCL " : " define GLOO_USE_NCCL 0 " , <nl> + " cmakedefine01 GLOO_USE_ROCM " : " define GLOO_USE_ROCM 0 " , <nl> + " cmakedefine01 GLOO_USE_RCCL " : " define GLOO_USE_RCCL 0 " , <nl> + " cmakedefine01 GLOO_USE_REDIS " : " define GLOO_USE_REDIS 0 " , <nl> + " cmakedefine01 GLOO_USE_IBVERBS " : " define GLOO_USE_IBVERBS 0 " , <nl> + " cmakedefine01 GLOO_USE_MPI " : " define GLOO_USE_MPI 0 " , <nl> + " cmakedefine01 GLOO_USE_AVX " : " define GLOO_USE_AVX 0 " , <nl> + " cmakedefine01 GLOO_USE_LIBUV " : " define GLOO_USE_LIBUV 0 " , <nl> + " cmakedefine01 GLOO_HAVE_TRANSPORT_TCP " : " define GLOO_HAVE_TRANSPORT_TCP 1 " , <nl> + " cmakedefine01 GLOO_HAVE_TRANSPORT_IBVERBS " : " define GLOO_HAVE_TRANSPORT_IBVERBS 0 " , <nl> + " cmakedefine01 GLOO_HAVE_TRANSPORT_UV " : " define GLOO_HAVE_TRANSPORT_UV 0 " , <nl> + } , <nl> + ) <nl> + <nl> + cc_library ( <nl> + name = " gloo_headers " , <nl> + hdrs = glob ( <nl> + [ <nl> + " gloo / * . h " , <nl> + " gloo / common / * . h " , <nl> + " gloo / rendezvous / * . h " , <nl> + " gloo / transport / * . h " , <nl> + " gloo / transport / tcp / * . h " , <nl> + ] , <nl> + exclude = [ <nl> + " gloo / rendezvous / redis_store . h " , <nl> + ] , <nl> + ) + [ " gloo / config . h " ] , <nl> + includes = [ <nl> + " . " , <nl> + ] , <nl> + ) <nl> + <nl> + cu_library ( <nl> + name = " gloo_cuda " , <nl> + srcs = [ <nl> + " gloo / cuda . cu . cc " , <nl> + " gloo / cuda_private . cu . cc " , <nl> + ] , <nl> + visibility = [ " / / visibility : public " ] , <nl> + deps = [ <nl> + " : gloo_headers " , <nl> + ] , <nl> + alwayslink = True , <nl> + ) <nl> + <nl> + cc_library ( <nl> + name = " gloo " , <nl> + srcs = glob ( <nl> + [ <nl> + " gloo / * . cc " , <nl> + " gloo / common / * . cc " , <nl> + " gloo / rendezvous / * . cc " , <nl> + " gloo / transport / * . cc " , <nl> + " gloo / transport / tcp / * . cc " , <nl> + ] , <nl> + exclude = [ <nl> + " gloo / cuda * . cc " , <nl> + " gloo / rendezvous / redis_store . 
cc " , <nl> + ] , <nl> + ) , <nl> + copts = [ <nl> + " - std = gnu + + 11 " , <nl> + " - std = c + + 11 " , <nl> + ] , <nl> + visibility = [ " / / visibility : public " ] , <nl> + deps = [ " : gloo_headers " ] + if_cuda ( <nl> + [ " : gloo_cuda " ] , <nl> + [ ] , <nl> + ) , <nl> + ) <nl> new file mode 100644 <nl> index 000000000000 . . 882d5cb342a4 <nl> mmm / dev / null <nl> ppp b / third_party / ideep . BUILD <nl> <nl> + load ( " @ rules_cc / / cc : defs . bzl " , " cc_library " ) <nl> + <nl> + cc_library ( <nl> + name = " ideep " , <nl> + hdrs = glob ( [ <nl> + " include / * * / * . hpp " , <nl> + " include / * * / * . h " , <nl> + ] ) , <nl> + defines = [ <nl> + " IDEEP_USE_MKL " , <nl> + ] , <nl> + includes = [ <nl> + " include / " , <nl> + ] , <nl> + visibility = [ " / / visibility : public " ] , <nl> + deps = [ " @ mkl_dnn / / : mkl - dnn " ] , <nl> + ) <nl> new file mode 100644 <nl> index 000000000000 . . c105e73ac2f1 <nl> mmm / dev / null <nl> ppp b / third_party / miniz - 2 . 0 . 8 / BUILD . bazel <nl> <nl> + cc_library ( <nl> + name = " miniz " , <nl> + srcs = [ <nl> + " miniz . c " , <nl> + ] , <nl> + hdrs = [ <nl> + " miniz . h " , <nl> + ] , <nl> + visibility = [ " / / visibility : public " ] , <nl> + ) <nl> new file mode 100644 <nl> index 000000000000 . . fdb887c9cacc <nl> mmm / dev / null <nl> ppp b / third_party / mkl - dnn . BUILD <nl> <nl> + load ( " @ rules_cc / / cc : defs . bzl " , " cc_library " ) <nl> + load ( " @ / / third_party : substitution . bzl " , " template_rule " ) <nl> + <nl> + template_rule ( <nl> + name = " include_dnnl_version " , <nl> + src = " include / dnnl_version . h . in " , <nl> + out = " include / dnnl_version . h " , <nl> + substitutions = { <nl> + " @ DNNL_VERSION_MAJOR @ " : " 1 " , <nl> + " @ DNNL_VERSION_MINOR @ " : " 2 " , <nl> + " @ DNNL_VERSION_PATCH @ " : " 0 " , <nl> + " @ DNNL_VERSION_HASH @ " : " 70f8b879ea7a0c38caedb3320b7c85e8497ff50d " , <nl> + } , <nl> + ) <nl> + <nl> + template_rule ( <nl> + name = " include_dnnl_config " , <nl> + src = " include / dnnl_config . h . in " , <nl> + out = " include / dnnl_config . h " , <nl> + substitutions = { <nl> + " cmakedefine " : " define " , <nl> + " $ { DNNL_CPU_THREADING_RUNTIME } " : " OMP " , <nl> + " $ { DNNL_CPU_RUNTIME } " : " OMP " , <nl> + " $ { DNNL_GPU_RUNTIME } " : " NONE " , <nl> + } , <nl> + ) <nl> + <nl> + cc_library ( <nl> + name = " mkl - dnn " , <nl> + srcs = glob ( [ <nl> + " src / common / * . cpp " , <nl> + " src / cpu / * . cpp " , <nl> + " src / cpu / binary / * . cpp " , <nl> + " src / cpu / gemm / * . cpp " , <nl> + " src / cpu / gemm / bf16 / * . cpp " , <nl> + " src / cpu / gemm / f32 / * . cpp " , <nl> + " src / cpu / gemm / s8x8s32 / * . cpp " , <nl> + " src / cpu / jit_utils / * . cpp " , <nl> + " src / cpu / jit_utils / jitprofiling / * . c " , <nl> + " src / cpu / jit_utils / linux_perf / * . cpp " , <nl> + " src / cpu / matmul / * . cpp " , <nl> + " src / cpu / resampling / * . cpp " , <nl> + " src / cpu / rnn / * . cpp " , <nl> + ] ) , <nl> + hdrs = glob ( [ <nl> + " include / * . h " , <nl> + " include / * . hpp " , <nl> + " src / * . hpp " , <nl> + " src / cpu / * * / * . hpp " , <nl> + " src / cpu / * * / * . h " , <nl> + " src / common / * . hpp " , <nl> + " src / cpu / rnn / * . hpp " , <nl> + ] ) + [ <nl> + " include / dnnl_version . h " , <nl> + " include / dnnl_config . 
h " , <nl> + ] , <nl> + copts = [ <nl> + " - DUSE_AVX " , <nl> + " - DUSE_AVX2 " , <nl> + " - DDNNL_DLL " , <nl> + " - DDNNL_DLL_EXPORTS " , <nl> + " - DDNNL_ENABLE_CONCURRENT_EXEC " , <nl> + " - DTH_BLAS_MKL " , <nl> + " - D__STDC_CONSTANT_MACROS " , <nl> + " - D__STDC_LIMIT_MACROS " , <nl> + " - fno - strict - overflow " , <nl> + " - fopenmp " , <nl> + ] + select ( { <nl> + " @ / / tools / config : thread_sanitizer " : [ " - DMKLDNN_THR = 0 " ] , <nl> + " / / conditions : default " : [ " - DMKLDNN_THR = 2 " ] , <nl> + } ) , <nl> + includes = [ <nl> + " include / " , <nl> + " src / " , <nl> + " src / common / " , <nl> + " src / cpu / " , <nl> + " src / cpu / xbyak / " , <nl> + ] , <nl> + visibility = [ " / / visibility : public " ] , <nl> + linkopts = [ <nl> + " - lgomp " , <nl> + ] , <nl> + deps = [ <nl> + " @ mkl " , <nl> + ] + select ( { <nl> + " @ / / tools / config : thread_sanitizer " : [ ] , <nl> + " / / conditions : default " : [ " @ tbb " ] , <nl> + } ) , <nl> + ) <nl> new file mode 100644 <nl> index 000000000000 . . bc868b24e83c <nl> mmm / dev / null <nl> ppp b / third_party / mkl . BUILD <nl> <nl> + load ( " @ rules_cc / / cc : defs . bzl " , " cc_library " ) <nl> + <nl> + cc_library ( <nl> + name = " mkl " , <nl> + srcs = [ <nl> + " libmkl_avx2 . so " , <nl> + " libmkl_core . so " , <nl> + " libmkl_def . so " , <nl> + " libmkl_intel_lp64 . so " , <nl> + " libmkl_rt . so " , <nl> + " libmkl_sequential . so " , <nl> + " libmkl_vml_avx2 . so " , <nl> + " libmkl_vml_avx512 . so " , <nl> + " libmkl_vml_def . so " , <nl> + ] + select ( { <nl> + " @ / / tools / config : thread_sanitizer " : [ ] , <nl> + " / / conditions : default " : [ " libmkl_tbb_thread . so " ] , <nl> + } ) , <nl> + visibility = [ " / / visibility : public " ] , <nl> + deps = [ " @ mkl_headers " ] , <nl> + ) <nl> new file mode 100644 <nl> index 000000000000 . . 965801c91aa9 <nl> mmm / dev / null <nl> ppp b / third_party / mkl_headers . BUILD <nl> <nl> + load ( " @ rules_cc / / cc : defs . bzl " , " cc_library " ) <nl> + <nl> + cc_library ( <nl> + name = " mkl_headers " , <nl> + hdrs = glob ( [ " include / * . h " ] ) , <nl> + includes = [ " include / " ] , <nl> + visibility = [ " / / visibility : public " ] , <nl> + ) <nl> new file mode 100644 <nl> index 000000000000 . . b8809deb4fea <nl> mmm / dev / null <nl> ppp b / third_party / onnx . BUILD <nl> <nl> + load ( " @ rules_proto / / proto : defs . bzl " , " proto_library " ) <nl> + load ( " @ rules_cc / / cc : defs . bzl " , " cc_library " , " cc_proto_library " ) <nl> + load ( " @ rules_python / / python : defs . bzl " , " py_binary " ) <nl> + <nl> + py_binary ( <nl> + name = " gen_proto " , <nl> + srcs = [ " onnx / gen_proto . py " ] , <nl> + data = [ <nl> + " onnx / onnx . in . proto " , <nl> + " onnx / onnx - operators . in . proto " , <nl> + ] , <nl> + ) <nl> + <nl> + genrule ( <nl> + name = " generate_onnx_proto " , <nl> + outs = [ <nl> + " onnx / onnx_onnx_torch - ml . proto " , <nl> + " onnx / onnx - ml . pb . h " , <nl> + ] , <nl> + cmd = " $ ( location : gen_proto ) - p onnx_torch - o $ ( @ D ) / onnx onnx - m > / dev / null & & sed - i ' s / onnx_onnx_torch - ml . pb . h / onnx \ \ / onnx_onnx_torch - ml . pb . h / g ' $ ( @ D ) / onnx / onnx - ml . pb . h " , <nl> + tools = [ " : gen_proto " ] , <nl> + ) <nl> + <nl> + genrule ( <nl> + name = " generate_onnx_operators_proto " , <nl> + outs = [ <nl> + " onnx / onnx - operators_onnx_torch - ml . proto " , <nl> + " onnx / onnx - operators - ml . pb . 
h " , <nl> + ] , <nl> + cmd = " $ ( location : gen_proto ) - p onnx_torch - o $ ( @ D ) / onnx onnx - operators - m > / dev / null & & sed - i ' s / onnx - operators_onnx_torch - ml . pb . h / onnx \ \ / onnx - operators_onnx_torch - ml . pb . h / g ' $ ( @ D ) / onnx / onnx - operators - ml . pb . h " , <nl> + tools = [ " : gen_proto " ] , <nl> + ) <nl> + <nl> + cc_library ( <nl> + name = " onnx " , <nl> + srcs = glob ( <nl> + [ <nl> + " onnx / * . cc " , <nl> + " onnx / common / * . cc " , <nl> + " onnx / defs / * . cc " , <nl> + " onnx / defs / controlflow / * . cc " , <nl> + " onnx / defs / experiments / * . cc " , <nl> + " onnx / defs / generator / * . cc " , <nl> + " onnx / defs / logical / * . cc " , <nl> + " onnx / defs / math / * . cc " , <nl> + " onnx / defs / nn / * . cc " , <nl> + " onnx / defs / object_detection / * . cc " , <nl> + " onnx / defs / quantization / * . cc " , <nl> + " onnx / defs / reduction / * . cc " , <nl> + " onnx / defs / rnn / * . cc " , <nl> + " onnx / defs / sequence / * . cc " , <nl> + " onnx / defs / tensor / * . cc " , <nl> + " onnx / defs / traditionalml / * . cc " , <nl> + " onnx / defs / traditionalml / * . cc " , <nl> + " onnx / optimizer / * . cc " , <nl> + " onnx / shape_inference / * . cc " , <nl> + " onnx / version_converter / * . cc " , <nl> + ] , <nl> + exclude = [ <nl> + " onnx / cpp2py_export . cc " , <nl> + ] , <nl> + ) , <nl> + hdrs = glob ( [ <nl> + " onnx / * . h " , <nl> + " onnx / version_converter / * . h " , <nl> + " onnx / common / * . h " , <nl> + " onnx / defs / * . h " , <nl> + " onnx / defs / tensor / * . h " , <nl> + " onnx / shape_inference / * . h " , <nl> + " onnx / optimizer / * . h " , <nl> + " onnx / optimizer / passes / * . h " , <nl> + " onnx / version_converter / adapters / * . h " , <nl> + ] ) + [ <nl> + " onnx / onnx - ml . pb . h " , <nl> + " onnx / onnx - operators - ml . pb . h " , <nl> + ] , <nl> + defines = [ <nl> + " ONNX_ML = 1 " , <nl> + " ONNX_NAMESPACE = onnx_torch " , <nl> + ] , <nl> + includes = [ <nl> + " . " , <nl> + " onnx / " , <nl> + ] , <nl> + visibility = [ " / / visibility : public " ] , <nl> + deps = [ <nl> + " : onnx_proto_lib " , <nl> + ] , <nl> + ) <nl> + <nl> + cc_library ( <nl> + name = " onnx_proto_headers " , <nl> + hdrs = glob ( [ <nl> + " onnx / * _pb . h " , <nl> + ] ) , <nl> + visibility = [ " / / visibility : public " ] , <nl> + deps = [ <nl> + " : onnx_proto_lib " , <nl> + ] , <nl> + ) <nl> + <nl> + proto_library ( <nl> + name = " onnx_proto " , <nl> + srcs = [ <nl> + " onnx / onnx - operators_onnx_torch - ml . proto " , <nl> + " onnx / onnx_onnx_torch - ml . proto " , <nl> + ] , <nl> + ) <nl> + <nl> + cc_proto_library ( <nl> + name = " onnx_proto_lib " , <nl> + deps = [ " : onnx_proto " ] , <nl> + ) <nl> new file mode 100644 <nl> index 000000000000 . . 6b7349e6696c <nl> mmm / dev / null <nl> ppp b / third_party / sleef . BUILD <nl> <nl> + load ( " @ rules_cc / / cc : defs . bzl " , " cc_binary " , " cc_library " ) <nl> + load ( " @ / / third_party : sleef . 
bzl " , " sleef_cc_library " ) <nl> + <nl> + SLEEF_COPTS = [ <nl> + " - DHAVE_MALLOC_USABLE_SIZE = 1 " , <nl> + " - DHAVE_MMAP = 1 " , <nl> + " - DHAVE_SHM_OPEN = 1 " , <nl> + " - DHAVE_SHM_UNLINK = 1 " , <nl> + " - DIDEEP_USE_MKL " , <nl> + " - DMKLDNN_THR = MKLDNN_THR_TBB " , <nl> + " - DONNX_ML = 1 " , <nl> + " - DONNX_NAMESPACE = onnx " , <nl> + " - DTH_BLAS_MKL " , <nl> + " - D_FILE_OFFSET_BITS = 64 " , <nl> + " - ffp - contract = off " , <nl> + " - fno - math - errno " , <nl> + " - fno - trapping - math " , <nl> + " - DCAFFE2_USE_GLOO " , <nl> + " - DCUDA_HAS_FP16 = 1 " , <nl> + " - DHAVE_GCC_GET_CPUID " , <nl> + " - DUSE_AVX " , <nl> + " - DUSE_AVX2 " , <nl> + " - DTH_HAVE_THREAD " , <nl> + " - std = gnu99 " , <nl> + ] <nl> + <nl> + SLEEF_COMMON_TARGET_COPTS = [ <nl> + " - DSLEEF_STATIC_LIBS = 1 " , <nl> + " - DENABLE_ALIAS = 1 " , <nl> + ] <nl> + <nl> + SLEEF_PRIVATE_HEADERS = glob ( [ <nl> + " build / include / * . h " , <nl> + " src / arch / * . h " , <nl> + " src / common / * . h " , <nl> + " src / libm / * . h " , <nl> + " src / libm / include / * . h " , <nl> + ] ) <nl> + <nl> + SLEEF_PUBLIC_HEADERS = [ <nl> + " : sleef_h " , <nl> + ] <nl> + <nl> + SLEEF_PRIVATE_INCLUDES = [ <nl> + " - Iexternal / sleef / src / arch " , <nl> + " - Iexternal / sleef / src / common " , <nl> + ] <nl> + <nl> + SLEEF_PUBLIC_INCLUDES = [ <nl> + " build / include " , <nl> + ] <nl> + <nl> + SLEEF_VISIBILITY = [ <nl> + " @ pytorch / / : __subpackages__ " , <nl> + ] <nl> + <nl> + cc_binary ( <nl> + name = " mkalias " , <nl> + srcs = [ <nl> + " src / libm / funcproto . h " , <nl> + " src / libm / mkalias . c " , <nl> + ] , <nl> + ) <nl> + <nl> + genrule ( <nl> + name = " alias_avx512f_h " , <nl> + outs = [ " alias_avx512f . h " ] , <nl> + cmd = " { " + " ; " . join ( [ <nl> + " $ ( location : mkalias ) - 16 __m512 __m512i e avx512f " , <nl> + " $ ( location : mkalias ) 8 __m512d __m256i e avx512f " , <nl> + ] ) + " ; } > $ @ " , <nl> + tools = [ " : mkalias " ] , <nl> + ) <nl> + <nl> + cc_binary ( <nl> + name = " mkdisp " , <nl> + srcs = [ <nl> + " src / libm / funcproto . h " , <nl> + " src / libm / mkdisp . c " , <nl> + ] , <nl> + copts = SLEEF_COPTS , <nl> + ) <nl> + <nl> + genrule ( <nl> + name = " dispavx_c " , <nl> + srcs = [ " src / libm / dispavx . c . org " ] , <nl> + outs = [ " dispavx . c " ] , <nl> + cmd = " { cat $ ( location src / libm / dispavx . c . org ) ; $ ( location : mkdisp ) 4 8 __m256d __m256 __m128i avx fma4 avx2 ; } > $ @ " , <nl> + tools = [ " : mkdisp " ] , <nl> + ) <nl> + <nl> + genrule ( <nl> + name = " dispsse_c " , <nl> + srcs = [ " src / libm / dispsse . c . org " ] , <nl> + outs = [ " dispsse . c " ] , <nl> + cmd = " { cat $ ( location src / libm / dispsse . c . org ) ; $ ( location : mkdisp ) 2 4 __m128d __m128 __m128i sse2 sse4 avx2128 ; } > $ @ " , <nl> + tools = [ " : mkdisp " ] , <nl> + ) <nl> + <nl> + cc_binary ( <nl> + name = " mkrename " , <nl> + srcs = [ <nl> + " src / libm / funcproto . h " , <nl> + " src / libm / mkrename . c " , <nl> + ] , <nl> + ) <nl> + <nl> + genrule ( <nl> + name = " renameavx_h " , <nl> + outs = [ " renameavx . h " ] , <nl> + cmd = " $ ( location : mkrename ) cinz_ 4 8 avx > $ @ " , <nl> + tools = [ " : mkrename " ] , <nl> + ) <nl> + <nl> + genrule ( <nl> + name = " renameavx2_h " , <nl> + outs = [ " renameavx2 . h " ] , <nl> + cmd = " $ ( location : mkrename ) finz_ 4 8 avx2 > $ @ " , <nl> + tools = [ " : mkrename " ] , <nl> + ) <nl> + <nl> + genrule ( <nl> + name = " renameavx2128_h " , <nl> + outs = [ " renameavx2128 . 
h " ] , <nl> + cmd = " $ ( location : mkrename ) finz_ 2 4 avx2128 > $ @ " , <nl> + tools = [ " : mkrename " ] , <nl> + ) <nl> + <nl> + genrule ( <nl> + name = " renameavx512f_h " , <nl> + outs = [ " renameavx512f . h " ] , <nl> + cmd = " $ ( location : mkrename ) finz_ 8 16 avx512f > $ @ " , <nl> + tools = [ " : mkrename " ] , <nl> + ) <nl> + <nl> + genrule ( <nl> + name = " renameavx512fnofma_h " , <nl> + outs = [ " renameavx512fnofma . h " ] , <nl> + cmd = " $ ( location : mkrename ) cinz_ 8 16 avx512fnofma > $ @ " , <nl> + tools = [ " : mkrename " ] , <nl> + ) <nl> + <nl> + genrule ( <nl> + name = " renamefma4_h " , <nl> + outs = [ " renamefma4 . h " ] , <nl> + cmd = " $ ( location : mkrename ) finz_ 4 8 fma4 > $ @ " , <nl> + tools = [ " : mkrename " ] , <nl> + ) <nl> + <nl> + genrule ( <nl> + name = " renamepurec_scalar_h " , <nl> + outs = [ " renamepurec_scalar . h " ] , <nl> + cmd = " $ ( location : mkrename ) cinz_ 1 1 purec > $ @ " , <nl> + tools = [ " : mkrename " ] , <nl> + ) <nl> + <nl> + genrule ( <nl> + name = " renamepurecfma_scalar_h " , <nl> + outs = [ " renamepurecfma_scalar . h " ] , <nl> + cmd = " $ ( location : mkrename ) finz_ 1 1 purecfma > $ @ " , <nl> + tools = [ " : mkrename " ] , <nl> + ) <nl> + <nl> + genrule ( <nl> + name = " renamesse2_h " , <nl> + outs = [ " renamesse2 . h " ] , <nl> + cmd = " $ ( location : mkrename ) cinz_ 2 4 sse2 > $ @ " , <nl> + tools = [ " : mkrename " ] , <nl> + ) <nl> + <nl> + genrule ( <nl> + name = " renamesse4_h " , <nl> + outs = [ " renamesse4 . h " ] , <nl> + cmd = " $ ( location : mkrename ) cinz_ 2 4 sse4 > $ @ " , <nl> + tools = [ " : mkrename " ] , <nl> + ) <nl> + <nl> + genrule ( <nl> + name = " sleef_h " , <nl> + srcs = [ <nl> + " src / libm / sleeflibm_header . h . org " , <nl> + " src / libm / sleeflibm_footer . h . org " , <nl> + ] , <nl> + outs = [ " build / include / sleef . h " ] , <nl> + cmd = " { " + " ; " . join ( [ <nl> + " cat $ ( location src / libm / sleeflibm_header . h . org ) " , <nl> + " $ ( location : mkrename ) cinz_ 2 4 __m128d __m128 __m128i __m128i __SSE2__ " , <nl> + " $ ( location : mkrename ) cinz_ 2 4 __m128d __m128 __m128i __m128i __SSE2__ sse2 " , <nl> + " $ ( location : mkrename ) cinz_ 2 4 __m128d __m128 __m128i __m128i __SSE2__ sse4 " , <nl> + " $ ( location : mkrename ) cinz_ 4 8 __m256d __m256 __m128i \ " struct { __m128i x , y ; } \ " __AVX__ " , <nl> + " $ ( location : mkrename ) cinz_ 4 8 __m256d __m256 __m128i \ " struct { __m128i x , y ; } \ " __AVX__ avx " , <nl> + " $ ( location : mkrename ) finz_ 4 8 __m256d __m256 __m128i \ " struct { __m128i x , y ; } \ " __AVX__ fma4 " , <nl> + " $ ( location : mkrename ) finz_ 4 8 __m256d __m256 __m128i __m256i __AVX__ avx2 " , <nl> + " $ ( location : mkrename ) finz_ 2 4 __m128d __m128 __m128i __m128i __SSE2__ avx2128 " , <nl> + " $ ( location : mkrename ) finz_ 8 16 __m512d __m512 __m256i __m512i __AVX512F__ " , <nl> + " $ ( location : mkrename ) finz_ 8 16 __m512d __m512 __m256i __m512i __AVX512F__ avx512f " , <nl> + " $ ( location : mkrename ) cinz_ 8 16 __m512d __m512 __m256i __m512i __AVX512F__ avx512fnofma " , <nl> + " $ ( location : mkrename ) cinz_ 1 1 double float int32_t int32_t __STDC__ purec " , <nl> + " $ ( location : mkrename ) finz_ 1 1 double float int32_t int32_t FP_FAST_FMA purecfma " , <nl> + " cat $ ( location src / libm / sleeflibm_footer . h . 
org ) " , <nl> + ] ) + " ; } > $ @ " , <nl> + tools = [ " : mkrename " ] , <nl> + ) <nl> + <nl> + cc_library ( <nl> + name = " sleef " , <nl> + srcs = [ <nl> + " src / libm / rempitab . c " , <nl> + " src / libm / sleefdp . c " , <nl> + " src / libm / sleefld . c " , <nl> + " src / libm / sleefqp . c " , <nl> + " src / libm / sleefsp . c " , <nl> + ] , <nl> + hdrs = SLEEF_PUBLIC_HEADERS , <nl> + copts = SLEEF_PRIVATE_INCLUDES + SLEEF_COPTS + SLEEF_COMMON_TARGET_COPTS + [ <nl> + " - DDORENAME = 1 " , <nl> + " - DENABLEFLOAT128 = 1 " , <nl> + " - Wno - unused - result " , <nl> + ] , <nl> + includes = SLEEF_PUBLIC_INCLUDES , <nl> + # - lgcc resolves <nl> + # U __addtf3 <nl> + # U __eqtf2 <nl> + # U __fixtfdi <nl> + # U __floatditf <nl> + # U __gttf2 <nl> + # U __lttf2 <nl> + # U __multf3 <nl> + # U __subtf3 <nl> + # in bazel - bin / external / sleef / _objs / sleef / sleefqp . pic . o <nl> + linkopts = [ <nl> + " - lgcc " , <nl> + ] , <nl> + linkstatic = True , <nl> + visibility = SLEEF_VISIBILITY , <nl> + # The purpose of the lists in deps is to keep related pairs of <nl> + # libraries together . In particular , each pair that contains a * det * <nl> + # library originates with a sleef_cc_library ( ) . <nl> + deps = [ <nl> + " : common " , <nl> + " : dispavx " , <nl> + " : dispsse " , <nl> + ] + [ <nl> + " : sleefavx " , <nl> + " : sleefdetavx " , <nl> + ] + [ <nl> + " : sleefavx2 " , <nl> + " : sleefdetavx2 " , <nl> + ] + [ <nl> + " : sleefavx2128 " , <nl> + " : sleefdetavx2128 " , <nl> + ] + [ <nl> + " : sleefavx512f " , <nl> + " : sleefdetavx512f " , <nl> + ] + [ <nl> + " : sleefavx512fnofma " , <nl> + " : sleefdetavx512fnofma " , <nl> + ] + [ <nl> + " : sleeffma4 " , <nl> + " : sleefdetfma4 " , <nl> + ] + [ <nl> + " : sleefsse2 " , <nl> + " : sleefdetsse2 " , <nl> + ] + [ <nl> + " : sleefsse4 " , <nl> + " : sleefdetsse4 " , <nl> + ] + [ <nl> + " : sleefpurec_scalar " , <nl> + " : sleefdetpurec_scalar " , <nl> + ] + [ <nl> + " : sleefpurecfma_scalar " , <nl> + " : sleefdetpurecfma_scalar " , <nl> + ] , <nl> + alwayslink = True , <nl> + ) <nl> + <nl> + cc_library ( <nl> + name = " common " , <nl> + srcs = SLEEF_PRIVATE_HEADERS + [ <nl> + " src / common / common . c " , <nl> + ] , <nl> + copts = SLEEF_PRIVATE_INCLUDES + SLEEF_COPTS + [ <nl> + " - Wno - unused - result " , <nl> + ] , <nl> + linkstatic = True , <nl> + visibility = SLEEF_VISIBILITY , <nl> + alwayslink = True , <nl> + ) <nl> + <nl> + cc_library ( <nl> + name = " dispavx " , <nl> + srcs = SLEEF_PRIVATE_HEADERS + SLEEF_PUBLIC_HEADERS + [ <nl> + " : dispavx_c " , <nl> + ] , <nl> + copts = SLEEF_PRIVATE_INCLUDES + SLEEF_COPTS + SLEEF_COMMON_TARGET_COPTS + [ <nl> + " - DENABLE_AVX2 = 1 " , <nl> + " - DENABLE_FMA4 = 1 " , <nl> + " - mavx " , <nl> + ] , <nl> + includes = SLEEF_PUBLIC_INCLUDES , <nl> + linkstatic = True , <nl> + visibility = SLEEF_VISIBILITY , <nl> + alwayslink = True , <nl> + ) <nl> + <nl> + cc_library ( <nl> + name = " dispsse " , <nl> + srcs = SLEEF_PRIVATE_HEADERS + SLEEF_PUBLIC_HEADERS + [ <nl> + " : dispsse_c " , <nl> + ] , <nl> + copts = SLEEF_PRIVATE_INCLUDES + SLEEF_COPTS + SLEEF_COMMON_TARGET_COPTS + [ <nl> + " - DENABLE_AVX2 = 1 " , <nl> + " - DENABLE_FMA4 = 1 " , <nl> + " - msse2 " , <nl> + ] , <nl> + includes = SLEEF_PUBLIC_INCLUDES , <nl> + linkstatic = True , <nl> + visibility = SLEEF_VISIBILITY , <nl> + alwayslink = True , <nl> + ) <nl> + <nl> + sleef_cc_library ( <nl> + name = " sleefavx512f " , <nl> + srcs = SLEEF_PRIVATE_HEADERS + [ <nl> + " src / libm / sleefsimddp . 
c " , <nl> + " src / libm / sleefsimdsp . c " , <nl> + " : alias_avx512f_h " , <nl> + " : renameavx512f_h " , <nl> + ] , <nl> + copts = SLEEF_PRIVATE_INCLUDES + SLEEF_COPTS + SLEEF_COMMON_TARGET_COPTS + [ <nl> + " - DDORENAME = 1 " , <nl> + " - DALIAS_NO_EXT_SUFFIX = \ \ \ " alias_avx512f . h \ \ \ " " , <nl> + " - DENABLE_AVX512F = 1 " , <nl> + " - mavx512f " , <nl> + ] , <nl> + linkstatic = True , <nl> + visibility = SLEEF_VISIBILITY , <nl> + alwayslink = True , <nl> + ) <nl> + <nl> + sleef_cc_library ( <nl> + name = " sleefavx512fnofma " , <nl> + srcs = SLEEF_PRIVATE_HEADERS + [ <nl> + " src / libm / sleefsimddp . c " , <nl> + " src / libm / sleefsimdsp . c " , <nl> + " : renameavx512fnofma_h " , <nl> + ] , <nl> + copts = SLEEF_PRIVATE_INCLUDES + SLEEF_COPTS + SLEEF_COMMON_TARGET_COPTS + [ <nl> + " - DDORENAME = 1 " , <nl> + " - DENABLE_AVX512FNOFMA = 1 " , <nl> + " - mavx512f " , <nl> + ] , <nl> + linkstatic = True , <nl> + visibility = SLEEF_VISIBILITY , <nl> + alwayslink = True , <nl> + ) <nl> + <nl> + sleef_cc_library ( <nl> + name = " sleefavx " , <nl> + srcs = SLEEF_PRIVATE_HEADERS + [ <nl> + " src / libm / sleefsimddp . c " , <nl> + " src / libm / sleefsimdsp . c " , <nl> + " : renameavx_h " , <nl> + ] , <nl> + copts = SLEEF_PRIVATE_INCLUDES + SLEEF_COPTS + SLEEF_COMMON_TARGET_COPTS + [ <nl> + " - DDORENAME = 1 " , <nl> + " - DENABLE_AVX = 1 " , <nl> + " - mavx " , <nl> + ] , <nl> + linkstatic = True , <nl> + visibility = SLEEF_VISIBILITY , <nl> + alwayslink = True , <nl> + ) <nl> + <nl> + sleef_cc_library ( <nl> + name = " sleefavx2 " , <nl> + srcs = SLEEF_PRIVATE_HEADERS + [ <nl> + " src / libm / sleefsimddp . c " , <nl> + " src / libm / sleefsimdsp . c " , <nl> + " : renameavx2_h " , <nl> + ] , <nl> + copts = SLEEF_PRIVATE_INCLUDES + SLEEF_COPTS + SLEEF_COMMON_TARGET_COPTS + [ <nl> + " - DDORENAME = 1 " , <nl> + " - DENABLE_AVX2 = 1 " , <nl> + " - mavx2 " , <nl> + " - mfma " , <nl> + ] , <nl> + linkstatic = True , <nl> + visibility = SLEEF_VISIBILITY , <nl> + alwayslink = True , <nl> + ) <nl> + <nl> + sleef_cc_library ( <nl> + name = " sleefavx2128 " , <nl> + srcs = SLEEF_PRIVATE_HEADERS + [ <nl> + " src / libm / sleefsimddp . c " , <nl> + " src / libm / sleefsimdsp . c " , <nl> + " : renameavx2128_h " , <nl> + ] , <nl> + copts = SLEEF_PRIVATE_INCLUDES + SLEEF_COPTS + SLEEF_COMMON_TARGET_COPTS + [ <nl> + " - DDORENAME = 1 " , <nl> + " - DENABLE_AVX2128 = 1 " , <nl> + " - mavx2 " , <nl> + " - mfma " , <nl> + ] , <nl> + linkstatic = True , <nl> + visibility = SLEEF_VISIBILITY , <nl> + alwayslink = True , <nl> + ) <nl> + <nl> + sleef_cc_library ( <nl> + name = " sleeffma4 " , <nl> + srcs = SLEEF_PRIVATE_HEADERS + [ <nl> + " src / libm / sleefsimddp . c " , <nl> + " src / libm / sleefsimdsp . c " , <nl> + " : renamefma4_h " , <nl> + ] , <nl> + copts = SLEEF_PRIVATE_INCLUDES + SLEEF_COPTS + SLEEF_COMMON_TARGET_COPTS + [ <nl> + " - DDORENAME = 1 " , <nl> + " - DENABLE_FMA4 = 1 " , <nl> + " - mfma4 " , <nl> + ] , <nl> + linkstatic = True , <nl> + visibility = SLEEF_VISIBILITY , <nl> + alwayslink = True , <nl> + ) <nl> + <nl> + sleef_cc_library ( <nl> + name = " sleefsse2 " , <nl> + srcs = SLEEF_PRIVATE_HEADERS + [ <nl> + " src / libm / sleefsimddp . c " , <nl> + " src / libm / sleefsimdsp . 
c " , <nl> + " : renamesse2_h " , <nl> + ] , <nl> + copts = SLEEF_PRIVATE_INCLUDES + SLEEF_COPTS + SLEEF_COMMON_TARGET_COPTS + [ <nl> + " - DDORENAME = 1 " , <nl> + " - DENABLE_SSE2 = 1 " , <nl> + " - msse2 " , <nl> + ] , <nl> + linkstatic = True , <nl> + visibility = SLEEF_VISIBILITY , <nl> + alwayslink = True , <nl> + ) <nl> + <nl> + sleef_cc_library ( <nl> + name = " sleefsse4 " , <nl> + srcs = SLEEF_PRIVATE_HEADERS + [ <nl> + " src / libm / sleefsimddp . c " , <nl> + " src / libm / sleefsimdsp . c " , <nl> + " : renamesse4_h " , <nl> + ] , <nl> + copts = SLEEF_PRIVATE_INCLUDES + SLEEF_COPTS + SLEEF_COMMON_TARGET_COPTS + [ <nl> + " - DDORENAME = 1 " , <nl> + " - DENABLE_SSE4 = 1 " , <nl> + " - msse4 . 1 " , <nl> + ] , <nl> + linkstatic = True , <nl> + visibility = SLEEF_VISIBILITY , <nl> + alwayslink = True , <nl> + ) <nl> + <nl> + sleef_cc_library ( <nl> + name = " sleefpurec_scalar " , <nl> + srcs = SLEEF_PRIVATE_HEADERS + [ <nl> + " src / libm / sleefsimddp . c " , <nl> + " src / libm / sleefsimdsp . c " , <nl> + " : renamepurec_scalar_h " , <nl> + ] , <nl> + copts = SLEEF_PRIVATE_INCLUDES + SLEEF_COPTS + SLEEF_COMMON_TARGET_COPTS + [ <nl> + " - DDORENAME = 1 " , <nl> + " - DENABLE_PUREC_SCALAR = 1 " , <nl> + ] , <nl> + linkstatic = True , <nl> + visibility = SLEEF_VISIBILITY , <nl> + alwayslink = True , <nl> + ) <nl> + <nl> + sleef_cc_library ( <nl> + name = " sleefpurecfma_scalar " , <nl> + srcs = SLEEF_PRIVATE_HEADERS + [ <nl> + " src / libm / sleefsimddp . c " , <nl> + " src / libm / sleefsimdsp . c " , <nl> + " : renamepurecfma_scalar_h " , <nl> + ] , <nl> + copts = SLEEF_PRIVATE_INCLUDES + SLEEF_COPTS + SLEEF_COMMON_TARGET_COPTS + [ <nl> + " - DDORENAME = 1 " , <nl> + " - DENABLE_PURECFMA_SCALAR = 1 " , <nl> + " - mavx2 " , <nl> + " - mfma " , <nl> + ] , <nl> + linkstatic = True , <nl> + visibility = SLEEF_VISIBILITY , <nl> + alwayslink = True , <nl> + ) <nl> new file mode 100644 <nl> index 000000000000 . . b7e18c92e5e4 <nl> mmm / dev / null <nl> ppp b / third_party / tbb . BUILD <nl> <nl> + load ( " @ rules_cc / / cc : defs . bzl " , " cc_library " ) <nl> + load ( " @ / / third_party : substitution . bzl " , " template_rule " ) <nl> + <nl> + licenses ( [ " notice " ] ) # Apache 2 . 0 <nl> + <nl> + template_rule ( <nl> + name = " version_string " , <nl> + src = " @ / / : aten / src / ATen / cpu / tbb / extra / version_string . ver . in " , <nl> + out = " version_string . h " , <nl> + substitutions = { <nl> + " @ CMAKE_SYSTEM_NAME @ " : " Unknown " , <nl> + " @ CMAKE_SYSTEM @ " : " Unknown " , <nl> + " @ CMAKE_SYSTEM_VERSION @ " : " Unknown " , <nl> + " @ CMAKE_CXX_COMPILER_ID @ " : " Unknown " , <nl> + " @ _configure_date @ " : " Unknown " , <nl> + } <nl> + ) <nl> + <nl> + cc_library ( <nl> + name = " tbb " , <nl> + srcs = [ " : version_string " ] + glob ( <nl> + [ <nl> + " src / old / * . h " , <nl> + " src / rml / client / * . h " , <nl> + " src / rml / include / * . h " , <nl> + " src / rml / server / * . h " , <nl> + " src / tbb / * . h " , <nl> + " src / tbb / tools_api / * . h " , <nl> + " src / tbb / tools_api / legacy / * . h " , <nl> + " src / old / * . cpp " , <nl> + " src / tbb / * . cpp " , <nl> + ] , <nl> + exclude = [ " src / old / test_ * . cpp " ] , <nl> + ) + [ " src / rml / client / rml_tbb . cpp " ] , <nl> + hdrs = glob ( <nl> + [ <nl> + " include / tbb / * " , <nl> + " include / tbb / compat / * " , <nl> + " include / tbb / internal / * " , <nl> + " include / tbb / machine / * " , <nl> + ] , <nl> + exclude = [ " include / tbb / scalable_allocator . 
h " ] , <nl> + ) , <nl> + copts = [ <nl> + " - Iexternal / tbb / src / rml / include " , <nl> + " - Iexternal / tbb / src " , <nl> + " - pthread " , <nl> + " - DDO_ITT_NOTIFY = 1 " , <nl> + " - DUSE_PTHREAD = 1 " , <nl> + " - D__TBB_BUILD = 1 " , <nl> + " - D__TBB_DYNAMIC_LOAD_ENABLED = 0 " , <nl> + " - D__TBB_SOURCE_DIRECTLY_INCLUDED = 1 " , <nl> + " - fno - sanitize = vptr " , <nl> + " - fno - sanitize = thread " , <nl> + ] , <nl> + defines = [ <nl> + # TBB Cannot detect the standard library version when using clang with libstdc + + . <nl> + # See https : / / github . com / 01org / tbb / issues / 22 <nl> + " TBB_USE_GLIBCXX_VERSION = ( _GLIBCXX_RELEASE * 10000 ) " , <nl> + " TBB_PREVIEW_GLOBAL_CONTROL = 1 " , <nl> + " TBB_PREVIEW_LOCAL_OBSERVER = 1 " , <nl> + " __TBB_ALLOW_MUTABLE_FUNCTORS = 1 " , <nl> + ] , <nl> + includes = [ <nl> + " include " , <nl> + " src / tbb / tools_api " , <nl> + ] , <nl> + linkopts = [ <nl> + " - ldl " , <nl> + " - lpthread " , <nl> + " - lrt " , <nl> + ] , <nl> + textual_hdrs = [ " src / tbb / tools_api / ittnotify_static . c " ] , <nl> + visibility = [ " / / visibility : public " ] , <nl> + ) <nl> new file mode 100644 <nl> index 000000000000 . . 4a1f6845b774 <nl> mmm / dev / null <nl> ppp b / third_party / tbb . patch <nl> <nl> pppmmm a / src / rml / server / rml_server . cpp <nl> ppp + b / src / rml / server / rml_server . cpp <nl> + extern " C " void __KMP_call_with_my_server_info ( : : rml : : server_info_callback_t cb <nl> + / * <nl> + * RML server info <nl> + * / <nl> + - # include " version_string . ver " <nl> + + # include " version_string . h " <nl> + <nl> + # ifndef __TBB_VERSION_STRINGS <nl> + - # pragma message ( " Warning : version_string . ver isn ' t generated properly by version_info . sh script ! " ) <nl> + + # pragma message ( " Warning : version_string . h isn ' t generated properly by version_info . sh script ! " ) <nl> + # endif <nl> + <nl> + / / We use the build time as the RML server info . TBB is required to build RML , so we make it the same as the TBB build time . <nl> pppmmm a / src / tbb / tbb_version . h <nl> ppp + b / src / tbb / tbb_version . h <nl> + <nl> + # ifndef ENDL <nl> + # define ENDL " \ n " <nl> + # endif <nl> + - # include " version_string . ver " <nl> + + # include " version_string . h " <nl> + <nl> + # ifndef __TBB_VERSION_STRINGS <nl> + - # pragma message ( " Warning : version_string . ver isn ' t generated properly by version_info . sh script ! " ) <nl> + + # pragma message ( " Warning : version_string . h isn ' t generated properly by version_info . sh script ! " ) <nl> + / / here is an example of macros value : <nl> + # define __TBB_VERSION_STRINGS \ <nl> + " TBB : BUILD_HOST \ tUnknown \ n " \ <nl> new file mode 100644 <nl> index 000000000000 . . a8f9d0452fce <nl> mmm / dev / null <nl> ppp b / tools / config / BUILD <nl> <nl> + load ( " @ bazel_skylib / / lib : selects . bzl " , " selects " ) <nl> + <nl> + config_setting ( <nl> + name = " cuda " , <nl> + define_values = { <nl> + " cuda " : " true " , <nl> + } , <nl> + ) <nl> + <nl> + # Even when building with - - config = cuda , host targets should be built with cuda disabled <nl> + # as these targets will run on CI machines that have no GPUs . <nl> + selects . config_setting_group ( <nl> + name = " cuda_enabled_and_capable " , <nl> + match_all = [ <nl> + " : cuda " , <nl> + " / / tools / toolchain : is_cuda_capable " , <nl> + ] , <nl> + ) <nl> + <nl> + # Configures the system to build with cuda using clang . 
<nl> + config_setting ( <nl> + name = " cuda_clang " , <nl> + define_values = { <nl> + " cuda_clang " : " true " , <nl> + } , <nl> + ) <nl> + <nl> + # Indicates that cuda code should be compiled with nvcc <nl> + # Mostly exists to support _analysis_ of tensorflow ; more work is needed to actually make this <nl> + # setting work . <nl> + config_setting ( <nl> + name = " cuda_nvcc " , <nl> + define_values = { <nl> + " cuda_nvcc " : " true " , <nl> + } , <nl> + ) <nl> + <nl> + config_setting ( <nl> + name = " thread_sanitizer " , <nl> + define_values = { " thread_sanitizer " : " 1 " } , <nl> + visibility = [ " / / visibility : public " ] , <nl> + ) <nl> new file mode 100644 <nl> index 000000000000 . . e69de29bb2d1 <nl>
| Bazel build of pytorch ( ) | pytorch/pytorch | 585f153d0076287f202200ac20c89892459b6f7d | 2020-04-04T00:13:58Z |
mmm a / include / swift / Remote / FailureKinds . def <nl> ppp b / include / swift / Remote / FailureKinds . def <nl> FAILURE ( Unknown , " an unknown failure occurred " , ( ) ) <nl> FAILURE ( Memory , " an unknown failure occurred while reading % 0 at address % 1 " , <nl> ( String , Address ) ) <nl> <nl> - # undef FAILURE <nl> \ No newline at end of file <nl> + # undef FAILURE <nl>
| Add trailing newline to file . | apple/swift | 0be841ebb7a9520cc18d851815647c130309985d | 2016-04-20T20:53:49Z |
mmm a / include / v8 . h <nl> ppp b / include / v8 . h <nl> template < class T > class Handle { <nl> / * * <nl> * Creates an empty handle . <nl> * / <nl> - V8_INLINE Handle ( ) : val_ ( 0 ) { } <nl> + V8_INLINE ( Handle ( ) ) : val_ ( 0 ) { } <nl> <nl> / * * <nl> * Creates a handle for the contents of the specified handle . This <nl> template < class T > class Handle { <nl> * Handle < String > to a variable declared as Handle < Value > , is legal <nl> * because String is a subclass of Value . <nl> * / <nl> - template < class S > V8_INLINE Handle ( Handle < S > that ) <nl> + template < class S > V8_INLINE ( Handle ( Handle < S > that ) ) <nl> : val_ ( reinterpret_cast < T * > ( * that ) ) { <nl> / * * <nl> * This check fails when trying to convert between incompatible <nl> template < class T > class Handle { <nl> / * * <nl> * Returns true if the handle is empty . <nl> * / <nl> - V8_INLINE bool IsEmpty ( ) const { return val_ = = 0 ; } <nl> + V8_INLINE ( bool IsEmpty ( ) const ) { return val_ = = 0 ; } <nl> <nl> / * * <nl> * Sets the handle to be empty . IsEmpty ( ) will then return true . <nl> * / <nl> - V8_INLINE void Clear ( ) { val_ = 0 ; } <nl> + V8_INLINE ( void Clear ( ) ) { val_ = 0 ; } <nl> <nl> - V8_INLINE T * operator - > ( ) const { return val_ ; } <nl> + V8_INLINE ( T * operator - > ( ) const ) { return val_ ; } <nl> <nl> - V8_INLINE T * operator * ( ) const { return val_ ; } <nl> + V8_INLINE ( T * operator * ( ) const ) { return val_ ; } <nl> <nl> / * * <nl> * Checks whether two handles are the same . <nl> template < class T > class Handle { <nl> * to which they refer are identical . <nl> * The handles ' references are not checked . <nl> * / <nl> - template < class S > V8_INLINE bool operator = = ( const Handle < S > & that ) const { <nl> + template < class S > V8_INLINE ( bool operator = = ( const Handle < S > & that ) const ) { <nl> internal : : Object * * a = reinterpret_cast < internal : : Object * * > ( * * this ) ; <nl> internal : : Object * * b = reinterpret_cast < internal : : Object * * > ( * that ) ; <nl> if ( a = = 0 ) return b = = 0 ; <nl> template < class T > class Handle { <nl> return * a = = * b ; <nl> } <nl> <nl> - template < class S > V8_INLINE bool operator = = ( <nl> - const Persistent < S > & that ) const { <nl> + template < class S > V8_INLINE ( <nl> + bool operator = = ( const Persistent < S > & that ) const ) { <nl> internal : : Object * * a = reinterpret_cast < internal : : Object * * > ( * * this ) ; <nl> internal : : Object * * b = reinterpret_cast < internal : : Object * * > ( * that ) ; <nl> if ( a = = 0 ) return b = = 0 ; <nl> template < class T > class Handle { <nl> * the objects to which they refer are different . <nl> * The handles ' references are not checked . <nl> * / <nl> - template < class S > V8_INLINE bool operator ! = ( const Handle < S > & that ) const { <nl> + template < class S > V8_INLINE ( bool operator ! = ( const Handle < S > & that ) const ) { <nl> return ! operator = = ( that ) ; <nl> } <nl> <nl> - template < class S > V8_INLINE bool operator ! = ( <nl> - const Persistent < S > & that ) const { <nl> + template < class S > V8_INLINE ( <nl> + bool operator ! = ( const Persistent < S > & that ) const ) { <nl> return ! 
operator = = ( that ) ; <nl> } <nl> <nl> - template < class S > V8_INLINE static Handle < T > Cast ( Handle < S > that ) { <nl> + template < class S > V8_INLINE ( static Handle < T > Cast ( Handle < S > that ) ) { <nl> # ifdef V8_ENABLE_CHECKS <nl> / / If we ' re going to perform the type check then we have to check <nl> / / that the handle isn ' t empty before doing the checked cast . <nl> template < class T > class Handle { <nl> return Handle < T > ( T : : Cast ( * that ) ) ; <nl> } <nl> <nl> - template < class S > V8_INLINE Handle < S > As ( ) { <nl> + template < class S > V8_INLINE ( Handle < S > As ( ) ) { <nl> return Handle < S > : : Cast ( * this ) ; <nl> } <nl> <nl> - V8_INLINE static Handle < T > New ( Isolate * isolate , Handle < T > that ) { <nl> + V8_INLINE ( static Handle < T > New ( Isolate * isolate , Handle < T > that ) ) { <nl> return New ( isolate , that . val_ ) ; <nl> } <nl> - V8_INLINE static Handle < T > New ( Isolate * isolate , const Persistent < T > & that ) { <nl> + V8_INLINE ( static Handle < T > New ( Isolate * isolate , const Persistent < T > & that ) ) { <nl> return New ( isolate , that . val_ ) ; <nl> } <nl> <nl> template < class T > class Handle { <nl> / * * <nl> * Creates a new handle for the specified value . <nl> * / <nl> - V8_INLINE explicit Handle ( T * val ) : val_ ( val ) { } <nl> + V8_INLINE ( explicit Handle ( T * val ) ) : val_ ( val ) { } <nl> <nl> private : <nl> friend class Utils ; <nl> template < class T > class Handle { <nl> friend class Context ; <nl> friend class HandleScope ; <nl> <nl> - V8_INLINE static Handle < T > New ( Isolate * isolate , T * that ) ; <nl> + V8_INLINE ( static Handle < T > New ( Isolate * isolate , T * that ) ) ; <nl> <nl> T * val_ ; <nl> } ; <nl> template < class T > class Handle { <nl> * / <nl> template < class T > class Local : public Handle < T > { <nl> public : <nl> - V8_INLINE Local ( ) ; <nl> - template < class S > V8_INLINE Local ( Local < S > that ) <nl> + V8_INLINE ( Local ( ) ) ; <nl> + template < class S > V8_INLINE ( Local ( Local < S > that ) ) <nl> : Handle < T > ( reinterpret_cast < T * > ( * that ) ) { <nl> / * * <nl> * This check fails when trying to convert between incompatible <nl> template < class T > class Local : public Handle < T > { <nl> } <nl> <nl> <nl> - template < class S > V8_INLINE static Local < T > Cast ( Local < S > that ) { <nl> + template < class S > V8_INLINE ( static Local < T > Cast ( Local < S > that ) ) { <nl> # ifdef V8_ENABLE_CHECKS <nl> / / If we ' re going to perform the type check then we have to check <nl> / / that the handle isn ' t empty before doing the checked cast . <nl> template < class T > class Local : public Handle < T > { <nl> # endif <nl> return Local < T > ( T : : Cast ( * that ) ) ; <nl> } <nl> - template < class S > V8_INLINE Local ( Handle < S > that ) <nl> + template < class S > V8_INLINE ( Local ( Handle < S > that ) ) <nl> : Handle < T > ( reinterpret_cast < T * > ( * that ) ) { <nl> TYPE_CHECK ( T , S ) ; <nl> } <nl> <nl> - template < class S > V8_INLINE Local < S > As ( ) { <nl> + template < class S > V8_INLINE ( Local < S > As ( ) ) { <nl> return Local < S > : : Cast ( * this ) ; <nl> } <nl> <nl> template < class T > class Local : public Handle < T > { <nl> * The referee is kept alive by the local handle even when <nl> * the original handle is destroyed / disposed . 
<nl> * / <nl> - V8_INLINE static Local < T > New ( Handle < T > that ) ; <nl> - V8_INLINE static Local < T > New ( Isolate * isolate , Handle < T > that ) ; <nl> + V8_INLINE ( static Local < T > New ( Handle < T > that ) ) ; <nl> + V8_INLINE ( static Local < T > New ( Isolate * isolate , Handle < T > that ) ) ; <nl> template < class M > <nl> - V8_INLINE static Local < T > New ( Isolate * isolate , <nl> - const Persistent < T , M > & that ) ; <nl> + V8_INLINE ( static Local < T > New ( Isolate * isolate , <nl> + const Persistent < T , M > & that ) ) ; <nl> <nl> # ifndef V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR <nl> <nl> private : <nl> # endif <nl> - template < class S > V8_INLINE Local ( S * that ) : Handle < T > ( that ) { } <nl> + template < class S > V8_INLINE ( Local ( S * that ) : Handle < T > ( that ) ) { } <nl> <nl> private : <nl> friend class Utils ; <nl> template < class T > class Local : public Handle < T > { <nl> template < class F > friend class internal : : CustomArguments ; <nl> friend class HandleScope ; <nl> <nl> - V8_INLINE static Local < T > New ( Isolate * isolate , T * that ) ; <nl> + V8_INLINE ( static Local < T > New ( Isolate * isolate , T * that ) ) ; <nl> } ; <nl> <nl> <nl> / / Eternal handles are set - once handles that live for the life of the isolate . <nl> template < class T > class Eternal { <nl> public : <nl> - V8_INLINE Eternal ( ) : index_ ( kInitialValue ) { } <nl> + V8_INLINE ( Eternal ( ) ) : index_ ( kInitialValue ) { } <nl> template < class S > <nl> - V8_INLINE Eternal ( Isolate * isolate , Local < S > handle ) : index_ ( kInitialValue ) { <nl> + V8_INLINE ( Eternal ( Isolate * isolate , Local < S > handle ) ) <nl> + : index_ ( kInitialValue ) { <nl> Set ( isolate , handle ) ; <nl> } <nl> / / Can only be safely called if already set . <nl> - V8_INLINE Local < T > Get ( Isolate * isolate ) ; <nl> - V8_INLINE bool IsEmpty ( ) { return index_ = = kInitialValue ; } <nl> - template < class S > V8_INLINE void Set ( Isolate * isolate , Local < S > handle ) ; <nl> + V8_INLINE ( Local < T > Get ( Isolate * isolate ) ) ; <nl> + V8_INLINE ( bool IsEmpty ( ) ) { return index_ = = kInitialValue ; } <nl> + template < class S > <nl> + V8_INLINE ( void Set ( Isolate * isolate , Local < S > handle ) ) ; <nl> <nl> private : <nl> static const int kInitialValue = - 1 ; <nl> class WeakCallbackData { <nl> public : <nl> typedef void ( * Callback ) ( const WeakCallbackData < T , P > & data ) ; <nl> <nl> - V8_INLINE Isolate * GetIsolate ( ) const { return isolate_ ; } <nl> - V8_INLINE Local < T > GetValue ( ) const { return handle_ ; } <nl> - V8_INLINE P * GetParameter ( ) const { return parameter_ ; } <nl> + V8_INLINE ( Isolate * GetIsolate ( ) ) const { return isolate_ ; } <nl> + V8_INLINE ( Local < T > GetValue ( ) ) const { return handle_ ; } <nl> + V8_INLINE ( P * GetParameter ( ) ) const { return parameter_ ; } <nl> <nl> private : <nl> friend class internal : : GlobalHandles ; <nl> class NonCopyablePersistentTraits { <nl> typedef Persistent < T , NonCopyablePersistentTraits < T > > NonCopyablePersistent ; <nl> static const bool kResetInDestructor = false ; <nl> template < class S , class M > <nl> - V8_INLINE static void Copy ( const Persistent < S , M > & source , <nl> - NonCopyablePersistent * dest ) { <nl> + V8_INLINE ( static void Copy ( const Persistent < S , M > & source , <nl> + NonCopyablePersistent * dest ) ) { <nl> Uncompilable < Object > ( ) ; <nl> } <nl> / / TODO ( dcarney ) : come up with a good compile error here . 
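To make the mechanical change above easier to follow, here is a small sketch of the two declaration styles involved; Thing and Frobnicate are invented names used only for illustration and are not part of v8.h:

// Function-like form restored throughout this patch: V8_INLINE(...) receives the whole declarator.
class Thing {
 public:
  V8_INLINE(int Frobnicate() const) { return 42; }
};
// Keyword-like form being replaced (the '-' lines above):
//   V8_INLINE int Frobnicate() const { return 42; }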
<nl> - template < class O > V8_INLINE static void Uncompilable ( ) { <nl> + template < class O > <nl> + V8_INLINE ( static void Uncompilable ( ) ) { <nl> TYPE_CHECK ( O , Primitive ) ; <nl> } <nl> } ; <nl> template < class T , class M > class Persistent { <nl> / * * <nl> * A Persistent with no storage cell . <nl> * / <nl> - V8_INLINE Persistent ( ) : val_ ( 0 ) { } <nl> + V8_INLINE ( Persistent ( ) ) : val_ ( 0 ) { } <nl> / * * <nl> * Construct a Persistent from a Handle . <nl> * When the Handle is non - empty , a new storage cell is created <nl> * pointing to the same object , and no flags are set . <nl> * / <nl> - template < class S > V8_INLINE Persistent ( Isolate * isolate , Handle < S > that ) <nl> + template < class S > V8_INLINE ( Persistent ( Isolate * isolate , Handle < S > that ) ) <nl> : val_ ( New ( isolate , * that ) ) { <nl> TYPE_CHECK ( T , S ) ; <nl> } <nl> template < class T , class M > class Persistent { <nl> * pointing to the same object , and no flags are set . <nl> * / <nl> template < class S , class M2 > <nl> - V8_INLINE Persistent ( Isolate * isolate , const Persistent < S , M2 > & that ) <nl> + V8_INLINE ( Persistent ( Isolate * isolate , const Persistent < S , M2 > & that ) ) <nl> : val_ ( New ( isolate , * that ) ) { <nl> TYPE_CHECK ( T , S ) ; <nl> } <nl> template < class T , class M > class Persistent { <nl> * traits class is called , allowing the setting of flags based on the <nl> * copied Persistent . <nl> * / <nl> - V8_INLINE Persistent ( const Persistent & that ) : val_ ( 0 ) { <nl> + V8_INLINE ( Persistent ( const Persistent & that ) ) : val_ ( 0 ) { <nl> Copy ( that ) ; <nl> } <nl> template < class S , class M2 > <nl> - V8_INLINE Persistent ( const Persistent < S , M2 > & that ) : val_ ( 0 ) { <nl> + V8_INLINE ( Persistent ( const Persistent < S , M2 > & that ) ) : val_ ( 0 ) { <nl> Copy ( that ) ; <nl> } <nl> - V8_INLINE Persistent & operator = ( const Persistent & that ) { / / NOLINT <nl> + V8_INLINE ( Persistent & operator = ( const Persistent & that ) ) { / / NOLINT <nl> Copy ( that ) ; <nl> return * this ; <nl> } <nl> template < class S , class M2 > <nl> - V8_INLINE Persistent & operator = ( const Persistent < S , M2 > & that ) { / / NOLINT <nl> + V8_INLINE ( Persistent & operator = ( const Persistent < S , M2 > & that ) ) { / / NOLINT <nl> Copy ( that ) ; <nl> return * this ; <nl> } <nl> template < class T , class M > class Persistent { <nl> * kResetInDestructor flags in the traits class . Since not calling dispose <nl> * can result in a memory leak , it is recommended to always set this flag . <nl> * / <nl> - V8_INLINE ~ Persistent ( ) { <nl> + V8_INLINE ( ~ Persistent ( ) ) { <nl> if ( M : : kResetInDestructor ) Reset ( ) ; <nl> } <nl> <nl> template < class T , class M > class Persistent { <nl> * If non - empty , destroy the underlying storage cell <nl> * IsEmpty ( ) will return true after this call . 
<nl> * / <nl> - V8_INLINE void Reset ( ) ; <nl> + V8_INLINE ( void Reset ( ) ) ; <nl> + template < class S > <nl> / * * <nl> * If non - empty , destroy the underlying storage cell <nl> * and create a new one with the contents of other if other is non empty <nl> * / <nl> - template < class S > <nl> - V8_INLINE void Reset ( Isolate * isolate , const Handle < S > & other ) ; <nl> + V8_INLINE ( void Reset ( Isolate * isolate , const Handle < S > & other ) ) ; <nl> / * * <nl> * If non - empty , destroy the underlying storage cell <nl> * and create a new one with the contents of other if other is non empty <nl> * / <nl> template < class S , class M2 > <nl> - V8_INLINE void Reset ( Isolate * isolate , const Persistent < S , M2 > & other ) ; <nl> + V8_INLINE ( void Reset ( Isolate * isolate , const Persistent < S , M2 > & other ) ) ; <nl> / / TODO ( dcarney ) : deprecate <nl> - V8_INLINE void Dispose ( ) { Reset ( ) ; } <nl> - V8_DEPRECATED ( V8_INLINE void Dispose ( Isolate * isolate ) ) { Reset ( ) ; } <nl> + V8_INLINE ( void Dispose ( ) ) { Reset ( ) ; } <nl> + V8_DEPRECATED ( V8_INLINE ( void Dispose ( Isolate * isolate ) ) ) { Reset ( ) ; } <nl> <nl> - V8_INLINE bool IsEmpty ( ) const { return val_ = = 0 ; } <nl> + V8_INLINE ( bool IsEmpty ( ) const ) { return val_ = = 0 ; } <nl> <nl> / / TODO ( dcarney ) : this is pretty useless , fix or remove <nl> template < class S > <nl> - V8_INLINE static Persistent < T > & Cast ( Persistent < S > & that ) { / / NOLINT <nl> + V8_INLINE ( static Persistent < T > & Cast ( Persistent < S > & that ) ) { / / NOLINT <nl> # ifdef V8_ENABLE_CHECKS <nl> / / If we ' re going to perform the type check then we have to check <nl> / / that the handle isn ' t empty before doing the checked cast . <nl> template < class T , class M > class Persistent { <nl> } <nl> <nl> / / TODO ( dcarney ) : this is pretty useless , fix or remove <nl> - template < class S > V8_INLINE Persistent < S > & As ( ) { / / NOLINT <nl> + template < class S > V8_INLINE ( Persistent < S > & As ( ) ) { / / NOLINT <nl> return Persistent < S > : : Cast ( * this ) ; <nl> } <nl> <nl> - template < class S , class M2 > <nl> - V8_INLINE bool operator = = ( const Persistent < S , M2 > & that ) const { <nl> + template < class S , class M2 > V8_INLINE ( <nl> + bool operator = = ( const Persistent < S , M2 > & that ) const ) { <nl> internal : : Object * * a = reinterpret_cast < internal : : Object * * > ( * * this ) ; <nl> internal : : Object * * b = reinterpret_cast < internal : : Object * * > ( * that ) ; <nl> if ( a = = 0 ) return b = = 0 ; <nl> template < class T , class M > class Persistent { <nl> return * a = = * b ; <nl> } <nl> <nl> - template < class S > V8_INLINE bool operator = = ( const Handle < S > & that ) const { <nl> + template < class S > V8_INLINE ( bool operator = = ( const Handle < S > & that ) const ) { <nl> internal : : Object * * a = reinterpret_cast < internal : : Object * * > ( * * this ) ; <nl> internal : : Object * * b = reinterpret_cast < internal : : Object * * > ( * that ) ; <nl> if ( a = = 0 ) return b = = 0 ; <nl> template < class T , class M > class Persistent { <nl> return * a = = * b ; <nl> } <nl> <nl> - template < class S , class M2 > <nl> - V8_INLINE bool operator ! = ( const Persistent < S , M2 > & that ) const { <nl> + template < class S , class M2 > V8_INLINE ( <nl> + bool operator ! = ( const Persistent < S , M2 > & that ) const ) { <nl> return ! operator = = ( that ) ; <nl> } <nl> <nl> - template < class S > V8_INLINE bool operator ! 
= ( const Handle < S > & that ) const { <nl> + template < class S > V8_INLINE ( bool operator ! = ( const Handle < S > & that ) const ) { <nl> return ! operator = = ( that ) ; <nl> } <nl> <nl> template < typename P > <nl> - V8_INLINE void SetWeak ( <nl> + V8_INLINE ( void SetWeak ( <nl> P * parameter , <nl> - typename WeakCallbackData < T , P > : : Callback callback ) ; <nl> + typename WeakCallbackData < T , P > : : Callback callback ) ) ; <nl> <nl> template < typename S , typename P > <nl> - V8_INLINE void SetWeak ( <nl> + V8_INLINE ( void SetWeak ( <nl> P * parameter , <nl> - typename WeakCallbackData < S , P > : : Callback callback ) ; <nl> + typename WeakCallbackData < S , P > : : Callback callback ) ) ; <nl> <nl> / / TODO ( dcarney ) : deprecate <nl> template < typename S , typename P > <nl> - V8_INLINE void MakeWeak ( <nl> + V8_INLINE ( void MakeWeak ( <nl> P * parameter , <nl> - typename WeakReferenceCallbacks < S , P > : : Revivable callback ) ; <nl> + typename WeakReferenceCallbacks < S , P > : : Revivable callback ) ) ; <nl> <nl> / / TODO ( dcarney ) : deprecate <nl> template < typename P > <nl> - V8_INLINE void MakeWeak ( <nl> + V8_INLINE ( void MakeWeak ( <nl> P * parameter , <nl> - typename WeakReferenceCallbacks < T , P > : : Revivable callback ) ; <nl> + typename WeakReferenceCallbacks < T , P > : : Revivable callback ) ) ; <nl> <nl> - V8_INLINE void ClearWeak ( ) ; <nl> + V8_INLINE ( void ClearWeak ( ) ) ; <nl> <nl> - V8_DEPRECATED ( V8_INLINE void ClearWeak ( Isolate * isolate ) ) { ClearWeak ( ) ; } <nl> + V8_DEPRECATED ( V8_INLINE ( void ClearWeak ( Isolate * isolate ) ) ) { ClearWeak ( ) ; } <nl> <nl> / * * <nl> * Marks the reference to this object independent . Garbage collector is free <nl> template < class T , class M > class Persistent { <nl> * independent handle should not assume that it will be preceded by a global <nl> * GC prologue callback or followed by a global GC epilogue callback . <nl> * / <nl> - V8_INLINE void MarkIndependent ( ) ; <nl> + V8_INLINE ( void MarkIndependent ( ) ) ; <nl> <nl> - V8_DEPRECATED ( V8_INLINE void MarkIndependent ( Isolate * isolate ) ) { <nl> + V8_DEPRECATED ( V8_INLINE ( void MarkIndependent ( Isolate * isolate ) ) ) { <nl> MarkIndependent ( ) ; <nl> } <nl> <nl> template < class T , class M > class Persistent { <nl> * external dependencies . This mark is automatically cleared after each <nl> * garbage collection . <nl> * / <nl> - V8_INLINE void MarkPartiallyDependent ( ) ; <nl> + V8_INLINE ( void MarkPartiallyDependent ( ) ) ; <nl> <nl> - V8_DEPRECATED ( V8_INLINE void MarkPartiallyDependent ( Isolate * isolate ) ) { <nl> + V8_DEPRECATED ( V8_INLINE ( void MarkPartiallyDependent ( Isolate * isolate ) ) ) { <nl> MarkPartiallyDependent ( ) ; <nl> } <nl> <nl> - V8_INLINE bool IsIndependent ( ) const ; <nl> + V8_INLINE ( bool IsIndependent ( ) const ) ; <nl> <nl> - V8_DEPRECATED ( V8_INLINE bool IsIndependent ( Isolate * isolate ) const ) { <nl> + V8_DEPRECATED ( V8_INLINE ( bool IsIndependent ( Isolate * isolate ) ) const ) { <nl> return IsIndependent ( ) ; <nl> } <nl> <nl> / * * Checks if the handle holds the only reference to an object . * / <nl> - V8_INLINE bool IsNearDeath ( ) const ; <nl> + V8_INLINE ( bool IsNearDeath ( ) const ) ; <nl> <nl> - V8_DEPRECATED ( V8_INLINE bool IsNearDeath ( Isolate * isolate ) const ) { <nl> + V8_DEPRECATED ( V8_INLINE ( bool IsNearDeath ( Isolate * isolate ) ) const ) { <nl> return IsNearDeath ( ) ; <nl> } <nl> <nl> / * * Returns true if the handle ' s reference is weak . 
* / <nl> - V8_INLINE bool IsWeak ( ) const ; <nl> + V8_INLINE ( bool IsWeak ( ) const ) ; <nl> <nl> - V8_DEPRECATED ( V8_INLINE bool IsWeak ( Isolate * isolate ) const ) { <nl> + V8_DEPRECATED ( V8_INLINE ( bool IsWeak ( Isolate * isolate ) ) const ) { <nl> return IsWeak ( ) ; <nl> } <nl> <nl> template < class T , class M > class Persistent { <nl> * Assigns a wrapper class ID to the handle . See RetainedObjectInfo interface <nl> * description in v8 - profiler . h for details . <nl> * / <nl> - V8_INLINE void SetWrapperClassId ( uint16_t class_id ) ; <nl> + V8_INLINE ( void SetWrapperClassId ( uint16_t class_id ) ) ; <nl> <nl> V8_DEPRECATED ( <nl> - V8_INLINE void SetWrapperClassId ( Isolate * isolate , uint16_t class_id ) ) { <nl> + V8_INLINE ( void SetWrapperClassId ( Isolate * isolate , uint16_t class_id ) ) ) { <nl> SetWrapperClassId ( class_id ) ; <nl> } <nl> <nl> template < class T , class M > class Persistent { <nl> * Returns the class ID previously assigned to this handle or 0 if no class ID <nl> * was previously assigned . <nl> * / <nl> - V8_INLINE uint16_t WrapperClassId ( ) const ; <nl> + V8_INLINE ( uint16_t WrapperClassId ( ) const ) ; <nl> <nl> - V8_DEPRECATED ( V8_INLINE uint16_t WrapperClassId ( Isolate * isolate ) const ) { <nl> + V8_DEPRECATED ( V8_INLINE ( uint16_t WrapperClassId ( Isolate * isolate ) ) const ) { <nl> return WrapperClassId ( ) ; <nl> } <nl> <nl> / / TODO ( dcarney ) : remove <nl> - V8_INLINE T * ClearAndLeak ( ) ; <nl> + V8_INLINE ( T * ClearAndLeak ( ) ) ; <nl> <nl> / / TODO ( dcarney ) : remove <nl> - V8_INLINE void Clear ( ) { val_ = 0 ; } <nl> + V8_INLINE ( void Clear ( ) ) { val_ = 0 ; } <nl> <nl> / / TODO ( dcarney ) : remove <nl> # ifndef V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR <nl> <nl> private : <nl> # endif <nl> - template < class S > V8_INLINE Persistent ( S * that ) : val_ ( that ) { } <nl> + template < class S > V8_INLINE ( Persistent ( S * that ) ) : val_ ( that ) { } <nl> <nl> - V8_INLINE T * operator * ( ) const { return val_ ; } <nl> + V8_INLINE ( T * operator * ( ) const ) { return val_ ; } <nl> <nl> private : <nl> friend class Utils ; <nl> template < class T , class M > class Persistent { <nl> template < class F1 , class F2 > friend class Persistent ; <nl> template < class F > friend class ReturnValue ; <nl> <nl> - V8_INLINE static T * New ( Isolate * isolate , T * that ) ; <nl> + V8_INLINE ( static T * New ( Isolate * isolate , T * that ) ) ; <nl> template < class S , class M2 > <nl> - V8_INLINE void Copy ( const Persistent < S , M2 > & that ) ; <nl> + V8_INLINE ( void Copy ( const Persistent < S , M2 > & that ) ) ; <nl> <nl> T * val_ ; <nl> } ; <nl> class V8_EXPORT HandleScope { <nl> internal : : Object * * next ; <nl> internal : : Object * * limit ; <nl> int level ; <nl> - V8_INLINE void Initialize ( ) { <nl> + V8_INLINE ( void Initialize ( ) ) { <nl> next = limit = NULL ; <nl> level = 0 ; <nl> } <nl> class V8_EXPORT ScriptData { / / NOLINT <nl> * / <nl> class ScriptOrigin { <nl> public : <nl> - V8_INLINE ScriptOrigin ( <nl> + V8_INLINE ( ScriptOrigin ( <nl> Handle < Value > resource_name , <nl> Handle < Integer > resource_line_offset = Handle < Integer > ( ) , <nl> Handle < Integer > resource_column_offset = Handle < Integer > ( ) , <nl> - Handle < Boolean > resource_is_shared_cross_origin = Handle < Boolean > ( ) ) <nl> + Handle < Boolean > resource_is_shared_cross_origin = Handle < Boolean > ( ) ) ) <nl> : resource_name_ ( resource_name ) , <nl> resource_line_offset_ ( resource_line_offset ) , <nl> resource_column_offset_ ( 
resource_column_offset ) , <nl> resource_is_shared_cross_origin_ ( resource_is_shared_cross_origin ) { } <nl> - V8_INLINE Handle < Value > ResourceName ( ) const ; <nl> - V8_INLINE Handle < Integer > ResourceLineOffset ( ) const ; <nl> - V8_INLINE Handle < Integer > ResourceColumnOffset ( ) const ; <nl> - V8_INLINE Handle < Boolean > ResourceIsSharedCrossOrigin ( ) const ; <nl> + V8_INLINE ( Handle < Value > ResourceName ( ) const ) ; <nl> + V8_INLINE ( Handle < Integer > ResourceLineOffset ( ) const ) ; <nl> + V8_INLINE ( Handle < Integer > ResourceColumnOffset ( ) const ) ; <nl> + V8_INLINE ( Handle < Boolean > ResourceIsSharedCrossOrigin ( ) const ) ; <nl> private : <nl> Handle < Value > resource_name_ ; <nl> Handle < Integer > resource_line_offset_ ; <nl> class V8_EXPORT Value : public Data { <nl> * Returns true if this value is the undefined value . See ECMA - 262 <nl> * 4 . 3 . 10 . <nl> * / <nl> - V8_INLINE bool IsUndefined ( ) const ; <nl> + V8_INLINE ( bool IsUndefined ( ) const ) ; <nl> <nl> / * * <nl> * Returns true if this value is the null value . See ECMA - 262 <nl> * 4 . 3 . 11 . <nl> * / <nl> - V8_INLINE bool IsNull ( ) const ; <nl> + V8_INLINE ( bool IsNull ( ) const ) ; <nl> <nl> / * * <nl> * Returns true if this value is true . <nl> class V8_EXPORT Value : public Data { <nl> * Returns true if this value is an instance of the String type . <nl> * See ECMA - 262 8 . 4 . <nl> * / <nl> - V8_INLINE bool IsString ( ) const ; <nl> + V8_INLINE ( bool IsString ( ) const ) ; <nl> <nl> / * * <nl> * Returns true if this value is a symbol . <nl> class V8_EXPORT Value : public Data { <nl> bool Equals ( Handle < Value > that ) const ; <nl> bool StrictEquals ( Handle < Value > that ) const ; <nl> <nl> - template < class T > V8_INLINE static Value * Cast ( T * value ) ; <nl> + template < class T > V8_INLINE ( static Value * Cast ( T * value ) ) ; <nl> <nl> private : <nl> - V8_INLINE bool QuickIsUndefined ( ) const ; <nl> - V8_INLINE bool QuickIsNull ( ) const ; <nl> - V8_INLINE bool QuickIsString ( ) const ; <nl> + V8_INLINE ( bool QuickIsUndefined ( ) const ) ; <nl> + V8_INLINE ( bool QuickIsNull ( ) const ) ; <nl> + V8_INLINE ( bool QuickIsString ( ) const ) ; <nl> bool FullIsUndefined ( ) const ; <nl> bool FullIsNull ( ) const ; <nl> bool FullIsString ( ) const ; <nl> class V8_EXPORT Primitive : public Value { } ; <nl> class V8_EXPORT Boolean : public Primitive { <nl> public : <nl> bool Value ( ) const ; <nl> - V8_INLINE static Handle < Boolean > New ( bool value ) ; <nl> + V8_INLINE ( static Handle < Boolean > New ( bool value ) ) ; <nl> } ; <nl> <nl> <nl> class V8_EXPORT String : public Primitive { <nl> / * * <nl> * This function is no longer useful . <nl> * / <nl> - V8_DEPRECATED ( V8_INLINE bool MayContainNonAscii ( ) const ) { return true ; } <nl> + V8_DEPRECATED ( V8_INLINE ( bool MayContainNonAscii ( ) ) const ) { return true ; } <nl> <nl> / * * <nl> * Returns whether this string is known to contain only one byte data . <nl> class V8_EXPORT String : public Primitive { <nl> * A zero length string . <nl> * / <nl> static v8 : : Local < v8 : : String > Empty ( ) ; <nl> - V8_INLINE static v8 : : Local < v8 : : String > Empty ( Isolate * isolate ) ; <nl> + V8_INLINE ( static v8 : : Local < v8 : : String > Empty ( Isolate * isolate ) ) ; <nl> <nl> / * * <nl> * Returns true if the string is external <nl> class V8_EXPORT String : public Primitive { <nl> * regardless of the encoding , otherwise return NULL . The encoding of the <nl> * string is returned in encoding_out . 
<nl> * / <nl> - V8_INLINE ExternalStringResourceBase * GetExternalStringResourceBase ( <nl> - Encoding * encoding_out ) const ; <nl> + V8_INLINE ( ExternalStringResourceBase * GetExternalStringResourceBase ( <nl> + Encoding * encoding_out ) const ) ; <nl> <nl> / * * <nl> * Get the ExternalStringResource for an external string . Returns <nl> * NULL if IsExternal ( ) doesn ' t return true . <nl> * / <nl> - V8_INLINE ExternalStringResource * GetExternalStringResource ( ) const ; <nl> + V8_INLINE ( ExternalStringResource * GetExternalStringResource ( ) const ) ; <nl> <nl> / * * <nl> * Get the ExternalAsciiStringResource for an external ASCII string . <nl> class V8_EXPORT String : public Primitive { <nl> * / <nl> const ExternalAsciiStringResource * GetExternalAsciiStringResource ( ) const ; <nl> <nl> - V8_INLINE static String * Cast ( v8 : : Value * obj ) ; <nl> + V8_INLINE ( static String * Cast ( v8 : : Value * obj ) ) ; <nl> <nl> / / TODO ( dcarney ) : deprecate <nl> / * * <nl> class V8_EXPORT String : public Primitive { <nl> * The second parameter ' length ' gives the buffer length . If omitted , <nl> * the function calls ' strlen ' to determine the buffer length . <nl> * / <nl> - V8_INLINE static Local < String > New ( const char * data , int length = - 1 ) ; <nl> + V8_INLINE ( static Local < String > New ( const char * data , int length = - 1 ) ) ; <nl> <nl> / / TODO ( dcarney ) : deprecate <nl> / * * Allocates a new string from 16 - bit character codes . * / <nl> - V8_INLINE static Local < String > New ( const uint16_t * data , int length = - 1 ) ; <nl> + V8_INLINE ( static Local < String > New ( const uint16_t * data , int length = - 1 ) ) ; <nl> <nl> / / TODO ( dcarney ) : deprecate <nl> / * * <nl> * Creates an internalized string ( historically called a " symbol " , <nl> * not to be confused with ES6 symbols ) . Returns one if it exists already . <nl> * / <nl> - V8_INLINE static Local < String > NewSymbol ( const char * data , int length = - 1 ) ; <nl> + V8_INLINE ( static Local < String > NewSymbol ( const char * data , int length = - 1 ) ) ; <nl> <nl> enum NewStringType { <nl> kNormalString , kInternalizedString , kUndetectableString <nl> class V8_EXPORT String : public Primitive { <nl> <nl> / / TODO ( dcarney ) : deprecate <nl> / * * Creates an undetectable string from the supplied ASCII or UTF - 8 data . * / <nl> - V8_INLINE static Local < String > NewUndetectable ( const char * data , <nl> - int length = - 1 ) ; <nl> + V8_INLINE ( <nl> + static Local < String > NewUndetectable ( const char * data , int length = - 1 ) ) ; <nl> <nl> / / TODO ( dcarney ) : deprecate <nl> / * * Creates an undetectable string from the supplied 16 - bit character codes . * / <nl> - V8_INLINE static Local < String > NewUndetectable ( const uint16_t * data , <nl> - int length = - 1 ) ; <nl> + V8_INLINE ( static Local < String > NewUndetectable ( <nl> + const uint16_t * data , int length = - 1 ) ) ; <nl> <nl> / * * <nl> * Converts an object to a UTF - 8 - encoded character array . Useful if <nl> class V8_EXPORT Symbol : public Primitive { <nl> / / Create a symbol with a print name . 
<nl> static Local < Symbol > New ( Isolate * isolate , const char * data , int length = - 1 ) ; <nl> <nl> - V8_INLINE static Symbol * Cast ( v8 : : Value * obj ) ; <nl> + V8_INLINE ( static Symbol * Cast ( v8 : : Value * obj ) ) ; <nl> private : <nl> Symbol ( ) ; <nl> static void CheckCast ( v8 : : Value * obj ) ; <nl> class V8_EXPORT Number : public Primitive { <nl> double Value ( ) const ; <nl> static Local < Number > New ( double value ) ; <nl> static Local < Number > New ( Isolate * isolate , double value ) ; <nl> - V8_INLINE static Number * Cast ( v8 : : Value * obj ) ; <nl> + V8_INLINE ( static Number * Cast ( v8 : : Value * obj ) ) ; <nl> private : <nl> Number ( ) ; <nl> static void CheckCast ( v8 : : Value * obj ) ; <nl> class V8_EXPORT Integer : public Number { <nl> static Local < Integer > New ( int32_t value , Isolate * ) ; <nl> static Local < Integer > NewFromUnsigned ( uint32_t value , Isolate * ) ; <nl> int64_t Value ( ) const ; <nl> - V8_INLINE static Integer * Cast ( v8 : : Value * obj ) ; <nl> + V8_INLINE ( static Integer * Cast ( v8 : : Value * obj ) ) ; <nl> private : <nl> Integer ( ) ; <nl> static void CheckCast ( v8 : : Value * obj ) ; <nl> class V8_EXPORT Object : public Value { <nl> int InternalFieldCount ( ) ; <nl> <nl> / * * Gets the value from an internal field . * / <nl> - V8_INLINE Local < Value > GetInternalField ( int index ) ; <nl> + V8_INLINE ( Local < Value > GetInternalField ( int index ) ) ; <nl> <nl> / * * Sets the value in an internal field . * / <nl> void SetInternalField ( int index , Handle < Value > value ) ; <nl> class V8_EXPORT Object : public Value { <nl> * must have been set by SetAlignedPointerInInternalField , everything else <nl> * leads to undefined behavior . <nl> * / <nl> - V8_INLINE void * GetAlignedPointerFromInternalField ( int index ) ; <nl> + V8_INLINE ( void * GetAlignedPointerFromInternalField ( int index ) ) ; <nl> <nl> / * * <nl> * Sets a 2 - byte - aligned native pointer in an internal field . To retrieve such <nl> class V8_EXPORT Object : public Value { <nl> Local < Value > CallAsConstructor ( int argc , Handle < Value > argv [ ] ) ; <nl> <nl> static Local < Object > New ( ) ; <nl> - V8_INLINE static Object * Cast ( Value * obj ) ; <nl> + V8_INLINE ( static Object * Cast ( Value * obj ) ) ; <nl> <nl> private : <nl> Object ( ) ; <nl> class V8_EXPORT Array : public Object { <nl> * / <nl> static Local < Array > New ( int length = 0 ) ; <nl> <nl> - V8_INLINE static Array * Cast ( Value * obj ) ; <nl> + V8_INLINE ( static Array * Cast ( Value * obj ) ) ; <nl> private : <nl> Array ( ) ; <nl> static void CheckCast ( Value * obj ) ; <nl> class V8_EXPORT Array : public Object { <nl> template < typename T > <nl> class ReturnValue { <nl> public : <nl> - template < class S > V8_INLINE ReturnValue ( const ReturnValue < S > & that ) <nl> + template < class S > V8_INLINE ( ReturnValue ( const ReturnValue < S > & that ) ) <nl> : value_ ( that . 
value_ ) { <nl> TYPE_CHECK ( T , S ) ; <nl> } <nl> / / Handle setters <nl> - template < typename S > V8_INLINE void Set ( const Persistent < S > & handle ) ; <nl> - template < typename S > V8_INLINE void Set ( const Handle < S > handle ) ; <nl> + template < typename S > V8_INLINE ( void Set ( const Persistent < S > & handle ) ) ; <nl> + template < typename S > V8_INLINE ( void Set ( const Handle < S > handle ) ) ; <nl> / / Fast primitive setters <nl> - V8_INLINE void Set ( bool value ) ; <nl> - V8_INLINE void Set ( double i ) ; <nl> - V8_INLINE void Set ( int32_t i ) ; <nl> - V8_INLINE void Set ( uint32_t i ) ; <nl> + V8_INLINE ( void Set ( bool value ) ) ; <nl> + V8_INLINE ( void Set ( double i ) ) ; <nl> + V8_INLINE ( void Set ( int32_t i ) ) ; <nl> + V8_INLINE ( void Set ( uint32_t i ) ) ; <nl> / / Fast JS primitive setters <nl> - V8_INLINE void SetNull ( ) ; <nl> - V8_INLINE void SetUndefined ( ) ; <nl> - V8_INLINE void SetEmptyString ( ) ; <nl> + V8_INLINE ( void SetNull ( ) ) ; <nl> + V8_INLINE ( void SetUndefined ( ) ) ; <nl> + V8_INLINE ( void SetEmptyString ( ) ) ; <nl> / / Convenience getter for Isolate <nl> - V8_INLINE Isolate * GetIsolate ( ) ; <nl> + V8_INLINE ( Isolate * GetIsolate ( ) ) ; <nl> <nl> private : <nl> template < class F > friend class ReturnValue ; <nl> template < class F > friend class FunctionCallbackInfo ; <nl> template < class F > friend class PropertyCallbackInfo ; <nl> - V8_INLINE internal : : Object * GetDefaultValue ( ) ; <nl> - V8_INLINE explicit ReturnValue ( internal : : Object * * slot ) ; <nl> + V8_INLINE ( internal : : Object * GetDefaultValue ( ) ) ; <nl> + V8_INLINE ( explicit ReturnValue ( internal : : Object * * slot ) ) ; <nl> internal : : Object * * value_ ; <nl> } ; <nl> <nl> class ReturnValue { <nl> template < typename T > <nl> class FunctionCallbackInfo { <nl> public : <nl> - V8_INLINE int Length ( ) const ; <nl> - V8_INLINE Local < Value > operator [ ] ( int i ) const ; <nl> - V8_INLINE Local < Function > Callee ( ) const ; <nl> - V8_INLINE Local < Object > This ( ) const ; <nl> - V8_INLINE Local < Object > Holder ( ) const ; <nl> - V8_INLINE bool IsConstructCall ( ) const ; <nl> - V8_INLINE Local < Value > Data ( ) const ; <nl> - V8_INLINE Isolate * GetIsolate ( ) const ; <nl> - V8_INLINE ReturnValue < T > GetReturnValue ( ) const ; <nl> + V8_INLINE ( int Length ( ) const ) ; <nl> + V8_INLINE ( Local < Value > operator [ ] ( int i ) const ) ; <nl> + V8_INLINE ( Local < Function > Callee ( ) const ) ; <nl> + V8_INLINE ( Local < Object > This ( ) const ) ; <nl> + V8_INLINE ( Local < Object > Holder ( ) const ) ; <nl> + V8_INLINE ( bool IsConstructCall ( ) const ) ; <nl> + V8_INLINE ( Local < Value > Data ( ) const ) ; <nl> + V8_INLINE ( Isolate * GetIsolate ( ) const ) ; <nl> + V8_INLINE ( ReturnValue < T > GetReturnValue ( ) const ) ; <nl> / / This shouldn ' t be public , but the arm compiler needs it . 
<nl> static const int kArgsLength = 6 ; <nl> <nl> class FunctionCallbackInfo { <nl> static const int kCalleeIndex = - 4 ; <nl> static const int kHolderIndex = - 5 ; <nl> <nl> - V8_INLINE FunctionCallbackInfo ( internal : : Object * * implicit_args , <nl> + V8_INLINE ( FunctionCallbackInfo ( internal : : Object * * implicit_args , <nl> internal : : Object * * values , <nl> int length , <nl> - bool is_construct_call ) ; <nl> + bool is_construct_call ) ) ; <nl> internal : : Object * * implicit_args_ ; <nl> internal : : Object * * values_ ; <nl> int length_ ; <nl> class FunctionCallbackInfo { <nl> template < typename T > <nl> class PropertyCallbackInfo { <nl> public : <nl> - V8_INLINE Isolate * GetIsolate ( ) const ; <nl> - V8_INLINE Local < Value > Data ( ) const ; <nl> - V8_INLINE Local < Object > This ( ) const ; <nl> - V8_INLINE Local < Object > Holder ( ) const ; <nl> - V8_INLINE ReturnValue < T > GetReturnValue ( ) const ; <nl> + V8_INLINE ( Isolate * GetIsolate ( ) const ) ; <nl> + V8_INLINE ( Local < Value > Data ( ) const ) ; <nl> + V8_INLINE ( Local < Object > This ( ) const ) ; <nl> + V8_INLINE ( Local < Object > Holder ( ) const ) ; <nl> + V8_INLINE ( ReturnValue < T > GetReturnValue ( ) const ) ; <nl> / / This shouldn ' t be public , but the arm compiler needs it . <nl> static const int kArgsLength = 6 ; <nl> <nl> class PropertyCallbackInfo { <nl> static const int kReturnValueDefaultValueIndex = - 4 ; <nl> static const int kIsolateIndex = - 5 ; <nl> <nl> - V8_INLINE PropertyCallbackInfo ( internal : : Object * * args ) : args_ ( args ) { } <nl> + V8_INLINE ( PropertyCallbackInfo ( internal : : Object * * args ) ) <nl> + : args_ ( args ) { } <nl> internal : : Object * * args_ ; <nl> } ; <nl> <nl> class V8_EXPORT Function : public Object { <nl> int ScriptId ( ) const ; <nl> <nl> ScriptOrigin GetScriptOrigin ( ) const ; <nl> - V8_INLINE static Function * Cast ( Value * obj ) ; <nl> + V8_INLINE ( static Function * Cast ( Value * obj ) ) ; <nl> static const int kLineOffsetNotFound ; <nl> <nl> private : <nl> class V8_EXPORT ArrayBuffer : public Object { <nl> * / <nl> Contents Externalize ( ) ; <nl> <nl> - V8_INLINE static ArrayBuffer * Cast ( Value * obj ) ; <nl> + V8_INLINE ( static ArrayBuffer * Cast ( Value * obj ) ) ; <nl> <nl> static const int kInternalFieldCount = V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT ; <nl> <nl> class V8_EXPORT ArrayBufferView : public Object { <nl> * / <nl> void * BaseAddress ( ) ; <nl> <nl> - V8_INLINE static ArrayBufferView * Cast ( Value * obj ) ; <nl> + V8_INLINE ( static ArrayBufferView * Cast ( Value * obj ) ) ; <nl> <nl> static const int kInternalFieldCount = <nl> V8_ARRAY_BUFFER_VIEW_INTERNAL_FIELD_COUNT ; <nl> class V8_EXPORT TypedArray : public ArrayBufferView { <nl> * / <nl> size_t Length ( ) ; <nl> <nl> - V8_INLINE static TypedArray * Cast ( Value * obj ) ; <nl> + V8_INLINE ( static TypedArray * Cast ( Value * obj ) ) ; <nl> <nl> private : <nl> TypedArray ( ) ; <nl> class V8_EXPORT Uint8Array : public TypedArray { <nl> public : <nl> static Local < Uint8Array > New ( Handle < ArrayBuffer > array_buffer , <nl> size_t byte_offset , size_t length ) ; <nl> - V8_INLINE static Uint8Array * Cast ( Value * obj ) ; <nl> + V8_INLINE ( static Uint8Array * Cast ( Value * obj ) ) ; <nl> <nl> private : <nl> Uint8Array ( ) ; <nl> class V8_EXPORT Uint8ClampedArray : public TypedArray { <nl> public : <nl> static Local < Uint8ClampedArray > New ( Handle < ArrayBuffer > array_buffer , <nl> size_t byte_offset , size_t length ) ; <nl> - V8_INLINE static Uint8ClampedArray * 
Cast ( Value * obj ) ; <nl> + V8_INLINE ( static Uint8ClampedArray * Cast ( Value * obj ) ) ; <nl> <nl> private : <nl> Uint8ClampedArray ( ) ; <nl> class V8_EXPORT Int8Array : public TypedArray { <nl> public : <nl> static Local < Int8Array > New ( Handle < ArrayBuffer > array_buffer , <nl> size_t byte_offset , size_t length ) ; <nl> - V8_INLINE static Int8Array * Cast ( Value * obj ) ; <nl> + V8_INLINE ( static Int8Array * Cast ( Value * obj ) ) ; <nl> <nl> private : <nl> Int8Array ( ) ; <nl> class V8_EXPORT Uint16Array : public TypedArray { <nl> public : <nl> static Local < Uint16Array > New ( Handle < ArrayBuffer > array_buffer , <nl> size_t byte_offset , size_t length ) ; <nl> - V8_INLINE static Uint16Array * Cast ( Value * obj ) ; <nl> + V8_INLINE ( static Uint16Array * Cast ( Value * obj ) ) ; <nl> <nl> private : <nl> Uint16Array ( ) ; <nl> class V8_EXPORT Int16Array : public TypedArray { <nl> public : <nl> static Local < Int16Array > New ( Handle < ArrayBuffer > array_buffer , <nl> size_t byte_offset , size_t length ) ; <nl> - V8_INLINE static Int16Array * Cast ( Value * obj ) ; <nl> + V8_INLINE ( static Int16Array * Cast ( Value * obj ) ) ; <nl> <nl> private : <nl> Int16Array ( ) ; <nl> class V8_EXPORT Uint32Array : public TypedArray { <nl> public : <nl> static Local < Uint32Array > New ( Handle < ArrayBuffer > array_buffer , <nl> size_t byte_offset , size_t length ) ; <nl> - V8_INLINE static Uint32Array * Cast ( Value * obj ) ; <nl> + V8_INLINE ( static Uint32Array * Cast ( Value * obj ) ) ; <nl> <nl> private : <nl> Uint32Array ( ) ; <nl> class V8_EXPORT Int32Array : public TypedArray { <nl> public : <nl> static Local < Int32Array > New ( Handle < ArrayBuffer > array_buffer , <nl> size_t byte_offset , size_t length ) ; <nl> - V8_INLINE static Int32Array * Cast ( Value * obj ) ; <nl> + V8_INLINE ( static Int32Array * Cast ( Value * obj ) ) ; <nl> <nl> private : <nl> Int32Array ( ) ; <nl> class V8_EXPORT Float32Array : public TypedArray { <nl> public : <nl> static Local < Float32Array > New ( Handle < ArrayBuffer > array_buffer , <nl> size_t byte_offset , size_t length ) ; <nl> - V8_INLINE static Float32Array * Cast ( Value * obj ) ; <nl> + V8_INLINE ( static Float32Array * Cast ( Value * obj ) ) ; <nl> <nl> private : <nl> Float32Array ( ) ; <nl> class V8_EXPORT Float64Array : public TypedArray { <nl> public : <nl> static Local < Float64Array > New ( Handle < ArrayBuffer > array_buffer , <nl> size_t byte_offset , size_t length ) ; <nl> - V8_INLINE static Float64Array * Cast ( Value * obj ) ; <nl> + V8_INLINE ( static Float64Array * Cast ( Value * obj ) ) ; <nl> <nl> private : <nl> Float64Array ( ) ; <nl> class V8_EXPORT DataView : public ArrayBufferView { <nl> public : <nl> static Local < DataView > New ( Handle < ArrayBuffer > array_buffer , <nl> size_t byte_offset , size_t length ) ; <nl> - V8_INLINE static DataView * Cast ( Value * obj ) ; <nl> + V8_INLINE ( static DataView * Cast ( Value * obj ) ) ; <nl> <nl> private : <nl> DataView ( ) ; <nl> class V8_EXPORT Date : public Object { <nl> * / <nl> double ValueOf ( ) const ; <nl> <nl> - V8_INLINE static Date * Cast ( v8 : : Value * obj ) ; <nl> + V8_INLINE ( static Date * Cast ( v8 : : Value * obj ) ) ; <nl> <nl> / * * <nl> * Notification that the embedder has changed the time zone , <nl> class V8_EXPORT NumberObject : public Object { <nl> * / <nl> double ValueOf ( ) const ; <nl> <nl> - V8_INLINE static NumberObject * Cast ( v8 : : Value * obj ) ; <nl> + V8_INLINE ( static NumberObject * Cast ( v8 : : Value * obj ) ) ; <nl> <nl> 
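The Cast helpers rewritten in this stretch all follow the same checked-downcast pattern, so one usage sketch covers them; the function name and control flow below are invented for illustration, and only Date::Cast, Date::ValueOf and the Handle dereference come from the declarations in this header:

void PrintTimestampIfDate(v8::Handle<v8::Value> value) {
  if (value->IsDate()) {                       // IsDate() is assumed from the broader Value API
    v8::Date* date = v8::Date::Cast(*value);   // inlined via the V8_INLINE(...) form above
    double ms_since_epoch = date->ValueOf();   // milliseconds since the epoch
    (void)ms_since_epoch;                      // stand-in for real use of the value
  }
}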
private : <nl> static void CheckCast ( v8 : : Value * obj ) ; <nl> class V8_EXPORT BooleanObject : public Object { <nl> * / <nl> bool ValueOf ( ) const ; <nl> <nl> - V8_INLINE static BooleanObject * Cast ( v8 : : Value * obj ) ; <nl> + V8_INLINE ( static BooleanObject * Cast ( v8 : : Value * obj ) ) ; <nl> <nl> private : <nl> static void CheckCast ( v8 : : Value * obj ) ; <nl> class V8_EXPORT StringObject : public Object { <nl> * / <nl> Local < String > ValueOf ( ) const ; <nl> <nl> - V8_INLINE static StringObject * Cast ( v8 : : Value * obj ) ; <nl> + V8_INLINE ( static StringObject * Cast ( v8 : : Value * obj ) ) ; <nl> <nl> private : <nl> static void CheckCast ( v8 : : Value * obj ) ; <nl> class V8_EXPORT SymbolObject : public Object { <nl> * / <nl> Local < Symbol > ValueOf ( ) const ; <nl> <nl> - V8_INLINE static SymbolObject * Cast ( v8 : : Value * obj ) ; <nl> + V8_INLINE ( static SymbolObject * Cast ( v8 : : Value * obj ) ) ; <nl> <nl> private : <nl> static void CheckCast ( v8 : : Value * obj ) ; <nl> class V8_EXPORT RegExp : public Object { <nl> * / <nl> Flags GetFlags ( ) const ; <nl> <nl> - V8_INLINE static RegExp * Cast ( v8 : : Value * obj ) ; <nl> + V8_INLINE ( static RegExp * Cast ( v8 : : Value * obj ) ) ; <nl> <nl> private : <nl> static void CheckCast ( v8 : : Value * obj ) ; <nl> class V8_EXPORT RegExp : public Object { <nl> class V8_EXPORT External : public Value { <nl> public : <nl> static Local < External > New ( void * value ) ; <nl> - V8_INLINE static External * Cast ( Value * obj ) ; <nl> + V8_INLINE ( static External * Cast ( Value * obj ) ) ; <nl> void * Value ( ) const ; <nl> private : <nl> static void CheckCast ( v8 : : Value * obj ) ; <nl> class V8_EXPORT Template : public Data { <nl> / * * Adds a property to each instance created by this template . * / <nl> void Set ( Handle < String > name , Handle < Data > value , <nl> PropertyAttribute attributes = None ) ; <nl> - V8_INLINE void Set ( const char * name , Handle < Data > value ) ; <nl> + V8_INLINE ( void Set ( const char * name , Handle < Data > value ) ) ; <nl> <nl> void SetAccessorProperty ( <nl> Local < String > name , <nl> void V8_EXPORT RegisterExtension ( Extension * extension ) ; <nl> * / <nl> class V8_EXPORT DeclareExtension { <nl> public : <nl> - V8_INLINE DeclareExtension ( Extension * extension ) { <nl> + V8_INLINE ( DeclareExtension ( Extension * extension ) ) { <nl> RegisterExtension ( extension ) ; <nl> } <nl> } ; <nl> Handle < Primitive > V8_EXPORT Null ( ) ; <nl> Handle < Boolean > V8_EXPORT True ( ) ; <nl> Handle < Boolean > V8_EXPORT False ( ) ; <nl> <nl> - V8_INLINE Handle < Primitive > Undefined ( Isolate * isolate ) ; <nl> - V8_INLINE Handle < Primitive > Null ( Isolate * isolate ) ; <nl> - V8_INLINE Handle < Boolean > True ( Isolate * isolate ) ; <nl> - V8_INLINE Handle < Boolean > False ( Isolate * isolate ) ; <nl> + V8_INLINE ( Handle < Primitive > Undefined ( Isolate * isolate ) ) ; <nl> + V8_INLINE ( Handle < Primitive > Null ( Isolate * isolate ) ) ; <nl> + V8_INLINE ( Handle < Boolean > True ( Isolate * isolate ) ) ; <nl> + V8_INLINE ( Handle < Boolean > False ( Isolate * isolate ) ) ; <nl> <nl> <nl> / * * <nl> class V8_EXPORT Isolate { <nl> / * * <nl> * Associate embedder - specific data with the isolate <nl> * / <nl> - V8_INLINE void SetData ( void * data ) ; <nl> + V8_INLINE ( void SetData ( void * data ) ) ; <nl> <nl> / * * <nl> * Retrieve embedder - specific data from the isolate . <nl> * Returns NULL if SetData has never been called . 
<nl> * / <nl> - V8_INLINE void * GetData ( ) ; <nl> + V8_INLINE ( void * GetData ( ) ) ; <nl> <nl> / * * <nl> * Get statistics about the heap memory usage . <nl> class V8_EXPORT PersistentHandleVisitor { / / NOLINT <nl> class V8_EXPORT AssertNoGCScope { <nl> # ifndef DEBUG <nl> / / TODO ( yangguo ) : remove isolate argument . <nl> - V8_INLINE AssertNoGCScope ( Isolate * isolate ) { } <nl> + V8_INLINE ( AssertNoGCScope ( Isolate * isolate ) ) { } <nl> # else <nl> AssertNoGCScope ( Isolate * isolate ) ; <nl> ~ AssertNoGCScope ( ) ; <nl> class V8_EXPORT Context { <nl> * previous call to SetEmbedderData with the same index . Note that index 0 <nl> * currently has a special meaning for Chrome ' s debugger . <nl> * / <nl> - V8_INLINE Local < Value > GetEmbedderData ( int index ) ; <nl> + V8_INLINE ( Local < Value > GetEmbedderData ( int index ) ) ; <nl> <nl> / * * <nl> * Sets the embedder data with the given index , growing the data as <nl> class V8_EXPORT Context { <nl> * SetAlignedPointerInEmbedderData with the same index . Note that index 0 <nl> * currently has a special meaning for Chrome ' s debugger . <nl> * / <nl> - V8_INLINE void * GetAlignedPointerFromEmbedderData ( int index ) ; <nl> + V8_INLINE ( void * GetAlignedPointerFromEmbedderData ( int index ) ) ; <nl> <nl> / * * <nl> * Sets a 2 - byte - aligned native pointer in the embedder data with the given <nl> class V8_EXPORT Context { <nl> * / <nl> class Scope { <nl> public : <nl> - explicit V8_INLINE Scope ( Handle < Context > context ) : context_ ( context ) { <nl> + explicit V8_INLINE ( Scope ( Handle < Context > context ) ) : context_ ( context ) { <nl> context_ - > Enter ( ) ; <nl> } <nl> / / TODO ( dcarney ) : deprecate <nl> - V8_INLINE Scope ( Isolate * isolate , Persistent < Context > & context ) / / NOLINT <nl> + V8_INLINE ( Scope ( Isolate * isolate , Persistent < Context > & context ) ) / / NOLINT <nl> : context_ ( Handle < Context > : : New ( isolate , context ) ) { <nl> context_ - > Enter ( ) ; <nl> } <nl> - V8_INLINE ~ Scope ( ) { context_ - > Exit ( ) ; } <nl> + V8_INLINE ( ~ Scope ( ) ) { context_ - > Exit ( ) ; } <nl> <nl> private : <nl> Handle < Context > context_ ; <nl> class V8_EXPORT Unlocker { <nl> / * * <nl> * Initialize Unlocker for a given Isolate . <nl> * / <nl> - V8_INLINE explicit Unlocker ( Isolate * isolate ) { Initialize ( isolate ) ; } <nl> + V8_INLINE ( explicit Unlocker ( Isolate * isolate ) ) { Initialize ( isolate ) ; } <nl> <nl> / * * Deprecated . Use Isolate version instead . * / <nl> V8_DEPRECATED ( Unlocker ( ) ) ; <nl> class V8_EXPORT Locker { <nl> / * * <nl> * Initialize Locker for a given Isolate . <nl> * / <nl> - V8_INLINE explicit Locker ( Isolate * isolate ) { Initialize ( isolate ) ; } <nl> + V8_INLINE ( explicit Locker ( Isolate * isolate ) ) { Initialize ( isolate ) ; } <nl> <nl> / * * Deprecated . Use Isolate version instead . 
* / <nl> V8_DEPRECATED ( Locker ( ) ) ; <nl> const intptr_t kSmiTagMask = ( 1 < < kSmiTagSize ) - 1 ; <nl> template < size_t ptr_size > struct SmiTagging ; <nl> <nl> template < int kSmiShiftSize > <nl> - V8_INLINE internal : : Object * IntToSmi ( int value ) { <nl> + V8_INLINE ( internal : : Object * IntToSmi ( int value ) ) { <nl> int smi_shift_bits = kSmiTagSize + kSmiShiftSize ; <nl> intptr_t tagged_value = <nl> ( static_cast < intptr_t > ( value ) < < smi_shift_bits ) | kSmiTag ; <nl> V8_INLINE internal : : Object * IntToSmi ( int value ) { <nl> template < > struct SmiTagging < 4 > { <nl> static const int kSmiShiftSize = 0 ; <nl> static const int kSmiValueSize = 31 ; <nl> - V8_INLINE static int SmiToInt ( internal : : Object * value ) { <nl> + V8_INLINE ( static int SmiToInt ( internal : : Object * value ) ) { <nl> int shift_bits = kSmiTagSize + kSmiShiftSize ; <nl> / / Throw away top 32 bits and shift down ( requires > > to be sign extending ) . <nl> return static_cast < int > ( reinterpret_cast < intptr_t > ( value ) ) > > shift_bits ; <nl> } <nl> - V8_INLINE static internal : : Object * IntToSmi ( int value ) { <nl> + V8_INLINE ( static internal : : Object * IntToSmi ( int value ) ) { <nl> return internal : : IntToSmi < kSmiShiftSize > ( value ) ; <nl> } <nl> - V8_INLINE static bool IsValidSmi ( intptr_t value ) { <nl> + V8_INLINE ( static bool IsValidSmi ( intptr_t value ) ) { <nl> / / To be representable as an tagged small integer , the two <nl> / / most - significant bits of ' value ' must be either 00 or 11 due to <nl> / / sign - extension . To check this we add 01 to the two <nl> template < > struct SmiTagging < 4 > { <nl> template < > struct SmiTagging < 8 > { <nl> static const int kSmiShiftSize = 31 ; <nl> static const int kSmiValueSize = 32 ; <nl> - V8_INLINE static int SmiToInt ( internal : : Object * value ) { <nl> + V8_INLINE ( static int SmiToInt ( internal : : Object * value ) ) { <nl> int shift_bits = kSmiTagSize + kSmiShiftSize ; <nl> / / Shift down and throw away top 32 bits . <nl> return static_cast < int > ( reinterpret_cast < intptr_t > ( value ) > > shift_bits ) ; <nl> } <nl> - V8_INLINE static internal : : Object * IntToSmi ( int value ) { <nl> + V8_INLINE ( static internal : : Object * IntToSmi ( int value ) ) { <nl> return internal : : IntToSmi < kSmiShiftSize > ( value ) ; <nl> } <nl> - V8_INLINE static bool IsValidSmi ( intptr_t value ) { <nl> + V8_INLINE ( static bool IsValidSmi ( intptr_t value ) ) { <nl> / / To be representable as a long smi , the value must be a 32 - bit integer . 
<nl> return ( value = = static_cast < int32_t > ( value ) ) ; <nl> } <nl> template < > struct SmiTagging < 8 > { <nl> typedef SmiTagging < kApiPointerSize > PlatformSmiTagging ; <nl> const int kSmiShiftSize = PlatformSmiTagging : : kSmiShiftSize ; <nl> const int kSmiValueSize = PlatformSmiTagging : : kSmiValueSize ; <nl> - V8_INLINE static bool SmiValuesAre31Bits ( ) { return kSmiValueSize = = 31 ; } <nl> - V8_INLINE static bool SmiValuesAre32Bits ( ) { return kSmiValueSize = = 32 ; } <nl> + V8_INLINE ( static bool SmiValuesAre31Bits ( ) ) { return kSmiValueSize = = 31 ; } <nl> + V8_INLINE ( static bool SmiValuesAre32Bits ( ) ) { return kSmiValueSize = = 32 ; } <nl> <nl> / * * <nl> * This class exports constants and functionality from within v8 that <nl> class Internals { <nl> static const int kNullOddballKind = 3 ; <nl> <nl> static void CheckInitializedImpl ( v8 : : Isolate * isolate ) ; <nl> - V8_INLINE static void CheckInitialized ( v8 : : Isolate * isolate ) { <nl> + V8_INLINE ( static void CheckInitialized ( v8 : : Isolate * isolate ) ) { <nl> # ifdef V8_ENABLE_CHECKS <nl> CheckInitializedImpl ( isolate ) ; <nl> # endif <nl> } <nl> <nl> - V8_INLINE static bool HasHeapObjectTag ( internal : : Object * value ) { <nl> + V8_INLINE ( static bool HasHeapObjectTag ( internal : : Object * value ) ) { <nl> return ( ( reinterpret_cast < intptr_t > ( value ) & kHeapObjectTagMask ) = = <nl> kHeapObjectTag ) ; <nl> } <nl> <nl> - V8_INLINE static int SmiValue ( internal : : Object * value ) { <nl> + V8_INLINE ( static int SmiValue ( internal : : Object * value ) ) { <nl> return PlatformSmiTagging : : SmiToInt ( value ) ; <nl> } <nl> <nl> - V8_INLINE static internal : : Object * IntToSmi ( int value ) { <nl> + V8_INLINE ( static internal : : Object * IntToSmi ( int value ) ) { <nl> return PlatformSmiTagging : : IntToSmi ( value ) ; <nl> } <nl> <nl> - V8_INLINE static bool IsValidSmi ( intptr_t value ) { <nl> + V8_INLINE ( static bool IsValidSmi ( intptr_t value ) ) { <nl> return PlatformSmiTagging : : IsValidSmi ( value ) ; <nl> } <nl> <nl> - V8_INLINE static int GetInstanceType ( internal : : Object * obj ) { <nl> + V8_INLINE ( static int GetInstanceType ( internal : : Object * obj ) ) { <nl> typedef internal : : Object O ; <nl> O * map = ReadField < O * > ( obj , kHeapObjectMapOffset ) ; <nl> return ReadField < uint8_t > ( map , kMapInstanceTypeOffset ) ; <nl> } <nl> <nl> - V8_INLINE static int GetOddballKind ( internal : : Object * obj ) { <nl> + V8_INLINE ( static int GetOddballKind ( internal : : Object * obj ) ) { <nl> typedef internal : : Object O ; <nl> return SmiValue ( ReadField < O * > ( obj , kOddballKindOffset ) ) ; <nl> } <nl> <nl> - V8_INLINE static bool IsExternalTwoByteString ( int instance_type ) { <nl> + V8_INLINE ( static bool IsExternalTwoByteString ( int instance_type ) ) { <nl> int representation = ( instance_type & kFullStringRepresentationMask ) ; <nl> return representation = = kExternalTwoByteRepresentationTag ; <nl> } <nl> <nl> - V8_INLINE static uint8_t GetNodeFlag ( internal : : Object * * obj , int shift ) { <nl> + V8_INLINE ( static uint8_t GetNodeFlag ( internal : : Object * * obj , int shift ) ) { <nl> uint8_t * addr = reinterpret_cast < uint8_t * > ( obj ) + kNodeFlagsOffset ; <nl> return * addr & static_cast < uint8_t > ( 1U < < shift ) ; <nl> } <nl> <nl> - V8_INLINE static void UpdateNodeFlag ( internal : : Object * * obj , <nl> - bool value , int shift ) { <nl> + V8_INLINE ( static void UpdateNodeFlag ( internal : : Object * * obj , <nl> + bool value , int shift 
) ) { <nl> uint8_t * addr = reinterpret_cast < uint8_t * > ( obj ) + kNodeFlagsOffset ; <nl> uint8_t mask = static_cast < uint8_t > ( 1 < < shift ) ; <nl> * addr = static_cast < uint8_t > ( ( * addr & ~ mask ) | ( value < < shift ) ) ; <nl> } <nl> <nl> - V8_INLINE static uint8_t GetNodeState ( internal : : Object * * obj ) { <nl> + V8_INLINE ( static uint8_t GetNodeState ( internal : : Object * * obj ) ) { <nl> uint8_t * addr = reinterpret_cast < uint8_t * > ( obj ) + kNodeFlagsOffset ; <nl> return * addr & kNodeStateMask ; <nl> } <nl> <nl> - V8_INLINE static void UpdateNodeState ( internal : : Object * * obj , <nl> - uint8_t value ) { <nl> + V8_INLINE ( static void UpdateNodeState ( internal : : Object * * obj , <nl> + uint8_t value ) ) { <nl> uint8_t * addr = reinterpret_cast < uint8_t * > ( obj ) + kNodeFlagsOffset ; <nl> * addr = static_cast < uint8_t > ( ( * addr & ~ kNodeStateMask ) | value ) ; <nl> } <nl> <nl> - V8_INLINE static void SetEmbedderData ( v8 : : Isolate * isolate , void * data ) { <nl> + V8_INLINE ( static void SetEmbedderData ( v8 : : Isolate * isolate , void * data ) ) { <nl> uint8_t * addr = reinterpret_cast < uint8_t * > ( isolate ) + <nl> kIsolateEmbedderDataOffset ; <nl> * reinterpret_cast < void * * > ( addr ) = data ; <nl> } <nl> <nl> - V8_INLINE static void * GetEmbedderData ( v8 : : Isolate * isolate ) { <nl> + V8_INLINE ( static void * GetEmbedderData ( v8 : : Isolate * isolate ) ) { <nl> uint8_t * addr = reinterpret_cast < uint8_t * > ( isolate ) + <nl> kIsolateEmbedderDataOffset ; <nl> return * reinterpret_cast < void * * > ( addr ) ; <nl> } <nl> <nl> - V8_INLINE static internal : : Object * * GetRoot ( v8 : : Isolate * isolate , <nl> - int index ) { <nl> + V8_INLINE ( static internal : : Object * * GetRoot ( v8 : : Isolate * isolate , <nl> + int index ) ) { <nl> uint8_t * addr = reinterpret_cast < uint8_t * > ( isolate ) + kIsolateRootsOffset ; <nl> return reinterpret_cast < internal : : Object * * > ( addr + index * kApiPointerSize ) ; <nl> } <nl> <nl> - template < typename T > V8_INLINE static T ReadField ( Object * ptr , int offset ) { <nl> + template < typename T > <nl> + V8_INLINE ( static T ReadField ( Object * ptr , int offset ) ) { <nl> uint8_t * addr = reinterpret_cast < uint8_t * > ( ptr ) + offset - kHeapObjectTag ; <nl> return * reinterpret_cast < T * > ( addr ) ; <nl> } <nl> <nl> template < typename T > <nl> - V8_INLINE static T ReadEmbedderData ( Context * context , int index ) { <nl> + V8_INLINE ( static T ReadEmbedderData ( Context * context , int index ) ) { <nl> typedef internal : : Object O ; <nl> typedef internal : : Internals I ; <nl> O * ctx = * reinterpret_cast < O * * > ( context ) ; <nl> class Internals { <nl> return I : : ReadField < T > ( embedder_data , value_offset ) ; <nl> } <nl> <nl> - V8_INLINE static bool CanCastToHeapObject ( void * o ) { return false ; } <nl> - V8_INLINE static bool CanCastToHeapObject ( Context * o ) { return true ; } <nl> - V8_INLINE static bool CanCastToHeapObject ( String * o ) { return true ; } <nl> - V8_INLINE static bool CanCastToHeapObject ( Object * o ) { return true ; } <nl> - V8_INLINE static bool CanCastToHeapObject ( Message * o ) { return true ; } <nl> - V8_INLINE static bool CanCastToHeapObject ( StackTrace * o ) { return true ; } <nl> - V8_INLINE static bool CanCastToHeapObject ( StackFrame * o ) { return true ; } <nl> + V8_INLINE ( static bool CanCastToHeapObject ( void * o ) ) { return false ; } <nl> + V8_INLINE ( static bool CanCastToHeapObject ( Context * o ) ) { return true ; } <nl> + 
V8_INLINE ( static bool CanCastToHeapObject ( String * o ) ) { return true ; } <nl> + V8_INLINE ( static bool CanCastToHeapObject ( Object * o ) ) { return true ; } <nl> + V8_INLINE ( static bool CanCastToHeapObject ( Message * o ) ) { return true ; } <nl> + V8_INLINE ( static bool CanCastToHeapObject ( StackTrace * o ) ) { return true ; } <nl> + V8_INLINE ( static bool CanCastToHeapObject ( StackFrame * o ) ) { return true ; } <nl> } ; <nl> <nl> } / / namespace internal <nl> mmm a / include / v8config . h <nl> ppp b / include / v8config . h <nl> <nl> / / supported <nl> / / V8_HAS_ATTRIBUTE_DEPRECATED - __attribute__ ( ( deprecated ) ) supported <nl> / / V8_HAS_ATTRIBUTE_NOINLINE - __attribute__ ( ( noinline ) ) supported <nl> - / / V8_HAS_ATTRIBUTE_PURE - __attribute__ ( ( pure ) ) supported <nl> / / V8_HAS_ATTRIBUTE_VISIBILITY - __attribute__ ( ( visibility ) ) supported <nl> / / V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT - __attribute__ ( ( warn_unused_result ) ) <nl> / / supported <nl> <nl> # define V8_HAS_ATTRIBUTE_ALWAYS_INLINE ( __has_attribute ( always_inline ) ) <nl> # define V8_HAS_ATTRIBUTE_DEPRECATED ( __has_attribute ( deprecated ) ) <nl> # define V8_HAS_ATTRIBUTE_NOINLINE ( __has_attribute ( noinline ) ) <nl> - # define V8_HAS_ATTRIBUTE_PURE ( __has_attribute ( pure ) ) <nl> # define V8_HAS_ATTRIBUTE_VISIBILITY ( __has_attribute ( visibility ) ) <nl> # define V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT \ <nl> ( __has_attribute ( warn_unused_result ) ) <nl> <nl> # define V8_HAS_ATTRIBUTE_ALWAYS_INLINE ( V8_GNUC_PREREQ ( 4 , 4 , 0 ) ) <nl> # define V8_HAS_ATTRIBUTE_DEPRECATED ( V8_GNUC_PREREQ ( 3 , 4 , 0 ) ) <nl> # define V8_HAS_ATTRIBUTE_NOINLINE ( V8_GNUC_PREREQ ( 3 , 4 , 0 ) ) <nl> - # define V8_HAS_ATTRIBUTE_PURE ( V8_GNUC_PREREQ ( 2 , 96 , 0 ) ) <nl> # define V8_HAS_ATTRIBUTE_VISIBILITY ( V8_GNUC_PREREQ ( 4 , 3 , 0 ) ) <nl> # define V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT \ <nl> ( ! V8_CC_INTEL & & V8_GNUC_PREREQ ( 4 , 1 , 0 ) ) <nl> <nl> / / Helper macros <nl> <nl> / / A macro used to make better inlining . Don ' t bother for debug builds . <nl> - / / Use like : <nl> - / / V8_INLINE int GetZero ( ) { return 0 ; } <nl> # if ! defined ( DEBUG ) & & V8_HAS_ATTRIBUTE_ALWAYS_INLINE <nl> - # define V8_INLINE inline __attribute__ ( ( always_inline ) ) <nl> + # define V8_INLINE ( declarator ) inline __attribute__ ( ( always_inline ) ) declarator <nl> # elif ! defined ( DEBUG ) & & V8_HAS___FORCEINLINE <nl> - # define V8_INLINE __forceinline <nl> + # define V8_INLINE ( declarator ) __forceinline declarator <nl> # else <nl> - # define V8_INLINE inline <nl> + # define V8_INLINE ( declarator ) inline declarator <nl> # endif <nl> <nl> <nl> / / A macro used to tell the compiler to never inline a particular function . <nl> / / Don ' t bother for debug builds . <nl> - / / Use like : <nl> - / / V8_NOINLINE int GetMinusOne ( ) { return - 1 ; } <nl> # if ! defined ( DEBUG ) & & V8_HAS_ATTRIBUTE_NOINLINE <nl> - # define V8_NOINLINE __attribute__ ( ( noinline ) ) <nl> + # define V8_NOINLINE ( declarator ) __attribute__ ( ( noinline ) ) declarator <nl> # elif ! 
defined ( DEBUG ) & & V8_HAS_DECLSPEC_NOINLINE <nl> - # define V8_NOINLINE __declspec ( noinline ) <nl> + # define V8_NOINLINE ( declarator ) __declspec ( noinline ) declarator <nl> # else <nl> - # define V8_NOINLINE / * NOT SUPPORTED * / <nl> + # define V8_NOINLINE ( declarator ) declarator <nl> # endif <nl> <nl> <nl> <nl> # endif <nl> <nl> <nl> - / / Many functions have no effects except the return value and their return value <nl> - / / depends only on the parameters and / or global variables . Such a function can <nl> - / / be subject to common subexpression elimination and loop optimization just as <nl> - / / an arithmetic operator would be . These functions should be declared with the <nl> - / / attribute V8_PURE . For example , <nl> - / / <nl> - / / int square ( int ) V8_PURE ; <nl> - / / <nl> - / / says that the hypothetical function square is safe to call fewer times than <nl> - / / the program says . <nl> - / / <nl> - / / Some of common examples of pure functions are strlen or memcmp . Interesting <nl> - / / non - V8_PURE functions are functions with infinite loops or those depending <nl> - / / on volatile memory or other system resource , that may change between two <nl> - / / consecutive calls ( such as feof in a multithreaded environment ) . <nl> - # if V8_HAS_ATTRIBUTE_PURE <nl> - # define V8_PURE __attribute__ ( ( pure ) ) <nl> - # else <nl> - # define V8_PURE / * NOT SUPPORTED * / <nl> - # endif <nl> - <nl> - <nl> / / Annotate a function indicating the caller must examine the return value . <nl> / / Use like : <nl> / / int foo ( ) V8_WARN_UNUSED_RESULT ; <nl> mmm a / src / arm / codegen - arm . cc <nl> ppp b / src / arm / codegen - arm . cc <nl> double fast_exp_simulator ( double x ) { <nl> UnaryMathFunction CreateExpFunction ( ) { <nl> if ( ! FLAG_fast_math ) return & exp ; <nl> size_t actual_size ; <nl> - byte * buffer = static_cast < byte * > ( VirtualMemory : : AllocateRegion ( <nl> - 1 * KB , & actual_size , VirtualMemory : : EXECUTABLE ) ) ; <nl> + byte * buffer = static_cast < byte * > ( OS : : Allocate ( 1 * KB , & actual_size , true ) ) ; <nl> if ( buffer = = NULL ) return & exp ; <nl> ExternalReference : : InitializeMathExpData ( ) ; <nl> <nl> UnaryMathFunction CreateExpFunction ( ) { <nl> ASSERT ( ! RelocInfo : : RequiresRelocation ( desc ) ) ; <nl> <nl> CPU : : FlushICache ( buffer , actual_size ) ; <nl> - bool result = VirtualMemory : : WriteProtectRegion ( buffer , actual_size ) ; <nl> - ASSERT ( result ) ; <nl> - USE ( result ) ; <nl> + OS : : ProtectCode ( buffer , actual_size ) ; <nl> <nl> # if ! defined ( USE_SIMULATOR ) <nl> return FUNCTION_CAST < UnaryMathFunction > ( buffer ) ; <nl> OS : : MemCopyUint8Function CreateMemCopyUint8Function ( <nl> return stub ; <nl> } <nl> size_t actual_size ; <nl> - byte * buffer = static_cast < byte * > ( VirtualMemory : : AllocateRegion ( <nl> - 1 * KB , & actual_size , VirtualMemory : : EXECUTABLE ) ) ; <nl> + byte * buffer = static_cast < byte * > ( OS : : Allocate ( 1 * KB , & actual_size , true ) ) ; <nl> if ( buffer = = NULL ) return stub ; <nl> <nl> MacroAssembler masm ( NULL , buffer , static_cast < int > ( actual_size ) ) ; <nl> OS : : MemCopyUint8Function CreateMemCopyUint8Function ( <nl> ASSERT ( ! 
RelocInfo : : RequiresRelocation ( desc ) ) ; <nl> <nl> CPU : : FlushICache ( buffer , actual_size ) ; <nl> - bool result = VirtualMemory : : WriteProtectRegion ( buffer , actual_size ) ; <nl> - ASSERT ( result ) ; <nl> - USE ( result ) ; <nl> + OS : : ProtectCode ( buffer , actual_size ) ; <nl> return FUNCTION_CAST < OS : : MemCopyUint8Function > ( buffer ) ; <nl> # endif <nl> } <nl> OS : : MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function ( <nl> return stub ; <nl> } <nl> size_t actual_size ; <nl> - byte * buffer = static_cast < byte * > ( VirtualMemory : : AllocateRegion ( <nl> - 1 * KB , & actual_size , VirtualMemory : : EXECUTABLE ) ) ; <nl> + byte * buffer = static_cast < byte * > ( OS : : Allocate ( 1 * KB , & actual_size , true ) ) ; <nl> if ( buffer = = NULL ) return stub ; <nl> <nl> MacroAssembler masm ( NULL , buffer , static_cast < int > ( actual_size ) ) ; <nl> OS : : MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function ( <nl> masm . GetCode ( & desc ) ; <nl> <nl> CPU : : FlushICache ( buffer , actual_size ) ; <nl> - bool result = VirtualMemory : : WriteProtectRegion ( buffer , actual_size ) ; <nl> - ASSERT ( result ) ; <nl> - USE ( result ) ; <nl> + OS : : ProtectCode ( buffer , actual_size ) ; <nl> <nl> return FUNCTION_CAST < OS : : MemCopyUint16Uint8Function > ( buffer ) ; <nl> # endif <nl> mmm a / src / cpu . cc <nl> ppp b / src / cpu . cc <nl> namespace internal { <nl> / / Define __cpuid ( ) for non - MSVC compilers . <nl> # if ! V8_CC_MSVC <nl> <nl> - static V8_INLINE void __cpuid ( int cpu_info [ 4 ] , int info_type ) { <nl> + static V8_INLINE ( void __cpuid ( int cpu_info [ 4 ] , int info_type ) ) { <nl> # if defined ( __i386__ ) & & defined ( __pic__ ) <nl> / / Make sure to preserve ebx , which contains the pointer <nl> / / to the GOT in case we ' re generating PIC . <nl> mmm a / src / deoptimizer . cc <nl> ppp b / src / deoptimizer . cc <nl> namespace internal { <nl> <nl> static MemoryChunk * AllocateCodeChunk ( MemoryAllocator * allocator ) { <nl> return allocator - > AllocateChunk ( Deoptimizer : : GetMaxDeoptTableSize ( ) , <nl> - VirtualMemory : : GetPageSize ( ) , <nl> - VirtualMemory : : EXECUTABLE , <nl> + OS : : CommitPageSize ( ) , <nl> + # if defined ( __native_client__ ) <nl> + / / The Native Client port of V8 uses an interpreter , <nl> + / / so code pages don ' t need PROT_EXEC . <nl> + NOT_EXECUTABLE , <nl> + # else <nl> + EXECUTABLE , <nl> + # endif <nl> NULL ) ; <nl> } <nl> <nl> static const int kDeoptTableMaxEpilogueCodeSize = 2 * KB ; <nl> size_t Deoptimizer : : GetMaxDeoptTableSize ( ) { <nl> int entries_size = <nl> Deoptimizer : : kMaxNumberOfEntries * Deoptimizer : : table_entry_size_ ; <nl> - int commit_page_size = static_cast < int > ( VirtualMemory : : GetPageSize ( ) ) ; <nl> + int commit_page_size = static_cast < int > ( OS : : CommitPageSize ( ) ) ; <nl> int page_count = ( ( kDeoptTableMaxEpilogueCodeSize + entries_size - 1 ) / <nl> commit_page_size ) + 1 ; <nl> return static_cast < size_t > ( commit_page_size * page_count ) ; <nl> mmm a / src / globals . h <nl> ppp b / src / globals . h <nl> F FUNCTION_CAST ( Address addr ) { <nl> DISALLOW_COPY_AND_ASSIGN ( TypeName ) <nl> <nl> <nl> - / / Newly written code should use V8_INLINE and V8_NOINLINE directly . <nl> - # define INLINE ( declarator ) V8_INLINE declarator <nl> - # define NO_INLINE ( declarator ) V8_NOINLINE declarator <nl> + / / Newly written code should use V8_INLINE ( ) and V8_NOINLINE ( ) directly . 
<nl> + # define INLINE ( declarator ) V8_INLINE ( declarator ) <nl> + # define NO_INLINE ( declarator ) V8_NOINLINE ( declarator ) <nl> <nl> <nl> / / Newly written code should use V8_WARN_UNUSED_RESULT . <nl> mmm a / src / heap - inl . h <nl> ppp b / src / heap - inl . h <nl> MaybeObject * Heap : : AllocateOneByteInternalizedString ( Vector < const uint8_t > str , <nl> / / Allocate string . <nl> Object * result ; <nl> { MaybeObject * maybe_result = ( size > Page : : kMaxNonCodeHeapObjectSize ) <nl> - ? lo_space_ - > AllocateRaw ( size , VirtualMemory : : NOT_EXECUTABLE ) <nl> + ? lo_space_ - > AllocateRaw ( size , NOT_EXECUTABLE ) <nl> : old_data_space_ - > AllocateRaw ( size ) ; <nl> if ( ! maybe_result - > ToObject ( & result ) ) return maybe_result ; <nl> } <nl> MaybeObject * Heap : : AllocateTwoByteInternalizedString ( Vector < const uc16 > str , <nl> / / Allocate string . <nl> Object * result ; <nl> { MaybeObject * maybe_result = ( size > Page : : kMaxNonCodeHeapObjectSize ) <nl> - ? lo_space_ - > AllocateRaw ( size , VirtualMemory : : NOT_EXECUTABLE ) <nl> + ? lo_space_ - > AllocateRaw ( size , NOT_EXECUTABLE ) <nl> : old_data_space_ - > AllocateRaw ( size ) ; <nl> if ( ! maybe_result - > ToObject ( & result ) ) return maybe_result ; <nl> } <nl> MaybeObject * Heap : : AllocateRaw ( int size_in_bytes , <nl> } else if ( CODE_SPACE = = space ) { <nl> result = code_space_ - > AllocateRaw ( size_in_bytes ) ; <nl> } else if ( LO_SPACE = = space ) { <nl> - result = lo_space_ - > AllocateRaw ( <nl> - size_in_bytes , VirtualMemory : : NOT_EXECUTABLE ) ; <nl> + result = lo_space_ - > AllocateRaw ( size_in_bytes , NOT_EXECUTABLE ) ; <nl> } else if ( CELL_SPACE = = space ) { <nl> result = cell_space_ - > AllocateRaw ( size_in_bytes ) ; <nl> } else if ( PROPERTY_CELL_SPACE = = space ) { <nl> mmm a / src / heap . cc <nl> ppp b / src / heap . cc <nl> Heap : : Heap ( ) <nl> max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE ; <nl> # endif <nl> <nl> - intptr_t max_virtual = static_cast < intptr_t > ( VirtualMemory : : GetLimit ( ) ) ; <nl> + intptr_t max_virtual = OS : : MaxVirtualMemory ( ) ; <nl> + <nl> if ( max_virtual > 0 ) { <nl> if ( code_range_size_ > 0 ) { <nl> / / Reserve no more than 1 / 8 of the memory for the code range . <nl> MaybeObject * Heap : : CreateCode ( const CodeDesc & desc , <nl> HeapObject * result ; <nl> bool force_lo_space = obj_size > code_space ( ) - > AreaSize ( ) ; <nl> if ( force_lo_space ) { <nl> - maybe_result = lo_space_ - > AllocateRaw ( obj_size , VirtualMemory : : EXECUTABLE ) ; <nl> + maybe_result = lo_space_ - > AllocateRaw ( obj_size , EXECUTABLE ) ; <nl> } else { <nl> maybe_result = code_space_ - > AllocateRaw ( obj_size ) ; <nl> } <nl> MaybeObject * Heap : : CreateCode ( const CodeDesc & desc , <nl> / / Discard the first code allocation , which was on a page where it could be <nl> / / moved . <nl> CreateFillerObjectAt ( result - > address ( ) , obj_size ) ; <nl> - maybe_result = lo_space_ - > AllocateRaw ( obj_size , VirtualMemory : : EXECUTABLE ) ; <nl> + maybe_result = lo_space_ - > AllocateRaw ( obj_size , EXECUTABLE ) ; <nl> if ( ! 
maybe_result - > To < HeapObject > ( & result ) ) return maybe_result ; <nl> } <nl> <nl> MaybeObject * Heap : : CopyCode ( Code * code ) { <nl> int obj_size = code - > Size ( ) ; <nl> MaybeObject * maybe_result ; <nl> if ( obj_size > code_space ( ) - > AreaSize ( ) ) { <nl> - maybe_result = lo_space_ - > AllocateRaw ( obj_size , VirtualMemory : : EXECUTABLE ) ; <nl> + maybe_result = lo_space_ - > AllocateRaw ( obj_size , EXECUTABLE ) ; <nl> } else { <nl> maybe_result = code_space_ - > AllocateRaw ( obj_size ) ; <nl> } <nl> MaybeObject * Heap : : CopyCode ( Code * code , Vector < byte > reloc_info ) { <nl> <nl> MaybeObject * maybe_result ; <nl> if ( new_obj_size > code_space ( ) - > AreaSize ( ) ) { <nl> - maybe_result = lo_space_ - > AllocateRaw ( <nl> - new_obj_size , VirtualMemory : : EXECUTABLE ) ; <nl> + maybe_result = lo_space_ - > AllocateRaw ( new_obj_size , EXECUTABLE ) ; <nl> } else { <nl> maybe_result = code_space_ - > AllocateRaw ( new_obj_size ) ; <nl> } <nl> MaybeObject * Heap : : AllocateInternalizedStringImpl ( <nl> / / Allocate string . <nl> Object * result ; <nl> { MaybeObject * maybe_result = ( size > Page : : kMaxNonCodeHeapObjectSize ) <nl> - ? lo_space_ - > AllocateRaw ( size , VirtualMemory : : NOT_EXECUTABLE ) <nl> + ? lo_space_ - > AllocateRaw ( size , NOT_EXECUTABLE ) <nl> : old_data_space_ - > AllocateRaw ( size ) ; <nl> if ( ! maybe_result - > ToObject ( & result ) ) return maybe_result ; <nl> } <nl> MaybeObject * Heap : : AllocateRawFixedArray ( int length ) { <nl> int size = FixedArray : : SizeFor ( length ) ; <nl> return size < = Page : : kMaxNonCodeHeapObjectSize <nl> ? new_space_ . AllocateRaw ( size ) <nl> - : lo_space_ - > AllocateRaw ( size , VirtualMemory : : NOT_EXECUTABLE ) ; <nl> + : lo_space_ - > AllocateRaw ( size , NOT_EXECUTABLE ) ; <nl> } <nl> <nl> <nl> bool Heap : : SetUp ( ) { <nl> new OldSpace ( this , <nl> max_old_generation_size_ , <nl> OLD_POINTER_SPACE , <nl> - VirtualMemory : : NOT_EXECUTABLE ) ; <nl> + NOT_EXECUTABLE ) ; <nl> if ( old_pointer_space_ = = NULL ) return false ; <nl> if ( ! old_pointer_space_ - > SetUp ( ) ) return false ; <nl> <nl> bool Heap : : SetUp ( ) { <nl> new OldSpace ( this , <nl> max_old_generation_size_ , <nl> OLD_DATA_SPACE , <nl> - VirtualMemory : : NOT_EXECUTABLE ) ; <nl> + NOT_EXECUTABLE ) ; <nl> if ( old_data_space_ = = NULL ) return false ; <nl> if ( ! old_data_space_ - > SetUp ( ) ) return false ; <nl> <nl> bool Heap : : SetUp ( ) { <nl> } <nl> } <nl> <nl> - code_space_ = new OldSpace ( <nl> - this , max_old_generation_size_ , CODE_SPACE , VirtualMemory : : EXECUTABLE ) ; <nl> + code_space_ = <nl> + new OldSpace ( this , max_old_generation_size_ , CODE_SPACE , EXECUTABLE ) ; <nl> if ( code_space_ = = NULL ) return false ; <nl> if ( ! code_space_ - > SetUp ( ) ) return false ; <nl> <nl> void Heap : : FreeQueuedChunks ( ) { <nl> MemoryChunk * inner_last = MemoryChunk : : FromAddress ( chunk_end - 1 ) ; <nl> while ( inner < = inner_last ) { <nl> / / Size of a large chunk is always a multiple of <nl> - / / VirtualMemory : : GetAllocationGranularity ( ) so <nl> - / / there is always enough space for a fake <nl> - / / MemoryChunk header . <nl> + / / OS : : AllocateAlignment ( ) so there is always <nl> + / / enough space for a fake MemoryChunk header . <nl> Address area_end = Min ( inner - > address ( ) + Page : : kPageSize , chunk_end ) ; <nl> / / Guard against overflow . <nl> if ( area_end < inner - > address ( ) ) area_end = chunk_end ; <nl> mmm a / src / ia32 / codegen - ia32 . 
cc <nl> ppp b / src / ia32 / codegen - ia32 . cc <nl> void StubRuntimeCallHelper : : AfterCall ( MacroAssembler * masm ) const { <nl> UnaryMathFunction CreateTranscendentalFunction ( TranscendentalCache : : Type type ) { <nl> size_t actual_size ; <nl> / / Allocate buffer in executable space . <nl> - byte * buffer = static_cast < byte * > ( VirtualMemory : : AllocateRegion ( <nl> - 1 * KB , & actual_size , VirtualMemory : : EXECUTABLE ) ) ; <nl> + byte * buffer = static_cast < byte * > ( OS : : Allocate ( 1 * KB , <nl> + & actual_size , <nl> + true ) ) ; <nl> if ( buffer = = NULL ) { <nl> / / Fallback to library function if function cannot be created . <nl> switch ( type ) { <nl> UnaryMathFunction CreateTranscendentalFunction ( TranscendentalCache : : Type type ) { <nl> ASSERT ( ! RelocInfo : : RequiresRelocation ( desc ) ) ; <nl> <nl> CPU : : FlushICache ( buffer , actual_size ) ; <nl> - bool result = VirtualMemory : : WriteProtectRegion ( buffer , actual_size ) ; <nl> - ASSERT ( result ) ; <nl> - USE ( result ) ; <nl> + OS : : ProtectCode ( buffer , actual_size ) ; <nl> return FUNCTION_CAST < UnaryMathFunction > ( buffer ) ; <nl> } <nl> <nl> UnaryMathFunction CreateExpFunction ( ) { <nl> if ( ! CpuFeatures : : IsSupported ( SSE2 ) ) return & exp ; <nl> if ( ! FLAG_fast_math ) return & exp ; <nl> size_t actual_size ; <nl> - byte * buffer = static_cast < byte * > ( VirtualMemory : : AllocateRegion ( <nl> - 1 * KB , & actual_size , VirtualMemory : : EXECUTABLE ) ) ; <nl> + byte * buffer = static_cast < byte * > ( OS : : Allocate ( 1 * KB , & actual_size , true ) ) ; <nl> if ( buffer = = NULL ) return & exp ; <nl> ExternalReference : : InitializeMathExpData ( ) ; <nl> <nl> UnaryMathFunction CreateExpFunction ( ) { <nl> ASSERT ( ! RelocInfo : : RequiresRelocation ( desc ) ) ; <nl> <nl> CPU : : FlushICache ( buffer , actual_size ) ; <nl> - bool result = VirtualMemory : : WriteProtectRegion ( buffer , actual_size ) ; <nl> - ASSERT ( result ) ; <nl> - USE ( result ) ; <nl> + OS : : ProtectCode ( buffer , actual_size ) ; <nl> return FUNCTION_CAST < UnaryMathFunction > ( buffer ) ; <nl> } <nl> <nl> UnaryMathFunction CreateExpFunction ( ) { <nl> UnaryMathFunction CreateSqrtFunction ( ) { <nl> size_t actual_size ; <nl> / / Allocate buffer in executable space . <nl> - byte * buffer = static_cast < byte * > ( VirtualMemory : : AllocateRegion ( <nl> - 1 * KB , & actual_size , VirtualMemory : : EXECUTABLE ) ) ; <nl> + byte * buffer = static_cast < byte * > ( OS : : Allocate ( 1 * KB , <nl> + & actual_size , <nl> + true ) ) ; <nl> / / If SSE2 is not available , we can use libc ' s implementation to ensure <nl> / / consistency since code by fullcodegen ' s calls into runtime in that case . <nl> if ( buffer = = NULL | | ! CpuFeatures : : IsSupported ( SSE2 ) ) return & sqrt ; <nl> UnaryMathFunction CreateSqrtFunction ( ) { <nl> ASSERT ( ! RelocInfo : : RequiresRelocation ( desc ) ) ; <nl> <nl> CPU : : FlushICache ( buffer , actual_size ) ; <nl> - bool result = VirtualMemory : : WriteProtectRegion ( buffer , actual_size ) ; <nl> - ASSERT ( result ) ; <nl> - USE ( result ) ; <nl> + OS : : ProtectCode ( buffer , actual_size ) ; <nl> return FUNCTION_CAST < UnaryMathFunction > ( buffer ) ; <nl> } <nl> <nl> class LabelConverter { <nl> OS : : MemMoveFunction CreateMemMoveFunction ( ) { <nl> size_t actual_size ; <nl> / / Allocate buffer in executable space . 
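// Editor's note, a hedged sketch rather than part of the patch: the stub
// generators touched in this file all follow the same allocate / emit /
// protect pattern built on the OS primitives restored by this change,
// approximately:
//
//   size_t actual_size;
//   byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
//   if (buffer != NULL) {
//     MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
//     // ... emit the stub's code into the buffer ...
//     CodeDesc desc;
//     masm.GetCode(&desc);
//     CPU::FlushICache(buffer, actual_size);
//     OS::ProtectCode(buffer, actual_size);  // drop write permission on the code page
//   }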
<nl> - byte * buffer = static_cast < byte * > ( VirtualMemory : : AllocateRegion ( <nl> - 1 * KB , & actual_size , VirtualMemory : : EXECUTABLE ) ) ; <nl> + byte * buffer = static_cast < byte * > ( OS : : Allocate ( 1 * KB , & actual_size , true ) ) ; <nl> if ( buffer = = NULL ) return NULL ; <nl> MacroAssembler masm ( NULL , buffer , static_cast < int > ( actual_size ) ) ; <nl> LabelConverter conv ( buffer ) ; <nl> OS : : MemMoveFunction CreateMemMoveFunction ( ) { <nl> masm . GetCode ( & desc ) ; <nl> ASSERT ( ! RelocInfo : : RequiresRelocation ( desc ) ) ; <nl> CPU : : FlushICache ( buffer , actual_size ) ; <nl> - bool result = VirtualMemory : : WriteProtectRegion ( buffer , actual_size ) ; <nl> - ASSERT ( result ) ; <nl> - USE ( result ) ; <nl> + OS : : ProtectCode ( buffer , actual_size ) ; <nl> / / TODO ( jkummerow ) : It would be nice to register this code creation event <nl> / / with the PROFILE / GDBJIT system . <nl> return FUNCTION_CAST < OS : : MemMoveFunction > ( buffer ) ; <nl> mmm a / src / incremental - marking . cc <nl> ppp b / src / incremental - marking . cc <nl> void IncrementalMarking : : EnsureMarkingDequeIsCommitted ( ) { <nl> bool success = marking_deque_memory_ - > Commit ( <nl> reinterpret_cast < Address > ( marking_deque_memory_ - > address ( ) ) , <nl> marking_deque_memory_ - > size ( ) , <nl> - VirtualMemory : : NOT_EXECUTABLE ) ; <nl> + false ) ; / / Not executable . <nl> CHECK ( success ) ; <nl> marking_deque_memory_committed_ = true ; <nl> } <nl> mmm a / src / mips / codegen - mips . cc <nl> ppp b / src / mips / codegen - mips . cc <nl> double fast_exp_simulator ( double x ) { <nl> UnaryMathFunction CreateExpFunction ( ) { <nl> if ( ! FLAG_fast_math ) return & exp ; <nl> size_t actual_size ; <nl> - byte * buffer = static_cast < byte * > ( VirtualMemory : : AllocateRegion ( <nl> - 1 * KB , & actual_size , VirtualMemory : : EXECUTABLE ) ) ; <nl> + byte * buffer = static_cast < byte * > ( OS : : Allocate ( 1 * KB , & actual_size , true ) ) ; <nl> if ( buffer = = NULL ) return & exp ; <nl> ExternalReference : : InitializeMathExpData ( ) ; <nl> <nl> UnaryMathFunction CreateExpFunction ( ) { <nl> ASSERT ( ! RelocInfo : : RequiresRelocation ( desc ) ) ; <nl> <nl> CPU : : FlushICache ( buffer , actual_size ) ; <nl> - bool result = VirtualMemory : : WriteProtectRegion ( buffer , actual_size ) ; <nl> - ASSERT ( result ) ; <nl> - USE ( result ) ; <nl> + OS : : ProtectCode ( buffer , actual_size ) ; <nl> <nl> # if ! defined ( USE_SIMULATOR ) <nl> return FUNCTION_CAST < UnaryMathFunction > ( buffer ) ; <nl> mmm a / src / platform - cygwin . cc <nl> ppp b / src / platform - cygwin . cc <nl> double OS : : LocalTimeOffset ( ) { <nl> } <nl> <nl> <nl> + void * OS : : Allocate ( const size_t requested , <nl> + size_t * allocated , <nl> + bool is_executable ) { <nl> + const size_t msize = RoundUp ( requested , sysconf ( _SC_PAGESIZE ) ) ; <nl> + int prot = PROT_READ | PROT_WRITE | ( is_executable ? PROT_EXEC : 0 ) ; <nl> + void * mbase = mmap ( NULL , msize , prot , MAP_PRIVATE | MAP_ANONYMOUS , - 1 , 0 ) ; <nl> + if ( mbase = = MAP_FAILED ) { <nl> + LOG ( Isolate : : Current ( ) , StringEvent ( " OS : : Allocate " , " mmap failed " ) ) ; <nl> + return NULL ; <nl> + } <nl> + * allocated = msize ; <nl> + return mbase ; <nl> + } <nl> + <nl> + <nl> void OS : : DumpBacktrace ( ) { <nl> / / Currently unsupported . <nl> } <nl> static void * GetRandomAddr ( ) { <nl> / / CpuFeatures : : Probe . 
We don ' t care about randomization in this case because <nl> / / the code page is immediately freed . <nl> if ( isolate ! = NULL ) { <nl> - / / The address range used to randomize RWX allocations in <nl> - / / VirtualMemory : : AllocateRegion ( ) . <nl> + / / The address range used to randomize RWX allocations in OS : : Allocate <nl> / / Try not to map pages into the default range that windows loads DLLs <nl> / / Use a multiple of 64k to prevent committing unused memory . <nl> / / Note : This does not guarantee RWX regions will be within the <nl> static void * GetRandomAddr ( ) { <nl> return NULL ; <nl> } <nl> <nl> + <nl> + static void * RandomizedVirtualAlloc ( size_t size , int action , int protection ) { <nl> + LPVOID base = NULL ; <nl> + <nl> + if ( protection = = PAGE_EXECUTE_READWRITE | | protection = = PAGE_NOACCESS ) { <nl> + / / For exectutable pages try and randomize the allocation address <nl> + for ( size_t attempts = 0 ; base = = NULL & & attempts < 3 ; + + attempts ) { <nl> + base = VirtualAlloc ( GetRandomAddr ( ) , size , action , protection ) ; <nl> + } <nl> + } <nl> + <nl> + / / After three attempts give up and let the OS find an address to use . <nl> + if ( base = = NULL ) base = VirtualAlloc ( NULL , size , action , protection ) ; <nl> + <nl> + return base ; <nl> + } <nl> + <nl> + <nl> + VirtualMemory : : VirtualMemory ( ) : address_ ( NULL ) , size_ ( 0 ) { } <nl> + <nl> + <nl> + VirtualMemory : : VirtualMemory ( size_t size ) <nl> + : address_ ( ReserveRegion ( size ) ) , size_ ( size ) { } <nl> + <nl> + <nl> + VirtualMemory : : VirtualMemory ( size_t size , size_t alignment ) <nl> + : address_ ( NULL ) , size_ ( 0 ) { <nl> + ASSERT ( IsAligned ( alignment , static_cast < intptr_t > ( OS : : AllocateAlignment ( ) ) ) ) ; <nl> + size_t request_size = RoundUp ( size + alignment , <nl> + static_cast < intptr_t > ( OS : : AllocateAlignment ( ) ) ) ; <nl> + void * address = ReserveRegion ( request_size ) ; <nl> + if ( address = = NULL ) return ; <nl> + Address base = RoundUp ( static_cast < Address > ( address ) , alignment ) ; <nl> + / / Try reducing the size by freeing and then reallocating a specific area . <nl> + bool result = ReleaseRegion ( address , request_size ) ; <nl> + USE ( result ) ; <nl> + ASSERT ( result ) ; <nl> + address = VirtualAlloc ( base , size , MEM_RESERVE , PAGE_NOACCESS ) ; <nl> + if ( address ! = NULL ) { <nl> + request_size = size ; <nl> + ASSERT ( base = = static_cast < Address > ( address ) ) ; <nl> + } else { <nl> + / / Resizing failed , just go with a bigger area . <nl> + address = ReserveRegion ( request_size ) ; <nl> + if ( address = = NULL ) return ; <nl> + } <nl> + address_ = address ; <nl> + size_ = request_size ; <nl> + } <nl> + <nl> + <nl> + VirtualMemory : : ~ VirtualMemory ( ) { <nl> + if ( IsReserved ( ) ) { <nl> + bool result = ReleaseRegion ( address_ , size_ ) ; <nl> + ASSERT ( result ) ; <nl> + USE ( result ) ; <nl> + } <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : IsReserved ( ) { <nl> + return address_ ! 
= NULL ; <nl> + } <nl> + <nl> + <nl> + void VirtualMemory : : Reset ( ) { <nl> + address_ = NULL ; <nl> + size_ = 0 ; <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : Commit ( void * address , size_t size , bool is_executable ) { <nl> + return CommitRegion ( address , size , is_executable ) ; <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : Uncommit ( void * address , size_t size ) { <nl> + ASSERT ( IsReserved ( ) ) ; <nl> + return UncommitRegion ( address , size ) ; <nl> + } <nl> + <nl> + <nl> + void * VirtualMemory : : ReserveRegion ( size_t size ) { <nl> + return RandomizedVirtualAlloc ( size , MEM_RESERVE , PAGE_NOACCESS ) ; <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : CommitRegion ( void * base , size_t size , bool is_executable ) { <nl> + int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE ; <nl> + if ( NULL = = VirtualAlloc ( base , size , MEM_COMMIT , prot ) ) { <nl> + return false ; <nl> + } <nl> + return true ; <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : Guard ( void * address ) { <nl> + if ( NULL = = VirtualAlloc ( address , <nl> + OS : : CommitPageSize ( ) , <nl> + MEM_COMMIT , <nl> + PAGE_NOACCESS ) ) { <nl> + return false ; <nl> + } <nl> + return true ; <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : UncommitRegion ( void * base , size_t size ) { <nl> + return VirtualFree ( base , size , MEM_DECOMMIT ) ! = 0 ; <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : ReleaseRegion ( void * base , size_t size ) { <nl> + return VirtualFree ( base , 0 , MEM_RELEASE ) ! = 0 ; <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : HasLazyCommits ( ) { <nl> + / / TODO ( alph ) : implement for the platform . <nl> + return false ; <nl> + } <nl> + <nl> } } / / namespace v8 : : internal <nl> mmm a / src / platform - freebsd . cc <nl> ppp b / src / platform - freebsd . cc <nl> double OS : : LocalTimeOffset ( ) { <nl> } <nl> <nl> <nl> + void * OS : : Allocate ( const size_t requested , <nl> + size_t * allocated , <nl> + bool executable ) { <nl> + const size_t msize = RoundUp ( requested , getpagesize ( ) ) ; <nl> + int prot = PROT_READ | PROT_WRITE | ( executable ? PROT_EXEC : 0 ) ; <nl> + void * mbase = mmap ( NULL , msize , prot , MAP_PRIVATE | MAP_ANON , - 1 , 0 ) ; <nl> + <nl> + if ( mbase = = MAP_FAILED ) { <nl> + LOG ( Isolate : : Current ( ) , StringEvent ( " OS : : Allocate " , " mmap failed " ) ) ; <nl> + return NULL ; <nl> + } <nl> + * allocated = msize ; <nl> + return mbase ; <nl> + } <nl> + <nl> + <nl> void OS : : DumpBacktrace ( ) { <nl> POSIXBacktraceHelper < backtrace , backtrace_symbols > : : DumpBacktrace ( ) ; <nl> } <nl> int OS : : StackWalk ( Vector < OS : : StackFrame > frames ) { <nl> return POSIXBacktraceHelper < backtrace , backtrace_symbols > : : StackWalk ( frames ) ; <nl> } <nl> <nl> + <nl> + / / Constants used for mmap . 
<nl> + static const int kMmapFd = - 1 ; <nl> + static const int kMmapFdOffset = 0 ; <nl> + <nl> + <nl> + VirtualMemory : : VirtualMemory ( ) : address_ ( NULL ) , size_ ( 0 ) { } <nl> + <nl> + <nl> + VirtualMemory : : VirtualMemory ( size_t size ) <nl> + : address_ ( ReserveRegion ( size ) ) , size_ ( size ) { } <nl> + <nl> + <nl> + VirtualMemory : : VirtualMemory ( size_t size , size_t alignment ) <nl> + : address_ ( NULL ) , size_ ( 0 ) { <nl> + ASSERT ( IsAligned ( alignment , static_cast < intptr_t > ( OS : : AllocateAlignment ( ) ) ) ) ; <nl> + size_t request_size = RoundUp ( size + alignment , <nl> + static_cast < intptr_t > ( OS : : AllocateAlignment ( ) ) ) ; <nl> + void * reservation = mmap ( OS : : GetRandomMmapAddr ( ) , <nl> + request_size , <nl> + PROT_NONE , <nl> + MAP_PRIVATE | MAP_ANON | MAP_NORESERVE , <nl> + kMmapFd , <nl> + kMmapFdOffset ) ; <nl> + if ( reservation = = MAP_FAILED ) return ; <nl> + <nl> + Address base = static_cast < Address > ( reservation ) ; <nl> + Address aligned_base = RoundUp ( base , alignment ) ; <nl> + ASSERT_LE ( base , aligned_base ) ; <nl> + <nl> + / / Unmap extra memory reserved before and after the desired block . <nl> + if ( aligned_base ! = base ) { <nl> + size_t prefix_size = static_cast < size_t > ( aligned_base - base ) ; <nl> + OS : : Free ( base , prefix_size ) ; <nl> + request_size - = prefix_size ; <nl> + } <nl> + <nl> + size_t aligned_size = RoundUp ( size , OS : : AllocateAlignment ( ) ) ; <nl> + ASSERT_LE ( aligned_size , request_size ) ; <nl> + <nl> + if ( aligned_size ! = request_size ) { <nl> + size_t suffix_size = request_size - aligned_size ; <nl> + OS : : Free ( aligned_base + aligned_size , suffix_size ) ; <nl> + request_size - = suffix_size ; <nl> + } <nl> + <nl> + ASSERT ( aligned_size = = request_size ) ; <nl> + <nl> + address_ = static_cast < void * > ( aligned_base ) ; <nl> + size_ = aligned_size ; <nl> + } <nl> + <nl> + <nl> + VirtualMemory : : ~ VirtualMemory ( ) { <nl> + if ( IsReserved ( ) ) { <nl> + bool result = ReleaseRegion ( address ( ) , size ( ) ) ; <nl> + ASSERT ( result ) ; <nl> + USE ( result ) ; <nl> + } <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : IsReserved ( ) { <nl> + return address_ ! = NULL ; <nl> + } <nl> + <nl> + <nl> + void VirtualMemory : : Reset ( ) { <nl> + address_ = NULL ; <nl> + size_ = 0 ; <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : Commit ( void * address , size_t size , bool is_executable ) { <nl> + return CommitRegion ( address , size , is_executable ) ; <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : Uncommit ( void * address , size_t size ) { <nl> + return UncommitRegion ( address , size ) ; <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : Guard ( void * address ) { <nl> + OS : : Guard ( address , OS : : CommitPageSize ( ) ) ; <nl> + return true ; <nl> + } <nl> + <nl> + <nl> + void * VirtualMemory : : ReserveRegion ( size_t size ) { <nl> + void * result = mmap ( OS : : GetRandomMmapAddr ( ) , <nl> + size , <nl> + PROT_NONE , <nl> + MAP_PRIVATE | MAP_ANON | MAP_NORESERVE , <nl> + kMmapFd , <nl> + kMmapFdOffset ) ; <nl> + <nl> + if ( result = = MAP_FAILED ) return NULL ; <nl> + <nl> + return result ; <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : CommitRegion ( void * base , size_t size , bool is_executable ) { <nl> + int prot = PROT_READ | PROT_WRITE | ( is_executable ? 
PROT_EXEC : 0 ) ; <nl> + if ( MAP_FAILED = = mmap ( base , <nl> + size , <nl> + prot , <nl> + MAP_PRIVATE | MAP_ANON | MAP_FIXED , <nl> + kMmapFd , <nl> + kMmapFdOffset ) ) { <nl> + return false ; <nl> + } <nl> + return true ; <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : UncommitRegion ( void * base , size_t size ) { <nl> + return mmap ( base , <nl> + size , <nl> + PROT_NONE , <nl> + MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED , <nl> + kMmapFd , <nl> + kMmapFdOffset ) ! = MAP_FAILED ; <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : ReleaseRegion ( void * base , size_t size ) { <nl> + return munmap ( base , size ) = = 0 ; <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : HasLazyCommits ( ) { <nl> + / / TODO ( alph ) : implement for the platform . <nl> + return false ; <nl> + } <nl> + <nl> } } / / namespace v8 : : internal <nl> mmm a / src / platform - linux . cc <nl> ppp b / src / platform - linux . cc <nl> double OS : : LocalTimeOffset ( ) { <nl> } <nl> <nl> <nl> + void * OS : : Allocate ( const size_t requested , <nl> + size_t * allocated , <nl> + bool is_executable ) { <nl> + const size_t msize = RoundUp ( requested , AllocateAlignment ( ) ) ; <nl> + int prot = PROT_READ | PROT_WRITE | ( is_executable ? PROT_EXEC : 0 ) ; <nl> + void * addr = OS : : GetRandomMmapAddr ( ) ; <nl> + void * mbase = mmap ( addr , msize , prot , MAP_PRIVATE | MAP_ANONYMOUS , - 1 , 0 ) ; <nl> + if ( mbase = = MAP_FAILED ) { <nl> + LOG ( i : : Isolate : : Current ( ) , <nl> + StringEvent ( " OS : : Allocate " , " mmap failed " ) ) ; <nl> + return NULL ; <nl> + } <nl> + * allocated = msize ; <nl> + return mbase ; <nl> + } <nl> + <nl> + <nl> void OS : : DumpBacktrace ( ) { <nl> / / backtrace is a glibc extension . <nl> # if defined ( __GLIBC__ ) & & ! defined ( __UCLIBC__ ) <nl> OS : : MemoryMappedFile * OS : : MemoryMappedFile : : open ( const char * name ) { <nl> int size = ftell ( file ) ; <nl> <nl> void * memory = <nl> - mmap ( NULL , <nl> + mmap ( OS : : GetRandomMmapAddr ( ) , <nl> size , <nl> PROT_READ | PROT_WRITE , <nl> MAP_SHARED , <nl> fileno ( file ) , <nl> 0 ) ; <nl> - if ( memory = = MAP_FAILED ) { <nl> - fclose ( file ) ; <nl> - return NULL ; <nl> - } <nl> return new PosixMemoryMappedFile ( file , memory , size ) ; <nl> } <nl> <nl> OS : : MemoryMappedFile * OS : : MemoryMappedFile : : create ( const char * name , int size , <nl> return NULL ; <nl> } <nl> void * memory = <nl> - mmap ( NULL , <nl> + mmap ( OS : : GetRandomMmapAddr ( ) , <nl> size , <nl> PROT_READ | PROT_WRITE , <nl> MAP_SHARED , <nl> fileno ( file ) , <nl> 0 ) ; <nl> - if ( memory = = MAP_FAILED ) { <nl> - fclose ( file ) ; <nl> - return NULL ; <nl> - } <nl> return new PosixMemoryMappedFile ( file , memory , size ) ; <nl> } <nl> <nl> <nl> PosixMemoryMappedFile : : ~ PosixMemoryMappedFile ( ) { <nl> - int result = munmap ( memory_ , size_ ) ; <nl> - ASSERT_EQ ( 0 , result ) ; <nl> - USE ( result ) ; <nl> + if ( memory_ ) OS : : Free ( memory_ , size_ ) ; <nl> fclose ( file_ ) ; <nl> } <nl> <nl> void OS : : SignalCodeMovingGC ( ) { <nl> OS : : PrintError ( " Failed to open % s \ n " , FLAG_gc_fake_mmap ) ; <nl> OS : : Abort ( ) ; <nl> } <nl> - void * addr = mmap ( NULL , <nl> + void * addr = mmap ( OS : : GetRandomMmapAddr ( ) , <nl> size , <nl> # if defined ( __native_client__ ) <nl> / / The Native Client port of V8 uses an interpreter , <nl> void OS : : SignalCodeMovingGC ( ) { <nl> fileno ( f ) , <nl> 0 ) ; <nl> ASSERT ( addr ! 
= MAP_FAILED ) ; <nl> - int result = munmap ( addr , size ) ; <nl> - ASSERT_EQ ( 0 , result ) ; <nl> - USE ( result ) ; <nl> + OS : : Free ( addr , size ) ; <nl> fclose ( f ) ; <nl> } <nl> <nl> int OS : : StackWalk ( Vector < OS : : StackFrame > frames ) { <nl> # endif <nl> } <nl> <nl> + <nl> + / / Constants used for mmap . <nl> + static const int kMmapFd = - 1 ; <nl> + static const int kMmapFdOffset = 0 ; <nl> + <nl> + <nl> + VirtualMemory : : VirtualMemory ( ) : address_ ( NULL ) , size_ ( 0 ) { } <nl> + <nl> + <nl> + VirtualMemory : : VirtualMemory ( size_t size ) <nl> + : address_ ( ReserveRegion ( size ) ) , size_ ( size ) { } <nl> + <nl> + <nl> + VirtualMemory : : VirtualMemory ( size_t size , size_t alignment ) <nl> + : address_ ( NULL ) , size_ ( 0 ) { <nl> + ASSERT ( IsAligned ( alignment , static_cast < intptr_t > ( OS : : AllocateAlignment ( ) ) ) ) ; <nl> + size_t request_size = RoundUp ( size + alignment , <nl> + static_cast < intptr_t > ( OS : : AllocateAlignment ( ) ) ) ; <nl> + void * reservation = mmap ( OS : : GetRandomMmapAddr ( ) , <nl> + request_size , <nl> + PROT_NONE , <nl> + MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE , <nl> + kMmapFd , <nl> + kMmapFdOffset ) ; <nl> + if ( reservation = = MAP_FAILED ) return ; <nl> + <nl> + Address base = static_cast < Address > ( reservation ) ; <nl> + Address aligned_base = RoundUp ( base , alignment ) ; <nl> + ASSERT_LE ( base , aligned_base ) ; <nl> + <nl> + / / Unmap extra memory reserved before and after the desired block . <nl> + if ( aligned_base ! = base ) { <nl> + size_t prefix_size = static_cast < size_t > ( aligned_base - base ) ; <nl> + OS : : Free ( base , prefix_size ) ; <nl> + request_size - = prefix_size ; <nl> + } <nl> + <nl> + size_t aligned_size = RoundUp ( size , OS : : AllocateAlignment ( ) ) ; <nl> + ASSERT_LE ( aligned_size , request_size ) ; <nl> + <nl> + if ( aligned_size ! = request_size ) { <nl> + size_t suffix_size = request_size - aligned_size ; <nl> + OS : : Free ( aligned_base + aligned_size , suffix_size ) ; <nl> + request_size - = suffix_size ; <nl> + } <nl> + <nl> + ASSERT ( aligned_size = = request_size ) ; <nl> + <nl> + address_ = static_cast < void * > ( aligned_base ) ; <nl> + size_ = aligned_size ; <nl> + } <nl> + <nl> + <nl> + VirtualMemory : : ~ VirtualMemory ( ) { <nl> + if ( IsReserved ( ) ) { <nl> + bool result = ReleaseRegion ( address ( ) , size ( ) ) ; <nl> + ASSERT ( result ) ; <nl> + USE ( result ) ; <nl> + } <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : IsReserved ( ) { <nl> + return address_ ! 
= NULL ; <nl> + } <nl> + <nl> + <nl> + void VirtualMemory : : Reset ( ) { <nl> + address_ = NULL ; <nl> + size_ = 0 ; <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : Commit ( void * address , size_t size , bool is_executable ) { <nl> + return CommitRegion ( address , size , is_executable ) ; <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : Uncommit ( void * address , size_t size ) { <nl> + return UncommitRegion ( address , size ) ; <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : Guard ( void * address ) { <nl> + OS : : Guard ( address , OS : : CommitPageSize ( ) ) ; <nl> + return true ; <nl> + } <nl> + <nl> + <nl> + void * VirtualMemory : : ReserveRegion ( size_t size ) { <nl> + void * result = mmap ( OS : : GetRandomMmapAddr ( ) , <nl> + size , <nl> + PROT_NONE , <nl> + MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE , <nl> + kMmapFd , <nl> + kMmapFdOffset ) ; <nl> + <nl> + if ( result = = MAP_FAILED ) return NULL ; <nl> + <nl> + return result ; <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : CommitRegion ( void * base , size_t size , bool is_executable ) { <nl> + # if defined ( __native_client__ ) <nl> + / / The Native Client port of V8 uses an interpreter , <nl> + / / so code pages don ' t need PROT_EXEC . <nl> + int prot = PROT_READ | PROT_WRITE ; <nl> + # else <nl> + int prot = PROT_READ | PROT_WRITE | ( is_executable ? PROT_EXEC : 0 ) ; <nl> + # endif <nl> + if ( MAP_FAILED = = mmap ( base , <nl> + size , <nl> + prot , <nl> + MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED , <nl> + kMmapFd , <nl> + kMmapFdOffset ) ) { <nl> + return false ; <nl> + } <nl> + <nl> + return true ; <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : UncommitRegion ( void * base , size_t size ) { <nl> + return mmap ( base , <nl> + size , <nl> + PROT_NONE , <nl> + MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED , <nl> + kMmapFd , <nl> + kMmapFdOffset ) ! = MAP_FAILED ; <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : ReleaseRegion ( void * base , size_t size ) { <nl> + return munmap ( base , size ) = = 0 ; <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : HasLazyCommits ( ) { <nl> + return true ; <nl> + } <nl> + <nl> } } / / namespace v8 : : internal <nl> mmm a / src / platform - macos . cc <nl> ppp b / src / platform - macos . cc <nl> namespace v8 { <nl> namespace internal { <nl> <nl> <nl> + / / Constants used for mmap . <nl> + / / kMmapFd is used to pass vm_alloc flags to tag the region with the user <nl> + / / defined tag 255 This helps identify V8 - allocated regions in memory analysis <nl> + / / tools like vmmap ( 1 ) . <nl> + static const int kMmapFd = VM_MAKE_TAG ( 255 ) ; <nl> + static const off_t kMmapFdOffset = 0 ; <nl> + <nl> + <nl> + void * OS : : Allocate ( const size_t requested , <nl> + size_t * allocated , <nl> + bool is_executable ) { <nl> + const size_t msize = RoundUp ( requested , getpagesize ( ) ) ; <nl> + int prot = PROT_READ | PROT_WRITE | ( is_executable ? PROT_EXEC : 0 ) ; <nl> + void * mbase = mmap ( OS : : GetRandomMmapAddr ( ) , <nl> + msize , <nl> + prot , <nl> + MAP_PRIVATE | MAP_ANON , <nl> + kMmapFd , <nl> + kMmapFdOffset ) ; <nl> + if ( mbase = = MAP_FAILED ) { <nl> + LOG ( Isolate : : Current ( ) , StringEvent ( " OS : : Allocate " , " mmap failed " ) ) ; <nl> + return NULL ; <nl> + } <nl> + * allocated = msize ; <nl> + return mbase ; <nl> + } <nl> + <nl> + <nl> void OS : : DumpBacktrace ( ) { <nl> / / If weak link to execinfo lib has failed , ie because we are on 10 . 4 , abort . 
<nl> if ( backtrace = = NULL ) return ; <nl> OS : : MemoryMappedFile * OS : : MemoryMappedFile : : open ( const char * name ) { <nl> int size = ftell ( file ) ; <nl> <nl> void * memory = <nl> - mmap ( NULL , <nl> + mmap ( OS : : GetRandomMmapAddr ( ) , <nl> size , <nl> PROT_READ | PROT_WRITE , <nl> MAP_SHARED , <nl> OS : : MemoryMappedFile * OS : : MemoryMappedFile : : create ( const char * name , int size , <nl> return NULL ; <nl> } <nl> void * memory = <nl> - mmap ( NULL , <nl> + mmap ( OS : : GetRandomMmapAddr ( ) , <nl> size , <nl> PROT_READ | PROT_WRITE , <nl> MAP_SHARED , <nl> OS : : MemoryMappedFile * OS : : MemoryMappedFile : : create ( const char * name , int size , <nl> <nl> <nl> PosixMemoryMappedFile : : ~ PosixMemoryMappedFile ( ) { <nl> - if ( memory_ ) munmap ( memory_ , size_ ) ; <nl> + if ( memory_ ) OS : : Free ( memory_ , size_ ) ; <nl> fclose ( file_ ) ; <nl> } <nl> <nl> int OS : : StackWalk ( Vector < StackFrame > frames ) { <nl> return POSIXBacktraceHelper < backtrace , backtrace_symbols > : : StackWalk ( frames ) ; <nl> } <nl> <nl> + <nl> + VirtualMemory : : VirtualMemory ( ) : address_ ( NULL ) , size_ ( 0 ) { } <nl> + <nl> + <nl> + VirtualMemory : : VirtualMemory ( size_t size ) <nl> + : address_ ( ReserveRegion ( size ) ) , size_ ( size ) { } <nl> + <nl> + <nl> + VirtualMemory : : VirtualMemory ( size_t size , size_t alignment ) <nl> + : address_ ( NULL ) , size_ ( 0 ) { <nl> + ASSERT ( IsAligned ( alignment , static_cast < intptr_t > ( OS : : AllocateAlignment ( ) ) ) ) ; <nl> + size_t request_size = RoundUp ( size + alignment , <nl> + static_cast < intptr_t > ( OS : : AllocateAlignment ( ) ) ) ; <nl> + void * reservation = mmap ( OS : : GetRandomMmapAddr ( ) , <nl> + request_size , <nl> + PROT_NONE , <nl> + MAP_PRIVATE | MAP_ANON | MAP_NORESERVE , <nl> + kMmapFd , <nl> + kMmapFdOffset ) ; <nl> + if ( reservation = = MAP_FAILED ) return ; <nl> + <nl> + Address base = static_cast < Address > ( reservation ) ; <nl> + Address aligned_base = RoundUp ( base , alignment ) ; <nl> + ASSERT_LE ( base , aligned_base ) ; <nl> + <nl> + / / Unmap extra memory reserved before and after the desired block . <nl> + if ( aligned_base ! = base ) { <nl> + size_t prefix_size = static_cast < size_t > ( aligned_base - base ) ; <nl> + OS : : Free ( base , prefix_size ) ; <nl> + request_size - = prefix_size ; <nl> + } <nl> + <nl> + size_t aligned_size = RoundUp ( size , OS : : AllocateAlignment ( ) ) ; <nl> + ASSERT_LE ( aligned_size , request_size ) ; <nl> + <nl> + if ( aligned_size ! = request_size ) { <nl> + size_t suffix_size = request_size - aligned_size ; <nl> + OS : : Free ( aligned_base + aligned_size , suffix_size ) ; <nl> + request_size - = suffix_size ; <nl> + } <nl> + <nl> + ASSERT ( aligned_size = = request_size ) ; <nl> + <nl> + address_ = static_cast < void * > ( aligned_base ) ; <nl> + size_ = aligned_size ; <nl> + } <nl> + <nl> + <nl> + VirtualMemory : : ~ VirtualMemory ( ) { <nl> + if ( IsReserved ( ) ) { <nl> + bool result = ReleaseRegion ( address ( ) , size ( ) ) ; <nl> + ASSERT ( result ) ; <nl> + USE ( result ) ; <nl> + } <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : IsReserved ( ) { <nl> + return address_ ! 
= NULL ; <nl> + } <nl> + <nl> + <nl> + void VirtualMemory : : Reset ( ) { <nl> + address_ = NULL ; <nl> + size_ = 0 ; <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : Commit ( void * address , size_t size , bool is_executable ) { <nl> + return CommitRegion ( address , size , is_executable ) ; <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : Uncommit ( void * address , size_t size ) { <nl> + return UncommitRegion ( address , size ) ; <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : Guard ( void * address ) { <nl> + OS : : Guard ( address , OS : : CommitPageSize ( ) ) ; <nl> + return true ; <nl> + } <nl> + <nl> + <nl> + void * VirtualMemory : : ReserveRegion ( size_t size ) { <nl> + void * result = mmap ( OS : : GetRandomMmapAddr ( ) , <nl> + size , <nl> + PROT_NONE , <nl> + MAP_PRIVATE | MAP_ANON | MAP_NORESERVE , <nl> + kMmapFd , <nl> + kMmapFdOffset ) ; <nl> + <nl> + if ( result = = MAP_FAILED ) return NULL ; <nl> + <nl> + return result ; <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : CommitRegion ( void * address , <nl> + size_t size , <nl> + bool is_executable ) { <nl> + int prot = PROT_READ | PROT_WRITE | ( is_executable ? PROT_EXEC : 0 ) ; <nl> + if ( MAP_FAILED = = mmap ( address , <nl> + size , <nl> + prot , <nl> + MAP_PRIVATE | MAP_ANON | MAP_FIXED , <nl> + kMmapFd , <nl> + kMmapFdOffset ) ) { <nl> + return false ; <nl> + } <nl> + return true ; <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : UncommitRegion ( void * address , size_t size ) { <nl> + return mmap ( address , <nl> + size , <nl> + PROT_NONE , <nl> + MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED , <nl> + kMmapFd , <nl> + kMmapFdOffset ) ! = MAP_FAILED ; <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : ReleaseRegion ( void * address , size_t size ) { <nl> + return munmap ( address , size ) = = 0 ; <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : HasLazyCommits ( ) { <nl> + return false ; <nl> + } <nl> + <nl> } } / / namespace v8 : : internal <nl> mmm a / src / platform - openbsd . cc <nl> ppp b / src / platform - openbsd . cc <nl> double OS : : LocalTimeOffset ( ) { <nl> } <nl> <nl> <nl> + void * OS : : Allocate ( const size_t requested , <nl> + size_t * allocated , <nl> + bool is_executable ) { <nl> + const size_t msize = RoundUp ( requested , AllocateAlignment ( ) ) ; <nl> + int prot = PROT_READ | PROT_WRITE | ( is_executable ? PROT_EXEC : 0 ) ; <nl> + void * addr = OS : : GetRandomMmapAddr ( ) ; <nl> + void * mbase = mmap ( addr , msize , prot , MAP_PRIVATE | MAP_ANON , - 1 , 0 ) ; <nl> + if ( mbase = = MAP_FAILED ) { <nl> + LOG ( i : : Isolate : : Current ( ) , <nl> + StringEvent ( " OS : : Allocate " , " mmap failed " ) ) ; <nl> + return NULL ; <nl> + } <nl> + * allocated = msize ; <nl> + return mbase ; <nl> + } <nl> + <nl> + <nl> void OS : : DumpBacktrace ( ) { <nl> / / Currently unsupported . <nl> } <nl> int OS : : StackWalk ( Vector < OS : : StackFrame > frames ) { <nl> return frames_count ; <nl> } <nl> <nl> + <nl> + / / Constants used for mmap . 
<nl> + static const int kMmapFd = - 1 ; <nl> + static const int kMmapFdOffset = 0 ; <nl> + <nl> + <nl> + VirtualMemory : : VirtualMemory ( ) : address_ ( NULL ) , size_ ( 0 ) { } <nl> + <nl> + <nl> + VirtualMemory : : VirtualMemory ( size_t size ) <nl> + : address_ ( ReserveRegion ( size ) ) , size_ ( size ) { } <nl> + <nl> + <nl> + VirtualMemory : : VirtualMemory ( size_t size , size_t alignment ) <nl> + : address_ ( NULL ) , size_ ( 0 ) { <nl> + ASSERT ( IsAligned ( alignment , static_cast < intptr_t > ( OS : : AllocateAlignment ( ) ) ) ) ; <nl> + size_t request_size = RoundUp ( size + alignment , <nl> + static_cast < intptr_t > ( OS : : AllocateAlignment ( ) ) ) ; <nl> + void * reservation = mmap ( OS : : GetRandomMmapAddr ( ) , <nl> + request_size , <nl> + PROT_NONE , <nl> + MAP_PRIVATE | MAP_ANON | MAP_NORESERVE , <nl> + kMmapFd , <nl> + kMmapFdOffset ) ; <nl> + if ( reservation = = MAP_FAILED ) return ; <nl> + <nl> + Address base = static_cast < Address > ( reservation ) ; <nl> + Address aligned_base = RoundUp ( base , alignment ) ; <nl> + ASSERT_LE ( base , aligned_base ) ; <nl> + <nl> + / / Unmap extra memory reserved before and after the desired block . <nl> + if ( aligned_base ! = base ) { <nl> + size_t prefix_size = static_cast < size_t > ( aligned_base - base ) ; <nl> + OS : : Free ( base , prefix_size ) ; <nl> + request_size - = prefix_size ; <nl> + } <nl> + <nl> + size_t aligned_size = RoundUp ( size , OS : : AllocateAlignment ( ) ) ; <nl> + ASSERT_LE ( aligned_size , request_size ) ; <nl> + <nl> + if ( aligned_size ! = request_size ) { <nl> + size_t suffix_size = request_size - aligned_size ; <nl> + OS : : Free ( aligned_base + aligned_size , suffix_size ) ; <nl> + request_size - = suffix_size ; <nl> + } <nl> + <nl> + ASSERT ( aligned_size = = request_size ) ; <nl> + <nl> + address_ = static_cast < void * > ( aligned_base ) ; <nl> + size_ = aligned_size ; <nl> + } <nl> + <nl> + <nl> + VirtualMemory : : ~ VirtualMemory ( ) { <nl> + if ( IsReserved ( ) ) { <nl> + bool result = ReleaseRegion ( address ( ) , size ( ) ) ; <nl> + ASSERT ( result ) ; <nl> + USE ( result ) ; <nl> + } <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : IsReserved ( ) { <nl> + return address_ ! = NULL ; <nl> + } <nl> + <nl> + <nl> + void VirtualMemory : : Reset ( ) { <nl> + address_ = NULL ; <nl> + size_ = 0 ; <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : Commit ( void * address , size_t size , bool is_executable ) { <nl> + return CommitRegion ( address , size , is_executable ) ; <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : Uncommit ( void * address , size_t size ) { <nl> + return UncommitRegion ( address , size ) ; <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : Guard ( void * address ) { <nl> + OS : : Guard ( address , OS : : CommitPageSize ( ) ) ; <nl> + return true ; <nl> + } <nl> + <nl> + <nl> + void * VirtualMemory : : ReserveRegion ( size_t size ) { <nl> + void * result = mmap ( OS : : GetRandomMmapAddr ( ) , <nl> + size , <nl> + PROT_NONE , <nl> + MAP_PRIVATE | MAP_ANON | MAP_NORESERVE , <nl> + kMmapFd , <nl> + kMmapFdOffset ) ; <nl> + <nl> + if ( result = = MAP_FAILED ) return NULL ; <nl> + <nl> + return result ; <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : CommitRegion ( void * base , size_t size , bool is_executable ) { <nl> + int prot = PROT_READ | PROT_WRITE | ( is_executable ? 
PROT_EXEC : 0 ) ; <nl> + if ( MAP_FAILED = = mmap ( base , <nl> + size , <nl> + prot , <nl> + MAP_PRIVATE | MAP_ANON | MAP_FIXED , <nl> + kMmapFd , <nl> + kMmapFdOffset ) ) { <nl> + return false ; <nl> + } <nl> + return true ; <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : UncommitRegion ( void * base , size_t size ) { <nl> + return mmap ( base , <nl> + size , <nl> + PROT_NONE , <nl> + MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED , <nl> + kMmapFd , <nl> + kMmapFdOffset ) ! = MAP_FAILED ; <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : ReleaseRegion ( void * base , size_t size ) { <nl> + return munmap ( base , size ) = = 0 ; <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : HasLazyCommits ( ) { <nl> + / / TODO ( alph ) : implement for the platform . <nl> + return false ; <nl> + } <nl> + <nl> } } / / namespace v8 : : internal <nl> mmm a / src / platform - posix . cc <nl> ppp b / src / platform - posix . cc <nl> uint64_t OS : : CpuFeaturesImpliedByPlatform ( ) { <nl> } <nl> <nl> <nl> + / / Maximum size of the virtual memory . 0 means there is no artificial <nl> + / / limit . <nl> + <nl> + intptr_t OS : : MaxVirtualMemory ( ) { <nl> + struct rlimit limit ; <nl> + int result = getrlimit ( RLIMIT_DATA , & limit ) ; <nl> + if ( result ! = 0 ) return 0 ; <nl> + return limit . rlim_cur ; <nl> + } <nl> + <nl> + <nl> int OS : : ActivationFrameAlignment ( ) { <nl> # if V8_TARGET_ARCH_ARM <nl> / / On EABI ARM targets this is required for fp correctness in the <nl> int OS : : ActivationFrameAlignment ( ) { <nl> } <nl> <nl> <nl> + intptr_t OS : : CommitPageSize ( ) { <nl> + static intptr_t page_size = getpagesize ( ) ; <nl> + return page_size ; <nl> + } <nl> + <nl> + <nl> + void OS : : Free ( void * address , const size_t size ) { <nl> + / / TODO ( 1240712 ) : munmap has a return value which is ignored here . <nl> + int result = munmap ( address , size ) ; <nl> + USE ( result ) ; <nl> + ASSERT ( result = = 0 ) ; <nl> + } <nl> + <nl> + <nl> + / / Get rid of writable permission on code allocations . <nl> + void OS : : ProtectCode ( void * address , const size_t size ) { <nl> + # if defined ( __CYGWIN__ ) <nl> + DWORD old_protect ; <nl> + VirtualProtect ( address , size , PAGE_EXECUTE_READ , & old_protect ) ; <nl> + # elif defined ( __native_client__ ) <nl> + / / The Native Client port of V8 uses an interpreter , so <nl> + / / code pages don ' t need PROT_EXEC . <nl> + mprotect ( address , size , PROT_READ ) ; <nl> + # else <nl> + mprotect ( address , size , PROT_READ | PROT_EXEC ) ; <nl> + # endif <nl> + } <nl> + <nl> + <nl> + / / Create guard pages . <nl> + void OS : : Guard ( void * address , const size_t size ) { <nl> + # if defined ( __CYGWIN__ ) <nl> + DWORD oldprotect ; <nl> + VirtualProtect ( address , size , PAGE_NOACCESS , & oldprotect ) ; <nl> + # else <nl> + mprotect ( address , size , PROT_NONE ) ; <nl> + # endif <nl> + } <nl> + <nl> + <nl> + void * OS : : GetRandomMmapAddr ( ) { <nl> + # if defined ( __native_client__ ) <nl> + / / TODO ( bradchen ) : restore randomization once Native Client gets <nl> + / / smarter about using mmap address hints . <nl> + / / See http : / / code . google . com / p / nativeclient / issues / 3341 <nl> + return NULL ; <nl> + # endif <nl> + Isolate * isolate = Isolate : : UncheckedCurrent ( ) ; <nl> + / / Note that the current isolate isn ' t set up in a call path via <nl> + / / CpuFeatures : : Probe . We don ' t care about randomization in this case because <nl> + / / the code page is immediately freed . <nl> + if ( isolate ! 
= NULL ) { <nl> + uintptr_t raw_addr ; <nl> + isolate - > random_number_generator ( ) - > NextBytes ( & raw_addr , sizeof ( raw_addr ) ) ; <nl> + # if V8_TARGET_ARCH_X64 <nl> + / / Currently available CPUs have 48 bits of virtual addressing . Truncate <nl> + / / the hint address to 46 bits to give the kernel a fighting chance of <nl> + / / fulfilling our placement request . <nl> + raw_addr & = V8_UINT64_C ( 0x3ffffffff000 ) ; <nl> + # else <nl> + raw_addr & = 0x3ffff000 ; <nl> + <nl> + # ifdef __sun <nl> + / / For our Solaris / illumos mmap hint , we pick a random address in the bottom <nl> + / / half of the top half of the address space ( that is , the third quarter ) . <nl> + / / Because we do not MAP_FIXED , this will be treated only as a hint - - the <nl> + / / system will not fail to mmap ( ) because something else happens to already <nl> + / / be mapped at our random address . We deliberately set the hint high enough <nl> + / / to get well above the system ' s break ( that is , the heap ) ; Solaris and <nl> + / / illumos will try the hint and if that fails allocate as if there were <nl> + / / no hint at all . The high hint prevents the break from getting hemmed in <nl> + / / at low values , ceding half of the address space to the system heap . <nl> + raw_addr + = 0x80000000 ; <nl> + # else <nl> + / / The range 0x20000000 - 0x60000000 is relatively unpopulated across a <nl> + / / variety of ASLR modes ( PAE kernel , NX compat mode , etc ) and on macos <nl> + / / 10 . 6 and 10 . 7 . <nl> + raw_addr + = 0x20000000 ; <nl> + # endif <nl> + # endif <nl> + return reinterpret_cast < void * > ( raw_addr ) ; <nl> + } <nl> + return NULL ; <nl> + } <nl> + <nl> + <nl> + size_t OS : : AllocateAlignment ( ) { <nl> + return getpagesize ( ) ; <nl> + } <nl> + <nl> + <nl> void OS : : Sleep ( int milliseconds ) { <nl> useconds_t ms = static_cast < useconds_t > ( milliseconds ) ; <nl> usleep ( 1000 * ms ) ; <nl> mmm a / src / platform - solaris . cc <nl> ppp b / src / platform - solaris . cc <nl> double OS : : LocalTimeOffset ( ) { <nl> } <nl> <nl> <nl> + void * OS : : Allocate ( const size_t requested , <nl> + size_t * allocated , <nl> + bool is_executable ) { <nl> + const size_t msize = RoundUp ( requested , getpagesize ( ) ) ; <nl> + int prot = PROT_READ | PROT_WRITE | ( is_executable ? PROT_EXEC : 0 ) ; <nl> + void * mbase = mmap ( NULL , msize , prot , MAP_PRIVATE | MAP_ANON , - 1 , 0 ) ; <nl> + <nl> + if ( mbase = = MAP_FAILED ) { <nl> + LOG ( Isolate : : Current ( ) , StringEvent ( " OS : : Allocate " , " mmap failed " ) ) ; <nl> + return NULL ; <nl> + } <nl> + * allocated = msize ; <nl> + return mbase ; <nl> + } <nl> + <nl> + <nl> void OS : : DumpBacktrace ( ) { <nl> / / Currently unsupported . <nl> } <nl> int OS : : StackWalk ( Vector < OS : : StackFrame > frames ) { <nl> return walker . index ; <nl> } <nl> <nl> + <nl> + / / Constants used for mmap . 
<nl> + static const int kMmapFd = - 1 ; <nl> + static const int kMmapFdOffset = 0 ; <nl> + <nl> + <nl> + VirtualMemory : : VirtualMemory ( ) : address_ ( NULL ) , size_ ( 0 ) { } <nl> + <nl> + <nl> + VirtualMemory : : VirtualMemory ( size_t size ) <nl> + : address_ ( ReserveRegion ( size ) ) , size_ ( size ) { } <nl> + <nl> + <nl> + VirtualMemory : : VirtualMemory ( size_t size , size_t alignment ) <nl> + : address_ ( NULL ) , size_ ( 0 ) { <nl> + ASSERT ( IsAligned ( alignment , static_cast < intptr_t > ( OS : : AllocateAlignment ( ) ) ) ) ; <nl> + size_t request_size = RoundUp ( size + alignment , <nl> + static_cast < intptr_t > ( OS : : AllocateAlignment ( ) ) ) ; <nl> + void * reservation = mmap ( OS : : GetRandomMmapAddr ( ) , <nl> + request_size , <nl> + PROT_NONE , <nl> + MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE , <nl> + kMmapFd , <nl> + kMmapFdOffset ) ; <nl> + if ( reservation = = MAP_FAILED ) return ; <nl> + <nl> + Address base = static_cast < Address > ( reservation ) ; <nl> + Address aligned_base = RoundUp ( base , alignment ) ; <nl> + ASSERT_LE ( base , aligned_base ) ; <nl> + <nl> + / / Unmap extra memory reserved before and after the desired block . <nl> + if ( aligned_base ! = base ) { <nl> + size_t prefix_size = static_cast < size_t > ( aligned_base - base ) ; <nl> + OS : : Free ( base , prefix_size ) ; <nl> + request_size - = prefix_size ; <nl> + } <nl> + <nl> + size_t aligned_size = RoundUp ( size , OS : : AllocateAlignment ( ) ) ; <nl> + ASSERT_LE ( aligned_size , request_size ) ; <nl> + <nl> + if ( aligned_size ! = request_size ) { <nl> + size_t suffix_size = request_size - aligned_size ; <nl> + OS : : Free ( aligned_base + aligned_size , suffix_size ) ; <nl> + request_size - = suffix_size ; <nl> + } <nl> + <nl> + ASSERT ( aligned_size = = request_size ) ; <nl> + <nl> + address_ = static_cast < void * > ( aligned_base ) ; <nl> + size_ = aligned_size ; <nl> + } <nl> + <nl> + <nl> + VirtualMemory : : ~ VirtualMemory ( ) { <nl> + if ( IsReserved ( ) ) { <nl> + bool result = ReleaseRegion ( address ( ) , size ( ) ) ; <nl> + ASSERT ( result ) ; <nl> + USE ( result ) ; <nl> + } <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : IsReserved ( ) { <nl> + return address_ ! = NULL ; <nl> + } <nl> + <nl> + <nl> + void VirtualMemory : : Reset ( ) { <nl> + address_ = NULL ; <nl> + size_ = 0 ; <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : Commit ( void * address , size_t size , bool is_executable ) { <nl> + return CommitRegion ( address , size , is_executable ) ; <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : Uncommit ( void * address , size_t size ) { <nl> + return UncommitRegion ( address , size ) ; <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : Guard ( void * address ) { <nl> + OS : : Guard ( address , OS : : CommitPageSize ( ) ) ; <nl> + return true ; <nl> + } <nl> + <nl> + <nl> + void * VirtualMemory : : ReserveRegion ( size_t size ) { <nl> + void * result = mmap ( OS : : GetRandomMmapAddr ( ) , <nl> + size , <nl> + PROT_NONE , <nl> + MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE , <nl> + kMmapFd , <nl> + kMmapFdOffset ) ; <nl> + <nl> + if ( result = = MAP_FAILED ) return NULL ; <nl> + <nl> + return result ; <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : CommitRegion ( void * base , size_t size , bool is_executable ) { <nl> + int prot = PROT_READ | PROT_WRITE | ( is_executable ? 
PROT_EXEC : 0 ) ; <nl> + if ( MAP_FAILED = = mmap ( base , <nl> + size , <nl> + prot , <nl> + MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED , <nl> + kMmapFd , <nl> + kMmapFdOffset ) ) { <nl> + return false ; <nl> + } <nl> + return true ; <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : UncommitRegion ( void * base , size_t size ) { <nl> + return mmap ( base , <nl> + size , <nl> + PROT_NONE , <nl> + MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED , <nl> + kMmapFd , <nl> + kMmapFdOffset ) ! = MAP_FAILED ; <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : ReleaseRegion ( void * base , size_t size ) { <nl> + return munmap ( base , size ) = = 0 ; <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : HasLazyCommits ( ) { <nl> + / / TODO ( alph ) : implement for the platform . <nl> + return false ; <nl> + } <nl> + <nl> } } / / namespace v8 : : internal <nl> mmm a / src / platform - win32 . cc <nl> ppp b / src / platform - win32 . cc <nl> int strncasecmp ( const char * s1 , const char * s2 , int n ) { <nl> # define _TRUNCATE 0 <nl> # define STRUNCATE 80 <nl> <nl> + inline void MemoryBarrier ( ) { <nl> + int barrier = 0 ; <nl> + __asm__ __volatile__ ( " xchgl % % eax , % 0 " : " = r " ( barrier ) ) ; <nl> + } <nl> + <nl> # endif / / __MINGW64_VERSION_MAJOR <nl> <nl> <nl> int strncpy_s ( char * dest , size_t dest_size , const char * source , size_t count ) { <nl> namespace v8 { <nl> namespace internal { <nl> <nl> + intptr_t OS : : MaxVirtualMemory ( ) { <nl> + return 0 ; <nl> + } <nl> + <nl> + <nl> double ceiling ( double x ) { <nl> return ceil ( x ) ; <nl> } <nl> void OS : : StrNCpy ( Vector < char > dest , const char * src , size_t n ) { <nl> # undef STRUNCATE <nl> <nl> <nl> + / / Get the system ' s page size used by VirtualAlloc ( ) or the next power <nl> + / / of two . The reason for always returning a power of two is that the <nl> + / / rounding up in OS : : Allocate expects that . <nl> + static size_t GetPageSize ( ) { <nl> + static size_t page_size = 0 ; <nl> + if ( page_size = = 0 ) { <nl> + SYSTEM_INFO info ; <nl> + GetSystemInfo ( & info ) ; <nl> + page_size = RoundUpToPowerOf2 ( info . dwPageSize ) ; <nl> + } <nl> + return page_size ; <nl> + } <nl> + <nl> + <nl> + / / The allocation alignment is the guaranteed alignment for <nl> + / / VirtualAlloc ' ed blocks of memory . <nl> + size_t OS : : AllocateAlignment ( ) { <nl> + static size_t allocate_alignment = 0 ; <nl> + if ( allocate_alignment = = 0 ) { <nl> + SYSTEM_INFO info ; <nl> + GetSystemInfo ( & info ) ; <nl> + allocate_alignment = info . dwAllocationGranularity ; <nl> + } <nl> + return allocate_alignment ; <nl> + } <nl> + <nl> + <nl> + void * OS : : GetRandomMmapAddr ( ) { <nl> + Isolate * isolate = Isolate : : UncheckedCurrent ( ) ; <nl> + / / Note that the current isolate isn ' t set up in a call path via <nl> + / / CpuFeatures : : Probe . We don ' t care about randomization in this case because <nl> + / / the code page is immediately freed . <nl> + if ( isolate ! = NULL ) { <nl> + / / The address range used to randomize RWX allocations in OS : : Allocate <nl> + / / Try not to map pages into the default range that windows loads DLLs <nl> + / / Use a multiple of 64k to prevent committing unused memory . 
<nl> + / / Note : This does not guarantee RWX regions will be within the <nl> + / / range kAllocationRandomAddressMin to kAllocationRandomAddressMax <nl> + # ifdef V8_HOST_ARCH_64_BIT <nl> + static const intptr_t kAllocationRandomAddressMin = 0x0000000080000000 ; <nl> + static const intptr_t kAllocationRandomAddressMax = 0x000003FFFFFF0000 ; <nl> + # else <nl> + static const intptr_t kAllocationRandomAddressMin = 0x04000000 ; <nl> + static const intptr_t kAllocationRandomAddressMax = 0x3FFF0000 ; <nl> + # endif <nl> + uintptr_t address = <nl> + ( isolate - > random_number_generator ( ) - > NextInt ( ) < < kPageSizeBits ) | <nl> + kAllocationRandomAddressMin ; <nl> + address & = kAllocationRandomAddressMax ; <nl> + return reinterpret_cast < void * > ( address ) ; <nl> + } <nl> + return NULL ; <nl> + } <nl> + <nl> + <nl> + static void * RandomizedVirtualAlloc ( size_t size , int action , int protection ) { <nl> + LPVOID base = NULL ; <nl> + <nl> + if ( protection = = PAGE_EXECUTE_READWRITE | | protection = = PAGE_NOACCESS ) { <nl> + / / For executable pages try to randomize the allocation address . <nl> + for ( size_t attempts = 0 ; base = = NULL & & attempts < 3 ; + + attempts ) { <nl> + base = VirtualAlloc ( OS : : GetRandomMmapAddr ( ) , size , action , protection ) ; <nl> + } <nl> + } <nl> + <nl> + / / After three attempts give up and let the OS find an address to use . <nl> + if ( base = = NULL ) base = VirtualAlloc ( NULL , size , action , protection ) ; <nl> + <nl> + return base ; <nl> + } <nl> + <nl> + <nl> + void * OS : : Allocate ( const size_t requested , <nl> + size_t * allocated , <nl> + bool is_executable ) { <nl> + / / VirtualAlloc rounds allocated size to page size automatically . <nl> + size_t msize = RoundUp ( requested , static_cast < int > ( GetPageSize ( ) ) ) ; <nl> + <nl> + / / Windows XP SP2 allows Data Execution Prevention ( DEP ) . <nl> + int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE ; <nl> + <nl> + LPVOID mbase = RandomizedVirtualAlloc ( msize , <nl> + MEM_COMMIT | MEM_RESERVE , <nl> + prot ) ; <nl> + <nl> + if ( mbase = = NULL ) { <nl> + LOG ( Isolate : : Current ( ) , StringEvent ( " OS : : Allocate " , " VirtualAlloc failed " ) ) ; <nl> + return NULL ; <nl> + } <nl> + <nl> + ASSERT ( IsAligned ( reinterpret_cast < size_t > ( mbase ) , OS : : AllocateAlignment ( ) ) ) ; <nl> + <nl> + * allocated = msize ; <nl> + return mbase ; <nl> + } <nl> + <nl> + <nl> + void OS : : Free ( void * address , const size_t size ) { <nl> + / / TODO ( 1240712 ) : VirtualFree has a return value which is ignored here .
<nl> + VirtualFree ( address , 0 , MEM_RELEASE ) ; <nl> + USE ( size ) ; <nl> + } <nl> + <nl> + <nl> + intptr_t OS : : CommitPageSize ( ) { <nl> + return 4096 ; <nl> + } <nl> + <nl> + <nl> + void OS : : ProtectCode ( void * address , const size_t size ) { <nl> + DWORD old_protect ; <nl> + VirtualProtect ( address , size , PAGE_EXECUTE_READ , & old_protect ) ; <nl> + } <nl> + <nl> + <nl> + void OS : : Guard ( void * address , const size_t size ) { <nl> + DWORD oldprotect ; <nl> + VirtualProtect ( address , size , PAGE_NOACCESS , & oldprotect ) ; <nl> + } <nl> + <nl> + <nl> void OS : : Sleep ( int milliseconds ) { <nl> : : Sleep ( milliseconds ) ; <nl> } <nl> int OS : : ActivationFrameAlignment ( ) { <nl> } <nl> <nl> <nl> + VirtualMemory : : VirtualMemory ( ) : address_ ( NULL ) , size_ ( 0 ) { } <nl> + <nl> + <nl> + VirtualMemory : : VirtualMemory ( size_t size ) <nl> + : address_ ( ReserveRegion ( size ) ) , size_ ( size ) { } <nl> + <nl> + <nl> + VirtualMemory : : VirtualMemory ( size_t size , size_t alignment ) <nl> + : address_ ( NULL ) , size_ ( 0 ) { <nl> + ASSERT ( IsAligned ( alignment , static_cast < intptr_t > ( OS : : AllocateAlignment ( ) ) ) ) ; <nl> + size_t request_size = RoundUp ( size + alignment , <nl> + static_cast < intptr_t > ( OS : : AllocateAlignment ( ) ) ) ; <nl> + void * address = ReserveRegion ( request_size ) ; <nl> + if ( address = = NULL ) return ; <nl> + Address base = RoundUp ( static_cast < Address > ( address ) , alignment ) ; <nl> + / / Try reducing the size by freeing and then reallocating a specific area . <nl> + bool result = ReleaseRegion ( address , request_size ) ; <nl> + USE ( result ) ; <nl> + ASSERT ( result ) ; <nl> + address = VirtualAlloc ( base , size , MEM_RESERVE , PAGE_NOACCESS ) ; <nl> + if ( address ! = NULL ) { <nl> + request_size = size ; <nl> + ASSERT ( base = = static_cast < Address > ( address ) ) ; <nl> + } else { <nl> + / / Resizing failed , just go with a bigger area . <nl> + address = ReserveRegion ( request_size ) ; <nl> + if ( address = = NULL ) return ; <nl> + } <nl> + address_ = address ; <nl> + size_ = request_size ; <nl> + } <nl> + <nl> + <nl> + VirtualMemory : : ~ VirtualMemory ( ) { <nl> + if ( IsReserved ( ) ) { <nl> + bool result = ReleaseRegion ( address ( ) , size ( ) ) ; <nl> + ASSERT ( result ) ; <nl> + USE ( result ) ; <nl> + } <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : IsReserved ( ) { <nl> + return address_ ! = NULL ; <nl> + } <nl> + <nl> + <nl> + void VirtualMemory : : Reset ( ) { <nl> + address_ = NULL ; <nl> + size_ = 0 ; <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : Commit ( void * address , size_t size , bool is_executable ) { <nl> + return CommitRegion ( address , size , is_executable ) ; <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : Uncommit ( void * address , size_t size ) { <nl> + ASSERT ( IsReserved ( ) ) ; <nl> + return UncommitRegion ( address , size ) ; <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : Guard ( void * address ) { <nl> + if ( NULL = = VirtualAlloc ( address , <nl> + OS : : CommitPageSize ( ) , <nl> + MEM_COMMIT , <nl> + PAGE_NOACCESS ) ) { <nl> + return false ; <nl> + } <nl> + return true ; <nl> + } <nl> + <nl> + <nl> + void * VirtualMemory : : ReserveRegion ( size_t size ) { <nl> + return RandomizedVirtualAlloc ( size , MEM_RESERVE , PAGE_NOACCESS ) ; <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : CommitRegion ( void * base , size_t size , bool is_executable ) { <nl> + int prot = is_executable ? 
PAGE_EXECUTE_READWRITE : PAGE_READWRITE ; <nl> + if ( NULL = = VirtualAlloc ( base , size , MEM_COMMIT , prot ) ) { <nl> + return false ; <nl> + } <nl> + return true ; <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : UncommitRegion ( void * base , size_t size ) { <nl> + return VirtualFree ( base , size , MEM_DECOMMIT ) ! = 0 ; <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : ReleaseRegion ( void * base , size_t size ) { <nl> + return VirtualFree ( base , 0 , MEM_RELEASE ) ! = 0 ; <nl> + } <nl> + <nl> + <nl> + bool VirtualMemory : : HasLazyCommits ( ) { <nl> + / / TODO ( alph ) : implement for the platform . <nl> + return false ; <nl> + } <nl> + <nl> + <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> / / Win32 thread support . <nl> <nl> mmm a / src / platform . h <nl> ppp b / src / platform . h <nl> class OS { <nl> static void PrintError ( const char * format , . . . ) ; <nl> static void VPrintError ( const char * format , va_list args ) ; <nl> <nl> + / / Allocate / Free memory used by JS heap . Pages are readable / writable , but <nl> + / / they are not guaranteed to be executable unless ' executable ' is true . <nl> + / / Returns the address of allocated memory , or NULL if failed . <nl> + static void * Allocate ( const size_t requested , <nl> + size_t * allocated , <nl> + bool is_executable ) ; <nl> + static void Free ( void * address , const size_t size ) ; <nl> + <nl> + / / This is the granularity at which the ProtectCode ( . . . ) call can set page <nl> + / / permissions . <nl> + static intptr_t CommitPageSize ( ) ; <nl> + <nl> + / / Mark code segments non - writable . <nl> + static void ProtectCode ( void * address , const size_t size ) ; <nl> + <nl> + / / Assign memory as a guard page so that access will cause an exception . <nl> + static void Guard ( void * address , const size_t size ) ; <nl> + <nl> + / / Generate a random address to be used for hinting mmap ( ) . <nl> + static void * GetRandomMmapAddr ( ) ; <nl> + <nl> + / / Get the alignment guaranteed by Allocate ( ) . <nl> + static size_t AllocateAlignment ( ) ; <nl> + <nl> / / Sleep for a number of milliseconds . <nl> static void Sleep ( const int milliseconds ) ; <nl> <nl> class OS { <nl> / / positions indicated by the members of the CpuFeature enum from globals . h <nl> static uint64_t CpuFeaturesImpliedByPlatform ( ) ; <nl> <nl> + / / Maximum size of the virtual memory . 0 means there is no artificial <nl> + / / limit . <nl> + static intptr_t MaxVirtualMemory ( ) ; <nl> + <nl> / / Returns the double constant NAN <nl> static double nan_value ( ) ; <nl> <nl> class OS { <nl> DISALLOW_IMPLICIT_CONSTRUCTORS ( OS ) ; <nl> } ; <nl> <nl> + / / Represents and controls an area of reserved memory . <nl> + / / Control of the reserved memory can be assigned to another VirtualMemory <nl> + / / object by assignment or copy - constructing . This removes the reserved memory <nl> + / / from the original object . <nl> + class VirtualMemory { <nl> + public : <nl> + / / Empty VirtualMemory object , controlling no reserved memory . <nl> + VirtualMemory ( ) ; <nl> + <nl> + / / Reserves virtual memory with size . <nl> + explicit VirtualMemory ( size_t size ) ; <nl> + <nl> + / / Reserves virtual memory containing an area of the given size that <nl> + / / is aligned per alignment . This may not be at the position returned <nl> + / / by address ( ) .
<nl> + VirtualMemory ( size_t size , size_t alignment ) ; <nl> + <nl> + / / Releases the reserved memory , if any , controlled by this VirtualMemory <nl> + / / object . <nl> + ~ VirtualMemory ( ) ; <nl> + <nl> + / / Returns whether the memory has been reserved . <nl> + bool IsReserved ( ) ; <nl> + <nl> + / / Initialize or resets an embedded VirtualMemory object . <nl> + void Reset ( ) ; <nl> + <nl> + / / Returns the start address of the reserved memory . <nl> + / / If the memory was reserved with an alignment , this address is not <nl> + / / necessarily aligned . The user might need to round it up to a multiple of <nl> + / / the alignment to get the start of the aligned block . <nl> + void * address ( ) { <nl> + ASSERT ( IsReserved ( ) ) ; <nl> + return address_ ; <nl> + } <nl> + <nl> + / / Returns the size of the reserved memory . The returned value is only <nl> + / / meaningful when IsReserved ( ) returns true . <nl> + / / If the memory was reserved with an alignment , this size may be larger <nl> + / / than the requested size . <nl> + size_t size ( ) { return size_ ; } <nl> + <nl> + / / Commits real memory . Returns whether the operation succeeded . <nl> + bool Commit ( void * address , size_t size , bool is_executable ) ; <nl> + <nl> + / / Uncommit real memory . Returns whether the operation succeeded . <nl> + bool Uncommit ( void * address , size_t size ) ; <nl> + <nl> + / / Creates a single guard page at the given address . <nl> + bool Guard ( void * address ) ; <nl> + <nl> + void Release ( ) { <nl> + ASSERT ( IsReserved ( ) ) ; <nl> + / / Notice : Order is important here . The VirtualMemory object might live <nl> + / / inside the allocated region . <nl> + void * address = address_ ; <nl> + size_t size = size_ ; <nl> + Reset ( ) ; <nl> + bool result = ReleaseRegion ( address , size ) ; <nl> + USE ( result ) ; <nl> + ASSERT ( result ) ; <nl> + } <nl> + <nl> + / / Assign control of the reserved region to a different VirtualMemory object . <nl> + / / The old object is no longer functional ( IsReserved ( ) returns false ) . <nl> + void TakeControl ( VirtualMemory * from ) { <nl> + ASSERT ( ! IsReserved ( ) ) ; <nl> + address_ = from - > address_ ; <nl> + size_ = from - > size_ ; <nl> + from - > Reset ( ) ; <nl> + } <nl> + <nl> + static void * ReserveRegion ( size_t size ) ; <nl> + <nl> + static bool CommitRegion ( void * base , size_t size , bool is_executable ) ; <nl> + <nl> + static bool UncommitRegion ( void * base , size_t size ) ; <nl> + <nl> + / / Must be called with a base pointer that has been returned by ReserveRegion <nl> + / / and the same size it was reserved with . <nl> + static bool ReleaseRegion ( void * base , size_t size ) ; <nl> + <nl> + / / Returns true if OS performs lazy commits , i . e . the memory allocation call <nl> + / / defers actual physical memory allocation till the first memory access . <nl> + / / Otherwise returns false . <nl> + static bool HasLazyCommits ( ) ; <nl> + <nl> + private : <nl> + void * address_ ; / / Start address of the virtual memory . <nl> + size_t size_ ; / / Size of the virtual memory . <nl> + } ; <nl> + <nl> + <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> / / Thread <nl> / / <nl> mmm a / src / platform / elapsed - timer . h <nl> ppp b / src / platform / elapsed - timer . 
h <nl> class ElapsedTimer V8_FINAL BASE_EMBEDDED { <nl> } <nl> <nl> private : <nl> - static V8_INLINE TimeTicks Now ( ) { <nl> + V8_INLINE ( static TimeTicks Now ( ) ) { <nl> TimeTicks now = TimeTicks : : HighResNow ( ) ; <nl> ASSERT ( ! now . IsNull ( ) ) ; <nl> return now ; <nl> mmm a / src / platform / mutex . cc <nl> ppp b / src / platform / mutex . cc <nl> namespace internal { <nl> <nl> # if V8_OS_POSIX <nl> <nl> - static V8_INLINE void InitializeNativeHandle ( pthread_mutex_t * mutex ) { <nl> + static V8_INLINE ( void InitializeNativeHandle ( pthread_mutex_t * mutex ) ) { <nl> int result ; <nl> # if defined ( DEBUG ) <nl> / / Use an error checking mutex in debug mode . <nl> static V8_INLINE void InitializeNativeHandle ( pthread_mutex_t * mutex ) { <nl> } <nl> <nl> <nl> - static V8_INLINE void InitializeRecursiveNativeHandle ( pthread_mutex_t * mutex ) { <nl> + static V8_INLINE ( void InitializeRecursiveNativeHandle ( pthread_mutex_t * mutex ) ) { <nl> pthread_mutexattr_t attr ; <nl> int result = pthread_mutexattr_init ( & attr ) ; <nl> ASSERT_EQ ( 0 , result ) ; <nl> static V8_INLINE void InitializeRecursiveNativeHandle ( pthread_mutex_t * mutex ) { <nl> } <nl> <nl> <nl> - static V8_INLINE void DestroyNativeHandle ( pthread_mutex_t * mutex ) { <nl> + static V8_INLINE ( void DestroyNativeHandle ( pthread_mutex_t * mutex ) ) { <nl> int result = pthread_mutex_destroy ( mutex ) ; <nl> ASSERT_EQ ( 0 , result ) ; <nl> USE ( result ) ; <nl> } <nl> <nl> <nl> - static V8_INLINE void LockNativeHandle ( pthread_mutex_t * mutex ) { <nl> + static V8_INLINE ( void LockNativeHandle ( pthread_mutex_t * mutex ) ) { <nl> int result = pthread_mutex_lock ( mutex ) ; <nl> ASSERT_EQ ( 0 , result ) ; <nl> USE ( result ) ; <nl> } <nl> <nl> <nl> - static V8_INLINE void UnlockNativeHandle ( pthread_mutex_t * mutex ) { <nl> + static V8_INLINE ( void UnlockNativeHandle ( pthread_mutex_t * mutex ) ) { <nl> int result = pthread_mutex_unlock ( mutex ) ; <nl> ASSERT_EQ ( 0 , result ) ; <nl> USE ( result ) ; <nl> } <nl> <nl> <nl> - static V8_INLINE bool TryLockNativeHandle ( pthread_mutex_t * mutex ) { <nl> + static V8_INLINE ( bool TryLockNativeHandle ( pthread_mutex_t * mutex ) ) { <nl> int result = pthread_mutex_trylock ( mutex ) ; <nl> if ( result = = EBUSY ) { <nl> return false ; <nl> static V8_INLINE bool TryLockNativeHandle ( pthread_mutex_t * mutex ) { <nl> <nl> # elif V8_OS_WIN <nl> <nl> - static V8_INLINE void InitializeNativeHandle ( PCRITICAL_SECTION cs ) { <nl> + static V8_INLINE ( void InitializeNativeHandle ( PCRITICAL_SECTION cs ) ) { <nl> InitializeCriticalSection ( cs ) ; <nl> } <nl> <nl> <nl> - static V8_INLINE void InitializeRecursiveNativeHandle ( PCRITICAL_SECTION cs ) { <nl> + static V8_INLINE ( void InitializeRecursiveNativeHandle ( PCRITICAL_SECTION cs ) ) { <nl> InitializeCriticalSection ( cs ) ; <nl> } <nl> <nl> <nl> - static V8_INLINE void DestroyNativeHandle ( PCRITICAL_SECTION cs ) { <nl> + static V8_INLINE ( void DestroyNativeHandle ( PCRITICAL_SECTION cs ) ) { <nl> DeleteCriticalSection ( cs ) ; <nl> } <nl> <nl> <nl> - static V8_INLINE void LockNativeHandle ( PCRITICAL_SECTION cs ) { <nl> + static V8_INLINE ( void LockNativeHandle ( PCRITICAL_SECTION cs ) ) { <nl> EnterCriticalSection ( cs ) ; <nl> } <nl> <nl> <nl> - static V8_INLINE void UnlockNativeHandle ( PCRITICAL_SECTION cs ) { <nl> + static V8_INLINE ( void UnlockNativeHandle ( PCRITICAL_SECTION cs ) ) { <nl> LeaveCriticalSection ( cs ) ; <nl> } <nl> <nl> <nl> - static V8_INLINE bool TryLockNativeHandle ( PCRITICAL_SECTION cs ) 
{ <nl> + static V8_INLINE ( bool TryLockNativeHandle ( PCRITICAL_SECTION cs ) ) { <nl> return TryEnterCriticalSection ( cs ) ; <nl> } <nl> <nl> mmm a / src / platform / mutex . h <nl> ppp b / src / platform / mutex . h <nl> class Mutex V8_FINAL { <nl> int level_ ; <nl> # endif <nl> <nl> - V8_INLINE void AssertHeldAndUnmark ( ) { <nl> + V8_INLINE ( void AssertHeldAndUnmark ( ) ) { <nl> # ifdef DEBUG <nl> ASSERT_EQ ( 1 , level_ ) ; <nl> level_ - - ; <nl> # endif <nl> } <nl> <nl> - V8_INLINE void AssertUnheldAndMark ( ) { <nl> + V8_INLINE ( void AssertUnheldAndMark ( ) ) { <nl> # ifdef DEBUG <nl> ASSERT_EQ ( 0 , level_ ) ; <nl> level_ + + ; <nl> mmm a / src / platform / socket . h <nl> ppp b / src / platform / socket . h <nl> class Socket V8_FINAL { <nl> / / Set the value of the SO_REUSEADDR socket option . <nl> bool SetReuseAddress ( bool reuse_address ) ; <nl> <nl> - V8_INLINE bool IsValid ( ) const { <nl> + V8_INLINE ( bool IsValid ( ) ) const { <nl> return native_handle_ ! = kInvalidNativeHandle ; <nl> } <nl> <nl> deleted file mode 100644 <nl> index f72bc9054c3 . . 00000000000 <nl> mmm a / src / platform / virtual - memory . cc <nl> ppp / dev / null <nl> <nl> - / / Copyright 2013 the V8 project authors . All rights reserved . <nl> - / / Redistribution and use in source and binary forms , with or without <nl> - / / modification , are permitted provided that the following conditions are <nl> - / / met : <nl> - / / <nl> - / / * Redistributions of source code must retain the above copyright <nl> - / / notice , this list of conditions and the following disclaimer . <nl> - / / * Redistributions in binary form must reproduce the above <nl> - / / copyright notice , this list of conditions and the following <nl> - / / disclaimer in the documentation and / or other materials provided <nl> - / / with the distribution . <nl> - / / * Neither the name of Google Inc . nor the names of its <nl> - / / contributors may be used to endorse or promote products derived <nl> - / / from this software without specific prior written permission . <nl> - / / <nl> - / / THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS <nl> - / / " AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT <nl> - / / LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR <nl> - / / A PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT <nl> - / / OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL , <nl> - / / SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT <nl> - / / LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , <nl> - / / DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY <nl> - / / THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT <nl> - / / ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> - / / OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> - <nl> - # include " platform / virtual - memory . h " <nl> - <nl> - # if V8_OS_POSIX <nl> - # include < sys / types . h > <nl> - # include < sys / mman . h > <nl> - # include < sys / time . h > <nl> - # include < sys / resource . h > <nl> - <nl> - # include < unistd . h > <nl> - # endif <nl> - <nl> - # if V8_OS_MACOSX <nl> - # include < mach / vm_statistics . h > <nl> - # endif <nl> - <nl> - # include < cerrno > <nl> - <nl> - # include " platform / mutex . h " <nl> - # include " utils . h " <nl> - # include " utils / random - number - generator . 
h " <nl> - # if V8_OS_CYGIN | | V8_OS_WIN <nl> - # include " win32 - headers . h " <nl> - # endif <nl> - <nl> - namespace v8 { <nl> - namespace internal { <nl> - <nl> - class RandomAddressGenerator V8_FINAL { <nl> - public : <nl> - V8_INLINE uintptr_t NextAddress ( ) { <nl> - LockGuard < Mutex > lock_guard ( & mutex_ ) ; <nl> - uintptr_t address = rng_ . NextInt ( ) ; <nl> - # if V8_HOST_ARCH_64_BIT <nl> - address = ( address < < 32 ) + static_cast < uintptr_t > ( rng_ . NextInt ( ) ) ; <nl> - # endif <nl> - return address ; <nl> - } <nl> - <nl> - private : <nl> - Mutex mutex_ ; <nl> - RandomNumberGenerator rng_ ; <nl> - } ; <nl> - <nl> - typedef LazyInstance < RandomAddressGenerator , <nl> - DefaultConstructTrait < RandomAddressGenerator > , <nl> - ThreadSafeInitOnceTrait > : : type LazyRandomAddressGenerator ; <nl> - <nl> - # define LAZY_RANDOM_ADDRESS_GENERATOR_INITIALIZER LAZY_INSTANCE_INITIALIZER <nl> - <nl> - <nl> - static V8_INLINE void * GenerateRandomAddress ( ) { <nl> - # if V8_OS_NACL <nl> - / / TODO ( bradchen ) : Restore randomization once Native Client gets smarter <nl> - / / about using mmap address hints . <nl> - / / See http : / / code . google . com / p / nativeclient / issues / 3341 <nl> - return NULL ; <nl> - # else / / V8_OS_NACL <nl> - LazyRandomAddressGenerator random_address_generator = <nl> - LAZY_RANDOM_ADDRESS_GENERATOR_INITIALIZER ; <nl> - uintptr_t address = random_address_generator . Pointer ( ) - > NextAddress ( ) ; <nl> - <nl> - # if V8_TARGET_ARCH_X64 <nl> - # if V8_OS_CYGWIN | | V8_OS_WIN <nl> - / / Try not to map pages into the default range that windows loads DLLs . <nl> - / / Use a multiple of 64KiB to prevent committing unused memory . <nl> - address + = V8_UINT64_C ( 0x00080000000 ) ; <nl> - address & = V8_UINT64_C ( 0x3ffffff0000 ) ; <nl> - # else / / V8_OS_CYGWIN | | V8_OS_WIN <nl> - / / Currently available CPUs have 48 bits of virtual addressing . Truncate <nl> - / / the hint address to 46 bits to give the kernel a fighting chance of <nl> - / / fulfilling our placement request . <nl> - address & = V8_UINT64_C ( 0x3ffffffff000 ) ; <nl> - # endif / / V8_OS_CYGWIN | | V8_OS_WIN <nl> - # else / / V8_TARGET_ARCH_X64 <nl> - # if V8_OS_CYGWIN | | V8_OS_WIN <nl> - / / Try not to map pages into the default range that windows loads DLLs . <nl> - / / Use a multiple of 64KiB to prevent committing unused memory . <nl> - address + = 0x04000000 ; <nl> - address & = 0x3fff0000 ; <nl> - # elif V8_OS_SOLARIS <nl> - / / For our Solaris / illumos mmap hint , we pick a random address in the bottom <nl> - / / half of the top half of the address space ( that is , the third quarter ) . <nl> - / / Because we do not MAP_FIXED , this will be treated only as a hint - - the <nl> - / / system will not fail to mmap ( ) because something else happens to already <nl> - / / be mapped at our random address . We deliberately set the hint high enough <nl> - / / to get well above the system ' s break ( that is , the heap ) ; Solaris and <nl> - / / illumos will try the hint and if that fails allocate as if there were <nl> - / / no hint at all . The high hint prevents the break from getting hemmed in <nl> - / / at low values , ceding half of the address space to the system heap . <nl> - address & = 0x3ffff000 ; <nl> - address + = 0x80000000 ; <nl> - # else / / V8_OS_CYGWIN | | V8_OS_WIN <nl> - / / The range 0x20000000 - 0x60000000 is relatively unpopulated across a <nl> - / / variety of ASLR modes ( PAE kernel , NX compat mode , etc ) and on Mac OS X <nl> - / / 10 . 6 and 10 . 7 . 
<nl> - address & = 0x3ffff000 ; <nl> - address + = 0x20000000 ; <nl> - # endif / / V8_OS_CYGIN | | V8_OS_WIN <nl> - # endif / / V8_TARGET_ARCH_X64 <nl> - return reinterpret_cast < void * > ( address ) ; <nl> - # endif / / V8_OS_NACL <nl> - } <nl> - <nl> - <nl> - / / static <nl> - void * VirtualMemory : : AllocateRegion ( size_t size , <nl> - size_t * size_return , <nl> - Executability executability ) { <nl> - ASSERT_LT ( 0 , size ) ; <nl> - ASSERT_NE ( NULL , size_return ) ; <nl> - void * address = ReserveRegion ( size , & size ) ; <nl> - if ( address = = NULL ) return NULL ; <nl> - if ( ! CommitRegion ( address , size , executability ) ) { <nl> - bool result = ReleaseRegion ( address , size ) ; <nl> - ASSERT ( result ) ; <nl> - USE ( result ) ; <nl> - return NULL ; <nl> - } <nl> - * size_return = size ; <nl> - return address ; <nl> - } <nl> - <nl> - # if V8_OS_CYGWIN | | V8_OS_WIN <nl> - <nl> - / / static <nl> - void * VirtualMemory : : ReserveRegion ( size_t size , size_t * size_return ) { <nl> - ASSERT_LT ( 0 , size ) ; <nl> - ASSERT_NE ( NULL , size_return ) ; <nl> - / / The minimum size that can be reserved is 64KiB , see <nl> - / / http : / / msdn . microsoft . com / en - us / library / ms810627 . aspx <nl> - if ( size < 64 * KB ) { <nl> - size = 64 * KB ; <nl> - } <nl> - size = RoundUp ( size , GetPageSize ( ) ) ; <nl> - LPVOID address = NULL ; <nl> - / / Try and randomize the allocation address ( up to three attempts ) . <nl> - for ( unsigned attempts = 0 ; address = = NULL & & attempts < 3 ; + + attempts ) { <nl> - address = VirtualAlloc ( GenerateRandomAddress ( ) , <nl> - size , <nl> - MEM_RESERVE , <nl> - PAGE_NOACCESS ) ; <nl> - } <nl> - if ( address = = NULL ) { <nl> - / / After three attempts give up and let the kernel find an address . <nl> - address = VirtualAlloc ( NULL , size , MEM_RESERVE , PAGE_NOACCESS ) ; <nl> - } <nl> - if ( address = = NULL ) { <nl> - return NULL ; <nl> - } <nl> - ASSERT ( IsAligned ( reinterpret_cast < uintptr_t > ( address ) , <nl> - GetAllocationGranularity ( ) ) ) ; <nl> - * size_return = size ; <nl> - return address ; <nl> - } <nl> - <nl> - <nl> - / / static <nl> - void * VirtualMemory : : ReserveRegion ( size_t size , <nl> - size_t * size_return , <nl> - size_t alignment ) { <nl> - ASSERT_LT ( 0 , size ) ; <nl> - ASSERT_NE ( NULL , size_return ) ; <nl> - ASSERT ( IsAligned ( alignment , GetAllocationGranularity ( ) ) ) ; <nl> - <nl> - size_t reserved_size = RoundUp ( size + alignment , GetAllocationGranularity ( ) ) ; <nl> - Address reserved_base = static_cast < Address > ( <nl> - ReserveRegion ( reserved_size , & reserved_size ) ) ; <nl> - if ( reserved_base = = NULL ) { <nl> - return NULL ; <nl> - } <nl> - ASSERT_LE ( size , reserved_size ) ; <nl> - ASSERT_LE ( size + alignment , reserved_size ) ; <nl> - ASSERT ( IsAligned ( reserved_size , GetPageSize ( ) ) ) ; <nl> - <nl> - / / Try reducing the size by freeing and then reallocating a specific area . <nl> - bool result = ReleaseRegion ( reserved_base , reserved_size ) ; <nl> - USE ( result ) ; <nl> - ASSERT ( result ) ; <nl> - size_t aligned_size = RoundUp ( size , GetPageSize ( ) ) ; <nl> - Address aligned_base = static_cast < Address > ( <nl> - VirtualAlloc ( RoundUp ( reserved_base , alignment ) , <nl> - aligned_size , <nl> - MEM_RESERVE , <nl> - PAGE_NOACCESS ) ) ; <nl> - if ( aligned_base ! 
= NULL ) { <nl> - ASSERT ( aligned_base = = RoundUp ( reserved_base , alignment ) ) ; <nl> - ASSERT ( IsAligned ( reinterpret_cast < uintptr_t > ( aligned_base ) , <nl> - GetAllocationGranularity ( ) ) ) ; <nl> - ASSERT ( IsAligned ( aligned_size , GetPageSize ( ) ) ) ; <nl> - * size_return = aligned_size ; <nl> - return aligned_base ; <nl> - } <nl> - <nl> - / / Resizing failed , just go with a bigger area . <nl> - ASSERT ( IsAligned ( reserved_size , GetAllocationGranularity ( ) ) ) ; <nl> - return ReserveRegion ( reserved_size , size_return ) ; <nl> - } <nl> - <nl> - <nl> - / / static <nl> - bool VirtualMemory : : CommitRegion ( void * address , <nl> - size_t size , <nl> - Executability executability ) { <nl> - ASSERT_NE ( NULL , address ) ; <nl> - ASSERT_LT ( 0 , size ) ; <nl> - DWORD protect = 0 ; <nl> - switch ( executability ) { <nl> - case NOT_EXECUTABLE : <nl> - protect = PAGE_READWRITE ; <nl> - break ; <nl> - <nl> - case EXECUTABLE : <nl> - protect = PAGE_EXECUTE_READWRITE ; <nl> - break ; <nl> - } <nl> - LPVOID result = VirtualAlloc ( address , size , MEM_COMMIT , protect ) ; <nl> - if ( result = = NULL ) { <nl> - ASSERT ( GetLastError ( ) ! = ERROR_INVALID_ADDRESS ) ; <nl> - return false ; <nl> - } <nl> - ASSERT_EQ ( address , result ) ; <nl> - return true ; <nl> - } <nl> - <nl> - <nl> - / / static <nl> - bool VirtualMemory : : UncommitRegion ( void * address , size_t size ) { <nl> - ASSERT_NE ( NULL , address ) ; <nl> - ASSERT_LT ( 0 , size ) ; <nl> - int result = VirtualFree ( address , size , MEM_DECOMMIT ) ; <nl> - if ( result = = 0 ) { <nl> - return false ; <nl> - } <nl> - return true ; <nl> - } <nl> - <nl> - <nl> - / / static <nl> - bool VirtualMemory : : WriteProtectRegion ( void * address , size_t size ) { <nl> - ASSERT_NE ( NULL , address ) ; <nl> - ASSERT_LT ( 0 , size ) ; <nl> - DWORD old_protect ; <nl> - return VirtualProtect ( address , size , PAGE_EXECUTE_READ , & old_protect ) ; <nl> - } <nl> - <nl> - <nl> - / / static <nl> - bool VirtualMemory : : ReleaseRegion ( void * address , size_t size ) { <nl> - ASSERT_NE ( NULL , address ) ; <nl> - ASSERT_LT ( 0 , size ) ; <nl> - USE ( size ) ; <nl> - int result = VirtualFree ( address , 0 , MEM_RELEASE ) ; <nl> - if ( result = = 0 ) { <nl> - return false ; <nl> - } <nl> - return true ; <nl> - } <nl> - <nl> - <nl> - / / static <nl> - size_t VirtualMemory : : GetAllocationGranularity ( ) { <nl> - static size_t allocation_granularity = 0 ; <nl> - if ( allocation_granularity = = 0 ) { <nl> - SYSTEM_INFO system_info ; <nl> - GetSystemInfo ( & system_info ) ; <nl> - allocation_granularity = system_info . dwAllocationGranularity ; <nl> - MemoryBarrier ( ) ; <nl> - } <nl> - ASSERT_GE ( allocation_granularity , GetPageSize ( ) ) ; <nl> - return allocation_granularity ; <nl> - } <nl> - <nl> - <nl> - / / static <nl> - size_t VirtualMemory : : GetLimit ( ) { <nl> - return 0 ; <nl> - } <nl> - <nl> - <nl> - / / static <nl> - size_t VirtualMemory : : GetPageSize ( ) { <nl> - static size_t page_size = 0 ; <nl> - if ( page_size = = 0 ) { <nl> - SYSTEM_INFO system_info ; <nl> - GetSystemInfo ( & system_info ) ; <nl> - page_size = system_info . dwPageSize ; <nl> - MemoryBarrier ( ) ; <nl> - } <nl> - return page_size ; <nl> - } <nl> - <nl> - <nl> - # else / / V8_OS_CYGIN | | V8_OS_WIN <nl> - <nl> - <nl> - / / Constants used for mmap . 
<nl> - # if V8_OS_MACOSX <nl> - / / kMmapFd is used to pass vm_alloc flags to tag the region with the user <nl> - / / defined tag 255 This helps identify V8 - allocated regions in memory analysis <nl> - / / tools like vmmap ( 1 ) . <nl> - static const int kMmapFd = VM_MAKE_TAG ( 255 ) ; <nl> - # else <nl> - static const int kMmapFd = - 1 ; <nl> - # endif / / V8_OS_MACOSX <nl> - static const off_t kMmapFdOffset = 0 ; <nl> - <nl> - <nl> - / / static <nl> - void * VirtualMemory : : ReserveRegion ( size_t size , size_t * size_return ) { <nl> - ASSERT_LT ( 0 , size ) ; <nl> - ASSERT_NE ( NULL , size_return ) ; <nl> - <nl> - size = RoundUp ( size , GetPageSize ( ) ) ; <nl> - void * address = mmap ( GenerateRandomAddress ( ) , <nl> - size , <nl> - PROT_NONE , <nl> - MAP_ANON | MAP_NORESERVE | MAP_PRIVATE , <nl> - kMmapFd , <nl> - kMmapFdOffset ) ; <nl> - if ( address = = MAP_FAILED ) { <nl> - ASSERT_NE ( EINVAL , errno ) ; <nl> - return NULL ; <nl> - } <nl> - * size_return = size ; <nl> - return address ; <nl> - } <nl> - <nl> - <nl> - / / static <nl> - void * VirtualMemory : : ReserveRegion ( size_t size , <nl> - size_t * size_return , <nl> - size_t alignment ) { <nl> - ASSERT_LT ( 0 , size ) ; <nl> - ASSERT_NE ( NULL , size_return ) ; <nl> - ASSERT ( IsAligned ( alignment , GetPageSize ( ) ) ) ; <nl> - <nl> - size_t reserved_size ; <nl> - Address reserved_base = static_cast < Address > ( <nl> - ReserveRegion ( size + alignment , & reserved_size ) ) ; <nl> - if ( reserved_base = = NULL ) { <nl> - return NULL ; <nl> - } <nl> - <nl> - Address aligned_base = RoundUp ( reserved_base , alignment ) ; <nl> - ASSERT_LE ( reserved_base , aligned_base ) ; <nl> - <nl> - / / Unmap extra memory reserved before the aligned region . <nl> - if ( aligned_base ! = reserved_base ) { <nl> - size_t prefix_size = static_cast < size_t > ( aligned_base - reserved_base ) ; <nl> - bool result = ReleaseRegion ( reserved_base , prefix_size ) ; <nl> - ASSERT ( result ) ; <nl> - USE ( result ) ; <nl> - reserved_size - = prefix_size ; <nl> - } <nl> - <nl> - size_t aligned_size = RoundUp ( size , GetPageSize ( ) ) ; <nl> - ASSERT_LE ( aligned_size , reserved_size ) ; <nl> - <nl> - / / Unmap extra memory reserved after the aligned region . <nl> - if ( aligned_size ! = reserved_size ) { <nl> - size_t suffix_size = reserved_size - aligned_size ; <nl> - bool result = ReleaseRegion ( aligned_base + aligned_size , suffix_size ) ; <nl> - ASSERT ( result ) ; <nl> - USE ( result ) ; <nl> - reserved_size - = suffix_size ; <nl> - } <nl> - <nl> - ASSERT ( aligned_size = = reserved_size ) ; <nl> - ASSERT_NE ( NULL , aligned_base ) ; <nl> - <nl> - * size_return = aligned_size ; <nl> - return aligned_base ; <nl> - } <nl> - <nl> - <nl> - / / static <nl> - bool VirtualMemory : : CommitRegion ( void * address , <nl> - size_t size , <nl> - Executability executability ) { <nl> - ASSERT_NE ( NULL , address ) ; <nl> - ASSERT_LT ( 0 , size ) ; <nl> - int prot = 0 ; <nl> - / / The Native Client port of V8 uses an interpreter , <nl> - / / so code pages don ' t need PROT_EXEC . 
<nl> - # if V8_OS_NACL <nl> - executability = NOT_EXECUTABLE ; <nl> - # endif <nl> - switch ( executability ) { <nl> - case NOT_EXECUTABLE : <nl> - prot = PROT_READ | PROT_WRITE ; <nl> - break ; <nl> - <nl> - case EXECUTABLE : <nl> - prot = PROT_EXEC | PROT_READ | PROT_WRITE ; <nl> - break ; <nl> - } <nl> - void * result = mmap ( address , <nl> - size , <nl> - prot , <nl> - MAP_ANON | MAP_FIXED | MAP_PRIVATE , <nl> - kMmapFd , <nl> - kMmapFdOffset ) ; <nl> - if ( result = = MAP_FAILED ) { <nl> - ASSERT_NE ( EINVAL , errno ) ; <nl> - return false ; <nl> - } <nl> - return true ; <nl> - } <nl> - <nl> - <nl> - / / static <nl> - bool VirtualMemory : : UncommitRegion ( void * address , size_t size ) { <nl> - ASSERT_NE ( NULL , address ) ; <nl> - ASSERT_LT ( 0 , size ) ; <nl> - void * result = mmap ( address , <nl> - size , <nl> - PROT_NONE , <nl> - MAP_ANON | MAP_FIXED | MAP_NORESERVE | MAP_PRIVATE , <nl> - kMmapFd , <nl> - kMmapFdOffset ) ; <nl> - if ( result = = MAP_FAILED ) { <nl> - ASSERT_NE ( EINVAL , errno ) ; <nl> - return false ; <nl> - } <nl> - return true ; <nl> - } <nl> - <nl> - <nl> - / / static <nl> - bool VirtualMemory : : WriteProtectRegion ( void * address , size_t size ) { <nl> - ASSERT_NE ( NULL , address ) ; <nl> - ASSERT_LT ( 0 , size ) ; <nl> - # if V8_OS_NACL <nl> - / / The Native Client port of V8 uses an interpreter , <nl> - / / so code pages don ' t need PROT_EXEC . <nl> - int prot = PROT_READ ; <nl> - # else <nl> - int prot = PROT_EXEC | PROT_READ ; <nl> - # endif <nl> - int result = mprotect ( address , size , prot ) ; <nl> - if ( result < 0 ) { <nl> - ASSERT_NE ( EINVAL , errno ) ; <nl> - return false ; <nl> - } <nl> - return true ; <nl> - } <nl> - <nl> - <nl> - / / static <nl> - bool VirtualMemory : : ReleaseRegion ( void * address , size_t size ) { <nl> - ASSERT_NE ( NULL , address ) ; <nl> - ASSERT_LT ( 0 , size ) ; <nl> - int result = munmap ( address , size ) ; <nl> - if ( result < 0 ) { <nl> - ASSERT_NE ( EINVAL , errno ) ; <nl> - return false ; <nl> - } <nl> - return true ; <nl> - } <nl> - <nl> - <nl> - / / static <nl> - size_t VirtualMemory : : GetAllocationGranularity ( ) { <nl> - return GetPageSize ( ) ; <nl> - } <nl> - <nl> - <nl> - / / static <nl> - size_t VirtualMemory : : GetLimit ( ) { <nl> - struct rlimit rlim ; <nl> - int result = getrlimit ( RLIMIT_DATA , & rlim ) ; <nl> - ASSERT_EQ ( 0 , result ) ; <nl> - USE ( result ) ; <nl> - return rlim . rlim_cur ; <nl> - } <nl> - <nl> - <nl> - / / static <nl> - size_t VirtualMemory : : GetPageSize ( ) { <nl> - static const size_t kPageSize = getpagesize ( ) ; <nl> - return kPageSize ; <nl> - } <nl> - <nl> - # endif / / V8_OS_CYGWIN | | V8_OS_WIN <nl> - <nl> - } } / / namespace v8 : : internal <nl> deleted file mode 100644 <nl> index 9a62c32f70e . . 00000000000 <nl> mmm a / src / platform / virtual - memory . h <nl> ppp / dev / null <nl> <nl> - / / Copyright 2013 the V8 project authors . All rights reserved . <nl> - / / Redistribution and use in source and binary forms , with or without <nl> - / / modification , are permitted provided that the following conditions are <nl> - / / met : <nl> - / / <nl> - / / * Redistributions of source code must retain the above copyright <nl> - / / notice , this list of conditions and the following disclaimer . <nl> - / / * Redistributions in binary form must reproduce the above <nl> - / / copyright notice , this list of conditions and the following <nl> - / / disclaimer in the documentation and / or other materials provided <nl> - / / with the distribution . 
<nl> - / / * Neither the name of Google Inc . nor the names of its <nl> - / / contributors may be used to endorse or promote products derived <nl> - / / from this software without specific prior written permission . <nl> - / / <nl> - / / THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS <nl> - / / " AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT <nl> - / / LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR <nl> - / / A PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT <nl> - / / OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL , <nl> - / / SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT <nl> - / / LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , <nl> - / / DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY <nl> - / / THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT <nl> - / / ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> - / / OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> - <nl> - # ifndef V8_PLATFORM_VIRTUAL_MEMORY_H_ <nl> - # define V8_PLATFORM_VIRTUAL_MEMORY_H_ <nl> - <nl> - # include " checks . h " <nl> - # include " globals . h " <nl> - <nl> - namespace v8 { <nl> - namespace internal { <nl> - <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / VirtualMemory <nl> - / / <nl> - / / This class represents and controls an area of reserved memory . <nl> - / / Control of the reserved memory can be assigned to another VirtualMemory <nl> - / / object by assignment or copy - constructing . This removes the reserved memory <nl> - / / from the original object . <nl> - class VirtualMemory V8_FINAL { <nl> - public : <nl> - / / The executability of a memory region . <nl> - enum Executability { NOT_EXECUTABLE , EXECUTABLE } ; <nl> - <nl> - / / Empty VirtualMemory object , controlling no reserved memory . <nl> - VirtualMemory ( ) : address_ ( NULL ) , size_ ( 0 ) { } <nl> - <nl> - / / Reserves virtual memory with size . <nl> - explicit VirtualMemory ( size_t size ) : size_ ( 0 ) { <nl> - address_ = ReserveRegion ( size , & size_ ) ; <nl> - } <nl> - <nl> - / / Reserves virtual memory containing an area of the given size that <nl> - / / is aligned per alignment . This may not be at the position returned <nl> - / / by address ( ) . <nl> - VirtualMemory ( size_t size , size_t alignment ) : size_ ( 0 ) { <nl> - address_ = ReserveRegion ( size , & size_ , alignment ) ; <nl> - } <nl> - <nl> - / / Releases the reserved memory , if any , controlled by this VirtualMemory <nl> - / / object . <nl> - ~ VirtualMemory ( ) { <nl> - if ( IsReserved ( ) ) { <nl> - bool result = ReleaseRegion ( address_ , size_ ) ; <nl> - ASSERT ( result ) ; <nl> - USE ( result ) ; <nl> - } <nl> - } <nl> - <nl> - / / Returns whether the memory contains the specified address . <nl> - bool Contains ( const void * address ) const V8_WARN_UNUSED_RESULT { <nl> - if ( ! IsReserved ( ) ) return false ; <nl> - if ( address < address_ ) return false ; <nl> - if ( address > = reinterpret_cast < uint8_t * > ( address_ ) + size_ ) return false ; <nl> - return true ; <nl> - } <nl> - <nl> - / / Returns whether the memory has been reserved . <nl> - bool IsReserved ( ) const V8_WARN_UNUSED_RESULT { <nl> - return address_ ! = NULL ; <nl> - } <nl> - <nl> - / / Initialize or resets an embedded VirtualMemory object . 
<nl> - void Reset ( ) { <nl> - address_ = NULL ; <nl> - size_ = 0 ; <nl> - } <nl> - <nl> - / / Returns the start address of the reserved memory . The returned value is <nl> - / / only meaningful if | IsReserved ( ) | returns true . <nl> - / / If the memory was reserved with an alignment , this address is not <nl> - / / necessarily aligned . The user might need to round it up to a multiple of <nl> - / / the alignment to get the start of the aligned block . <nl> - void * address ( ) const V8_WARN_UNUSED_RESULT { return address_ ; } <nl> - <nl> - / / Returns the size of the reserved memory . The returned value is only <nl> - / / meaningful when | IsReserved ( ) | returns true . <nl> - / / If the memory was reserved with an alignment , this size may be larger <nl> - / / than the requested size . <nl> - size_t size ( ) const V8_WARN_UNUSED_RESULT { return size_ ; } <nl> - <nl> - / / Commits real memory . Returns whether the operation succeeded . <nl> - bool Commit ( void * address , <nl> - size_t size , <nl> - Executability executability ) V8_WARN_UNUSED_RESULT { <nl> - ASSERT ( IsReserved ( ) ) ; <nl> - ASSERT ( Contains ( address ) ) ; <nl> - ASSERT ( Contains ( reinterpret_cast < uint8_t * > ( address ) + size - 1 ) ) ; <nl> - return CommitRegion ( address , size , executability ) ; <nl> - } <nl> - <nl> - / / Uncommit real memory . Returns whether the operation succeeded . <nl> - bool Uncommit ( void * address , size_t size ) V8_WARN_UNUSED_RESULT { <nl> - ASSERT ( IsReserved ( ) ) ; <nl> - ASSERT ( Contains ( address ) ) ; <nl> - ASSERT ( Contains ( reinterpret_cast < uint8_t * > ( address ) + size - 1 ) ) ; <nl> - return UncommitRegion ( address , size ) ; <nl> - } <nl> - <nl> - / / Creates guard pages at the given address . <nl> - bool Guard ( void * address , size_t size ) V8_WARN_UNUSED_RESULT { <nl> - / / We can simply uncommit the specified pages . Any access <nl> - / / to them will cause a processor exception . <nl> - return Uncommit ( address , size ) ; <nl> - } <nl> - <nl> - void Release ( ) { <nl> - ASSERT ( IsReserved ( ) ) ; <nl> - / / WARNING : Order is important here . The VirtualMemory <nl> - / / object might live inside the allocated region . <nl> - void * address = address_ ; <nl> - size_t size = size_ ; <nl> - Reset ( ) ; <nl> - bool result = ReleaseRegion ( address , size ) ; <nl> - USE ( result ) ; <nl> - ASSERT ( result ) ; <nl> - } <nl> - <nl> - / / Assign control of the reserved region to a different VirtualMemory object . <nl> - / / The old object is no longer functional ( IsReserved ( ) returns false ) . <nl> - void TakeControl ( VirtualMemory * from ) { <nl> - ASSERT ( ! IsReserved ( ) ) ; <nl> - address_ = from - > address_ ; <nl> - size_ = from - > size_ ; <nl> - from - > Reset ( ) ; <nl> - } <nl> - <nl> - / / Allocates a region of memory pages . The pages are readable / writable , <nl> - / / but are not guaranteed to be executable unless explicitly requested . <nl> - / / Returns the base address of the allocated memory region , or NULL in <nl> - / / case of an error . 
<nl> - static void * AllocateRegion ( size_t size , <nl> - size_t * size_return , <nl> - Executability executability ) <nl> - V8_WARN_UNUSED_RESULT ; <nl> - <nl> - static void * ReserveRegion ( size_t size , <nl> - size_t * size_return ) V8_WARN_UNUSED_RESULT ; <nl> - <nl> - static void * ReserveRegion ( size_t size , <nl> - size_t * size_return , <nl> - size_t alignment ) V8_WARN_UNUSED_RESULT ; <nl> - <nl> - static bool CommitRegion ( void * address , <nl> - size_t size , <nl> - Executability executability ) V8_WARN_UNUSED_RESULT ; <nl> - <nl> - static bool UncommitRegion ( void * address , size_t size ) V8_WARN_UNUSED_RESULT ; <nl> - <nl> - / / Mark code segments readable - executable . <nl> - static bool WriteProtectRegion ( void * address , <nl> - size_t size ) V8_WARN_UNUSED_RESULT ; <nl> - <nl> - / / Must be called with a base pointer that has been returned by ReserveRegion <nl> - / / and the same size it was reserved with . <nl> - static bool ReleaseRegion ( void * address , size_t size ) V8_WARN_UNUSED_RESULT ; <nl> - <nl> - / / The granularity for the starting address at which virtual memory can be <nl> - / / reserved ( or allocated in terms of the underlying operating system ) . <nl> - static size_t GetAllocationGranularity ( ) V8_PURE ; <nl> - <nl> - / / The maximum size of the virtual memory . 0 means there is no artificial <nl> - / / limit . <nl> - static size_t GetLimit ( ) V8_PURE ; <nl> - <nl> - / / The page size and the granularity of page protection and commitment . <nl> - static size_t GetPageSize ( ) V8_PURE ; <nl> - <nl> - / / Returns true if OS performs lazy commits , i . e . the memory allocation call <nl> - / / defers actual physical memory allocation till the first memory access . <nl> - / / Otherwise returns false . <nl> - static V8_INLINE bool HasLazyCommits ( ) { <nl> - # if V8_OS_LINUX <nl> - return true ; <nl> - # else <nl> - return false ; <nl> - # endif <nl> - } <nl> - <nl> - private : <nl> - void * address_ ; / / Start address of the virtual memory . <nl> - size_t size_ ; / / Size of the virtual memory . <nl> - } ; <nl> - <nl> - } } / / namespace v8 : : internal <nl> - <nl> - # endif / / V8_PLATFORM_VIRTUAL_MEMORY_H_ <nl> mmm a / src / spaces - inl . h <nl> ppp b / src / spaces - inl . h <nl> HeapObject * HeapObjectIterator : : FromCurrentPage ( ) { <nl> } <nl> <nl> <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + / / MemoryAllocator <nl> + <nl> + # ifdef ENABLE_HEAP_PROTECTION <nl> + <nl> + void MemoryAllocator : : Protect ( Address start , size_t size ) { <nl> + OS : : Protect ( start , size ) ; <nl> + } <nl> + <nl> + <nl> + void MemoryAllocator : : Unprotect ( Address start , <nl> + size_t size , <nl> + Executability executable ) { <nl> + OS : : Unprotect ( start , size , executable ) ; <nl> + } <nl> + <nl> + <nl> + void MemoryAllocator : : ProtectChunkFromPage ( Page * page ) { <nl> + int id = GetChunkId ( page ) ; <nl> + OS : : Protect ( chunks_ [ id ] . address ( ) , chunks_ [ id ] . size ( ) ) ; <nl> + } <nl> + <nl> + <nl> + void MemoryAllocator : : UnprotectChunkFromPage ( Page * page ) { <nl> + int id = GetChunkId ( page ) ; <nl> + OS : : Unprotect ( chunks_ [ id ] . address ( ) , chunks_ [ id ] . size ( ) , <nl> + chunks_ [ id ] . 
owner ( ) - > executable ( ) = = EXECUTABLE ) ; <nl> + } <nl> + <nl> + # endif <nl> + <nl> + <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / PagedSpace <nl> Page * Page : : Initialize ( Heap * heap , <nl> MemoryChunk * chunk , <nl> - VirtualMemory : : Executability executability , <nl> + Executability executable , <nl> PagedSpace * owner ) { <nl> Page * page = reinterpret_cast < Page * > ( chunk ) ; <nl> ASSERT ( page - > area_size ( ) < = kNonCodeObjectAreaSize ) ; <nl> mmm a / src / spaces . cc <nl> ppp b / src / spaces . cc <nl> Address CodeRange : : AllocateRawMemory ( const size_t requested_size , <nl> <nl> <nl> bool CodeRange : : CommitRawMemory ( Address start , size_t length ) { <nl> - return isolate_ - > memory_allocator ( ) - > CommitMemory ( <nl> - start , length , VirtualMemory : : EXECUTABLE ) ; <nl> + return isolate_ - > memory_allocator ( ) - > CommitMemory ( start , length , EXECUTABLE ) ; <nl> } <nl> <nl> <nl> bool CodeRange : : UncommitRawMemory ( Address start , size_t length ) { <nl> void CodeRange : : FreeRawMemory ( Address address , size_t length ) { <nl> ASSERT ( IsAddressAligned ( address , MemoryChunk : : kAlignment ) ) ; <nl> free_list_ . Add ( FreeBlock ( address , length ) ) ; <nl> - bool result = code_range_ - > Uncommit ( address , length ) ; <nl> - ASSERT ( result ) ; <nl> - USE ( result ) ; <nl> + code_range_ - > Uncommit ( address , length ) ; <nl> } <nl> <nl> <nl> void MemoryAllocator : : TearDown ( ) { <nl> <nl> bool MemoryAllocator : : CommitMemory ( Address base , <nl> size_t size , <nl> - VirtualMemory : : Executability executability ) { <nl> - if ( ! VirtualMemory : : CommitRegion ( base , size , executability ) ) { <nl> + Executability executable ) { <nl> + if ( ! VirtualMemory : : CommitRegion ( base , size , executable = = EXECUTABLE ) ) { <nl> return false ; <nl> } <nl> UpdateAllocatedSpaceLimits ( base , base + size ) ; <nl> bool MemoryAllocator : : CommitMemory ( Address base , <nl> <nl> <nl> void MemoryAllocator : : FreeMemory ( VirtualMemory * reservation , <nl> - VirtualMemory : : Executability executability ) { <nl> + Executability executable ) { <nl> / / TODO ( gc ) make code_range part of memory allocator ? <nl> ASSERT ( reservation - > IsReserved ( ) ) ; <nl> size_t size = reservation - > size ( ) ; <nl> void MemoryAllocator : : FreeMemory ( VirtualMemory * reservation , <nl> <nl> isolate_ - > counters ( ) - > memory_allocated ( ) - > Decrement ( static_cast < int > ( size ) ) ; <nl> <nl> - if ( executability = = VirtualMemory : : EXECUTABLE ) { <nl> + if ( executable = = EXECUTABLE ) { <nl> ASSERT ( size_executable_ > = size ) ; <nl> size_executable_ - = size ; <nl> } <nl> / / Code which is part of the code - range does not have its own VirtualMemory . <nl> ASSERT ( ! isolate_ - > code_range ( ) - > contains ( <nl> static_cast < Address > ( reservation - > address ( ) ) ) ) ; <nl> - ASSERT ( executability = = VirtualMemory : : NOT_EXECUTABLE | | <nl> - ! isolate_ - > code_range ( ) - > exists ( ) ) ; <nl> + ASSERT ( executable = = NOT_EXECUTABLE | | ! isolate_ - > code_range ( ) - > exists ( ) ) ; <nl> reservation - > Release ( ) ; <nl> } <nl> <nl> <nl> void MemoryAllocator : : FreeMemory ( Address base , <nl> size_t size , <nl> - VirtualMemory : : Executability executability ) { <nl> + Executability executable ) { <nl> / / TODO ( gc ) make code_range part of memory allocator ? 
<nl> ASSERT ( size_ > = size ) ; <nl> size_ - = size ; <nl> <nl> isolate_ - > counters ( ) - > memory_allocated ( ) - > Decrement ( static_cast < int > ( size ) ) ; <nl> <nl> - if ( executability = = VirtualMemory : : EXECUTABLE ) { <nl> + if ( executable = = EXECUTABLE ) { <nl> ASSERT ( size_executable_ > = size ) ; <nl> size_executable_ - = size ; <nl> } <nl> if ( isolate_ - > code_range ( ) - > contains ( static_cast < Address > ( base ) ) ) { <nl> - ASSERT ( executability = = VirtualMemory : : EXECUTABLE ) ; <nl> + ASSERT ( executable = = EXECUTABLE ) ; <nl> isolate_ - > code_range ( ) - > FreeRawMemory ( base , size ) ; <nl> } else { <nl> - ASSERT ( executability = = VirtualMemory : : NOT_EXECUTABLE | | <nl> - ! isolate_ - > code_range ( ) - > exists ( ) ) ; <nl> + ASSERT ( executable = = NOT_EXECUTABLE | | ! isolate_ - > code_range ( ) - > exists ( ) ) ; <nl> bool result = VirtualMemory : : ReleaseRegion ( base , size ) ; <nl> USE ( result ) ; <nl> ASSERT ( result ) ; <nl> Address MemoryAllocator : : ReserveAlignedMemory ( size_t size , <nl> } <nl> <nl> <nl> - Address MemoryAllocator : : AllocateAlignedMemory ( <nl> - size_t reserve_size , <nl> - size_t commit_size , <nl> - size_t alignment , <nl> - VirtualMemory : : Executability executability , <nl> - VirtualMemory * controller ) { <nl> + Address MemoryAllocator : : AllocateAlignedMemory ( size_t reserve_size , <nl> + size_t commit_size , <nl> + size_t alignment , <nl> + Executability executable , <nl> + VirtualMemory * controller ) { <nl> ASSERT ( commit_size < = reserve_size ) ; <nl> VirtualMemory reservation ; <nl> Address base = ReserveAlignedMemory ( reserve_size , alignment , & reservation ) ; <nl> if ( base = = NULL ) return NULL ; <nl> <nl> - if ( executability = = VirtualMemory : : EXECUTABLE ) { <nl> + if ( executable = = EXECUTABLE ) { <nl> if ( ! CommitExecutableMemory ( & reservation , <nl> base , <nl> commit_size , <nl> Address MemoryAllocator : : AllocateAlignedMemory ( <nl> base = NULL ; <nl> } <nl> } else { <nl> - if ( reservation . Commit ( base , commit_size , VirtualMemory : : NOT_EXECUTABLE ) ) { <nl> + if ( reservation . Commit ( base , commit_size , false ) ) { <nl> UpdateAllocatedSpaceLimits ( base , base + commit_size ) ; <nl> } else { <nl> base = NULL ; <nl> NewSpacePage * NewSpacePage : : Initialize ( Heap * heap , <nl> Page : : kPageSize , <nl> area_start , <nl> area_end , <nl> - VirtualMemory : : NOT_EXECUTABLE , <nl> + NOT_EXECUTABLE , <nl> semi_space ) ; <nl> chunk - > set_next_chunk ( NULL ) ; <nl> chunk - > set_prev_chunk ( NULL ) ; <nl> MemoryChunk * MemoryChunk : : Initialize ( Heap * heap , <nl> size_t size , <nl> Address area_start , <nl> Address area_end , <nl> - VirtualMemory : : Executability executability , <nl> + Executability executable , <nl> Space * owner ) { <nl> MemoryChunk * chunk = FromAddress ( base ) ; <nl> <nl> MemoryChunk * MemoryChunk : : Initialize ( Heap * heap , <nl> ASSERT ( OFFSET_OF ( MemoryChunk , flags_ ) = = kFlagsOffset ) ; <nl> ASSERT ( OFFSET_OF ( MemoryChunk , live_byte_count_ ) = = kLiveBytesOffset ) ; <nl> <nl> - if ( executability = = VirtualMemory : : EXECUTABLE ) { <nl> + if ( executable = = EXECUTABLE ) { <nl> chunk - > SetFlag ( IS_EXECUTABLE ) ; <nl> } <nl> <nl> bool MemoryChunk : : CommitArea ( size_t requested ) { <nl> size_t guard_size = IsFlagSet ( IS_EXECUTABLE ) ? 
<nl> MemoryAllocator : : CodePageGuardSize ( ) : 0 ; <nl> size_t header_size = area_start ( ) - address ( ) - guard_size ; <nl> - size_t commit_size = RoundUp ( header_size + requested , <nl> - VirtualMemory : : GetPageSize ( ) ) ; <nl> + size_t commit_size = RoundUp ( header_size + requested , OS : : CommitPageSize ( ) ) ; <nl> size_t committed_size = RoundUp ( header_size + ( area_end ( ) - area_start ( ) ) , <nl> - VirtualMemory : : GetPageSize ( ) ) ; <nl> + OS : : CommitPageSize ( ) ) ; <nl> <nl> if ( commit_size > committed_size ) { <nl> / / Commit size should be less or equal than the reserved size . <nl> bool MemoryChunk : : CommitArea ( size_t requested ) { <nl> Address start = address ( ) + committed_size + guard_size ; <nl> size_t length = commit_size - committed_size ; <nl> if ( reservation_ . IsReserved ( ) ) { <nl> - VirtualMemory : : Executability executability = IsFlagSet ( IS_EXECUTABLE ) <nl> - ? VirtualMemory : : EXECUTABLE : VirtualMemory : : NOT_EXECUTABLE ; <nl> + Executability executable = IsFlagSet ( IS_EXECUTABLE ) <nl> + ? EXECUTABLE : NOT_EXECUTABLE ; <nl> if ( ! heap ( ) - > isolate ( ) - > memory_allocator ( ) - > CommitMemory ( <nl> - start , length , executability ) ) { <nl> + start , length , executable ) ) { <nl> return false ; <nl> } <nl> } else { <nl> void MemoryChunk : : Unlink ( ) { <nl> } <nl> <nl> <nl> - MemoryChunk * MemoryAllocator : : AllocateChunk ( <nl> - intptr_t reserve_area_size , <nl> - intptr_t commit_area_size , <nl> - VirtualMemory : : Executability executability , <nl> - Space * owner ) { <nl> + MemoryChunk * MemoryAllocator : : AllocateChunk ( intptr_t reserve_area_size , <nl> + intptr_t commit_area_size , <nl> + Executability executable , <nl> + Space * owner ) { <nl> ASSERT ( commit_area_size < = reserve_area_size ) ; <nl> <nl> size_t chunk_size ; <nl> MemoryChunk * MemoryAllocator : : AllocateChunk ( <nl> / / + mmmmmmmmmmmmmmmmmmmmmmmmmmm - + < - base + chunk_size <nl> / / <nl> <nl> - if ( executability = = VirtualMemory : : EXECUTABLE ) { <nl> + if ( executable = = EXECUTABLE ) { <nl> chunk_size = RoundUp ( CodePageAreaStartOffset ( ) + reserve_area_size , <nl> - VirtualMemory : : GetPageSize ( ) ) + CodePageGuardSize ( ) ; <nl> + OS : : CommitPageSize ( ) ) + CodePageGuardSize ( ) ; <nl> <nl> / / Check executable memory limit . <nl> if ( size_executable_ + chunk_size > capacity_executable_ ) { <nl> MemoryChunk * MemoryAllocator : : AllocateChunk ( <nl> <nl> / / Size of header ( not executable ) plus area ( executable ) . <nl> size_t commit_size = RoundUp ( CodePageGuardStartOffset ( ) + commit_area_size , <nl> - VirtualMemory : : GetPageSize ( ) ) ; <nl> + OS : : CommitPageSize ( ) ) ; <nl> / / Allocate executable memory either from code range or from the <nl> / / OS . <nl> if ( isolate_ - > code_range ( ) - > exists ( ) ) { <nl> MemoryChunk * MemoryAllocator : : AllocateChunk ( <nl> base = AllocateAlignedMemory ( chunk_size , <nl> commit_size , <nl> MemoryChunk : : kAlignment , <nl> - executability , <nl> + executable , <nl> & reservation ) ; <nl> if ( base = = NULL ) return NULL ; <nl> / / Update executable memory size . 
<nl> MemoryChunk * MemoryAllocator : : AllocateChunk ( <nl> area_end = area_start + commit_area_size ; <nl> } else { <nl> chunk_size = RoundUp ( MemoryChunk : : kObjectStartOffset + reserve_area_size , <nl> - VirtualMemory : : GetPageSize ( ) ) ; <nl> - size_t commit_size = RoundUp ( <nl> - MemoryChunk : : kObjectStartOffset + commit_area_size , <nl> - VirtualMemory : : GetPageSize ( ) ) ; <nl> + OS : : CommitPageSize ( ) ) ; <nl> + size_t commit_size = RoundUp ( MemoryChunk : : kObjectStartOffset + <nl> + commit_area_size , OS : : CommitPageSize ( ) ) ; <nl> base = AllocateAlignedMemory ( chunk_size , <nl> commit_size , <nl> MemoryChunk : : kAlignment , <nl> - executability , <nl> + executable , <nl> & reservation ) ; <nl> <nl> if ( base = = NULL ) return NULL ; <nl> MemoryChunk * MemoryAllocator : : AllocateChunk ( <nl> chunk_size , <nl> area_start , <nl> area_end , <nl> - executability , <nl> + executable , <nl> owner ) ; <nl> result - > set_reserved_memory ( & reservation ) ; <nl> return result ; <nl> void Page : : ResetFreeListStatistics ( ) { <nl> } <nl> <nl> <nl> - Page * MemoryAllocator : : AllocatePage ( <nl> - intptr_t size , <nl> - PagedSpace * owner , <nl> - VirtualMemory : : Executability executability ) { <nl> - MemoryChunk * chunk = AllocateChunk ( size , size , executability , owner ) ; <nl> + Page * MemoryAllocator : : AllocatePage ( intptr_t size , <nl> + PagedSpace * owner , <nl> + Executability executable ) { <nl> + MemoryChunk * chunk = AllocateChunk ( size , size , executable , owner ) ; <nl> <nl> if ( chunk = = NULL ) return NULL ; <nl> <nl> - return Page : : Initialize ( isolate_ - > heap ( ) , chunk , executability , owner ) ; <nl> + return Page : : Initialize ( isolate_ - > heap ( ) , chunk , executable , owner ) ; <nl> } <nl> <nl> <nl> - LargePage * MemoryAllocator : : AllocateLargePage ( <nl> - intptr_t object_size , <nl> - Space * owner , <nl> - VirtualMemory : : Executability executability ) { <nl> + LargePage * MemoryAllocator : : AllocateLargePage ( intptr_t object_size , <nl> + Space * owner , <nl> + Executability executable ) { <nl> MemoryChunk * chunk = AllocateChunk ( object_size , <nl> object_size , <nl> - executability , <nl> + executable , <nl> owner ) ; <nl> if ( chunk = = NULL ) return NULL ; <nl> return LargePage : : Initialize ( isolate_ - > heap ( ) , chunk ) ; <nl> void MemoryAllocator : : Free ( MemoryChunk * chunk ) { <nl> <nl> VirtualMemory * reservation = chunk - > reserved_memory ( ) ; <nl> if ( reservation - > IsReserved ( ) ) { <nl> - FreeMemory ( reservation , chunk - > executability ( ) ) ; <nl> + FreeMemory ( reservation , chunk - > executable ( ) ) ; <nl> } else { <nl> FreeMemory ( chunk - > address ( ) , <nl> chunk - > size ( ) , <nl> - chunk - > executability ( ) ) ; <nl> + chunk - > executable ( ) ) ; <nl> } <nl> } <nl> <nl> <nl> bool MemoryAllocator : : CommitBlock ( Address start , <nl> size_t size , <nl> - VirtualMemory : : Executability executability ) { <nl> - if ( ! CommitMemory ( start , size , executability ) ) return false ; <nl> + Executability executable ) { <nl> + if ( ! CommitMemory ( start , size , executable ) ) return false ; <nl> <nl> if ( Heap : : ShouldZapGarbage ( ) ) { <nl> ZapBlock ( start , size ) ; <nl> void MemoryAllocator : : ReportStatistics ( ) { <nl> int MemoryAllocator : : CodePageGuardStartOffset ( ) { <nl> / / We are guarding code pages : the first OS page after the header <nl> / / will be protected as non - writable . 
<nl> - return RoundUp ( Page : : kObjectStartOffset , VirtualMemory : : GetPageSize ( ) ) ; <nl> + return RoundUp ( Page : : kObjectStartOffset , OS : : CommitPageSize ( ) ) ; <nl> } <nl> <nl> <nl> int MemoryAllocator : : CodePageGuardSize ( ) { <nl> - return static_cast < int > ( VirtualMemory : : GetPageSize ( ) ) ; <nl> + return static_cast < int > ( OS : : CommitPageSize ( ) ) ; <nl> } <nl> <nl> <nl> int MemoryAllocator : : CodePageAreaStartOffset ( ) { <nl> int MemoryAllocator : : CodePageAreaEndOffset ( ) { <nl> / / We are guarding code pages : the last OS page will be protected as <nl> / / non - writable . <nl> - return Page : : kPageSize - static_cast < int > ( VirtualMemory : : GetPageSize ( ) ) ; <nl> + return Page : : kPageSize - static_cast < int > ( OS : : CommitPageSize ( ) ) ; <nl> } <nl> <nl> <nl> bool MemoryAllocator : : CommitExecutableMemory ( VirtualMemory * vm , <nl> / / Commit page header ( not executable ) . <nl> if ( ! vm - > Commit ( start , <nl> CodePageGuardStartOffset ( ) , <nl> - VirtualMemory : : NOT_EXECUTABLE ) ) { <nl> + false ) ) { <nl> return false ; <nl> } <nl> <nl> / / Create guard page after the header . <nl> - if ( ! vm - > Guard ( start + CodePageGuardStartOffset ( ) , <nl> - VirtualMemory : : GetPageSize ( ) ) ) { <nl> + if ( ! vm - > Guard ( start + CodePageGuardStartOffset ( ) ) ) { <nl> return false ; <nl> } <nl> <nl> / / Commit page body ( executable ) . <nl> if ( ! vm - > Commit ( start + CodePageAreaStartOffset ( ) , <nl> commit_size - CodePageGuardStartOffset ( ) , <nl> - VirtualMemory : : EXECUTABLE ) ) { <nl> + true ) ) { <nl> return false ; <nl> } <nl> <nl> / / Create guard page before the end . <nl> - if ( ! vm - > Guard ( start + reserved_size - CodePageGuardSize ( ) , <nl> - VirtualMemory : : GetPageSize ( ) ) ) { <nl> + if ( ! vm - > Guard ( start + reserved_size - CodePageGuardSize ( ) ) ) { <nl> return false ; <nl> } <nl> <nl> void MemoryChunk : : IncrementLiveBytesFromMutator ( Address address , int by ) { <nl> PagedSpace : : PagedSpace ( Heap * heap , <nl> intptr_t max_capacity , <nl> AllocationSpace id , <nl> - VirtualMemory : : Executability executability ) <nl> - : Space ( heap , id , executability ) , <nl> + Executability executable ) <nl> + : Space ( heap , id , executable ) , <nl> free_list_ ( this ) , <nl> was_swept_conservatively_ ( false ) , <nl> first_unswept_page_ ( Page : : FromAddress ( NULL ) ) , <nl> bool PagedSpace : : Expand ( ) { <nl> } <nl> <nl> Page * p = heap ( ) - > isolate ( ) - > memory_allocator ( ) - > AllocatePage ( <nl> - size , this , executability ( ) ) ; <nl> + size , this , executable ( ) ) ; <nl> if ( p = = NULL ) return false ; <nl> <nl> ASSERT ( Capacity ( ) < = max_capacity_ ) ; <nl> void NewSpace : : TearDown ( ) { <nl> LOG ( heap ( ) - > isolate ( ) , DeleteEvent ( " InitialChunk " , chunk_base_ ) ) ; <nl> <nl> ASSERT ( reservation_ . IsReserved ( ) ) ; <nl> - heap ( ) - > isolate ( ) - > memory_allocator ( ) - > FreeMemory ( <nl> - & reservation_ , VirtualMemory : : NOT_EXECUTABLE ) ; <nl> + heap ( ) - > isolate ( ) - > memory_allocator ( ) - > FreeMemory ( & reservation_ , <nl> + NOT_EXECUTABLE ) ; <nl> chunk_base_ = NULL ; <nl> chunk_size_ = 0 ; <nl> } <nl> bool SemiSpace : : Commit ( ) { <nl> Address start = end - pages * Page : : kPageSize ; <nl> if ( ! 
heap ( ) - > isolate ( ) - > memory_allocator ( ) - > CommitBlock ( start , <nl> capacity_ , <nl> - executability ( ) ) ) { <nl> + executable ( ) ) ) { <nl> return false ; <nl> } <nl> <nl> bool SemiSpace : : GrowTo ( int new_capacity ) { <nl> Address start = end - new_capacity ; <nl> size_t delta = new_capacity - capacity_ ; <nl> <nl> - ASSERT ( IsAligned ( delta , VirtualMemory : : GetAllocationGranularity ( ) ) ) ; <nl> + ASSERT ( IsAligned ( delta , OS : : AllocateAlignment ( ) ) ) ; <nl> if ( ! heap ( ) - > isolate ( ) - > memory_allocator ( ) - > CommitBlock ( <nl> - start , delta , executability ( ) ) ) { <nl> + start , delta , executable ( ) ) ) { <nl> return false ; <nl> } <nl> capacity_ = new_capacity ; <nl> bool SemiSpace : : ShrinkTo ( int new_capacity ) { <nl> Address space_end = start_ + maximum_capacity_ ; <nl> Address old_start = space_end - capacity_ ; <nl> size_t delta = capacity_ - new_capacity ; <nl> - ASSERT ( IsAligned ( delta , VirtualMemory : : GetAllocationGranularity ( ) ) ) ; <nl> + ASSERT ( IsAligned ( delta , OS : : AllocateAlignment ( ) ) ) ; <nl> <nl> MemoryAllocator * allocator = heap ( ) - > isolate ( ) - > memory_allocator ( ) ; <nl> if ( ! allocator - > UncommitBlock ( old_start , delta ) ) { <nl> static bool ComparePointers ( void * key1 , void * key2 ) { <nl> LargeObjectSpace : : LargeObjectSpace ( Heap * heap , <nl> intptr_t max_capacity , <nl> AllocationSpace id ) <nl> - / / Managed on a per - allocation basis <nl> - : Space ( heap , id , VirtualMemory : : NOT_EXECUTABLE ) , <nl> + : Space ( heap , id , NOT_EXECUTABLE ) , / / Managed on a per - allocation basis <nl> max_capacity_ ( max_capacity ) , <nl> first_page_ ( NULL ) , <nl> size_ ( 0 ) , <nl> void LargeObjectSpace : : TearDown ( ) { <nl> } <nl> <nl> <nl> - MaybeObject * LargeObjectSpace : : AllocateRaw ( <nl> - int object_size , VirtualMemory : : Executability executability ) { <nl> + MaybeObject * LargeObjectSpace : : AllocateRaw ( int object_size , <nl> + Executability executable ) { <nl> / / Check if we want to force a GC before growing the old space further . <nl> / / If so , fail the allocation . <nl> if ( ! heap ( ) - > always_allocate ( ) & & <nl> MaybeObject * LargeObjectSpace : : AllocateRaw ( <nl> } <nl> <nl> LargePage * page = heap ( ) - > isolate ( ) - > memory_allocator ( ) - > <nl> - AllocateLargePage ( object_size , this , executability ) ; <nl> + AllocateLargePage ( object_size , this , executable ) ; <nl> if ( page = = NULL ) return Failure : : RetryAfterGC ( identity ( ) ) ; <nl> ASSERT ( page - > area_size ( ) > = object_size ) ; <nl> <nl> mmm a / src / spaces . h <nl> ppp b / src / spaces . h <nl> <nl> # include " list . h " <nl> # include " log . h " <nl> # include " platform / mutex . h " <nl> - # include " platform / virtual - memory . h " <nl> # include " v8utils . h " <nl> <nl> namespace v8 { <nl> class MemoryChunk { <nl> area_end_ = area_end ; <nl> } <nl> <nl> - VirtualMemory : : Executability executability ( ) { <nl> - return IsFlagSet ( IS_EXECUTABLE ) <nl> - ? VirtualMemory : : EXECUTABLE <nl> - : VirtualMemory : : NOT_EXECUTABLE ; <nl> + Executability executable ( ) { <nl> + return IsFlagSet ( IS_EXECUTABLE ) ? 
EXECUTABLE : NOT_EXECUTABLE ; <nl> } <nl> <nl> bool ContainsOnlyData ( ) { <nl> class MemoryChunk { <nl> size_t size , <nl> Address area_start , <nl> Address area_end , <nl> - VirtualMemory : : Executability executability , <nl> + Executability executable , <nl> Space * owner ) ; <nl> <nl> friend class MemoryAllocator ; <nl> class Page : public MemoryChunk { <nl> <nl> static inline Page * Initialize ( Heap * heap , <nl> MemoryChunk * chunk , <nl> - VirtualMemory : : Executability executable , <nl> + Executability executable , <nl> PagedSpace * owner ) ; <nl> <nl> void InitializeAsAnchor ( PagedSpace * owner ) ; <nl> STATIC_CHECK ( sizeof ( LargePage ) < = MemoryChunk : : kHeaderSize ) ; <nl> / / Space is the abstract superclass for all allocation spaces . <nl> class Space : public Malloced { <nl> public : <nl> - Space ( Heap * heap , <nl> - AllocationSpace id , <nl> - VirtualMemory : : Executability executability ) <nl> - : heap_ ( heap ) , id_ ( id ) , executability_ ( executability ) { } <nl> + Space ( Heap * heap , AllocationSpace id , Executability executable ) <nl> + : heap_ ( heap ) , id_ ( id ) , executable_ ( executable ) { } <nl> <nl> virtual ~ Space ( ) { } <nl> <nl> Heap * heap ( ) const { return heap_ ; } <nl> <nl> / / Does the space need executable memory ? <nl> - VirtualMemory : : Executability executability ( ) { return executability_ ; } <nl> + Executability executable ( ) { return executable_ ; } <nl> <nl> / / Identity used in error reporting . <nl> AllocationSpace identity ( ) { return id_ ; } <nl> class Space : public Malloced { <nl> private : <nl> Heap * heap_ ; <nl> AllocationSpace id_ ; <nl> - VirtualMemory : : Executability executability_ ; <nl> + Executability executable_ ; <nl> } ; <nl> <nl> <nl> class MemoryAllocator { <nl> <nl> void TearDown ( ) ; <nl> <nl> - Page * AllocatePage ( intptr_t size , <nl> - PagedSpace * owner , <nl> - VirtualMemory : : Executability executability ) ; <nl> + Page * AllocatePage ( <nl> + intptr_t size , PagedSpace * owner , Executability executable ) ; <nl> <nl> - LargePage * AllocateLargePage ( intptr_t object_size , <nl> - Space * owner , <nl> - VirtualMemory : : Executability executability ) ; <nl> + LargePage * AllocateLargePage ( <nl> + intptr_t object_size , Space * owner , Executability executable ) ; <nl> <nl> void Free ( MemoryChunk * chunk ) ; <nl> <nl> class MemoryAllocator { <nl> <nl> / / Returns an indication of whether a pointer is in a space that has <nl> / / been allocated by this MemoryAllocator . <nl> - V8_INLINE bool IsOutsideAllocatedSpace ( const void * address ) const { <nl> + V8_INLINE ( bool IsOutsideAllocatedSpace ( const void * address ) ) const { <nl> return address < lowest_ever_allocated_ | | <nl> address > = highest_ever_allocated_ ; <nl> } <nl> class MemoryAllocator { <nl> / / could be committed later by calling MemoryChunk : : CommitArea . 
<nl> MemoryChunk * AllocateChunk ( intptr_t reserve_area_size , <nl> intptr_t commit_area_size , <nl> - VirtualMemory : : Executability executability , <nl> + Executability executable , <nl> Space * space ) ; <nl> <nl> Address ReserveAlignedMemory ( size_t requested , <nl> class MemoryAllocator { <nl> Address AllocateAlignedMemory ( size_t reserve_size , <nl> size_t commit_size , <nl> size_t alignment , <nl> - VirtualMemory : : Executability executability , <nl> + Executability executable , <nl> VirtualMemory * controller ) ; <nl> <nl> - bool CommitMemory ( Address addr , <nl> - size_t size , <nl> - VirtualMemory : : Executability executability ) ; <nl> + bool CommitMemory ( Address addr , size_t size , Executability executable ) ; <nl> <nl> - void FreeMemory ( VirtualMemory * reservation , <nl> - VirtualMemory : : Executability executability ) ; <nl> - void FreeMemory ( Address addr , <nl> - size_t size , <nl> - VirtualMemory : : Executability executability ) ; <nl> + void FreeMemory ( VirtualMemory * reservation , Executability executable ) ; <nl> + void FreeMemory ( Address addr , size_t size , Executability executable ) ; <nl> <nl> / / Commit a contiguous block of memory from the initial chunk . Assumes that <nl> / / the address is not NULL , the size is greater than zero , and that the <nl> / / block is contained in the initial chunk . Returns true if it succeeded <nl> / / and false otherwise . <nl> - bool CommitBlock ( Address start , <nl> - size_t size , <nl> - VirtualMemory : : Executability executability ) ; <nl> + bool CommitBlock ( Address start , size_t size , Executability executable ) ; <nl> <nl> / / Uncommit a contiguous block of memory [ start . . ( start + size ) [ . <nl> / / start is not NULL , the size is greater than zero , and the <nl> class PagedSpace : public Space { <nl> PagedSpace ( Heap * heap , <nl> intptr_t max_capacity , <nl> AllocationSpace id , <nl> - VirtualMemory : : Executability executability ) ; <nl> + Executability executable ) ; <nl> <nl> virtual ~ PagedSpace ( ) { } <nl> <nl> class SemiSpace : public Space { <nl> public : <nl> / / Constructor . <nl> SemiSpace ( Heap * heap , SemiSpaceId semispace ) <nl> - : Space ( heap , NEW_SPACE , VirtualMemory : : NOT_EXECUTABLE ) , <nl> + : Space ( heap , NEW_SPACE , NOT_EXECUTABLE ) , <nl> start_ ( NULL ) , <nl> age_mark_ ( NULL ) , <nl> id_ ( semispace ) , <nl> class NewSpace : public Space { <nl> public : <nl> / / Constructor . 
<nl> explicit NewSpace ( Heap * heap ) <nl> - : Space ( heap , NEW_SPACE , VirtualMemory : : NOT_EXECUTABLE ) , <nl> + : Space ( heap , NEW_SPACE , NOT_EXECUTABLE ) , <nl> to_space_ ( heap , kToSpace ) , <nl> from_space_ ( heap , kFromSpace ) , <nl> reservation_ ( ) , <nl> class OldSpace : public PagedSpace { <nl> OldSpace ( Heap * heap , <nl> intptr_t max_capacity , <nl> AllocationSpace id , <nl> - VirtualMemory : : Executability executability ) <nl> - : PagedSpace ( heap , max_capacity , id , executability ) { <nl> + Executability executable ) <nl> + : PagedSpace ( heap , max_capacity , id , executable ) { <nl> page_extra_ = 0 ; <nl> } <nl> <nl> class FixedSpace : public PagedSpace { <nl> intptr_t max_capacity , <nl> AllocationSpace id , <nl> int object_size_in_bytes ) <nl> - : PagedSpace ( heap , max_capacity , id , VirtualMemory : : NOT_EXECUTABLE ) , <nl> + : PagedSpace ( heap , max_capacity , id , NOT_EXECUTABLE ) , <nl> object_size_in_bytes_ ( object_size_in_bytes ) { <nl> page_extra_ = Page : : kNonCodeObjectAreaSize % object_size_in_bytes ; <nl> } <nl> class LargeObjectSpace : public Space { <nl> <nl> / / Shared implementation of AllocateRaw , AllocateRawCode and <nl> / / AllocateRawFixedArray . <nl> - MUST_USE_RESULT MaybeObject * AllocateRaw ( <nl> - int object_size , VirtualMemory : : Executability executability ) ; <nl> + MUST_USE_RESULT MaybeObject * AllocateRaw ( int object_size , <nl> + Executability executable ) ; <nl> <nl> / / Available bytes for objects in this space . <nl> inline intptr_t Available ( ) ; <nl> mmm a / src / store - buffer . cc <nl> ppp b / src / store - buffer . cc <nl> void StoreBuffer : : SetUp ( ) { <nl> / / Don ' t know the alignment requirements of the OS , but it is certainly not <nl> / / less than 0xfff . <nl> ASSERT ( ( reinterpret_cast < uintptr_t > ( old_start_ ) & 0xfff ) = = 0 ) ; <nl> - int initial_length = <nl> - static_cast < int > ( VirtualMemory : : GetPageSize ( ) / kPointerSize ) ; <nl> + int initial_length = static_cast < int > ( OS : : CommitPageSize ( ) / kPointerSize ) ; <nl> ASSERT ( initial_length > 0 ) ; <nl> ASSERT ( initial_length < = kOldStoreBufferLength ) ; <nl> old_limit_ = old_start_ + initial_length ; <nl> void StoreBuffer : : SetUp ( ) { <nl> CHECK ( old_virtual_memory_ - > Commit ( <nl> reinterpret_cast < void * > ( old_start_ ) , <nl> ( old_limit_ - old_start_ ) * kPointerSize , <nl> - VirtualMemory : : NOT_EXECUTABLE ) ) ; <nl> + false ) ) ; <nl> <nl> ASSERT ( reinterpret_cast < Address > ( start_ ) > = virtual_memory_ - > address ( ) ) ; <nl> ASSERT ( reinterpret_cast < Address > ( limit_ ) > = virtual_memory_ - > address ( ) ) ; <nl> void StoreBuffer : : SetUp ( ) { <nl> <nl> CHECK ( virtual_memory_ - > Commit ( reinterpret_cast < Address > ( start_ ) , <nl> kStoreBufferSize , <nl> - VirtualMemory : : NOT_EXECUTABLE ) ) ; <nl> + false ) ) ; / / Not executable . <nl> heap_ - > public_set_store_buffer_top ( start_ ) ; <nl> <nl> hash_set_1_ = new uintptr_t [ kHashSetLength ] ; <nl> void StoreBuffer : : EnsureSpace ( intptr_t space_needed ) { <nl> size_t grow = old_limit_ - old_start_ ; / / Double size . <nl> CHECK ( old_virtual_memory_ - > Commit ( reinterpret_cast < void * > ( old_limit_ ) , <nl> grow * kPointerSize , <nl> - VirtualMemory : : NOT_EXECUTABLE ) ) ; <nl> + false ) ) ; <nl> old_limit_ + = grow ; <nl> } <nl> <nl> mmm a / src / utils / random - number - generator . h <nl> ppp b / src / utils / random - number - generator . 
h <nl> class RandomNumberGenerator V8_FINAL { <nl> / / that one int value is pseudorandomly generated and returned . <nl> / / All 2 ^ 32 possible integer values are produced with ( approximately ) equal <nl> / / probability . <nl> - V8_INLINE int NextInt ( ) V8_WARN_UNUSED_RESULT { <nl> + V8_INLINE ( int NextInt ( ) ) V8_WARN_UNUSED_RESULT { <nl> return Next ( 32 ) ; <nl> } <nl> <nl> class RandomNumberGenerator V8_FINAL { <nl> / / | NextBoolean ( ) | is that one boolean value is pseudorandomly generated and <nl> / / returned . The values true and false are produced with ( approximately ) equal <nl> / / probability . <nl> - V8_INLINE bool NextBool ( ) V8_WARN_UNUSED_RESULT { <nl> + V8_INLINE ( bool NextBool ( ) ) V8_WARN_UNUSED_RESULT { <nl> return Next ( 1 ) ! = 0 ; <nl> } <nl> <nl> mmm a / src / v8globals . h <nl> ppp b / src / v8globals . h <nl> enum PretenureFlag { NOT_TENURED , TENURED } ; <nl> <nl> enum GarbageCollector { SCAVENGER , MARK_COMPACTOR } ; <nl> <nl> + enum Executability { NOT_EXECUTABLE , EXECUTABLE } ; <nl> + <nl> enum VisitMode { <nl> VISIT_ALL , <nl> VISIT_ALL_IN_SCAVENGE , <nl> mmm a / src / x64 / codegen - x64 . cc <nl> ppp b / src / x64 / codegen - x64 . cc <nl> void StubRuntimeCallHelper : : AfterCall ( MacroAssembler * masm ) const { <nl> UnaryMathFunction CreateTranscendentalFunction ( TranscendentalCache : : Type type ) { <nl> size_t actual_size ; <nl> / / Allocate buffer in executable space . <nl> - byte * buffer = static_cast < byte * > ( VirtualMemory : : AllocateRegion ( <nl> - 1 * KB , & actual_size , VirtualMemory : : EXECUTABLE ) ) ; <nl> + byte * buffer = static_cast < byte * > ( OS : : Allocate ( 1 * KB , <nl> + & actual_size , <nl> + true ) ) ; <nl> if ( buffer = = NULL ) { <nl> / / Fallback to library function if function cannot be created . <nl> switch ( type ) { <nl> UnaryMathFunction CreateTranscendentalFunction ( TranscendentalCache : : Type type ) { <nl> ASSERT ( ! RelocInfo : : RequiresRelocation ( desc ) ) ; <nl> <nl> CPU : : FlushICache ( buffer , actual_size ) ; <nl> - bool result = VirtualMemory : : WriteProtectRegion ( buffer , actual_size ) ; <nl> - ASSERT ( result ) ; <nl> - USE ( result ) ; <nl> + OS : : ProtectCode ( buffer , actual_size ) ; <nl> return FUNCTION_CAST < UnaryMathFunction > ( buffer ) ; <nl> } <nl> <nl> UnaryMathFunction CreateTranscendentalFunction ( TranscendentalCache : : Type type ) { <nl> UnaryMathFunction CreateExpFunction ( ) { <nl> if ( ! FLAG_fast_math ) return & exp ; <nl> size_t actual_size ; <nl> - byte * buffer = static_cast < byte * > ( VirtualMemory : : AllocateRegion ( <nl> - 1 * KB , & actual_size , VirtualMemory : : EXECUTABLE ) ) ; <nl> + byte * buffer = static_cast < byte * > ( OS : : Allocate ( 1 * KB , & actual_size , true ) ) ; <nl> if ( buffer = = NULL ) return & exp ; <nl> ExternalReference : : InitializeMathExpData ( ) ; <nl> <nl> UnaryMathFunction CreateExpFunction ( ) { <nl> ASSERT ( ! RelocInfo : : RequiresRelocation ( desc ) ) ; <nl> <nl> CPU : : FlushICache ( buffer , actual_size ) ; <nl> - bool ok = VirtualMemory : : WriteProtectRegion ( buffer , actual_size ) ; <nl> - ASSERT ( ok ) ; <nl> - USE ( ok ) ; <nl> + OS : : ProtectCode ( buffer , actual_size ) ; <nl> return FUNCTION_CAST < UnaryMathFunction > ( buffer ) ; <nl> } <nl> <nl> UnaryMathFunction CreateExpFunction ( ) { <nl> UnaryMathFunction CreateSqrtFunction ( ) { <nl> size_t actual_size ; <nl> / / Allocate buffer in executable space . 
<nl> - byte * buffer = static_cast < byte * > ( VirtualMemory : : AllocateRegion ( <nl> - 1 * KB , & actual_size , VirtualMemory : : EXECUTABLE ) ) ; <nl> + byte * buffer = static_cast < byte * > ( OS : : Allocate ( 1 * KB , <nl> + & actual_size , <nl> + true ) ) ; <nl> if ( buffer = = NULL ) return & sqrt ; <nl> <nl> MacroAssembler masm ( NULL , buffer , static_cast < int > ( actual_size ) ) ; <nl> UnaryMathFunction CreateSqrtFunction ( ) { <nl> ASSERT ( ! RelocInfo : : RequiresRelocation ( desc ) ) ; <nl> <nl> CPU : : FlushICache ( buffer , actual_size ) ; <nl> - bool result = VirtualMemory : : WriteProtectRegion ( buffer , actual_size ) ; <nl> - ASSERT ( result ) ; <nl> - USE ( result ) ; <nl> + OS : : ProtectCode ( buffer , actual_size ) ; <nl> return FUNCTION_CAST < UnaryMathFunction > ( buffer ) ; <nl> } <nl> <nl> ModuloFunction CreateModuloFunction ( ) { <nl> <nl> CodeDesc desc ; <nl> masm . GetCode ( & desc ) ; <nl> - bool result = VirtualMemory : : WriteProtectRegion ( buffer , actual_size ) ; <nl> - ASSERT ( result ) ; <nl> - USE ( result ) ; <nl> + OS : : ProtectCode ( buffer , actual_size ) ; <nl> / / Call the function from C + + through this pointer . <nl> return FUNCTION_CAST < ModuloFunction > ( buffer ) ; <nl> } <nl> mmm a / test / cctest / cctest . gyp <nl> ppp b / test / cctest / cctest . gyp <nl> <nl> ' test - unbound - queue . cc ' , <nl> ' test - utils . cc ' , <nl> ' test - version . cc ' , <nl> - ' test - virtual - memory . cc ' , <nl> ' test - weakmaps . cc ' , <nl> ' test - weaksets . cc ' , <nl> ' test - weaktypedarrays . cc ' <nl> mmm a / test / cctest / test - api . cc <nl> ppp b / test / cctest / test - api . cc <nl> struct CopyablePersistentTraits { <nl> typedef Persistent < T , CopyablePersistentTraits < T > > CopyablePersistent ; <nl> static const bool kResetInDestructor = true ; <nl> template < class S , class M > <nl> - static V8_INLINE void Copy ( const Persistent < S , M > & source , <nl> - CopyablePersistent * dest ) { <nl> + V8_INLINE ( static void Copy ( const Persistent < S , M > & source , <nl> + CopyablePersistent * dest ) ) { <nl> / / do nothing , just allow copy <nl> } <nl> } ; <nl> mmm a / test / cctest / test - assembler - x64 . cc <nl> ppp b / test / cctest / test - assembler - x64 . cc <nl> <nl> # include " serialize . h " <nl> # include " cctest . 
h " <nl> <nl> - using namespace v8 : : internal ; <nl> + using v8 : : internal : : Assembler ; <nl> + using v8 : : internal : : Code ; <nl> + using v8 : : internal : : CodeDesc ; <nl> + using v8 : : internal : : FUNCTION_CAST ; <nl> + using v8 : : internal : : Immediate ; <nl> + using v8 : : internal : : Isolate ; <nl> + using v8 : : internal : : Label ; <nl> + using v8 : : internal : : OS ; <nl> + using v8 : : internal : : Operand ; <nl> + using v8 : : internal : : byte ; <nl> + using v8 : : internal : : greater ; <nl> + using v8 : : internal : : less_equal ; <nl> + using v8 : : internal : : equal ; <nl> + using v8 : : internal : : not_equal ; <nl> + using v8 : : internal : : r13 ; <nl> + using v8 : : internal : : r15 ; <nl> + using v8 : : internal : : r8 ; <nl> + using v8 : : internal : : r9 ; <nl> + using v8 : : internal : : rax ; <nl> + using v8 : : internal : : rbx ; <nl> + using v8 : : internal : : rbp ; <nl> + using v8 : : internal : : rcx ; <nl> + using v8 : : internal : : rdi ; <nl> + using v8 : : internal : : rdx ; <nl> + using v8 : : internal : : rsi ; <nl> + using v8 : : internal : : rsp ; <nl> + using v8 : : internal : : times_1 ; <nl> + using v8 : : internal : : xmm0 ; <nl> <nl> / / Test the x64 assembler by compiling some simple functions into <nl> / / a buffer and executing them . These tests do not initialize the <nl> static const v8 : : internal : : Register arg2 = rsi ; <nl> TEST ( AssemblerX64ReturnOperation ) { <nl> / / Allocate an executable page of memory . <nl> size_t actual_size ; <nl> - byte * buffer = static_cast < byte * > ( VirtualMemory : : AllocateRegion ( <nl> - Assembler : : kMinimalBufferSize , <nl> - & actual_size , <nl> - VirtualMemory : : EXECUTABLE ) ) ; <nl> + byte * buffer = static_cast < byte * > ( OS : : Allocate ( Assembler : : kMinimalBufferSize , <nl> + & actual_size , <nl> + true ) ) ; <nl> CHECK ( buffer ) ; <nl> Assembler assm ( Isolate : : Current ( ) , buffer , static_cast < int > ( actual_size ) ) ; <nl> <nl> TEST ( AssemblerX64ReturnOperation ) { <nl> TEST ( AssemblerX64StackOperations ) { <nl> / / Allocate an executable page of memory . <nl> size_t actual_size ; <nl> - byte * buffer = static_cast < byte * > ( VirtualMemory : : AllocateRegion ( <nl> - Assembler : : kMinimalBufferSize , <nl> - & actual_size , <nl> - VirtualMemory : : EXECUTABLE ) ) ; <nl> + byte * buffer = static_cast < byte * > ( OS : : Allocate ( Assembler : : kMinimalBufferSize , <nl> + & actual_size , <nl> + true ) ) ; <nl> CHECK ( buffer ) ; <nl> Assembler assm ( Isolate : : Current ( ) , buffer , static_cast < int > ( actual_size ) ) ; <nl> <nl> TEST ( AssemblerX64StackOperations ) { <nl> TEST ( AssemblerX64ArithmeticOperations ) { <nl> / / Allocate an executable page of memory . <nl> size_t actual_size ; <nl> - byte * buffer = static_cast < byte * > ( VirtualMemory : : AllocateRegion ( <nl> - Assembler : : kMinimalBufferSize , <nl> - & actual_size , <nl> - VirtualMemory : : EXECUTABLE ) ) ; <nl> + byte * buffer = static_cast < byte * > ( OS : : Allocate ( Assembler : : kMinimalBufferSize , <nl> + & actual_size , <nl> + true ) ) ; <nl> CHECK ( buffer ) ; <nl> Assembler assm ( Isolate : : Current ( ) , buffer , static_cast < int > ( actual_size ) ) ; <nl> <nl> TEST ( AssemblerX64ArithmeticOperations ) { <nl> TEST ( AssemblerX64ImulOperation ) { <nl> / / Allocate an executable page of memory . 
<nl> size_t actual_size ; <nl> - byte * buffer = static_cast < byte * > ( VirtualMemory : : AllocateRegion ( <nl> - Assembler : : kMinimalBufferSize , <nl> - & actual_size , <nl> - VirtualMemory : : EXECUTABLE ) ) ; <nl> + byte * buffer = static_cast < byte * > ( OS : : Allocate ( Assembler : : kMinimalBufferSize , <nl> + & actual_size , <nl> + true ) ) ; <nl> CHECK ( buffer ) ; <nl> Assembler assm ( Isolate : : Current ( ) , buffer , static_cast < int > ( actual_size ) ) ; <nl> <nl> TEST ( AssemblerX64ImulOperation ) { <nl> TEST ( AssemblerX64MemoryOperands ) { <nl> / / Allocate an executable page of memory . <nl> size_t actual_size ; <nl> - byte * buffer = static_cast < byte * > ( VirtualMemory : : AllocateRegion ( <nl> - Assembler : : kMinimalBufferSize , <nl> - & actual_size , <nl> - VirtualMemory : : EXECUTABLE ) ) ; <nl> + byte * buffer = static_cast < byte * > ( OS : : Allocate ( Assembler : : kMinimalBufferSize , <nl> + & actual_size , <nl> + true ) ) ; <nl> CHECK ( buffer ) ; <nl> Assembler assm ( Isolate : : Current ( ) , buffer , static_cast < int > ( actual_size ) ) ; <nl> <nl> TEST ( AssemblerX64MemoryOperands ) { <nl> TEST ( AssemblerX64ControlFlow ) { <nl> / / Allocate an executable page of memory . <nl> size_t actual_size ; <nl> - byte * buffer = static_cast < byte * > ( VirtualMemory : : AllocateRegion ( <nl> - Assembler : : kMinimalBufferSize , <nl> - & actual_size , <nl> - VirtualMemory : : EXECUTABLE ) ) ; <nl> + byte * buffer = static_cast < byte * > ( OS : : Allocate ( Assembler : : kMinimalBufferSize , <nl> + & actual_size , <nl> + true ) ) ; <nl> CHECK ( buffer ) ; <nl> Assembler assm ( Isolate : : Current ( ) , buffer , static_cast < int > ( actual_size ) ) ; <nl> <nl> TEST ( AssemblerX64ControlFlow ) { <nl> TEST ( AssemblerX64LoopImmediates ) { <nl> / / Allocate an executable page of memory . <nl> size_t actual_size ; <nl> - byte * buffer = static_cast < byte * > ( VirtualMemory : : AllocateRegion ( <nl> - Assembler : : kMinimalBufferSize , <nl> - & actual_size , <nl> - VirtualMemory : : EXECUTABLE ) ) ; <nl> + byte * buffer = static_cast < byte * > ( OS : : Allocate ( Assembler : : kMinimalBufferSize , <nl> + & actual_size , <nl> + true ) ) ; <nl> CHECK ( buffer ) ; <nl> Assembler assm ( Isolate : : Current ( ) , buffer , static_cast < int > ( actual_size ) ) ; <nl> / / Assemble two loops using rax as counter , and verify the ending counts . <nl> mmm a / test / cctest / test - code - stubs - arm . cc <nl> ppp b / test / cctest / test - code - stubs - arm . cc <nl> ConvertDToIFunc MakeConvertDToIFuncTrampoline ( Isolate * isolate , <nl> bool inline_fastpath ) { <nl> / / Allocate an executable page of memory . <nl> size_t actual_size ; <nl> - byte * buffer = static_cast < byte * > ( VirtualMemory : : AllocateRegion ( <nl> - Assembler : : kMinimalBufferSize , <nl> - & actual_size , <nl> - VirtualMemory : : EXECUTABLE ) ) ; <nl> + byte * buffer = static_cast < byte * > ( OS : : Allocate ( Assembler : : kMinimalBufferSize , <nl> + & actual_size , <nl> + true ) ) ; <nl> CHECK ( buffer ) ; <nl> HandleScope handles ( isolate ) ; <nl> MacroAssembler masm ( isolate , buffer , static_cast < int > ( actual_size ) ) ; <nl> mmm a / test / cctest / test - code - stubs - ia32 . cc <nl> ppp b / test / cctest / test - code - stubs - ia32 . cc <nl> ConvertDToIFunc MakeConvertDToIFuncTrampoline ( Isolate * isolate , <nl> Register destination_reg ) { <nl> / / Allocate an executable page of memory . 
<nl> size_t actual_size ; <nl> - byte * buffer = static_cast < byte * > ( VirtualMemory : : AllocateRegion ( <nl> - Assembler : : kMinimalBufferSize , <nl> - & actual_size , <nl> - VirtualMemory : : EXECUTABLE ) ) ; <nl> + byte * buffer = static_cast < byte * > ( OS : : Allocate ( Assembler : : kMinimalBufferSize , <nl> + & actual_size , <nl> + true ) ) ; <nl> CHECK ( buffer ) ; <nl> HandleScope handles ( isolate ) ; <nl> MacroAssembler assm ( isolate , buffer , static_cast < int > ( actual_size ) ) ; <nl> mmm a / test / cctest / test - code - stubs - x64 . cc <nl> ppp b / test / cctest / test - code - stubs - x64 . cc <nl> ConvertDToIFunc MakeConvertDToIFuncTrampoline ( Isolate * isolate , <nl> Register destination_reg ) { <nl> / / Allocate an executable page of memory . <nl> size_t actual_size ; <nl> - byte * buffer = static_cast < byte * > ( VirtualMemory : : AllocateRegion ( <nl> - Assembler : : kMinimalBufferSize , <nl> - & actual_size , <nl> - VirtualMemory : : EXECUTABLE ) ) ; <nl> + byte * buffer = static_cast < byte * > ( OS : : Allocate ( Assembler : : kMinimalBufferSize , <nl> + & actual_size , <nl> + true ) ) ; <nl> CHECK ( buffer ) ; <nl> HandleScope handles ( isolate ) ; <nl> MacroAssembler assm ( isolate , buffer , static_cast < int > ( actual_size ) ) ; <nl> mmm a / test / cctest / test - macro - assembler - x64 . cc <nl> ppp b / test / cctest / test - macro - assembler - x64 . cc <nl> <nl> # include " serialize . h " <nl> # include " cctest . h " <nl> <nl> - using namespace v8 : : internal ; <nl> + using v8 : : internal : : Assembler ; <nl> + using v8 : : internal : : CodeDesc ; <nl> + using v8 : : internal : : Condition ; <nl> + using v8 : : internal : : FUNCTION_CAST ; <nl> + using v8 : : internal : : HandleScope ; <nl> + using v8 : : internal : : Immediate ; <nl> + using v8 : : internal : : Isolate ; <nl> + using v8 : : internal : : Label ; <nl> + using v8 : : internal : : MacroAssembler ; <nl> + using v8 : : internal : : OS ; <nl> + using v8 : : internal : : Operand ; <nl> + using v8 : : internal : : RelocInfo ; <nl> + using v8 : : internal : : Smi ; <nl> + using v8 : : internal : : SmiIndex ; <nl> + using v8 : : internal : : byte ; <nl> + using v8 : : internal : : carry ; <nl> + using v8 : : internal : : greater ; <nl> + using v8 : : internal : : greater_equal ; <nl> + using v8 : : internal : : kIntSize ; <nl> + using v8 : : internal : : kPointerSize ; <nl> + using v8 : : internal : : kSmiTagMask ; <nl> + using v8 : : internal : : kSmiValueSize ; <nl> + using v8 : : internal : : less_equal ; <nl> + using v8 : : internal : : negative ; <nl> + using v8 : : internal : : not_carry ; <nl> + using v8 : : internal : : not_equal ; <nl> + using v8 : : internal : : not_zero ; <nl> + using v8 : : internal : : positive ; <nl> + using v8 : : internal : : r11 ; <nl> + using v8 : : internal : : r13 ; <nl> + using v8 : : internal : : r14 ; <nl> + using v8 : : internal : : r15 ; <nl> + using v8 : : internal : : r8 ; <nl> + using v8 : : internal : : r9 ; <nl> + using v8 : : internal : : rax ; <nl> + using v8 : : internal : : rbp ; <nl> + using v8 : : internal : : rbx ; <nl> + using v8 : : internal : : rcx ; <nl> + using v8 : : internal : : rdi ; <nl> + using v8 : : internal : : rdx ; <nl> + using v8 : : internal : : rsi ; <nl> + using v8 : : internal : : rsp ; <nl> + using v8 : : internal : : times_pointer_size ; <nl> <nl> / / Test the x64 assembler by compiling some simple functions into <nl> / / a buffer and executing them . 
These tests do not initialize the <nl> TEST ( SmiMove ) { <nl> v8 : : internal : : V8 : : Initialize ( NULL ) ; <nl> / / Allocate an executable page of memory . <nl> size_t actual_size ; <nl> - byte * buffer = static_cast < byte * > ( VirtualMemory : : AllocateRegion ( <nl> - Assembler : : kMinimalBufferSize , <nl> - & actual_size , <nl> - VirtualMemory : : EXECUTABLE ) ) ; <nl> + byte * buffer = static_cast < byte * > ( OS : : Allocate ( Assembler : : kMinimalBufferSize , <nl> + & actual_size , <nl> + true ) ) ; <nl> CHECK ( buffer ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope handles ( isolate ) ; <nl> TEST ( SmiCompare ) { <nl> v8 : : internal : : V8 : : Initialize ( NULL ) ; <nl> / / Allocate an executable page of memory . <nl> size_t actual_size ; <nl> - byte * buffer = static_cast < byte * > ( VirtualMemory : : AllocateRegion ( <nl> - Assembler : : kMinimalBufferSize * 2 , <nl> - & actual_size , <nl> - VirtualMemory : : EXECUTABLE ) ) ; <nl> + byte * buffer = <nl> + static_cast < byte * > ( OS : : Allocate ( Assembler : : kMinimalBufferSize * 2 , <nl> + & actual_size , <nl> + true ) ) ; <nl> CHECK ( buffer ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope handles ( isolate ) ; <nl> TEST ( Integer32ToSmi ) { <nl> v8 : : internal : : V8 : : Initialize ( NULL ) ; <nl> / / Allocate an executable page of memory . <nl> size_t actual_size ; <nl> - byte * buffer = static_cast < byte * > ( VirtualMemory : : AllocateRegion ( <nl> - Assembler : : kMinimalBufferSize , <nl> - & actual_size , <nl> - VirtualMemory : : EXECUTABLE ) ) ; <nl> + byte * buffer = static_cast < byte * > ( OS : : Allocate ( Assembler : : kMinimalBufferSize , <nl> + & actual_size , <nl> + true ) ) ; <nl> CHECK ( buffer ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope handles ( isolate ) ; <nl> TEST ( Integer64PlusConstantToSmi ) { <nl> v8 : : internal : : V8 : : Initialize ( NULL ) ; <nl> / / Allocate an executable page of memory . <nl> size_t actual_size ; <nl> - byte * buffer = static_cast < byte * > ( VirtualMemory : : AllocateRegion ( <nl> - Assembler : : kMinimalBufferSize , <nl> - & actual_size , <nl> - VirtualMemory : : EXECUTABLE ) ) ; <nl> + byte * buffer = static_cast < byte * > ( OS : : Allocate ( Assembler : : kMinimalBufferSize , <nl> + & actual_size , <nl> + true ) ) ; <nl> CHECK ( buffer ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope handles ( isolate ) ; <nl> TEST ( SmiCheck ) { <nl> v8 : : internal : : V8 : : Initialize ( NULL ) ; <nl> / / Allocate an executable page of memory . <nl> size_t actual_size ; <nl> - byte * buffer = static_cast < byte * > ( VirtualMemory : : AllocateRegion ( <nl> - Assembler : : kMinimalBufferSize , <nl> - & actual_size , <nl> - VirtualMemory : : EXECUTABLE ) ) ; <nl> + byte * buffer = static_cast < byte * > ( OS : : Allocate ( Assembler : : kMinimalBufferSize , <nl> + & actual_size , <nl> + true ) ) ; <nl> CHECK ( buffer ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope handles ( isolate ) ; <nl> TEST ( SmiNeg ) { <nl> v8 : : internal : : V8 : : Initialize ( NULL ) ; <nl> / / Allocate an executable page of memory . 
<nl> size_t actual_size ; <nl> - byte * buffer = static_cast < byte * > ( VirtualMemory : : AllocateRegion ( <nl> - Assembler : : kMinimalBufferSize , <nl> - & actual_size , <nl> - VirtualMemory : : EXECUTABLE ) ) ; <nl> + byte * buffer = <nl> + static_cast < byte * > ( OS : : Allocate ( Assembler : : kMinimalBufferSize , <nl> + & actual_size , <nl> + true ) ) ; <nl> CHECK ( buffer ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope handles ( isolate ) ; <nl> TEST ( SmiAdd ) { <nl> v8 : : internal : : V8 : : Initialize ( NULL ) ; <nl> / / Allocate an executable page of memory . <nl> size_t actual_size ; <nl> - byte * buffer = static_cast < byte * > ( VirtualMemory : : AllocateRegion ( <nl> - Assembler : : kMinimalBufferSize , <nl> - & actual_size , <nl> - VirtualMemory : : EXECUTABLE ) ) ; <nl> + byte * buffer = static_cast < byte * > ( OS : : Allocate ( Assembler : : kMinimalBufferSize , <nl> + & actual_size , <nl> + true ) ) ; <nl> CHECK ( buffer ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope handles ( isolate ) ; <nl> TEST ( SmiSub ) { <nl> v8 : : internal : : V8 : : Initialize ( NULL ) ; <nl> / / Allocate an executable page of memory . <nl> size_t actual_size ; <nl> - byte * buffer = static_cast < byte * > ( VirtualMemory : : AllocateRegion ( <nl> - Assembler : : kMinimalBufferSize * 2 , <nl> - & actual_size , <nl> - VirtualMemory : : EXECUTABLE ) ) ; <nl> + byte * buffer = <nl> + static_cast < byte * > ( OS : : Allocate ( Assembler : : kMinimalBufferSize * 2 , <nl> + & actual_size , <nl> + true ) ) ; <nl> CHECK ( buffer ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope handles ( isolate ) ; <nl> TEST ( SmiMul ) { <nl> v8 : : internal : : V8 : : Initialize ( NULL ) ; <nl> / / Allocate an executable page of memory . <nl> size_t actual_size ; <nl> - byte * buffer = static_cast < byte * > ( VirtualMemory : : AllocateRegion ( <nl> - Assembler : : kMinimalBufferSize , <nl> - & actual_size , <nl> - VirtualMemory : : EXECUTABLE ) ) ; <nl> + byte * buffer = static_cast < byte * > ( OS : : Allocate ( Assembler : : kMinimalBufferSize , <nl> + & actual_size , <nl> + true ) ) ; <nl> CHECK ( buffer ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope handles ( isolate ) ; <nl> TEST ( SmiDiv ) { <nl> v8 : : internal : : V8 : : Initialize ( NULL ) ; <nl> / / Allocate an executable page of memory . <nl> size_t actual_size ; <nl> - byte * buffer = static_cast < byte * > ( VirtualMemory : : AllocateRegion ( <nl> - Assembler : : kMinimalBufferSize * 2 , <nl> - & actual_size , <nl> - VirtualMemory : : EXECUTABLE ) ) ; <nl> + byte * buffer = <nl> + static_cast < byte * > ( OS : : Allocate ( Assembler : : kMinimalBufferSize * 2 , <nl> + & actual_size , <nl> + true ) ) ; <nl> CHECK ( buffer ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope handles ( isolate ) ; <nl> TEST ( SmiMod ) { <nl> v8 : : internal : : V8 : : Initialize ( NULL ) ; <nl> / / Allocate an executable page of memory . 
<nl> size_t actual_size ; <nl> - byte * buffer = static_cast < byte * > ( VirtualMemory : : AllocateRegion ( <nl> - Assembler : : kMinimalBufferSize * 2 , <nl> - & actual_size , <nl> - VirtualMemory : : EXECUTABLE ) ) ; <nl> + byte * buffer = <nl> + static_cast < byte * > ( OS : : Allocate ( Assembler : : kMinimalBufferSize * 2 , <nl> + & actual_size , <nl> + true ) ) ; <nl> CHECK ( buffer ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope handles ( isolate ) ; <nl> TEST ( SmiIndex ) { <nl> v8 : : internal : : V8 : : Initialize ( NULL ) ; <nl> / / Allocate an executable page of memory . <nl> size_t actual_size ; <nl> - byte * buffer = static_cast < byte * > ( VirtualMemory : : AllocateRegion ( <nl> - Assembler : : kMinimalBufferSize * 3 , <nl> - & actual_size , <nl> - VirtualMemory : : EXECUTABLE ) ) ; <nl> + byte * buffer = <nl> + static_cast < byte * > ( OS : : Allocate ( Assembler : : kMinimalBufferSize * 3 , <nl> + & actual_size , <nl> + true ) ) ; <nl> CHECK ( buffer ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope handles ( isolate ) ; <nl> TEST ( SmiSelectNonSmi ) { <nl> v8 : : internal : : V8 : : Initialize ( NULL ) ; <nl> / / Allocate an executable page of memory . <nl> size_t actual_size ; <nl> - byte * buffer = static_cast < byte * > ( VirtualMemory : : AllocateRegion ( <nl> - Assembler : : kMinimalBufferSize , <nl> - & actual_size , <nl> - VirtualMemory : : EXECUTABLE ) ) ; <nl> + byte * buffer = <nl> + static_cast < byte * > ( OS : : Allocate ( Assembler : : kMinimalBufferSize , <nl> + & actual_size , <nl> + true ) ) ; <nl> CHECK ( buffer ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope handles ( isolate ) ; <nl> TEST ( SmiAnd ) { <nl> v8 : : internal : : V8 : : Initialize ( NULL ) ; <nl> / / Allocate an executable page of memory . <nl> size_t actual_size ; <nl> - byte * buffer = static_cast < byte * > ( VirtualMemory : : AllocateRegion ( <nl> - Assembler : : kMinimalBufferSize , <nl> - & actual_size , <nl> - VirtualMemory : : EXECUTABLE ) ) ; <nl> + byte * buffer = <nl> + static_cast < byte * > ( OS : : Allocate ( Assembler : : kMinimalBufferSize , <nl> + & actual_size , <nl> + true ) ) ; <nl> CHECK ( buffer ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope handles ( isolate ) ; <nl> TEST ( SmiOr ) { <nl> v8 : : internal : : V8 : : Initialize ( NULL ) ; <nl> / / Allocate an executable page of memory . <nl> size_t actual_size ; <nl> - byte * buffer = static_cast < byte * > ( VirtualMemory : : AllocateRegion ( <nl> - Assembler : : kMinimalBufferSize , <nl> - & actual_size , <nl> - VirtualMemory : : EXECUTABLE ) ) ; <nl> + byte * buffer = <nl> + static_cast < byte * > ( OS : : Allocate ( Assembler : : kMinimalBufferSize , <nl> + & actual_size , <nl> + true ) ) ; <nl> CHECK ( buffer ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope handles ( isolate ) ; <nl> TEST ( SmiXor ) { <nl> v8 : : internal : : V8 : : Initialize ( NULL ) ; <nl> / / Allocate an executable page of memory . 
<nl> size_t actual_size ; <nl> - byte * buffer = static_cast < byte * > ( VirtualMemory : : AllocateRegion ( <nl> - Assembler : : kMinimalBufferSize , <nl> - & actual_size , <nl> - VirtualMemory : : EXECUTABLE ) ) ; <nl> + byte * buffer = <nl> + static_cast < byte * > ( OS : : Allocate ( Assembler : : kMinimalBufferSize , <nl> + & actual_size , <nl> + true ) ) ; <nl> CHECK ( buffer ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope handles ( isolate ) ; <nl> TEST ( SmiNot ) { <nl> v8 : : internal : : V8 : : Initialize ( NULL ) ; <nl> / / Allocate an executable page of memory . <nl> size_t actual_size ; <nl> - byte * buffer = static_cast < byte * > ( VirtualMemory : : AllocateRegion ( <nl> - Assembler : : kMinimalBufferSize , <nl> - & actual_size , <nl> - VirtualMemory : : EXECUTABLE ) ) ; <nl> + byte * buffer = <nl> + static_cast < byte * > ( OS : : Allocate ( Assembler : : kMinimalBufferSize , <nl> + & actual_size , <nl> + true ) ) ; <nl> CHECK ( buffer ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope handles ( isolate ) ; <nl> TEST ( SmiShiftLeft ) { <nl> v8 : : internal : : V8 : : Initialize ( NULL ) ; <nl> / / Allocate an executable page of memory . <nl> size_t actual_size ; <nl> - byte * buffer = static_cast < byte * > ( VirtualMemory : : AllocateRegion ( <nl> - Assembler : : kMinimalBufferSize * 4 , <nl> - & actual_size , <nl> - VirtualMemory : : EXECUTABLE ) ) ; <nl> + byte * buffer = <nl> + static_cast < byte * > ( OS : : Allocate ( Assembler : : kMinimalBufferSize * 4 , <nl> + & actual_size , <nl> + true ) ) ; <nl> CHECK ( buffer ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope handles ( isolate ) ; <nl> TEST ( SmiShiftLogicalRight ) { <nl> v8 : : internal : : V8 : : Initialize ( NULL ) ; <nl> / / Allocate an executable page of memory . <nl> size_t actual_size ; <nl> - byte * buffer = static_cast < byte * > ( VirtualMemory : : AllocateRegion ( <nl> - Assembler : : kMinimalBufferSize * 3 , <nl> - & actual_size , <nl> - VirtualMemory : : EXECUTABLE ) ) ; <nl> + byte * buffer = <nl> + static_cast < byte * > ( OS : : Allocate ( Assembler : : kMinimalBufferSize * 3 , <nl> + & actual_size , <nl> + true ) ) ; <nl> CHECK ( buffer ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope handles ( isolate ) ; <nl> TEST ( SmiShiftArithmeticRight ) { <nl> v8 : : internal : : V8 : : Initialize ( NULL ) ; <nl> / / Allocate an executable page of memory . <nl> size_t actual_size ; <nl> - byte * buffer = static_cast < byte * > ( VirtualMemory : : AllocateRegion ( <nl> - Assembler : : kMinimalBufferSize * 2 , <nl> - & actual_size , <nl> - VirtualMemory : : EXECUTABLE ) ) ; <nl> + byte * buffer = <nl> + static_cast < byte * > ( OS : : Allocate ( Assembler : : kMinimalBufferSize * 2 , <nl> + & actual_size , <nl> + true ) ) ; <nl> CHECK ( buffer ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope handles ( isolate ) ; <nl> TEST ( PositiveSmiTimesPowerOfTwoToInteger64 ) { <nl> v8 : : internal : : V8 : : Initialize ( NULL ) ; <nl> / / Allocate an executable page of memory . 
<nl> size_t actual_size ; <nl> - byte * buffer = static_cast < byte * > ( VirtualMemory : : AllocateRegion ( <nl> - Assembler : : kMinimalBufferSize * 4 , <nl> - & actual_size , <nl> - VirtualMemory : : EXECUTABLE ) ) ; <nl> + byte * buffer = <nl> + static_cast < byte * > ( OS : : Allocate ( Assembler : : kMinimalBufferSize * 4 , <nl> + & actual_size , <nl> + true ) ) ; <nl> CHECK ( buffer ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope handles ( isolate ) ; <nl> TEST ( OperandOffset ) { <nl> <nl> / / Allocate an executable page of memory . <nl> size_t actual_size ; <nl> - byte * buffer = static_cast < byte * > ( VirtualMemory : : AllocateRegion ( <nl> - Assembler : : kMinimalBufferSize * 2 , <nl> - & actual_size , <nl> - VirtualMemory : : EXECUTABLE ) ) ; <nl> + byte * buffer = <nl> + static_cast < byte * > ( OS : : Allocate ( Assembler : : kMinimalBufferSize * 2 , <nl> + & actual_size , <nl> + true ) ) ; <nl> CHECK ( buffer ) ; <nl> Isolate * isolate = Isolate : : Current ( ) ; <nl> HandleScope handles ( isolate ) ; <nl> mmm a / test / cctest / test - platform - linux . cc <nl> ppp b / test / cctest / test - platform - linux . cc <nl> <nl> using namespace : : v8 : : internal ; <nl> <nl> <nl> + TEST ( VirtualMemory ) { <nl> + VirtualMemory * vm = new VirtualMemory ( 1 * MB ) ; <nl> + CHECK ( vm - > IsReserved ( ) ) ; <nl> + void * block_addr = vm - > address ( ) ; <nl> + size_t block_size = 4 * KB ; <nl> + CHECK ( vm - > Commit ( block_addr , block_size , false ) ) ; <nl> + / / Check whether we can write to memory . <nl> + int * addr = static_cast < int * > ( block_addr ) ; <nl> + addr [ KB - 1 ] = 2 ; <nl> + CHECK ( vm - > Uncommit ( block_addr , block_size ) ) ; <nl> + delete vm ; <nl> + } <nl> + <nl> + <nl> TEST ( GetCurrentProcessId ) { <nl> CHECK_EQ ( static_cast < int > ( getpid ( ) ) , OS : : GetCurrentProcessId ( ) ) ; <nl> } <nl> mmm a / test / cctest / test - platform - win32 . cc <nl> ppp b / test / cctest / test - platform - win32 . cc <nl> <nl> using namespace : : v8 : : internal ; <nl> <nl> <nl> + TEST ( VirtualMemory ) { <nl> + VirtualMemory * vm = new VirtualMemory ( 1 * MB ) ; <nl> + CHECK ( vm - > IsReserved ( ) ) ; <nl> + void * block_addr = vm - > address ( ) ; <nl> + size_t block_size = 4 * KB ; <nl> + CHECK ( vm - > Commit ( block_addr , block_size , false ) ) ; <nl> + / / Check whether we can write to memory . <nl> + int * addr = static_cast < int * > ( block_addr ) ; <nl> + addr [ KB - 1 ] = 2 ; <nl> + CHECK ( vm - > Uncommit ( block_addr , block_size ) ) ; <nl> + delete vm ; <nl> + } <nl> + <nl> + <nl> TEST ( GetCurrentProcessId ) { <nl> CHECK_EQ ( static_cast < int > ( : : GetCurrentProcessId ( ) ) , <nl> OS : : GetCurrentProcessId ( ) ) ; <nl> mmm a / test / cctest / test - spaces . cc <nl> ppp b / test / cctest / test - spaces . 
cc <nl> static void VerifyMemoryChunk ( Isolate * isolate , <nl> size_t reserve_area_size , <nl> size_t commit_area_size , <nl> size_t second_commit_area_size , <nl> - VirtualMemory : : Executability executability ) { <nl> + Executability executable ) { <nl> MemoryAllocator * memory_allocator = new MemoryAllocator ( isolate ) ; <nl> CHECK ( memory_allocator - > SetUp ( heap - > MaxReserved ( ) , <nl> heap - > MaxExecutableSize ( ) ) ) ; <nl> TestMemoryAllocatorScope test_allocator_scope ( isolate , memory_allocator ) ; <nl> TestCodeRangeScope test_code_range_scope ( isolate , code_range ) ; <nl> <nl> - size_t header_size = ( executability = = VirtualMemory : : EXECUTABLE ) <nl> + size_t header_size = ( executable = = EXECUTABLE ) <nl> ? MemoryAllocator : : CodePageGuardStartOffset ( ) <nl> : MemoryChunk : : kObjectStartOffset ; <nl> - size_t guard_size = ( executability = = VirtualMemory : : EXECUTABLE ) <nl> + size_t guard_size = ( executable = = EXECUTABLE ) <nl> ? MemoryAllocator : : CodePageGuardSize ( ) <nl> : 0 ; <nl> <nl> MemoryChunk * memory_chunk = memory_allocator - > AllocateChunk ( reserve_area_size , <nl> commit_area_size , <nl> - executability , <nl> + executable , <nl> NULL ) ; <nl> size_t alignment = code_range - > exists ( ) ? <nl> - MemoryChunk : : kAlignment : VirtualMemory : : GetPageSize ( ) ; <nl> - size_t reserved_size = ( ( executability = = VirtualMemory : : EXECUTABLE ) ) <nl> + MemoryChunk : : kAlignment : OS : : CommitPageSize ( ) ; <nl> + size_t reserved_size = ( ( executable = = EXECUTABLE ) ) <nl> ? RoundUp ( header_size + guard_size + reserve_area_size + guard_size , <nl> alignment ) <nl> - : RoundUp ( header_size + reserve_area_size , VirtualMemory : : GetPageSize ( ) ) ; <nl> + : RoundUp ( header_size + reserve_area_size , OS : : CommitPageSize ( ) ) ; <nl> CHECK ( memory_chunk - > size ( ) = = reserved_size ) ; <nl> CHECK ( memory_chunk - > area_start ( ) < memory_chunk - > address ( ) + <nl> memory_chunk - > size ( ) ) ; <nl> TEST ( MemoryChunk ) { <nl> reserve_area_size , <nl> initial_commit_area_size , <nl> second_commit_area_size , <nl> - VirtualMemory : : EXECUTABLE ) ; <nl> + EXECUTABLE ) ; <nl> <nl> VerifyMemoryChunk ( isolate , <nl> heap , <nl> TEST ( MemoryChunk ) { <nl> reserve_area_size , <nl> initial_commit_area_size , <nl> second_commit_area_size , <nl> - VirtualMemory : : NOT_EXECUTABLE ) ; <nl> + NOT_EXECUTABLE ) ; <nl> delete code_range ; <nl> <nl> / / Without CodeRange . <nl> TEST ( MemoryChunk ) { <nl> reserve_area_size , <nl> initial_commit_area_size , <nl> second_commit_area_size , <nl> - VirtualMemory : : EXECUTABLE ) ; <nl> + EXECUTABLE ) ; <nl> <nl> VerifyMemoryChunk ( isolate , <nl> heap , <nl> TEST ( MemoryChunk ) { <nl> reserve_area_size , <nl> initial_commit_area_size , <nl> second_commit_area_size , <nl> - VirtualMemory : : NOT_EXECUTABLE ) ; <nl> + NOT_EXECUTABLE ) ; <nl> } <nl> } <nl> <nl> TEST ( MemoryAllocator ) { <nl> OldSpace faked_space ( heap , <nl> heap - > MaxReserved ( ) , <nl> OLD_POINTER_SPACE , <nl> - VirtualMemory : : NOT_EXECUTABLE ) ; <nl> + NOT_EXECUTABLE ) ; <nl> Page * first_page = memory_allocator - > AllocatePage ( <nl> - faked_space . AreaSize ( ) , & faked_space , VirtualMemory : : NOT_EXECUTABLE ) ; <nl> + faked_space . AreaSize ( ) , & faked_space , NOT_EXECUTABLE ) ; <nl> <nl> first_page - > InsertAfter ( faked_space . anchor ( ) - > prev_page ( ) ) ; <nl> CHECK ( first_page - > is_valid ( ) ) ; <nl> TEST ( MemoryAllocator ) { <nl> <nl> / / Again , we should get n or n - 1 pages . 
<nl> Page * other = memory_allocator - > AllocatePage ( <nl> - faked_space . AreaSize ( ) , & faked_space , VirtualMemory : : NOT_EXECUTABLE ) ; <nl> + faked_space . AreaSize ( ) , & faked_space , NOT_EXECUTABLE ) ; <nl> CHECK ( other - > is_valid ( ) ) ; <nl> total_pages + + ; <nl> other - > InsertAfter ( first_page ) ; <nl> TEST ( OldSpace ) { <nl> OldSpace * s = new OldSpace ( heap , <nl> heap - > MaxOldGenerationSize ( ) , <nl> OLD_POINTER_SPACE , <nl> - VirtualMemory : : NOT_EXECUTABLE ) ; <nl> + NOT_EXECUTABLE ) ; <nl> CHECK ( s ! = NULL ) ; <nl> <nl> CHECK ( s - > SetUp ( ) ) ; <nl> TEST ( LargeObjectSpace ) { <nl> <nl> int lo_size = Page : : kPageSize ; <nl> <nl> - Object * obj = lo - > AllocateRaw ( <nl> - lo_size , VirtualMemory : : NOT_EXECUTABLE ) - > ToObjectUnchecked ( ) ; <nl> + Object * obj = lo - > AllocateRaw ( lo_size , NOT_EXECUTABLE ) - > ToObjectUnchecked ( ) ; <nl> CHECK ( obj - > IsHeapObject ( ) ) ; <nl> <nl> HeapObject * ho = HeapObject : : cast ( obj ) ; <nl> TEST ( LargeObjectSpace ) { <nl> <nl> while ( true ) { <nl> intptr_t available = lo - > Available ( ) ; <nl> - { MaybeObject * maybe_obj = lo - > AllocateRaw ( <nl> - lo_size , VirtualMemory : : NOT_EXECUTABLE ) ; <nl> + { MaybeObject * maybe_obj = lo - > AllocateRaw ( lo_size , NOT_EXECUTABLE ) ; <nl> if ( ! maybe_obj - > ToObject ( & obj ) ) break ; <nl> } <nl> CHECK ( lo - > Available ( ) < available ) ; <nl> TEST ( LargeObjectSpace ) { <nl> <nl> CHECK ( ! lo - > IsEmpty ( ) ) ; <nl> <nl> - CHECK ( lo - > AllocateRaw ( lo_size , VirtualMemory : : NOT_EXECUTABLE ) - > IsFailure ( ) ) ; <nl> + CHECK ( lo - > AllocateRaw ( lo_size , NOT_EXECUTABLE ) - > IsFailure ( ) ) ; <nl> } <nl> deleted file mode 100644 <nl> index d441835b391 . . 00000000000 <nl> mmm a / test / cctest / test - virtual - memory . cc <nl> ppp / dev / null <nl> <nl> - / / Copyright 2013 the V8 project authors . All rights reserved . <nl> - / / Redistribution and use in source and binary forms , with or without <nl> - / / modification , are permitted provided that the following conditions are <nl> - / / met : <nl> - / / <nl> - / / * Redistributions of source code must retain the above copyright <nl> - / / notice , this list of conditions and the following disclaimer . <nl> - / / * Redistributions in binary form must reproduce the above <nl> - / / copyright notice , this list of conditions and the following <nl> - / / disclaimer in the documentation and / or other materials provided <nl> - / / with the distribution . <nl> - / / * Neither the name of Google Inc . nor the names of its <nl> - / / contributors may be used to endorse or promote products derived <nl> - / / from this software without specific prior written permission . <nl> - / / <nl> - / / THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS <nl> - / / " AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT <nl> - / / LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR <nl> - / / A PARTICULAR PURPOSE ARE DISCLAIMED . 
IN NO EVENT SHALL THE COPYRIGHT <nl> - / / OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL , <nl> - / / SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT <nl> - / / LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , <nl> - / / DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY <nl> - / / THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT <nl> - / / ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> - / / OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> - <nl> - # include " v8 . h " <nl> - <nl> - # include " cctest . h " <nl> - # include " platform / virtual - memory . h " <nl> - <nl> - using namespace : : v8 : : internal ; <nl> - <nl> - <nl> - TEST ( CommitAndUncommit ) { <nl> - static const size_t kSize = 1 * MB ; <nl> - static const size_t kBlockSize = 4 * KB ; <nl> - VirtualMemory vm ( kSize ) ; <nl> - CHECK ( vm . IsReserved ( ) ) ; <nl> - void * block_addr = vm . address ( ) ; <nl> - CHECK ( vm . Commit ( block_addr , kBlockSize , VirtualMemory : : NOT_EXECUTABLE ) ) ; <nl> - / / Check whether we can write to memory . <nl> - int * addr = static_cast < int * > ( block_addr ) ; <nl> - addr [ 5 ] = 2 ; <nl> - CHECK ( vm . Uncommit ( block_addr , kBlockSize ) ) ; <nl> - } <nl> - <nl> - <nl> - TEST ( Release ) { <nl> - static const size_t kSize = 4 * KB ; <nl> - VirtualMemory vm ( kSize ) ; <nl> - CHECK ( vm . IsReserved ( ) ) ; <nl> - CHECK_LE ( kSize , vm . size ( ) ) ; <nl> - CHECK_NE ( NULL , vm . address ( ) ) ; <nl> - vm . Release ( ) ; <nl> - CHECK ( ! vm . IsReserved ( ) ) ; <nl> - } <nl> - <nl> - <nl> - TEST ( TakeControl ) { <nl> - static const size_t kSize = 64 * KB ; <nl> - <nl> - VirtualMemory vm1 ( kSize ) ; <nl> - size_t size1 = vm1 . size ( ) ; <nl> - CHECK ( vm1 . IsReserved ( ) ) ; <nl> - CHECK_LE ( kSize , size1 ) ; <nl> - <nl> - VirtualMemory vm2 ; <nl> - CHECK ( ! vm2 . IsReserved ( ) ) ; <nl> - <nl> - vm2 . TakeControl ( & vm1 ) ; <nl> - CHECK ( vm2 . IsReserved ( ) ) ; <nl> - CHECK ( ! vm1 . IsReserved ( ) ) ; <nl> - CHECK ( vm2 . size ( ) = = size1 ) ; <nl> - } <nl> - <nl> - <nl> - TEST ( AllocationGranularityIsPowerOf2 ) { <nl> - CHECK ( IsPowerOf2 ( VirtualMemory : : GetAllocationGranularity ( ) ) ) ; <nl> - } <nl> - <nl> - <nl> - TEST ( PageSizeIsPowerOf2 ) { <nl> - CHECK ( IsPowerOf2 ( VirtualMemory : : GetPageSize ( ) ) ) ; <nl> - } <nl> mmm a / tools / gyp / v8 . gyp <nl> ppp b / tools / gyp / v8 . gyp <nl> <nl> ' . . / . . / src / platform / semaphore . h ' , <nl> ' . . / . . / src / platform / socket . cc ' , <nl> ' . . / . . / src / platform / socket . h ' , <nl> - ' . . / . . / src / platform / virtual - memory . cc ' , <nl> - ' . . / . . / src / platform / virtual - memory . h ' , <nl> ' . . / . . / src / preparse - data - format . h ' , <nl> ' . . / . . / src / preparse - data . cc ' , <nl> ' . . / . . / src / preparse - data . h ' , <nl>
|
Revert r16648, r16641, r16638 and r16637.
|
v8/v8
|
718a6a9a9ed22a45d7cf36f32675a438b8ed3040
|
2013-09-11T18:30:01Z
|
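Note on the v8/v8 row above: the reverted hunks repeatedly round a requested commit size up to the OS commit page size before committing memory (RoundUp(header_size + requested, OS::CommitPageSize())). The sketch below only illustrates that rounding pattern; RoundUp and the 4 KiB page size are local stand-ins, not the actual V8 helpers.

#include <cstddef>

// Round `size` up to the next multiple of `alignment`; `alignment` must be a
// power of two, which OS page sizes are. Local stand-in for the RoundUp calls
// seen in the diff, not V8's definition.
static inline size_t RoundUp(size_t size, size_t alignment) {
  return (size + alignment - 1) & ~(alignment - 1);
}

int main() {
  const size_t kCommitPageSize = 4096;  // assumed 4 KiB commit page
  size_t header_size = 256;
  size_t requested = 10000;
  // Smallest page-aligned region that covers the header plus the requested area.
  size_t commit_size = RoundUp(header_size + requested, kCommitPageSize);
  return commit_size == 12288 ? 0 : 1;  // 10256 rounds up to 3 pages = 12288
}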
mmm a / src / wallet / walletutil . cpp <nl> ppp b / src / wallet / walletutil . cpp <nl> bool IsFeatureSupported ( int wallet_version , int feature_version ) <nl> { <nl> return wallet_version > = feature_version ; <nl> } <nl> + <nl> + WalletFeature GetClosestWalletFeature ( int version ) <nl> + { <nl> + if ( version > = FEATURE_LATEST ) return FEATURE_LATEST ; <nl> + if ( version > = FEATURE_PRE_SPLIT_KEYPOOL ) return FEATURE_PRE_SPLIT_KEYPOOL ; <nl> + if ( version > = FEATURE_NO_DEFAULT_KEY ) return FEATURE_NO_DEFAULT_KEY ; <nl> + if ( version > = FEATURE_HD_SPLIT ) return FEATURE_HD_SPLIT ; <nl> + if ( version > = FEATURE_HD ) return FEATURE_HD ; <nl> + if ( version > = FEATURE_COMPRPUBKEY ) return FEATURE_COMPRPUBKEY ; <nl> + if ( version > = FEATURE_WALLETCRYPT ) return FEATURE_WALLETCRYPT ; <nl> + if ( version > = FEATURE_BASE ) return FEATURE_BASE ; <nl> + return static_cast < WalletFeature > ( 0 ) ; <nl> + } <nl> mmm a / src / wallet / walletutil . h <nl> ppp b / src / wallet / walletutil . h <nl> enum WalletFeature <nl> } ; <nl> <nl> bool IsFeatureSupported ( int wallet_version , int feature_version ) ; <nl> + WalletFeature GetClosestWalletFeature ( int version ) ; <nl> <nl> enum WalletFlags : uint64_t { <nl> / / wallet flags in the upper section ( > 1 < < 31 ) will lead to not opening the wallet if flag is unknown <nl>
|
wallet: Add GetClosestWalletFeature function
|
bitcoin/bitcoin
|
5f720544f34dedf75b063b962845fa8eca604514
|
2020-10-19T04:14:38Z
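The helper added in the diff above rounds an arbitrary wallet version down to the nearest defined WalletFeature by walking the thresholds from highest to lowest. A self-contained sketch of that lookup follows; the enum member names are the ones referenced in the patch, but the numeric values are illustrative placeholders rather than Bitcoin Core's real wallet version numbers:

    // Standalone illustration of the descending-threshold walk used by
    // GetClosestWalletFeature in the patch above. Numeric enum values are
    // placeholders chosen only to preserve the ordering; they are NOT the
    // real Bitcoin Core version numbers.
    #include <cassert>

    enum WalletFeature {
        FEATURE_BASE = 10000,               // placeholder
        FEATURE_WALLETCRYPT = 20000,        // placeholder
        FEATURE_COMPRPUBKEY = 30000,        // placeholder
        FEATURE_HD = 40000,                 // placeholder
        FEATURE_HD_SPLIT = 50000,           // placeholder
        FEATURE_NO_DEFAULT_KEY = 60000,     // placeholder
        FEATURE_PRE_SPLIT_KEYPOOL = 70000,  // placeholder
        FEATURE_LATEST = FEATURE_PRE_SPLIT_KEYPOOL  // assumed alias of the highest feature
    };

    WalletFeature GetClosestWalletFeature(int version)
    {
        if (version >= FEATURE_LATEST) return FEATURE_LATEST;
        if (version >= FEATURE_PRE_SPLIT_KEYPOOL) return FEATURE_PRE_SPLIT_KEYPOOL;
        if (version >= FEATURE_NO_DEFAULT_KEY) return FEATURE_NO_DEFAULT_KEY;
        if (version >= FEATURE_HD_SPLIT) return FEATURE_HD_SPLIT;
        if (version >= FEATURE_HD) return FEATURE_HD;
        if (version >= FEATURE_COMPRPUBKEY) return FEATURE_COMPRPUBKEY;
        if (version >= FEATURE_WALLETCRYPT) return FEATURE_WALLETCRYPT;
        if (version >= FEATURE_BASE) return FEATURE_BASE;
        return static_cast<WalletFeature>(0);
    }

    int main() {
        // A version that falls between two thresholds maps down to the lower one.
        assert(GetClosestWalletFeature(45000) == FEATURE_HD);
        // Anything below FEATURE_BASE yields 0 (no recognized feature).
        assert(GetClosestWalletFeature(1) == static_cast<WalletFeature>(0));
        return 0;
    }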
|
mmm a / include / grpc / grpc . h <nl> ppp b / include / grpc / grpc . h <nl> void grpc_completion_queue_destroy ( grpc_completion_queue * cq ) ; <nl> / * Create a call given a grpc_channel , in order to call ' method ' . The request <nl> is not sent until grpc_call_invoke is called . All completions are sent to <nl> ' completion_queue ' . * / <nl> - grpc_call * grpc_channel_create_call ( grpc_channel * channel , const char * method , <nl> - const char * host , gpr_timespec deadline ) ; <nl> + grpc_call * grpc_channel_create_call_old ( grpc_channel * channel , <nl> + const char * method , const char * host , <nl> + gpr_timespec deadline ) ; <nl> <nl> / * Create a client channel * / <nl> grpc_channel * grpc_channel_create ( const char * target , <nl> void grpc_channel_destroy ( grpc_channel * channel ) ; <nl> REQUIRES : grpc_call_start_invoke / grpc_call_server_end_initial_metadata have <nl> not been called on this call . <nl> Produces no events . * / <nl> - grpc_call_error grpc_call_add_metadata ( grpc_call * call , grpc_metadata * metadata , <nl> - gpr_uint32 flags ) ; <nl> + grpc_call_error grpc_call_add_metadata_old ( grpc_call * call , <nl> + grpc_metadata * metadata , <nl> + gpr_uint32 flags ) ; <nl> <nl> / * Invoke the RPC . Starts sending metadata and request headers on the wire . <nl> flags is a bit - field combination of the write flags defined above . <nl> grpc_call_error grpc_call_add_metadata ( grpc_call * call , grpc_metadata * metadata , <nl> Produces a GRPC_FINISHED event with finished_tag when the call has been <nl> completed ( there may be other events for the call pending at this <nl> time ) * / <nl> - grpc_call_error grpc_call_invoke ( grpc_call * call , grpc_completion_queue * cq , <nl> - void * metadata_read_tag , void * finished_tag , <nl> - gpr_uint32 flags ) ; <nl> + grpc_call_error grpc_call_invoke_old ( grpc_call * call , grpc_completion_queue * cq , <nl> + void * metadata_read_tag , <nl> + void * finished_tag , gpr_uint32 flags ) ; <nl> <nl> / * Accept an incoming RPC , binding a completion queue to it . <nl> To be called before sending or receiving messages . <nl> grpc_call_error grpc_call_invoke ( grpc_call * call , grpc_completion_queue * cq , <nl> Produces a GRPC_FINISHED event with finished_tag when the call has been <nl> completed ( there may be other events for the call pending at this <nl> time ) * / <nl> - grpc_call_error grpc_call_server_accept ( grpc_call * call , <nl> - grpc_completion_queue * cq , <nl> - void * finished_tag ) ; <nl> + grpc_call_error grpc_call_server_accept_old ( grpc_call * call , <nl> + grpc_completion_queue * cq , <nl> + void * finished_tag ) ; <nl> <nl> / * Start sending metadata . <nl> To be called before sending messages . <nl> grpc_call_error grpc_call_server_accept ( grpc_call * call , <nl> REQUIRES : Can be called at most once per call . <nl> Can only be called on the server . <nl> Must be called after grpc_call_server_accept * / <nl> - grpc_call_error grpc_call_server_end_initial_metadata ( grpc_call * call , <nl> - gpr_uint32 flags ) ; <nl> + grpc_call_error grpc_call_server_end_initial_metadata_old ( grpc_call * call , <nl> + gpr_uint32 flags ) ; <nl> <nl> / * Called by clients to cancel an RPC on the server . <nl> Can be called multiple times , from any thread . * / <nl> grpc_call_error grpc_call_cancel_with_status ( grpc_call * call , <nl> grpc_call_server_end_of_initial_metadata must have been called <nl> successfully . <nl> Produces a GRPC_WRITE_ACCEPTED event . 
* / <nl> - grpc_call_error grpc_call_start_write ( grpc_call * call , <nl> - grpc_byte_buffer * byte_buffer , void * tag , <nl> - gpr_uint32 flags ) ; <nl> + grpc_call_error grpc_call_start_write_old ( grpc_call * call , <nl> + grpc_byte_buffer * byte_buffer , <nl> + void * tag , gpr_uint32 flags ) ; <nl> <nl> / * Queue a status for writing . <nl> REQUIRES : No other writes are pending on the call . <nl> grpc_call_error grpc_call_start_write ( grpc_call * call , <nl> call prior to calling this . <nl> Only callable on the server . <nl> Produces a GRPC_FINISH_ACCEPTED event when the status is sent . * / <nl> - grpc_call_error grpc_call_start_write_status ( grpc_call * call , <nl> - grpc_status_code status_code , <nl> - const char * status_message , <nl> - void * tag ) ; <nl> + grpc_call_error grpc_call_start_write_status_old ( grpc_call * call , <nl> + grpc_status_code status_code , <nl> + const char * status_message , <nl> + void * tag ) ; <nl> <nl> / * No more messages to send . <nl> REQUIRES : No other writes are pending on the call . <nl> Only callable on the client . <nl> Produces a GRPC_FINISH_ACCEPTED event when all bytes for the call have passed <nl> outgoing flow control . * / <nl> - grpc_call_error grpc_call_writes_done ( grpc_call * call , void * tag ) ; <nl> + grpc_call_error grpc_call_writes_done_old ( grpc_call * call , void * tag ) ; <nl> <nl> / * Initiate a read on a call . Output event contains a byte buffer with the <nl> result of the read . <nl> grpc_call_error grpc_call_writes_done ( grpc_call * call , void * tag ) ; <nl> On the server : <nl> grpc_call_server_accept must be called before calling this . <nl> Produces a single GRPC_READ event . * / <nl> - grpc_call_error grpc_call_start_read ( grpc_call * call , void * tag ) ; <nl> + grpc_call_error grpc_call_start_read_old ( grpc_call * call , void * tag ) ; <nl> <nl> / * Destroy a call . * / <nl> void grpc_call_destroy ( grpc_call * call ) ; <nl> void grpc_call_destroy ( grpc_call * call ) ; <nl> tag_cancel . <nl> REQUIRES : Server must not have been shutdown . <nl> NOTE : calling this is the only way to obtain GRPC_SERVER_RPC_NEW events . * / <nl> - grpc_call_error grpc_server_request_call ( grpc_server * server , void * tag_new ) ; <nl> + grpc_call_error grpc_server_request_call_old ( grpc_server * server , <nl> + void * tag_new ) ; <nl> <nl> / * Create a server * / <nl> grpc_server * grpc_server_create ( grpc_completion_queue * cq , <nl> mmm a / src / core / surface / call . c <nl> ppp b / src / core / surface / call . 
c <nl> void grpc_call_add_mdelem ( grpc_call * call , grpc_mdelem * mdelem , <nl> elem - > filter - > call_op ( elem , NULL , & op ) ; <nl> } <nl> <nl> - grpc_call_error grpc_call_add_metadata ( grpc_call * call , grpc_metadata * metadata , <nl> - gpr_uint32 flags ) { <nl> + grpc_call_error grpc_call_add_metadata_old ( grpc_call * call , <nl> + grpc_metadata * metadata , <nl> + gpr_uint32 flags ) { <nl> grpc_mdelem * mdelem ; <nl> <nl> if ( call - > is_client ) { <nl> static void call_started ( void * user_data , grpc_op_error error ) { <nl> grpc_call_internal_unref ( call ) ; <nl> } <nl> <nl> - grpc_call_error grpc_call_invoke ( grpc_call * call , grpc_completion_queue * cq , <nl> - void * metadata_read_tag , void * finished_tag , <nl> - gpr_uint32 flags ) { <nl> + grpc_call_error grpc_call_invoke_old ( grpc_call * call , grpc_completion_queue * cq , <nl> + void * metadata_read_tag , <nl> + void * finished_tag , gpr_uint32 flags ) { <nl> grpc_call_element * elem ; <nl> grpc_call_op op ; <nl> <nl> grpc_call_error grpc_call_invoke ( grpc_call * call , grpc_completion_queue * cq , <nl> return GRPC_CALL_OK ; <nl> } <nl> <nl> - grpc_call_error grpc_call_server_accept ( grpc_call * call , <nl> - grpc_completion_queue * cq , <nl> - void * finished_tag ) { <nl> + grpc_call_error grpc_call_server_accept_old ( grpc_call * call , <nl> + grpc_completion_queue * cq , <nl> + void * finished_tag ) { <nl> / * validate preconditions * / <nl> if ( call - > is_client ) { <nl> gpr_log ( GPR_ERROR , " can only call % s on servers " , __FUNCTION__ ) ; <nl> grpc_call_error grpc_call_server_accept ( grpc_call * call , <nl> return GRPC_CALL_OK ; <nl> } <nl> <nl> - grpc_call_error grpc_call_server_end_initial_metadata ( grpc_call * call , <nl> - gpr_uint32 flags ) { <nl> + grpc_call_error grpc_call_server_end_initial_metadata_old ( grpc_call * call , <nl> + gpr_uint32 flags ) { <nl> grpc_call_element * elem ; <nl> grpc_call_op op ; <nl> <nl> static void request_more_data ( grpc_call * call ) { <nl> elem - > filter - > call_op ( elem , NULL , & op ) ; <nl> } <nl> <nl> - grpc_call_error grpc_call_start_read ( grpc_call * call , void * tag ) { <nl> + grpc_call_error grpc_call_start_read_old ( grpc_call * call , void * tag ) { <nl> gpr_uint8 request_more = 0 ; <nl> <nl> switch ( call - > state ) { <nl> grpc_call_error grpc_call_start_read ( grpc_call * call , void * tag ) { <nl> return GRPC_CALL_OK ; <nl> } <nl> <nl> - grpc_call_error grpc_call_start_write ( grpc_call * call , <nl> - grpc_byte_buffer * byte_buffer , void * tag , <nl> - gpr_uint32 flags ) { <nl> + grpc_call_error grpc_call_start_write_old ( grpc_call * call , <nl> + grpc_byte_buffer * byte_buffer , <nl> + void * tag , gpr_uint32 flags ) { <nl> grpc_call_element * elem ; <nl> grpc_call_op op ; <nl> <nl> grpc_call_error grpc_call_start_write ( grpc_call * call , <nl> return GRPC_CALL_OK ; <nl> } <nl> <nl> - grpc_call_error grpc_call_writes_done ( grpc_call * call , void * tag ) { <nl> + grpc_call_error grpc_call_writes_done_old ( grpc_call * call , void * tag ) { <nl> grpc_call_element * elem ; <nl> grpc_call_op op ; <nl> <nl> grpc_call_error grpc_call_writes_done ( grpc_call * call , void * tag ) { <nl> return GRPC_CALL_OK ; <nl> } <nl> <nl> - grpc_call_error grpc_call_start_write_status ( grpc_call * call , <nl> - grpc_status_code status , <nl> - const char * details , void * tag ) { <nl> + grpc_call_error grpc_call_start_write_status_old ( grpc_call * call , <nl> + grpc_status_code status , <nl> + const char * details , <nl> + void * tag ) { <nl> 
grpc_call_element * elem ; <nl> grpc_call_op op ; <nl> <nl> mmm a / src / core / surface / channel . c <nl> ppp b / src / core / surface / channel . c <nl> grpc_channel * grpc_channel_create_from_filters ( <nl> <nl> static void do_nothing ( void * ignored , grpc_op_error error ) { } <nl> <nl> - grpc_call * grpc_channel_create_call ( grpc_channel * channel , const char * method , <nl> - const char * host , <nl> - gpr_timespec absolute_deadline ) { <nl> + grpc_call * grpc_channel_create_call_old ( grpc_channel * channel , <nl> + const char * method , const char * host , <nl> + gpr_timespec absolute_deadline ) { <nl> grpc_call * call ; <nl> grpc_mdelem * path_mdelem ; <nl> grpc_mdelem * authority_mdelem ; <nl> mmm a / src / core / surface / server . c <nl> ppp b / src / core / surface / server . c <nl> void grpc_server_add_listener ( grpc_server * server , void * arg , <nl> server - > listeners = l ; <nl> } <nl> <nl> - grpc_call_error grpc_server_request_call ( grpc_server * server , void * tag_new ) { <nl> + grpc_call_error grpc_server_request_call_old ( grpc_server * server , <nl> + void * tag_new ) { <nl> call_data * calld ; <nl> <nl> grpc_cq_begin_op ( server - > cq , NULL , GRPC_SERVER_RPC_NEW ) ; <nl> mmm a / src / cpp / client / channel . cc <nl> ppp b / src / cpp / client / channel . cc <nl> Status Channel : : StartBlockingRpc ( const RpcMethod & method , <nl> const google : : protobuf : : Message & request , <nl> google : : protobuf : : Message * result ) { <nl> Status status ; <nl> - grpc_call * call = grpc_channel_create_call ( <nl> + grpc_call * call = grpc_channel_create_call_old ( <nl> c_channel_ , method . name ( ) , target_ . c_str ( ) , context - > RawDeadline ( ) ) ; <nl> context - > set_call ( call ) ; <nl> grpc_event * ev ; <nl> Status Channel : : StartBlockingRpc ( const RpcMethod & method , <nl> / / add_metadata from context <nl> / / <nl> / / invoke <nl> - GPR_ASSERT ( grpc_call_invoke ( call , cq , metadata_read_tag , finished_tag , <nl> - GRPC_WRITE_BUFFER_HINT ) = = GRPC_CALL_OK ) ; <nl> + GPR_ASSERT ( grpc_call_invoke_old ( call , cq , metadata_read_tag , finished_tag , <nl> + GRPC_WRITE_BUFFER_HINT ) = = GRPC_CALL_OK ) ; <nl> / / write request <nl> grpc_byte_buffer * write_buffer = nullptr ; <nl> bool success = SerializeProto ( request , & write_buffer ) ; <nl> Status Channel : : StartBlockingRpc ( const RpcMethod & method , <nl> GetFinalStatus ( cq , finished_tag , nullptr ) ; <nl> return status ; <nl> } <nl> - GPR_ASSERT ( grpc_call_start_write ( call , write_buffer , write_tag , <nl> - GRPC_WRITE_BUFFER_HINT ) = = GRPC_CALL_OK ) ; <nl> + GPR_ASSERT ( grpc_call_start_write_old ( call , write_buffer , write_tag , <nl> + GRPC_WRITE_BUFFER_HINT ) = = GRPC_CALL_OK ) ; <nl> grpc_byte_buffer_destroy ( write_buffer ) ; <nl> ev = grpc_completion_queue_pluck ( cq , write_tag , gpr_inf_future ) ; <nl> <nl> Status Channel : : StartBlockingRpc ( const RpcMethod & method , <nl> return status ; <nl> } <nl> / / writes done <nl> - GPR_ASSERT ( grpc_call_writes_done ( call , halfclose_tag ) = = GRPC_CALL_OK ) ; <nl> + GPR_ASSERT ( grpc_call_writes_done_old ( call , halfclose_tag ) = = GRPC_CALL_OK ) ; <nl> ev = grpc_completion_queue_pluck ( cq , halfclose_tag , gpr_inf_future ) ; <nl> grpc_event_finish ( ev ) ; <nl> / / start read metadata <nl> Status Channel : : StartBlockingRpc ( const RpcMethod & method , <nl> ev = grpc_completion_queue_pluck ( cq , metadata_read_tag , gpr_inf_future ) ; <nl> grpc_event_finish ( ev ) ; <nl> / / start read <nl> - GPR_ASSERT ( 
grpc_call_start_read ( call , read_tag ) = = GRPC_CALL_OK ) ; <nl> + GPR_ASSERT ( grpc_call_start_read_old ( call , read_tag ) = = GRPC_CALL_OK ) ; <nl> ev = grpc_completion_queue_pluck ( cq , read_tag , gpr_inf_future ) ; <nl> if ( ev - > data . read ) { <nl> if ( ! DeserializeProto ( ev - > data . read , result ) ) { <nl> StreamContextInterface * Channel : : CreateStream ( <nl> const RpcMethod & method , ClientContext * context , <nl> const google : : protobuf : : Message * request , <nl> google : : protobuf : : Message * result ) { <nl> - grpc_call * call = grpc_channel_create_call ( <nl> + grpc_call * call = grpc_channel_create_call_old ( <nl> c_channel_ , method . name ( ) , target_ . c_str ( ) , context - > RawDeadline ( ) ) ; <nl> context - > set_call ( call ) ; <nl> grpc_completion_queue * cq = grpc_completion_queue_create ( ) ; <nl> mmm a / src / cpp / server / async_server . cc <nl> ppp b / src / cpp / server / async_server . cc <nl> void AsyncServer : : RequestOneRpc ( ) { <nl> return ; <nl> } <nl> lock . unlock ( ) ; <nl> - grpc_call_error err = grpc_server_request_call ( server_ , nullptr ) ; <nl> + grpc_call_error err = grpc_server_request_call_old ( server_ , nullptr ) ; <nl> GPR_ASSERT ( err = = GRPC_CALL_OK ) ; <nl> } <nl> <nl> mmm a / src / cpp / server / async_server_context . cc <nl> ppp b / src / cpp / server / async_server_context . cc <nl> AsyncServerContext : : AsyncServerContext ( <nl> AsyncServerContext : : ~ AsyncServerContext ( ) { grpc_call_destroy ( call_ ) ; } <nl> <nl> void AsyncServerContext : : Accept ( grpc_completion_queue * cq ) { <nl> - GPR_ASSERT ( grpc_call_server_accept ( call_ , cq , this ) = = GRPC_CALL_OK ) ; <nl> - GPR_ASSERT ( grpc_call_server_end_initial_metadata ( call_ , 0 ) = = GRPC_CALL_OK ) ; <nl> + GPR_ASSERT ( grpc_call_server_accept_old ( call_ , cq , this ) = = GRPC_CALL_OK ) ; <nl> + GPR_ASSERT ( grpc_call_server_end_initial_metadata_old ( call_ , 0 ) = = <nl> + GRPC_CALL_OK ) ; <nl> } <nl> <nl> bool AsyncServerContext : : StartRead ( google : : protobuf : : Message * request ) { <nl> GPR_ASSERT ( request ) ; <nl> request_ = request ; <nl> - grpc_call_error err = grpc_call_start_read ( call_ , this ) ; <nl> + grpc_call_error err = grpc_call_start_read_old ( call_ , this ) ; <nl> return err = = GRPC_CALL_OK ; <nl> } <nl> <nl> bool AsyncServerContext : : StartWrite ( const google : : protobuf : : Message & response , <nl> if ( ! SerializeProto ( response , & buffer ) ) { <nl> return false ; <nl> } <nl> - grpc_call_error err = grpc_call_start_write ( call_ , buffer , this , flags ) ; <nl> + grpc_call_error err = grpc_call_start_write_old ( call_ , buffer , this , flags ) ; <nl> grpc_byte_buffer_destroy ( buffer ) ; <nl> return err = = GRPC_CALL_OK ; <nl> } <nl> <nl> bool AsyncServerContext : : StartWriteStatus ( const Status & status ) { <nl> - grpc_call_error err = grpc_call_start_write_status ( <nl> + grpc_call_error err = grpc_call_start_write_status_old ( <nl> call_ , static_cast < grpc_status_code > ( status . code ( ) ) , <nl> status . details ( ) . empty ( ) ? nullptr <nl> : const_cast < char * > ( status . details ( ) . c_str ( ) ) , <nl> mmm a / src / cpp / server / server . cc <nl> ppp b / src / cpp / server / server . 
cc <nl> void Server : : Start ( ) { <nl> <nl> void Server : : AllowOneRpc ( ) { <nl> GPR_ASSERT ( started_ ) ; <nl> - grpc_call_error err = grpc_server_request_call ( server_ , nullptr ) ; <nl> + grpc_call_error err = grpc_server_request_call_old ( server_ , nullptr ) ; <nl> GPR_ASSERT ( err = = GRPC_CALL_OK ) ; <nl> } <nl> <nl> mmm a / src / cpp / stream / stream_context . cc <nl> ppp b / src / cpp / stream / stream_context . cc <nl> void StreamContext : : Start ( bool buffered ) { <nl> if ( is_client_ ) { <nl> / / TODO ( yangg ) handle metadata send path <nl> int flag = buffered ? GRPC_WRITE_BUFFER_HINT : 0 ; <nl> - grpc_call_error error = grpc_call_invoke ( <nl> + grpc_call_error error = grpc_call_invoke_old ( <nl> call ( ) , cq ( ) , client_metadata_read_tag ( ) , finished_tag ( ) , flag ) ; <nl> GPR_ASSERT ( GRPC_CALL_OK = = error ) ; <nl> } else { <nl> / / TODO ( yangg ) metadata needs to be added before accept <nl> / / TODO ( yangg ) correctly set flag to accept <nl> - GPR_ASSERT ( grpc_call_server_accept ( call ( ) , cq ( ) , finished_tag ( ) ) = = <nl> + GPR_ASSERT ( grpc_call_server_accept_old ( call ( ) , cq ( ) , finished_tag ( ) ) = = <nl> GRPC_CALL_OK ) ; <nl> - GPR_ASSERT ( grpc_call_server_end_initial_metadata ( call ( ) , 0 ) = = <nl> + GPR_ASSERT ( grpc_call_server_end_initial_metadata_old ( call ( ) , 0 ) = = <nl> GRPC_CALL_OK ) ; <nl> } <nl> } <nl> <nl> bool StreamContext : : Read ( google : : protobuf : : Message * msg ) { <nl> / / TODO ( yangg ) check peer_halfclosed_ here for possible early return . <nl> - grpc_call_error err = grpc_call_start_read ( call ( ) , read_tag ( ) ) ; <nl> + grpc_call_error err = grpc_call_start_read_old ( call ( ) , read_tag ( ) ) ; <nl> GPR_ASSERT ( err = = GRPC_CALL_OK ) ; <nl> grpc_event * read_ev = <nl> grpc_completion_queue_pluck ( cq ( ) , read_tag ( ) , gpr_inf_future ) ; <nl> bool StreamContext : : Write ( const google : : protobuf : : Message * msg , bool is_last ) { <nl> } <nl> int flag = is_last ? GRPC_WRITE_BUFFER_HINT : 0 ; <nl> grpc_call_error err = <nl> - grpc_call_start_write ( call ( ) , out_buf , write_tag ( ) , flag ) ; <nl> + grpc_call_start_write_old ( call ( ) , out_buf , write_tag ( ) , flag ) ; <nl> grpc_byte_buffer_destroy ( out_buf ) ; <nl> GPR_ASSERT ( err = = GRPC_CALL_OK ) ; <nl> <nl> bool StreamContext : : Write ( const google : : protobuf : : Message * msg , bool is_last ) { <nl> grpc_event_finish ( ev ) ; <nl> } <nl> if ( ret & & is_last ) { <nl> - grpc_call_error err = grpc_call_writes_done ( call ( ) , halfclose_tag ( ) ) ; <nl> + grpc_call_error err = grpc_call_writes_done_old ( call ( ) , halfclose_tag ( ) ) ; <nl> GPR_ASSERT ( err = = GRPC_CALL_OK ) ; <nl> ev = grpc_completion_queue_pluck ( cq ( ) , halfclose_tag ( ) , gpr_inf_future ) ; <nl> GPR_ASSERT ( ev - > type = = GRPC_FINISH_ACCEPTED ) ; <nl> new file mode 100644 <nl> index 00000000000 . . e3fbd98336e <nl> mmm / dev / null <nl> ppp b / src / node / . gitignore <nl> <nl> + build <nl> + node_modules <nl> mmm a / src / node / ext / call . cc <nl> ppp b / src / node / ext / call . 
cc <nl> NAN_METHOD ( Call : : New ) { <nl> NanUtf8String method ( args [ 1 ] ) ; <nl> double deadline = args [ 2 ] - > NumberValue ( ) ; <nl> grpc_channel * wrapped_channel = channel - > GetWrappedChannel ( ) ; <nl> - grpc_call * wrapped_call = <nl> - grpc_channel_create_call ( wrapped_channel , * method , channel - > GetHost ( ) , <nl> - MillisecondsToTimespec ( deadline ) ) ; <nl> + grpc_call * wrapped_call = grpc_channel_create_call_old ( <nl> + wrapped_channel , * method , channel - > GetHost ( ) , <nl> + MillisecondsToTimespec ( deadline ) ) ; <nl> call = new Call ( wrapped_call ) ; <nl> args . This ( ) - > SetHiddenValue ( String : : NewSymbol ( " channel_ " ) , <nl> channel_object ) ; <nl> NAN_METHOD ( Call : : AddMetadata ) { <nl> if ( Buffer : : HasInstance ( value ) ) { <nl> metadata . value = Buffer : : Data ( value ) ; <nl> metadata . value_length = Buffer : : Length ( value ) ; <nl> - error = grpc_call_add_metadata ( call - > wrapped_call , & metadata , 0 ) ; <nl> + error = grpc_call_add_metadata_old ( call - > wrapped_call , & metadata , 0 ) ; <nl> } else if ( value - > IsString ( ) ) { <nl> Handle < String > string_value = value - > ToString ( ) ; <nl> NanUtf8String utf8_value ( string_value ) ; <nl> NAN_METHOD ( Call : : AddMetadata ) { <nl> metadata . value_length = string_value - > Length ( ) ; <nl> gpr_log ( GPR_DEBUG , " adding metadata : % s , % s , % d " , metadata . key , <nl> metadata . value , metadata . value_length ) ; <nl> - error = grpc_call_add_metadata ( call - > wrapped_call , & metadata , 0 ) ; <nl> + error = grpc_call_add_metadata_old ( call - > wrapped_call , & metadata , 0 ) ; <nl> } else { <nl> return NanThrowTypeError ( <nl> " addMetadata values must be strings or buffers " ) ; <nl> NAN_METHOD ( Call : : Invoke ) { <nl> } <nl> Call * call = ObjectWrap : : Unwrap < Call > ( args . This ( ) ) ; <nl> unsigned int flags = args [ 3 ] - > Uint32Value ( ) ; <nl> - grpc_call_error error = grpc_call_invoke ( <nl> + grpc_call_error error = grpc_call_invoke_old ( <nl> call - > wrapped_call , CompletionQueueAsyncWorker : : GetQueue ( ) , <nl> CreateTag ( args [ 0 ] , args . This ( ) ) , CreateTag ( args [ 1 ] , args . This ( ) ) , flags ) ; <nl> if ( error = = GRPC_CALL_OK ) { <nl> NAN_METHOD ( Call : : ServerAccept ) { <nl> return NanThrowTypeError ( " accept ' s first argument must be a function " ) ; <nl> } <nl> Call * call = ObjectWrap : : Unwrap < Call > ( args . This ( ) ) ; <nl> - grpc_call_error error = grpc_call_server_accept ( <nl> + grpc_call_error error = grpc_call_server_accept_old ( <nl> call - > wrapped_call , CompletionQueueAsyncWorker : : GetQueue ( ) , <nl> CreateTag ( args [ 0 ] , args . This ( ) ) ) ; <nl> if ( error = = GRPC_CALL_OK ) { <nl> NAN_METHOD ( Call : : ServerEndInitialMetadata ) { <nl> Call * call = ObjectWrap : : Unwrap < Call > ( args . This ( ) ) ; <nl> unsigned int flags = args [ 1 ] - > Uint32Value ( ) ; <nl> grpc_call_error error = <nl> - grpc_call_server_end_initial_metadata ( call - > wrapped_call , flags ) ; <nl> + grpc_call_server_end_initial_metadata_old ( call - > wrapped_call , flags ) ; <nl> if ( error ! = GRPC_CALL_OK ) { <nl> return NanThrowError ( " serverEndInitialMetadata failed " , error ) ; <nl> } <nl> NAN_METHOD ( Call : : StartWrite ) { <nl> Call * call = ObjectWrap : : Unwrap < Call > ( args . 
This ( ) ) ; <nl> grpc_byte_buffer * buffer = BufferToByteBuffer ( args [ 0 ] ) ; <nl> unsigned int flags = args [ 2 ] - > Uint32Value ( ) ; <nl> - grpc_call_error error = grpc_call_start_write ( <nl> + grpc_call_error error = grpc_call_start_write_old ( <nl> call - > wrapped_call , buffer , CreateTag ( args [ 1 ] , args . This ( ) ) , flags ) ; <nl> if ( error = = GRPC_CALL_OK ) { <nl> CompletionQueueAsyncWorker : : Next ( ) ; <nl> NAN_METHOD ( Call : : StartWriteStatus ) { <nl> } <nl> Call * call = ObjectWrap : : Unwrap < Call > ( args . This ( ) ) ; <nl> NanUtf8String details ( args [ 1 ] ) ; <nl> - grpc_call_error error = grpc_call_start_write_status ( <nl> + grpc_call_error error = grpc_call_start_write_status_old ( <nl> call - > wrapped_call , ( grpc_status_code ) args [ 0 ] - > Uint32Value ( ) , * details , <nl> CreateTag ( args [ 2 ] , args . This ( ) ) ) ; <nl> if ( error = = GRPC_CALL_OK ) { <nl> NAN_METHOD ( Call : : WritesDone ) { <nl> return NanThrowTypeError ( " writesDone ' s first argument must be a function " ) ; <nl> } <nl> Call * call = ObjectWrap : : Unwrap < Call > ( args . This ( ) ) ; <nl> - grpc_call_error error = grpc_call_writes_done ( <nl> + grpc_call_error error = grpc_call_writes_done_old ( <nl> call - > wrapped_call , CreateTag ( args [ 0 ] , args . This ( ) ) ) ; <nl> if ( error = = GRPC_CALL_OK ) { <nl> CompletionQueueAsyncWorker : : Next ( ) ; <nl> NAN_METHOD ( Call : : StartRead ) { <nl> return NanThrowTypeError ( " startRead ' s first argument must be a function " ) ; <nl> } <nl> Call * call = ObjectWrap : : Unwrap < Call > ( args . This ( ) ) ; <nl> - grpc_call_error error = <nl> - grpc_call_start_read ( call - > wrapped_call , CreateTag ( args [ 0 ] , args . This ( ) ) ) ; <nl> + grpc_call_error error = grpc_call_start_read_old ( <nl> + call - > wrapped_call , CreateTag ( args [ 0 ] , args . This ( ) ) ) ; <nl> if ( error = = GRPC_CALL_OK ) { <nl> CompletionQueueAsyncWorker : : Next ( ) ; <nl> } else { <nl> mmm a / src / node / ext / server . cc <nl> ppp b / src / node / ext / server . cc <nl> NAN_METHOD ( Server : : RequestCall ) { <nl> return NanThrowTypeError ( " requestCall can only be called on a Server " ) ; <nl> } <nl> Server * server = ObjectWrap : : Unwrap < Server > ( args . This ( ) ) ; <nl> - grpc_call_error error = grpc_server_request_call ( <nl> + grpc_call_error error = grpc_server_request_call_old ( <nl> server - > wrapped_server , CreateTag ( args [ 0 ] , NanNull ( ) ) ) ; <nl> if ( error = = GRPC_CALL_OK ) { <nl> CompletionQueueAsyncWorker : : Next ( ) ; <nl> mmm a / src / php / ext / grpc / call . c <nl> ppp b / src / php / ext / grpc / call . c <nl> int php_grpc_call_add_metadata_array_walk ( void * elem TSRMLS_DC , int num_args , <nl> metadata . key = ( char * ) key ; <nl> metadata . value = Z_STRVAL_P ( * data ) ; <nl> metadata . 
value_length = Z_STRLEN_P ( * data ) ; <nl> - error_code = grpc_call_add_metadata ( call , & metadata , 0u ) ; <nl> + error_code = grpc_call_add_metadata_old ( call , & metadata , 0u ) ; <nl> MAYBE_THROW_CALL_ERROR ( add_metadata , error_code ) ; <nl> break ; <nl> case IS_ARRAY : <nl> PHP_METHOD ( Call , __construct ) { <nl> wrapped_grpc_timeval * deadline = <nl> ( wrapped_grpc_timeval * ) zend_object_store_get_object ( <nl> deadline_obj TSRMLS_CC ) ; <nl> - call - > wrapped = grpc_channel_create_call ( channel - > wrapped , method , <nl> - channel - > target , deadline - > wrapped ) ; <nl> + call - > wrapped = grpc_channel_create_call_old ( <nl> + channel - > wrapped , method , channel - > target , deadline - > wrapped ) ; <nl> } <nl> <nl> / * * <nl> PHP_METHOD ( Call , invoke ) { <nl> wrapped_grpc_completion_queue * queue = <nl> ( wrapped_grpc_completion_queue * ) zend_object_store_get_object ( <nl> queue_obj TSRMLS_CC ) ; <nl> - error_code = grpc_call_invoke ( call - > wrapped , queue - > wrapped , ( void * ) tag1 , <nl> - ( void * ) tag2 , ( gpr_uint32 ) flags ) ; <nl> + error_code = grpc_call_invoke_old ( call - > wrapped , queue - > wrapped , ( void * ) tag1 , <nl> + ( void * ) tag2 , ( gpr_uint32 ) flags ) ; <nl> MAYBE_THROW_CALL_ERROR ( invoke , error_code ) ; <nl> } <nl> <nl> PHP_METHOD ( Call , server_accept ) { <nl> ( wrapped_grpc_completion_queue * ) zend_object_store_get_object ( <nl> queue_obj TSRMLS_CC ) ; <nl> error_code = <nl> - grpc_call_server_accept ( call - > wrapped , queue - > wrapped , ( void * ) tag ) ; <nl> + grpc_call_server_accept_old ( call - > wrapped , queue - > wrapped , ( void * ) tag ) ; <nl> MAYBE_THROW_CALL_ERROR ( server_accept , error_code ) ; <nl> } <nl> <nl> PHP_METHOD ( Call , server_end_initial_metadata ) { <nl> } <nl> wrapped_grpc_call * call = <nl> ( wrapped_grpc_call * ) zend_object_store_get_object ( getThis ( ) TSRMLS_CC ) ; <nl> - error_code = grpc_call_server_end_initial_metadata ( call - > wrapped , flags ) ; <nl> + error_code = grpc_call_server_end_initial_metadata_old ( call - > wrapped , flags ) ; <nl> MAYBE_THROW_CALL_ERROR ( server_end_initial_metadata , error_code ) ; <nl> } <nl> <nl> PHP_METHOD ( Call , start_write ) { <nl> 1 TSRMLS_CC ) ; <nl> return ; <nl> } <nl> - error_code = grpc_call_start_write ( call - > wrapped , <nl> - string_to_byte_buffer ( buffer , buffer_len ) , <nl> - ( void * ) tag , ( gpr_uint32 ) flags ) ; <nl> + error_code = grpc_call_start_write_old ( <nl> + call - > wrapped , string_to_byte_buffer ( buffer , buffer_len ) , ( void * ) tag , <nl> + ( gpr_uint32 ) flags ) ; <nl> MAYBE_THROW_CALL_ERROR ( start_write , error_code ) ; <nl> } <nl> <nl> PHP_METHOD ( Call , start_write_status ) { <nl> " start_write_status expects a long , a string , and a long " , 1 TSRMLS_CC ) ; <nl> return ; <nl> } <nl> - error_code = <nl> - grpc_call_start_write_status ( call - > wrapped , ( grpc_status_code ) status_code , <nl> - status_details , ( void * ) tag ) ; <nl> + error_code = grpc_call_start_write_status_old ( call - > wrapped , <nl> + ( grpc_status_code ) status_code , <nl> + status_details , ( void * ) tag ) ; <nl> MAYBE_THROW_CALL_ERROR ( start_write_status , error_code ) ; <nl> } <nl> <nl> PHP_METHOD ( Call , writes_done ) { <nl> " writes_done expects a long " , 1 TSRMLS_CC ) ; <nl> return ; <nl> } <nl> - error_code = grpc_call_writes_done ( call - > wrapped , ( void * ) tag ) ; <nl> + error_code = grpc_call_writes_done_old ( call - > wrapped , ( void * ) tag ) ; <nl> MAYBE_THROW_CALL_ERROR ( writes_done , error_code ) ; 
<nl> } <nl> <nl> PHP_METHOD ( Call , start_read ) { <nl> " start_read expects a long " , 1 TSRMLS_CC ) ; <nl> return ; <nl> } <nl> - error_code = grpc_call_start_read ( call - > wrapped , ( void * ) tag ) ; <nl> + error_code = grpc_call_start_read_old ( call - > wrapped , ( void * ) tag ) ; <nl> MAYBE_THROW_CALL_ERROR ( start_read , error_code ) ; <nl> } <nl> <nl> mmm a / src / php / ext / grpc / server . c <nl> ppp b / src / php / ext / grpc / server . c <nl> PHP_METHOD ( Server , request_call ) { <nl> " request_call expects a long " , 1 TSRMLS_CC ) ; <nl> return ; <nl> } <nl> - error_code = grpc_server_request_call ( server - > wrapped , ( void * ) tag_new ) ; <nl> + error_code = grpc_server_request_call_old ( server - > wrapped , ( void * ) tag_new ) ; <nl> MAYBE_THROW_CALL_ERROR ( request_call , error_code ) ; <nl> } <nl> <nl> mmm a / src / python / src / _adapter / _call . c <nl> ppp b / src / python / src / _adapter / _call . c <nl> static int pygrpc_call_init ( Call * self , PyObject * args , PyObject * kwds ) { <nl> / * TODO ( nathaniel ) : Hoist the gpr_timespec < - > PyFloat arithmetic into its own <nl> * function with its own test coverage . <nl> * / <nl> - self - > c_call = <nl> - grpc_channel_create_call ( ( ( Channel * ) channel ) - > c_channel , method , host , <nl> - gpr_time_from_nanos ( deadline * GPR_NS_PER_SEC ) ) ; <nl> + self - > c_call = grpc_channel_create_call_old ( <nl> + ( ( Channel * ) channel ) - > c_channel , method , host , <nl> + gpr_time_from_nanos ( deadline * GPR_NS_PER_SEC ) ) ; <nl> <nl> return 0 ; <nl> } <nl> static void pygrpc_call_dealloc ( Call * self ) { <nl> self - > ob_type - > tp_free ( ( PyObject * ) self ) ; <nl> } <nl> <nl> - static const PyObject * pygrpc_call_invoke ( Call * self , PyObject * args ) { <nl> + static const PyObject * pygrpc_call_invoke_old ( Call * self , PyObject * args ) { <nl> const PyObject * completion_queue ; <nl> const PyObject * metadata_tag ; <nl> const PyObject * finish_tag ; <nl> static const PyObject * pygrpc_call_invoke ( Call * self , PyObject * args ) { <nl> return NULL ; <nl> } <nl> <nl> - call_error = grpc_call_invoke ( <nl> + call_error = grpc_call_invoke_old ( <nl> self - > c_call , ( ( CompletionQueue * ) completion_queue ) - > c_completion_queue , <nl> ( void * ) metadata_tag , ( void * ) finish_tag , 0 ) ; <nl> <nl> static const PyObject * pygrpc_call_write ( Call * self , PyObject * args ) { <nl> byte_buffer = grpc_byte_buffer_create ( & slice , 1 ) ; <nl> gpr_slice_unref ( slice ) ; <nl> <nl> - call_error = grpc_call_start_write ( self - > c_call , byte_buffer , ( void * ) tag , 0 ) ; <nl> + call_error = <nl> + grpc_call_start_write_old ( self - > c_call , byte_buffer , ( void * ) tag , 0 ) ; <nl> <nl> grpc_byte_buffer_destroy ( byte_buffer ) ; <nl> <nl> static const PyObject * pygrpc_call_complete ( Call * self , PyObject * args ) { <nl> return NULL ; <nl> } <nl> <nl> - call_error = grpc_call_writes_done ( self - > c_call , ( void * ) tag ) ; <nl> + call_error = grpc_call_writes_done_old ( self - > c_call , ( void * ) tag ) ; <nl> <nl> result = pygrpc_translate_call_error ( call_error ) ; <nl> if ( result ! 
= NULL ) { <nl> static const PyObject * pygrpc_call_accept ( Call * self , PyObject * args ) { <nl> return NULL ; <nl> } <nl> <nl> - call_error = grpc_call_server_accept ( <nl> + call_error = grpc_call_server_accept_old ( <nl> self - > c_call , ( ( CompletionQueue * ) completion_queue ) - > c_completion_queue , <nl> ( void * ) tag ) ; <nl> result = pygrpc_translate_call_error ( call_error ) ; <nl> static const PyObject * pygrpc_call_accept ( Call * self , PyObject * args ) { <nl> static const PyObject * pygrpc_call_premetadata ( Call * self , PyObject * args ) { <nl> / * TODO ( b / 18702680 ) : Actually support metadata . * / <nl> return pygrpc_translate_call_error ( <nl> - grpc_call_server_end_initial_metadata ( self - > c_call , 0 ) ) ; <nl> + grpc_call_server_end_initial_metadata_old ( self - > c_call , 0 ) ) ; <nl> } <nl> <nl> static const PyObject * pygrpc_call_read ( Call * self , PyObject * args ) { <nl> static const PyObject * pygrpc_call_read ( Call * self , PyObject * args ) { <nl> return NULL ; <nl> } <nl> <nl> - call_error = grpc_call_start_read ( self - > c_call , ( void * ) tag ) ; <nl> + call_error = grpc_call_start_read_old ( self - > c_call , ( void * ) tag ) ; <nl> <nl> result = pygrpc_translate_call_error ( call_error ) ; <nl> if ( result ! = NULL ) { <nl> static const PyObject * pygrpc_call_status ( Call * self , PyObject * args ) { <nl> Py_DECREF ( code ) ; <nl> Py_DECREF ( details ) ; <nl> <nl> - call_error = grpc_call_start_write_status ( self - > c_call , c_code , c_message , <nl> - ( void * ) tag ) ; <nl> + call_error = grpc_call_start_write_status_old ( self - > c_call , c_code , c_message , <nl> + ( void * ) tag ) ; <nl> <nl> result = pygrpc_translate_call_error ( call_error ) ; <nl> if ( result ! = NULL ) { <nl> mmm a / src / python / src / _adapter / _server . c <nl> ppp b / src / python / src / _adapter / _server . c <nl> static const PyObject * pygrpc_server_service ( Server * self , PyObject * args ) { <nl> return NULL ; <nl> } <nl> <nl> - call_error = grpc_server_request_call ( self - > c_server , ( void * ) tag ) ; <nl> + call_error = grpc_server_request_call_old ( self - > c_server , ( void * ) tag ) ; <nl> <nl> result = pygrpc_translate_call_error ( call_error ) ; <nl> if ( result ! = NULL ) { <nl> mmm a / src / ruby / ext / grpc / rb_call . c <nl> ppp b / src / ruby / ext / grpc / rb_call . c <nl> int grpc_rb_call_add_metadata_hash_cb ( VALUE key , VALUE val , VALUE call_obj ) { <nl> md_obj_args [ 1 ] = rb_ary_entry ( val , i ) ; <nl> md_obj = rb_class_new_instance ( 2 , md_obj_args , rb_cMetadata ) ; <nl> md = grpc_rb_get_wrapped_metadata ( md_obj ) ; <nl> - err = grpc_call_add_metadata ( call , md , NUM2UINT ( flags ) ) ; <nl> + err = grpc_call_add_metadata_old ( call , md , NUM2UINT ( flags ) ) ; <nl> if ( err ! = GRPC_CALL_OK ) { <nl> rb_raise ( rb_eCallError , " add metadata failed : % s ( code = % d ) " , <nl> grpc_call_error_detail_of ( err ) , err ) ; <nl> int grpc_rb_call_add_metadata_hash_cb ( VALUE key , VALUE val , VALUE call_obj ) { <nl> md_obj_args [ 1 ] = val ; <nl> md_obj = rb_class_new_instance ( 2 , md_obj_args , rb_cMetadata ) ; <nl> md = grpc_rb_get_wrapped_metadata ( md_obj ) ; <nl> - err = grpc_call_add_metadata ( call , md , NUM2UINT ( flags ) ) ; <nl> + err = grpc_call_add_metadata_old ( call , md , NUM2UINT ( flags ) ) ; <nl> if ( err ! 
= GRPC_CALL_OK ) { <nl> rb_raise ( rb_eCallError , " add metadata failed : % s ( code = % d ) " , <nl> grpc_call_error_detail_of ( err ) , err ) ; <nl> static VALUE grpc_rb_call_invoke ( int argc , VALUE * argv , VALUE self ) { <nl> } <nl> cq = grpc_rb_get_wrapped_completion_queue ( cqueue ) ; <nl> Data_Get_Struct ( self , grpc_call , call ) ; <nl> - err = grpc_call_invoke ( call , cq , ROBJECT ( metadata_read_tag ) , <nl> - ROBJECT ( finished_tag ) , NUM2UINT ( flags ) ) ; <nl> + err = grpc_call_invoke_old ( call , cq , ROBJECT ( metadata_read_tag ) , <nl> + ROBJECT ( finished_tag ) , NUM2UINT ( flags ) ) ; <nl> if ( err ! = GRPC_CALL_OK ) { <nl> rb_raise ( rb_eCallError , " invoke failed : % s ( code = % d ) " , <nl> grpc_call_error_detail_of ( err ) , err ) ; <nl> static VALUE grpc_rb_call_start_read ( VALUE self , VALUE tag ) { <nl> grpc_call * call = NULL ; <nl> grpc_call_error err ; <nl> Data_Get_Struct ( self , grpc_call , call ) ; <nl> - err = grpc_call_start_read ( call , ROBJECT ( tag ) ) ; <nl> + err = grpc_call_start_read_old ( call , ROBJECT ( tag ) ) ; <nl> if ( err ! = GRPC_CALL_OK ) { <nl> rb_raise ( rb_eCallError , " start read failed : % s ( code = % d ) " , <nl> grpc_call_error_detail_of ( err ) , err ) ; <nl> static VALUE grpc_rb_call_start_write ( int argc , VALUE * argv , VALUE self ) { <nl> } <nl> bfr = grpc_rb_get_wrapped_byte_buffer ( byte_buffer ) ; <nl> Data_Get_Struct ( self , grpc_call , call ) ; <nl> - err = grpc_call_start_write ( call , bfr , ROBJECT ( tag ) , NUM2UINT ( flags ) ) ; <nl> + err = grpc_call_start_write_old ( call , bfr , ROBJECT ( tag ) , NUM2UINT ( flags ) ) ; <nl> if ( err ! = GRPC_CALL_OK ) { <nl> rb_raise ( rb_eCallError , " start write failed : % s ( code = % d ) " , <nl> grpc_call_error_detail_of ( err ) , err ) ; <nl> static VALUE grpc_rb_call_start_write_status ( VALUE self , VALUE code , <nl> grpc_call * call = NULL ; <nl> grpc_call_error err ; <nl> Data_Get_Struct ( self , grpc_call , call ) ; <nl> - err = grpc_call_start_write_status ( call , NUM2UINT ( code ) , <nl> - StringValueCStr ( status ) , ROBJECT ( tag ) ) ; <nl> + err = grpc_call_start_write_status_old ( call , NUM2UINT ( code ) , <nl> + StringValueCStr ( status ) , ROBJECT ( tag ) ) ; <nl> if ( err ! = GRPC_CALL_OK ) { <nl> rb_raise ( rb_eCallError , " start write status : % s ( code = % d ) " , <nl> grpc_call_error_detail_of ( err ) , err ) ; <nl> static VALUE grpc_rb_call_writes_done ( VALUE self , VALUE tag ) { <nl> grpc_call * call = NULL ; <nl> grpc_call_error err ; <nl> Data_Get_Struct ( self , grpc_call , call ) ; <nl> - err = grpc_call_writes_done ( call , ROBJECT ( tag ) ) ; <nl> + err = grpc_call_writes_done_old ( call , ROBJECT ( tag ) ) ; <nl> if ( err ! = GRPC_CALL_OK ) { <nl> rb_raise ( rb_eCallError , " writes done : % s ( code = % d ) " , <nl> grpc_call_error_detail_of ( err ) , err ) ; <nl> static VALUE grpc_rb_call_server_end_initial_metadata ( int argc , VALUE * argv , <nl> flags = UINT2NUM ( 0 ) ; / * Default to no flags * / <nl> } <nl> Data_Get_Struct ( self , grpc_call , call ) ; <nl> - err = grpc_call_server_end_initial_metadata ( call , NUM2UINT ( flags ) ) ; <nl> + err = grpc_call_server_end_initial_metadata_old ( call , NUM2UINT ( flags ) ) ; <nl> if ( err ! 
= GRPC_CALL_OK ) { <nl> rb_raise ( rb_eCallError , " end_initial_metadata failed : % s ( code = % d ) " , <nl> grpc_call_error_detail_of ( err ) , err ) ; <nl> static VALUE grpc_rb_call_server_accept ( VALUE self , VALUE cqueue , <nl> grpc_completion_queue * cq = grpc_rb_get_wrapped_completion_queue ( cqueue ) ; <nl> grpc_call_error err ; <nl> Data_Get_Struct ( self , grpc_call , call ) ; <nl> - err = grpc_call_server_accept ( call , cq , ROBJECT ( finished_tag ) ) ; <nl> + err = grpc_call_server_accept_old ( call , cq , ROBJECT ( finished_tag ) ) ; <nl> if ( err ! = GRPC_CALL_OK ) { <nl> rb_raise ( rb_eCallError , " server_accept failed : % s ( code = % d ) " , <nl> grpc_call_error_detail_of ( err ) , err ) ; <nl> mmm a / src / ruby / ext / grpc / rb_channel . c <nl> ppp b / src / ruby / ext / grpc / rb_channel . c <nl> static VALUE grpc_rb_channel_create_call ( VALUE self , VALUE method , VALUE host , <nl> rb_raise ( rb_eRuntimeError , " closed ! " ) ; <nl> } <nl> <nl> - call = grpc_channel_create_call ( ch , method_chars , host_chars , <nl> - grpc_rb_time_timeval ( deadline , <nl> - / * absolute time * / 0 ) ) ; <nl> + call = <nl> + grpc_channel_create_call_old ( ch , method_chars , host_chars , <nl> + grpc_rb_time_timeval ( deadline , <nl> + / * absolute time * / 0 ) ) ; <nl> if ( call = = NULL ) { <nl> rb_raise ( rb_eRuntimeError , " cannot create call with method % s " , <nl> method_chars ) ; <nl> mmm a / src / ruby / ext / grpc / rb_server . c <nl> ppp b / src / ruby / ext / grpc / rb_server . c <nl> static VALUE grpc_rb_server_request_call ( VALUE self , VALUE tag_new ) { <nl> if ( s - > wrapped = = NULL ) { <nl> rb_raise ( rb_eRuntimeError , " closed ! " ) ; <nl> } else { <nl> - err = grpc_server_request_call ( s - > wrapped , ROBJECT ( tag_new ) ) ; <nl> + err = grpc_server_request_call_old ( s - > wrapped , ROBJECT ( tag_new ) ) ; <nl> if ( err ! = GRPC_CALL_OK ) { <nl> rb_raise ( rb_eCallError , " server request failed : % s ( code = % d ) " , <nl> grpc_call_error_detail_of ( err ) , err ) ; <nl> mmm a / test / core / echo / client . c <nl> ppp b / test / core / echo / client . 
c <nl> static void start_write_next_slice ( grpc_call * call , int first , int length ) { <nl> for ( i = 0 ; i < length ; i + + ) <nl> GPR_SLICE_START_PTR ( slice ) [ i ] = ( first + i ) % 256 ; <nl> byte_buffer = grpc_byte_buffer_create ( & slice , 1 ) ; <nl> - GPR_ASSERT ( grpc_call_start_write ( call , byte_buffer , ( void * ) 1 , 0 ) = = <nl> + GPR_ASSERT ( grpc_call_start_write_old ( call , byte_buffer , ( void * ) 1 , 0 ) = = <nl> GRPC_CALL_OK ) ; <nl> gpr_slice_unref ( slice ) ; <nl> grpc_byte_buffer_destroy ( byte_buffer ) ; <nl> int main ( int argc , char * * argv ) { <nl> <nl> GPR_ASSERT ( argc = = 2 ) ; <nl> channel = grpc_channel_create ( argv [ 1 ] , NULL ) ; <nl> - call = grpc_channel_create_call ( channel , " / foo " , " localhost " , <nl> - gpr_time_add ( gpr_time_from_seconds ( 5 ) , <nl> - gpr_now ( ) ) ) ; <nl> - GPR_ASSERT ( grpc_call_invoke ( call , cq , ( void * ) 1 , ( void * ) 1 , 0 ) = = <nl> + call = grpc_channel_create_call_old ( <nl> + channel , " / foo " , " localhost " , <nl> + gpr_time_add ( gpr_time_from_seconds ( 5 ) , gpr_now ( ) ) ) ; <nl> + GPR_ASSERT ( grpc_call_invoke_old ( call , cq , ( void * ) 1 , ( void * ) 1 , 0 ) = = <nl> GRPC_CALL_OK ) ; <nl> <nl> start_write_next_slice ( call , bytes_written , WRITE_SLICE_LENGTH ) ; <nl> bytes_written + = WRITE_SLICE_LENGTH ; <nl> - GPR_ASSERT ( grpc_call_start_read ( call , ( void * ) 1 ) = = GRPC_CALL_OK ) ; <nl> + GPR_ASSERT ( grpc_call_start_read_old ( call , ( void * ) 1 ) = = GRPC_CALL_OK ) ; <nl> waiting_finishes = 2 ; <nl> while ( waiting_finishes ) { <nl> ev = grpc_completion_queue_next ( cq , gpr_inf_future ) ; <nl> int main ( int argc , char * * argv ) { <nl> start_write_next_slice ( call , bytes_written , WRITE_SLICE_LENGTH ) ; <nl> bytes_written + = WRITE_SLICE_LENGTH ; <nl> } else { <nl> - GPR_ASSERT ( grpc_call_writes_done ( call , ( void * ) 1 ) = = GRPC_CALL_OK ) ; <nl> + GPR_ASSERT ( grpc_call_writes_done_old ( call , ( void * ) 1 ) = = <nl> + GRPC_CALL_OK ) ; <nl> } <nl> break ; <nl> case GRPC_CLIENT_METADATA_READ : <nl> int main ( int argc , char * * argv ) { <nl> } <nl> grpc_byte_buffer_reader_destroy ( bb_reader ) ; <nl> if ( bytes_read < TOTAL_BYTES ) { <nl> - GPR_ASSERT ( grpc_call_start_read ( call , ( void * ) 1 ) = = GRPC_CALL_OK ) ; <nl> + GPR_ASSERT ( grpc_call_start_read_old ( call , ( void * ) 1 ) = = GRPC_CALL_OK ) ; <nl> } <nl> break ; <nl> case GRPC_FINISHED : <nl> mmm a / test / core / echo / server . c <nl> ppp b / test / core / echo / server . c <nl> static void request_call ( void ) { <nl> call_state * tag = gpr_malloc ( sizeof ( * tag ) ) ; <nl> gpr_ref_init ( & tag - > pending_ops , 2 ) ; <nl> tag - > bytes_read = 0 ; <nl> - grpc_server_request_call ( server , tag ) ; <nl> + grpc_server_request_call_old ( server , tag ) ; <nl> } <nl> <nl> static void assert_read_ok ( call_state * s , grpc_byte_buffer * b ) { <nl> int main ( int argc , char * * argv ) { <nl> case GRPC_SERVER_RPC_NEW : <nl> if ( ev - > call ! 
= NULL ) { <nl> / * initial ops are already started in request_call * / <nl> - grpc_call_server_accept ( ev - > call , cq , s ) ; <nl> - grpc_call_server_end_initial_metadata ( ev - > call , <nl> - GRPC_WRITE_BUFFER_HINT ) ; <nl> - GPR_ASSERT ( grpc_call_start_read ( ev - > call , s ) = = GRPC_CALL_OK ) ; <nl> + grpc_call_server_accept_old ( ev - > call , cq , s ) ; <nl> + grpc_call_server_end_initial_metadata_old ( ev - > call , <nl> + GRPC_WRITE_BUFFER_HINT ) ; <nl> + GPR_ASSERT ( grpc_call_start_read_old ( ev - > call , s ) = = GRPC_CALL_OK ) ; <nl> request_call ( ) ; <nl> } else { <nl> GPR_ASSERT ( shutdown_started ) ; <nl> int main ( int argc , char * * argv ) { <nl> break ; <nl> case GRPC_WRITE_ACCEPTED : <nl> GPR_ASSERT ( ev - > data . write_accepted = = GRPC_OP_OK ) ; <nl> - GPR_ASSERT ( grpc_call_start_read ( ev - > call , s ) = = GRPC_CALL_OK ) ; <nl> + GPR_ASSERT ( grpc_call_start_read_old ( ev - > call , s ) = = GRPC_CALL_OK ) ; <nl> break ; <nl> case GRPC_READ : <nl> if ( ev - > data . read ) { <nl> assert_read_ok ( ev - > tag , ev - > data . read ) ; <nl> - GPR_ASSERT ( grpc_call_start_write ( ev - > call , ev - > data . read , s , <nl> - GRPC_WRITE_BUFFER_HINT ) = = <nl> + GPR_ASSERT ( grpc_call_start_write_old ( ev - > call , ev - > data . read , s , <nl> + GRPC_WRITE_BUFFER_HINT ) = = <nl> GRPC_CALL_OK ) ; <nl> } else { <nl> - GPR_ASSERT ( grpc_call_start_write_status ( ev - > call , GRPC_STATUS_OK , <nl> - NULL , s ) = = GRPC_CALL_OK ) ; <nl> + GPR_ASSERT ( grpc_call_start_write_status_old ( ev - > call , GRPC_STATUS_OK , <nl> + NULL , s ) = = GRPC_CALL_OK ) ; <nl> } <nl> break ; <nl> case GRPC_FINISH_ACCEPTED : <nl> mmm a / test / core / end2end / dualstack_socket_test . c <nl> ppp b / test / core / end2end / dualstack_socket_test . c <nl> void test_connect ( const char * server_host , const char * client_host , int port , <nl> } <nl> <nl> / * Send a trivial request . * / <nl> - c = grpc_channel_create_call ( client , " / foo " , " test . google . com " , deadline ) ; <nl> + c = grpc_channel_create_call_old ( client , " / foo " , " test . google . com " , deadline ) ; <nl> GPR_ASSERT ( c ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_invoke ( c , client_cq , tag ( 2 ) , tag ( 3 ) , 0 ) ) ; <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_writes_done ( c , tag ( 4 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = <nl> + grpc_call_invoke_old ( c , client_cq , tag ( 2 ) , tag ( 3 ) , 0 ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_writes_done_old ( c , tag ( 4 ) ) ) ; <nl> if ( expect_ok ) { <nl> / * Check for a successful request . * / <nl> cq_expect_finish_accepted ( v_client , tag ( 4 ) , GRPC_OP_OK ) ; <nl> cq_verify ( v_client ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_server_request_call ( server , tag ( 100 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_server_request_call_old ( server , tag ( 100 ) ) ) ; <nl> cq_expect_server_rpc_new ( v_server , & s , tag ( 100 ) , " / foo " , " test . google . 
com " , <nl> deadline , NULL ) ; <nl> cq_verify ( v_server ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_server_accept ( s , server_cq , tag ( 102 ) ) ) ; <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_server_end_initial_metadata ( s , 0 ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = <nl> + grpc_call_server_accept_old ( s , server_cq , tag ( 102 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_server_end_initial_metadata_old ( s , 0 ) ) ; <nl> cq_expect_client_metadata_read ( v_client , tag ( 2 ) , NULL ) ; <nl> cq_verify ( v_client ) ; <nl> <nl> GPR_ASSERT ( GRPC_CALL_OK = = <nl> - grpc_call_start_write_status ( s , GRPC_STATUS_UNIMPLEMENTED , " xyz " , <nl> - tag ( 5 ) ) ) ; <nl> + grpc_call_start_write_status_old ( s , GRPC_STATUS_UNIMPLEMENTED , <nl> + " xyz " , tag ( 5 ) ) ) ; <nl> cq_expect_finished_with_status ( v_client , tag ( 3 ) , GRPC_STATUS_UNIMPLEMENTED , <nl> " xyz " , NULL ) ; <nl> cq_verify ( v_client ) ; <nl> mmm a / test / core / end2end / no_server_test . c <nl> ppp b / test / core / end2end / no_server_test . c <nl> int main ( int argc , char * * argv ) { <nl> <nl> / * create a call , channel to a non existant server * / <nl> chan = grpc_channel_create ( " nonexistant : 54321 " , NULL ) ; <nl> - call = grpc_channel_create_call ( chan , " / foo " , " nonexistant " , deadline ) ; <nl> - GPR_ASSERT ( grpc_call_invoke ( call , cq , tag ( 2 ) , tag ( 3 ) , 0 ) = = GRPC_CALL_OK ) ; <nl> + call = grpc_channel_create_call_old ( chan , " / foo " , " nonexistant " , deadline ) ; <nl> + GPR_ASSERT ( grpc_call_invoke_old ( call , cq , tag ( 2 ) , tag ( 3 ) , 0 ) = = GRPC_CALL_OK ) ; <nl> / * verify that all tags get completed * / <nl> cq_expect_client_metadata_read ( cqv , tag ( 2 ) , NULL ) ; <nl> cq_expect_finished_with_status ( cqv , tag ( 3 ) , GRPC_STATUS_DEADLINE_EXCEEDED , <nl> mmm a / test / core / end2end / tests / cancel_after_accept . c <nl> ppp b / test / core / end2end / tests / cancel_after_accept . c <nl> static void test_cancel_after_accept ( grpc_end2end_test_config config , <nl> cq_verifier * v_client = cq_verifier_create ( f . client_cq ) ; <nl> cq_verifier * v_server = cq_verifier_create ( f . server_cq ) ; <nl> <nl> - c = grpc_channel_create_call ( f . client , " / foo " , " test . google . com " , deadline ) ; <nl> + c = grpc_channel_create_call_old ( f . client , " / foo " , " test . google . com " , <nl> + deadline ) ; <nl> GPR_ASSERT ( c ) ; <nl> <nl> GPR_ASSERT ( GRPC_CALL_OK = = <nl> - grpc_call_invoke ( c , f . client_cq , tag ( 2 ) , tag ( 3 ) , 0 ) ) ; <nl> + grpc_call_invoke_old ( c , f . client_cq , tag ( 2 ) , tag ( 3 ) , 0 ) ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_server_request_call ( f . server , tag ( 100 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_server_request_call_old ( f . server , tag ( 100 ) ) ) ; <nl> cq_expect_server_rpc_new ( v_server , & s , tag ( 100 ) , " / foo " , " test . google . com " , <nl> deadline , NULL ) ; <nl> cq_verify ( v_server ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_server_accept ( s , f . server_cq , tag ( 102 ) ) ) ; <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_server_end_initial_metadata ( s , 0 ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = <nl> + grpc_call_server_accept_old ( s , f . 
server_cq , tag ( 102 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_server_end_initial_metadata_old ( s , 0 ) ) ; <nl> cq_expect_client_metadata_read ( v_client , tag ( 2 ) , NULL ) ; <nl> cq_verify ( v_client ) ; <nl> <nl> mmm a / test / core / end2end / tests / cancel_after_accept_and_writes_closed . c <nl> ppp b / test / core / end2end / tests / cancel_after_accept_and_writes_closed . c <nl> static void test_cancel_after_accept_and_writes_closed ( <nl> cq_verifier * v_client = cq_verifier_create ( f . client_cq ) ; <nl> cq_verifier * v_server = cq_verifier_create ( f . server_cq ) ; <nl> <nl> - c = grpc_channel_create_call ( f . client , " / foo " , " test . google . com " , deadline ) ; <nl> + c = grpc_channel_create_call_old ( f . client , " / foo " , " test . google . com " , <nl> + deadline ) ; <nl> GPR_ASSERT ( c ) ; <nl> <nl> GPR_ASSERT ( GRPC_CALL_OK = = <nl> - grpc_call_invoke ( c , f . client_cq , tag ( 2 ) , tag ( 3 ) , 0 ) ) ; <nl> + grpc_call_invoke_old ( c , f . client_cq , tag ( 2 ) , tag ( 3 ) , 0 ) ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_server_request_call ( f . server , tag ( 100 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_server_request_call_old ( f . server , tag ( 100 ) ) ) ; <nl> cq_expect_server_rpc_new ( v_server , & s , tag ( 100 ) , " / foo " , " test . google . com " , <nl> deadline , NULL ) ; <nl> cq_verify ( v_server ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_server_accept ( s , f . server_cq , tag ( 102 ) ) ) ; <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_server_end_initial_metadata ( s , 0 ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = <nl> + grpc_call_server_accept_old ( s , f . server_cq , tag ( 102 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_server_end_initial_metadata_old ( s , 0 ) ) ; <nl> cq_expect_client_metadata_read ( v_client , tag ( 2 ) , NULL ) ; <nl> cq_verify ( v_client ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_writes_done ( c , tag ( 4 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_writes_done_old ( c , tag ( 4 ) ) ) ; <nl> cq_expect_finish_accepted ( v_client , tag ( 4 ) , GRPC_OP_OK ) ; <nl> cq_verify ( v_client ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_read ( s , tag ( 101 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_read_old ( s , tag ( 101 ) ) ) ; <nl> cq_expect_empty_read ( v_server , tag ( 101 ) ) ; <nl> cq_verify ( v_server ) ; <nl> <nl> mmm a / test / core / end2end / tests / cancel_after_invoke . c <nl> ppp b / test / core / end2end / tests / cancel_after_invoke . c <nl> static void test_cancel_after_invoke ( grpc_end2end_test_config config , <nl> gpr_timespec deadline = five_seconds_time ( ) ; <nl> cq_verifier * v_client = cq_verifier_create ( f . client_cq ) ; <nl> <nl> - c = grpc_channel_create_call ( f . client , " / foo " , " test . google . com " , deadline ) ; <nl> + c = grpc_channel_create_call_old ( f . client , " / foo " , " test . google . com " , <nl> + deadline ) ; <nl> GPR_ASSERT ( c ) ; <nl> <nl> GPR_ASSERT ( GRPC_CALL_OK = = <nl> - grpc_call_invoke ( c , f . client_cq , tag ( 2 ) , tag ( 3 ) , 0 ) ) ; <nl> + grpc_call_invoke_old ( c , f . client_cq , tag ( 2 ) , tag ( 3 ) , 0 ) ) ; <nl> <nl> GPR_ASSERT ( GRPC_CALL_OK = = mode . initiate_cancel ( c ) ) ; <nl> <nl> mmm a / test / core / end2end / tests / cancel_before_invoke . c <nl> ppp b / test / core / end2end / tests / cancel_before_invoke . 
c <nl> static void test_cancel_before_invoke ( grpc_end2end_test_config config ) { <nl> gpr_timespec deadline = five_seconds_time ( ) ; <nl> cq_verifier * v_client = cq_verifier_create ( f . client_cq ) ; <nl> <nl> - c = grpc_channel_create_call ( f . client , " / foo " , " test . google . com " , deadline ) ; <nl> + c = grpc_channel_create_call_old ( f . client , " / foo " , " test . google . com " , <nl> + deadline ) ; <nl> GPR_ASSERT ( c ) ; <nl> <nl> GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_cancel ( c ) ) ; <nl> <nl> GPR_ASSERT ( GRPC_CALL_OK = = <nl> - grpc_call_invoke ( c , f . client_cq , tag ( 2 ) , tag ( 3 ) , 0 ) ) ; <nl> + grpc_call_invoke_old ( c , f . client_cq , tag ( 2 ) , tag ( 3 ) , 0 ) ) ; <nl> cq_expect_client_metadata_read ( v_client , tag ( 2 ) , NULL ) ; <nl> cq_expect_finished_with_status ( v_client , tag ( 3 ) , GRPC_STATUS_CANCELLED , NULL , <nl> NULL ) ; <nl> mmm a / test / core / end2end / tests / cancel_in_a_vacuum . c <nl> ppp b / test / core / end2end / tests / cancel_in_a_vacuum . c <nl> static void test_cancel_in_a_vacuum ( grpc_end2end_test_config config , <nl> gpr_timespec deadline = five_seconds_time ( ) ; <nl> cq_verifier * v_client = cq_verifier_create ( f . client_cq ) ; <nl> <nl> - c = grpc_channel_create_call ( f . client , " / foo " , " test . google . com " , deadline ) ; <nl> + c = grpc_channel_create_call_old ( f . client , " / foo " , " test . google . com " , <nl> + deadline ) ; <nl> GPR_ASSERT ( c ) ; <nl> <nl> GPR_ASSERT ( GRPC_CALL_OK = = mode . initiate_cancel ( c ) ) ; <nl> mmm a / test / core / end2end / tests / census_simple_request . c <nl> ppp b / test / core / end2end / tests / census_simple_request . c <nl> static void test_body ( grpc_end2end_test_fixture f ) { <nl> cq_verifier * v_client = cq_verifier_create ( f . client_cq ) ; <nl> cq_verifier * v_server = cq_verifier_create ( f . server_cq ) ; <nl> <nl> - c = grpc_channel_create_call ( f . client , " / foo " , " test . google . com " , deadline ) ; <nl> + c = grpc_channel_create_call_old ( f . client , " / foo " , " test . google . com " , <nl> + deadline ) ; <nl> GPR_ASSERT ( c ) ; <nl> tag ( 1 ) ; <nl> GPR_ASSERT ( GRPC_CALL_OK = = <nl> - grpc_call_invoke ( c , f . client_cq , tag ( 2 ) , tag ( 3 ) , 0 ) ) ; <nl> + grpc_call_invoke_old ( c , f . client_cq , tag ( 2 ) , tag ( 3 ) , 0 ) ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_writes_done ( c , tag ( 4 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_writes_done_old ( c , tag ( 4 ) ) ) ; <nl> cq_expect_finish_accepted ( v_client , tag ( 4 ) , GRPC_OP_OK ) ; <nl> cq_verify ( v_client ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_server_request_call ( f . server , tag ( 100 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_server_request_call_old ( f . server , tag ( 100 ) ) ) ; <nl> cq_expect_server_rpc_new ( v_server , & s , tag ( 100 ) , " / foo " , " test . google . com " , <nl> deadline , NULL ) ; <nl> cq_verify ( v_server ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_server_accept ( s , f . server_cq , tag ( 102 ) ) ) ; <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_server_end_initial_metadata ( s , 0 ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = <nl> + grpc_call_server_accept_old ( s , f . 
server_cq , tag ( 102 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_server_end_initial_metadata_old ( s , 0 ) ) ; <nl> cq_expect_client_metadata_read ( v_client , tag ( 2 ) , NULL ) ; <nl> cq_verify ( v_client ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_write_status ( <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_write_status_old ( <nl> s , GRPC_STATUS_UNIMPLEMENTED , " xyz " , tag ( 5 ) ) ) ; <nl> cq_expect_finished_with_status ( v_client , tag ( 3 ) , GRPC_STATUS_UNIMPLEMENTED , <nl> " xyz " , NULL ) ; <nl> mmm a / test / core / end2end / tests / disappearing_server . c <nl> ppp b / test / core / end2end / tests / disappearing_server . c <nl> static void do_request_and_shutdown_server ( grpc_end2end_test_fixture * f , <nl> grpc_call * s ; <nl> gpr_timespec deadline = five_seconds_time ( ) ; <nl> <nl> - c = grpc_channel_create_call ( f - > client , " / foo " , " test . google . com " , deadline ) ; <nl> + c = grpc_channel_create_call_old ( f - > client , " / foo " , " test . google . com " , <nl> + deadline ) ; <nl> GPR_ASSERT ( c ) ; <nl> <nl> GPR_ASSERT ( GRPC_CALL_OK = = <nl> - grpc_call_invoke ( c , f - > client_cq , tag ( 2 ) , tag ( 3 ) , 0 ) ) ; <nl> + grpc_call_invoke_old ( c , f - > client_cq , tag ( 2 ) , tag ( 3 ) , 0 ) ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_writes_done ( c , tag ( 4 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_writes_done_old ( c , tag ( 4 ) ) ) ; <nl> cq_expect_finish_accepted ( v_client , tag ( 4 ) , GRPC_OP_OK ) ; <nl> cq_verify ( v_client ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_server_request_call ( f - > server , tag ( 100 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_server_request_call_old ( f - > server , tag ( 100 ) ) ) ; <nl> cq_expect_server_rpc_new ( v_server , & s , tag ( 100 ) , " / foo " , " test . google . com " , <nl> deadline , NULL ) ; <nl> cq_verify ( v_server ) ; <nl> <nl> GPR_ASSERT ( GRPC_CALL_OK = = <nl> - grpc_call_server_accept ( s , f - > server_cq , tag ( 102 ) ) ) ; <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_server_end_initial_metadata ( s , 0 ) ) ; <nl> + grpc_call_server_accept_old ( s , f - > server_cq , tag ( 102 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_server_end_initial_metadata_old ( s , 0 ) ) ; <nl> cq_expect_client_metadata_read ( v_client , tag ( 2 ) , NULL ) ; <nl> cq_verify ( v_client ) ; <nl> <nl> static void do_request_and_shutdown_server ( grpc_end2end_test_fixture * f , <nl> - and still complete the request * / <nl> grpc_server_shutdown ( f - > server ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_write_status ( <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_write_status_old ( <nl> s , GRPC_STATUS_UNIMPLEMENTED , " xyz " , tag ( 5 ) ) ) ; <nl> cq_expect_finished_with_status ( v_client , tag ( 3 ) , GRPC_STATUS_UNIMPLEMENTED , <nl> " xyz " , NULL ) ; <nl> mmm a / test / core / end2end / tests / early_server_shutdown_finishes_inflight_calls . c <nl> ppp b / test / core / end2end / tests / early_server_shutdown_finishes_inflight_calls . c <nl> static void test_early_server_shutdown_finishes_inflight_calls ( <nl> cq_verifier * v_client = cq_verifier_create ( f . client_cq ) ; <nl> cq_verifier * v_server = cq_verifier_create ( f . server_cq ) ; <nl> <nl> - c = grpc_channel_create_call ( f . client , " / foo " , " test . google . com " , deadline ) ; <nl> + c = grpc_channel_create_call_old ( f . client , " / foo " , " test . google . 
com " , <nl> + deadline ) ; <nl> GPR_ASSERT ( c ) ; <nl> <nl> GPR_ASSERT ( GRPC_CALL_OK = = <nl> - grpc_call_invoke ( c , f . client_cq , tag ( 2 ) , tag ( 3 ) , 0 ) ) ; <nl> + grpc_call_invoke_old ( c , f . client_cq , tag ( 2 ) , tag ( 3 ) , 0 ) ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_writes_done ( c , tag ( 4 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_writes_done_old ( c , tag ( 4 ) ) ) ; <nl> cq_expect_finish_accepted ( v_client , tag ( 4 ) , GRPC_OP_OK ) ; <nl> cq_verify ( v_client ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_server_request_call ( f . server , tag ( 100 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_server_request_call_old ( f . server , tag ( 100 ) ) ) ; <nl> cq_expect_server_rpc_new ( v_server , & s , tag ( 100 ) , " / foo " , " test . google . com " , <nl> deadline , NULL ) ; <nl> cq_verify ( v_server ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_server_accept ( s , f . server_cq , tag ( 102 ) ) ) ; <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_server_end_initial_metadata ( s , 0 ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = <nl> + grpc_call_server_accept_old ( s , f . server_cq , tag ( 102 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_server_end_initial_metadata_old ( s , 0 ) ) ; <nl> cq_expect_client_metadata_read ( v_client , tag ( 2 ) , NULL ) ; <nl> cq_verify ( v_client ) ; <nl> <nl> mmm a / test / core / end2end / tests / early_server_shutdown_finishes_tags . c <nl> ppp b / test / core / end2end / tests / early_server_shutdown_finishes_tags . c <nl> static void test_early_server_shutdown_finishes_tags ( <nl> <nl> / * upon shutdown , the server should finish all requested calls indicating <nl> no new call * / <nl> - grpc_server_request_call ( f . server , tag ( 1000 ) ) ; <nl> + grpc_server_request_call_old ( f . server , tag ( 1000 ) ) ; <nl> grpc_server_shutdown ( f . server ) ; <nl> cq_expect_server_rpc_new ( v_server , & s , tag ( 1000 ) , NULL , NULL , gpr_inf_past , <nl> NULL ) ; <nl> mmm a / test / core / end2end / tests / graceful_server_shutdown . c <nl> ppp b / test / core / end2end / tests / graceful_server_shutdown . c <nl> static void test_early_server_shutdown_finishes_inflight_calls ( <nl> cq_verifier * v_client = cq_verifier_create ( f . client_cq ) ; <nl> cq_verifier * v_server = cq_verifier_create ( f . server_cq ) ; <nl> <nl> - c = grpc_channel_create_call ( f . client , " / foo " , " test . google . com " , deadline ) ; <nl> + c = grpc_channel_create_call_old ( f . client , " / foo " , " test . google . com " , <nl> + deadline ) ; <nl> GPR_ASSERT ( c ) ; <nl> <nl> GPR_ASSERT ( GRPC_CALL_OK = = <nl> - grpc_call_invoke ( c , f . client_cq , tag ( 2 ) , tag ( 3 ) , 0 ) ) ; <nl> + grpc_call_invoke_old ( c , f . client_cq , tag ( 2 ) , tag ( 3 ) , 0 ) ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_writes_done ( c , tag ( 4 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_writes_done_old ( c , tag ( 4 ) ) ) ; <nl> cq_expect_finish_accepted ( v_client , tag ( 4 ) , GRPC_OP_OK ) ; <nl> cq_verify ( v_client ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_server_request_call ( f . server , tag ( 100 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_server_request_call_old ( f . server , tag ( 100 ) ) ) ; <nl> cq_expect_server_rpc_new ( v_server , & s , tag ( 100 ) , " / foo " , " test . google . com " , <nl> deadline , NULL ) ; <nl> cq_verify ( v_server ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_server_accept ( s , f . 
server_cq , tag ( 102 ) ) ) ; <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_server_end_initial_metadata ( s , 0 ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = <nl> + grpc_call_server_accept_old ( s , f . server_cq , tag ( 102 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_server_end_initial_metadata_old ( s , 0 ) ) ; <nl> cq_expect_client_metadata_read ( v_client , tag ( 2 ) , NULL ) ; <nl> cq_verify ( v_client ) ; <nl> <nl> static void test_early_server_shutdown_finishes_inflight_calls ( <nl> grpc_server_shutdown_and_notify ( f . server , tag ( 0xdead ) ) ; <nl> cq_verify_empty ( v_server ) ; <nl> <nl> - grpc_call_start_write_status ( s , GRPC_STATUS_OK , NULL , tag ( 103 ) ) ; <nl> + grpc_call_start_write_status_old ( s , GRPC_STATUS_OK , NULL , tag ( 103 ) ) ; <nl> grpc_call_destroy ( s ) ; <nl> cq_expect_finish_accepted ( v_server , tag ( 103 ) , GRPC_OP_OK ) ; <nl> cq_expect_finished ( v_server , tag ( 102 ) , NULL ) ; <nl> mmm a / test / core / end2end / tests / invoke_large_request . c <nl> ppp b / test / core / end2end / tests / invoke_large_request . c <nl> static void test_invoke_large_request ( grpc_end2end_test_config config ) { <nl> / * byte buffer holds the slice , we can unref it already * / <nl> gpr_slice_unref ( request_payload_slice ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_server_request_call ( f . server , tag ( 100 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_server_request_call_old ( f . server , tag ( 100 ) ) ) ; <nl> <nl> - c = grpc_channel_create_call ( f . client , " / foo " , " test . google . com " , deadline ) ; <nl> + c = grpc_channel_create_call_old ( f . client , " / foo " , " test . google . com " , <nl> + deadline ) ; <nl> GPR_ASSERT ( c ) ; <nl> <nl> GPR_ASSERT ( GRPC_CALL_OK = = <nl> - grpc_call_invoke ( c , f . client_cq , tag ( 2 ) , tag ( 3 ) , 0 ) ) ; <nl> + grpc_call_invoke_old ( c , f . client_cq , tag ( 2 ) , tag ( 3 ) , 0 ) ) ; <nl> <nl> GPR_ASSERT ( GRPC_CALL_OK = = <nl> - grpc_call_start_write ( c , request_payload , tag ( 4 ) , 0 ) ) ; <nl> + grpc_call_start_write_old ( c , request_payload , tag ( 4 ) , 0 ) ) ; <nl> / * destroy byte buffer early to ensure async code keeps track of its contents <nl> correctly * / <nl> grpc_byte_buffer_destroy ( request_payload ) ; <nl> static void test_invoke_large_request ( grpc_end2end_test_config config ) { <nl> deadline , NULL ) ; <nl> cq_verify ( v_server ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_server_accept ( s , f . server_cq , tag ( 102 ) ) ) ; <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_server_end_initial_metadata ( s , 0 ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = <nl> + grpc_call_server_accept_old ( s , f . 
server_cq , tag ( 102 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_server_end_initial_metadata_old ( s , 0 ) ) ; <nl> cq_expect_client_metadata_read ( v_client , tag ( 2 ) , NULL ) ; <nl> cq_verify ( v_client ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_read ( s , tag ( 5 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_read_old ( s , tag ( 5 ) ) ) ; <nl> / * now the write can be accepted * / <nl> cq_expect_write_accepted ( v_client , tag ( 4 ) , GRPC_OP_OK ) ; <nl> cq_verify ( v_client ) ; <nl> cq_expect_read ( v_server , tag ( 5 ) , large_slice ( ) ) ; <nl> cq_verify ( v_server ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_writes_done ( c , tag ( 8 ) ) ) ; <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_write_status ( <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_writes_done_old ( c , tag ( 8 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_write_status_old ( <nl> s , GRPC_STATUS_UNIMPLEMENTED , " xyz " , tag ( 9 ) ) ) ; <nl> <nl> cq_expect_finish_accepted ( v_client , tag ( 8 ) , GRPC_OP_OK ) ; <nl> mmm a / test / core / end2end / tests / max_concurrent_streams . c <nl> ppp b / test / core / end2end / tests / max_concurrent_streams . c <nl> static void simple_request_body ( grpc_end2end_test_fixture f ) { <nl> cq_verifier * v_client = cq_verifier_create ( f . client_cq ) ; <nl> cq_verifier * v_server = cq_verifier_create ( f . server_cq ) ; <nl> <nl> - c = grpc_channel_create_call ( f . client , " / foo " , " test . google . com " , deadline ) ; <nl> + c = grpc_channel_create_call_old ( f . client , " / foo " , " test . google . com " , <nl> + deadline ) ; <nl> GPR_ASSERT ( c ) ; <nl> <nl> GPR_ASSERT ( GRPC_CALL_OK = = <nl> - grpc_call_invoke ( c , f . client_cq , tag ( 2 ) , tag ( 3 ) , 0 ) ) ; <nl> + grpc_call_invoke_old ( c , f . client_cq , tag ( 2 ) , tag ( 3 ) , 0 ) ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_writes_done ( c , tag ( 4 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_writes_done_old ( c , tag ( 4 ) ) ) ; <nl> cq_expect_finish_accepted ( v_client , tag ( 4 ) , GRPC_OP_OK ) ; <nl> cq_verify ( v_client ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_server_request_call ( f . server , tag ( 100 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_server_request_call_old ( f . server , tag ( 100 ) ) ) ; <nl> cq_expect_server_rpc_new ( v_server , & s , tag ( 100 ) , " / foo " , " test . google . com " , <nl> deadline , NULL ) ; <nl> cq_verify ( v_server ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_server_accept ( s , f . server_cq , tag ( 102 ) ) ) ; <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_server_end_initial_metadata ( s , 0 ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = <nl> + grpc_call_server_accept_old ( s , f . 
server_cq , tag ( 102 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_server_end_initial_metadata_old ( s , 0 ) ) ; <nl> cq_expect_client_metadata_read ( v_client , tag ( 2 ) , NULL ) ; <nl> cq_verify ( v_client ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_write_status ( <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_write_status_old ( <nl> s , GRPC_STATUS_UNIMPLEMENTED , " xyz " , tag ( 5 ) ) ) ; <nl> cq_expect_finished_with_status ( v_client , tag ( 3 ) , GRPC_STATUS_UNIMPLEMENTED , <nl> " xyz " , NULL ) ; <nl> static void test_max_concurrent_streams ( grpc_end2end_test_config config ) { <nl> / * start two requests - ensuring that the second is not accepted until <nl> the first completes * / <nl> deadline = five_seconds_time ( ) ; <nl> - c1 = <nl> - grpc_channel_create_call ( f . client , " / alpha " , " test . google . com " , deadline ) ; <nl> + c1 = grpc_channel_create_call_old ( f . client , " / alpha " , " test . google . com " , <nl> + deadline ) ; <nl> GPR_ASSERT ( c1 ) ; <nl> - c2 = grpc_channel_create_call ( f . client , " / beta " , " test . google . com " , deadline ) ; <nl> + c2 = grpc_channel_create_call_old ( f . client , " / beta " , " test . google . com " , <nl> + deadline ) ; <nl> GPR_ASSERT ( c1 ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_server_request_call ( f . server , tag ( 100 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_server_request_call_old ( f . server , tag ( 100 ) ) ) ; <nl> <nl> GPR_ASSERT ( GRPC_CALL_OK = = <nl> - grpc_call_invoke ( c1 , f . client_cq , tag ( 301 ) , tag ( 302 ) , 0 ) ) ; <nl> + grpc_call_invoke_old ( c1 , f . client_cq , tag ( 301 ) , tag ( 302 ) , 0 ) ) ; <nl> GPR_ASSERT ( GRPC_CALL_OK = = <nl> - grpc_call_invoke ( c2 , f . client_cq , tag ( 401 ) , tag ( 402 ) , 0 ) ) ; <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_writes_done ( c1 , tag ( 303 ) ) ) ; <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_writes_done ( c2 , tag ( 303 ) ) ) ; <nl> + grpc_call_invoke_old ( c2 , f . client_cq , tag ( 401 ) , tag ( 402 ) , 0 ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_writes_done_old ( c1 , tag ( 303 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_writes_done_old ( c2 , tag ( 303 ) ) ) ; <nl> <nl> ev = grpc_completion_queue_next ( <nl> f . client_cq , gpr_time_add ( gpr_now ( ) , gpr_time_from_seconds ( 10 ) ) ) ; <nl> static void test_max_concurrent_streams ( grpc_end2end_test_config config ) { <nl> cq_verify ( v_server ) ; <nl> <nl> GPR_ASSERT ( GRPC_CALL_OK = = <nl> - grpc_call_server_accept ( s1 , f . server_cq , tag ( 102 ) ) ) ; <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_server_end_initial_metadata ( s1 , 0 ) ) ; <nl> + grpc_call_server_accept_old ( s1 , f . server_cq , tag ( 102 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_server_end_initial_metadata_old ( s1 , 0 ) ) ; <nl> cq_expect_client_metadata_read ( v_client , tag ( live_call + 1 ) , NULL ) ; <nl> cq_verify ( v_client ) ; <nl> <nl> GPR_ASSERT ( GRPC_CALL_OK = = <nl> - grpc_call_start_write_status ( s1 , GRPC_STATUS_UNIMPLEMENTED , " xyz " , <nl> - tag ( 103 ) ) ) ; <nl> + grpc_call_start_write_status_old ( s1 , GRPC_STATUS_UNIMPLEMENTED , <nl> + " xyz " , tag ( 103 ) ) ) ; <nl> cq_expect_finish_accepted ( v_server , tag ( 103 ) , GRPC_OP_OK ) ; <nl> cq_expect_finished ( v_server , tag ( 102 ) , NULL ) ; <nl> cq_verify ( v_server ) ; <nl> static void test_max_concurrent_streams ( grpc_end2end_test_config config ) { <nl> live_call = ( live_call = = 300 ) ? 
400 : 300 ; <nl> cq_verify ( v_client ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_server_request_call ( f . server , tag ( 200 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_server_request_call_old ( f . server , tag ( 200 ) ) ) ; <nl> cq_expect_server_rpc_new ( v_server , & s2 , tag ( 200 ) , <nl> live_call = = 300 ? " / alpha " : " / beta " , <nl> " test . google . com " , deadline , NULL ) ; <nl> cq_verify ( v_server ) ; <nl> <nl> GPR_ASSERT ( GRPC_CALL_OK = = <nl> - grpc_call_server_accept ( s2 , f . server_cq , tag ( 202 ) ) ) ; <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_server_end_initial_metadata ( s2 , 0 ) ) ; <nl> + grpc_call_server_accept_old ( s2 , f . server_cq , tag ( 202 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_server_end_initial_metadata_old ( s2 , 0 ) ) ; <nl> cq_expect_client_metadata_read ( v_client , tag ( live_call + 1 ) , NULL ) ; <nl> cq_verify ( v_client ) ; <nl> <nl> GPR_ASSERT ( GRPC_CALL_OK = = <nl> - grpc_call_start_write_status ( s2 , GRPC_STATUS_UNIMPLEMENTED , " xyz " , <nl> - tag ( 203 ) ) ) ; <nl> + grpc_call_start_write_status_old ( s2 , GRPC_STATUS_UNIMPLEMENTED , <nl> + " xyz " , tag ( 203 ) ) ) ; <nl> cq_expect_finish_accepted ( v_server , tag ( 203 ) , GRPC_OP_OK ) ; <nl> cq_expect_finished ( v_server , tag ( 202 ) , NULL ) ; <nl> cq_verify ( v_server ) ; <nl> mmm a / test / core / end2end / tests / ping_pong_streaming . c <nl> ppp b / test / core / end2end / tests / ping_pong_streaming . c <nl> static void test_pingpong_streaming ( grpc_end2end_test_config config , <nl> cq_verifier * v_server = cq_verifier_create ( f . server_cq ) ; <nl> <nl> gpr_log ( GPR_INFO , " testing with % d message pairs . " , messages ) ; <nl> - c = grpc_channel_create_call ( f . client , " / foo " , " test . google . com " , deadline ) ; <nl> + c = grpc_channel_create_call_old ( f . client , " / foo " , " test . google . com " , <nl> + deadline ) ; <nl> GPR_ASSERT ( c ) ; <nl> <nl> GPR_ASSERT ( GRPC_CALL_OK = = <nl> - grpc_call_invoke ( c , f . client_cq , tag ( 2 ) , tag ( 3 ) , 0 ) ) ; <nl> + grpc_call_invoke_old ( c , f . client_cq , tag ( 2 ) , tag ( 3 ) , 0 ) ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_server_request_call ( f . server , tag ( 100 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_server_request_call_old ( f . server , tag ( 100 ) ) ) ; <nl> <nl> cq_expect_server_rpc_new ( v_server , & s , tag ( 100 ) , " / foo " , " test . google . com " , <nl> deadline , NULL ) ; <nl> cq_verify ( v_server ) ; <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_server_accept ( s , f . server_cq , tag ( 102 ) ) ) ; <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_server_end_initial_metadata ( s , 0 ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = <nl> + grpc_call_server_accept_old ( s , f . 
server_cq , tag ( 102 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_server_end_initial_metadata_old ( s , 0 ) ) ; <nl> <nl> cq_expect_client_metadata_read ( v_client , tag ( 2 ) , NULL ) ; <nl> cq_verify ( v_client ) ; <nl> static void test_pingpong_streaming ( grpc_end2end_test_config config , <nl> for ( i = 0 ; i < messages ; i + + ) { <nl> request_payload = grpc_byte_buffer_create ( & request_payload_slice , 1 ) ; <nl> GPR_ASSERT ( GRPC_CALL_OK = = <nl> - grpc_call_start_write ( c , request_payload , tag ( 2 ) , 0 ) ) ; <nl> + grpc_call_start_write_old ( c , request_payload , tag ( 2 ) , 0 ) ) ; <nl> / * destroy byte buffer early to ensure async code keeps track of its <nl> contents <nl> correctly * / <nl> static void test_pingpong_streaming ( grpc_end2end_test_config config , <nl> cq_expect_write_accepted ( v_client , tag ( 2 ) , GRPC_OP_OK ) ; <nl> cq_verify ( v_client ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_read ( s , tag ( 3 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_read_old ( s , tag ( 3 ) ) ) ; <nl> cq_expect_read ( v_server , tag ( 3 ) , <nl> gpr_slice_from_copied_string ( " hello world " ) ) ; <nl> cq_verify ( v_server ) ; <nl> <nl> response_payload = grpc_byte_buffer_create ( & response_payload_slice , 1 ) ; <nl> GPR_ASSERT ( GRPC_CALL_OK = = <nl> - grpc_call_start_write ( s , response_payload , tag ( 4 ) , 0 ) ) ; <nl> + grpc_call_start_write_old ( s , response_payload , tag ( 4 ) , 0 ) ) ; <nl> / * destroy byte buffer early to ensure async code keeps track of its <nl> contents <nl> correctly * / <nl> static void test_pingpong_streaming ( grpc_end2end_test_config config , <nl> cq_expect_write_accepted ( v_server , tag ( 4 ) , GRPC_OP_OK ) ; <nl> cq_verify ( v_server ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_read ( c , tag ( 5 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_read_old ( c , tag ( 5 ) ) ) ; <nl> cq_expect_read ( v_client , tag ( 5 ) , gpr_slice_from_copied_string ( " hello you " ) ) ; <nl> cq_verify ( v_client ) ; <nl> } <nl> static void test_pingpong_streaming ( grpc_end2end_test_config config , <nl> gpr_slice_unref ( request_payload_slice ) ; <nl> gpr_slice_unref ( response_payload_slice ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_writes_done ( c , tag ( 6 ) ) ) ; <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_write_status ( <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_writes_done_old ( c , tag ( 6 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_write_status_old ( <nl> s , GRPC_STATUS_UNIMPLEMENTED , " xyz " , tag ( 7 ) ) ) ; <nl> <nl> cq_expect_finish_accepted ( v_client , tag ( 6 ) , GRPC_OP_OK ) ; <nl> mmm a / test / core / end2end / tests / request_response_with_binary_metadata_and_payload . c <nl> ppp b / test / core / end2end / tests / request_response_with_binary_metadata_and_payload . c <nl> static void test_request_response_with_metadata_and_payload ( <nl> cq_verifier * v_client = cq_verifier_create ( f . client_cq ) ; <nl> cq_verifier * v_server = cq_verifier_create ( f . server_cq ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_server_request_call ( f . server , tag ( 100 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_server_request_call_old ( f . server , tag ( 100 ) ) ) ; <nl> <nl> / * byte buffer holds the slice , we can unref it already * / <nl> gpr_slice_unref ( request_payload_slice ) ; <nl> gpr_slice_unref ( response_payload_slice ) ; <nl> <nl> - c = grpc_channel_create_call ( f . client , " / foo " , " test . google . 
com " , deadline ) ; <nl> + c = grpc_channel_create_call_old ( f . client , " / foo " , " test . google . com " , <nl> + deadline ) ; <nl> GPR_ASSERT ( c ) ; <nl> <nl> / * add multiple metadata * / <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_add_metadata ( c , & meta1 , 0 ) ) ; <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_add_metadata ( c , & meta2 , 0 ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_add_metadata_old ( c , & meta1 , 0 ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_add_metadata_old ( c , & meta2 , 0 ) ) ; <nl> <nl> GPR_ASSERT ( GRPC_CALL_OK = = <nl> - grpc_call_invoke ( c , f . client_cq , tag ( 2 ) , tag ( 3 ) , 0 ) ) ; <nl> + grpc_call_invoke_old ( c , f . client_cq , tag ( 2 ) , tag ( 3 ) , 0 ) ) ; <nl> <nl> GPR_ASSERT ( GRPC_CALL_OK = = <nl> - grpc_call_start_write ( c , request_payload , tag ( 4 ) , 0 ) ) ; <nl> + grpc_call_start_write_old ( c , request_payload , tag ( 4 ) , 0 ) ) ; <nl> / * destroy byte buffer early to ensure async code keeps track of its contents <nl> correctly * / <nl> grpc_byte_buffer_destroy ( request_payload ) ; <nl> static void test_request_response_with_metadata_and_payload ( <nl> " \ x10 \ x11 \ x12 \ x13 \ x14 \ x15 \ x16 \ x17 \ x18 \ x19 \ x1a \ x1b \ x1c \ x1d " , NULL ) ; <nl> cq_verify ( v_server ) ; <nl> <nl> - grpc_call_server_accept ( s , f . server_cq , tag ( 102 ) ) ; <nl> + grpc_call_server_accept_old ( s , f . server_cq , tag ( 102 ) ) ; <nl> <nl> / * add multiple metadata * / <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_add_metadata ( s , & meta3 , 0 ) ) ; <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_add_metadata ( s , & meta4 , 0 ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_add_metadata_old ( s , & meta3 , 0 ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_add_metadata_old ( s , & meta4 , 0 ) ) ; <nl> <nl> - grpc_call_server_end_initial_metadata ( s , 0 ) ; <nl> + grpc_call_server_end_initial_metadata_old ( s , 0 ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_read ( s , tag ( 5 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_read_old ( s , tag ( 5 ) ) ) ; <nl> cq_expect_read ( v_server , tag ( 5 ) , gpr_slice_from_copied_string ( " hello world " ) ) ; <nl> cq_verify ( v_server ) ; <nl> <nl> GPR_ASSERT ( GRPC_CALL_OK = = <nl> - grpc_call_start_write ( s , response_payload , tag ( 6 ) , 0 ) ) ; <nl> + grpc_call_start_write_old ( s , response_payload , tag ( 6 ) , 0 ) ) ; <nl> / * destroy byte buffer early to ensure async code keeps track of its contents <nl> correctly * / <nl> grpc_byte_buffer_destroy ( response_payload ) ; <nl> static void test_request_response_with_metadata_and_payload ( <nl> " \ xf0 \ xf1 \ xf2 \ xf3 \ xf4 \ xf5 \ xf6 \ xf7 \ xf8 \ xf9 \ xfa \ xfb \ xfc \ xfd \ xfe \ xff " , NULL ) ; <nl> cq_verify ( v_client ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_read ( c , tag ( 7 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_read_old ( c , tag ( 7 ) ) ) ; <nl> cq_expect_read ( v_client , tag ( 7 ) , gpr_slice_from_copied_string ( " hello you " ) ) ; <nl> cq_verify ( v_client ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_writes_done ( c , tag ( 8 ) ) ) ; <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_write_status ( <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_writes_done_old ( c , tag ( 8 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_write_status_old ( <nl> s , GRPC_STATUS_UNIMPLEMENTED , " xyz " , tag ( 9 ) ) ) ; <nl> <nl> cq_expect_finish_accepted ( v_client , tag ( 8 ) , GRPC_OP_OK ) ; <nl> mmm 
a / test / core / end2end / tests / request_response_with_metadata_and_payload . c <nl> ppp b / test / core / end2end / tests / request_response_with_metadata_and_payload . c <nl> static void test_request_response_with_metadata_and_payload ( <nl> cq_verifier * v_client = cq_verifier_create ( f . client_cq ) ; <nl> cq_verifier * v_server = cq_verifier_create ( f . server_cq ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_server_request_call ( f . server , tag ( 100 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_server_request_call_old ( f . server , tag ( 100 ) ) ) ; <nl> <nl> / * byte buffer holds the slice , we can unref it already * / <nl> gpr_slice_unref ( request_payload_slice ) ; <nl> gpr_slice_unref ( response_payload_slice ) ; <nl> <nl> - c = grpc_channel_create_call ( f . client , " / foo " , " test . google . com " , deadline ) ; <nl> + c = grpc_channel_create_call_old ( f . client , " / foo " , " test . google . com " , <nl> + deadline ) ; <nl> GPR_ASSERT ( c ) ; <nl> <nl> / * add multiple metadata * / <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_add_metadata ( c , & meta1 , 0 ) ) ; <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_add_metadata ( c , & meta2 , 0 ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_add_metadata_old ( c , & meta1 , 0 ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_add_metadata_old ( c , & meta2 , 0 ) ) ; <nl> <nl> GPR_ASSERT ( GRPC_CALL_OK = = <nl> - grpc_call_invoke ( c , f . client_cq , tag ( 2 ) , tag ( 3 ) , 0 ) ) ; <nl> + grpc_call_invoke_old ( c , f . client_cq , tag ( 2 ) , tag ( 3 ) , 0 ) ) ; <nl> <nl> GPR_ASSERT ( GRPC_CALL_OK = = <nl> - grpc_call_start_write ( c , request_payload , tag ( 4 ) , 0 ) ) ; <nl> + grpc_call_start_write_old ( c , request_payload , tag ( 4 ) , 0 ) ) ; <nl> / * destroy byte buffer early to ensure async code keeps track of its contents <nl> correctly * / <nl> grpc_byte_buffer_destroy ( request_payload ) ; <nl> static void test_request_response_with_metadata_and_payload ( <nl> deadline , " key1 " , " val1 " , " key2 " , " val2 " , NULL ) ; <nl> cq_verify ( v_server ) ; <nl> <nl> - grpc_call_server_accept ( s , f . server_cq , tag ( 102 ) ) ; <nl> + grpc_call_server_accept_old ( s , f . 
server_cq , tag ( 102 ) ) ; <nl> <nl> / * add multiple metadata * / <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_add_metadata ( s , & meta3 , 0 ) ) ; <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_add_metadata ( s , & meta4 , 0 ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_add_metadata_old ( s , & meta3 , 0 ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_add_metadata_old ( s , & meta4 , 0 ) ) ; <nl> <nl> - grpc_call_server_end_initial_metadata ( s , 0 ) ; <nl> + grpc_call_server_end_initial_metadata_old ( s , 0 ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_read ( s , tag ( 5 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_read_old ( s , tag ( 5 ) ) ) ; <nl> cq_expect_read ( v_server , tag ( 5 ) , gpr_slice_from_copied_string ( " hello world " ) ) ; <nl> cq_verify ( v_server ) ; <nl> <nl> GPR_ASSERT ( GRPC_CALL_OK = = <nl> - grpc_call_start_write ( s , response_payload , tag ( 6 ) , 0 ) ) ; <nl> + grpc_call_start_write_old ( s , response_payload , tag ( 6 ) , 0 ) ) ; <nl> / * destroy byte buffer early to ensure async code keeps track of its contents <nl> correctly * / <nl> grpc_byte_buffer_destroy ( response_payload ) ; <nl> static void test_request_response_with_metadata_and_payload ( <nl> " val4 " , NULL ) ; <nl> cq_verify ( v_client ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_read ( c , tag ( 7 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_read_old ( c , tag ( 7 ) ) ) ; <nl> cq_expect_read ( v_client , tag ( 7 ) , gpr_slice_from_copied_string ( " hello you " ) ) ; <nl> cq_verify ( v_client ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_writes_done ( c , tag ( 8 ) ) ) ; <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_write_status ( <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_writes_done_old ( c , tag ( 8 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_write_status_old ( <nl> s , GRPC_STATUS_UNIMPLEMENTED , " xyz " , tag ( 9 ) ) ) ; <nl> <nl> cq_expect_finish_accepted ( v_client , tag ( 8 ) , GRPC_OP_OK ) ; <nl> mmm a / test / core / end2end / tests / request_response_with_payload . c <nl> ppp b / test / core / end2end / tests / request_response_with_payload . c <nl> static void request_response_with_payload ( grpc_end2end_test_fixture f ) { <nl> gpr_slice_unref ( request_payload_slice ) ; <nl> gpr_slice_unref ( response_payload_slice ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_server_request_call ( f . server , tag ( 100 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_server_request_call_old ( f . server , tag ( 100 ) ) ) ; <nl> <nl> - c = grpc_channel_create_call ( f . client , " / foo " , " test . google . com " , deadline ) ; <nl> + c = grpc_channel_create_call_old ( f . client , " / foo " , " test . google . com " , <nl> + deadline ) ; <nl> GPR_ASSERT ( c ) ; <nl> <nl> GPR_ASSERT ( GRPC_CALL_OK = = <nl> - grpc_call_invoke ( c , f . client_cq , tag ( 2 ) , tag ( 3 ) , 0 ) ) ; <nl> + grpc_call_invoke_old ( c , f . 
client_cq , tag ( 2 ) , tag ( 3 ) , 0 ) ) ; <nl> <nl> GPR_ASSERT ( GRPC_CALL_OK = = <nl> - grpc_call_start_write ( c , request_payload , tag ( 4 ) , 0 ) ) ; <nl> + grpc_call_start_write_old ( c , request_payload , tag ( 4 ) , 0 ) ) ; <nl> / * destroy byte buffer early to ensure async code keeps track of its contents <nl> correctly * / <nl> grpc_byte_buffer_destroy ( request_payload ) ; <nl> static void request_response_with_payload ( grpc_end2end_test_fixture f ) { <nl> deadline , NULL ) ; <nl> cq_verify ( v_server ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_server_accept ( s , f . server_cq , tag ( 102 ) ) ) ; <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_server_end_initial_metadata ( s , 0 ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = <nl> + grpc_call_server_accept_old ( s , f . server_cq , tag ( 102 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_server_end_initial_metadata_old ( s , 0 ) ) ; <nl> cq_expect_client_metadata_read ( v_client , tag ( 2 ) , NULL ) ; <nl> cq_verify ( v_client ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_read ( s , tag ( 5 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_read_old ( s , tag ( 5 ) ) ) ; <nl> cq_expect_read ( v_server , tag ( 5 ) , gpr_slice_from_copied_string ( " hello world " ) ) ; <nl> cq_verify ( v_server ) ; <nl> <nl> GPR_ASSERT ( GRPC_CALL_OK = = <nl> - grpc_call_start_write ( s , response_payload , tag ( 6 ) , 0 ) ) ; <nl> + grpc_call_start_write_old ( s , response_payload , tag ( 6 ) , 0 ) ) ; <nl> / * destroy byte buffer early to ensure async code keeps track of its contents <nl> correctly * / <nl> grpc_byte_buffer_destroy ( response_payload ) ; <nl> cq_expect_write_accepted ( v_server , tag ( 6 ) , GRPC_OP_OK ) ; <nl> cq_verify ( v_server ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_read ( c , tag ( 7 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_read_old ( c , tag ( 7 ) ) ) ; <nl> cq_expect_read ( v_client , tag ( 7 ) , gpr_slice_from_copied_string ( " hello you " ) ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_writes_done ( c , tag ( 8 ) ) ) ; <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_write_status ( <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_writes_done_old ( c , tag ( 8 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_write_status_old ( <nl> s , GRPC_STATUS_UNIMPLEMENTED , " xyz " , tag ( 9 ) ) ) ; <nl> <nl> cq_expect_finish_accepted ( v_client , tag ( 8 ) , GRPC_OP_OK ) ; <nl> mmm a / test / core / end2end / tests / request_response_with_trailing_metadata_and_payload . c <nl> ppp b / test / core / end2end / tests / request_response_with_trailing_metadata_and_payload . c <nl> static void test_request_response_with_metadata_and_payload ( <nl> cq_verifier * v_client = cq_verifier_create ( f . client_cq ) ; <nl> cq_verifier * v_server = cq_verifier_create ( f . server_cq ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_server_request_call ( f . server , tag ( 100 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_server_request_call_old ( f . server , tag ( 100 ) ) ) ; <nl> <nl> / * byte buffer holds the slice , we can unref it already * / <nl> gpr_slice_unref ( request_payload_slice ) ; <nl> gpr_slice_unref ( response_payload_slice ) ; <nl> <nl> - c = grpc_channel_create_call ( f . client , " / foo " , " test . google . com " , deadline ) ; <nl> + c = grpc_channel_create_call_old ( f . client , " / foo " , " test . google . 
com " , <nl> + deadline ) ; <nl> GPR_ASSERT ( c ) ; <nl> <nl> / * add multiple metadata * / <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_add_metadata ( c , & meta1 , 0 ) ) ; <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_add_metadata ( c , & meta2 , 0 ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_add_metadata_old ( c , & meta1 , 0 ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_add_metadata_old ( c , & meta2 , 0 ) ) ; <nl> <nl> GPR_ASSERT ( GRPC_CALL_OK = = <nl> - grpc_call_invoke ( c , f . client_cq , tag ( 2 ) , tag ( 3 ) , 0 ) ) ; <nl> + grpc_call_invoke_old ( c , f . client_cq , tag ( 2 ) , tag ( 3 ) , 0 ) ) ; <nl> <nl> GPR_ASSERT ( GRPC_CALL_OK = = <nl> - grpc_call_start_write ( c , request_payload , tag ( 4 ) , 0 ) ) ; <nl> + grpc_call_start_write_old ( c , request_payload , tag ( 4 ) , 0 ) ) ; <nl> / * destroy byte buffer early to ensure async code keeps track of its contents <nl> correctly * / <nl> grpc_byte_buffer_destroy ( request_payload ) ; <nl> static void test_request_response_with_metadata_and_payload ( <nl> deadline , " key1 " , " val1 " , " key2 " , " val2 " , NULL ) ; <nl> cq_verify ( v_server ) ; <nl> <nl> - grpc_call_server_accept ( s , f . server_cq , tag ( 102 ) ) ; <nl> + grpc_call_server_accept_old ( s , f . server_cq , tag ( 102 ) ) ; <nl> <nl> / * add multiple metadata * / <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_add_metadata ( s , & meta3 , 0 ) ) ; <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_add_metadata ( s , & meta4 , 0 ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_add_metadata_old ( s , & meta3 , 0 ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_add_metadata_old ( s , & meta4 , 0 ) ) ; <nl> <nl> - grpc_call_server_end_initial_metadata ( s , 0 ) ; <nl> + grpc_call_server_end_initial_metadata_old ( s , 0 ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_add_metadata ( s , & meta5 , 0 ) ) ; <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_add_metadata ( s , & meta6 , 0 ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_add_metadata_old ( s , & meta5 , 0 ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_add_metadata_old ( s , & meta6 , 0 ) ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_read ( s , tag ( 5 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_read_old ( s , tag ( 5 ) ) ) ; <nl> cq_expect_read ( v_server , tag ( 5 ) , gpr_slice_from_copied_string ( " hello world " ) ) ; <nl> cq_verify ( v_server ) ; <nl> <nl> GPR_ASSERT ( GRPC_CALL_OK = = <nl> - grpc_call_start_write ( s , response_payload , tag ( 6 ) , 0 ) ) ; <nl> + grpc_call_start_write_old ( s , response_payload , tag ( 6 ) , 0 ) ) ; <nl> / * destroy byte buffer early to ensure async code keeps track of its contents <nl> correctly * / <nl> grpc_byte_buffer_destroy ( response_payload ) ; <nl> static void test_request_response_with_metadata_and_payload ( <nl> " val4 " , NULL ) ; <nl> cq_verify ( v_client ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_read ( c , tag ( 7 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_read_old ( c , tag ( 7 ) ) ) ; <nl> cq_expect_read ( v_client , tag ( 7 ) , gpr_slice_from_copied_string ( " hello you " ) ) ; <nl> cq_verify ( v_client ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_writes_done ( c , tag ( 8 ) ) ) ; <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_write_status ( <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_writes_done_old ( c , tag ( 8 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_write_status_old ( <nl> s , 
GRPC_STATUS_UNIMPLEMENTED , " xyz " , tag ( 9 ) ) ) ; <nl> <nl> cq_expect_finish_accepted ( v_client , tag ( 8 ) , GRPC_OP_OK ) ; <nl> mmm a / test / core / end2end / tests / request_with_large_metadata . c <nl> ppp b / test / core / end2end / tests / request_with_large_metadata . c <nl> static void test_request_with_large_metadata ( grpc_end2end_test_config config ) { <nl> cq_verifier * v_server = cq_verifier_create ( f . server_cq ) ; <nl> const int large_size = 64 * 1024 ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_server_request_call ( f . server , tag ( 100 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_server_request_call_old ( f . server , tag ( 100 ) ) ) ; <nl> <nl> meta . key = " key " ; <nl> meta . value = gpr_malloc ( large_size + 1 ) ; <nl> static void test_request_with_large_metadata ( grpc_end2end_test_config config ) { <nl> meta . value [ large_size ] = 0 ; <nl> meta . value_length = large_size ; <nl> <nl> - c = grpc_channel_create_call ( f . client , " / foo " , " test . google . com " , deadline ) ; <nl> + c = grpc_channel_create_call_old ( f . client , " / foo " , " test . google . com " , <nl> + deadline ) ; <nl> GPR_ASSERT ( c ) ; <nl> <nl> / * add the metadata * / <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_add_metadata ( c , & meta , 0 ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_add_metadata_old ( c , & meta , 0 ) ) ; <nl> <nl> GPR_ASSERT ( GRPC_CALL_OK = = <nl> - grpc_call_invoke ( c , f . client_cq , tag ( 2 ) , tag ( 3 ) , 0 ) ) ; <nl> + grpc_call_invoke_old ( c , f . client_cq , tag ( 2 ) , tag ( 3 ) , 0 ) ) ; <nl> <nl> cq_expect_server_rpc_new ( v_server , & s , tag ( 100 ) , " / foo " , " test . google . com " , <nl> deadline , " key " , meta . value , NULL ) ; <nl> static void test_request_with_large_metadata ( grpc_end2end_test_config config ) { <nl> cq_expect_client_metadata_read ( v_client , tag ( 2 ) , NULL ) ; <nl> cq_verify ( v_client ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_writes_done ( c , tag ( 8 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_writes_done_old ( c , tag ( 8 ) ) ) ; <nl> GPR_ASSERT ( GRPC_CALL_OK = = <nl> - grpc_call_start_write_status ( s , GRPC_STATUS_OK , NULL , tag ( 9 ) ) ) ; <nl> + grpc_call_start_write_status_old ( s , GRPC_STATUS_OK , NULL , tag ( 9 ) ) ) ; <nl> <nl> cq_expect_finish_accepted ( v_client , tag ( 8 ) , GRPC_OP_OK ) ; <nl> cq_expect_finished_with_status ( v_client , tag ( 3 ) , GRPC_STATUS_OK , NULL , NULL ) ; <nl> mmm a / test / core / end2end / tests / request_with_payload . c <nl> ppp b / test / core / end2end / tests / request_with_payload . c <nl> static void test_invoke_request_with_payload ( grpc_end2end_test_config config ) { <nl> / * byte buffer holds the slice , we can unref it already * / <nl> gpr_slice_unref ( payload_slice ) ; <nl> <nl> - c = grpc_channel_create_call ( f . client , " / foo " , " test . google . com " , deadline ) ; <nl> + c = grpc_channel_create_call_old ( f . client , " / foo " , " test . google . com " , <nl> + deadline ) ; <nl> GPR_ASSERT ( c ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_server_request_call ( f . server , tag ( 100 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_server_request_call_old ( f . server , tag ( 100 ) ) ) ; <nl> <nl> GPR_ASSERT ( GRPC_CALL_OK = = <nl> - grpc_call_invoke ( c , f . client_cq , tag ( 2 ) , tag ( 3 ) , 0 ) ) ; <nl> + grpc_call_invoke_old ( c , f . 
client_cq , tag ( 2 ) , tag ( 3 ) , 0 ) ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_write ( c , payload , tag ( 4 ) , 0 ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_write_old ( c , payload , tag ( 4 ) , 0 ) ) ; <nl> / * destroy byte buffer early to ensure async code keeps track of its contents <nl> correctly * / <nl> grpc_byte_buffer_destroy ( payload ) ; <nl> static void test_invoke_request_with_payload ( grpc_end2end_test_config config ) { <nl> cq_expect_client_metadata_read ( v_client , tag ( 2 ) , NULL ) ; <nl> cq_verify ( v_client ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_read ( s , tag ( 4 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_read_old ( s , tag ( 4 ) ) ) ; <nl> cq_expect_read ( v_server , tag ( 4 ) , gpr_slice_from_copied_string ( " hello world " ) ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_writes_done ( c , tag ( 5 ) ) ) ; <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_write_status ( <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_writes_done_old ( c , tag ( 5 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_write_status_old ( <nl> s , GRPC_STATUS_UNIMPLEMENTED , " xyz " , tag ( 6 ) ) ) ; <nl> cq_expect_finish_accepted ( v_client , tag ( 5 ) , GRPC_OP_OK ) ; <nl> cq_expect_finished_with_status ( v_client , tag ( 3 ) , GRPC_STATUS_UNIMPLEMENTED , <nl> mmm a / test / core / end2end / tests / simple_delayed_request . c <nl> ppp b / test / core / end2end / tests / simple_delayed_request . c <nl> static void simple_delayed_request_body ( grpc_end2end_test_config config , <nl> <nl> config . init_client ( f , client_args ) ; <nl> <nl> - c = grpc_channel_create_call ( f - > client , " / foo " , " test . google . com " , deadline ) ; <nl> + c = grpc_channel_create_call_old ( f - > client , " / foo " , " test . google . com " , <nl> + deadline ) ; <nl> GPR_ASSERT ( c ) ; <nl> <nl> GPR_ASSERT ( GRPC_CALL_OK = = <nl> - grpc_call_invoke ( c , f - > client_cq , tag ( 2 ) , tag ( 3 ) , 0 ) ) ; <nl> + grpc_call_invoke_old ( c , f - > client_cq , tag ( 2 ) , tag ( 3 ) , 0 ) ) ; <nl> <nl> config . init_server ( f , server_args ) ; <nl> <nl> cq_verify ( v_client ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_writes_done ( c , tag ( 4 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_writes_done_old ( c , tag ( 4 ) ) ) ; <nl> cq_expect_finish_accepted ( v_client , tag ( 4 ) , GRPC_OP_OK ) ; <nl> cq_verify ( v_client ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_server_request_call ( f - > server , tag ( 100 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_server_request_call_old ( f - > server , tag ( 100 ) ) ) ; <nl> cq_expect_server_rpc_new ( v_server , & s , tag ( 100 ) , " / foo " , " test . google . 
com " , <nl> deadline , NULL ) ; <nl> cq_verify ( v_server ) ; <nl> <nl> GPR_ASSERT ( GRPC_CALL_OK = = <nl> - grpc_call_server_accept ( s , f - > server_cq , tag ( 102 ) ) ) ; <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_server_end_initial_metadata ( s , 0 ) ) ; <nl> + grpc_call_server_accept_old ( s , f - > server_cq , tag ( 102 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_server_end_initial_metadata_old ( s , 0 ) ) ; <nl> cq_expect_client_metadata_read ( v_client , tag ( 2 ) , NULL ) ; <nl> cq_verify ( v_client ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_write_status ( <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_write_status_old ( <nl> s , GRPC_STATUS_UNIMPLEMENTED , " xyz " , tag ( 5 ) ) ) ; <nl> cq_expect_finished_with_status ( v_client , tag ( 3 ) , GRPC_STATUS_UNIMPLEMENTED , <nl> " xyz " , NULL ) ; <nl> mmm a / test / core / end2end / tests / simple_request . c <nl> ppp b / test / core / end2end / tests / simple_request . c <nl> static void simple_request_body ( grpc_end2end_test_fixture f ) { <nl> cq_verifier * v_client = cq_verifier_create ( f . client_cq ) ; <nl> cq_verifier * v_server = cq_verifier_create ( f . server_cq ) ; <nl> <nl> - c = grpc_channel_create_call ( f . client , " / foo " , " test . google . com " , deadline ) ; <nl> + c = grpc_channel_create_call_old ( f . client , " / foo " , " test . google . com " , <nl> + deadline ) ; <nl> GPR_ASSERT ( c ) ; <nl> <nl> GPR_ASSERT ( GRPC_CALL_OK = = <nl> - grpc_call_invoke ( c , f . client_cq , tag ( 2 ) , tag ( 3 ) , 0 ) ) ; <nl> + grpc_call_invoke_old ( c , f . client_cq , tag ( 2 ) , tag ( 3 ) , 0 ) ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_writes_done ( c , tag ( 4 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_writes_done_old ( c , tag ( 4 ) ) ) ; <nl> cq_expect_finish_accepted ( v_client , tag ( 4 ) , GRPC_OP_OK ) ; <nl> cq_verify ( v_client ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_server_request_call ( f . server , tag ( 100 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_server_request_call_old ( f . server , tag ( 100 ) ) ) ; <nl> cq_expect_server_rpc_new ( v_server , & s , tag ( 100 ) , " / foo " , " test . google . com " , <nl> deadline , NULL ) ; <nl> cq_verify ( v_server ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_server_accept ( s , f . server_cq , tag ( 102 ) ) ) ; <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_server_end_initial_metadata ( s , 0 ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = <nl> + grpc_call_server_accept_old ( s , f . server_cq , tag ( 102 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_server_end_initial_metadata_old ( s , 0 ) ) ; <nl> cq_expect_client_metadata_read ( v_client , tag ( 2 ) , NULL ) ; <nl> cq_verify ( v_client ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_write_status ( <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_write_status_old ( <nl> s , GRPC_STATUS_UNIMPLEMENTED , " xyz " , tag ( 5 ) ) ) ; <nl> cq_expect_finished_with_status ( v_client , tag ( 3 ) , GRPC_STATUS_UNIMPLEMENTED , <nl> " xyz " , NULL ) ; <nl> static void simple_request_body2 ( grpc_end2end_test_fixture f ) { <nl> cq_verifier * v_client = cq_verifier_create ( f . client_cq ) ; <nl> cq_verifier * v_server = cq_verifier_create ( f . server_cq ) ; <nl> <nl> - c = grpc_channel_create_call ( f . client , " / foo " , " test . google . com " , deadline ) ; <nl> + c = grpc_channel_create_call_old ( f . client , " / foo " , " test . google . 
com " , <nl> + deadline ) ; <nl> GPR_ASSERT ( c ) ; <nl> <nl> GPR_ASSERT ( GRPC_CALL_OK = = <nl> - grpc_call_invoke ( c , f . client_cq , tag ( 2 ) , tag ( 3 ) , 0 ) ) ; <nl> + grpc_call_invoke_old ( c , f . client_cq , tag ( 2 ) , tag ( 3 ) , 0 ) ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_writes_done ( c , tag ( 4 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_writes_done_old ( c , tag ( 4 ) ) ) ; <nl> cq_expect_finish_accepted ( v_client , tag ( 4 ) , GRPC_OP_OK ) ; <nl> cq_verify ( v_client ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_server_request_call ( f . server , tag ( 100 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_server_request_call_old ( f . server , tag ( 100 ) ) ) ; <nl> cq_expect_server_rpc_new ( v_server , & s , tag ( 100 ) , " / foo " , " test . google . com " , <nl> deadline , NULL ) ; <nl> cq_verify ( v_server ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_server_accept ( s , f . server_cq , tag ( 102 ) ) ) ; <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_server_end_initial_metadata ( s , 0 ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = <nl> + grpc_call_server_accept_old ( s , f . server_cq , tag ( 102 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_server_end_initial_metadata_old ( s , 0 ) ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_write_status ( <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_write_status_old ( <nl> s , GRPC_STATUS_UNIMPLEMENTED , " xyz " , tag ( 5 ) ) ) ; <nl> cq_expect_finish_accepted ( v_server , tag ( 5 ) , GRPC_OP_OK ) ; <nl> cq_verify ( v_server ) ; <nl> mmm a / test / core / end2end / tests / thread_stress . c <nl> ppp b / test / core / end2end / tests / thread_stress . c <nl> static void drain_cq ( int client , grpc_completion_queue * cq ) { <nl> static void start_request ( void ) { <nl> gpr_slice slice = gpr_slice_malloc ( 100 ) ; <nl> grpc_byte_buffer * buf ; <nl> - grpc_call * call = grpc_channel_create_call ( <nl> + grpc_call * call = grpc_channel_create_call_old ( <nl> g_fixture . client , " / Foo " , " test . google . com " , g_test_end_time ) ; <nl> <nl> memset ( GPR_SLICE_START_PTR ( slice ) , 1 , GPR_SLICE_LENGTH ( slice ) ) ; <nl> static void start_request ( void ) { <nl> <nl> g_active_requests + + ; <nl> GPR_ASSERT ( GRPC_CALL_OK = = <nl> - grpc_call_invoke ( call , g_fixture . client_cq , NULL , NULL , 0 ) ) ; <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_read ( call , NULL ) ) ; <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_write ( call , buf , NULL , 0 ) ) ; <nl> + grpc_call_invoke_old ( call , g_fixture . client_cq , NULL , NULL , 0 ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_read_old ( call , NULL ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_write_old ( call , buf , NULL , 0 ) ) ; <nl> <nl> grpc_byte_buffer_destroy ( buf ) ; <nl> } <nl> static void client_thread ( void * p ) { <nl> case GRPC_READ : <nl> break ; <nl> case GRPC_WRITE_ACCEPTED : <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_writes_done ( ev - > call , NULL ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_writes_done_old ( ev - > call , NULL ) ) ; <nl> break ; <nl> case GRPC_FINISH_ACCEPTED : <nl> break ; <nl> static void client_thread ( void * p ) { <nl> static void request_server_call ( void ) { <nl> gpr_refcount * rc = gpr_malloc ( sizeof ( gpr_refcount ) ) ; <nl> gpr_ref_init ( rc , 2 ) ; <nl> - grpc_server_request_call ( g_fixture . server , rc ) ; <nl> + grpc_server_request_call_old ( g_fixture . 
server , rc ) ; <nl> } <nl> <nl> static void maybe_end_server_call ( grpc_call * call , gpr_refcount * rc ) { <nl> if ( gpr_unref ( rc ) ) { <nl> - GPR_ASSERT ( GRPC_CALL_OK = = <nl> - grpc_call_start_write_status ( call , GRPC_STATUS_OK , NULL , NULL ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_write_status_old ( <nl> + call , GRPC_STATUS_OK , NULL , NULL ) ) ; <nl> gpr_free ( rc ) ; <nl> } <nl> } <nl> static void server_thread ( void * p ) { <nl> case GRPC_SERVER_RPC_NEW : <nl> if ( ev - > call ) { <nl> GPR_ASSERT ( GRPC_CALL_OK = = <nl> - grpc_call_server_accept ( ev - > call , g_fixture . server_cq , <nl> - ev - > tag ) ) ; <nl> + grpc_call_server_accept_old ( <nl> + ev - > call , g_fixture . server_cq , ev - > tag ) ) ; <nl> GPR_ASSERT ( GRPC_CALL_OK = = <nl> - grpc_call_server_end_initial_metadata ( ev - > call , 0 ) ) ; <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_read ( ev - > call , ev - > tag ) ) ; <nl> + grpc_call_server_end_initial_metadata_old ( ev - > call , 0 ) ) ; <nl> GPR_ASSERT ( GRPC_CALL_OK = = <nl> - grpc_call_start_write ( ev - > call , buf , ev - > tag , 0 ) ) ; <nl> + grpc_call_start_read_old ( ev - > call , ev - > tag ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = <nl> + grpc_call_start_write_old ( ev - > call , buf , ev - > tag , 0 ) ) ; <nl> } else { <nl> gpr_free ( ev - > tag ) ; <nl> } <nl> break ; <nl> case GRPC_READ : <nl> if ( ev - > data . read ) { <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_read ( ev - > call , ev - > tag ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = <nl> + grpc_call_start_read_old ( ev - > call , ev - > tag ) ) ; <nl> } else { <nl> maybe_end_server_call ( ev - > call , ev - > tag ) ; <nl> } <nl> mmm a / test / core / end2end / tests / writes_done_hangs_with_pending_read . c <nl> ppp b / test / core / end2end / tests / writes_done_hangs_with_pending_read . c <nl> static void test_writes_done_hangs_with_pending_read ( <nl> gpr_slice_unref ( request_payload_slice ) ; <nl> gpr_slice_unref ( response_payload_slice ) ; <nl> <nl> - c = grpc_channel_create_call ( f . client , " / foo " , " test . google . com " , deadline ) ; <nl> + c = grpc_channel_create_call_old ( f . client , " / foo " , " test . google . com " , <nl> + deadline ) ; <nl> GPR_ASSERT ( c ) ; <nl> <nl> GPR_ASSERT ( GRPC_CALL_OK = = <nl> - grpc_call_invoke ( c , f . client_cq , tag ( 2 ) , tag ( 3 ) , 0 ) ) ; <nl> + grpc_call_invoke_old ( c , f . client_cq , tag ( 2 ) , tag ( 3 ) , 0 ) ) ; <nl> <nl> GPR_ASSERT ( GRPC_CALL_OK = = <nl> - grpc_call_start_write ( c , request_payload , tag ( 4 ) , 0 ) ) ; <nl> + grpc_call_start_write_old ( c , request_payload , tag ( 4 ) , 0 ) ) ; <nl> / * destroy byte buffer early to ensure async code keeps track of its contents <nl> correctly * / <nl> grpc_byte_buffer_destroy ( request_payload ) ; <nl> cq_expect_write_accepted ( v_client , tag ( 4 ) , GRPC_OP_OK ) ; <nl> cq_verify ( v_client ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_server_request_call ( f . server , tag ( 100 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_server_request_call_old ( f . server , tag ( 100 ) ) ) ; <nl> cq_expect_server_rpc_new ( v_server , & s , tag ( 100 ) , " / foo " , " test . google . com " , <nl> deadline , NULL ) ; <nl> cq_verify ( v_server ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_server_accept ( s , f . server_cq , tag ( 102 ) ) ) ; <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_server_end_initial_metadata ( s , 0 ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = <nl> + grpc_call_server_accept_old ( s , f . 
server_cq , tag ( 102 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_server_end_initial_metadata_old ( s , 0 ) ) ; <nl> cq_expect_client_metadata_read ( v_client , tag ( 2 ) , NULL ) ; <nl> cq_verify ( v_client ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_read ( s , tag ( 5 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_read_old ( s , tag ( 5 ) ) ) ; <nl> cq_expect_read ( v_server , tag ( 5 ) , gpr_slice_from_copied_string ( " hello world " ) ) ; <nl> cq_verify ( v_server ) ; <nl> <nl> GPR_ASSERT ( GRPC_CALL_OK = = <nl> - grpc_call_start_write ( s , response_payload , tag ( 6 ) , 0 ) ) ; <nl> + grpc_call_start_write_old ( s , response_payload , tag ( 6 ) , 0 ) ) ; <nl> / * destroy byte buffer early to ensure async code keeps track of its contents <nl> correctly * / <nl> grpc_byte_buffer_destroy ( response_payload ) ; <nl> cq_expect_write_accepted ( v_server , tag ( 6 ) , GRPC_OP_OK ) ; <nl> cq_verify ( v_server ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_writes_done ( c , tag ( 6 ) ) ) ; <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_write_status ( <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_writes_done_old ( c , tag ( 6 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_write_status_old ( <nl> s , GRPC_STATUS_UNIMPLEMENTED , " xyz " , tag ( 7 ) ) ) ; <nl> <nl> cq_expect_finish_accepted ( v_client , tag ( 6 ) , GRPC_OP_OK ) ; <nl> static void test_writes_done_hangs_with_pending_read ( <nl> / * does not return status because there is a pending message to be read * / <nl> cq_verify_empty ( v_client ) ; <nl> <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_read ( c , tag ( 8 ) ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_start_read_old ( c , tag ( 8 ) ) ) ; <nl> cq_expect_read ( v_client , tag ( 8 ) , gpr_slice_from_copied_string ( " hello you " ) ) ; <nl> cq_verify ( v_client ) ; <nl> <nl> mmm a / test / core / fling / client . c <nl> ppp b / test / core / fling / client . 
c <nl> static grpc_call * call ; <nl> static void init_ping_pong_request ( void ) { } <nl> <nl> static void step_ping_pong_request ( void ) { <nl> - call = grpc_channel_create_call ( channel , " / Reflector / reflectUnary " , <nl> - " localhost " , gpr_inf_future ) ; <nl> - GPR_ASSERT ( grpc_call_invoke ( call , cq , ( void * ) 1 , ( void * ) 1 , <nl> - GRPC_WRITE_BUFFER_HINT ) = = GRPC_CALL_OK ) ; <nl> - GPR_ASSERT ( grpc_call_start_write ( call , the_buffer , ( void * ) 1 , <nl> - GRPC_WRITE_BUFFER_HINT ) = = GRPC_CALL_OK ) ; <nl> + call = grpc_channel_create_call_old ( channel , " / Reflector / reflectUnary " , <nl> + " localhost " , gpr_inf_future ) ; <nl> + GPR_ASSERT ( grpc_call_invoke_old ( call , cq , ( void * ) 1 , ( void * ) 1 , <nl> + GRPC_WRITE_BUFFER_HINT ) = = GRPC_CALL_OK ) ; <nl> + GPR_ASSERT ( grpc_call_start_write_old ( call , the_buffer , ( void * ) 1 , <nl> + GRPC_WRITE_BUFFER_HINT ) = = GRPC_CALL_OK ) ; <nl> grpc_event_finish ( grpc_completion_queue_next ( cq , gpr_inf_future ) ) ; <nl> - GPR_ASSERT ( grpc_call_start_read ( call , ( void * ) 1 ) = = GRPC_CALL_OK ) ; <nl> - GPR_ASSERT ( grpc_call_writes_done ( call , ( void * ) 1 ) = = GRPC_CALL_OK ) ; <nl> + GPR_ASSERT ( grpc_call_start_read_old ( call , ( void * ) 1 ) = = GRPC_CALL_OK ) ; <nl> + GPR_ASSERT ( grpc_call_writes_done_old ( call , ( void * ) 1 ) = = GRPC_CALL_OK ) ; <nl> grpc_event_finish ( grpc_completion_queue_next ( cq , gpr_inf_future ) ) ; <nl> grpc_event_finish ( grpc_completion_queue_next ( cq , gpr_inf_future ) ) ; <nl> grpc_event_finish ( grpc_completion_queue_next ( cq , gpr_inf_future ) ) ; <nl> static void step_ping_pong_request ( void ) { <nl> } <nl> <nl> static void init_ping_pong_stream ( void ) { <nl> - call = grpc_channel_create_call ( channel , " / Reflector / reflectStream " , <nl> - " localhost " , gpr_inf_future ) ; <nl> - GPR_ASSERT ( grpc_call_invoke ( call , cq , ( void * ) 1 , ( void * ) 1 , 0 ) = = <nl> + call = grpc_channel_create_call_old ( channel , " / Reflector / reflectStream " , <nl> + " localhost " , gpr_inf_future ) ; <nl> + GPR_ASSERT ( grpc_call_invoke_old ( call , cq , ( void * ) 1 , ( void * ) 1 , 0 ) = = <nl> GRPC_CALL_OK ) ; <nl> grpc_event_finish ( grpc_completion_queue_next ( cq , gpr_inf_future ) ) ; <nl> } <nl> <nl> static void step_ping_pong_stream ( void ) { <nl> - GPR_ASSERT ( grpc_call_start_write ( call , the_buffer , ( void * ) 1 , 0 ) = = <nl> + GPR_ASSERT ( grpc_call_start_write_old ( call , the_buffer , ( void * ) 1 , 0 ) = = <nl> GRPC_CALL_OK ) ; <nl> - GPR_ASSERT ( grpc_call_start_read ( call , ( void * ) 1 ) = = GRPC_CALL_OK ) ; <nl> + GPR_ASSERT ( grpc_call_start_read_old ( call , ( void * ) 1 ) = = GRPC_CALL_OK ) ; <nl> grpc_event_finish ( grpc_completion_queue_next ( cq , gpr_inf_future ) ) ; <nl> grpc_event_finish ( grpc_completion_queue_next ( cq , gpr_inf_future ) ) ; <nl> } <nl> mmm a / test / core / fling / server . c <nl> ppp b / test / core / fling / server . 
c <nl> typedef struct { <nl> static void request_call ( void ) { <nl> call_state * s = gpr_malloc ( sizeof ( call_state ) ) ; <nl> gpr_ref_init ( & s - > pending_ops , 2 ) ; <nl> - grpc_server_request_call ( server , s ) ; <nl> + grpc_server_request_call_old ( server , s ) ; <nl> } <nl> <nl> static void sigint_handler ( int x ) { got_sigint = 1 ; } <nl> int main ( int argc , char * * argv ) { <nl> } else { <nl> s - > flags = GRPC_WRITE_BUFFER_HINT ; <nl> } <nl> - grpc_call_server_accept ( ev - > call , cq , s ) ; <nl> - grpc_call_server_end_initial_metadata ( ev - > call , s - > flags ) ; <nl> - GPR_ASSERT ( grpc_call_start_read ( ev - > call , s ) = = GRPC_CALL_OK ) ; <nl> + grpc_call_server_accept_old ( ev - > call , cq , s ) ; <nl> + grpc_call_server_end_initial_metadata_old ( ev - > call , s - > flags ) ; <nl> + GPR_ASSERT ( grpc_call_start_read_old ( ev - > call , s ) = = GRPC_CALL_OK ) ; <nl> request_call ( ) ; <nl> } else { <nl> GPR_ASSERT ( shutdown_started ) ; <nl> int main ( int argc , char * * argv ) { <nl> break ; <nl> case GRPC_WRITE_ACCEPTED : <nl> GPR_ASSERT ( ev - > data . write_accepted = = GRPC_OP_OK ) ; <nl> - GPR_ASSERT ( grpc_call_start_read ( ev - > call , s ) = = GRPC_CALL_OK ) ; <nl> + GPR_ASSERT ( grpc_call_start_read_old ( ev - > call , s ) = = GRPC_CALL_OK ) ; <nl> break ; <nl> case GRPC_READ : <nl> if ( ev - > data . read ) { <nl> - GPR_ASSERT ( grpc_call_start_write ( ev - > call , ev - > data . read , s , <nl> - s - > flags ) = = GRPC_CALL_OK ) ; <nl> + GPR_ASSERT ( grpc_call_start_write_old ( ev - > call , ev - > data . read , s , <nl> + s - > flags ) = = GRPC_CALL_OK ) ; <nl> } else { <nl> - GPR_ASSERT ( grpc_call_start_write_status ( ev - > call , GRPC_STATUS_OK , <nl> - NULL , s ) = = GRPC_CALL_OK ) ; <nl> + GPR_ASSERT ( grpc_call_start_write_status_old ( ev - > call , GRPC_STATUS_OK , <nl> + NULL , s ) = = GRPC_CALL_OK ) ; <nl> } <nl> break ; <nl> case GRPC_FINISH_ACCEPTED : <nl> mmm a / test / core / surface / lame_client_test . c <nl> ppp b / test / core / surface / lame_client_test . c <nl> int main ( int argc , char * * argv ) { <nl> <nl> chan = grpc_lame_client_channel_create ( ) ; <nl> GPR_ASSERT ( chan ) ; <nl> - call = grpc_channel_create_call ( <nl> + call = grpc_channel_create_call_old ( <nl> chan , " / Foo " , " anywhere " , <nl> gpr_time_add ( gpr_now ( ) , gpr_time_from_seconds ( 100 ) ) ) ; <nl> GPR_ASSERT ( call ) ; <nl> int main ( int argc , char * * argv ) { <nl> cqv = cq_verifier_create ( cq ) ; <nl> <nl> / * we should be able to add metadata * / <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_add_metadata ( call , & md , 0 ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_add_metadata_old ( call , & md , 0 ) ) ; <nl> <nl> / * and invoke the call * / <nl> - GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_invoke ( call , cq , tag ( 2 ) , tag ( 3 ) , 0 ) ) ; <nl> + GPR_ASSERT ( GRPC_CALL_OK = = grpc_call_invoke_old ( call , cq , tag ( 2 ) , tag ( 3 ) , 0 ) ) ; <nl> <nl> / * the call should immediately fail * / <nl> cq_expect_client_metadata_read ( cqv , tag ( 2 ) , NULL ) ; <nl>
|
Prepare for the new batch call API.
|
grpc/grpc
|
a7cac78516057512d62cdae8fc5fdf9e2f90a136
|
2015-02-02T18:16:30Z
|
mmm a / generic / THTensor . c <nl> ppp b / generic / THTensor . c <nl> int THTensor_ ( isSameSizeAs ) ( const THTensor * self , const THTensor * src ) <nl> <nl> int THTensor_ ( isSetTo ) ( const THTensor * self , const THTensor * src ) <nl> { <nl> + if ( ! self - > storage ) <nl> + return 0 ; <nl> if ( self - > storage = = src - > storage & & <nl> self - > storageOffset = = src - > storageOffset & & <nl> self - > nDimension = = src - > nDimension ) <nl>
|
Fix a bug in :isSetTo
|
pytorch/pytorch
|
59c25ebe036a69ee86fe6cb1ec2a7f5ea54807f7
|
2016-03-04T13:40:45Z
|
mmm a / dbtests / framework . cpp <nl> ppp b / dbtests / framework . cpp <nl> namespace mongo { <nl> Result * Result : : cur = 0 ; <nl> <nl> Result * Suite : : run ( ) { <nl> + log ( 1 ) < < " \ t about to setupTests " < < endl ; <nl> setupTests ( ) ; <nl> + log ( 1 ) < < " \ t done setupTests " < < endl ; <nl> <nl> Result * r = new Result ( _name ) ; <nl> Result : : cur = r ; <nl> namespace mongo { <nl> <nl> bool passes = false ; <nl> <nl> - log ( 1 ) < < " \ t " < < tc - > getName ( ) < < endl ; <nl> + log ( 1 ) < < " \ t going to run test : " < < tc - > getName ( ) < < endl ; <nl> <nl> stringstream err ; <nl> err < < tc - > getName ( ) < < " \ t " ; <nl> namespace mongo { <nl> r - > _messages . push_back ( err . str ( ) ) ; <nl> } <nl> } <nl> - <nl> + <nl> + log ( 1 ) < < " \ t DONE running tests " < < endl ; <nl> + <nl> return r ; <nl> } <nl> <nl> namespace mongo { <nl> " directory will be overwritten if it already exists " ) <nl> ( " debug " , " run tests with verbose output " ) <nl> ( " list , l " , " list available test suites " ) <nl> + ( " verbose , v " , " verbose " ) <nl> ( " seed " , po : : value < unsigned long long > ( & seed ) , " random number seed " ) <nl> ; <nl> <nl> namespace mongo { <nl> return EXIT_CLEAN ; <nl> } <nl> <nl> - if ( params . count ( " debug " ) ) { <nl> + if ( params . count ( " debug " ) | | params . count ( " verbose " ) ) { <nl> logLevel = 1 ; <nl> } <nl> <nl>
|
some debugging
|
mongodb/mongo
|
9d0c7e66a3dd61814924f91c26fd1d93896eaa7a
|
2009-09-30T14:30:25Z
|
mmm a / xbmc / filesystem / CircularCache . cpp <nl> ppp b / xbmc / filesystem / CircularCache . cpp <nl> <nl> / * <nl> - * Copyright ( C ) 2005 - 2008 Team XBMC <nl> + * Copyright ( C ) 2005 - 2012 Team XBMC <nl> * http : / / www . xbmc . org <nl> * <nl> * This Program is free software ; you can redistribute it and / or modify <nl> int CCircularCache : : Open ( ) <nl> { <nl> # ifdef _WIN32 <nl> m_handle = CreateFileMapping ( INVALID_HANDLE_VALUE , NULL , PAGE_READWRITE , 0 , m_size , NULL ) ; <nl> - if ( m_handle = = INVALID_HANDLE_VALUE ) <nl> + if ( m_handle = = NULL ) <nl> return CACHE_RC_ERROR ; <nl> m_buf = ( uint8_t * ) MapViewOfFile ( m_handle , FILE_MAP_ALL_ACCESS , 0 , 0 , 0 ) ; <nl> # else <nl>
|
Fix CircularCache::Open
|
xbmc/xbmc
|
5d441d59a0b443093fe646f3c768d0ee176e4f59
|
2012-08-20T17:41:38Z
|
mmm a / Telegram / SourceFiles / platform / linux / linux_libs . cpp <nl> ppp b / Telegram / SourceFiles / platform / linux / linux_libs . cpp <nl> bool setupGtkBase ( QLibrary & lib_gtk ) { <nl> / / Otherwise we get segfault in Ubuntu 17 . 04 in gtk_init_check ( ) call . <nl> / / See https : / / github . com / telegramdesktop / tdesktop / issues / 3176 <nl> / / See https : / / github . com / telegramdesktop / tdesktop / issues / 3162 <nl> - if ( QGuiApplication : : platformName ( ) . startsWith ( qsl ( " wayland " ) , Qt : : CaseInsensitive ) ) { <nl> + if ( QGuiApplication : : platformName ( ) . startsWith ( qsl ( " wayland " ) , Qt : : CaseInsensitive ) <nl> + & & ! lib_gtk . fileName ( ) . contains ( " gtk - x11 - 2 . 0 " ) ) { <nl> DEBUG_LOG ( ( " Limit allowed GDK backends to wayland " ) ) ; <nl> gdk_set_allowed_backends ( " wayland " ) ; <nl> - } else if ( QGuiApplication : : platformName ( ) = = qsl ( " xcb " ) ) { <nl> + } else { <nl> DEBUG_LOG ( ( " Limit allowed GDK backends to x11 " ) ) ; <nl> gdk_set_allowed_backends ( " x11 " ) ; <nl> } <nl> void start ( ) { <nl> DEBUG_LOG ( ( " Loading libraries " ) ) ; <nl> <nl> bool gtkLoaded = false ; <nl> - bool isWayland = QGuiApplication : : platformName ( ) . startsWith ( qsl ( " wayland " ) , Qt : : CaseInsensitive ) ; <nl> QLibrary lib_gtk ; <nl> <nl> if ( loadLibrary ( lib_gtk , " gtk - 3 " , 0 ) ) { <nl> gtkLoaded = setupGtkBase ( lib_gtk ) ; <nl> } <nl> - if ( ! gtkLoaded & & ! isWayland & & loadLibrary ( lib_gtk , " gtk - x11 - 2 . 0 " , 0 ) ) { <nl> + if ( ! gtkLoaded & & loadLibrary ( lib_gtk , " gtk - x11 - 2 . 0 " , 0 ) ) { <nl> gtkLoaded = setupGtkBase ( lib_gtk ) ; <nl> } <nl> <nl>
|
Load gtk2 even on Wayland
|
telegramdesktop/tdesktop
|
7ac78be984db7960f9ed153b43218fbecb764fb0
|
2020-06-01T14:22:53Z
|
mmm a / xbmc / android / jni / BroadcastReceiver . cpp <nl> ppp b / xbmc / android / jni / BroadcastReceiver . cpp <nl> void CJNIBroadcastReceiver : : DestroyBroadcastReceiver ( ) <nl> { <nl> m_object . reset ( ) ; <nl> } <nl> - <nl> - extern " C " <nl> - JNIEXPORT void JNICALL Java_org_xbmc_xbmc_XBMCBroadcastReceiver__1onReceive <nl> - ( JNIEnv * env , jobject context , jobject intent ) <nl> - { <nl> - if ( CJNIBroadcastReceiver : : jni_app_context ) <nl> - CJNIBroadcastReceiver : : jni_app_context - > onReceive ( CJNIIntent ( jhobject ( intent ) ) ) ; <nl> - } <nl>
|
jni: drop hard-coded jniexport
|
xbmc/xbmc
|
803f0a9bd102d8150e01c9ab23565afdd5785069
|
2013-06-07T21:53:17Z
|
mmm a / jstests / ssl / ssl_fragment . js <nl> ppp b / jstests / ssl / ssl_fragment . js <nl> <nl> ( function ( ) { <nl> ' use strict ' ; <nl> <nl> - var conn = MongoRunner . runMongod ( { <nl> + const conn = MongoRunner . runMongod ( { <nl> sslMode : " requireSSL " , <nl> sslPEMKeyFile : " jstests / libs / server . pem " , <nl> + networkMessageCompressors : ' disabled ' , <nl> } ) ; <nl> <nl> - var large = " aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa " ; <nl> - var s = large ; <nl> - <nl> / / SSL packets have a max size of ~ 16 kb so to test packet fragmentation support , create a <nl> / / string larger then 16kb . <nl> - for ( let i = 0 ; i < 5 * 1700 ; i + + ) { <nl> - s + = large ; <nl> + const chunk = ' E $ % G ^ 56w4v5v54Vv $ V @ # t2 # % t56u7B $ ub % 6 NU @ Y3qv4Yq % yq4C % yx $ % zh ' ; / / random data <nl> + let s = ' ' ; <nl> + while ( s . length < ( 8 * 1024 * 1024 ) ) { <nl> + s + = chunk ; <nl> } <nl> <nl> - let ssl_frag = conn . getCollection ( ' test . ssl_frag ' ) ; <nl> - assert . writeOK ( ssl_frag . insert ( { _id : large } ) ) ; <nl> + const ssl_frag = conn . getCollection ( ' test . ssl_frag ' ) ; <nl> + assert . writeOK ( ssl_frag . insert ( { _id : " large_str " , foo : s } ) ) ; <nl> <nl> - let docs = ssl_frag . find ( { } ) ; <nl> - assert . lt ( 2 * 16 * 1024 , Object . bsonsize ( docs ) , " test doc too small " ) ; <nl> + const read = ssl_frag . find ( { _id : " large_str " } ) . toArray ( ) [ 0 ] . foo ; <nl> + assert . eq ( s , read , " Did not receive value written " ) ; <nl> <nl> MongoRunner . stopMongod ( conn ) ; <nl> } ) ( ) ; <nl> mmm a / src / mongo / util / net / ssl / detail / impl / engine_apple . ipp <nl> ppp b / src / mongo / util / net / ssl / detail / impl / engine_apple . ipp <nl> bool engine : : _initSSL ( stream_base : : handshake_type type , asio : : error_code & ec ) { <nl> } <nl> <nl> engine : : want engine : : handshake ( stream_base : : handshake_type type , asio : : error_code & ec ) { <nl> + ec = asio : : error_code ( ) ; <nl> if ( ! _initSSL ( type , ec ) ) { <nl> / / Error happened , ec has been set . <nl> return want : : want_nothing ; <nl> engine : : want engine : : handshake ( stream_base : : handshake_type type , asio : : error_cod <nl> } <nl> <nl> engine : : want engine : : shutdown ( asio : : error_code & ec ) { <nl> + ec = asio : : error_code ( ) ; <nl> if ( _ssl ) { <nl> const auto status = : : SSLClose ( _ssl . get ( ) ) ; <nl> if ( status = = : : errSSLWouldBlock ) { <nl> const asio : : error_code & engine : : map_error_code ( asio : : error_code & ec ) const { <nl> engine : : want engine : : write ( const asio : : const_buffer & data , <nl> asio : : error_code & ec , <nl> std : : size_t & bytes_transferred ) { <nl> + ec = asio : : error_code ( ) ; <nl> if ( ! verifyConnected ( _ssl . get ( ) , & ec ) ) { <nl> return want : : want_nothing ; <nl> } <nl> asio : : mutable_buffer engine : : get_output ( const asio : : mutable_buffer & data ) { <nl> engine : : want engine : : read ( const asio : : mutable_buffer & data , <nl> asio : : error_code & ec , <nl> std : : size_t & bytes_transferred ) { <nl> + ec = asio : : error_code ( ) ; <nl> if ( ! verifyConnected ( _ssl . get ( ) , & ec ) ) { <nl> return want : : want_nothing ; <nl> } <nl>
|
SERVER-37259 Clear asio::error_code in engine callbacks
|
mongodb/mongo
|
a8995601144d16fe6c762624f64f5690b5caf684
|
2018-09-21T21:04:34Z
|
mmm a / hphp / test / slow / lang / redefine_function . php . expectf <nl> ppp b / hphp / test / slow / lang / redefine_function . php . expectf <nl> @ @ - 1 + 1 @ @ <nl> - Fatal error : Cannot redeclare foo ( ) ( previously declared in % s / test / slow / lang / redefine_function . php : 5 ) in % s / test / slow / lang / redefine_function . php on line 9 <nl> \ No newline at end of file <nl> + Fatal error : Cannot redeclare foo ( ) ( previously declared in % s / redefine_function . php : 5 ) in % s / redefine_function . php on line 9 <nl> mmm a / hphp / test / slow / parser / memory_exhaust . php . expectregex <nl> ppp b / hphp / test / slow / parser / memory_exhaust . php . expectregex <nl> @ @ - 1 + 1 @ @ <nl> - ( Fatal error : ( memory exhausted in | AssemblerUnserializationError : Maximum stack size reached \ n ) [ ^ \ r \ n ] + \ / test \ / slow \ / parser \ / memory_exhaust \ . php on line ( 3 | - 1 ) | Notice : File could not be loaded : [ ^ \ r \ n ] + \ / test \ / slow \ / parser \ / memory_exhaust \ . php ) <nl> + ( Fatal error : ( memory exhausted in | AssemblerUnserializationError : Maximum stack size reached \ n ) [ ^ \ r \ n ] + \ / memory_exhaust \ . php on line ( 3 | - 1 ) | Notice : File could not be loaded : [ ^ \ r \ n ] + \ / memory_exhaust \ . php ) <nl>
|
Fix some tests when run from not-the-fbcode-root
|
facebook/hhvm
|
8a1cd6e28970cb85739b0c2d242ddaaa7bdac529
|
2019-05-01T17:53:52Z
|
mmm a / tensorflow / java / src / main / native / session_jni . cc <nl> ppp b / tensorflow / java / src / main / native / session_jni . cc <nl> JNIEXPORT jbyteArray JNICALL Java_org_tensorflow_Session_run ( <nl> } <nl> <nl> if ( ! throwExceptionIfNotOK ( env , status ) ) { <nl> + TF_DeleteStatus ( status ) ; <nl> return nullptr ; <nl> } <nl> jlong * t = env - > GetLongArrayElements ( output_tensor_handles , nullptr ) ; <nl> JNIEXPORT jbyteArray JNICALL Java_org_tensorflow_Session_run ( <nl> memcpy ( elems , run_metadata - > data , run_metadata - > length ) ; <nl> env - > ReleaseByteArrayElements ( ret , elems , JNI_COMMIT ) ; <nl> } <nl> + TF_DeleteStatus ( status ) ; <nl> return ret ; <nl> } <nl>
|
Java: Plug memory leak in Session.run()
|
tensorflow/tensorflow
|
8304e197ea9eeb617f224a1ba0cc4068596098d1
|
2017-05-25T18:43:20Z
|
mmm a / package . json <nl> ppp b / package . json <nl> <nl> " poisson - process " : " ^ 0 . 2 . 1 " <nl> } , <nl> " engines " : { <nl> - " node " : " > = 1 . 1 . 0 " <nl> + " node " : " > = 4 " <nl> } , <nl> " binary " : { <nl> " module_name " : " grpc_node " , <nl> mmm a / src / node / ext / server_uv . cc <nl> ppp b / src / node / ext / server_uv . cc <nl> namespace grpc { <nl> namespace node { <nl> <nl> using Nan : : Callback ; <nl> + using Nan : : MaybeLocal ; <nl> <nl> using v8 : : External ; <nl> using v8 : : Function ; <nl> using v8 : : FunctionTemplate ; <nl> using v8 : : Local ; <nl> - using v8 : : MaybeLocal ; <nl> using v8 : : Object ; <nl> using v8 : : Value ; <nl> <nl> mmm a / templates / package . json . template <nl> ppp b / templates / package . json . template <nl> <nl> " poisson - process " : " ^ 0 . 2 . 1 " <nl> } , <nl> " engines " : { <nl> - " node " : " > = 1 . 1 . 0 " <nl> + " node " : " > = 4 " <nl> } , <nl> " binary " : { <nl> " module_name " : " grpc_node " , <nl> mmm a / tools / run_tests / artifacts / build_artifact_node . bat <nl> ppp b / tools / run_tests / artifacts / build_artifact_node . bat <nl> <nl> @ rem ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> @ rem OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> <nl> - set node_versions = 1 . 1 . 0 2 . 0 . 0 3 . 0 . 0 4 . 0 . 0 5 . 0 . 0 6 . 0 . 0 7 . 0 . 0 <nl> + set node_versions = 4 . 0 . 0 5 . 0 . 0 6 . 0 . 0 7 . 0 . 0 <nl> <nl> set electron_versions = 1 . 0 . 0 1 . 1 . 0 1 . 2 . 0 1 . 3 . 0 1 . 4 . 0 <nl> <nl> mmm a / tools / run_tests / artifacts / build_artifact_node . sh <nl> ppp b / tools / run_tests / artifacts / build_artifact_node . sh <nl> mkdir - p artifacts <nl> <nl> npm update <nl> <nl> - node_versions = ( 1 . 1 . 0 2 . 0 . 0 3 . 0 . 0 4 . 0 . 0 5 . 0 . 0 6 . 0 . 0 7 . 0 . 0 ) <nl> + node_versions = ( 4 . 0 . 0 5 . 0 . 0 6 . 0 . 0 7 . 0 . 0 ) <nl> <nl> electron_versions = ( 1 . 0 . 0 1 . 1 . 0 1 . 2 . 0 1 . 3 . 0 1 . 4 . 0 ) <nl> <nl>
|
Drop support for io.js, fix minor issue with node extension
|
grpc/grpc
|
aaef11aa298c9b760fa7a634ce6a1be1ed8d6595
|
2017-03-14T18:19:25Z
|
mmm a / SConstruct <nl> ppp b / SConstruct <nl> AddOption ( ' - - prefix ' , <nl> help = ' installation prefix ' ) <nl> <nl> <nl> + AddOption ( ' - - java ' , <nl> + dest = ' javaHome ' , <nl> + type = ' string ' , <nl> + default = " / opt / java / " , <nl> + nargs = 1 , <nl> + action = ' store ' , <nl> + metavar = ' DIR ' , <nl> + help = ' java home ' ) <nl> + <nl> + <nl> # mmm environment setup mmm <nl> <nl> env = Environment ( ) <nl> allClientFiles = commonFiles + coreDbFiles + [ " client / clientOnly . cpp " ] ; <nl> <nl> nix = False <nl> <nl> + print ( " javaHome : " + GetOption ( " javaHome " ) ) <nl> + <nl> def findVersion ( root , choices ) : <nl> for c in choices : <nl> if ( os . path . exists ( root + c ) ) : <nl> if " darwin " = = os . sys . platform : <nl> nix = True <nl> <nl> elif " linux2 " = = os . sys . platform : <nl> - javaHome = " / opt / java / " <nl> <nl> env . Append ( CPPPATH = [ javaHome + " include " , javaHome + " include / linux " ] ) <nl> <nl>
|
configurable java home
|
mongodb/mongo
|
0279a79e5a574cf0ccdc92298d34c7f93668ed59
|
2009-01-19T18:02:41Z
|
mmm a / CHANGELOG_RU . md <nl> ppp b / CHANGELOG_RU . md <nl> <nl> <nl> # # # Новые возможности : <nl> <nl> - * Добавлен тип данных ` DECIMAL ( digits , scale ) ` ( ` Decimal32 ( scale ) ` , ` Decimal64 ( scale ) ` , ` Decimal128 ( scale ) ` ) . Возможность доступна под настройкой ` allow_experimental_decimal_type ` . # 2846 # 2970 # 3008 # 3047 <nl> - * Модификатор ` WITH ROLLUP ` для ` GROUP BY ` ( также доступен синтаксис : ` GROUP BY ROLLUP ( . . . ) ` ) . # 2948 <nl> - * В запросах с JOIN , звёздочка раскрывается в список столбцов всех таблиц , в соответствии со стандартом SQL . Вернуть старое поведение можно , выставив настройку ( уровня пользователя ) ` asterisk_left_columns_only ` в значение 1 . Winter Zhang # 2787 <nl> - * Добавлена поддержка JOIN с табличной функцией Winter Zhang # 2907 <nl> - * Автодополнение по нажатию Tab в clickhouse - client . Sergey Shcherbin # 2447 <nl> - * Нажатие Ctrl + C в clickhouse - client очищает запрос , если он был введён # 2877 <nl> - * Добавлена настройка ` join_default_strictness ` ( значения ` ' ' ` , ` ' any ' ` , ` ' all ' ` ) . Её использование позволяет не указывать ` ANY ` или ` ALL ` для ` JOIN ` # 2982 <nl> - * В каждой строчке лога сервера , относящейся к обработке запроса , выводится идентификатор запроса . # 2482 <nl> - * Возможность получения логов выполнения запроса в clickhouse - client ( настройка ` send_logs_level ` ) . При распределённой обработке запроса , логи отправляются каскадно со всех серверов . # 2482 <nl> - * В таблицах ` system . query_log ` и ` system . processes ` ( ` SHOW PROCESSLIST ` ) появилась информация о всех изменённых настройках при выполнении запроса ( вложенная структура данных ` Settings ` ) . Добавлена настройка ` log_query_settings ` . # 2482 <nl> - * В таблицах ` system . query_log ` и ` system . processes ` появилась информация о номерах потоков , участвующих в исполнении запроса ( столбец ` thread_numbers ` ) . # 2482 <nl> - * Добавлены счётчики ` ProfileEvents ` , измеряющие время , потраченное на чтение и запись по сети ; чтение и запись на диск ; количество сетевых ошибок ; время потраченное на ожидании при ограничении сетевой полосы . # 2482 <nl> - * Добавлены счётчики ` ProfileEvents ` , содержащие системные метрики из rusage ( позволяющие получить информацию об использовании CPU в userspace и ядре , page faults , context switches ) а также метрики taskstats ( позволяющие получить информацию о времени ожидания IO , CPU , а также количество прочитанных и записанных данных с учётом и без учёта page cache ) . # 2482 <nl> - * Счётчики ` ProfileEvents ` учитываются не только глобально , но и на каждый запрос , а также на каждый поток выполнения запроса , что позволяет детально профилировать потребление ресурсов отдельными запросами . # 2482 <nl> - * Добавлена таблица ` system . query_thread_log ` , содержащая информацию о каждом потоке выполнения запроса . Добавлена настройка ` log_query_threads ` . # 2482 <nl> - * В таблицах ` system . metrics ` и ` system . events ` появилась встроенная документация . # 3016 <nl> - * Добавлена функция ` arrayEnumerateDense ` Amos Bird # 2975 <nl> - * Добавлены функции ` arrayCumSumNonNegative ` и ` arrayDifference ` Aleksey Studnev # 2942 <nl> - * Добавлена агрегатная функция ` retention ` Sundy Li # 2887 <nl> - * Возможность сложения ( слияния ) состояний агрегатных функций с помощью оператора плюс , а также умножения состояний агрегатных функций на целую неотрицательную константу . 
# 3062 # 3034 <nl> - * В таблицах семейства MergeTree добавлен виртуальный столбец ` _partition_id ` # 3089 <nl> + * Добавлен тип данных ` DECIMAL ( digits , scale ) ` ( ` Decimal32 ( scale ) ` , ` Decimal64 ( scale ) ` , ` Decimal128 ( scale ) ` ) . Возможность доступна под настройкой ` allow_experimental_decimal_type ` . [ # 2846 ] ( https : / / github . com / yandex / ClickHouse / pull / 2846 ) [ # 2970 ] ( https : / / github . com / yandex / ClickHouse / pull / 2970 ) [ # 3008 ] ( https : / / github . com / yandex / ClickHouse / pull / 3008 ) [ # 3047 ] ( https : / / github . com / yandex / ClickHouse / pull / 3047 ) <nl> + * Модификатор ` WITH ROLLUP ` для ` GROUP BY ` ( также доступен синтаксис : ` GROUP BY ROLLUP ( . . . ) ` ) . [ # 2948 ] ( https : / / github . com / yandex / ClickHouse / pull / 2948 ) <nl> + * В запросах с JOIN , звёздочка раскрывается в список столбцов всех таблиц , в соответствии со стандартом SQL . Вернуть старое поведение можно , выставив настройку ( уровня пользователя ) ` asterisk_left_columns_only ` в значение 1 . Winter Zhang [ # 2787 ] ( https : / / github . com / yandex / ClickHouse / pull / 2787 ) <nl> + * Добавлена поддержка JOIN с табличной функцией Winter Zhang [ # 2907 ] ( https : / / github . com / yandex / ClickHouse / pull / 2907 ) <nl> + * Автодополнение по нажатию Tab в clickhouse - client . Sergey Shcherbin [ # 2447 ] ( https : / / github . com / yandex / ClickHouse / pull / 2447 ) <nl> + * Нажатие Ctrl + C в clickhouse - client очищает запрос , если он был введён [ # 2877 ] ( https : / / github . com / yandex / ClickHouse / pull / 2877 ) <nl> + * Добавлена настройка ` join_default_strictness ` ( значения ` ' ' ` , ` ' any ' ` , ` ' all ' ` ) . Её использование позволяет не указывать ` ANY ` или ` ALL ` для ` JOIN ` [ # 2982 ] ( https : / / github . com / yandex / ClickHouse / pull / 2982 ) <nl> + * В каждой строчке лога сервера , относящейся к обработке запроса , выводится идентификатор запроса . [ # 2482 ] ( https : / / github . com / yandex / ClickHouse / pull / 2482 ) <nl> + * Возможность получения логов выполнения запроса в clickhouse - client ( настройка ` send_logs_level ` ) . При распределённой обработке запроса , логи отправляются каскадно со всех серверов . [ # 2482 ] ( https : / / github . com / yandex / ClickHouse / pull / 2482 ) <nl> + * В таблицах ` system . query_log ` и ` system . processes ` ( ` SHOW PROCESSLIST ` ) появилась информация о всех изменённых настройках при выполнении запроса ( вложенная структура данных ` Settings ` ) . Добавлена настройка ` log_query_settings ` . [ # 2482 ] ( https : / / github . com / yandex / ClickHouse / pull / 2482 ) <nl> + * В таблицах ` system . query_log ` и ` system . processes ` появилась информация о номерах потоков , участвующих в исполнении запроса ( столбец ` thread_numbers ` ) . [ # 2482 ] ( https : / / github . com / yandex / ClickHouse / pull / 2482 ) <nl> + * Добавлены счётчики ` ProfileEvents ` , измеряющие время , потраченное на чтение и запись по сети ; чтение и запись на диск ; количество сетевых ошибок ; время потраченное на ожидании при ограничении сетевой полосы . [ # 2482 ] ( https : / / github . 
com / yandex / ClickHouse / pull / 2482 ) <nl> + * Добавлены счётчики ` ProfileEvents ` , содержащие системные метрики из rusage ( позволяющие получить информацию об использовании CPU в userspace и ядре , page faults , context switches ) а также метрики taskstats ( позволяющие получить информацию о времени ожидания IO , CPU , а также количество прочитанных и записанных данных с учётом и без учёта page cache ) . [ # 2482 ] ( https : / / github . com / yandex / ClickHouse / pull / 2482 ) <nl> + * Счётчики ` ProfileEvents ` учитываются не только глобально , но и на каждый запрос , а также на каждый поток выполнения запроса , что позволяет детально профилировать потребление ресурсов отдельными запросами . [ # 2482 ] ( https : / / github . com / yandex / ClickHouse / pull / 2482 ) <nl> + * Добавлена таблица ` system . query_thread_log ` , содержащая информацию о каждом потоке выполнения запроса . Добавлена настройка ` log_query_threads ` . [ # 2482 ] ( https : / / github . com / yandex / ClickHouse / pull / 2482 ) <nl> + * В таблицах ` system . metrics ` и ` system . events ` появилась встроенная документация . [ # 3016 ] ( https : / / github . com / yandex / ClickHouse / pull / 3016 ) <nl> + * Добавлена функция ` arrayEnumerateDense ` Amos Bird [ # 2975 ] ( https : / / github . com / yandex / ClickHouse / pull / 2975 ) <nl> + * Добавлены функции ` arrayCumSumNonNegative ` и ` arrayDifference ` Aleksey Studnev [ # 2942 ] ( https : / / github . com / yandex / ClickHouse / pull / 2942 ) <nl> + * Добавлена агрегатная функция ` retention ` Sundy Li [ # 2887 ] ( https : / / github . com / yandex / ClickHouse / pull / 2887 ) <nl> + * Возможность сложения ( слияния ) состояний агрегатных функций с помощью оператора плюс , а также умножения состояний агрегатных функций на целую неотрицательную константу . [ # 3062 ] ( https : / / github . com / yandex / ClickHouse / pull / 3062 ) [ # 3034 ] ( https : / / github . com / yandex / ClickHouse / pull / 3034 ) <nl> + * В таблицах семейства MergeTree добавлен виртуальный столбец ` _partition_id ` [ # 3089 ] ( https : / / github . com / yandex / ClickHouse / pull / 3089 ) <nl> <nl> # # # Экспериментальные возможности : <nl> <nl> - * Добавлен тип данных ` LowCardinality ( T ) ` . Тип данных автоматически создаёт локальный словарь значений и позволяет обрабатывать данные без распаковки словаря . # 2830 <nl> - * Добавлен кэш JIT - скомпилированных функций , а также счётчик числа использований перед компиляцией . Возможность JIT - компиляции выражений включается настройкой ` compile_expressions ` # 2990 # 3077 <nl> + * Добавлен тип данных ` LowCardinality ( T ) ` . Тип данных автоматически создаёт локальный словарь значений и позволяет обрабатывать данные без распаковки словаря . [ # 2830 ] ( https : / / github . com / yandex / ClickHouse / pull / 2830 ) <nl> + * Добавлен кэш JIT - скомпилированных функций , а также счётчик числа использований перед компиляцией . Возможность JIT - компиляции выражений включается настройкой ` compile_expressions ` [ # 2990 ] ( https : / / github . com / yandex / ClickHouse / pull / 2990 ) [ # 3077 ] ( https : / / github . com / yandex / ClickHouse / pull / 3077 ) <nl> <nl> # # # Улучшения : <nl> <nl> * Исправлена проблема неограниченного накопления лога репликации в случае наличия заброшенных реплик . Добавлен режим эффективного восстановления реплик после длительного отставания . 
<nl> * Увеличена производительность при выполнении ` GROUP BY ` в случае , если есть несколько полей агрегации , одно из которых строковое , а другие - фиксированной длины . <nl> * Увеличена производительность при использовании ` PREWHERE ` и при неявном переносе выражений в ` PREWHERE ` . <nl> - * Увеличена производительность парсинга текстовых форматов ( ` CSV ` , ` TSV ` ) Amos Bird # 2977 # 2980 <nl> - * Увеличена производительность чтения строк и массивов в бинарных форматах Amos Bird # 2955 <nl> - * Увеличена производительность и уменьшено потребление памяти в запросах к таблицам ` system . tables ` и ` system . columns ` в случае наличия очень большого количества таблиц на одном сервере . # 2953 <nl> - * Исправлена проблема низкой производительности в случае наличия большого потока запросов , для которых возвращается ошибка ( в ` perf top ` видна функция ` _dl_addr ` , при этом сервер использует мало CPU ) . # 2938 <nl> - * Прокидывание условий внутрь View ( при включенной настройке ` enable_optimize_predicate_expression ` ) Winter Zhang # 2907 <nl> - * Доработки недостающей функциональности для типа данных ` UUID ` # 3074 # 2985 <nl> - * Тип данных ` UUID ` поддержан в словарях The - Alchemist # 2822 <nl> - * Функция ` visitParamExtractRaw ` корректно работает с вложенными структурами Winter Zhang # 2974 <nl> - * При использовании настройки ` input_format_skip_unknown_fields ` корректно работает пропуск значений - объектов в формате ` JSONEachRow ` BlahGeek # 2958 <nl> - * Для выражения ` CASE ` с условиями , появилась возможность не указывать ` ELSE ` , что эквивалентно ` ELSE NULL ` # 2920 <nl> - * Возможность конфигурирования operation timeout при работе с ZooKeeper . urykhy # 2971 <nl> - * Возможность указания смещения для ` LIMIT n , m ` в виде ` LIMIT n OFFSET m ` # 2840 <nl> - * Возможность использования синтаксиса ` SELECT TOP n ` в качестве альтернативы для ` LIMIT ` # 2840 <nl> + * Увеличена производительность парсинга текстовых форматов ( ` CSV ` , ` TSV ` ) Amos Bird [ # 2977 ] ( https : / / github . com / yandex / ClickHouse / pull / 2977 ) [ # 2980 ] ( https : / / github . com / yandex / ClickHouse / pull / 2980 ) <nl> + * Увеличена производительность чтения строк и массивов в бинарных форматах Amos Bird [ # 2955 ] ( https : / / github . com / yandex / ClickHouse / pull / 2955 ) <nl> + * Увеличена производительность и уменьшено потребление памяти в запросах к таблицам ` system . tables ` и ` system . columns ` в случае наличия очень большого количества таблиц на одном сервере . [ # 2953 ] ( https : / / github . com / yandex / ClickHouse / pull / 2953 ) <nl> + * Исправлена проблема низкой производительности в случае наличия большого потока запросов , для которых возвращается ошибка ( в ` perf top ` видна функция ` _dl_addr ` , при этом сервер использует мало CPU ) . [ # 2938 ] ( https : / / github . com / yandex / ClickHouse / pull / 2938 ) <nl> + * Прокидывание условий внутрь View ( при включенной настройке ` enable_optimize_predicate_expression ` ) Winter Zhang [ # 2907 ] ( https : / / github . com / yandex / ClickHouse / pull / 2907 ) <nl> + * Доработки недостающей функциональности для типа данных ` UUID ` [ # 3074 ] ( https : / / github . com / yandex / ClickHouse / pull / 3074 ) [ # 2985 ] ( https : / / github . com / yandex / ClickHouse / pull / 2985 ) <nl> + * Тип данных ` UUID ` поддержан в словарях The - Alchemist [ # 2822 ] ( https : / / github . 
com / yandex / ClickHouse / pull / 2822 ) <nl> + * Функция ` visitParamExtractRaw ` корректно работает с вложенными структурами Winter Zhang [ # 2974 ] ( https : / / github . com / yandex / ClickHouse / pull / 2974 ) <nl> + * При использовании настройки ` input_format_skip_unknown_fields ` корректно работает пропуск значений - объектов в формате ` JSONEachRow ` BlahGeek [ # 2958 ] ( https : / / github . com / yandex / ClickHouse / pull / 2958 ) <nl> + * Для выражения ` CASE ` с условиями , появилась возможность не указывать ` ELSE ` , что эквивалентно ` ELSE NULL ` [ # 2920 ] ( https : / / github . com / yandex / ClickHouse / pull / 2920 ) <nl> + * Возможность конфигурирования operation timeout при работе с ZooKeeper . urykhy [ # 2971 ] ( https : / / github . com / yandex / ClickHouse / pull / 2971 ) <nl> + * Возможность указания смещения для ` LIMIT n , m ` в виде ` LIMIT n OFFSET m ` [ # 2840 ] ( https : / / github . com / yandex / ClickHouse / pull / 2840 ) <nl> + * Возможность использования синтаксиса ` SELECT TOP n ` в качестве альтернативы для ` LIMIT ` [ # 2840 ] ( https : / / github . com / yandex / ClickHouse / pull / 2840 ) <nl> * Увеличен размер очереди записи в системные таблицы , что позволяет уменьшить количество ситуаций ` SystemLog queue is full ` . <nl> - * В агрегатной функции ` windowFunnel ` добавлена поддержка событий , подходящих под несколько условий Amos Bird # 2801 <nl> - * Возможность использования дублирующихся столбцов в секции ` USING ` для ` JOIN ` # 3006 <nl> - * Для форматов ` Pretty ` введено ограничение выравнивания столбцов по ширине . Настройка ` output_format_pretty_max_column_pad_width ` . В случае более широкого значения , оно всё ещё будет выведено целиком , но остальные ячейки таблицы не будут излишне широкими # 3003 <nl> - * В табличной функции ` odbc ` добавлена возможность указания имени базы данных / схемы Amos Bird # 2885 <nl> - * Добавлена возможность использования имени пользователя , заданного в конфигурационном файле ` clickhouse - client ` Vladimir Kozbin # 2909 <nl> + * В агрегатной функции ` windowFunnel ` добавлена поддержка событий , подходящих под несколько условий Amos Bird [ # 2801 ] ( https : / / github . com / yandex / ClickHouse / pull / 2801 ) <nl> + * Возможность использования дублирующихся столбцов в секции ` USING ` для ` JOIN ` [ # 3006 ] ( https : / / github . com / yandex / ClickHouse / pull / 3006 ) <nl> + * Для форматов ` Pretty ` введено ограничение выравнивания столбцов по ширине . Настройка ` output_format_pretty_max_column_pad_width ` . В случае более широкого значения , оно всё ещё будет выведено целиком , но остальные ячейки таблицы не будут излишне широкими [ # 3003 ] ( https : / / github . com / yandex / ClickHouse / pull / 3003 ) <nl> + * В табличной функции ` odbc ` добавлена возможность указания имени базы данных / схемы Amos Bird [ # 2885 ] ( https : / / github . com / yandex / ClickHouse / pull / 2885 ) <nl> + * Добавлена возможность использования имени пользователя , заданного в конфигурационном файле ` clickhouse - client ` Vladimir Kozbin [ # 2909 ] ( https : / / github . com / yandex / ClickHouse / pull / 2909 ) <nl> * Счётчик ` ZooKeeperExceptions ` разделён на три счётчика ` ZooKeeperUserExceptions ` , ` ZooKeeperHardwareExceptions ` , ` ZooKeeperOtherExceptions ` . <nl> * Запросы ` ALTER DELETE ` работают для материализованных представлений . 
<nl> * Добавлена рандомизация во времени периодического запуска cleanup thread для таблиц типа ` ReplicatedMergeTree ` , чтобы избежать периодических всплесков нагрузки в случае очень большого количества таблиц типа ` ReplicatedMergeTree ` . <nl> - * Поддержка запроса ` ATTACH TABLE . . . ON CLUSTER ` # 3025 <nl> + * Поддержка запроса ` ATTACH TABLE . . . ON CLUSTER ` [ # 3025 ] ( https : / / github . com / yandex / ClickHouse / pull / 3025 ) <nl> <nl> # # # Исправление ошибок : <nl> <nl> - * Исправлена ошибка в работе таблиц типа ` Dictionary ` ( кидается исключение ` Size of offsets doesn ' t match size of column ` или ` Unknown compression method ` ) . Ошибка появилась в версии 18 . 10 . 3 . # issue2913 <nl> - * Исправлена ошибка при мерже данных таблиц типа ` CollapsingMergeTree ` , если один из кусков данных пустой ( такие куски , в свою очередь , образуются при слиянии или при ` ALTER DELETE ` в случае удаления всех данных ) , и для слияния был выбран алгоритм ` vertical ` # 3049 <nl> - * Исправлен race condition при ` DROP ` или ` TRUNCATE ` таблиц типа ` Memory ` при одновременном ` SELECT ` , который мог приводить к падениям сервера . Ошибка появилась в версии 1 . 1 . 54388 . # 3038 <nl> - * Исправлена возможность потери данных при вставке в ` Replicated ` таблицы в случае получения ошибки ` Session expired ` ( потеря данных может быть обнаружена по метрике ` ReplicatedDataLoss ` ) . Ошибка возникла в версии 1 . 1 . 54378 . # 2939 # 2949 # 2964 <nl> - * Исправлен segfault при ` JOIN . . . ON ` # 3000 <nl> - * Исправлена ошибка поиска имён столбцов в случае , если выражение ` WHERE ` состоит целиком из квалифицированного имени столбца , как например ` WHERE table . column ` # 2994 <nl> - * Исправлена ошибка вида " Not found column " при выполнении распределённых запросов в случае , если с удалённого сервера запрашивается единственный столбец , представляющий собой выражение IN с подзапросом # 3087 <nl> - * Исправлена ошибка ` Block structure mismatch in UNION stream : different number of columns ` , возникающая при распределённых запросах , если один из шардов локальный , а другой - нет , и если при этом срабатывает оптимизация переноса в ` PREWHERE ` # 2226 # 3037 # 3055 # 3065 # 3073 # 3090 # 3093 <nl> - * Исправлена работа функции ` pointInPolygon ` для некоторого случая невыпуклых полигонов # 2910 <nl> - * Исправлен некорректный результат при сравнении ` nan ` с целыми числами # 3024 <nl> - * Исправлена ошибка в библиотеке ` zlib - ng ` , которая могла приводить к segfault в редких случаях # 2854 <nl> - * Исправлена утечка памяти при вставке в таблицу со столбцами типа ` AggregateFunction ` , если состояние агрегатной функции нетривиальное ( выделяет память отдельно ) , и если в одном запросе на вставку получается несколько маленьких блоков # 3084 <nl> + * Исправлена ошибка в работе таблиц типа ` Dictionary ` ( кидается исключение ` Size of offsets doesn ' t match size of column ` или ` Unknown compression method ` ) . Ошибка появилась в версии 18 . 10 . 3 . [ # 2913 ] ( https : / / github . com / yandex / ClickHouse / issues / 2913 ) <nl> + * Исправлена ошибка при мерже данных таблиц типа ` CollapsingMergeTree ` , если один из кусков данных пустой ( такие куски , в свою очередь , образуются при слиянии или при ` ALTER DELETE ` в случае удаления всех данных ) , и для слияния был выбран алгоритм ` vertical ` [ # 3049 ] ( https : / / github . 
com / yandex / ClickHouse / pull / 3049 ) <nl> + * Исправлен race condition при ` DROP ` или ` TRUNCATE ` таблиц типа ` Memory ` при одновременном ` SELECT ` , который мог приводить к падениям сервера . Ошибка появилась в версии 1 . 1 . 54388 . [ # 3038 ] ( https : / / github . com / yandex / ClickHouse / pull / 3038 ) <nl> + * Исправлена возможность потери данных при вставке в ` Replicated ` таблицы в случае получения ошибки ` Session expired ` ( потеря данных может быть обнаружена по метрике ` ReplicatedDataLoss ` ) . Ошибка возникла в версии 1 . 1 . 54378 . [ # 2939 ] ( https : / / github . com / yandex / ClickHouse / pull / 2939 ) [ # 2949 ] ( https : / / github . com / yandex / ClickHouse / pull / 2949 ) [ # 2964 ] ( https : / / github . com / yandex / ClickHouse / pull / 2964 ) <nl> + * Исправлен segfault при ` JOIN . . . ON ` [ # 3000 ] ( https : / / github . com / yandex / ClickHouse / pull / 3000 ) <nl> + * Исправлена ошибка поиска имён столбцов в случае , если выражение ` WHERE ` состоит целиком из квалифицированного имени столбца , как например ` WHERE table . column ` [ # 2994 ] ( https : / / github . com / yandex / ClickHouse / pull / 2994 ) <nl> + * Исправлена ошибка вида " Not found column " при выполнении распределённых запросов в случае , если с удалённого сервера запрашивается единственный столбец , представляющий собой выражение IN с подзапросом [ # 3087 ] ( https : / / github . com / yandex / ClickHouse / pull / 3087 ) <nl> + * Исправлена ошибка ` Block structure mismatch in UNION stream : different number of columns ` , возникающая при распределённых запросах , если один из шардов локальный , а другой - нет , и если при этом срабатывает оптимизация переноса в ` PREWHERE ` [ # 2226 ] ( https : / / github . com / yandex / ClickHouse / pull / 2226 ) [ # 3037 ] ( https : / / github . com / yandex / ClickHouse / pull / 3037 ) [ # 3055 ] ( https : / / github . com / yandex / ClickHouse / pull / 3055 ) [ # 3065 ] ( https : / / github . com / yandex / ClickHouse / pull / 3065 ) [ # 3073 ] ( https : / / github . com / yandex / ClickHouse / pull / 3073 ) [ # 3090 ] ( https : / / github . com / yandex / ClickHouse / pull / 3090 ) [ # 3093 ] ( https : / / github . com / yandex / ClickHouse / pull / 3093 ) <nl> + * Исправлена работа функции ` pointInPolygon ` для некоторого случая невыпуклых полигонов [ # 2910 ] ( https : / / github . com / yandex / ClickHouse / pull / 2910 ) <nl> + * Исправлен некорректный результат при сравнении ` nan ` с целыми числами [ # 3024 ] ( https : / / github . com / yandex / ClickHouse / pull / 3024 ) <nl> + * Исправлена ошибка в библиотеке ` zlib - ng ` , которая могла приводить к segfault в редких случаях [ # 2854 ] ( https : / / github . com / yandex / ClickHouse / pull / 2854 ) <nl> + * Исправлена утечка памяти при вставке в таблицу со столбцами типа ` AggregateFunction ` , если состояние агрегатной функции нетривиальное ( выделяет память отдельно ) , и если в одном запросе на вставку получается несколько маленьких блоков [ # 3084 ] ( https : / / github . com / yandex / ClickHouse / pull / 3084 ) <nl> * Исправлен race condition при одновременном создании и удалении одной и той же таблицы типа ` Buffer ` или ` MergeTree ` . 
<nl> - * Исправлена возможность segfault при сравнении кортежей из некоторых нетривиальных типов , таких как , например , кортежей # 2989 <nl> - * Исправлена возможность segfault при выполнении некоторых запросов ` ON CLUSTER ` Winter Zhang # 2960 <nl> - * Исправлена ошибка в функции ` arrayDistinct ` в случае ` Nullable ` элементов массивов # 2845 # 2937 <nl> - * Возможность ` enable_optimize_predicate_expression ` корректно поддерживает случаи с ` SELECT * ` Winter Zhang # 2929 <nl> - * Исправлена возможность segfault при переинициализации сессии с ZooKeeper # 2917 <nl> + * Исправлена возможность segfault при сравнении кортежей из некоторых нетривиальных типов , таких как , например , кортежей [ # 2989 ] ( https : / / github . com / yandex / ClickHouse / pull / 2989 ) <nl> + * Исправлена возможность segfault при выполнении некоторых запросов ` ON CLUSTER ` Winter Zhang [ # 2960 ] ( https : / / github . com / yandex / ClickHouse / pull / 2960 ) <nl> + * Исправлена ошибка в функции ` arrayDistinct ` в случае ` Nullable ` элементов массивов [ # 2845 ] ( https : / / github . com / yandex / ClickHouse / pull / 2845 ) [ # 2937 ] ( https : / / github . com / yandex / ClickHouse / pull / 2937 ) <nl> + * Возможность ` enable_optimize_predicate_expression ` корректно поддерживает случаи с ` SELECT * ` Winter Zhang [ # 2929 ] ( https : / / github . com / yandex / ClickHouse / pull / 2929 ) <nl> + * Исправлена возможность segfault при переинициализации сессии с ZooKeeper [ # 2917 ] ( https : / / github . com / yandex / ClickHouse / pull / 2917 ) <nl> * Исправлена возможность блокировки при взаимодействии с ZooKeeper . <nl> * Исправлен некорректный код суммирования вложенных структур данных в ` SummingMergeTree ` . <nl> - * При выделении памяти для состояний агрегатных функций , корректно учитывается выравнивание , что позволяет использовать при реализации состояний агрегатных функций операции , для которых выравнивание является необходимым chenxing - xc # 2808 <nl> + * При выделении памяти для состояний агрегатных функций , корректно учитывается выравнивание , что позволяет использовать при реализации состояний агрегатных функций операции , для которых выравнивание является необходимым chenxing - xc [ # 2808 ] ( https : / / github . com / yandex / ClickHouse / pull / 2808 ) <nl> <nl> # # # Исправления безопасности : <nl> <nl> - * Безопасная работа с ODBC источниками данных . Взаимодействие с ODBC драйверами выполняется через отдельный процесс ` clickhouse - odbc - bridge ` . Ошибки в сторонних ODBC драйверах теперь не приводят к проблемам со стабильностью сервера или уязвимостям # 2828 # 2879 # 2886 # 2893 # 2921 <nl> - * Исправлена некорректная валидация пути к файлу в табличной функции ` catBoostPool ` # 2894 <nl> - * Содержимое системных таблиц ( ` tables ` , ` databases ` , ` parts ` , ` columns ` , ` parts_columns ` , ` merges ` , ` mutations ` , ` replicas ` , ` replication_queue ` ) фильтруется согласно конфигурации доступа к базам данных для пользователя ( ` allow_databases ` ) Winter Zhang # 2856 <nl> + * Безопасная работа с ODBC источниками данных . Взаимодействие с ODBC драйверами выполняется через отдельный процесс ` clickhouse - odbc - bridge ` . Ошибки в сторонних ODBC драйверах теперь не приводят к проблемам со стабильностью сервера или уязвимостям [ # 2828 ] ( https : / / github . com / yandex / ClickHouse / pull / 2828 ) [ # 2879 ] ( https : / / github . com / yandex / ClickHouse / pull / 2879 ) [ # 2886 ] ( https : / / github . 
com / yandex / ClickHouse / pull / 2886 ) [ # 2893 ] ( https : / / github . com / yandex / ClickHouse / pull / 2893 ) [ # 2921 ] ( https : / / github . com / yandex / ClickHouse / pull / 2921 ) <nl> + * Исправлена некорректная валидация пути к файлу в табличной функции ` catBoostPool ` [ # 2894 ] ( https : / / github . com / yandex / ClickHouse / pull / 2894 ) <nl> + * Содержимое системных таблиц ( ` tables ` , ` databases ` , ` parts ` , ` columns ` , ` parts_columns ` , ` merges ` , ` mutations ` , ` replicas ` , ` replication_queue ` ) фильтруется согласно конфигурации доступа к базам данных для пользователя ( ` allow_databases ` ) Winter Zhang [ # 2856 ] ( https : / / github . com / yandex / ClickHouse / pull / 2856 ) <nl> <nl> # # # Обратно несовместимые изменения : <nl> <nl> <nl> <nl> * Добавлен покоммитный запуск большинства интеграционных тестов . <nl> * Добавлен покоммитный запуск проверки стиля кода . <nl> - * Корректный выбор реализации ` memcpy ` при сборке на CentOS7 / Fedora Etienne Champetier # 2912 <nl> - * При сборке с помощью clang добавлены некоторые warnings из ` - Weverything ` в дополнение к обычным ` - Wall - Wextra - Werror ` # 2957 <nl> + * Корректный выбор реализации ` memcpy ` при сборке на CentOS7 / Fedora Etienne Champetier [ # 2912 ] ( https : / / github . com / yandex / ClickHouse / pull / 2912 ) <nl> + * При сборке с помощью clang добавлены некоторые warnings из ` - Weverything ` в дополнение к обычным ` - Wall - Wextra - Werror ` [ # 2957 ] ( https : / / github . com / yandex / ClickHouse / pull / 2957 ) <nl> * При debug сборке используется debug вариант ` jemalloc ` . <nl> - * Абстрагирован интерфейс библиотеки для взаимодействия с ZooKeeper # 2950 <nl> + * Абстрагирован интерфейс библиотеки для взаимодействия с ZooKeeper [ # 2950 ] ( https : / / github . com / yandex / ClickHouse / pull / 2950 ) <nl> <nl> <nl> # # ClickHouse release 18 . 10 . 3 , 2018 - 08 - 13 <nl> <nl> # # # Новые возможности : <nl> - * Возможность использования HTTPS для репликации # 2760 <nl> - * Добавлены функции ` murmurHash2_64 ` , ` murmurHash3_32 ` , ` murmurHash3_64 ` , ` murmurHash3_128 ` в дополнение к имеющемуся ` murmurHash2_32 ` # 2791 <nl> - * Поддержка Nullable типов в ODBC драйвере ClickHouse ( формат вывода ` ODBCDriver2 ` ) # 2834 <nl> + * Возможность использования HTTPS для репликации [ # 2760 ] ( https : / / github . com / yandex / ClickHouse / pull / 2760 ) <nl> + * Добавлены функции ` murmurHash2_64 ` , ` murmurHash3_32 ` , ` murmurHash3_64 ` , ` murmurHash3_128 ` в дополнение к имеющемуся ` murmurHash2_32 ` [ # 2791 ] ( https : / / github . com / yandex / ClickHouse / pull / 2791 ) <nl> + * Поддержка Nullable типов в ODBC драйвере ClickHouse ( формат вывода ` ODBCDriver2 ` ) [ # 2834 ] ( https : / / github . com / yandex / ClickHouse / pull / 2834 ) <nl> * Поддержка ` UUID ` в ключевых столбцах . <nl> <nl> # # # Улучшения : <nl> - * Удаление кластеров без перезагрузки сервера при их удалении из конфигурационных файлов . # 2777 <nl> - * Удаление внешних словарей без перезагрузки сервера при их удалении из конфигурационных файлов . 
# 2779 <nl> - * Добавлена поддержка ` SETTINGS ` для движка таблиц ` Kafka ` Alexander Marshalov # 2781 <nl> - * Доработки для типа данных ` UUID ` ( не полностью ) Šimon Podlipský # 2618 <nl> - * Поддежка пустых кусков после мержей в движках ` SummingMergeTree ` , ` CollapsingMergeTree ` and ` VersionedCollapsingMergeTree ` # 2815 <nl> - * Удаление старых записей о полностью выполнившихся мутациях ( ` ALTER DELETE ` ) # 2784 <nl> - * Добавлена таблица ` system . merge_tree_settings ` Kirill Shvakov # 2841 <nl> - * В таблицу ` system . tables ` добавлены столбцы зависимостей : ` dependencies_database ` и ` dependencies_table ` Winter Zhang # 2851 <nl> - * Добавлена опция конфига ` max_partition_size_to_drop ` # 2782 <nl> - * Добавлена настройка ` output_format_json_escape_forward_slashes ` Alexander Bocharov # 2812 <nl> - * Добавлена настройка ` max_fetch_partition_retries_count ` # 2831 <nl> - * Добавлена настройка ` prefer_localhost_replica ` , позволяющая отключить предпочтение локальной реплики и хождение на локальную реплику без межпроцессного взаимодействия # 2832 <nl> - * Агрегатная функция ` quantileExact ` возвращает ` nan ` в случае агрегации по пустому множеству ` Float32 ` / ` Float64 ` типов Sundy Li # 2855 <nl> + * Удаление кластеров без перезагрузки сервера при их удалении из конфигурационных файлов . [ # 2777 ] ( https : / / github . com / yandex / ClickHouse / pull / 2777 ) <nl> + * Удаление внешних словарей без перезагрузки сервера при их удалении из конфигурационных файлов . [ # 2779 ] ( https : / / github . com / yandex / ClickHouse / pull / 2779 ) <nl> + * Добавлена поддержка ` SETTINGS ` для движка таблиц ` Kafka ` Alexander Marshalov [ # 2781 ] ( https : / / github . com / yandex / ClickHouse / pull / 2781 ) <nl> + * Доработки для типа данных ` UUID ` ( не полностью ) Šimon Podlipský [ # 2618 ] ( https : / / github . com / yandex / ClickHouse / pull / 2618 ) <nl> + * Поддежка пустых кусков после мержей в движках ` SummingMergeTree ` , ` CollapsingMergeTree ` and ` VersionedCollapsingMergeTree ` [ # 2815 ] ( https : / / github . com / yandex / ClickHouse / pull / 2815 ) <nl> + * Удаление старых записей о полностью выполнившихся мутациях ( ` ALTER DELETE ` ) [ # 2784 ] ( https : / / github . com / yandex / ClickHouse / pull / 2784 ) <nl> + * Добавлена таблица ` system . merge_tree_settings ` Kirill Shvakov [ # 2841 ] ( https : / / github . com / yandex / ClickHouse / pull / 2841 ) <nl> + * В таблицу ` system . tables ` добавлены столбцы зависимостей : ` dependencies_database ` и ` dependencies_table ` Winter Zhang [ # 2851 ] ( https : / / github . com / yandex / ClickHouse / pull / 2851 ) <nl> + * Добавлена опция конфига ` max_partition_size_to_drop ` [ # 2782 ] ( https : / / github . com / yandex / ClickHouse / pull / 2782 ) <nl> + * Добавлена настройка ` output_format_json_escape_forward_slashes ` Alexander Bocharov [ # 2812 ] ( https : / / github . com / yandex / ClickHouse / pull / 2812 ) <nl> + * Добавлена настройка ` max_fetch_partition_retries_count ` [ # 2831 ] ( https : / / github . com / yandex / ClickHouse / pull / 2831 ) <nl> + * Добавлена настройка ` prefer_localhost_replica ` , позволяющая отключить предпочтение локальной реплики и хождение на локальную реплику без межпроцессного взаимодействия [ # 2832 ] ( https : / / github . com / yandex / ClickHouse / pull / 2832 ) <nl> + * Агрегатная функция ` quantileExact ` возвращает ` nan ` в случае агрегации по пустому множеству ` Float32 ` / ` Float64 ` типов Sundy Li [ # 2855 ] ( https : / / github . 
com / yandex / ClickHouse / pull / 2855 ) <nl> <nl> # # # Исправление ошибок : <nl> * Убрано излишнее экранирование параметров connection string для ODBC , котрое приводило к невозможности соединения . Ошибка возникла в версии 18 . 6 . 0 . <nl> - * Исправлена логика обработки команд на ` REPLACE PARTITION ` в очереди репликации . Неправильная логика могла приводить к тому , что при наличии двух ` REPLACE ` одной и той же партиции , один из них оставался в очереди репликации и не мог выполниться . # 2814 <nl> - * Исправлена ошибка при мерже , если все куски были пустыми ( такие куски , в свою очередь , образуются при слиянии или при ` ALTER DELETE ` в случае удаления всех данных ) . Ошибка появилась в версии 18 . 1 . 0 . # 2930 <nl> - * Исправлена ошибка при параллельной записи в таблицы типа ` Set ` или ` Join ` Amos Bird # 2823 <nl> - * Исправлена ошибка ` Block structure mismatch in UNION stream : different number of columns ` , возникающая при запросах с ` UNION ALL ` внутри подзапроса , в случае , если один из ` SELECT ` запросов содержит дублирующиеся имена столбцов Winter Zhang # 2094 <nl> + * Исправлена логика обработки команд на ` REPLACE PARTITION ` в очереди репликации . Неправильная логика могла приводить к тому , что при наличии двух ` REPLACE ` одной и той же партиции , один из них оставался в очереди репликации и не мог выполниться . [ # 2814 ] ( https : / / github . com / yandex / ClickHouse / pull / 2814 ) <nl> + * Исправлена ошибка при мерже , если все куски были пустыми ( такие куски , в свою очередь , образуются при слиянии или при ` ALTER DELETE ` в случае удаления всех данных ) . Ошибка появилась в версии 18 . 1 . 0 . [ # 2930 ] ( https : / / github . com / yandex / ClickHouse / pull / 2930 ) <nl> + * Исправлена ошибка при параллельной записи в таблицы типа ` Set ` или ` Join ` Amos Bird [ # 2823 ] ( https : / / github . com / yandex / ClickHouse / pull / 2823 ) <nl> + * Исправлена ошибка ` Block structure mismatch in UNION stream : different number of columns ` , возникающая при запросах с ` UNION ALL ` внутри подзапроса , в случае , если один из ` SELECT ` запросов содержит дублирующиеся имена столбцов Winter Zhang [ # 2094 ] ( https : / / github . com / yandex / ClickHouse / pull / 2094 ) <nl> * Исправлена утечка памяти в случае исключения при соединении с MySQL сервером . <nl> * Исправлен некорректный код возврата clickhouse - client в случае ошибочного запроса <nl> - * Исправлен некорректная работа materialized views , содержащих DISTINCT # issue2795 <nl> + * Исправлен некорректная работа materialized views , содержащих DISTINCT [ # 2795 ] ( https : / / github . com / yandex / ClickHouse / issues / 2795 ) <nl> <nl> # # # Обратно несовместимые изменения <nl> * Убрана поддержка запросов CHECK TABLE для Distributed таблиц . <nl> <nl> # # # Изменения сборки : <nl> - * Заменен аллокатор , теперь используется ` jemalloc ` вместо ` tcmalloc ` . На некоторых сценариях ускорение достигает 20 % . В то же время , существуют запросы , замедлившиеся до 20 % . Потребление памяти на некоторых сценариях примерно на 10 % меньше и более стабильно . При высококонкурентной нагрузке , потребление CPU в userspace и в system незначительно вырастает . # 2773 <nl> - * Использование libressl из submodule # 1983 # 2807 <nl> - * Использование unixodbc из submodule # 2789 <nl> - * Использование mariadb - connector - c из submodule # 2785 <nl> + * Заменен аллокатор , теперь используется ` jemalloc ` вместо ` tcmalloc ` . На некоторых сценариях ускорение достигает 20 % . 
В то же время , существуют запросы , замедлившиеся до 20 % . Потребление памяти на некоторых сценариях примерно на 10 % меньше и более стабильно . При высококонкурентной нагрузке , потребление CPU в userspace и в system незначительно вырастает . [ # 2773 ] ( https : / / github . com / yandex / ClickHouse / pull / 2773 ) <nl> + * Использование libressl из submodule [ # 1983 ] ( https : / / github . com / yandex / ClickHouse / pull / 1983 ) [ # 2807 ] ( https : / / github . com / yandex / ClickHouse / pull / 2807 ) <nl> + * Использование unixodbc из submodule [ # 2789 ] ( https : / / github . com / yandex / ClickHouse / pull / 2789 ) <nl> + * Использование mariadb - connector - c из submodule [ # 2785 ] ( https : / / github . com / yandex / ClickHouse / pull / 2785 ) <nl> * В репозиторий добавлены файлы функциональных тестов , рассчитывающих на наличие тестовых данных ( пока без самих тестовых данных ) . <nl> <nl> <nl>
|
Added changelog for 18.12.13 (in progress) [#CLICKHOUSE-3939]
|
ClickHouse/ClickHouse
|
87275fe6368b155804fe93556c819918e760c4b6
|
2018-09-11T21:27:04Z
|
mmm a / proton <nl> ppp b / proton <nl> import json <nl> import os <nl> import shutil <nl> import errno <nl> - import struct <nl> import subprocess <nl> import sys <nl> import tarfile <nl> class Proton : <nl> if not os . path . isdir ( self . default_pfx_dir ) : <nl> # make default prefix <nl> local_env [ " WINEPREFIX " ] = self . default_pfx_dir <nl> - g_session . run_proc ( [ self . wine , " wineboot " ] , local_env ) <nl> - g_session . run_proc ( [ self . wineserver , " - w " ] , local_env ) <nl> + g_session . run_proc ( [ self . wine_bin , " wineboot " ] , local_env ) <nl> + g_session . run_proc ( [ self . wineserver_bin , " - w " ] , local_env ) <nl> <nl> class CompatData : <nl> def __init__ ( self , compatdata ) : <nl> class Session : <nl> os . chmod ( tmpdir + " run " , 0o755 ) <nl> <nl> def run_proc ( self , args , local_env = None ) : <nl> - if local_env = = None : <nl> + if local_env is None : <nl> local_env = self . env <nl> subprocess . call ( args , env = local_env , stderr = self . log_file , stdout = self . log_file ) <nl> <nl> if __name__ = = " __main__ " : <nl> <nl> sys . exit ( 0 ) <nl> <nl> + # pylint - - disable = C0301 , C0326 , C0330 , C0111 , C0103 , R0902 , C1801 , R0914 , R0912 , R0915 <nl> # vim : set syntax = python : <nl>
|
proton: pylint fixes
|
ValveSoftware/Proton
|
685f1194880ff434820f3c2420647e5c1b2028df
|
2019-08-01T19:30:26Z
|
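The Proton cleanup above swaps `local_env == None` for `local_env is None` and renames the wine binary attributes. A minimal sketch of the identity-comparison idiom that pylint asks for, using hypothetical names rather than Proton's actual classes:

```python
import subprocess

def run_proc(args, local_env=None, default_env=None, log_file=None):
    """Run a subprocess, falling back to a default environment.

    `is None` checks object identity, so it cannot be fooled by objects
    that override __eq__ (the reason pylint flags `== None`).
    """
    if local_env is None:  # identity check, preferred over `== None`
        local_env = default_env or {}
    return subprocess.call(args, env=local_env,
                           stdout=log_file, stderr=log_file)
```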
mmm a / src / brpc / input_messenger . cpp <nl> ppp b / src / brpc / input_messenger . cpp <nl> ParseResult InputMessenger : : CutInputMessage ( <nl> / / The protocol is fixed at client - side , no need to try others . <nl> LOG ( ERROR ) < < " Fail to parse response from " < < m - > remote_side ( ) <nl> < < " by " < < _handlers [ preferred ] . name <nl> - < < " ( client ' s protocol ) " ; <nl> + < < " at client - side " ; <nl> return MakeParseError ( PARSE_ERROR_ABSOLUTELY_WRONG ) ; <nl> } <nl> / / Clear context before trying next protocol which probably has <nl>
|
rephrase a log
|
apache/incubator-brpc
|
5e7a14778f476ae8840e40d21368fd11fa1d0f71
|
2017-11-16T09:59:33Z
|
mmm a / include / swift / AST / ASTContext . h <nl> ppp b / include / swift / AST / ASTContext . h <nl> class ASTContext final { <nl> CanGenericSignature getExistentialSignature ( CanType existential , <nl> ModuleDecl * mod ) ; <nl> <nl> - / / / Whether our effective Swift version is in the Swift 3 family . <nl> - bool isSwiftVersion3 ( ) const { return LangOpts . isSwiftVersion3 ( ) ; } <nl> - <nl> / / / Whether our effective Swift version is at least ' major ' . <nl> / / / <nl> / / / This is usually the check you want ; for example , when introducing <nl> mmm a / include / swift / AST / DiagnosticsSema . def <nl> ppp b / include / swift / AST / DiagnosticsSema . def <nl> NOTE ( declared_protocol_conformance_here , none , <nl> " % 0 implicitly conforms to protocol % 2 } 1 here " , <nl> ( Type , unsigned , DeclName , DeclName ) ) <nl> <nl> - WARNING ( witness_unavailable_warn , none , <nl> - " unavailable % 0 % 1 was used to satisfy a requirement of protocol % 2 " , <nl> - ( DescriptiveDeclKind , DeclName , DeclName ) ) <nl> ERROR ( witness_unavailable , none , <nl> " unavailable % 0 % 1 was used to satisfy a requirement of protocol % 2 " , <nl> ( DescriptiveDeclKind , DeclName , DeclName ) ) <nl> mmm a / include / swift / Basic / LangOptions . h <nl> ppp b / include / swift / Basic / LangOptions . h <nl> namespace swift { <nl> return CustomConditionalCompilationFlags ; <nl> } <nl> <nl> - / / / Whether our effective Swift version is in the Swift 3 family <nl> - bool isSwiftVersion3 ( ) const { <nl> - return EffectiveLanguageVersion . isVersion3 ( ) ; <nl> - } <nl> - <nl> / / / Whether our effective Swift version is at least ' major ' . <nl> / / / <nl> / / / This is usually the check you want ; for example , when introducing <nl> mmm a / include / swift / Basic / Version . h <nl> ppp b / include / swift / Basic / Version . h <nl> class Version { <nl> / / / compiler to act as if it is version 3 . 1 . <nl> Optional < Version > getEffectiveLanguageVersion ( ) const ; <nl> <nl> - / / / Whether this version is in the Swift 3 family <nl> - bool isVersion3 ( ) const { return ! empty ( ) & & Components [ 0 ] = = 3 ; } <nl> - <nl> / / / Whether this version is greater than or equal to the given major version <nl> / / / number . <nl> bool isVersionAtLeast ( unsigned major , unsigned minor = 0 ) const { <nl> mmm a / lib / ClangImporter / ClangAdapter . cpp <nl> ppp b / lib / ClangImporter / ClangAdapter . cpp <nl> OptionalTypeKind importer : : getParamOptionality ( version : : Version swiftVersion , <nl> return OTK_None ; <nl> <nl> / / Check for the ' static ' annotation on C arrays . <nl> - if ( ! swiftVersion . isVersion3 ( ) ) <nl> - if ( const auto * DT = dyn_cast < clang : : DecayedType > ( paramTy ) ) <nl> - if ( const auto * AT = DT - > getOriginalType ( ) - > getAsArrayTypeUnsafe ( ) ) <nl> - if ( AT - > getSizeModifier ( ) = = clang : : ArrayType : : Static ) <nl> - return OTK_None ; <nl> + if ( const auto * DT = dyn_cast < clang : : DecayedType > ( paramTy ) ) <nl> + if ( const auto * AT = DT - > getOriginalType ( ) - > getAsArrayTypeUnsafe ( ) ) <nl> + if ( AT - > getSizeModifier ( ) = = clang : : ArrayType : : Static ) <nl> + return OTK_None ; <nl> <nl> / / Default to implicitly unwrapped optionals . <nl> return OTK_ImplicitlyUnwrappedOptional ; <nl> mmm a / lib / Frontend / CompilerInvocation . cpp <nl> ppp b / lib / Frontend / CompilerInvocation . cpp <nl> static bool ParseLangArgs ( LangOptions & Opts , ArgList & Args , <nl> <nl> Opts . EnableSwift3ObjCInference = <nl> Args . 
hasFlag ( OPT_enable_swift3_objc_inference , <nl> - OPT_disable_swift3_objc_inference , <nl> - Opts . isSwiftVersion3 ( ) ) ; <nl> + OPT_disable_swift3_objc_inference , false ) ; <nl> <nl> if ( Opts . EnableSwift3ObjCInference ) { <nl> if ( const Arg * A = Args . getLastArg ( <nl> mmm a / lib / Migrator / Migrator . cpp <nl> ppp b / lib / Migrator / Migrator . cpp <nl> Migrator : : performAFixItMigration ( version : : Version SwiftLanguageVersion ) { <nl> LLVMArgs . erase ( aarch64_use_tbi ) ; <nl> } <nl> <nl> - if ( StartInvocation . getLangOptions ( ) . EffectiveLanguageVersion . isVersion3 ( ) ) { <nl> - / / SE - 0160 : When migrating , always use the Swift 3 @ objc inference rules , <nl> - / / which drives warnings with the " @ objc " Fix - Its . <nl> - Invocation . getLangOptions ( ) . EnableSwift3ObjCInference = true ; <nl> - <nl> - / / The default behavior of the migrator , referred to as " minimal " migration <nl> - / / in SE - 0160 , only adds @ objc Fix - Its to those cases where the Objective - C <nl> - / / entry point is explicitly used somewhere in the source code . The user <nl> - / / may also select a workflow that adds @ objc for every declaration that <nl> - / / would infer @ objc under the Swift 3 rules but would no longer infer <nl> - / / @ objc in Swift 4 . <nl> - Invocation . getLangOptions ( ) . WarnSwift3ObjCInference = <nl> - getMigratorOptions ( ) . KeepObjcVisibility <nl> - ? Swift3ObjCInferenceWarnings : : Complete <nl> - : Swift3ObjCInferenceWarnings : : Minimal ; <nl> - } <nl> - <nl> const auto & OrigFrontendOpts = StartInvocation . getFrontendOptions ( ) ; <nl> <nl> assert ( OrigFrontendOpts . InputsAndOutputs . hasPrimaryInputs ( ) & & <nl> mmm a / lib / SIL / SILDeclRef . cpp <nl> ppp b / lib / SIL / SILDeclRef . cpp <nl> SubclassScope SILDeclRef : : getSubclassScope ( ) const { <nl> if ( isThunk ( ) | | isForeign ) <nl> return SubclassScope : : NotApplicable ; <nl> <nl> - / / Default arg generators only need to be visible in Swift 3 . <nl> - if ( isDefaultArgGenerator ( ) & & ! context - > getASTContext ( ) . isSwiftVersion3 ( ) ) <nl> + / / Default arg generators are not visible . <nl> + if ( isDefaultArgGenerator ( ) ) <nl> return SubclassScope : : NotApplicable ; <nl> <nl> auto * classType = context - > getSelfClassDecl ( ) ; <nl> mmm a / lib / SILGen / SILGenBridging . cpp <nl> ppp b / lib / SILGen / SILGenBridging . cpp <nl> void SILGenFunction : : emitNativeToForeignThunk ( SILDeclRef thunk ) { <nl> } <nl> <nl> if ( auto attr = decl - > getAttrs ( ) . getAttribute < ObjCAttr > ( ) ) { <nl> - / / If @ objc was inferred based on the Swift 3 @ objc inference rules , but <nl> - / / we aren ' t compiling in Swift 3 compatibility mode , emit a call to <nl> - / / Builtin . swift3ImplicitObjCEntrypoint ( ) to enable runtime logging of <nl> - / / the uses of such entrypoints . <nl> - if ( attr - > isSwift3Inferred ( ) & & <nl> - ! decl - > isDynamic ( ) & & <nl> - ! getASTContext ( ) . LangOpts . isSwiftVersion3 ( ) ) { <nl> - <nl> + / / If @ objc was inferred based on the Swift 3 @ objc inference rules , emit <nl> + / / a call to Builtin . swift3ImplicitObjCEntrypoint ( ) to enable runtime <nl> + / / logging of the uses of such entrypoints . <nl> + if ( attr - > isSwift3Inferred ( ) & & ! decl - > isDynamic ( ) ) { <nl> / / Get the starting source location of the declaration so we can say <nl> / / exactly where to stick ' @ objc ' . <nl> SourceLoc objcInsertionLoc = <nl> mmm a / lib / Sema / TypeCheckProtocol . 
cpp <nl> ppp b / lib / Sema / TypeCheckProtocol . cpp <nl> ConformanceChecker : : resolveWitnessViaLookup ( ValueDecl * requirement ) { <nl> <nl> break ; <nl> <nl> - case CheckKind : : WitnessUnavailable : { <nl> - bool emitError = ! witness - > getASTContext ( ) . LangOpts . isSwiftVersion3 ( ) ; <nl> - diagnoseOrDefer ( requirement , / * isError = * / emitError , <nl> - [ witness , requirement , emitError ] ( <nl> + case CheckKind : : WitnessUnavailable : <nl> + diagnoseOrDefer ( requirement , / * isError = * / true , <nl> + [ witness , requirement ] ( <nl> NormalProtocolConformance * conformance ) { <nl> auto & diags = witness - > getASTContext ( ) . Diags ; <nl> SourceLoc diagLoc = getLocForDiagnosingWitness ( conformance , witness ) ; <nl> diags . diagnose ( diagLoc , <nl> - emitError ? diag : : witness_unavailable <nl> - : diag : : witness_unavailable_warn , <nl> + diag : : witness_unavailable , <nl> witness - > getDescriptiveKind ( ) , <nl> witness - > getFullName ( ) , <nl> conformance - > getProtocol ( ) - > getFullName ( ) ) ; <nl> ConformanceChecker : : resolveWitnessViaLookup ( ValueDecl * requirement ) { <nl> } ) ; <nl> break ; <nl> } <nl> - } <nl> <nl> if ( auto * classDecl = Adoptee - > getClassOrBoundGenericClass ( ) ) { <nl> if ( ! classDecl - > isFinal ( ) ) { <nl> mmm a / lib / Sema / TypeCheckStmt . cpp <nl> ppp b / lib / Sema / TypeCheckStmt . cpp <nl> void TypeChecker : : checkDefaultArguments ( ParameterList * params , <nl> / / caller . <nl> if ( auto * func = dyn_cast < AbstractFunctionDecl > ( VD ) ) { <nl> auto expansion = func - > getResilienceExpansion ( ) ; <nl> - if ( ! Context . isSwiftVersion3 ( ) & & access . isPublic ( ) ) <nl> + if ( access . isPublic ( ) ) <nl> expansion = ResilienceExpansion : : Minimal ; <nl> <nl> func - > setDefaultArgumentResilienceExpansion ( expansion ) ; <nl> mmm a / lib / TBDGen / TBDGen . cpp <nl> ppp b / lib / TBDGen / TBDGen . cpp <nl> void TBDGenVisitor : : visitAbstractFunctionDecl ( AbstractFunctionDecl * AFD ) { <nl> addSymbol ( SILDeclRef ( AFD ) . asForeign ( ) ) ; <nl> } <nl> <nl> - auto publicDefaultArgGenerators = <nl> - SwiftModule - > getASTContext ( ) . isSwiftVersion3 ( ) | | <nl> - SwiftModule - > isTestingEnabled ( ) ; <nl> + auto publicDefaultArgGenerators = SwiftModule - > isTestingEnabled ( ) ; <nl> if ( ! publicDefaultArgGenerators ) <nl> return ; <nl> <nl>
|
Merge pull request from slavapestov/remove-swift3
|
apple/swift
|
1e7e73349d38d063a4de137c94ebb79f9504ae12
|
2018-10-31T00:51:00Z
|
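The Swift change above deletes the `isSwiftVersion3()` / `isVersion3()` special cases and keeps only the open-ended `isVersionAtLeast` check. A rough sketch of why "at least N" checks age better than "is exactly N" checks, using a plain version tuple rather than Swift's actual `Version` type:

```python
def is_version_at_least(components, major, minor=0):
    """Return True if a version like (4, 2) is >= major.minor.

    Unlike an equality test such as `components[0] == 3`, this keeps
    working as new language versions appear, so call sites never need
    auditing when an old compatibility mode is finally removed.
    """
    if not components:
        return False
    if components[0] != major:
        return components[0] > major
    return len(components) < 2 or components[1] >= minor

assert is_version_at_least((4, 2), 4)
assert not is_version_at_least((3, 1), 4)
```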
mmm a / hphp / runtime / base / zend / zend_pack . cpp <nl> ppp b / hphp / runtime / base / zend / zend_pack . cpp <nl> Variant ZendPack : : unpack ( CStrRef fmt , CStrRef data ) { <nl> } <nl> <nl> v | = unpack ( & input [ inputpos ] , sizeof ( int ) , issigned , int_map ) ; <nl> - ret . set ( String ( n , CopyString ) , v ) ; <nl> + if ( type = = ' i ' ) { <nl> + ret . set ( String ( n , CopyString ) , v ) ; <nl> + } else { <nl> + uint64_t u64 = uint32_t ( v ) ; <nl> + ret . set ( String ( n , CopyString ) , u64 ) ; <nl> + } <nl> break ; <nl> } <nl> <nl> mmm a / hphp / test / test_ext_misc . cpp <nl> ppp b / hphp / test / test_ext_misc . cpp <nl> <nl> <nl> # include < test / test_ext_misc . h > <nl> # include < runtime / ext / ext_misc . h > <nl> + # include < runtime / ext / ext_string . h > <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> bool TestExtMisc : : test_uniqid ( ) { <nl> return Count ( true ) ; <nl> } <nl> <nl> + # define VUNPACK ( fmt , inp , exp ) \ <nl> + { Array __a = f_unpack ( fmt , inp ) ; \ <nl> + VS ( __a . exists ( 1 ) , true ) ; \ <nl> + VS ( __a [ 1 ] , ( int64_t ) exp ) ; } <nl> + <nl> bool TestExtMisc : : test_unpack ( ) { <nl> - / / covered in TestCodeRun : : TestExtMisc <nl> + / / Also covered in TestCodeRun : : TestExtMisc <nl> + <nl> + String iFF = f_str_repeat ( " \ xFF " , sizeof ( int ) ) ; <nl> + String le32_FF ( " \ xFF \ x00 \ x00 \ x00 " , 4 , AttachLiteral ) ; <nl> + String be32_FF ( " \ x00 \ x00 \ x00 \ xFF " , 4 , AttachLiteral ) ; <nl> + String le16_FF ( " \ xFF \ x00 " , 2 , AttachLiteral ) ; <nl> + String be16_FF ( " \ x00 \ xFF " , 2 , AttachLiteral ) ; <nl> + <nl> + uint32_t endian_check = 1 ; <nl> + bool le = ( ( char * ) & endian_check ) [ 0 ] ; <nl> + <nl> + / / HPHP , unlike PHP , truncates overflowing ints <nl> + if ( sizeof ( int ) = = 8 ) { <nl> + VUNPACK ( " I " , iFF , 0x7FFFFFFFFFFFFFFF ) ; <nl> + } else if ( sizeof ( int ) = = 4 ) { <nl> + VUNPACK ( " I " , iFF , 0xFFFFFFFF ) ; <nl> + } else { <nl> + / / Panic <nl> + VS ( true , false ) ; <nl> + } <nl> + <nl> + VUNPACK ( " i " , iFF , - 1 ) ; <nl> + <nl> + / / LlNV test 32 - bit ints specifically <nl> + VUNPACK ( " L " , iFF , 0xFFFFFFFF ) ; <nl> + VUNPACK ( " l " , iFF , - 1 ) ; <nl> + <nl> + VUNPACK ( " N " , be32_FF , 0xFF ) ; <nl> + VUNPACK ( " V " , le32_FF , 0xFF ) ; <nl> + VUNPACK ( " V " , be32_FF , 0xFF000000 ) ; <nl> + <nl> + VUNPACK ( " L " , le ? le32_FF : be32_FF , 0xFF ) ; <nl> + <nl> + / / Ssnv test 16 - bit shorts <nl> + VUNPACK ( " S " , iFF , 0xFFFF ) ; <nl> + VUNPACK ( " s " , iFF , - 1 ) ; <nl> + <nl> + VUNPACK ( " n " , be16_FF , 0xFF ) ; <nl> + VUNPACK ( " v " , le16_FF , 0xFF ) ; <nl> + VUNPACK ( " v " , be16_FF , 0xFF00 ) ; <nl> + <nl> + VUNPACK ( " S " , le ? le16_FF : be16_FF , 0xFF ) ; <nl> + <nl> return Count ( true ) ; <nl> } <nl> <nl>
|
Cast result of unpack to uint64_t when requesting unsigned type 'I'
|
facebook/hhvm
|
b8868a1c40a0369c08b6e1f2f9276c4acacd8052
|
2013-03-28T00:39:44Z
|
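The HHVM fix above widens the unpacked value via `uint64_t u64 = uint32_t(v)` for unsigned formats so that four 0xFF bytes come back as 4294967295 rather than -1. A small illustration of the same signed/unsigned distinction using Python's `struct` module (an analogy, not HHVM's implementation):

```python
import struct

raw = b"\xff\xff\xff\xff"  # four 0xFF bytes, as in the new test

signed, = struct.unpack("<i", raw)    # 32-bit signed   -> -1
unsigned, = struct.unpack("<I", raw)  # 32-bit unsigned -> 4294967295

assert signed == -1
assert unsigned == 0xFFFFFFFF

# The fix amounts to reinterpreting the signed value as unsigned before
# storing it in a wider integer, conceptually uint64(uint32(v)).
assert (signed & 0xFFFFFFFF) == unsigned
```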
old mode 100644 <nl> new mode 100755 <nl> index 3c7b5b7af . . 7297cb33f <nl> mmm a / trunk / auto / auto_headers . sh <nl> ppp b / trunk / auto / auto_headers . sh <nl> echo " # define SRS_AUTO_USER_CONFIGURE \ " $ { SRS_AUTO_USER_CONFIGURE } \ " " > > $ SRS_AU <nl> echo " # define SRS_AUTO_CONFIGURE \ " $ { SRS_AUTO_CONFIGURE } \ " " > > $ SRS_AUTO_HEADERS_H <nl> echo " " > > $ SRS_AUTO_HEADERS_H <nl> <nl> + function srs_define_macro ( ) <nl> + { <nl> + macro = $ 1 & & file = $ 2 <nl> + echo " # define $ macro " > > $ file <nl> + echo " # define $ { macro } _BOOL true " > > $ file <nl> + } <nl> + <nl> + function srs_define_macro_value ( ) <nl> + { <nl> + macro = $ 1 & & value = $ 2 & & file = $ 3 <nl> + echo " # define $ macro $ value " > > $ file <nl> + echo " # define $ { macro } _BOOL true " > > $ file <nl> + } <nl> + <nl> + function srs_undefine_macro ( ) <nl> + { <nl> + macro = $ 1 & & file = $ 2 <nl> + echo " # undef $ macro " > > $ file <nl> + echo " # define $ { macro } _BOOL false " > > $ file <nl> + } <nl> + <nl> # export the preset . <nl> if [ $ SRS_OSX = YES ] ; then <nl> - echo " # define SRS_OSX " > > $ SRS_AUTO_HEADERS_H <nl> + srs_define_macro " SRS_OSX " $ SRS_AUTO_HEADERS_H <nl> fi <nl> if [ $ SRS_X86_X64 = YES ] ; then <nl> - echo " # define SRS_X86_X64 " > > $ SRS_AUTO_HEADERS_H <nl> + srs_define_macro " SRS_X86_X64 " $ SRS_AUTO_HEADERS_H <nl> fi <nl> if [ $ SRS_ARM_UBUNTU12 = YES ] ; then <nl> - echo " # define SRS_ARM_UBUNTU12 " > > $ SRS_AUTO_HEADERS_H <nl> + srs_define_macro " SRS_ARM_UBUNTU12 " $ SRS_AUTO_HEADERS_H <nl> fi <nl> if [ $ SRS_MIPS_UBUNTU12 = YES ] ; then <nl> - echo " # define SRS_MIPS_UBUNTU12 " > > $ SRS_AUTO_HEADERS_H <nl> + srs_define_macro " SRS_MIPS_UBUNTU12 " $ SRS_AUTO_HEADERS_H <nl> fi <nl> if [ $ SRS_PI = YES ] ; then <nl> - echo " # define SRS_PI " > > $ SRS_AUTO_HEADERS_H <nl> + srs_define_macro " SRS_PI " $ SRS_AUTO_HEADERS_H <nl> fi <nl> if [ $ SRS_CUBIE = YES ] ; then <nl> - echo " # define SRS_CUBIE " > > $ SRS_AUTO_HEADERS_H <nl> + srs_define_macro " SRS_CUBIE " $ SRS_AUTO_HEADERS_H <nl> fi <nl> <nl> echo " " > > $ SRS_AUTO_HEADERS_H <nl> echo " " > > $ SRS_AUTO_HEADERS_H <nl> # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> # write to source file <nl> if [ $ SRS_CROSS_BUILD = YES ] ; then <nl> - echo " cc = $ SrsArmCC gcc = $ SrsArmGCC g + + = $ SrsArmCXX ar = $ SrsArmAR ld = $ SrsArmLD randlib = $ SrsArmRANDLIB " <nl> - echo " # define SRS_AUTO_EMBEDED_TOOL_CHAIN \ " cc = $ SrsArmCC gcc = $ SrsArmGCC g + + = $ SrsArmCXX ar = $ SrsArmAR ld = $ SrsArmLD randlib = $ SrsArmRANDLIB \ " " > > $ SRS_AUTO_HEADERS_H <nl> + __TOOL_CHAIN = " cc = $ SrsArmCC gcc = $ SrsArmGCC g + + = $ SrsArmCXX ar = $ SrsArmAR ld = $ SrsArmLD randlib = $ SrsArmRANDLIB " & & echo " $ __TOOL_CHAIN " <nl> + srs_define_macro_value " SRS_AUTO_EMBEDED_TOOL_CHAIN " " \ " $ __TOOL_CHAIN \ " " $ SRS_AUTO_HEADERS_H <nl> else <nl> - echo " # define SRS_AUTO_EMBEDED_TOOL_CHAIN \ " normal x86 / x64 gcc \ " " > > $ SRS_AUTO_HEADERS_H <nl> + srs_undefine_macro " SRS_AUTO_EMBEDED_TOOL_CHAIN " $ SRS_AUTO_HEADERS_H <nl> fi <nl> echo " " > > $ SRS_AUTO_HEADERS_H <nl> <nl> # auto headers in depends . 
<nl> if [ $ SRS_HTTP_CORE = YES ] ; then <nl> - echo " # define SRS_AUTO_HTTP_CORE " > > $ SRS_AUTO_HEADERS_H <nl> + srs_define_macro " SRS_AUTO_HTTP_CORE " $ SRS_AUTO_HEADERS_H <nl> else <nl> - echo " # undef SRS_AUTO_HTTP_CORE " > > $ SRS_AUTO_HEADERS_H <nl> + srs_undefine_macro " SRS_AUTO_HTTP_CORE " $ SRS_AUTO_HEADERS_H <nl> fi <nl> <nl> if [ $ SRS_HTTP_SERVER = YES ] ; then <nl> - echo " # define SRS_AUTO_HTTP_SERVER " > > $ SRS_AUTO_HEADERS_H <nl> + srs_define_macro " SRS_AUTO_HTTP_SERVER " $ SRS_AUTO_HEADERS_H <nl> else <nl> - echo " # undef SRS_AUTO_HTTP_SERVER " > > $ SRS_AUTO_HEADERS_H <nl> + srs_undefine_macro " SRS_AUTO_HTTP_SERVER " $ SRS_AUTO_HEADERS_H <nl> fi <nl> <nl> if [ $ SRS_STREAM_CASTER = YES ] ; then <nl> - echo " # define SRS_AUTO_STREAM_CASTER " > > $ SRS_AUTO_HEADERS_H <nl> + srs_define_macro " SRS_AUTO_STREAM_CASTER " $ SRS_AUTO_HEADERS_H <nl> else <nl> - echo " # undef SRS_AUTO_STREAM_CASTER " > > $ SRS_AUTO_HEADERS_H <nl> + srs_undefine_macro " SRS_AUTO_STREAM_CASTER " $ SRS_AUTO_HEADERS_H <nl> fi <nl> <nl> if [ $ SRS_KAFKA = YES ] ; then <nl> - echo " # define SRS_AUTO_KAFKA " > > $ SRS_AUTO_HEADERS_H <nl> + srs_define_macro " SRS_AUTO_KAFKA " $ SRS_AUTO_HEADERS_H <nl> else <nl> - echo " # undef SRS_AUTO_KAFKA " > > $ SRS_AUTO_HEADERS_H <nl> + srs_undefine_macro " SRS_AUTO_KAFKA " $ SRS_AUTO_HEADERS_H <nl> fi <nl> <nl> if [ $ SRS_HTTP_API = YES ] ; then <nl> - echo " # define SRS_AUTO_HTTP_API " > > $ SRS_AUTO_HEADERS_H <nl> + srs_define_macro " SRS_AUTO_HTTP_API " $ SRS_AUTO_HEADERS_H <nl> else <nl> - echo " # undef SRS_AUTO_HTTP_API " > > $ SRS_AUTO_HEADERS_H <nl> + srs_undefine_macro " SRS_AUTO_HTTP_API " $ SRS_AUTO_HEADERS_H <nl> fi <nl> <nl> if [ $ SRS_NGINX = YES ] ; then <nl> - echo " # define SRS_AUTO_NGINX " > > $ SRS_AUTO_HEADERS_H <nl> + srs_define_macro " SRS_AUTO_NGINX " $ SRS_AUTO_HEADERS_H <nl> else <nl> - echo " # undef SRS_AUTO_NGINX " > > $ SRS_AUTO_HEADERS_H <nl> + srs_undefine_macro " SRS_AUTO_NGINX " $ SRS_AUTO_HEADERS_H <nl> fi <nl> <nl> if [ $ SRS_DVR = YES ] ; then <nl> - echo " # define SRS_AUTO_DVR " > > $ SRS_AUTO_HEADERS_H <nl> + srs_define_macro " SRS_AUTO_DVR " $ SRS_AUTO_HEADERS_H <nl> else <nl> - echo " # undef SRS_AUTO_DVR " > > $ SRS_AUTO_HEADERS_H <nl> + srs_undefine_macro " SRS_AUTO_DVR " $ SRS_AUTO_HEADERS_H <nl> fi <nl> <nl> if [ $ SRS_HLS = YES ] ; then <nl> - echo " # define SRS_AUTO_HLS " > > $ SRS_AUTO_HEADERS_H <nl> + srs_define_macro " SRS_AUTO_HLS " $ SRS_AUTO_HEADERS_H <nl> else <nl> - echo " # undef SRS_AUTO_HLS " > > $ SRS_AUTO_HEADERS_H <nl> + srs_undefine_macro " SRS_AUTO_HLS " $ SRS_AUTO_HEADERS_H <nl> fi <nl> <nl> if [ $ SRS_HDS = YES ] ; then <nl> - echo " # define SRS_AUTO_HDS " > > $ SRS_AUTO_HEADERS_H <nl> + srs_define_macro " SRS_AUTO_HDS " $ SRS_AUTO_HEADERS_H <nl> else <nl> - echo " # undef SRS_AUTO_HDS " > > $ SRS_AUTO_HEADERS_H <nl> + srs_undefine_macro " SRS_AUTO_HDS " $ SRS_AUTO_HEADERS_H <nl> fi <nl> <nl> if [ $ SRS_HTTP_CALLBACK = YES ] ; then <nl> - echo " # define SRS_AUTO_HTTP_CALLBACK " > > $ SRS_AUTO_HEADERS_H <nl> + srs_define_macro " SRS_AUTO_HTTP_CALLBACK " $ SRS_AUTO_HEADERS_H <nl> else <nl> - echo " # undef SRS_AUTO_HTTP_CALLBACK " > > $ SRS_AUTO_HEADERS_H <nl> + srs_undefine_macro " SRS_AUTO_HTTP_CALLBACK " $ SRS_AUTO_HEADERS_H <nl> fi <nl> <nl> if [ $ SRS_SSL = YES ] ; then <nl> - echo " # define SRS_AUTO_SSL " > > $ SRS_AUTO_HEADERS_H <nl> + srs_define_macro " SRS_AUTO_SSL " $ SRS_AUTO_HEADERS_H <nl> else <nl> - echo " # undef SRS_AUTO_SSL " > > $ SRS_AUTO_HEADERS_H <nl> + 
srs_undefine_macro " SRS_AUTO_SSL " $ SRS_AUTO_HEADERS_H <nl> fi <nl> <nl> if [ $ SRS_MEM_WATCH = YES ] ; then <nl> - echo " # define SRS_AUTO_MEM_WATCH " > > $ SRS_AUTO_HEADERS_H <nl> + srs_define_macro " SRS_AUTO_MEM_WATCH " $ SRS_AUTO_HEADERS_H <nl> else <nl> - echo " # undef SRS_AUTO_MEM_WATCH " > > $ SRS_AUTO_HEADERS_H <nl> + srs_undefine_macro " SRS_AUTO_MEM_WATCH " $ SRS_AUTO_HEADERS_H <nl> fi <nl> <nl> # whether compile ffmpeg tool <nl> if [ $ SRS_FFMPEG_TOOL = YES ] ; then <nl> - echo " # define SRS_AUTO_FFMPEG_TOOL " > > $ SRS_AUTO_HEADERS_H <nl> + srs_define_macro " SRS_AUTO_FFMPEG_TOOL " $ SRS_AUTO_HEADERS_H <nl> else <nl> - echo " # undef SRS_AUTO_FFMPEG_TOOL " > > $ SRS_AUTO_HEADERS_H <nl> + srs_undefine_macro " SRS_AUTO_FFMPEG_TOOL " $ SRS_AUTO_HEADERS_H <nl> fi <nl> <nl> # whatever the FFMPEG tools , if transcode and ingest specified , <nl> # srs always compile the FFMPEG tool stub which used to start the FFMPEG process . <nl> if [ $ SRS_FFMPEG_STUB = YES ] ; then <nl> - echo " # define SRS_AUTO_FFMPEG_STUB " > > $ SRS_AUTO_HEADERS_H <nl> + srs_define_macro " SRS_AUTO_FFMPEG_STUB " $ SRS_AUTO_HEADERS_H <nl> else <nl> - echo " # undef SRS_AUTO_FFMPEG_STUB " > > $ SRS_AUTO_HEADERS_H <nl> + srs_undefine_macro " SRS_AUTO_FFMPEG_STUB " $ SRS_AUTO_HEADERS_H <nl> fi <nl> <nl> if [ $ SRS_TRANSCODE = YES ] ; then <nl> - echo " # define SRS_AUTO_TRANSCODE " > > $ SRS_AUTO_HEADERS_H <nl> + srs_define_macro " SRS_AUTO_TRANSCODE " $ SRS_AUTO_HEADERS_H <nl> else <nl> - echo " # undef SRS_AUTO_TRANSCODE " > > $ SRS_AUTO_HEADERS_H <nl> + srs_undefine_macro " SRS_AUTO_TRANSCODE " $ SRS_AUTO_HEADERS_H <nl> fi <nl> <nl> if [ $ SRS_INGEST = YES ] ; then <nl> - echo " # define SRS_AUTO_INGEST " > > $ SRS_AUTO_HEADERS_H <nl> + srs_define_macro " SRS_AUTO_INGEST " $ SRS_AUTO_HEADERS_H <nl> else <nl> - echo " # undef SRS_AUTO_INGEST " > > $ SRS_AUTO_HEADERS_H <nl> + srs_undefine_macro " SRS_AUTO_INGEST " $ SRS_AUTO_HEADERS_H <nl> fi <nl> <nl> # for statistic . 
<nl> if [ $ SRS_STAT = YES ] ; then <nl> - echo " # define SRS_AUTO_STAT " > > $ SRS_AUTO_HEADERS_H <nl> + srs_define_macro " SRS_AUTO_STAT " $ SRS_AUTO_HEADERS_H <nl> else <nl> - echo " # undef SRS_AUTO_STAT " > > $ SRS_AUTO_HEADERS_H <nl> + srs_undefine_macro " SRS_AUTO_STAT " $ SRS_AUTO_HEADERS_H <nl> fi <nl> <nl> if [ $ SRS_GPERF = YES ] ; then <nl> - echo " # define SRS_AUTO_GPERF " > > $ SRS_AUTO_HEADERS_H <nl> + srs_define_macro " SRS_AUTO_GPERF " $ SRS_AUTO_HEADERS_H <nl> else <nl> - echo " # undef SRS_AUTO_GPERF " > > $ SRS_AUTO_HEADERS_H <nl> + srs_undefine_macro " SRS_AUTO_GPERF " $ SRS_AUTO_HEADERS_H <nl> fi <nl> if [ $ SRS_GPERF_MC = YES ] ; then <nl> - echo " # define SRS_AUTO_GPERF_MC " > > $ SRS_AUTO_HEADERS_H <nl> + srs_define_macro " SRS_AUTO_GPERF_MC " $ SRS_AUTO_HEADERS_H <nl> else <nl> - echo " # undef SRS_AUTO_GPERF_MC " > > $ SRS_AUTO_HEADERS_H <nl> + srs_undefine_macro " SRS_AUTO_GPERF_MC " $ SRS_AUTO_HEADERS_H <nl> fi <nl> if [ $ SRS_GPERF_MP = YES ] ; then <nl> - echo " # define SRS_AUTO_GPERF_MP " > > $ SRS_AUTO_HEADERS_H <nl> + srs_define_macro " SRS_AUTO_GPERF_MP " $ SRS_AUTO_HEADERS_H <nl> else <nl> - echo " # undef SRS_AUTO_GPERF_MP " > > $ SRS_AUTO_HEADERS_H <nl> + srs_undefine_macro " SRS_AUTO_GPERF_MP " $ SRS_AUTO_HEADERS_H <nl> fi <nl> if [ $ SRS_GPERF_CP = YES ] ; then <nl> - echo " # define SRS_AUTO_GPERF_CP " > > $ SRS_AUTO_HEADERS_H <nl> + srs_define_macro " SRS_AUTO_GPERF_CP " $ SRS_AUTO_HEADERS_H <nl> else <nl> - echo " # undef SRS_AUTO_GPERF_CP " > > $ SRS_AUTO_HEADERS_H <nl> + srs_undefine_macro " SRS_AUTO_GPERF_CP " $ SRS_AUTO_HEADERS_H <nl> fi <nl> <nl> # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> # for embeded . 
<nl> # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> if [ $ SRS_CROSS_BUILD = YES ] ; then <nl> - echo " # define SRS_AUTO_EMBEDED_CPU " > > $ SRS_AUTO_HEADERS_H <nl> + srs_define_macro " SRS_AUTO_EMBEDED_CPU " $ SRS_AUTO_HEADERS_H <nl> else <nl> - echo " # undef SRS_AUTO_EMBEDED_CPU " > > $ SRS_AUTO_HEADERS_H <nl> + srs_undefine_macro " SRS_AUTO_EMBEDED_CPU " $ SRS_AUTO_HEADERS_H <nl> fi <nl> <nl> # arm <nl> if [ $ SRS_ARM_UBUNTU12 = YES ] ; then <nl> - echo " # define SRS_AUTO_ARM_UBUNTU12 " > > $ SRS_AUTO_HEADERS_H <nl> + srs_define_macro " SRS_AUTO_ARM_UBUNTU12 " $ SRS_AUTO_HEADERS_H <nl> else <nl> - echo " # undef SRS_AUTO_ARM_UBUNTU12 " > > $ SRS_AUTO_HEADERS_H <nl> + srs_undefine_macro " SRS_AUTO_ARM_UBUNTU12 " $ SRS_AUTO_HEADERS_H <nl> fi <nl> <nl> # mips <nl> if [ $ SRS_MIPS_UBUNTU12 = YES ] ; then <nl> - echo " # define SRS_AUTO_MIPS_UBUNTU12 " > > $ SRS_AUTO_HEADERS_H <nl> + srs_define_macro " SRS_AUTO_MIPS_UBUNTU12 " $ SRS_AUTO_HEADERS_H <nl> else <nl> - echo " # undef SRS_AUTO_MIPS_UBUNTU12 " > > $ SRS_AUTO_HEADERS_H <nl> + srs_undefine_macro " SRS_AUTO_MIPS_UBUNTU12 " $ SRS_AUTO_HEADERS_H <nl> fi <nl> <nl> echo " " > > $ SRS_AUTO_HEADERS_H <nl> # for log level compile settings <nl> if [ $ SRS_LOG_VERBOSE = YES ] ; then <nl> - echo " # define SRS_AUTO_VERBOSE " > > $ SRS_AUTO_HEADERS_H <nl> + srs_define_macro " SRS_AUTO_VERBOSE " $ SRS_AUTO_HEADERS_H <nl> else <nl> - echo " # undef SRS_AUTO_VERBOSE " > > $ SRS_AUTO_HEADERS_H <nl> + srs_undefine_macro " SRS_AUTO_VERBOSE " $ SRS_AUTO_HEADERS_H <nl> fi <nl> if [ $ SRS_LOG_INFO = YES ] ; then <nl> - echo " # define SRS_AUTO_INFO " > > $ SRS_AUTO_HEADERS_H <nl> + srs_define_macro " SRS_AUTO_INFO " $ SRS_AUTO_HEADERS_H <nl> else <nl> - echo " # undef SRS_AUTO_INFO " > > $ SRS_AUTO_HEADERS_H <nl> + srs_undefine_macro " SRS_AUTO_INFO " $ SRS_AUTO_HEADERS_H <nl> fi <nl> if [ $ SRS_LOG_TRACE = YES ] ; then <nl> - echo " # define SRS_AUTO_TRACE " > > $ SRS_AUTO_HEADERS_H <nl> + srs_define_macro " SRS_AUTO_TRACE " $ SRS_AUTO_HEADERS_H <nl> else <nl> - echo " # undef SRS_AUTO_TRACE " > > $ SRS_AUTO_HEADERS_H <nl> + srs_undefine_macro " SRS_AUTO_TRACE " $ SRS_AUTO_HEADERS_H <nl> fi <nl> <nl> # prefix <nl> mmm a / trunk / src / app / srs_app_kafka . cpp <nl> ppp b / trunk / src / app / srs_app_kafka . cpp <nl> int SrsKafkaProducer : : initialize ( ) <nl> { <nl> int ret = ERROR_SUCCESS ; <nl> <nl> - srs_trace ( " initialize kafka producer ok . " ) ; <nl> + / / when kafka enabled , request metadata when startup . <nl> + if ( _srs_config - > get_kafka_enabled ( ) & & ( ret = request_metadata ( ) ) ! = ERROR_SUCCESS ) { <nl> + srs_error ( " request kafka metadata failed . ret = % d " , ret ) ; <nl> + return ret ; <nl> + } <nl> + <nl> + srs_info ( " initialize kafka producer ok . " ) ; <nl> <nl> return ret ; <nl> } <nl> int SrsKafkaProducer : : start ( ) <nl> return ret ; <nl> } <nl> <nl> - srs_trace ( " start kafka async worker ok . " ) ; <nl> + srs_trace ( " kafka worker ok , enabled : % d " , _srs_config - > get_kafka_enabled ( ) ) ; <nl> <nl> return ret ; <nl> } <nl> void SrsKafkaProducer : : stop ( ) <nl> worker - > stop ( ) ; <nl> } <nl> <nl> + int SrsKafkaProducer : : request_metadata ( ) <nl> + { <nl> + int ret = ERROR_SUCCESS ; <nl> + <nl> + srs_info ( " update kafka metadata ok " ) ; <nl> + <nl> + return ret ; <nl> + } <nl> + <nl> # endif <nl> <nl> mmm a / trunk / src / app / srs_app_kafka . 
hpp <nl> ppp b / trunk / src / app / srs_app_kafka . hpp <nl> class SrsKafkaProducer <nl> virtual int initialize ( ) ; <nl> virtual int start ( ) ; <nl> virtual void stop ( ) ; <nl> + private : <nl> + virtual int request_metadata ( ) ; <nl> } ; <nl> <nl> # endif <nl> mmm a / trunk / src / app / srs_app_server . cpp <nl> ppp b / trunk / src / app / srs_app_server . cpp <nl> int SrsSignalManager : : start ( ) <nl> sa . sa_flags = 0 ; <nl> sigaction ( SRS_SIGNAL_PERSISTENCE_CONFIG , & sa , NULL ) ; <nl> <nl> - srs_trace ( " signal installed " ) ; <nl> + srs_trace ( " signal installed , reload = % d , dispose = % d , persistence = % d , grace_quit = % d " , <nl> + SRS_SIGNAL_RELOAD , SRS_SIGNAL_DISPOSE , SRS_SIGNAL_PERSISTENCE_CONFIG , SRS_SIGNAL_GRACEFULLY_QUIT ) ; <nl> <nl> return pthread - > start ( ) ; <nl> } <nl> mmm a / trunk / src / app / srs_app_st . cpp <nl> ppp b / trunk / src / app / srs_app_st . cpp <nl> int srs_st_init ( ) <nl> srs_error ( " st_set_eventsys use % s failed . ret = % d " , st_get_eventsys_name ( ) , ret ) ; <nl> return ret ; <nl> } <nl> - srs_trace ( " st_set_eventsys to % s " , st_get_eventsys_name ( ) ) ; <nl> + srs_info ( " st_set_eventsys to % s " , st_get_eventsys_name ( ) ) ; <nl> <nl> if ( st_init ( ) ! = 0 ) { <nl> ret = ERROR_ST_INITIALIZE ; <nl> mmm a / trunk / src / app / srs_app_utility . cpp <nl> ppp b / trunk / src / app / srs_app_utility . cpp <nl> void srs_parse_endpoint ( string ip_port , string & ip , int & port ) <nl> port = : : atoi ( the_port . c_str ( ) ) ; <nl> } <nl> <nl> + string srs_bool2switch ( bool v ) { <nl> + return v ? " on " : " off " ; <nl> + } <nl> + <nl> int srs_kill_forced ( int & pid ) <nl> { <nl> int ret = ERROR_SUCCESS ; <nl> void srs_update_proc_stat ( ) <nl> static int user_hz = 0 ; <nl> if ( user_hz < = 0 ) { <nl> user_hz = ( int ) sysconf ( _SC_CLK_TCK ) ; <nl> - srs_trace ( " USER_HZ = % d " , user_hz ) ; <nl> + srs_info ( " USER_HZ = % d " , user_hz ) ; <nl> srs_assert ( user_hz > 0 ) ; <nl> } <nl> <nl> void retrieve_local_ipv4_ips ( ) <nl> return ; <nl> } <nl> <nl> + stringstream ss0 ; <nl> + ss0 < < " ips " ; <nl> + <nl> + stringstream ss1 ; <nl> + ss1 < < " devices " ; <nl> + <nl> ifaddrs * p = ifap ; <nl> while ( p ! = NULL ) { <nl> ifaddrs * cur = p ; <nl> void retrieve_local_ipv4_ips ( ) <nl> <nl> std : : string ip = buf ; <nl> if ( ip ! = SRS_CONSTS_LOCALHOST ) { <nl> - srs_trace ( " retrieve local ipv4 ip = % s , index = % d " , ip . c_str ( ) , ( int ) ips . size ( ) ) ; <nl> + ss0 < < " , local [ " < < ( int ) ips . size ( ) < < " ] ipv4 " < < ip ; <nl> ips . push_back ( ip ) ; <nl> } <nl> <nl> / / set the device internet status . <nl> if ( ! srs_net_device_is_internet ( inaddr - > s_addr ) ) { <nl> - srs_trace ( " detect intranet address : % s , ifname = % s " , ip . c_str ( ) , cur - > ifa_name ) ; <nl> + ss1 < < " , intranet " ; <nl> _srs_device_ifs [ cur - > ifa_name ] = false ; <nl> } else { <nl> - srs_trace ( " detect internet address : % s , ifname = % s " , ip . c_str ( ) , cur - > ifa_name ) ; <nl> + ss1 < < " , internet " ; <nl> _srs_device_ifs [ cur - > ifa_name ] = true ; <nl> } <nl> + ss1 < < cur - > ifa_name < < " " < < ip ; <nl> } <nl> } <nl> + srs_trace ( ss0 . str ( ) . c_str ( ) ) ; <nl> + srs_trace ( ss1 . str ( ) . c_str ( ) ) ; <nl> <nl> freeifaddrs ( ifap ) ; <nl> } <nl> mmm a / trunk / src / app / srs_app_utility . hpp <nl> ppp b / trunk / src / app / srs_app_utility . 
hpp <nl> extern std : : string srs_path_build_timestamp ( std : : string template_path ) ; <nl> extern void srs_parse_endpoint ( std : : string ip_port , std : : string & ip , std : : string & port ) ; <nl> extern void srs_parse_endpoint ( std : : string ip_port , std : : string & ip , int & port ) ; <nl> <nl> + / * * <nl> + * convert bool to switch value , true to " on " , false to " off " . <nl> + * / <nl> + extern std : : string srs_bool2switch ( bool v ) ; <nl> + <nl> / * * <nl> * kill the pid by SIGINT , then wait to quit , <nl> * kill the pid by SIGKILL again when exceed the timeout . <nl> mmm a / trunk / src / main / srs_main_server . cpp <nl> ppp b / trunk / src / main / srs_main_server . cpp <nl> CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE . <nl> # include < sys / types . h > <nl> # include < sys / wait . h > <nl> <nl> + # include < sstream > <nl> + using namespace std ; <nl> + <nl> # ifdef SRS_AUTO_GPERF_MP <nl> # include < gperftools / heap - profiler . h > <nl> # endif <nl> CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE . <nl> # include < srs_app_log . hpp > <nl> # include < srs_kernel_utility . hpp > <nl> # include < srs_core_performance . hpp > <nl> + # include < srs_app_utility . hpp > <nl> <nl> / / pre - declare <nl> int run ( ) ; <nl> SrsServer * _srs_server = new SrsServer ( ) ; <nl> * / <nl> void show_macro_features ( ) <nl> { <nl> - # ifdef SRS_AUTO_SSL <nl> - srs_trace ( " check feature rtmp handshake : on " ) ; <nl> - # else <nl> - srs_warn ( " check feature rtmp handshake : off " ) ; <nl> - # endif <nl> - <nl> - # ifdef SRS_AUTO_HLS <nl> - srs_trace ( " check feature hls : on " ) ; <nl> - # else <nl> - srs_warn ( " check feature hls : off " ) ; <nl> - # endif <nl> - <nl> - # ifdef SRS_AUTO_HDS <nl> - srs_trace ( " check feature hds : on " ) ; <nl> - # else <nl> - srs_warn ( " check feature hds : off " ) ; <nl> - # endif <nl> - <nl> - # ifdef SRS_AUTO_HTTP_CALLBACK <nl> - srs_trace ( " check feature http callback : on " ) ; <nl> - # else <nl> - srs_warn ( " check feature http callback : off " ) ; <nl> - # endif <nl> - <nl> - # ifdef SRS_AUTO_HTTP_API <nl> - srs_trace ( " check feature http api : on " ) ; <nl> - # else <nl> - srs_warn ( " check feature http api : off " ) ; <nl> - # endif <nl> - <nl> - # ifdef SRS_AUTO_HTTP_SERVER <nl> - srs_trace ( " check feature http server : on " ) ; <nl> - # else <nl> - srs_warn ( " check feature http server : off " ) ; <nl> + if ( true ) { <nl> + stringstream ss ; <nl> + <nl> + ss < < " features " ; <nl> + <nl> + / / rch ( rtmp complex handshake ) <nl> + ss < < " , rch : " < < srs_bool2switch ( SRS_AUTO_SSL_BOOL ) ; <nl> + ss < < " , hls : " < < srs_bool2switch ( SRS_AUTO_HLS_BOOL ) ; <nl> + ss < < " , hds : " < < srs_bool2switch ( SRS_AUTO_HDS_BOOL ) ; <nl> + / / hc ( http callback ) <nl> + ss < < " , hc : " < < srs_bool2switch ( SRS_AUTO_HTTP_CALLBACK_BOOL ) ; <nl> + / / ha ( http api ) <nl> + ss < < " , ha : " < < srs_bool2switch ( SRS_AUTO_HTTP_API_BOOL ) ; <nl> + / / hs ( http server ) <nl> + ss < < " , hs : " < < srs_bool2switch ( SRS_AUTO_HTTP_SERVER_BOOL ) ; <nl> + / / hp ( http parser ) <nl> + ss < < " , hp : " < < srs_bool2switch ( SRS_AUTO_HTTP_CORE_BOOL ) ; <nl> + ss < < " , dvr : " < < srs_bool2switch ( SRS_AUTO_DVR_BOOL ) ; <nl> + / / trans ( transcode ) <nl> + ss < < " , trans : " < < srs_bool2switch ( SRS_AUTO_TRANSCODE_BOOL ) ; <nl> + / / inge ( ingest ) <nl> + ss < < " , inge : " < < srs_bool2switch ( SRS_AUTO_INGEST_BOOL ) ; <nl> + ss < < " , kafka : " < < 
srs_bool2switch ( SRS_AUTO_KAFKA_BOOL ) ; <nl> + ss < < " , stat : " < < srs_bool2switch ( SRS_AUTO_STAT_BOOL ) ; <nl> + ss < < " , nginx : " < < srs_bool2switch ( SRS_AUTO_NGINX_BOOL ) ; <nl> + / / ff ( ffmpeg ) <nl> + ss < < " , ff : " < < srs_bool2switch ( SRS_AUTO_FFMPEG_TOOL_BOOL ) ; <nl> + / / sc ( stream - caster ) <nl> + ss < < " , sc : " < < srs_bool2switch ( SRS_AUTO_STREAM_CASTER_BOOL ) ; <nl> + srs_trace ( ss . str ( ) . c_str ( ) ) ; <nl> + } <nl> + <nl> + if ( true ) { <nl> + stringstream ss ; <nl> + ss < < " SRS on " ; <nl> + # ifdef SRS_OSX <nl> + ss < < " OSX " ; <nl> # endif <nl> - <nl> - # ifdef SRS_AUTO_HTTP_CORE <nl> - srs_trace ( " check feature http parser : on " ) ; <nl> - # else <nl> - srs_warn ( " check feature http parser : off " ) ; <nl> + # ifdef SRS_PI <nl> + ss < < " RespberryPi " ; <nl> # endif <nl> - <nl> - # ifdef SRS_AUTO_DVR <nl> - srs_trace ( " check feature dvr : on " ) ; <nl> - # else <nl> - srs_warn ( " check feature dvr : off " ) ; <nl> + # ifdef SRS_CUBIE <nl> + ss < < " CubieBoard " ; <nl> # endif <nl> - <nl> - # ifdef SRS_AUTO_TRANSCODE <nl> - srs_trace ( " check feature transcode : on " ) ; <nl> - # else <nl> - srs_warn ( " check feature transcode : off " ) ; <nl> + # ifdef SRS_ARM_UBUNTU12 <nl> + ss < < " ARM ( build on ubuntu ) " ; <nl> # endif <nl> - <nl> - # ifdef SRS_AUTO_INGEST <nl> - srs_trace ( " check feature ingest : on " ) ; <nl> - # else <nl> - srs_warn ( " check feature ingest : off " ) ; <nl> + # ifdef SRS_MIPS_UBUNTU12 <nl> + ss < < " MIPS ( build on ubuntu ) " ; <nl> # endif <nl> - <nl> - # ifdef SRS_AUTO_STAT <nl> - srs_trace ( " check feature system stat : on " ) ; <nl> - # else <nl> - srs_warn ( " check feature system stat : off " ) ; <nl> + <nl> + # if defined ( __amd64__ ) <nl> + ss < < " amd64 " ; <nl> # endif <nl> - <nl> - # ifdef SRS_AUTO_NGINX <nl> - srs_trace ( " check feature compile nginx : on " ) ; <nl> - # else <nl> - srs_warn ( " check feature compile nginx : off " ) ; <nl> + # if defined ( __x86_64__ ) <nl> + ss < < " x86_64 " ; <nl> # endif <nl> - <nl> - # ifdef SRS_AUTO_FFMPEG_TOOL <nl> - srs_trace ( " check feature compile ffmpeg : on " ) ; <nl> - # else <nl> - srs_warn ( " check feature compile ffmpeg : off " ) ; <nl> + # if defined ( __i386__ ) <nl> + ss < < " i386 " ; <nl> # endif <nl> - <nl> - # ifdef SRS_AUTO_STREAM_CASTER <nl> - srs_trace ( " stream caster : on " ) ; <nl> - # else <nl> - srs_warn ( " stream caster : off " ) ; <nl> + # if defined ( __arm__ ) <nl> + ss < < " arm " ; <nl> # endif <nl> - <nl> + <nl> + # ifndef SRS_OSX <nl> + ss < < " , glibc " < < ( int ) __GLIBC__ < < " . " ( int ) __GLIBC_MINOR__ ; <nl> + # endif <nl> + <nl> + ss < < " , conf : " < < _srs_config - > config ( ) < < " , limit : " < < _srs_config - > get_max_connections ( ) <nl> + < < " , writev : " < < sysconf ( _SC_IOV_MAX ) < < " , encoding : " < < ( srs_is_little_endian ( ) ? " little - endian " : " big - endian " ) <nl> + < < " , HZ : " < < ( int ) sysconf ( _SC_CLK_TCK ) ; <nl> + <nl> + srs_trace ( ss . str ( ) . c_str ( ) ) ; <nl> + } <nl> + <nl> + if ( true ) { <nl> + stringstream ss ; <nl> + <nl> + / / mw ( merged - write ) <nl> + ss < < " mw sleep : " < < SRS_PERF_MW_SLEEP < < " ms " ; <nl> + <nl> + / / mr ( merged - read ) <nl> + ss < < " . 
mr " ; <nl> # ifdef SRS_PERF_MERGED_READ <nl> - srs_trace ( " MR ( merged - read ) : on , @ see % s " , RTMP_SIG_SRS_ISSUES ( 241 ) ) ; <nl> + ss < < " enabled : on " ; <nl> # else <nl> - srs_warn ( " MR ( merged - read ) : off , @ see % s " , RTMP_SIG_SRS_ISSUES ( 241 ) ) ; <nl> + ss < < " enabled : off " ; <nl> # endif <nl> - <nl> - srs_trace ( " MR ( merged - read ) default % d sleep % d " , SRS_PERF_MR_ENABLED , SRS_PERF_MR_SLEEP ) ; <nl> - srs_trace ( " MW ( merged - write ) default sleep % d " , SRS_PERF_MW_SLEEP ) ; <nl> - srs_trace ( " read chunk stream cache cid [ 0 , % d ) " , SRS_PERF_CHUNK_STREAM_CACHE ) ; <nl> - srs_trace ( " default gop cache % d , play queue % ds " , SRS_PERF_GOP_CACHE , SRS_PERF_PLAY_QUEUE ) ; <nl> + ss < < " , default : " < < SRS_PERF_MR_ENABLED < < " , sleep : " < < SRS_PERF_MR_SLEEP < < " ms " ; <nl> + ss < < " , @ see " < < RTMP_SIG_SRS_ISSUES ( 241 ) ; <nl> + <nl> + srs_trace ( ss . str ( ) . c_str ( ) ) ; <nl> + } <nl> <nl> + if ( true ) { <nl> + stringstream ss ; <nl> + <nl> + / / gc ( gop - cache ) <nl> + ss < < " gc : " < < srs_bool2switch ( SRS_PERF_GOP_CACHE ) ; <nl> + / / pq ( play - queue ) <nl> + ss < < " , pq : " < < SRS_PERF_PLAY_QUEUE < < " s " ; <nl> + / / cscc ( chunk stream cache cid ) <nl> + ss < < " , cscc : [ 0 , " < < SRS_PERF_CHUNK_STREAM_CACHE < < " ) " ; <nl> + / / csa ( complex send algorithm ) <nl> + ss < < " , csa : " ; <nl> # ifndef SRS_PERF_COMPLEX_SEND <nl> - srs_warn ( " complex send algorithm disabled . " ) ; <nl> + ss < < " off " ; <nl> # else <nl> - srs_trace ( " complex send algorithm enabled . " ) ; <nl> + ss < < " on " ; <nl> # endif <nl> - <nl> + <nl> + / / tn ( TCP_NODELAY ) <nl> + ss < < " , tn : " ; <nl> # ifdef SRS_PERF_TCP_NODELAY <nl> - srs_warn ( " TCP_NODELAY enabled , may hurts performance . " ) ; <nl> + ss < < " on ( may hurts performance ) " ; <nl> # else <nl> - srs_trace ( " TCP_NODELAY disabled . " ) ; <nl> + ss < < " off " ; <nl> # endif <nl> - <nl> + <nl> + / / ss ( SO_SENDBUF ) <nl> + ss < < " , ss : " ; <nl> # ifdef SRS_PERF_SO_SNDBUF_SIZE <nl> - srs_warn ( " socket send buffer size % d " , SRS_PERF_SO_SNDBUF_SIZE ) ; <nl> + ss < < SRS_PERF_SO_SNDBUF_SIZE ; <nl> # else <nl> - srs_trace ( " auto guess socket send buffer by merged write " ) ; <nl> + ss < < " auto ( guess by merged write ) " ; <nl> # endif <nl> - <nl> + <nl> + srs_trace ( ss . str ( ) . c_str ( ) ) ; <nl> + } <nl> + <nl> + / / others <nl> int possible_mr_latency = 0 ; <nl> # ifdef SRS_PERF_MERGED_READ <nl> possible_mr_latency = SRS_PERF_MR_SLEEP ; <nl> # endif <nl> srs_trace ( " system default latency in ms : mw ( 0 - % d ) + mr ( 0 - % d ) + play - queue ( 0 - % d ) " , <nl> - SRS_PERF_MW_SLEEP , possible_mr_latency , SRS_PERF_PLAY_QUEUE * 1000 ) ; <nl> - } <nl> - <nl> - void check_macro_features ( ) <nl> - { <nl> - / / important preset . <nl> - # ifdef SRS_OSX <nl> - srs_trace ( " SRS for OSX " ) ; <nl> - # endif <nl> - # ifdef SRS_PI <nl> - srs_trace ( " SRS for pi " ) ; <nl> - # endif <nl> - # ifdef SRS_CUBIE <nl> - srs_trace ( " SRS for cubieboard " ) ; <nl> - # endif <nl> - # ifdef SRS_ARM_UBUNTU12 <nl> - srs_trace ( " SRS for arm ( build on ubuntu ) " ) ; <nl> - # endif <nl> - # ifdef SRS_MIPS_UBUNTU12 <nl> - srs_trace ( " SRS for mips ( build on ubuntu ) " ) ; <nl> - # endif <nl> - <nl> - / / for special features . <nl> - # ifndef SRS_PERF_MERGED_READ <nl> - srs_warn ( " MR ( merged - read ) is disabled , hurts read performance . 
@ see % s " , RTMP_SIG_SRS_ISSUES ( 241 ) ) ; <nl> - # endif <nl> - <nl> - srs_trace ( " writev limits write % d iovs a time " , sysconf ( _SC_IOV_MAX ) ) ; <nl> - <nl> - # if VERSION_MAJOR > VERSION_STABLE <nl> - # warning " current branch is not stable , please use stable branch instead . " <nl> - srs_warn ( " SRS % s is not stable , please use stable branch % s instead " , RTMP_SIG_SRS_VERSION , VERSION_STABLE_BRANCH ) ; <nl> - # endif <nl> + SRS_PERF_MW_SLEEP , possible_mr_latency , SRS_PERF_PLAY_QUEUE * 1000 ) ; <nl> <nl> # ifdef SRS_AUTO_MEM_WATCH <nl> # warning " srs memory watcher will hurts performance . user should kill by SIGTERM or init . d script . " <nl> srs_warn ( " srs memory watcher will hurts performance . user should kill by SIGTERM or init . d script . " ) ; <nl> # endif <nl> - <nl> + <nl> # if defined ( SRS_AUTO_STREAM_CASTER ) <nl> # warning " stream caster is experiment feature . " <nl> srs_warn ( " stream caster is experiment feature . " ) ; <nl> # endif <nl> - <nl> - # if defined ( SRS_PERF_SO_SNDBUF_SIZE ) & & ! defined ( SRS_PERF_MW_SO_SNDBUF ) <nl> - # error " SRS_PERF_SO_SNDBUF_SIZE depends on SRS_PERF_MW_SO_SNDBUF " <nl> + <nl> + # if VERSION_MAJOR > VERSION_STABLE <nl> + # warning " current branch is not stable , please use stable branch instead . " <nl> + srs_warn ( " SRS % s is not stable , please use stable branch % s instead " , RTMP_SIG_SRS_VERSION , VERSION_STABLE_BRANCH ) ; <nl> # endif <nl> <nl> - # ifndef SRS_OSX <nl> - # if defined ( __amd64__ ) <nl> - srs_trace ( " cpu is amd64 , glibc % d . % d " , ( int ) __GLIBC__ , ( int ) __GLIBC_MINOR__ ) ; <nl> - # endif <nl> - # if defined ( __x86_64__ ) <nl> - srs_trace ( " cpu is x86_64 , glibc % d . % d " , ( int ) __GLIBC__ , ( int ) __GLIBC_MINOR__ ) ; <nl> - # endif <nl> - # if defined ( __i386__ ) <nl> - srs_trace ( " cpu is i386 , glibc % d . % d " , ( int ) __GLIBC__ , ( int ) __GLIBC_MINOR__ ) ; <nl> - # endif <nl> - # if defined ( __arm__ ) <nl> - srs_trace ( " cpu is arm , glibc % d . % d " , ( int ) __GLIBC__ , ( int ) __GLIBC_MINOR__ ) ; <nl> - # endif <nl> + # if defined ( SRS_PERF_SO_SNDBUF_SIZE ) & & ! defined ( SRS_PERF_MW_SO_SNDBUF ) <nl> + # error " SRS_PERF_SO_SNDBUF_SIZE depends on SRS_PERF_MW_SO_SNDBUF " <nl> # endif <nl> } <nl> <nl> int main ( int argc , char * * argv ) <nl> if ( ( ret = _srs_log - > initialize ( ) ) ! = ERROR_SUCCESS ) { <nl> return ret ; <nl> } <nl> + <nl> + / / config already applied to log . <nl> + srs_trace ( " srs ( simple - rtmp - server ) " RTMP_SIG_SRS_VERSION " , stable is " RTMP_SIG_SRS_PRIMARY ) ; <nl> + srs_trace ( " license : " RTMP_SIG_SRS_LICENSE " , " RTMP_SIG_SRS_COPYRIGHT ) ; <nl> + srs_trace ( " authors : " RTMP_SIG_SRS_AUTHROS ) ; <nl> + srs_trace ( " contributors : " SRS_AUTO_CONSTRIBUTORS ) ; <nl> + srs_trace ( " build : % s , configure : % s , uname : % s " , SRS_AUTO_BUILD_DATE , SRS_AUTO_USER_CONFIGURE , SRS_AUTO_UNAME ) ; <nl> + srs_trace ( " configure detail : " SRS_AUTO_CONFIGURE ) ; <nl> + # ifdef SRS_AUTO_EMBEDED_TOOL_CHAIN <nl> + srs_trace ( " crossbuild tool chain : " SRS_AUTO_EMBEDED_TOOL_CHAIN ) ; <nl> + # endif <nl> <nl> / / we check the config when the log initialized . <nl> if ( ( ret = _srs_config - > check_config ( ) ) ! 
= ERROR_SUCCESS ) { <nl> return ret ; <nl> } <nl> <nl> - srs_trace ( " srs ( simple - rtmp - server ) " RTMP_SIG_SRS_VERSION ) ; <nl> - srs_trace ( " license : " RTMP_SIG_SRS_LICENSE " , " RTMP_SIG_SRS_COPYRIGHT ) ; <nl> - srs_trace ( " primary / master : " RTMP_SIG_SRS_PRIMARY ) ; <nl> - srs_trace ( " authors : " RTMP_SIG_SRS_AUTHROS ) ; <nl> - srs_trace ( " contributors : " SRS_AUTO_CONSTRIBUTORS ) ; <nl> - srs_trace ( " uname : " SRS_AUTO_UNAME ) ; <nl> - srs_trace ( " build : % s , % s " , SRS_AUTO_BUILD_DATE , srs_is_little_endian ( ) ? " little - endian " : " big - endian " ) ; <nl> - srs_trace ( " configure : " SRS_AUTO_USER_CONFIGURE ) ; <nl> - srs_trace ( " features : " SRS_AUTO_CONFIGURE ) ; <nl> - # ifdef SRS_AUTO_ARM_UBUNTU12 <nl> - srs_trace ( " arm tool chain : " SRS_AUTO_EMBEDED_TOOL_CHAIN ) ; <nl> - # endif <nl> - srs_trace ( " conf : % s , limit : % d " , _srs_config - > config ( ) . c_str ( ) , _srs_config - > get_max_connections ( ) ) ; <nl> - <nl> / / features <nl> - check_macro_features ( ) ; <nl> show_macro_features ( ) ; <nl> <nl> / * * <nl> mmm a / trunk / src / protocol / srs_protocol_io . hpp <nl> ppp b / trunk / src / protocol / srs_protocol_io . hpp <nl> CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE . <nl> | IBufferReader | | IStatistic | | IBufferWriter | <nl> + mmmmmmmmmmmmmmm + + mmmmmmmmmmmmmmmmmm - - + + mmmmmmmmmmmmmmm + <nl> | + read ( ) | | + get_recv_bytes ( ) | | + write ( ) | <nl> - + mmmmmm + mmmmmm - - + | + get_recv_bytes ( ) | | + writev ( ) | <nl> + + mmmmmm + mmmmmm - - + | + get_send_bytes ( ) | | + writev ( ) | <nl> / \ + mmm + mmmmmmmmmmmm - - + - + + mmmmmm - + mmmmmm - + <nl> | / \ / \ / \ <nl> | | | | <nl> mmm a / trunk / src / utest / srs_utest_core . cpp <nl> ppp b / trunk / src / utest / srs_utest_core . cpp <nl> VOID TEST ( CoreMacroseTest , Check ) <nl> # ifndef SRS_AUTO_CONFIGURE <nl> EXPECT_TRUE ( false ) ; <nl> # endif <nl> - # ifndef SRS_AUTO_EMBEDED_TOOL_CHAIN <nl> - EXPECT_TRUE ( false ) ; <nl> - # endif <nl> # ifndef SRS_AUTO_PREFIX <nl> EXPECT_TRUE ( false ) ; <nl> # endif <nl>
|
refine the startup logs.
|
ossrs/srs
|
f187a7deef4a885a2e5fec58352d84188a4cc895
|
2015-09-24T04:15:12Z
|
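The SRS change above replaces repetitive `echo "#define ..."` blocks with `srs_define_macro` / `srs_undefine_macro` helpers that also emit a `_BOOL` companion macro, which is what lets `show_macro_features()` print every feature through `srs_bool2switch(SRS_AUTO_X_BOOL)` instead of a wall of `#ifdef`s. A rough Python sketch of that header-generation idea (hypothetical file and macro names):

```python
def define_macro(name, enabled, lines):
    """Emit both the classic #define and a _BOOL macro usable in expressions."""
    if enabled:
        lines.append(f"#define {name}")
        lines.append(f"#define {name}_BOOL true")
    else:
        lines.append(f"#undef {name}")
        lines.append(f"#define {name}_BOOL false")

header = []
define_macro("SRS_AUTO_HLS", True, header)
define_macro("SRS_AUTO_KAFKA", False, header)
print("\n".join(header))
# #define SRS_AUTO_HLS
# #define SRS_AUTO_HLS_BOOL true
# #undef SRS_AUTO_KAFKA
# #define SRS_AUTO_KAFKA_BOOL false
```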
mmm a / fdbserver / DataDistribution . actor . cpp <nl> ppp b / fdbserver / DataDistribution . actor . cpp <nl> ACTOR Future < Void > teamTracker ( DDTeamCollection * self , Reference < IDataDistribut <nl> <nl> for ( int i = 0 ; i < shards . size ( ) ; i + + ) { <nl> int maxPriority = team - > getPriority ( ) ; <nl> - auto teams = self - > shardsAffectedByTeamFailure - > getTeamsFor ( shards [ i ] ) ; <nl> - for ( int t = 0 ; t < teams . size ( ) ; t + + ) { <nl> - if ( teams [ t ] . servers . size ( ) & & self - > server_info . count ( teams [ t ] . servers [ 0 ] ) ) { <nl> - auto & info = self - > server_info [ teams [ t ] . servers [ 0 ] ] ; <nl> - <nl> - bool found = false ; <nl> - for ( int i = 0 ; i < info - > teams . size ( ) ; i + + ) { <nl> - if ( info - > teams [ i ] - > serverIDs = = teams [ t ] . servers ) { <nl> - maxPriority = std : : max ( maxPriority , info - > teams [ i ] - > getPriority ( ) ) ; <nl> - found = true ; <nl> - break ; <nl> + if ( maxPriority < PRIORITY_TEAM_0_LEFT ) { <nl> + auto teams = self - > shardsAffectedByTeamFailure - > getTeamsFor ( shards [ i ] ) ; <nl> + for ( int t = 0 ; t < teams . size ( ) ; t + + ) { <nl> + if ( teams [ t ] . servers . size ( ) & & self - > server_info . count ( teams [ t ] . servers [ 0 ] ) ) { <nl> + auto & info = self - > server_info [ teams [ t ] . servers [ 0 ] ] ; <nl> + <nl> + bool found = false ; <nl> + for ( int i = 0 ; i < info - > teams . size ( ) ; i + + ) { <nl> + if ( info - > teams [ i ] - > serverIDs = = teams [ t ] . servers ) { <nl> + maxPriority = std : : max ( maxPriority , info - > teams [ i ] - > getPriority ( ) ) ; <nl> + found = true ; <nl> + break ; <nl> + } <nl> } <nl> - } <nl> <nl> - TEST ( ! found ) ; / / A removed team is still associated with a shard in SABTF <nl> - } else { <nl> - TEST ( teams [ t ] . servers . size ( ) ) ; / / A removed server is still associated with a team in SABTF <nl> + TEST ( ! found ) ; / / A removed team is still associated with a shard in SABTF <nl> + } else { <nl> + TEST ( teams [ t ] . servers . size ( ) ) ; / / A removed server is still associated with a team in SABTF <nl> + } <nl> } <nl> } <nl> <nl> ACTOR Future < Void > dataDistribution ( <nl> ASSERT ( configuration . storageTeamSize > 0 ) ; <nl> <nl> state PromiseStream < RelocateShard > output ; <nl> + state PromiseStream < RelocateShard > input ; <nl> state PromiseStream < Promise < int64_t > > getAverageShardBytes ; <nl> state PromiseStream < GetMetricsRequest > getShardMetrics ; <nl> state Reference < AsyncVar < bool > > processingUnhealthy ( new AsyncVar < bool > ( false ) ) ; <nl> ACTOR Future < Void > dataDistribution ( <nl> } <nl> <nl> Reference < ShardsAffectedByTeamFailure > shardsAffectedByTeamFailure ( new ShardsAffectedByTeamFailure ) ; <nl> + actors . push_back ( yieldPromiseStream ( output . getFuture ( ) , input ) ) ; <nl> <nl> for ( int s = 0 ; s < initData - > shards . size ( ) - 1 ; s + + ) { <nl> KeyRangeRef keys = KeyRangeRef ( initData - > shards [ s ] . key , initData - > shards [ s + 1 ] . key ) ; <nl> ACTOR Future < Void > dataDistribution ( <nl> } <nl> <nl> actors . push_back ( pollMoveKeysLock ( cx , lock ) ) ; <nl> - actors . push_back ( reportErrorsExcept ( dataDistributionTracker ( initData , cx , output , getShardMetrics , getAverageShardBytes . getFuture ( ) , readyToStart , anyZeroHealthyTeams , mi . id ( ) ) , " DDTracker " , mi . id ( ) , & normalDDQueueErrors ( ) ) ) ; <nl> - actors . 
push_back ( reportErrorsExcept ( dataDistributionQueue ( cx , output , getShardMetrics , processingUnhealthy , tcis , shardsAffectedByTeamFailure , lock , getAverageShardBytes , mi , storageTeamSize , lastLimited , recoveryCommitVersion ) , " DDQueue " , mi . id ( ) , & normalDDQueueErrors ( ) ) ) ; <nl> + actors . push_back ( reportErrorsExcept ( dataDistributionTracker ( initData , cx , output , shardsAffectedByTeamFailure , getShardMetrics , getAverageShardBytes . getFuture ( ) , readyToStart , anyZeroHealthyTeams , mi . id ( ) ) , " DDTracker " , mi . id ( ) , & normalDDQueueErrors ( ) ) ) ; <nl> + actors . push_back ( reportErrorsExcept ( dataDistributionQueue ( cx , output , input . getFuture ( ) , getShardMetrics , processingUnhealthy , tcis , shardsAffectedByTeamFailure , lock , getAverageShardBytes , mi , storageTeamSize , lastLimited , recoveryCommitVersion ) , " DDQueue " , mi . id ( ) , & normalDDQueueErrors ( ) ) ) ; <nl> actors . push_back ( reportErrorsExcept ( dataDistributionTeamCollection ( initData , tcis [ 0 ] , cx , db , shardsAffectedByTeamFailure , lock , output , mi . id ( ) , configuration , primaryDcId , configuration . usableRegions > 1 ? remoteDcIds : std : : vector < Optional < Key > > ( ) , serverChanges , readyToStart . getFuture ( ) , zeroHealthyTeams [ 0 ] , true , processingUnhealthy ) , " DDTeamCollectionPrimary " , mi . id ( ) , & normalDDQueueErrors ( ) ) ) ; <nl> if ( configuration . usableRegions > 1 ) { <nl> actors . push_back ( reportErrorsExcept ( dataDistributionTeamCollection ( initData , tcis [ 1 ] , cx , db , shardsAffectedByTeamFailure , lock , output , mi . id ( ) , configuration , remoteDcIds , Optional < std : : vector < Optional < Key > > > ( ) , Optional < PromiseStream < std : : pair < UID , Optional < StorageServerInterface > > > > ( ) , readyToStart . getFuture ( ) & & remoteRecovered , zeroHealthyTeams [ 1 ] , false , processingUnhealthy ) , " DDTeamCollectionSecondary " , mi . id ( ) , & normalDDQueueErrors ( ) ) ) ; <nl> mmm a / fdbserver / DataDistribution . h <nl> ppp b / fdbserver / DataDistribution . h <nl> Future < Void > dataDistributionTracker ( <nl> Reference < InitialDataDistribution > const & initData , <nl> Database const & cx , <nl> PromiseStream < RelocateShard > const & output , <nl> + Reference < ShardsAffectedByTeamFailure > const & shardsAffectedByTeamFailure , <nl> PromiseStream < GetMetricsRequest > const & getShardMetrics , <nl> FutureStream < Promise < int64_t > > const & getAverageShardBytes , <nl> Promise < Void > const & readyToStart , <nl> Future < Void > dataDistributionTracker ( <nl> <nl> Future < Void > dataDistributionQueue ( <nl> Database const & cx , <nl> - PromiseStream < RelocateShard > const & input , <nl> + PromiseStream < RelocateShard > const & output , <nl> + FutureStream < RelocateShard > const & input , <nl> PromiseStream < GetMetricsRequest > const & getShardMetrics , <nl> Reference < AsyncVar < bool > > const & processingUnhealthy , <nl> vector < TeamCollectionInterface > const & teamCollection , <nl> struct ShardSizeBounds { <nl> ShardSizeBounds getShardSizeBounds ( KeyRangeRef shard , int64_t maxShardSize ) ; <nl> <nl> / / Determines the maximum shard size based on the size of the database <nl> - int64_t getMaxShardSize ( double dbSizeEstimate ) ; <nl> \ No newline at end of file <nl> + int64_t getMaxShardSize ( double dbSizeEstimate ) ; <nl> mmm a / fdbserver / DataDistributionQueue . actor . cpp <nl> ppp b / fdbserver / DataDistributionQueue . actor . 
cpp <nl> struct DDQueueData { <nl> PromiseStream < RelocateData > relocationComplete ; <nl> PromiseStream < RelocateData > fetchSourceServersComplete ; <nl> <nl> - PromiseStream < RelocateShard > input ; <nl> + PromiseStream < RelocateShard > output ; <nl> + FutureStream < RelocateShard > input ; <nl> PromiseStream < GetMetricsRequest > getShardMetrics ; <nl> <nl> double * lastLimited ; <nl> struct DDQueueData { <nl> <nl> DDQueueData ( MasterInterface mi , MoveKeysLock lock , Database cx , std : : vector < TeamCollectionInterface > teamCollections , <nl> Reference < ShardsAffectedByTeamFailure > sABTF , PromiseStream < Promise < int64_t > > getAverageShardBytes , <nl> - int teamSize , PromiseStream < RelocateShard > input , PromiseStream < GetMetricsRequest > getShardMetrics , double * lastLimited , Version recoveryVersion ) : <nl> + int teamSize , PromiseStream < RelocateShard > output , FutureStream < RelocateShard > input , PromiseStream < GetMetricsRequest > getShardMetrics , double * lastLimited , Version recoveryVersion ) : <nl> activeRelocations ( 0 ) , queuedRelocations ( 0 ) , bytesWritten ( 0 ) , teamCollections ( teamCollections ) , <nl> shardsAffectedByTeamFailure ( sABTF ) , getAverageShardBytes ( getAverageShardBytes ) , mi ( mi ) , lock ( lock ) , <nl> - cx ( cx ) , teamSize ( teamSize ) , input ( input ) , getShardMetrics ( getShardMetrics ) , startMoveKeysParallelismLock ( SERVER_KNOBS - > DD_MOVE_KEYS_PARALLELISM ) , <nl> + cx ( cx ) , teamSize ( teamSize ) , output ( output ) , input ( input ) , getShardMetrics ( getShardMetrics ) , startMoveKeysParallelismLock ( SERVER_KNOBS - > DD_MOVE_KEYS_PARALLELISM ) , <nl> finishMoveKeysParallelismLock ( SERVER_KNOBS - > DD_MOVE_KEYS_PARALLELISM ) , lastLimited ( lastLimited ) , recoveryVersion ( recoveryVersion ) , <nl> suppressIntervals ( 0 ) , lastInterval ( 0 ) , unhealthyRelocations ( 0 ) , rawProcessingUnhealthy ( new AsyncVar < bool > ( false ) ) { } <nl> <nl> struct DDQueueData { <nl> <nl> / / This function cannot handle relocation requests which split a shard into three pieces <nl> void queueRelocation ( RelocateData rd , std : : set < UID > & serversToLaunchFrom ) { <nl> - / / Update sabtf for changes from DDTracker <nl> - if ( rd . changesBoundaries ( ) ) <nl> - shardsAffectedByTeamFailure - > defineShard ( rd . keys ) ; <nl> - <nl> / / TraceEvent ( " QueueRelocationBegin " ) . detail ( " Begin " , printable ( rd . keys . begin ) ) . detail ( " End " , printable ( rd . keys . end ) ) ; <nl> <nl> / / remove all items from both queues that are fully contained in the new relocation ( i . e . will be overwritten ) <nl> ACTOR Future < bool > rebalanceTeams ( DDQueueData * self , int priority , Reference < ID <nl> . detail ( " SourceTeam " , sourceTeam - > getDesc ( ) ) <nl> . detail ( " DestTeam " , destTeam - > getDesc ( ) ) ; <nl> <nl> - self - > input . send ( RelocateShard ( moveShard , priority ) ) ; <nl> + self - > output . 
send ( RelocateShard ( moveShard , priority ) ) ; <nl> return true ; <nl> } <nl> } <nl> ACTOR Future < Void > BgDDValleyFiller ( DDQueueData * self , int teamCollectionIndex ) <nl> <nl> ACTOR Future < Void > dataDistributionQueue ( <nl> Database cx , <nl> - PromiseStream < RelocateShard > input , <nl> + PromiseStream < RelocateShard > output , <nl> + FutureStream < RelocateShard > input , <nl> PromiseStream < GetMetricsRequest > getShardMetrics , <nl> Reference < AsyncVar < bool > > processingUnhealthy , <nl> std : : vector < TeamCollectionInterface > teamCollections , <nl> ACTOR Future < Void > dataDistributionQueue ( <nl> double * lastLimited , <nl> Version recoveryVersion ) <nl> { <nl> - state DDQueueData self ( mi , lock , cx , teamCollections , shardsAffectedByTeamFailure , getAverageShardBytes , teamSize , input , getShardMetrics , lastLimited , recoveryVersion ) ; <nl> + state DDQueueData self ( mi , lock , cx , teamCollections , shardsAffectedByTeamFailure , getAverageShardBytes , teamSize , output , input , getShardMetrics , lastLimited , recoveryVersion ) ; <nl> state std : : set < UID > serversToLaunchFrom ; <nl> state KeyRange keysToLaunchFrom ; <nl> state RelocateData launchData ; <nl> ACTOR Future < Void > dataDistributionQueue ( <nl> ASSERT ( launchData . startTime = = - 1 & & keysToLaunchFrom . empty ( ) ) ; <nl> <nl> choose { <nl> - when ( RelocateShard rs = waitNext ( self . input . getFuture ( ) ) ) { <nl> + when ( RelocateShard rs = waitNext ( self . input ) ) { <nl> bool wasEmpty = serversToLaunchFrom . empty ( ) ; <nl> self . queueRelocation ( rs , serversToLaunchFrom ) ; <nl> if ( wasEmpty & & ! serversToLaunchFrom . empty ( ) ) <nl> mmm a / fdbserver / DataDistributionTracker . actor . cpp <nl> ppp b / fdbserver / DataDistributionTracker . actor . cpp <nl> struct DataDistributionTracker { <nl> <nl> / / CapacityTracker <nl> PromiseStream < RelocateShard > output ; <nl> + Reference < ShardsAffectedByTeamFailure > shardsAffectedByTeamFailure ; <nl> <nl> Promise < Void > readyToStart ; <nl> Reference < AsyncVar < bool > > anyZeroHealthyTeams ; <nl> <nl> - DataDistributionTracker ( Database cx , UID masterId , Promise < Void > const & readyToStart , PromiseStream < RelocateShard > const & output , Reference < AsyncVar < bool > > anyZeroHealthyTeams ) <nl> + DataDistributionTracker ( Database cx , UID masterId , Promise < Void > const & readyToStart , PromiseStream < RelocateShard > const & output , Reference < ShardsAffectedByTeamFailure > shardsAffectedByTeamFailure , Reference < AsyncVar < bool > > anyZeroHealthyTeams ) <nl> : cx ( cx ) , masterId ( masterId ) , dbSizeEstimate ( new AsyncVar < int64_t > ( ) ) , <nl> maxShardSize ( new AsyncVar < Optional < int64_t > > ( ) ) , <nl> - sizeChanges ( false ) , readyToStart ( readyToStart ) , output ( output ) , anyZeroHealthyTeams ( anyZeroHealthyTeams ) { } <nl> + sizeChanges ( false ) , readyToStart ( readyToStart ) , output ( output ) , shardsAffectedByTeamFailure ( shardsAffectedByTeamFailure ) , anyZeroHealthyTeams ( anyZeroHealthyTeams ) { } <nl> <nl> ~ DataDistributionTracker ( ) <nl> { <nl> ACTOR Future < Void > shardSplitter ( <nl> for ( int i = numShards - 1 ; i > skipRange ; i - - ) <nl> restartShardTrackers ( self , KeyRangeRef ( splitKeys [ i ] , splitKeys [ i + 1 ] ) ) ; <nl> <nl> - for ( int i = 0 ; i < skipRange ; i + + ) <nl> - self - > output . 
send ( RelocateShard ( KeyRangeRef ( splitKeys [ i ] , splitKeys [ i + 1 ] ) , PRIORITY_SPLIT_SHARD ) ) ; <nl> - for ( int i = numShards - 1 ; i > skipRange ; i - - ) <nl> - self - > output . send ( RelocateShard ( KeyRangeRef ( splitKeys [ i ] , splitKeys [ i + 1 ] ) , PRIORITY_SPLIT_SHARD ) ) ; <nl> + for ( int i = 0 ; i < skipRange ; i + + ) { <nl> + KeyRangeRef r ( splitKeys [ i ] , splitKeys [ i + 1 ] ) ; <nl> + self - > shardsAffectedByTeamFailure - > defineShard ( r ) ; <nl> + self - > output . send ( RelocateShard ( r , PRIORITY_SPLIT_SHARD ) ) ; <nl> + } <nl> + for ( int i = numShards - 1 ; i > skipRange ; i - - ) { <nl> + KeyRangeRef r ( splitKeys [ i ] , splitKeys [ i + 1 ] ) ; <nl> + self - > shardsAffectedByTeamFailure - > defineShard ( r ) ; <nl> + self - > output . send ( RelocateShard ( r , PRIORITY_SPLIT_SHARD ) ) ; <nl> + } <nl> <nl> self - > sizeChanges . add ( changeSizes ( self , keys , shardSize - > get ( ) . get ( ) . bytes ) ) ; <nl> } else { <nl> Future < Void > shardMerger ( <nl> . detail ( " TrackerID " , trackerId ) ; <nl> <nl> restartShardTrackers ( self , mergeRange , endingStats ) ; <nl> + self - > shardsAffectedByTeamFailure - > defineShard ( mergeRange ) ; <nl> self - > output . send ( RelocateShard ( mergeRange , PRIORITY_MERGE_SHARD ) ) ; <nl> <nl> / / We are about to be cancelled by the call to restartShardTrackers <nl> ACTOR Future < Void > dataDistributionTracker ( <nl> Reference < InitialDataDistribution > initData , <nl> Database cx , <nl> PromiseStream < RelocateShard > output , <nl> + Reference < ShardsAffectedByTeamFailure > shardsAffectedByTeamFailure , <nl> PromiseStream < GetMetricsRequest > getShardMetrics , <nl> FutureStream < Promise < int64_t > > getAverageShardBytes , <nl> Promise < Void > readyToStart , <nl> Reference < AsyncVar < bool > > anyZeroHealthyTeams , <nl> UID masterId ) <nl> { <nl> - state DataDistributionTracker self ( cx , masterId , readyToStart , output , anyZeroHealthyTeams ) ; <nl> + state DataDistributionTracker self ( cx , masterId , readyToStart , output , shardsAffectedByTeamFailure , anyZeroHealthyTeams ) ; <nl> state Future < Void > loggingTrigger = Void ( ) ; <nl> try { <nl> Void _ = wait ( trackInitialShards ( & self , initData ) ) ; <nl>
|
prevented a slow task when too many shards were sent to the data distribution queue after switching to a fearless deployment
|
apple/foundationdb
|
6f02ea843a1646dabcf546a58694434c0740d5db
|
2018-08-09T19:37:46Z
|
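The FoundationDB commit above splits the relocation queue's endpoint in two: producers (the shard tracker and the rebalancers) get a send-only PromiseStream<RelocateShard> output, while the queue loop consumes a receive-only FutureStream<RelocateShard> input. The following is a minimal, hypothetical C++ sketch of that send-end/receive-end split using only the standard library rather than Flow; the Channel, Sender, and Receiver names are illustrative and are not FoundationDB APIs.

// Minimal sketch (not FoundationDB code): one shared queue exposed as a
// send-only handle for producers and a receive-only handle for the consumer,
// mirroring the output/input split in the commit above.
#include <condition_variable>
#include <iostream>
#include <memory>
#include <mutex>
#include <queue>
#include <string>
#include <utility>

struct RelocateShard {            // stand-in for the real request type
  std::string range;
  int priority;
};

struct Channel {                  // shared state behind both handles
  std::mutex m;
  std::condition_variable cv;
  std::queue<RelocateShard> q;
};

class Sender {                    // trackers / balancers hold only this
 public:
  explicit Sender(std::shared_ptr<Channel> c) : c_(std::move(c)) {}
  void send(RelocateShard rs) {
    { std::lock_guard<std::mutex> lk(c_->m); c_->q.push(std::move(rs)); }
    c_->cv.notify_one();
  }
 private:
  std::shared_ptr<Channel> c_;
};

class Receiver {                  // the relocation queue loop holds only this
 public:
  explicit Receiver(std::shared_ptr<Channel> c) : c_(std::move(c)) {}
  RelocateShard waitNext() {
    std::unique_lock<std::mutex> lk(c_->m);
    c_->cv.wait(lk, [&] { return !c_->q.empty(); });
    RelocateShard rs = std::move(c_->q.front());
    c_->q.pop();
    return rs;
  }
 private:
  std::shared_ptr<Channel> c_;
};

int main() {
  auto chan = std::make_shared<Channel>();
  Sender output(chan);            // handed to producers
  Receiver input(chan);           // handed to the consumer

  output.send({"[a, b)", 1});     // illustrative priorities, not real constants
  output.send({"[b, c)", 2});
  for (int i = 0; i < 2; ++i) {
    RelocateShard rs = input.waitNext();
    std::cout << "relocating " << rs.range << " at priority " << rs.priority << "\n";
  }
}

The same commit also moves the shardsAffectedByTeamFailure->defineShard() call from the queue into the tracker, so the shard boundary is registered by the producer before the relocation request is ever sent; the sketch above only illustrates the endpoint split, not that ordering change.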
mmm a / emcc <nl> ppp b / emcc <nl> try : <nl> else : <nl> return ' eliminate ' <nl> <nl> - js_optimizer_queue + = [ get_eliminate ( ) , ' simplifyExpressions ' ] <nl> + js_optimizer_queue + = [ get_eliminate ( ) ] <nl> + <nl> + if opt_level > = 2 : <nl> + js_optimizer_queue + = [ ' simplifyExpressions ' ] <nl> <nl> if closure and not shared . Settings . ASM_JS : <nl> flush_js_optimizer_queue ( ) <nl>
|
do not run simplifyExpressions in -O1
|
emscripten-core/emscripten
|
c1dd12998fd862010c7c11e6fa9d003521923a3c
|
2013-10-06T17:34:58Z
|
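The emcc change above keeps simplifyExpressions out of the -O1 pipeline by only appending it to the JS optimizer queue when the optimization level is at least 2, while the cheaper eliminate pass still runs at every level. Below is a hedged, generic C++ sketch of gating optimizer passes on an optimization level; the pass names follow the diff, but the structure is illustrative and is not Emscripten's actual API.

// Illustrative only: cheap passes always run, more expensive ones are gated
// on -O2 and above, as in the emcc change above.
#include <functional>
#include <iostream>
#include <string>
#include <vector>

struct Pass {
  std::string name;
  std::function<void()> run;
};

std::vector<Pass> buildJsOptimizerQueue(int optLevel) {
  std::vector<Pass> queue;
  queue.push_back({"eliminate", [] { /* variable / dead-code elimination */ }});
  if (optLevel >= 2) {
    // Only worth its compile-time cost at higher optimization levels.
    queue.push_back({"simplifyExpressions", [] { /* algebraic simplification */ }});
  }
  return queue;
}

int main() {
  for (int level : {1, 2}) {
    std::cout << "-O" << level << ":";
    for (const Pass& p : buildJsOptimizerQueue(level)) std::cout << ' ' << p.name;
    std::cout << '\n';
  }
}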
mmm a / cmake / templates / OpenCVConfig . cmake . in <nl> ppp b / cmake / templates / OpenCVConfig . cmake . in <nl> <nl> # If the module is found then OPENCV_ < MODULE > _FOUND is set to TRUE . <nl> # <nl> # This file will define the following variables : <nl> - # - OpenCV_FOUND : Set to TRUE is OpenCV was find_packaged before <nl> # - OpenCV_LIBS : The list of all imported targets for OpenCV modules . <nl> # - OpenCV_INCLUDE_DIRS : The OpenCV include directories . <nl> # - OpenCV_COMPUTE_CAPABILITIES : The version of compute capability <nl> <nl> # <nl> # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> <nl> - if ( OpenCV_FOUND ) <nl> - return ( ) <nl> - endif ( ) <nl> - set ( OpenCV_FOUND TRUE ) <nl> - <nl> set ( modules_file_suffix " " ) <nl> if ( ANDROID ) <nl> string ( REPLACE - _ modules_file_suffix " _ $ { ANDROID_NDK_ABI_NAME } " ) <nl> endif ( ) <nl> <nl> - include ( $ { CMAKE_CURRENT_LIST_DIR } / OpenCVModules $ { modules_file_suffix } . cmake ) <nl> + if ( NOT TARGET opencv_core ) <nl> + include ( $ { CMAKE_CURRENT_LIST_DIR } / OpenCVModules $ { modules_file_suffix } . cmake ) <nl> + endif ( ) <nl> <nl> # TODO All things below should be reviewed . What is about of moving this code into related modules ( special vars / hooks / files ) <nl> <nl>
|
cleaner fix
|
opencv/opencv
|
8e944cfe70679f2c605899d354606d09bec8ccb0
|
2013-11-14T10:45:42Z
|
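The OpenCV commit above replaces an early-return guard on OpenCV_FOUND with an include guarded by if(NOT TARGET opencv_core): repeated find_package(OpenCV) calls still refresh the output variables, but the imported targets are only defined once, and the check is against the targets themselves rather than a separate flag. The real fix is CMake; the C++ sketch below is only a loose analogy of that idempotent-configuration idea, with made-up names and paths.

// Loose analogy only (the actual fix is in OpenCVConfig.cmake): recompute the
// cheap, re-settable outputs on every call, and guard the one-time
// registration by checking the registry itself, not a separate "found" flag.
#include <iostream>
#include <map>
#include <string>
#include <vector>

std::map<std::string, std::string> g_targets;   // like imported CMake targets

struct Config {
  std::vector<std::string> libs;
  std::vector<std::string> include_dirs;
};

Config configure() {
  Config cfg;
  cfg.libs = {"opencv_core", "opencv_imgproc"};        // always refreshed
  cfg.include_dirs = {"/illustrative/include/path"};
  if (g_targets.find("opencv_core") == g_targets.end()) {
    // Analogue of `if(NOT TARGET opencv_core) include(OpenCVModules...)`:
    // define the targets only the first time through.
    for (const std::string& lib : cfg.libs)
      g_targets[lib] = "/illustrative/lib/lib" + lib + ".so";
  }
  return cfg;
}

int main() {
  configure();
  Config cfg = configure();                             // safe to call twice
  std::cout << cfg.libs.size() << " libs, "
            << g_targets.size() << " imported targets\n";
}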
deleted file mode 100644 <nl> index 36599059c7 . . 0000000000 <nl> mmm a / tests / fast / ReportConflictingKeys . txt <nl> ppp / dev / null <nl> <nl> - testTitle = ReportConflictingKeysTest <nl> - testName = ReportConflictingKeys <nl> - testDuration = 20 . 0 <nl> - nodeCount = 10000 <nl> - keyPrefix = RCK <nl> - keyBytes = 64 <nl> - readConflictRangeCountPerTx = 10 <nl> - writeConflictRangeCountPerTx = 10 <nl> - connectionFailuresDisableDuration = 100000 <nl> - buggify = off <nl> \ No newline at end of file <nl>
|
Remove ReportConflictingKeys.txt workload
|
apple/foundationdb
|
308d82245ce70775741e50de74f7bf674315ef86
|
2020-03-19T17:34:14Z
|
mmm a / api / API_VERSIONING . md <nl> ppp b / api / API_VERSIONING . md <nl> Envoy will support at most three major versions of any API package at all times : <nl> * The previous stable major version , e . g . v2 . This is needed to ensure that we provide at least 1 <nl> year for a supported major version to sunset . By supporting two stable major versions <nl> simultaneously , this makes it easier to coordinate control plane and Envoy <nl> - rollouts as well . This previous stable major version will be supported for 1 <nl> - year after the introduction of the new current stable major version . <nl> + rollouts as well . This previous stable major version will be supported for exactly 1 <nl> + year after the introduction of the new current stable major version , after which it will be <nl> + removed from the Envoy implementation . <nl> * Optionally , the next experimental alpha major version , e . g . v4alpha . This is a release candidate <nl> for the next stable major version . This is only generated when the current stable major version <nl> requires a breaking change at the next cycle , e . g . a deprecation or field rename . This release <nl> major version and support for ` envoy . config . bootstrap . v2 ` will be dropped from t <nl> implementation . Note that some transitively referenced package , e . g . <nl> ` envoy . config . filter . network . foo . v2 ` may remain at version 2 during this release , if no changes were <nl> made to the referenced package . If no major version is justified at this point , the decision to cut <nl> - v4 might occur at some point in 2021 or beyond . <nl> + v4 might occur at some point in 2021 or beyond , however v2 support will still be removed at the end <nl> + of 2020 . <nl> <nl> The implication of this API lifecycle and clock is that any deprecated feature in the Envoy API will <nl> retain implementation support for at least 1 - 2 years . <nl> new file mode 100644 <nl> index 00000000000 . . f9a2f977864 <nl> mmm / dev / null <nl> ppp b / docs / root / faq / api / envoy_v2_support . rst <nl> <nl> + How long will the v2 APIs be supported ? <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + <nl> + The v2 xDS APIs are deprecated and will be removed form Envoy at the end of 2020 , as per the <nl> + : repo : ` API versioning policy < / api / API_VERSIONING . md > ` . <nl> + <nl> mmm a / docs / root / faq / overview . rst <nl> ppp b / docs / root / faq / overview . rst <nl> API <nl> . . toctree : : <nl> : maxdepth : 2 <nl> <nl> + api / envoy_v2_support <nl> api / envoy_v3 <nl> api / envoy_upgrade_v3 <nl> api / extensions <nl>
|
api: clarify that v2 is removed at EOY 2020 regardless. ( )
|
envoyproxy/envoy
|
311aee4cbe2b8779070f2f0729dfa6ae4f2dc80e
|
2020-05-06T17:07:59Z
|
mmm a / benchmark / single - source / Ackermann . swift <nl> ppp b / benchmark / single - source / Ackermann . swift <nl> public func run_Ackermann ( _ N : Int ) { <nl> break <nl> } <nl> } <nl> - CheckResults ( result = = ref_result [ n ] , <nl> - " IncorrectResults in Ackermann : \ ( result ) ! = \ ( ref_result [ n ] ) . " ) <nl> + CheckResults ( result = = ref_result [ n ] ) <nl> } <nl> mmm a / benchmark / single - source / ArrayLiteral . swift <nl> ppp b / benchmark / single - source / ArrayLiteral . swift <nl> public func run_ArrayValueProp ( _ N : Int ) { <nl> res + = addLiteralArray ( ) <nl> res - = addLiteralArray ( ) <nl> } <nl> - CheckResults ( res = = 123 , " Wrong result in ArrayValueProp 123 ! = \ ( res ) " ) <nl> + CheckResults ( res = = 123 ) <nl> } <nl> <nl> <nl> public func run_ArrayValueProp2 ( _ N : Int ) { <nl> res + = addLiteralArray2 ( ) <nl> res - = addLiteralArray2 ( ) <nl> } <nl> - CheckResults ( res = = 123 , " Wrong result in ArrayValueProp 123 ! = \ ( res ) " ) <nl> + CheckResults ( res = = 123 ) <nl> } <nl> <nl> @ inline ( never ) <nl> public func run_ArrayValueProp3 ( _ N : Int ) { <nl> res + = addLiteralArray3 ( ) <nl> res - = addLiteralArray3 ( ) <nl> } <nl> - CheckResults ( res = = 123 , " Wrong result in ArrayValueProp 123 ! = \ ( res ) " ) <nl> + CheckResults ( res = = 123 ) <nl> } <nl> <nl> @ inline ( never ) <nl> public func run_ArrayValueProp4 ( _ N : Int ) { <nl> res + = addLiteralArray4 ( ) <nl> res - = addLiteralArray4 ( ) <nl> } <nl> - CheckResults ( res = = 123 , " Wrong result in ArrayValueProp 123 ! = \ ( res ) " ) <nl> + CheckResults ( res = = 123 ) <nl> } <nl> mmm a / benchmark / single - source / ArraySubscript . swift <nl> ppp b / benchmark / single - source / ArraySubscript . swift <nl> public func run_ArraySubscript ( _ N : Int ) { <nl> arrays [ i ] [ bound ( i ) ] = <nl> max ( arrays [ i - 1 ] [ bound ( i - 1 ) ] , arrays [ i ] [ bound ( i ) ] ) <nl> } <nl> - CheckResults ( arrays [ 0 ] [ 0 ] < = arrays [ numArrays - 1 ] [ bound ( numArrays - 1 ) ] , <nl> - " Incorrect results in QuickSort . " ) <nl> + CheckResults ( arrays [ 0 ] [ 0 ] < = arrays [ numArrays - 1 ] [ bound ( numArrays - 1 ) ] ) <nl> } <nl> mmm a / benchmark / single - source / BitCount . swift <nl> ppp b / benchmark / single - source / BitCount . swift <nl> func countBitSet ( _ num : Int ) - > Int { <nl> public func run_BitCount ( _ N : Int ) { <nl> for _ in 1 . . . 100 * N { <nl> / / Check some results . <nl> - CheckResults ( countBitSet ( 1 ) = = 1 , " Incorrect results in BitCount . " ) <nl> - CheckResults ( countBitSet ( 2 ) = = 1 , " Incorrect results in BitCount . " ) <nl> - CheckResults ( countBitSet ( 2457 ) = = 6 , " Incorrect results in BitCount . " ) <nl> + CheckResults ( countBitSet ( 1 ) = = 1 ) <nl> + CheckResults ( countBitSet ( 2 ) = = 1 ) <nl> + CheckResults ( countBitSet ( 2457 ) = = 6 ) <nl> } <nl> } <nl> mmm a / benchmark / single - source / ByteSwap . swift <nl> ppp b / benchmark / single - source / ByteSwap . swift <nl> func byteswap_logn ( _ a : UInt64 ) - > UInt64 { <nl> public func run_ByteSwap ( _ N : Int ) { <nl> for _ in 1 . . . 100 * N { <nl> / / Check some results . <nl> - CheckResults ( byteswap_logn ( byteswap_n ( 2457 ) ) = = 2457 , " Incorrect results in ByteSwap . " ) <nl> - CheckResults ( byteswap_logn ( byteswap_n ( 9129 ) ) = = 9129 , " Incorrect results in ByteSwap . " ) <nl> - CheckResults ( byteswap_logn ( byteswap_n ( 3333 ) ) = = 3333 , " Incorrect results in ByteSwap . 
" ) <nl> + CheckResults ( byteswap_logn ( byteswap_n ( 2457 ) ) = = 2457 ) <nl> + CheckResults ( byteswap_logn ( byteswap_n ( 9129 ) ) = = 9129 ) <nl> + CheckResults ( byteswap_logn ( byteswap_n ( 3333 ) ) = = 3333 ) <nl> } <nl> } <nl> mmm a / benchmark / single - source / CString . swift <nl> ppp b / benchmark / single - source / CString . swift <nl> public func run_CStringLongAscii ( _ N : Int ) { <nl> / / static string to c - > from c to String - > implicit conversion <nl> res & = strlen ( ascii . withCString ( String . init ( cString : ) ) ) <nl> } <nl> - CheckResults ( res = = 0 , " IncorrectResults in run_CStringLongAscii : \ ( res ) ! = \ ( refResult ) " ) <nl> + CheckResults ( res = = 0 ) <nl> } <nl> <nl> @ inline ( never ) <nl> public func run_CStringLongNonAscii ( _ N : Int ) { <nl> for _ in 1 . . . N * 500 { <nl> res & = strlen ( japanese . withCString ( String . init ( cString : ) ) ) <nl> } <nl> - CheckResults ( res = = 0 , " IncorrectResults in run_CStringLongAscii : \ ( res ) ! = \ ( refResult ) " ) <nl> + CheckResults ( res = = 0 ) <nl> } <nl> <nl> <nl> public func run_CStringShortAscii ( _ N : Int ) { <nl> } <nl> res = res & DoOneIter ( strings ) <nl> } <nl> - assert ( res = = reference , " IncorrectResults in StrToInt : \ ( res ) ! = \ ( reference ) " ) <nl> + assert ( res = = reference ) <nl> } <nl> <nl> mmm a / benchmark / single - source / Calculator . swift <nl> ppp b / benchmark / single - source / Calculator . swift <nl> public func run_Calculator ( _ N : Int ) { <nl> for _ in 1 . . . N * 5000 { <nl> c + = my_atoi_impl ( " 10 " ) <nl> } <nl> - CheckResults ( c = = 0 , " IncorrectResults in run_Calculator " ) <nl> + CheckResults ( c = = 0 ) <nl> } <nl> <nl> mmm a / benchmark / single - source / DeadArray . swift <nl> ppp b / benchmark / single - source / DeadArray . swift <nl> public func run_DeadArray ( _ N : Int ) { <nl> Count = 0 <nl> runLoop ( 0 , var2 : 0 ) <nl> } <nl> - CheckResults ( Count = = 100_000 , " Incorrect number of calls in loop " ) <nl> + CheckResults ( Count = = 100_000 ) <nl> } <nl> mmm a / benchmark / single - source / DictTest . swift <nl> ppp b / benchmark / single - source / DictTest . swift <nl> public func run_Dictionary ( scale : Int ) { <nl> Dict [ word ] = true <nl> } <nl> } <nl> - CheckResults ( Dict . count = = 270 , <nl> - " IncorrectResults in DictTest : \ ( Dict . count ) ! = 270 . " ) <nl> + CheckResults ( Dict . count = = 270 ) <nl> <nl> / / Check performance of searching in the dictionary : <nl> / / Fill the dictionary with words from the first half of the text <nl> public func run_Dictionary ( scale : Int ) { <nl> } <nl> } <nl> } <nl> - CheckResults ( count = = N * 541 , <nl> - " IncorrectResults in DictTest : \ ( count ) ! = \ ( N * 541 ) . " ) <nl> + CheckResults ( count = = N * 541 ) <nl> } <nl> <nl> class Box < T : Hashable > : Hashable { <nl> public func run_DictionaryOfObjects ( scale : Int ) { <nl> Dict [ Box ( word ) ] = Box ( true ) <nl> } <nl> } <nl> - CheckResults ( Dict . count = = 270 , <nl> - " IncorrectResults in DictTest : \ ( Dict . count ) ! = 270 . " ) <nl> + CheckResults ( Dict . count = = 270 ) <nl> <nl> / / Check performance of searching in the dictionary : <nl> / / Fill the dictionary with words from the first half of the text <nl> public func run_DictionaryOfObjects ( scale : Int ) { <nl> } <nl> } <nl> } <nl> - CheckResults ( count = = N * 541 , <nl> - " IncorrectResults in DictTestAllObjects : \ ( count ) ! = \ ( N * 541 ) . 
" ) <nl> + CheckResults ( count = = N * 541 ) <nl> } <nl> mmm a / benchmark / single - source / DictTest2 . swift <nl> ppp b / benchmark / single - source / DictTest2 . swift <nl> public func run_Dictionary2 ( _ N : Int ) { <nl> break <nl> } <nl> } <nl> - CheckResults ( res = = ref_result , " Incorrect results in Dictionary2 : \ ( res ) ! = \ ( ref_result ) " ) <nl> + CheckResults ( res = = ref_result ) <nl> } <nl> <nl> class Box < T : Hashable > : Hashable { <nl> public func run_Dictionary2OfObjects ( _ N : Int ) { <nl> break <nl> } <nl> } <nl> - CheckResults ( res = = ref_result , " Incorrect results in Dictionary2AllObjects : \ ( res ) ! = \ ( ref_result ) " ) <nl> + CheckResults ( res = = ref_result ) <nl> } <nl> mmm a / benchmark / single - source / DictTest3 . swift <nl> ppp b / benchmark / single - source / DictTest3 . swift <nl> public func run_Dictionary3 ( _ N : Int ) { <nl> break <nl> } <nl> } <nl> - CheckResults ( res = = ref_result , " Incorrect results in Dictionary3 : \ ( res ) ! = \ ( ref_result ) " ) <nl> + CheckResults ( res = = ref_result ) <nl> } <nl> <nl> class Box < T : Hashable > : Hashable { <nl> public func run_Dictionary3OfObjects ( _ N : Int ) { <nl> break <nl> } <nl> } <nl> - CheckResults ( res = = ref_result , " Incorrect results in Dictionary3OfObject : \ ( res ) ! = \ ( ref_result ) " ) <nl> + CheckResults ( res = = ref_result ) <nl> } <nl> mmm a / benchmark / single - source / DictionaryBridge . swift <nl> ppp b / benchmark / single - source / DictionaryBridge . swift <nl> class Thing : NSObject { <nl> <nl> required override init ( ) { <nl> let c = type ( of : self ) . col ( ) <nl> - CheckResults ( c ! . count = = 10 , " The rules of the universe apply " ) <nl> + CheckResults ( c ! . count = = 10 ) <nl> } <nl> <nl> private class func col ( ) - > [ String : AnyObject ] ? { <nl> mmm a / benchmark / single - source / DictionaryRemove . swift <nl> ppp b / benchmark / single - source / DictionaryRemove . swift <nl> public func run_DictionaryRemove ( _ N : Int ) { <nl> for i in 1 . . . size { <nl> dict [ i ] = i <nl> } <nl> - CheckResults ( dict . count = = size , <nl> - " Incorrect dict count : \ ( dict . count ) ! = \ ( size ) . " ) <nl> + CheckResults ( dict . count = = size ) <nl> <nl> var tmpDict = dict <nl> for _ in 1 . . . 1000 * N { <nl> public func run_DictionaryRemove ( _ N : Int ) { <nl> } <nl> } <nl> <nl> - CheckResults ( tmpDict . isEmpty , <nl> - " tmpDict should be empty : \ ( tmpDict . count ) ! = 0 . " ) <nl> + CheckResults ( tmpDict . isEmpty ) <nl> } <nl> <nl> class Box < T : Hashable > : Hashable { <nl> public func run_DictionaryRemoveOfObjects ( _ N : Int ) { <nl> for i in 1 . . . size { <nl> dict [ Box ( i ) ] = Box ( i ) <nl> } <nl> - CheckResults ( dict . count = = size , <nl> - " Incorrect dict count : \ ( dict . count ) ! = \ ( size ) . " ) <nl> + CheckResults ( dict . count = = size ) <nl> <nl> var tmpDict = dict <nl> for _ in 1 . . . 1000 * N { <nl> public func run_DictionaryRemoveOfObjects ( _ N : Int ) { <nl> } <nl> } <nl> <nl> - CheckResults ( tmpDict . isEmpty , <nl> - " tmpDict should be empty : \ ( tmpDict . count ) ! = 0 . " ) <nl> + CheckResults ( tmpDict . isEmpty ) <nl> } <nl> mmm a / benchmark / single - source / DictionarySwap . swift <nl> ppp b / benchmark / single - source / DictionarySwap . swift <nl> public func run_DictionarySwap ( _ N : Int ) { <nl> for i in 1 . . . size { <nl> dict [ i ] = i <nl> } <nl> - CheckResults ( dict . count = = size , <nl> - " Incorrect dict count : \ ( dict . count ) ! = \ ( size ) . 
" ) <nl> + CheckResults ( dict . count = = size ) <nl> <nl> var swapped = false <nl> for _ in 1 . . . 10000 * N { <nl> public func run_DictionarySwap ( _ N : Int ) { <nl> } <nl> } <nl> <nl> - CheckResults ( swappedCorrectly ( swapped , dict [ 25 ] ! , dict [ 75 ] ! ) , <nl> - " Dictionary value swap failed " ) <nl> + CheckResults ( swappedCorrectly ( swapped , dict [ 25 ] ! , dict [ 75 ] ! ) ) <nl> } <nl> <nl> / / Return true if correctly swapped , false otherwise <nl> public func run_DictionarySwapOfObjects ( _ N : Int ) { <nl> for i in 1 . . . size { <nl> dict [ Box ( i ) ] = Box ( i ) <nl> } <nl> - CheckResults ( dict . count = = size , <nl> - " Incorrect dict count : \ ( dict . count ) ! = \ ( size ) . " ) <nl> + CheckResults ( dict . count = = size ) <nl> <nl> var swapped = false <nl> for _ in 1 . . . 10000 * N { <nl> public func run_DictionarySwapOfObjects ( _ N : Int ) { <nl> } <nl> } <nl> <nl> - CheckResults ( swappedCorrectly ( swapped , dict [ Box ( 25 ) ] ! . value , dict [ Box ( 75 ) ] ! . value ) , <nl> - " Dictionary value swap failed " ) <nl> + CheckResults ( swappedCorrectly ( swapped , dict [ Box ( 25 ) ] ! . value , dict [ Box ( 75 ) ] ! . value ) ) <nl> } <nl> mmm a / benchmark / single - source / DropFirst . swift <nl> ppp b / benchmark / single - source / DropFirst . swift <nl> public func run_DropFirstCountableRange ( _ N : Int ) { <nl> for element in s . dropFirst ( dropCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in DropFirstCountableRange : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_DropFirstSequence ( _ N : Int ) { <nl> for element in s . dropFirst ( dropCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in DropFirstSequence : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_DropFirstAnySequence ( _ N : Int ) { <nl> for element in s . dropFirst ( dropCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in DropFirstAnySequence : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_DropFirstAnySeqCntRange ( _ N : Int ) { <nl> for element in s . dropFirst ( dropCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in DropFirstAnySeqCntRange : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_DropFirstAnySeqCRangeIter ( _ N : Int ) { <nl> for element in s . dropFirst ( dropCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in DropFirstAnySeqCRangeIter : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_DropFirstAnyCollection ( _ N : Int ) { <nl> for element in s . dropFirst ( dropCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in DropFirstAnyCollection : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_DropFirstArray ( _ N : Int ) { <nl> for element in s . 
dropFirst ( dropCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in DropFirstArray : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_DropFirstCountableRangeLazy ( _ N : Int ) { <nl> for element in s . dropFirst ( dropCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in DropFirstCountableRangeLazy : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_DropFirstSequenceLazy ( _ N : Int ) { <nl> for element in s . dropFirst ( dropCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in DropFirstSequenceLazy : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_DropFirstAnySequenceLazy ( _ N : Int ) { <nl> for element in s . dropFirst ( dropCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in DropFirstAnySequenceLazy : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_DropFirstAnySeqCntRangeLazy ( _ N : Int ) { <nl> for element in s . dropFirst ( dropCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in DropFirstAnySeqCntRangeLazy : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_DropFirstAnySeqCRangeIterLazy ( _ N : Int ) { <nl> for element in s . dropFirst ( dropCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in DropFirstAnySeqCRangeIterLazy : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_DropFirstAnyCollectionLazy ( _ N : Int ) { <nl> for element in s . dropFirst ( dropCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in DropFirstAnyCollectionLazy : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_DropFirstArrayLazy ( _ N : Int ) { <nl> for element in s . dropFirst ( dropCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in DropFirstArrayLazy : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> mmm a / benchmark / single - source / DropFirst . swift . gyb <nl> ppp b / benchmark / single - source / DropFirst . swift . gyb <nl> public func run_DropFirst $ { Name } ( _ N : Int ) { <nl> for element in s . dropFirst ( dropCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in DropFirst $ { Name } : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> % end <nl> mmm a / benchmark / single - source / DropLast . swift <nl> ppp b / benchmark / single - source / DropLast . swift <nl> public func run_DropLastCountableRange ( _ N : Int ) { <nl> for element in s . 
dropLast ( dropCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in DropLastCountableRange : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_DropLastSequence ( _ N : Int ) { <nl> for element in s . dropLast ( dropCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in DropLastSequence : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_DropLastAnySequence ( _ N : Int ) { <nl> for element in s . dropLast ( dropCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in DropLastAnySequence : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_DropLastAnySeqCntRange ( _ N : Int ) { <nl> for element in s . dropLast ( dropCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in DropLastAnySeqCntRange : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_DropLastAnySeqCRangeIter ( _ N : Int ) { <nl> for element in s . dropLast ( dropCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in DropLastAnySeqCRangeIter : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_DropLastAnyCollection ( _ N : Int ) { <nl> for element in s . dropLast ( dropCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in DropLastAnyCollection : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_DropLastArray ( _ N : Int ) { <nl> for element in s . dropLast ( dropCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in DropLastArray : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_DropLastCountableRangeLazy ( _ N : Int ) { <nl> for element in s . dropLast ( dropCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in DropLastCountableRangeLazy : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_DropLastSequenceLazy ( _ N : Int ) { <nl> for element in s . dropLast ( dropCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in DropLastSequenceLazy : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_DropLastAnySequenceLazy ( _ N : Int ) { <nl> for element in s . dropLast ( dropCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in DropLastAnySequenceLazy : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_DropLastAnySeqCntRangeLazy ( _ N : Int ) { <nl> for element in s . 
dropLast ( dropCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in DropLastAnySeqCntRangeLazy : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_DropLastAnySeqCRangeIterLazy ( _ N : Int ) { <nl> for element in s . dropLast ( dropCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in DropLastAnySeqCRangeIterLazy : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_DropLastAnyCollectionLazy ( _ N : Int ) { <nl> for element in s . dropLast ( dropCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in DropLastAnyCollectionLazy : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_DropLastArrayLazy ( _ N : Int ) { <nl> for element in s . dropLast ( dropCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in DropLastArrayLazy : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> mmm a / benchmark / single - source / DropLast . swift . gyb <nl> ppp b / benchmark / single - source / DropLast . swift . gyb <nl> public func run_DropLast $ { Name } ( _ N : Int ) { <nl> for element in s . dropLast ( dropCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in DropLast $ { Name } : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> % end <nl> mmm a / benchmark / single - source / DropWhile . swift <nl> ppp b / benchmark / single - source / DropWhile . swift <nl> public func run_DropWhileCountableRange ( _ N : Int ) { <nl> for element in s . drop ( while : { $ 0 < dropCount } ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in DropWhileCountableRange : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_DropWhileSequence ( _ N : Int ) { <nl> for element in s . drop ( while : { $ 0 < dropCount } ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in DropWhileSequence : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_DropWhileAnySequence ( _ N : Int ) { <nl> for element in s . drop ( while : { $ 0 < dropCount } ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in DropWhileAnySequence : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_DropWhileAnySeqCntRange ( _ N : Int ) { <nl> for element in s . drop ( while : { $ 0 < dropCount } ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in DropWhileAnySeqCntRange : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_DropWhileAnySeqCRangeIter ( _ N : Int ) { <nl> for element in s . 
drop ( while : { $ 0 < dropCount } ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in DropWhileAnySeqCRangeIter : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_DropWhileAnyCollection ( _ N : Int ) { <nl> for element in s . drop ( while : { $ 0 < dropCount } ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in DropWhileAnyCollection : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_DropWhileArray ( _ N : Int ) { <nl> for element in s . drop ( while : { $ 0 < dropCount } ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in DropWhileArray : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_DropWhileCountableRangeLazy ( _ N : Int ) { <nl> for element in s . drop ( while : { $ 0 < dropCount } ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in DropWhileCountableRangeLazy : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_DropWhileSequenceLazy ( _ N : Int ) { <nl> for element in s . drop ( while : { $ 0 < dropCount } ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in DropWhileSequenceLazy : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_DropWhileAnySequenceLazy ( _ N : Int ) { <nl> for element in s . drop ( while : { $ 0 < dropCount } ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in DropWhileAnySequenceLazy : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_DropWhileAnySeqCntRangeLazy ( _ N : Int ) { <nl> for element in s . drop ( while : { $ 0 < dropCount } ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in DropWhileAnySeqCntRangeLazy : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_DropWhileAnySeqCRangeIterLazy ( _ N : Int ) { <nl> for element in s . drop ( while : { $ 0 < dropCount } ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in DropWhileAnySeqCRangeIterLazy : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_DropWhileAnyCollectionLazy ( _ N : Int ) { <nl> for element in s . drop ( while : { $ 0 < dropCount } ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in DropWhileAnyCollectionLazy : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_DropWhileArrayLazy ( _ N : Int ) { <nl> for element in s . drop ( while : { $ 0 < dropCount } ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in DropWhileArrayLazy : \ ( result ) ! 
= \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> mmm a / benchmark / single - source / DropWhile . swift . gyb <nl> ppp b / benchmark / single - source / DropWhile . swift . gyb <nl> public func run_DropWhile $ { Name } ( _ N : Int ) { <nl> for element in s . drop ( while : { $ 0 < dropCount } ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in DropWhile $ { Name } : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> % end <nl> mmm a / benchmark / single - source / Fibonacci . swift <nl> ppp b / benchmark / single - source / Fibonacci . swift <nl> public func run_Fibonacci ( _ N : Int ) { <nl> break <nl> } <nl> } <nl> - CheckResults ( result = = ref_result , <nl> - " Incorrect results in Fibonacci : \ ( result ) ! = \ ( ref_result ) " ) <nl> + CheckResults ( result = = ref_result ) <nl> } <nl> mmm a / benchmark / single - source / Hash . swift <nl> ppp b / benchmark / single - source / Hash . swift <nl> public func run_HashTest ( _ N : Int ) { <nl> let MD = MD5 ( ) <nl> for ( K , V ) in TestMD5 { <nl> MD . update ( K ) <nl> - CheckResults ( MD . digest ( ) = = V , <nl> - " Incorrect result in Hash : check 1 failed . " ) <nl> + CheckResults ( MD . digest ( ) = = V ) <nl> MD . reset ( ) <nl> } <nl> <nl> public func run_HashTest ( _ N : Int ) { <nl> } <nl> let MD2 = MD5 ( ) <nl> MD2 . update ( L ) <nl> - CheckResults ( MD . digest ( ) = = MD2 . digest ( ) , <nl> - " Incorrect result in Hash : check 2 failed . " ) <nl> + CheckResults ( MD . digest ( ) = = MD2 . digest ( ) ) <nl> <nl> / / Test the famous MD5 collision from 2009 : http : / / www . mscs . dal . ca / ~ selinger / md5collision / <nl> let Src1 : [ UInt8 ] = <nl> public func run_HashTest ( _ N : Int ) { <nl> H2 . update ( Src2 ) <nl> let A1 = H1 . digest ( ) <nl> let A2 = H2 . digest ( ) <nl> - CheckResults ( A1 = = A2 , <nl> - " Incorrect result in Hash : check 3 failed . " ) <nl> - CheckResults ( A1 = = " 79054025255fb1a26e4bc422aef54eb4 " , <nl> - " Incorrect result in Hash : check 4 failed . " ) <nl> + CheckResults ( A1 = = A2 ) <nl> + CheckResults ( A1 = = " 79054025255fb1a26e4bc422aef54eb4 " ) <nl> H1 . reset ( ) <nl> H2 . reset ( ) <nl> <nl> public func run_HashTest ( _ N : Int ) { <nl> let SH256 = SHA256 ( ) <nl> for ( K , V ) in TestSHA1 { <nl> SH . update ( K ) <nl> - CheckResults ( SH . digest ( ) = = V , <nl> - " Incorrect result in Hash : check 5 failed . " ) <nl> + CheckResults ( SH . digest ( ) = = V ) <nl> SH . reset ( ) <nl> } <nl> <nl> for ( K , V ) in TestSHA256 { <nl> SH256 . update ( K ) <nl> - CheckResults ( SH256 . digest ( ) = = V , <nl> - " Incorrect result in Hash : check 5 failed . " ) <nl> + CheckResults ( SH256 . digest ( ) = = V ) <nl> SH256 . reset ( ) <nl> } <nl> <nl> public func run_HashTest ( _ N : Int ) { <nl> } <nl> let SH2 = SHA1 ( ) <nl> SH2 . update ( L ) <nl> - CheckResults ( SH . digest ( ) = = SH2 . digest ( ) , <nl> - " Incorrect result in Hash : check 5 failed . " ) <nl> + CheckResults ( SH . digest ( ) = = SH2 . digest ( ) ) <nl> } <nl> } <nl> mmm a / benchmark / single - source / HashQuadratic . swift <nl> ppp b / benchmark / single - source / HashQuadratic . 
swift <nl> public func run_HashQuadratic ( _ N : Int ) { <nl> dict2 [ k ] = v <nl> } <nl> <nl> - CheckResults ( dict1 [ size / 2 ] = = dict2 [ size / 2 ] , <nl> - " Incorrect results in HashQuadratic " ) <nl> + CheckResults ( dict1 [ size / 2 ] = = dict2 [ size / 2 ] ) <nl> } <nl> } <nl> mmm a / benchmark / single - source / Integrate . swift <nl> ppp b / benchmark / single - source / Integrate . swift <nl> public func run_Integrate ( _ N : Int ) { <nl> } <nl> } <nl> <nl> - CheckResults ( abs ( result - ref_result ) < bound , <nl> - " Incorrect results in Integrate : abs ( \ ( result ) - \ ( ref_result ) ) > \ ( bound ) " ) <nl> + CheckResults ( abs ( result - ref_result ) < bound ) <nl> } <nl> mmm a / benchmark / single - source / LazyFilter . swift <nl> ppp b / benchmark / single - source / LazyFilter . swift <nl> public func run_LazilyFilteredRange ( _ N : Int ) { <nl> res + = Array ( c ) . count <nl> res - = Array ( c ) . count <nl> } <nl> - CheckResults ( res = = 123 , " Wrong result in LazilyFilteredRange 123 ! = \ ( res ) " ) <nl> + CheckResults ( res = = 123 ) <nl> } <nl> <nl> @ inline ( never ) <nl> public func run_LazilyFilteredArrays ( _ N : Int ) { <nl> res + = Array ( c ) . count <nl> res - = Array ( c ) . count <nl> } <nl> - CheckResults ( res = = 123 , " Wrong result in LazilyFilteredArray 123 ! = \ ( res ) " ) <nl> + CheckResults ( res = = 123 ) <nl> } <nl> <nl> mmm a / benchmark / single - source / LinkedList . swift <nl> ppp b / benchmark / single - source / LinkedList . swift <nl> public func run_LinkedList ( _ N : Int ) { <nl> break <nl> } <nl> } <nl> - CheckResults ( sum = = ref_result , <nl> - " Incorrect results in LinkedList : \ ( sum ) ! = \ ( ref_result ) " ) <nl> + CheckResults ( sum = = ref_result ) <nl> } <nl> mmm a / benchmark / single - source / MapReduce . swift <nl> ppp b / benchmark / single - source / MapReduce . swift <nl> public func run_MapReduce ( _ N : Int ) { <nl> numbers = numbers . map { $ 0 & + 5 } <nl> c + = numbers . reduce ( 0 , & + ) <nl> } <nl> - CheckResults ( c ! = 0 , " IncorrectResults in MapReduce " ) <nl> + CheckResults ( c ! = 0 ) <nl> } <nl> <nl> @ inline ( never ) <nl> public func run_MapReduceAnyCollection ( _ N : Int ) { <nl> let mapped = numbers . map { $ 0 & + 5 } <nl> c + = mapped . reduce ( 0 , & + ) <nl> } <nl> - CheckResults ( c ! = 0 , " IncorrectResults in MapReduce " ) <nl> + CheckResults ( c ! = 0 ) <nl> } <nl> <nl> @ inline ( never ) <nl> public func run_MapReduceAnyCollectionShort ( _ N : Int ) { <nl> let mapped = numbers . map { $ 0 & + 5 } <nl> c + = mapped . reduce ( 0 , & + ) <nl> } <nl> - CheckResults ( c ! = 0 , " IncorrectResults in MapReduce " ) <nl> + CheckResults ( c ! = 0 ) <nl> } <nl> <nl> @ inline ( never ) <nl> public func run_MapReduceShort ( _ N : Int ) { <nl> numbers = numbers . map { $ 0 & + 5 } <nl> c + = numbers . reduce ( 0 , & + ) <nl> } <nl> - CheckResults ( c ! = 0 , " IncorrectResults in MapReduce " ) <nl> + CheckResults ( c ! = 0 ) <nl> } <nl> <nl> @ inline ( never ) <nl> public func run_MapReduceSequence ( _ N : Int ) { <nl> let mapped = numbers . map { $ 0 & + 5 } <nl> c + = mapped . reduce ( 0 , & + ) <nl> } <nl> - CheckResults ( c ! = 0 , " IncorrectResults in MapReduce " ) <nl> + CheckResults ( c ! = 0 ) <nl> } <nl> <nl> @ inline ( never ) <nl> public func run_MapReduceLazySequence ( _ N : Int ) { <nl> let mapped = numbers . lazy . map { $ 0 & + 5 } <nl> c + = mapped . reduce ( 0 , & + ) <nl> } <nl> - CheckResults ( c ! = 0 , " IncorrectResults in MapReduce " ) <nl> + CheckResults ( c ! 
= 0 ) <nl> } <nl> <nl> @ inline ( never ) <nl> public func run_MapReduceLazyCollection ( _ N : Int ) { <nl> let mapped = numbers . lazy . map { $ 0 & + 5 } <nl> c + = mapped . reduce ( 0 , & + ) <nl> } <nl> - CheckResults ( c ! = 0 , " IncorrectResults in MapReduce " ) <nl> + CheckResults ( c ! = 0 ) <nl> } <nl> <nl> @ inline ( never ) <nl> public func run_MapReduceLazyCollectionShort ( _ N : Int ) { <nl> let mapped = numbers . lazy . map { $ 0 & + 5 } <nl> c + = mapped . reduce ( 0 , & + ) <nl> } <nl> - CheckResults ( c ! = 0 , " IncorrectResults in MapReduce " ) <nl> + CheckResults ( c ! = 0 ) <nl> } <nl> <nl> @ inline ( never ) <nl> public func run_MapReduceString ( _ N : Int ) { <nl> for _ in 1 . . . N * 100 { <nl> c + = s . utf8 . map { UInt64 ( $ 0 & + 5 ) } . reduce ( 0 , & + ) <nl> } <nl> - CheckResults ( c ! = 0 , " IncorrectResults in MapReduce " ) <nl> + CheckResults ( c ! = 0 ) <nl> } <nl> <nl> @ inline ( never ) <nl> public func run_MapReduceShortString ( _ N : Int ) { <nl> for _ in 1 . . . N * 100 { <nl> c + = s . utf8 . map { UInt64 ( $ 0 & + 5 ) } . reduce ( 0 , & + ) <nl> } <nl> - CheckResults ( c ! = 0 , " IncorrectResults in MapReduce " ) <nl> + CheckResults ( c ! = 0 ) <nl> } <nl> <nl> @ inline ( never ) <nl> public func run_MapReduceClass ( _ N : Int ) { <nl> let mapped = numbers . map { $ 0 . intValue & + 5 } <nl> c + = mapped . reduce ( 0 , & + ) <nl> } <nl> - CheckResults ( c ! = 0 , " IncorrectResults in MapReduce " ) <nl> + CheckResults ( c ! = 0 ) <nl> # endif <nl> } <nl> <nl> public func run_MapReduceClassShort ( _ N : Int ) { <nl> let mapped = numbers . map { $ 0 . intValue & + 5 } <nl> c + = mapped . reduce ( 0 , & + ) <nl> } <nl> - CheckResults ( c ! = 0 , " IncorrectResults in MapReduce " ) <nl> + CheckResults ( c ! = 0 ) <nl> # endif <nl> } <nl> <nl> mmm a / benchmark / single - source / Memset . swift <nl> ppp b / benchmark / single - source / Memset . swift <nl> public func run_Memset ( _ N : Int ) { <nl> memset ( & a , 1 ) <nl> memset ( & a , 0 ) <nl> } <nl> - CheckResults ( a [ 87 ] = = 0 , " Incorrect result in Memset . " ) <nl> + CheckResults ( a [ 87 ] = = 0 ) <nl> } <nl> mmm a / benchmark / single - source / MonteCarloE . swift <nl> ppp b / benchmark / single - source / MonteCarloE . swift <nl> public func run_MonteCarloE ( scale : Int ) { <nl> let numEmptyIntervals = intervals . filter { ! $ 0 } . count <nl> / / If there are no empty intervals , then obviously the random generator is <nl> / / not ' random ' enough . <nl> - CheckResults ( numEmptyIntervals ! = N , <nl> - " Incorrect results in MonteCarloE : no empty intervals . " ) <nl> + CheckResults ( numEmptyIntervals ! = N ) <nl> let e_estimate = Double ( N ) / Double ( numEmptyIntervals ) <nl> let e = 2 . 71828 <nl> - CheckResults ( abs ( e_estimate - e ) < 0 . 1 , <nl> - " Incorrect results in MonteCarloE : e_estimate = = \ ( e_estimate ) " ) <nl> + CheckResults ( abs ( e_estimate - e ) < 0 . 1 ) <nl> } <nl> mmm a / benchmark / single - source / MonteCarloPi . swift <nl> ppp b / benchmark / single - source / MonteCarloPi . swift <nl> public func run_MonteCarloPi ( scale : Int ) { <nl> } <nl> let pi_estimate : Double = Double ( pointsInside ) * 4 . 0 / Double ( N ) <nl> let pi = 3 . 1415 <nl> - CheckResults ( abs ( pi_estimate - pi ) < 0 . 1 , <nl> - " Incorrect results in MonteCarloPi : pi_estimate = = \ ( pi_estimate ) " ) <nl> + CheckResults ( abs ( pi_estimate - pi ) < 0 . 1 ) <nl> } <nl> mmm a / benchmark / single - source / NSDictionaryCastToSwift . 
swift <nl> ppp b / benchmark / single - source / NSDictionaryCastToSwift . swift <nl> public func run_NSDictionaryCastToSwift ( _ N : Int ) { <nl> break <nl> } <nl> } <nl> - CheckResults ( swiftDict . isEmpty , <nl> - " Incorrect result in swiftDict . isEmpty : " + <nl> - " \ ( swiftDict . isEmpty ) ! = true \ n " ) <nl> + CheckResults ( swiftDict . isEmpty ) <nl> # endif <nl> } <nl> mmm a / benchmark / single - source / NopDeinit . swift <nl> ppp b / benchmark / single - source / NopDeinit . swift <nl> public func run_NopDeinit ( _ N : Int ) { <nl> let size = 500 <nl> for i in 1 . . . size { arr . append ( X ( i ) ) } <nl> arr . removeAll ( ) <nl> - CheckResults ( arr . count = = 0 , <nl> - " Incorrect results in NopDeinit : \ ( arr . count ) ! = 0 . " ) <nl> + CheckResults ( arr . count = = 0 ) <nl> } <nl> } <nl> mmm a / benchmark / single - source / ObjectAllocation . swift <nl> ppp b / benchmark / single - source / ObjectAllocation . swift <nl> public func run_ObjectAllocation ( _ N : Int ) { <nl> ArrayResult = testArray ( ) <nl> } <nl> <nl> - CheckResults ( SingleObjectResult = = 499500 , <nl> - " Incorrect results in testSingleObject " ) <nl> - CheckResults ( TreeResult = = 90000 , <nl> - " Incorrect results in testTree " ) <nl> - CheckResults ( ListResult = = 48375 , <nl> - " Incorrect results in testList " ) <nl> - CheckResults ( ArrayResult = = 3000 , <nl> - " Incorrect results in testArray " ) <nl> + CheckResults ( SingleObjectResult = = 499500 ) <nl> + CheckResults ( TreeResult = = 90000 ) <nl> + CheckResults ( ListResult = = 48375 ) <nl> + CheckResults ( ArrayResult = = 3000 ) <nl> } <nl> <nl> mmm a / benchmark / single - source / ObjectiveCBridging . swift <nl> ppp b / benchmark / single - source / ObjectiveCBridging . swift <nl> func testObjectiveCBridgeFromNSString ( ) { <nl> s = n ! <nl> } <nl> } <nl> - CheckResults ( s ! = nil & & s = = " NSString that does not fit in tagged pointer " , " Expected results did not match " ) <nl> + CheckResults ( s ! = nil & & s = = " NSString that does not fit in tagged pointer " ) <nl> } <nl> # endif <nl> <nl> func testObjectiveCBridgeFromNSStringForced ( ) { <nl> / / Call _forceBridgeFromObjectiveC <nl> s = forcedCast ( nsString ) <nl> } <nl> - CheckResults ( s ! = nil & & s = = " NSString that does not fit in tagged pointer " , " Expected results did not match " ) <nl> + CheckResults ( s ! = nil & & s = = " NSString that does not fit in tagged pointer " ) <nl> } <nl> # endif <nl> <nl> func testObjectiveCBridgeToNSString ( ) { <nl> / / Call _BridgedToObjectiveC <nl> s = nativeString as NSString <nl> } <nl> - CheckResults ( s ! = nil & & s = = " Native " , " Expected results did not match " ) <nl> + CheckResults ( s ! = nil & & s = = " Native " ) <nl> } <nl> # endif <nl> <nl> func testObjectiveCBridgeFromNSArrayAnyObject ( ) { <nl> nativeString = forcedCast ( nativeArray [ 0 ] ) <nl> } <nl> } <nl> - CheckResults ( nativeString ! = nil & & nativeString ! = = " NSString that does not fit in tagged pointer " , " Expected results did not match " ) <nl> + CheckResults ( nativeString ! = nil & & nativeString ! = = " NSString that does not fit in tagged pointer " ) <nl> } <nl> # endif <nl> <nl> func testObjectiveCBridgeFromNSArrayAnyObjectForced ( ) { <nl> let nativeArray : [ NSString ] = forcedCast ( nsArray ) <nl> nativeString = forcedCast ( nativeArray [ 0 ] ) <nl> } <nl> - CheckResults ( nativeString ! = nil & & nativeString ! 
= = " NSString that does not fit in tagged pointer " , " Expected results did not match " ) <nl> + CheckResults ( nativeString ! = nil & & nativeString ! = = " NSString that does not fit in tagged pointer " ) <nl> } <nl> # endif <nl> <nl> func testObjectiveCBridgeToNSArray ( ) { <nl> let nsArray = nativeArray as NSArray <nl> nsString = nsArray . object ( at : 0 ) <nl> } <nl> - CheckResults ( nsString ! = nil & & ( nsString ! as ! NSString ) . isEqual ( " abcde " ) , " Expected results did not match " ) <nl> + CheckResults ( nsString ! = nil & & ( nsString ! as ! NSString ) . isEqual ( " abcde " ) ) <nl> } <nl> # endif <nl> <nl> func testObjectiveCBridgeFromNSArrayAnyObjectToString ( ) { <nl> nativeString = nativeArray [ 0 ] <nl> } <nl> } <nl> - CheckResults ( nativeString ! = nil & & nativeString = = " NSString that does not fit in tagged pointer " , " Expected results did not match " ) <nl> + CheckResults ( nativeString ! = nil & & nativeString = = " NSString that does not fit in tagged pointer " ) <nl> } <nl> # endif <nl> <nl> func testObjectiveCBridgeFromNSArrayAnyObjectToStringForced ( ) { <nl> let nativeArray : [ String ] = forcedCast ( nsArray ) <nl> nativeString = nativeArray [ 0 ] <nl> } <nl> - CheckResults ( nativeString ! = nil & & nativeString = = " NSString that does not fit in tagged pointer " , " Expected results did not match " ) <nl> + CheckResults ( nativeString ! = nil & & nativeString = = " NSString that does not fit in tagged pointer " ) <nl> } <nl> # endif <nl> <nl> func testObjectiveCBridgeFromNSDictionaryAnyObject ( ) { <nl> nativeInt = forcedCast ( nativeDictionary [ nsString ] ) <nl> } <nl> } <nl> - CheckResults ( nativeInt ! = nil & & nativeInt = = 1 , " Expected results did not match " ) <nl> + CheckResults ( nativeInt ! = nil & & nativeInt = = 1 ) <nl> } <nl> # endif <nl> <nl> func testObjectiveCBridgeFromNSDictionaryAnyObjectForced ( ) { <nl> nativeInt = forcedCast ( nativeDictionary [ nsString ] ) <nl> } <nl> } <nl> - CheckResults ( nativeInt ! = nil & & nativeInt = = 1 , " Expected results did not match " ) <nl> + CheckResults ( nativeInt ! = nil & & nativeInt = = 1 ) <nl> } <nl> # endif <nl> <nl> func testObjectiveCBridgeToNSDictionary ( ) { <nl> let nsDict = nativeDictionary as NSDictionary <nl> nsNumber = nsDict . object ( forKey : key ) <nl> } <nl> - CheckResults ( nsNumber ! = nil & & ( nsNumber as ! Int ) = = 1 , " Expected results did not match " ) <nl> + CheckResults ( nsNumber ! = nil & & ( nsNumber as ! Int ) = = 1 ) <nl> } <nl> # endif <nl> <nl> func testObjectiveCBridgeFromNSDictionaryAnyObjectToString ( ) { <nl> nativeInt = nativeDictionary [ nativeString ] <nl> } <nl> } <nl> - CheckResults ( nativeInt ! = nil & & nativeInt = = 1 , " Expected results did not match " ) <nl> + CheckResults ( nativeInt ! = nil & & nativeInt = = 1 ) <nl> } <nl> # endif <nl> <nl> func testObjectiveCBridgeFromNSDictionaryAnyObjectToStringForced ( ) { <nl> nativeInt = nativeDictionary [ nativeString ] <nl> } <nl> } <nl> - CheckResults ( nativeInt ! = nil & & nativeInt = = 1 , " Expected results did not match " ) <nl> + CheckResults ( nativeInt ! = nil & & nativeInt = = 1 ) <nl> } <nl> # endif <nl> <nl> func testObjectiveCBridgeFromNSSetAnyObject ( ) { <nl> result = nativeSet . contains ( nsString ) <nl> } <nl> } <nl> - CheckResults ( result ! = nil & & result ! , " Expected results did not match " ) <nl> + CheckResults ( result ! = nil & & result ! ) <nl> } <nl> # endif <nl> <nl> func testObjectiveCBridgeFromNSSetAnyObjectForced ( ) { <nl> result = nativeSet . 
contains ( nsString ) <nl> } <nl> } <nl> - CheckResults ( result ! = nil & & result ! , " Expected results did not match " ) <nl> + CheckResults ( result ! = nil & & result ! ) <nl> } <nl> # endif <nl> <nl> func testObjectiveCBridgeToNSSet ( ) { <nl> let nsDict = nativeSet as NSSet <nl> nsString = nsDict . member ( key ) <nl> } <nl> - CheckResults ( nsString ! = nil & & ( nsString as ! String ) = = " abcde1 " , " Expected results did not match " ) <nl> + CheckResults ( nsString ! = nil & & ( nsString as ! String ) = = " abcde1 " ) <nl> } <nl> # endif <nl> <nl> func testObjectiveCBridgeFromNSSetAnyObjectToString ( ) { <nl> result = nativeSet . contains ( nativeString ) <nl> } <nl> } <nl> - CheckResults ( result ! = nil & & result ! , " Expected results did not match " ) <nl> + CheckResults ( result ! = nil & & result ! ) <nl> } <nl> # endif <nl> <nl> func testObjectiveCBridgeFromNSSetAnyObjectToStringForced ( ) { <nl> result = nativeSet . contains ( nativeString ) <nl> } <nl> } <nl> - CheckResults ( result ! = nil & & result ! , " Expected results did not match " ) <nl> + CheckResults ( result ! = nil & & result ! ) <nl> } <nl> # endif <nl> <nl> mmm a / benchmark / single - source / ObjectiveCBridgingStubs . swift <nl> ppp b / benchmark / single - source / ObjectiveCBridgingStubs . swift <nl> func testObjectiveCBridgeStubFromNSString ( ) { <nl> for _ in 0 . . < 10_000 { <nl> str = b . testToString ( ) <nl> } <nl> - CheckResults ( str ! = " " & & str = = " Default string value no tagged pointer " , " Wrong value returned " ) <nl> + CheckResults ( str ! = " " & & str = = " Default string value no tagged pointer " ) <nl> } <nl> # endif <nl> <nl> func testObjectiveCBridgeStubFromArrayOfNSString ( ) { <nl> arr = b . testToArrayOfStrings ( ) <nl> str = arr [ 0 ] <nl> } <nl> - CheckResults ( str ! = " " & & str = = " Default string value no tagged pointer " , " Wrong value returned " ) <nl> + CheckResults ( str ! = " " & & str = = " Default string value no tagged pointer " ) <nl> } <nl> # endif <nl> <nl> mmm a / benchmark / single - source / ObjectiveCNoBridgingStubs . swift <nl> ppp b / benchmark / single - source / ObjectiveCNoBridgingStubs . swift <nl> func testObjectiveCBridgeStubFromNSStringRef ( ) { <nl> for _ in 0 . . < 10_000 { <nl> nsString = b . testToString ( ) <nl> } <nl> - CheckResults ( nsString . isEqual ( to : " Default string value no tagged pointer " as NSString ) , " Wrong value returned " ) <nl> + CheckResults ( nsString . isEqual ( to : " Default string value no tagged pointer " as NSString ) ) <nl> } <nl> # endif <nl> <nl> mmm a / benchmark / single - source / OpenClose . swift <nl> ppp b / benchmark / single - source / OpenClose . swift <nl> public func run_OpenClose ( _ N : Int ) { <nl> for _ in 1 . . . N * 10000 { <nl> c + = check_state ( MyState . Closed ) <nl> } <nl> - CheckResults ( c = = 0 , " IncorrectResults in run_OpenClose " ) <nl> + CheckResults ( c = = 0 ) <nl> } <nl> <nl> mmm a / benchmark / single - source / PopFront . swift <nl> ppp b / benchmark / single - source / PopFront . swift <nl> public func run_PopFrontArray ( _ N : Int ) { <nl> result + = a [ 0 ] <nl> a . remove ( at : 0 ) <nl> } <nl> - CheckResults ( result = = arrayCount , " IncorrectResults in StringInterpolation : \ ( result ) ! = \ ( arrayCount ) " ) <nl> + CheckResults ( result = = arrayCount ) <nl> } <nl> } <nl> } <nl> public func run_PopFrontUnsafePointer ( _ N : Int ) { <nl> a . 
assign ( from : a + 1 , count : count - 1 ) <nl> count - = 1 <nl> } <nl> - CheckResults ( result = = arrayCount , " IncorrectResults in StringInterpolation : \ ( result ) ! = \ ( arrayCount ) " ) <nl> + CheckResults ( result = = arrayCount ) <nl> } <nl> } <nl> a . deallocate ( capacity : arrayCount ) <nl> mmm a / benchmark / single - source / PopFrontGeneric . swift <nl> ppp b / benchmark / single - source / PopFrontGeneric . swift <nl> public func run_PopFrontArrayGeneric ( _ N : Int ) { <nl> result + = a [ 0 ] <nl> myArrayReplace ( & a , 0 . . < 1 , EmptyCollection ( ) ) <nl> } <nl> - CheckResults ( result = = arrayCount , " IncorrectResults in StringInterpolation : \ ( result ) ! = \ ( arrayCount ) " ) <nl> + CheckResults ( result = = arrayCount ) <nl> } <nl> } <nl> } <nl> mmm a / benchmark / single - source / Prefix . swift <nl> ppp b / benchmark / single - source / Prefix . swift <nl> public func run_PrefixCountableRange ( _ N : Int ) { <nl> for element in s . prefix ( prefixCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in PrefixCountableRange : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_PrefixSequence ( _ N : Int ) { <nl> for element in s . prefix ( prefixCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in PrefixSequence : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_PrefixAnySequence ( _ N : Int ) { <nl> for element in s . prefix ( prefixCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in PrefixAnySequence : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_PrefixAnySeqCntRange ( _ N : Int ) { <nl> for element in s . prefix ( prefixCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in PrefixAnySeqCntRange : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_PrefixAnySeqCRangeIter ( _ N : Int ) { <nl> for element in s . prefix ( prefixCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in PrefixAnySeqCRangeIter : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_PrefixAnyCollection ( _ N : Int ) { <nl> for element in s . prefix ( prefixCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in PrefixAnyCollection : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_PrefixArray ( _ N : Int ) { <nl> for element in s . prefix ( prefixCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in PrefixArray : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_PrefixCountableRangeLazy ( _ N : Int ) { <nl> for element in s . 
prefix ( prefixCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in PrefixCountableRangeLazy : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_PrefixSequenceLazy ( _ N : Int ) { <nl> for element in s . prefix ( prefixCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in PrefixSequenceLazy : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_PrefixAnySequenceLazy ( _ N : Int ) { <nl> for element in s . prefix ( prefixCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in PrefixAnySequenceLazy : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_PrefixAnySeqCntRangeLazy ( _ N : Int ) { <nl> for element in s . prefix ( prefixCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in PrefixAnySeqCntRangeLazy : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_PrefixAnySeqCRangeIterLazy ( _ N : Int ) { <nl> for element in s . prefix ( prefixCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in PrefixAnySeqCRangeIterLazy : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_PrefixAnyCollectionLazy ( _ N : Int ) { <nl> for element in s . prefix ( prefixCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in PrefixAnyCollectionLazy : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_PrefixArrayLazy ( _ N : Int ) { <nl> for element in s . prefix ( prefixCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in PrefixArrayLazy : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> mmm a / benchmark / single - source / Prefix . swift . gyb <nl> ppp b / benchmark / single - source / Prefix . swift . gyb <nl> public func run_Prefix $ { Name } ( _ N : Int ) { <nl> for element in s . prefix ( prefixCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in Prefix $ { Name } : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> % end <nl> mmm a / benchmark / single - source / PrefixWhile . swift <nl> ppp b / benchmark / single - source / PrefixWhile . swift <nl> public func run_PrefixWhileCountableRange ( _ N : Int ) { <nl> for element in s . prefix ( while : { $ 0 < prefixCount } ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in PrefixWhileCountableRange : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_PrefixWhileSequence ( _ N : Int ) { <nl> for element in s . 
prefix ( while : { $ 0 < prefixCount } ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in PrefixWhileSequence : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_PrefixWhileAnySequence ( _ N : Int ) { <nl> for element in s . prefix ( while : { $ 0 < prefixCount } ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in PrefixWhileAnySequence : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_PrefixWhileAnySeqCntRange ( _ N : Int ) { <nl> for element in s . prefix ( while : { $ 0 < prefixCount } ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in PrefixWhileAnySeqCntRange : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_PrefixWhileAnySeqCRangeIter ( _ N : Int ) { <nl> for element in s . prefix ( while : { $ 0 < prefixCount } ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in PrefixWhileAnySeqCRangeIter : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_PrefixWhileAnyCollection ( _ N : Int ) { <nl> for element in s . prefix ( while : { $ 0 < prefixCount } ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in PrefixWhileAnyCollection : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_PrefixWhileArray ( _ N : Int ) { <nl> for element in s . prefix ( while : { $ 0 < prefixCount } ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in PrefixWhileArray : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_PrefixWhileCountableRangeLazy ( _ N : Int ) { <nl> for element in s . prefix ( while : { $ 0 < prefixCount } ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in PrefixWhileCountableRangeLazy : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_PrefixWhileSequenceLazy ( _ N : Int ) { <nl> for element in s . prefix ( while : { $ 0 < prefixCount } ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in PrefixWhileSequenceLazy : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_PrefixWhileAnySequenceLazy ( _ N : Int ) { <nl> for element in s . prefix ( while : { $ 0 < prefixCount } ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in PrefixWhileAnySequenceLazy : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_PrefixWhileAnySeqCntRangeLazy ( _ N : Int ) { <nl> for element in s . 
prefix ( while : { $ 0 < prefixCount } ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in PrefixWhileAnySeqCntRangeLazy : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_PrefixWhileAnySeqCRangeIterLazy ( _ N : Int ) { <nl> for element in s . prefix ( while : { $ 0 < prefixCount } ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in PrefixWhileAnySeqCRangeIterLazy : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_PrefixWhileAnyCollectionLazy ( _ N : Int ) { <nl> for element in s . prefix ( while : { $ 0 < prefixCount } ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in PrefixWhileAnyCollectionLazy : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_PrefixWhileArrayLazy ( _ N : Int ) { <nl> for element in s . prefix ( while : { $ 0 < prefixCount } ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in PrefixWhileArrayLazy : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> mmm a / benchmark / single - source / PrefixWhile . swift . gyb <nl> ppp b / benchmark / single - source / PrefixWhile . swift . gyb <nl> public func run_PrefixWhile $ { Name } ( _ N : Int ) { <nl> for element in s . prefix ( while : { $ 0 < prefixCount } ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in PrefixWhile $ { Name } : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> % end <nl> mmm a / benchmark / single - source / Prims . swift <nl> ppp b / benchmark / single - source / Prims . swift <nl> public func run_Prims ( _ N : Int ) { <nl> for i in 1 . . < treeEdges . count { <nl> if let n = treeEdges [ i ] { cost + = map [ Edge ( start : n , end : i ) ] ! } <nl> } <nl> - CheckResults ( Int ( cost ) = = 49324 , <nl> - " Incorrect results in Prims : \ ( Int ( cost ) ) ! = 49324 . " ) <nl> + CheckResults ( Int ( cost ) = = 49324 ) <nl> } <nl> } <nl> mmm a / benchmark / single - source / ProtocolDispatch2 . swift <nl> ppp b / benchmark / single - source / ProtocolDispatch2 . swift <nl> public func run_ProtocolDispatch2 ( _ N : Int ) { <nl> c + = wrapper ( i , g1 , g2 ) <nl> } <nl> } <nl> - CheckResults ( c = = 75000 , " IncorrectResults in ProtoDispatch " ) <nl> + CheckResults ( c = = 75000 ) <nl> } <nl> <nl> mmm a / benchmark / single - source / RC4 . swift <nl> ppp b / benchmark / single - source / RC4 . swift <nl> public func run_RC4 ( _ N : Int ) { <nl> Enc . encrypt ( & LongData ) <nl> } <nl> <nl> - CheckResults ( LongData = = RefResults , " Incorrect result in RC4 " ) <nl> + CheckResults ( LongData = = RefResults ) <nl> } <nl> } <nl> mmm a / benchmark / single - source / RGBHistogram . swift <nl> ppp b / benchmark / single - source / RGBHistogram . 
swift <nl> public func run_RGBHistogram ( _ N : Int ) { <nl> break <nl> } <nl> } <nl> - CheckResults ( isCorrectHistogram ( histogram ) , <nl> - " Incorrect results in histogram " ) <nl> + CheckResults ( isCorrectHistogram ( histogram ) ) <nl> } <nl> <nl> typealias rrggbb_t = UInt32 <nl> public func run_RGBHistogramOfObjects ( _ N : Int ) { <nl> break <nl> } <nl> } <nl> - CheckResults ( isCorrectHistogramOfObjects ( histogram ) , <nl> - " Incorrect results in histogram " ) <nl> + CheckResults ( isCorrectHistogramOfObjects ( histogram ) ) <nl> } <nl> <nl> <nl> mmm a / benchmark / single - source / RangeAssignment . swift <nl> ppp b / benchmark / single - source / RangeAssignment . swift <nl> public func run_RangeAssignment ( _ scale : Int ) { <nl> vector [ range ] = ArraySlice ( vector [ range ] . map { $ 0 + alfa } ) <nl> } <nl> <nl> - CheckResults ( vector [ 100 ] = = Double ( N ) , <nl> - " IncorrectResults in RangeAssignment : \ ( vector [ 100 ] ) ! = \ ( N ) . " ) <nl> + CheckResults ( vector [ 100 ] = = Double ( N ) ) <nl> } <nl> mmm a / benchmark / single - source / RecursiveOwnedParameter . swift <nl> ppp b / benchmark / single - source / RecursiveOwnedParameter . swift <nl> public func run_RecursiveOwnedParameter ( _ N : Int ) { <nl> } <nl> } <nl> let refResult = 100 * N <nl> - CheckResults ( result = = refResult , <nl> - " IncorrectResults in RecursiveOwnedParameter : \ ( result ) ! = \ ( refResult ) " ) <nl> + CheckResults ( result = = refResult ) <nl> } <nl> mmm a / benchmark / single - source / SetTests . swift <nl> ppp b / benchmark / single - source / SetTests . swift <nl> public func run_SetIsSubsetOf ( _ N : Int ) { <nl> } <nl> } <nl> <nl> - CheckResults ( ! isSubset , " Incorrect results in SetIsSubsetOf " ) <nl> + CheckResults ( ! isSubset ) <nl> } <nl> <nl> @ inline ( never ) <nl> public func run_SetIsSubsetOf_OfObjects ( _ N : Int ) { <nl> } <nl> } <nl> <nl> - CheckResults ( ! isSubset , " Incorrect results in SetIsSubsetOf " ) <nl> + CheckResults ( ! isSubset ) <nl> } <nl> <nl> @ inline ( never ) <nl> mmm a / benchmark / single - source / SevenBoom . swift <nl> ppp b / benchmark / single - source / SevenBoom . swift <nl> public func run_SevenBoom ( _ N : Int ) { <nl> catch _ { <nl> } <nl> } <nl> - CheckResults ( c = = 1 , " IncorrectResults in SevenBoom " ) <nl> + CheckResults ( c = = 1 ) <nl> } <nl> <nl> mmm a / benchmark / single - source / SortLargeExistentials . swift <nl> ppp b / benchmark / single - source / SortLargeExistentials . swift <nl> public func run_SortLargeExistentials ( _ N : Int ) { <nl> } <nl> <nl> / / Check whether letters are sorted . <nl> - CheckResults ( letters [ 0 ] . value < = letters [ letters . count / 2 ] . value , <nl> - " Incorrect results in SortLargeExistentials . " ) <nl> + CheckResults ( letters [ 0 ] . value < = letters [ letters . count / 2 ] . value ) <nl> } <nl> } <nl> mmm a / benchmark / single - source / SortLettersInPlace . swift <nl> ppp b / benchmark / single - source / SortLettersInPlace . swift <nl> public func run_SortLettersInPlace ( _ N : Int ) { <nl> } <nl> <nl> / / Check whether letters are sorted . <nl> - CheckResults ( letters [ 0 ] . value < = letters [ letters . count / 2 ] . value , <nl> - " Incorrect results in SortLetterInPlace . " ) <nl> + CheckResults ( letters [ 0 ] . value < = letters [ letters . count / 2 ] . value ) <nl> } <nl> } <nl> <nl> mmm a / benchmark / single - source / StrComplexWalk . swift <nl> ppp b / benchmark / single - source / StrComplexWalk . 
swift <nl> public func run_StrComplexWalk ( _ N : Int ) { <nl> for _ in s . unicodeScalars { <nl> count + = 1 <nl> } <nl> - CheckResults ( count = = ref_result , " Incorrect results in StrComplexWalk : \ ( count ) ! = \ ( ref_result ) " ) <nl> + CheckResults ( count = = ref_result ) <nl> } <nl> } <nl> <nl> mmm a / benchmark / single - source / StrToInt . swift <nl> ppp b / benchmark / single - source / StrToInt . swift <nl> public func run_StrToInt ( _ N : Int ) { <nl> for _ in 1 . . . 1000 * N { <nl> res = res & DoOneIter ( input ) <nl> } <nl> - CheckResults ( res = = ref_result , " IncorrectResults in StrToInt : \ ( res ) ! = \ ( ref_result ) " ) <nl> + CheckResults ( res = = ref_result ) <nl> } <nl> mmm a / benchmark / single - source / StringInterpolation . swift <nl> ppp b / benchmark / single - source / StringInterpolation . swift <nl> public func run_StringInterpolation ( _ N : Int ) { <nl> / / with an operation on the native storage type . <nl> result = result & + Int ( utf16 [ utf16 . index ( before : utf16 . endIndex ) ] ) <nl> } <nl> - CheckResults ( result = = refResult , " IncorrectResults in StringInterpolation : \ ( result ) ! = \ ( refResult ) " ) <nl> + CheckResults ( result = = refResult ) <nl> } <nl> } <nl> <nl> mmm a / benchmark / single - source / StringTests . swift <nl> ppp b / benchmark / single - source / StringTests . swift <nl> public func run_StringHasPrefix ( _ N : Int ) { <nl> for _ in 0 . . < N { <nl> for _ in 0 . . < 100_000 { <nl> if ! testString . hasPrefix ( prefix ) { <nl> - CheckResults ( false , " prefix check failed " ) <nl> + CheckResults ( false ) <nl> } <nl> } <nl> } <nl> public func run_StringHasSuffix ( _ N : Int ) { <nl> for _ in 0 . . < N { <nl> for _ in 0 . . < 100_000 { <nl> if ! testString . hasSuffix ( suffix ) { <nl> - CheckResults ( false , " suffix check failed " ) <nl> + CheckResults ( false ) <nl> } <nl> } <nl> } <nl> public func run_StringHasPrefixUnicode ( _ N : Int ) { <nl> for _ in 0 . . < N { <nl> for _ in 0 . . < 100_000 { <nl> if ! testString . hasPrefix ( prefix ) { <nl> - CheckResults ( false , " prefix check failed " ) <nl> + CheckResults ( false ) <nl> } <nl> } <nl> } <nl> public func run_StringHasSuffixUnicode ( _ N : Int ) { <nl> for _ in 0 . . < N { <nl> for _ in 0 . . < 100_000 { <nl> if ! testString . hasSuffix ( suffix ) { <nl> - CheckResults ( false , " suffix check failed " ) <nl> + CheckResults ( false ) <nl> } <nl> } <nl> } <nl> public func run_StringEqualPointerComparison ( _ N : Int ) { <nl> for _ in 0 . . < N { <nl> for _ in 0 . . < 100_000 { <nl> if ! compareEqual ( str1 , str2 ) { <nl> - CheckResults ( false , " Strings should be equal " ) <nl> + CheckResults ( false ) <nl> } <nl> } <nl> } <nl> mmm a / benchmark / single - source / Suffix . swift <nl> ppp b / benchmark / single - source / Suffix . swift <nl> public func run_SuffixCountableRange ( _ N : Int ) { <nl> for element in s . suffix ( suffixCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in SuffixCountableRange : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_SuffixSequence ( _ N : Int ) { <nl> for element in s . suffix ( suffixCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in SuffixSequence : \ ( result ) ! 
= \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_SuffixAnySequence ( _ N : Int ) { <nl> for element in s . suffix ( suffixCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in SuffixAnySequence : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_SuffixAnySeqCntRange ( _ N : Int ) { <nl> for element in s . suffix ( suffixCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in SuffixAnySeqCntRange : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_SuffixAnySeqCRangeIter ( _ N : Int ) { <nl> for element in s . suffix ( suffixCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in SuffixAnySeqCRangeIter : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_SuffixAnyCollection ( _ N : Int ) { <nl> for element in s . suffix ( suffixCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in SuffixAnyCollection : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_SuffixArray ( _ N : Int ) { <nl> for element in s . suffix ( suffixCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in SuffixArray : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_SuffixCountableRangeLazy ( _ N : Int ) { <nl> for element in s . suffix ( suffixCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in SuffixCountableRangeLazy : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_SuffixSequenceLazy ( _ N : Int ) { <nl> for element in s . suffix ( suffixCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in SuffixSequenceLazy : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_SuffixAnySequenceLazy ( _ N : Int ) { <nl> for element in s . suffix ( suffixCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in SuffixAnySequenceLazy : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_SuffixAnySeqCntRangeLazy ( _ N : Int ) { <nl> for element in s . suffix ( suffixCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in SuffixAnySeqCntRangeLazy : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_SuffixAnySeqCRangeIterLazy ( _ N : Int ) { <nl> for element in s . suffix ( suffixCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in SuffixAnySeqCRangeIterLazy : \ ( result ) ! 
= \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_SuffixAnyCollectionLazy ( _ N : Int ) { <nl> for element in s . suffix ( suffixCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in SuffixAnyCollectionLazy : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> @ inline ( never ) <nl> public func run_SuffixArrayLazy ( _ N : Int ) { <nl> for element in s . suffix ( suffixCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in SuffixArrayLazy : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> mmm a / benchmark / single - source / Suffix . swift . gyb <nl> ppp b / benchmark / single - source / Suffix . swift . gyb <nl> public func run_Suffix $ { Name } ( _ N : Int ) { <nl> for element in s . suffix ( suffixCount ) { <nl> result + = element <nl> } <nl> - CheckResults ( result = = sumCount , <nl> - " IncorrectResults in Suffix $ { Name } : \ ( result ) ! = \ ( sumCount ) " ) <nl> + CheckResults ( result = = sumCount ) <nl> } <nl> } <nl> % end <nl> mmm a / benchmark / single - source / TwoSum . swift <nl> ppp b / benchmark / single - source / TwoSum . swift <nl> public func run_TwoSum ( _ N : Int ) { <nl> } <nl> Dict [ array [ n ] ] = n <nl> } <nl> - CheckResults ( i1 ! = nil & & i2 ! = nil , <nl> - " Incorrect results in TwoSum : i1 or i2 wasn ' t found . " ) <nl> - CheckResults ( Sum = = array [ i1 ! ] + array [ i2 ! ] , <nl> - " Incorrect results in TwoSum : Sum : \ ( Sum ) , " + <nl> - " array [ i1 ] : \ ( array [ i1 ! ] ) , array [ i2 ] : \ ( array [ i2 ! ] ) . " ) <nl> + CheckResults ( i1 ! = nil & & i2 ! = nil ) <nl> + CheckResults ( Sum = = array [ i1 ! ] + array [ i2 ! ] ) <nl> } <nl> } <nl> } <nl> mmm a / benchmark / single - source / Walsh . swift <nl> ppp b / benchmark / single - source / Walsh . swift <nl> func checkCorrectness ( ) { <nl> InverseWalshTransform ( & data ) <nl> for i in 0 . . < In . count { <nl> / / Check encode . <nl> - CheckResults ( abs ( data [ i ] - In [ i ] ) < 0 . 0001 , " Incorrect results in Walsh . " ) <nl> + CheckResults ( abs ( data [ i ] - In [ i ] ) < 0 . 0001 ) <nl> / / Check decode . <nl> - CheckResults ( abs ( mid [ i ] - Out [ i ] ) < 0 . 0001 , " Incorrect results in Walsh . " ) <nl> + CheckResults ( abs ( mid [ i ] - Out [ i ] ) < 0 . 0001 ) <nl> } <nl> } <nl> <nl> mmm a / benchmark / single - source / XorLoop . swift <nl> ppp b / benchmark / single - source / XorLoop . swift <nl> public func run_XorLoop ( _ N : Int ) { <nl> x [ i ] = x [ i ] ^ 12345678 <nl> } <nl> let res = x [ 10 ] + x [ 100 ] + x [ 1000 ] + x [ 10000 ] <nl> - CheckResults ( res = = ref_result , <nl> - " Incorrect results in XorLoop : \ ( res ) ! = \ ( ref_result ) " ) <nl> + CheckResults ( res = = ref_result ) <nl> } <nl> } <nl> mmm a / benchmark / utils / TestsUtils . swift <nl> ppp b / benchmark / utils / TestsUtils . swift <nl> public func someProtocolFactory ( ) - > SomeProtocol { return MyStruct ( ) } <nl> public func blackHole < T > ( _ x : T ) { <nl> } <nl> <nl> + @ inline ( __always ) <nl> + public func CheckResults ( _ resultsMatch : Bool ) { <nl> + guard _fastPath ( resultsMatch ) else { abort ( ) } <nl> + } <nl> + <nl>
|
Merge pull request from apple/only-benchmark-interpolation-when-you-mean-to
|
apple/swift
|
85831df50396c7f7e4db636bf6e97218af065186
|
2017-05-05T21:31:37Z
|
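The change above appears to boil down to a benchmarking pitfall: a check helper that takes a pre-formatted failure message makes every caller pay for string interpolation even when the check passes, so the message construction ends up inside the measured loop. A rough, self-contained C++ sketch of the same idea follows; the helper names and values are illustrative and not taken from the Swift benchmark suite.

#include <cstdio>
#include <cstdlib>
#include <string>

// Old style: the message is built eagerly on every call, even on success,
// so the formatting cost is measured along with the code under test.
inline void check_with_message(bool ok, const std::string &msg) {
  if (!ok) { std::fputs(msg.c_str(), stderr); std::abort(); }
}

// New style: only the boolean comparison runs on the hot path.
inline void check(bool ok) {
  if (!ok) std::abort();
}

int main() {
  int result = 42, expected = 42;
  check_with_message(result == expected,
                     std::to_string(result) + " != " + std::to_string(expected));
  check(result == expected);  // same verification, no per-iteration formatting
}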
deleted file mode 100644 <nl> index 030234edc693 . . 000000000000 <nl> mmm a / unittests / Basic / ADTTests . cpp <nl> ppp / dev / null <nl> <nl> - # include " swift / Basic / Range . h " <nl> - # include " swift / Basic / OptionSet . h " <nl> - # include " swift / Basic / ValueEnumerator . h " <nl> - # include " gtest / gtest . h " <nl> - <nl> - using namespace swift ; <nl> - <nl> - TEST ( OptionSet , contains ) { <nl> - enum class Flags { <nl> - A = 1 < < 0 , <nl> - B = 1 < < 1 , <nl> - C = 1 < < 2 <nl> - } ; <nl> - <nl> - OptionSet < Flags > emptySet ; <nl> - OptionSet < Flags > aSet = Flags : : A ; <nl> - OptionSet < Flags > abSet = aSet | Flags : : B ; <nl> - OptionSet < Flags > abcSet = abSet | Flags : : C ; <nl> - OptionSet < Flags > bcSet = abcSet - Flags : : A ; <nl> - OptionSet < Flags > cSet = bcSet - Flags : : B ; <nl> - <nl> - EXPECT_TRUE ( emptySet . contains ( emptySet ) ) ; <nl> - EXPECT_FALSE ( emptySet . contains ( aSet ) ) ; <nl> - EXPECT_FALSE ( emptySet . contains ( abSet ) ) ; <nl> - EXPECT_FALSE ( emptySet . contains ( abcSet ) ) ; <nl> - EXPECT_FALSE ( emptySet . contains ( bcSet ) ) ; <nl> - EXPECT_FALSE ( emptySet . contains ( cSet ) ) ; <nl> - <nl> - EXPECT_TRUE ( aSet . contains ( emptySet ) ) ; <nl> - EXPECT_TRUE ( aSet . contains ( aSet ) ) ; <nl> - EXPECT_FALSE ( aSet . contains ( abSet ) ) ; <nl> - EXPECT_FALSE ( aSet . contains ( abcSet ) ) ; <nl> - EXPECT_FALSE ( aSet . contains ( bcSet ) ) ; <nl> - EXPECT_FALSE ( aSet . contains ( cSet ) ) ; <nl> - <nl> - EXPECT_TRUE ( abSet . contains ( emptySet ) ) ; <nl> - EXPECT_TRUE ( abSet . contains ( aSet ) ) ; <nl> - EXPECT_TRUE ( abSet . contains ( abSet ) ) ; <nl> - EXPECT_FALSE ( abSet . contains ( abcSet ) ) ; <nl> - EXPECT_FALSE ( abSet . contains ( bcSet ) ) ; <nl> - EXPECT_FALSE ( abSet . contains ( cSet ) ) ; <nl> - <nl> - EXPECT_TRUE ( abcSet . contains ( emptySet ) ) ; <nl> - EXPECT_TRUE ( abcSet . contains ( aSet ) ) ; <nl> - EXPECT_TRUE ( abcSet . contains ( abSet ) ) ; <nl> - EXPECT_TRUE ( abcSet . contains ( abcSet ) ) ; <nl> - EXPECT_TRUE ( abcSet . contains ( bcSet ) ) ; <nl> - EXPECT_TRUE ( abcSet . contains ( cSet ) ) ; <nl> - } <nl> - <nl> - <nl> - TEST ( OptionSet , intptr_t ) { <nl> - enum class Small : int8_t { <nl> - A = 1 < < 0 <nl> - } ; <nl> - <nl> - OptionSet < Small > small = Small : : A ; <nl> - EXPECT_EQ ( static_cast < intptr_t > ( Small : : A ) , static_cast < intptr_t > ( small ) ) ; <nl> - <nl> - <nl> - enum class UPtr : uintptr_t { <nl> - A = std : : numeric_limits < uintptr_t > : : max ( ) <nl> - } ; <nl> - <nl> - OptionSet < UPtr > uptr = UPtr : : A ; <nl> - EXPECT_EQ ( static_cast < intptr_t > ( UPtr : : A ) , static_cast < intptr_t > ( uptr ) ) ; <nl> - <nl> - <nl> - enum class Ptr : intptr_t { <nl> - A = std : : numeric_limits < intptr_t > : : min ( ) <nl> - } ; <nl> - <nl> - OptionSet < Ptr > ptr = Ptr : : A ; <nl> - EXPECT_EQ ( static_cast < intptr_t > ( Ptr : : A ) , static_cast < intptr_t > ( ptr ) ) ; <nl> - } <nl> - <nl> - TEST ( OptionSet , intptr_t_isConstructible ) { <nl> - / / First check that std : : is_constructible counts explicit conversion <nl> - / / operators . <nl> - class AlwaysConvertible { <nl> - public : <nl> - explicit operator intptr_t ( ) const { return 0 ; } <nl> - } ; <nl> - <nl> - if ( ! std : : is_constructible < intptr_t , AlwaysConvertible > : : value ) { <nl> - / / std : : is_constructible doesn ' t test what we want it to . Just exit early . 
<nl> - return ; <nl> - } <nl> - <nl> - enum class LongLong : unsigned long long { <nl> - A = 1 <nl> - } ; <nl> - bool isConvertible = <nl> - std : : is_constructible < intptr_t , OptionSet < LongLong > > : : value ; <nl> - <nl> - if ( sizeof ( intptr_t ) < sizeof ( long long ) ) { <nl> - EXPECT_FALSE ( isConvertible ) ; <nl> - } else { <nl> - EXPECT_TRUE ( isConvertible ) ; <nl> - } <nl> - } <nl> - <nl> - <nl> - TEST ( ValueEnumerator , basic ) { <nl> - <nl> - { <nl> - ValueEnumerator < int > Trans ; <nl> - / / Check that indexing is persistent . <nl> - EXPECT_EQ ( Trans . getIndex ( 99 ) , Trans . getIndex ( 99 ) ) ; <nl> - EXPECT_EQ ( Trans . getIndex ( 100 ) , Trans . getIndex ( 100 ) ) ; <nl> - <nl> - / / Check that we don ' t have collisions . <nl> - bool SameIndex = Trans . getIndex ( 82 ) = = Trans . getIndex ( 73 ) ; <nl> - EXPECT_FALSE ( SameIndex ) ; <nl> - <nl> - / / Check that invalidation works . <nl> - / / After invalidation the old index must not be equal to the new index . <nl> - size_t oldIndex = Trans . getIndex ( 99 ) ; <nl> - Trans . invalidateValue ( 99 ) ; <nl> - size_t newIndex = Trans . getIndex ( 99 ) ; <nl> - EXPECT_FALSE ( newIndex = = oldIndex ) ; <nl> - } <nl> - <nl> - { <nl> - const char * string_1 = " hello " ; <nl> - const char * string_2 = " goodbye " ; <nl> - const char * string_3 = " : - ) " ; <nl> - ValueEnumerator < const char * > Trans ; <nl> - EXPECT_EQ ( Trans . getIndex ( nullptr ) , Trans . getIndex ( nullptr ) ) ; <nl> - EXPECT_EQ ( Trans . getIndex ( string_1 ) , Trans . getIndex ( string_1 ) ) ; <nl> - EXPECT_EQ ( Trans . getIndex ( string_2 ) , Trans . getIndex ( string_2 ) ) ; <nl> - <nl> - / / Check that invalidation works . <nl> - size_t oldIndex = Trans . getIndex ( string_3 ) ; <nl> - Trans . invalidateValue ( string_3 ) ; <nl> - size_t newIndex = Trans . getIndex ( string_3 ) ; <nl> - EXPECT_FALSE ( newIndex = = oldIndex ) ; <nl> - <nl> - / / Check that different values don ' t give the same index . <nl> - EXPECT_FALSE ( Trans . getIndex ( string_2 ) = = Trans . getIndex ( string_3 ) ) ; <nl> - } <nl> - <nl> - <nl> - { <nl> - ValueEnumerator < int > Trans ; <nl> - / / Check a bunch of integers . <nl> - for ( int i = 1 ; i < 10000 ; i + + ) { <nl> - EXPECT_TRUE ( Trans . getIndex ( 0 ) ! = Trans . getIndex ( i ) ) ; <nl> - } <nl> - <nl> - / / Check that there are no accidental collisions . <nl> - for ( int i = 0 ; i < 10000 ; i + + ) { <nl> - for ( int j = 1 ; j < 10 ; j + + ) { <nl> - EXPECT_TRUE ( Trans . getIndex ( i ) ! = Trans . getIndex ( i + j ) ) ; <nl> - } <nl> - } <nl> - <nl> - / / Check that indexing is still persistent . <nl> - EXPECT_EQ ( Trans . getIndex ( 100 ) , Trans . 
getIndex ( 100 ) ) ; <nl> - } <nl> - <nl> - } <nl> - <nl> - TEST ( Range , basic ) { <nl> - unsigned start = 0 ; <nl> - unsigned end = 50 ; <nl> - unsigned expected_i = start ; <nl> - bool sawEndMinusOne = false ; <nl> - for ( unsigned i : range ( start , end ) ) { <nl> - EXPECT_GE ( i , start ) ; <nl> - EXPECT_LT ( i , end ) ; <nl> - EXPECT_EQ ( expected_i , i ) ; <nl> - + + expected_i ; <nl> - <nl> - sawEndMinusOne | = ( i = = ( end - 1 ) ) ; <nl> - } <nl> - EXPECT_TRUE ( sawEndMinusOne ) ; <nl> - } <nl> - <nl> - TEST ( ReverseRange , basic ) { <nl> - unsigned start = 0 ; <nl> - unsigned end = 50 ; <nl> - unsigned expected_i = end ; <nl> - bool sawStartPlusOne = false ; <nl> - for ( unsigned i : reverse_range ( start , end ) ) { <nl> - EXPECT_GT ( i , start ) ; <nl> - EXPECT_LE ( i , end ) ; <nl> - EXPECT_EQ ( expected_i , i ) ; <nl> - - - expected_i ; <nl> - <nl> - sawStartPlusOne | = ( i = = start + 1 ) ; <nl> - } <nl> - EXPECT_TRUE ( sawStartPlusOne ) ; <nl> - } <nl> mmm a / unittests / Basic / CMakeLists . txt <nl> ppp b / unittests / Basic / CMakeLists . txt <nl> handle_gyb_sources ( <nl> $ { SWIFT_HOST_VARIANT_ARCH } ) <nl> <nl> add_swift_unittest ( SwiftBasicTests <nl> - ADTTests . cpp <nl> + ValueEnumeratorTest . cpp <nl> + OptionSetTest . cpp <nl> + RangeTest . cpp <nl> BlotMapVectorTest . cpp <nl> ClusteredBitVectorTest . cpp <nl> Demangle . cpp <nl> new file mode 100644 <nl> index 000000000000 . . cff0a7732b11 <nl> mmm / dev / null <nl> ppp b / unittests / Basic / OptionSetTest . cpp <nl> <nl> + # include " swift / Basic / OptionSet . h " <nl> + # include " swift / Basic / Range . h " <nl> + # include " swift / Basic / ValueEnumerator . h " <nl> + # include " gtest / gtest . h " <nl> + <nl> + using namespace swift ; <nl> + <nl> + TEST ( OptionSet , contains ) { <nl> + enum class Flags { A = 1 < < 0 , B = 1 < < 1 , C = 1 < < 2 } ; <nl> + <nl> + OptionSet < Flags > emptySet ; <nl> + OptionSet < Flags > aSet = Flags : : A ; <nl> + OptionSet < Flags > abSet = aSet | Flags : : B ; <nl> + OptionSet < Flags > abcSet = abSet | Flags : : C ; <nl> + OptionSet < Flags > bcSet = abcSet - Flags : : A ; <nl> + OptionSet < Flags > cSet = bcSet - Flags : : B ; <nl> + <nl> + EXPECT_TRUE ( emptySet . contains ( emptySet ) ) ; <nl> + EXPECT_FALSE ( emptySet . contains ( aSet ) ) ; <nl> + EXPECT_FALSE ( emptySet . contains ( abSet ) ) ; <nl> + EXPECT_FALSE ( emptySet . contains ( abcSet ) ) ; <nl> + EXPECT_FALSE ( emptySet . contains ( bcSet ) ) ; <nl> + EXPECT_FALSE ( emptySet . contains ( cSet ) ) ; <nl> + <nl> + EXPECT_TRUE ( aSet . contains ( emptySet ) ) ; <nl> + EXPECT_TRUE ( aSet . contains ( aSet ) ) ; <nl> + EXPECT_FALSE ( aSet . contains ( abSet ) ) ; <nl> + EXPECT_FALSE ( aSet . contains ( abcSet ) ) ; <nl> + EXPECT_FALSE ( aSet . contains ( bcSet ) ) ; <nl> + EXPECT_FALSE ( aSet . contains ( cSet ) ) ; <nl> + <nl> + EXPECT_TRUE ( abSet . contains ( emptySet ) ) ; <nl> + EXPECT_TRUE ( abSet . contains ( aSet ) ) ; <nl> + EXPECT_TRUE ( abSet . contains ( abSet ) ) ; <nl> + EXPECT_FALSE ( abSet . contains ( abcSet ) ) ; <nl> + EXPECT_FALSE ( abSet . contains ( bcSet ) ) ; <nl> + EXPECT_FALSE ( abSet . contains ( cSet ) ) ; <nl> + <nl> + EXPECT_TRUE ( abcSet . contains ( emptySet ) ) ; <nl> + EXPECT_TRUE ( abcSet . contains ( aSet ) ) ; <nl> + EXPECT_TRUE ( abcSet . contains ( abSet ) ) ; <nl> + EXPECT_TRUE ( abcSet . contains ( abcSet ) ) ; <nl> + EXPECT_TRUE ( abcSet . contains ( bcSet ) ) ; <nl> + EXPECT_TRUE ( abcSet . 
contains ( cSet ) ) ; <nl> + } <nl> + <nl> + TEST ( OptionSet , intptr_t ) { <nl> + enum class Small : int8_t { A = 1 < < 0 } ; <nl> + <nl> + OptionSet < Small > small = Small : : A ; <nl> + EXPECT_EQ ( static_cast < intptr_t > ( Small : : A ) , static_cast < intptr_t > ( small ) ) ; <nl> + <nl> + enum class UPtr : uintptr_t { A = std : : numeric_limits < uintptr_t > : : max ( ) } ; <nl> + <nl> + OptionSet < UPtr > uptr = UPtr : : A ; <nl> + EXPECT_EQ ( static_cast < intptr_t > ( UPtr : : A ) , static_cast < intptr_t > ( uptr ) ) ; <nl> + <nl> + enum class Ptr : intptr_t { A = std : : numeric_limits < intptr_t > : : min ( ) } ; <nl> + <nl> + OptionSet < Ptr > ptr = Ptr : : A ; <nl> + EXPECT_EQ ( static_cast < intptr_t > ( Ptr : : A ) , static_cast < intptr_t > ( ptr ) ) ; <nl> + } <nl> + <nl> + TEST ( OptionSet , intptr_t_isConstructible ) { <nl> + / / First check that std : : is_constructible counts explicit conversion <nl> + / / operators . <nl> + class AlwaysConvertible { <nl> + public : <nl> + explicit operator intptr_t ( ) const { return 0 ; } <nl> + } ; <nl> + <nl> + if ( ! std : : is_constructible < intptr_t , AlwaysConvertible > : : value ) { <nl> + / / std : : is_constructible doesn ' t test what we want it to . Just exit early . <nl> + return ; <nl> + } <nl> + <nl> + enum class LongLong : unsigned long long { A = 1 } ; <nl> + bool isConvertible = <nl> + std : : is_constructible < intptr_t , OptionSet < LongLong > > : : value ; <nl> + <nl> + if ( sizeof ( intptr_t ) < sizeof ( long long ) ) { <nl> + EXPECT_FALSE ( isConvertible ) ; <nl> + } else { <nl> + EXPECT_TRUE ( isConvertible ) ; <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 000000000000 . . a08d3631b351 <nl> mmm / dev / null <nl> ppp b / unittests / Basic / RangeTest . cpp <nl> <nl> + # include " swift / Basic / Range . h " <nl> + # include " swift / Basic / OptionSet . h " <nl> + # include " swift / Basic / ValueEnumerator . h " <nl> + # include " gtest / gtest . h " <nl> + <nl> + using namespace swift ; <nl> + <nl> + TEST ( Range , basic ) { <nl> + unsigned start = 0 ; <nl> + unsigned end = 50 ; <nl> + unsigned expected_i = start ; <nl> + bool sawEndMinusOne = false ; <nl> + for ( unsigned i : range ( start , end ) ) { <nl> + EXPECT_GE ( i , start ) ; <nl> + EXPECT_LT ( i , end ) ; <nl> + EXPECT_EQ ( expected_i , i ) ; <nl> + + + expected_i ; <nl> + <nl> + sawEndMinusOne | = ( i = = ( end - 1 ) ) ; <nl> + } <nl> + EXPECT_TRUE ( sawEndMinusOne ) ; <nl> + } <nl> + <nl> + TEST ( ReverseRange , basic ) { <nl> + unsigned start = 0 ; <nl> + unsigned end = 50 ; <nl> + unsigned expected_i = end ; <nl> + bool sawStartPlusOne = false ; <nl> + for ( unsigned i : reverse_range ( start , end ) ) { <nl> + EXPECT_GT ( i , start ) ; <nl> + EXPECT_LE ( i , end ) ; <nl> + EXPECT_EQ ( expected_i , i ) ; <nl> + - - expected_i ; <nl> + <nl> + sawStartPlusOne | = ( i = = start + 1 ) ; <nl> + } <nl> + EXPECT_TRUE ( sawStartPlusOne ) ; <nl> + } <nl> new file mode 100644 <nl> index 000000000000 . . ee229e5b3af5 <nl> mmm / dev / null <nl> ppp b / unittests / Basic / ValueEnumeratorTest . cpp <nl> <nl> + # include " swift / Basic / ValueEnumerator . h " <nl> + # include " swift / Basic / OptionSet . h " <nl> + # include " swift / Basic / Range . h " <nl> + # include " gtest / gtest . h " <nl> + <nl> + using namespace swift ; <nl> + <nl> + TEST ( ValueEnumerator , basic ) { <nl> + <nl> + { <nl> + ValueEnumerator < int > Trans ; <nl> + / / Check that indexing is persistent . <nl> + EXPECT_EQ ( Trans . getIndex ( 99 ) , Trans . 
getIndex ( 99 ) ) ; <nl> + EXPECT_EQ ( Trans . getIndex ( 100 ) , Trans . getIndex ( 100 ) ) ; <nl> + <nl> + / / Check that we don ' t have collisions . <nl> + bool SameIndex = Trans . getIndex ( 82 ) = = Trans . getIndex ( 73 ) ; <nl> + EXPECT_FALSE ( SameIndex ) ; <nl> + <nl> + / / Check that invalidation works . <nl> + / / After invalidation the old index must not be equal to the new index . <nl> + size_t oldIndex = Trans . getIndex ( 99 ) ; <nl> + Trans . invalidateValue ( 99 ) ; <nl> + size_t newIndex = Trans . getIndex ( 99 ) ; <nl> + EXPECT_FALSE ( newIndex = = oldIndex ) ; <nl> + } <nl> + <nl> + { <nl> + const char * string_1 = " hello " ; <nl> + const char * string_2 = " goodbye " ; <nl> + const char * string_3 = " : - ) " ; <nl> + ValueEnumerator < const char * > Trans ; <nl> + EXPECT_EQ ( Trans . getIndex ( nullptr ) , Trans . getIndex ( nullptr ) ) ; <nl> + EXPECT_EQ ( Trans . getIndex ( string_1 ) , Trans . getIndex ( string_1 ) ) ; <nl> + EXPECT_EQ ( Trans . getIndex ( string_2 ) , Trans . getIndex ( string_2 ) ) ; <nl> + <nl> + / / Check that invalidation works . <nl> + size_t oldIndex = Trans . getIndex ( string_3 ) ; <nl> + Trans . invalidateValue ( string_3 ) ; <nl> + size_t newIndex = Trans . getIndex ( string_3 ) ; <nl> + EXPECT_FALSE ( newIndex = = oldIndex ) ; <nl> + <nl> + / / Check that different values don ' t give the same index . <nl> + EXPECT_FALSE ( Trans . getIndex ( string_2 ) = = Trans . getIndex ( string_3 ) ) ; <nl> + } <nl> + <nl> + { <nl> + ValueEnumerator < int > Trans ; <nl> + / / Check a bunch of integers . <nl> + for ( int i = 1 ; i < 10000 ; i + + ) { <nl> + EXPECT_TRUE ( Trans . getIndex ( 0 ) ! = Trans . getIndex ( i ) ) ; <nl> + } <nl> + <nl> + / / Check that there are no accidental collisions . <nl> + for ( int i = 0 ; i < 10000 ; i + + ) { <nl> + for ( int j = 1 ; j < 10 ; j + + ) { <nl> + EXPECT_TRUE ( Trans . getIndex ( i ) ! = Trans . getIndex ( i + j ) ) ; <nl> + } <nl> + } <nl> + <nl> + / / Check that indexing is still persistent . <nl> + EXPECT_EQ ( Trans . getIndex ( 100 ) , Trans . getIndex ( 100 ) ) ; <nl> + } <nl> + } <nl>
|
[gardening] Split ADTTests.cpp into separate test files.
|
apple/swift
|
79c4034bb1abb97b93fcf8c96b826bf160a774e6
|
2017-02-10T05:48:30Z
|
mmm a / src / core / arm / skyeye_common / vfp / vfpsingle . cpp <nl> ppp b / src / core / arm / skyeye_common / vfp / vfpsingle . cpp <nl> static u32 vfp_single_ftoui ( ARMul_State * state , int sd , int unused , s32 m , u32 f <nl> exceptions | = FPSCR_IDC ; <nl> <nl> if ( tm & VFP_NAN ) <nl> - vsm . sign = 0 ; <nl> + vsm . sign = 1 ; <nl> <nl> if ( vsm . exponent > = 127 + 32 ) { <nl> d = vsm . sign ? 0 : 0xffffffff ; <nl> static u32 vfp_single_fsub ( ARMul_State * state , int sd , int sn , s32 m , u32 fpscr ) <nl> / * <nl> * Subtraction is addition with one sign inverted . <nl> * / <nl> - return vfp_single_fadd ( state , sd , sn , vfp_single_packed_negate ( m ) , fpscr ) ; <nl> + if ( m ! = 0x7FC00000 ) / / Only negate if m isn ' t NaN . <nl> + m = vfp_single_packed_negate ( m ) ; <nl> + <nl> + return vfp_single_fadd ( state , sd , sn , m , fpscr ) ; <nl> } <nl> <nl> / * <nl>
|
Merge pull request from lioncash/vsub
|
yuzu-emu/yuzu
|
ec9ac2f51a9669fba1a88c93d0edb02a522492f6
|
2014-12-17T04:22:58Z
|
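The fsub part of the diff above relies on the usual trick of implementing floating-point subtraction as addition with the second operand's sign flipped, and on skipping that flip when the operand is the single-precision default quiet NaN (bit pattern 0x7FC00000) so the NaN propagates unchanged. A minimal C++ sketch of just that guard, with the actual packed-add step omitted; this is not the emulator's code and the function name is illustrative.

#include <cstdint>

// Flip the IEEE-754 single-precision sign bit of a packed value.
static uint32_t packed_negate(uint32_t s) { return s ^ 0x80000000u; }

// Subtraction as "add the negated second operand", except that the default
// quiet NaN must pass through untouched so its bit pattern is not corrupted.
uint32_t negate_for_fsub(uint32_t m) {
  const uint32_t kDefaultQuietNaN = 0x7FC00000u;
  return (m == kDefaultQuietNaN) ? m : packed_negate(m);
  // the packed-add routine would consume the returned value here
}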
mmm a / xbmc / platform / android / jni / AudioFormat . cpp <nl> ppp b / xbmc / platform / android / jni / AudioFormat . cpp <nl> int CJNIAudioFormat : : CHANNEL_OUT_BACK_RIGHT = 0x00000080 ; <nl> <nl> int CJNIAudioFormat : : CHANNEL_INVALID = 0x00000000 ; <nl> <nl> + const char * CJNIAudioFormat : : m_classname = " android / media / AudioFormat " ; <nl> const char * CJNIAudioFormatBuilder : : m_classname = " android / media / AudioFormat $ Builder " ; <nl> <nl> void CJNIAudioFormat : : GetStaticValue ( jhclass & c , int & field , char * value ) <nl> void CJNIAudioFormat : : PopulateStaticFields ( ) <nl> int sdk = CJNIBase : : GetSDKVersion ( ) ; <nl> if ( sdk > = 3 ) <nl> { <nl> - jhclass c = find_class ( " android / media / AudioFormat " ) ; <nl> + jhclass c = find_class ( m_classname ) ; <nl> CJNIAudioFormat : : ENCODING_PCM_16BIT = get_static_field < int > ( c , " ENCODING_PCM_16BIT " ) ; <nl> if ( sdk > = 5 ) <nl> { <nl> void CJNIAudioFormat : : PopulateStaticFields ( ) <nl> } <nl> } <nl> <nl> + int CJNIAudioFormat : : getChannelCount ( ) const <nl> + { <nl> + return call_method < int > ( m_object , " getChannelCount " , " ( ) I " ) ; <nl> + } <nl> + <nl> + int CJNIAudioFormat : : getChannelIndexMask ( ) const <nl> + { <nl> + return call_method < int > ( m_object , " getChannelIndexMask " , " ( ) I " ) ; <nl> + } <nl> + <nl> + int CJNIAudioFormat : : getChannelMask ( ) const <nl> + { <nl> + return call_method < int > ( m_object , " getChannelMask " , " ( ) I " ) ; <nl> + } <nl> + <nl> + int CJNIAudioFormat : : getEncoding ( ) const <nl> + { <nl> + return call_method < int > ( m_object , " getEncoding " , " ( ) I " ) ; <nl> + } <nl> + <nl> + int CJNIAudioFormat : : getSampleRate ( ) const <nl> + { <nl> + return call_method < int > ( m_object , " getSampleRate " , " ( ) I " ) ; <nl> + } <nl> <nl> <nl> CJNIAudioFormatBuilder : : CJNIAudioFormatBuilder ( ) <nl> mmm a / xbmc / platform / android / jni / AudioFormat . h <nl> ppp b / xbmc / platform / android / jni / AudioFormat . h <nl> <nl> * <nl> * / <nl> <nl> - # include " jutils / jutils - details . hpp " <nl> + # include " JNIBase . 
h " <nl> <nl> namespace jni <nl> { <nl> <nl> - class CJNIAudioFormat <nl> + class CJNIAudioFormat : public CJNIBase <nl> { <nl> - public : <nl> - static void PopulateStaticFields ( ) ; <nl> + public : <nl> + CJNIAudioFormat ( const jni : : jhobject & object ) : CJNIBase ( object ) { } <nl> + <nl> + static void PopulateStaticFields ( ) ; <nl> + <nl> + static int ENCODING_PCM_16BIT ; <nl> + static int ENCODING_PCM_FLOAT ; <nl> + static int ENCODING_AC3 ; <nl> + static int ENCODING_E_AC3 ; <nl> + static int ENCODING_DTS ; <nl> + static int ENCODING_DTS_HD ; <nl> + static int ENCODING_DOLBY_TRUEHD ; <nl> + static int ENCODING_IEC61937 ; <nl> <nl> - static int ENCODING_PCM_16BIT ; <nl> - static int ENCODING_PCM_FLOAT ; <nl> - static int ENCODING_AC3 ; <nl> - static int ENCODING_E_AC3 ; <nl> - static int ENCODING_DTS ; <nl> - static int ENCODING_DTS_HD ; <nl> - static int ENCODING_DOLBY_TRUEHD ; <nl> - static int ENCODING_IEC61937 ; <nl> + static int CHANNEL_OUT_STEREO ; <nl> + static int CHANNEL_OUT_5POINT1 ; <nl> <nl> - static int CHANNEL_OUT_STEREO ; <nl> - static int CHANNEL_OUT_5POINT1 ; <nl> + static int CHANNEL_OUT_FRONT_LEFT ; <nl> + static int CHANNEL_OUT_FRONT_LEFT_OF_CENTER ; <nl> + static int CHANNEL_OUT_FRONT_CENTER ; <nl> + static int CHANNEL_OUT_FRONT_RIGHT_OF_CENTER ; <nl> + static int CHANNEL_OUT_FRONT_RIGHT ; <nl> + static int CHANNEL_OUT_LOW_FREQUENCY ; <nl> + static int CHANNEL_OUT_SIDE_LEFT ; <nl> + static int CHANNEL_OUT_SIDE_RIGHT ; <nl> + static int CHANNEL_OUT_BACK_LEFT ; <nl> + static int CHANNEL_OUT_BACK_CENTER ; <nl> + static int CHANNEL_OUT_BACK_RIGHT ; <nl> <nl> - static int CHANNEL_OUT_FRONT_LEFT ; <nl> - static int CHANNEL_OUT_FRONT_LEFT_OF_CENTER ; <nl> - static int CHANNEL_OUT_FRONT_CENTER ; <nl> - static int CHANNEL_OUT_FRONT_RIGHT_OF_CENTER ; <nl> - static int CHANNEL_OUT_FRONT_RIGHT ; <nl> - static int CHANNEL_OUT_LOW_FREQUENCY ; <nl> - static int CHANNEL_OUT_SIDE_LEFT ; <nl> - static int CHANNEL_OUT_SIDE_RIGHT ; <nl> - static int CHANNEL_OUT_BACK_LEFT ; <nl> - static int CHANNEL_OUT_BACK_CENTER ; <nl> - static int CHANNEL_OUT_BACK_RIGHT ; <nl> + static int CHANNEL_INVALID ; <nl> <nl> - static int CHANNEL_INVALID ; <nl> + int getChannelCount ( ) const ; <nl> + int getChannelIndexMask ( ) const ; <nl> + int getChannelMask ( ) const ; <nl> + int getEncoding ( ) const ; <nl> + int getSampleRate ( ) const ; <nl> <nl> protected : <nl> static void GetStaticValue ( jhclass & c , int & field , char * value ) ; <nl> + static const char * m_classname ; <nl> } ; <nl> <nl> class CJNIAudioFormatBuilder : public CJNIBase <nl> mmm a / xbmc / platform / android / jni / AudioTrack . cpp <nl> ppp b / xbmc / platform / android / jni / AudioTrack . 
cpp <nl> CJNIAudioTrack : : CJNIAudioTrack ( int streamType , int sampleRateInHz , int channelCo <nl> } <nl> <nl> m_audioFormat = audioFormat ; <nl> - if ( m_audioFormat = = CJNIAudioFormat : : ENCODING_IEC61937 ) <nl> - m_buffer = jharray ( xbmc_jnienv ( ) - > NewShortArray ( bufferSizeInBytes / sizeof ( short ) ) ) ; <nl> - else if ( m_audioFormat = = CJNIAudioFormat : : ENCODING_PCM_FLOAT ) <nl> + if ( m_audioFormat = = CJNIAudioFormat : : ENCODING_PCM_FLOAT ) <nl> + m_buffer = jharray ( xbmc_jnienv ( ) - > NewFloatArray ( bufferSizeInBytes / sizeof ( float ) ) ) ; <nl> + else if ( m_audioFormat = = CJNIAudioFormat : : ENCODING_IEC61937 ) <nl> + m_buffer = jharray ( xbmc_jnienv ( ) - > NewShortArray ( bufferSizeInBytes / sizeof ( uint16_t ) ) ) ; <nl> + else <nl> + m_buffer = jharray ( xbmc_jnienv ( ) - > NewByteArray ( bufferSizeInBytes ) ) ; <nl> + <nl> + m_object . setGlobal ( ) ; <nl> + m_buffer . setGlobal ( ) ; <nl> + } <nl> + <nl> + CJNIAudioTrack : : CJNIAudioTrack ( const CJNIAudioAttributes & attributes , const CJNIAudioFormat & format , int bufferSizeInBytes , int mode , int sessionId ) throw ( std : : invalid_argument ) <nl> + : CJNIBase ( " android / media / AudioTrack " ) <nl> + { <nl> + m_object = new_object ( GetClassName ( ) , " < init > " , " ( Landroid / media / AudioAttributes ; Landroid / media / AudioFormat ; III ) V " , <nl> + attributes . get_raw ( ) , format . get_raw ( ) , bufferSizeInBytes , mode , sessionId ) ; <nl> + <nl> + / * AudioTrack constructor may throw IllegalArgumentException , pass it to <nl> + * caller instead of getting us killed * / <nl> + JNIEnv * jenv = xbmc_jnienv ( ) ; <nl> + jthrowable exception = jenv - > ExceptionOccurred ( ) ; <nl> + if ( exception ) <nl> + { <nl> + jenv - > ExceptionClear ( ) ; <nl> + jhclass excClass = find_class ( jenv , " java / lang / Throwable " ) ; <nl> + jmethodID toStrMethod = get_method_id ( jenv , excClass , " toString " , " ( ) Ljava / lang / String ; " ) ; <nl> + jhstring msg = call_method < jhstring > ( exception , toStrMethod ) ; <nl> + throw std : : invalid_argument ( jcast < std : : string > ( msg ) ) ; <nl> + } <nl> + <nl> + m_audioFormat = format . getEncoding ( ) ; <nl> + if ( m_audioFormat = = CJNIAudioFormat : : ENCODING_PCM_FLOAT ) <nl> m_buffer = jharray ( xbmc_jnienv ( ) - > NewFloatArray ( bufferSizeInBytes / sizeof ( float ) ) ) ; <nl> else <nl> m_buffer = jharray ( xbmc_jnienv ( ) - > NewByteArray ( bufferSizeInBytes ) ) ; <nl> mmm a / xbmc / platform / android / jni / AudioTrack . h <nl> ppp b / xbmc / platform / android / jni / AudioTrack . h <nl> <nl> # include " JNIBase . h " <nl> # include " ByteBuffer . h " <nl> # include " AudioTimestamp . h " <nl> + # include " AudioFormat . h " <nl> + # include " AudioAttributes . h " <nl> <nl> namespace jni <nl> { <nl> class CJNIAudioTrack : public CJNIBase <nl> <nl> public : <nl> CJNIAudioTrack ( int streamType , int sampleRateInHz , int channelConfig , int audioFormat , int bufferSizeInBytes , int mode ) throw ( std : : invalid_argument ) ; <nl> + CJNIAudioTrack ( const CJNIAudioAttributes & attributes , const CJNIAudioFormat & format , int bufferSizeInBytes , int mode , int sessionId ) throw ( std : : invalid_argument ) ; <nl> <nl> void play ( ) ; <nl> void pause ( ) ; <nl>
|
ADD: [jni] AudioTrack by attributes
|
xbmc/xbmc
|
734669095482650d9d4e1c2897f9fc457907d8e4
|
2016-10-29T16:38:40Z
|
mmm a / docs / en / operations / table_engines / mergetree . md <nl> ppp b / docs / en / operations / table_engines / mergetree . md <nl> INDEX index_name expr TYPE type ( . . . ) GRANULARITY granularity_value <nl> <nl> For tables from the ` * MergeTree ` family data skipping indices can be specified . <nl> <nl> - These indices aggregate some information about the specified expression on blocks , which consist of ` granularity_value ` granules , <nl> + These indices aggregate some information about the specified expression on blocks , which consist of ` granularity_value ` granules ( size of the granule is specified using ` index_granularity ` setting in the table engine ) , <nl> then these aggregates are used in ` SELECT ` queries for reducing the amount of data to read from the disk by skipping big blocks of data where ` where ` query cannot be satisfied . <nl> <nl> <nl>
|
docs granule
|
ClickHouse/ClickHouse
|
3e5a44db4dc68160d14efa9ae24a01e7c7862a00
|
2019-03-14T09:04:30Z
|
mmm a / src / rdb_protocol / protocol . cc <nl> ppp b / src / rdb_protocol / protocol . cc <nl> void rdb_r_unshard_visitor_t : : operator ( ) ( UNUSED const sindex_status_t & ss ) { <nl> * response_out = read_response_t ( sindex_status_response_t ( ) ) ; <nl> auto ss_response = boost : : get < sindex_status_response_t > ( & response_out - > response ) ; <nl> for ( size_t i = 0 ; i < count ; + + i ) { <nl> - auto resp = boost : : get < sindex_status_response_t > ( & responses [ 0 ] . response ) ; <nl> + auto resp = boost : : get < sindex_status_response_t > ( & responses [ i ] . response ) ; <nl> guarantee ( resp ! = NULL ) ; <nl> for ( auto it = resp - > statuses . begin ( ) ; it ! = resp - > statuses . end ( ) ; + + it ) { <nl> add_status ( it - > second , & ss_response - > statuses [ it - > first ] ) ; <nl>
|
Fixed a bug in the sindex status retrieval. It only considered the first shard multiple times, rather than considering all shards.
|
rethinkdb/rethinkdb
|
ac87e4a0a745a31f021857d7fd3ed741c5286cd3
|
2014-03-27T02:53:10Z
|
mmm a / include / swift / SILAnalysis / CallGraphAnalysis . h <nl> ppp b / include / swift / SILAnalysis / CallGraphAnalysis . h <nl> class CallGraph { <nl> llvm : : DenseMap < SILFunction * , CallGraphNode * > FunctionToNodeMap ; <nl> llvm : : SmallVector < CallGraphSCC * , 16 > BottomUpSCCOrder ; <nl> std : : vector < SILFunction * > BottomUpFunctionOrder ; <nl> - llvm : : BumpPtrAllocator NodeAllocator ; <nl> - llvm : : SpecificBumpPtrAllocator < CallGraphEdge > EdgeAllocator ; <nl> + <nl> + / / / An allocator used by the callgraph . <nl> + llvm : : BumpPtrAllocator Allocator ; <nl> <nl> public : <nl> CallGraph ( SILModule * M , bool completeModule ) ; <nl> <nl> - ~ CallGraph ( ) { <nl> - for ( auto * SCC : BottomUpSCCOrder ) <nl> - delete SCC ; <nl> - } <nl> + ~ CallGraph ( ) { } <nl> <nl> llvm : : SmallVectorImpl < CallGraphNode * > & getCallGraphRoots ( ) { <nl> return CallGraphRoots ; <nl> mmm a / lib / SILAnalysis / CallGraphAnalysis . cpp <nl> ppp b / lib / SILAnalysis / CallGraphAnalysis . cpp <nl> void CallGraph : : addCallGraphNode ( SILFunction * F , unsigned NodeOrdinal ) { <nl> / / TODO : Compute this from the call graph itself after stripping <nl> / / unreachable nodes from graph . <nl> + + NumCallGraphNodes ; <nl> - auto * Node = new ( NodeAllocator ) CallGraphNode ( F , NodeOrdinal ) ; <nl> + auto * Node = new ( Allocator ) CallGraphNode ( F , NodeOrdinal ) ; <nl> <nl> assert ( ! FunctionToNodeMap . count ( F ) & & <nl> " Added function already has a call graph node ! " ) ; <nl> void CallGraph : : addEdgesForApply ( ApplyInst * AI , CallGraphNode * CallerNode ) { <nl> bool Complete = false ; <nl> <nl> if ( tryGetCalleeSet ( AI - > getCallee ( ) , CalleeSet , Complete ) ) { <nl> - auto * Edge = new ( EdgeAllocator . Allocate ( ) ) CallGraphEdge ( AI , CalleeSet , <nl> - Complete ) ; <nl> + auto * Edge = new ( Allocator ) CallGraphEdge ( AI , CalleeSet , Complete ) ; <nl> CallerNode - > addCalleeEdge ( Edge ) ; <nl> <nl> for ( auto * CalleeNode : CalleeSet ) <nl> static void orderCallees ( const CallGraphEdge : : CalleeSetType & Callees , <nl> / / / point to multiple call graph nodes in the case where we can call <nl> / / / one of several different functions . <nl> class CallGraphSCCFinder { <nl> + unsigned NextDFSNum ; <nl> + llvm : : SmallVectorImpl < CallGraphSCC * > & TheSCCs ; <nl> + <nl> + llvm : : DenseMap < CallGraphNode * , unsigned > DFSNum ; <nl> + llvm : : DenseMap < CallGraphNode * , unsigned > MinDFSNum ; <nl> + llvm : : SetVector < CallGraphNode * > DFSStack ; <nl> + <nl> + llvm : : BumpPtrAllocator & BPA ; <nl> + <nl> public : <nl> - CallGraphSCCFinder ( llvm : : SmallVectorImpl < CallGraphSCC * > & TheSCCs ) <nl> - : NextDFSNum ( 0 ) , TheSCCs ( TheSCCs ) { } <nl> + CallGraphSCCFinder ( llvm : : SmallVectorImpl < CallGraphSCC * > & TheSCCs , <nl> + llvm : : BumpPtrAllocator & BPA ) <nl> + : NextDFSNum ( 0 ) , TheSCCs ( TheSCCs ) , BPA ( BPA ) { } <nl> <nl> void DFS ( CallGraphNode * Node ) { <nl> / / Set the DFSNum for this node if we haven ' t already , and if we <nl> class CallGraphSCCFinder { <nl> / / If this node is the root of an SCC ( including SCCs with a <nl> / / single node ) , pop the SCC and push it on our SCC stack . <nl> if ( DFSNum [ Node ] = = MinDFSNum [ Node ] ) { <nl> - auto * SCC = new CallGraphSCC ( ) ; <nl> + auto * SCC = new ( BPA ) CallGraphSCC ( ) ; <nl> <nl> CallGraphNode * Popped ; <nl> do { <nl> class CallGraphSCCFinder { <nl> TheSCCs . 
push_back ( SCC ) ; <nl> } <nl> } <nl> - <nl> - private : <nl> - unsigned NextDFSNum ; <nl> - llvm : : SmallVectorImpl < CallGraphSCC * > & TheSCCs ; <nl> - <nl> - llvm : : DenseMap < CallGraphNode * , unsigned > DFSNum ; <nl> - llvm : : DenseMap < CallGraphNode * , unsigned > MinDFSNum ; <nl> - llvm : : SetVector < CallGraphNode * > DFSStack ; <nl> } ; <nl> <nl> void CallGraph : : computeBottomUpSCCOrder ( ) { <nl> void CallGraph : : computeBottomUpSCCOrder ( ) { <nl> BottomUpSCCOrder . clear ( ) ; <nl> } <nl> <nl> - CallGraphSCCFinder SCCFinder ( BottomUpSCCOrder ) ; <nl> + CallGraphSCCFinder SCCFinder ( BottomUpSCCOrder , Allocator ) ; <nl> for ( auto * Node : getCallGraphRoots ( ) ) <nl> SCCFinder . DFS ( Node ) ; <nl> } <nl>
|
[callgraph] Use one bump ptr allocator for all memory allocated in the callgraph.
|
apple/swift
|
28cde0e785200d528e21893f85c2e2cdede0166a
|
2015-02-24T22:49:47Z
|
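The commit above replaces per-kind allocators and manual SCC deletion with a single arena, so every node, edge, and SCC is placement-new'd into memory whose lifetime is the call graph's. The real code uses llvm::BumpPtrAllocator; the following is only a stand-alone toy C++ arena making the same trade (pointer-bump allocation, no per-object free, everything released when the arena is destroyed), with illustrative names.

#include <cstddef>
#include <memory>
#include <new>
#include <vector>

// Toy bump arena: allocation just advances an offset inside fixed-size slabs.
// Only suitable for trivially destructible objects no larger than one slab.
class BumpArena {
  static constexpr std::size_t kSlab = 4096;
  std::vector<std::unique_ptr<char[]>> slabs_;
  std::size_t used_ = kSlab;  // forces a slab to be created on first use
public:
  void *allocate(std::size_t bytes, std::size_t align) {
    used_ = (used_ + align - 1) & ~(align - 1);
    if (slabs_.empty() || used_ + bytes > kSlab) {
      slabs_.emplace_back(new char[kSlab]);
      used_ = 0;
    }
    void *p = slabs_.back().get() + used_;
    used_ += bytes;
    return p;
  }
};

struct Node { int ordinal; };

int main() {
  BumpArena arena;
  Node *n = new (arena.allocate(sizeof(Node), alignof(Node))) Node{7};
  (void)n;  // no delete: the arena frees every slab when it goes out of scope
}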
mmm a / configure . in <nl> ppp b / configure . in <nl> AC_CHECK_HEADER ( [ FLAC / stream_decoder . h ] , , AC_MSG_ERROR ( $ missing_library ) ) <nl> # we need to check for the header because if it exists we set the openssl <nl> # and gcrypt MT callback hooks . This is mostly so that libcurl operates <nl> # in MT manner correctly . <nl> + AC_MSG_CHECKING ( [ for CRYPTO_set_locking_callback ( 0 ) ] ) <nl> + AC_TRY_LINK ( [ ] , [ CRYPTO_set_locking_callback ( 0 ) ; ] , <nl> + [ have_curl_static = yes ] , <nl> + [ have_curl_static = no ] ) <nl> + AC_MSG_RESULT ( $ have_curl_static ) <nl> AC_CHECK_HEADER ( [ openssl / crypto . h ] , AC_DEFINE ( [ HAVE_OPENSSL ] , [ 1 ] , [ Define if we have openssl ] ) , ) <nl> AC_CHECK_HEADER ( [ gcrypt . h ] , gcrypt_headers_available = yes , gcrypt_headers_available = no ) <nl> if test " $ gcrypt_headers_available " = " yes " ; then <nl> else <nl> AC_DEFINE ( [ HAVE_LIBSSH ] , [ 1 ] , [ Whether to use libSSH library . ] ) <nl> fi <nl> <nl> + # libcurl <nl> + if test " x $ have_curl_static " = " xyes " ; then <nl> + AC_DEFINE ( [ HAS_CURL_STATIC ] , [ 1 ] , [ Whether OpenSSL inside libcurl is static . ] ) <nl> + fi <nl> + <nl> # libRTMP <nl> if test " $ use_librtmp " ! = " no " ; then <nl> AC_CHECK_HEADERS ( [ librtmp / log . h librtmp / amf . h librtmp / rtmp . h ] , , <nl> mmm a / xbmc / filesystem / DllLibCurl . cpp <nl> ppp b / xbmc / filesystem / DllLibCurl . cpp <nl> <nl> <nl> # include < assert . h > <nl> <nl> + # ifdef HAVE_OPENSSL <nl> + # include " threads / Thread . h " <nl> + # include " openssl / crypto . h " <nl> + <nl> + static CCriticalSection * * m_sslLockArray = NULL ; <nl> + <nl> + # ifdef __cplusplus <nl> + extern " C " <nl> + { <nl> + # endif <nl> + <nl> + void ssl_lock_callback ( int mode , int type , char * file , int line ) <nl> + { <nl> + if ( ! m_sslLockArray ) <nl> + return ; <nl> + <nl> + if ( mode & CRYPTO_LOCK ) <nl> + m_sslLockArray [ type ] - > lock ( ) ; <nl> + else <nl> + m_sslLockArray [ type ] - > unlock ( ) ; <nl> + } <nl> + <nl> + unsigned long ssl_thread_id ( void ) <nl> + { <nl> + return ( unsigned long ) CThread : : GetCurrentThreadId ( ) ; <nl> + } <nl> + <nl> + # ifdef __cplusplus <nl> + } <nl> + # endif <nl> + <nl> + # endif / / HAVE_OPENSSL <nl> + <nl> using namespace XCURL ; <nl> <nl> / * okey this is damn ugly . our dll loader doesn ' t allow for postload , preunload functions * / <nl> bool DllLibCurlGlobal : : Load ( ) <nl> / * check idle will clean up the last one * / <nl> g_curlReferences = 2 ; <nl> <nl> + # if defined ( HAS_CURL_STATIC ) <nl> + / / Initialize ssl locking array <nl> + m_sslLockArray = new CCriticalSection * [ CRYPTO_num_locks ( ) ] ; <nl> + for ( int i = 0 ; i < CRYPTO_num_locks ( ) ; i + + ) <nl> + m_sslLockArray [ i ] = new CCriticalSection ; <nl> + <nl> + crypto_set_id_callback ( ( unsigned long ( * ) ( ) ) ssl_thread_id ) ; <nl> + crypto_set_locking_callback ( ( void ( * ) ( int , int , const char * , int ) ) ssl_lock_callback ) ; <nl> + # endif <nl> + <nl> return true ; <nl> } <nl> <nl> void DllLibCurlGlobal : : Unload ( ) <nl> / / close libcurl <nl> global_cleanup ( ) ; <nl> <nl> + # if defined ( HAS_CURL_STATIC ) <nl> + / / Cleanup ssl locking array <nl> + crypto_set_id_callback ( NULL ) ; <nl> + crypto_set_locking_callback ( NULL ) ; <nl> + for ( int i = 0 ; i < CRYPTO_num_locks ( ) ; i + + ) <nl> + delete m_sslLockArray [ i ] ; <nl> + <nl> + delete [ ] m_sslLockArray ; <nl> + # endif <nl> + <nl> DllDynamic : : Unload ( ) ; <nl> } <nl> <nl> mmm a / xbmc / filesystem / DllLibCurl . 
h <nl> ppp b / xbmc / filesystem / DllLibCurl . h <nl> namespace XCURL <nl> DEFINE_METHOD2 ( struct curl_slist * , slist_append , ( struct curl_slist * p1 , const char * p2 ) ) <nl> DEFINE_METHOD1 ( void , slist_free_all , ( struct curl_slist * p1 ) ) <nl> DEFINE_METHOD1 ( const char * , easy_strerror , ( CURLcode p1 ) ) <nl> + # if defined ( HAS_CURL_STATIC ) <nl> + DEFINE_METHOD1 ( void , crypto_set_id_callback , ( unsigned long ( * p1 ) ( void ) ) ) <nl> + DEFINE_METHOD1 ( void , crypto_set_locking_callback , ( void ( * p1 ) ( int , int , const char * , int ) ) ) <nl> + # endif <nl> BEGIN_METHOD_RESOLVE ( ) <nl> RESOLVE_METHOD_RENAME ( curl_global_init , global_init ) <nl> RESOLVE_METHOD_RENAME ( curl_global_cleanup , global_cleanup ) <nl> namespace XCURL <nl> RESOLVE_METHOD_RENAME ( curl_multi_cleanup , multi_cleanup ) <nl> RESOLVE_METHOD_RENAME ( curl_slist_append , slist_append ) <nl> RESOLVE_METHOD_RENAME ( curl_slist_free_all , slist_free_all ) <nl> + # if defined ( HAS_CURL_STATIC ) <nl> + RESOLVE_METHOD_RENAME ( CRYPTO_set_id_callback , crypto_set_id_callback ) <nl> + RESOLVE_METHOD_RENAME ( CRYPTO_set_locking_callback , crypto_set_locking_callback ) <nl> + # endif <nl> END_METHOD_RESOLVE ( ) <nl> <nl> } ; <nl>
|
FIX : implement libcurl openssl thread - safe locking
|
xbmc/xbmc
|
6922aa8bf809ac59524ba0545c4a9476f428758d
|
2014-02-17T14:00:32Z
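The patch above registers OpenSSL's legacy locking callbacks so that a statically linked libcurl can run TLS transfers from several threads at once. Below is a minimal, self-contained sketch of the same pattern, using std::mutex in place of Kodi's CCriticalSection; the helper names init_openssl_locks / cleanup_openssl_locks are illustrative only, and the CRYPTO_set_locking_callback / CRYPTO_set_id_callback API shown applies to OpenSSL releases older than 1.1.0 (later versions lock internally and ignore these hooks).

#include <openssl/crypto.h>
#include <memory>
#include <mutex>
#include <thread>

static std::unique_ptr<std::mutex[]> g_ssl_locks;

// OpenSSL calls this whenever it wants to take or release lock number `type`.
static void ssl_locking_cb(int mode, int type, const char* /*file*/, int /*line*/)
{
  if (mode & CRYPTO_LOCK)
    g_ssl_locks[type].lock();
  else
    g_ssl_locks[type].unlock();
}

// OpenSSL keeps per-thread error queues keyed by this numeric id.
static unsigned long ssl_thread_id_cb(void)
{
  return static_cast<unsigned long>(
      std::hash<std::thread::id>{}(std::this_thread::get_id()));
}

static void init_openssl_locks()
{
  g_ssl_locks.reset(new std::mutex[CRYPTO_num_locks()]);
  CRYPTO_set_id_callback(ssl_thread_id_cb);
  CRYPTO_set_locking_callback(ssl_locking_cb);
}

static void cleanup_openssl_locks()
{
  CRYPTO_set_id_callback(nullptr);
  CRYPTO_set_locking_callback(nullptr);
  g_ssl_locks.reset();
}

In the Kodi patch the equivalent setup happens in DllLibCurlGlobal::Load and the teardown in DllLibCurlGlobal::Unload, guarded by HAS_CURL_STATIC.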
|
mmm a / core / io / file_access_buffered . h <nl> ppp b / core / io / file_access_buffered . h <nl> class FileAccessBuffered : public FileAccess { <nl> Error set_error ( Error p_error ) const ; <nl> <nl> mutable struct File { <nl> - bool open ; <nl> - int size ; <nl> - int offset ; <nl> + bool open = false ; <nl> + int size = 0 ; <nl> + int offset = 0 ; <nl> String name ; <nl> - int access_flags ; <nl> + int access_flags = 0 ; <nl> } file ; <nl> <nl> mutable struct Cache { <nl> Vector < uint8_t > buffer ; <nl> - int offset ; <nl> + int offset = 0 ; <nl> } cache ; <nl> <nl> virtual int read_data_block ( int p_offset , int p_size , uint8_t * p_dest = nullptr ) const = 0 ; <nl>
|
Ensure FileAccessBuffered structs are properly initialized
|
godotengine/godot
|
37a2a5a99789d2ec7cfed616ee11d5560143faac
|
2020-06-19T15:36:28Z
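The fix above relies on C++11 default member initializers so that FileAccessBuffered's File and Cache structs never start with indeterminate values. A stripped-down sketch of the idiom, with a hypothetical File struct rather than Godot's real class:

#include <cstdio>

struct File {
  bool open = false;  // applied by every constructor, including the implicit one,
  int size = 0;       // so reading these before the first explicit assignment is
  int offset = 0;     // well defined instead of undefined behaviour
};

int main()
{
  File f;  // no constructor written anywhere, members still start as false/0/0
  std::printf("%d %d %d\n", f.open, f.size, f.offset);
  return 0;
}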
|
mmm a / tensorflow / contrib / learn / python / learn / ops / dnn_ops . py <nl> ppp b / tensorflow / contrib / learn / python / learn / ops / dnn_ops . py <nl> <nl> <nl> from tensorflow . contrib import layers <nl> from tensorflow . contrib . learn . python . learn . ops import dropout_ops <nl> + <nl> from tensorflow . python . framework import ops <nl> + from tensorflow . python . platform import tf_logging as logging <nl> from tensorflow . python . ops import array_ops as array_ops_ <nl> from tensorflow . python . ops import control_flow_ops <nl> from tensorflow . python . ops import nn <nl> <nl> <nl> def dnn ( tensor_in , hidden_units , activation = nn . relu , dropout = None ) : <nl> " " " Creates fully connected deep neural network subgraph . <nl> + This is deprecated . Please use contrib . layers . dnn instead . <nl> <nl> Args : <nl> tensor_in : tensor or placeholder for input features . <nl> def dnn ( tensor_in , hidden_units , activation = nn . relu , dropout = None ) : <nl> Returns : <nl> A tensor which would be a deep neural network . <nl> " " " <nl> + logging . warning ( " learn . ops . dnn is deprecated , \ <nl> + please use contrib . layers . dnn . " ) <nl> with vs . variable_scope ( ' dnn ' ) : <nl> for i , n_units in enumerate ( hidden_units ) : <nl> with vs . variable_scope ( ' layer % d ' % i ) : <nl>
|
Simply add deprecated message to contrib . learn . ops . dnn
|
tensorflow/tensorflow
|
7e32f9283d9346313a501fd98a0aa269b595b596
|
2016-06-16T00:28:16Z
|
mmm a / hphp / runtime / vm / jit / type - profile . h <nl> ppp b / hphp / runtime / vm / jit / type - profile . h <nl> struct TypeProfile { <nl> a . type | = b . type ; <nl> } <nl> <nl> + void serialize ( ProfDataSerializer & ser ) const { type . serialize ( ser ) ; } <nl> + void deserialize ( ProfDataDeserializer & ser ) { type = Type : : deserialize ( ser ) ; } <nl> + <nl> Type type ; / / This gets initialized with 0 , which is TBottom . <nl> static_assert ( Type : : Bits : : kBottom = = 0 , " Assuming TBottom is 0 " ) ; <nl> <nl> mmm a / hphp / runtime / vm / jit / type . cpp <nl> ppp b / hphp / runtime / vm / jit / type . cpp <nl> enum TypeKey : uint8_t { <nl> } ; <nl> } <nl> <nl> - void Type : : serialize ( ProfDataSerializer & ser ) { <nl> + void Type : : serialize ( ProfDataSerializer & ser ) const { <nl> SCOPE_EXIT { <nl> ITRACE_MOD ( Trace : : hhbc , 2 , " Type : { } \ n " , toString ( ) ) ; <nl> } ; <nl> mmm a / hphp / runtime / vm / jit / type . h <nl> ppp b / hphp / runtime / vm / jit / type . h <nl> struct Type { <nl> / * <nl> * Serialization / deserialization . <nl> * / <nl> - void serialize ( ProfDataSerializer & ) ; <nl> + void serialize ( ProfDataSerializer & ) const ; <nl> static Type deserialize ( ProfDataDeserializer & ) ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl>
|
Properly serialize / deserialize TypeProfiles
|
facebook/hhvm
|
c36321bde066edc9bdc6ab90f9f954b24019c819
|
2018-04-18T17:53:23Z
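The change above only compiles because Type::serialize is itself const-qualified: TypeProfile::serialize is declared const and delegates to it. A small sketch of the same const-correct serialize/deserialize pairing, where the Serializer type and the byte layout are made up for illustration:

#include <cstdint>
#include <vector>

struct Serializer { std::vector<uint8_t> bytes; };

struct Type {
  uint64_t bits = 0;

  // const: writing the value out never mutates it, so const holders can serialize too.
  void serialize(Serializer& ser) const {
    for (int i = 0; i < 8; ++i)
      ser.bytes.push_back(static_cast<uint8_t>(bits >> (8 * i)));
  }

  // static factory: deserialization produces a fresh value instead of patching one in place.
  static Type deserialize(const std::vector<uint8_t>& in) {
    Type t;
    for (int i = 0; i < 8 && i < static_cast<int>(in.size()); ++i)
      t.bits |= static_cast<uint64_t>(in[i]) << (8 * i);
    return t;
  }
};

struct TypeProfile {
  Type type;
  // Legal only because Type::serialize is const.
  void serialize(Serializer& ser) const { type.serialize(ser); }
};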
|
mmm a / code / data_structures / linked_list / README . md <nl> ppp b / code / data_structures / linked_list / README . md <nl> <nl> - # Cosmos <nl> + # Linked List <nl> <nl> - Collaborative effort by [ OpenGenus ] ( https : / / github . com / OpenGenus / cosmos ) <nl> \ No newline at end of file <nl> + A linked list is a data structure containing a series of data that ' s linked together . Each node of a linked list contains a piece of data and a link to the next node , as shown in the diagram below : <nl> + <nl> + ! [ Singly linked list diagram ] ( https : / / upload . wikimedia . org / wikipedia / commons / thumb / 6 / 6d / Singly - linked - list . svg / 408px - Singly - linked - list . svg . png ) <nl> + <nl> + Image credit : ` By Lasindi - Own work , Public Domain , https : / / commons . wikimedia . org / w / index . php ? curid = 2245162 ` <nl> + <nl> + # # Types of Linked Lists <nl> + <nl> + There are several types of linked lists . The most common ones are : <nl> + <nl> + # # # Singly linked list <nl> + <nl> + The simplest type . Each node has a data field and a link to the next node , with the last node ' s link being empty or pointing to ` null ` . <nl> + <nl> + # # # Double linked list <nl> + <nl> + ! [ Doubly linked list diagram ] ( https : / / upload . wikimedia . org / wikipedia / commons / thumb / 5 / 5e / Doubly - linked - list . svg / 610px - Doubly - linked - list . svg . png ) <nl> + <nl> + Image credit : ` By Lasindi , Public Domain , https : / / commons . wikimedia . org / wiki / File : Doubly - linked - list . svg ` <nl> + <nl> + Each node of a doubly linked list contains , in addition to the data field and a link to the next code , a link to the previous node . <nl> + <nl> + # # Circularly linked list <nl> + <nl> + ! [ Circularly linked list diagram ] ( https : / / upload . wikimedia . org / wikipedia / commons / thumb / d / df / Circularly - linked - list . svg / 350px - Circularly - linked - list . svg . png ) <nl> + <nl> + Image credit : ` By Lasindi , Public Domain , https : / / commons . wikimedia . org / wiki / File : Circularly - linked - list . svg ` <nl> + <nl> + A singly linked list , except that the last item links back to the first item , as opposed to a singly linked list which links to nothing or ` null ` . <nl> + <nl> + # # Sources and more detailed information : <nl> + <nl> + - https : / / en . wikipedia . org / wiki / Linked_list <nl> + <nl> + mmm <nl> + <nl> + < p align = " center " > <nl> + A massive collaborative effort by < a href = " https : / / github . com / OpenGenus / cosmos " > OpenGenus Foundation < / a > <nl> + < / p > <nl> + <nl> + mmm <nl>
|
Merge pull request from IvanFon / linked - list - readme
|
OpenGenus/cosmos
|
286c50cf58b9d9da4b4107a9211a0b5301ea24f3
|
2017-10-16T21:26:09Z
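The new README describes the node layout in prose and diagrams only, so here is a minimal singly linked list sketch that matches that description; the Node / push_front names are illustrative and are not taken from the Cosmos repository.

#include <iostream>
#include <memory>

// One node: a piece of data plus a link to the next node.
// A null `next` plays the role of the empty link that ends the list.
template <typename T>
struct Node {
  T data;
  std::unique_ptr<Node<T>> next;
};

// Insert a value at the front of the list in O(1).
template <typename T>
void push_front(std::unique_ptr<Node<T>>& head, T value)
{
  auto node = std::make_unique<Node<T>>();
  node->data = std::move(value);
  node->next = std::move(head);
  head = std::move(node);
}

int main()
{
  std::unique_ptr<Node<int>> head;
  for (int v : {3, 2, 1})
    push_front(head, v);
  for (Node<int>* n = head.get(); n != nullptr; n = n->next.get())
    std::cout << n->data << ' ';  // prints: 1 2 3
  return 0;
}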
|
mmm a / src / script / standard . cpp <nl> ppp b / src / script / standard . cpp <nl> bool ExtractDestinations ( const CScript & scriptPubKey , txnouttype & typeRet , std : : <nl> <nl> namespace <nl> { <nl> - class CScriptVisitor : public boost : : static_visitor < bool > <nl> + class CScriptVisitor : public boost : : static_visitor < CScript > <nl> { <nl> - private : <nl> - CScript * script ; <nl> public : <nl> - explicit CScriptVisitor ( CScript * scriptin ) { script = scriptin ; } <nl> - <nl> - bool operator ( ) ( const CNoDestination & dest ) const { <nl> - script - > clear ( ) ; <nl> - return false ; <nl> + CScript operator ( ) ( const CNoDestination & dest ) const <nl> + { <nl> + return CScript ( ) ; <nl> } <nl> <nl> - bool operator ( ) ( const PKHash & keyID ) const { <nl> - script - > clear ( ) ; <nl> - * script < < OP_DUP < < OP_HASH160 < < ToByteVector ( keyID ) < < OP_EQUALVERIFY < < OP_CHECKSIG ; <nl> - return true ; <nl> + CScript operator ( ) ( const PKHash & keyID ) const <nl> + { <nl> + return CScript ( ) < < OP_DUP < < OP_HASH160 < < ToByteVector ( keyID ) < < OP_EQUALVERIFY < < OP_CHECKSIG ; <nl> } <nl> <nl> - bool operator ( ) ( const ScriptHash & scriptID ) const { <nl> - script - > clear ( ) ; <nl> - * script < < OP_HASH160 < < ToByteVector ( scriptID ) < < OP_EQUAL ; <nl> - return true ; <nl> + CScript operator ( ) ( const ScriptHash & scriptID ) const <nl> + { <nl> + return CScript ( ) < < OP_HASH160 < < ToByteVector ( scriptID ) < < OP_EQUAL ; <nl> } <nl> <nl> - bool operator ( ) ( const WitnessV0KeyHash & id ) const <nl> + CScript operator ( ) ( const WitnessV0KeyHash & id ) const <nl> { <nl> - script - > clear ( ) ; <nl> - * script < < OP_0 < < ToByteVector ( id ) ; <nl> - return true ; <nl> + return CScript ( ) < < OP_0 < < ToByteVector ( id ) ; <nl> } <nl> <nl> - bool operator ( ) ( const WitnessV0ScriptHash & id ) const <nl> + CScript operator ( ) ( const WitnessV0ScriptHash & id ) const <nl> { <nl> - script - > clear ( ) ; <nl> - * script < < OP_0 < < ToByteVector ( id ) ; <nl> - return true ; <nl> + return CScript ( ) < < OP_0 < < ToByteVector ( id ) ; <nl> } <nl> <nl> - bool operator ( ) ( const WitnessUnknown & id ) const <nl> + CScript operator ( ) ( const WitnessUnknown & id ) const <nl> { <nl> - script - > clear ( ) ; <nl> - * script < < CScript : : EncodeOP_N ( id . version ) < < std : : vector < unsigned char > ( id . program , id . program + id . length ) ; <nl> - return true ; <nl> + return CScript ( ) < < CScript : : EncodeOP_N ( id . version ) < < std : : vector < unsigned char > ( id . program , id . program + id . length ) ; <nl> } <nl> } ; <nl> + <nl> + const CScriptVisitor g_script_visitor ; <nl> + <nl> } / / namespace <nl> <nl> CScript GetScriptForDestination ( const CTxDestination & dest ) <nl> { <nl> - CScript script ; <nl> - <nl> - boost : : apply_visitor ( CScriptVisitor ( & script ) , dest ) ; <nl> - return script ; <nl> + return boost : : apply_visitor ( : : g_script_visitor , dest ) ; <nl> } <nl> <nl> CScript GetScriptForRawPubKey ( const CPubKey & pubKey ) <nl>
|
refactor : Make CScriptVisitor stateless
|
bitcoin/bitcoin
|
3351c91ed402895dcb4f803a29d2cac70ccfa8b4
|
2020-06-05T23:41:09Z
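The refactor above turns CScriptVisitor from a mutating visitor (clear a script passed in by pointer, return bool) into a stateless one whose overloads simply return the script, so a single const instance can serve every caller. A sketch of that shape using std::variant / std::visit as a stand-in for boost::variant, with toy destination types instead of Bitcoin's:

#include <iostream>
#include <string>
#include <variant>

struct NoDestination {};
struct KeyHash { std::string hex; };
struct ScriptHash { std::string hex; };
using Destination = std::variant<NoDestination, KeyHash, ScriptHash>;

// Stateless visitor: no members, every overload builds and returns a fresh value.
struct ScriptBuilder {
  std::string operator()(const NoDestination&) const { return {}; }
  std::string operator()(const KeyHash& k) const { return "DUP HASH160 " + k.hex + " EQUALVERIFY CHECKSIG"; }
  std::string operator()(const ScriptHash& s) const { return "HASH160 " + s.hex + " EQUAL"; }
};

const ScriptBuilder g_script_builder;  // safe to share: it carries no state

std::string GetScriptForDestination(const Destination& dest)
{
  return std::visit(g_script_builder, dest);
}

int main()
{
  std::cout << GetScriptForDestination(KeyHash{"ab12"}) << '\n';
  return 0;
}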
|
mmm a / xbmc / PasswordManager . cpp <nl> ppp b / xbmc / PasswordManager . cpp <nl> bool CPasswordManager : : PromptToAuthenticateURL ( CURL & url ) <nl> url . SetUserName ( username ) ; <nl> <nl> / / save the information for later <nl> + SaveAuthenticatedURL ( url , saveDetails ) ; <nl> + return true ; <nl> + } <nl> + <nl> + void CPasswordManager : : SaveAuthenticatedURL ( const CURL & url , bool saveToProfile ) <nl> + { <nl> + CSingleLock lock ( m_critSection ) ; <nl> + <nl> + CStdString path = GetLookupPath ( url ) ; <nl> CStdString authenticatedPath = url . Get ( ) ; <nl> <nl> if ( ! m_loaded ) <nl> Load ( ) ; <nl> <nl> - if ( saveDetails ) <nl> + if ( saveToProfile ) <nl> { / / write to some random XML file . . . <nl> m_permanentCache [ path ] = authenticatedPath ; <nl> Save ( ) ; <nl> bool CPasswordManager : : PromptToAuthenticateURL ( CURL & url ) <nl> / / save for both this path and more generally the server as a whole . <nl> m_temporaryCache [ path ] = authenticatedPath ; <nl> m_temporaryCache [ GetServerLookup ( path ) ] = authenticatedPath ; <nl> - return true ; <nl> } <nl> <nl> void CPasswordManager : : Clear ( ) <nl> mmm a / xbmc / PasswordManager . h <nl> ppp b / xbmc / PasswordManager . h <nl> class CPasswordManager <nl> <nl> \ param url the URL to authenticate . <nl> \ return true if the user entered details , false if the user cancelled the dialog . <nl> - \ sa CURL <nl> + \ sa CURL , SaveAuthenticatedURL <nl> * / <nl> bool PromptToAuthenticateURL ( CURL & url ) ; <nl> <nl> + / * ! <nl> + \ brief Save an authenticated URL . <nl> + <nl> + This routine stores an authenticated URL in the temporary cache , and optionally <nl> + saves these details into the users profile . <nl> + <nl> + \ param url the URL to authenticate . <nl> + \ param saveToProfile whether to save in the users profile , defaults to true . <nl> + \ sa CURL , PromptToAuthenticateURL <nl> + * / <nl> + void SaveAuthenticatedURL ( const CURL & url , bool saveToProfile = true ) ; <nl> + <nl> / * ! <nl> \ brief Clear any previously cached passwords <nl> * / <nl>
|
splits saving of auth details out of CPasswordManager : : PromptForAuthentication
|
xbmc/xbmc
|
870c888eabb3bb360e257395441f6b69c1742c00
|
2012-07-07T22:39:01Z
|
mmm a / test / core / compression / BUILD <nl> ppp b / test / core / compression / BUILD <nl> <nl> # limitations under the License . <nl> <nl> load ( " / / bazel : grpc_build_system . bzl " , " grpc_cc_binary " , " grpc_cc_library " , " grpc_cc_test " , " grpc_package " ) <nl> + load ( " / / test / core / util : grpc_fuzzer . bzl " , " grpc_fuzzer " ) <nl> <nl> grpc_package ( name = " test / core / compression " ) <nl> <nl> grpc_cc_test ( <nl> ] , <nl> ) <nl> <nl> + grpc_fuzzer ( <nl> + name = " message_compress_fuzzer " , <nl> + srcs = [ " message_compress_fuzzer . cc " ] , <nl> + corpus = " message_compress_corpus " , <nl> + tags = [ " no_windows " ] , <nl> + deps = [ <nl> + " / / : grpc " , <nl> + " / / test / core / util : grpc_test_util " , <nl> + ] , <nl> + ) <nl> + <nl> + grpc_fuzzer ( <nl> + name = " message_decompress_fuzzer " , <nl> + srcs = [ " message_decompress_fuzzer . cc " ] , <nl> + corpus = " message_decompress_corpus " , <nl> + tags = [ " no_windows " ] , <nl> + deps = [ <nl> + " / / : grpc " , <nl> + " / / test / core / util : grpc_test_util " , <nl> + ] , <nl> + ) <nl> + <nl> grpc_cc_test ( <nl> name = " message_compress_test " , <nl> srcs = [ " message_compress_test . cc " ] , <nl> grpc_cc_test ( <nl> ] , <nl> ) <nl> <nl> + grpc_fuzzer ( <nl> + name = " stream_compression_fuzzer " , <nl> + srcs = [ " stream_compression_fuzzer . cc " ] , <nl> + corpus = " stream_compression_corpus " , <nl> + tags = [ " no_windows " ] , <nl> + deps = [ <nl> + " / / : grpc " , <nl> + " / / test / core / util : grpc_test_util " , <nl> + ] , <nl> + ) <nl> + <nl> + grpc_fuzzer ( <nl> + name = " stream_decompression_fuzzer " , <nl> + srcs = [ " stream_decompression_fuzzer . cc " ] , <nl> + corpus = " stream_decompression_corpus " , <nl> + tags = [ " no_windows " ] , <nl> + deps = [ <nl> + " / / : grpc " , <nl> + " / / test / core / util : grpc_test_util " , <nl> + ] , <nl> + ) <nl> + <nl> grpc_cc_test ( <nl> name = " stream_compression_test " , <nl> srcs = [ " stream_compression_test . cc " ] , <nl> new file mode 100644 <nl> index 00000000000 . . 1ea0853d2a3 <nl> mmm / dev / null <nl> ppp b / test / core / compression / message_compress_fuzzer . cc <nl> <nl> + / * <nl> + * <nl> + * Copyright 2019 gRPC authors . <nl> + * <nl> + * Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + * you may not use this file except in compliance with the License . <nl> + * You may obtain a copy of the License at <nl> + * <nl> + * http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + * <nl> + * Unless required by applicable law or agreed to in writing , software <nl> + * distributed under the License is distributed on an " AS IS " BASIS , <nl> + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + * See the License for the specific language governing permissions and <nl> + * limitations under the License . <nl> + * <nl> + * / <nl> + <nl> + # include < grpc / grpc . h > <nl> + # include < stdbool . h > <nl> + # include < stdint . h > <nl> + # include < string . h > <nl> + <nl> + # include " src / core / lib / compression / message_compress . h " <nl> + # include " src / core / lib / security / credentials / credentials . h " <nl> + # include " test / core / util / memory_counters . 
h " <nl> + <nl> + bool squelch = true ; <nl> + bool leak_check = true ; <nl> + <nl> + extern " C " int LLVMFuzzerTestOneInput ( const uint8_t * data , size_t size ) { <nl> + if ( size < 1 ) return 0 ; <nl> + <nl> + / / Instead of rolling something complicated to convert a uint8_t to the enum , <nl> + / / just bail out if it isn ' t trivially convertible . <nl> + if ( data [ 0 ] > = GRPC_MESSAGE_COMPRESS_ALGORITHMS_COUNT ) return 0 ; <nl> + const auto compression_algorithm = <nl> + static_cast < grpc_message_compression_algorithm > ( data [ 0 ] ) ; <nl> + <nl> + grpc_core : : testing : : LeakDetector leak_detector ( true ) ; <nl> + grpc_init ( ) ; <nl> + grpc_test_only_control_plane_credentials_force_init ( ) ; <nl> + grpc_slice_buffer input_buffer ; <nl> + grpc_slice_buffer_init ( & input_buffer ) ; <nl> + grpc_slice_buffer_add ( & input_buffer , <nl> + grpc_slice_from_copied_buffer ( <nl> + reinterpret_cast < const char * > ( data + 1 ) , size - 1 ) ) ; <nl> + grpc_slice_buffer output_buffer ; <nl> + grpc_slice_buffer_init ( & output_buffer ) ; <nl> + <nl> + grpc_msg_compress ( compression_algorithm , & input_buffer , & output_buffer ) ; <nl> + <nl> + grpc_slice_buffer_destroy ( & input_buffer ) ; <nl> + grpc_slice_buffer_destroy ( & output_buffer ) ; <nl> + grpc_test_only_control_plane_credentials_destroy ( ) ; <nl> + grpc_shutdown_blocking ( ) ; <nl> + return 0 ; <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . c600a740782 <nl> mmm / dev / null <nl> ppp b / test / core / compression / message_decompress_fuzzer . cc <nl> <nl> + / * <nl> + * <nl> + * Copyright 2019 gRPC authors . <nl> + * <nl> + * Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + * you may not use this file except in compliance with the License . <nl> + * You may obtain a copy of the License at <nl> + * <nl> + * http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + * <nl> + * Unless required by applicable law or agreed to in writing , software <nl> + * distributed under the License is distributed on an " AS IS " BASIS , <nl> + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + * See the License for the specific language governing permissions and <nl> + * limitations under the License . <nl> + * <nl> + * / <nl> + <nl> + # include < grpc / grpc . h > <nl> + # include < stdbool . h > <nl> + # include < stdint . h > <nl> + # include < string . h > <nl> + <nl> + # include " src / core / lib / compression / message_compress . h " <nl> + # include " src / core / lib / security / credentials / credentials . h " <nl> + # include " test / core / util / memory_counters . h " <nl> + <nl> + bool squelch = true ; <nl> + bool leak_check = true ; <nl> + <nl> + extern " C " int LLVMFuzzerTestOneInput ( const uint8_t * data , size_t size ) { <nl> + if ( size < 1 ) return 0 ; <nl> + <nl> + / / Instead of rolling something complicated to convert a uint8_t to the enum , <nl> + / / just bail out if it isn ' t trivially convertible . 
<nl> + if ( data [ 0 ] > = GRPC_MESSAGE_COMPRESS_ALGORITHMS_COUNT ) return 0 ; <nl> + const auto compression_algorithm = <nl> + static_cast < grpc_message_compression_algorithm > ( data [ 0 ] ) ; <nl> + <nl> + grpc_core : : testing : : LeakDetector leak_detector ( true ) ; <nl> + grpc_init ( ) ; <nl> + grpc_test_only_control_plane_credentials_force_init ( ) ; <nl> + grpc_slice_buffer input_buffer ; <nl> + grpc_slice_buffer_init ( & input_buffer ) ; <nl> + grpc_slice_buffer_add ( & input_buffer , <nl> + grpc_slice_from_copied_buffer ( <nl> + reinterpret_cast < const char * > ( data + 1 ) , size - 1 ) ) ; <nl> + grpc_slice_buffer output_buffer ; <nl> + grpc_slice_buffer_init ( & output_buffer ) ; <nl> + <nl> + grpc_msg_decompress ( compression_algorithm , & input_buffer , & output_buffer ) ; <nl> + <nl> + grpc_slice_buffer_destroy ( & input_buffer ) ; <nl> + grpc_slice_buffer_destroy ( & output_buffer ) ; <nl> + grpc_test_only_control_plane_credentials_destroy ( ) ; <nl> + grpc_shutdown_blocking ( ) ; <nl> + return 0 ; <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . c147aa5bfd6 <nl> mmm / dev / null <nl> ppp b / test / core / compression / stream_compression_fuzzer . cc <nl> <nl> + / * <nl> + * <nl> + * Copyright 2019 gRPC authors . <nl> + * <nl> + * Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + * you may not use this file except in compliance with the License . <nl> + * You may obtain a copy of the License at <nl> + * <nl> + * http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + * <nl> + * Unless required by applicable law or agreed to in writing , software <nl> + * distributed under the License is distributed on an " AS IS " BASIS , <nl> + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + * See the License for the specific language governing permissions and <nl> + * limitations under the License . <nl> + * <nl> + * / <nl> + <nl> + # include < grpc / grpc . h > <nl> + # include < stdbool . h > <nl> + # include < stdint . h > <nl> + # include < string . h > <nl> + <nl> + # include " src / core / lib / compression / stream_compression . h " <nl> + # include " src / core / lib / security / credentials / credentials . h " <nl> + # include " test / core / util / memory_counters . 
h " <nl> + <nl> + bool squelch = true ; <nl> + bool leak_check = true ; <nl> + <nl> + extern " C " int LLVMFuzzerTestOneInput ( const uint8_t * data , size_t size ) { <nl> + grpc_core : : testing : : LeakDetector leak_detector ( true ) ; <nl> + grpc_init ( ) ; <nl> + grpc_test_only_control_plane_credentials_force_init ( ) ; <nl> + auto * context = grpc_stream_compression_context_create ( <nl> + GRPC_STREAM_COMPRESSION_GZIP_COMPRESS ) ; <nl> + grpc_slice_buffer input_buffer ; <nl> + grpc_slice_buffer_init ( & input_buffer ) ; <nl> + grpc_slice_buffer_add ( <nl> + & input_buffer , <nl> + grpc_slice_from_copied_buffer ( reinterpret_cast < const char * > ( data ) , size ) ) ; <nl> + grpc_slice_buffer output_buffer ; <nl> + grpc_slice_buffer_init ( & output_buffer ) ; <nl> + <nl> + grpc_stream_compress ( context , & input_buffer , & output_buffer , nullptr , <nl> + SIZE_MAX , GRPC_STREAM_COMPRESSION_FLUSH_SYNC ) ; <nl> + <nl> + grpc_stream_compression_context_destroy ( context ) ; <nl> + grpc_slice_buffer_destroy ( & input_buffer ) ; <nl> + grpc_slice_buffer_destroy ( & output_buffer ) ; <nl> + grpc_test_only_control_plane_credentials_destroy ( ) ; <nl> + grpc_shutdown_blocking ( ) ; <nl> + return 0 ; <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . e460e6db25f <nl> mmm / dev / null <nl> ppp b / test / core / compression / stream_decompression_fuzzer . cc <nl> <nl> + / * <nl> + * <nl> + * Copyright 2019 gRPC authors . <nl> + * <nl> + * Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + * you may not use this file except in compliance with the License . <nl> + * You may obtain a copy of the License at <nl> + * <nl> + * http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + * <nl> + * Unless required by applicable law or agreed to in writing , software <nl> + * distributed under the License is distributed on an " AS IS " BASIS , <nl> + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + * See the License for the specific language governing permissions and <nl> + * limitations under the License . <nl> + * <nl> + * / <nl> + <nl> + # include < grpc / grpc . h > <nl> + # include < stdbool . h > <nl> + # include < stdint . h > <nl> + # include < string . h > <nl> + <nl> + # include " src / core / lib / compression / stream_compression . h " <nl> + # include " src / core / lib / security / credentials / credentials . h " <nl> + # include " test / core / util / memory_counters . 
h " <nl> + <nl> + bool squelch = true ; <nl> + bool leak_check = true ; <nl> + <nl> + extern " C " int LLVMFuzzerTestOneInput ( const uint8_t * data , size_t size ) { <nl> + grpc_core : : testing : : LeakDetector leak_detector ( true ) ; <nl> + grpc_init ( ) ; <nl> + grpc_test_only_control_plane_credentials_force_init ( ) ; <nl> + auto * context = grpc_stream_compression_context_create ( <nl> + GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS ) ; <nl> + grpc_slice_buffer input_buffer ; <nl> + grpc_slice_buffer_init ( & input_buffer ) ; <nl> + grpc_slice_buffer_add ( <nl> + & input_buffer , <nl> + grpc_slice_from_copied_buffer ( reinterpret_cast < const char * > ( data ) , size ) ) ; <nl> + grpc_slice_buffer output_buffer ; <nl> + grpc_slice_buffer_init ( & output_buffer ) ; <nl> + bool end_of_context ; <nl> + <nl> + grpc_stream_decompress ( context , & input_buffer , & output_buffer , nullptr , <nl> + SIZE_MAX , & end_of_context ) ; <nl> + <nl> + grpc_stream_compression_context_destroy ( context ) ; <nl> + grpc_slice_buffer_destroy ( & input_buffer ) ; <nl> + grpc_slice_buffer_destroy ( & output_buffer ) ; <nl> + grpc_test_only_control_plane_credentials_destroy ( ) ; <nl> + grpc_shutdown_blocking ( ) ; <nl> + return 0 ; <nl> + } <nl>
|
Fuzz message / stream compression and decompression
|
grpc/grpc
|
aa7cb6a3350e4961ac603e47f7ed61fd99802bf1
|
2019-11-06T02:02:58Z
|
mmm a / xbmc / cores / dvdplayer / DVDCodecs / Video / VDPAU . cpp <nl> ppp b / xbmc / cores / dvdplayer / DVDCodecs / Video / VDPAU . cpp <nl> int CVDPAU : : Check ( AVCodecContext * avctx ) <nl> if ( ! m_DisplayEvent . WaitMSec ( 2000 ) ) <nl> { <nl> CLog : : Log ( LOGERROR , " CVDPAU : : Check - device didn ' t reset in reasonable time " ) ; <nl> - return VC_ERROR ; <nl> + state = VDPAU_RESET ; <nl> } <nl> - { CSharedLock lock ( m_DisplaySection ) ; <nl> + else <nl> + { <nl> + CSharedLock lock ( m_DisplaySection ) ; <nl> state = m_DisplayState ; <nl> } <nl> } <nl> bool CVDPAU : : CheckStatus ( VdpStatus vdp_st , int line ) <nl> if ( m_DisplayState = = VDPAU_OPEN ) <nl> { <nl> if ( vdp_st = = VDP_STATUS_DISPLAY_PREEMPTED ) <nl> + { <nl> + m_DisplayEvent . Reset ( ) ; <nl> m_DisplayState = VDPAU_LOST ; <nl> + } <nl> else <nl> m_DisplayState = VDPAU_ERROR ; <nl> } <nl>
|
Merge pull request from FernetMenta / mainline2
|
xbmc/xbmc
|
3449f21684a74da24fd15738c6c2518e6b1d28ac
|
2012-02-22T21:19:10Z
|
mmm a / cocos2dx / cocoa / CCObject . cpp <nl> ppp b / cocos2dx / cocoa / CCObject . cpp <nl> CCObject * CCObject : : autorelease ( void ) <nl> return this ; <nl> } <nl> <nl> - bool CCObject : : isSingleRefrence ( void ) <nl> + bool CCObject : : isSingleReference ( void ) <nl> { <nl> return m_uReference = = 1 ; <nl> } <nl> mmm a / cocos2dx / cocoa / CCObject . h <nl> ppp b / cocos2dx / cocoa / CCObject . h <nl> class CC_DLL CCObject : public CCCopying <nl> void retain ( void ) ; <nl> CCObject * autorelease ( void ) ; <nl> CCObject * copy ( void ) ; <nl> - bool isSingleRefrence ( void ) ; <nl> + bool isSingleReference ( void ) ; <nl> unsigned int retainCount ( void ) ; <nl> virtual bool isEqual ( const CCObject * pObject ) ; <nl> <nl>
|
Fixed Spelling Error CCObject : : isSingleRefrence to CCObject : : isSingleReference
|
cocos2d/cocos2d-x
|
c9408b7b755ddd1068dfa0054c155e87bf9a2309
|
2012-07-14T01:56:07Z
|
mmm a / dbms / include / DB / Storages / MergeTree / ReplicatedMergeTreeBlockOutputStream . h <nl> ppp b / dbms / include / DB / Storages / MergeTree / ReplicatedMergeTreeBlockOutputStream . h <nl> class ReplicatedMergeTreeBlockOutputStream : public IBlockOutputStream <nl> std : : call_once ( once_flag , [ & ] <nl> { <nl> zookeeper - > createIfNotExists ( storage . zookeeper_path + " / quorum " , " " ) ; <nl> + zookeeper - > createIfNotExists ( storage . zookeeper_path + " / quorum / last_part " , " " ) ; <nl> + zookeeper - > createIfNotExists ( storage . zookeeper_path + " / quorum / failed_parts " , " " ) ; <nl> } ) ; <nl> <nl> ReplicatedMergeTreeQuorumEntry quorum_entry ; <nl> mmm a / dbms / include / DB / Storages / MergeTree / ReplicatedMergeTreeCleanupThread . h <nl> ppp b / dbms / include / DB / Storages / MergeTree / ReplicatedMergeTreeCleanupThread . h <nl> class ReplicatedMergeTreeCleanupThread <nl> <nl> / / / Удалить из ZooKeeper старые хеши блоков . Это делает ведущая реплика . <nl> void clearOldBlocks ( ) ; <nl> + <nl> + / / / TODO Удаление старых quorum / failed_parts <nl> } ; <nl> <nl> <nl> mmm a / dbms / include / DB / Storages / MergeTree / ReplicatedMergeTreeRestartingThread . h <nl> ppp b / dbms / include / DB / Storages / MergeTree / ReplicatedMergeTreeRestartingThread . h <nl> class ReplicatedMergeTreeRestartingThread <nl> / / / Отметить в ZooKeeper , что эта реплика сейчас активна . <nl> void activateReplica ( ) ; <nl> <nl> + / / / Удалить куски , для которых кворум пофейлился ( за то время , когда реплика была неактивной ) . <nl> + void removeFailedQuorumParts ( ) ; <nl> + <nl> + / / / Если есть недостигнутый кворум , и у нас есть кусок , то добавить эту реплику в кворум . <nl> + void updateQuorumIfWeHavePart ( ) ; <nl> + <nl> void partialShutdown ( ) ; <nl> <nl> / / / Запретить запись в таблицу и завершить все фоновые потоки . <nl> mmm a / dbms / include / DB / Storages / StorageReplicatedMergeTree . h <nl> ppp b / dbms / include / DB / Storages / StorageReplicatedMergeTree . h <nl> class StorageReplicatedMergeTree : public IStorage <nl> * / <nl> void checkPartAndAddToZooKeeper ( const MergeTreeData : : DataPartPtr & part , zkutil : : Ops & ops , String name_override = " " ) ; <nl> <nl> + / / / Кладет в ops действия , удаляющие кусок из ZooKeeper . <nl> + void removePartFromZooKeeper ( const String & part_name , zkutil : : Ops & ops ) ; <nl> + <nl> / / / Убирает кусок из ZooKeeper и добавляет в очередь задание скачать его . Предполагается это делать с битыми кусками . <nl> void removePartAndEnqueueFetch ( const String & part_name ) ; <nl> <nl> class StorageReplicatedMergeTree : public IStorage <nl> * / <nl> void fetchPart ( const String & part_name , const String & replica_path , bool to_detached , size_t quorum ) ; <nl> <nl> + / * * При отслеживаемом кворуме - добавить реплику в кворум для куска . <nl> + * / <nl> + void updateQuorum ( const String & part_name ) ; <nl> + <nl> AbandonableLockInZooKeeper allocateBlockNumber ( const String & month_name ) ; <nl> <nl> / * * Дождаться , пока все реплики , включая эту , выполнят указанное действие из лога . <nl> mmm a / dbms / src / Storages / MergeTree / ReplicatedMergeTreeCleanupThread . cpp <nl> ppp b / dbms / src / Storages / MergeTree / ReplicatedMergeTreeCleanupThread . cpp <nl> void ReplicatedMergeTreeCleanupThread : : clearOldParts ( ) <nl> LOG_DEBUG ( log , " Removing " < < part - > name ) ; <nl> <nl> zkutil : : Ops ops ; <nl> - ops . push_back ( new zkutil : : Op : : Remove ( storage . 
replica_path + " / parts / " + part - > name + " / columns " , - 1 ) ) ; <nl> - ops . push_back ( new zkutil : : Op : : Remove ( storage . replica_path + " / parts / " + part - > name + " / checksums " , - 1 ) ) ; <nl> - ops . push_back ( new zkutil : : Op : : Remove ( storage . replica_path + " / parts / " + part - > name , - 1 ) ) ; <nl> + storage . removePartFromZooKeeper ( part - > name , ops ) ; <nl> auto code = zookeeper - > tryMulti ( ops ) ; <nl> if ( code ! = ZOK ) <nl> LOG_WARNING ( log , " Couldn ' t remove " < < part - > name < < " from ZooKeeper : " < < zkutil : : ZooKeeper : : error2string ( code ) ) ; <nl> mmm a / dbms / src / Storages / MergeTree / ReplicatedMergeTreeRestartingThread . cpp <nl> ppp b / dbms / src / Storages / MergeTree / ReplicatedMergeTreeRestartingThread . cpp <nl> <nl> # include < DB / IO / Operators . h > <nl> # include < DB / Storages / StorageReplicatedMergeTree . h > <nl> # include < DB / Storages / MergeTree / ReplicatedMergeTreeRestartingThread . h > <nl> + # include < DB / Storages / MergeTree / ReplicatedMergeTreeQuorumEntry . h > <nl> <nl> <nl> namespace DB <nl> bool ReplicatedMergeTreeRestartingThread : : tryStartup ( ) <nl> { <nl> try <nl> { <nl> + removeFailedQuorumParts ( ) ; <nl> activateReplica ( ) ; <nl> + updateQuorumIfWeHavePart ( ) ; <nl> <nl> storage . leader_election = new zkutil : : LeaderElection ( <nl> storage . zookeeper_path + " / leader_election " , <nl> bool ReplicatedMergeTreeRestartingThread : : tryStartup ( ) <nl> } <nl> <nl> <nl> + void ReplicatedMergeTreeRestartingThread : : removeFailedQuorumParts ( ) <nl> + { <nl> + auto zookeeper = storage . getZooKeeper ( ) ; <nl> + <nl> + Strings failed_parts ; <nl> + if ( ! zookeeper - > tryGetChildren ( storage . zookeeper_path + " / quorum / failed_parts " , failed_parts ) ) <nl> + return ; <nl> + <nl> + for ( auto part_name : failed_parts ) <nl> + { <nl> + auto part = storage . data . getPartIfExists ( part_name ) ; <nl> + if ( part ) <nl> + { <nl> + LOG_DEBUG ( log , " Found part " < < part_name < < " with failed quorum . Moving to detached . This shouldn ' t happen often . " ) ; <nl> + <nl> + zkutil : : Ops ops ; <nl> + storage . removePartFromZooKeeper ( part_name , ops ) ; <nl> + auto code = zookeeper - > tryMulti ( ops ) ; <nl> + if ( code = = ZNONODE ) <nl> + LOG_WARNING ( log , " Part " < < part_name < < " with failed quorum is not in ZooKeeper . This shouldn ' t happen often . " ) ; <nl> + <nl> + storage . data . renameAndDetachPart ( part , " noquorum " ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + <nl> + void ReplicatedMergeTreeRestartingThread : : updateQuorumIfWeHavePart ( ) <nl> + { <nl> + auto zookeeper = storage . getZooKeeper ( ) ; <nl> + <nl> + String quorum_str ; <nl> + if ( zookeeper - > tryGet ( storage . zookeeper_path + " / quorum / status " , quorum_str ) ) <nl> + { <nl> + ReplicatedMergeTreeQuorumEntry quorum_entry ; <nl> + quorum_entry . fromString ( quorum_str ) ; <nl> + <nl> + if ( ! quorum_entry . replicas . count ( storage . replica_name ) <nl> + & & zookeeper - > exists ( storage . replica_path + " / parts / " + quorum_entry . part_name ) ) <nl> + { <nl> + LOG_WARNING ( log , " We have part " < < quorum_entry . part_name <nl> + < < " but we is not in quorum . Updating quorum . This shouldn ' t happen often . " ) ; <nl> + storage . updateQuorum ( quorum_entry . part_name ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + <nl> void ReplicatedMergeTreeRestartingThread : : activateReplica ( ) <nl> { <nl> auto host_port = storage . context . 
getInterserverIOAddress ( ) ; <nl> void ReplicatedMergeTreeRestartingThread : : activateReplica ( ) <nl> < < " port : " < < host_port . second < < ' \ n ' ; <nl> } <nl> <nl> + String is_active_path = storage . replica_path + " / is_active " ; <nl> + <nl> / * * Если нода отмечена как активная , но отметка сделана в этом же экземпляре , удалим ее . <nl> * Такое возможно только при истечении сессии в ZooKeeper . <nl> - * Здесь есть небольшой race condition ( можем удалить не ту ноду , для которой сделали tryGet ) , <nl> - * но он крайне маловероятен при нормальном использовании . <nl> * / <nl> String data ; <nl> - if ( zookeeper - > tryGet ( storage . replica_path + " / is_active " , data ) & & data = = active_node_identifier ) <nl> - zookeeper - > tryRemove ( storage . replica_path + " / is_active " ) ; <nl> + Stat stat ; <nl> + bool has_is_active = zookeeper - > tryGet ( is_active_path , data , & stat ) ; <nl> + if ( has_is_active & & data = = active_node_identifier ) <nl> + { <nl> + auto code = zookeeper - > tryRemove ( is_active_path , stat . version ) ; <nl> + <nl> + if ( code = = ZBADVERSION ) <nl> + throw Exception ( " Another instance of replica " + storage . replica_path + " was created just now . " <nl> + " You shouldn ' t run multiple instances of same replica . You need to check configuration files . " , <nl> + ErrorCodes : : REPLICA_IS_ALREADY_ACTIVE ) ; <nl> + <nl> + if ( code ! = ZOK & & code ! = ZNONODE ) <nl> + throw zkutil : : KeeperException ( code , is_active_path ) ; <nl> + } <nl> <nl> / / / Одновременно объявим , что эта реплика активна , и обновим хост . <nl> zkutil : : Ops ops ; <nl> - ops . push_back ( new zkutil : : Op : : Create ( storage . replica_path + " / is_active " , <nl> + ops . push_back ( new zkutil : : Op : : Create ( is_active_path , <nl> active_node_identifier , zookeeper - > getDefaultACL ( ) , zkutil : : CreateMode : : Ephemeral ) ) ; <nl> ops . push_back ( new zkutil : : Op : : SetData ( storage . replica_path + " / host " , address , - 1 ) ) ; <nl> <nl> void ReplicatedMergeTreeRestartingThread : : activateReplica ( ) <nl> <nl> / / / current_zookeeper живёт в течение времени жизни replica_is_active_node , <nl> / / / так как до изменения current_zookeeper , объект replica_is_active_node уничтожается в методе partialShutdown . <nl> - storage . replica_is_active_node = zkutil : : EphemeralNodeHolder : : existing ( storage . replica_path + " / is_active " , * storage . current_zookeeper ) ; <nl> + storage . replica_is_active_node = zkutil : : EphemeralNodeHolder : : existing ( is_active_path , * storage . current_zookeeper ) ; <nl> } <nl> <nl> <nl> mmm a / dbms / src / Storages / StorageReplicatedMergeTree . cpp <nl> ppp b / dbms / src / Storages / StorageReplicatedMergeTree . cpp <nl> void StorageReplicatedMergeTree : : checkParts ( bool skip_sanity_checks ) <nl> LOG_ERROR ( log , " Removing unexpectedly merged local part from ZooKeeper : " < < name ) ; <nl> <nl> zkutil : : Ops ops ; <nl> - ops . push_back ( new zkutil : : Op : : Remove ( replica_path + " / parts / " + name + " / columns " , - 1 ) ) ; <nl> - ops . push_back ( new zkutil : : Op : : Remove ( replica_path + " / parts / " + name + " / checksums " , - 1 ) ) ; <nl> - ops . 
push_back ( new zkutil : : Op : : Remove ( replica_path + " / parts / " + name , - 1 ) ) ; <nl> + removePartFromZooKeeper ( name , ops ) ; <nl> zookeeper - > multi ( ops ) ; <nl> } <nl> <nl> void StorageReplicatedMergeTree : : checkParts ( bool skip_sanity_checks ) <nl> <nl> / / / Полагаемся , что это происходит до загрузки очереди ( loadQueue ) . <nl> zkutil : : Ops ops ; <nl> - ops . push_back ( new zkutil : : Op : : Remove ( replica_path + " / parts / " + name + " / columns " , - 1 ) ) ; <nl> - ops . push_back ( new zkutil : : Op : : Remove ( replica_path + " / parts / " + name + " / checksums " , - 1 ) ) ; <nl> - ops . push_back ( new zkutil : : Op : : Remove ( replica_path + " / parts / " + name , - 1 ) ) ; <nl> + removePartFromZooKeeper ( name , ops ) ; <nl> ops . push_back ( new zkutil : : Op : : Create ( <nl> replica_path + " / queue / queue - " , log_entry . toString ( ) , zookeeper - > getDefaultACL ( ) , zkutil : : CreateMode : : PersistentSequential ) ) ; <nl> zookeeper - > multi ( ops ) ; <nl> bool StorageReplicatedMergeTree : : executeLogEntry ( const LogEntry & entry , Backgro <nl> if ( containing_part & & zookeeper - > exists ( replica_path + " / parts / " + containing_part - > name ) ) <nl> { <nl> if ( ! ( entry . type = = LogEntry : : GET_PART & & entry . source_replica = = replica_name ) ) <nl> - LOG_DEBUG ( log , " Skipping action for part " + entry . new_part_name + " - part already exists " ) ; <nl> + LOG_DEBUG ( log , " Skipping action for part " < < entry . new_part_name < < " - part already exists . " ) ; <nl> return true ; <nl> } <nl> } <nl> bool StorageReplicatedMergeTree : : executeLogEntry ( const LogEntry & entry , Backgro <nl> if ( entry . type = = LogEntry : : GET_PART & & entry . source_replica = = replica_name ) <nl> LOG_WARNING ( log , " Part " < < entry . new_part_name < < " from own log doesn ' t exist . " ) ; <nl> <nl> + / / / Возможно , этот кусок нам не нужен , так как при записи с кворумом , кворум пофейлился ( см . ниже про / quorum / failed_parts ) . <nl> + if ( entry . quorum & & zookeeper - > exists ( zookeeper_path + " / quorum / failed_parts " ) ) <nl> + { <nl> + LOG_DEBUG ( log , " Skipping action for part " < < entry . new_part_name < < " because quorum for that part was failed . " ) ; <nl> + return true ; / / / NOTE Удаление из virtual_parts не делается , но оно нужно только для мерджей . <nl> + } <nl> + <nl> bool do_fetch = false ; <nl> <nl> if ( entry . type = = LogEntry : : GET_PART ) <nl> bool StorageReplicatedMergeTree : : executeLogEntry ( const LogEntry & entry , Backgro <nl> <nl> if ( replica . empty ( ) ) <nl> { <nl> - ProfileEvents : : increment ( ProfileEvents : : ReplicatedPartFailedFetches ) ; <nl> - throw Exception ( " No active replica has part " + entry . new_part_name , ErrorCodes : : NO_REPLICA_HAS_PART ) ; <nl> + / * * Если кусок должен быть записан с кворумом , и кворум ещё недостигнут , <nl> + * то ( из - за того , что кусок невозможно прямо сейчас скачать ) , <nl> + * кворумную запись следует считать безуспешной . <nl> + * TODO Сложный код , вынести отдельно . <nl> + * / <nl> + if ( entry . quorum ) <nl> + { <nl> + if ( entry . type ! = LogEntry : : GET_PART ) <nl> + throw Exception ( " Logical error : log entry with quorum but type is not GET_PART " , ErrorCodes : : LOGICAL_ERROR ) ; <nl> + <nl> + LOG_DEBUG ( log , " No active replica has part " < < entry . new_part_name < < " which needs to be written with quorum . " <nl> + " Will try to mark that quorum as failed . 
" ) ; <nl> + <nl> + / * * Атомарно : <nl> + * - если реплики не стали активными ; <nl> + * - если существует узел quorum с этим куском ; <nl> + * - удалим узел quorum ; <nl> + * - установим nonincrement_block_numbers , чтобы разрешить мерджи через номер потерянного куска ; <nl> + * - добавим кусок в список quorum / failed_parts . <nl> + * <nl> + * Если что - то изменится , то ничего не сделаем - попадём сюда снова в следующий раз . <nl> + * / <nl> + <nl> + / * * Соберём версии узлов host у реплик . <nl> + * Когда реплика становится активной , она в той же транзакции ( с созданием is_active ) , меняет значение host . <nl> + * Это позволит проследить , что реплики не стали активными . <nl> + * / <nl> + <nl> + Strings replicas = zookeeper - > getChildren ( zookeeper_path + " / replicas " ) ; <nl> + <nl> + zkutil : : Ops ops ; <nl> + <nl> + for ( size_t i = 0 , size = replicas . size ( ) ; i < size ; + + i ) <nl> + { <nl> + Stat stat ; <nl> + String path = zookeeper_path + " / replicas / " + replicas [ i ] + " / host " ; <nl> + zookeeper - > get ( path , & stat ) ; <nl> + ops . push_back ( new zkutil : : Op : : Check ( path , stat . version ) ) ; <nl> + } <nl> + <nl> + / / / Проверяем , что пока мы собирали версии , не ожила реплика с нужным куском . <nl> + replica = findReplicaHavingPart ( entry . new_part_name , true ) ; <nl> + <nl> + / / / Также за это время могла быть создана совсем новая реплика . Но если на старых не появится куска , то на новой его тоже не может быть . <nl> + <nl> + if ( replica . empty ( ) ) <nl> + { <nl> + Stat quorum_stat ; <nl> + String quorum_path = zookeeper_path + " / quorum / status " ; <nl> + String quorum_str = zookeeper - > get ( quorum_path , & quorum_stat ) ; <nl> + ReplicatedMergeTreeQuorumEntry quorum_entry ; <nl> + quorum_entry . fromString ( quorum_str ) ; <nl> + <nl> + if ( quorum_entry . part_name = = entry . new_part_name ) <nl> + { <nl> + ops . push_back ( new zkutil : : Op : : Remove ( quorum_path , quorum_stat . version ) ) ; <nl> + <nl> + const auto partition_str = entry . new_part_name . substr ( 0 , 6 ) ; <nl> + ActiveDataPartSet : : Part part_info ; <nl> + ActiveDataPartSet : : parsePartName ( entry . new_part_name , part_info ) ; <nl> + <nl> + if ( part_info . left ! = part_info . right ) <nl> + throw Exception ( " Logical error : log entry with quorum for part covering more than one block number " , <nl> + ErrorCodes : : LOGICAL_ERROR ) ; <nl> + <nl> + ops . push_back ( new zkutil : : Op : : Create ( <nl> + zookeeper_path + " / nonincrement_block_numbers / " + partition_str + " / block - " + padIndex ( part_info . left ) , <nl> + " " , <nl> + zookeeper - > getDefaultACL ( ) , <nl> + zkutil : : CreateMode : : Persistent ) ) ; <nl> + <nl> + ops . push_back ( new zkutil : : Op : : Create ( <nl> + zookeeper_path + " / quorum / failed_parts / " + entry . new_part_name , <nl> + " " , <nl> + zookeeper - > getDefaultACL ( ) , <nl> + zkutil : : CreateMode : : Persistent ) ) ; <nl> + <nl> + auto code = zookeeper - > tryMulti ( ops ) ; <nl> + <nl> + if ( code = = ZOK ) <nl> + { <nl> + LOG_DEBUG ( log , " Marked quorum for part " < < entry . new_part_name < < " as failed . " ) ; <nl> + return true ; / / / NOTE Удаление из virtual_parts не делается , но оно нужно только для мерджей . <nl> + } <nl> + else if ( code = = ZBADVERSION | | code = = ZNONODE | | code = = ZNODEEXISTS ) <nl> + { <nl> + LOG_DEBUG ( log , " State was changed or isn ' t expected when trying to mark quorum for part " <nl> + < < entry . new_part_name < < " as failed . 
" ) ; <nl> + } <nl> + else <nl> + throw zkutil : : KeeperException ( code ) ; <nl> + } <nl> + else <nl> + { <nl> + LOG_WARNING ( log , " No active replica has part " < < entry . new_part_name <nl> + < < " , but that part needs quorum and / quorum / status contains entry about another part " < < quorum_entry . part_name <nl> + < < " . It means that part was successfully written to " < < entry . quorum < < " replicas , but then all of them goes offline . " <nl> + < < " Or it is a bug . " ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + if ( replica . empty ( ) ) <nl> + { <nl> + ProfileEvents : : increment ( ProfileEvents : : ReplicatedPartFailedFetches ) ; <nl> + throw Exception ( " No active replica has part " + entry . new_part_name , ErrorCodes : : NO_REPLICA_HAS_PART ) ; <nl> + } <nl> } <nl> <nl> fetchPart ( entry . new_part_name , zookeeper_path + " / replicas / " + replica , false , entry . quorum ) ; <nl> void StorageReplicatedMergeTree : : executeDropRange ( const StorageReplicatedMergeTr <nl> data . renameAndDetachPart ( part ) ; <nl> <nl> zkutil : : Ops ops ; <nl> - ops . push_back ( new zkutil : : Op : : Remove ( replica_path + " / parts / " + part - > name + " / columns " , - 1 ) ) ; <nl> - ops . push_back ( new zkutil : : Op : : Remove ( replica_path + " / parts / " + part - > name + " / checksums " , - 1 ) ) ; <nl> - ops . push_back ( new zkutil : : Op : : Remove ( replica_path + " / parts / " + part - > name , - 1 ) ) ; <nl> + removePartFromZooKeeper ( part - > name , ops ) ; <nl> zookeeper - > multi ( ops ) ; <nl> <nl> / / / Если кусок нужно удалить , надежнее удалить директорию после изменений в ZooKeeper . <nl> void StorageReplicatedMergeTree : : alterThread ( ) <nl> } <nl> <nl> <nl> + void StorageReplicatedMergeTree : : removePartFromZooKeeper ( const String & part_name , zkutil : : Ops & ops ) <nl> + { <nl> + String part_path = replica_path + " / parts / " + part_name ; <nl> + <nl> + ops . push_back ( new zkutil : : Op : : Remove ( part_path + " / checksums " , - 1 ) ) ; <nl> + ops . push_back ( new zkutil : : Op : : Remove ( part_path + " / columns " , - 1 ) ) ; <nl> + ops . push_back ( new zkutil : : Op : : Remove ( part_path , - 1 ) ) ; <nl> + } <nl> + <nl> + <nl> void StorageReplicatedMergeTree : : removePartAndEnqueueFetch ( const String & part_name ) <nl> { <nl> auto zookeeper = getZooKeeper ( ) ; <nl> void StorageReplicatedMergeTree : : removePartAndEnqueueFetch ( const String & part_n <nl> ops . push_back ( new zkutil : : Op : : Create ( <nl> replica_path + " / queue / queue - " , log_entry - > toString ( ) , zookeeper - > getDefaultACL ( ) , <nl> zkutil : : CreateMode : : PersistentSequential ) ) ; <nl> - ops . push_back ( new zkutil : : Op : : Remove ( part_path + " / checksums " , - 1 ) ) ; <nl> - ops . push_back ( new zkutil : : Op : : Remove ( part_path + " / columns " , - 1 ) ) ; <nl> - ops . push_back ( new zkutil : : Op : : Remove ( part_path , - 1 ) ) ; <nl> + <nl> + removePartFromZooKeeper ( part_name , ops ) ; <nl> + <nl> auto results = zookeeper - > multi ( ops ) ; <nl> <nl> { <nl> String StorageReplicatedMergeTree : : findReplicaHavingPart ( const String & part_nam <nl> <nl> / * * Если для куска отслеживается кворум , то обновить информацию о нём в ZK . 
<nl> * / <nl> - static void updateQuorum ( <nl> - zkutil : : ZooKeeperPtr & zookeeper , <nl> - const String & zookeeper_path , <nl> - const String & replica_name , <nl> - const String & part_name , <nl> - size_t quorum ) <nl> + void StorageReplicatedMergeTree : : updateQuorum ( const String & part_name ) <nl> { <nl> - if ( ! quorum ) <nl> - return ; <nl> + auto zookeeper = getZooKeeper ( ) ; <nl> <nl> + / / / Информация , на какие реплики был добавлен кусок , если кворум ещё не достигнут . <nl> const String quorum_status_path = zookeeper_path + " / quorum / status " ; <nl> + / / / Имя предыдущего куска , для которого был достигнут кворум . <nl> + const String quorum_last_part_path = zookeeper_path + " / quorum / last_part " ; <nl> + <nl> String value ; <nl> zkutil : : Stat stat ; <nl> <nl> static void updateQuorum ( <nl> break ; <nl> } <nl> <nl> - if ( quorum_entry . required_number_of_replicas ! = quorum ) <nl> - throw Exception ( " Logical error : quorum size in log entry is different than quorum size in node / quorum / status " , <nl> - ErrorCodes : : LOGICAL_ERROR ) ; <nl> - <nl> quorum_entry . replicas . insert ( replica_name ) ; <nl> <nl> if ( quorum_entry . replicas . size ( ) > = quorum_entry . required_number_of_replicas ) <nl> { <nl> - / / / Кворум достигнут . Удаляем узел . <nl> - auto code = zookeeper - > tryRemove ( quorum_status_path , stat . version ) ; <nl> + / / / Кворум достигнут . Удаляем узел , а также обновляем информацию о последнем куске , который был успешно записан с кворумом . <nl> + <nl> + zkutil : : Ops ops ; <nl> + ops . push_back ( new zkutil : : Op : : Remove ( quorum_status_path , stat . version ) ) ; <nl> + ops . push_back ( new zkutil : : Op : : SetData ( quorum_last_part_path , part_name , - 1 ) ) ; <nl> + auto code = zookeeper - > tryMulti ( ops ) ; <nl> <nl> if ( code = = ZOK ) <nl> { <nl> void StorageReplicatedMergeTree : : fetchPart ( const String & part_name , const Strin <nl> transaction . commit ( ) ; <nl> <nl> / * * Если для этого куска отслеживается кворум , то надо его обновить . <nl> - * TODO Обработка в случае неизвестной ошибки , потери сессии , при перезапуске сервера . <nl> + * Если не успеем , в случае потери сессии , при перезапуске сервера - см . метод ReplicatedMergeTreeRestartingThread : : updateQuorumIfWeHavePart . <nl> * / <nl> - updateQuorum ( zookeeper , zookeeper_path , replica_name , part_name , quorum ) ; <nl> + if ( quorum ) <nl> + updateQuorum ( part_name ) ; <nl> <nl> merge_selecting_event . set ( ) ; <nl> <nl>
|
dbms : quorum insert : development [ # METR - 16779 ] .
|
ClickHouse/ClickHouse
|
3b5a3e7325b9e1bac16a1170c36caab430c172cf
|
2015-09-20T11:02:59Z
|
mmm a / include / swift / AST / PrintOptions . h <nl> ppp b / include / swift / AST / PrintOptions . h <nl> class Type ; <nl> enum DeclAttrKind : unsigned ; <nl> class PrinterArchetypeTransformer ; <nl> <nl> + / / / Necessary information for archetype transformation during printing . <nl> + struct ArchetypeTransformContext { <nl> + Type getTypeBase ( ) ; <nl> + NominalTypeDecl * getNominal ( ) ; <nl> + PrinterArchetypeTransformer * getTransformer ( ) { return Transformer . get ( ) ; } <nl> + bool isPrintingSynthesizedExtension ( ) ; <nl> + bool isPrintingTypeInteface ( ) ; <nl> + ArchetypeTransformContext ( PrinterArchetypeTransformer * Transformer ) ; <nl> + ArchetypeTransformContext ( PrinterArchetypeTransformer * Transformer , <nl> + Type T ) ; <nl> + ArchetypeTransformContext ( PrinterArchetypeTransformer * Transformer , <nl> + NominalTypeDecl * NTD ) ; <nl> + Type transform ( Type Input ) ; <nl> + StringRef transform ( StringRef Input ) ; <nl> + private : <nl> + std : : shared_ptr < PrinterArchetypeTransformer > Transformer ; <nl> + <nl> + / / When printing a type interface , this is the type to print . <nl> + / / When synthesizing extensions , this is the target nominal . <nl> + llvm : : PointerUnion < TypeBase * , NominalTypeDecl * > TypeBaseOrNominal ; <nl> + } ; <nl> + <nl> / / / Options for printing AST nodes . <nl> / / / <nl> / / / A default - constructed PrintOptions is suitable for printing to users ; <nl> struct PrintOptions { <nl> / / / \ brief Print types with alternative names from their canonical names . <nl> llvm : : DenseMap < CanType , Identifier > * AlternativeTypeNames = nullptr ; <nl> <nl> - / / / \ brief When printing a type interface , register the type to print . <nl> - TypeBase * TypeToPrint = nullptr ; <nl> - <nl> - std : : shared_ptr < PrinterArchetypeTransformer > pTransformer ; <nl> - <nl> - NominalTypeDecl * SynthesizedTarget = nullptr ; <nl> + / / / \ brief The information for converting archetypes to specialized types . <nl> + std : : shared_ptr < ArchetypeTransformContext > TransformContext ; <nl> <nl> / / / Retrieve the set of options for verbose printing to users . <nl> static PrintOptions printVerbose ( ) { <nl> struct PrintOptions { <nl> <nl> void initArchetypeTransformerForSynthesizedExtensions ( NominalTypeDecl * D ) ; <nl> <nl> - bool isPrintingSynthesizedExtension ( ) ; <nl> - <nl> void clearArchetypeTransformerForSynthesizedExtensions ( ) ; <nl> <nl> / / / Retrieve the print options that are suitable to print the testable interface . <nl> mmm a / lib / AST / ASTPrinter . cpp <nl> ppp b / lib / AST / ASTPrinter . cpp <nl> class ArchetypeSelfTransformer : public PrinterArchetypeTransformer { <nl> <nl> PrintOptions PrintOptions : : printTypeInterface ( Type T , const DeclContext * DC ) { <nl> PrintOptions result = printInterface ( ) ; <nl> - result . setArchetypeTransform ( T , DC ) ; <nl> - result . TypeToPrint = T . getPointer ( ) ; <nl> + result . 
TransformContext = std : : make_shared < ArchetypeTransformContext > ( <nl> + new PrinterArchetypeNameTransformer ( T , DC ) , T ) ; <nl> return result ; <nl> } <nl> <nl> void PrintOptions : : setArchetypeTransform ( Type T , const DeclContext * DC ) { <nl> - pTransformer = std : : make_shared < PrinterArchetypeNameTransformer > ( T , DC ) ; <nl> + TransformContext = std : : make_shared < ArchetypeTransformContext > ( <nl> + new PrinterArchetypeNameTransformer ( T , DC ) ) ; <nl> } <nl> <nl> void PrintOptions : : setArchetypeTransformForQuickHelp ( Type T , DeclContext * DC ) { <nl> - pTransformer = std : : make_shared < ArchetypeSelfTransformer > ( T , * DC ) ; <nl> + TransformContext = std : : make_shared < ArchetypeTransformContext > ( <nl> + new ArchetypeSelfTransformer ( T , * DC ) ) ; <nl> } <nl> <nl> void PrintOptions : : initArchetypeTransformerForSynthesizedExtensions ( NominalTypeDecl * D ) { <nl> - pTransformer = std : : make_shared < ArchetypeSelfTransformer > ( D ) ; <nl> - SynthesizedTarget = D ; <nl> + TransformContext = std : : make_shared < ArchetypeTransformContext > ( <nl> + new ArchetypeSelfTransformer ( D ) , D ) ; <nl> } <nl> <nl> - bool PrintOptions : : isPrintingSynthesizedExtension ( ) { <nl> - return pTransformer & & SynthesizedTarget ; <nl> + void PrintOptions : : clearArchetypeTransformerForSynthesizedExtensions ( ) { <nl> + TransformContext . reset ( ) ; <nl> } <nl> <nl> - void PrintOptions : : clearArchetypeTransformerForSynthesizedExtensions ( ) { <nl> - pTransformer = nullptr ; <nl> - SynthesizedTarget = nullptr ; <nl> + ArchetypeTransformContext : : ArchetypeTransformContext ( <nl> + PrinterArchetypeTransformer * Transformer ) : Transformer ( Transformer ) { } ; <nl> + <nl> + ArchetypeTransformContext : : ArchetypeTransformContext ( <nl> + PrinterArchetypeTransformer * Transformer , Type T ) : <nl> + Transformer ( Transformer ) , TypeBaseOrNominal ( T . getPointer ( ) ) { } ; <nl> + <nl> + ArchetypeTransformContext : : ArchetypeTransformContext ( <nl> + PrinterArchetypeTransformer * Transformer , NominalTypeDecl * NTD ) : <nl> + Transformer ( Transformer ) , TypeBaseOrNominal ( NTD ) { } ; <nl> + <nl> + NominalTypeDecl * ArchetypeTransformContext : : getNominal ( ) { <nl> + return TypeBaseOrNominal . get < NominalTypeDecl * > ( ) ; <nl> + } <nl> + <nl> + Type ArchetypeTransformContext : : getTypeBase ( ) { <nl> + return TypeBaseOrNominal . get < TypeBase * > ( ) ; <nl> + } <nl> + <nl> + bool ArchetypeTransformContext : : isPrintingSynthesizedExtension ( ) { <nl> + return ! TypeBaseOrNominal . isNull ( ) & & TypeBaseOrNominal . is < NominalTypeDecl * > ( ) ; <nl> + } <nl> + bool ArchetypeTransformContext : : isPrintingTypeInteface ( ) { <nl> + return ! TypeBaseOrNominal . isNull ( ) & & TypeBaseOrNominal . is < TypeBase * > ( ) ; <nl> + } <nl> + <nl> + Type ArchetypeTransformContext : : transform ( Type Input ) { <nl> + return Transformer - > transform ( Input ) ; <nl> + } <nl> + <nl> + StringRef ArchetypeTransformContext : : transform ( StringRef Input ) { <nl> + return Transformer - > transform ( Input ) ; <nl> } <nl> <nl> std : : string ASTPrinter : : sanitizeUtf8 ( StringRef Text ) { <nl> class PrintAST : public ASTVisitor < PrintAST > { <nl> } <nl> <nl> void printTypeLoc ( const TypeLoc & TL ) { <nl> - if ( Options . pTransformer & & TL . getType ( ) ) { <nl> - if ( auto RT = Options . pTransformer - > transform ( TL . getType ( ) ) ) { <nl> + if ( Options . TransformContext & & TL . getType ( ) ) { <nl> + if ( auto RT = Options . 
TransformContext - > transform ( TL . getType ( ) ) ) { <nl> PrintOptions FreshOptions ; <nl> RT . print ( Printer , FreshOptions ) ; <nl> return ; <nl> class PrintAST : public ASTVisitor < PrintAST > { <nl> if ( ! shouldPrint ( D , true ) ) <nl> return false ; <nl> <nl> - bool Synthesize = Options . isPrintingSynthesizedExtension ( ) & & <nl> + bool Synthesize = Options . TransformContext & & <nl> + Options . TransformContext - > isPrintingSynthesizedExtension ( ) & & <nl> D - > getKind ( ) = = DeclKind : : Extension ; <nl> if ( Synthesize ) <nl> - Printer . setSynthesizedTarget ( Options . SynthesizedTarget ) ; <nl> + Printer . setSynthesizedTarget ( Options . TransformContext - > getNominal ( ) ) ; <nl> Printer . callPrintDeclPre ( D ) ; <nl> ASTVisitor : : visit ( D ) ; <nl> if ( Synthesize ) { <nl> Printer . setSynthesizedTarget ( nullptr ) ; <nl> Printer . printSynthesizedExtensionPost ( cast < ExtensionDecl > ( D ) , <nl> - Options . SynthesizedTarget ) ; <nl> + Options . TransformContext - > getNominal ( ) ) ; <nl> } else { <nl> Printer . printDeclPost ( D ) ; <nl> } <nl> void PrintAST : : printGenericParams ( GenericParamList * Params ) { <nl> Printer < < " < " ; <nl> bool IsFirst = true ; <nl> SmallVector < Type , 4 > Scrach ; <nl> - if ( Options . TypeToPrint ) { <nl> - auto ArgArr = Options . TypeToPrint - > getAllGenericArgs ( Scrach ) ; <nl> + if ( Options . TransformContext & & <nl> + Options . TransformContext - > isPrintingTypeInteface ( ) ) { <nl> + auto ArgArr = Options . TransformContext - > getTypeBase ( ) - > <nl> + getAllGenericArgs ( Scrach ) ; <nl> for ( auto Arg : ArgArr ) { <nl> if ( IsFirst ) { <nl> IsFirst = false ; <nl> void PrintAST : : printWhereClause ( ArrayRef < RequirementRepr > requirements ) { <nl> auto FirstType = std : : get < 0 > ( Tuple ) ; <nl> auto SecondType = std : : get < 1 > ( Tuple ) ; <nl> auto Kind = std : : get < 2 > ( Tuple ) ; <nl> - if ( Options . pTransformer ) { <nl> - FirstType = Options . pTransformer - > transform ( FirstType ) ; <nl> - SecondType = Options . pTransformer - > transform ( SecondType ) ; <nl> + if ( Options . TransformContext ) { <nl> + FirstType = Options . TransformContext - > transform ( FirstType ) ; <nl> + SecondType = Options . TransformContext - > transform ( SecondType ) ; <nl> } <nl> if ( FirstType = = SecondType ) <nl> continue ; <nl> bool PrintAST : : shouldPrintPattern ( const Pattern * P ) { <nl> void PrintAST : : printPatternType ( const Pattern * P ) { <nl> if ( P - > hasType ( ) ) { <nl> Type T = P - > getType ( ) ; <nl> - if ( Options . pTransformer ) { <nl> - T = Options . pTransformer - > transform ( T ) ; <nl> + if ( Options . TransformContext ) { <nl> + T = Options . TransformContext - > transform ( T ) ; <nl> } <nl> Printer < < " : " ; <nl> T . print ( Printer , Options ) ; <nl> void PrintAST : : printExtension ( ExtensionDecl * decl ) { <nl> } <nl> <nl> void PrintAST : : visitExtensionDecl ( ExtensionDecl * decl ) { <nl> - if ( Options . SynthesizedTarget & & Options . pTransformer ) <nl> - printSynthesizedExtension ( Options . SynthesizedTarget , decl ) ; <nl> + if ( Options . TransformContext & & <nl> + Options . TransformContext - > isPrintingSynthesizedExtension ( ) ) <nl> + printSynthesizedExtension ( Options . TransformContext - > getNominal ( ) , decl ) ; <nl> else <nl> printExtension ( decl ) ; <nl> } <nl> void PrintAST : : visitVarDecl ( VarDecl * decl ) { <nl> } ) ; <nl> if ( decl - > hasType ( ) ) { <nl> Printer < < " : " ; <nl> - if ( Options . pTransformer ) <nl> - Options . 
pTransformer - > transform ( decl - > getType ( ) ) . print ( Printer , Options ) ; <nl> + if ( Options . TransformContext ) <nl> + Options . TransformContext - > transform ( decl - > getType ( ) ) . <nl> + print ( Printer , Options ) ; <nl> else <nl> decl - > getType ( ) . print ( Printer , Options ) ; <nl> } <nl> void PrintAST : : visitFuncDecl ( FuncDecl * decl ) { <nl> Type ResultTy = decl - > getResultType ( ) ; <nl> if ( ResultTy & & ! ResultTy - > isEqual ( TupleType : : getEmpty ( Context ) ) ) { <nl> Printer < < " - > " ; <nl> - if ( Options . pTransformer ) { <nl> - ResultTy = Options . pTransformer - > transform ( ResultTy ) ; <nl> + if ( Options . TransformContext ) { <nl> + ResultTy = Options . TransformContext - > transform ( ResultTy ) ; <nl> PrintOptions FreshOptions ; <nl> ResultTy - > print ( Printer , FreshOptions ) ; <nl> } else <nl>
|
[ gardening ] ASTPrinter : encapsulate archetype - transformation related stuff into a sub - structure . NFC
|
apple/swift
|
17cde5322f3dd2c80b08b03f622c32f512ea9e8b
|
2016-02-17T23:35:28Z
|
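Note on the ASTPrinter record above: it replaces the separate TypeToPrint / SynthesizedTarget fields with a single ArchetypeTransformContext that stores either a TypeBase * or a NominalTypeDecl * in an llvm::PointerUnion and decides the printing mode by asking which alternative is populated. The standalone sketch below mirrors that discrimination with std::variant instead of the LLVM type; the struct and member names are illustrative only and are not the Swift compiler's API.

#include <variant>
#include <cassert>

struct TypeBase {};
struct NominalTypeDecl {};

// Sketch of the "one field, two printing modes" idea from the commit above:
// holding a TypeBase* means "printing a type interface", holding a
// NominalTypeDecl* means "printing a synthesized extension".
struct TransformContextSketch {
    std::variant<std::monostate, TypeBase *, NominalTypeDecl *> typeOrNominal;

    bool isPrintingTypeInterface() const {
        return std::holds_alternative<TypeBase *>(typeOrNominal);
    }
    bool isPrintingSynthesizedExtension() const {
        return std::holds_alternative<NominalTypeDecl *>(typeOrNominal);
    }
};

int main() {
    NominalTypeDecl nominal;
    TransformContextSketch ctx{&nominal};
    assert(ctx.isPrintingSynthesizedExtension());
    assert(!ctx.isPrintingTypeInterface());
}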
mmm a / src / gui / mainwindow . cpp <nl> ppp b / src / gui / mainwindow . cpp <nl> void MainWindow : : displayRSSTab ( bool enable ) <nl> if ( ! m_rssWidget ) { <nl> m_rssWidget = new RSSWidget ( m_tabs ) ; <nl> connect ( m_rssWidget . data ( ) , & RSSWidget : : unreadCountUpdated , this , & MainWindow : : handleRSSUnreadCountUpdated ) ; <nl> - # ifndef Q_OS_MAC <nl> + # ifdef Q_OS_MAC <nl> + m_tabs - > addTab ( m_rssWidget , tr ( " RSS ( % 1 ) " ) . arg ( RSS : : Session : : instance ( ) - > rootFolder ( ) - > unreadCount ( ) ) ) ; <nl> + # else <nl> const int indexTab = m_tabs - > addTab ( m_rssWidget , tr ( " RSS ( % 1 ) " ) . arg ( RSS : : Session : : instance ( ) - > rootFolder ( ) - > unreadCount ( ) ) ) ; <nl> m_tabs - > setTabIcon ( indexTab , GuiIconProvider : : instance ( ) - > getIcon ( " application - rss + xml " ) ) ; <nl> # endif <nl> void MainWindow : : on_actionExecutionLogs_triggered ( bool checked ) <nl> if ( checked ) { <nl> Q_ASSERT ( ! m_executionLog ) ; <nl> m_executionLog = new ExecutionLog ( m_tabs , static_cast < Log : : MsgType > ( executionLogMsgTypes ( ) ) ) ; <nl> - # ifndef Q_OS_MAC <nl> + # ifdef Q_OS_MAC <nl> + m_tabs - > addTab ( m_executionLog , tr ( " Execution Log " ) ) ; <nl> + # else <nl> const int indexTab = m_tabs - > addTab ( m_executionLog , tr ( " Execution Log " ) ) ; <nl> m_tabs - > setTabIcon ( indexTab , GuiIconProvider : : instance ( ) - > getIcon ( " view - calendar - journal " ) ) ; <nl> # endif <nl>
|
Add RSS / Log widgets to the QTabWidget on macOS
|
qbittorrent/qBittorrent
|
90bb67c654180d44487c674bef5fefa4ce81a7c5
|
2018-05-04T16:56:42Z
|
mmm a / modules / localization / msf / local_pyramid_map / base_map / BUILD <nl> ppp b / modules / localization / msf / local_pyramid_map / base_map / BUILD <nl> cc_library ( <nl> " base_map_fwd . h " , <nl> " base_map_matrix . h " , <nl> ] , <nl> - linkopts = [ <nl> - " - lz " , <nl> - ] , <nl> deps = [ <nl> " / / cyber / common " , <nl> + " / / third_party : zlib " , <nl> " @ com_google_glog / / : glog " , <nl> " @ eigen " , <nl> " @ opencv " , <nl> mmm a / third_party / BUILD <nl> ppp b / third_party / BUILD <nl> cc_library ( <nl> " - lcblas " , <nl> ] , <nl> ) <nl> + <nl> + cc_library ( <nl> + name = " zlib " , <nl> + linkopts = [ <nl> + " - lz " , <nl> + ] , <nl> + ) <nl>
|
Build : specify explicit zlib dependency
|
ApolloAuto/apollo
|
f045d61e68e9fe69d3c2ec6b25b0bd55653eda36
|
2020-06-27T16:16:08Z
|
mmm a / tensorflow / python / distribute / distribute_lib . py <nl> ppp b / tensorflow / python / distribute / distribute_lib . py <nl> def strategy ( self ) : <nl> return self . _strategy <nl> <nl> @ property <nl> + @ deprecation . deprecated ( None , " Please avoid relying on devices property . " ) <nl> def devices ( self ) : <nl> - " " " The devices this replica is to be executed on , as a tuple of strings . " " " <nl> + " " " Returns the devices this replica is to be executed on , as a tuple of strings . <nl> + <nl> + NOTE : For ` tf . distribute . MirroredStrategy ` and <nl> + ` tf . distribute . experimental . MultiWorkerMirroredStrategy ` , this returns a <nl> + nested <nl> + list of device strings , e . g , [ [ " gpu : 0 " ] ] . <nl> + " " " <nl> require_replica_context ( self ) <nl> return ( device_util . current ( ) , ) <nl> <nl>
|
Clarify ReplicaContext . devices returns a nested list for MS and MWMS .
|
tensorflow/tensorflow
|
9db000200763326cce30f0f9e744253d813afd1b
|
2020-06-25T02:01:02Z
|
mmm a / ci / test / 00_setup_env_i686 . sh <nl> ppp b / ci / test / 00_setup_env_i686 . sh <nl> <nl> export LC_ALL = C . UTF - 8 <nl> <nl> export HOST = i686 - pc - linux - gnu <nl> + export DEP_OPTS = " PROTOBUF = 1 " <nl> export PACKAGES = " g + + - multilib python3 - zmq " <nl> export GOAL = " install " <nl> export BITCOIN_CONFIG = " - - enable - zmq - - with - gui = qt5 - - enable - bip70 - - enable - glibc - back - compat - - enable - reduce - exports LDFLAGS = - static - libstdc + + " <nl> mmm a / depends / Makefile <nl> ppp b / depends / Makefile <nl> WORK_PATH = $ ( BASEDIR ) / work <nl> BASE_CACHE ? = $ ( BASEDIR ) / built <nl> SDK_PATH ? = $ ( BASEDIR ) / SDKs <nl> NO_QT ? = <nl> + PROTOBUF ? = <nl> RAPIDCHECK ? = <nl> NO_WALLET ? = <nl> NO_ZMQ ? = <nl> wallet_packages_ $ ( NO_WALLET ) = $ ( wallet_packages ) <nl> upnp_packages_ $ ( NO_UPNP ) = $ ( upnp_packages ) <nl> zmq_packages_ $ ( NO_ZMQ ) = $ ( zmq_packages ) <nl> <nl> + protobuf_packages_ $ ( PROTOBUF ) = $ ( protobuf_packages ) <nl> rapidcheck_packages_ $ ( RAPIDCHECK ) = $ ( rapidcheck_packages ) <nl> <nl> packages + = $ ( $ ( host_arch ) _ $ ( host_os ) _packages ) $ ( $ ( host_os ) _packages ) $ ( qt_packages_ ) $ ( wallet_packages_ ) $ ( upnp_packages_ ) <nl> native_packages + = $ ( $ ( host_arch ) _ $ ( host_os ) _native_packages ) $ ( $ ( host_os ) _native_packages ) <nl> <nl> - ifneq ( $ ( qt_packages_ ) , ) <nl> - native_packages + = $ ( qt_native_packages ) <nl> + ifeq ( $ ( protobuf_packages_ ) , ) <nl> + native_packages + = $ ( protobuf_native_packages ) <nl> + packages + = $ ( protobuf_packages ) <nl> endif <nl> <nl> ifneq ( $ ( zmq_packages_ ) , ) <nl> $ ( host_prefix ) / share / config . site : config . site . in $ ( host_prefix ) / . stamp_ $ ( final_ <nl> - e ' s | @ allow_host_packages @ | $ ( ALLOW_HOST_PACKAGES ) | ' \ <nl> - e ' s | @ no_qt @ | $ ( NO_QT ) | ' \ <nl> - e ' s | @ no_zmq @ | $ ( NO_ZMQ ) | ' \ <nl> + - e ' s | @ enable_bip70 @ | $ ( PROTOBUF ) | ' \ <nl> - e ' s | @ no_wallet @ | $ ( NO_WALLET ) | ' \ <nl> - e ' s | @ no_upnp @ | $ ( NO_UPNP ) | ' \ <nl> - e ' s | @ debug @ | $ ( DEBUG ) | ' \ <nl> mmm a / depends / README . md <nl> ppp b / depends / README . md <nl> The following can be set when running make : make FOO = bar <nl> NO_UPNP : Don ' t download / build / cache packages needed for enabling upnp <nl> DEBUG : disable some optimizations and enable more runtime checking <nl> RAPIDCHECK : build rapidcheck ( experimental , requires cmake ) <nl> + PROTOBUF : build protobuf ( used for deprecated BIP70 support ) <nl> HOST_ID_SALT : Optional salt to use when generating host package ids <nl> BUILD_ID_SALT : Optional salt to use when generating build package ids <nl> <nl> mmm a / depends / config . site . in <nl> ppp b / depends / config . site . in <nl> if test - z $ enable_zmq & & test - n " @ no_zmq @ " ; then <nl> enable_zmq = no <nl> fi <nl> <nl> + if test - n $ enable_bip70 & & test - n " @ enable_bip70 @ " ; then <nl> + enable_bip70 = yes <nl> + fi <nl> + <nl> if test x @ host_os @ = xdarwin ; then <nl> BREW = no <nl> PORT = no <nl> mmm a / depends / packages / packages . mk <nl> ppp b / depends / packages / packages . 
mk <nl> <nl> packages : = boost openssl libevent <nl> <nl> - qt_native_packages = native_protobuf <nl> - qt_packages = qrencode protobuf zlib <nl> + protobuf_native_packages = native_protobuf <nl> + protobuf_packages = protobuf <nl> + <nl> + qt_packages = qrencode zlib <nl> <nl> qt_linux_packages : = qt expat libxcb xcb_proto libXau xproto freetype fontconfig <nl> <nl>
|
build : make protobuf optional in depends
|
bitcoin/bitcoin
|
107e030723552cf272dc8da01bb682032a457a3d
|
2019-09-14T04:58:59Z
|
mmm a / src / base / bittorrent / session . cpp <nl> ppp b / src / base / bittorrent / session . cpp <nl> Session : : Session ( QObject * parent ) <nl> , m_useOSCache ( BITTORRENT_SESSION_KEY ( " UseOSCache " ) , true ) <nl> , m_isAnonymousModeEnabled ( BITTORRENT_SESSION_KEY ( " AnonymousModeEnabled " ) , false ) <nl> , m_isQueueingEnabled ( BITTORRENT_SESSION_KEY ( " QueueingSystemEnabled " ) , true ) <nl> - , m_maxActiveDownloads ( BITTORRENT_SESSION_KEY ( " MaxActiveDownloads " ) , 3 , lowerLimited ( 0 ) ) <nl> - , m_maxActiveUploads ( BITTORRENT_SESSION_KEY ( " MaxActiveUploads " ) , 3 , lowerLimited ( 0 ) ) <nl> - , m_maxActiveTorrents ( BITTORRENT_SESSION_KEY ( " MaxActiveTorrents " ) , 5 , lowerLimited ( 0 ) ) <nl> + , m_maxActiveDownloads ( BITTORRENT_SESSION_KEY ( " MaxActiveDownloads " ) , 3 , lowerLimited ( - 1 ) ) <nl> + , m_maxActiveUploads ( BITTORRENT_SESSION_KEY ( " MaxActiveUploads " ) , 3 , lowerLimited ( - 1 ) ) <nl> + , m_maxActiveTorrents ( BITTORRENT_SESSION_KEY ( " MaxActiveTorrents " ) , 5 , lowerLimited ( - 1 ) ) <nl> , m_ignoreSlowTorrentsForQueueing ( BITTORRENT_SESSION_KEY ( " IgnoreSlowTorrentsForQueueing " ) , false ) <nl> , m_outgoingPortsMin ( BITTORRENT_SESSION_KEY ( " OutgoingPortsMin " ) , 0 ) <nl> , m_outgoingPortsMax ( BITTORRENT_SESSION_KEY ( " OutgoingPortsMax " ) , 0 ) <nl> void Session : : configure ( libtorrent : : settings_pack & settingsPack ) <nl> void Session : : adjustLimits ( libt : : session_settings & sessionSettings ) <nl> { <nl> / / Internally increase the queue limits to ensure that the magnet is started <nl> - int maxDownloading = maxActiveDownloads ( ) ; <nl> + int maxDownloads = maxActiveDownloads ( ) ; <nl> int maxActive = maxActiveTorrents ( ) ; <nl> <nl> - if ( maxDownloading > - 1 ) <nl> - sessionSettings . active_downloads = maxDownloading + m_extraLimit ; <nl> - else <nl> - sessionSettings . active_downloads = maxDownloading ; <nl> - <nl> - if ( maxActive > - 1 ) <nl> - sessionSettings . active_limit = maxActive + m_extraLimit ; <nl> - else <nl> - sessionSettings . active_limit = maxActive ; <nl> + sessionSettings . active_downloads = maxDownloads > - 1 ? maxDownloads + m_extraLimit : maxDownloads ; <nl> + sessionSettings . active_limit = maxActive > - 1 ? maxActive + m_extraLimit : maxActive ; <nl> } <nl> <nl> void Session : : configure ( libtorrent : : session_settings & sessionSettings ) <nl>
|
Fix setting limits to unlimited .
|
qbittorrent/qBittorrent
|
10cd45dc81e99d312251309b7d368e15025b721f
|
2016-11-01T01:41:35Z
|
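Note on the qBittorrent queueing record above: the fix relies on -1 acting as the "unlimited" sentinel. The lower bound on the stored settings is relaxed to -1, and the extra headroom for metadata downloads is only added when the configured value is a real bound. A minimal standalone sketch of that pattern (not qBittorrent's actual Session class):

#include <iostream>

// -1 means "unlimited" and must pass through untouched; any non-negative
// limit gets the internal extra headroom added on top, as in adjustLimits().
int adjustLimit(int configuredLimit, int extraLimit)
{
    return configuredLimit > -1 ? configuredLimit + extraLimit : configuredLimit;
}

int main()
{
    std::cout << adjustLimit(3, 20) << '\n';   // bounded limit: prints 23
    std::cout << adjustLimit(-1, 20) << '\n';  // unlimited: prints -1, unchanged
}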
new file mode 100644 <nl> index 000000000000 . . 070b6fad21b3 <nl> mmm / dev / null <nl> ppp b / apinotes / ScreenSaver . apinotes <nl> <nl> + mmm <nl> + Name : ScreenSaver <nl> + SwiftVersions : <nl> + - Version : 3 <nl> + Classes : <nl> + - Name : ScreenSaverView <nl> + Properties : <nl> + - Name : hasConfigureSheet <nl> + PropertyKind : Instance <nl> + SwiftImportAsAccessors : true <nl> + - Name : configureSheet <nl> + PropertyKind : Instance <nl> + SwiftImportAsAccessors : true <nl>
|
Add Swift 3 backward compatibility apinotes for ScreenSaver .
|
apple/swift
|
e1b95e2d9c2034c99413b5569e70e9b8ec8f9437
|
2017-06-24T05:38:23Z
|
mmm a / hphp / test / hackc_failing_tests_quick <nl> ppp b / hphp / test / hackc_failing_tests_quick <nl> quick / debugger / list . php <nl> quick / dv_i0 . php <nl> quick / eval_noreturn . php <nl> quick / exceptions . php <nl> - quick / float - overflow - literals . php <nl> quick / foreach - bad . php <nl> quick / hh_bad_end . php <nl> quick / hh_bad_start . php <nl> mmm a / hphp / test / hackc_failing_tests_slow <nl> ppp b / hphp / test / hackc_failing_tests_slow <nl> slow / class_type_constant / type_constant2 . php <nl> slow / class_type_constant / type_constant3 . php <nl> slow / class_type_constant / type_constant6 . php <nl> slow / closure / ctor . php <nl> - slow / compilation / aliased - namespaces . php <nl> slow / constant / bad - arrays . php <nl> slow / constant / globals - array - constant . php <nl> slow / debug_backtrace / compact_crash . php <nl> slow / hack_arr_compat / gena . php <nl> slow / hack_arr_compat / scalars . php <nl> slow / hack_mode / 2203 . php <nl> slow / hack_mode / 2204 . php <nl> - slow / hack_mode / 2214 . php <nl> slow / hack_mode / closing - tag - asp . php <nl> slow / hack_mode / closing - tag - php . php <nl> slow / hack_mode / closing - tag - script . php <nl> slow / hack_mode / function_hint_with_params_trailing_variadic_param . php <nl> slow / hack_mode / function_hint_with_type_hinted_variadic_param - trailing - comma . php <nl> slow / hh / hhconfig / no - config / test . php <nl> - slow / hh_namespace_migration / hh_pair4 . php <nl> slow / hh_namespace_migration / hh_vector5 . php <nl> slow / hhbbc / iteration . php <nl> slow / inout / autoloader . php <nl> mmm a / hphp / test / hackc_repo_failing_tests_quick <nl> ppp b / hphp / test / hackc_repo_failing_tests_quick <nl> <nl> - quick / CGetMVar . php <nl> - quick / EmitSProp . php <nl> - quick / FPassR . php <nl> quick / Xhp . php <nl> quick / bad_iter_warning . php <nl> - quick / builtin_extension_DOMAttr . php <nl> - quick / builtin_extension_DOMCDATASection . php <nl> - quick / builtin_extension_DOMCharacterData . php <nl> - quick / builtin_extension_DOMComment . php <nl> - quick / builtin_extension_DOMDocument . php <nl> - quick / builtin_extension_DOMDocumentFragment . php <nl> - quick / builtin_extension_DOMDocumentType . php <nl> - quick / builtin_extension_DOMElement . php <nl> - quick / builtin_extension_DOMEntity . php <nl> - quick / builtin_extension_DOMEntityReference . php <nl> - quick / builtin_extension_DOMImplementation . php <nl> - quick / builtin_extension_DOMNamedNodeMap . php <nl> - quick / builtin_extension_DOMNode . php <nl> - quick / builtin_extension_DOMNodeIterator . php <nl> - quick / builtin_extension_DOMNodeList . php <nl> - quick / builtin_extension_DOMNotation . php <nl> - quick / builtin_extension_DOMProcessingInstruction . php <nl> - quick / builtin_extension_DOMText . php <nl> - quick / builtin_extension_DOMXPath . php <nl> - quick / builtin_extension_DateInterval . php <nl> - quick / builtin_extension_DateTimeZone . php <nl> - quick / builtin_extension_LibXMLError . php <nl> - quick / builtin_extension_Memcache . php <nl> - quick / builtin_extension_Memcached . php <nl> - quick / builtin_extension_SQLite3 . php <nl> - quick / builtin_extension_SQLite3Result . php <nl> - quick / builtin_extension_SQLite3Stmt . php <nl> - quick / builtin_extension_SimpleXMLElement . php <nl> - quick / builtin_extension_SimpleXMLElementIterator . php <nl> - quick / builtin_extension_SoapClient . php <nl> - quick / builtin_extension_SoapHeader . 
php <nl> - quick / builtin_extension_SoapParam . php <nl> - quick / builtin_extension_SoapServer . php <nl> - quick / builtin_extension_XMLReader . php <nl> - quick / builtin_extension_XMLWriter . php <nl> - quick / byref2 . php <nl> - quick / class_constant . php <nl> quick / closure . php <nl> quick / coverage . php <nl> - quick / crossUnitRefs . php <nl> - quick / crossUnitRefsInc . php <nl> quick / debug_backtrace . php <nl> - quick / dict / refs . php <nl> quick / dv_i0 . php <nl> quick / exceptions . php <nl> quick / exceptions2 . php <nl> - quick / exceptions3 . php <nl> quick / exceptions7 . php <nl> quick / foreach - bad . php <nl> quick / function_varargs . php <nl> quick / generator_varargs . php <nl> quick / hh_bad_end . php <nl> quick / hh_bad_start . php <nl> quick / hh_numbers . php <nl> - quick / hopt - ret5 . php <nl> - quick / incdec - magic4 . php <nl> - quick / incdec - magic5 . php <nl> quick / intercept3 . php <nl> quick / interface - bad - methodbody . php <nl> quick / keyref_error . php <nl> - quick / keyset / refs . php <nl> - quick / method_param_promotion_error . php <nl> - quick / new - by - ref . php <nl> - quick / nonstrict_hashbang_start . php <nl> - quick / nonstrict_hashbang_whitespace_start . php <nl> quick / ns_existing_names_class . php <nl> - quick / parse_fail_shapes . php <nl> - quick / parse_fail_shapes2 . php <nl> - quick / parse_fail_shapes3 . php <nl> quick / parse_fail_strict_hashbang_whitespace_start . php <nl> - quick / parse_fail_type_constraint . php <nl> quick / parser_massive_add_exp . php <nl> quick / parser_massive_concat_exp . php <nl> quick / post_try_error . php <nl> quick / profile / Setprofile . php <nl> - quick / reassign_this1 . php <nl> quick / reassign_this2 . php <nl> - quick / redeclared_function . php <nl> quick / redeclared_method . php <nl> - quick / reqonce . php <nl> - quick / reserved_class1 . php <nl> - quick / reserved_class2 . php <nl> - quick / setop - magic4 . php <nl> - quick / setop - magic5 . php <nl> - quick / strict_hashbang_start . php <nl> quick / syntax - error - incl . php <nl> quick / syntax - error . php <nl> - quick / trailing_comma_bad1 . php <nl> - quick / trailing_comma_bad2 . php <nl> - quick / trailing_comma_bad3 . php <nl> - quick / trailing_comma_bad4 . php <nl> - quick / trailing_comma_bad5 . php <nl> - quick / trailing_comma_bad6 . php <nl> - quick / vec / refs . php <nl> - quick / vector - stack - base . php <nl> - quick / xenon / xenon . php <nl> - quick / xenon / xenon_init . php <nl> quick / xhp - malformed . php <nl> \ No newline at end of file <nl> mmm a / hphp / test / hackc_repo_failing_tests_slow <nl> ppp b / hphp / test / hackc_repo_failing_tests_slow <nl> <nl> - <nl> - <nl> - <nl> - <nl> - <nl> - <nl> - <nl> - <nl> - <nl> - <nl> - <nl> - <nl> - <nl> - <nl> - <nl> - <nl> - <nl> - <nl> - <nl> - <nl> - <nl> - slow / array_cse / 544 . php <nl> slow / async / async - gen - and - genva . php <nl> slow / async / await - all - callback . php <nl> slow / async / await_gena . php <nl> slow / async / genva - list . php <nl> slow / async / genva - refcnt . php <nl> slow / async / genva - variadic . php <nl> slow / async / genva . php <nl> - slow / async / lambda6 . php <nl> slow / async / return - annotation - 2 . php <nl> slow / async / return - annotation - 3 . php <nl> slow / autoload / autoload - enum - hint - deprecated . php <nl> slow / autoload / autoload - fail . php <nl> slow / autoload / systemlib - duplicate - autoload . php <nl> slow / class_constant / abstract_const2 . 
php <nl> - slow / class_constant / abstract_const7 . php <nl> slow / class_constant / globals - array . php <nl> slow / class_type_constant / type_constant2 . php <nl> slow / class_type_constant / type_constant3 . php <nl> slow / ext_ipc / shared_memory . php <nl> slow / ext_oauth / overflow_redir . php <nl> slow / ext_oauth / plaintext . php <nl> slow / ext_oauth / plaintext2 . php <nl> + slow / ext_oauth / rsa . php <nl> slow / ext_process / ext_process . php <nl> slow / ext_process / fork_log . php <nl> slow / ext_socket / accept_block . php <nl> slow / ext_socket / persistent_socket . php <nl> - slow / ext_xdebug / profiling_tester . php <nl> - slow / ext_xdebug / remote / breakpoint_calibration . php <nl> - slow / ext_xdebug / remote / breakpoint_manipulation . php <nl> - slow / ext_xdebug / remote / breakpoint_multiline . php <nl> - slow / ext_xdebug / remote / context_names . php <nl> - slow / ext_xdebug / remote / detach . php <nl> - slow / ext_xdebug / remote / errors . php <nl> - slow / ext_xdebug / remote / feature . php <nl> - slow / ext_xdebug / remote / init . php <nl> - slow / ext_xdebug / remote / jit . php <nl> - slow / ext_xdebug / remote / map - bug . php <nl> - slow / ext_xdebug / remote / profiler_name_get . php <nl> - slow / ext_xdebug / remote / property_get . php <nl> - slow / ext_xdebug / remote / property_set . php <nl> - slow / ext_xdebug / remote / remote - attached . php <nl> - slow / ext_xdebug / remote / source . php <nl> - slow / ext_xdebug / remote / status . php <nl> - slow / ext_xdebug / remote / step . php <nl> - slow / ext_xdebug / remote / step_async . php <nl> - slow / ext_xdebug / remote / step_over . php <nl> - slow / function / classname - default . php <nl> slow / function / closure - default . php <nl> slow / function / cuf_by_ref . php <nl> - slow / global_vars / global_vars . php <nl> slow / hack_arr_compat / cuf - safe - array . php <nl> slow / hack_arr_compat / gena . php <nl> - slow / hack_arr_compat / refs . php <nl> slow / hack_arr_compat / scalars . php <nl> slow / hack_arr_compat / variadic - by - ref . php <nl> slow / hack_mode / 2203 . php <nl> slow / hack_mode / 2204 . php <nl> - slow / hack_mode / 2210 . php <nl> - slow / hack_mode / 2211 . php <nl> - slow / hack_mode / 2212 . php <nl> - slow / hack_mode / 2214 . php <nl> slow / hack_mode / closing - tag - asp . php <nl> slow / hack_mode / closing - tag - php . php <nl> slow / hack_mode / closing - tag - script . php <nl> slow / hack_mode / function_hint_with_params_trailing_variadic_param . php <nl> slow / hack_mode / function_hint_with_type_hinted_variadic_param - trailing - comma . php <nl> - slow / hh_namespace_migration / hh_frozenset4 . php <nl> - slow / hh_namespace_migration / hh_frozenset7 . php <nl> - slow / hh_namespace_migration / hh_frozenvector4 . php <nl> - slow / hh_namespace_migration / hh_frozenvector7 . php <nl> - slow / hh_namespace_migration / hh_map4 . php <nl> - slow / hh_namespace_migration / hh_map7 . php <nl> - slow / hh_namespace_migration / hh_pair4 . php <nl> - slow / hh_namespace_migration / hh_pair7 . php <nl> - slow / hh_namespace_migration / hh_set4 . php <nl> - slow / hh_namespace_migration / hh_set7 . php <nl> - slow / hh_namespace_migration / hh_vector3 . php <nl> slow / hh_namespace_migration / hh_vector5 . php <nl> - slow / hhbbc / func_family_005 . php <nl> slow / hhbbc / iteration . php <nl> - slow / inout / autoloader . php <nl> - slow / inout / bad - abstract . php <nl> slow / inout / bad - async - generator . 
php <nl> - slow / inout / bad - async . php <nl> slow / inout / bad - call - 1 . php <nl> - slow / inout / bad - call - 10 . php <nl> - slow / inout / bad - call - 11 . php <nl> - slow / inout / bad - call - 12 . php <nl> - slow / inout / bad - call - 13 . php <nl> - slow / inout / bad - call - 14 . php <nl> slow / inout / bad - call - 15 . php <nl> slow / inout / bad - call - 2 . php <nl> slow / inout / bad - call - 3 . php <nl> slow / inout / bad - decl - 1 . php <nl> slow / inout / bad - decl - 2 . php <nl> slow / inout / bad - func . php <nl> slow / inout / bad - generator . php <nl> - slow / inout / bad - interface . php <nl> - slow / inout / bad - memoize . php <nl> - slow / inout / bad - mixed - mode - 1 . php <nl> - slow / inout / bad - mixed - mode - 2 . php <nl> - slow / inout / bad - parent - 1 . php <nl> - slow / inout / bad - parent - 2 . php <nl> - slow / inout / bad - parent - 3 . php <nl> - slow / inout / bad - parent - 4 . php <nl> - slow / inout / bad - parent - 5 . php <nl> - slow / inout / bad - parent - 6 . php <nl> - slow / inout / bad - trait - 1 . php <nl> - slow / inout / bad - trait - 2 . php <nl> - slow / inout / builtin - interop . php <nl> - slow / inout / call - dynamic . php <nl> - slow / inout / call - static . php <nl> - slow / inout / closure . php <nl> - slow / inout / complex - expr . php <nl> - slow / inout / def - func . php <nl> - slow / inout / func - interop - dynamic . php <nl> - slow / inout / func - interop - static . php <nl> - slow / inout / inout - decl . php <nl> - slow / inout / method - dynamic . php <nl> slow / inout / method - interop - dynamic . php <nl> slow / inout / method - interop - static . php <nl> - slow / inout / method - static . php <nl> - slow / inout / out - type - hints . php <nl> slow / inout / side - effects . php <nl> slow / inout / trait - magic . php <nl> slow / intercept / extra_args . php <nl> - slow / ir_inlining / ref_noref_methods . php <nl> slow / keyset / init - parse - error1 . php <nl> slow / keyset / init - parse - error2 . php <nl> slow / lang / deprecated . php <nl> slow / lang / hh_return_type_this_3 . php <nl> slow / nullsafe / nullsafe - prop - 11 . php <nl> slow / nullsafe / nullsafe - prop - 14 . php <nl> slow / nullsafe / nullsafe - prop - 15 . php <nl> - slow / nullsafe / nullsafe - prop - 2 . php <nl> - slow / nullsafe / nullsafe - prop - 3 . php <nl> slow / nullsafe / nullsafe - prop - 6 . php <nl> slow / nullsafe / nullsafe - prop - 7 . php <nl> slow / nullsafe / nullsafe - prop - 8 . php <nl> slow / nullsafe / nullsafe - prop - 9 . php <nl> slow / object / 672 . php <nl> - slow / parser / async - constructor . php <nl> slow / parser / async . php <nl> - slow / parser / bad_yield . php <nl> slow / parser / closure_return_types_both_places . php <nl> slow / parser / hh - namespace - conflict - 2 . php <nl> slow / parser / hh - namespace - conflict - 3 . php <nl> slow / parser / hh - namespace - conflict - 5 . php <nl> slow / parser / hh - namespace - conflict - 6 . php <nl> slow / parser / hh - reserved - name - array - 1 . php <nl> slow / parser / hh - reserved - name - array - 2 . php <nl> - slow / parser / hh - reserved - name - arraykey - 1 . php <nl> - slow / parser / hh - reserved - name - arraykey - 2 . php <nl> - slow / parser / hh - reserved - name - bool - 1 . php <nl> - slow / parser / hh - reserved - name - bool - 2 . php <nl> - slow / parser / hh - reserved - name - boolean - 1 . php <nl> - slow / parser / hh - reserved - name - boolean - 2 . 
php <nl> slow / parser / hh - reserved - name - callable - 1 . php <nl> slow / parser / hh - reserved - name - callable - 2 . php <nl> - slow / parser / hh - reserved - name - double - 1 . php <nl> - slow / parser / hh - reserved - name - double - 2 . php <nl> - slow / parser / hh - reserved - name - float - 1 . php <nl> - slow / parser / hh - reserved - name - float - 2 . php <nl> - slow / parser / hh - reserved - name - int - 1 . php <nl> - slow / parser / hh - reserved - name - int - 2 . php <nl> - slow / parser / hh - reserved - name - integer - 1 . php <nl> - slow / parser / hh - reserved - name - integer - 2 . php <nl> - slow / parser / hh - reserved - name - noreturn - 1 . php <nl> - slow / parser / hh - reserved - name - noreturn - 2 . php <nl> - slow / parser / hh - reserved - name - num - 1 . php <nl> - slow / parser / hh - reserved - name - num - 2 . php <nl> - slow / parser / hh - reserved - name - parent - 1 . php <nl> slow / parser / hh - reserved - name - parent - 2 . php <nl> - slow / parser / hh - reserved - name - real - 1 . php <nl> - slow / parser / hh - reserved - name - real - 2 . php <nl> - slow / parser / hh - reserved - name - resource - 1 . php <nl> - slow / parser / hh - reserved - name - resource - 2 . php <nl> - slow / parser / hh - reserved - name - self - 1 . php <nl> slow / parser / hh - reserved - name - self - 2 . php <nl> - slow / parser / hh - reserved - name - string - 1 . php <nl> - slow / parser / hh - reserved - name - string - 2 . php <nl> - slow / parser / hh - reserved - name - void - 1 . php <nl> - slow / parser / hh - reserved - name - void - 2 . php <nl> slow / parser / semi - reserved - keywords / enum_class_constant_bad . php <nl> slow / parser / semi - reserved - keywords / type_const_bad . php <nl> - slow / parser / use - strict - hack . php <nl> - slow / parser / xhp - ambiguity . php <nl> slow / ref - annotate . php <nl> slow / reflection / ReflectionTypeAliasAttrs . php <nl> slow / reflection / ReflectionTypeAliasObjAttrs . php <nl> slow / reflection_classes / xhp . php <nl> slow / return_statement / finally - typecheck . php <nl> slow / traits / 2080 . php <nl> - slow / traits / duplicate_require . php <nl> slow / traits / require_constraint_basic_error . php <nl> slow / traits / require_constraint_iface_implements_error . php <nl> slow / traits / require_constraint_recurse . php <nl> slow / using / bad - lambda5 . php <nl> slow / using / bad - lambda6 . php <nl> slow / using / bad - name . php <nl> slow / using / empty - exprs . php <nl> - slow / using / goto . php <nl> slow / variadic_args / destructors . php <nl> slow / vec / bad - init1 . php <nl> slow / vec / bad - init2 . php <nl> slow / xhp / attr - syntax - 2 . php <nl> - slow / xhp / attr - syntax - 3 . php <nl> \ No newline at end of file <nl> + slow / xhp / attr - syntax - 3 . php <nl> mmm a / hphp / test / hhcodegen_failing_tests_slow <nl> ppp b / hphp / test / hhcodegen_failing_tests_slow <nl> <nl> slow / anon_class / anon_1 . php <nl> + slow / ext_oauth / rsa . php <nl> slow / anon_class / extend_local . php <nl> slow / class_type_constant / type_constant . php <nl> slow / class_type_constant / type_constant6 . php <nl> slow / php7_backported / ns_093 . php <nl> slow / php7_backported / scalar - types / scalar_relative_typehint_disallowed . php <nl> slow / reflection / param_tostring_zendcompat . php <nl> slow / reflection_classes / xhp . php <nl> - slow / using / goto . php <nl> \ No newline at end of file <nl> + slow / using / goto . php <nl>
|
passing tests removed from the list of failing tests
|
facebook/hhvm
|
fa15b6276624598ba9e57fa00731c17ccf0e70e8
|
2017-12-05T22:43:58Z
|
new file mode 100644 <nl> index 000000000 . . 7309d5c1f <nl> mmm / dev / null <nl> ppp b / Reference . md <nl> <nl> + # Reference <nl> + <nl> + # # Types and default values <nl> + <nl> + | JSON type | value_type | C + + type | type alias | default value | <nl> + | mmmmmmmmmmmmmmmmmmmmm - - | mmmmmmmmmmmmmmmmmmmmmmmm - - | mmmmmmmmmmmmmmmmmmmmmmmmmmm - - | mmmmmmmmmmmmmmmmmmmmm - | mmmmmmmmmmmm - - <nl> + | null | ` value_type : : null ` | ` nullptr_t ` | - | ` nullptr ` | <nl> + | string | ` value_type : : string ` | ` std : : string ` | ` JSON : : string_t ` | ` " " ` | <nl> + | number ( integer ) | ` value_type : : number ` | ` int ` | ` JSON : : number_t ` | ` 0 ` | <nl> + | number ( floating point ) | ` value_type : : number_float ` | ` double ` | ` JSON : : number_float_t ` | ` 0 . 0 ` | <nl> + | array | ` value_type : : array ` | ` std : : array < JSON > ` | ` JSON : : array_t ` | ` { } ` | <nl> + | object | ` value_type : : object ` | ` std : : map < std : : string , JSON > ` | ` JSON : : object_t ` | ` { } ` | <nl> + <nl> + # # Conversions <nl> + <nl> + There are only a few type conversions possible : <nl> + <nl> + - An integer number can be translated to a floating point number ( e . g . , by calling ` get < double > ( ) ` ) . <nl> + - A floating pointnnumber can be translated to an integer number ( e . g . , by calling ` get < int > ( ) ` ) . Note the number is truncated and not rounded , ceiled or floored . <nl> + - Any value but JSON objects can be translated into an array . The result is a singleton array that consists of the value before . <nl> + - Any other conversion will throw a ` std : : logic_error ` exception . <nl> mmm a / src / JSON . cc <nl> ppp b / src / JSON . cc <nl> JSON : : const_iterator JSON : : find ( const char * key ) const <nl> } <nl> } <nl> <nl> - / * ! <nl> - @ return the payload of the JSON object . <nl> - * / <nl> - JSON : : value JSON : : data ( ) noexcept <nl> - { <nl> - return _value ; <nl> - } <nl> - <nl> - const JSON : : value JSON : : data ( ) const noexcept <nl> - { <nl> - return _value ; <nl> - } <nl> - <nl> bool JSON : : operator = = ( const JSON & o ) const noexcept <nl> { <nl> switch ( _type ) <nl> mmm a / src / JSON . h <nl> ppp b / src / JSON . h <nl> class JSON <nl> / / / find an element in an object ( returns end ( ) iterator otherwise ) <nl> const_iterator find ( const char * ) const ; <nl> <nl> - / / / direct access to the underlying payload <nl> - value data ( ) noexcept ; <nl> - / / / direct access to the underlying payload <nl> - const value data ( ) const noexcept ; <nl> - <nl> / / / lexicographically compares the values <nl> bool operator = = ( const JSON & ) const noexcept ; <nl> / / / lexicographically compares the values <nl> mmm a / test / JSON_unit . cc <nl> ppp b / test / JSON_unit . cc <nl> TEST_CASE ( " array " ) <nl> CHECK ( j . begin ( ) ! = j . end ( ) ) ; <nl> CHECK ( j . cbegin ( ) ! = j . cend ( ) ) ; <nl> <nl> - / / check payload <nl> - / / CHECK ( * ( j . data ( ) . array ) = = JSON : : array_t ( ) ) ; <nl> - / / CHECK ( * ( j_const . data ( ) . array ) = = JSON : : array_t ( ) ) ; <nl> - <nl> / / container members <nl> CHECK ( j . size ( ) = = 0 ) ; <nl> CHECK ( j . empty ( ) = = true ) ; <nl> TEST_CASE ( " object " ) <nl> CHECK ( j . begin ( ) ! = j . end ( ) ) ; <nl> CHECK ( j . cbegin ( ) ! = j . cend ( ) ) ; <nl> <nl> - / / check payload <nl> - / / CHECK ( * ( j . data ( ) . object ) = = JSON : : object_t ( ) ) ; <nl> - / / CHECK ( * ( j_const . data ( ) . 
object ) = = JSON : : object_t ( ) ) ; <nl> - <nl> / / container members <nl> CHECK ( j . size ( ) = = 0 ) ; <nl> CHECK ( j . empty ( ) = = true ) ; <nl> TEST_CASE ( " string " ) <nl> / / string representation of default value <nl> CHECK ( j . toString ( ) = = " \ " \ " " ) ; <nl> <nl> - / / check payload <nl> - CHECK ( * ( j . data ( ) . string ) = = JSON : : string_t ( ) ) ; <nl> - CHECK ( * ( j_const . data ( ) . string ) = = JSON : : string_t ( ) ) ; <nl> - <nl> / / container members <nl> CHECK ( j . size ( ) = = 1 ) ; <nl> CHECK ( j . empty ( ) = = false ) ; <nl> TEST_CASE ( " boolean " ) <nl> / / string representation of default value <nl> CHECK ( j . toString ( ) = = " false " ) ; <nl> <nl> - / / check payload <nl> - CHECK ( j . data ( ) . boolean = = JSON : : boolean_t ( ) ) ; <nl> - CHECK ( j_const . data ( ) . boolean = = JSON : : boolean_t ( ) ) ; <nl> - <nl> / / container members <nl> CHECK ( j . size ( ) = = 1 ) ; <nl> CHECK ( j . empty ( ) = = false ) ; <nl> TEST_CASE ( " number ( int ) " ) <nl> / / string representation of default value <nl> CHECK ( j . toString ( ) = = " 0 " ) ; <nl> <nl> - / / check payload <nl> - CHECK ( j . data ( ) . number = = JSON : : number_t ( ) ) ; <nl> - CHECK ( j_const . data ( ) . number = = JSON : : number_t ( ) ) ; <nl> - <nl> / / container members <nl> CHECK ( j . size ( ) = = 1 ) ; <nl> CHECK ( j . empty ( ) = = false ) ; <nl> TEST_CASE ( " number ( float ) " ) <nl> / / string representation of default value <nl> CHECK ( j . toString ( ) = = " 0 . 000000 " ) ; <nl> <nl> - / / check payload <nl> - CHECK ( j . data ( ) . number_float = = JSON : : number_float_t ( ) ) ; <nl> - CHECK ( j_const . data ( ) . number_float = = JSON : : number_float_t ( ) ) ; <nl> - <nl> / / container members <nl> CHECK ( j . size ( ) = = 1 ) ; <nl> CHECK ( j . empty ( ) = = false ) ; <nl>
|
+ removed data ( ) function
|
nlohmann/json
|
a8c4f84fb89f8d7b50775e1736db84406f4cac62
|
2014-12-29T15:45:54Z
|
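Note on the nlohmann/json record above: the commit adds a Reference.md documenting the only permitted type conversions (integer and floating-point numbers convertible via get<T>(), with truncation rather than rounding) while removing the raw data() accessor. A short sketch of those documented conversions, assuming the 2014-era single-class JSON API from that commit; the header name, literal construction, and get<T>() calls are taken from the added Reference.md and the tests in the diff, not verified beyond that.

#include "JSON.h"

int main()
{
    JSON integer = 7;   // number (integer)
    JSON real = 3.9;    // number (floating point)

    double d = integer.get<double>();  // integer -> floating point: 7.0
    int i = real.get<int>();           // floating point -> integer: truncated to 3, not rounded

    return (d == 7.0 && i == 3) ? 0 : 1;
}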
mmm a / src / arm / lithium - arm . cc <nl> ppp b / src / arm / lithium - arm . cc <nl> LInstruction * LChunkBuilder : : DoChange ( HChange * instr ) { <nl> LInstruction * LChunkBuilder : : DoCheckHeapObject ( HCheckHeapObject * instr ) { <nl> LOperand * value = UseRegisterAtStart ( instr - > value ( ) ) ; <nl> LInstruction * result = new ( zone ( ) ) LCheckNonSmi ( value ) ; <nl> - if ( ! instr - > value ( ) - > IsHeapObject ( ) ) result = AssignEnvironment ( result ) ; <nl> + if ( ! instr - > value ( ) - > type ( ) . IsHeapObject ( ) ) { <nl> + result = AssignEnvironment ( result ) ; <nl> + } <nl> return result ; <nl> } <nl> <nl> mmm a / src / arm / lithium - codegen - arm . cc <nl> ppp b / src / arm / lithium - codegen - arm . cc <nl> void LCodeGen : : DoIsStringAndBranch ( LIsStringAndBranch * instr ) { <nl> Register temp1 = ToRegister ( instr - > temp ( ) ) ; <nl> <nl> SmiCheck check_needed = <nl> - instr - > hydrogen ( ) - > value ( ) - > IsHeapObject ( ) <nl> + instr - > hydrogen ( ) - > value ( ) - > type ( ) . IsHeapObject ( ) <nl> ? OMIT_SMI_CHECK : INLINE_SMI_CHECK ; <nl> Condition true_cond = <nl> EmitIsString ( reg , temp1 , instr - > FalseLabel ( chunk_ ) , check_needed ) ; <nl> void LCodeGen : : DoIsUndetectableAndBranch ( LIsUndetectableAndBranch * instr ) { <nl> Register input = ToRegister ( instr - > value ( ) ) ; <nl> Register temp = ToRegister ( instr - > temp ( ) ) ; <nl> <nl> - if ( ! instr - > hydrogen ( ) - > value ( ) - > IsHeapObject ( ) ) { <nl> + if ( ! instr - > hydrogen ( ) - > value ( ) - > type ( ) . IsHeapObject ( ) ) { <nl> __ JumpIfSmi ( input , instr - > FalseLabel ( chunk_ ) ) ; <nl> } <nl> __ ldr ( temp , FieldMemOperand ( input , HeapObject : : kMapOffset ) ) ; <nl> void LCodeGen : : DoHasInstanceTypeAndBranch ( LHasInstanceTypeAndBranch * instr ) { <nl> Register scratch = scratch0 ( ) ; <nl> Register input = ToRegister ( instr - > value ( ) ) ; <nl> <nl> - if ( ! instr - > hydrogen ( ) - > value ( ) - > IsHeapObject ( ) ) { <nl> + if ( ! instr - > hydrogen ( ) - > value ( ) - > type ( ) . IsHeapObject ( ) ) { <nl> __ JumpIfSmi ( input , instr - > FalseLabel ( chunk_ ) ) ; <nl> } <nl> <nl> void LCodeGen : : DoStoreContextSlot ( LStoreContextSlot * instr ) { <nl> __ str ( value , target ) ; <nl> if ( instr - > hydrogen ( ) - > NeedsWriteBarrier ( ) ) { <nl> SmiCheck check_needed = <nl> - instr - > hydrogen ( ) - > value ( ) - > IsHeapObject ( ) <nl> + instr - > hydrogen ( ) - > value ( ) - > type ( ) . IsHeapObject ( ) <nl> ? OMIT_SMI_CHECK : INLINE_SMI_CHECK ; <nl> __ RecordWriteContextSlot ( context , <nl> target . offset ( ) , <nl> void LCodeGen : : DoStoreKeyedFixedArray ( LStoreKeyed * instr ) { <nl> <nl> if ( instr - > hydrogen ( ) - > NeedsWriteBarrier ( ) ) { <nl> SmiCheck check_needed = <nl> - instr - > hydrogen ( ) - > value ( ) - > IsHeapObject ( ) <nl> + instr - > hydrogen ( ) - > value ( ) - > type ( ) . IsHeapObject ( ) <nl> ? OMIT_SMI_CHECK : INLINE_SMI_CHECK ; <nl> / / Compute address of modified element and store it into key register . <nl> __ add ( key , store_base , Operand ( offset ) ) ; <nl> void LCodeGen : : DoCheckSmi ( LCheckSmi * instr ) { <nl> <nl> <nl> void LCodeGen : : DoCheckNonSmi ( LCheckNonSmi * instr ) { <nl> - if ( ! instr - > hydrogen ( ) - > value ( ) - > IsHeapObject ( ) ) { <nl> + if ( ! instr - > hydrogen ( ) - > value ( ) - > type ( ) . 
IsHeapObject ( ) ) { <nl> LOperand * input = instr - > value ( ) ; <nl> __ SmiTst ( ToRegister ( input ) ) ; <nl> DeoptimizeIf ( eq , instr - > environment ( ) ) ; <nl> mmm a / src / arm64 / lithium - arm64 . cc <nl> ppp b / src / arm64 / lithium - arm64 . cc <nl> LInstruction * LChunkBuilder : : DoCheckMaps ( HCheckMaps * instr ) { <nl> LInstruction * LChunkBuilder : : DoCheckHeapObject ( HCheckHeapObject * instr ) { <nl> LOperand * value = UseRegisterAtStart ( instr - > value ( ) ) ; <nl> LInstruction * result = new ( zone ( ) ) LCheckNonSmi ( value ) ; <nl> - if ( ! instr - > value ( ) - > IsHeapObject ( ) ) result = AssignEnvironment ( result ) ; <nl> + if ( ! instr - > value ( ) - > type ( ) . IsHeapObject ( ) ) { <nl> + result = AssignEnvironment ( result ) ; <nl> + } <nl> return result ; <nl> } <nl> <nl> mmm a / src / arm64 / lithium - codegen - arm64 . cc <nl> ppp b / src / arm64 / lithium - codegen - arm64 . cc <nl> void LCodeGen : : DoCheckMaps ( LCheckMaps * instr ) { <nl> <nl> <nl> void LCodeGen : : DoCheckNonSmi ( LCheckNonSmi * instr ) { <nl> - if ( ! instr - > hydrogen ( ) - > value ( ) - > IsHeapObject ( ) ) { <nl> + if ( ! instr - > hydrogen ( ) - > value ( ) - > type ( ) . IsHeapObject ( ) ) { <nl> DeoptimizeIfSmi ( ToRegister ( instr - > value ( ) ) , instr - > environment ( ) ) ; <nl> } <nl> } <nl> void LCodeGen : : DoHasInstanceTypeAndBranch ( LHasInstanceTypeAndBranch * instr ) { <nl> Register input = ToRegister ( instr - > value ( ) ) ; <nl> Register scratch = ToRegister ( instr - > temp ( ) ) ; <nl> <nl> - if ( ! instr - > hydrogen ( ) - > value ( ) - > IsHeapObject ( ) ) { <nl> + if ( ! instr - > hydrogen ( ) - > value ( ) - > type ( ) . IsHeapObject ( ) ) { <nl> __ JumpIfSmi ( input , instr - > FalseLabel ( chunk_ ) ) ; <nl> } <nl> __ CompareObjectType ( input , scratch , scratch , TestType ( instr - > hydrogen ( ) ) ) ; <nl> void LCodeGen : : DoIsStringAndBranch ( LIsStringAndBranch * instr ) { <nl> Register scratch = ToRegister ( instr - > temp ( ) ) ; <nl> <nl> SmiCheck check_needed = <nl> - instr - > hydrogen ( ) - > value ( ) - > IsHeapObject ( ) <nl> + instr - > hydrogen ( ) - > value ( ) - > type ( ) . IsHeapObject ( ) <nl> ? OMIT_SMI_CHECK : INLINE_SMI_CHECK ; <nl> Condition true_cond = <nl> EmitIsString ( val , scratch , instr - > FalseLabel ( chunk_ ) , check_needed ) ; <nl> void LCodeGen : : DoIsUndetectableAndBranch ( LIsUndetectableAndBranch * instr ) { <nl> Register input = ToRegister ( instr - > value ( ) ) ; <nl> Register temp = ToRegister ( instr - > temp ( ) ) ; <nl> <nl> - if ( ! instr - > hydrogen ( ) - > value ( ) - > IsHeapObject ( ) ) { <nl> + if ( ! instr - > hydrogen ( ) - > value ( ) - > type ( ) . IsHeapObject ( ) ) { <nl> __ JumpIfSmi ( input , instr - > FalseLabel ( chunk_ ) ) ; <nl> } <nl> __ Ldr ( temp , FieldMemOperand ( input , HeapObject : : kMapOffset ) ) ; <nl> void LCodeGen : : DoStoreContextSlot ( LStoreContextSlot * instr ) { <nl> __ Str ( value , target ) ; <nl> if ( instr - > hydrogen ( ) - > NeedsWriteBarrier ( ) ) { <nl> SmiCheck check_needed = <nl> - instr - > hydrogen ( ) - > value ( ) - > IsHeapObject ( ) <nl> + instr - > hydrogen ( ) - > value ( ) - > type ( ) . IsHeapObject ( ) <nl> ? OMIT_SMI_CHECK : INLINE_SMI_CHECK ; <nl> __ RecordWriteContextSlot ( context , <nl> target . offset ( ) , <nl> void LCodeGen : : DoStoreKeyedFixed ( LStoreKeyedFixed * instr ) { <nl> / / This assignment may cause element_addr to alias store_base . 
<nl> Register element_addr = scratch ; <nl> SmiCheck check_needed = <nl> - instr - > hydrogen ( ) - > value ( ) - > IsHeapObject ( ) <nl> + instr - > hydrogen ( ) - > value ( ) - > type ( ) . IsHeapObject ( ) <nl> ? OMIT_SMI_CHECK : INLINE_SMI_CHECK ; <nl> / / Compute address of modified element and store it into key register . <nl> __ Add ( element_addr , mem_op . base ( ) , mem_op . OffsetAsOperand ( ) ) ; <nl> mmm a / src / code - stubs - hydrogen . cc <nl> ppp b / src / code - stubs - hydrogen . cc <nl> HValue * CodeStubGraphBuilder < FastNewContextStub > : : BuildCodeStub ( ) { <nl> / / Allocate the context in new space . <nl> HAllocate * function_context = Add < HAllocate > ( <nl> Add < HConstant > ( length * kPointerSize + FixedArray : : kHeaderSize ) , <nl> - HType : : Tagged ( ) , NOT_TENURED , FIXED_ARRAY_TYPE ) ; <nl> + HType : : HeapObject ( ) , NOT_TENURED , FIXED_ARRAY_TYPE ) ; <nl> <nl> / / Set up the object header . <nl> AddStoreMapConstant ( function_context , <nl> mmm a / src / hydrogen - instructions . cc <nl> ppp b / src / hydrogen - instructions . cc <nl> bool Range : : MulAndCheckOverflow ( const Representation & r , Range * other ) { <nl> } <nl> <nl> <nl> - const char * HType : : ToString ( ) { <nl> - / / Note : The c1visualizer syntax for locals allows only a sequence of the <nl> - / / following characters : A - Za - z0 - 9_ - | : <nl> - switch ( type_ ) { <nl> - case kNone : return " none " ; <nl> - case kTagged : return " tagged " ; <nl> - case kTaggedPrimitive : return " primitive " ; <nl> - case kTaggedNumber : return " number " ; <nl> - case kSmi : return " smi " ; <nl> - case kHeapNumber : return " heap - number " ; <nl> - case kString : return " string " ; <nl> - case kBoolean : return " boolean " ; <nl> - case kNonPrimitive : return " non - primitive " ; <nl> - case kJSArray : return " array " ; <nl> - case kJSObject : return " object " ; <nl> - } <nl> - UNREACHABLE ( ) ; <nl> - return " unreachable " ; <nl> - } <nl> - <nl> - <nl> - HType HType : : TypeFromValue ( Handle < Object > value ) { <nl> - HType result = HType : : Tagged ( ) ; <nl> - if ( value - > IsSmi ( ) ) { <nl> - result = HType : : Smi ( ) ; <nl> - } else if ( value - > IsHeapNumber ( ) ) { <nl> - result = HType : : HeapNumber ( ) ; <nl> - } else if ( value - > IsString ( ) ) { <nl> - result = HType : : String ( ) ; <nl> - } else if ( value - > IsBoolean ( ) ) { <nl> - result = HType : : Boolean ( ) ; <nl> - } else if ( value - > IsJSObject ( ) ) { <nl> - result = HType : : JSObject ( ) ; <nl> - } else if ( value - > IsJSArray ( ) ) { <nl> - result = HType : : JSArray ( ) ; <nl> - } else if ( value - > IsHeapObject ( ) ) { <nl> - result = HType : : NonPrimitive ( ) ; <nl> - } <nl> - return result ; <nl> - } <nl> - <nl> - <nl> bool HValue : : IsDefinedAfter ( HBasicBlock * other ) const { <nl> return block ( ) - > block_id ( ) > other - > block_id ( ) ; <nl> } <nl> HValue * HUnaryMathOperation : : Canonicalize ( ) { <nl> <nl> <nl> HValue * HCheckInstanceType : : Canonicalize ( ) { <nl> - if ( check_ = = IS_STRING & & value ( ) - > type ( ) . IsString ( ) ) { <nl> + if ( ( check_ = = IS_SPEC_OBJECT & & value ( ) - > type ( ) . IsJSObject ( ) ) | | <nl> + ( check_ = = IS_JS_ARRAY & & value ( ) - > type ( ) . IsJSArray ( ) ) | | <nl> + ( check_ = = IS_STRING & & value ( ) - > type ( ) . 
IsString ( ) ) ) { <nl> return value ( ) ; <nl> } <nl> <nl> static bool IsInteger32 ( double value ) { <nl> <nl> <nl> HConstant : : HConstant ( Handle < Object > object , Representation r ) <nl> - : HTemplateInstruction < 0 > ( HType : : TypeFromValue ( object ) ) , <nl> + : HTemplateInstruction < 0 > ( HType : : FromValue ( object ) ) , <nl> object_ ( Unique < Object > : : CreateUninitialized ( object ) ) , <nl> object_map_ ( Handle < Map > : : null ( ) ) , <nl> has_stable_map_value_ ( false ) , <nl> HConstant : : HConstant ( Unique < Object > object , <nl> is_undetectable_ ( is_undetectable ) , <nl> instance_type_ ( instance_type ) { <nl> ASSERT ( ! object . handle ( ) . is_null ( ) ) ; <nl> - ASSERT ( ! type . IsTaggedNumber ( ) ) ; <nl> + ASSERT ( ! type . IsTaggedNumber ( ) | | type . IsNone ( ) ) ; <nl> Initialize ( r ) ; <nl> } <nl> <nl> HConstant : : HConstant ( double double_value , <nl> <nl> <nl> HConstant : : HConstant ( ExternalReference reference ) <nl> - : HTemplateInstruction < 0 > ( HType : : None ( ) ) , <nl> + : HTemplateInstruction < 0 > ( HType : : Any ( ) ) , <nl> object_ ( Unique < Object > ( Handle < Object > : : null ( ) ) ) , <nl> object_map_ ( Handle < Map > : : null ( ) ) , <nl> has_stable_map_value_ ( false ) , <nl> mmm a / src / hydrogen - instructions . h <nl> ppp b / src / hydrogen - instructions . h <nl> <nl> # include " conversions . h " <nl> # include " data - flow . h " <nl> # include " deoptimizer . h " <nl> + # include " hydrogen - types . h " <nl> # include " small - pointer - list . h " <nl> # include " string - stream . h " <nl> # include " unique . h " <nl> class Range V8_FINAL : public ZoneObject { <nl> } ; <nl> <nl> <nl> - class HType V8_FINAL { <nl> - public : <nl> - static HType None ( ) { return HType ( kNone ) ; } <nl> - static HType Tagged ( ) { return HType ( kTagged ) ; } <nl> - static HType TaggedPrimitive ( ) { return HType ( kTaggedPrimitive ) ; } <nl> - static HType TaggedNumber ( ) { return HType ( kTaggedNumber ) ; } <nl> - static HType Smi ( ) { return HType ( kSmi ) ; } <nl> - static HType HeapNumber ( ) { return HType ( kHeapNumber ) ; } <nl> - static HType String ( ) { return HType ( kString ) ; } <nl> - static HType Boolean ( ) { return HType ( kBoolean ) ; } <nl> - static HType NonPrimitive ( ) { return HType ( kNonPrimitive ) ; } <nl> - static HType JSArray ( ) { return HType ( kJSArray ) ; } <nl> - static HType JSObject ( ) { return HType ( kJSObject ) ; } <nl> - <nl> - / / Return the weakest ( least precise ) common type . <nl> - HType Combine ( HType other ) { <nl> - return HType ( static_cast < Type > ( type_ & other . type_ ) ) ; <nl> - } <nl> - <nl> - bool Equals ( const HType & other ) const { <nl> - return type_ = = other . type_ ; <nl> - } <nl> - <nl> - bool IsSubtypeOf ( const HType & other ) { <nl> - return Combine ( other ) . 
Equals ( other ) ; <nl> - } <nl> - <nl> - bool IsTaggedPrimitive ( ) const { <nl> - return ( ( type_ & kTaggedPrimitive ) = = kTaggedPrimitive ) ; <nl> - } <nl> - <nl> - bool IsTaggedNumber ( ) const { <nl> - return ( ( type_ & kTaggedNumber ) = = kTaggedNumber ) ; <nl> - } <nl> - <nl> - bool IsSmi ( ) const { <nl> - return ( ( type_ & kSmi ) = = kSmi ) ; <nl> - } <nl> - <nl> - bool IsHeapNumber ( ) const { <nl> - return ( ( type_ & kHeapNumber ) = = kHeapNumber ) ; <nl> - } <nl> - <nl> - bool IsString ( ) const { <nl> - return ( ( type_ & kString ) = = kString ) ; <nl> - } <nl> - <nl> - bool IsNonString ( ) const { <nl> - return IsTaggedPrimitive ( ) | | IsSmi ( ) | | IsHeapNumber ( ) | | <nl> - IsBoolean ( ) | | IsJSArray ( ) ; <nl> - } <nl> - <nl> - bool IsBoolean ( ) const { <nl> - return ( ( type_ & kBoolean ) = = kBoolean ) ; <nl> - } <nl> - <nl> - bool IsNonPrimitive ( ) const { <nl> - return ( ( type_ & kNonPrimitive ) = = kNonPrimitive ) ; <nl> - } <nl> - <nl> - bool IsJSArray ( ) const { <nl> - return ( ( type_ & kJSArray ) = = kJSArray ) ; <nl> - } <nl> - <nl> - bool IsJSObject ( ) const { <nl> - return ( ( type_ & kJSObject ) = = kJSObject ) ; <nl> - } <nl> - <nl> - bool IsHeapObject ( ) const { <nl> - return IsHeapNumber ( ) | | IsString ( ) | | IsBoolean ( ) | | IsNonPrimitive ( ) ; <nl> - } <nl> - <nl> - bool ToStringOrToNumberCanBeObserved ( Representation representation ) { <nl> - switch ( type_ ) { <nl> - case kTaggedPrimitive : / / fallthru <nl> - case kTaggedNumber : / / fallthru <nl> - case kSmi : / / fallthru <nl> - case kHeapNumber : / / fallthru <nl> - case kString : / / fallthru <nl> - case kBoolean : <nl> - return false ; <nl> - case kJSArray : / / fallthru <nl> - case kJSObject : <nl> - return true ; <nl> - case kTagged : <nl> - break ; <nl> - } <nl> - return ! representation . IsSmiOrInteger32 ( ) & & ! representation . IsDouble ( ) ; <nl> - } <nl> - <nl> - static HType TypeFromValue ( Handle < Object > value ) ; <nl> - <nl> - const char * ToString ( ) ; <nl> - <nl> - private : <nl> - enum Type { <nl> - kNone = 0x0 , / / 0000 0000 0000 0000 <nl> - kTagged = 0x1 , / / 0000 0000 0000 0001 <nl> - kTaggedPrimitive = 0x5 , / / 0000 0000 0000 0101 <nl> - kTaggedNumber = 0xd , / / 0000 0000 0000 1101 <nl> - kSmi = 0x1d , / / 0000 0000 0001 1101 <nl> - kHeapNumber = 0x2d , / / 0000 0000 0010 1101 <nl> - kString = 0x45 , / / 0000 0000 0100 0101 <nl> - kBoolean = 0x85 , / / 0000 0000 1000 0101 <nl> - kNonPrimitive = 0x101 , / / 0000 0001 0000 0001 <nl> - kJSObject = 0x301 , / / 0000 0011 0000 0001 <nl> - kJSArray = 0x701 / / 0000 0111 0000 0001 <nl> - } ; <nl> - <nl> - / / Make sure type fits in int16 . <nl> - STATIC_ASSERT ( kJSArray < ( 1 < < ( 2 * kBitsPerByte ) ) ) ; <nl> - <nl> - explicit HType ( Type t ) : type_ ( t ) { } <nl> - <nl> - int16_t type_ ; <nl> - } ; <nl> - <nl> - <nl> class HUseListNode : public ZoneObject { <nl> public : <nl> HUseListNode ( HValue * value , int index , HUseListNode * tail ) <nl> class HValue : public ZoneObject { <nl> type_ = new_type ; <nl> } <nl> <nl> - bool IsHeapObject ( ) { <nl> - return representation_ . IsHeapObject ( ) | | type_ . IsHeapObject ( ) ; <nl> - } <nl> - <nl> / / There are HInstructions that do not really change a value , they <nl> / / only add pieces of information to it ( like bounds checks , map checks , <nl> / / smi checks . . . ) . <nl> class HValue : public ZoneObject { <nl> / / Returns true conservatively if the program might be able to observe a <nl> / / ToString ( ) operation on this value . 
<nl> bool ToStringCanBeObserved ( ) const { <nl> - return type ( ) . ToStringOrToNumberCanBeObserved ( representation ( ) ) ; <nl> + return ToStringOrToNumberCanBeObserved ( ) ; <nl> } <nl> <nl> / / Returns true conservatively if the program might be able to observe a <nl> / / ToNumber ( ) operation on this value . <nl> bool ToNumberCanBeObserved ( ) const { <nl> - return type ( ) . ToStringOrToNumberCanBeObserved ( representation ( ) ) ; <nl> + return ToStringOrToNumberCanBeObserved ( ) ; <nl> } <nl> <nl> MinusZeroMode GetMinusZeroMode ( ) { <nl> class HValue : public ZoneObject { <nl> return false ; <nl> } <nl> <nl> + bool ToStringOrToNumberCanBeObserved ( ) const { <nl> + if ( type ( ) . IsTaggedPrimitive ( ) ) return false ; <nl> + if ( type ( ) . IsJSObject ( ) ) return true ; <nl> + return ! representation ( ) . IsSmiOrInteger32 ( ) & & ! representation ( ) . IsDouble ( ) ; <nl> + } <nl> + <nl> virtual Representation RepresentationFromInputs ( ) { <nl> return representation ( ) ; <nl> } <nl> class HCheckMaps V8_FINAL : public HTemplateInstruction < 2 > { <nl> virtual Representation RequiredInputRepresentation ( int index ) V8_OVERRIDE { <nl> return Representation : : Tagged ( ) ; <nl> } <nl> + <nl> + virtual HType CalculateInferredType ( ) V8_OVERRIDE { <nl> + if ( value ( ) - > type ( ) . IsHeapObject ( ) ) return value ( ) - > type ( ) ; <nl> + return HType : : HeapObject ( ) ; <nl> + } <nl> + <nl> virtual void PrintDataTo ( StringStream * stream ) V8_OVERRIDE ; <nl> <nl> - HValue * value ( ) { return OperandAt ( 0 ) ; } <nl> - HValue * typecheck ( ) { return OperandAt ( 1 ) ; } <nl> + HValue * value ( ) const { return OperandAt ( 0 ) ; } <nl> + HValue * typecheck ( ) const { return OperandAt ( 1 ) ; } <nl> <nl> const UniqueSet < Map > * maps ( ) const { return maps_ ; } <nl> void set_maps ( const UniqueSet < Map > * maps ) { maps_ = maps ; } <nl> class HCheckMaps V8_FINAL : public HTemplateInstruction < 2 > { <nl> <nl> private : <nl> HCheckMaps ( HValue * value , const UniqueSet < Map > * maps , bool maps_are_stable ) <nl> - : HTemplateInstruction < 2 > ( value - > type ( ) ) , maps_ ( maps ) , <nl> + : HTemplateInstruction < 2 > ( HType : : HeapObject ( ) ) , maps_ ( maps ) , <nl> has_migration_target_ ( false ) , is_stability_check_ ( false ) , <nl> maps_are_stable_ ( maps_are_stable ) { <nl> ASSERT_NE ( 0 , maps - > size ( ) ) ; <nl> class HCheckMaps V8_FINAL : public HTemplateInstruction < 2 > { <nl> } <nl> <nl> HCheckMaps ( HValue * value , const UniqueSet < Map > * maps , HValue * typecheck ) <nl> - : HTemplateInstruction < 2 > ( value - > type ( ) ) , maps_ ( maps ) , <nl> + : HTemplateInstruction < 2 > ( HType : : HeapObject ( ) ) , maps_ ( maps ) , <nl> has_migration_target_ ( false ) , is_stability_check_ ( false ) , <nl> maps_are_stable_ ( true ) { <nl> ASSERT_NE ( 0 , maps - > size ( ) ) ; <nl> class HCheckInstanceType V8_FINAL : public HUnaryOperation { <nl> return Representation : : Tagged ( ) ; <nl> } <nl> <nl> + virtual HType CalculateInferredType ( ) V8_OVERRIDE { <nl> + switch ( check_ ) { <nl> + case IS_SPEC_OBJECT : return HType : : JSObject ( ) ; <nl> + case IS_JS_ARRAY : return HType : : JSArray ( ) ; <nl> + case IS_STRING : return HType : : String ( ) ; <nl> + case IS_INTERNALIZED_STRING : return HType : : String ( ) ; <nl> + } <nl> + UNREACHABLE ( ) ; <nl> + return HType : : Tagged ( ) ; <nl> + } <nl> + <nl> virtual HValue * Canonicalize ( ) V8_OVERRIDE ; <nl> <nl> bool is_interval_check ( ) const { return check_ < = LAST_INTERVAL_CHECK ; } <nl> class 
HCheckInstanceType V8_FINAL : public HUnaryOperation { <nl> const char * GetCheckName ( ) ; <nl> <nl> HCheckInstanceType ( HValue * value , Check check ) <nl> - : HUnaryOperation ( value ) , check_ ( check ) { <nl> + : HUnaryOperation ( value , HType : : HeapObject ( ) ) , check_ ( check ) { <nl> set_representation ( Representation : : Tagged ( ) ) ; <nl> SetFlag ( kUseGVN ) ; <nl> } <nl> class HCheckHeapObject V8_FINAL : public HUnaryOperation { <nl> return Representation : : Tagged ( ) ; <nl> } <nl> <nl> + virtual HType CalculateInferredType ( ) V8_OVERRIDE { <nl> + if ( value ( ) - > type ( ) . IsHeapObject ( ) ) return value ( ) - > type ( ) ; <nl> + return HType : : HeapObject ( ) ; <nl> + } <nl> + <nl> # ifdef DEBUG <nl> virtual void Verify ( ) V8_OVERRIDE ; <nl> # endif <nl> class HCheckHeapObject V8_FINAL : public HUnaryOperation { <nl> virtual bool DataEquals ( HValue * other ) V8_OVERRIDE { return true ; } <nl> <nl> private : <nl> - explicit HCheckHeapObject ( HValue * value ) <nl> - : HUnaryOperation ( value , HType : : NonPrimitive ( ) ) { <nl> + explicit HCheckHeapObject ( HValue * value ) : HUnaryOperation ( value ) { <nl> set_representation ( Representation : : Tagged ( ) ) ; <nl> SetFlag ( kUseGVN ) ; <nl> } <nl> class HConstant V8_FINAL : public HTemplateInstruction < 0 > { <nl> HInstruction * instruction ) { <nl> return instruction - > Prepend ( new ( zone ) HConstant ( <nl> map , Unique < Map > ( Handle < Map > : : null ( ) ) , map_is_stable , <nl> - Representation : : Tagged ( ) , HType : : Tagged ( ) , true , <nl> + Representation : : Tagged ( ) , HType : : HeapObject ( ) , true , <nl> false , false , MAP_TYPE ) ) ; <nl> } <nl> <nl> class HConstant V8_FINAL : public HTemplateInstruction < 0 > { <nl> HInstruction * instruction ) { <nl> return instruction - > Append ( new ( zone ) HConstant ( <nl> map , Unique < Map > ( Handle < Map > : : null ( ) ) , map_is_stable , <nl> - Representation : : Tagged ( ) , HType : : Tagged ( ) , true , <nl> + Representation : : Tagged ( ) , HType : : HeapObject ( ) , true , <nl> false , false , MAP_TYPE ) ) ; <nl> } <nl> <nl> class HBoundsCheckBaseIndexInformation V8_FINAL <nl> class HBitwiseBinaryOperation : public HBinaryOperation { <nl> public : <nl> HBitwiseBinaryOperation ( HValue * context , HValue * left , HValue * right , <nl> - HType type = HType : : Tagged ( ) ) <nl> + HType type = HType : : TaggedNumber ( ) ) <nl> : HBinaryOperation ( context , left , right , type ) { <nl> SetFlag ( kFlexibleRepresentation ) ; <nl> SetFlag ( kTruncatingToInt32 ) ; <nl> class HBitwise V8_FINAL : public HBitwiseBinaryOperation { <nl> Token : : Value op , <nl> HValue * left , <nl> HValue * right ) <nl> - : HBitwiseBinaryOperation ( context , left , right , HType : : TaggedNumber ( ) ) , <nl> + : HBitwiseBinaryOperation ( context , left , right ) , <nl> op_ ( op ) { <nl> ASSERT ( op = = Token : : BIT_AND | | op = = Token : : BIT_OR | | op = = Token : : BIT_XOR ) ; <nl> / / BIT_AND with a smi - range positive value will always unset the <nl> class HInnerAllocatedObject V8_FINAL : public HTemplateInstruction < 2 > { <nl> HValue * context , <nl> HValue * value , <nl> HValue * offset , <nl> - HType type = HType : : Tagged ( ) ) { <nl> + HType type ) { <nl> return new ( zone ) HInnerAllocatedObject ( value , offset , type ) ; <nl> } <nl> <nl> class HInnerAllocatedObject V8_FINAL : public HTemplateInstruction < 2 > { <nl> private : <nl> HInnerAllocatedObject ( HValue * value , <nl> HValue * offset , <nl> - HType type = HType : : Tagged ( ) ) <nl> - : 
HTemplateInstruction < 2 > ( type ) { <nl> + HType type ) : HTemplateInstruction < 2 > ( type ) { <nl> ASSERT ( value - > IsAllocate ( ) ) ; <nl> + ASSERT ( type . IsHeapObject ( ) ) ; <nl> SetOperandAt ( 0 , value ) ; <nl> SetOperandAt ( 1 , offset ) ; <nl> - set_type ( type ) ; <nl> set_representation ( Representation : : Tagged ( ) ) ; <nl> } <nl> } ; <nl> <nl> <nl> inline bool StoringValueNeedsWriteBarrier ( HValue * value ) { <nl> - return ! value - > type ( ) . IsBoolean ( ) <nl> - & & ! value - > type ( ) . IsSmi ( ) <nl> + return ! value - > type ( ) . IsSmi ( ) <nl> + & & ! value - > type ( ) . IsNull ( ) <nl> + & & ! value - > type ( ) . IsBoolean ( ) <nl> + & & ! value - > type ( ) . IsUndefined ( ) <nl> & & ! ( value - > IsConstant ( ) & & HConstant : : cast ( value ) - > ImmortalImmovable ( ) ) ; <nl> } <nl> <nl> class HLoadNamedField V8_FINAL : public HTemplateInstruction < 2 > { <nl> representation . IsInteger32 ( ) ) { <nl> set_representation ( representation ) ; <nl> } else if ( representation . IsHeapObject ( ) ) { <nl> - / / TODO ( bmeurer ) : This is probably broken . What we actually want to to <nl> - / / instead is set_representation ( Representation : : HeapObject ( ) ) . <nl> - set_type ( HType : : NonPrimitive ( ) ) ; <nl> + set_type ( HType : : HeapObject ( ) ) ; <nl> set_representation ( Representation : : Tagged ( ) ) ; <nl> } else { <nl> set_representation ( Representation : : Tagged ( ) ) ; <nl> class HLoadNamedField V8_FINAL : public HTemplateInstruction < 2 > { <nl> SetOperandAt ( 1 , dependency ? dependency : object ) ; <nl> <nl> ASSERT ( access . representation ( ) . IsHeapObject ( ) ) ; <nl> - / / TODO ( bmeurer ) : This is probably broken . What we actually want to to <nl> - / / instead is set_representation ( Representation : : HeapObject ( ) ) . <nl> - if ( ! type . IsHeapObject ( ) ) set_type ( HType : : NonPrimitive ( ) ) ; <nl> + ASSERT ( type . IsHeapObject ( ) ) ; <nl> set_representation ( Representation : : Tagged ( ) ) ; <nl> <nl> access . SetGVNFlags ( this , LOAD ) ; <nl> class HStoreNamedField V8_FINAL : public HTemplateInstruction < 3 > { <nl> <nl> SmiCheck SmiCheckForWriteBarrier ( ) const { <nl> if ( field_representation ( ) . IsHeapObject ( ) ) return OMIT_SMI_CHECK ; <nl> - if ( value ( ) - > IsHeapObject ( ) ) return OMIT_SMI_CHECK ; <nl> + if ( value ( ) - > type ( ) . IsHeapObject ( ) ) return OMIT_SMI_CHECK ; <nl> return INLINE_SMI_CHECK ; <nl> } <nl> <nl> class HCheckMapValue V8_FINAL : public HTemplateInstruction < 2 > { <nl> virtual void PrintDataTo ( StringStream * stream ) V8_OVERRIDE ; <nl> <nl> virtual HType CalculateInferredType ( ) V8_OVERRIDE { <nl> - return HType : : Tagged ( ) ; <nl> + if ( value ( ) - > type ( ) . 
IsHeapObject ( ) ) return value ( ) - > type ( ) ; <nl> + return HType : : HeapObject ( ) ; <nl> } <nl> <nl> - HValue * value ( ) { return OperandAt ( 0 ) ; } <nl> - HValue * map ( ) { return OperandAt ( 1 ) ; } <nl> + HValue * value ( ) const { return OperandAt ( 0 ) ; } <nl> + HValue * map ( ) const { return OperandAt ( 1 ) ; } <nl> <nl> virtual HValue * Canonicalize ( ) V8_OVERRIDE ; <nl> <nl> class HCheckMapValue V8_FINAL : public HTemplateInstruction < 2 > { <nl> } <nl> <nl> private : <nl> - HCheckMapValue ( HValue * value , <nl> - HValue * map ) { <nl> + HCheckMapValue ( HValue * value , HValue * map ) <nl> + : HTemplateInstruction < 2 > ( HType : : HeapObject ( ) ) { <nl> SetOperandAt ( 0 , value ) ; <nl> SetOperandAt ( 1 , map ) ; <nl> set_representation ( Representation : : Tagged ( ) ) ; <nl> new file mode 100644 <nl> index 00000000000 . . 2d83e1bf2a5 <nl> mmm / dev / null <nl> ppp b / src / hydrogen - types . cc <nl> <nl> + / / Copyright 2014 the V8 project authors . All rights reserved . <nl> + / / Use of this source code is governed by a BSD - style license that can be <nl> + / / found in the LICENSE file . <nl> + <nl> + # include " hydrogen - types . h " <nl> + <nl> + # include " types - inl . h " <nl> + <nl> + <nl> + namespace v8 { <nl> + namespace internal { <nl> + <nl> + / / static <nl> + template < class T > <nl> + HType HType : : FromType ( typename T : : TypeHandle type ) { <nl> + if ( T : : Any ( ) - > Is ( type ) ) return HType : : Any ( ) ; <nl> + if ( type - > Is ( T : : None ( ) ) ) return HType : : None ( ) ; <nl> + if ( type - > Is ( T : : SignedSmall ( ) ) ) return HType : : Smi ( ) ; <nl> + if ( type - > Is ( T : : Number ( ) ) ) return HType : : TaggedNumber ( ) ; <nl> + if ( type - > Is ( T : : Null ( ) ) ) return HType : : Null ( ) ; <nl> + if ( type - > Is ( T : : String ( ) ) ) return HType : : String ( ) ; <nl> + if ( type - > Is ( T : : Boolean ( ) ) ) return HType : : Boolean ( ) ; <nl> + if ( type - > Is ( T : : Undefined ( ) ) ) return HType : : Undefined ( ) ; <nl> + if ( type - > Is ( T : : Array ( ) ) ) return HType : : JSArray ( ) ; <nl> + if ( type - > Is ( T : : Object ( ) ) ) return HType : : JSObject ( ) ; <nl> + return HType : : Tagged ( ) ; <nl> + } <nl> + <nl> + <nl> + / / static <nl> + template <nl> + HType HType : : FromType < Type > ( Type * type ) ; <nl> + <nl> + <nl> + / / static <nl> + template <nl> + HType HType : : FromType < HeapType > ( Handle < HeapType > type ) ; <nl> + <nl> + <nl> + / / static <nl> + HType HType : : FromValue ( Handle < Object > value ) { <nl> + if ( value - > IsSmi ( ) ) return HType : : Smi ( ) ; <nl> + if ( value - > IsNull ( ) ) return HType : : Null ( ) ; <nl> + if ( value - > IsHeapNumber ( ) ) return HType : : HeapNumber ( ) ; <nl> + if ( value - > IsString ( ) ) return HType : : String ( ) ; <nl> + if ( value - > IsBoolean ( ) ) return HType : : Boolean ( ) ; <nl> + if ( value - > IsUndefined ( ) ) return HType : : Undefined ( ) ; <nl> + if ( value - > IsJSArray ( ) ) return HType : : JSArray ( ) ; <nl> + if ( value - > IsJSObject ( ) ) return HType : : JSObject ( ) ; <nl> + ASSERT ( value - > IsHeapObject ( ) ) ; <nl> + return HType : : HeapObject ( ) ; <nl> + } <nl> + <nl> + <nl> + const char * HType : : ToString ( ) const { <nl> + / / Note : The c1visualizer syntax for locals allows only a sequence of the <nl> + / / following characters : A - Za - z0 - 9_ - | : <nl> + switch ( kind_ ) { <nl> + # define DEFINE_CASE ( Name , mask ) case k # # Name : return # Name ; <nl> + HTYPE_LIST ( DEFINE_CASE ) 
<nl> + # undef DEFINE_CASE <nl> + } <nl> + UNREACHABLE ( ) ; <nl> + return NULL ; <nl> + } <nl> + <nl> + } } / / namespace v8 : : internal <nl> new file mode 100644 <nl> index 00000000000 . . c34c278eb25 <nl> mmm / dev / null <nl> ppp b / src / hydrogen - types . h <nl> <nl> + / / Copyright 2014 the V8 project authors . All rights reserved . <nl> + / / Use of this source code is governed by a BSD - style license that can be <nl> + / / found in the LICENSE file . <nl> + <nl> + # ifndef HYDROGEN_TYPES_H_ <nl> + # define HYDROGEN_TYPES_H_ <nl> + <nl> + # include < climits > <nl> + <nl> + # include " base / macros . h " <nl> + <nl> + namespace v8 { <nl> + namespace internal { <nl> + <nl> + / / Forward declarations . <nl> + template < typename T > class Handle ; <nl> + class Object ; <nl> + <nl> + # define HTYPE_LIST ( V ) \ <nl> + V ( Any , 0x0 ) / * 0000 0000 0000 0000 * / \ <nl> + V ( Tagged , 0x1 ) / * 0000 0000 0000 0001 * / \ <nl> + V ( TaggedPrimitive , 0x5 ) / * 0000 0000 0000 0101 * / \ <nl> + V ( TaggedNumber , 0xd ) / * 0000 0000 0000 1101 * / \ <nl> + V ( Smi , 0x1d ) / * 0000 0000 0001 1101 * / \ <nl> + V ( HeapObject , 0x21 ) / * 0000 0000 0010 0001 * / \ <nl> + V ( HeapPrimitive , 0x25 ) / * 0000 0000 0010 0101 * / \ <nl> + V ( Null , 0x27 ) / * 0000 0000 0010 0111 * / \ <nl> + V ( HeapNumber , 0x2d ) / * 0000 0000 0010 1101 * / \ <nl> + V ( String , 0x65 ) / * 0000 0000 0110 0101 * / \ <nl> + V ( Boolean , 0xa5 ) / * 0000 0000 1010 0101 * / \ <nl> + V ( Undefined , 0x125 ) / * 0000 0001 0010 0101 * / \ <nl> + V ( JSObject , 0x221 ) / * 0000 0010 0010 0001 * / \ <nl> + V ( JSArray , 0x621 ) / * 0000 0110 0010 0001 * / \ <nl> + V ( None , 0x7ff ) / * 0000 0111 1111 1111 * / <nl> + <nl> + class HType V8_FINAL { <nl> + public : <nl> + # define DECLARE_CONSTRUCTOR ( Name , mask ) \ <nl> + static HType Name ( ) V8_WARN_UNUSED_RESULT { return HType ( k # # Name ) ; } <nl> + HTYPE_LIST ( DECLARE_CONSTRUCTOR ) <nl> + # undef DECLARE_CONSTRUCTOR <nl> + <nl> + / / Return the weakest ( least precise ) common type . <nl> + HType Combine ( HType other ) const V8_WARN_UNUSED_RESULT { <nl> + return HType ( static_cast < Kind > ( kind_ & other . kind_ ) ) ; <nl> + } <nl> + <nl> + bool Equals ( HType other ) const V8_WARN_UNUSED_RESULT { <nl> + return kind_ = = other . kind_ ; <nl> + } <nl> + <nl> + bool IsSubtypeOf ( HType other ) const V8_WARN_UNUSED_RESULT { <nl> + return Combine ( other ) . Equals ( other ) ; <nl> + } <nl> + <nl> + # define DECLARE_IS_TYPE ( Name , mask ) \ <nl> + bool Is # # Name ( ) const V8_WARN_UNUSED_RESULT { \ <nl> + return IsSubtypeOf ( HType : : Name ( ) ) ; \ <nl> + } <nl> + HTYPE_LIST ( DECLARE_IS_TYPE ) <nl> + # undef DECLARE_IS_TYPE <nl> + <nl> + template < class T > <nl> + static HType FromType ( typename T : : TypeHandle type ) V8_WARN_UNUSED_RESULT ; <nl> + static HType FromValue ( Handle < Object > value ) V8_WARN_UNUSED_RESULT ; <nl> + <nl> + const char * ToString ( ) const V8_WARN_UNUSED_RESULT ; <nl> + <nl> + private : <nl> + enum Kind { <nl> + # define DECLARE_TYPE ( Name , mask ) k # # Name = mask , <nl> + HTYPE_LIST ( DECLARE_TYPE ) <nl> + # undef DECLARE_TYPE <nl> + LAST_KIND = kNone <nl> + } ; <nl> + <nl> + / / Make sure type fits in int16 . <nl> + STATIC_ASSERT ( LAST_KIND < ( 1 < < ( CHAR_BIT * sizeof ( int16_t ) ) ) ) ; <nl> + <nl> + explicit HType ( Kind kind ) : kind_ ( kind ) { } <nl> + <nl> + int16_t kind_ ; <nl> + } ; <nl> + <nl> + } } / / namespace v8 : : internal <nl> + <nl> + # endif / / HYDROGEN_TYPES_H_ <nl> mmm a / src / hydrogen . 
cc <nl> ppp b / src / hydrogen . cc <nl> HConstant * HGraph : : GetConstant # # Name ( ) { \ <nl> } <nl> <nl> <nl> - DEFINE_GET_CONSTANT ( Undefined , undefined , undefined , HType : : Tagged ( ) , false ) <nl> + DEFINE_GET_CONSTANT ( Undefined , undefined , undefined , HType : : Undefined ( ) , false ) <nl> DEFINE_GET_CONSTANT ( True , true , boolean , HType : : Boolean ( ) , true ) <nl> DEFINE_GET_CONSTANT ( False , false , boolean , HType : : Boolean ( ) , false ) <nl> - DEFINE_GET_CONSTANT ( Hole , the_hole , the_hole , HType : : Tagged ( ) , false ) <nl> - DEFINE_GET_CONSTANT ( Null , null , null , HType : : Tagged ( ) , false ) <nl> + DEFINE_GET_CONSTANT ( Hole , the_hole , the_hole , HType : : None ( ) , false ) <nl> + DEFINE_GET_CONSTANT ( Null , null , null , HType : : Null ( ) , false ) <nl> <nl> <nl> # undef DEFINE_GET_CONSTANT <nl> HValue * HGraphBuilder : : BuildRegExpConstructResult ( HValue * length , <nl> <nl> / / Determine the elements FixedArray . <nl> HValue * elements = Add < HInnerAllocatedObject > ( <nl> - result , Add < HConstant > ( JSRegExpResult : : kSize ) ) ; <nl> + result , Add < HConstant > ( JSRegExpResult : : kSize ) , HType : : HeapObject ( ) ) ; <nl> <nl> / / Initialize the JSRegExpResult header . <nl> HValue * global_object = Add < HLoadNamedField > ( <nl> HValue * HGraphBuilder : : BuildAllocateElements ( ElementsKind kind , <nl> HValue * total_size = AddUncasted < HAdd > ( mul , header_size ) ; <nl> total_size - > ClearFlag ( HValue : : kCanOverflow ) ; <nl> <nl> - return Add < HAllocate > ( total_size , HType : : NonPrimitive ( ) , NOT_TENURED , <nl> + return Add < HAllocate > ( total_size , HType : : HeapObject ( ) , NOT_TENURED , <nl> instance_type ) ; <nl> } <nl> <nl> HInnerAllocatedObject * HGraphBuilder : : BuildJSArrayHeader ( HValue * array , <nl> } <nl> <nl> HInnerAllocatedObject * elements = Add < HInnerAllocatedObject > ( <nl> - array , Add < HConstant > ( elements_location ) ) ; <nl> + array , Add < HConstant > ( elements_location ) , HType : : HeapObject ( ) ) ; <nl> Add < HStoreNamedField > ( array , HObjectAccess : : ForElementsPointer ( ) , elements ) ; <nl> return elements ; <nl> } <nl> HValue * HGraphBuilder : : BuildCloneShallowArrayCommon ( <nl> <nl> if ( extra_size ! = NULL ) { <nl> HValue * elements = Add < HInnerAllocatedObject > ( object , <nl> - Add < HConstant > ( array_size ) ) ; <nl> + Add < HConstant > ( array_size ) , HType : : HeapObject ( ) ) ; <nl> if ( return_elements ! = NULL ) * return_elements = elements ; <nl> } <nl> <nl> void HGraphBuilder : : BuildCreateAllocationMemento ( <nl> HValue * allocation_site ) { <nl> ASSERT ( allocation_site ! = NULL ) ; <nl> HInnerAllocatedObject * allocation_memento = Add < HInnerAllocatedObject > ( <nl> - previous_object , previous_object_size ) ; <nl> + previous_object , previous_object_size , HType : : HeapObject ( ) ) ; <nl> AddStoreMapConstant ( <nl> allocation_memento , isolate ( ) - > factory ( ) - > allocation_memento_map ( ) ) ; <nl> Add < HStoreNamedField > ( <nl> HInstruction * HOptimizedGraphBuilder : : BuildStoreNamedField ( <nl> <nl> / / TODO ( hpayer ) : Allocation site pretenuring support . 
<nl> HInstruction * heap_number = Add < HAllocate > ( heap_number_size , <nl> - HType : : Tagged ( ) , <nl> + HType : : HeapObject ( ) , <nl> NOT_TENURED , <nl> HEAP_NUMBER_TYPE ) ; <nl> AddStoreMapConstant ( heap_number , isolate ( ) - > factory ( ) - > heap_number_map ( ) ) ; <nl> void HOptimizedGraphBuilder : : PropertyAccessInfo : : LoadFieldMaps ( <nl> ASSERT_EQ ( num_field_maps , field_maps_ . length ( ) ) ; <nl> <nl> / / Determine field HType from field HeapType . <nl> - if ( field_type - > Is ( HeapType : : Number ( ) ) ) { <nl> - field_type_ = HType : : HeapNumber ( ) ; <nl> - } else if ( field_type - > Is ( HeapType : : String ( ) ) ) { <nl> - field_type_ = HType : : String ( ) ; <nl> - } else if ( field_type - > Is ( HeapType : : Boolean ( ) ) ) { <nl> - field_type_ = HType : : Boolean ( ) ; <nl> - } else if ( field_type - > Is ( HeapType : : Array ( ) ) ) { <nl> - field_type_ = HType : : JSArray ( ) ; <nl> - } else if ( field_type - > Is ( HeapType : : Object ( ) ) ) { <nl> - field_type_ = HType : : JSObject ( ) ; <nl> - } else if ( field_type - > Is ( HeapType : : Null ( ) ) | | <nl> - field_type - > Is ( HeapType : : Undefined ( ) ) ) { <nl> - field_type_ = HType : : NonPrimitive ( ) ; <nl> - } <nl> + field_type_ = HType : : FromType < HeapType > ( field_type ) ; <nl> + ASSERT ( field_type_ . IsHeapObject ( ) ) ; <nl> <nl> / / Add dependency on the map that introduced the field . <nl> Map : : AddDependentCompilationInfo ( <nl> HValue * HOptimizedGraphBuilder : : BuildAllocateExternalElements ( <nl> HValue * elements = <nl> Add < HAllocate > ( <nl> Add < HConstant > ( ExternalArray : : kAlignedSize ) , <nl> - HType : : NonPrimitive ( ) , <nl> + HType : : HeapObject ( ) , <nl> NOT_TENURED , <nl> external_array_map - > instance_type ( ) ) ; <nl> <nl> HValue * HOptimizedGraphBuilder : : BuildAllocateFixedTypedArray ( <nl> Handle < Map > fixed_typed_array_map ( <nl> isolate ( ) - > heap ( ) - > MapForFixedTypedArray ( array_type ) ) ; <nl> HValue * elements = <nl> - Add < HAllocate > ( total_size , HType : : NonPrimitive ( ) , <nl> + Add < HAllocate > ( total_size , HType : : HeapObject ( ) , <nl> NOT_TENURED , fixed_typed_array_map - > instance_type ( ) ) ; <nl> AddStoreMapConstant ( elements , fixed_typed_array_map ) ; <nl> <nl> HInstruction * HOptimizedGraphBuilder : : BuildFastLiteral ( <nl> InstanceType instance_type = boilerplate_object - > HasFastDoubleElements ( ) <nl> ? FIXED_DOUBLE_ARRAY_TYPE : FIXED_ARRAY_TYPE ; <nl> object_elements = Add < HAllocate > ( <nl> - object_elements_size , HType : : NonPrimitive ( ) , <nl> + object_elements_size , HType : : HeapObject ( ) , <nl> pretenure_flag , instance_type , site_context - > current ( ) ) ; <nl> } <nl> BuildInitElementsInObjectHeader ( boilerplate_object , object , object_elements ) ; <nl> mmm a / src / ia32 / lithium - codegen - ia32 . cc <nl> ppp b / src / ia32 / lithium - codegen - ia32 . cc <nl> void LCodeGen : : DoIsStringAndBranch ( LIsStringAndBranch * instr ) { <nl> Register temp = ToRegister ( instr - > temp ( ) ) ; <nl> <nl> SmiCheck check_needed = <nl> - instr - > hydrogen ( ) - > value ( ) - > IsHeapObject ( ) <nl> + instr - > hydrogen ( ) - > value ( ) - > type ( ) . IsHeapObject ( ) <nl> ? OMIT_SMI_CHECK : INLINE_SMI_CHECK ; <nl> <nl> Condition true_cond = EmitIsString ( <nl> void LCodeGen : : DoIsUndetectableAndBranch ( LIsUndetectableAndBranch * instr ) { <nl> Register input = ToRegister ( instr - > value ( ) ) ; <nl> Register temp = ToRegister ( instr - > temp ( ) ) ; <nl> <nl> - if ( ! 
instr - > hydrogen ( ) - > value ( ) - > IsHeapObject ( ) ) { <nl> + if ( ! instr - > hydrogen ( ) - > value ( ) - > type ( ) . IsHeapObject ( ) ) { <nl> STATIC_ASSERT ( kSmiTag = = 0 ) ; <nl> __ JumpIfSmi ( input , instr - > FalseLabel ( chunk_ ) ) ; <nl> } <nl> void LCodeGen : : DoHasInstanceTypeAndBranch ( LHasInstanceTypeAndBranch * instr ) { <nl> Register input = ToRegister ( instr - > value ( ) ) ; <nl> Register temp = ToRegister ( instr - > temp ( ) ) ; <nl> <nl> - if ( ! instr - > hydrogen ( ) - > value ( ) - > IsHeapObject ( ) ) { <nl> + if ( ! instr - > hydrogen ( ) - > value ( ) - > type ( ) . IsHeapObject ( ) ) { <nl> __ JumpIfSmi ( input , instr - > FalseLabel ( chunk_ ) ) ; <nl> } <nl> <nl> void LCodeGen : : DoStoreContextSlot ( LStoreContextSlot * instr ) { <nl> __ mov ( target , value ) ; <nl> if ( instr - > hydrogen ( ) - > NeedsWriteBarrier ( ) ) { <nl> SmiCheck check_needed = <nl> - instr - > hydrogen ( ) - > value ( ) - > IsHeapObject ( ) <nl> + instr - > hydrogen ( ) - > value ( ) - > type ( ) . IsHeapObject ( ) <nl> ? OMIT_SMI_CHECK : INLINE_SMI_CHECK ; <nl> Register temp = ToRegister ( instr - > temp ( ) ) ; <nl> int offset = Context : : SlotOffset ( instr - > slot_index ( ) ) ; <nl> void LCodeGen : : DoStoreKeyedFixedArray ( LStoreKeyed * instr ) { <nl> Register value = ToRegister ( instr - > value ( ) ) ; <nl> ASSERT ( ! instr - > key ( ) - > IsConstantOperand ( ) ) ; <nl> SmiCheck check_needed = <nl> - instr - > hydrogen ( ) - > value ( ) - > IsHeapObject ( ) <nl> + instr - > hydrogen ( ) - > value ( ) - > type ( ) . IsHeapObject ( ) <nl> ? OMIT_SMI_CHECK : INLINE_SMI_CHECK ; <nl> / / Compute address of modified element and store it into key register . <nl> __ lea ( key , operand ) ; <nl> void LCodeGen : : DoCheckSmi ( LCheckSmi * instr ) { <nl> <nl> <nl> void LCodeGen : : DoCheckNonSmi ( LCheckNonSmi * instr ) { <nl> - if ( ! instr - > hydrogen ( ) - > value ( ) - > IsHeapObject ( ) ) { <nl> + if ( ! instr - > hydrogen ( ) - > value ( ) - > type ( ) . IsHeapObject ( ) ) { <nl> LOperand * input = instr - > value ( ) ; <nl> __ test ( ToOperand ( input ) , Immediate ( kSmiTagMask ) ) ; <nl> DeoptimizeIf ( zero , instr - > environment ( ) ) ; <nl> mmm a / src / ia32 / lithium - ia32 . cc <nl> ppp b / src / ia32 / lithium - ia32 . cc <nl> LInstruction * LChunkBuilder : : DoChange ( HChange * instr ) { <nl> LInstruction * LChunkBuilder : : DoCheckHeapObject ( HCheckHeapObject * instr ) { <nl> LOperand * value = UseAtStart ( instr - > value ( ) ) ; <nl> LInstruction * result = new ( zone ( ) ) LCheckNonSmi ( value ) ; <nl> - if ( ! instr - > value ( ) - > IsHeapObject ( ) ) result = AssignEnvironment ( result ) ; <nl> + if ( ! instr - > value ( ) - > type ( ) . IsHeapObject ( ) ) { <nl> + result = AssignEnvironment ( result ) ; <nl> + } <nl> return result ; <nl> } <nl> <nl> mmm a / src / x64 / lithium - codegen - x64 . cc <nl> ppp b / src / x64 / lithium - codegen - x64 . cc <nl> void LCodeGen : : DoIsStringAndBranch ( LIsStringAndBranch * instr ) { <nl> Register temp = ToRegister ( instr - > temp ( ) ) ; <nl> <nl> SmiCheck check_needed = <nl> - instr - > hydrogen ( ) - > value ( ) - > IsHeapObject ( ) <nl> + instr - > hydrogen ( ) - > value ( ) - > type ( ) . IsHeapObject ( ) <nl> ? 
OMIT_SMI_CHECK : INLINE_SMI_CHECK ; <nl> <nl> Condition true_cond = EmitIsString ( <nl> void LCodeGen : : DoIsUndetectableAndBranch ( LIsUndetectableAndBranch * instr ) { <nl> Register input = ToRegister ( instr - > value ( ) ) ; <nl> Register temp = ToRegister ( instr - > temp ( ) ) ; <nl> <nl> - if ( ! instr - > hydrogen ( ) - > value ( ) - > IsHeapObject ( ) ) { <nl> + if ( ! instr - > hydrogen ( ) - > value ( ) - > type ( ) . IsHeapObject ( ) ) { <nl> __ JumpIfSmi ( input , instr - > FalseLabel ( chunk_ ) ) ; <nl> } <nl> __ movp ( temp , FieldOperand ( input , HeapObject : : kMapOffset ) ) ; <nl> static Condition BranchCondition ( HHasInstanceTypeAndBranch * instr ) { <nl> void LCodeGen : : DoHasInstanceTypeAndBranch ( LHasInstanceTypeAndBranch * instr ) { <nl> Register input = ToRegister ( instr - > value ( ) ) ; <nl> <nl> - if ( ! instr - > hydrogen ( ) - > value ( ) - > IsHeapObject ( ) ) { <nl> + if ( ! instr - > hydrogen ( ) - > value ( ) - > type ( ) . IsHeapObject ( ) ) { <nl> __ JumpIfSmi ( input , instr - > FalseLabel ( chunk_ ) ) ; <nl> } <nl> <nl> void LCodeGen : : DoStoreContextSlot ( LStoreContextSlot * instr ) { <nl> <nl> if ( instr - > hydrogen ( ) - > NeedsWriteBarrier ( ) ) { <nl> SmiCheck check_needed = <nl> - instr - > hydrogen ( ) - > value ( ) - > IsHeapObject ( ) <nl> + instr - > hydrogen ( ) - > value ( ) - > type ( ) . IsHeapObject ( ) <nl> ? OMIT_SMI_CHECK : INLINE_SMI_CHECK ; <nl> int offset = Context : : SlotOffset ( instr - > slot_index ( ) ) ; <nl> Register scratch = ToRegister ( instr - > temp ( ) ) ; <nl> void LCodeGen : : DoStoreKeyedFixedArray ( LStoreKeyed * instr ) { <nl> ASSERT ( instr - > value ( ) - > IsRegister ( ) ) ; <nl> Register value = ToRegister ( instr - > value ( ) ) ; <nl> ASSERT ( ! key - > IsConstantOperand ( ) ) ; <nl> - SmiCheck check_needed = hinstr - > value ( ) - > IsHeapObject ( ) <nl> + SmiCheck check_needed = hinstr - > value ( ) - > type ( ) . IsHeapObject ( ) <nl> ? OMIT_SMI_CHECK : INLINE_SMI_CHECK ; <nl> / / Compute address of modified element and store it into key register . <nl> Register key_reg ( ToRegister ( key ) ) ; <nl> void LCodeGen : : DoCheckSmi ( LCheckSmi * instr ) { <nl> <nl> <nl> void LCodeGen : : DoCheckNonSmi ( LCheckNonSmi * instr ) { <nl> - if ( ! instr - > hydrogen ( ) - > value ( ) - > IsHeapObject ( ) ) { <nl> + if ( ! instr - > hydrogen ( ) - > value ( ) - > type ( ) . IsHeapObject ( ) ) { <nl> LOperand * input = instr - > value ( ) ; <nl> Condition cc = masm ( ) - > CheckSmi ( ToRegister ( input ) ) ; <nl> DeoptimizeIf ( cc , instr - > environment ( ) ) ; <nl> mmm a / src / x64 / lithium - x64 . cc <nl> ppp b / src / x64 / lithium - x64 . cc <nl> LInstruction * LChunkBuilder : : DoChange ( HChange * instr ) { <nl> LInstruction * LChunkBuilder : : DoCheckHeapObject ( HCheckHeapObject * instr ) { <nl> LOperand * value = UseRegisterAtStart ( instr - > value ( ) ) ; <nl> LInstruction * result = new ( zone ( ) ) LCheckNonSmi ( value ) ; <nl> - if ( ! instr - > value ( ) - > IsHeapObject ( ) ) result = AssignEnvironment ( result ) ; <nl> + if ( ! instr - > value ( ) - > type ( ) . IsHeapObject ( ) ) { <nl> + result = AssignEnvironment ( result ) ; <nl> + } <nl> return result ; <nl> } <nl> <nl> mmm a / test / cctest / cctest . gyp <nl> ppp b / test / cctest / cctest . gyp <nl> <nl> ' test - hashmap . cc ' , <nl> ' test - heap . cc ' , <nl> ' test - heap - profiler . cc ' , <nl> + ' test - hydrogen - types . cc ' , <nl> ' test - libplatform - task - queue . cc ' , <nl> ' test - libplatform - worker - thread . 
cc ' , <nl> ' test - list . cc ' , <nl> mmm a / test / cctest / cctest . status <nl> ppp b / test / cctest / cctest . status <nl> <nl> <nl> # This tests only the type system , so there is no point in running several <nl> # variants . <nl> + ' test - hydrogen - types / * ' : [ PASS , NO_VARIANTS ] , <nl> ' test - types / * ' : [ PASS , NO_VARIANTS ] , <nl> <nl> # BUG ( 2999 ) . <nl> new file mode 100644 <nl> index 00000000000 . . 1d333c6c268 <nl> mmm / dev / null <nl> ppp b / test / cctest / test - hydrogen - types . cc <nl> <nl> + / / Copyright 2014 the V8 project authors . All rights reserved . <nl> + / / Use of this source code is governed by a BSD - style license that can be <nl> + / / found in the LICENSE file . <nl> + <nl> + # include " hydrogen - types . h " <nl> + <nl> + # include " cctest . h " <nl> + <nl> + using namespace v8 : : internal ; <nl> + <nl> + <nl> + static const HType kTypes [ ] = { <nl> + # define DECLARE_TYPE ( Name , mask ) HType : : Name ( ) , <nl> + HTYPE_LIST ( DECLARE_TYPE ) <nl> + # undef DECLARE_TYPE <nl> + } ; <nl> + <nl> + static const int kNumberOfTypes = sizeof ( kTypes ) / sizeof ( kTypes [ 0 ] ) ; <nl> + <nl> + <nl> + TEST ( HTypeDistinct ) { <nl> + for ( int i = 0 ; i < kNumberOfTypes ; + + i ) { <nl> + for ( int j = 0 ; j < kNumberOfTypes ; + + j ) { <nl> + CHECK ( i = = j | | ! kTypes [ i ] . Equals ( kTypes [ j ] ) ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + <nl> + TEST ( HTypeReflexivity ) { <nl> + / / Reflexivity of = <nl> + for ( int i = 0 ; i < kNumberOfTypes ; + + i ) { <nl> + CHECK ( kTypes [ i ] . Equals ( kTypes [ i ] ) ) ; <nl> + } <nl> + <nl> + / / Reflexivity of < <nl> + for ( int i = 0 ; i < kNumberOfTypes ; + + i ) { <nl> + CHECK ( kTypes [ i ] . IsSubtypeOf ( kTypes [ i ] ) ) ; <nl> + } <nl> + } <nl> + <nl> + <nl> + TEST ( HTypeTransitivity ) { <nl> + / / Transitivity of = <nl> + for ( int i = 0 ; i < kNumberOfTypes ; + + i ) { <nl> + for ( int j = 0 ; j < kNumberOfTypes ; + + j ) { <nl> + for ( int k = 0 ; k < kNumberOfTypes ; + + k ) { <nl> + HType ti = kTypes [ i ] ; <nl> + HType tj = kTypes [ j ] ; <nl> + HType tk = kTypes [ k ] ; <nl> + CHECK ( ! ti . Equals ( tj ) | | ! tj . Equals ( tk ) | | ti . Equals ( tk ) ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + / / Transitivity of < <nl> + for ( int i = 0 ; i < kNumberOfTypes ; + + i ) { <nl> + for ( int j = 0 ; j < kNumberOfTypes ; + + j ) { <nl> + for ( int k = 0 ; k < kNumberOfTypes ; + + k ) { <nl> + HType ti = kTypes [ i ] ; <nl> + HType tj = kTypes [ j ] ; <nl> + HType tk = kTypes [ k ] ; <nl> + CHECK ( ! ti . IsSubtypeOf ( tj ) | | ! tj . IsSubtypeOf ( tk ) | | ti . IsSubtypeOf ( tk ) ) ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + <nl> + <nl> + TEST ( HTypeCombine ) { <nl> + / / T < T / \ T ' and T ' < T / \ T ' for all T , T ' <nl> + for ( int i = 0 ; i < kNumberOfTypes ; + + i ) { <nl> + for ( int j = 0 ; j < kNumberOfTypes ; + + j ) { <nl> + HType ti = kTypes [ i ] ; <nl> + HType tj = kTypes [ j ] ; <nl> + CHECK ( ti . IsSubtypeOf ( ti . Combine ( tj ) ) ) ; <nl> + CHECK ( tj . IsSubtypeOf ( ti . Combine ( tj ) ) ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + <nl> + TEST ( HTypeAny ) { <nl> + / / T < Any for all T <nl> + for ( int i = 0 ; i < kNumberOfTypes ; + + i ) { <nl> + HType ti = kTypes [ i ] ; <nl> + CHECK ( ti . IsAny ( ) ) ; <nl> + } <nl> + <nl> + / / Any < T implies T = Any for all T <nl> + for ( int i = 0 ; i < kNumberOfTypes ; + + i ) { <nl> + HType ti = kTypes [ i ] ; <nl> + CHECK ( ! HType : : Any ( ) . IsSubtypeOf ( ti ) | | HType : : Any ( ) . 
Equals ( ti ) ) ; <nl> + } <nl> + } <nl> + <nl> + <nl> + TEST ( HTypeTagged ) { <nl> + / / T < Tagged for all T \ { Any } <nl> + for ( int i = 0 ; i < kNumberOfTypes ; + + i ) { <nl> + HType ti = kTypes [ i ] ; <nl> + CHECK ( ti . IsTagged ( ) | | HType : : Any ( ) . Equals ( ti ) ) ; <nl> + } <nl> + <nl> + / / Tagged < T implies T = Tagged or T = Any <nl> + for ( int i = 0 ; i < kNumberOfTypes ; + + i ) { <nl> + HType ti = kTypes [ i ] ; <nl> + CHECK ( ! HType : : Tagged ( ) . IsSubtypeOf ( ti ) | | <nl> + HType : : Tagged ( ) . Equals ( ti ) | | <nl> + HType : : Any ( ) . Equals ( ti ) ) ; <nl> + } <nl> + } <nl> + <nl> + <nl> + TEST ( HTypeSmi ) { <nl> + / / T < Smi implies T = None or T = Smi for all T <nl> + for ( int i = 0 ; i < kNumberOfTypes ; + + i ) { <nl> + HType ti = kTypes [ i ] ; <nl> + CHECK ( ! ti . IsSmi ( ) | | <nl> + ti . Equals ( HType : : Smi ( ) ) | | <nl> + ti . Equals ( HType : : None ( ) ) ) ; <nl> + } <nl> + } <nl> + <nl> + <nl> + TEST ( HTypeHeapObject ) { <nl> + CHECK ( ! HType : : TaggedPrimitive ( ) . IsHeapObject ( ) ) ; <nl> + CHECK ( ! HType : : TaggedNumber ( ) . IsHeapObject ( ) ) ; <nl> + CHECK ( ! HType : : Smi ( ) . IsHeapObject ( ) ) ; <nl> + CHECK ( HType : : HeapObject ( ) . IsHeapObject ( ) ) ; <nl> + CHECK ( HType : : HeapPrimitive ( ) . IsHeapObject ( ) ) ; <nl> + CHECK ( HType : : Null ( ) . IsHeapObject ( ) ) ; <nl> + CHECK ( HType : : HeapNumber ( ) . IsHeapObject ( ) ) ; <nl> + CHECK ( HType : : String ( ) . IsHeapObject ( ) ) ; <nl> + CHECK ( HType : : Boolean ( ) . IsHeapObject ( ) ) ; <nl> + CHECK ( HType : : Undefined ( ) . IsHeapObject ( ) ) ; <nl> + CHECK ( HType : : JSObject ( ) . IsHeapObject ( ) ) ; <nl> + CHECK ( HType : : JSArray ( ) . IsHeapObject ( ) ) ; <nl> + } <nl> + <nl> + <nl> + TEST ( HTypePrimitive ) { <nl> + CHECK ( HType : : TaggedNumber ( ) . IsTaggedPrimitive ( ) ) ; <nl> + CHECK ( HType : : Smi ( ) . IsTaggedPrimitive ( ) ) ; <nl> + CHECK ( ! HType : : HeapObject ( ) . IsTaggedPrimitive ( ) ) ; <nl> + CHECK ( HType : : HeapPrimitive ( ) . IsTaggedPrimitive ( ) ) ; <nl> + CHECK ( HType : : Null ( ) . IsHeapPrimitive ( ) ) ; <nl> + CHECK ( HType : : HeapNumber ( ) . IsHeapPrimitive ( ) ) ; <nl> + CHECK ( HType : : String ( ) . IsHeapPrimitive ( ) ) ; <nl> + CHECK ( HType : : Boolean ( ) . IsHeapPrimitive ( ) ) ; <nl> + CHECK ( HType : : Undefined ( ) . IsHeapPrimitive ( ) ) ; <nl> + CHECK ( ! HType : : JSObject ( ) . IsTaggedPrimitive ( ) ) ; <nl> + CHECK ( ! HType : : JSArray ( ) . IsTaggedPrimitive ( ) ) ; <nl> + } <nl> + <nl> + <nl> + TEST ( HTypeJSObject ) { <nl> + CHECK ( HType : : JSArray ( ) . IsJSObject ( ) ) ; <nl> + } <nl> + <nl> + <nl> + TEST ( HTypeNone ) { <nl> + / / None < T for all T <nl> + for ( int i = 0 ; i < kNumberOfTypes ; + + i ) { <nl> + HType ti = kTypes [ i ] ; <nl> + CHECK ( HType : : None ( ) . IsSubtypeOf ( ti ) ) ; <nl> + } <nl> + } <nl> mmm a / test / cctest / test - types . cc <nl> ppp b / test / cctest / test - types . cc <nl> <nl> # include < vector > <nl> <nl> # include " cctest . h " <nl> + # include " hydrogen - types . h " <nl> # include " types . h " <nl> # include " utils / random - number - generator . h " <nl> <nl> struct Tests : Rep { <nl> CheckEqual ( type1 , type3 ) ; <nl> } <nl> } <nl> + <nl> + void HTypeFromType ( ) { <nl> + for ( TypeIterator it1 = T . types . begin ( ) ; it1 ! = T . types . end ( ) ; + + it1 ) { <nl> + for ( TypeIterator it2 = T . types . begin ( ) ; it2 ! = T . types . 
end ( ) ; + + it2 ) { <nl> + TypeHandle type1 = * it1 ; <nl> + TypeHandle type2 = * it2 ; <nl> + HType htype1 = HType : : FromType < Type > ( type1 ) ; <nl> + HType htype2 = HType : : FromType < Type > ( type2 ) ; <nl> + CHECK ( ! type1 - > Is ( type2 ) | | htype1 . IsSubtypeOf ( htype2 ) ) ; <nl> + } <nl> + } <nl> + } <nl> } ; <nl> <nl> typedef Tests < Type , Type * , Zone , ZoneRep > ZoneTests ; <nl> TEST ( Convert ) { <nl> ZoneTests ( ) . Convert < HeapType , Handle < HeapType > , Isolate , HeapRep > ( ) ; <nl> HeapTests ( ) . Convert < Type , Type * , Zone , ZoneRep > ( ) ; <nl> } <nl> + <nl> + <nl> + TEST ( HTypeFromType ) { <nl> + CcTest : : InitializeVM ( ) ; <nl> + ZoneTests ( ) . HTypeFromType ( ) ; <nl> + HeapTests ( ) . HTypeFromType ( ) ; <nl> + } <nl> mmm a / tools / gyp / v8 . gyp <nl> ppp b / tools / gyp / v8 . gyp <nl> <nl> ' . . / . . / src / hydrogen - sce . h ' , <nl> ' . . / . . / src / hydrogen - store - elimination . cc ' , <nl> ' . . / . . / src / hydrogen - store - elimination . h ' , <nl> + ' . . / . . / src / hydrogen - types . cc ' , <nl> + ' . . / . . / src / hydrogen - types . h ' , <nl> ' . . / . . / src / hydrogen - uint32 - analysis . cc ' , <nl> ' . . / . . / src / hydrogen - uint32 - analysis . h ' , <nl> ' . . / . . / src / i18n . cc ' , <nl>
|
Refactor HType to get rid of various hacks .
|
v8/v8
|
e9357a5e7795f7436b633ba24babcc5d574bd3af
|
2014-05-29T04:13:50Z
|
mmm a / programs / copier / ClusterCopier . cpp <nl> ppp b / programs / copier / ClusterCopier . cpp <nl> decltype ( auto ) ClusterCopier : : retry ( T & & func , UInt64 max_tries ) <nl> { <nl> std : : exception_ptr exception ; <nl> <nl> + if ( max_tries = = 0 ) <nl> + throw Exception ( " Cannot perform zero retries " , ErrorCodes : : LOGICAL_ERROR ) ; <nl> + <nl> for ( UInt64 try_number = 1 ; try_number < = max_tries ; + + try_number ) <nl> { <nl> try <nl> UInt64 ClusterCopier : : executeQueryOnCluster ( <nl> if ( execution_mode = = ClusterExecutionMode : : ON_EACH_NODE ) <nl> max_successful_executions_per_shard = 0 ; <nl> <nl> - std : : atomic < size_t > origin_replicas_number ; <nl> + std : : atomic < size_t > origin_replicas_number = 0 ; <nl> <nl> / / / We need to execute query on one replica at least <nl> auto do_for_shard = [ & ] ( UInt64 shard_index , Settings shard_settings ) <nl>
|
Merge pull request from nikitamikhaylov / copier - uninitialized - var
|
ClickHouse/ClickHouse
|
d6dd133bee0cb0c13c58d417aa8f79ba88c477f6
|
2020-11-25T12:12:42Z
|
mmm a / ios / sdk / WeexSDK / Sources / Bridge / WXCoreBridge . mm <nl> ppp b / ios / sdk / WeexSDK / Sources / Bridge / WXCoreBridge . mm <nl> <nl> # import < objc / runtime . h > <nl> # include < fstream > <nl> <nl> - # ifdef DEBUG <nl> - # include < os / log . h > <nl> - # endif <nl> - <nl> namespace WeexCore <nl> { <nl> static void consoleWithArguments ( NSArray * arguments , WXLogFlag logLevel ) <nl> virtual void log ( LogLevel level , const char * file , unsigned long line , const cha <nl> # ifdef DEBUG <nl> switch ( level ) { <nl> case LogLevel : : Error : <nl> - os_log_error ( OS_LOG_DEFAULT , " < % s : Error | % s : % lu > % s \ n " , WEEX_CORE_LOG_TAG , file , line , log ) ; <nl> + printf ( " < % s : Error | % s : % lu > % s \ n " , WEEX_CORE_LOG_TAG , file , line , log ) ; <nl> break ; <nl> case LogLevel : : Warn : <nl> - os_log_error ( OS_LOG_DEFAULT , " < % s : Warn | % s : % lu > % s \ n " , WEEX_CORE_LOG_TAG , file , line , log ) ; <nl> + printf ( " < % s : Warn | % s : % lu > % s \ n " , WEEX_CORE_LOG_TAG , file , line , log ) ; <nl> break ; <nl> case LogLevel : : Info : <nl> - os_log_info ( OS_LOG_DEFAULT , " < % s : Info | % s : % lu > % s \ n " , WEEX_CORE_LOG_TAG , file , line , log ) ; <nl> + printf ( " < % s : Info | % s : % lu > % s \ n " , WEEX_CORE_LOG_TAG , file , line , log ) ; <nl> break ; <nl> case LogLevel : : Debug : <nl> - os_log_debug ( OS_LOG_DEFAULT , " < % s : Debug | % s : % lu > % s \ n " , WEEX_CORE_LOG_TAG , file , line , log ) ; <nl> + printf ( " < % s : Debug | % s : % lu > % s \ n " , WEEX_CORE_LOG_TAG , file , line , log ) ; <nl> break ; <nl> default : <nl> break ; <nl> mmm a / weex_core / Source / base / log_defines . cpp <nl> ppp b / weex_core / Source / base / log_defines . cpp <nl> <nl> <nl> # ifdef __ANDROID__ <nl> # include < android / log . h > <nl> - # elif __APPLE__ <nl> - # include < os / log . h > <nl> # endif <nl> <nl> # include " core / manager / weex_core_manager . h " <nl> namespace WeexCore { <nl> # elif __APPLE__ <nl> switch ( level ) { <nl> case LogLevel : : Error : <nl> - os_log_error ( OS_LOG_DEFAULT , " < % s : Error | % s : % lu > % s \ n " , WEEX_CORE_LOG_TAG , file , line , log . str ( ) ) ; <nl> + printf ( " < % s : Error | % s : % lu > % s \ n " , WEEX_CORE_LOG_TAG , file , line , log . str ( ) ) ; <nl> break ; <nl> case LogLevel : : Warn : <nl> - os_log_error ( OS_LOG_DEFAULT , " < % s : Warn | % s : % lu > % s \ n " , WEEX_CORE_LOG_TAG , file , line , log . str ( ) ) ; <nl> + printf ( " < % s : Warn | % s : % lu > % s \ n " , WEEX_CORE_LOG_TAG , file , line , log . str ( ) ) ; <nl> break ; <nl> case LogLevel : : Info : <nl> - os_log_info ( OS_LOG_DEFAULT , " < % s : Info | % s : % lu > % s \ n " , WEEX_CORE_LOG_TAG , file , line , log . str ( ) ) ; <nl> + printf ( " < % s : Info | % s : % lu > % s \ n " , WEEX_CORE_LOG_TAG , file , line , log . str ( ) ) ; <nl> break ; <nl> case LogLevel : : Debug : <nl> - os_log_debug ( OS_LOG_DEFAULT , " < % s : Debug | % s : % lu > % s \ n " , WEEX_CORE_LOG_TAG , file , line , log . str ( ) ) ; <nl> + printf ( " < % s : Debug | % s : % lu > % s \ n " , WEEX_CORE_LOG_TAG , file , line , log . str ( ) ) ; <nl> break ; <nl> default : <nl> break ; <nl>
|
[ iOS ] Avoid using os / log . h because it is only supported on iOS9 and later . ( )
|
apache/incubator-weex
|
87783c63424cd037435f822ca09b38e48e94d106
|
2019-05-28T08:13:47Z
|
mmm a / build . sh <nl> ppp b / build . sh <nl> elif [ [ $ OSTYPE = darwin * ] ] ; then <nl> # We only support modern Mac machines , they are at least using <nl> # hyperthreaded dual - core CPU . <nl> COMPILE_JOBS = 4 <nl> + elif [ [ $ OSTYPE = = freebsd * ] ] ; then <nl> + COMPILE_JOBS = ` sysctl - n hw . ncpu ` <nl> else <nl> CPU_CORES = ` grep - c ^ processor / proc / cpuinfo ` <nl> if [ [ " $ CPU_CORES " - gt 1 ] ] ; then <nl> COMPILE_JOBS = $ CPU_CORES <nl> - if [ [ " $ COMPILE_JOBS " - gt 8 ] ] ; then <nl> - # Safety net . <nl> - COMPILE_JOBS = 8 <nl> - fi <nl> fi <nl> fi <nl> <nl> + if [ [ " $ COMPILE_JOBS " - gt 8 ] ] ; then <nl> + # Safety net . <nl> + COMPILE_JOBS = 8 <nl> + fi <nl> + <nl> until [ - z " $ 1 " ] ; do <nl> case $ 1 in <nl> " - - qt - config " ) <nl>
|
Use hw . ncpu sysctl on FreeBSD to determine processor count .
|
ariya/phantomjs
|
c3c17a53647395465f411eff8da78b8d7eae97d9
|
2013-01-09T08:07:14Z
|
mmm a / src / heap / concurrent - marking . cc <nl> ppp b / src / heap / concurrent - marking . cc <nl> class ConcurrentMarkingVisitor final <nl> return 0 ; <nl> } <nl> <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / Strings with pointers = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + <nl> + int VisitConsString ( Map * map , ConsString * object ) { <nl> + int size = ConsString : : BodyDescriptor : : SizeOf ( map , object ) ; <nl> + const SlotSnapshot & snapshot = MakeSlotSnapshot ( map , object , size ) ; <nl> + if ( ! ShouldVisit ( object ) ) return 0 ; <nl> + VisitPointersInSnapshot ( object , snapshot ) ; <nl> + return size ; <nl> + } <nl> + <nl> + int VisitSlicedString ( Map * map , SlicedString * object ) { <nl> + int size = SlicedString : : BodyDescriptor : : SizeOf ( map , object ) ; <nl> + const SlotSnapshot & snapshot = MakeSlotSnapshot ( map , object , size ) ; <nl> + if ( ! ShouldVisit ( object ) ) return 0 ; <nl> + VisitPointersInSnapshot ( object , snapshot ) ; <nl> + return size ; <nl> + } <nl> + <nl> + int VisitThinString ( Map * map , ThinString * object ) { <nl> + int size = ThinString : : BodyDescriptor : : SizeOf ( map , object ) ; <nl> + const SlotSnapshot & snapshot = MakeSlotSnapshot ( map , object , size ) ; <nl> + if ( ! ShouldVisit ( object ) ) return 0 ; <nl> + VisitPointersInSnapshot ( object , snapshot ) ; <nl> + return size ; <nl> + } <nl> + <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / Strings without pointers = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + <nl> + int VisitSeqOneByteString ( Map * map , SeqOneByteString * object ) { <nl> + int size = SeqOneByteString : : SizeFor ( object - > synchronized_length ( ) ) ; <nl> + if ( ! ShouldVisit ( object ) ) return 0 ; <nl> + VisitMapPointer ( object , object - > map_slot ( ) ) ; <nl> + return size ; <nl> + } <nl> + <nl> + int VisitSeqTwoByteString ( Map * map , SeqTwoByteString * object ) { <nl> + int size = SeqTwoByteString : : SizeFor ( object - > synchronized_length ( ) ) ; <nl> + if ( ! 
ShouldVisit ( object ) ) return 0 ; <nl> + VisitMapPointer ( object , object - > map_slot ( ) ) ; <nl> + return size ; <nl> + } <nl> + <nl> / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> / / Fixed array object = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> class ConcurrentMarkingVisitor final <nl> SlotSnapshot * slot_snapshot_ ; <nl> } ; <nl> <nl> - const SlotSnapshot & MakeSlotSnapshot ( Map * map , HeapObject * object , int size ) { <nl> + template < typename T > <nl> + const SlotSnapshot & MakeSlotSnapshot ( Map * map , T * object , int size ) { <nl> / / TODO ( ulan ) : Iterate only the existing fields and skip slack at the end <nl> / / of the object . <nl> SlotSnapshottingVisitor visitor ( & slot_snapshot_ ) ; <nl> visitor . VisitPointer ( object , <nl> reinterpret_cast < Object * * > ( object - > map_slot ( ) ) ) ; <nl> - JSObject : : BodyDescriptor : : IterateBody ( object , size , & visitor ) ; <nl> + T : : BodyDescriptor : : IterateBody ( object , size , & visitor ) ; <nl> return slot_snapshot_ ; <nl> } <nl> ConcurrentMarking : : MarkingWorklist : : View shared_ ; <nl>
|
[ heap ] Correctly handle strings in concurrent marking .
|
v8/v8
|
18b8fbb528a8021e04a029e06eafee50b918bce0
|
2017-10-10T19:37:02Z
|
mmm a / docs / cn / getting_started . md <nl> ppp b / docs / cn / getting_started . md <nl> brew install gperftools <nl> <nl> If you need to run tests , install and compile googletest ( which is not compiled yet ) : <nl> ` ` ` shell <nl> - git clone https : / / github . com / google / googletest & & cd googletest / googletest & & mkdir bld & & cd bld & & cmake . . & & make & & sudo mv libgtest * / usr / lib / & & cd - <nl> + git clone https : / / github . com / google / googletest & & cd googletest / googletest & & mkdir bld & & cd bld & & cmake - DCMAKE_CXX_FLAGS = " - std = c + + 11 " . . & & make & & sudo mv libgtest * / usr / lib / & & cd - <nl> ` ` ` <nl> <nl> # # # Compile brpc with config_brpc . sh <nl>
|
fix googletest compile commands on macos
|
apache/incubator-brpc
|
55e39400f01f2af5bb86998aacdcfcba0c06bcc1
|
2019-02-14T03:19:19Z
|
mmm a / src / arm / codegen - arm . cc <nl> ppp b / src / arm / codegen - arm . cc <nl> void CodeGenerator : : GenerateGetCachedArrayIndex ( ZoneList < Expression * > * args ) { <nl> } <nl> <nl> <nl> + void CodeGenerator : : GenerateFastAsciiArrayJoin ( ZoneList < Expression * > * args ) { <nl> + ASSERT ( args - > length ( ) = = 2 ) ; <nl> + Load ( args - > at ( 0 ) ) ; <nl> + Register value = frame_ - > PopToRegister ( ) ; <nl> + __ LoadRoot ( value , Heap : : kUndefinedValueRootIndex ) ; <nl> + frame_ - > EmitPush ( value ) ; <nl> + } <nl> + <nl> + <nl> void CodeGenerator : : VisitCallRuntime ( CallRuntime * node ) { <nl> # ifdef DEBUG <nl> int original_height = frame_ - > height ( ) ; <nl> mmm a / src / arm / codegen - arm . h <nl> ppp b / src / arm / codegen - arm . h <nl> class CodeGenerator : public AstVisitor { <nl> <nl> void GenerateHasCachedArrayIndex ( ZoneList < Expression * > * args ) ; <nl> void GenerateGetCachedArrayIndex ( ZoneList < Expression * > * args ) ; <nl> + void GenerateFastAsciiArrayJoin ( ZoneList < Expression * > * args ) ; <nl> <nl> / / Simple condition analysis . <nl> enum ConditionAnalysis { <nl> mmm a / src / arm / full - codegen - arm . cc <nl> ppp b / src / arm / full - codegen - arm . cc <nl> void FullCodeGenerator : : EmitGetCachedArrayIndex ( ZoneList < Expression * > * args ) { <nl> } <nl> <nl> <nl> + void FullCodeGenerator : : EmitFastAsciiArrayJoin ( ZoneList < Expression * > * args ) { <nl> + __ LoadRoot ( r0 , Heap : : kUndefinedValueRootIndex ) ; <nl> + context ( ) - > Plug ( r0 ) ; <nl> + return ; <nl> + } <nl> + <nl> + <nl> void FullCodeGenerator : : VisitCallRuntime ( CallRuntime * expr ) { <nl> Handle < String > name = expr - > name ( ) ; <nl> if ( name - > length ( ) > 0 & & name - > Get ( 0 ) = = ' _ ' ) { <nl> mmm a / src / array . js <nl> ppp b / src / array . js <nl> function ArrayJoin ( separator ) { <nl> } else if ( ! IS_STRING ( separator ) ) { <nl> separator = ToString ( separator ) ; <nl> } <nl> + <nl> + var result = % _FastAsciiArrayJoin ( this , separator ) ; <nl> + if ( typeof result ! = " undefined " ) return result ; <nl> + <nl> var length = TO_UINT32 ( this . length ) ; <nl> return Join ( this , length , separator , ConvertToString ) ; <nl> } <nl> mmm a / src / ia32 / codegen - ia32 . cc <nl> ppp b / src / ia32 / codegen - ia32 . cc <nl> void CodeGenerator : : GenerateIsArray ( ZoneList < Expression * > * args ) { <nl> } <nl> <nl> <nl> + void CodeGenerator : : GenerateFastAsciiArrayJoin ( ZoneList < Expression * > * args ) { <nl> + ASSERT ( args - > length ( ) = = 2 ) ; <nl> + Load ( args - > at ( 1 ) ) ; <nl> + Load ( args - > at ( 0 ) ) ; <nl> + Result array_result = frame_ - > Pop ( ) ; <nl> + array_result . ToRegister ( eax ) ; <nl> + frame_ - > SpillAll ( ) ; <nl> + <nl> + Label bailout ; <nl> + Label done ; <nl> + / / All aliases of the same register have disjoint lifetimes . <nl> + Register array = eax ; <nl> + Register result_pos = no_reg ; <nl> + <nl> + Register index = edi ; <nl> + <nl> + Register current_string_length = ecx ; / / Will be ecx when live . <nl> + <nl> + Register current_string = edx ; <nl> + <nl> + Register scratch = ebx ; <nl> + <nl> + Register scratch_2 = esi ; <nl> + Register new_padding_chars = scratch_2 ; <nl> + <nl> + Operand separator = Operand ( esp , 4 * kPointerSize ) ; / / Already pushed . 
<nl> + Operand elements = Operand ( esp , 3 * kPointerSize ) ; <nl> + Operand result = Operand ( esp , 2 * kPointerSize ) ; <nl> + Operand padding_chars = Operand ( esp , 1 * kPointerSize ) ; <nl> + Operand array_length = Operand ( esp , 0 ) ; <nl> + __ sub ( Operand ( esp ) , Immediate ( 4 * kPointerSize ) ) ; <nl> + <nl> + / / Check that eax is a JSArray <nl> + __ test ( array , Immediate ( kSmiTagMask ) ) ; <nl> + __ j ( zero , & bailout ) ; <nl> + __ CmpObjectType ( array , JS_ARRAY_TYPE , scratch ) ; <nl> + __ j ( not_equal , & bailout ) ; <nl> + <nl> + / / Check that the array has fast elements . <nl> + __ test_b ( FieldOperand ( scratch , Map : : kBitField2Offset ) , <nl> + 1 < < Map : : kHasFastElements ) ; <nl> + __ j ( zero , & bailout ) ; <nl> + <nl> + / / If the array is empty , return the empty string . <nl> + __ mov ( scratch , FieldOperand ( array , JSArray : : kLengthOffset ) ) ; <nl> + __ sar ( scratch , 1 ) ; <nl> + Label non_trivial ; <nl> + __ j ( not_zero , & non_trivial ) ; <nl> + __ mov ( result , Factory : : empty_string ( ) ) ; <nl> + __ jmp ( & done ) ; <nl> + <nl> + __ bind ( & non_trivial ) ; <nl> + __ mov ( array_length , scratch ) ; <nl> + <nl> + __ mov ( scratch , FieldOperand ( array , JSArray : : kElementsOffset ) ) ; <nl> + __ mov ( elements , scratch ) ; <nl> + <nl> + / / End of array ' s live range . <nl> + result_pos = array ; <nl> + array = no_reg ; <nl> + <nl> + <nl> + / / Check that the separator is a flat ascii string . <nl> + __ mov ( current_string , separator ) ; <nl> + __ test ( current_string , Immediate ( kSmiTagMask ) ) ; <nl> + __ j ( zero , & bailout ) ; <nl> + __ mov ( scratch , FieldOperand ( current_string , HeapObject : : kMapOffset ) ) ; <nl> + __ mov_b ( scratch , FieldOperand ( scratch , Map : : kInstanceTypeOffset ) ) ; <nl> + __ and_ ( scratch , Immediate ( <nl> + kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask ) ) ; <nl> + __ cmp ( scratch , kStringTag | kAsciiStringTag | kSeqStringTag ) ; <nl> + __ j ( not_equal , & bailout ) ; <nl> + / / If the separator is the empty string , replace it with NULL . <nl> + / / The test for NULL is quicker than the empty string test , in a loop . <nl> + __ cmp ( FieldOperand ( current_string , SeqAsciiString : : kLengthOffset ) , <nl> + Immediate ( 0 ) ) ; <nl> + Label separator_checked ; <nl> + __ j ( not_zero , & separator_checked ) ; <nl> + __ mov ( separator , Immediate ( 0 ) ) ; <nl> + __ bind ( & separator_checked ) ; <nl> + <nl> + / / Check that elements [ 0 ] is a flat ascii string , and copy it in new space . <nl> + __ mov ( scratch , elements ) ; <nl> + __ mov ( current_string , FieldOperand ( scratch , FixedArray : : kHeaderSize ) ) ; <nl> + __ test ( current_string , Immediate ( kSmiTagMask ) ) ; <nl> + __ j ( zero , & bailout ) ; <nl> + __ mov ( scratch , FieldOperand ( current_string , HeapObject : : kMapOffset ) ) ; <nl> + __ mov_b ( scratch , FieldOperand ( scratch , Map : : kInstanceTypeOffset ) ) ; <nl> + __ and_ ( scratch , Immediate ( <nl> + kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask ) ) ; <nl> + __ cmp ( scratch , kStringTag | kAsciiStringTag | kSeqStringTag ) ; <nl> + __ j ( not_equal , & bailout ) ; <nl> + <nl> + / / Allocate space to copy it . Round up the size to the alignment granularity . 
<nl> + __ mov ( current_string_length , <nl> + FieldOperand ( current_string , String : : kLengthOffset ) ) ; <nl> + __ shr ( current_string_length , 1 ) ; <nl> + <nl> + / / Live registers and stack values : <nl> + / / current_string_length : length of elements [ 0 ] . <nl> + <nl> + / / New string result in new space = elements [ 0 ] <nl> + __ AllocateAsciiString ( result_pos , current_string_length , scratch_2 , <nl> + index , no_reg , & bailout ) ; <nl> + __ mov ( result , result_pos ) ; <nl> + <nl> + / / Adjust current_string_length to include padding bytes at end of string . <nl> + / / Keep track of the number of padding bytes . <nl> + __ mov ( new_padding_chars , current_string_length ) ; <nl> + __ add ( Operand ( current_string_length ) , Immediate ( kObjectAlignmentMask ) ) ; <nl> + __ and_ ( Operand ( current_string_length ) , Immediate ( ~ kObjectAlignmentMask ) ) ; <nl> + __ sub ( new_padding_chars , Operand ( current_string_length ) ) ; <nl> + __ neg ( new_padding_chars ) ; <nl> + __ mov ( padding_chars , new_padding_chars ) ; <nl> + <nl> + Label copy_loop_1_done ; <nl> + Label copy_loop_1 ; <nl> + __ test ( current_string_length , Operand ( current_string_length ) ) ; <nl> + __ j ( zero , & copy_loop_1_done ) ; <nl> + __ bind ( & copy_loop_1 ) ; <nl> + __ sub ( Operand ( current_string_length ) , Immediate ( kPointerSize ) ) ; <nl> + __ mov ( scratch , FieldOperand ( current_string , current_string_length , <nl> + times_1 , SeqAsciiString : : kHeaderSize ) ) ; <nl> + __ mov ( FieldOperand ( result_pos , current_string_length , <nl> + times_1 , SeqAsciiString : : kHeaderSize ) , <nl> + scratch ) ; <nl> + __ j ( not_zero , & copy_loop_1 ) ; <nl> + __ bind ( & copy_loop_1_done ) ; <nl> + <nl> + __ mov ( index , Immediate ( 1 ) ) ; <nl> + / / Loop condition : while ( index < length ) . <nl> + Label loop ; <nl> + __ bind ( & loop ) ; <nl> + __ cmp ( index , array_length ) ; <nl> + __ j ( greater_equal , & done ) ; <nl> + <nl> + / / If the separator is the empty string , signalled by NULL , skip it . <nl> + Label separator_done ; <nl> + __ mov ( current_string , separator ) ; <nl> + __ test ( current_string , Operand ( current_string ) ) ; <nl> + __ j ( zero , & separator_done ) ; <nl> + <nl> + / / Append separator to result . It is known to be a flat ascii string . <nl> + __ AppendStringToTopOfNewSpace ( current_string , current_string_length , <nl> + result_pos , scratch , scratch_2 , result , <nl> + padding_chars , & bailout ) ; <nl> + __ bind ( & separator_done ) ; <nl> + <nl> + / / Add next element of array to the end of the result . <nl> + / / Get current_string = array [ index ] . <nl> + __ mov ( scratch , elements ) ; <nl> + __ mov ( current_string , FieldOperand ( scratch , index , <nl> + times_pointer_size , <nl> + FixedArray : : kHeaderSize ) ) ; <nl> + / / If current ! = flat ascii string drop result , return undefined . <nl> + __ test ( current_string , Immediate ( kSmiTagMask ) ) ; <nl> + __ j ( zero , & bailout ) ; <nl> + __ mov ( scratch , FieldOperand ( current_string , HeapObject : : kMapOffset ) ) ; <nl> + __ mov_b ( scratch , FieldOperand ( scratch , Map : : kInstanceTypeOffset ) ) ; <nl> + __ and_ ( scratch , Immediate ( <nl> + kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask ) ) ; <nl> + __ cmp ( scratch , kStringTag | kAsciiStringTag | kSeqStringTag ) ; <nl> + __ j ( not_equal , & bailout ) ; <nl> + <nl> + / / Append current to the result . 
<nl> + __ AppendStringToTopOfNewSpace ( current_string , current_string_length , <nl> + result_pos , scratch , scratch_2 , result , <nl> + padding_chars , & bailout ) ; <nl> + __ add ( Operand ( index ) , Immediate ( 1 ) ) ; <nl> + __ jmp ( & loop ) ; / / End while ( index < length ) . <nl> + <nl> + __ bind ( & bailout ) ; <nl> + __ mov ( result , Factory : : undefined_value ( ) ) ; <nl> + __ bind ( & done ) ; <nl> + __ mov ( eax , result ) ; <nl> + / / Drop temp values from the stack , and restore context register . <nl> + __ add ( Operand ( esp ) , Immediate ( 4 * kPointerSize ) ) ; <nl> + <nl> + __ mov ( esi , Operand ( ebp , StandardFrameConstants : : kContextOffset ) ) ; <nl> + frame_ - > Drop ( 1 ) ; <nl> + frame_ - > Push ( & array_result ) ; <nl> + } <nl> + <nl> + <nl> void CodeGenerator : : GenerateIsRegExp ( ZoneList < Expression * > * args ) { <nl> ASSERT ( args - > length ( ) = = 1 ) ; <nl> Load ( args - > at ( 0 ) ) ; <nl> mmm a / src / ia32 / codegen - ia32 . h <nl> ppp b / src / ia32 / codegen - ia32 . h <nl> class CodeGenerator : public AstVisitor { <nl> <nl> void GenerateHasCachedArrayIndex ( ZoneList < Expression * > * args ) ; <nl> void GenerateGetCachedArrayIndex ( ZoneList < Expression * > * args ) ; <nl> + void GenerateFastAsciiArrayJoin ( ZoneList < Expression * > * args ) ; <nl> <nl> / / Simple condition analysis . <nl> enum ConditionAnalysis { <nl> mmm a / src / ia32 / full - codegen - ia32 . cc <nl> ppp b / src / ia32 / full - codegen - ia32 . cc <nl> void FullCodeGenerator : : EmitGetCachedArrayIndex ( ZoneList < Expression * > * args ) { <nl> } <nl> <nl> <nl> + void FullCodeGenerator : : EmitFastAsciiArrayJoin ( ZoneList < Expression * > * args ) { <nl> + Label bailout ; <nl> + Label done ; <nl> + <nl> + ASSERT ( args - > length ( ) = = 2 ) ; <nl> + / / We will leave the separator on the stack until the end of the function . <nl> + VisitForStackValue ( args - > at ( 1 ) ) ; <nl> + / / Load this to eax ( = array ) <nl> + VisitForAccumulatorValue ( args - > at ( 0 ) ) ; <nl> + <nl> + / / All aliases of the same register have disjoint lifetimes . <nl> + Register array = eax ; <nl> + Register result_pos = no_reg ; <nl> + <nl> + Register index = edi ; <nl> + <nl> + Register current_string_length = ecx ; / / Will be ecx when live . <nl> + <nl> + Register current_string = edx ; <nl> + <nl> + Register scratch = ebx ; <nl> + <nl> + Register scratch_2 = esi ; <nl> + Register new_padding_chars = scratch_2 ; <nl> + <nl> + Operand separator = Operand ( esp , 4 * kPointerSize ) ; / / Already pushed . <nl> + Operand elements = Operand ( esp , 3 * kPointerSize ) ; <nl> + Operand result = Operand ( esp , 2 * kPointerSize ) ; <nl> + Operand padding_chars = Operand ( esp , 1 * kPointerSize ) ; <nl> + Operand array_length = Operand ( esp , 0 ) ; <nl> + __ sub ( Operand ( esp ) , Immediate ( 4 * kPointerSize ) ) ; <nl> + <nl> + <nl> + / / Check that eax is a JSArray <nl> + __ test ( array , Immediate ( kSmiTagMask ) ) ; <nl> + __ j ( zero , & bailout ) ; <nl> + __ CmpObjectType ( array , JS_ARRAY_TYPE , scratch ) ; <nl> + __ j ( not_equal , & bailout ) ; <nl> + <nl> + / / Check that the array has fast elements . <nl> + __ test_b ( FieldOperand ( scratch , Map : : kBitField2Offset ) , <nl> + 1 < < Map : : kHasFastElements ) ; <nl> + __ j ( zero , & bailout ) ; <nl> + <nl> + / / If the array is empty , return the empty string . 
<nl> + __ mov ( scratch , FieldOperand ( array , JSArray : : kLengthOffset ) ) ; <nl> + __ sar ( scratch , 1 ) ; <nl> + Label non_trivial ; <nl> + __ j ( not_zero , & non_trivial ) ; <nl> + __ mov ( result , Factory : : empty_string ( ) ) ; <nl> + __ jmp ( & done ) ; <nl> + <nl> + __ bind ( & non_trivial ) ; <nl> + __ mov ( array_length , scratch ) ; <nl> + <nl> + __ mov ( scratch , FieldOperand ( array , JSArray : : kElementsOffset ) ) ; <nl> + __ mov ( elements , scratch ) ; <nl> + <nl> + / / End of array ' s live range . <nl> + result_pos = array ; <nl> + array = no_reg ; <nl> + <nl> + <nl> + / / Check that the separator is a flat ascii string . <nl> + __ mov ( current_string , separator ) ; <nl> + __ test ( current_string , Immediate ( kSmiTagMask ) ) ; <nl> + __ j ( zero , & bailout ) ; <nl> + __ mov ( scratch , FieldOperand ( current_string , HeapObject : : kMapOffset ) ) ; <nl> + __ mov_b ( scratch , FieldOperand ( scratch , Map : : kInstanceTypeOffset ) ) ; <nl> + __ and_ ( scratch , Immediate ( <nl> + kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask ) ) ; <nl> + __ cmp ( scratch , kStringTag | kAsciiStringTag | kSeqStringTag ) ; <nl> + __ j ( not_equal , & bailout ) ; <nl> + / / If the separator is the empty string , replace it with NULL . <nl> + / / The test for NULL is quicker than the empty string test , in a loop . <nl> + __ cmp ( FieldOperand ( current_string , SeqAsciiString : : kLengthOffset ) , <nl> + Immediate ( 0 ) ) ; <nl> + Label separator_checked ; <nl> + __ j ( not_zero , & separator_checked ) ; <nl> + __ mov ( separator , Immediate ( 0 ) ) ; <nl> + __ bind ( & separator_checked ) ; <nl> + <nl> + / / Check that elements [ 0 ] is a flat ascii string , and copy it in new space . <nl> + __ mov ( scratch , elements ) ; <nl> + __ mov ( current_string , FieldOperand ( scratch , FixedArray : : kHeaderSize ) ) ; <nl> + __ test ( current_string , Immediate ( kSmiTagMask ) ) ; <nl> + __ j ( zero , & bailout ) ; <nl> + __ mov ( scratch , FieldOperand ( current_string , HeapObject : : kMapOffset ) ) ; <nl> + __ mov_b ( scratch , FieldOperand ( scratch , Map : : kInstanceTypeOffset ) ) ; <nl> + __ and_ ( scratch , Immediate ( <nl> + kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask ) ) ; <nl> + __ cmp ( scratch , kStringTag | kAsciiStringTag | kSeqStringTag ) ; <nl> + __ j ( not_equal , & bailout ) ; <nl> + <nl> + / / Allocate space to copy it . Round up the size to the alignment granularity . <nl> + __ mov ( current_string_length , <nl> + FieldOperand ( current_string , String : : kLengthOffset ) ) ; <nl> + __ shr ( current_string_length , 1 ) ; <nl> + <nl> + / / Live registers and stack values : <nl> + / / current_string_length : length of elements [ 0 ] . <nl> + <nl> + / / New string result in new space = elements [ 0 ] <nl> + __ AllocateAsciiString ( result_pos , current_string_length , scratch_2 , <nl> + index , no_reg , & bailout ) ; <nl> + __ mov ( result , result_pos ) ; <nl> + <nl> + / / Adjust current_string_length to include padding bytes at end of string . <nl> + / / Keep track of the number of padding bytes . 
<nl> + __ mov ( new_padding_chars , current_string_length ) ; <nl> + __ add ( Operand ( current_string_length ) , Immediate ( kObjectAlignmentMask ) ) ; <nl> + __ and_ ( Operand ( current_string_length ) , Immediate ( ~ kObjectAlignmentMask ) ) ; <nl> + __ sub ( new_padding_chars , Operand ( current_string_length ) ) ; <nl> + __ neg ( new_padding_chars ) ; <nl> + __ mov ( padding_chars , new_padding_chars ) ; <nl> + <nl> + Label copy_loop_1_done ; <nl> + Label copy_loop_1 ; <nl> + __ test ( current_string_length , Operand ( current_string_length ) ) ; <nl> + __ j ( zero , & copy_loop_1_done ) ; <nl> + __ bind ( & copy_loop_1 ) ; <nl> + __ sub ( Operand ( current_string_length ) , Immediate ( kPointerSize ) ) ; <nl> + __ mov ( scratch , FieldOperand ( current_string , current_string_length , <nl> + times_1 , SeqAsciiString : : kHeaderSize ) ) ; <nl> + __ mov ( FieldOperand ( result_pos , current_string_length , <nl> + times_1 , SeqAsciiString : : kHeaderSize ) , <nl> + scratch ) ; <nl> + __ j ( not_zero , & copy_loop_1 ) ; <nl> + __ bind ( & copy_loop_1_done ) ; <nl> + <nl> + __ mov ( index , Immediate ( 1 ) ) ; <nl> + / / Loop condition : while ( index < length ) . <nl> + Label loop ; <nl> + __ bind ( & loop ) ; <nl> + __ cmp ( index , array_length ) ; <nl> + __ j ( greater_equal , & done ) ; <nl> + <nl> + / / If the separator is the empty string , signalled by NULL , skip it . <nl> + Label separator_done ; <nl> + __ mov ( current_string , separator ) ; <nl> + __ test ( current_string , Operand ( current_string ) ) ; <nl> + __ j ( zero , & separator_done ) ; <nl> + <nl> + / / Append separator to result . It is known to be a flat ascii string . <nl> + __ AppendStringToTopOfNewSpace ( current_string , current_string_length , <nl> + result_pos , scratch , scratch_2 , result , <nl> + padding_chars , & bailout ) ; <nl> + __ bind ( & separator_done ) ; <nl> + <nl> + / / Add next element of array to the end of the result . <nl> + / / Get current_string = array [ index ] . <nl> + __ mov ( scratch , elements ) ; <nl> + __ mov ( current_string , FieldOperand ( scratch , index , <nl> + times_pointer_size , <nl> + FixedArray : : kHeaderSize ) ) ; <nl> + / / If current ! = flat ascii string drop result , return undefined . <nl> + __ test ( current_string , Immediate ( kSmiTagMask ) ) ; <nl> + __ j ( zero , & bailout ) ; <nl> + __ mov ( scratch , FieldOperand ( current_string , HeapObject : : kMapOffset ) ) ; <nl> + __ mov_b ( scratch , FieldOperand ( scratch , Map : : kInstanceTypeOffset ) ) ; <nl> + __ and_ ( scratch , Immediate ( <nl> + kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask ) ) ; <nl> + __ cmp ( scratch , kStringTag | kAsciiStringTag | kSeqStringTag ) ; <nl> + __ j ( not_equal , & bailout ) ; <nl> + <nl> + / / Append current to the result . <nl> + __ AppendStringToTopOfNewSpace ( current_string , current_string_length , <nl> + result_pos , scratch , scratch_2 , result , <nl> + padding_chars , & bailout ) ; <nl> + __ add ( Operand ( index ) , Immediate ( 1 ) ) ; <nl> + __ jmp ( & loop ) ; / / End while ( index < length ) . <nl> + <nl> + __ bind ( & bailout ) ; <nl> + __ mov ( result , Factory : : undefined_value ( ) ) ; <nl> + __ bind ( & done ) ; <nl> + __ mov ( eax , result ) ; <nl> + / / Drop temp values from the stack , and restore context register . 
<nl> + __ add ( Operand ( esp ) , Immediate ( 5 * kPointerSize ) ) ; <nl> + <nl> + __ mov ( esi , Operand ( ebp , StandardFrameConstants : : kContextOffset ) ) ; <nl> + context ( ) - > Plug ( eax ) ; <nl> + } <nl> + <nl> + <nl> void FullCodeGenerator : : VisitCallRuntime ( CallRuntime * expr ) { <nl> Handle < String > name = expr - > name ( ) ; <nl> if ( name - > length ( ) > 0 & & name - > Get ( 0 ) = = ' _ ' ) { <nl> mmm a / src / ia32 / macro - assembler - ia32 . cc <nl> ppp b / src / ia32 / macro - assembler - ia32 . cc <nl> void MacroAssembler : : AllocateAsciiConsString ( Register result , <nl> Immediate ( Factory : : cons_ascii_string_map ( ) ) ) ; <nl> } <nl> <nl> + / / All registers must be distinct . Only current_string needs valid contents <nl> + / / on entry . All registers may be invalid on exit . result_operand is <nl> + / / unchanged , padding_chars is updated correctly . <nl> + void MacroAssembler : : AppendStringToTopOfNewSpace ( <nl> + Register current_string , / / Tagged pointer to string to copy . <nl> + Register current_string_length , <nl> + Register result_pos , <nl> + Register scratch , <nl> + Register new_padding_chars , <nl> + Operand operand_result , <nl> + Operand operand_padding_chars , <nl> + Label * bailout ) { <nl> + mov ( current_string_length , <nl> + FieldOperand ( current_string , String : : kLengthOffset ) ) ; <nl> + shr ( current_string_length , 1 ) ; <nl> + sub ( current_string_length , operand_padding_chars ) ; <nl> + mov ( new_padding_chars , current_string_length ) ; <nl> + add ( Operand ( current_string_length ) , Immediate ( kObjectAlignmentMask ) ) ; <nl> + and_ ( Operand ( current_string_length ) , Immediate ( ~ kObjectAlignmentMask ) ) ; <nl> + sub ( new_padding_chars , Operand ( current_string_length ) ) ; <nl> + neg ( new_padding_chars ) ; <nl> + / / We need an allocation even if current_string_length is 0 , to fetch <nl> + / / result_pos . Consider using a faster fetch of result_pos in that case . <nl> + AllocateInNewSpace ( current_string_length , result_pos , scratch , no_reg , <nl> + bailout , NO_ALLOCATION_FLAGS ) ; <nl> + sub ( result_pos , operand_padding_chars ) ; <nl> + mov ( operand_padding_chars , new_padding_chars ) ; <nl> + <nl> + Register scratch_2 = new_padding_chars ; / / Used to compute total length . <nl> + / / Copy string to the end of result . <nl> + mov ( current_string_length , <nl> + FieldOperand ( current_string , String : : kLengthOffset ) ) ; <nl> + mov ( scratch , operand_result ) ; <nl> + mov ( scratch_2 , current_string_length ) ; <nl> + add ( scratch_2 , FieldOperand ( scratch , String : : kLengthOffset ) ) ; <nl> + mov ( FieldOperand ( scratch , String : : kLengthOffset ) , scratch_2 ) ; <nl> + shr ( current_string_length , 1 ) ; <nl> + lea ( current_string , <nl> + FieldOperand ( current_string , SeqAsciiString : : kHeaderSize ) ) ; <nl> + / / Loop condition : while ( - - current_string_length > = 0 ) . 
<nl> + Label copy_loop ; <nl> + Label copy_loop_entry ; <nl> + jmp ( & copy_loop_entry ) ; <nl> + bind ( & copy_loop ) ; <nl> + mov_b ( scratch , Operand ( current_string , current_string_length , times_1 , 0 ) ) ; <nl> + mov_b ( Operand ( result_pos , current_string_length , times_1 , 0 ) , scratch ) ; <nl> + bind ( & copy_loop_entry ) ; <nl> + sub ( Operand ( current_string_length ) , Immediate ( 1 ) ) ; <nl> + j ( greater_equal , & copy_loop ) ; <nl> + } <nl> + <nl> <nl> void MacroAssembler : : NegativeZeroTest ( CodeGenerator * cgen , <nl> Register result , <nl> mmm a / src / ia32 / macro - assembler - ia32 . h <nl> ppp b / src / ia32 / macro - assembler - ia32 . h <nl> class MacroAssembler : public Assembler { <nl> Register scratch2 , <nl> Label * gc_required ) ; <nl> <nl> + / / All registers must be distinct . Only current_string needs valid contents <nl> + / / on entry . All registers may be invalid on exit . result_operand is <nl> + / / unchanged , padding_chars is updated correctly . <nl> + / / The top of new space must contain a sequential ascii string with <nl> + / / padding_chars bytes free in its top word . The sequential ascii string <nl> + / / current_string is concatenated to it , allocating the necessary amount <nl> + / / of new memory . <nl> + void AppendStringToTopOfNewSpace ( <nl> + Register current_string , / / Tagged pointer to string to copy . <nl> + Register current_string_length , <nl> + Register result_pos , <nl> + Register scratch , <nl> + Register new_padding_chars , <nl> + Operand operand_result , <nl> + Operand operand_padding_chars , <nl> + Label * bailout ) ; <nl> + <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm <nl> / / Support functions . <nl> <nl> mmm a / src / runtime . h <nl> ppp b / src / runtime . h <nl> namespace internal { <nl> F ( MathSqrt , 1 , 1 ) \ <nl> F ( IsRegExpEquivalent , 2 , 1 ) \ <nl> F ( HasCachedArrayIndex , 1 , 1 ) \ <nl> - F ( GetCachedArrayIndex , 1 , 1 ) <nl> + F ( GetCachedArrayIndex , 1 , 1 ) \ <nl> + F ( FastAsciiArrayJoin , 2 , 1 ) <nl> <nl> <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> mmm a / src / x64 / codegen - x64 . cc <nl> ppp b / src / x64 / codegen - x64 . cc <nl> void CodeGenerator : : GenerateGetCachedArrayIndex ( ZoneList < Expression * > * args ) { <nl> } <nl> <nl> <nl> + void CodeGenerator : : GenerateFastAsciiArrayJoin ( ZoneList < Expression * > * args ) { <nl> + frame_ - > Push ( Factory : : undefined_value ( ) ) ; <nl> + } <nl> + <nl> + <nl> void CodeGenerator : : VisitCallRuntime ( CallRuntime * node ) { <nl> if ( CheckForInlineRuntimeCall ( node ) ) { <nl> return ; <nl> mmm a / src / x64 / codegen - x64 . h <nl> ppp b / src / x64 / codegen - x64 . h <nl> class CodeGenerator : public AstVisitor { <nl> <nl> void GenerateHasCachedArrayIndex ( ZoneList < Expression * > * args ) ; <nl> void GenerateGetCachedArrayIndex ( ZoneList < Expression * > * args ) ; <nl> + void GenerateFastAsciiArrayJoin ( ZoneList < Expression * > * args ) ; <nl> <nl> / / Simple condition analysis . <nl> enum ConditionAnalysis { <nl> mmm a / src / x64 / full - codegen - x64 . cc <nl> ppp b / src / x64 / full - codegen - x64 . 
cc <nl> void FullCodeGenerator : : EmitGetCachedArrayIndex ( ZoneList < Expression * > * args ) { <nl> } <nl> <nl> <nl> + void FullCodeGenerator : : EmitFastAsciiArrayJoin ( ZoneList < Expression * > * args ) { <nl> + context ( ) - > Plug ( Heap : : kUndefinedValueRootIndex ) ; <nl> + } <nl> + <nl> + <nl> void FullCodeGenerator : : VisitCallRuntime ( CallRuntime * expr ) { <nl> Handle < String > name = expr - > name ( ) ; <nl> if ( name - > length ( ) > 0 & & name - > Get ( 0 ) = = ' _ ' ) { <nl>
|
Add a fast case to Array . join when all the elements and the separator are flat ascii strings .
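A minimal sketch of the calls this fast case targets: `Array.prototype.join` over an array whose elements and separator are all flat ASCII strings. Anything that fails the checks in the generated code above (a smi or non-string element, a non-ASCII or non-sequential string) makes the stub bail out to the generic join path; the concrete inputs below are only illustrative.

```js
// All elements and the separator are short, flat ASCII strings, so the
// inlined FastAsciiArrayJoin path can build the result directly in new space.
var parts = ['foo', 'bar', 'baz'];
var joined = parts.join('-');        // 'foo-bar-baz'

// The empty separator is special-cased (replaced by NULL in the stub),
// so the per-element separator append is skipped entirely.
var concatenated = parts.join('');   // 'foobarbaz'

// A non-string element fails the flat-ASCII-string check, so the stub
// bails out and the generic join path produces the result instead.
var mixed = ['foo', 42, 'baz'];
var fallback = mixed.join('-');      // 'foo-42-baz', handled by the slow path
```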
|
v8/v8
|
1d11e32a01e8b767f8d9e9f8d0c4fda88e36a685
|
2010-11-19T09:25:46Z
|
mmm a / npm / README . md <nl> ppp b / npm / README . md <nl> Then you can run your app using : <nl> ` ` ` <nl> atom - shell your - app / <nl> ` ` ` <nl> + <nl> + # # Programmatic usage <nl> + <nl> + If you require ` atom - shell ` inside your node app , it will return the file path to the binary . <nl> + Use this to spawn atom - shell . <nl> + <nl> + ` ` ` js <nl> + var atom = require ( ' atom - shell ' ) <nl> + var proc = require ( ' child_process ' ) <nl> + <nl> + / / will print something similar to / Users / maf / . . . / Atom <nl> + console . log ( atom ) <nl> + <nl> + / / spawn atom - shell <nl> + var child = proc . spawn ( atom ) <nl> + ` ` ` <nl> \ No newline at end of file <nl>
|
add programmatic usage
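A slightly fuller usage sketch building on the README snippet above. It assumes only what the README states (requiring `atom-shell` returns the path to the binary); the app directory name and the logging are placeholders for illustration.

```js
var atom = require('atom-shell');   // resolves to the path of the Atom binary
var proc = require('child_process');

// Launch atom-shell with an app directory and forward its stdio to this process.
var child = proc.spawn(atom, ['your-app/'], { stdio: 'inherit' });

child.on('close', function (code) {
  console.log('atom-shell exited with code ' + code);
});
```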
|
electron/electron
|
eb04836d51dc322a85123fe2b48f85b12bfff914
|
2014-10-20T07:16:35Z
|
mmm a / Makefile . in <nl> ppp b / Makefile . in <nl> VIS_DIRS = \ <nl> xbmc / visualizations / OpenGLSpectrum \ <nl> xbmc / visualizations / WaveForm \ <nl> xbmc / visualizations / XBMCProjectM <nl> + ifeq ( $ ( findstring osx , $ ( ARCH ) ) , osx ) <nl> + VIS_DIRS + = xbmc / visualizations / iTunes <nl> + endif <nl> ifneq ( @ BUILD_GOOM @ , 1 ) <nl> VIS_DIRS + = xbmc / visualizations / Goom <nl> endif <nl> visualizations : exports <nl> $ ( MAKE ) - C xbmc / visualizations / OpenGLSpectrum <nl> $ ( MAKE ) - C xbmc / visualizations / WaveForm <nl> $ ( MAKE ) - C xbmc / visualizations / XBMCProjectM <nl> + ifeq ( $ ( findstring osx , $ ( ARCH ) ) , osx ) <nl> + $ ( MAKE ) - C xbmc / visualizations / iTunes <nl> + endif <nl> ifneq ( @ BUILD_GOOM @ , 1 ) <nl> ifeq ( $ ( or $ ( findstring powerpc - linux , $ ( ARCH ) ) , $ ( findstring powerpc64 - linux , $ ( ARCH ) ) ) , ) <nl> $ ( MAKE ) - C xbmc / visualizations / Goom <nl>
|
fixed , actually build itunes viz for osx platform
|
xbmc/xbmc
|
e76c3bb01b5c0d5d53fd5def897596892653fe83
|
2010-03-10T21:42:14Z
|
mmm a / buildscripts / resmokeconfig / suites / replica_sets_multi_stmt_txn_jscore_passthrough . yml <nl> ppp b / buildscripts / resmokeconfig / suites / replica_sets_multi_stmt_txn_jscore_passthrough . yml <nl> selector : <nl> - jstests / core / optimized_match_explain . js <nl> - jstests / core / sort_array . js <nl> - jstests / core / views / views_collation . js <nl> + - jstests / core / wildcard_index_count . js <nl> <nl> # $ out <nl> - jstests / core / bypass_doc_validation . js <nl> new file mode 100644 <nl> index 000000000000 . . 7b684b29f3cc <nl> mmm / dev / null <nl> ppp b / jstests / core / wildcard_index_count . js <nl> <nl> + / / Test that a wildcard index can be used to accelerate count commands , as well as the $ count agg <nl> + / / stage . <nl> + / / <nl> + / / The collection cannot be sharded , since the requirement to SHARD_FILTER precludes the planner <nl> + / / from generating a COUNT_SCAN plan . Further , we do not allow stepdowns , since the code responsible <nl> + / / for retrying on interrupt is not prepared to handle aggregation explain . <nl> + / / @ tags : [ assumes_unsharded_collection , does_not_support_stepdowns ] <nl> + ( function ( ) { <nl> + " use strict " ; <nl> + <nl> + load ( " jstests / libs / analyze_plan . js " ) ; <nl> + <nl> + const coll = db . wildcard_index_count ; <nl> + coll . drop ( ) ; <nl> + <nl> + assert . commandWorked ( coll . insert ( [ <nl> + { a : 3 } , <nl> + { a : null } , <nl> + { a : [ - 1 , 0 ] } , <nl> + { a : [ 4 , - 3 , 5 ] } , <nl> + { } , <nl> + { a : { b : 4 } } , <nl> + { a : [ ] } , <nl> + { a : [ [ ] , { } ] } , <nl> + { a : { } } , <nl> + ] ) ) ; <nl> + assert . commandWorked ( coll . createIndex ( { " $ * * " : 1 } ) ) ; <nl> + <nl> + assert . eq ( 2 , coll . count ( { a : { $ gt : 0 } } ) ) ; <nl> + assert . eq ( 2 , coll . find ( { a : { $ gt : 0 } } ) . itcount ( ) ) ; <nl> + assert . eq ( 2 , coll . aggregate ( [ { $ match : { a : { $ gt : 0 } } } , { $ count : " count " } ] ) . next ( ) . count ) ; <nl> + <nl> + / / Verify that this query uses a COUNT_SCAN . <nl> + let explain = coll . explain ( ) . count ( { a : { $ gt : 0 } } ) ; <nl> + let countScan = getPlanStage ( explain . queryPlanner . winningPlan , " COUNT_SCAN " ) ; <nl> + assert . neq ( null , countScan , explain ) ; <nl> + assert . eq ( { $ _path : 1 , a : 1 } , countScan . keyPattern , countScan ) ; <nl> + <nl> + / / Query should also COUNT_SCAN when expressed as an aggregation . <nl> + explain = coll . explain ( ) . aggregate ( [ { $ match : { a : { $ gt : 0 } } } , { $ count : " count " } ] ) ; <nl> + countScan = getAggPlanStage ( explain , " COUNT_SCAN " ) ; <nl> + assert . neq ( null , countScan , explain ) ; <nl> + assert . eq ( { $ _path : 1 , a : 1 } , countScan . keyPattern , countScan ) ; <nl> + <nl> + / / $ count of entire collection does not COUNT_SCAN . <nl> + assert . eq ( 9 , coll . find ( ) . itcount ( ) ) ; <nl> + assert . eq ( 9 , coll . aggregate ( [ { $ count : " count " } ] ) . next ( ) . count ) ; <nl> + explain = coll . explain ( ) . aggregate ( [ { $ count : " count " } ] ) ; <nl> + countScan = getAggPlanStage ( explain , " COUNT_SCAN " ) ; <nl> + assert . eq ( null , countScan , explain ) ; <nl> + <nl> + / / When the count consists of multiple intervals , we cannot use COUNT_SCAN . <nl> + assert . eq ( 2 , coll . count ( { a : { $ in : [ 3 , 4 ] } } ) ) ; <nl> + assert . eq ( 2 , coll . find ( { a : { $ in : [ 3 , 4 ] } } ) . itcount ( ) ) ; <nl> + assert . eq ( 2 , coll . 
aggregate ( [ { $ match : { a : { $ in : [ 3 , 4 ] } } } , { $ count : " count " } ] ) . next ( ) . count ) ; <nl> + explain = coll . explain ( ) . aggregate ( [ { $ match : { a : { $ in : [ 3 , 4 ] } } } , { $ count : " count " } ] ) ; <nl> + countScan = getAggPlanStage ( explain , " COUNT_SCAN " ) ; <nl> + assert . eq ( null , countScan , explain ) ; <nl> + let ixscan = getAggPlanStage ( explain , " IXSCAN " ) ; <nl> + assert . neq ( null , ixscan , explain ) ; <nl> + assert . eq ( { $ _path : 1 , a : 1 } , ixscan . keyPattern , ixscan ) ; <nl> + <nl> + / / Count with an equality match on an empty array cannot use COUNT_SCAN . <nl> + assert . eq ( 2 , coll . count ( { a : { $ eq : [ ] } } ) ) ; <nl> + assert . eq ( 2 , coll . find ( { a : { $ eq : [ ] } } ) . itcount ( ) ) ; <nl> + assert . eq ( 2 , coll . aggregate ( [ { $ match : { a : { $ eq : [ ] } } } , { $ count : " count " } ] ) . next ( ) . count ) ; <nl> + explain = coll . explain ( ) . count ( { a : { $ eq : [ ] } } ) ; <nl> + countScan = getPlanStage ( explain . queryPlanner . winningPlan , " COUNT_SCAN " ) ; <nl> + assert . eq ( null , countScan , explain ) ; <nl> + ixscan = getPlanStage ( explain . queryPlanner . winningPlan , " IXSCAN " ) ; <nl> + assert . neq ( null , ixscan , explain ) ; <nl> + assert . eq ( { $ _path : 1 , a : 1 } , ixscan . keyPattern , ixscan ) ; <nl> + <nl> + / / Count with an equality match on an empty object can use COUNT_SCAN . <nl> + assert . eq ( 2 , coll . count ( { a : { $ eq : { } } } ) ) ; <nl> + assert . eq ( 2 , coll . find ( { a : { $ eq : { } } } ) . itcount ( ) ) ; <nl> + assert . eq ( 2 , coll . aggregate ( [ { $ match : { a : { $ eq : { } } } } , { $ count : " count " } ] ) . next ( ) . count ) ; <nl> + explain = coll . explain ( ) . count ( { a : { $ eq : { } } } ) ; <nl> + countScan = getPlanStage ( explain . queryPlanner . winningPlan , " COUNT_SCAN " ) ; <nl> + assert . eq ( { $ _path : 1 , a : 1 } , countScan . keyPattern , explain ) ; <nl> + <nl> + / / Count with equality to a non - empty object cannot use the wildcard index . <nl> + assert . eq ( 1 , coll . count ( { a : { b : 4 } } ) ) ; <nl> + assert . eq ( 1 , coll . find ( { a : { b : 4 } } ) . itcount ( ) ) ; <nl> + assert . eq ( 1 , coll . aggregate ( [ { $ match : { a : { b : 4 } } } , { $ count : " count " } ] ) . next ( ) . count ) ; <nl> + explain = coll . explain ( ) . count ( { a : { b : 4 } } ) ; <nl> + assert ( isCollscan ( db , explain . queryPlanner . winningPlan ) , explain ) ; <nl> + <nl> + / / Count with equality to a non - empty array cannot use the wildcard index . <nl> + assert . eq ( 1 , coll . count ( { a : [ - 1 , 0 ] } ) ) ; <nl> + assert . eq ( 1 , coll . find ( { a : [ - 1 , 0 ] } ) . itcount ( ) ) ; <nl> + assert . eq ( 1 , coll . aggregate ( [ { $ match : { a : [ - 1 , 0 ] } } , { $ count : " count " } ] ) . next ( ) . count ) ; <nl> + explain = coll . explain ( ) . count ( { a : [ - 1 , 0 ] } ) ; <nl> + assert ( isCollscan ( db , explain . queryPlanner . winningPlan ) , explain ) ; <nl> + } ( ) ) ; <nl>
|
SERVER - 37441 Add integration test for count using wildcard indexes .
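A condensed mongo-shell sketch of the behaviour the test above verifies: a single-interval count over a wildcard index can be answered with a COUNT_SCAN, while a multi-interval predicate cannot. It reuses the `getPlanStage` helper from jstests/libs/analyze_plan.js shown in the diff; the collection name is chosen here only for illustration.

```js
load("jstests/libs/analyze_plan.js");

const coll = db.wildcard_count_example;   // illustrative collection name
coll.drop();
assert.commandWorked(coll.insert([{a: 3}, {a: [4, -3, 5]}, {a: null}]));
assert.commandWorked(coll.createIndex({"$**": 1}));

// A single-interval predicate can be answered with a COUNT_SCAN over the
// wildcard index, whose key pattern is {$_path: 1, a: 1}.
assert.eq(2, coll.count({a: {$gt: 0}}));
let explain = coll.explain().count({a: {$gt: 0}});
assert.neq(null, getPlanStage(explain.queryPlanner.winningPlan, "COUNT_SCAN"));

// A multi-interval predicate ($in) still uses the index, but via IXSCAN
// rather than COUNT_SCAN.
explain = coll.explain().count({a: {$in: [3, 4]}});
assert.eq(null, getPlanStage(explain.queryPlanner.winningPlan, "COUNT_SCAN"));
```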
|
mongodb/mongo
|
3e4023b631af2055055605af5bf89ee95e432237
|
2018-10-08T22:42:51Z
|
mmm a / src / parsing / expression - classifier . h <nl> ppp b / src / parsing / expression - classifier . h <nl> <nl> # ifndef V8_PARSING_EXPRESSION_CLASSIFIER_H_ <nl> # define V8_PARSING_EXPRESSION_CLASSIFIER_H_ <nl> <nl> + # include < type_traits > <nl> + <nl> # include " src / messages . h " <nl> # include " src / parsing / scanner . h " <nl> <nl> class DuplicateFinder ; <nl> / / by calling the method Discard . Both actions result in removing the <nl> / / classifier from the parser ' s stack . <nl> <nl> + / / Expression classifier is split into four parts . The base implementing the <nl> + / / general expression classifier logic . Two parts that implement the error <nl> + / / tracking interface , where one is the actual implementation and the other is <nl> + / / an empty class providing only the interface without logic . The expression <nl> + / / classifier class then combines the other parts and provides the full <nl> + / / expression classifier interface by inheriting conditionally , controlled by <nl> + / / Types : : ExpressionClassifierReportErrors , either from the ErrorTracker or the <nl> + / / EmptyErrorTracker . <nl> + / / <nl> + / / Base <nl> + / / / \ <nl> + / / / \ <nl> + / / / \ <nl> + / / / \ <nl> + / / ErrorTracker EmptyErrorTracker <nl> + / / \ / <nl> + / / \ / <nl> + / / \ / <nl> + / / \ / <nl> + / / ExpressionClassifier <nl> + <nl> template < typename Types > <nl> - class ExpressionClassifier { <nl> + class ExpressionClassifier ; <nl> + <nl> + template < typename Types , typename ErrorTracker > <nl> + class ExpressionClassifierBase { <nl> public : <nl> enum ErrorKind : unsigned { <nl> # define DEFINE_ERROR_KIND ( NAME , CODE ) k # # NAME = CODE , <nl> ERROR_CODES ( DEFINE_ERROR_KIND ) <nl> # undef DEFINE_ERROR_KIND <nl> - kUnusedError = 15 / / Larger than error codes ; should fit in 4 bits <nl> + kUnusedError = 15 / / Larger than error codes ; should fit in 4 bits <nl> } ; <nl> <nl> struct Error { <nl> class ExpressionClassifier { <nl> } ; <nl> / / clang - format on <nl> <nl> - explicit ExpressionClassifier ( typename Types : : Base * base , <nl> - DuplicateFinder * duplicate_finder = nullptr ) <nl> + explicit ExpressionClassifierBase ( typename Types : : Base * base , <nl> + DuplicateFinder * duplicate_finder = nullptr ) <nl> : base_ ( base ) , <nl> - previous_ ( base - > classifier_ ) , <nl> - zone_ ( base - > impl ( ) - > zone ( ) ) , <nl> - reported_errors_ ( base - > impl ( ) - > GetReportedErrorList ( ) ) , <nl> duplicate_finder_ ( duplicate_finder ) , <nl> invalid_productions_ ( 0 ) , <nl> - is_non_simple_parameter_list_ ( 0 ) { <nl> - base - > classifier_ = this ; <nl> - reported_errors_begin_ = reported_errors_end_ = reported_errors_ - > length ( ) ; <nl> - } <nl> + is_non_simple_parameter_list_ ( 0 ) { } <nl> <nl> - V8_INLINE ~ ExpressionClassifier ( ) { <nl> - Discard ( ) ; <nl> - if ( base_ - > classifier_ = = this ) base_ - > classifier_ = previous_ ; <nl> - } <nl> + virtual ~ ExpressionClassifierBase ( ) = default ; <nl> <nl> V8_INLINE bool is_valid ( unsigned productions ) const { <nl> return ( invalid_productions_ & productions ) = = 0 ; <nl> class ExpressionClassifier { <nl> return is_valid ( AsyncArrowFormalParametersProduction ) ; <nl> } <nl> <nl> + V8_INLINE bool is_simple_parameter_list ( ) const { <nl> + return ! 
is_non_simple_parameter_list_ ; <nl> + } <nl> + <nl> + V8_INLINE void RecordNonSimpleParameter ( ) { <nl> + is_non_simple_parameter_list_ = 1 ; <nl> + } <nl> + <nl> + V8_INLINE void Accumulate ( ExpressionClassifier < Types > * const inner , <nl> + unsigned productions ) { <nl> + # ifdef DEBUG <nl> + static_cast < ErrorTracker * > ( this ) - > CheckErrorPositions ( inner ) ; <nl> + # endif <nl> + / / Propagate errors from inner , but don ' t overwrite already recorded <nl> + / / errors . <nl> + unsigned non_arrow_inner_invalid_productions = <nl> + inner - > invalid_productions_ & ~ ArrowFormalParametersProduction ; <nl> + if ( non_arrow_inner_invalid_productions ) { <nl> + unsigned errors = non_arrow_inner_invalid_productions & productions & <nl> + ~ this - > invalid_productions_ ; <nl> + / / The result will continue to be a valid arrow formal parameters if the <nl> + / / inner expression is a valid binding pattern . <nl> + bool copy_BP_to_AFP = false ; <nl> + if ( productions & ArrowFormalParametersProduction & & <nl> + this - > is_valid_arrow_formal_parameters ( ) ) { <nl> + / / Also whether we ' ve seen any non - simple parameters <nl> + / / if expecting an arrow function parameter . <nl> + this - > is_non_simple_parameter_list_ | = <nl> + inner - > is_non_simple_parameter_list_ ; <nl> + if ( ! inner - > is_valid_binding_pattern ( ) ) { <nl> + copy_BP_to_AFP = true ; <nl> + this - > invalid_productions_ | = ArrowFormalParametersProduction ; <nl> + } <nl> + } <nl> + if ( errors ! = 0 | | copy_BP_to_AFP ) { <nl> + this - > invalid_productions_ | = errors ; <nl> + static_cast < ErrorTracker * > ( this ) - > AccumulateErrorImpl ( <nl> + inner , productions , errors , copy_BP_to_AFP ) ; <nl> + } <nl> + } <nl> + static_cast < ErrorTracker * > ( this ) - > RewindErrors ( inner ) ; <nl> + } <nl> + <nl> + protected : <nl> + typename Types : : Base * base_ ; <nl> + DuplicateFinder * duplicate_finder_ ; <nl> + unsigned invalid_productions_ : kUnusedError ; <nl> + STATIC_ASSERT ( kUnusedError < = 15 ) ; <nl> + unsigned is_non_simple_parameter_list_ : 1 ; <nl> + } ; <nl> + <nl> + template < typename Types > <nl> + class ExpressionClassifierErrorTracker <nl> + : public ExpressionClassifierBase < Types , <nl> + ExpressionClassifierErrorTracker < Types > > { <nl> + public : <nl> + using BaseClassType = <nl> + ExpressionClassifierBase < Types , ExpressionClassifierErrorTracker < Types > > ; <nl> + using typename BaseClassType : : Error ; <nl> + using typename BaseClassType : : ErrorKind ; <nl> + using TP = typename BaseClassType : : TargetProduction ; <nl> + <nl> + ExpressionClassifierErrorTracker ( typename Types : : Base * base , <nl> + DuplicateFinder * duplicate_finder ) <nl> + : BaseClassType ( base , duplicate_finder ) , <nl> + reported_errors_ ( base - > impl ( ) - > GetReportedErrorList ( ) ) { <nl> + reported_errors_begin_ = reported_errors_end_ = reported_errors_ - > length ( ) ; <nl> + } <nl> + <nl> + ~ ExpressionClassifierErrorTracker ( ) override { Discard ( ) ; } <nl> + <nl> + V8_INLINE void Discard ( ) { <nl> + if ( reported_errors_end_ = = reported_errors_ - > length ( ) ) { <nl> + reported_errors_ - > Rewind ( reported_errors_begin_ ) ; <nl> + reported_errors_end_ = reported_errors_begin_ ; <nl> + } <nl> + DCHECK_EQ ( reported_errors_begin_ , reported_errors_end_ ) ; <nl> + } <nl> + <nl> + protected : <nl> + V8_INLINE const Error & reported_error ( ErrorKind kind ) const { <nl> + if ( this - > invalid_productions_ & ( 1 < < kind ) ) { <nl> + for ( int i = reported_errors_begin_ ; i < 
reported_errors_end_ ; i + + ) { <nl> + if ( reported_errors_ - > at ( i ) . kind = = kind ) <nl> + return reported_errors_ - > at ( i ) ; <nl> + } <nl> + UNREACHABLE ( ) ; <nl> + } <nl> + / / We should only be looking for an error when we know that one has <nl> + / / been reported . But we ' re not . . . So this is to make sure we have <nl> + / / the same behaviour . <nl> + UNREACHABLE ( ) ; <nl> + <nl> + / / Make MSVC happy by returning an error from this inaccessible path . <nl> + static Error none ; <nl> + return none ; <nl> + } <nl> + <nl> + / / Adds e to the end of the list of reported errors for this classifier . <nl> + / / It is expected that this classifier is the last one in the stack . <nl> + V8_INLINE void Add ( const Error & e ) { <nl> + DCHECK_EQ ( reported_errors_end_ , reported_errors_ - > length ( ) ) ; <nl> + reported_errors_ - > Add ( e , this - > base_ - > impl ( ) - > zone ( ) ) ; <nl> + reported_errors_end_ + + ; <nl> + } <nl> + <nl> + / / Copies the error at position i of the list of reported errors , so that <nl> + / / it becomes the last error reported for this classifier . Position i <nl> + / / could be either after the existing errors of this classifier ( i . e . , <nl> + / / in an inner classifier ) or it could be an existing error ( in case a <nl> + / / copy is needed ) . <nl> + V8_INLINE void Copy ( int i ) { <nl> + DCHECK_LT ( i , reported_errors_ - > length ( ) ) ; <nl> + if ( reported_errors_end_ ! = i ) <nl> + reported_errors_ - > at ( reported_errors_end_ ) = reported_errors_ - > at ( i ) ; <nl> + reported_errors_end_ + + ; <nl> + } <nl> + <nl> + private : <nl> + # ifdef DEBUG <nl> + V8_INLINE void CheckErrorPositions ( ExpressionClassifier < Types > * const inner ) { <nl> + DCHECK_EQ ( inner - > reported_errors_ , this - > reported_errors_ ) ; <nl> + DCHECK_EQ ( inner - > reported_errors_begin_ , this - > reported_errors_end_ ) ; <nl> + DCHECK_EQ ( inner - > reported_errors_end_ , this - > reported_errors_ - > length ( ) ) ; <nl> + } <nl> + # endif <nl> + <nl> + V8_INLINE void RewindErrors ( ExpressionClassifier < Types > * const inner ) { <nl> + this - > reported_errors_ - > Rewind ( this - > reported_errors_end_ ) ; <nl> + inner - > reported_errors_begin_ = inner - > reported_errors_end_ = <nl> + this - > reported_errors_end_ ; <nl> + } <nl> + <nl> + void AccumulateErrorImpl ( ExpressionClassifier < Types > * const inner , <nl> + unsigned productions , unsigned errors , <nl> + bool copy_BP_to_AFP ) { <nl> + / / Traverse the list of errors reported by the inner classifier <nl> + / / to copy what ' s necessary . <nl> + int binding_pattern_index = inner - > reported_errors_end_ ; <nl> + for ( int i = inner - > reported_errors_begin_ ; i < inner - > reported_errors_end_ ; <nl> + i + + ) { <nl> + int k = this - > reported_errors_ - > at ( i ) . kind ; <nl> + if ( errors & ( 1 < < k ) ) this - > Copy ( i ) ; <nl> + / / Check if it ' s a BP error that has to be copied to an AFP error . <nl> + if ( k = = ErrorKind : : kBindingPatternProduction & & copy_BP_to_AFP ) { <nl> + if ( this - > reported_errors_end_ < = i ) { <nl> + / / If the BP error itself has not already been copied , <nl> + / / copy it now and change it to an AFP error . <nl> + this - > Copy ( i ) ; <nl> + this - > reported_errors_ - > at ( this - > reported_errors_end_ - 1 ) . kind = <nl> + ErrorKind : : kArrowFormalParametersProduction ; <nl> + } else { <nl> + / / Otherwise , if the BP error was already copied , keep its <nl> + / / position and wait until the end of the traversal . 
<nl> + DCHECK_EQ ( this - > reported_errors_end_ , i + 1 ) ; <nl> + binding_pattern_index = i ; <nl> + } <nl> + } <nl> + } <nl> + / / Do we still have to copy the BP error to an AFP error ? <nl> + if ( binding_pattern_index < inner - > reported_errors_end_ ) { <nl> + / / If there ' s still unused space in the list of the inner <nl> + / / classifier , copy it there , otherwise add it to the end <nl> + / / of the list . <nl> + if ( this - > reported_errors_end_ < inner - > reported_errors_end_ ) <nl> + this - > Copy ( binding_pattern_index ) ; <nl> + else <nl> + Add ( this - > reported_errors_ - > at ( binding_pattern_index ) ) ; <nl> + this - > reported_errors_ - > at ( this - > reported_errors_end_ - 1 ) . kind = <nl> + ErrorKind : : kArrowFormalParametersProduction ; <nl> + } <nl> + } <nl> + <nl> + private : <nl> + ZoneList < Error > * reported_errors_ ; <nl> + / / The uint16_t for reported_errors_begin_ and reported_errors_end_ will <nl> + / / not be enough in the case of a long series of expressions using nested <nl> + / / classifiers , e . g . , a long sequence of assignments , as in : <nl> + / / literals with spreads , as in : <nl> + / / var N = 65536 ; eval ( " var x ; " + " x = " . repeat ( N ) + " 42 " ) ; <nl> + / / This should not be a problem , as such things currently fail with a <nl> + / / stack overflow while parsing . <nl> + uint16_t reported_errors_begin_ ; <nl> + uint16_t reported_errors_end_ ; <nl> + <nl> + friend BaseClassType ; <nl> + } ; <nl> + <nl> + template < typename Types > <nl> + class ExpressionClassifierEmptyErrorTracker <nl> + : public ExpressionClassifierBase < <nl> + Types , ExpressionClassifierEmptyErrorTracker < Types > > { <nl> + public : <nl> + using BaseClassType = <nl> + ExpressionClassifierBase < Types , <nl> + ExpressionClassifierEmptyErrorTracker < Types > > ; <nl> + using typename BaseClassType : : Error ; <nl> + using typename BaseClassType : : ErrorKind ; <nl> + using TP = typename BaseClassType : : TargetProduction ; <nl> + <nl> + ExpressionClassifierEmptyErrorTracker ( typename Types : : Base * base , <nl> + DuplicateFinder * duplicate_finder ) <nl> + : BaseClassType ( base , duplicate_finder ) { } <nl> + <nl> + V8_INLINE void Discard ( ) { } <nl> + <nl> + protected : <nl> + V8_INLINE const Error & reported_error ( ErrorKind kind ) const { <nl> + static Error none ; <nl> + return none ; <nl> + } <nl> + <nl> + V8_INLINE void Add ( const Error & e ) { } <nl> + <nl> + private : <nl> + # ifdef DEBUG <nl> + V8_INLINE void CheckErrorPositions ( ExpressionClassifier < Types > * const inner ) { <nl> + } <nl> + # endif <nl> + V8_INLINE void RewindErrors ( ExpressionClassifier < Types > * const inner ) { } <nl> + V8_INLINE void AccumulateErrorImpl ( ExpressionClassifier < Types > * const inner , <nl> + unsigned productions , unsigned errors , <nl> + bool copy_BP_to_AFP ) { } <nl> + <nl> + friend BaseClassType ; <nl> + } ; <nl> + <nl> + template < typename Types > <nl> + class ExpressionClassifier <nl> + : public std : : conditional < <nl> + Types : : ExpressionClassifierReportErrors , <nl> + ExpressionClassifierErrorTracker < Types > , <nl> + ExpressionClassifierEmptyErrorTracker < Types > > : : type { <nl> + static constexpr bool ReportErrors = Types : : ExpressionClassifierReportErrors ; <nl> + <nl> + public : <nl> + using BaseClassType = typename std : : conditional < <nl> + Types : : ExpressionClassifierReportErrors , <nl> + typename ExpressionClassifierErrorTracker < Types > : : BaseClassType , <nl> + typename ExpressionClassifierEmptyErrorTracker < 
Types > : : BaseClassType > : : <nl> + type ; <nl> + using typename BaseClassType : : Error ; <nl> + using typename BaseClassType : : ErrorKind ; <nl> + using TP = typename BaseClassType : : TargetProduction ; <nl> + <nl> + explicit ExpressionClassifier ( typename Types : : Base * base , <nl> + DuplicateFinder * duplicate_finder = nullptr ) <nl> + : std : : conditional < Types : : ExpressionClassifierReportErrors , <nl> + ExpressionClassifierErrorTracker < Types > , <nl> + ExpressionClassifierEmptyErrorTracker < Types > > : : <nl> + type ( base , duplicate_finder ) , <nl> + previous_ ( base - > classifier_ ) { <nl> + base - > classifier_ = this ; <nl> + } <nl> + <nl> + V8_INLINE ~ ExpressionClassifier ( ) override { <nl> + if ( this - > base_ - > classifier_ = = this ) this - > base_ - > classifier_ = previous_ ; <nl> + } <nl> + <nl> V8_INLINE const Error & expression_error ( ) const { <nl> - return reported_error ( kExpressionProduction ) ; <nl> + return this - > reported_error ( ErrorKind : : kExpressionProduction ) ; <nl> } <nl> <nl> V8_INLINE const Error & formal_parameter_initializer_error ( ) const { <nl> - return reported_error ( kFormalParameterInitializerProduction ) ; <nl> + return this - > reported_error ( <nl> + ErrorKind : : kFormalParameterInitializerProduction ) ; <nl> } <nl> <nl> V8_INLINE const Error & binding_pattern_error ( ) const { <nl> - return reported_error ( kBindingPatternProduction ) ; <nl> + return this - > reported_error ( ErrorKind : : kBindingPatternProduction ) ; <nl> } <nl> <nl> V8_INLINE const Error & assignment_pattern_error ( ) const { <nl> - return reported_error ( kAssignmentPatternProduction ) ; <nl> + return this - > reported_error ( ErrorKind : : kAssignmentPatternProduction ) ; <nl> } <nl> <nl> V8_INLINE const Error & arrow_formal_parameters_error ( ) const { <nl> - return reported_error ( kArrowFormalParametersProduction ) ; <nl> + return this - > reported_error ( ErrorKind : : kArrowFormalParametersProduction ) ; <nl> } <nl> <nl> V8_INLINE const Error & duplicate_formal_parameter_error ( ) const { <nl> - return reported_error ( kDistinctFormalParametersProduction ) ; <nl> + return this - > reported_error ( ErrorKind : : kDistinctFormalParametersProduction ) ; <nl> } <nl> <nl> V8_INLINE const Error & strict_mode_formal_parameter_error ( ) const { <nl> - return reported_error ( kStrictModeFormalParametersProduction ) ; <nl> + return this - > reported_error ( <nl> + ErrorKind : : kStrictModeFormalParametersProduction ) ; <nl> } <nl> <nl> V8_INLINE const Error & let_pattern_error ( ) const { <nl> - return reported_error ( kLetPatternProduction ) ; <nl> + return this - > reported_error ( ErrorKind : : kLetPatternProduction ) ; <nl> } <nl> <nl> V8_INLINE const Error & async_arrow_formal_parameters_error ( ) const { <nl> - return reported_error ( kAsyncArrowFormalParametersProduction ) ; <nl> + return this - > reported_error ( <nl> + ErrorKind : : kAsyncArrowFormalParametersProduction ) ; <nl> } <nl> <nl> - V8_INLINE bool is_simple_parameter_list ( ) const { <nl> - return ! is_non_simple_parameter_list_ ; <nl> - } <nl> - <nl> - V8_INLINE void RecordNonSimpleParameter ( ) { <nl> - is_non_simple_parameter_list_ = 1 ; <nl> - } <nl> + V8_INLINE bool does_error_reporting ( ) { return ReportErrors ; } <nl> <nl> void RecordExpressionError ( const Scanner : : Location & loc , <nl> MessageTemplate : : Template message , <nl> const char * arg = nullptr ) { <nl> - if ( ! 
is_valid_expression ( ) ) return ; <nl> - invalid_productions_ | = ExpressionProduction ; <nl> - Add ( Error ( loc , message , kExpressionProduction , arg ) ) ; <nl> + if ( ! this - > is_valid_expression ( ) ) return ; <nl> + this - > invalid_productions_ | = TP : : ExpressionProduction ; <nl> + this - > Add ( Error ( loc , message , ErrorKind : : kExpressionProduction , arg ) ) ; <nl> } <nl> <nl> void RecordFormalParameterInitializerError ( const Scanner : : Location & loc , <nl> MessageTemplate : : Template message , <nl> const char * arg = nullptr ) { <nl> - if ( ! is_valid_formal_parameter_initializer ( ) ) return ; <nl> - invalid_productions_ | = FormalParameterInitializerProduction ; <nl> - Add ( Error ( loc , message , kFormalParameterInitializerProduction , arg ) ) ; <nl> + if ( ! this - > is_valid_formal_parameter_initializer ( ) ) return ; <nl> + this - > invalid_productions_ | = TP : : FormalParameterInitializerProduction ; <nl> + this - > Add ( Error ( loc , message , <nl> + ErrorKind : : kFormalParameterInitializerProduction , arg ) ) ; <nl> } <nl> <nl> void RecordBindingPatternError ( const Scanner : : Location & loc , <nl> MessageTemplate : : Template message , <nl> const char * arg = nullptr ) { <nl> - if ( ! is_valid_binding_pattern ( ) ) return ; <nl> - invalid_productions_ | = BindingPatternProduction ; <nl> - Add ( Error ( loc , message , kBindingPatternProduction , arg ) ) ; <nl> + if ( ! this - > is_valid_binding_pattern ( ) ) return ; <nl> + this - > invalid_productions_ | = TP : : BindingPatternProduction ; <nl> + this - > Add ( Error ( loc , message , ErrorKind : : kBindingPatternProduction , arg ) ) ; <nl> } <nl> <nl> void RecordAssignmentPatternError ( const Scanner : : Location & loc , <nl> MessageTemplate : : Template message , <nl> const char * arg = nullptr ) { <nl> - if ( ! is_valid_assignment_pattern ( ) ) return ; <nl> - invalid_productions_ | = AssignmentPatternProduction ; <nl> - Add ( Error ( loc , message , kAssignmentPatternProduction , arg ) ) ; <nl> + if ( ! this - > is_valid_assignment_pattern ( ) ) return ; <nl> + this - > invalid_productions_ | = TP : : AssignmentPatternProduction ; <nl> + this - > Add ( <nl> + Error ( loc , message , ErrorKind : : kAssignmentPatternProduction , arg ) ) ; <nl> } <nl> <nl> void RecordPatternError ( const Scanner : : Location & loc , <nl> class ExpressionClassifier { <nl> void RecordArrowFormalParametersError ( const Scanner : : Location & loc , <nl> MessageTemplate : : Template message , <nl> const char * arg = nullptr ) { <nl> - if ( ! is_valid_arrow_formal_parameters ( ) ) return ; <nl> - invalid_productions_ | = ArrowFormalParametersProduction ; <nl> - Add ( Error ( loc , message , kArrowFormalParametersProduction , arg ) ) ; <nl> + if ( ! this - > is_valid_arrow_formal_parameters ( ) ) return ; <nl> + this - > invalid_productions_ | = TP : : ArrowFormalParametersProduction ; <nl> + this - > Add ( <nl> + Error ( loc , message , ErrorKind : : kArrowFormalParametersProduction , arg ) ) ; <nl> } <nl> <nl> void RecordAsyncArrowFormalParametersError ( const Scanner : : Location & loc , <nl> MessageTemplate : : Template message , <nl> const char * arg = nullptr ) { <nl> - if ( ! is_valid_async_arrow_formal_parameters ( ) ) return ; <nl> - invalid_productions_ | = AsyncArrowFormalParametersProduction ; <nl> - Add ( Error ( loc , message , kAsyncArrowFormalParametersProduction , arg ) ) ; <nl> + if ( ! 
this - > is_valid_async_arrow_formal_parameters ( ) ) return ; <nl> + this - > invalid_productions_ | = TP : : AsyncArrowFormalParametersProduction ; <nl> + this - > Add ( Error ( loc , message , <nl> + ErrorKind : : kAsyncArrowFormalParametersProduction , arg ) ) ; <nl> } <nl> <nl> void RecordDuplicateFormalParameterError ( const Scanner : : Location & loc ) { <nl> - if ( ! is_valid_formal_parameter_list_without_duplicates ( ) ) return ; <nl> - invalid_productions_ | = DistinctFormalParametersProduction ; <nl> - Add ( Error ( loc , MessageTemplate : : kParamDupe , <nl> - kDistinctFormalParametersProduction ) ) ; <nl> + if ( ! this - > is_valid_formal_parameter_list_without_duplicates ( ) ) return ; <nl> + this - > invalid_productions_ | = TP : : DistinctFormalParametersProduction ; <nl> + this - > Add ( Error ( loc , MessageTemplate : : kParamDupe , <nl> + ErrorKind : : kDistinctFormalParametersProduction ) ) ; <nl> } <nl> <nl> / / Record a binding that would be invalid in strict mode . Confusingly this <nl> class ExpressionClassifier { <nl> void RecordStrictModeFormalParameterError ( const Scanner : : Location & loc , <nl> MessageTemplate : : Template message , <nl> const char * arg = nullptr ) { <nl> - if ( ! is_valid_strict_mode_formal_parameters ( ) ) return ; <nl> - invalid_productions_ | = StrictModeFormalParametersProduction ; <nl> - Add ( Error ( loc , message , kStrictModeFormalParametersProduction , arg ) ) ; <nl> + if ( ! this - > is_valid_strict_mode_formal_parameters ( ) ) return ; <nl> + this - > invalid_productions_ | = TP : : StrictModeFormalParametersProduction ; <nl> + this - > Add ( Error ( loc , message , <nl> + ErrorKind : : kStrictModeFormalParametersProduction , arg ) ) ; <nl> } <nl> <nl> void RecordLetPatternError ( const Scanner : : Location & loc , <nl> MessageTemplate : : Template message , <nl> const char * arg = nullptr ) { <nl> - if ( ! is_valid_let_pattern ( ) ) return ; <nl> - invalid_productions_ | = LetPatternProduction ; <nl> - Add ( Error ( loc , message , kLetPatternProduction , arg ) ) ; <nl> - } <nl> - <nl> - void Accumulate ( ExpressionClassifier * inner , unsigned productions ) { <nl> - DCHECK_EQ ( inner - > reported_errors_ , reported_errors_ ) ; <nl> - DCHECK_EQ ( inner - > reported_errors_begin_ , reported_errors_end_ ) ; <nl> - DCHECK_EQ ( inner - > reported_errors_end_ , reported_errors_ - > length ( ) ) ; <nl> - / / Propagate errors from inner , but don ' t overwrite already recorded <nl> - / / errors . <nl> - unsigned non_arrow_inner_invalid_productions = <nl> - inner - > invalid_productions_ & ~ ArrowFormalParametersProduction ; <nl> - if ( non_arrow_inner_invalid_productions ) { <nl> - unsigned errors = non_arrow_inner_invalid_productions & productions & <nl> - ~ invalid_productions_ ; <nl> - / / The result will continue to be a valid arrow formal parameters if the <nl> - / / inner expression is a valid binding pattern . <nl> - bool copy_BP_to_AFP = false ; <nl> - if ( productions & ArrowFormalParametersProduction & & <nl> - is_valid_arrow_formal_parameters ( ) ) { <nl> - / / Also whether we ' ve seen any non - simple parameters <nl> - / / if expecting an arrow function parameter . <nl> - is_non_simple_parameter_list_ | = inner - > is_non_simple_parameter_list_ ; <nl> - if ( ! 
inner - > is_valid_binding_pattern ( ) ) { <nl> - copy_BP_to_AFP = true ; <nl> - invalid_productions_ | = ArrowFormalParametersProduction ; <nl> - } <nl> - } <nl> - / / Traverse the list of errors reported by the inner classifier <nl> - / / to copy what ' s necessary . <nl> - if ( errors ! = 0 | | copy_BP_to_AFP ) { <nl> - invalid_productions_ | = errors ; <nl> - int binding_pattern_index = inner - > reported_errors_end_ ; <nl> - for ( int i = inner - > reported_errors_begin_ ; <nl> - i < inner - > reported_errors_end_ ; i + + ) { <nl> - int k = reported_errors_ - > at ( i ) . kind ; <nl> - if ( errors & ( 1 < < k ) ) Copy ( i ) ; <nl> - / / Check if it ' s a BP error that has to be copied to an AFP error . <nl> - if ( k = = kBindingPatternProduction & & copy_BP_to_AFP ) { <nl> - if ( reported_errors_end_ < = i ) { <nl> - / / If the BP error itself has not already been copied , <nl> - / / copy it now and change it to an AFP error . <nl> - Copy ( i ) ; <nl> - reported_errors_ - > at ( reported_errors_end_ - 1 ) . kind = <nl> - kArrowFormalParametersProduction ; <nl> - } else { <nl> - / / Otherwise , if the BP error was already copied , keep its <nl> - / / position and wait until the end of the traversal . <nl> - DCHECK_EQ ( reported_errors_end_ , i + 1 ) ; <nl> - binding_pattern_index = i ; <nl> - } <nl> - } <nl> - } <nl> - / / Do we still have to copy the BP error to an AFP error ? <nl> - if ( binding_pattern_index < inner - > reported_errors_end_ ) { <nl> - / / If there ' s still unused space in the list of the inner <nl> - / / classifier , copy it there , otherwise add it to the end <nl> - / / of the list . <nl> - if ( reported_errors_end_ < inner - > reported_errors_end_ ) <nl> - Copy ( binding_pattern_index ) ; <nl> - else <nl> - Add ( reported_errors_ - > at ( binding_pattern_index ) ) ; <nl> - reported_errors_ - > at ( reported_errors_end_ - 1 ) . kind = <nl> - kArrowFormalParametersProduction ; <nl> - } <nl> - } <nl> - } <nl> - reported_errors_ - > Rewind ( reported_errors_end_ ) ; <nl> - inner - > reported_errors_begin_ = inner - > reported_errors_end_ = <nl> - reported_errors_end_ ; <nl> - } <nl> - <nl> - V8_INLINE void Discard ( ) { <nl> - if ( reported_errors_end_ = = reported_errors_ - > length ( ) ) { <nl> - reported_errors_ - > Rewind ( reported_errors_begin_ ) ; <nl> - reported_errors_end_ = reported_errors_begin_ ; <nl> - } <nl> - DCHECK_EQ ( reported_errors_begin_ , reported_errors_end_ ) ; <nl> + if ( ! this - > is_valid_let_pattern ( ) ) return ; <nl> + this - > invalid_productions_ | = TP : : LetPatternProduction ; <nl> + this - > Add ( Error ( loc , message , ErrorKind : : kLetPatternProduction , arg ) ) ; <nl> } <nl> <nl> ExpressionClassifier * previous ( ) const { return previous_ ; } <nl> <nl> private : <nl> - V8_INLINE const Error & reported_error ( ErrorKind kind ) const { <nl> - if ( invalid_productions_ & ( 1 < < kind ) ) { <nl> - for ( int i = reported_errors_begin_ ; i < reported_errors_end_ ; i + + ) { <nl> - if ( reported_errors_ - > at ( i ) . kind = = kind ) <nl> - return reported_errors_ - > at ( i ) ; <nl> - } <nl> - UNREACHABLE ( ) ; <nl> - } <nl> - / / We should only be looking for an error when we know that one has <nl> - / / been reported . But we ' re not . . . So this is to make sure we have <nl> - / / the same behaviour . <nl> - UNREACHABLE ( ) ; <nl> - <nl> - / / Make MSVC happy by returning an error from this inaccessible path . 
<nl> - static Error none ; <nl> - return none ; <nl> - } <nl> - <nl> - / / Adds e to the end of the list of reported errors for this classifier . <nl> - / / It is expected that this classifier is the last one in the stack . <nl> - V8_INLINE void Add ( const Error & e ) { <nl> - DCHECK_EQ ( reported_errors_end_ , reported_errors_ - > length ( ) ) ; <nl> - reported_errors_ - > Add ( e , zone_ ) ; <nl> - reported_errors_end_ + + ; <nl> - } <nl> - <nl> - / / Copies the error at position i of the list of reported errors , so that <nl> - / / it becomes the last error reported for this classifier . Position i <nl> - / / could be either after the existing errors of this classifier ( i . e . , <nl> - / / in an inner classifier ) or it could be an existing error ( in case a <nl> - / / copy is needed ) . <nl> - V8_INLINE void Copy ( int i ) { <nl> - DCHECK_LT ( i , reported_errors_ - > length ( ) ) ; <nl> - if ( reported_errors_end_ ! = i ) <nl> - reported_errors_ - > at ( reported_errors_end_ ) = reported_errors_ - > at ( i ) ; <nl> - reported_errors_end_ + + ; <nl> - } <nl> - <nl> - typename Types : : Base * base_ ; <nl> ExpressionClassifier * previous_ ; <nl> - Zone * zone_ ; <nl> - ZoneList < Error > * reported_errors_ ; <nl> - DuplicateFinder * duplicate_finder_ ; <nl> - unsigned invalid_productions_ : 15 ; <nl> - unsigned is_non_simple_parameter_list_ : 1 ; <nl> - / / The uint16_t for reported_errors_begin_ and reported_errors_end_ will <nl> - / / not be enough in the case of a long series of expressions using nested <nl> - / / classifiers , e . g . , a long sequence of assignments , as in : <nl> - / / literals with spreads , as in : <nl> - / / var N = 65536 ; eval ( " var x ; " + " x = " . repeat ( N ) + " 42 " ) ; <nl> - / / This should not be a problem , as such things currently fail with a <nl> - / / stack overflow while parsing . <nl> - uint16_t reported_errors_begin_ ; <nl> - uint16_t reported_errors_end_ ; <nl> <nl> DISALLOW_COPY_AND_ASSIGN ( ExpressionClassifier ) ; <nl> } ; <nl> <nl> - <nl> # undef ERROR_CODES <nl> <nl> - <nl> } / / namespace internal <nl> } / / namespace v8 <nl> <nl> mmm a / src / parsing / parser - base . h <nl> ppp b / src / parsing / parser - base . h <nl> class ParserBase { <nl> <nl> void ReportClassifierError ( <nl> const typename ExpressionClassifier : : Error & error ) { <nl> - impl ( ) - > ReportMessageAt ( error . location , error . message , error . arg ) ; <nl> + if ( classifier ( ) - > does_error_reporting ( ) ) { <nl> + impl ( ) - > ReportMessageAt ( error . location , error . message , error . arg ) ; <nl> + } else { <nl> + impl ( ) - > ReportUnidentifiableError ( ) ; <nl> + } <nl> } <nl> <nl> void ValidateExpression ( bool * ok ) { <nl> ParserBase < Impl > : : ParseArrowFunctionLiteral ( <nl> int dummy_num_parameters = - 1 ; <nl> DCHECK_NE ( kind & FunctionKind : : kArrowFunction , 0 ) ; <nl> FunctionLiteral : : EagerCompileHint hint ; <nl> - bool parse_result = impl ( ) - > SkipFunction ( <nl> + bool did_preparse_successfully = impl ( ) - > SkipFunction ( <nl> nullptr , kind , FunctionLiteral : : kAnonymousExpression , <nl> formal_parameters . 
scope , & dummy_num_parameters , <nl> & produced_preparsed_scope_data , false , false , & hint , CHECK_OK ) ; <nl> - DCHECK ( parse_result ) ; <nl> - USE ( parse_result ) ; <nl> + <nl> DCHECK_NULL ( produced_preparsed_scope_data ) ; <nl> - / / Discard any queued destructuring assignments which appeared <nl> - / / in this function ' s parameter list , and which were adopted <nl> - / / into this function state , above . <nl> - function_state . RewindDestructuringAssignments ( 0 ) ; <nl> + <nl> + if ( did_preparse_successfully ) { <nl> + / / Discard any queued destructuring assignments which appeared <nl> + / / in this function ' s parameter list , and which were adopted <nl> + / / into this function state , above . <nl> + function_state . RewindDestructuringAssignments ( 0 ) ; <nl> + } else { <nl> + / / In case we did not sucessfully preparse the function because of an <nl> + / / unidentified error we do a full reparse to return the error . <nl> + Consume ( Token : : LBRACE ) ; <nl> + body = impl ( ) - > NewStatementList ( 8 ) ; <nl> + ParseFunctionBody ( body , impl ( ) - > NullIdentifier ( ) , kNoSourcePosition , <nl> + formal_parameters , kind , <nl> + FunctionLiteral : : kAnonymousExpression , ok ) ; <nl> + CHECK ( ! * ok ) ; <nl> + return impl ( ) - > NullExpression ( ) ; <nl> + } <nl> } else { <nl> Consume ( Token : : LBRACE ) ; <nl> body = impl ( ) - > NewStatementList ( 8 ) ; <nl> mmm a / src / parsing / parser . cc <nl> ppp b / src / parsing / parser . cc <nl> FunctionLiteral * Parser : : ParseFunctionLiteral ( <nl> / / abort lazy parsing if it suspects that wasn ' t a good idea . If so ( in <nl> / / which case the parser is expected to have backtracked ) , or if we didn ' t <nl> / / try to lazy parse in the first place , we ' ll have to parse eagerly . <nl> - bool did_preparse = <nl> + bool did_preparse_successfully = <nl> should_preparse & & <nl> SkipFunction ( function_name , kind , function_type , scope , & num_parameters , <nl> & produced_preparsed_scope_data , is_lazy_inner_function , <nl> is_lazy_top_level_function , & eager_compile_hint , CHECK_OK ) ; <nl> - if ( ! did_preparse ) { <nl> + if ( ! did_preparse_successfully ) { <nl> body = ParseFunction ( <nl> function_name , pos , kind , function_type , scope , & num_parameters , <nl> & function_length , & has_duplicate_parameters , & expected_property_count , <nl> FunctionLiteral * Parser : : ParseFunctionLiteral ( <nl> reinterpret_cast < const char * > ( function_name - > raw_data ( ) ) , <nl> function_name - > byte_length ( ) ) ; <nl> } <nl> - if ( V8_UNLIKELY ( FLAG_runtime_stats ) & & did_preparse ) { <nl> + if ( V8_UNLIKELY ( FLAG_runtime_stats ) & & did_preparse_successfully ) { <nl> const RuntimeCallCounterId counters [ 2 ] [ 2 ] = { <nl> { RuntimeCallCounterId : : kPreParseBackgroundNoVariableResolution , <nl> RuntimeCallCounterId : : kPreParseNoVariableResolution } , <nl> bool Parser : : SkipFunction ( <nl> / / Propagate stack overflow . <nl> set_stack_overflow ( ) ; <nl> * ok = false ; <nl> + } else if ( pending_error_handler ( ) - > ErrorUnidentifiableByPreParser ( ) ) { <nl> + / / If we encounter an error that the preparser can not identify we reset to <nl> + / / the state before preparsing . The caller may then fully parse the function <nl> + / / to identify the actual error . <nl> + bookmark . 
Apply ( ) ; <nl> + function_scope - > ResetAfterPreparsing ( ast_value_factory ( ) , true ) ; <nl> + pending_error_handler ( ) - > ResetUnidentifiableError ( ) ; <nl> + return false ; <nl> } else if ( pending_error_handler ( ) - > has_pending_error ( ) ) { <nl> * ok = false ; <nl> } else { <nl> mmm a / src / parsing / parser . h <nl> ppp b / src / parsing / parser . h <nl> struct ParserTypes < Parser > { <nl> <nl> typedef ParserTarget Target ; <nl> typedef ParserTargetScope TargetScope ; <nl> + <nl> + static constexpr bool ExpressionClassifierReportErrors = true ; <nl> } ; <nl> <nl> class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE ( ParserBase < Parser > ) { <nl> class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE ( ParserBase < Parser > ) { <nl> <nl> private : <nl> friend class ParserBase < Parser > ; <nl> - friend class v8 : : internal : : ExpressionClassifier < ParserTypes < Parser > > ; <nl> + friend class v8 : : internal : : ExpressionClassifierErrorTracker < <nl> + ParserTypes < Parser > > ; <nl> friend bool v8 : : internal : : parsing : : ParseProgram ( ParseInfo * , Isolate * ) ; <nl> friend bool v8 : : internal : : parsing : : ParseFunction ( <nl> ParseInfo * , Handle < SharedFunctionInfo > shared_info , Isolate * ) ; <nl> class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE ( ParserBase < Parser > ) { <nl> / / by parsing the function with PreParser . Consumes the ending } . <nl> / / If may_abort = = true , the ( pre - ) parser may decide to abort skipping <nl> / / in order to force the function to be eagerly parsed , after all . <nl> + / / In case the preparser detects an error it cannot identify , it resets the <nl> + / / scanner - and preparser state to the initial one , before PreParsing the <nl> + / / function . <nl> + / / SkipFunction returns true if it correctly parsed the function , including <nl> + / / cases where we detect an error . It returns false , if we needed to stop <nl> + / / parsing or could not identify an error correctly , meaning the caller needs <nl> + / / to fully reparse . In this case it resets the scanner and preparser state . <nl> bool SkipFunction ( const AstRawString * function_name , FunctionKind kind , <nl> FunctionLiteral : : FunctionType function_type , <nl> DeclarationScope * function_scope , int * num_parameters , <nl> class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE ( ParserBase < Parser > ) { <nl> arg , error_type ) ; <nl> } <nl> <nl> + / / Dummy implementation . The parser should never have a unidentifiable <nl> + / / error . <nl> + V8_INLINE void ReportUnidentifiableError ( ) { UNREACHABLE ( ) ; } <nl> + <nl> void ReportMessageAt ( Scanner : : Location source_location , <nl> MessageTemplate : : Template message , <nl> const AstRawString * arg , <nl> mmm a / src / parsing / preparser . cc <nl> ppp b / src / parsing / preparser . cc <nl> PreParser : : PreParseResult PreParser : : PreParseFunction ( <nl> function_scope - > set_is_being_lazily_parsed ( true ) ; <nl> # endif <nl> <nl> - DCHECK ( ! track_unresolved_variables_ ) ; <nl> track_unresolved_variables_ = <nl> ShouldTrackUnresolvedVariables ( is_inner_function ) ; <nl> <nl> PreParser : : PreParseResult PreParser : : PreParseFunction ( <nl> formals_classifier . reset ( new ExpressionClassifier ( this , & duplicate_finder ) ) ; <nl> / / We return kPreParseSuccess in failure cases too - errors are retrieved <nl> / / separately by Parser : : SkipLazyFunctionBody . 
<nl> - ParseFormalParameterList ( & formals , CHECK_OK_VALUE ( kPreParseSuccess ) ) ; <nl> + ParseFormalParameterList ( <nl> + & formals , <nl> + CHECK_OK_VALUE ( pending_error_handler ( ) - > ErrorUnidentifiableByPreParser ( ) <nl> + ? kPreParseNotIdentifiableError <nl> + : kPreParseSuccess ) ) ; <nl> Expect ( Token : : RPAREN , CHECK_OK_VALUE ( kPreParseSuccess ) ) ; <nl> int formals_end_position = scanner ( ) - > location ( ) . end_pos ; <nl> <nl> PreParser : : PreParseResult PreParser : : PreParseFunction ( <nl> track_unresolved_variables_ = false ; <nl> <nl> if ( result = = kLazyParsingAborted ) { <nl> + DCHECK ( ! pending_error_handler ( ) - > ErrorUnidentifiableByPreParser ( ) ) ; <nl> return kPreParseAbort ; <nl> } else if ( stack_overflow ( ) ) { <nl> + DCHECK ( ! pending_error_handler ( ) - > ErrorUnidentifiableByPreParser ( ) ) ; <nl> return kPreParseStackOverflow ; <nl> } else if ( ! * ok ) { <nl> + if ( pending_error_handler ( ) - > ErrorUnidentifiableByPreParser ( ) ) { <nl> + return kPreParseNotIdentifiableError ; <nl> + } <nl> DCHECK ( pending_error_handler ( ) - > has_pending_error ( ) ) ; <nl> } else { <nl> DCHECK_EQ ( Token : : RBRACE , scanner ( ) - > peek ( ) ) ; <nl> PreParser : : PreParseResult PreParser : : PreParseFunction ( <nl> const bool allow_duplicate_parameters = <nl> is_sloppy ( function_scope - > language_mode ( ) ) & & formals . is_simple & & <nl> ! IsConciseMethod ( kind ) ; <nl> - ValidateFormalParameters ( function_scope - > language_mode ( ) , <nl> - allow_duplicate_parameters , <nl> - CHECK_OK_VALUE ( kPreParseSuccess ) ) ; <nl> + ValidateFormalParameters ( <nl> + function_scope - > language_mode ( ) , allow_duplicate_parameters , <nl> + CHECK_OK_VALUE ( <nl> + pending_error_handler ( ) - > ErrorUnidentifiableByPreParser ( ) <nl> + ? kPreParseNotIdentifiableError <nl> + : kPreParseSuccess ) ) ; <nl> <nl> * produced_preparsed_scope_data = ProducedPreParsedScopeData : : For ( <nl> preparsed_scope_data_builder_ , main_zone ( ) ) ; <nl> } <nl> + DCHECK ( ! pending_error_handler ( ) - > ErrorUnidentifiableByPreParser ( ) ) ; <nl> <nl> if ( is_strict ( function_scope - > language_mode ( ) ) ) { <nl> int end_pos = scanner ( ) - > location ( ) . end_pos ; <nl> CheckStrictOctalLiteral ( function_scope - > start_position ( ) , end_pos , ok ) ; <nl> } <nl> } <nl> + <nl> + DCHECK ( ! pending_error_handler ( ) - > ErrorUnidentifiableByPreParser ( ) ) ; <nl> return kPreParseSuccess ; <nl> } <nl> <nl> mmm a / src / parsing / preparser . h <nl> ppp b / src / parsing / preparser . h <nl> struct ParserTypes < PreParser > { <nl> typedef PreParserFuncNameInferrer FuncNameInferrer ; <nl> typedef PreParserSourceRange SourceRange ; <nl> typedef PreParserSourceRangeScope SourceRangeScope ; <nl> + static constexpr bool ExpressionClassifierReportErrors = false ; <nl> } ; <nl> <nl> <nl> class PreParser : public ParserBase < PreParser > { <nl> enum PreParseResult { <nl> kPreParseStackOverflow , <nl> kPreParseAbort , <nl> + kPreParseNotIdentifiableError , <nl> kPreParseSuccess <nl> } ; <nl> <nl> class PreParser : public ParserBase < PreParser > { <nl> arg , error_type ) ; <nl> } <nl> <nl> + V8_INLINE void ReportUnidentifiableError ( ) { <nl> + pending_error_handler ( ) - > SetUnidentifiableError ( ) ; <nl> + } <nl> + <nl> V8_INLINE void ReportMessageAt ( Scanner : : Location source_location , <nl> MessageTemplate : : Template message , <nl> const PreParserIdentifier & arg , <nl> mmm a / src / pending - compilation - error - handler . 
h <nl> ppp b / src / pending - compilation - error - handler . h <nl> class PendingCompilationErrorHandler { <nl> <nl> Handle < String > FormatErrorMessageForTest ( Isolate * isolate ) const ; <nl> <nl> + bool SetUnidentifiableError ( ) { return unidentifiable_error_ = true ; } <nl> + <nl> + bool ResetUnidentifiableError ( ) { return unidentifiable_error_ = false ; } <nl> + <nl> + bool ErrorUnidentifiableByPreParser ( ) { return unidentifiable_error_ ; } <nl> + <nl> private : <nl> class MessageDetails { <nl> public : <nl> class PendingCompilationErrorHandler { <nl> <nl> bool has_pending_error_ ; <nl> bool stack_overflow_ ; <nl> + bool unidentifiable_error_ = false ; <nl> <nl> MessageDetails error_details_ ; <nl> ParseErrorType error_type_ ; <nl> mmm a / test / cctest / test - parsing . cc <nl> ppp b / test / cctest / test - parsing . cc <nl> void TestParserSyncWithFlags ( i : : Handle < i : : String > source , <nl> " However , the preparser succeeded " , <nl> source - > ToCString ( ) . get ( ) , message_string - > ToCString ( ) . get ( ) ) ; <nl> } <nl> - / / Check that preparser and parser produce the same error . <nl> - if ( test_preparser & & ! ignore_error_msg ) { <nl> + / / Check that preparser and parser produce the same error , except for cases <nl> + / / where we do not track errors in the preparser . <nl> + if ( test_preparser & & ! ignore_error_msg & & <nl> + ! pending_error_handler . ErrorUnidentifiableByPreParser ( ) ) { <nl> i : : Handle < i : : String > preparser_message = <nl> pending_error_handler . FormatErrorMessageForTest ( CcTest : : i_isolate ( ) ) ; <nl> if ( ! i : : String : : Equals ( isolate , message_string , preparser_message ) ) { <nl> TEST ( ErrorsFutureStrictReservedWords ) { <nl> { " ( ) = > { " , " } " } , <nl> { nullptr , nullptr } } ; <nl> const char * invalid_statements [ ] = { <nl> - FUTURE_STRICT_RESERVED_LEX_BINDINGS ( " let " ) nullptr } ; <nl> + FUTURE_STRICT_RESERVED_LEX_BINDINGS ( let ) nullptr } ; <nl> <nl> RunParserSyncTest ( non_strict_contexts , invalid_statements , kError ) ; <nl> } <nl>
|
[ preparser ] Remove ExpressionClassifier error tracking in the PreParser .
|
v8/v8
|
7b11480f3bd92317c3fe97ae51b11cb7e1d97ec2
|
2018-09-28T09:17:03Z
|
mmm a / tensorflow / compiler / xla / service / hlo_evaluator . cc <nl> ppp b / tensorflow / compiler / xla / service / hlo_evaluator . cc <nl> Status HloEvaluator : : HandleCall ( HloInstruction * call ) { <nl> } <nl> <nl> HloEvaluator embedded_evaluator ; <nl> - <nl> - Literal result ; <nl> - TF_ASSIGN_OR_RETURN ( result , embedded_evaluator . Evaluate < const Literal * > ( <nl> - * computation , arg_literals ) ) ; <nl> + TF_ASSIGN_OR_RETURN ( <nl> + Literal result , <nl> + embedded_evaluator . Evaluate < const Literal * > ( * computation , arg_literals ) ) ; <nl> <nl> evaluated_ [ call ] = std : : move ( result ) ; <nl> return Status : : OK ( ) ; <nl> Status HloEvaluator : : HandleFusion ( HloInstruction * fusion ) { <nl> } <nl> <nl> HloEvaluator embedded_evaluator ; <nl> - Literal result = <nl> - embedded_evaluator <nl> - . Evaluate < const Literal * > ( * readded_computation , arg_literals ) <nl> - . ConsumeValueOrDie ( ) ; <nl> + TF_ASSIGN_OR_RETURN ( Literal result , <nl> + embedded_evaluator . Evaluate < const Literal * > ( <nl> + * readded_computation , arg_literals ) ) ; <nl> <nl> evaluated_ [ fusion ] = std : : move ( result ) ; <nl> return Status : : OK ( ) ; <nl> Status HloEvaluator : : HandleConditional ( HloInstruction * conditional ) { <nl> HloEvaluator embedded_evaluator ; <nl> Literal result ; <nl> if ( pred . Get < bool > ( { } ) ) { <nl> - result = embedded_evaluator <nl> - . Evaluate < const Literal * > ( * true_computation , <nl> - { & true_computation_arg } ) <nl> - . ConsumeValueOrDie ( ) ; <nl> + TF_ASSIGN_OR_RETURN ( result , <nl> + embedded_evaluator . Evaluate < const Literal * > ( <nl> + * true_computation , { & true_computation_arg } ) ) ; <nl> } else { <nl> - result = embedded_evaluator <nl> - . Evaluate < const Literal * > ( * false_computation , <nl> - { & false_computation_arg } ) <nl> - . ConsumeValueOrDie ( ) ; <nl> + TF_ASSIGN_OR_RETURN ( result , <nl> + embedded_evaluator . Evaluate < const Literal * > ( <nl> + * false_computation , { & false_computation_arg } ) ) ; <nl> } <nl> <nl> evaluated_ [ conditional ] = std : : move ( result ) ; <nl>
|
Always use TF_ASSIGN_OR_RETURN for getting literals in hlo_evaluator
|
tensorflow/tensorflow
|
53dc5669184fba9f76c2fe7254fbeb78a85af588
|
2018-09-14T08:35:28Z
|
mmm a / doc / syntax / display - logic . md <nl> ppp b / doc / syntax / display - logic . md <nl> There are two attributes for display logic control : ` if ` and ` repeat ` . We can cr <nl> <nl> # # ` repeat ` <nl> <nl> - ` repeat ` statement is just for array rendering . Every item in an array is also a structed data . This means in ` repeat ` ed component , you can bind their item properties directly . <nl> + ` repeat ` statement is just for array rendering . Every item in an array is also a structured data . This means in ` repeat ` ed component , you can bind their item properties directly . <nl> <nl> ` ` ` html <nl> < template > <nl>
|
Update display - logic . md ( )
|
apache/incubator-weex
|
a2633cd57afcbc46874cd26763f0e276d6979670
|
2016-09-07T08:49:33Z
|
mmm a / src / ppc / assembler - ppc - inl . h <nl> ppp b / src / ppc / assembler - ppc - inl . h <nl> int Assembler : : GetConstantPoolOffset ( Address pc , <nl> void Assembler : : PatchConstantPoolAccessInstruction ( <nl> int pc_offset , int offset , ConstantPoolEntry : : Access access , <nl> ConstantPoolEntry : : Type type ) { <nl> - Address pc = reinterpret_cast < Address > ( buffer_ ) + pc_offset ; <nl> + Address pc = reinterpret_cast < Address > ( buffer_start_ ) + pc_offset ; <nl> bool overflowed = ( access = = ConstantPoolEntry : : OVERFLOWED ) ; <nl> CHECK ( overflowed ! = is_int16 ( offset ) ) ; <nl> # ifdef DEBUG <nl> mmm a / src / ppc / assembler - ppc . cc <nl> ppp b / src / ppc / assembler - ppc . cc <nl> void Assembler : : AllocateAndInstallRequestedHeapObjects ( Isolate * isolate ) { <nl> break ; <nl> } <nl> } <nl> - Address pc = reinterpret_cast < Address > ( buffer_ ) + request . offset ( ) ; <nl> + Address pc = reinterpret_cast < Address > ( buffer_start_ ) + request . offset ( ) ; <nl> Address constant_pool = kNullAddress ; <nl> set_target_address_at ( pc , constant_pool , object . address ( ) , <nl> SKIP_ICACHE_FLUSH ) ; <nl> void Assembler : : AllocateAndInstallRequestedHeapObjects ( Isolate * isolate ) { <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / Specific instructions , constants , and masks . <nl> <nl> - Assembler : : Assembler ( const AssemblerOptions & options , void * buffer , <nl> - int buffer_size ) <nl> - : AssemblerBase ( options , buffer , buffer_size ) , <nl> + Assembler : : Assembler ( const AssemblerOptions & options , <nl> + std : : unique_ptr < AssemblerBuffer > buffer ) <nl> + : AssemblerBase ( options , std : : move ( buffer ) ) , <nl> constant_pool_builder_ ( kLoadPtrMaxReachBits , kLoadDoubleMaxReachBits ) { <nl> - reloc_info_writer . Reposition ( buffer_ + buffer_size_ , pc_ ) ; <nl> + reloc_info_writer . Reposition ( buffer_start_ + buffer_ - > size ( ) , pc_ ) ; <nl> <nl> no_trampoline_pool_before_ = 0 ; <nl> trampoline_pool_blocked_nesting_ = 0 ; <nl> void Assembler : : GetCode ( Isolate * isolate , CodeDesc * desc ) { <nl> AllocateAndInstallRequestedHeapObjects ( isolate ) ; <nl> <nl> / / Set up code descriptor . <nl> - desc - > buffer = buffer_ ; <nl> - desc - > buffer_size = buffer_size_ ; <nl> + desc - > buffer = buffer_start_ ; <nl> + desc - > buffer_size = buffer_ - > size ( ) ; <nl> desc - > instr_size = pc_offset ( ) ; <nl> - desc - > reloc_size = ( buffer_ + buffer_size_ ) - reloc_info_writer . pos ( ) ; <nl> + desc - > reloc_size = <nl> + ( buffer_start_ + desc - > buffer_size ) - reloc_info_writer . pos ( ) ; <nl> desc - > constant_pool_size = constant_pool_size ; <nl> desc - > origin = this ; <nl> desc - > unwinding_info_size = 0 ; <nl> void Assembler : : target_at_put ( int pos , int target_pos , bool * is_branch ) { <nl> Register dst = Register : : from_code ( instr_at ( pos + kInstrSize ) ) ; <nl> int32_t offset = target_pos + ( Code : : kHeaderSize - kHeapObjectTag ) ; <nl> PatchingAssembler patcher ( options ( ) , <nl> - reinterpret_cast < byte * > ( buffer_ + pos ) , 2 ) ; <nl> + reinterpret_cast < byte * > ( buffer_start_ + pos ) , <nl> + 2 ) ; <nl> patcher . 
bitwise_mov32 ( dst , offset ) ; <nl> break ; <nl> } <nl> void Assembler : : target_at_put ( int pos , int target_pos , bool * is_branch ) { <nl> : ( SIGN_EXT_IMM22 ( operands & kImm22Mask ) ) ; <nl> int32_t offset = target_pos + delta ; <nl> PatchingAssembler patcher ( <nl> - options ( ) , reinterpret_cast < byte * > ( buffer_ + pos ) , <nl> + options ( ) , reinterpret_cast < byte * > ( buffer_start_ + pos ) , <nl> 2 + static_cast < int32_t > ( opcode = = kUnboundAddLabelLongOffsetOpcode ) ) ; <nl> patcher . bitwise_add32 ( dst , base , offset ) ; <nl> if ( opcode = = kUnboundAddLabelLongOffsetOpcode ) patcher . nop ( ) ; <nl> void Assembler : : target_at_put ( int pos , int target_pos , bool * is_branch ) { <nl> / / Load the address of the label in a register . <nl> Register dst = Register : : from_code ( instr_at ( pos + kInstrSize ) ) ; <nl> PatchingAssembler patcher ( options ( ) , <nl> - reinterpret_cast < byte * > ( buffer_ + pos ) , <nl> + reinterpret_cast < byte * > ( buffer_start_ + pos ) , <nl> kMovInstructionsNoConstantPool ) ; <nl> / / Keep internal references relative until EmitRelocations . <nl> patcher . bitwise_mov ( dst , target_pos ) ; <nl> void Assembler : : target_at_put ( int pos , int target_pos , bool * is_branch ) { <nl> } <nl> case kUnboundJumpTableEntryOpcode : { <nl> PatchingAssembler patcher ( options ( ) , <nl> - reinterpret_cast < byte * > ( buffer_ + pos ) , <nl> + reinterpret_cast < byte * > ( buffer_start_ + pos ) , <nl> kPointerSize / kInstrSize ) ; <nl> / / Keep internal references relative until EmitRelocations . <nl> patcher . dp ( target_pos ) ; <nl> bool Assembler : : IsNop ( Instr instr , int type ) { <nl> <nl> <nl> void Assembler : : GrowBuffer ( int needed ) { <nl> - if ( ! own_buffer_ ) FATAL ( " external code buffer is too small " ) ; <nl> + DCHECK_EQ ( buffer_start_ , buffer_ - > start ( ) ) ; <nl> <nl> / / Compute new buffer size . <nl> - CodeDesc desc ; / / the new buffer <nl> - if ( buffer_size_ < 4 * KB ) { <nl> - desc . buffer_size = 4 * KB ; <nl> - } else if ( buffer_size_ < 1 * MB ) { <nl> - desc . buffer_size = 2 * buffer_size_ ; <nl> - } else { <nl> - desc . buffer_size = buffer_size_ + 1 * MB ; <nl> - } <nl> - int space = buffer_space ( ) + ( desc . buffer_size - buffer_size_ ) ; <nl> - if ( space < needed ) { <nl> - desc . buffer_size + = needed - space ; <nl> - } <nl> + int old_size = buffer_ - > size ( ) ; <nl> + int new_size = std : : min ( 2 * old_size , old_size + 1 * MB ) ; <nl> + int space = buffer_space ( ) + ( new_size - old_size ) ; <nl> + new_size + = ( space < needed ) ? needed - space : 0 ; <nl> <nl> / / Some internal data structures overflow for very large buffers , <nl> / / they must ensure that kMaximalBufferSize is not too large . <nl> - if ( desc . buffer_size > kMaximalBufferSize ) { <nl> + if ( new_size > kMaximalBufferSize ) { <nl> V8 : : FatalProcessOutOfMemory ( nullptr , " Assembler : : GrowBuffer " ) ; <nl> } <nl> <nl> / / Set up new buffer . <nl> - desc . buffer = NewArray < byte > ( desc . buffer_size ) ; <nl> - desc . origin = this ; <nl> - <nl> - desc . instr_size = pc_offset ( ) ; <nl> - desc . reloc_size = ( buffer_ + buffer_size_ ) - reloc_info_writer . pos ( ) ; <nl> + std : : unique_ptr < AssemblerBuffer > new_buffer = buffer_ - > Grow ( new_size ) ; <nl> + DCHECK_EQ ( new_size , new_buffer - > size ( ) ) ; <nl> + byte * new_start = new_buffer - > start ( ) ; <nl> <nl> / / Copy the data . <nl> - intptr_t pc_delta = desc . buffer - buffer_ ; <nl> - intptr_t rc_delta = <nl> - ( desc . buffer + desc . 
buffer_size ) - ( buffer_ + buffer_size_ ) ; <nl> - memmove ( desc . buffer , buffer_ , desc . instr_size ) ; <nl> - memmove ( reloc_info_writer . pos ( ) + rc_delta , reloc_info_writer . pos ( ) , <nl> - desc . reloc_size ) ; <nl> + intptr_t pc_delta = new_start - buffer_start_ ; <nl> + intptr_t rc_delta = ( new_start + new_size ) - ( buffer_start_ + old_size ) ; <nl> + size_t reloc_size = ( buffer_start_ + old_size ) - reloc_info_writer . pos ( ) ; <nl> + MemMove ( new_start , buffer_start_ , pc_offset ( ) ) ; <nl> + MemMove ( reloc_info_writer . pos ( ) + rc_delta , reloc_info_writer . pos ( ) , <nl> + reloc_size ) ; <nl> <nl> / / Switch buffers . <nl> - DeleteArray ( buffer_ ) ; <nl> - buffer_ = desc . buffer ; <nl> - buffer_size_ = desc . buffer_size ; <nl> + buffer_ = std : : move ( new_buffer ) ; <nl> + buffer_start_ = new_start ; <nl> pc_ + = pc_delta ; <nl> reloc_info_writer . Reposition ( reloc_info_writer . pos ( ) + rc_delta , <nl> reloc_info_writer . last_pc ( ) + pc_delta ) ; <nl> <nl> - / / Nothing else to do here since we keep all internal references and <nl> - / / deferred relocation entries relative to the buffer ( until <nl> - / / EmitRelocations ) . <nl> + / / None of our relocation types are pc relative pointing outside the code <nl> + / / buffer nor pc absolute pointing inside the code buffer , so there is no need <nl> + / / to relocate any emitted relocation entries . <nl> } <nl> <nl> <nl> void Assembler : : EmitRelocations ( ) { <nl> for ( std : : vector < DeferredRelocInfo > : : iterator it = relocations_ . begin ( ) ; <nl> it ! = relocations_ . end ( ) ; it + + ) { <nl> RelocInfo : : Mode rmode = it - > rmode ( ) ; <nl> - Address pc = reinterpret_cast < Address > ( buffer_ ) + it - > position ( ) ; <nl> + Address pc = reinterpret_cast < Address > ( buffer_start_ ) + it - > position ( ) ; <nl> RelocInfo rinfo ( pc , rmode , it - > data ( ) , Code ( ) ) ; <nl> <nl> / / Fix up internal references now that they are guaranteed to be bound . <nl> if ( RelocInfo : : IsInternalReference ( rmode ) ) { <nl> / / Jump table entry <nl> intptr_t pos = static_cast < intptr_t > ( Memory < Address > ( pc ) ) ; <nl> - Memory < Address > ( pc ) = reinterpret_cast < Address > ( buffer_ ) + pos ; <nl> + Memory < Address > ( pc ) = reinterpret_cast < Address > ( buffer_start_ ) + pos ; <nl> } else if ( RelocInfo : : IsInternalReferenceEncoded ( rmode ) ) { <nl> / / mov sequence <nl> intptr_t pos = static_cast < intptr_t > ( target_address_at ( pc , kNullAddress ) ) ; <nl> - set_target_address_at ( pc , 0 , reinterpret_cast < Address > ( buffer_ ) + pos , <nl> + set_target_address_at ( pc , 0 , <nl> + reinterpret_cast < Address > ( buffer_start_ ) + pos , <nl> SKIP_ICACHE_FLUSH ) ; <nl> } <nl> <nl> void Assembler : : CheckTrampolinePool ( ) { <nl> <nl> PatchingAssembler : : PatchingAssembler ( const AssemblerOptions & options , <nl> byte * address , int instructions ) <nl> - : Assembler ( options , address , instructions * kInstrSize + kGap ) { <nl> - DCHECK_EQ ( reloc_info_writer . pos ( ) , buffer_ + buffer_size_ ) ; <nl> + : Assembler ( options , ExternalAssemblerBuffer ( <nl> + address , instructions * kInstrSize + kGap ) ) { <nl> + DCHECK_EQ ( reloc_info_writer . pos ( ) , buffer_start_ + buffer_ - > size ( ) ) ; <nl> } <nl> <nl> PatchingAssembler : : ~ PatchingAssembler ( ) { <nl> / / Check that the code was patched as expected . <nl> - DCHECK_EQ ( pc_ , buffer_ + buffer_size_ - kGap ) ; <nl> - DCHECK_EQ ( reloc_info_writer . 
pos ( ) , buffer_ + buffer_size_ ) ; <nl> + DCHECK_EQ ( pc_ , buffer_start_ + buffer_ - > size ( ) - kGap ) ; <nl> + DCHECK_EQ ( reloc_info_writer . pos ( ) , buffer_start_ + buffer_ - > size ( ) ) ; <nl> } <nl> <nl> } / / namespace internal <nl> mmm a / src / ppc / assembler - ppc . h <nl> ppp b / src / ppc / assembler - ppc . h <nl> class Assembler : public AssemblerBase { <nl> / / for a detailed comment on the layout ( globals . h ) . <nl> / / <nl> / / If the provided buffer is nullptr , the assembler allocates and grows its <nl> + / / own buffer . Otherwise it takes ownership of the provided buffer . <nl> + explicit Assembler ( const AssemblerOptions & , <nl> + std : : unique_ptr < AssemblerBuffer > = { } ) ; <nl> + <nl> + / / Legacy constructor . <nl> + / / If the provided buffer is nullptr , the assembler allocates and grows its <nl> / / own buffer , and buffer_size determines the initial buffer size . The buffer <nl> / / is owned by the assembler and deallocated upon destruction of the <nl> / / assembler . <nl> class Assembler : public AssemblerBase { <nl> / / buffer for code generation and assumes its size to be buffer_size . If the <nl> / / buffer is too small , a fatal error occurs . No deallocation of the buffer is <nl> / / done upon destruction of the assembler . <nl> - Assembler ( const AssemblerOptions & options , void * buffer , int buffer_size ) ; <nl> + / / <nl> + / / TODO ( clemensh ) : Remove this constructor , refactor all call sites to use the <nl> + / / one above . <nl> + Assembler ( const AssemblerOptions & options , void * buffer , int buffer_size ) <nl> + : Assembler ( options , buffer ? ExternalAssemblerBuffer ( buffer , buffer_size ) <nl> + : NewAssemblerBuffer ( <nl> + buffer_size ? buffer_size <nl> + : kMinimalBufferSize ) ) { } <nl> virtual ~ Assembler ( ) { } <nl> <nl> / / GetCode emits any pending ( non - emitted ) code and fills the descriptor <nl> class Assembler : public AssemblerBase { <nl> void dp ( uintptr_t data ) ; <nl> <nl> / / Read / patch instructions <nl> - Instr instr_at ( int pos ) { return * reinterpret_cast < Instr * > ( buffer_ + pos ) ; } <nl> + Instr instr_at ( int pos ) { <nl> + return * reinterpret_cast < Instr * > ( buffer_start_ + pos ) ; <nl> + } <nl> void instr_at_put ( int pos , Instr instr ) { <nl> - * reinterpret_cast < Instr * > ( buffer_ + pos ) = instr ; <nl> + * reinterpret_cast < Instr * > ( buffer_start_ + pos ) = instr ; <nl> } <nl> static Instr instr_at ( Address pc ) { return * reinterpret_cast < Instr * > ( pc ) ; } <nl> static void instr_at_put ( Address pc , Instr instr ) { <nl> mmm a / src / s390 / assembler - s390 . cc <nl> ppp b / src / s390 / assembler - s390 . cc <nl> void Assembler : : AllocateAndInstallRequestedHeapObjects ( Isolate * isolate ) { <nl> DCHECK_IMPLIES ( isolate = = nullptr , heap_object_requests_ . empty ( ) ) ; <nl> for ( auto & request : heap_object_requests_ ) { <nl> Handle < HeapObject > object ; <nl> - Address pc = reinterpret_cast < Address > ( buffer_ + request . offset ( ) ) ; <nl> + Address pc = reinterpret_cast < Address > ( buffer_start_ ) + request . offset ( ) ; <nl> switch ( request . kind ( ) ) { <nl> case HeapObjectRequest : : kHeapNumber : { <nl> object = <nl> void Assembler : : AllocateAndInstallRequestedHeapObjects ( Isolate * isolate ) { <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / Specific instructions , constants , and masks . 
<nl> <nl> - Assembler : : Assembler ( const AssemblerOptions & options , void * buffer , <nl> - int buffer_size ) <nl> - : AssemblerBase ( options , buffer , buffer_size ) { <nl> - reloc_info_writer . Reposition ( buffer_ + buffer_size_ , pc_ ) ; <nl> + Assembler : : Assembler ( const AssemblerOptions & options , <nl> + std : : unique_ptr < AssemblerBuffer > buffer ) <nl> + : AssemblerBase ( options , std : : move ( buffer ) ) { <nl> + reloc_info_writer . Reposition ( buffer_start_ + buffer_ - > size ( ) , pc_ ) ; <nl> ReserveCodeTargetSpace ( 100 ) ; <nl> last_bound_pos_ = 0 ; <nl> relocations_ . reserve ( 128 ) ; <nl> void Assembler : : GetCode ( Isolate * isolate , CodeDesc * desc ) { <nl> AllocateAndInstallRequestedHeapObjects ( isolate ) ; <nl> <nl> / / Set up code descriptor . <nl> - desc - > buffer = buffer_ ; <nl> - desc - > buffer_size = buffer_size_ ; <nl> + desc - > buffer = buffer_start_ ; <nl> + desc - > buffer_size = buffer_ - > size ( ) ; <nl> desc - > instr_size = pc_offset ( ) ; <nl> - desc - > reloc_size = ( buffer_ + buffer_size_ ) - reloc_info_writer . pos ( ) ; <nl> + desc - > reloc_size = <nl> + ( buffer_start_ + desc - > buffer_size ) - reloc_info_writer . pos ( ) ; <nl> desc - > constant_pool_size = 0 ; <nl> desc - > origin = this ; <nl> desc - > unwinding_info_size = 0 ; <nl> const int kEndOfChain = - 4 ; <nl> int Assembler : : target_at ( int pos ) { <nl> SixByteInstr instr = instr_at ( pos ) ; <nl> / / check which type of branch this is 16 or 26 bit offset <nl> - Opcode opcode = Instruction : : S390OpcodeValue ( buffer_ + pos ) ; <nl> + Opcode opcode = Instruction : : S390OpcodeValue ( buffer_start_ + pos ) ; <nl> <nl> if ( BRC = = opcode | | BRCT = = opcode | | BRCTG = = opcode | | BRXH = = opcode ) { <nl> int16_t imm16 = SIGN_EXT_IMM16 ( ( instr & kImm16Mask ) ) ; <nl> int Assembler : : target_at ( int pos ) { <nl> / / Update the target address of the current relative instruction . <nl> void Assembler : : target_at_put ( int pos , int target_pos , bool * is_branch ) { <nl> SixByteInstr instr = instr_at ( pos ) ; <nl> - Opcode opcode = Instruction : : S390OpcodeValue ( buffer_ + pos ) ; <nl> + Opcode opcode = Instruction : : S390OpcodeValue ( buffer_start_ + pos ) ; <nl> <nl> if ( is_branch ! = nullptr ) { <nl> * is_branch = ( opcode = = BRC | | opcode = = BRCT | | opcode = = BRCTG | | <nl> void Assembler : : target_at_put ( int pos , int target_pos , bool * is_branch ) { <nl> <nl> / / Returns the maximum number of bits given instruction can address . <nl> int Assembler : : max_reach_from ( int pos ) { <nl> - Opcode opcode = Instruction : : S390OpcodeValue ( buffer_ + pos ) ; <nl> + Opcode opcode = Instruction : : S390OpcodeValue ( buffer_start_ + pos ) ; <nl> / / Check which type of instr . In theory , we can return <nl> / / the values below + 1 , given offset is # of halfwords <nl> if ( BRC = = opcode | | BRCT = = opcode | | BRCTG = = opcode | | BRXH = = opcode | | <nl> void Assembler : : dumy ( int r1 , int x2 , int b2 , int d2 ) { <nl> } <nl> <nl> void Assembler : : GrowBuffer ( int needed ) { <nl> - if ( ! own_buffer_ ) FATAL ( " external code buffer is too small " ) ; <nl> + DCHECK_EQ ( buffer_start_ , buffer_ - > start ( ) ) ; <nl> <nl> / / Compute new buffer size . <nl> - CodeDesc desc ; / / the new buffer <nl> - if ( buffer_size_ < 4 * KB ) { <nl> - desc . buffer_size = 4 * KB ; <nl> - } else if ( buffer_size_ < 1 * MB ) { <nl> - desc . buffer_size = 2 * buffer_size_ ; <nl> - } else { <nl> - desc . 
buffer_size = buffer_size_ + 1 * MB ; <nl> - } <nl> - int space = buffer_space ( ) + ( desc . buffer_size - buffer_size_ ) ; <nl> - if ( space < needed ) { <nl> - desc . buffer_size + = needed - space ; <nl> - } <nl> + int old_size = buffer_ - > size ( ) ; <nl> + int new_size = std : : min ( 2 * old_size , old_size + 1 * MB ) ; <nl> + int space = buffer_space ( ) + ( new_size - old_size ) ; <nl> + new_size + = ( space < needed ) ? needed - space : 0 ; <nl> <nl> / / Some internal data structures overflow for very large buffers , <nl> / / they must ensure that kMaximalBufferSize is not too large . <nl> - if ( desc . buffer_size > kMaximalBufferSize ) { <nl> + if ( new_size > kMaximalBufferSize ) { <nl> V8 : : FatalProcessOutOfMemory ( nullptr , " Assembler : : GrowBuffer " ) ; <nl> } <nl> <nl> / / Set up new buffer . <nl> - desc . buffer = NewArray < byte > ( desc . buffer_size ) ; <nl> - desc . origin = this ; <nl> - <nl> - desc . instr_size = pc_offset ( ) ; <nl> - desc . reloc_size = ( buffer_ + buffer_size_ ) - reloc_info_writer . pos ( ) ; <nl> + std : : unique_ptr < AssemblerBuffer > new_buffer = buffer_ - > Grow ( new_size ) ; <nl> + DCHECK_EQ ( new_size , new_buffer - > size ( ) ) ; <nl> + byte * new_start = new_buffer - > start ( ) ; <nl> <nl> / / Copy the data . <nl> - intptr_t pc_delta = desc . buffer - buffer_ ; <nl> - intptr_t rc_delta = <nl> - ( desc . buffer + desc . buffer_size ) - ( buffer_ + buffer_size_ ) ; <nl> - memmove ( desc . buffer , buffer_ , desc . instr_size ) ; <nl> - memmove ( reloc_info_writer . pos ( ) + rc_delta , reloc_info_writer . pos ( ) , <nl> - desc . reloc_size ) ; <nl> + intptr_t pc_delta = new_start - buffer_start_ ; <nl> + intptr_t rc_delta = ( new_start + new_size ) - ( buffer_start_ + old_size ) ; <nl> + size_t reloc_size = ( buffer_start_ + old_size ) - reloc_info_writer . pos ( ) ; <nl> + MemMove ( new_start , buffer_start_ , pc_offset ( ) ) ; <nl> + MemMove ( reloc_info_writer . pos ( ) + rc_delta , reloc_info_writer . pos ( ) , <nl> + reloc_size ) ; <nl> <nl> / / Switch buffers . <nl> - DeleteArray ( buffer_ ) ; <nl> - buffer_ = desc . buffer ; <nl> - buffer_size_ = desc . buffer_size ; <nl> + buffer_ = std : : move ( new_buffer ) ; <nl> + buffer_start_ = new_start ; <nl> pc_ + = pc_delta ; <nl> reloc_info_writer . Reposition ( reloc_info_writer . pos ( ) + rc_delta , <nl> reloc_info_writer . last_pc ( ) + pc_delta ) ; <nl> void Assembler : : EmitRelocations ( ) { <nl> for ( std : : vector < DeferredRelocInfo > : : iterator it = relocations_ . begin ( ) ; <nl> it ! = relocations_ . end ( ) ; it + + ) { <nl> RelocInfo : : Mode rmode = it - > rmode ( ) ; <nl> - Address pc = reinterpret_cast < Address > ( buffer_ ) + it - > position ( ) ; <nl> + Address pc = reinterpret_cast < Address > ( buffer_start_ ) + it - > position ( ) ; <nl> RelocInfo rinfo ( pc , rmode , it - > data ( ) , Code ( ) ) ; <nl> <nl> / / Fix up internal references now that they are guaranteed to be bound . 
<nl> if ( RelocInfo : : IsInternalReference ( rmode ) ) { <nl> / / Jump table entry <nl> Address pos = Memory < Address > ( pc ) ; <nl> - Memory < Address > ( pc ) = reinterpret_cast < Address > ( buffer_ ) + pos ; <nl> + Memory < Address > ( pc ) = reinterpret_cast < Address > ( buffer_start_ ) + pos ; <nl> } else if ( RelocInfo : : IsInternalReferenceEncoded ( rmode ) ) { <nl> / / mov sequence <nl> Address pos = target_address_at ( pc , 0 ) ; <nl> - set_target_address_at ( pc , 0 , reinterpret_cast < Address > ( buffer_ ) + pos , <nl> + set_target_address_at ( pc , 0 , <nl> + reinterpret_cast < Address > ( buffer_start_ ) + pos , <nl> SKIP_ICACHE_FLUSH ) ; <nl> } <nl> <nl> mmm a / src / s390 / assembler - s390 . h <nl> ppp b / src / s390 / assembler - s390 . h <nl> class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { <nl> / / for a detailed comment on the layout ( globals . h ) . <nl> / / <nl> / / If the provided buffer is nullptr , the assembler allocates and grows its <nl> + / / own buffer . Otherwise it takes ownership of the provided buffer . <nl> + explicit Assembler ( const AssemblerOptions & , <nl> + std : : unique_ptr < AssemblerBuffer > = { } ) ; <nl> + <nl> + / / Legacy constructor . <nl> + / / If the provided buffer is nullptr , the assembler allocates and grows its <nl> / / own buffer , and buffer_size determines the initial buffer size . The buffer <nl> / / is owned by the assembler and deallocated upon destruction of the <nl> / / assembler . <nl> class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { <nl> / / buffer for code generation and assumes its size to be buffer_size . If the <nl> / / buffer is too small , a fatal error occurs . No deallocation of the buffer is <nl> / / done upon destruction of the assembler . <nl> - Assembler ( const AssemblerOptions & options , void * buffer , int buffer_size ) ; <nl> + / / <nl> + / / TODO ( clemensh ) : Remove this constructor , refactor all call sites to use the <nl> + / / one above . <nl> + Assembler ( const AssemblerOptions & options , void * buffer , int buffer_size ) <nl> + : Assembler ( options , buffer ? ExternalAssemblerBuffer ( buffer , buffer_size ) <nl> + : NewAssemblerBuffer ( <nl> + buffer_size ? 
buffer_size <nl> + : kMinimalBufferSize ) ) { } <nl> virtual ~ Assembler ( ) { } <nl> <nl> / / GetCode emits any pending ( non - emitted ) code and fills the descriptor <nl> inline void ss_a_format ( Opcode op , int f1 , int f2 , int f3 , int f4 , int f5 ) { <nl> <nl> / / Read / patch instructions <nl> SixByteInstr instr_at ( int pos ) { <nl> - return Instruction : : InstructionBits ( buffer_ + pos ) ; <nl> + return Instruction : : InstructionBits ( buffer_start_ + pos ) ; <nl> } <nl> template < typename T > <nl> void instr_at_put ( int pos , T instr ) { <nl> - Instruction : : SetInstructionBits < T > ( buffer_ + pos , instr ) ; <nl> + Instruction : : SetInstructionBits < T > ( buffer_start_ + pos , instr ) ; <nl> } <nl> <nl> / / Decodes instruction at pos , and returns its length <nl> int32_t instr_length_at ( int pos ) { <nl> - return Instruction : : InstructionLength ( buffer_ + pos ) ; <nl> + return Instruction : : InstructionLength ( buffer_start_ + pos ) ; <nl> } <nl> <nl> static SixByteInstr instr_at ( byte * pc ) { <nl> inline void ss_a_format ( Opcode op , int f1 , int f2 , int f3 , int f4 , int f5 ) { <nl> void emit_label_addr ( Label * label ) ; <nl> <nl> public : <nl> - byte * buffer_pos ( ) const { return buffer_ ; } <nl> + byte * buffer_pos ( ) const { return buffer_start_ ; } <nl> <nl> protected : <nl> int buffer_space ( ) const { return reloc_info_writer . pos ( ) - pc_ ; } <nl>
|
PPC / s390 : [ assembler ] Allow to pass custom buffer implementations
|
v8/v8
|
943b5d029dd749ee0d02be8e6a81ee3378d0d06b
|
2019-01-16T21:27:51Z
|
new file mode 100644 <nl> index 00000000000 . . b41cf1b9f1b <nl> Binary files / dev / null and b / samples / gpu / bowlingL . png differ <nl> new file mode 100644 <nl> index 00000000000 . . 7e0ff049967 <nl> Binary files / dev / null and b / samples / gpu / bowlingR . png differ <nl>
|
added images for gpu samples
|
opencv/opencv
|
937cbcecb6f4836bfe3246be6495c8acf4c76e06
|
2011-01-28T15:00:08Z
|
similarity index 100 % <nl> rename from hphp / test / zend / good / ext / sockets / tests / socket_read_params . php <nl> rename to hphp / test / zend / bad / ext / sockets / tests / socket_read_params . php <nl> similarity index 100 % <nl> rename from hphp / test / zend / good / ext / sockets / tests / socket_read_params . php . expectf <nl> rename to hphp / test / zend / bad / ext / sockets / tests / socket_read_params . php . expectf <nl> similarity index 100 % <nl> rename from hphp / test / zend / good / ext / sockets / tests / socket_read_params . php . skipif <nl> rename to hphp / test / zend / bad / ext / sockets / tests / socket_read_params . php . skipif <nl> mmm a / hphp / tools / import_zend_test . py <nl> ppp b / hphp / tools / import_zend_test . py <nl> <nl> ' / ext / standard / tests / file / copy_variation8 . php ' , <nl> <nl> # flaky : t3851970 <nl> + ' / ext / sockets / tests / socket_read_params . php ' , <nl> ' / ext / standard / tests / network / bug20134 . php ' , <nl> ' / ext / pcre / tests / preg_match_all_edit_basic . php ' , <nl> ' / ext / standard / tests / general_functions / bug39322 . php ' , <nl>
|
Disable flaky zend test
|
facebook/hhvm
|
78fd4f6c51fa5258b6f96c07337f02877c836504
|
2014-03-17T20:31:40Z
|