diff | msg | repo | sha | time |
---|---|---|---|---|
--- a/tensorflow/python/framework/python_op_gen.cc
+++ b/tensorflow/python/framework/python_op_gen.cc
 void GenEagerPythonOp::HandleGraphMode(
     // For list outputs, convert the right subrange of _result into a list.
     Unflatten("", output_sizes, "_result", &result_);
     // Convert to a named tuple.
-    strings::StrAppend(&result_, "_result = _", op_def_.name(),
-                       "Output._make(_result)\n");
+    strings::StrAppend(
+        &result_, "_result = _",
+        python_op_gen_internal::AvoidPythonReserved(op_def_.name()),
+        "Output._make(_result)\n");
   }
   strings::StrAppend(&result_, "return _result\n\n");
 } else {
 void GenEagerPythonOp::AddEagerFunctionTeardown(
     // For list outputs, convert the right subrange of _result into a list.
     Unflatten(indentation, output_sizes, "_result", &result_);
     // Convert to a named tuple.
-    strings::StrAppend(&result_, indentation, "_result = _", op_def_.name(),
-                       "Output._make(_result)\n");
+    strings::StrAppend(
+        &result_, indentation, "_result = _",
+        python_op_gen_internal::AvoidPythonReserved(op_def_.name()),
+        "Output._make(_result)\n");
   }
 } else {
   strings::StrAppend(&result_, indentation, "_result = None\n");
 void GenEagerPythonOp::AddEagerFastPathExecute() {
       "\n");
 
   if (op_def_.output_arg_size() > 1) {
-    const string output_tuple_name =
-        strings::StrCat("_", op_def_.name(), "Output");
+    const string output_tuple_name = strings::StrCat(
+        "_", python_op_gen_internal::AvoidPythonReserved(op_def_.name()),
+        "Output");
     strings::StrAppend(&result_, "  ", "_result = ", output_tuple_name,
                        "._make(_result)\n");
   }
--- a/tensorflow/python/framework/python_op_gen_internal.cc
+++ b/tensorflow/python/framework/python_op_gen_internal.cc
 limitations under the License.
 #include <unordered_map>
 
 #include "absl/strings/escaping.h"
+#include "absl/strings/str_replace.h"
 #include "tensorflow/core/framework/api_def.pb.h"
 #include "tensorflow/core/framework/attr_value.pb.h"
 #include "tensorflow/core/framework/op.h"
 bool IsOpWithUnderscorePrefix(const string& s) {
 }
 
 string AvoidPythonReserved(const string& s) {
-  const char namespace_separator = '>';
-  const char joiner = '_';
-  const int last_index = s.size();
-  string result;
-  for (int i = 0; i < last_index; ++i) {
-    const char c = s[i];
-    // Convert namespace separators ('>' characters) to joiners
-    if (c == namespace_separator) {
-      result.push_back(joiner);
-    } else {
-      result.push_back(c);
-    }
-  }
+  // Convert namespace separators ('>' characters) to joiners
+  string result = absl::StrReplaceAll(s, {{">", "_"}});
 
   if (IsPythonReserved(result)) return strings::StrCat(result, "_");
   return result;
 void GenPythonOp::AddOutputGlobals() {
   out_names.push_back(strings::StrCat("\"", out_name, "\""));
 }
 
-  strings::StrAppend(&prelude_, "_", op_def_.name(),
+  strings::StrAppend(&prelude_, "_", AvoidPythonReserved(op_def_.name()),
                      "Output = collections.namedtuple(\n");
-  strings::StrAppend(&prelude_, "\"", op_def_.name(), "\",\n");
+  strings::StrAppend(&prelude_, "\"", AvoidPythonReserved(op_def_.name()),
+                     "\",\n");
   strings::StrAppend(&prelude_, "[", absl::StrJoin(out_names, ", "),
                      "])");
   strings::StrAppend(&prelude_, "\n\n");
 void GenPythonOp::AddBody(const string& prefix) {
       prefix, "_result = _op_def_lib.apply_op(\"", op_def_.name(), "\", ");
   AddBodyNoReturn(apply_prefix);
   if (num_outs_ > 1) {
-    strings::StrAppend(&result_, prefix, "_result = _", op_def_.name(),
+    strings::StrAppend(&result_, prefix, "_result = _",
+                       AvoidPythonReserved(op_def_.name()),
                        "Output._make(_result)\n");
   }
   strings::StrAppend(&result_, prefix, "return _result\n");
--- a/tensorflow/python/framework/python_op_gen_internal.h
+++ b/tensorflow/python/framework/python_op_gen_internal.h
 bool IsPythonReserved(const string& s);
 bool IsOpWithUnderscorePrefix(const string& s);
 
 // Add a _ to the end of s if necessary to avoid a Python keyword or built-in.
+// Also convert namespace characters ('>') to '_' because python does not
+// support '>' in names
 string AvoidPythonReserved(const string& s);
 
 // Convert an AttrValue with type `type` to the Python representation for
--- a/tensorflow/python/framework/test_ops.cc
+++ b/tensorflow/python/framework/test_ops.cc
 REGISTER_OP("TestStringOutput")
     .Output("output2: string")
     .SetShapeFn(shape_inference::UnknownShape);
 
+REGISTER_OP("Namespace>TestStringOutput")
+    .Input("input: float")
+    .Output("output1: float")
+    .Output("output2: string")
+    .SetShapeFn(shape_inference::UnknownShape);
+
 REGISTER_OP("TestAttr")
     .Output("out: T")
     .Attr("T: {float, double}")
--- a/tensorflow/python/ops/gradients_test.py
+++ b/tensorflow/python/ops/gradients_test.py
 def _TestOpGrad(_, float_grad, string_grad):
       z = x * 2.0
       w = z * 3.0
       grads = gradients.gradients(z, [c])
-      self.assertTrue(isinstance(grads[0], ops.Tensor))
+      self.assertIsInstance(grads[0], ops.Tensor)
       grads = gradients.gradients(w, [c])
-      self.assertTrue(isinstance(grads[0], ops.Tensor))
+      self.assertIsInstance(grads[0], ops.Tensor)
+
+  def testNoGradientForStringOutputsWithOpNamespace(self):
+    with ops.Graph().as_default():
+
+      def _TestOpGrad(_, float_grad, string_grad):
+        """Gradient function for TestStringOutput."""
+        self.assertEqual(float_grad.dtype, dtypes.float32)
+        self.assertFalse(string_grad)
+        return float_grad
+
+      ops.RegisterGradient("Namespace>TestStringOutput")(_TestOpGrad)
+
+      c = constant(1.0)
+      x, _ = test_ops.namespace_test_string_output(c)
+      z = x * 2.0
+      w = z * 3.0
+      grads = gradients.gradients(z, [c])
+      self.assertIsInstance(grads[0], ops.Tensor)
+      grads = gradients.gradients(w, [c])
+      self.assertIsInstance(grads[0], ops.Tensor)
 
   def testSingletonIndexedSlices(self):
     with ops.Graph().as_default():
|
Yet another gen_file fix to support '>' namespace separators in op names. (In this case when an op has multiple named outputs)
|
tensorflow/tensorflow
|
e63e8446790b224c055aa74ff42b6ec4c7dbe323
|
2019-09-17T01:15:30Z
|
--- a/scripting/lua/cocos2dx_support/LuaCocos2d.cpp.REMOVED.git-id
+++ b/scripting/lua/cocos2dx_support/LuaCocos2d.cpp.REMOVED.git-id
@@ -1 +1 @@
-6a5779f5c60c7562d9d17fe70329db7e210fcf56
\ No newline at end of file
+913fef8b506bd074ec4c381fee8239424484f0ef
\ No newline at end of file
--- a/tools/tolua++/CCDrawingPrimitives.pkg
+++ b/tools/tolua++/CCDrawingPrimitives.pkg
 void ccDrawColor4F(GLubyte r, GLubyte g, GLubyte b, GLubyte a);
 void ccPointSize(GLfloat pointSize);
 
 // glew.h API:
-void glLineWidth(GLfloat width);
+// void glLineWidth(GLfloat width);
|
[Lua] remove glLineWidth()
|
cocos2d/cocos2d-x
|
c86595dddeb719c57ca2127eb3a79bad77ed9d4f
|
2012-09-01T17:18:28Z
|
--- a/hphp/compiler/analysis/emitter.cpp
+++ b/hphp/compiler/analysis/emitter.cpp
 */
 
 #include "hphp/compiler/analysis/emitter.h"
-
-#include "folly/ScopeGuard.h"
-
-#include <iostream>
-#include <iomanip>
-#include <vector>
-#include <algorithm>
-
-#include "hphp/util/logger.h"
-#include "hphp/util/util.h"
-#include "hphp/util/job_queue.h"
-#include "hphp/util/parser/hphp.tab.hpp"
-#include "hphp/runtime/vm/bytecode.h"
-#include "hphp/runtime/vm/repo.h"
-#include "hphp/runtime/vm/as.h"
-#include "hphp/runtime/base/stats.h"
-#include "hphp/runtime/base/runtime_option.h"
-#include "hphp/runtime/base/zend/zend_string.h"
-#include "hphp/runtime/base/type_conversions.h"
-#include "hphp/runtime/base/builtin_functions.h"
-#include "hphp/runtime/base/variable_serializer.h"
-#include "hphp/runtime/base/program_functions.h"
-#include "hphp/runtime/eval/runtime/file_repository.h"
-#include "hphp/runtime/ext_hhvm/ext_hhvm.h"
-
 #include "hphp/compiler/builtin_symbols.h"
 #include "hphp/compiler/analysis/class_scope.h"
 #include "hphp/compiler/analysis/file_scope.h"
 #include "hphp/compiler/analysis/function_scope.h"
 #include "hphp/compiler/analysis/peephole.h"
-
 #include "hphp/compiler/expression/array_element_expression.h"
 #include "hphp/compiler/expression/array_pair_expression.h"
 #include "hphp/compiler/expression/assignment_expression.h"
 #include "hphp/compiler/expression/static_member_expression.h"
 #include "hphp/compiler/expression/unary_op_expression.h"
 #include "hphp/compiler/expression/yield_expression.h"
-
 #include "hphp/compiler/statement/break_statement.h"
 #include "hphp/compiler/statement/case_statement.h"
 #include "hphp/compiler/statement/catch_statement.h"
 #include "hphp/compiler/statement/trait_prec_statement.h"
 #include "hphp/compiler/statement/trait_alias_statement.h"
 #include "hphp/compiler/statement/typedef_statement.h"
-
 #include "hphp/compiler/parser/parser.h"
 
+#include "hphp/util/logger.h"
+#include "hphp/util/util.h"
+#include "hphp/util/job_queue.h"
+#include "hphp/util/parser/hphp.tab.hpp"
+#include "hphp/runtime/vm/bytecode.h"
+#include "hphp/runtime/vm/repo.h"
+#include "hphp/runtime/vm/as.h"
+#include "hphp/runtime/base/stats.h"
+#include "hphp/runtime/base/runtime_option.h"
+#include "hphp/runtime/base/zend/zend_string.h"
+#include "hphp/runtime/base/type_conversions.h"
+#include "hphp/runtime/base/builtin_functions.h"
+#include "hphp/runtime/base/variable_serializer.h"
+#include "hphp/runtime/base/program_functions.h"
+#include "hphp/runtime/eval/runtime/file_repository.h"
+#include "hphp/runtime/ext_hhvm/ext_hhvm.h"
+
 #include "hphp/system/lib/systemlib.h"
 
+#include "folly/ScopeGuard.h"
+
+#include <iostream>
+#include <iomanip>
+#include <vector>
+#include <algorithm>
+
 namespace HPHP {
 namespace Compiler {
 ///////////////////////////////////////////////////////////////////////////////
 static Unit* emitHHBCNativeFuncUnit(const HhbcExtFuncInfo* builtinFuncs,
   mfe->finish(ue->bcPos(), false);
   ue->recordFunction(mfe);
 
+  TypedValue mainReturn;
+  mainReturn.m_data.num = 1;
+  mainReturn.m_type = KindOfInt64;
+  ue->setMainReturn(&mainReturn);
+  ue->setMergeOnly(true);
+
   /*
     Special function used by FPushCuf* when its argument
     is not callable.
   */
   StringData* name = StringData::GetStaticString("86null");
   FuncEmitter* fe = ue->newFuncEmitter(name, /* top */ true);
-  /*
-    Dont mark it AttrPersistent, because it would be
-    deleted, and we need to be able to find it in the
-    unit's m_mergeInfo
-  */
-  fe->init(0, 0, ue->bcPos(), AttrUnique,
+  fe->init(0, 0, ue->bcPos(), AttrUnique | AttrPersistent,
           true, empty_string.get());
   ue->emitOp(OpNull);
   ue->emitOp(OpRetC);
 static Unit* emitHHBCNativeClassUnit(const HhbcExtClassInfo* builtinClasses,
 
   TypedValue mainReturn;
   mainReturn.m_data.num = 1;
-  mainReturn.m_type = KindOfBoolean;
+  mainReturn.m_type = KindOfInt64;
   ue->setMainReturn(&mainReturn);
   ue->setMergeOnly(true);
 
--- a/hphp/hhvm/process_init.cpp
+++ b/hphp/hhvm/process_init.cpp
 void ProcessInit() {
   Unit* nativeFuncUnit = build_native_func_unit(hhbc_ext_funcs,
                                                 hhbc_ext_funcs_count);
   SystemLib::s_nativeFuncUnit = nativeFuncUnit;
-
   String currentDir = g_vmContext->getCwd();
 
   if (RuntimeOption::RepoAuthoritative) {
 void ProcessInit() {
 
   // load builtins
   SystemLib::s_nativeFuncUnit->merge();
+  SystemLib::s_nullFunc =
+    Unit::lookupFunc(StringData::GetStaticString("86null"));
 
   // We call a special bytecode emitter function to build the native
   // unit which will contain all of our cppext functions and classes.
--- a/hphp/runtime/vm/bytecode.cpp
+++ b/hphp/runtime/vm/bytecode.cpp
 inline void OPTBLD_INLINE VMExecutionContext::doFPushCuf(PC& pc,
   if (safe) m_stack.topTV()[1] = m_stack.topTV()[0];
   m_stack.ndiscard(1);
   if (f == nullptr) {
-    f = SystemLib::GetNullFunction();
+    f = SystemLib::s_nullFunc;
     if (safe) {
       m_stack.pushFalse();
     }
 void VMExecutionContext::requestInit() {
   tx64 = nextTx64;
   tx64->requestInit();
 
-  // Merge the systemlib unit into the ExecutionContext
-  SystemLib::s_unit->merge();
-  SystemLib::s_nativeFuncUnit->merge();
-  SystemLib::s_nativeClassUnit->merge();
+  if (UNLIKELY(RuntimeOption::EvalJitEnableRenameFunction)) {
+    SystemLib::s_unit->merge();
+    SystemLib::s_nativeFuncUnit->merge();
+    SystemLib::s_nativeClassUnit->merge();
+  } else {
+    // System units are always merge only, and
+    // everything is persistent.
+    assert(SystemLib::s_unit->isEmpty());
+    assert(SystemLib::s_nativeFuncUnit->isEmpty());
+    assert(SystemLib::s_nativeClassUnit->isEmpty());
+  }
+
   profileRequestStart();
 
 #ifdef DEBUG
--- a/hphp/runtime/vm/translator/targetcache.cpp
+++ b/hphp/runtime/vm/translator/targetcache.cpp
 CacheHandle allocKnownClass(const Class* cls) {
   if (ne->m_cachedClassOffset) return ne->m_cachedClassOffset;
 
   return allocKnownClass(ne,
-                         RuntimeOption::RepoAuthoritative &&
+                         (!SystemLib::s_inited ||
+                          RuntimeOption::RepoAuthoritative) &&
                          cls->verifyPersistent());
 }
 
--- a/hphp/runtime/vm/translator/translator-x64.cpp
+++ b/hphp/runtime/vm/translator/translator-x64.cpp
 int64_t checkClass(TargetCache::CacheHandle ch, StringData* clsName,
   VMRegAnchor _;
   AutoloadHandler::s_instance->invokeHandler(clsName->data());
   if (*(Class**)TargetCache::handleToPtr(ch)) return true;
-  ar->m_func = SystemLib::GetNullFunction();
+  ar->m_func = SystemLib::s_nullFunc;
   if (ar->hasThis()) {
     // cannot hit zero, we just inc'ed it
     ar->getThis()->decRefCount();
 static const Func* autoloadMissingFunc(const StringData* funcName,
     throw_invalid_argument("function: method '%s' not found",
                            funcName->data());
   }
-  return SystemLib::GetNullFunction();
+  return SystemLib::s_nullFunc;
 }
 
 void
 TranslatorX64::translateFPushCufOp(const Tracelet& t,
   emitVStackStore(astubs, ni, rax, funcOff);
   if (safe) {
     astubs.xorq(r(flag), r(flag));
-    astubs.cmpq(SystemLib::GetNullFunction(), rax);
+    astubs.cmpq(SystemLib::s_nullFunc, rax);
     astubs.setne(rbyte(flag));
   }
 }
--- a/hphp/runtime/vm/unit.cpp
+++ b/hphp/runtime/vm/unit.cpp
 void Unit::loadFunc(const Func* func) {
   assert(!func->isMethod());
   const NamedEntity* ne = func->getNamedEntity();
   if (UNLIKELY(!ne->m_cachedFuncOffset)) {
-    Transl::TargetCache::allocFixedFunction(ne,
-                                            func->attrs() & AttrPersistent &&
-                                            RuntimeOption::RepoAuthoritative);
+    Transl::TargetCache::allocFixedFunction(
+      ne, func->attrs() & AttrPersistent &&
+      (RuntimeOption::RepoAuthoritative || !SystemLib::s_inited));
   }
   const_cast<Func*>(func)->m_cachedOffset = ne->m_cachedFuncOffset;
 }
 void Unit::initialMerge() {
   unitInitLock.assertOwnedBySelf();
   if (LIKELY(m_mergeState == UnitMergeStateUnmerged)) {
     int state = 0;
+    bool needsCompact = false;
     m_mergeState = UnitMergeStateMerging;
 
     bool allFuncsUnique = RuntimeOption::RepoAuthoritative;
 void Unit::initialMerge() {
       allFuncsUnique = (f->attrs() & AttrUnique);
     }
     loadFunc(f);
+    if (TargetCache::isPersistentHandle(f->m_cachedOffset)) {
+      needsCompact = true;
+    }
   }
   if (allFuncsUnique) state |= UnitMergeStateUniqueFuncs;
-  if (!RuntimeOption::RepoAuthoritative) {
+  if (!RuntimeOption::RepoAuthoritative && SystemLib::s_inited) {
     Transl::mergePreConsts(m_preConsts);
   } else {
     /*
 void Unit::initialMerge() {
      * the pointer will be followed by a TypedValue representing
      * the value being defined/assigned.
      */
-    bool needsCompact = false;
     int ix = m_mergeInfo->m_firstHoistablePreClass;
     int end = m_mergeInfo->m_firstMergeablePreClass;
     while (ix < end) {
--- a/hphp/runtime/vm/unit.h
+++ b/hphp/runtime/vm/unit.h
 struct Unit {
   }
   bool isMergeOnly() const { return m_mergeOnly; }
   void clearMergeOnly() { m_mergeOnly = false; }
+  bool isEmpty() const { return m_mergeState & UnitMergeStateEmpty; }
   void* replaceUnit() const;
 public:
   static Mutex s_classesMutex;
--- a/hphp/system/lib/systemlib.cpp
+++ b/hphp/system/lib/systemlib.cpp
 bool SystemLib::s_inited = false;
 HPHP::Unit* SystemLib::s_unit = nullptr;
 HPHP::Unit* SystemLib::s_nativeFuncUnit = nullptr;
 HPHP::Unit* SystemLib::s_nativeClassUnit = nullptr;
+HPHP::Func* SystemLib::s_nullFunc = nullptr;
 
 #define DEFINE_SYSTEMLIB_CLASS(cls) \
   HPHP::Class* SystemLib::s_##cls##Class = nullptr;
 ObjectData* SystemLib::AllocKeyedIterableViewObject(CVarRef iterable) {
 
 #undef CREATE_AND_CONSTRUCT
 
-Func*
-SystemLib::GetNullFunction() {
-  Func* f = s_nativeFuncUnit->firstHoistable();
-  assert(!strcmp(f->name()->data(), "86null"));
-  return f;
-}
-
 ALLOC_OBJECT_STUB(Directory);
 ALLOC_OBJECT_STUB(RecursiveDirectoryIterator);
 ALLOC_OBJECT_STUB(PDOException);
--- a/hphp/system/lib/systemlib.h
+++ b/hphp/system/lib/systemlib.h
 namespace HPHP {
 ///////////////////////////////////////////////////////////////////////////////
 
 class ObjectData;
-class Unit;
-class Class;
-class Func;
+class Unit;
+class Class;
+class Func;
 namespace Eval {
   class PhpFile;
 }
 class SystemLib {
   SYSTEMLIB_CLASSES(DECLARE_SYSTEMLIB_CLASS)
 #undef DECLARE_SYSTEMLIB_CLASS
 
-  static HPHP::Func* GetNullFunction();
-
+  static HPHP::Func* s_nullFunc;
 
   static ObjectData* AllocStdClassObject();
   static ObjectData* AllocPinitSentinel();
|
Don't reinitialize native funcs
|
facebook/hhvm
|
5c491af2de6fa58ec8accda2359f1a7af43e651e
|
2013-05-20T20:52:29Z
|
--- a/stdlib/public/runtime/Errors.cpp
+++ b/stdlib/public/runtime/Errors.cpp
 #include <unwind.h>
 #endif
 
+#include <inttypes.h>
+
 namespace FatalErrorFlags {
 enum : uint32_t {
   ReportBacktrace = 1 << 0
|
Merge pull request from plotfi/master-dec6-1
|
apple/swift
|
c93d5095fc23300847178674231be3abadea6e82
|
2019-12-07T01:56:50Z
|
--- a/BUILD.gn
+++ b/BUILD.gn
 if (is_mac) {
   # TODO: move non-windows specific deps into the non-windows-specific list
   "//third_party/breakpad:breakpad_handler",
   "//third_party/breakpad:breakpad_sender",
-  "//third_party/webrtc/system_wrappers:metrics_default",
   "//ui/native_theme:native_theme_browser",
   "//ui/shell_dialogs",
   "//ui/views/controls/webview",
|
build: [gn win] fix webrtc link error
|
electron/electron
|
1aa16d0ac0e2a7b94fa8e85250b05a87b47b9007
|
2018-07-20T21:30:14Z
|
--- a/README.md
+++ b/README.md
 Contributing to FoundationDB can be in contributions to the code base, sharing y
 
 ### Binary downloads
 
-Developers interested in using the FoundationDB store for an application can get started easily by downloading and installing a binary package. Please see the [downloads page](https://www.foundationdb.org/download/) for a list of available packages.
+Developers interested in using FoundationDB can get started by downloading and installing a binary package. Please see the [downloads page](https://www.foundationdb.org/download/) for a list of available packages.
 
 
 ### Compiling from source
 Developers interested in using the FoundationDB store for an application can get
 Developers on an OS for which there is no binary package, or who would like
 to start hacking on the code, can get started by compiling from source.
 
-Currently there are two build systems: a collection of Makefiles and a
-CMake-based build system. Both of them should currently work for most users,
-and CMake should be the preferred choice as it will eventually become the only
-build system available.
+The official docker image for building is `foundationdb/foundationdb-build`. It has all dependencies installed. To build outside the official docker image you'll need at least these dependencies:
+
+1. Install cmake Version 3.13 or higher [CMake](https://cmake.org/)
+1. Install [Mono](http://www.mono-project.com/download/stable/)
+1. Install [Ninja](https://ninja-build.org/) (optional, but recommended)
 
 If compiling for local development, please set `-DUSE_WERROR=ON` in
 cmake. Our CI compiles with `-Werror` on, so this way you'll find out about
 compiler warnings that break the build earlier.
 
-## CMake
-
-To build with CMake, generally the following is required (works on Linux and
-Mac OS - for Windows see below):
+Once you have your dependencies, you can run cmake and then build:
 
 1. Check out this repository.
-1. Install cmake Version 3.13 or higher [CMake](https://cmake.org/)
-1. Download version 1.67 of [Boost](https://sourceforge.net/projects/boost/files/boost/1.67.0/).
-1. Unpack boost (you don't need to compile it)
-1. Install [Mono](http://www.mono-project.com/download/stable/).
-1. Install a [JDK](http://www.oracle.com/technetwork/java/javase/downloads/index.html). FoundationDB currently builds with Java 8.
 1. Create a build directory (you can have the build directory anywhere you
-   like): `mkdir build`
-1. `cd build`
-1. `cmake -GNinja -DBOOST_ROOT=<PATH_TO_BOOST> <PATH_TO_FOUNDATIONDB_DIRECTORY>`
-1. `ninja`
-
-CMake will try to find its dependencies. However, for LibreSSL this can be often
-problematic (especially if OpenSSL is installed as well). For that we recommend
-passing the argument `-DLibreSSL_ROOT` to cmake. So, for example, if you
-LibreSSL is installed under `/usr/local/libressl-2.8.3`, you should call cmake like
-this:
-
-```
-cmake -GNinja -DLibreSSL_ROOT=/usr/local/libressl-2.8.3/ ../foundationdb
-```
-
-FoundationDB will build just fine without LibreSSL, however, the resulting
-binaries won't support TLS connections.
+   like). There is currently a directory in the source tree called build, but you should not use it. See [#3098](https://github.com/apple/foundationdb/issues/3098)
+1. `cd <PATH_TO_BUILD_DIRECTORY>`
+1. `cmake -G Ninja <PATH_TO_FOUNDATIONDB_DIRECTORY>`
+1. `ninja # If this crashes it probably ran out of memory. Try ninja -j1`
 
 ### Language Bindings
 
 create a XCode-project with the following command:
 cmake -G Xcode -DOPEN_FOR_IDE=ON <FDB_SOURCE_DIRECTORY>
 ```
 
-You should create a second build-directory which you will use for building
-(probably with make or ninja) and debugging.
+You should create a second build-directory which you will use for building and debugging.
 
 #### FreeBSD
 
 There are no special requirements for Linux. A docker image can be pulled from
 `foundationdb/foundationdb-build` that has all of FoundationDB's dependencies
 pre-installed, and is what the CI uses to build and test PRs.
 
-If you want to create a package you have to tell cmake what platform it is for.
-And then you can build by simply calling `cpack`. So for debian, call:
-
 ```
-cmake -GNinja <FDB_SOURCE_DIR>
+cmake -G Ninja <FDB_SOURCE_DIR>
 ninja
 cpack -G DEB
 ```
 For RPM simply replace `DEB` with `RPM`.
 
 ### MacOS
 
-The build under MacOS will work the same way as on Linux. To get LibreSSL,
-boost, and ninja you can use [Homebrew](https://brew.sh/). LibreSSL will not be
-installed in `/usr/local` instead it will stay in `/usr/local/Cellar`. So the
-cmake command will look something like this:
+The build under MacOS will work the same way as on Linux. To get boost and ninja you can use [Homebrew](https://brew.sh/).
 
 ```sh
-cmake -GNinja -DLibreSSL_ROOT=/usr/local/Cellar/libressl/2.8.3 <PATH_TO_FOUNDATIONDB_SOURCE>
+cmake -G Ninja <PATH_TO_FOUNDATIONDB_SOURCE>
 ```
 
-To generate a installable package, you have to call CMake with the corresponding
-arguments and then use cpack to generate the package:
+To generate a installable package, you can use cpack:
 
 ```sh
-cmake -GNinja <FDB_SOURCE_DIR>
 ninja
 cpack -G productbuild
 ```
 that Visual Studio is used to compile.
 
 1. Install Visual Studio 2017 (Community Edition is tested)
 1. Install cmake Version 3.12 or higher [CMake](https://cmake.org/)
-1. Download version 1.67 of [Boost](https://sourceforge.net/projects/boost/files/boost/1.67.0/).
+1. Download version 1.72 of [Boost](https://dl.bintray.com/boostorg/release/1.72.0/source/boost_1_72_0.tar.bz2)
 1. Unpack boost (you don't need to compile it)
-1. Install [Mono](http://www.mono-project.com/download/stable/).
-1. Install a [JDK](http://www.oracle.com/technetwork/java/javase/downloads/index.html). FoundationDB currently builds with Java 8.
+1. Install [Mono](http://www.mono-project.com/download/stable/)
+1. (Optional) Install a [JDK](http://www.oracle.com/technetwork/java/javase/downloads/index.html). FoundationDB currently builds with Java 8
 1. Set `JAVA_HOME` to the unpacked location and JAVA_COMPILE to
    `$JAVA_HOME/bin/javac`.
-1. Install [Python](https://www.python.org/downloads/) if it is not already installed by Visual Studio.
+1. Install [Python](https://www.python.org/downloads/) if it is not already installed by Visual Studio
 1. (Optional) Install [WIX](http://wixtoolset.org/). Without it Visual Studio
-   won't build the Windows installer.
+   won't build the Windows installer
 1. Create a build directory (you can have the build directory anywhere you
    like): `mkdir build`
 1. `cd build`
 that Visual Studio is used to compile.
    Studio will only know about the generated files. `msbuild` is located at
    `c:\Program Files (x86)\MSBuild\14.0\Bin\MSBuild.exe` for Visual Studio 15.
 
-If you want TLS support to be enabled under Windows you currently have to build
-and install LibreSSL yourself as the newer LibreSSL versions are not provided
-for download from the LibreSSL homepage. To build LibreSSL:
-
-1. Download and unpack libressl (>= 2.8.2)
-2. `cd libressl-2.8.2`
-3. `mkdir build`
-4. `cd build`
-5. `cmake -G "Visual Studio 15 2017 Win64" ..`
-6. Open the generated `LibreSSL.sln` in Visual Studio as administrator (this is
-   necessary for the install)
-7. Build the `INSTALL` project in `Release` mode
-
-This will install LibreSSL under `C:\Program Files\LibreSSL`. After that `cmake`
-will automatically find it and build with TLS support.
-
 If you installed WIX before running `cmake` you should find the
 `FDBInstaller.msi` in your build directory under `packaging/msi`.
 
+TODO: Re-add instructions for TLS support [#3022](https://github.com/apple/foundationdb/issues/3022)
--- a/bindings/java/src/main/com/apple/foundationdb/tuple/FastByteComparisons.java
+++ b/bindings/java/src/main/com/apple/foundationdb/tuple/FastByteComparisons.java
 /*
- * ByteArrayUtil.java
+ * FastByteComparisons.java
  *
  * This source file is part of the FoundationDB open source project
  *
--- a/cmake/ConfigureCompiler.cmake
+++ b/cmake/ConfigureCompiler.cmake
 include(CheckFunctionExists)
 set(CMAKE_REQUIRED_INCLUDES stdlib.h malloc.h)
 set(CMAKE_REQUIRED_LIBRARIES c)
 set(CMAKE_CXX_STANDARD 17)
+set(CMAKE_CXX_STANDARD_REQUIRED ON)
 set(CMAKE_C_STANDARD 11)
+set(CMAKE_C_STANDARD_REQUIRED ON)
+
+if(NOT WIN32)
+  include(CheckIncludeFile)
+  CHECK_INCLUDE_FILE("stdatomic.h" HAS_C11_ATOMICS)
+  if(NOT HAS_C11_ATOMICS)
+    message(FATAL_ERROR "C compiler does not support c11 atomics")
+  endif()
+endif()
 
 if(WIN32)
   # see: https://docs.microsoft.com/en-us/windows/desktop/WinProg/using-the-windows-headers
--- a/contrib/transaction_profiling_analyzer/transaction_profiling_analyzer.py
+++ b/contrib/transaction_profiling_analyzer/transaction_profiling_analyzer.py
 def __init__(self, code, param_one, param_two):
 
 
 class BaseInfo(object):
-    def __init__(self, start_timestamp):
-        self.start_timestamp = start_timestamp
-
+    def __init__(self, bb, protocol_version):
+        self.start_timestamp = bb.get_double()
+        if protocol_version >= PROTOCOL_VERSION_6_3:
+            self.dc_id = bb.get_bytes_with_length()
 
 class GetVersionInfo(BaseInfo):
     def __init__(self, bb, protocol_version):
-        super().__init__(bb.get_double())
+        super().__init__(bb, protocol_version)
         self.latency = bb.get_double()
         if protocol_version >= PROTOCOL_VERSION_6_2:
             self.transaction_priority_type = bb.get_int()
         if protocol_version >= PROTOCOL_VERSION_6_3:
-            self.read_version = bb.get_long()
+            self.read_version = bb.get_long()
 
 class GetInfo(BaseInfo):
-    def __init__(self, bb):
-        super().__init__(bb.get_double())
+    def __init__(self, bb, protocol_version):
+        super().__init__(bb, protocol_version)
         self.latency = bb.get_double()
         self.value_size = bb.get_int()
         self.key = bb.get_bytes_with_length()
 
 
 class GetRangeInfo(BaseInfo):
-    def __init__(self, bb):
-        super().__init__(bb.get_double())
+    def __init__(self, bb, protocol_version):
+        super().__init__(bb, protocol_version)
         self.latency = bb.get_double()
         self.range_size = bb.get_int()
         self.key_range = bb.get_key_range()
 
 
 class CommitInfo(BaseInfo):
-    def __init__(self, bb, full_output=True):
-        super().__init__(bb.get_double())
+    def __init__(self, bb, protocol_version, full_output=True):
+        super().__init__(bb, protocol_version)
         self.latency = bb.get_double()
         self.num_mutations = bb.get_int()
         self.commit_bytes = bb.get_int()
-
+
         if protocol_version >= PROTOCOL_VERSION_6_3:
-            self.commit_version = bb.get_long()
+            self.commit_version = bb.get_long()
         read_conflict_range = bb.get_key_range_list()
         if full_output:
             self.read_conflict_range = read_conflict_range
 def __init__(self, bb, full_output=True):
 
 
 class ErrorGetInfo(BaseInfo):
-    def __init__(self, bb):
-        super().__init__(bb.get_double())
+    def __init__(self, bb, protocol_version):
+        super().__init__(bb, protocol_version)
         self.error_code = bb.get_int()
         self.key = bb.get_bytes_with_length()
 
 
 class ErrorGetRangeInfo(BaseInfo):
-    def __init__(self, bb):
-        super().__init__(bb.get_double())
+    def __init__(self, bb, protocol_version):
+        super().__init__(bb, protocol_version)
         self.error_code = bb.get_int()
         self.key_range = bb.get_key_range()
 
 
 class ErrorCommitInfo(BaseInfo):
-    def __init__(self, bb, full_output=True):
-        super().__init__(bb.get_double())
+    def __init__(self, bb, protocol_version, full_output=True):
+        super().__init__(bb, protocol_version)
         self.error_code = bb.get_int()
 
         read_conflict_range = bb.get_key_range_list()
 def __init__(self, bb, full_output=True, type_filter=None):
         if (not type_filter or "get_version" in type_filter):
             self.get_version = get_version
     elif event == 1:
-        get = GetInfo(bb)
+        get = GetInfo(bb, protocol_version)
         if (not type_filter or "get" in type_filter):
             # because of the crappy json serializtion using __dict__ we have to set the list here otherwise
             # it doesn't print
             if not self.gets: self.gets = []
             self.gets.append(get)
     elif event == 2:
-        get_range = GetRangeInfo(bb)
+        get_range = GetRangeInfo(bb, protocol_version)
         if (not type_filter or "get_range" in type_filter):
             if not self.get_ranges: self.get_ranges = []
             self.get_ranges.append(get_range)
     elif event == 3:
-        commit = CommitInfo(bb, full_output=full_output)
+        commit = CommitInfo(bb, protocol_version, full_output=full_output)
         if (not type_filter or "commit" in type_filter):
             self.commit = commit
     elif event == 4:
-        error_get = ErrorGetInfo(bb)
+        error_get = ErrorGetInfo(bb, protocol_version)
         if (not type_filter or "error_gets" in type_filter):
             if not self.error_gets: self.error_gets = []
             self.error_gets.append(error_get)
     elif event == 5:
-        error_get_range = ErrorGetRangeInfo(bb)
+        error_get_range = ErrorGetRangeInfo(bb, protocol_version)
         if (not type_filter or "error_get_range" in type_filter):
             if not self.error_get_ranges: self.error_get_ranges = []
             self.error_get_ranges.append(error_get_range)
     elif event == 6:
-        error_commit = ErrorCommitInfo(bb, full_output=full_output)
+        error_commit = ErrorCommitInfo(bb, protocol_version, full_output=full_output)
         if (not type_filter or "error_commit" in type_filter):
             if not self.error_commits: self.error_commits = []
             self.error_commits.append(error_commit)
 def print_range_boundaries(range_boundaries, context):
 
 if __name__ == "__main__":
     main()
-
--- a/design/special-key-space.md
+++ b/design/special-key-space.md
 Currently, there are several client functions implemented as FDB calls by passin
 - **cluster_file_path**: `get("\xff\xff/cluster_file_path)`
 - **connection_string**: `get("\xff\xff/connection_string)`
 - **worker_interfaces**: `getRange("\xff\xff/worker_interfaces", <any_key>)`
-- **conflicting-keys**: `getRange("\xff\xff/transaction/conflicting_keys/", "\xff\xff/transaction/conflicting_keys/\xff")`
+- **conflicting_keys**: `getRange("\xff\xff/transaction/conflicting_keys/", "\xff\xff/transaction/conflicting_keys/\xff")`
 
 At present, implementions are hard-coded and the pain points are obvious:
 - **Maintainability**: As more features added, the hard-coded snippets are hard to maintain
 ASSERT(
   res2[0].value == LiteralStringRef("London") &&
   res2[1].value == LiteralStringRef("Washington, D.C.")
 );
-```
\ No newline at end of file
+```
+
+## Module
+We introduce this `module` concept after a [discussion](https://forums.foundationdb.org/t/versioning-of-special-key-space/2068) on cross module read on special-key-space. By default, range reads cover more than one module will not be allowed with `special_keys_cross_module_read` errors. In addition, range reads touch no modules will come with `special_keys_no_module_found` errors. The motivation here is to avoid unexpected blocking or errors happen in a wide-scope range read. In particular, you write code `getRange("A", "Z")` when all registered calls between `[A, Z)` happen locally, thus your code does not have any error-handling. However, if in the future, anyone register a new call in `[A, Z)` and sometimes throw errors like `time_out()`, then your original code is broken. The `module` is like a top-level directory where inside the module, calls are homogeneous. So we allow cross range read inside each module by default but cross module reads are forbidden. Right now, there are two modules available to use:
+
+- TRANSACTION: `\xff\xff/transaction/, \xff\xff/transaction0`, all transaction related information like *read_conflict_range*, *write_conflict_range*, *conflicting_keys*. (All happen locally). Right now we have:
+  - `\xff\xff/transaction/conflicting_keys/, \xff\xff/transaction/conflicting_keys0`: conflicting keys that caused conflicts
+  - `\xff\xff/transaction/read_conflict_range/, \xff\xff/transaction/read_conflict_range0`: read conflict ranges of the transaction
+  - `\xff\xff/transaction/write_conflict_range/, \xff\xff/transaction/write_conflict_range0`: write conflict ranges of the transaction
+- METRICS: `\xff\xff/metrics/, \xff\xff/metrics0`, all metrics like data-distribution metrics or healthy metrics are planned to put here. All need to call the rpc, so time_out errors may happen. Right now we have:
+  - `\xff\xff/metrics/data_distribution_stats, \xff\xff/metrics/data_distribution_stats`: stats info about data-distribution
+- WORKERINTERFACE: `\xff\xff/worker_interfaces/, \xff\xff/worker_interfaces0`, which is compatible with previous implementation, thus should not be used to add new functions.
+
+In addition, all singleKeyRanges are formatted as modules and cannot be used again. In particular, you should call `get` not `getRange` on these keys. Below are existing ones:
+
+- STATUSJSON: `\xff\xff/status/json`
+- CONNECTIONSTRING: `\xff\xff/connection_string`
+- CLUSTERFILEPATH: `\xff\xff/cluster_file_path`
\ No newline at end of file
--- a/documentation/sphinx/source/mr-status-json-schemas.rst.inc
+++ b/documentation/sphinx/source/mr-status-json-schemas.rst.inc
 },
 "limiting_queue_bytes_storage_server": 0,
 "worst_queue_bytes_storage_server": 0,
-"limiting_version_lag_storage_server": 0,
-"worst_version_lag_storage_server": 0,
 "limiting_data_lag_storage_server": {
   "versions": 0,
   "seconds": 0.0
--- a/fdbbackup/FileDecoder.actor.cpp
+++ b/fdbbackup/FileDecoder.actor.cpp
 struct VersionedMutations {
  */
 struct DecodeProgress {
   DecodeProgress() = default;
-  DecodeProgress(const LogFile& file, std::vector<std::tuple<Arena, Version, int32_t, StringRef>> values)
-      : file(file), keyValues(values) {}
+  template <class U>
+  DecodeProgress(const LogFile& file, U&& values)
+      : file(file), keyValues(std::forward<U>(values)) {}
 
   // If there are no more mutations to pull from the file.
   // However, we could have unfinished version in the buffer when EOF is true,
 struct DecodeProgress {
   // should call getUnfinishedBuffer() to get these left data.
   bool finished() { return (eof && keyValues.empty()) || (leftover && !keyValues.empty()); }
 
-  std::vector<std::tuple<Arena, Version, int32_t, StringRef>>&& getUnfinishedBuffer() { return std::move(keyValues); }
+  std::vector<std::tuple<Arena, Version, int32_t, StringRef>>&& getUnfinishedBuffer() && { return std::move(keyValues); }
 
   // Returns all mutations of the next version in a batch.
   Future<VersionedMutations> getNextBatch() { return getNextBatchImpl(this); }
 ACTOR Future<Void> decode_logs(DecodeParams params) {
   for (; i < logs.size(); i++) {
     if (logs[i].fileSize == 0) continue;
 
-    state DecodeProgress progress(logs[i], left);
+    state DecodeProgress progress(logs[i], std::move(left));
     wait(progress.openFile(container));
     while (!progress.finished()) {
       VersionedMutations vms = wait(progress.getNextBatch());
 ACTOR Future<Void> decode_logs(DecodeParams params) {
       std::cout << vms.version << " " << m.toString() << "\n";
     }
   }
-  left = progress.getUnfinishedBuffer();
+  left = std::move(progress).getUnfinishedBuffer();
   if (!left.empty()) {
     TraceEvent("UnfinishedFile").detail("File", logs[i].fileName).detail("Q", left.size());
   }
--- a/fdbbackup/backup.actor.cpp
+++ b/fdbbackup/backup.actor.cpp
 using std::endl;
 #endif
 #endif
 
-#include "fdbclient/IncludeVersions.h"
+#include "fdbclient/versions.h"
 
 #include "flow/SimpleOpt.h"
 #include "flow/actorcompiler.h"  // This must be the last #include.
 CSimpleOpt::SOption g_rgRestoreOptions[] = {
   { OPT_RESTORE_TIMESTAMP, "--timestamp", SO_REQ_SEP },
   { OPT_KNOB, "--knob_", SO_REQ_SEP },
   { OPT_RESTORECONTAINER, "-r", SO_REQ_SEP },
-  { OPT_PREFIX_ADD, "-add_prefix", SO_REQ_SEP }, // TODO: Remove in 6.3
   { OPT_PREFIX_ADD, "--add_prefix", SO_REQ_SEP },
-  { OPT_PREFIX_REMOVE, "-remove_prefix", SO_REQ_SEP }, // TODO: Remove in 6.3
   { OPT_PREFIX_REMOVE, "--remove_prefix", SO_REQ_SEP },
   { OPT_TAGNAME, "-t", SO_REQ_SEP },
   { OPT_TAGNAME, "--tagname", SO_REQ_SEP },
 extern uint8_t* g_extra_memory;
 int main(int argc, char* argv[]) {
   platformInit();
 
-  int status = FDB_EXIT_SUCCESS;
+  int status = FDB_EXIT_SUCCESS;
+
+  std::string commandLine;
+  for (int a = 0; a < argc; a++) {
+    if (a) commandLine += ' ';
+    commandLine += argv[a];
+  }
 
   try {
 #ifdef ALLOC_INSTRUMENTATION
 int main(int argc, char* argv[]) {
     args = NULL;
   }
 
-  std::string commandLine;
-  for (int a = 0; a < argc; a++) {
-    if (a) commandLine += ' ';
-    commandLine += argv[a];
-  }
-
   delete FLOW_KNOBS;
   FlowKnobs* flowKnobs = new FlowKnobs;
   FLOW_KNOBS = flowKnobs;
--- a/fdbcli/fdbcli.actor.cpp
+++ b/fdbcli/fdbcli.actor.cpp
 #include "fdbcli/linenoise/linenoise.h"
 #endif
 
-#include "fdbclient/IncludeVersions.h"
+#include "fdbclient/versions.h"
 
 #include "flow/actorcompiler.h"  // This must be the last #include.
 
 Reference<ReadYourWritesTransaction> getTransaction(Database db, Reference<ReadY
   return tr;
 }
 
-std::string new_completion(const char* base, const char* name) {
+std::string newCompletion(const char* base, const char* name) {
   return format("%s%s ", base, name);
 }
 
-void comp_generator(const char* text, bool help, std::vector<std::string>& lc) {
+void compGenerator(const char* text, bool help, std::vector<std::string>& lc) {
   std::map<std::string, CommandHelp>::const_iterator iter;
   int len = strlen(text);
 
 void compGenerator(const char* text, bool help, std::vector<std::string>& lc) {
   for (auto iter = helpMap.begin(); iter != helpMap.end(); ++iter) {
     const char* name = (*iter).first.c_str();
     if (!strncmp(name, text, len)) {
-      lc.push_back(new_completion(help ? "help " : "", name));
+      lc.push_back(newCompletion(help ? "help " : "", name));
     }
   }
 
 void compGenerator(const char* text, bool help, std::vector<std::string>& lc) {
     const char* name = *he;
     he++;
     if (!strncmp(name, text, len))
-      lc.push_back(new_completion("help ", name));
+      lc.push_back(newCompletion("help ", name));
   }
 }
 }
 
-void cmd_generator(const char* text, std::vector<std::string>& lc) {
-  comp_generator(text, false, lc);
+void cmdGenerator(const char* text, std::vector<std::string>& lc) {
+  compGenerator(text, false, lc);
 }
 
-void help_generator(const char* text, std::vector<std::string>& lc) {
-  comp_generator(text, true, lc);
+void helpGenerator(const char* text, std::vector<std::string>& lc) {
+  compGenerator(text, true, lc);
 }
 
-void option_generator(const char* text, const char* line, std::vector<std::string>& lc) {
+void optionGenerator(const char* text, const char* line, std::vector<std::string>& lc) {
   int len = strlen(text);
 
   for (auto iter = validOptions.begin(); iter != validOptions.end(); ++iter) {
     const char* name = (*iter).c_str();
     if (!strncmp(name, text, len)) {
-      lc.push_back(new_completion(line, name));
+      lc.push_back(newCompletion(line, name));
     }
   }
 }
 
-void array_generator(const char* text, const char* line, const char** options, std::vector<std::string>& lc) {
+void arrayGenerator(const char* text, const char* line, const char** options, std::vector<std::string>& lc) {
   const char** iter = options;
   int len = strlen(text);
 
 void arrayGenerator(const char* text, const char* line, const char** options, s
     const char* name = *iter;
     iter++;
     if (!strncmp(name, text, len)) {
-      lc.push_back(new_completion(line, name));
+      lc.push_back(newCompletion(line, name));
     }
   }
 }
 
-void onoff_generator(const char* text, const char* line, std::vector<std::string>& lc) {
-  const char* opts[] = {"on", "off", NULL};
-  array_generator(text, line, opts, lc);
+void onOffGenerator(const char* text, const char* line, std::vector<std::string>& lc) {
+  const char* opts[] = {"on", "off", nullptr};
+  arrayGenerator(text, line, opts, lc);
 }
 
-void configure_generator(const char* text, const char* line, std::vector<std::string>& lc) {
-  const char* opts[] = {"new", "single", "double", "triple", "three_data_hall", "three_datacenter", "ssd", "ssd-1", "ssd-2", "memory", "memory-1", "memory-2", "memory-radixtree-beta", "proxies=", "logs=", "resolvers=", NULL};
-  array_generator(text, line, opts, lc);
+void configureGenerator(const char* text, const char* line, std::vector<std::string>& lc) {
+  const char* opts[] = {"new", "single", "double", "triple", "three_data_hall", "three_datacenter", "ssd", "ssd-1", "ssd-2", "memory", "memory-1", "memory-2", "memory-radixtree-beta", "proxies=", "logs=", "resolvers=", nullptr};
+  arrayGenerator(text, line, opts, lc);
 }
 
-void status_generator(const char* text, const char* line, std::vector<std::string>& lc) {
-  const char* opts[] = {"minimal", "details", "json", NULL};
-  array_generator(text, line, opts, lc);
+void statusGenerator(const char* text, const char* line, std::vector<std::string>& lc) {
+  const char* opts[] = {"minimal", "details", "json", nullptr};
+  arrayGenerator(text, line, opts, lc);
 }
 
-void kill_generator(const char* text, const char* line, std::vector<std::string>& lc) {
-  const char* opts[] = {"all", "list", NULL};
-  array_generator(text, line, opts, lc);
+void killGenerator(const char* text, const char* line, std::vector<std::string>& lc) {
+  const char* opts[] = {"all", "list", nullptr};
+  arrayGenerator(text, line, opts, lc);
 }
 
-void fdbcli_comp_cmd(std::string const& text, std::vector<std::string>& lc) {
+void throttleGenerator(const char* text, const char* line, std::vector<std::string>& lc, std::vector<StringRef> const& tokens) {
+  if(tokens.size() == 1) {
+    const char* opts[] = {"on tag", "off", "enable auto", "disable auto", "list", nullptr};
+    arrayGenerator(text, line, opts, lc);
+  }
+  else if(tokens.size() >= 2 && tokencmp(tokens[1], "on")) {
+    if(tokens.size() == 2) {
+      const char* opts[] = {"tag", nullptr};
+      arrayGenerator(text, line, opts, lc);
+    }
+    else if(tokens.size() == 6) {
+      const char* opts[] = {"default", "immediate", "batch", nullptr};
+      arrayGenerator(text, line, opts, lc);
+    }
+  }
+  else if(tokens.size() >= 2 && tokencmp(tokens[1], "off") && !tokencmp(tokens[tokens.size()-1], "tag")) {
+    const char* opts[] = {"all", "auto", "manual", "tag", "default", "immediate", "batch", nullptr};
+    arrayGenerator(text, line, opts, lc);
+  }
+  else if(tokens.size() == 2 && tokencmp(tokens[1], "enable") || tokencmp(tokens[1], "disable")) {
+    const char* opts[] = {"auto", nullptr};
+    arrayGenerator(text, line, opts, lc);
+  }
+}
+
+void fdbcliCompCmd(std::string const& text, std::vector<std::string>& lc) {
   bool err, partial;
   std::string whole_line = text;
   auto parsed = parseLine(whole_line, err, partial);
 void fdbcliCompCmd(std::string const& text, std::vector<std::string>& lc) {
   //printf("final text (%d tokens): `%s' & `%s'\n", count, base_input.c_str(), ntext.c_str());
 
   if (!count) {
-    cmd_generator(ntext.c_str(), lc);
+    cmdGenerator(ntext.c_str(), lc);
     return;
   }
 
   if (tokencmp(tokens[0], "help") && count == 1) {
-    help_generator(ntext.c_str(), lc);
+    helpGenerator(ntext.c_str(), lc);
     return;
   }
 
   if (tokencmp(tokens[0], "option")) {
     if (count == 1)
-      onoff_generator(ntext.c_str(), base_input.c_str(), lc);
+      onOffGenerator(ntext.c_str(), base_input.c_str(), lc);
     if (count == 2)
-      option_generator(ntext.c_str(), base_input.c_str(), lc);
+      optionGenerator(ntext.c_str(), base_input.c_str(), lc);
   }
 
   if (tokencmp(tokens[0], "writemode") && count == 1) {
-    onoff_generator(ntext.c_str(), base_input.c_str(), lc);
+    onOffGenerator(ntext.c_str(), base_input.c_str(), lc);
   }
 
   if (tokencmp(tokens[0], "configure")) {
-    configure_generator(ntext.c_str(), base_input.c_str(), lc);
+    configureGenerator(ntext.c_str(), base_input.c_str(), lc);
   }
 
   if (tokencmp(tokens[0], "status") && count == 1) {
-    status_generator(ntext.c_str(), base_input.c_str(), lc);
+    statusGenerator(ntext.c_str(), base_input.c_str(), lc);
   }
 
   if (tokencmp(tokens[0], "kill") && count == 1) {
-    kill_generator(ntext.c_str(), base_input.c_str(), lc);
+    killGenerator(ntext.c_str(), base_input.c_str(), lc);
+  }
+
+  if (tokencmp(tokens[0], "throttle")) {
+    throttleGenerator(ntext.c_str(), base_input.c_str(), lc, tokens);
   }
 }
 
+std::vector<const char*> throttleHintGenerator(std::vector<StringRef> const& tokens, bool inArgument) {
+  if(tokens.size() == 1) {
+    return { "<on|off|enable auto|disable auto|list>", "[ARGS]" };
+  }
+  else if(tokencmp(tokens[1], "on")) {
+    std::vector<const char*> opts = { "tag", "<TAG>", "[RATE]", "[DURATION]", "[default|immediate|batch]" };
+    if(tokens.size() == 2) {
+      return opts;
+    }
+    else if(((tokens.size() == 3 && inArgument) || tokencmp(tokens[2], "tag")) && tokens.size() < 7) {
+      return std::vector<const char*>(opts.begin() + tokens.size() - 2, opts.end());
+    }
+  }
+  else if(tokencmp(tokens[1], "off")) {
+    if(tokencmp(tokens[tokens.size()-1], "tag")) {
+      return { "<TAG>" };
+    }
+    else {
+      bool hasType = false;
+      bool hasTag = false;
+      bool hasPriority = false;
+      for(int i = 2; i < tokens.size(); ++i) {
+        if(tokencmp(tokens[i], "all") || tokencmp(tokens[i], "auto") || tokencmp(tokens[i], "manual")) {
+          hasType = true;
+        }
+        else if(tokencmp(tokens[i], "default") || tokencmp(tokens[i], "immediate") || tokencmp(tokens[i], "batch")) {
+          hasPriority = true;
+        }
+        else if(tokencmp(tokens[i], "tag")) {
+          hasTag = true;
+          ++i;
+        }
+        else {
+          return {};
+        }
+      }
+
+      std::vector<const char*> options;
+      if(!hasType) {
+        options.push_back("[all|auto|manual]");
+      }
+      if(!hasTag) {
+        options.push_back("[tag <TAG>]");
+      }
+      if(!hasPriority) {
+        options.push_back("[default|immediate|batch]");
+      }
+
+      return options;
+    }
+  }
+  else if((tokencmp(tokens[1], "enable") || tokencmp(tokens[1], "disable")) && tokens.size() == 2) {
+    return { "auto" };
+  }
+  else if(tokens.size() == 2 && inArgument) {
+    return { "[ARGS]" };
+  }
+
+  return std::vector<const char*>();
+}
+
 void LogCommand(std::string line, UID randomID, std::string errMsg) {
   printf("%s\n", errMsg.c_str());
   TraceEvent(SevInfo, "CLICommandLog", randomID).detail("Command", line).detail("Error", errMsg);
 ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
           (int)(itr->tpsRate),
           std::min((int)(itr->expirationTime-now()), (int)(itr->initialDuration)),
           transactionPriorityToString(itr->priority, false),
-          itr->autoThrottled ? "auto" : "manual",
+          itr->throttleType == TagThrottleType::AUTO ? "auto" : "manual",
           itr->tag.toString().c_str());
       }
     }
 ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
       printf("There are no throttled tags\n");
     }
   }
-  else if(tokencmp(tokens[1], "on") && tokens.size() <= 6) {
-    if(tokens.size() < 4 || !tokencmp(tokens[2], "tag")) {
-      printf("Usage: throttle on tag <TAG> [RATE] [DURATION]\n");
+  else if(tokencmp(tokens[1], "on")) {
+    if(tokens.size() < 4 || !tokencmp(tokens[2], "tag") || tokens.size() > 7) {
+      printf("Usage: throttle on tag <TAG> [RATE] [DURATION] [PRIORITY]\n");
       printf("\n");
      printf("Enables throttling for transactions with the specified tag.\n");
       printf("An optional transactions per second rate can be specified (default 0).\n");
       printf("An optional duration can be specified, which must include a time suffix (s, m, h, d) (default 1h).\n");
+      printf("An optional priority can be specified. Choices are `default', `immediate', and `batch' (default `default').\n");
       is_error = true;
       continue;
     }
 
     double tpsRate = 0.0;
     uint64_t duration = 3600;
+    TransactionPriority priority = TransactionPriority::DEFAULT;
 
     if(tokens.size() >= 5) {
       char* end;
 ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
         continue;
       }
       duration = parsedDuration.get();
-    }
 
-    if(duration == 0) {
-      printf("ERROR: throttle duration cannot be 0\n");
-      is_error = true;
-      continue;
+      if(duration == 0) {
+        printf("ERROR: throttle duration cannot be 0\n");
+        is_error = true;
+        continue;
+      }
+    }
+    if(tokens.size() == 7) {
+      if(tokens[6] == LiteralStringRef("default")) {
+        priority = TransactionPriority::DEFAULT;
+      }
+      else if(tokens[6] == LiteralStringRef("immediate")) {
+        priority = TransactionPriority::IMMEDIATE;
+      }
+      else if(tokens[6] == LiteralStringRef("batch")) {
+        priority = TransactionPriority::BATCH;
+      }
+      else {
+        printf("ERROR: unrecognized priority `%s'. Must be one of `default',\n  `immediate', or `batch'.\n", tokens[6].toString().c_str());
+        is_error = true;
+        continue;
+      }
     }
 
     TagSet tags;
     tags.addTag(tokens[3]);
 
-    wait(ThrottleApi::throttleTags(db, tags, tpsRate, duration, false, TransactionPriority::DEFAULT));
+    wait(ThrottleApi::throttleTags(db, tags, tpsRate, duration, TagThrottleType::MANUAL, priority));
     printf("Tag `%s' has been throttled\n", tokens[3].toString().c_str());
   }
   else if(tokencmp(tokens[1], "off")) {
-    if(tokencmp(tokens[2], "tag") && tokens.size() == 4) {
-      TagSet tags;
-      tags.addTag(tokens[3]);
-      bool success = wait(ThrottleApi::unthrottleTags(db, tags, false, TransactionPriority::DEFAULT)); // TODO: Allow targeting priority and auto/manual
-      if(success) {
-        printf("Unthrottled tag `%s'\n", tokens[3].toString().c_str());
+    int nextIndex = 2;
+    TagSet tags;
+    bool throttleTypeSpecified = false;
+    Optional<TagThrottleType> throttleType = TagThrottleType::MANUAL;
+    Optional<TransactionPriority> priority;
+
+    if(tokens.size() == 2) {
+      is_error = true;
+    }
+
+    while(nextIndex < tokens.size() && !is_error) {
+      if(tokencmp(tokens[nextIndex], "all")) {
+        if(throttleTypeSpecified) {
+          is_error = true;
+          continue;
+        }
+        throttleTypeSpecified = true;
+        throttleType = Optional<TagThrottleType>();
+        ++nextIndex;
+      }
-      else {
-        printf("Tag `%s' was not throttled\n", tokens[3].toString().c_str());
+      else if(tokencmp(tokens[nextIndex], "auto")) {
+        if(throttleTypeSpecified) {
+          is_error = true;
+          continue;
+        }
+        throttleTypeSpecified = true;
+        throttleType = TagThrottleType::AUTO;
+        ++nextIndex;
       }
-    }
-    else if(tokencmp(tokens[2], "all") && tokens.
size ( ) = = 3 ) { <nl> - bool unthrottled = wait ( ThrottleApi : : unthrottleAll ( db ) ) ; <nl> - if ( unthrottled ) { <nl> - printf ( " Unthrottled all tags \ n " ) ; <nl> + else if ( tokencmp ( tokens [ nextIndex ] , " manual " ) ) { <nl> + if ( throttleTypeSpecified ) { <nl> + is_error = true ; <nl> + continue ; <nl> + } <nl> + throttleTypeSpecified = true ; <nl> + throttleType = TagThrottleType : : MANUAL ; <nl> + + + nextIndex ; <nl> } <nl> - else { <nl> - printf ( " There were no tags being throttled \ n " ) ; <nl> + else if ( tokencmp ( tokens [ nextIndex ] , " default " ) ) { <nl> + if ( priority . present ( ) ) { <nl> + is_error = true ; <nl> + continue ; <nl> + } <nl> + priority = TransactionPriority : : DEFAULT ; <nl> + + + nextIndex ; <nl> } <nl> - } <nl> - else if ( tokencmp ( tokens [ 2 ] , " auto " ) & & tokens . size ( ) = = 3 ) { <nl> - bool unthrottled = wait ( ThrottleApi : : unthrottleAuto ( db ) ) ; <nl> - if ( unthrottled ) { <nl> - printf ( " Unthrottled all auto - throttled tags \ n " ) ; <nl> + else if ( tokencmp ( tokens [ nextIndex ] , " immediate " ) ) { <nl> + if ( priority . present ( ) ) { <nl> + is_error = true ; <nl> + continue ; <nl> + } <nl> + priority = TransactionPriority : : IMMEDIATE ; <nl> + + + nextIndex ; <nl> } <nl> - else { <nl> - printf ( " There were no tags being throttled \ n " ) ; <nl> + else if ( tokencmp ( tokens [ nextIndex ] , " batch " ) ) { <nl> + if ( priority . present ( ) ) { <nl> + is_error = true ; <nl> + continue ; <nl> + } <nl> + priority = TransactionPriority : : BATCH ; <nl> + + + nextIndex ; <nl> + } <nl> + else if ( tokencmp ( tokens [ nextIndex ] , " tag " ) ) { <nl> + if ( tags . size ( ) > 0 | | nextIndex = = tokens . size ( ) - 1 ) { <nl> + is_error = true ; <nl> + continue ; <nl> + } <nl> + tags . addTag ( tokens [ nextIndex + 1 ] ) ; <nl> + nextIndex + = 2 ; <nl> + } <nl> + else { <nl> + is_error = true ; <nl> + } <nl> } <nl> - else if ( tokencmp ( tokens [ 2 ] , " manual " ) & & tokens . size ( ) = = 3 ) { <nl> - bool unthrottled = wait ( ThrottleApi : : unthrottleManual ( db ) ) ; <nl> - if ( unthrottled ) { <nl> - printf ( " Unthrottled all manually throttled tags \ n " ) ; <nl> + <nl> + if ( ! is_error ) { <nl> + state const char * throttleTypeString = ! throttleType . present ( ) ? " " : ( throttleType . get ( ) = = TagThrottleType : : AUTO ? " auto - " : " manually " ) ; <nl> + state std : : string priorityString = priority . present ( ) ? format ( " at % s priority " , transactionPriorityToString ( priority . get ( ) , false ) ) : " " ; <nl> + <nl> + if ( tags . size ( ) > 0 ) { <nl> + bool success = wait ( ThrottleApi : : unthrottleTags ( db , tags , throttleType , priority ) ) ; <nl> + if ( success ) { <nl> + printf ( " Unthrottled tag ` % s ' % s \ n " , tokens [ 3 ] . toString ( ) . c_str ( ) , priorityString . c_str ( ) ) ; <nl> + } <nl> + else { <nl> + printf ( " Tag ` % s ' was not % sthrottled % s \ n " , tokens [ 3 ] . toString ( ) . c_str ( ) , throttleTypeString , priorityString . c_str ( ) ) ; <nl> + } <nl> + } <nl> else { <nl> - printf ( " There were no tags being throttled \ n " ) ; <nl> + bool unthrottled = wait ( ThrottleApi : : unthrottleAll ( db , throttleType , priority ) ) ; <nl> + if ( unthrottled ) { <nl> + printf ( " Unthrottled all % sthrottled tags % s \ n " , throttleTypeString , priorityString . c_str ( ) ) ; <nl> + } <nl> + else { <nl> + printf ( " There were no tags being % sthrottled % s \ n " , throttleTypeString , priorityString .
c_str ( ) ) ; <nl> + } <nl> } <nl> } <nl> else { <nl> - printf ( " Usage : throttle off < all | auto | manual | tag > [ TAG ] \ n " ) ; <nl> + printf ( " Usage : throttle off [ all | auto | manual ] [ tag < TAG > ] [ PRIORITY ] \ n " ) ; <nl> printf ( " \ n " ) ; <nl> - printf ( " Disables throttling for the specified tag ( s ) . \ n " ) ; <nl> - printf ( " Use ` all ' to turn off all tag throttles , ` auto ' to turn off throttles created by \ n " ) ; <nl> - printf ( " the cluster , and ` manual ' to turn off throttles created manually . Use ` tag < TAG > ' \ n " ) ; <nl> - printf ( " to turn off throttles for a specific tag \ n " ) ; <nl> - is_error = true ; <nl> + printf ( " Disables throttling for throttles matching the specified filters . At least one filter must be used . \ n \ n " ) ; <nl> + printf ( " An optional qualifier ` all ' , ` auto ' , or ` manual ' can be used to specify the type of throttle \ n " ) ; <nl> + printf ( " affected . ` all ' targets all throttles , ` auto ' targets those created by the cluster , and \ n " ) ; <nl> + printf ( " ` manual ' targets those created manually ( default ` manual ' ) . \ n \ n " ) ; <nl> + printf ( " The ` tag ' filter can be used to turn off only a specific tag . \ n \ n " ) ; <nl> + printf ( " The priority filter can be used to turn off only throttles at specific priorities . Choices are \ n " ) ; <nl> + printf ( " ` default ' , ` immediate ' , or ` batch ' . By default , all priorities are targeted . \ n " ) ; <nl> } <nl> } <nl> - else if ( ( tokencmp ( tokens [ 1 ] , " enable " ) | | tokencmp ( tokens [ 1 ] , " disable " ) ) & & tokens . size ( ) = = 3 & & tokencmp ( tokens [ 2 ] , " auto " ) ) { <nl> + else if ( tokencmp ( tokens [ 1 ] , " enable " ) | | tokencmp ( tokens [ 1 ] , " disable " ) ) { <nl> if ( tokens . size ( ) ! = 3 | | ! tokencmp ( tokens [ 2 ] , " auto " ) ) { <nl> printf ( " Usage : throttle < enable | disable > auto \ n " ) ; <nl> printf ( " \ n " ) ; <nl> ACTOR Future < int > cli ( CLIOptions opt , LineNoise * plinenoise ) { <nl> ACTOR Future < int > runCli ( CLIOptions opt ) { <nl> state LineNoise linenoise ( <nl> [ ] ( std : : string const & line , std : : vector < std : : string > & completions ) { <nl> - fdbcli_comp_cmd ( line , completions ) ; <nl> + fdbcliCompCmd ( line , completions ) ; <nl> } , <nl> [ enabled = opt . cliHints ] ( std : : string const & line ) - > LineNoise : : Hint { <nl> if ( ! enabled ) { <nl> ACTOR Future < int > runCli ( CLIOptions opt ) { <nl> / / being entered . <nl> if ( error & & line . back ( ) ! = ' \ \ ' ) return LineNoise : : Hint ( std : : string ( " { malformed escape sequence } " ) , 90 , false ) ; <nl> <nl> - auto iter = helpMap . find ( command . toString ( ) ) ; <nl> - if ( iter ! = helpMap . end ( ) ) { <nl> - std : : string helpLine = iter - > second . usage ; <nl> - std : : vector < std : : vector < StringRef > > parsedHelp = parseLine ( helpLine , error , partial ) ; <nl> - std : : string hintLine = ( * ( line . end ( ) - 1 ) = = ' ' ? " " : " " ) ; <nl> - for ( int i = finishedParameters ; i < parsedHelp . back ( ) . size ( ) ; i + + ) { <nl> - hintLine = hintLine + parsedHelp . back ( ) [ i ] . toString ( ) + " " ; <nl> + bool inArgument = * ( line . end ( ) - 1 ) ! = ' ' ; <nl> + std : : string hintLine = inArgument ? " " : " " ; <nl> + if ( tokencmp ( command , " throttle " ) ) { <nl> + std : : vector < const char * > hintItems = throttleHintGenerator ( parsed . back ( ) , inArgument ) ; <nl> + if ( hintItems .
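// A hedged usage sketch of the reworked ThrottleApi calls exercised above.
// The signatures follow the declarations changed later in this same diff
// (fdbclient/TagThrottle.h); the actor name and the tag value are invented.
ACTOR Future<Void> throttleExample(Database db) {
	state TagSet tags;
	tags.addTag(LiteralStringRef("web_frontend")); // hypothetical transaction tag

	// Equivalent of `throttle on tag web_frontend 50 1h batch`: a manual
	// throttle at 50 tps for 3600 seconds, applied at batch priority.
	wait(ThrottleApi::throttleTags(db, tags, 50.0, 3600, TagThrottleType::MANUAL,
	                               TransactionPriority::BATCH));

	// Equivalent of `throttle off manual tag web_frontend batch`: remove only
	// manual, batch-priority throttles on this tag; both filters are Optional,
	// so passing empty Optionals would widen the match.
	bool removed = wait(ThrottleApi::unthrottleTags(db, tags, TagThrottleType::MANUAL,
	                                                TransactionPriority::BATCH));
	TraceEvent("ThrottleExample").detail("Removed", removed);
	return Void();
}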
empty ( ) ) { <nl> + return LineNoise : : Hint ( ) ; <nl> + } <nl> + for ( auto item : hintItems ) { <nl> + hintLine = hintLine + item + " " ; <nl> + } <nl> + } <nl> + else { <nl> + auto iter = helpMap . find ( command . toString ( ) ) ; <nl> + if ( iter ! = helpMap . end ( ) ) { <nl> + std : : string helpLine = iter - > second . usage ; <nl> + std : : vector < std : : vector < StringRef > > parsedHelp = parseLine ( helpLine , error , partial ) ; <nl> + for ( int i = finishedParameters ; i < parsedHelp . back ( ) . size ( ) ; i + + ) { <nl> + hintLine = hintLine + parsedHelp . back ( ) [ i ] . toString ( ) + " " ; <nl> + } <nl> + } <nl> + else { <nl> + return LineNoise : : Hint ( ) ; <nl> } <nl> - return LineNoise : : Hint ( hintLine , 90 , false ) ; <nl> - } else { <nl> - return LineNoise : : Hint ( ) ; <nl> } <nl> + <nl> + return LineNoise : : Hint ( hintLine , 90 , false ) ; <nl> } , <nl> 1000 , <nl> false ) ; <nl> mmm a / fdbclient / ClientLogEvents . h <nl> ppp b / fdbclient / ClientLogEvents . h <nl> namespace FdbClientLogEvents { <nl> } ; <nl> <nl> struct Event { <nl> - Event ( EventType t , double ts ) : type ( t ) , startTs ( ts ) { } <nl> + Event ( EventType t , double ts , const Optional < Standalone < StringRef > > & dc ) : type ( t ) , startTs ( ts ) { <nl> + if ( dc . present ( ) ) <nl> + dcId = dc . get ( ) ; <nl> + } <nl> Event ( ) { } <nl> <nl> - template < typename Ar > Ar & serialize ( Ar & ar ) { return serializer ( ar , type , startTs ) ; } <nl> + template < typename Ar > Ar & serialize ( Ar & ar ) { <nl> + if ( ar . protocolVersion ( ) . version ( ) > = ( uint64_t ) 0x0FDB00B063010001LL ) { <nl> + return serializer ( ar , type , startTs , dcId ) ; <nl> + } else { <nl> + return serializer ( ar , type , startTs ) ; <nl> + } <nl> + } <nl> <nl> EventType type { EVENTTYPEEND } ; <nl> double startTs { 0 } ; <nl> + Key dcId { } ; <nl> <nl> void logEvent ( std : : string id , int maxFieldLength ) const { } <nl> } ; <nl> <nl> struct EventGetVersion : public Event { <nl> - EventGetVersion ( double ts , double lat ) : Event ( GET_VERSION_LATENCY , ts ) , latency ( lat ) { } <nl> EventGetVersion ( ) { } <nl> <nl> template < typename Ar > Ar & serialize ( Ar & ar ) { <nl> namespace FdbClientLogEvents { <nl> <nl> / / Version V2 of EventGetVersion starting at 6 . 2 <nl> struct EventGetVersion_V2 : public Event { <nl> - EventGetVersion_V2 ( double ts , double lat , TransactionPriority priority ) : Event ( GET_VERSION_LATENCY , ts ) , latency ( lat ) { <nl> - switch ( priority ) { <nl> - / / Unfortunately , the enum serialized here disagrees with the enum used elsewhere for the values used by each priority <nl> - case TransactionPriority : : IMMEDIATE : <nl> - priorityType = PRIORITY_IMMEDIATE ; <nl> - break ; <nl> - case TransactionPriority : : DEFAULT : <nl> - priorityType = PRIORITY_DEFAULT ; <nl> - break ; <nl> - case TransactionPriority : : BATCH : <nl> - priorityType = PRIORITY_BATCH ; <nl> - break ; <nl> - default : <nl> - ASSERT ( false ) ; <nl> - } <nl> - } <nl> EventGetVersion_V2 ( ) { } <nl> <nl> template < typename Ar > Ar & serialize ( Ar & ar ) { <nl> namespace FdbClientLogEvents { <nl> <nl> / / Version V3 of EventGetVersion starting at 6 . 
3 <nl> struct EventGetVersion_V3 : public Event { <nl> - EventGetVersion_V3 ( double ts , double lat , TransactionPriority priority , Version version ) : Event ( GET_VERSION_LATENCY , ts ) , latency ( lat ) , readVersion ( version ) { <nl> + EventGetVersion_V3 ( double ts , const Optional < Standalone < StringRef > > & dcId , double lat , TransactionPriority priority , Version version ) : Event ( GET_VERSION_LATENCY , ts , dcId ) , latency ( lat ) , readVersion ( version ) { <nl> switch ( priority ) { <nl> / / Unfortunately , the enum serialized here disagrees with the enum used elsewhere for the values used by each priority <nl> case TransactionPriority : : IMMEDIATE : <nl> namespace FdbClientLogEvents { <nl> } ; <nl> <nl> struct EventGet : public Event { <nl> - EventGet ( double ts , double lat , int size , const KeyRef & in_key ) : Event ( GET_LATENCY , ts ) , latency ( lat ) , valueSize ( size ) , key ( in_key ) { } <nl> + EventGet ( double ts , const Optional < Standalone < StringRef > > & dcId , double lat , int size , const KeyRef & in_key ) : Event ( GET_LATENCY , ts , dcId ) , latency ( lat ) , valueSize ( size ) , key ( in_key ) { } <nl> EventGet ( ) { } <nl> <nl> template < typename Ar > Ar & serialize ( Ar & ar ) { <nl> namespace FdbClientLogEvents { <nl> } ; <nl> <nl> struct EventGetRange : public Event { <nl> - EventGetRange ( double ts , double lat , int size , const KeyRef & start_key , const KeyRef & end_key ) : Event ( GET_RANGE_LATENCY , ts ) , latency ( lat ) , rangeSize ( size ) , startKey ( start_key ) , endKey ( end_key ) { } <nl> + EventGetRange ( double ts , const Optional < Standalone < StringRef > > & dcId , double lat , int size , const KeyRef & start_key , const KeyRef & end_key ) : Event ( GET_RANGE_LATENCY , ts , dcId ) , latency ( lat ) , rangeSize ( size ) , startKey ( start_key ) , endKey ( end_key ) { } <nl> EventGetRange ( ) { } <nl> <nl> template < typename Ar > Ar & serialize ( Ar & ar ) { <nl> namespace FdbClientLogEvents { <nl> } ; <nl> <nl> struct EventCommit : public Event { <nl> - EventCommit ( double ts , double lat , int mut , int bytes , const CommitTransactionRequest & commit_req ) : Event ( COMMIT_LATENCY , ts ) , latency ( lat ) , numMutations ( mut ) , commitBytes ( bytes ) , req ( commit_req ) { } <nl> EventCommit ( ) { } <nl> <nl> template < typename Ar > Ar & serialize ( Ar & ar ) { <nl> namespace FdbClientLogEvents { <nl> <nl> / / Version V2 of EventCommit starting at 6 .
3 <nl> struct EventCommit_V2 : public Event { <nl> - EventCommit_V2 ( double ts , double lat , int mut , int bytes , Version version , const CommitTransactionRequest & commit_req ) <nl> - : Event ( COMMIT_LATENCY , ts ) , latency ( lat ) , numMutations ( mut ) , commitBytes ( bytes ) , commitVersion ( version ) , req ( commit_req ) { } <nl> + EventCommit_V2 ( double ts , const Optional < Standalone < StringRef > > & dcId , double lat , int mut , int bytes , Version version , const CommitTransactionRequest & commit_req ) <nl> + : Event ( COMMIT_LATENCY , ts , dcId ) , latency ( lat ) , numMutations ( mut ) , commitBytes ( bytes ) , commitVersion ( version ) , req ( commit_req ) { } <nl> EventCommit_V2 ( ) { } <nl> <nl> template < typename Ar > Ar & serialize ( Ar & ar ) { <nl> namespace FdbClientLogEvents { <nl> } ; <nl> <nl> struct EventGetError : public Event { <nl> - EventGetError ( double ts , int err_code , const KeyRef & in_key ) : Event ( ERROR_GET , ts ) , errCode ( err_code ) , key ( in_key ) { } <nl> + EventGetError ( double ts , const Optional < Standalone < StringRef > > & dcId , int err_code , const KeyRef & in_key ) : Event ( ERROR_GET , ts , dcId ) , errCode ( err_code ) , key ( in_key ) { } <nl> EventGetError ( ) { } <nl> <nl> template < typename Ar > Ar & serialize ( Ar & ar ) { <nl> namespace FdbClientLogEvents { <nl> } ; <nl> <nl> struct EventGetRangeError : public Event { <nl> - EventGetRangeError ( double ts , int err_code , const KeyRef & start_key , const KeyRef & end_key ) : Event ( ERROR_GET_RANGE , ts ) , errCode ( err_code ) , startKey ( start_key ) , endKey ( end_key ) { } <nl> + EventGetRangeError ( double ts , const Optional < Standalone < StringRef > > & dcId , int err_code , const KeyRef & start_key , const KeyRef & end_key ) : Event ( ERROR_GET_RANGE , ts , dcId ) , errCode ( err_code ) , startKey ( start_key ) , endKey ( end_key ) { } <nl> EventGetRangeError ( ) { } <nl> <nl> template < typename Ar > Ar & serialize ( Ar & ar ) { <nl> namespace FdbClientLogEvents { <nl> } ; <nl> <nl> struct EventCommitError : public Event { <nl> - EventCommitError ( double ts , int err_code , const CommitTransactionRequest & commit_req ) : Event ( ERROR_COMMIT , ts ) , errCode ( err_code ) , req ( commit_req ) { } <nl> + EventCommitError ( double ts , const Optional < Standalone < StringRef > > & dcId , int err_code , const CommitTransactionRequest & commit_req ) : Event ( ERROR_COMMIT , ts , dcId ) , errCode ( err_code ) , req ( commit_req ) { } <nl> EventCommitError ( ) { } <nl> <nl> template < typename Ar > Ar & serialize ( Ar & ar ) { <nl> mmm a / fdbclient / FDBTypes . h <nl> ppp b / fdbclient / FDBTypes . h <nl> struct HealthMetrics { <nl> } <nl> } ; <nl> <nl> + struct DDMetricsRef { <nl> + int64_t shardBytes ; <nl> + KeyRef beginKey ; <nl> + <nl> + DDMetricsRef ( ) : shardBytes ( 0 ) { } <nl> + DDMetricsRef ( int64_t bytes , KeyRef begin ) : shardBytes ( bytes ) , beginKey ( begin ) { } <nl> + DDMetricsRef ( Arena & a , const DDMetricsRef & copyFrom ) <nl> + : shardBytes ( copyFrom . shardBytes ) , beginKey ( a , copyFrom . beginKey ) { } <nl> + <nl> + template < class Ar > <nl> + void serialize ( Ar & ar ) { <nl> + serializer ( ar , shardBytes , beginKey ) ; <nl> + } <nl> + } ; <nl> + <nl> struct WorkerBackupStatus { <nl> LogEpoch epoch ; <nl> Version version ; <nl> deleted file mode 100644 <nl> index 66bdccf43d . . 0000000000 <nl> mmm a / fdbclient / IncludeVersions . h <nl> ppp / dev / null <nl> <nl> - / * <nl> - * IncludeVersions . 
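// Sketch of the compatibility pattern Event::serialize uses above: the newly
// added dcId field is written only when the serializer's protocol version is
// at least the constant from the diff, so older readers still decode the old
// layout. VersionedBlob and its fields are illustrative stand-ins.
struct VersionedBlob {
	int a{0};
	int b{0}; // field introduced in the newer protocol version

	template <typename Ar>
	Ar& serialize(Ar& ar) {
		if (ar.protocolVersion().version() >= (uint64_t)0x0FDB00B063010001LL) {
			return serializer(ar, a, b); // new wire format, includes b
		} else {
			return serializer(ar, a); // legacy wire format, b stays at its default
		}
	}
};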
h <nl> - * <nl> - * This source file is part of the FoundationDB open source project <nl> - * <nl> - * Copyright 2013 - 2020 Apple Inc . and the FoundationDB project authors <nl> - * <nl> - * Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - * you may not use this file except in compliance with the License . <nl> - * You may obtain a copy of the License at <nl> - * <nl> - * http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> - * <nl> - * Unless required by applicable law or agreed to in writing , software <nl> - * distributed under the License is distributed on an " AS IS " BASIS , <nl> - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> - * See the License for the specific language governing permissions and <nl> - * limitations under the License . <nl> - * / <nl> - <nl> - / / This is a simple header to isolate the stupidity that results out of two <nl> - / / build systems and versions . h include directives <nl> - <nl> - # if defined ( CMAKE_BUILD ) <nl> - # include " fdbclient / versions . h " <nl> - # elif ! defined ( WIN32 ) <nl> - # include " versions . h " <nl> - # endif <nl> mmm a / fdbclient / Knobs . cpp <nl> ppp b / fdbclient / Knobs . cpp <nl> void ClientKnobs : : initialize ( bool randomize ) { <nl> init ( STORAGE_METRICS_TOO_MANY_SHARDS_DELAY , 15 . 0 ) ; <nl> init ( AGGREGATE_HEALTH_METRICS_MAX_STALENESS , 0 . 5 ) ; <nl> init ( DETAILED_HEALTH_METRICS_MAX_STALENESS , 5 . 0 ) ; <nl> + init ( TAG_ENCODE_KEY_SERVERS , false ) ; if ( randomize & & BUGGIFY ) TAG_ENCODE_KEY_SERVERS = true ; <nl> <nl> / / KeyRangeMap <nl> init ( KRM_GET_RANGE_LIMIT , 1e5 ) ; if ( randomize & & BUGGIFY ) KRM_GET_RANGE_LIMIT = 10 ; <nl> mmm a / fdbclient / Knobs . h <nl> ppp b / fdbclient / Knobs . h <nl> class ClientKnobs : public Knobs { <nl> double STORAGE_METRICS_TOO_MANY_SHARDS_DELAY ; <nl> double AGGREGATE_HEALTH_METRICS_MAX_STALENESS ; <nl> double DETAILED_HEALTH_METRICS_MAX_STALENESS ; <nl> + bool TAG_ENCODE_KEY_SERVERS ; <nl> <nl> / / KeyRangeMap <nl> int KRM_GET_RANGE_LIMIT ; <nl> mmm a / fdbclient / MasterProxyInterface . h <nl> ppp b / fdbclient / MasterProxyInterface . h <nl> struct MasterProxyInterface { <nl> <nl> Optional < Key > processId ; <nl> bool provisional ; <nl> - Endpoint base ; <nl> RequestStream < struct CommitTransactionRequest > commit ; <nl> RequestStream < struct GetReadVersionRequest > getConsistentReadVersion ; / / Returns a version which ( 1 ) is committed , and ( 2 ) is > = the latest version reported committed ( by a commit response ) when this request was sent <nl> / / ( at some point between when this request is sent and when its response is received , the latest version reported committed ) <nl> struct MasterProxyInterface { <nl> RequestStream < struct GetHealthMetricsRequest > getHealthMetrics ; <nl> RequestStream < struct ProxySnapRequest > proxySnapReq ; <nl> RequestStream < struct ExclusionSafetyCheckRequest > exclusionSafetyCheckReq ; <nl> + RequestStream < struct GetDDMetricsRequest > getDDMetrics ; <nl> <nl> UID id ( ) const { return commit . getEndpoint ( ) . token ; } <nl> std : : string toString ( ) const { return id ( ) . 
shortString ( ) ; } <nl> struct MasterProxyInterface { <nl> <nl> template < class Archive > <nl> void serialize ( Archive & ar ) { <nl> - serializer ( ar , processId , provisional , base ) ; <nl> + serializer ( ar , processId , provisional , commit ) ; <nl> if ( Archive : : isDeserializing ) { <nl> - commit = RequestStream < struct CommitTransactionRequest > ( base . getAdjustedEndpoint ( 0 ) ) ; <nl> - getConsistentReadVersion = RequestStream < struct GetReadVersionRequest > ( base . getAdjustedEndpoint ( 1 ) ) ; <nl> - getKeyServersLocations = RequestStream < struct GetKeyServerLocationsRequest > ( base . getAdjustedEndpoint ( 2 ) ) ; <nl> - getStorageServerRejoinInfo = RequestStream < struct GetStorageServerRejoinInfoRequest > ( base . getAdjustedEndpoint ( 3 ) ) ; <nl> - waitFailure = RequestStream < ReplyPromise < Void > > ( base . getAdjustedEndpoint ( 4 ) ) ; <nl> - getRawCommittedVersion = RequestStream < struct GetRawCommittedVersionRequest > ( base . getAdjustedEndpoint ( 5 ) ) ; <nl> - txnState = RequestStream < struct TxnStateRequest > ( base . getAdjustedEndpoint ( 6 ) ) ; <nl> - getHealthMetrics = RequestStream < struct GetHealthMetricsRequest > ( base . getAdjustedEndpoint ( 7 ) ) ; <nl> - proxySnapReq = RequestStream < struct ProxySnapRequest > ( base . getAdjustedEndpoint ( 8 ) ) ; <nl> - exclusionSafetyCheckReq = RequestStream < struct ExclusionSafetyCheckRequest > ( base . getAdjustedEndpoint ( 9 ) ) ; <nl> + getConsistentReadVersion = RequestStream < struct GetReadVersionRequest > ( commit . getEndpoint ( ) . getAdjustedEndpoint ( 1 ) ) ; <nl> + getKeyServersLocations = RequestStream < struct GetKeyServerLocationsRequest > ( commit . getEndpoint ( ) . getAdjustedEndpoint ( 2 ) ) ; <nl> + getStorageServerRejoinInfo = RequestStream < struct GetStorageServerRejoinInfoRequest > ( commit . getEndpoint ( ) . getAdjustedEndpoint ( 3 ) ) ; <nl> + waitFailure = RequestStream < ReplyPromise < Void > > ( commit . getEndpoint ( ) . getAdjustedEndpoint ( 4 ) ) ; <nl> + getRawCommittedVersion = RequestStream < struct GetRawCommittedVersionRequest > ( commit . getEndpoint ( ) . getAdjustedEndpoint ( 5 ) ) ; <nl> + txnState = RequestStream < struct TxnStateRequest > ( commit . getEndpoint ( ) . getAdjustedEndpoint ( 6 ) ) ; <nl> + getHealthMetrics = RequestStream < struct GetHealthMetricsRequest > ( commit . getEndpoint ( ) . getAdjustedEndpoint ( 7 ) ) ; <nl> + proxySnapReq = RequestStream < struct ProxySnapRequest > ( commit . getEndpoint ( ) . getAdjustedEndpoint ( 8 ) ) ; <nl> + exclusionSafetyCheckReq = RequestStream < struct ExclusionSafetyCheckRequest > ( commit . getEndpoint ( ) . getAdjustedEndpoint ( 9 ) ) ; <nl> + getDDMetrics = RequestStream < struct GetDDMetricsRequest > ( commit . getEndpoint ( ) . getAdjustedEndpoint ( 10 ) ) ; <nl> } <nl> } <nl> <nl> struct MasterProxyInterface { <nl> streams . push_back ( getHealthMetrics . getReceiver ( ) ) ; <nl> streams . push_back ( proxySnapReq . getReceiver ( ) ) ; <nl> streams . push_back ( exclusionSafetyCheckReq . getReceiver ( ) ) ; <nl> - base = FlowTransport : : transport ( ) . addEndpoints ( streams ) ; <nl> + streams . push_back ( getDDMetrics . getReceiver ( ) ) ; <nl> + FlowTransport : : transport ( ) . 
addEndpoints ( streams ) ; <nl> } <nl> } ; <nl> <nl> struct GetHealthMetricsRequest <nl> } <nl> } ; <nl> <nl> + struct GetDDMetricsReply <nl> + { <nl> + constexpr static FileIdentifier file_identifier = 7277713 ; <nl> + Standalone < VectorRef < DDMetricsRef > > storageMetricsList ; <nl> + <nl> + GetDDMetricsReply ( ) { } <nl> + <nl> + template < class Ar > <nl> + void serialize ( Ar & ar ) { <nl> + serializer ( ar , storageMetricsList ) ; <nl> + } <nl> + } ; <nl> + <nl> + struct GetDDMetricsRequest { <nl> + constexpr static FileIdentifier file_identifier = 14536812 ; <nl> + KeyRange keys ; <nl> + int shardLimit ; <nl> + ReplyPromise < struct GetDDMetricsReply > reply ; <nl> + <nl> + GetDDMetricsRequest ( ) { } <nl> + explicit GetDDMetricsRequest ( KeyRange const & keys , const int shardLimit ) : keys ( keys ) , shardLimit ( shardLimit ) { } <nl> + <nl> + template < class Ar > <nl> + void serialize ( Ar & ar ) { <nl> + serializer ( ar , keys , shardLimit , reply ) ; <nl> + } <nl> + } ; <nl> + <nl> struct ProxySnapRequest <nl> { <nl> constexpr static FileIdentifier file_identifier = 22204900 ; <nl> mmm a / fdbclient / NativeAPI . actor . cpp <nl> ppp b / fdbclient / NativeAPI . actor . cpp <nl> <nl> # include " flow / TLSConfig . actor . h " <nl> # include " flow / UnitTest . h " <nl> <nl> - # include " fdbclient / IncludeVersions . h " <nl> + # include " fdbclient / versions . h " <nl> <nl> # ifdef WIN32 <nl> # define WIN32_LEAN_AND_MEAN <nl> DatabaseContext : : DatabaseContext ( Reference < AsyncVar < Reference < ClusterConnectionF <nl> registerSpecialKeySpaceModule ( SpecialKeySpace : : MODULE : : TRANSACTION , std : : make_unique < ConflictingKeysImpl > ( conflictingKeysRange ) ) ; <nl> registerSpecialKeySpaceModule ( SpecialKeySpace : : MODULE : : TRANSACTION , std : : make_unique < ReadConflictRangeImpl > ( readConflictRangeKeysRange ) ) ; <nl> registerSpecialKeySpaceModule ( SpecialKeySpace : : MODULE : : TRANSACTION , std : : make_unique < WriteConflictRangeImpl > ( writeConflictRangeKeysRange ) ) ; <nl> + registerSpecialKeySpaceModule ( SpecialKeySpace : : MODULE : : METRICS , <nl> + std : : make_unique < DDStatsRangeImpl > ( ddStatsRange ) ) ; <nl> registerSpecialKeySpaceModule ( SpecialKeySpace : : MODULE : : WORKERINTERFACE , std : : make_unique < WorkerInterfacesSpecialKeyImpl > ( KeyRangeRef ( <nl> LiteralStringRef ( " \ xff \ xff / worker_interfaces / " ) , LiteralStringRef ( " \ xff \ xff / worker_interfaces0 " ) ) ) ) ; <nl> registerSpecialKeySpaceModule ( SpecialKeySpace : : MODULE : : STATUSJSON , std : : make_unique < SingleSpecialKeyImpl > ( <nl> Reference < LocationInfo > DatabaseContext : : setCachedLocation ( const KeyRangeRef & k <nl> locationCache . insert ( KeyRangeRef ( begin , end ) , Reference < LocationInfo > ( ) ) ; <nl> } <nl> locationCache . insert ( keys , loc ) ; <nl> - return std : : move ( loc ) ; <nl> + return loc ; <nl> } <nl> <nl> void DatabaseContext : : invalidateCache ( const KeyRef & key , bool isBackward ) { <nl> ACTOR Future < Optional < Value > > getValue ( Future < Version > version , Key key , Databa <nl> cx - > readLatencies . addSample ( latency ) ; <nl> if ( trLogInfo ) { <nl> int valueSize = reply . value . present ( ) ? reply . value . get ( ) . size ( ) : 0 ; <nl> - trLogInfo - > addLog ( FdbClientLogEvents : : EventGet ( startTimeD , latency , valueSize , key ) ) ; <nl> + trLogInfo - > addLog ( FdbClientLogEvents : : EventGet ( startTimeD , cx - > clientLocality . 
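// Sketch of the endpoint-packing change applied to MasterProxyInterface above
// (and to StorageServerInterface later in this diff): register every
// RequestStream as one adjacent endpoint group, serialize only the first
// stream, and let the receiver rebuild the rest from adjusted endpoints.
// ExampleInterface and ExampleRequest are hypothetical names.
struct ExampleInterface {
	RequestStream<struct ExampleRequest> first;
	RequestStream<struct ExampleRequest> second;

	void initEndpoints() {
		std::vector<std::pair<FlowReceiver*, TaskPriority>> streams;
		streams.push_back(first.getReceiver());
		streams.push_back(second.getReceiver());
		FlowTransport::transport().addEndpoints(streams); // one shared base token
	}

	template <class Ar>
	void serialize(Ar& ar) {
		serializer(ar, first); // only the first stream crosses the wire
		if (Ar::isDeserializing) {
			// index 1 = position of `second` in the initEndpoints() order
			second = RequestStream<struct ExampleRequest>(first.getEndpoint().getAdjustedEndpoint(1));
		}
	}
};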
dcId ( ) , latency , valueSize , key ) ) ; <nl> } <nl> cx - > getValueCompleted - > latency = timer_int ( ) - startTime ; <nl> cx - > getValueCompleted - > log ( ) ; <nl> ACTOR Future < Optional < Value > > getValue ( Future < Version > version , Key key , Databa <nl> wait ( delay ( CLIENT_KNOBS - > WRONG_SHARD_SERVER_DELAY , info . taskID ) ) ; <nl> } else { <nl> if ( trLogInfo ) <nl> - trLogInfo - > addLog ( FdbClientLogEvents : : EventGetError ( startTimeD , static_cast < int > ( e . code ( ) ) , key ) ) ; <nl> + trLogInfo - > addLog ( FdbClientLogEvents : : EventGetError ( startTimeD , cx - > clientLocality . dcId ( ) , static_cast < int > ( e . code ( ) ) , key ) ) ; <nl> throw e ; <nl> } <nl> } <nl> void getRangeFinished ( Database cx , Reference < TransactionLogInfo > trLogInfo , doub <nl> cx - > transactionKeysRead + = result . size ( ) ; <nl> <nl> if ( trLogInfo ) { <nl> - trLogInfo - > addLog ( FdbClientLogEvents : : EventGetRange ( startTime , now ( ) - startTime , bytes , begin . getKey ( ) , end . getKey ( ) ) ) ; <nl> + trLogInfo - > addLog ( FdbClientLogEvents : : EventGetRange ( startTime , cx - > clientLocality . dcId ( ) , now ( ) - startTime , bytes , begin . getKey ( ) , end . getKey ( ) ) ) ; <nl> } <nl> <nl> if ( ! snapshot ) { <nl> ACTOR Future < Standalone < RangeResultRef > > getRange ( Database cx , Reference < Transa <nl> wait ( delay ( CLIENT_KNOBS - > WRONG_SHARD_SERVER_DELAY , info . taskID ) ) ; <nl> } else { <nl> if ( trLogInfo ) <nl> - trLogInfo - > addLog ( FdbClientLogEvents : : EventGetRangeError ( startTime , static_cast < int > ( e . code ( ) ) , begin . getKey ( ) , end . getKey ( ) ) ) ; <nl> + trLogInfo - > addLog ( FdbClientLogEvents : : EventGetRangeError ( startTime , cx - > clientLocality . dcId ( ) , static_cast < int > ( e . code ( ) ) , begin . getKey ( ) , end . getKey ( ) ) ) ; <nl> <nl> throw e ; <nl> } <nl> ACTOR Future < Key > getKeyAndConflictRange ( <nl> conflictRange . send ( std : : make_pair ( rep , k . orEqual ? keyAfter ( k . getKey ( ) ) : Key ( k . getKey ( ) , k . arena ( ) ) ) ) ; <nl> else <nl> conflictRange . send ( std : : make_pair ( k . orEqual ? keyAfter ( k . getKey ( ) ) : Key ( k . getKey ( ) , k . arena ( ) ) , keyAfter ( rep ) ) ) ; <nl> - return std : : move ( rep ) ; <nl> + return rep ; <nl> } catch ( Error & e ) { <nl> conflictRange . send ( std : : make_pair ( Key ( ) , Key ( ) ) ) ; <nl> throw ; <nl> ACTOR static Future < Void > tryCommit ( Database cx , Reference < TransactionLogInfo > <nl> cx - > commitLatencies . addSample ( latency ) ; <nl> cx - > latencies . addSample ( now ( ) - tr - > startTime ) ; <nl> if ( trLogInfo ) <nl> - trLogInfo - > addLog ( FdbClientLogEvents : : EventCommit_V2 ( startTime , latency , req . transaction . mutations . size ( ) , req . transaction . mutations . expectedSize ( ) , ci . version , req ) ) ; <nl> + trLogInfo - > addLog ( FdbClientLogEvents : : EventCommit_V2 ( startTime , cx - > clientLocality . dcId ( ) , latency , req . transaction . mutations . size ( ) , req . transaction . mutations . expectedSize ( ) , ci . version , req ) ) ; <nl> return Void ( ) ; <nl> } else { <nl> / / clear the RYW transaction which contains previous conflicting keys <nl> ACTOR static Future < Void > tryCommit ( Database cx , Reference < TransactionLogInfo > <nl> TraceEvent ( SevError , " TryCommitError " ) . error ( e ) ; <nl> } <nl> if ( trLogInfo ) <nl> - trLogInfo - > addLog ( FdbClientLogEvents : : EventCommitError ( startTime , static_cast < int > ( e . 
code ( ) ) , req ) ) ; <nl> + trLogInfo - > addLog ( FdbClientLogEvents : : EventCommitError ( startTime , cx - > clientLocality . dcId ( ) , static_cast < int > ( e . code ( ) ) , req ) ) ; <nl> throw ; <nl> } <nl> } <nl> ACTOR Future < Version > extractReadVersion ( DatabaseContext * cx , TransactionPriorit <nl> double latency = now ( ) - startTime ; <nl> cx - > GRVLatencies . addSample ( latency ) ; <nl> if ( trLogInfo ) <nl> - trLogInfo - > addLog ( FdbClientLogEvents : : EventGetVersion_V3 ( startTime , latency , priority , rep . version ) ) ; <nl> + trLogInfo - > addLog ( FdbClientLogEvents : : EventGetVersion_V3 ( startTime , cx - > clientLocality . dcId ( ) , latency , priority , rep . version ) ) ; <nl> if ( rep . version = = 1 & & rep . locked ) { <nl> throw proxy_memory_limit_exceeded ( ) ; <nl> } <nl> Future < StorageMetrics > Transaction : : getStorageMetrics ( KeyRange const & keys , i <nl> } <nl> } <nl> <nl> + ACTOR Future < Standalone < VectorRef < DDMetricsRef > > > waitDataDistributionMetricsList ( Database cx , KeyRange keys , <nl> + int shardLimit ) { <nl> + state Future < Void > clientTimeout = delay ( 5 . 0 ) ; <nl> + loop { <nl> + choose { <nl> + when ( wait ( cx - > onMasterProxiesChanged ( ) ) ) { } <nl> + when ( ErrorOr < GetDDMetricsReply > rep = <nl> + wait ( errorOr ( basicLoadBalance ( cx - > getMasterProxies ( false ) , & MasterProxyInterface : : getDDMetrics , <nl> + GetDDMetricsRequest ( keys , shardLimit ) ) ) ) ) { <nl> + if ( rep . isError ( ) ) { <nl> + throw rep . getError ( ) ; <nl> + } <nl> + return rep . get ( ) . storageMetricsList ; <nl> + } <nl> + when ( wait ( clientTimeout ) ) { throw timed_out ( ) ; } <nl> + } <nl> + } <nl> + } <nl> + <nl> Future < Standalone < VectorRef < KeyRangeRef > > > Transaction : : getReadHotRanges ( KeyRange const & keys ) { <nl> return : : getReadHotRanges ( cx , keys ) ; <nl> } <nl> mmm a / fdbclient / NativeAPI . actor . h <nl> ppp b / fdbclient / NativeAPI . actor . h <nl> class Transaction : NonCopyable { <nl> } ; <nl> <nl> ACTOR Future < Version > waitForCommittedVersion ( Database cx , Version version ) ; <nl> + ACTOR Future < Standalone < VectorRef < DDMetricsRef > > > waitDataDistributionMetricsList ( Database cx , KeyRange keys , <nl> + int shardLimit ) ; <nl> <nl> std : : string unprintable ( const std : : string & ) ; <nl> <nl> mmm a / fdbclient / Schemas . cpp <nl> ppp b / fdbclient / Schemas . cpp <nl> const KeyRef JSONSchemas : : statusSchema = LiteralStringRef ( R " statusSchema ( <nl> } , <nl> " limiting_queue_bytes_storage_server " : 0 , <nl> " worst_queue_bytes_storage_server " : 0 , <nl> - " limiting_version_lag_storage_server " : 0 , <nl> - " worst_version_lag_storage_server " : 0 , <nl> " limiting_data_lag_storage_server " : { <nl> " versions " : 0 , <nl> " seconds " : 0 . 0 <nl> mmm a / fdbclient / SpecialKeySpace . actor . cpp <nl> ppp b / fdbclient / SpecialKeySpace . actor . 
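// The client-timeout idiom from waitDataDistributionMetricsList above,
// extracted as a reusable sketch: race the request against a delay and turn
// an expired delay into timed_out(). This mirrors flow's existing generic
// actors; withClientTimeout is an assumed name, not an existing API.
ACTOR template <class T>
Future<T> withClientTimeout(Future<T> work, double seconds) {
	state Future<Void> clientTimeout = delay(seconds);
	choose {
		when(T result = wait(work)) { return result; }
		when(wait(clientTimeout)) { throw timed_out(); }
	}
}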
cpp <nl> std : : unordered_map < SpecialKeySpace : : MODULE , KeyRange > SpecialKeySpace : : moduleToB <nl> KeyRangeRef ( LiteralStringRef ( " \ xff \ xff / worker_interfaces / " ) , LiteralStringRef ( " \ xff \ xff / worker_interfaces0 " ) ) } , <nl> { SpecialKeySpace : : MODULE : : STATUSJSON , singleKeyRange ( LiteralStringRef ( " \ xff \ xff / status / json " ) ) } , <nl> { SpecialKeySpace : : MODULE : : CONNECTIONSTRING , singleKeyRange ( LiteralStringRef ( " \ xff \ xff / connection_string " ) ) } , <nl> - { SpecialKeySpace : : MODULE : : CLUSTERFILEPATH , singleKeyRange ( LiteralStringRef ( " \ xff \ xff / cluster_file_path " ) ) } <nl> + { SpecialKeySpace : : MODULE : : CLUSTERFILEPATH , singleKeyRange ( LiteralStringRef ( " \ xff \ xff / cluster_file_path " ) ) } , <nl> + { SpecialKeySpace : : MODULE : : METRICS , <nl> + KeyRangeRef ( LiteralStringRef ( " \ xff \ xff / metrics / " ) , LiteralStringRef ( " \ xff \ xff / metrics0 " ) ) } <nl> } ; <nl> <nl> / / This function will move the given KeySelector as far as possible to the standard form : <nl> SpecialKeySpace : : getRangeAggregationActor ( SpecialKeySpace * sks , Reference < ReadYo <nl> state Optional < SpecialKeySpace : : MODULE > lastModuleRead ; <nl> <nl> wait ( normalizeKeySelectorActor ( sks , ryw , & begin , & lastModuleRead , & actualBeginOffset , & result ) ) ; <nl> - / / TODO : check if end the boundary of a module <nl> wait ( normalizeKeySelectorActor ( sks , ryw , & end , & lastModuleRead , & actualEndOffset , & result ) ) ; <nl> / / Handle all corner cases like what RYW does <nl> / / return if range inverted <nl> Future < Standalone < RangeResultRef > > ConflictingKeysImpl : : getRange ( Reference < ReadY <nl> return result ; <nl> } <nl> <nl> + ACTOR Future < Standalone < RangeResultRef > > ddStatsGetRangeActor ( Reference < ReadYourWritesTransaction > ryw , <nl> + KeyRangeRef kr ) { <nl> + try { <nl> + auto keys = kr . removePrefix ( ddStatsRange . begin ) ; <nl> + Standalone < VectorRef < DDMetricsRef > > resultWithoutPrefix = <nl> + wait ( waitDataDistributionMetricsList ( ryw - > getDatabase ( ) , keys , CLIENT_KNOBS - > STORAGE_METRICS_SHARD_LIMIT ) ) ; <nl> + Standalone < RangeResultRef > result ; <nl> + for ( const auto & ddMetricsRef : resultWithoutPrefix ) { <nl> + / / each begin key is the previous end key , thus we only encode the begin key in the result <nl> + KeyRef beginKey = ddMetricsRef . beginKey . withPrefix ( ddStatsRange . begin , result . arena ( ) ) ; <nl> + / / Use json string encoded in utf - 8 to encode the values , easy for adding more fields in the future <nl> + json_spirit : : mObject statsObj ; <nl> + statsObj [ " ShardBytes " ] = ddMetricsRef . shardBytes ; <nl> + std : : string statsString = <nl> + json_spirit : : write_string ( json_spirit : : mValue ( statsObj ) , json_spirit : : Output_options : : raw_utf8 ) ; <nl> + ValueRef bytes ( result . arena ( ) , statsString ) ; <nl> + result . push_back ( result . 
arena ( ) , KeyValueRef ( beginKey , bytes ) ) ; <nl> + } <nl> + return result ; <nl> + } catch ( Error & e ) { <nl> + throw ; <nl> + } <nl> + } <nl> + <nl> + DDStatsRangeImpl : : DDStatsRangeImpl ( KeyRangeRef kr ) : SpecialKeyRangeBaseImpl ( kr ) { } <nl> + <nl> + Future < Standalone < RangeResultRef > > DDStatsRangeImpl : : getRange ( Reference < ReadYourWritesTransaction > ryw , <nl> + KeyRangeRef kr ) const { <nl> + return ddStatsGetRangeActor ( ryw , kr ) ; <nl> + } <nl> + <nl> class SpecialKeyRangeTestImpl : public SpecialKeyRangeBaseImpl { <nl> public : <nl> explicit SpecialKeyRangeTestImpl ( KeyRangeRef kr , const std : : string & prefix , int size ) <nl> mmm a / fdbclient / SpecialKeySpace . actor . h <nl> ppp b / fdbclient / SpecialKeySpace . actor . h <nl> class SpecialKeyRangeBaseImpl { <nl> class SpecialKeySpace { <nl> public : <nl> enum class MODULE { <nl> - UNKNOWN , / / default value for all unregistered range <nl> + CLUSTERFILEPATH , <nl> + CONNECTIONSTRING , <nl> + METRICS , / / data - distribution metrics <nl> TESTONLY , / / only used by correctness tests <nl> - TRANSACTION , <nl> - WORKERINTERFACE , <nl> + TRANSACTION , / / transaction related info , conflicting keys , read / write conflict range <nl> STATUSJSON , <nl> - CLUSTERFILEPATH , <nl> - CONNECTIONSTRING <nl> + UNKNOWN , / / default value for all unregistered range <nl> + WORKERINTERFACE , <nl> } ; <nl> <nl> Future < Optional < Value > > get ( Reference < ReadYourWritesTransaction > ryw , const Key & key ) ; <nl> class WriteConflictRangeImpl : public SpecialKeyRangeBaseImpl { <nl> KeyRangeRef kr ) const override ; <nl> } ; <nl> <nl> + class DDStatsRangeImpl : public SpecialKeyRangeBaseImpl { <nl> + public : <nl> + explicit DDStatsRangeImpl ( KeyRangeRef kr ) ; <nl> + Future < Standalone < RangeResultRef > > getRange ( Reference < ReadYourWritesTransaction > ryw , <nl> + KeyRangeRef kr ) const override ; <nl> + } ; <nl> + <nl> # include " flow / unactorcompiler . h " <nl> # endif <nl> mmm a / fdbclient / StorageServerInterface . h <nl> ppp b / fdbclient / StorageServerInterface . h <nl> struct StorageServerInterface { <nl> <nl> LocalityData locality ; <nl> UID uniqueID ; <nl> - Endpoint base ; <nl> <nl> RequestStream < struct GetValueRequest > getValue ; <nl> RequestStream < struct GetKeyRequest > getKey ; <nl> struct StorageServerInterface { <nl> / / versioned carefully ! <nl> <nl> if ( ar . protocolVersion ( ) . hasSmallEndpoints ( ) ) { <nl> - serializer ( ar , uniqueID , locality , base ) ; <nl> + serializer ( ar , uniqueID , locality , getValue ) ; <nl> if ( Ar : : isDeserializing ) { <nl> - getValue = RequestStream < struct GetValueRequest > ( base . getAdjustedEndpoint ( 0 ) ) ; <nl> - getKey = RequestStream < struct GetKeyRequest > ( base . getAdjustedEndpoint ( 1 ) ) ; <nl> - getKeyValues = RequestStream < struct GetKeyValuesRequest > ( base . getAdjustedEndpoint ( 2 ) ) ; <nl> - getShardState = RequestStream < struct GetShardStateRequest > ( base . getAdjustedEndpoint ( 3 ) ) ; <nl> - waitMetrics = RequestStream < struct WaitMetricsRequest > ( base . getAdjustedEndpoint ( 4 ) ) ; <nl> - splitMetrics = RequestStream < struct SplitMetricsRequest > ( base . getAdjustedEndpoint ( 5 ) ) ; <nl> - getStorageMetrics = RequestStream < struct GetStorageMetricsRequest > ( base . getAdjustedEndpoint ( 6 ) ) ; <nl> - waitFailure = RequestStream < ReplyPromise < Void > > ( base . getAdjustedEndpoint ( 7 ) ) ; <nl> - getQueuingMetrics = RequestStream < struct StorageQueuingMetricsRequest > ( base . 
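// Illustrative read of the metrics module registered above: keys under
// \xff\xff/metrics/data_distribution_stats/ carry a shard begin key and a
// JSON value such as {"ShardBytes": 1234}. The actor name is invented, and
// the plain getRange overload is assumed to route through SpecialKeySpace.
ACTOR Future<Void> printDDStats(Database db) {
	state ReadYourWritesTransaction tr(db);
	loop {
		try {
			Standalone<RangeResultRef> kvs = wait(tr.getRange(ddStatsRange, CLIENT_KNOBS->TOO_MANY));
			for (const auto& kv : kvs) {
				// key suffix = shard begin key; value = utf-8 JSON stats object
				printf("%s => %s\n", printable(kv.key).c_str(), kv.value.toString().c_str());
			}
			return Void();
		} catch (Error& e) {
			wait(tr.onError(e));
		}
	}
}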
getAdjustedEndpoint ( 8 ) ) ; <nl> - getKeyValueStoreType = RequestStream < ReplyPromise < KeyValueStoreType > > ( base . getAdjustedEndpoint ( 9 ) ) ; <nl> - watchValue = RequestStream < struct WatchValueRequest > ( base . getAdjustedEndpoint ( 10 ) ) ; <nl> - getReadHotRanges = RequestStream < struct ReadHotSubRangeRequest > ( base . getAdjustedEndpoint ( 11 ) ) ; <nl> + getKey = RequestStream < struct GetKeyRequest > ( getValue . getEndpoint ( ) . getAdjustedEndpoint ( 1 ) ) ; <nl> + getKeyValues = RequestStream < struct GetKeyValuesRequest > ( getValue . getEndpoint ( ) . getAdjustedEndpoint ( 2 ) ) ; <nl> + getShardState = RequestStream < struct GetShardStateRequest > ( getValue . getEndpoint ( ) . getAdjustedEndpoint ( 3 ) ) ; <nl> + waitMetrics = RequestStream < struct WaitMetricsRequest > ( getValue . getEndpoint ( ) . getAdjustedEndpoint ( 4 ) ) ; <nl> + splitMetrics = RequestStream < struct SplitMetricsRequest > ( getValue . getEndpoint ( ) . getAdjustedEndpoint ( 5 ) ) ; <nl> + getStorageMetrics = RequestStream < struct GetStorageMetricsRequest > ( getValue . getEndpoint ( ) . getAdjustedEndpoint ( 6 ) ) ; <nl> + waitFailure = RequestStream < ReplyPromise < Void > > ( getValue . getEndpoint ( ) . getAdjustedEndpoint ( 7 ) ) ; <nl> + getQueuingMetrics = RequestStream < struct StorageQueuingMetricsRequest > ( getValue . getEndpoint ( ) . getAdjustedEndpoint ( 8 ) ) ; <nl> + getKeyValueStoreType = RequestStream < ReplyPromise < KeyValueStoreType > > ( getValue . getEndpoint ( ) . getAdjustedEndpoint ( 9 ) ) ; <nl> + watchValue = RequestStream < struct WatchValueRequest > ( getValue . getEndpoint ( ) . getAdjustedEndpoint ( 10 ) ) ; <nl> + getReadHotRanges = RequestStream < struct ReadHotSubRangeRequest > ( getValue . getEndpoint ( ) . getAdjustedEndpoint ( 11 ) ) ; <nl> } <nl> } else { <nl> ASSERT ( Ar : : isDeserializing ) ; <nl> struct StorageServerInterface { <nl> serializer ( ar , uniqueID , locality , getValue , getKey , getKeyValues , getShardState , waitMetrics , <nl> splitMetrics , getStorageMetrics , waitFailure , getQueuingMetrics , getKeyValueStoreType ) ; <nl> if ( ar . protocolVersion ( ) . hasWatches ( ) ) serializer ( ar , watchValue ) ; <nl> - base = getValue . getEndpoint ( ) ; <nl> } <nl> } <nl> bool operator = = ( StorageServerInterface const & s ) const { return uniqueID = = s . uniqueID ; } <nl> struct StorageServerInterface { <nl> streams . push_back ( getKeyValueStoreType . getReceiver ( ) ) ; <nl> streams . push_back ( watchValue . getReceiver ( ) ) ; <nl> streams . push_back ( getReadHotRanges . getReceiver ( ) ) ; <nl> - base = FlowTransport : : transport ( ) . addEndpoints ( streams ) ; <nl> + FlowTransport : : transport ( ) . addEndpoints ( streams ) ; <nl> } <nl> } ; <nl> <nl> struct GetShardStateRequest { <nl> struct StorageMetrics { <nl> constexpr static FileIdentifier file_identifier = 13622226 ; <nl> int64_t bytes = 0 ; / / total storage <nl> + / / FIXME : currently , neither of bytesPerKSecond or iosPerKSecond are actually used in DataDistribution calculations . <nl> + / / This may change in the future , but this comment is left here to avoid any confusion for the time being . <nl> int64_t bytesPerKSecond = 0 ; / / network bandwidth ( average over 10s ) <nl> int64_t iosPerKSecond = 0 ; <nl> int64_t bytesReadPerKSecond = 0 ; <nl> mmm a / fdbclient / SystemData . cpp <nl> ppp b / fdbclient / SystemData . cpp <nl> const KeyRef keyServersKey ( const KeyRef & k , Arena & arena ) { <nl> return k . 
withPrefix ( keyServersPrefix , arena ) ; <nl> } <nl> const Value keyServersValue ( Standalone < RangeResultRef > result , const std : : vector < UID > & src , const std : : vector < UID > & dest ) { <nl> + if ( ! CLIENT_KNOBS - > TAG_ENCODE_KEY_SERVERS ) { <nl> + BinaryWriter wr ( IncludeVersion ( ) ) ; wr < < src < < dest ; <nl> + return wr . toValue ( ) ; <nl> + } <nl> + <nl> std : : vector < Tag > srcTag ; <nl> std : : vector < Tag > destTag ; <nl> <nl> const KeyRangeRef writeConflictRangeKeysRange = <nl> KeyRangeRef ( LiteralStringRef ( " \ xff \ xff / transaction / write_conflict_range / " ) , <nl> LiteralStringRef ( " \ xff \ xff / transaction / write_conflict_range / \ xff \ xff " ) ) ; <nl> <nl> + const KeyRangeRef ddStatsRange = KeyRangeRef ( LiteralStringRef ( " \ xff \ xff / metrics / data_distribution_stats / " ) , <nl> + LiteralStringRef ( " \ xff \ xff / metrics / data_distribution_stats / \ xff \ xff " ) ) ; <nl> + <nl> / / " \ xff / storageCache / [ [ begin ] ] " : = " [ [ vector < uint16_t > ] ] " <nl> const KeyRangeRef storageCacheKeys ( LiteralStringRef ( " \ xff / storageCache / " ) , LiteralStringRef ( " \ xff / storageCache0 " ) ) ; <nl> const KeyRef storageCachePrefix = storageCacheKeys . begin ; <nl> mmm a / fdbclient / SystemData . h <nl> ppp b / fdbclient / SystemData . h <nl> extern const KeyRangeRef conflictingKeysRange ; <nl> extern const ValueRef conflictingKeysTrue , conflictingKeysFalse ; <nl> extern const KeyRangeRef writeConflictRangeKeysRange ; <nl> extern const KeyRangeRef readConflictRangeKeysRange ; <nl> + extern const KeyRangeRef ddStatsRange ; <nl> <nl> extern const KeyRef cacheKeysPrefix ; <nl> <nl> mmm a / fdbclient / TagThrottle . actor . cpp <nl> ppp b / fdbclient / TagThrottle . actor . cpp <nl> Key TagThrottleKey : : toKey ( ) const { <nl> memcpy ( str , tagThrottleKeysPrefix . begin ( ) , tagThrottleKeysPrefix . size ( ) ) ; <nl> str + = tagThrottleKeysPrefix . size ( ) ; <nl> <nl> - * ( str + + ) = autoThrottled ? 1 : 0 ; <nl> + * ( str + + ) = ( uint8_t ) throttleType ; <nl> * ( str + + ) = ( uint8_t ) priority ; <nl> <nl> for ( auto tag : tags ) { <nl> Key TagThrottleKey : : toKey ( ) const { <nl> <nl> TagThrottleKey TagThrottleKey : : fromKey ( const KeyRef & key ) { <nl> const uint8_t * str = key . substr ( tagThrottleKeysPrefix . size ( ) ) . begin ( ) ; <nl> - bool autoThrottled = * ( str + + ) ! = 0 ; <nl> + TagThrottleType throttleType = TagThrottleType ( * ( str + + ) ) ; <nl> TransactionPriority priority = TransactionPriority ( * ( str + + ) ) ; <nl> TagSet tags ; <nl> <nl> TagThrottleKey TagThrottleKey : : fromKey ( const KeyRef & key ) { <nl> str + = size ; <nl> } <nl> <nl> - return TagThrottleKey ( tags , autoThrottled , priority ) ; <nl> + return TagThrottleKey ( tags , throttleType , priority ) ; <nl> } <nl> <nl> TagThrottleValue TagThrottleValue : : fromValue ( const ValueRef & value ) { <nl> namespace ThrottleApi { <nl> } <nl> } <nl> <nl> - ACTOR Future < Void > throttleTags ( Database db , TagSet tags , double tpsRate , double initialDuration , bool autoThrottled , TransactionPriority priority , Optional < double > expirationTime ) { <nl> + ACTOR Future < Void > throttleTags ( Database db , TagSet tags , double tpsRate , double initialDuration , TagThrottleType throttleType , TransactionPriority priority , Optional < double > expirationTime ) { <nl> state Transaction tr ( db ) ; <nl> - state Key key = TagThrottleKey ( tags , autoThrottled , priority ) . 
toKey ( ) ; <nl> + state Key key = TagThrottleKey ( tags , throttleType , priority ) . toKey ( ) ; <nl> <nl> ASSERT ( initialDuration > 0 ) ; <nl> <nl> namespace ThrottleApi { <nl> <nl> loop { <nl> try { <nl> - if ( ! autoThrottled ) { <nl> + if ( throttleType = = TagThrottleType : : MANUAL ) { <nl> Optional < Value > oldThrottle = wait ( tr . get ( key ) ) ; <nl> if ( ! oldThrottle . present ( ) ) { <nl> wait ( updateThrottleCount ( & tr , 1 ) ) ; <nl> namespace ThrottleApi { <nl> <nl> tr . set ( key , value ) ; <nl> <nl> - if ( ! autoThrottled ) { <nl> + if ( throttleType = = TagThrottleType : : MANUAL ) { <nl> signalThrottleChange ( tr ) ; <nl> } <nl> <nl> namespace ThrottleApi { <nl> } <nl> } <nl> <nl> - ACTOR Future < bool > unthrottleTags ( Database db , TagSet tags , bool autoThrottled , TransactionPriority priority ) { <nl> + ACTOR Future < bool > unthrottleTags ( Database db , TagSet tags , Optional < TagThrottleType > throttleType , Optional < TransactionPriority > priority ) { <nl> state Transaction tr ( db ) ; <nl> - state Key key = TagThrottleKey ( tags , autoThrottled , priority ) . toKey ( ) ; <nl> - state bool removed = false ; <nl> <nl> + state std : : vector < Key > keys ; <nl> + for ( auto p : allTransactionPriorities ) { <nl> + if ( ! priority . present ( ) | | priority . get ( ) = = p ) { <nl> + if ( ! throttleType . present ( ) | | throttleType . get ( ) = = TagThrottleType : : AUTO ) { <nl> + keys . push_back ( TagThrottleKey ( tags , TagThrottleType : : AUTO , p ) . toKey ( ) ) ; <nl> + } <nl> + if ( ! throttleType . present ( ) | | throttleType . get ( ) = = TagThrottleType : : MANUAL ) { <nl> + keys . push_back ( TagThrottleKey ( tags , TagThrottleType : : MANUAL , p ) . toKey ( ) ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + state bool removed = false ; <nl> <nl> loop { <nl> try { <nl> - state Optional < Value > value = wait ( tr . get ( key ) ) ; <nl> - if ( value . present ( ) ) { <nl> - if ( ! autoThrottled ) { <nl> - wait ( updateThrottleCount ( & tr , - 1 ) ) ; <nl> + state std : : vector < Future < Optional < Value > > > values ; <nl> + for ( auto key : keys ) { <nl> + values . push_back ( tr . get ( key ) ) ; <nl> + } <nl> + <nl> + wait ( waitForAll ( values ) ) ; <nl> + <nl> + int delta = 0 ; <nl> + for ( int i = 0 ; i < values . size ( ) ; + + i ) { <nl> + if ( values [ i ] . get ( ) . present ( ) ) { <nl> + if ( TagThrottleKey : : fromKey ( keys [ i ] ) . throttleType = = TagThrottleType : : MANUAL ) { <nl> + delta - = 1 ; <nl> + } <nl> + <nl> + tr . clear ( keys [ i ] ) ; <nl> + <nl> + / / Report that we are removing this tag if we ever see it present . <nl> + / / This protects us from getting confused if the transaction is maybe committed . <nl> + / / It ' s ok if someone else actually ends up removing this tag at the same time <nl> + / / and we aren ' t the ones to actually do it . <nl> + removed = true ; <nl> } <nl> + } <nl> <nl> - tr . clear ( key ) ; <nl> + if ( delta ! = 0 ) { <nl> + wait ( updateThrottleCount ( & tr , delta ) ) ; <nl> + } <nl> + if ( removed ) { <nl> signalThrottleChange ( tr ) ; <nl> - <nl> - / / Report that we are removing this tag if we ever see it present . <nl> - / / This protects us from getting confused if the transaction is maybe committed . <nl> - / / It ' s ok if someone else actually ends up removing this tag at the same time <nl> - / / and we aren ' t the ones to actually do it . <nl> - removed = true ; <nl> wait ( tr . 
commit ( ) ) ; <nl> } <nl> <nl> namespace ThrottleApi { <nl> } <nl> } <nl> <nl> - ACTOR Future < bool > unthrottleTags ( Database db , KeyRef beginKey , KeyRef endKey , bool onlyExpiredThrottles ) { <nl> + ACTOR Future < bool > unthrottleMatchingThrottles ( Database db , KeyRef beginKey , KeyRef endKey , Optional < TransactionPriority > priority , bool onlyExpiredThrottles ) { <nl> state Transaction tr ( db ) ; <nl> <nl> state KeySelector begin = firstGreaterOrEqual ( beginKey ) ; <nl> namespace ThrottleApi { <nl> } <nl> } <nl> <nl> - bool autoThrottled = TagThrottleKey : : fromKey ( tag . key ) . autoThrottled ; <nl> - if ( ! autoThrottled ) { <nl> + TagThrottleKey key = TagThrottleKey : : fromKey ( tag . key ) ; <nl> + if ( priority . present ( ) & & key . priority ! = priority . get ( ) ) { <nl> + continue ; <nl> + } <nl> + <nl> + if ( key . throttleType = = TagThrottleType : : MANUAL ) { <nl> + + manualUnthrottledTags ; <nl> } <nl> <nl> namespace ThrottleApi { <nl> } <nl> } <nl> <nl> - Future < bool > unthrottleManual ( Database db ) { <nl> - return unthrottleTags ( db , tagThrottleKeysPrefix , tagThrottleAutoKeysPrefix , false ) ; <nl> - } <nl> + Future < bool > unthrottleAll ( Database db , Optional < TagThrottleType > tagThrottleType , Optional < TransactionPriority > priority ) { <nl> + KeyRef begin = tagThrottleKeys . begin ; <nl> + KeyRef end = tagThrottleKeys . end ; <nl> <nl> - Future < bool > unthrottleAuto ( Database db ) { <nl> - return unthrottleTags ( db , tagThrottleAutoKeysPrefix , tagThrottleKeys . end , false ) ; <nl> - } <nl> + if ( tagThrottleType . present ( ) & & tagThrottleType = = TagThrottleType : : AUTO ) { <nl> + begin = tagThrottleAutoKeysPrefix ; <nl> + } <nl> + else if ( tagThrottleType . present ( ) & & tagThrottleType = = TagThrottleType : : MANUAL ) { <nl> + end = tagThrottleAutoKeysPrefix ; <nl> + } <nl> <nl> - Future < bool > unthrottleAll ( Database db ) { <nl> - return unthrottleTags ( db , tagThrottleKeys . begin , tagThrottleKeys . end , false ) ; <nl> + return unthrottleMatchingThrottles ( db , begin , end , priority , false ) ; <nl> } <nl> <nl> Future < bool > expire ( Database db ) { <nl> - return unthrottleTags ( db , tagThrottleKeys . begin , tagThrottleKeys . end , true ) ; <nl> + return unthrottleMatchingThrottles ( db , tagThrottleKeys . begin , tagThrottleKeys . end , Optional < TransactionPriority > ( ) , true ) ; <nl> } <nl> <nl> ACTOR Future < Void > enableAuto ( Database db , bool enabled ) { <nl> mmm a / fdbclient / TagThrottle . h <nl> ppp b / fdbclient / TagThrottle . 
h <nl> struct dynamic_size_traits < TagSet > : std : : true_type { <nl> } <nl> } ; <nl> <nl> + enum class TagThrottleType : uint8_t { <nl> + MANUAL , <nl> + AUTO <nl> + } ; <nl> + <nl> struct TagThrottleKey { <nl> TagSet tags ; <nl> - bool autoThrottled ; <nl> + TagThrottleType throttleType ; <nl> TransactionPriority priority ; <nl> <nl> - TagThrottleKey ( ) : autoThrottled ( false ) , priority ( TransactionPriority : : DEFAULT ) { } <nl> - TagThrottleKey ( TagSet tags , bool autoThrottled , TransactionPriority priority ) <nl> - : tags ( tags ) , autoThrottled ( autoThrottled ) , priority ( priority ) { } <nl> + TagThrottleKey ( ) : throttleType ( TagThrottleType : : MANUAL ) , priority ( TransactionPriority : : DEFAULT ) { } <nl> + TagThrottleKey ( TagSet tags , TagThrottleType throttleType , TransactionPriority priority ) <nl> + : tags ( tags ) , throttleType ( throttleType ) , priority ( priority ) { } <nl> <nl> Key toKey ( ) const ; <nl> static TagThrottleKey fromKey ( const KeyRef & key ) ; <nl> struct TagThrottleValue { <nl> <nl> struct TagThrottleInfo { <nl> TransactionTag tag ; <nl> - bool autoThrottled ; <nl> + TagThrottleType throttleType ; <nl> TransactionPriority priority ; <nl> double tpsRate ; <nl> double expirationTime ; <nl> double initialDuration ; <nl> <nl> - TagThrottleInfo ( TransactionTag tag , bool autoThrottled , TransactionPriority priority , double tpsRate , double expirationTime , double initialDuration ) <nl> - : tag ( tag ) , autoThrottled ( autoThrottled ) , priority ( priority ) , tpsRate ( tpsRate ) , expirationTime ( expirationTime ) , initialDuration ( initialDuration ) { } <nl> + TagThrottleInfo ( TransactionTag tag , TagThrottleType throttleType , TransactionPriority priority , double tpsRate , double expirationTime , double initialDuration ) <nl> + : tag ( tag ) , throttleType ( throttleType ) , priority ( priority ) , tpsRate ( tpsRate ) , expirationTime ( expirationTime ) , initialDuration ( initialDuration ) { } <nl> <nl> TagThrottleInfo ( TagThrottleKey key , TagThrottleValue value ) <nl> - : autoThrottled ( key . autoThrottled ) , priority ( key . priority ) , tpsRate ( value . tpsRate ) , expirationTime ( value . expirationTime ) , initialDuration ( value . initialDuration ) <nl> + : throttleType ( key . throttleType ) , priority ( key . priority ) , tpsRate ( value . tpsRate ) , expirationTime ( value . expirationTime ) , initialDuration ( value . initialDuration ) <nl> { <nl> ASSERT ( key . tags . size ( ) = = 1 ) ; / / Multiple tags per throttle is not currently supported <nl> tag = * key . tags . 
begin ( ) ; <nl> namespace ThrottleApi { <nl> Future < std : : vector < TagThrottleInfo > > getThrottledTags ( Database const & db , int const & limit ) ; <nl> <nl> Future < Void > throttleTags ( Database const & db , TagSet const & tags , double const & tpsRate , double const & initialDuration , <nl> - bool const & autoThrottled , TransactionPriority const & priority , Optional < double > const & expirationTime = Optional < double > ( ) ) ; <nl> + TagThrottleType const & throttleType , TransactionPriority const & priority , Optional < double > const & expirationTime = Optional < double > ( ) ) ; <nl> <nl> - Future < bool > unthrottleTags ( Database const & db , TagSet const & tags , bool const & autoThrottled , TransactionPriority const & priority ) ; <nl> + Future < bool > unthrottleTags ( Database const & db , TagSet const & tags , Optional < TagThrottleType > const & throttleType , Optional < TransactionPriority > const & priority ) ; <nl> <nl> - Future < bool > unthrottleManual ( Database db ) ; <nl> - Future < bool > unthrottleAuto ( Database db ) ; <nl> - Future < bool > unthrottleAll ( Database db ) ; <nl> + Future < bool > unthrottleAll ( Database db , Optional < TagThrottleType > throttleType , Optional < TransactionPriority > priority ) ; <nl> Future < bool > expire ( Database db ) ; <nl> <nl> Future < Void > enableAuto ( Database const & db , bool const & enabled ) ; <nl> mmm a / fdbclient / ThreadSafeTransaction . actor . cpp <nl> ppp b / fdbclient / ThreadSafeTransaction . actor . cpp <nl> <nl> # include " fdbclient / ThreadSafeTransaction . h " <nl> # include " fdbclient / ReadYourWrites . h " <nl> # include " fdbclient / DatabaseContext . h " <nl> - # include " fdbclient / IncludeVersions . h " <nl> + # include " fdbclient / versions . h " <nl> <nl> / / Users of ThreadSafeTransaction might share Reference < ThreadSafe . . . > between different threads as long as they don ' t call addRef ( e . g . C API follows this ) . <nl> / / Therefore , it is unsafe to call ( explicitly or implicitly ) this - > addRef in any of these functions . <nl> mmm a / fdbmonitor / fdbmonitor . cpp <nl> ppp b / fdbmonitor / fdbmonitor . cpp <nl> <nl> # include " flow / SimpleOpt . h " <nl> # include " SimpleIni . h " <nl> <nl> - # include " fdbclient / IncludeVersions . h " <nl> + # include " fdbclient / versions . h " <nl> <nl> # ifdef __linux__ <nl> typedef fd_set * fdb_fd_set ; <nl> mmm a / fdbrpc / ActorFuzz . actor . cpp <nl> ppp b / fdbrpc / ActorFuzz . actor . 
cpp <nl> ACTOR Future < int > actorFuzz29 ( FutureStream < int > inputStream , PromiseStream < int > <nl> <nl> std : : pair < int , int > actorFuzzTests ( ) { <nl> int testsOK = 0 ; <nl> - testsOK + = testFuzzActor ( & actorFuzz0 , " actorFuzz0 " , ( vector < int > ( ) , 390229 , 596271 , 574865 ) ) ; <nl> - testsOK + = testFuzzActor ( & actorFuzz1 , " actorFuzz1 " , ( vector < int > ( ) , 477566 , 815578 , 477566 , 815578 , 477566 , 815578 , 477566 , 815578 , 477566 , 815578 , 917160 ) ) ; <nl> - testsOK + = testFuzzActor ( & actorFuzz2 , " actorFuzz2 " , ( vector < int > ( ) , 476677 , 930237 ) ) ; <nl> - testsOK + = testFuzzActor ( & actorFuzz3 , " actorFuzz3 " , ( vector < int > ( ) , 1000 ) ) ; <nl> - testsOK + = testFuzzActor ( & actorFuzz4 , " actorFuzz4 " , ( vector < int > ( ) , 180600 , 177605 , 177605 , 177605 , 954508 , 810052 ) ) ; <nl> - testsOK + = testFuzzActor ( & actorFuzz5 , " actorFuzz5 " , ( vector < int > ( ) , 1000 ) ) ; <nl> - testsOK + = testFuzzActor ( & actorFuzz6 , " actorFuzz6 " , ( vector < int > ( ) , 320321 , 266526 , 762336 , 463730 , 320321 , 266526 , 762336 , 463730 , 320321 , 266526 , 762336 , 463730 , 320321 , 266526 , 762336 , 463730 , 320321 , 266526 , 762336 , 463730 , 945289 ) ) ; <nl> - testsOK + = testFuzzActor ( & actorFuzz7 , " actorFuzz7 " , ( vector < int > ( ) , 406152 , 478841 , 609181 , 634881 , 253861 , 592023 , 240597 , 253861 , 593023 , 240597 , 253861 , 594023 , 240597 , 415949 , 169335 , 478331 , 634881 , 253861 , 596023 , 240597 , 253861 , 597023 , 240597 , 253861 , 598023 , 240597 , 415949 , 173335 , 478331 , 634881 , 253861 , 600023 , 240597 , 253861 , 601023 , 240597 , 253861 , 602023 , 240597 , 415949 , 177335 , 478331 , 634881 , 253861 , 604023 , 240597 , 253861 , 605023 , 240597 , 253861 , 606023 , 240597 , 415949 , 181335 , 478331 , 634881 , 253861 , 608023 , 240597 , 253861 , 609023 , 240597 , 253861 , 610023 , 240597 , 415949 , 185335 , 478331 , 331905 , 946924 , 663973 , 797073 , 971923 , 295772 , 923567 , 559259 , 559259 , 559259 , 325678 , 679187 , 295772 , 923567 , 559259 , 559259 , 559259 , 325678 , 679187 , 295772 , 923567 , 559259 , 559259 , 559259 , 325678 , 679187 , 295772 , 923567 , 559259 , 559259 , 559259 , 325678 , 679187 , 295772 , 923567 , 559259 , 559259 , 559259 , 325678 , 679187 , 534407 , 814172 , 949658 ) ) ; <nl> - testsOK + = testFuzzActor ( & actorFuzz8 , " actorFuzz8 " , ( vector < int > ( ) , 285937 , 696473 ) ) ; <nl> - testsOK + = testFuzzActor ( & actorFuzz9 , " actorFuzz9 " , ( vector < int > ( ) , 141463 , 397424 ) ) ; <nl> - testsOK + = testFuzzActor ( & actorFuzz10 , " actorFuzz10 " , ( vector < int > ( ) , 543113 , 1000 ) ) ; <nl> - testsOK + = testFuzzActor ( & actorFuzz11 , " actorFuzz11 " , ( vector < int > ( ) , 1000 ) ) ; <nl> - testsOK + = testFuzzActor ( & actorFuzz12 , " actorFuzz12 " , ( vector < int > ( ) , 970588 , 981887 ) ) ; <nl> - testsOK + = testFuzzActor ( & actorFuzz13 , " actorFuzz13 " , ( vector < int > ( ) , 861219 ) ) ; <nl> - testsOK + = testFuzzActor ( & actorFuzz14 , " actorFuzz14 " , ( vector < int > ( ) , 527098 , 527098 , 527098 , 628047 ) ) ; <nl> - testsOK + = testFuzzActor ( & actorFuzz15 , " actorFuzz15 " , ( vector < int > ( ) , 582389 , 240216 , 732317 , 582389 , 240216 , 732317 , 582389 , 240216 , 732317 , 582389 , 240216 , 732317 , 582389 , 240216 , 732317 , 884781 ) ) ; <nl> - testsOK + = testFuzzActor ( & actorFuzz16 , " actorFuzz16 " , ( vector < int > ( ) , 943071 , 492690 , 908751 , 198776 , 537939 ) ) ; <nl> - testsOK + = testFuzzActor ( & actorFuzz17 
, " actorFuzz17 " , ( vector < int > ( ) , 249436 , 416782 , 249436 , 416782 , 249436 , 416782 , 299183 ) ) ; <nl> - testsOK + = testFuzzActor ( & actorFuzz18 , " actorFuzz18 " , ( vector < int > ( ) , 337649 , 395297 , 807261 , 517901 ) ) ; <nl> - testsOK + = testFuzzActor ( & actorFuzz19 , " actorFuzz19 " , ( vector < int > ( ) , 492598 , 139186 , 742053 , 492598 , 140186 , 742053 , 492598 , 141186 , 742053 , 592919 ) ) ; <nl> - testsOK + = testFuzzActor ( & actorFuzz20 , " actorFuzz20 " , ( vector < int > ( ) , 760082 , 1000 ) ) ; <nl> - testsOK + = testFuzzActor ( & actorFuzz21 , " actorFuzz21 " , ( vector < int > ( ) , 806394 ) ) ; <nl> - testsOK + = testFuzzActor ( & actorFuzz22 , " actorFuzz22 " , ( vector < int > ( ) , 722878 , 369302 , 416748 ) ) ; <nl> - testsOK + = testFuzzActor ( & actorFuzz23 , " actorFuzz23 " , ( vector < int > ( ) , 562792 , 231437 ) ) ; <nl> - testsOK + = testFuzzActor ( & actorFuzz24 , " actorFuzz24 " , ( vector < int > ( ) , 847672 , 835175 ) ) ; <nl> - testsOK + = testFuzzActor ( & actorFuzz25 , " actorFuzz25 " , ( vector < int > ( ) , 843261 , 327560 , 592398 ) ) ; <nl> - testsOK + = testFuzzActor ( & actorFuzz26 , " actorFuzz26 " , ( vector < int > ( ) , 520263 , 306397 , 944232 , 366272 , 700651 , 146918 , 191890 ) ) ; <nl> - testsOK + = testFuzzActor ( & actorFuzz27 , " actorFuzz27 " , ( vector < int > ( ) , 313322 , 196907 ) ) ; <nl> - testsOK + = testFuzzActor ( & actorFuzz28 , " actorFuzz28 " , ( vector < int > ( ) , 715827 , 529509 , 449273 , 715827 , 529509 , 449273 , 715827 , 529509 , 449273 , 715827 , 529509 , 449273 , 715827 , 529509 , 449273 , 743922 ) ) ; <nl> - testsOK + = testFuzzActor ( & actorFuzz29 , " actorFuzz29 " , ( vector < int > ( ) , 821092 , 901028 , 617942 , 821092 , 902028 , 617942 , 821092 , 903028 , 617942 , 821092 , 904028 , 617942 , 821092 , 905028 , 617942 , 560881 ) ) ; <nl> + testsOK + = testFuzzActor ( & actorFuzz0 , " actorFuzz0 " , { 390229 , 596271 , 574865 } ) ; <nl> + testsOK + = testFuzzActor ( & actorFuzz1 , " actorFuzz1 " , { 477566 , 815578 , 477566 , 815578 , 477566 , 815578 , 477566 , 815578 , 477566 , 815578 , 917160 } ) ; <nl> + testsOK + = testFuzzActor ( & actorFuzz2 , " actorFuzz2 " , { 476677 , 930237 } ) ; <nl> + testsOK + = testFuzzActor ( & actorFuzz3 , " actorFuzz3 " , { 1000 } ) ; <nl> + testsOK + = testFuzzActor ( & actorFuzz4 , " actorFuzz4 " , { 180600 , 177605 , 177605 , 177605 , 954508 , 810052 } ) ; <nl> + testsOK + = testFuzzActor ( & actorFuzz5 , " actorFuzz5 " , { 1000 } ) ; <nl> + testsOK + = testFuzzActor ( & actorFuzz6 , " actorFuzz6 " , { 320321 , 266526 , 762336 , 463730 , 320321 , 266526 , 762336 , 463730 , 320321 , 266526 , 762336 , 463730 , 320321 , 266526 , 762336 , 463730 , 320321 , 266526 , 762336 , 463730 , 945289 } ) ; <nl> + testsOK + = testFuzzActor ( & actorFuzz7 , " actorFuzz7 " , { 406152 , 478841 , 609181 , 634881 , 253861 , 592023 , 240597 , 253861 , 593023 , 240597 , 253861 , 594023 , 240597 , 415949 , 169335 , 478331 , 634881 , 253861 , 596023 , 240597 , 253861 , 597023 , 240597 , 253861 , 598023 , 240597 , 415949 , 173335 , 478331 , 634881 , 253861 , 600023 , 240597 , 253861 , 601023 , 240597 , 253861 , 602023 , 240597 , 415949 , 177335 , 478331 , 634881 , 253861 , 604023 , 240597 , 253861 , 605023 , 240597 , 253861 , 606023 , 240597 , 415949 , 181335 , 478331 , 634881 , 253861 , 608023 , 240597 , 253861 , 609023 , 240597 , 253861 , 610023 , 240597 , 415949 , 185335 , 478331 , 331905 , 946924 , 663973 , 797073 , 971923 , 295772 , 923567 , 559259 , 559259 , 559259 , 
325678 , 679187 , 295772 , 923567 , 559259 , 559259 , 559259 , 325678 , 679187 , 295772 , 923567 , 559259 , 559259 , 559259 , 325678 , 679187 , 295772 , 923567 , 559259 , 559259 , 559259 , 325678 , 679187 , 295772 , 923567 , 559259 , 559259 , 559259 , 325678 , 679187 , 534407 , 814172 , 949658 } ) ; <nl> + testsOK + = testFuzzActor ( & actorFuzz8 , " actorFuzz8 " , { 285937 , 696473 } ) ; <nl> + testsOK + = testFuzzActor ( & actorFuzz9 , " actorFuzz9 " , { 141463 , 397424 } ) ; <nl> + testsOK + = testFuzzActor ( & actorFuzz10 , " actorFuzz10 " , { 543113 , 1000 } ) ; <nl> + testsOK + = testFuzzActor ( & actorFuzz11 , " actorFuzz11 " , { 1000 } ) ; <nl> + testsOK + = testFuzzActor ( & actorFuzz12 , " actorFuzz12 " , { 970588 , 981887 } ) ; <nl> + testsOK + = testFuzzActor ( & actorFuzz13 , " actorFuzz13 " , { 861219 } ) ; <nl> + testsOK + = testFuzzActor ( & actorFuzz14 , " actorFuzz14 " , { 527098 , 527098 , 527098 , 628047 } ) ; <nl> + testsOK + = testFuzzActor ( & actorFuzz15 , " actorFuzz15 " , { 582389 , 240216 , 732317 , 582389 , 240216 , 732317 , 582389 , 240216 , 732317 , 582389 , 240216 , 732317 , 582389 , 240216 , 732317 , 884781 } ) ; <nl> + testsOK + = testFuzzActor ( & actorFuzz16 , " actorFuzz16 " , { 943071 , 492690 , 908751 , 198776 , 537939 } ) ; <nl> + testsOK + = testFuzzActor ( & actorFuzz17 , " actorFuzz17 " , { 249436 , 416782 , 249436 , 416782 , 249436 , 416782 , 299183 } ) ; <nl> + testsOK + = testFuzzActor ( & actorFuzz18 , " actorFuzz18 " , { 337649 , 395297 , 807261 , 517901 } ) ; <nl> + testsOK + = testFuzzActor ( & actorFuzz19 , " actorFuzz19 " , { 492598 , 139186 , 742053 , 492598 , 140186 , 742053 , 492598 , 141186 , 742053 , 592919 } ) ; <nl> + testsOK + = testFuzzActor ( & actorFuzz20 , " actorFuzz20 " , { 760082 , 1000 } ) ; <nl> + testsOK + = testFuzzActor ( & actorFuzz21 , " actorFuzz21 " , { 806394 } ) ; <nl> + testsOK + = testFuzzActor ( & actorFuzz22 , " actorFuzz22 " , { 722878 , 369302 , 416748 } ) ; <nl> + testsOK + = testFuzzActor ( & actorFuzz23 , " actorFuzz23 " , { 562792 , 231437 } ) ; <nl> + testsOK + = testFuzzActor ( & actorFuzz24 , " actorFuzz24 " , { 847672 , 835175 } ) ; <nl> + testsOK + = testFuzzActor ( & actorFuzz25 , " actorFuzz25 " , { 843261 , 327560 , 592398 } ) ; <nl> + testsOK + = testFuzzActor ( & actorFuzz26 , " actorFuzz26 " , { 520263 , 306397 , 944232 , 366272 , 700651 , 146918 , 191890 } ) ; <nl> + testsOK + = testFuzzActor ( & actorFuzz27 , " actorFuzz27 " , { 313322 , 196907 } ) ; <nl> + testsOK + = testFuzzActor ( & actorFuzz28 , " actorFuzz28 " , { 715827 , 529509 , 449273 , 715827 , 529509 , 449273 , 715827 , 529509 , 449273 , 715827 , 529509 , 449273 , 715827 , 529509 , 449273 , 743922 } ) ; <nl> + testsOK + = testFuzzActor ( & actorFuzz29 , " actorFuzz29 " , { 821092 , 901028 , 617942 , 821092 , 902028 , 617942 , 821092 , 903028 , 617942 , 821092 , 904028 , 617942 , 821092 , 905028 , 617942 , 560881 } ) ; <nl> return std : : make_pair ( testsOK , 30 ) ; <nl> } <nl> # endif / / WIN32 <nl> mmm a / fdbrpc / ActorFuzz . h <nl> ppp b / fdbrpc / ActorFuzz . h <nl> <nl> <nl> using std : : vector ; <nl> <nl> - inline vector < int > & operator , ( vector < int > & v , int a ) { <nl> - v . push_back ( a ) ; <nl> - return v ; <nl> - } <nl> - <nl> - inline vector < int > & operator , ( vector < int > const & v , int a ) { <nl> - return ( const_cast < vector < int > & > ( v ) , a ) ; <nl> - } <nl> inline void throw_operation_failed ( ) { throw operation_failed ( ) ; } <nl> <nl> / / This is in dsltest . actor . 
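The initializer-list rewrite of the test tables above is enabled by deleting the operator, overloads just shown: they let an expression like (vector<int>(), 1, 2, 3) build a vector by chaining the comma operator through a const_cast on a temporary, where a brace initializer list expresses the same thing directly. A standalone illustration (runCase is a hypothetical stand-in for testFuzzActor):

	#include <vector>
	using std::vector;

	// The old hack: chain operator, to push_back into a temporary vector.
	inline vector<int>& operator,(vector<int>& v, int a) { v.push_back(a); return v; }
	inline vector<int>& operator,(vector<int> const& v, int a) {
		return (const_cast<vector<int>&>(v), a); // needed so a temporary can start the chain
	}

	void runCase(const vector<int>& expected) { (void)expected; }

	void example() {
		runCase((vector<int>(), 1, 2, 3)); // old style, needs the overloads above
		runCase({ 1, 2, 3 });              // new style, plain C++11
	}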
cpp : <nl> mmm a / fdbrpc / AsyncFileCached . actor . cpp <nl> ppp b / fdbrpc / AsyncFileCached . actor . cpp <nl> Future < Reference < IAsyncFile > > AsyncFileCached : : open_impl ( std : : string filename , <nl> return open_impl ( filename , flags , mode , pageCache ) ; <nl> } <nl> <nl> - Future < Void > AsyncFileCached : : read_write_impl ( AsyncFileCached * self , void * data , int length , int64_t offset , bool writing ) { <nl> - if ( writing ) { <nl> + template < bool writing > <nl> + Future < Void > AsyncFileCached : : read_write_impl ( AsyncFileCached * self , <nl> + typename std : : conditional_t < writing , const uint8_t * , uint8_t * > data , <nl> + int length , int64_t offset ) { <nl> + if constexpr ( writing ) { <nl> if ( offset + length > self - > length ) <nl> self - > length = offset + length ; <nl> } <nl> <nl> std : : vector < Future < Void > > actors ; <nl> <nl> - uint8_t * cdata = static_cast < uint8_t * > ( data ) ; <nl> - <nl> int offsetInPage = offset % self - > pageCache - > pageSize ; <nl> int64_t pageOffset = offset - offsetInPage ; <nl> <nl> Future < Void > AsyncFileCached : : read_write_impl ( AsyncFileCached * self , void * data <nl> <nl> int bytesInPage = std : : min ( self - > pageCache - > pageSize - offsetInPage , remaining ) ; <nl> <nl> - auto w = writing <nl> - ? p - > second - > write ( cdata , bytesInPage , offsetInPage ) <nl> - : p - > second - > read ( cdata , bytesInPage , offsetInPage ) ; <nl> + Future < Void > w ; <nl> + if constexpr ( writing ) { <nl> + w = p - > second - > write ( data , bytesInPage , offsetInPage ) ; <nl> + } else { <nl> + w = p - > second - > read ( data , bytesInPage , offsetInPage ) ; <nl> + } <nl> if ( ! w . isReady ( ) | | w . isError ( ) ) <nl> actors . push_back ( w ) ; <nl> <nl> - cdata + = bytesInPage ; <nl> + data + = bytesInPage ; <nl> pageOffset + = self - > pageCache - > pageSize ; <nl> offsetInPage = 0 ; <nl> <nl> mmm a / fdbrpc / AsyncFileCached . actor . h <nl> ppp b / fdbrpc / AsyncFileCached . actor . h <nl> <nl> # define FLOW_ASYNCFILECACHED_ACTOR_H <nl> <nl> # include < boost / intrusive / list . hpp > <nl> + # include < type_traits > <nl> <nl> # include " flow / flow . h " <nl> # include " fdbrpc / IAsyncFile . h " <nl> class AsyncFileCached : public IAsyncFile , public ReferenceCounted < AsyncFileCach <nl> length = int ( this - > length - offset ) ; <nl> ASSERT ( length > = 0 ) ; <nl> } <nl> - auto f = read_write_impl ( this , data , length , offset , false ) ; <nl> + auto f = read_write_impl < false > ( this , static_cast < uint8_t * > ( data ) , length , offset ) ; <nl> if ( f . isReady ( ) & & ! f . isError ( ) ) return length ; <nl> + + countFileCacheReadsBlocked ; <nl> + + countCacheReadsBlocked ; <nl> class AsyncFileCached : public IAsyncFile , public ReferenceCounted < AsyncFileCach <nl> wait ( self - > currentTruncate ) ; <nl> + + self - > countFileCacheWrites ; <nl> + + self - > countCacheWrites ; <nl> - Future < Void > f = read_write_impl ( self , const_cast < void * > ( data ) , length , offset , true ) ; <nl> + Future < Void > f = read_write_impl < true > ( self , static_cast < const uint8_t * > ( data ) , length , offset ) ; <nl> if ( ! f . 
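The AsyncFileCached change here turns a runtime writing flag and a void* cast into compile-time information: std::conditional_t selects const uint8_t* for the write path and uint8_t* for the read path, and if constexpr discards the untaken branch during instantiation. The same pattern in miniature (copyPage is a hypothetical example, not project code):

	#include <cstdint>
	#include <cstring>
	#include <type_traits>

	template <bool writing>
	void copyPage(std::conditional_t<writing, const uint8_t*, uint8_t*> data,
	              uint8_t* page, int length) {
		if constexpr (writing) {
			std::memcpy(page, data, length); // data is const uint8_t*: the source
		} else {
			std::memcpy(data, page, length); // data is uint8_t*: the destination
		}
	}

	// Callers pick the instantiation explicitly, as AsyncFileCached does:
	//   copyPage<true>(src, page, n);   copyPage<false>(dst, page, n);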
isReady ( ) ) { <nl> + + self - > countFileCacheWritesBlocked ; <nl> + + self - > countCacheWritesBlocked ; <nl> class AsyncFileCached : public IAsyncFile , public ReferenceCounted < AsyncFileCach <nl> return Void ( ) ; <nl> } <nl> <nl> - static Future < Void > read_write_impl ( AsyncFileCached * self , void * data , int length , int64_t offset , bool writing ) ; <nl> + template < bool writing > <nl> + static Future < Void > read_write_impl ( AsyncFileCached * self , <nl> + typename std : : conditional_t < writing , const uint8_t * , uint8_t * > data , <nl> + int length , int64_t offset ) ; <nl> <nl> void remove_page ( AFCPage * page ) ; <nl> } ; <nl> mmm a / fdbrpc / FailureMonitor . actor . cpp <nl> ppp b / fdbrpc / FailureMonitor . actor . cpp <nl> void SimpleFailureMonitor : : endpointNotFound ( Endpoint const & endpoint ) { <nl> . suppressFor ( 1 . 0 ) <nl> . detail ( " Address " , endpoint . getPrimaryAddress ( ) ) <nl> . detail ( " Token " , endpoint . token ) ; <nl> - endpointKnownFailed . set ( endpoint , true ) ; <nl> + failedEndpoints . insert ( endpoint ) ; <nl> + endpointKnownFailed . trigger ( endpoint ) ; <nl> } <nl> <nl> void SimpleFailureMonitor : : notifyDisconnect ( NetworkAddress const & address ) { <nl> void SimpleFailureMonitor : : notifyDisconnect ( NetworkAddress const & address ) { <nl> Future < Void > SimpleFailureMonitor : : onDisconnectOrFailure ( Endpoint const & endpoint ) { <nl> / / If the endpoint or address is already failed , return right away <nl> auto i = addressStatus . find ( endpoint . getPrimaryAddress ( ) ) ; <nl> - if ( i = = addressStatus . end ( ) | | i - > second . isFailed ( ) | | endpointKnownFailed . get ( endpoint ) ) { <nl> + if ( i = = addressStatus . end ( ) | | i - > second . isFailed ( ) | | failedEndpoints . count ( endpoint ) ) { <nl> TraceEvent ( " AlreadyDisconnected " ) . detail ( " Addr " , endpoint . getPrimaryAddress ( ) ) . detail ( " Tok " , endpoint . token ) ; <nl> return Void ( ) ; <nl> } <nl> Future < Void > SimpleFailureMonitor : : onStateChanged ( Endpoint const & endpoint ) { <nl> / / failure status for that endpoint can never change ( and we could be spuriously triggered by setStatus ) <nl> / / Also returns spuriously when notifyDisconnect is called ( which doesn ' t actually change the state ) , but callers <nl> / / check the state so it ' s OK <nl> - if ( endpointKnownFailed . get ( endpoint ) ) <nl> + if ( failedEndpoints . count ( endpoint ) ) <nl> return Never ( ) ; <nl> else <nl> return endpointKnownFailed . onChange ( endpoint ) ; <nl> } <nl> <nl> FailureStatus SimpleFailureMonitor : : getState ( Endpoint const & endpoint ) { <nl> - if ( endpointKnownFailed . get ( endpoint ) ) <nl> + if ( failedEndpoints . count ( endpoint ) ) <nl> return FailureStatus ( true ) ; <nl> else { <nl> auto a = addressStatus . find ( endpoint . getPrimaryAddress ( ) ) ; <nl> FailureStatus SimpleFailureMonitor : : getState ( NetworkAddress const & address ) { <nl> } <nl> <nl> bool SimpleFailureMonitor : : onlyEndpointFailed ( Endpoint const & endpoint ) { <nl> - if ( ! endpointKnownFailed . get ( endpoint ) ) return false ; <nl> + if ( ! failedEndpoints . count ( endpoint ) ) return false ; <nl> auto a = addressStatus . find ( endpoint . getPrimaryAddress ( ) ) ; <nl> if ( a = = addressStatus . 
end ( ) ) <nl> return true ; <nl> bool SimpleFailureMonitor : : onlyEndpointFailed ( Endpoint const & endpoint ) { <nl> } <nl> <nl> bool SimpleFailureMonitor : : permanentlyFailed ( Endpoint const & endpoint ) { <nl> - return endpointKnownFailed . get ( endpoint ) ; <nl> + return failedEndpoints . count ( endpoint ) ; <nl> } <nl> <nl> void SimpleFailureMonitor : : reset ( ) { <nl> addressStatus = std : : unordered_map < NetworkAddress , FailureStatus > ( ) ; <nl> + failedEndpoints = std : : unordered_set < Endpoint > ( ) ; <nl> endpointKnownFailed . resetNoWaiting ( ) ; <nl> } <nl> mmm a / fdbrpc / FailureMonitor . h <nl> ppp b / fdbrpc / FailureMonitor . h <nl> <nl> # include " flow / flow . h " <nl> # include " fdbrpc / FlowTransport . h " / / Endpoint <nl> # include < unordered_map > <nl> + # include < unordered_set > <nl> <nl> using std : : vector ; <nl> <nl> class SimpleFailureMonitor : public IFailureMonitor { <nl> private : <nl> std : : unordered_map < NetworkAddress , FailureStatus > addressStatus ; <nl> YieldedAsyncMap < Endpoint , bool > endpointKnownFailed ; <nl> + std : : unordered_set < Endpoint > failedEndpoints ; <nl> <nl> friend class OnStateChangedActorActor ; <nl> } ; <nl> mmm a / fdbrpc / FlowTransport . actor . cpp <nl> ppp b / fdbrpc / FlowTransport . actor . cpp <nl> const Endpoint & EndpointMap : : insert ( NetworkAddressList localAddresses , std : : vec <nl> } <nl> <nl> UID base = deterministicRandom ( ) - > randomUniqueID ( ) ; <nl> - for ( int i = 0 ; i < streams . size ( ) ; i + + ) { <nl> + for ( uint64_t i = 0 ; i < streams . size ( ) ; i + + ) { <nl> int index = adjacentStart + i ; <nl> - streams [ i ] . first - > setEndpoint ( Endpoint ( localAddresses , UID ( base . first ( ) | TOKEN_STREAM_FLAG , ( base . second ( ) & 0xffffffff00000000LL ) | index ) ) ) ; <nl> - data [ index ] . token ( ) = Endpoint : : Token ( base . first ( ) | TOKEN_STREAM_FLAG , ( base . second ( ) & 0xffffffff00000000LL ) | static_cast < uint32_t > ( streams [ i ] . second ) ) ; <nl> + uint64_t first = ( base . first ( ) + ( i < < 32 ) ) | TOKEN_STREAM_FLAG ; <nl> + streams [ i ] . first - > setEndpoint ( Endpoint ( localAddresses , UID ( first , ( base . second ( ) & 0xffffffff00000000LL ) | index ) ) ) ; <nl> + data [ index ] . token ( ) = Endpoint : : Token ( first , ( base . second ( ) & 0xffffffff00000000LL ) | static_cast < uint32_t > ( streams [ i ] . second ) ) ; <nl> data [ index ] . receiver = ( NetworkMessageReceiver * ) streams [ i ] . first ; <nl> } <nl> <nl> void FlowTransport : : addEndpoint ( Endpoint & endpoint , NetworkMessageReceiver * rec <nl> self - > endpoints . insert ( receiver , endpoint . token , taskID ) ; <nl> } <nl> <nl> - const Endpoint & FlowTransport : : addEndpoints ( std : : vector < std : : pair < FlowReceiver * , TaskPriority > > const & streams ) { <nl> - return self - > endpoints . insert ( self - > localAddresses , streams ) ; <nl> + void FlowTransport : : addEndpoints ( std : : vector < std : : pair < FlowReceiver * , TaskPriority > > const & streams ) { <nl> + self - > endpoints . insert ( self - > localAddresses , streams ) ; <nl> } <nl> <nl> void FlowTransport : : removeEndpoint ( const Endpoint & endpoint , NetworkMessageReceiver * receiver ) { <nl> mmm a / fdbrpc / FlowTransport . h <nl> ppp b / fdbrpc / FlowTransport . h <nl> class Endpoint { <nl> Endpoint getAdjustedEndpoint ( uint32_t index ) { <nl> uint32_t newIndex = token . second ( ) ; <nl> newIndex + = index ; <nl> - return Endpoint ( addresses , UID ( token . first ( ) , ( token . 
second ( ) & 0xffffffff00000000LL ) | newIndex ) ) ; <nl> + return Endpoint ( addresses , UID ( token . first ( ) + ( uint64_t ( index ) < < 32 ) , ( token . second ( ) & 0xffffffff00000000LL ) | newIndex ) ) ; <nl> } <nl> <nl> bool operator = = ( Endpoint const & r ) const { <nl> - return getPrimaryAddress ( ) = = r . getPrimaryAddress ( ) & & token = = r . token ; <nl> + return token = = r . token & & getPrimaryAddress ( ) = = r . getPrimaryAddress ( ) ; <nl> } <nl> bool operator ! = ( Endpoint const & r ) const { <nl> return ! ( * this = = r ) ; <nl> } <nl> - <nl> bool operator < ( Endpoint const & r ) const { <nl> - const NetworkAddress & left = getPrimaryAddress ( ) ; <nl> - const NetworkAddress & right = r . getPrimaryAddress ( ) ; <nl> - if ( left ! = right ) <nl> - return left < right ; <nl> - else <nl> - return token < r . token ; <nl> + return addresses . address < r . addresses . address | | ( addresses . address = = r . addresses . address & & token < r . token ) ; <nl> } <nl> <nl> template < class Ar > <nl> class Endpoint { <nl> } ; <nl> # pragma pack ( pop ) <nl> <nl> + namespace std <nl> + { <nl> + template < > <nl> + struct hash < Endpoint > <nl> + { <nl> + size_t operator ( ) ( const Endpoint & ep ) const <nl> + { <nl> + return ep . token . hash ( ) + ep . addresses . address . hash ( ) ; <nl> + } <nl> + } ; <nl> + } <nl> + <nl> class ArenaObjectReader ; <nl> class NetworkMessageReceiver { <nl> public : <nl> class FlowTransport { <nl> void addEndpoint ( Endpoint & endpoint , NetworkMessageReceiver * , TaskPriority taskID ) ; <nl> / / Sets endpoint to be a new local endpoint which delivers messages to the given receiver <nl> <nl> - const Endpoint & addEndpoints ( std : : vector < std : : pair < struct FlowReceiver * , TaskPriority > > const & streams ) ; <nl> + void addEndpoints ( std : : vector < std : : pair < struct FlowReceiver * , TaskPriority > > const & streams ) ; <nl> <nl> void removeEndpoint ( const Endpoint & , NetworkMessageReceiver * ) ; <nl> / / The given local endpoint no longer delivers messages to the given receiver or uses resources <nl> old mode 100644 <nl> new mode 100755 <nl> index 05eb22e4de . . dc83b7dbaa <nl> mmm a / fdbrpc / actorFuzz . py <nl> ppp b / fdbrpc / actorFuzz . py <nl> def randomActor ( index ) : <nl> <nl> print ( " std : : pair < int , int > actorFuzzTests ( ) { \ n \ tint testsOK = 0 ; " , file = outputFile ) <nl> for actor in actors : <nl> - print ( ' \ ttestsOK + = testFuzzActor ( & % s , " % s " , ( vector < int > ( ) , % s ) ) ; ' % ( actor . name , actor . name , ' , ' . join ( str ( e ) for e in actor . ecx . output ) ) , <nl> + print ( ' \ ttestsOK + = testFuzzActor ( & % s , " % s " , { % s } ) ; ' % ( actor . name , actor . name , ' , ' . join ( str ( e ) for e in actor . ecx . output ) ) , <nl> file = outputFile ) <nl> print ( " \ treturn std : : make_pair ( testsOK , % d ) ; \ n } " % len ( actors ) , file = outputFile ) <nl> print ( ' # endif / / WIN32 \ n ' , file = outputFile ) <nl> mmm a / fdbrpc / sim2 . actor . cpp <nl> ppp b / fdbrpc / sim2 . actor . 
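Hoisting std::hash<Endpoint> into FlowTransport.h (replacing sim2's crc32c over the raw struct bytes, which also hashed padding) is what lets SimpleFailureMonitor keep failed endpoints in a std::unordered_set. The shape of the specialization, shown for a hypothetical two-field key:

	#include <cstdint>
	#include <functional>
	#include <unordered_set>

	struct EndpointLike {
		uint64_t token;
		uint32_t address;
		bool operator==(const EndpointLike& r) const {
			return token == r.token && address == r.address;
		}
	};

	namespace std {
	template <>
	struct hash<EndpointLike> {
		size_t operator()(const EndpointLike& e) const {
			// The diff combines the two field hashes with +; a
			// hash_combine-style mix would also work.
			return hash<uint64_t>()(e.token) + hash<uint32_t>()(e.address);
		}
	};
	} // namespace std

	std::unordered_set<EndpointLike> failedEndpoints; // usable now, as in SimpleFailureMonitor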
cpp <nl> void ISimulator : : displayWorkers ( ) const <nl> return ; <nl> } <nl> <nl> - namespace std { <nl> - template < > <nl> - class hash < Endpoint > { <nl> - public : <nl> - size_t operator ( ) ( const Endpoint & s ) const <nl> - { <nl> - return crc32c_append ( 0 , ( const uint8_t * ) & s , sizeof ( s ) ) ; <nl> - } <nl> - } ; <nl> - } <nl> - <nl> const UID TOKEN_ENDPOINT_NOT_FOUND ( - 1 , - 1 ) ; <nl> <nl> ISimulator * g_pSimulator = 0 ; <nl> mmm a / fdbserver / BackupProgress . actor . cpp <nl> ppp b / fdbserver / BackupProgress . actor . cpp <nl> std : : map < std : : tuple < LogEpoch , Version , int > , std : : map < Tag , Version > > BackupProgr <nl> <nl> auto progressIt = progress . lower_bound ( epoch ) ; <nl> if ( progressIt ! = progress . end ( ) & & progressIt - > first = = epoch ) { <nl> - if ( progressIt ! = progress . begin ( ) ) { <nl> + std : : set < Tag > toCheck = tags ; <nl> + for ( auto current = progressIt ; current ! = progress . begin ( ) & & ! toCheck . empty ( ) ; ) { <nl> + auto prev = std : : prev ( current ) ; <nl> / / Previous epoch is gone , consolidate the progress . <nl> - auto prev = std : : prev ( progressIt ) ; <nl> for ( auto [ tag , version ] : prev - > second ) { <nl> - if ( tags . count ( tag ) > 0 ) { <nl> + if ( toCheck . count ( tag ) > 0 ) { <nl> progressIt - > second [ tag ] = std : : max ( version , progressIt - > second [ tag ] ) ; <nl> + toCheck . erase ( tag ) ; <nl> } <nl> } <nl> + current = prev ; <nl> } <nl> updateTagVersions ( & tagVersions , & tags , progressIt - > second , info . epochEnd , adjustedBeginVersion , epoch ) ; <nl> } else { <nl> auto rit = std : : find_if ( <nl> progress . rbegin ( ) , progress . rend ( ) , <nl> [ epoch = epoch ] ( const std : : pair < LogEpoch , std : : map < Tag , Version > > & p ) { return p . first < epoch ; } ) ; <nl> - if ( ! ( rit = = progress . rend ( ) ) ) { <nl> + while ( ! ( rit = = progress . rend ( ) ) ) { <nl> / / A partial recovery can result in empty epoch that copies previous <nl> / / epoch ' s version range . In this case , we should check previous <nl> / / epoch ' s savedVersion . <nl> std : : map < std : : tuple < LogEpoch , Version , int > , std : : map < Tag , Version > > BackupProgr <nl> / / ASSERT ( info . logRouterTags = = epochTags [ rit - > first ] ) ; <nl> <nl> updateTagVersions ( & tagVersions , & tags , rit - > second , info . epochEnd , adjustedBeginVersion , epoch ) ; <nl> + break ; <nl> } <nl> + rit + + ; <nl> } <nl> } <nl> <nl> mmm a / fdbserver / BackupWorker . actor . cpp <nl> ppp b / fdbserver / BackupWorker . actor . cpp <nl> <nl> <nl> # include " flow / actorcompiler . h " / / This must be the last # include . <nl> <nl> + # define SevDebugMemory SevVerbose <nl> + <nl> struct VersionedMessage { <nl> LogMessageVersion version ; <nl> StringRef message ; <nl> VectorRef < Tag > tags ; <nl> Arena arena ; / / Keep a reference to the memory containing the message <nl> + size_t bytes ; / / arena ' s size when inserted , which can grow afterwards <nl> <nl> VersionedMessage ( LogMessageVersion v , StringRef m , const VectorRef < Tag > & t , const Arena & a ) <nl> - : version ( v ) , message ( m ) , tags ( t ) , arena ( a ) { } <nl> + : version ( v ) , message ( m ) , tags ( t ) , arena ( a ) , bytes ( a . getSize ( ) ) { } <nl> const Version getVersion ( ) const { return version . version ; } <nl> const uint32_t getSubVersion ( ) const { return version . 
sub ; } <nl> <nl> struct VersionedMessage { <nl> } <nl> } ; <nl> <nl> + static bool sameArena ( const Arena & a , const Arena & b ) { <nl> + return a . impl . getPtr ( ) = = b . impl . getPtr ( ) ; <nl> + } <nl> + <nl> struct BackupData { <nl> const UID myId ; <nl> const Tag tag ; / / LogRouter tag for this worker , i . e . , ( - 2 , i ) <nl> struct BackupData { <nl> bool stopped = false ; <nl> bool exitEarly = false ; / / If the worker is on an old epoch and all backups start at a version > = the endVersion <nl> AsyncVar < bool > paused ; / / Track if " backupPausedKey " is set . <nl> + Reference < FlowLock > lock ; <nl> <nl> struct PerBackupInfo { <nl> PerBackupInfo ( ) = default ; <nl> struct BackupData { <nl> : myId ( id ) , tag ( req . routerTag ) , totalTags ( req . totalTags ) , startVersion ( req . startVersion ) , <nl> endVersion ( req . endVersion ) , recruitedEpoch ( req . recruitedEpoch ) , backupEpoch ( req . backupEpoch ) , <nl> minKnownCommittedVersion ( invalidVersion ) , savedVersion ( req . startVersion - 1 ) , popVersion ( req . startVersion - 1 ) , <nl> - cc ( " BackupWorker " , myId . toString ( ) ) , pulledVersion ( 0 ) , paused ( false ) { <nl> + cc ( " BackupWorker " , myId . toString ( ) ) , pulledVersion ( 0 ) , paused ( false ) , <nl> + lock ( new FlowLock ( SERVER_KNOBS - > BACKUP_LOCK_BYTES ) ) { <nl> cx = openDBOnServer ( db , TaskPriority : : DefaultEndpoint , true , true ) ; <nl> <nl> specialCounter ( cc , " SavedVersion " , [ this ] ( ) { return this - > savedVersion ; } ) ; <nl> specialCounter ( cc , " MinKnownCommittedVersion " , [ this ] ( ) { return this - > minKnownCommittedVersion ; } ) ; <nl> specialCounter ( cc , " MsgQ " , [ this ] ( ) { return this - > messages . size ( ) ; } ) ; <nl> + specialCounter ( cc , " BufferedBytes " , [ this ] ( ) { return this - > lock - > activePermits ( ) ; } ) ; <nl> logger = traceCounters ( " BackupWorkerMetrics " , myId , SERVER_KNOBS - > WORKER_LOGGING_INTERVAL , & cc , <nl> " BackupWorkerMetrics " ) ; <nl> } <nl> struct BackupData { <nl> doneTrigger . trigger ( ) ; <nl> } <nl> <nl> + / / Erases messages and updates lock with memory released . <nl> + void eraseMessages ( int num ) { <nl> + ASSERT ( num < = messages . size ( ) ) ; <nl> + if ( num = = 0 ) return ; <nl> + <nl> + if ( messages . size ( ) = = num ) { <nl> + messages . clear ( ) ; <nl> + TraceEvent ( SevDebugMemory , " BackupWorkerMemory " , myId ) . detail ( " ReleaseAll " , lock - > activePermits ( ) ) ; <nl> + lock - > release ( lock - > activePermits ( ) ) ; <nl> + return ; <nl> + } <nl> + <nl> + / / keep track of each arena and accumulate their sizes <nl> + int64_t bytes = 0 ; <nl> + for ( int i = 0 ; i < num ; i + + ) { <nl> + const Arena & a = messages [ i ] . arena ; <nl> + const Arena & b = messages [ i + 1 ] . arena ; <nl> + if ( ! sameArena ( a , b ) ) { <nl> + bytes + = messages [ i ] . bytes ; <nl> + TraceEvent ( SevDebugMemory , " BackupWorkerMemory " , myId ) <nl> + . detail ( " Release " , messages [ i ] . bytes ) <nl> + . detail ( " Arena " , ( void * ) a . impl . getPtr ( ) ) ; <nl> + } <nl> + } <nl> + lock - > release ( bytes ) ; <nl> + messages . erase ( messages . begin ( ) , messages . begin ( ) + num ) ; <nl> + } <nl> + <nl> void eraseMessagesAfterEndVersion ( ) { <nl> ASSERT ( endVersion . present ( ) ) ; <nl> const Version ver = endVersion .
get ( ) ; <nl> ACTOR Future < Void > saveMutationsToFile ( BackupData * self , Version popVersion , int <nl> state std : : vector < Reference < IBackupFile > > logFiles ; <nl> state std : : vector < int64_t > blockEnds ; <nl> state std : : vector < UID > activeUids ; / / active Backups ' UIDs <nl> + state std : : vector < Version > beginVersions ; / / logFiles ' begin versions <nl> state KeyRangeMap < std : : set < int > > keyRangeMap ; / / range to index in logFileFutures , logFiles , & blockEnds <nl> state std : : vector < Standalone < StringRef > > mutations ; <nl> state int idx ; <nl> ACTOR Future < Void > saveMutationsToFile ( BackupData * self , Version popVersion , int <nl> const int index = logFileFutures . size ( ) ; <nl> activeUids . push_back ( it - > first ) ; <nl> self - > insertRanges ( keyRangeMap , it - > second . ranges . get ( ) , index ) ; <nl> + <nl> if ( it - > second . lastSavedVersion = = invalidVersion ) { <nl> if ( it - > second . startVersion > self - > startVersion & & ! self - > messages . empty ( ) ) { <nl> / / True - up first mutation log ' s begin version <nl> it - > second . lastSavedVersion = self - > messages [ 0 ] . getVersion ( ) ; <nl> } else { <nl> - it - > second . lastSavedVersion = <nl> - std : : max ( self - > popVersion , std : : max ( self - > savedVersion , self - > startVersion ) ) ; <nl> + it - > second . lastSavedVersion = std : : max ( { self - > popVersion , self - > savedVersion , self - > startVersion } ) ; <nl> } <nl> + TraceEvent ( " BackupWorkerTrueUp " , self - > myId ) . detail ( " LastSavedVersion " , it - > second . lastSavedVersion ) ; <nl> } <nl> + / / The true - up version can be larger than the first message version , so keep <nl> + / / the begin versions for later mutation filtering . <nl> + beginVersions . push_back ( it - > second . lastSavedVersion ) ; <nl> + <nl> logFileFutures . push_back ( it - > second . container . get ( ) . get ( ) - > writeTaggedLogFile ( <nl> it - > second . lastSavedVersion , popVersion + 1 , blockSize , self - > tag . id , self - > totalTags ) ) ; <nl> it + + ; <nl> ACTOR Future < Void > saveMutationsToFile ( BackupData * self , Version popVersion , int <nl> std : : transform ( logFileFutures . begin ( ) , logFileFutures . end ( ) , std : : back_inserter ( logFiles ) , <nl> [ ] ( const Future < Reference < IBackupFile > > & f ) { return f . get ( ) ; } ) ; <nl> <nl> - ASSERT ( activeUids . size ( ) = = logFiles . size ( ) ) ; <nl> + ASSERT ( activeUids . size ( ) = = logFiles . size ( ) & & beginVersions . size ( ) = = logFiles . size ( ) ) ; <nl> for ( int i = 0 ; i < logFiles . size ( ) ; i + + ) { <nl> TraceEvent ( " OpenMutationFile " , self - > myId ) <nl> . detail ( " BackupID " , activeUids [ i ] ) <nl> ACTOR Future < Void > saveMutationsToFile ( BackupData * self , Version popVersion , int <nl> std : : vector < Future < Void > > adds ; <nl> if ( m . type ! = MutationRef : : Type : : ClearRange ) { <nl> for ( int index : keyRangeMap [ m . param1 ] ) { <nl> - adds . push_back ( addMutation ( logFiles [ index ] , message , message . message , & blockEnds [ index ] , blockSize ) ) ; <nl> + if ( message . getVersion ( ) > = beginVersions [ index ] ) { <nl> + adds . push_back ( <nl> + addMutation ( logFiles [ index ] , message , message . message , & blockEnds [ index ] , blockSize ) ) ; <nl> + } <nl> } <nl> } else { <nl> KeyRangeRef mutationRange ( m . param1 , m .
param2 ) ; <nl> ACTOR Future < Void > saveMutationsToFile ( BackupData * self , Version popVersion , int <nl> wr < < subm ; <nl> mutations . push_back ( wr . toValue ( ) ) ; <nl> for ( int index : range . value ( ) ) { <nl> - adds . push_back ( <nl> - addMutation ( logFiles [ index ] , message , mutations . back ( ) , & blockEnds [ index ] , blockSize ) ) ; <nl> + if ( message . getVersion ( ) > = beginVersions [ index ] ) { <nl> + adds . push_back ( <nl> + addMutation ( logFiles [ index ] , message , mutations . back ( ) , & blockEnds [ index ] , blockSize ) ) ; <nl> + } <nl> } <nl> } <nl> } <nl> ACTOR Future < Void > uploadData ( BackupData * self ) { <nl> . detail ( " MsgQ " , self - > messages . size ( ) ) ; <nl> / / save an empty file for old epochs so that log file versions are continuous <nl> wait ( saveMutationsToFile ( self , popVersion , numMsg ) ) ; <nl> - self - > messages . erase ( self - > messages . begin ( ) , self - > messages . begin ( ) + numMsg ) ; <nl> + self - > eraseMessages ( numMsg ) ; <nl> } <nl> <nl> / / If transition into NOOP mode , should clear messages <nl> if ( ! self - > pulling ) { <nl> - self - > messages . clear ( ) ; <nl> + self - > eraseMessages ( self - > messages . size ( ) ) ; <nl> } <nl> <nl> if ( popVersion > self - > savedVersion & & popVersion > self - > popVersion ) { <nl> ACTOR Future < Void > uploadData ( BackupData * self ) { <nl> } <nl> <nl> if ( self - > allMessageSaved ( ) ) { <nl> - self - > messages . clear ( ) ; <nl> + self - > eraseMessages ( self - > messages . size ( ) ) ; <nl> return Void ( ) ; <nl> } <nl> <nl> ACTOR Future < Void > pullAsyncData ( BackupData * self ) { <nl> state Future < Void > logSystemChange = Void ( ) ; <nl> state Reference < ILogSystem : : IPeekCursor > r ; <nl> state Version tagAt = std : : max ( self - > pulledVersion . get ( ) , std : : max ( self - > startVersion , self - > savedVersion ) ) ; <nl> + state Arena prev ; <nl> <nl> TraceEvent ( " BackupWorkerPull " , self - > myId ) ; <nl> loop { <nl> ACTOR Future < Void > pullAsyncData ( BackupData * self ) { <nl> / / Note we aggressively peek ( uncommitted ) messages , but only committed <nl> / / messages / mutations will be flushed to disk / blob in uploadData ( ) . <nl> while ( r - > hasMessage ( ) ) { <nl> + if ( ! sameArena ( prev , r - > arena ( ) ) ) { <nl> + TraceEvent ( SevDebugMemory , " BackupWorkerMemory " , self - > myId ) <nl> + . detail ( " Take " , r - > arena ( ) . getSize ( ) ) <nl> + . detail ( " Arena " , ( void * ) r - > arena ( ) . impl . getPtr ( ) ) <nl> + . detail ( " Current " , self - > lock - > activePermits ( ) ) ; <nl> + <nl> + wait ( self - > lock - > take ( TaskPriority : : DefaultYield , r - > arena ( ) . getSize ( ) ) ) ; <nl> + prev = r - > arena ( ) ; <nl> + } <nl> self - > messages . emplace_back ( r - > version ( ) , r - > getMessage ( ) , r - > getTags ( ) , r - > arena ( ) ) ; <nl> r - > nextMessage ( ) ; <nl> } <nl> mmm a / fdbserver / CMakeLists . txt <nl> ppp b / fdbserver / CMakeLists . txt <nl> set ( FDBSERVER_SRCS <nl> workloads / ConsistencyCheck . actor . cpp <nl> workloads / CpuProfiler . actor . cpp <nl> workloads / Cycle . actor . cpp <nl> + workloads / DataDistributionMetrics . actor . cpp <nl> workloads / DDBalance . actor . cpp <nl> workloads / DDMetrics . actor . cpp <nl> workloads / DDMetricsExclude . actor . cpp <nl> mmm a / fdbserver / DataDistribution . actor . cpp <nl> ppp b / fdbserver / DataDistribution . actor . 
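The new BACKUP_LOCK_BYTES budget treats FlowLock as a byte-counting semaphore: pullAsyncData takes permits the first time it sees each arena, and eraseMessages refunds a batch's bytes once per distinct arena, so messages sharing backing memory are charged exactly once. A sketch of the release-side arithmetic, reusing the Arena type and sameArena helper from above (Msg stands in for VersionedMessage):

	#include <cstdint>
	#include <vector>

	struct Msg {
		Arena arena;   // backing memory, possibly shared with neighboring messages
		size_t bytes;  // arena size recorded when the message was buffered
	};

	// Bytes to refund when erasing messages[0 .. num): count each arena once.
	// Assumes num < messages.size(); the full-erase case is handled separately
	// above by releasing all active permits.
	int64_t bytesToRelease(const std::vector<Msg>& messages, int num) {
		int64_t bytes = 0;
		for (int i = 0; i < num; i++) {
			if (!sameArena(messages[i].arena, messages[i + 1].arena)) {
				bytes += messages[i].bytes;
			}
		}
		return bytes;
	}

Charging on arena boundaries rather than per message is what keeps take() and release() balanced even though a single arena backs many buffered messages.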
cpp <nl> ACTOR Future < Void > monitorBatchLimitedTime ( Reference < AsyncVar < ServerDBInfo > > db , <nl> } <nl> } <nl> <nl> - ACTOR Future < Void > dataDistribution ( Reference < DataDistributorData > self ) <nl> + ACTOR Future < Void > dataDistribution ( Reference < DataDistributorData > self , PromiseStream < GetMetricsListRequest > getShardMetricsList ) <nl> { <nl> state double lastLimited = 0 ; <nl> self - > addActor . send ( monitorBatchLimitedTime ( self - > dbInfo , & lastLimited ) ) ; <nl> ACTOR Future < Void > dataDistribution ( Reference < DataDistributorData > self ) <nl> } <nl> <nl> actors . push_back ( pollMoveKeysLock ( cx , lock ) ) ; <nl> - actors . push_back ( reportErrorsExcept ( dataDistributionTracker ( initData , cx , output , shardsAffectedByTeamFailure , getShardMetrics , getAverageShardBytes . getFuture ( ) , readyToStart , anyZeroHealthyTeams , self - > ddId ) , " DDTracker " , self - > ddId , & normalDDQueueErrors ( ) ) ) ; <nl> + actors . push_back ( reportErrorsExcept ( dataDistributionTracker ( initData , cx , output , shardsAffectedByTeamFailure , getShardMetrics , getShardMetricsList , getAverageShardBytes . getFuture ( ) , readyToStart , anyZeroHealthyTeams , self - > ddId ) , " DDTracker " , self - > ddId , & normalDDQueueErrors ( ) ) ) ; <nl> actors . push_back ( reportErrorsExcept ( dataDistributionQueue ( cx , output , input . getFuture ( ) , getShardMetrics , processingUnhealthy , tcis , shardsAffectedByTeamFailure , lock , getAverageShardBytes , self - > ddId , storageTeamSize , configuration . storageTeamSize , & lastLimited ) , " DDQueue " , self - > ddId , & normalDDQueueErrors ( ) ) ) ; <nl> <nl> vector < DDTeamCollection * > teamCollectionsPtrs ; <nl> ACTOR Future < Void > ddExclusionSafetyCheck ( DistributorExclusionSafetyCheckRequest <nl> ACTOR Future < Void > dataDistributor ( DataDistributorInterface di , Reference < AsyncVar < struct ServerDBInfo > > db ) { <nl> state Reference < DataDistributorData > self ( new DataDistributorData ( db , di . id ( ) ) ) ; <nl> state Future < Void > collection = actorCollection ( self - > addActor . getFuture ( ) ) ; <nl> + state PromiseStream < GetMetricsListRequest > getShardMetricsList ; <nl> state Database cx = openDBOnServer ( db , TaskPriority : : DefaultDelay , true , true ) ; <nl> state ActorCollection actors ( false ) ; <nl> self - > addActor . send ( actors . getResult ( ) ) ; <nl> ACTOR Future < Void > dataDistributor ( DataDistributorInterface di , Reference < AsyncV <nl> try { <nl> TraceEvent ( " DataDistributorRunning " , di . id ( ) ) ; <nl> self - > addActor . send ( waitFailureServer ( di . waitFailure . getFuture ( ) ) ) ; <nl> - state Future < Void > distributor = reportErrorsExcept ( dataDistribution ( self ) , " DataDistribution " , di . id ( ) , & normalDataDistributorErrors ( ) ) ; <nl> + state Future < Void > distributor = reportErrorsExcept ( dataDistribution ( self , getShardMetricsList ) , " DataDistribution " , di . id ( ) , & normalDataDistributorErrors ( ) ) ; <nl> <nl> loop choose { <nl> when ( wait ( distributor | | collection ) ) { <nl> ACTOR Future < Void > dataDistributor ( DataDistributorInterface di , Reference < AsyncV <nl> TraceEvent ( " DataDistributorHalted " , di . id ( ) ) . detail ( " ReqID " , req . requesterID ) ; <nl> break ; <nl> } <nl> + when ( state GetDataDistributorMetricsRequest req = waitNext ( di . dataDistributorMetrics . 
getFuture ( ) ) ) { <nl> + ErrorOr < Standalone < VectorRef < DDMetricsRef > > > result = wait ( errorOr ( brokenPromiseToNever ( <nl> + getShardMetricsList . getReply ( GetMetricsListRequest ( req . keys , req . shardLimit ) ) ) ) ) ; <nl> + if ( result . isError ( ) ) { <nl> + req . reply . sendError ( result . getError ( ) ) ; <nl> + } else { <nl> + GetDataDistributorMetricsReply rep ; <nl> + rep . storageMetricsList = result . get ( ) ; <nl> + req . reply . send ( rep ) ; <nl> + } <nl> + } <nl> when ( DistributorSnapRequest snapReq = waitNext ( di . distributorSnapReq . getFuture ( ) ) ) { <nl> actors . add ( ddSnapCreate ( snapReq , db ) ) ; <nl> } <nl> mmm a / fdbserver / DataDistribution . actor . h <nl> ppp b / fdbserver / DataDistribution . actor . h <nl> struct GetMetricsRequest { <nl> GetMetricsRequest ( KeyRange const & keys ) : keys ( keys ) { } <nl> } ; <nl> <nl> + struct GetMetricsListRequest { <nl> + KeyRange keys ; <nl> + int shardLimit ; <nl> + Promise < Standalone < VectorRef < DDMetricsRef > > > reply ; <nl> + <nl> + GetMetricsListRequest ( ) { } <nl> + GetMetricsListRequest ( KeyRange const & keys , const int shardLimit ) : keys ( keys ) , shardLimit ( shardLimit ) { } <nl> + } ; <nl> + <nl> struct TeamCollectionInterface { <nl> PromiseStream < GetTeamRequest > getTeam ; <nl> } ; <nl> Future < Void > dataDistributionTracker ( <nl> PromiseStream < RelocateShard > const & output , <nl> Reference < ShardsAffectedByTeamFailure > const & shardsAffectedByTeamFailure , <nl> PromiseStream < GetMetricsRequest > const & getShardMetrics , <nl> + PromiseStream < GetMetricsListRequest > const & getShardMetricsList , <nl> FutureStream < Promise < int64_t > > const & getAverageShardBytes , <nl> Promise < Void > const & readyToStart , <nl> Reference < AsyncVar < bool > > const & zeroHealthyTeams , <nl> mmm a / fdbserver / DataDistributionTracker . actor . cpp <nl> ppp b / fdbserver / DataDistributionTracker . actor . cpp <nl> ACTOR Future < Void > fetchShardMetrics ( DataDistributionTracker * self , GetMetricsR <nl> return Void ( ) ; <nl> } <nl> <nl> + <nl> + ACTOR Future < Void > fetchShardMetricsList_impl ( DataDistributionTracker * self , GetMetricsListRequest req ) { <nl> + try { <nl> + loop { <nl> + / / used to control shard limit <nl> + int shardNum = 0 ; <nl> + / / list of metrics , regenerate on loop when full range unsuccessful <nl> + Standalone < VectorRef < DDMetricsRef > > result ; <nl> + Future < Void > onChange ; <nl> + for ( auto t : self - > shards . containedRanges ( req . keys ) ) { <nl> + auto & stats = t . value ( ) . stats ; <nl> + if ( ! stats - > get ( ) . present ( ) ) { <nl> + onChange = stats - > onChange ( ) ; <nl> + break ; <nl> + } <nl> + result . push_back_deep ( result . arena ( ) , <nl> + DDMetricsRef ( stats - > get ( ) . get ( ) . metrics . bytes , KeyRef ( t . begin ( ) . toString ( ) ) ) ) ; <nl> + + + shardNum ; <nl> + if ( shardNum > = req . shardLimit ) { <nl> + break ; <nl> + } <nl> + } <nl> + <nl> + if ( ! onChange . isValid ( ) ) { <nl> + req . reply . send ( result ) ; <nl> + return Void ( ) ; <nl> + } <nl> + <nl> + wait ( onChange ) ; <nl> + } <nl> + } catch ( Error & e ) { <nl> + if ( e . code ( ) ! = error_code_actor_cancelled & & ! req . reply . isSet ( ) ) <nl> + req . reply . 
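The dataDistributorMetrics handler above uses a forwarding idiom worth spelling out: brokenPromiseToNever keeps a dropped server-side promise from surfacing as broken_promise (the request simply never completes), and errorOr turns any other failure into a value so it can be relayed to the original requester instead of killing the forwarding actor. In outline, with placeholder request and reply types (flow actor-compiler syntax, as used above):

	ACTOR Future<Void> forwardRequest(RequestStream<DownstreamRequest> downstream, UpstreamRequest req) {
		ErrorOr<DownstreamReply> result =
		    wait(errorOr(brokenPromiseToNever(downstream.getReply(DownstreamRequest(req.keys, req.shardLimit)))));
		if (result.isError()) {
			req.reply.sendError(result.getError()); // relay the failure upstream
		} else {
			req.reply.send(result.get());           // relay the payload upstream
		}
		return Void();
	}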
sendError ( e ) ; <nl> + throw ; <nl> + } <nl> + } <nl> + <nl> + ACTOR Future < Void > fetchShardMetricsList ( DataDistributionTracker * self , GetMetricsListRequest req ) { <nl> + choose { <nl> + when ( wait ( fetchShardMetricsList_impl ( self , req ) ) ) { } <nl> + when ( wait ( delay ( SERVER_KNOBS - > DD_SHARD_METRICS_TIMEOUT ) ) ) { <nl> + req . reply . sendError ( timed_out ( ) ) ; <nl> + } <nl> + } <nl> + return Void ( ) ; <nl> + } <nl> + <nl> ACTOR Future < Void > dataDistributionTracker ( <nl> Reference < InitialDataDistribution > initData , <nl> Database cx , <nl> PromiseStream < RelocateShard > output , <nl> Reference < ShardsAffectedByTeamFailure > shardsAffectedByTeamFailure , <nl> PromiseStream < GetMetricsRequest > getShardMetrics , <nl> + PromiseStream < GetMetricsListRequest > getShardMetricsList , <nl> FutureStream < Promise < int64_t > > getAverageShardBytes , <nl> Promise < Void > readyToStart , <nl> Reference < AsyncVar < bool > > anyZeroHealthyTeams , <nl> ACTOR Future < Void > dataDistributionTracker ( <nl> when ( GetMetricsRequest req = waitNext ( getShardMetrics . getFuture ( ) ) ) { <nl> self . sizeChanges . add ( fetchShardMetrics ( & self , req ) ) ; <nl> } <nl> + when ( GetMetricsListRequest req = waitNext ( getShardMetricsList . getFuture ( ) ) ) { <nl> + self . sizeChanges . add ( fetchShardMetricsList ( & self , req ) ) ; <nl> + } <nl> when ( wait ( self . sizeChanges . getResult ( ) ) ) { } <nl> } <nl> } catch ( Error & e ) { <nl> mmm a / fdbserver / DataDistributorInterface . h <nl> ppp b / fdbserver / DataDistributorInterface . h <nl> struct DataDistributorInterface { <nl> struct LocalityData locality ; <nl> RequestStream < struct DistributorSnapRequest > distributorSnapReq ; <nl> RequestStream < struct DistributorExclusionSafetyCheckRequest > distributorExclCheckReq ; <nl> + RequestStream < struct GetDataDistributorMetricsRequest > dataDistributorMetrics ; <nl> <nl> DataDistributorInterface ( ) { } <nl> explicit DataDistributorInterface ( const struct LocalityData & l ) : locality ( l ) { } <nl> struct DataDistributorInterface { <nl> <nl> template < class Archive > <nl> void serialize ( Archive & ar ) { <nl> - serializer ( ar , waitFailure , haltDataDistributor , locality , distributorSnapReq , distributorExclCheckReq ) ; <nl> + serializer ( ar , waitFailure , haltDataDistributor , locality , distributorSnapReq , distributorExclCheckReq , dataDistributorMetrics ) ; <nl> } <nl> } ; <nl> <nl> struct HaltDataDistributorRequest { <nl> } <nl> } ; <nl> <nl> + struct GetDataDistributorMetricsReply { <nl> + constexpr static FileIdentifier file_identifier = 1284337 ; <nl> + Standalone < VectorRef < DDMetricsRef > > storageMetricsList ; <nl> + <nl> + GetDataDistributorMetricsReply ( ) { } <nl> + <nl> + template < class Ar > <nl> + void serialize ( Ar & ar ) { <nl> + serializer ( ar , storageMetricsList ) ; <nl> + } <nl> + } ; <nl> + <nl> + struct GetDataDistributorMetricsRequest { <nl> + constexpr static FileIdentifier file_identifier = 1059267 ; <nl> + KeyRange keys ; <nl> + int shardLimit ; <nl> + ReplyPromise < struct GetDataDistributorMetricsReply > reply ; <nl> + <nl> + GetDataDistributorMetricsRequest ( ) { } <nl> + explicit GetDataDistributorMetricsRequest ( KeyRange const & keys , const int shardLimit ) : keys ( keys ) , shardLimit ( shardLimit ) { } <nl> + <nl> + template < class Ar > <nl> + void serialize ( Ar & ar ) { <nl> + serializer ( ar , keys , shardLimit , reply ) ; <nl> + } <nl> + } ; <nl> + <nl> struct DistributorSnapRequest <nl> { <nl> 
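fetchShardMetricsList wraps its implementation in the standard flow timeout race: whichever of the real work and the delay completes first wins the choose, and the losing future is dropped. The skeleton, with doWork and SomeRequest as placeholders:

	ACTOR Future<Void> withTimeout(SomeRequest req, double seconds) {
		choose {
			when(wait(doWork(req))) {}       // normal completion; reply already sent
			when(wait(delay(seconds))) {     // the deadline won the race
				req.reply.sendError(timed_out());
			}
		}
		return Void();
	}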
constexpr static FileIdentifier file_identifier = 22204900 ; <nl> mmm a / fdbserver / FDBExecHelper . actor . cpp <nl> ppp b / fdbserver / FDBExecHelper . actor . cpp <nl> <nl> # include " fdbserver / FDBExecHelper . actor . h " <nl> # include " flow / Trace . h " <nl> # include " flow / flow . h " <nl> - # include " fdbclient / IncludeVersions . h " <nl> + # include " fdbclient / versions . h " <nl> # include " fdbserver / Knobs . h " <nl> # include " flow / actorcompiler . h " / / This must be the last # include . <nl> <nl> mmm a / fdbserver / Knobs . cpp <nl> ppp b / fdbserver / Knobs . cpp <nl> void ServerKnobs : : initialize ( bool randomize , ClientKnobs * clientKnobs , bool isSi <nl> init ( BACKUP_TIMEOUT , 0 . 4 ) ; <nl> init ( BACKUP_NOOP_POP_DELAY , 5 . 0 ) ; <nl> init ( BACKUP_FILE_BLOCK_BYTES , 1024 * 1024 ) ; <nl> - init ( BACKUP_UPLOAD_DELAY , 10 . 0 ) ; if ( randomize & & BUGGIFY ) BACKUP_UPLOAD_DELAY = deterministicRandom ( ) - > random01 ( ) * 20 ; / / TODO : Increase delay range <nl> + init ( BACKUP_LOCK_BYTES , 3e9 ) ; if ( randomize & & BUGGIFY ) BACKUP_LOCK_BYTES = deterministicRandom ( ) - > randomInt ( 1024 , 4096 ) * 1024 ; <nl> + init ( BACKUP_UPLOAD_DELAY , 10 . 0 ) ; if ( randomize & & BUGGIFY ) BACKUP_UPLOAD_DELAY = deterministicRandom ( ) - > random01 ( ) * 60 ; <nl> <nl> / / Cluster Controller <nl> init ( CLUSTER_CONTROLLER_LOGGING_DELAY , 5 . 0 ) ; <nl> void ServerKnobs : : initialize ( bool randomize , ClientKnobs * clientKnobs , bool isSi <nl> init ( REDWOOD_DEFAULT_PAGE_SIZE , 4096 ) ; <nl> init ( REDWOOD_KVSTORE_CONCURRENT_READS , 64 ) ; <nl> init ( REDWOOD_PAGE_REBUILD_FILL_FACTOR , 0 . 66 ) ; <nl> + init ( REDWOOD_LAZY_CLEAR_BATCH_SIZE_PAGES , 10 ) ; <nl> + init ( REDWOOD_LAZY_CLEAR_MIN_PAGES , 0 ) ; <nl> + init ( REDWOOD_LAZY_CLEAR_MAX_PAGES , 1e6 ) ; <nl> + init ( REDWOOD_REMAP_CLEANUP_BATCH_SIZE , 5000 ) ; <nl> + init ( REDWOOD_REMAP_CLEANUP_VERSION_LAG_MIN , 4 ) ; <nl> + init ( REDWOOD_REMAP_CLEANUP_VERSION_LAG_MAX , 15 ) ; <nl> + init ( REDWOOD_LOGGING_INTERVAL , 5 . 0 ) ; <nl> <nl> / / clang - format on <nl> <nl> mmm a / fdbserver / Knobs . h <nl> ppp b / fdbserver / Knobs . h <nl> class ServerKnobs : public Knobs { <nl> int64_t DD_SS_FAILURE_VERSIONLAG ; / / Allowed SS version lag from the current read version before marking it as failed . <nl> int64_t DD_SS_ALLOWED_VERSIONLAG ; / / SS will be marked as healthy if it ' s version lag goes below this value . <nl> double DD_SS_STUCK_TIME_LIMIT ; / / If a storage server is not getting new versions for this amount of time , then it becomes undesired . <nl> - <nl> + <nl> / / TeamRemover to remove redundant teams <nl> bool TR_FLAG_DISABLE_MACHINE_TEAM_REMOVER ; / / disable the machineTeamRemover actor <nl> double TR_REMOVE_MACHINE_TEAM_DELAY ; / / wait for the specified time before try to remove next machine team <nl> class ServerKnobs : public Knobs { <nl> double BACKUP_TIMEOUT ; / / master ' s reaction time for backup failure <nl> double BACKUP_NOOP_POP_DELAY ; <nl> int BACKUP_FILE_BLOCK_BYTES ; <nl> + int64_t BACKUP_LOCK_BYTES ; <nl> double BACKUP_UPLOAD_DELAY ; <nl> <nl> / / Cluster Controller <nl> class ServerKnobs : public Knobs { <nl> int REDWOOD_DEFAULT_PAGE_SIZE ; / / Page size for new Redwood files <nl> int REDWOOD_KVSTORE_CONCURRENT_READS ; / / Max number of simultaneous point or range reads in progress . 
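The knob additions above follow the file's convention of a production default plus a BUGGIFY override; for BACKUP_LOCK_BYTES this presumably shrinks the backup worker's buffer budget from roughly 3 GB to 1-4 MiB in simulation so the new back-pressure path is actually exercised:

	// Pattern from Knobs.cpp above: a production default, then a much
	// smaller randomized value when BUGGIFY fires in simulation.
	init( BACKUP_LOCK_BYTES, 3e9 );
	if (randomize && BUGGIFY)
		BACKUP_LOCK_BYTES = deterministicRandom()->randomInt(1024, 4096) * 1024; // 1-4 MiB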
<nl> double REDWOOD_PAGE_REBUILD_FILL_FACTOR ; / / When rebuilding pages , start a new page after this capacity <nl> + int REDWOOD_LAZY_CLEAR_BATCH_SIZE_PAGES ; / / Number of pages to try to pop from the lazy delete queue and process at once <nl> + int REDWOOD_LAZY_CLEAR_MIN_PAGES ; / / Minimum number of pages to free before ending a lazy clear cycle , unless the queue is empty <nl> + int REDWOOD_LAZY_CLEAR_MAX_PAGES ; / / Maximum number of pages to free before ending a lazy clear cycle , unless the queue is empty <nl> + int REDWOOD_REMAP_CLEANUP_BATCH_SIZE ; / / Number of queue entries for remap cleanup to process and potentially coalesce at once . <nl> + int REDWOOD_REMAP_CLEANUP_VERSION_LAG_MIN ; / / Number of versions between head of remap queue and oldest retained version before remap cleanup starts <nl> + int REDWOOD_REMAP_CLEANUP_VERSION_LAG_MAX ; / / Number of versions between head of remap queue and oldest retained version before remap cleanup may stop <nl> + double REDWOOD_LOGGING_INTERVAL ; <nl> <nl> ServerKnobs ( ) ; <nl> void initialize ( bool randomize = false , ClientKnobs * clientKnobs = NULL , bool isSimulated = false ) ; <nl> mmm a / fdbserver / MasterInterface . h <nl> ppp b / fdbserver / MasterInterface . h <nl> typedef uint64_t DBRecoveryCount ; <nl> struct MasterInterface { <nl> constexpr static FileIdentifier file_identifier = 5979145 ; <nl> LocalityData locality ; <nl> - Endpoint base ; <nl> RequestStream < ReplyPromise < Void > > waitFailure ; <nl> RequestStream < struct TLogRejoinRequest > tlogRejoin ; / / sent by tlog ( whether or not rebooted ) to communicate with a new master <nl> RequestStream < struct ChangeCoordinatorsRequest > changeCoordinators ; <nl> struct MasterInterface { <nl> if constexpr ( ! is_fb_function < Archive > ) { <nl> ASSERT ( ar . protocolVersion ( ) . isValid ( ) ) ; <nl> } <nl> - serializer ( ar , locality , base ) ; <nl> + serializer ( ar , locality , waitFailure ) ; <nl> if ( Archive : : isDeserializing ) { <nl> - waitFailure = RequestStream < ReplyPromise < Void > > ( base . getAdjustedEndpoint ( 0 ) ) ; <nl> - tlogRejoin = RequestStream < struct TLogRejoinRequest > ( base . getAdjustedEndpoint ( 1 ) ) ; <nl> - changeCoordinators = RequestStream < struct ChangeCoordinatorsRequest > ( base . getAdjustedEndpoint ( 2 ) ) ; <nl> - getCommitVersion = RequestStream < struct GetCommitVersionRequest > ( base . getAdjustedEndpoint ( 3 ) ) ; <nl> - notifyBackupWorkerDone = RequestStream < struct BackupWorkerDoneRequest > ( base . getAdjustedEndpoint ( 4 ) ) ; <nl> + tlogRejoin = RequestStream < struct TLogRejoinRequest > ( waitFailure . getEndpoint ( ) . getAdjustedEndpoint ( 1 ) ) ; <nl> + changeCoordinators = RequestStream < struct ChangeCoordinatorsRequest > ( waitFailure . getEndpoint ( ) . getAdjustedEndpoint ( 2 ) ) ; <nl> + getCommitVersion = RequestStream < struct GetCommitVersionRequest > ( waitFailure . getEndpoint ( ) . getAdjustedEndpoint ( 3 ) ) ; <nl> + notifyBackupWorkerDone = RequestStream < struct BackupWorkerDoneRequest > ( waitFailure . getEndpoint ( ) . getAdjustedEndpoint ( 4 ) ) ; <nl> } <nl> } <nl> <nl> struct MasterInterface { <nl> streams . push_back ( changeCoordinators . getReceiver ( ) ) ; <nl> streams . push_back ( getCommitVersion . getReceiver ( TaskPriority : : GetConsistentReadVersion ) ) ; <nl> streams . push_back ( notifyBackupWorkerDone . getReceiver ( ) ) ; <nl> - base = FlowTransport : : transport ( ) . addEndpoints ( streams ) ; <nl> + FlowTransport : : transport ( ) . 
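The MasterInterface change leans on the contiguous-token scheme from the FlowTransport edits above: initEndpoints registers all five streams as one block, only waitFailure's endpoint is serialized, and receivers rebuild streams 1-4 by offsetting the token. The arithmetic, extracted into a hypothetical helper (tokens modeled as a pair of 64-bit words):

	#include <cstdint>
	#include <utility>

	// token = (first, second); stream i of a block bumps the high half of
	// `first` by i and the low 32 bits of `second` by i, matching
	// Endpoint::getAdjustedEndpoint above.
	std::pair<uint64_t, uint64_t> adjustedToken(std::pair<uint64_t, uint64_t> base,
	                                            uint32_t index) {
		uint32_t newIndex = static_cast<uint32_t>(base.second) + index;
		return { base.first + (uint64_t(index) << 32),
		         (base.second & 0xffffffff00000000ULL) | newIndex };
	}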
addEndpoints ( streams ) ; <nl> } <nl> } ; <nl> <nl> mmm a / fdbserver / MasterProxyServer . actor . cpp <nl> ppp b / fdbserver / MasterProxyServer . actor . cpp <nl> ACTOR Future < Void > healthMetricsRequestServer ( MasterProxyInterface proxy , GetHea <nl> } <nl> } <nl> <nl> + ACTOR Future < Void > ddMetricsRequestServer ( MasterProxyInterface proxy , Reference < AsyncVar < ServerDBInfo > > db ) <nl> + { <nl> + loop { <nl> + choose { <nl> + when ( state GetDDMetricsRequest req = waitNext ( proxy . getDDMetrics . getFuture ( ) ) ) <nl> + { <nl> + ErrorOr < GetDataDistributorMetricsReply > reply = wait ( errorOr ( db - > get ( ) . distributor . get ( ) . dataDistributorMetrics . getReply ( GetDataDistributorMetricsRequest ( req . keys , req . shardLimit ) ) ) ) ; <nl> + if ( reply . isError ( ) ) { <nl> + req . reply . sendError ( reply . getError ( ) ) ; <nl> + } else { <nl> + GetDDMetricsReply newReply ; <nl> + newReply . storageMetricsList = reply . get ( ) . storageMetricsList ; <nl> + req . reply . send ( newReply ) ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + } <nl> + <nl> ACTOR Future < Void > monitorRemoteCommitted ( ProxyCommitData * self ) { <nl> loop { <nl> wait ( delay ( 0 ) ) ; / / allow this actor to be cancelled if we are removed after db changes . <nl> ACTOR Future < Void > masterProxyServerCore ( <nl> addActor . send ( readRequestServer ( proxy , addActor , & commitData ) ) ; <nl> addActor . send ( rejoinServer ( proxy , & commitData ) ) ; <nl> addActor . send ( healthMetricsRequestServer ( proxy , & healthMetricsReply , & detailedHealthMetricsReply ) ) ; <nl> + addActor . send ( ddMetricsRequestServer ( proxy , db ) ) ; <nl> <nl> / / wait for txnStateStore recovery <nl> wait ( success ( commitData . txnStateStore - > readValue ( StringRef ( ) ) ) ) ; <nl> mmm a / fdbserver / Ratekeeper . actor . cpp <nl> ppp b / fdbserver / Ratekeeper . actor . cpp <nl> ACTOR Future < Void > monitorThrottlingChanges ( RatekeeperData * self ) { <nl> TransactionTag tag = * tagKey . tags . begin ( ) ; <nl> Optional < ClientTagThrottleLimits > oldLimits = self - > throttledTags . getManualTagThrottleLimits ( tag , tagKey . priority ) ; <nl> <nl> - if ( tagKey . autoThrottled ) { <nl> + if ( tagKey . throttleType = = TagThrottleType : : AUTO ) { <nl> updatedTagThrottles . autoThrottleTag ( self - > id , tag , 0 , tagValue . tpsRate , tagValue . expirationTime ) ; <nl> } <nl> else { <nl> void tryAutoThrottleTag ( RatekeeperData * self , StorageQueueInfo const & ss , RkTagT <nl> TagSet tags ; <nl> tags . addTag ( ss . busiestTag . get ( ) ) ; <nl> <nl> - self - > addActor . send ( ThrottleApi : : throttleTags ( self - > db , tags , clientRate . get ( ) , SERVER_KNOBS - > AUTO_TAG_THROTTLE_DURATION , true , TransactionPriority : : DEFAULT , now ( ) + SERVER_KNOBS - > AUTO_TAG_THROTTLE_DURATION ) ) ; <nl> + self - > addActor . send ( ThrottleApi : : throttleTags ( self - > db , tags , clientRate . get ( ) , SERVER_KNOBS - > AUTO_TAG_THROTTLE_DURATION , TagThrottleType : : AUTO , TransactionPriority : : DEFAULT , now ( ) + SERVER_KNOBS - > AUTO_TAG_THROTTLE_DURATION ) ) ; <nl> } <nl> } <nl> } <nl> mmm a / fdbserver / RestoreLoader . actor . cpp <nl> ppp b / fdbserver / RestoreLoader . actor . cpp <nl> ACTOR static Future < Void > _parsePartitionedLogFileOnLoader ( <nl> VersionedMutationsMap : : iterator it ; <nl> bool inserted ; <nl> std : : tie ( it , inserted ) = kvOps . 
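The interface hunk above relies on the streams being registered as one contiguous block of endpoints, so serializing a single stream is enough to rebuild the rest by index on the receiving side. A simplified sketch of that derivation (the `Endpoint`/`Stream` types here are reduced stand-ins, not Flow's real ones):

#include <cstdint>
#include <vector>

// Reduced stand-ins: an endpoint is a base token plus an index into the
// contiguous block that addEndpoints() registered.
struct Endpoint {
    uint64_t token = 0;
    int index = 0;
    Endpoint getAdjustedEndpoint(int i) const { return Endpoint{ token, i }; }
};

struct Stream {
    Endpoint ep;
    const Endpoint& getEndpoint() const { return ep; }
};

// After deserializing only the first stream, derive the others by index,
// mirroring how MasterInterface now rebuilds from waitFailure.
void rebuildFromFirst(const Stream& first, std::vector<Stream*>& rest) {
    for (size_t i = 0; i < rest.size(); ++i)
        rest[i]->ep = first.getEndpoint().getAdjustedEndpoint(int(i) + 1);
}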
--- a/fdbserver/MasterProxyServer.actor.cpp
+++ b/fdbserver/MasterProxyServer.actor.cpp
ACTOR Future<Void> healthMetricsRequestServer(MasterProxyInterface proxy, GetHea
	}
}

+ACTOR Future<Void> ddMetricsRequestServer(MasterProxyInterface proxy, Reference<AsyncVar<ServerDBInfo>> db)
+{
+	loop {
+		choose {
+			when(state GetDDMetricsRequest req = waitNext(proxy.getDDMetrics.getFuture()))
+			{
+				ErrorOr<GetDataDistributorMetricsReply> reply = wait(errorOr(db->get().distributor.get().dataDistributorMetrics.getReply(GetDataDistributorMetricsRequest(req.keys, req.shardLimit))));
+				if (reply.isError()) {
+					req.reply.sendError(reply.getError());
+				} else {
+					GetDDMetricsReply newReply;
+					newReply.storageMetricsList = reply.get().storageMetricsList;
+					req.reply.send(newReply);
+				}
+			}
+		}
+	}
+}
+
ACTOR Future<Void> monitorRemoteCommitted(ProxyCommitData* self) {
	loop {
		wait(delay(0)); // allow this actor to be cancelled if we are removed after db changes.
ACTOR Future<Void> masterProxyServerCore(
	addActor.send(readRequestServer(proxy, addActor, &commitData));
	addActor.send(rejoinServer(proxy, &commitData));
	addActor.send(healthMetricsRequestServer(proxy, &healthMetricsReply, &detailedHealthMetricsReply));
+	addActor.send(ddMetricsRequestServer(proxy, db));

	// wait for txnStateStore recovery
	wait(success(commitData.txnStateStore->readValue(StringRef())));
--- a/fdbserver/Ratekeeper.actor.cpp
+++ b/fdbserver/Ratekeeper.actor.cpp
ACTOR Future<Void> monitorThrottlingChanges(RatekeeperData* self) {
	TransactionTag tag = *tagKey.tags.begin();
	Optional<ClientTagThrottleLimits> oldLimits = self->throttledTags.getManualTagThrottleLimits(tag, tagKey.priority);

-	if (tagKey.autoThrottled) {
+	if (tagKey.throttleType == TagThrottleType::AUTO) {
		updatedTagThrottles.autoThrottleTag(self->id, tag, 0, tagValue.tpsRate, tagValue.expirationTime);
	}
	else {
void tryAutoThrottleTag(RatekeeperData* self, StorageQueueInfo const& ss, RkTagT
	TagSet tags;
	tags.addTag(ss.busiestTag.get());

-	self->addActor.send(ThrottleApi::throttleTags(self->db, tags, clientRate.get(), SERVER_KNOBS->AUTO_TAG_THROTTLE_DURATION, true, TransactionPriority::DEFAULT, now() + SERVER_KNOBS->AUTO_TAG_THROTTLE_DURATION));
+	self->addActor.send(ThrottleApi::throttleTags(self->db, tags, clientRate.get(), SERVER_KNOBS->AUTO_TAG_THROTTLE_DURATION, TagThrottleType::AUTO, TransactionPriority::DEFAULT, now() + SERVER_KNOBS->AUTO_TAG_THROTTLE_DURATION));
		}
	}
}
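Switching the throttle API from a `bool autoThrottled` parameter to `TagThrottleType` trades an opaque `true` at call sites for a named value. A small illustrative sketch (hypothetical `throttleTag` function, not the real ThrottleApi):

#include <cstdio>

enum class TagThrottleType { MANUAL, AUTO };

// Compare a call like throttleTag("t", 100, true), where the bool's meaning
// is invisible at the call site; the enum names the intent.
void throttleTag(const char* tag, double tpsRate, TagThrottleType type) {
    std::printf("%s -> %.1f tps (%s)\n", tag, tpsRate,
                type == TagThrottleType::AUTO ? "auto" : "manual");
}

int main() {
    throttleTag("busy_tag", 100.0, TagThrottleType::AUTO);
}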
--- a/fdbserver/RestoreLoader.actor.cpp
+++ b/fdbserver/RestoreLoader.actor.cpp
ACTOR static Future<Void> _parsePartitionedLogFileOnLoader(
	VersionedMutationsMap::iterator it;
	bool inserted;
	std::tie(it, inserted) = kvOps.emplace(msgVersion, MutationsVec());
+	// A clear mutation can be split into multiple mutations with the same (version, sub).
+	// See saveMutationsToFile(). Current tests only use one key range per backup, thus
+	// only one clear mutation is generated (i.e., always inserted).
	ASSERT(inserted);

	ArenaReader rd(buf.arena(), StringRef(message, msgSize), AssumeVersion(currentProtocolVersion));
--- a/fdbserver/SimulatedCluster.actor.cpp
+++ b/fdbserver/SimulatedCluster.actor.cpp
 #include "fdbclient/ManagementAPI.actor.h"
 #include "fdbclient/NativeAPI.actor.h"
 #include "fdbclient/BackupAgent.actor.h"
-#include "fdbclient/IncludeVersions.h"
+#include "fdbclient/versions.h"
 #include "flow/actorcompiler.h" // This must be the last #include.

 #undef max
--- a/fdbserver/SkipList.cpp
+++ b/fdbserver/SkipList.cpp
StringRef setK(Arena& arena, int i) {
 #include "fdbserver/ConflictSet.h"

struct ConflictSet {
-	ConflictSet() : oldestVersion(0) {}
+	ConflictSet() : oldestVersion(0), removalKey(makeString(0)) {}
	~ConflictSet() {}

	SkipList versionHistory;
--- a/fdbserver/Status.actor.cpp
+++ b/fdbserver/Status.actor.cpp
JsonBuilderObject getLagObject(int64_t versions) {

struct MachineMemoryInfo {
	double memoryUsage;
-	double numProcesses;
+	double aggregateLimit;

-	MachineMemoryInfo() : memoryUsage(0), numProcesses(0) {}
+	MachineMemoryInfo() : memoryUsage(0), aggregateLimit(0) {}

	bool valid() { return memoryUsage >= 0; }
	void invalidate() { memoryUsage = -1; }
ACTOR static Future<JsonBuilderObject> processStatusFetcher(
	try {
		ASSERT(pMetrics.count(workerItr->interf.address()));
		const TraceEventFields& processMetrics = pMetrics[workerItr->interf.address()];
+		const TraceEventFields& programStart = programStarts[workerItr->interf.address()];

		if (memInfo->second.valid()) {
-			if (processMetrics.size() > 0) {
+			if (processMetrics.size() > 0 && programStart.size() > 0) {
				memInfo->second.memoryUsage += processMetrics.getDouble("Memory");
-				++memInfo->second.numProcesses;
+				memInfo->second.aggregateLimit += programStart.getDouble("MemoryLimit");
			}
			else
				memInfo->second.invalidate();
ACTOR static Future<JsonBuilderObject> processStatusFetcher(
		memoryObj.setKeyRawNumber("unused_allocated_memory", processMetrics.getValue("UnusedAllocatedMemory"));
	}

+	int64_t memoryLimit = 0;
	if (programStarts.count(address)) {
-		auto const& psxml = programStarts.at(address);
+		auto const& programStartEvent = programStarts.at(address);

-		if (psxml.size() > 0) {
-			memoryObj.setKeyRawNumber("limit_bytes", psxml.getValue("MemoryLimit"));
+		if (programStartEvent.size() > 0) {
+			memoryLimit = programStartEvent.getInt64("MemoryLimit");
+			memoryObj.setKey("limit_bytes", memoryLimit);

			std::string version;
-			if (psxml.tryGetValue("Version", version)) {
+			if (programStartEvent.tryGetValue("Version", version)) {
				statusObj["version"] = version;
			}

			std::string commandLine;
-			if (psxml.tryGetValue("CommandLine", commandLine)) {
+			if (programStartEvent.tryGetValue("CommandLine", commandLine)) {
				statusObj["command_line"] = commandLine;
			}
		}
ACTOR static Future<JsonBuilderObject> processStatusFetcher(
		availableMemory = mMetrics[address].getDouble("AvailableMemory");

		auto machineMemInfo = machineMemoryUsage[workerItr->interf.locality.machineId()];
-		if (machineMemInfo.valid()) {
-			ASSERT(machineMemInfo.numProcesses > 0);
-			int64_t memory = (availableMemory + machineMemInfo.memoryUsage) / machineMemInfo.numProcesses;
-			memoryObj["available_bytes"] = std::max<int64_t>(memory, 0);
+		if (machineMemInfo.valid() && memoryLimit > 0) {
+			ASSERT(machineMemInfo.aggregateLimit > 0);
+			int64_t memory = (availableMemory + machineMemInfo.memoryUsage) * memoryLimit / machineMemInfo.aggregateLimit;
+			memoryObj["available_bytes"] = std::min<int64_t>(std::max<int64_t>(memory, 0), memoryLimit);
		}
	}

ACTOR static Future<JsonBuilderObject> workloadStatusFetcher(Reference<AsyncVar<
	(*qos).setKeyRawNumber("worst_queue_bytes_storage_server", ratekeeper.getValue("WorstStorageServerQueue"));
	(*qos).setKeyRawNumber("limiting_queue_bytes_storage_server", ratekeeper.getValue("LimitingStorageServerQueue"));

-	// TODO: These can be removed in the next release after 6.2
-	(*qos).setKeyRawNumber("worst_version_lag_storage_server", ratekeeper.getValue("WorstStorageServerVersionLag"));
-	(*qos).setKeyRawNumber("limiting_version_lag_storage_server", ratekeeper.getValue("LimitingStorageServerVersionLag"));
-
	(*qos)["worst_data_lag_storage_server"] = getLagObject(ratekeeper.getInt64("WorstStorageServerVersionLag"));
	(*qos)["limiting_data_lag_storage_server"] = getLagObject(ratekeeper.getInt64("LimitingStorageServerVersionLag"));
	(*qos)["worst_durability_lag_storage_server"] = getLagObject(ratekeeper.getInt64("WorstStorageServerDurabilityLag"));
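The rewritten `available_bytes` logic prorates the machine's memory pool by each process's share of the aggregate configured limit and clamps the result to that process's own limit, instead of dividing evenly by process count. A worked example under assumed numbers:

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Mirrors the status formula: prorate by limit share, clamp to [0, limit].
int64_t availableBytes(double machineAvailable, double machineUsage,
                       int64_t processLimit, double aggregateLimit) {
    int64_t memory = int64_t((machineAvailable + machineUsage) * processLimit / aggregateLimit);
    return std::min<int64_t>(std::max<int64_t>(memory, 0), processLimit);
}

int main() {
    // Assumed: 6 GB free + 2 GB in use on the machine; one 8 GB-limit and one
    // 4 GB-limit process (aggregate 12 GB). The old code credited each (6+2)/2 = 4 GB.
    printf("%lld\n", (long long)availableBytes(6e9, 2e9, int64_t(8e9), 12e9)); // ~5.33e9
    printf("%lld\n", (long long)availableBytes(6e9, 2e9, int64_t(4e9), 12e9)); // ~2.67e9
}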
--- a/fdbserver/TLogInterface.h
+++ b/fdbserver/TLogInterface.h
struct TLogInterface {
	LocalityData filteredLocality;
	UID uniqueID;
	UID sharedTLogID;
-	Endpoint base;

	RequestStream<struct TLogPeekRequest> peekMessages;
	RequestStream<struct TLogPopRequest> popMessages;
struct TLogInterface {
	streams.push_back(disablePopRequest.getReceiver());
	streams.push_back(enablePopRequest.getReceiver());
	streams.push_back(snapRequest.getReceiver());
-	base = FlowTransport::transport().addEndpoints(streams);
+	FlowTransport::transport().addEndpoints(streams);
}

template <class Ar>
struct TLogInterface {
	if constexpr (!is_fb_function<Ar>) {
		ASSERT(ar.isDeserializing || uniqueID != UID());
	}
-	serializer(ar, uniqueID, sharedTLogID, filteredLocality, base);
+	serializer(ar, uniqueID, sharedTLogID, filteredLocality, peekMessages);
	if (Ar::isDeserializing) {
-		peekMessages = RequestStream<struct TLogPeekRequest>(base.getAdjustedEndpoint(0));
-		popMessages = RequestStream<struct TLogPopRequest>(base.getAdjustedEndpoint(1));
-		commit = RequestStream<struct TLogCommitRequest>(base.getAdjustedEndpoint(2));
-		lock = RequestStream<ReplyPromise<struct TLogLockResult>>(base.getAdjustedEndpoint(3));
-		getQueuingMetrics = RequestStream<struct TLogQueuingMetricsRequest>(base.getAdjustedEndpoint(4));
-		confirmRunning = RequestStream<struct TLogConfirmRunningRequest>(base.getAdjustedEndpoint(5));
-		waitFailure = RequestStream<ReplyPromise<Void>>(base.getAdjustedEndpoint(6));
-		recoveryFinished = RequestStream<struct TLogRecoveryFinishedRequest>(base.getAdjustedEndpoint(7));
-		disablePopRequest = RequestStream<struct TLogDisablePopRequest>(base.getAdjustedEndpoint(8));
-		enablePopRequest = RequestStream<struct TLogEnablePopRequest>(base.getAdjustedEndpoint(9));
-		snapRequest = RequestStream<struct TLogSnapRequest>(base.getAdjustedEndpoint(10));
+		popMessages = RequestStream<struct TLogPopRequest>(peekMessages.getEndpoint().getAdjustedEndpoint(1));
+		commit = RequestStream<struct TLogCommitRequest>(peekMessages.getEndpoint().getAdjustedEndpoint(2));
+		lock = RequestStream<ReplyPromise<struct TLogLockResult>>(peekMessages.getEndpoint().getAdjustedEndpoint(3));
+		getQueuingMetrics = RequestStream<struct TLogQueuingMetricsRequest>(peekMessages.getEndpoint().getAdjustedEndpoint(4));
+		confirmRunning = RequestStream<struct TLogConfirmRunningRequest>(peekMessages.getEndpoint().getAdjustedEndpoint(5));
+		waitFailure = RequestStream<ReplyPromise<Void>>(peekMessages.getEndpoint().getAdjustedEndpoint(6));
+		recoveryFinished = RequestStream<struct TLogRecoveryFinishedRequest>(peekMessages.getEndpoint().getAdjustedEndpoint(7));
+		disablePopRequest = RequestStream<struct TLogDisablePopRequest>(peekMessages.getEndpoint().getAdjustedEndpoint(8));
+		enablePopRequest = RequestStream<struct TLogEnablePopRequest>(peekMessages.getEndpoint().getAdjustedEndpoint(9));
+		snapRequest = RequestStream<struct TLogSnapRequest>(peekMessages.getEndpoint().getAdjustedEndpoint(10));
	}
}
};
--- a/fdbserver/VersionedBTree.actor.cpp
+++ b/fdbserver/VersionedBTree.actor.cpp
class FIFOQueue {

	Future<Standalone<VectorRef<T>>> peekAll() { return peekAll_impl(this); }

+	ACTOR static Future<Optional<T>> peek_impl(FIFOQueue* self) {
+		state Cursor c;
+		c.initReadOnly(self->headReader);
+
+		Optional<T> x = wait(c.readNext());
+		return x;
+	}
+
+	Future<Optional<T>> peek() { return peek_impl(this); }
+
	// Pop the next item on front of queue if it is <= upperBound or if upperBound is not present
	Future<Optional<T>> pop(Optional<T> upperBound = {}) { return headReader.readNext(upperBound); }
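The new `peek()` reads through a throwaway read-only cursor, so unlike `pop()` it leaves the queue's head state untouched. A single-threaded stand-in (plain `std::deque`, no actors) showing the intended contract:

#include <deque>
#include <optional>

template <typename T>
struct SimpleQueue {
    std::deque<T> items;

    // peek: observe the head without consuming it.
    std::optional<T> peek() const {
        if (items.empty()) return std::nullopt;
        return items.front();
    }

    // pop: consume the head; the queue's head state advances.
    std::optional<T> pop() {
        if (items.empty()) return std::nullopt;
        T x = items.front();
        items.pop_front();
        return x;
    }
};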
class FastAllocatedPage : public IPage, public FastAllocated<FastAllocatedPage>,
	uint8_t* buffer;
};

+struct RedwoodMetrics {
+	static constexpr int btreeLevels = 5;
+
+	RedwoodMetrics() { clear(); }
+
+	void clear() {
+		memset(this, 0, sizeof(RedwoodMetrics));
+		for (auto& level : levels) {
+			level = {};
+		}
+		startTime = g_network ? now() : 0;
+	}
+
+	struct Level {
+		unsigned int pageRead;
+		unsigned int pageReadExt;
+		unsigned int pageBuild;
+		unsigned int pageBuildExt;
+		unsigned int pageCommitStart;
+		unsigned int pageModify;
+		unsigned int pageModifyExt;
+		unsigned int lazyClearRequeue;
+		unsigned int lazyClearRequeueExt;
+		unsigned int lazyClearFree;
+		unsigned int lazyClearFreeExt;
+		double buildStoredPct;
+		double buildFillPct;
+		unsigned int buildItemCount;
+		double modifyStoredPct;
+		double modifyFillPct;
+		unsigned int modifyItemCount;
+	};
+
+	Level levels[btreeLevels];
+
+	unsigned int opSet;
+	unsigned int opSetKeyBytes;
+	unsigned int opSetValueBytes;
+	unsigned int opClear;
+	unsigned int opClearKey;
+	unsigned int opCommit;
+	unsigned int opGet;
+	unsigned int opGetRange;
+	unsigned int pagerDiskWrite;
+	unsigned int pagerDiskRead;
+	unsigned int pagerRemapFree;
+	unsigned int pagerRemapCopy;
+	unsigned int pagerRemapSkip;
+	unsigned int pagerCacheHit;
+	unsigned int pagerCacheMiss;
+	unsigned int pagerProbeHit;
+	unsigned int pagerProbeMiss;
+	unsigned int pagerEvictUnhit;
+	unsigned int pagerEvictFail;
+	unsigned int btreeLeafPreload;
+	unsigned int btreeLeafPreloadExt;
+
+	double startTime;
+
+	Level& level(unsigned int level) {
+		static Level outOfBound;
+		if (level == 0 || level > btreeLevels) {
+			return outOfBound;
+		}
+		return levels[level - 1];
+	}
+
+	// This will populate a trace event and/or a string with Redwood metrics. The string is a
+	// reasonably well formatted page of information
+	void getFields(TraceEvent* e, std::string* s = nullptr) {
+		std::pair<const char*, unsigned int> metrics[] = { { "BTreePreload", btreeLeafPreload },
+			                                               { "BTreePreloadExt", btreeLeafPreloadExt },
+			                                               { "", 0 },
+			                                               { "OpSet", opSet },
+			                                               { "OpSetKeyBytes", opSetKeyBytes },
+			                                               { "OpSetValueBytes", opSetValueBytes },
+			                                               { "OpClear", opClear },
+			                                               { "OpClearKey", opClearKey },
+			                                               { "", 0 },
+			                                               { "OpGet", opGet },
+			                                               { "OpGetRange", opGetRange },
+			                                               { "OpCommit", opCommit },
+			                                               { "", 0 },
+			                                               { "PagerDiskWrite", pagerDiskWrite },
+			                                               { "PagerDiskRead", pagerDiskRead },
+			                                               { "PagerCacheHit", pagerCacheHit },
+			                                               { "PagerCacheMiss", pagerCacheMiss },
+			                                               { "", 0 },
+			                                               { "PagerProbeHit", pagerProbeHit },
+			                                               { "PagerProbeMiss", pagerProbeMiss },
+			                                               { "PagerEvictUnhit", pagerEvictUnhit },
+			                                               { "PagerEvictFail", pagerEvictFail },
+			                                               { "", 0 },
+			                                               { "PagerRemapFree", pagerRemapFree },
+			                                               { "PagerRemapCopy", pagerRemapCopy },
+			                                               { "PagerRemapSkip", pagerRemapSkip } };
+		double elapsed = now() - startTime;
+		for (auto& m : metrics) {
+			if (*m.first == '\0') {
+				if (s != nullptr) {
+					*s += "\n";
+				}
+			} else {
+				if (s != nullptr) {
+					*s += format("%-15s %-8u %8u/s ", m.first, m.second, int(m.second / elapsed));
+				}
+				if (e != nullptr) {
+					e->detail(m.first, m.second);
+				}
+			}
+		}
+
+		for (int i = 0; i < btreeLevels; ++i) {
+			auto& level = levels[i];
+			std::pair<const char*, unsigned int> metrics[] = {
+				{ "PageBuild", level.pageBuild },
+				{ "PageBuildExt", level.pageBuildExt },
+				{ "PageModify", level.pageModify },
+				{ "PageModifyExt", level.pageModifyExt },
+				{ "", 0 },
+				{ "PageRead", level.pageRead },
+				{ "PageReadExt", level.pageReadExt },
+				{ "PageCommitStart", level.pageCommitStart },
+				{ "", 0 },
+				{ "LazyClearInt", level.lazyClearRequeue },
+				{ "LazyClearIntExt", level.lazyClearRequeueExt },
+				{ "LazyClear", level.lazyClearFree },
+				{ "LazyClearExt", level.lazyClearFreeExt },
+				{ "", 0 },
+				{ "-BldAvgCount", level.pageBuild ? level.buildItemCount / level.pageBuild : 0 },
+				{ "-BldAvgFillPct", level.pageBuild ? level.buildFillPct / level.pageBuild * 100 : 0 },
+				{ "-BldAvgStoredPct", level.pageBuild ? level.buildStoredPct / level.pageBuild * 100 : 0 },
+				{ "", 0 },
+				{ "-ModAvgCount", level.pageModify ? level.modifyItemCount / level.pageModify : 0 },
+				{ "-ModAvgFillPct", level.pageModify ? level.modifyFillPct / level.pageModify * 100 : 0 },
+				{ "-ModAvgStoredPct", level.pageModify ? level.modifyStoredPct / level.pageModify * 100 : 0 }
+			};
+
+			if (s != nullptr) {
+				*s += format("\nLevel %d\n\t", i + 1);
+			}
+			for (auto& m : metrics) {
+				const char* name = m.first;
+				bool rate = elapsed != 0;
+				if (*name == '-') {
+					++name;
+					rate = false;
+				}
+
+				if (*name == '\0') {
+					if (s != nullptr) {
+						*s += "\n\t";
+					}
+				} else {
+					if (s != nullptr) {
+						*s += format("%-15s %8u %8u/s ", name, m.second, rate ? int(m.second / elapsed) : 0);
+					}
+					if (e != nullptr) {
+						e->detail(format("L%d%s", i + 1, name), m.second);
+					}
+				}
+			}
+		}
+	}
+
+	std::string toString(bool clearAfter) {
+		std::string s;
+		getFields(nullptr, &s);
+
+		if (clearAfter) {
+			clear();
+		}
+
+		return s;
+	}
+};
+
+// Using a global for Redwood metrics because a single process shouldn't normally have multiple storage engines
+RedwoodMetrics g_redwoodMetrics = {};
+Future<Void> g_redwoodMetricsActor;
+
+ACTOR Future<Void> redwoodMetricsLogger() {
+	g_redwoodMetrics.clear();
+
+	loop {
+		wait(delay(SERVER_KNOBS->REDWOOD_LOGGING_INTERVAL));
+
+		TraceEvent e("RedwoodMetrics");
+		double elapsed = now() - g_redwoodMetrics.startTime;
+		e.detail("Elapsed", elapsed);
+		g_redwoodMetrics.getFields(&e);
+		g_redwoodMetrics.clear();
+	}
+}
+
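With the metrics global in place, hot paths pay a single counter increment and the logger turns totals into per-second rates once per interval before resetting the window. A reduced sketch of that report-then-clear pattern (assumed 5-second window; `SketchMetrics` is a stand-in):

#include <cstdio>

struct SketchMetrics {
    unsigned int pagerDiskWrite = 0;
    double startTime = 0;
};

// Report totals and rates, then reset the window, as the logger does.
void logAndClear(SketchMetrics& m, double nowSeconds) {
    double elapsed = nowSeconds - m.startTime;
    if (elapsed > 0)
        std::printf("PagerDiskWrite %u (%d/s)\n", m.pagerDiskWrite, int(m.pagerDiskWrite / elapsed));
    m = SketchMetrics{};
    m.startTime = nowSeconds;
}

int main() {
    SketchMetrics m;
    for (int i = 0; i < 500; ++i) ++m.pagerDiskWrite; // hot path: one increment per write
    logAndClear(m, 5.0); // prints "PagerDiskWrite 500 (100/s)"
}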
// Holds an index of recently used objects.
// ObjectType must have the methods
//   bool evictable() const; // return true if the entry can be evicted
class ObjectCache : NonCopyable {
	typedef boost::intrusive::list<Entry> EvictionOrderT;

public:
-	ObjectCache(int sizeLimit = 1)
-	  : sizeLimit(sizeLimit), cacheHits(0), cacheMisses(0), noHitEvictions(0), failedEvictions(0) {}
+	ObjectCache(int sizeLimit = 1) : sizeLimit(sizeLimit) {}

	void setSizeLimit(int n) {
		ASSERT(n > 0);
class ObjectCache : NonCopyable {
		auto i = cache.find(index);
		if (i != cache.end()) {
			++i->second.hits;
+			++g_redwoodMetrics.pagerProbeHit;
			return &i->second.item;
		}
+		++g_redwoodMetrics.pagerProbeMiss;
		return nullptr;
	}

	// Get the object for i or create a new one.
	// After a get(), the object for i is the last in evictionOrder.
-	ObjectType& get(const IndexType& index, bool noHit = false) {
+	// If noHit is set, do not consider this access to be cache hit if the object is present
+	// If noMiss is set, do not consider this access to be a cache miss if the object is not present
+	ObjectType& get(const IndexType& index, bool noHit = false, bool noMiss = false) {
		Entry& entry = cache[index];

		// If entry is linked into evictionOrder then move it to the back of the order
		if (entry.is_linked()) {
			if (!noHit) {
				++entry.hits;
-				++cacheHits;
+				++g_redwoodMetrics.pagerCacheHit;
+
+				// Move the entry to the back of the eviction order
+				evictionOrder.erase(evictionOrder.iterator_to(entry));
+				evictionOrder.push_back(entry);
			}
-			// Move the entry to the back of the eviction order
-			evictionOrder.erase(evictionOrder.iterator_to(entry));
-			evictionOrder.push_back(entry);
		} else {
-			++cacheMisses;
+			if (!noMiss) {
+				++g_redwoodMetrics.pagerCacheMiss;
+			}
			// Finish initializing entry
			entry.index = index;
-			entry.hits = noHit ? 0 : 1;
+			entry.hits = 0;
			// Insert the newly created Entry at the back of the eviction order
			evictionOrder.push_back(entry);

			// While the cache is too big, evict the oldest entry until the oldest entry can't be evicted.
			while (cache.size() > sizeLimit) {
				Entry& toEvict = evictionOrder.front();
+
+				// It's critical that we do not evict the item we just added because it would cause the reference
+				// returned to be invalid. An eviction could happen with a no-hit access to a cache resident page
+				// that is currently evictable and exists in the oversized portion of the cache eviction order due
+				// to previously failed evictions.
+				if (&entry == &toEvict) {
+					debug_printf("Cannot evict target index %s\n", toString(index).c_str());
+					break;
+				}
+
				debug_printf("Trying to evict %s to make room for %s\n", toString(toEvict.index).c_str(),
				             toString(index).c_str());

-				// It's critical that we do not evict the item we just added (or the reference we return would be
-				// invalid) but since sizeLimit must be > 0, entry was just added to the end of the evictionOrder, and
-				// this loop will end if we move anything to the end of the eviction order, we can be guaraunted that
-				// entry != toEvict, so we do not need to check. If the item is not evictable then move it to the back
-				// of the eviction order and stop.
				if (!toEvict.item.evictable()) {
					evictionOrder.erase(evictionOrder.iterator_to(toEvict));
					evictionOrder.push_back(toEvict);
-					++failedEvictions;
+					++g_redwoodMetrics.pagerEvictFail;
					break;
				} else {
					if (toEvict.hits == 0) {
-						++noHitEvictions;
+						++g_redwoodMetrics.pagerEvictUnhit;
					}
					debug_printf("Evicting %s to make room for %s\n", toString(toEvict.index).c_str(),
					             toString(index).c_str());
class ObjectCache : NonCopyable {

private:
	int64_t sizeLimit;
-	int64_t cacheHits;
-	int64_t cacheMisses;
-	int64_t noHitEvictions;
-	int64_t failedEvictions;

	CacheT cache;
	EvictionOrderT evictionOrder;
};
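The reordered guard matters because the entry just returned really can reach the front of an oversized eviction order: failed evictions requeue unevictable entries behind it, and a no-hit access no longer moves it to the back. A reduced model showing the guard firing (simplified `Entry` and plain `std::list`, not the intrusive-list implementation):

#include <cstdio>
#include <list>

// Reduced model: unevictable entries accumulate at the back after failed
// evictions, so a cache-resident entry accessed with noHit (which skips the
// move-to-back) can sit at the front of an oversized eviction order.
struct Entry { int id; bool evictable; };

int main() {
    std::list<Entry> order = { { 3, true }, { 1, false }, { 2, false } };
    Entry* beingReturned = &order.front(); // the reference about to be handed out
    size_t limit = 2;

    while (order.size() > limit) {
        Entry& toEvict = order.front();
        if (&toEvict == beingReturned) {
            std::puts("guard: refuse to evict the entry being returned");
            break; // without this check the caller's reference would dangle
        }
        if (!toEvict.evictable) {
            order.splice(order.end(), order, order.begin()); // requeue and stop
            break;
        }
        order.pop_front();
    }
}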
class DWALPager : public IPager2 {
	};

	struct RemappedPage {
+		RemappedPage() : version(invalidVersion) {}
+		RemappedPage(Version v, LogicalPageID o, LogicalPageID n) : version(v), originalPageID(o), newPageID(n) {}
+
		Version version;
		LogicalPageID originalPageID;
		LogicalPageID newPageID;
class DWALPager : public IPager2 {
	DWALPager(int desiredPageSize, std::string filename, int64_t pageCacheSizeBytes, bool memoryOnly = false)
	  : desiredPageSize(desiredPageSize), filename(filename), pHeader(nullptr), pageCacheBytes(pageCacheSizeBytes),
	    memoryOnly(memoryOnly) {
+
+		if (!g_redwoodMetricsActor.isValid()) {
+			g_redwoodMetricsActor = redwoodMetricsLogger();
+		}
+
		if (pageCacheBytes == 0) {
			pageCacheBytes = g_network->isSimulated()
			                     ? (BUGGIFY ? FLOW_KNOBS->BUGGIFY_SIM_PAGE_CACHE_4K : FLOW_KNOBS->SIM_PAGE_CACHE_4K)
class DWALPager : public IPager2 {
	ACTOR static Future<Void> recover(DWALPager* self) {
		ASSERT(!self->recoverFuture.isValid());

-		self->remapUndoFuture = Void();
		state bool exists = false;

		if (!self->memoryOnly) {
class DWALPager : public IPager2 {
			// header)
			self->updateCommittedHeader();
			self->addLatestSnapshot();
+			self->remapCleanupFuture = remapCleanup(self);
		} else {
			// Note: If the file contains less than 2 pages but more than 0 bytes then the pager was never successfully
			// committed. A new pager will be created in its place.
class DWALPager : public IPager2 {
			// Since there is no previously committed header use the initial header for the initial commit.
			self->updateCommittedHeader();

+			self->remapCleanupFuture = Void();
			wait(self->commit());
		}

class DWALPager : public IPager2 {
		debug_printf("DWALPager(%s) op=%s %s ptr=%p\n", filename.c_str(),
		             (header ? "writePhysicalHeader" : "writePhysical"), toString(pageID).c_str(), page->begin());

+		++g_redwoodMetrics.pagerDiskWrite;
		VALGRIND_MAKE_MEM_DEFINED(page->begin(), page->size());
		((Page*)page.getPtr())->updateChecksum(pageID);

class DWALPager : public IPager2 {

	void updatePage(LogicalPageID pageID, Reference<IPage> data) override {
		// Get the cache entry for this page, without counting it as a cache hit as we're replacing its contents now
-		PageCacheEntry& cacheEntry = pageCache.get(pageID, true);
+		// or as a cache miss because there is no benefit to the page already being in cache
+		PageCacheEntry& cacheEntry = pageCache.get(pageID, true, true);
		debug_printf("DWALPager(%s) op=write %s cached=%d reading=%d writing=%d\n", filename.c_str(),
		             toString(pageID).c_str(), cacheEntry.initialized(),
		             cacheEntry.initialized() && cacheEntry.reading(),
class DWALPager : public IPager2 {

	Future<LogicalPageID> atomicUpdatePage(LogicalPageID pageID, Reference<IPage> data, Version v) override {
		debug_printf("DWALPager(%s) op=writeAtomic %s @%" PRId64 "\n", filename.c_str(), toString(pageID).c_str(), v);
-		// This pager does not support atomic update, so it always allocates and uses a new pageID
		Future<LogicalPageID> f = map(newPageID(), [=](LogicalPageID newPageID) {
			updatePage(newPageID, data);
			// TODO: Possibly limit size of remap queue since it must be recovered on cold start
class DWALPager : public IPager2 {
		return f;
	}

-	void freePage(LogicalPageID pageID, Version v) override {
-		// If pageID has been remapped, then it can't be freed until all existing remaps for that page have been undone,
-		// so queue it for later deletion
-		if (remappedPages.find(pageID) != remappedPages.end()) {
-			debug_printf("DWALPager(%s) op=freeRemapped %s @%" PRId64 " oldestVersion=%" PRId64 "\n", filename.c_str(),
-			             toString(pageID).c_str(), v, pLastCommittedHeader->oldestVersion);
-			remapQueue.pushBack(RemappedPage{ v, pageID, invalidLogicalPageID });
-			return;
-		}
-
+	void freeUnmappedPage(LogicalPageID pageID, Version v) {
		// If v is older than the oldest version still readable then mark pageID as free as of the next commit
		if (v < effectiveOldestVersion()) {
			debug_printf("DWALPager(%s) op=freeNow %s @%" PRId64 " oldestVersion=%" PRId64 "\n", filename.c_str(),
class DWALPager : public IPager2 {
			             toString(pageID).c_str(), v, pLastCommittedHeader->oldestVersion);
			delayedFreeList.pushBack({ v, pageID });
		}
+	}
+
+	void freePage(LogicalPageID pageID, Version v) override {
+		// If pageID has been remapped, then it can't be freed until all existing remaps for that page have been undone,
+		// so queue it for later deletion
+		if (remappedPages.find(pageID) != remappedPages.end()) {
+			debug_printf("DWALPager(%s) op=freeRemapped %s @%" PRId64 " oldestVersion=%" PRId64 "\n", filename.c_str(),
+			             toString(pageID).c_str(), v, pLastCommittedHeader->oldestVersion);
+			remapQueue.pushBack(RemappedPage{ v, pageID, invalidLogicalPageID });
+			return;
+		}
+
+		freeUnmappedPage(pageID, v);
	};

	// Read a physical page from the page file. Note that header pages use a page size of smallestPhysicalBlock
class DWALPager : public IPager2 {
	ACTOR static Future<Reference<IPage>> readPhysicalPage(DWALPager* self, PhysicalPageID pageID,
	                                                       bool header = false) {
		ASSERT(!self->memoryOnly);
+		++g_redwoodMetrics.pagerDiskRead;

		if (g_network->getCurrentTask() > TaskPriority::DiskRead) {
			wait(delay(0, TaskPriority::DiskRead));
class DWALPager : public IPager2 {
		return readPhysicalPage(self, pageID, true);
	}

-	// Reads the most recent version of pageID either committed or written using updatePage()
+	// Reads the most recent version of pageID, either previously committed or written using updatePage() in the current
+	// commit
	Future<Reference<IPage>> readPage(LogicalPageID pageID, bool cacheable, bool noHit = false) override {
		// Use cached page if present, without triggering a cache hit.
		// Otherwise, read the page and return it but don't add it to the cache
class DWALPager : public IPager2 {
		return std::min(pLastCommittedHeader->oldestVersion, snapshots.front().version);
	}

-	ACTOR static Future<Void> undoRemaps(DWALPager* self) {
+	ACTOR static Future<Void> remapCopyAndFree(DWALPager* self, RemappedPage m) {
+		debug_printf("DWALPager(%s) remapCleanup copyAndFree %s\n", self->filename.c_str(), m.toString().c_str());
+
+		// Read the data from the page that the original was mapped to
+		Reference<IPage> data = wait(self->readPage(m.newPageID, false));
+
+		// Write the data to the original page so it can be read using its original pageID
+		self->updatePage(m.originalPageID, data);
+		++g_redwoodMetrics.pagerRemapCopy;
+
+		// Remove all remaps for the original page ID up through version
+		auto i = self->remappedPages.find(m.originalPageID);
+		i->second.erase(i->second.begin(), i->second.upper_bound(m.version));
+		// If the version map for this page is now empty, erase it
+		if (i->second.empty()) {
+			self->remappedPages.erase(i);
+		}
+
+		// Now that the remap has been undone nothing will read this page so it can be freed as of the next
+		// commit.
+		self->freeUnmappedPage(m.newPageID, 0);
+		++g_redwoodMetrics.pagerRemapFree;
+
+		return Void();
+	}
+
+	ACTOR static Future<Version> getRemapLag(DWALPager* self) {
+		Optional<RemappedPage> head = wait(self->remapQueue.peek());
+		if (head.present()) {
+			return self->effectiveOldestVersion() - head.get().version;
+		}
+		return 0;
+	}
+
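Remap cleanup is gated on the queue's version lag: the gap between the oldest retained version and the version at the head of the remap queue. A small sketch of the start/stop decision using the new knob defaults of 4 and 15 (the constant names here are illustrative):

#include <cstdint>
#include <cstdio>

using Version = int64_t;

const Version LAG_MIN = 4;  // below this, cleanup doesn't start (too little to coalesce)
const Version LAG_MAX = 15; // above this, cleanup refuses to stop even when asked

const char* cleanupDecision(Version oldestRetained, Version queueHead, bool stopRequested) {
    Version lag = oldestRetained - queueHead;
    if (lag < LAG_MIN) return "skip";
    if (stopRequested && lag <= LAG_MAX) return "stop";
    return "run";
}

int main() {
    printf("%s\n", cleanupDecision(103, 100, false)); // lag 3  -> skip
    printf("%s\n", cleanupDecision(110, 100, true));  // lag 10 -> stop
    printf("%s\n", cleanupDecision(120, 100, true));  // lag 20 -> run
}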
+	ACTOR static Future<Void> remapCleanup(DWALPager* self) {
+		self->remapCleanupStop = false;
+
+		// Cutoff is the version we can pop to
		state RemappedPage cutoff;
		cutoff.version = self->effectiveOldestVersion();

-		// TODO: Use parallel reads
-		// TODO: One run of this actor might write to the same original page more than once, in which case just unmap
-		// the latest
+		// Each page is only updated at most once per version, so in order to coalesce multiple updates
+		// to the same page and skip some page writes we have to accumulate multiple versions worth of
+		// poppable entries.
+		Version lag = wait(getRemapLag(self));
+		debug_printf("DWALPager(%s) remapCleanup versionLag=%" PRId64 "\n", self->filename.c_str(), lag);
+		if (lag < SERVER_KNOBS->REDWOOD_REMAP_CLEANUP_VERSION_LAG_MIN) {
+			debug_printf("DWALPager(%s) not starting, lag too low\n", self->filename.c_str());
+			return Void();
+		}
+
		loop {
-			if (self->remapUndoStop) {
-				break;
+			// Pop up to the pop size limit from the queue, but only keep the latest remap queue entry per
+			// original page ID. This will coalesce multiple remaps of the same LogicalPageID within the
+			// interval of pages being unmapped to a single page copy.
+			state int toPop = SERVER_KNOBS->REDWOOD_REMAP_CLEANUP_BATCH_SIZE;
+			state std::unordered_map<LogicalPageID, RemappedPage> toCopy;
+			toCopy.reserve(toPop);
+
+			// Take up to batch size pages from front of queue
+			while (toPop > 0) {
+				state Optional<RemappedPage> p = wait(self->remapQueue.pop(cutoff));
+				debug_printf("DWALPager(%s) remapCleanup popped %s\n", self->filename.c_str(), ::toString(p).c_str());
+				if (!p.present()) {
+					break;
+				}
+
+				// Get the existing remap entry for the original page, which could be newly initialized
+				auto& m = toCopy[p.get().originalPageID];
+				// If version is invalid then this is a newly constructed RemappedPage, so copy p.get() over it
+				if (m.version != invalidVersion) {
+					ASSERT(m.version < p.get().version);
+					ASSERT(m.newPageID != invalidLogicalPageID);
+					// We're replacing a previously popped item so we can avoid copying it over the original.
+					debug_printf("DWALPager(%s) remapCleanup elided %s\n", self->filename.c_str(),
+					             m.toString().c_str());
+					// The remapped pages entries will be cleaned up below.
+					self->freeUnmappedPage(m.newPageID, 0);
+					++g_redwoodMetrics.pagerRemapFree;
+					++g_redwoodMetrics.pagerRemapSkip;
+				}
+				m = p.get();
+
+				--toPop;
			}
-			state Optional<RemappedPage> p = wait(self->remapQueue.pop(cutoff));
-			if (!p.present()) {
-				break;
+
+			std::vector<Future<Void>> copies;
+
+			for (auto& e : toCopy) {
+				const RemappedPage& m = e.second;
+				// If newPageID is invalid, originalPageID page was freed at version, not remapped
+				if (m.newPageID == invalidLogicalPageID) {
+					debug_printf("DWALPager(%s) remapCleanup freeNoCopy %s\n", self->filename.c_str(),
+					             m.toString().c_str());
+					self->remappedPages.erase(m.originalPageID);
+					self->freeUnmappedPage(m.originalPageID, 0);
+					++g_redwoodMetrics.pagerRemapFree;
+				} else {
+					copies.push_back(remapCopyAndFree(self, m));
+				}
			}
-			debug_printf("DWALPager(%s) undoRemaps popped %s\n", self->filename.c_str(), p.get().toString().c_str());

-			if (p.get().newPageID == invalidLogicalPageID) {
-				debug_printf("DWALPager(%s) undoRemaps freeing %s\n", self->filename.c_str(),
-				             p.get().toString().c_str());
-				self->freePage(p.get().originalPageID, p.get().version);
-			} else {
-				// Read the data from the page that the original was mapped to
-				Reference<IPage> data = wait(self->readPage(p.get().newPageID, false));
+			wait(waitForAll(copies));

-				// Write the data to the original page so it can be read using its original pageID
-				self->updatePage(p.get().originalPageID, data);
+			// Stop if there was nothing more that could be popped
+			if (toPop > 0) {
+				break;
+			}

-				// Remove the remap from this page, deleting the entry for the pageID if its map becomes empty
-				auto i = self->remappedPages.find(p.get().originalPageID);
-				if (i->second.size() == 1) {
-					self->remappedPages.erase(i);
+			// If the stop flag is set then stop but only if the remap lag is below the maximum allowed
+			if (self->remapCleanupStop) {
+				Version lag = wait(getRemapLag(self));
+				if (lag <= SERVER_KNOBS->REDWOOD_REMAP_CLEANUP_VERSION_LAG_MAX) {
+					break;
				} else {
-					i->second.erase(p.get().version);
+					debug_printf("DWALPager(%s) remapCleanup refusing to stop, versionLag=%" PRId64 "\n",
+					             self->filename.c_str(), lag);
				}
-
-				// Now that the remap has been undone nothing will read this page so it can be freed as of the next
-				// commit.
-				self->freePage(p.get().newPageID, 0);
			}
		}

-		debug_printf("DWALPager(%s) undoRemaps stopped, remapQueue size is %d\n", self->filename.c_str(),
-		             self->remapQueue.numEntries);
+		debug_printf("DWALPager(%s) remapCleanup stopped (stop=%d)\n", self->filename.c_str(), self->remapCleanupStop);
		return Void();
	}
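The batch map above keeps only the newest popped remap per original page, so earlier remaps in the same batch are elided rather than copied back. A reduced sketch of just the coalescing step (a plain map over a popped batch; the real code also frees each elided new page):

#include <cstdint>
#include <unordered_map>
#include <vector>

struct Remap { int64_t version = -1; uint32_t originalPageID = 0; uint32_t newPageID = 0; };

// Entries are popped in version order, so the last remap per original page
// wins; earlier ones in the batch never need their contents copied back.
std::unordered_map<uint32_t, Remap> coalesce(const std::vector<Remap>& popped) {
    std::unordered_map<uint32_t, Remap> toCopy;
    for (const Remap& r : popped) toCopy[r.originalPageID] = r;
    return toCopy;
}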
	// Flush all queues so they have no operations pending.
	ACTOR static Future<Void> flushQueues(DWALPager* self) {
-		ASSERT(self->remapUndoFuture.isReady());
+		ASSERT(self->remapCleanupFuture.isReady());

		// Flush remap queue separately, it's not involved in free page management
		wait(self->remapQueue.flush());
class DWALPager : public IPager2 {
		self->writeHeaderPage(1, self->lastCommittedHeaderPage);

		// Trigger the remap eraser to stop and then wait for it.
-		self->remapUndoStop = true;
-		wait(self->remapUndoFuture);
+		self->remapCleanupStop = true;
+		wait(self->remapCleanupFuture);

		wait(flushQueues(self));

class DWALPager : public IPager2 {
		self->expireSnapshots(self->pHeader->oldestVersion);

		// Start unmapping pages for expired versions
-		self->remapUndoStop = false;
-		self->remapUndoFuture = undoRemaps(self);
+		self->remapCleanupFuture = remapCleanup(self);

		return Void();
	}
class DWALPager : public IPager2 {
		debug_printf("DWALPager(%s) shutdown cancel commit\n", self->filename.c_str());
		self->commitFuture.cancel();
		debug_printf("DWALPager(%s) shutdown cancel remap\n", self->filename.c_str());
-		self->remapUndoFuture.cancel();
+		self->remapCleanupFuture.cancel();

		if (self->errorPromise.canBeSet()) {
			debug_printf("DWALPager(%s) shutdown sending error\n", self->filename.c_str());
class DWALPager : public IPager2 {

	ACTOR static Future<Void> getUserPageCount_cleanup(DWALPager* self) {
		// Wait for the remap eraser to finish all of its work (not triggering stop)
-		wait(self->remapUndoFuture);
+		wait(self->remapCleanupFuture);

		// Flush queues so there are no pending freelist operations
		wait(flushQueues(self));
class DWALPager : public IPager2 {
	Future<Void> commitFuture;
	SignalableActorCollection operations;
	Future<Void> recoverFuture;
-	Future<Void> remapUndoFuture;
-	bool remapUndoStop;
+	Future<Void> remapCleanupFuture;
+	bool remapCleanupStop;

	Reference<IAsyncFile> pageFile;

struct RedwoodRecordRef {
	Version version;

	int expectedSize() const { return key.expectedSize() + value.expectedSize(); }
+	int kvBytes() const { return expectedSize(); }

	class Reader {
	public:
struct BTreePage {
 #pragma pack(pop)

	int size() const {
-		const BinaryTree* t = &tree();
-		return (uint8_t*)t - (uint8_t*)this + t->size();
+		auto& t = tree();
+		return (uint8_t*)&t - (uint8_t*)this + t.size();
	}

	bool isLeaf() const { return height == 1; }
class VersionedBTree : public IVersionedStore {
	// A record which is greater than the last possible record in the tree
	static RedwoodRecordRef dbEnd;

-	struct LazyDeleteQueueEntry {
+	struct LazyClearQueueEntry {
		Version version;
		Standalone<BTreePageIDRef> pageID;

-		bool operator<(const LazyDeleteQueueEntry& rhs) const { return version < rhs.version; }
+		bool operator<(const LazyClearQueueEntry& rhs) const { return version < rhs.version; }

		int readFromBytes(const uint8_t* src) {
			version = *(Version*)src;
class VersionedBTree : public IVersionedStore {
		std::string toString() const { return format("{%s @%" PRId64 "}", ::toString(pageID).c_str(), version); }
	};

-	typedef FIFOQueue<LazyDeleteQueueEntry> LazyDeleteQueueT;
+	typedef FIFOQueue<LazyClearQueueEntry> LazyClearQueueT;

 #pragma pack(push, 1)
	struct MetaKey {
-		static constexpr int FORMAT_VERSION = 7;
+		static constexpr int FORMAT_VERSION = 8;
		// This serves as the format version for the entire tree, individual pages will not be versioned
		uint16_t formatVersion;
		uint8_t height;
-		LazyDeleteQueueT::QueueState lazyDeleteQueue;
+		LazyClearQueueT::QueueState lazyDeleteQueue;
		InPlaceArray<LogicalPageID> root;

		KeyRef asKeyRef() const { return KeyRef((uint8_t*)this, sizeof(MetaKey) + root.extraSize()); }
class VersionedBTree : public IVersionedStore {
	};
 #pragma pack(pop)

-	struct Counts {
-		Counts() {
-			memset(this, 0, sizeof(Counts));
-			startTime = g_network ? now() : 0;
-		}
-
-		void clear() { *this = Counts(); }
-
-		int64_t pageReads;
-		int64_t extPageReads;
-		int64_t pagePreloads;
-		int64_t extPagePreloads;
-		int64_t setBytes;
-		int64_t pageWrites;
-		int64_t extPageWrites;
-		int64_t sets;
-		int64_t clears;
-		int64_t clearSingleKey;
-		int64_t commits;
-		int64_t gets;
-		int64_t getRanges;
-		int64_t commitSubtreeStart;
-		int64_t pageUpdates;
-		double startTime;
-
-		std::string toString(bool clearAfter = false) {
-			const char* labels[] = { "set",
-				                     "clear",
-				                     "clearSingleKey",
-				                     "get",
-				                     "getRange",
-				                     "commit",
-				                     "pageReads",
-				                     "extPageRead",
-				                     "pagePreloads",
-				                     "extPagePreloads",
-				                     "pageWrites",
-				                     "pageUpdates",
-				                     "extPageWrites",
-				                     "commitSubtreeStart" };
-			const int64_t values[] = {
-				sets, clears, clearSingleKey, gets, getRanges, commits, pageReads,
-				extPageReads, pagePreloads, extPagePreloads, pageWrites, pageUpdates, extPageWrites, commitSubtreeStart
-			};
-
-			double elapsed = now() - startTime;
-			std::string s;
-			for (int i = 0; i < sizeof(values) / sizeof(int64_t); ++i) {
-				s += format("%s=%" PRId64 " (%d/s) ", labels[i], values[i], int(values[i] / elapsed));
-			}
-
-			if (clearAfter) {
-				clear();
-			}
-
-			return s;
-		}
-	};
-
-	// Using a static for metrics because a single process shouldn't normally have multiple storage engines
-	static Counts counts;
-
	// All async opts on the btree are based on pager reads, writes, and commits, so
	// we can mostly forward these next few functions to the pager
	Future<Void> getError() { return m_pager->getError(); }
class VersionedBTree : public IVersionedStore {
	// setWriteVersion() A write shall not become durable until the following call to commit() begins, and shall be
	// durable once the following call to commit() returns
	void set(KeyValueRef keyValue) {
-		++counts.sets;
+		++g_redwoodMetrics.opSet;
+		g_redwoodMetrics.opSetKeyBytes += keyValue.key.size();
+		g_redwoodMetrics.opSetValueBytes += keyValue.value.size();
		m_pBuffer->insert(keyValue.key).mutation().setBoundaryValue(m_pBuffer->copyToArena(keyValue.value));
	}

class VersionedBTree : public IVersionedStore {
		// Optimization for single key clears to create just one mutation boundary instead of two
		if (clearedRange.begin.size() == clearedRange.end.size() - 1 &&
		    clearedRange.end[clearedRange.end.size() - 1] == 0 && clearedRange.end.startsWith(clearedRange.begin)) {
-			++counts.clears;
-			++counts.clearSingleKey;
+			++g_redwoodMetrics.opClear;
+			++g_redwoodMetrics.opClearKey;
			m_pBuffer->insert(clearedRange.begin).mutation().clearBoundary();
			return;
		}

-		++counts.clears;
+		++g_redwoodMetrics.opClear;
		MutationBuffer::iterator iBegin = m_pBuffer->insert(clearedRange.begin);
		MutationBuffer::iterator iEnd = m_pBuffer->insert(clearedRange.end);

class VersionedBTree : public IVersionedStore {
	VersionedBTree(IPager2* pager, std::string name)
	  : m_pager(pager), m_writeVersion(invalidVersion), m_lastCommittedVersion(invalidVersion), m_pBuffer(nullptr),
	    m_name(name) {
+		m_lazyClearActor = 0;
		m_init = init_impl(this);
		m_latestCommit = m_init;
	}

-	ACTOR static Future<int> incrementalSubtreeClear(VersionedBTree* self, bool* pStop = nullptr, int batchSize = 10,
-	                                                 unsigned int minPages = 0,
-	                                                 int maxPages = std::numeric_limits<int>::max()) {
+	ACTOR static Future<int> incrementalLazyClear(VersionedBTree* self) {
+		ASSERT(self->m_lazyClearActor.isReady());
+		self->m_lazyClearStop = false;
+
		// TODO: Is it contractually okay to always to read at the latest version?
		state Reference<IPagerSnapshot> snapshot = self->m_pager->getReadSnapshot(self->m_pager->getLatestVersion());
		state int freedPages = 0;

		loop {
-			state std::vector<std::pair<LazyDeleteQueueEntry, Future<Reference<const IPage>>>> entries;
+			state int toPop = SERVER_KNOBS->REDWOOD_LAZY_CLEAR_BATCH_SIZE_PAGES;
+			state std::vector<std::pair<LazyClearQueueEntry, Future<Reference<const IPage>>>> entries;
+			entries.reserve(toPop);

			// Take up to batchSize pages from front of queue
-			while (entries.size() < batchSize) {
-				Optional<LazyDeleteQueueEntry> q = wait(self->m_lazyDeleteQueue.pop());
-				debug_printf("LazyDelete: popped %s\n", toString(q).c_str());
+			while (toPop > 0) {
+				Optional<LazyClearQueueEntry> q = wait(self->m_lazyClearQueue.pop());
+				debug_printf("LazyClear: popped %s\n", toString(q).c_str());
				if (!q.present()) {
					break;
				}
				// Start reading the page, without caching
				entries.push_back(
				    std::make_pair(q.get(), self->readPage(snapshot, q.get().pageID, nullptr, nullptr, true)));
-			}

-			if (entries.empty()) {
-				break;
+				--toPop;
			}

			state int i;
			for (i = 0; i < entries.size(); ++i) {
				Reference<const IPage> p = wait(entries[i].second);
-				const LazyDeleteQueueEntry& entry = entries[i].first;
+				const LazyClearQueueEntry& entry = entries[i].first;
				const BTreePage& btPage = *(BTreePage*)p->begin();
-				debug_printf("LazyDelete: processing %s\n", toString(entry).c_str());
+				auto& metrics = g_redwoodMetrics.level(btPage.height);
+
+				debug_printf("LazyClear: processing %s\n", toString(entry).c_str());

				// Level 1 (leaf) nodes should never be in the lazy delete queue
				ASSERT(btPage.height > 1);
class VersionedBTree : public IVersionedStore {
				while (1) {
					if (c.get().value.present()) {
						BTreePageIDRef btChildPageID = c.get().getChildPage();
-						// If this page is height 2, then the children are leaves so free
+						// If this page is height 2, then the children are leaves so free them directly
						if (btPage.height == 2) {
-							debug_printf("LazyDelete: freeing child %s\n", toString(btChildPageID).c_str());
+							debug_printf("LazyClear: freeing child %s\n", toString(btChildPageID).c_str());
							self->freeBtreePage(btChildPageID, v);
							freedPages += btChildPageID.size();
+							metrics.lazyClearFree += 1;
+							metrics.lazyClearFreeExt += (btChildPageID.size() - 1);
						} else {
							// Otherwise, queue them for lazy delete.
-							debug_printf("LazyDelete: queuing child %s\n", toString(btChildPageID).c_str());
-							self->m_lazyDeleteQueue.pushFront(LazyDeleteQueueEntry{ v, btChildPageID });
+							debug_printf("LazyClear: queuing child %s\n", toString(btChildPageID).c_str());
+							self->m_lazyClearQueue.pushFront(LazyClearQueueEntry{ v, btChildPageID });
+							metrics.lazyClearRequeue += 1;
+							metrics.lazyClearRequeueExt += (btChildPageID.size() - 1);
						}
					}
					if (!c.moveNext()) {
class VersionedBTree : public IVersionedStore {
				}

				// Free the page, now that its children have either been freed or queued
-				debug_printf("LazyDelete: freeing queue entry %s\n", toString(entry.pageID).c_str());
+				debug_printf("LazyClear: freeing queue entry %s\n", toString(entry.pageID).c_str());
				self->freeBtreePage(entry.pageID, v);
				freedPages += entry.pageID.size();
+				metrics.lazyClearFree += 1;
+				metrics.lazyClearFreeExt += entry.pageID.size() - 1;
			}

-			// If stop is set and we've freed the minimum number of pages required, or the maximum is exceeded, return.
-			if ((freedPages >= minPages && pStop != nullptr && *pStop) || freedPages >= maxPages) {
+			// Stop if
+			//   - the poppable items in the queue have already been exhausted
+			//   - stop flag is set and we've freed the minimum number of pages required
+			//   - maximum number of pages to free met or exceeded
+			if (toPop > 0 || (freedPages >= SERVER_KNOBS->REDWOOD_LAZY_CLEAR_MIN_PAGES && self->m_lazyClearStop) ||
+			    (freedPages >= SERVER_KNOBS->REDWOOD_LAZY_CLEAR_MAX_PAGES)) {
				break;
			}
		}

-		debug_printf("LazyDelete: freed %d pages, %s has %" PRId64 " entries\n", freedPages,
-		             self->m_lazyDeleteQueue.name.c_str(), self->m_lazyDeleteQueue.numEntries);
+		debug_printf("LazyClear: freed %d pages, %s has %" PRId64 " entries\n", freedPages,
+		             self->m_lazyClearQueue.name.c_str(), self->m_lazyClearQueue.numEntries);
		return freedPages;
	}

	ACTOR static Future<Void> init_impl(VersionedBTree* self) {
		wait(self->m_pager->init());

+		self->m_blockSize = self->m_pager->getUsablePageSize();
		state Version latest = self->m_pager->getLatestVersion();
		self->m_newOldestVersion = self->m_pager->getOldestVersion();

class VersionedBTree : public IVersionedStore {
			self->m_pager->setCommitVersion(latest);

			LogicalPageID newQueuePage = wait(self->m_pager->newPageID());
-			self->m_lazyDeleteQueue.create(self->m_pager, newQueuePage, "LazyDeleteQueue");
-			self->m_header.lazyDeleteQueue = self->m_lazyDeleteQueue.getState();
+			self->m_lazyClearQueue.create(self->m_pager, newQueuePage, "LazyClearQueue");
+			self->m_header.lazyDeleteQueue = self->m_lazyClearQueue.getState();
			self->m_pager->setMetaKey(self->m_header.asKeyRef());
			wait(self->m_pager->commit());
			debug_printf("Committed initial commit.\n");
		} else {
			self->m_header.fromKeyRef(meta);
-			self->m_lazyDeleteQueue.recover(self->m_pager, self->m_header.lazyDeleteQueue, "LazyDeleteQueueRecovered");
+			self->m_lazyClearQueue.recover(self->m_pager, self->m_header.lazyDeleteQueue, "LazyClearQueueRecovered");
		}

		debug_printf("Recovered btree at version %" PRId64 ": %s\n", latest, self->m_header.toString().c_str());

		self->m_lastCommittedVersion = latest;
+		self->m_lazyClearActor = incrementalLazyClear(self);
		return Void();
	}
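The lazy clear actor is now self-scheduling and knob-bounded: it stops when the queue runs dry, when a requested stop arrives after at least the minimum page count has been freed, or at the hard maximum. A compact sketch of that stop predicate (simplified; the real loop also batches queue pops and page reads):

#include <cstdio>

// Simplified stop logic from the lazy clear loop.
bool shouldStop(int popped, int batchSize, int freedPages, bool stopRequested,
                int minPages, int maxPages) {
    bool queueExhausted = popped < batchSize; // couldn't fill the batch
    return queueExhausted || (stopRequested && freedPages >= minPages) || freedPages >= maxPages;
}

int main() {
    printf("%d\n", shouldStop(10, 10, 5, true, 0, 1000000));  // 1: stop honored (min is 0)
    printf("%d\n", shouldStop(10, 10, 5, false, 0, 1000000)); // 0: keep clearing
    printf("%d\n", shouldStop(3, 10, 5, false, 0, 1000000));  // 1: queue exhausted
}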
get ( ) = = 0 ) { <nl> break ; <nl> } <nl> self - > setWriteVersion ( self - > getLatestVersion ( ) + 1 ) ; <nl> class VersionedBTree : public IVersionedStore { <nl> <nl> / / The lazy delete queue should now be empty and contain only the new page to start writing to <nl> / / on the next commit . <nl> - LazyDeleteQueueT : : QueueState s = self - > m_lazyDeleteQueue . getState ( ) ; <nl> + LazyClearQueueT : : QueueState s = self - > m_lazyClearQueue . getState ( ) ; <nl> ASSERT ( s . numEntries = = 0 ) ; <nl> ASSERT ( s . numPages = = 1 ) ; <nl> <nl> class VersionedBTree : public IVersionedStore { <nl> Future < Void > m_latestCommit ; <nl> Future < Void > m_init ; <nl> std : : string m_name ; <nl> + int m_blockSize ; <nl> <nl> / / MetaKey changes size so allocate space for it to expand into <nl> union { <nl> class VersionedBTree : public IVersionedStore { <nl> MetaKey m_header ; <nl> } ; <nl> <nl> - LazyDeleteQueueT m_lazyDeleteQueue ; <nl> + LazyClearQueueT m_lazyClearQueue ; <nl> + Future < int > m_lazyClearActor ; <nl> + bool m_lazyClearStop ; <nl> <nl> / / Writes entries to 1 or more pages and return a vector of boundary keys with their IPage ( s ) <nl> ACTOR static Future < Standalone < VectorRef < RedwoodRecordRef > > > writePages ( <nl> class VersionedBTree : public IVersionedStore { <nl> state Standalone < VectorRef < RedwoodRecordRef > > records ; <nl> <nl> / / This is how much space for the binary tree exists in the page , after the header <nl> - state int blockSize = self - > m_pager - > getUsablePageSize ( ) ; <nl> + state int blockSize = self - > m_blockSize ; <nl> state int pageSize = blockSize - sizeof ( BTreePage ) ; <nl> state int pageFillTarget = pageSize * SERVER_KNOBS - > REDWOOD_PAGE_REBUILD_FILL_FACTOR ; <nl> state int blockCount = 1 ; <nl> class VersionedBTree : public IVersionedStore { <nl> / / overhead for the delta size must be assumed . <nl> int deltaSize = entry . deltaSize ( base , skip , true ) ; <nl> <nl> - int keySize = entry . key . size ( ) ; <nl> - int valueSize = entry . value . present ( ) ? entry . value . get ( ) . size ( ) : 0 ; <nl> - <nl> int nodeSize = BTreePage : : BinaryTree : : Node : : headerSize ( largeTree ) + deltaSize ; <nl> debug_printf ( " Adding % 3d of % 3lu ( i = % 3d ) klen % 4d vlen % 5d nodeSize % 5d deltaSize % 5d page usage : " <nl> " % d / % d ( % . 2f % % ) record = % s \ n " , <nl> - i + 1 , entries . size ( ) , i , keySize , valueSize , nodeSize , deltaSize , compressedBytes , <nl> - pageSize , ( float ) compressedBytes / pageSize * 100 , entry . toString ( height = = 1 ) . c_str ( ) ) ; <nl> + i + 1 , entries . size ( ) , i , entry . key . size ( ) , entry . value . orDefault ( StringRef ( ) ) . size ( ) , <nl> + nodeSize , deltaSize , compressedBytes , pageSize , ( float ) compressedBytes / pageSize * 100 , <nl> + entry . toString ( height = = 1 ) . c_str ( ) ) ; <nl> <nl> / / While the node doesn ' t fit , expand the page . <nl> / / This is a loop because if the page size moves into " large " range for DeltaTree <nl> class VersionedBTree : public IVersionedStore { <nl> pageFillTarget = pageSize * SERVER_KNOBS - > REDWOOD_PAGE_REBUILD_FILL_FACTOR ; <nl> } <nl> <nl> - kvBytes + = keySize + valueSize ; <nl> + kvBytes + = entry . 
kvBytes ( ) ; <nl> compressedBytes + = nodeSize ; <nl> + + i ; <nl> } <nl> class VersionedBTree : public IVersionedStore { <nl> state std : : vector < Reference < IPage > > pages ; <nl> BTreePage * btPage ; <nl> <nl> + int capacity = blockSize * blockCount ; <nl> if ( blockCount = = 1 ) { <nl> Reference < IPage > page = self - > m_pager - > newPageBuffer ( ) ; <nl> btPage = ( BTreePage * ) page - > mutate ( ) ; <nl> pages . push_back ( std : : move ( page ) ) ; <nl> } else { <nl> ASSERT ( blockCount > 1 ) ; <nl> - int size = blockSize * blockCount ; <nl> - btPage = ( BTreePage * ) new uint8_t [ size ] ; <nl> + btPage = ( BTreePage * ) new uint8_t [ capacity ] ; <nl> } <nl> <nl> btPage - > height = height ; <nl> class VersionedBTree : public IVersionedStore { <nl> ASSERT ( false ) ; <nl> } <nl> <nl> + auto & metrics = g_redwoodMetrics . level ( btPage - > height ) ; <nl> + metrics . pageBuild + = 1 ; <nl> + metrics . pageBuildExt + = blockCount - 1 ; <nl> + metrics . buildFillPct + = ( double ) written / capacity ; <nl> + metrics . buildStoredPct + = ( double ) btPage - > kvBytes / capacity ; <nl> + metrics . buildItemCount + = btPage - > tree ( ) . numItems ; <nl> + <nl> / / Create chunked pages <nl> / / TODO : Avoid copying page bytes , but this is not trivial due to how pager checksums are currently handled . <nl> if ( blockCount ! = 1 ) { <nl> class VersionedBTree : public IVersionedStore { <nl> <nl> wait ( yield ( ) ) ; <nl> <nl> - / / Update activity counts <nl> - + + counts . pageWrites ; <nl> - if ( pages . size ( ) > 1 ) { <nl> - counts . extPageWrites + = pages . size ( ) - 1 ; <nl> - } <nl> - <nl> debug_printf ( " Flushing % s lastPage = % d original = % s start = % d i = % d count = % d page usage : % d / % d ( % . 2f % % ) " <nl> " bytes \ nlower : % s \ nupper : % s \ n " , <nl> toString ( childPageID ) . c_str ( ) , isLastPage , toString ( previousID ) . c_str ( ) , start , i , i - start , <nl> class VersionedBTree : public IVersionedStore { <nl> ACTOR static Future < Reference < const IPage > > readPage ( Reference < IPagerSnapshot > snapshot , BTreePageIDRef id , <nl> const RedwoodRecordRef * lowerBound , <nl> const RedwoodRecordRef * upperBound , <nl> - bool forLazyDelete = false ) { <nl> - if ( ! forLazyDelete ) { <nl> + bool forLazyClear = false ) { <nl> + if ( ! forLazyClear ) { <nl> debug_printf ( " readPage ( ) op = read % s @ % " PRId64 " lower = % s upper = % s \ n " , toString ( id ) . c_str ( ) , <nl> snapshot - > getVersion ( ) , lowerBound - > toString ( false ) . c_str ( ) , <nl> upperBound - > toString ( false ) . c_str ( ) ) ; <nl> class VersionedBTree : public IVersionedStore { <nl> <nl> state Reference < const IPage > page ; <nl> <nl> - + + counts . pageReads ; <nl> if ( id . size ( ) = = 1 ) { <nl> - Reference < const IPage > p = wait ( snapshot - > getPhysicalPage ( id . front ( ) , ! forLazyDelete , false ) ) ; <nl> + Reference < const IPage > p = wait ( snapshot - > getPhysicalPage ( id . front ( ) , ! forLazyClear , false ) ) ; <nl> page = p ; <nl> } else { <nl> ASSERT ( ! id . empty ( ) ) ; <nl> - counts . extPageReads + = ( id . size ( ) - 1 ) ; <nl> std : : vector < Future < Reference < const IPage > > > reads ; <nl> for ( auto & pageID : id ) { <nl> - reads . push_back ( snapshot - > getPhysicalPage ( pageID , ! forLazyDelete , false ) ) ; <nl> + reads . push_back ( snapshot - > getPhysicalPage ( pageID , ! 
forLazyClear , false ) ) ; <nl> } <nl> std : : vector < Reference < const IPage > > pages = wait ( getAll ( reads ) ) ; <nl> / / TODO : Cache reconstituted super pages somehow , perhaps with help from the Pager . <nl> class VersionedBTree : public IVersionedStore { <nl> <nl> debug_printf ( " readPage ( ) op = readComplete % s @ % " PRId64 " \ n " , toString ( id ) . c_str ( ) , snapshot - > getVersion ( ) ) ; <nl> const BTreePage * pTreePage = ( const BTreePage * ) page - > begin ( ) ; <nl> + auto & metrics = g_redwoodMetrics . level ( pTreePage - > height ) ; <nl> + metrics . pageRead + = 1 ; <nl> + metrics . pageReadExt + = ( id . size ( ) - 1 ) ; <nl> <nl> - if ( ! forLazyDelete & & page - > userData = = nullptr ) { <nl> + if ( ! forLazyClear & & page - > userData = = nullptr ) { <nl> debug_printf ( " readPage ( ) Creating Reader for % s @ % " PRId64 " lower = % s upper = % s \ n " , toString ( id ) . c_str ( ) , <nl> snapshot - > getVersion ( ) , lowerBound - > toString ( false ) . c_str ( ) , <nl> upperBound - > toString ( false ) . c_str ( ) ) ; <nl> class VersionedBTree : public IVersionedStore { <nl> page - > userDataDestructor = [ ] ( void * ptr ) { delete ( BTreePage : : BinaryTree : : Mirror * ) ptr ; } ; <nl> } <nl> <nl> - if ( ! forLazyDelete ) { <nl> + if ( ! forLazyClear ) { <nl> debug_printf ( " readPage ( ) % s \ n " , <nl> pTreePage - > toString ( false , id , snapshot - > getVersion ( ) , lowerBound , upperBound ) . c_str ( ) ) ; <nl> } <nl> class VersionedBTree : public IVersionedStore { <nl> } <nl> <nl> static void preLoadPage ( IPagerSnapshot * snapshot , BTreePageIDRef id ) { <nl> - + + counts . pagePreloads ; <nl> - counts . extPagePreloads + = ( id . size ( ) - 1 ) ; <nl> + g_redwoodMetrics . btreeLeafPreload + = 1 ; <nl> + g_redwoodMetrics . btreeLeafPreloadExt + = ( id . size ( ) - 1 ) ; <nl> <nl> for ( auto pageID : id ) { <nl> snapshot - > getPhysicalPage ( pageID , true , true ) ; <nl> class VersionedBTree : public IVersionedStore { <nl> } <nl> } <nl> <nl> - / / Update activity counts <nl> - + + counts . pageWrites ; <nl> - if ( newID . size ( ) > 1 ) { <nl> - counts . extPageWrites + = newID . size ( ) - 1 ; <nl> - } <nl> - <nl> return newID ; <nl> } <nl> <nl> class VersionedBTree : public IVersionedStore { <nl> } <nl> <nl> / / Page was updated in - place through edits and written to maybeNewID <nl> - void updatedInPlace ( BTreePageIDRef maybeNewID ) { <nl> + void updatedInPlace ( BTreePageIDRef maybeNewID , BTreePage * btPage , int capacity ) { <nl> + auto & metrics = g_redwoodMetrics . level ( btPage - > height ) ; <nl> + metrics . pageModify + = 1 ; <nl> + metrics . pageModify + = ( maybeNewID . size ( ) - 1 ) ; <nl> + metrics . modifyFillPct + = ( double ) btPage - > size ( ) / capacity ; <nl> + metrics . modifyStoredPct + = ( double ) btPage - > kvBytes / capacity ; <nl> + metrics . modifyItemCount + = btPage - > tree ( ) . numItems ; <nl> + <nl> / / The boundaries can ' t have changed , but the child page link may have . <nl> if ( maybeNewID ! 
= decodeLowerBound - > getChildPage ( ) ) { <nl> / / Add page ' s decode lower bound to newLinks set without its child page , intially <nl> class VersionedBTree : public IVersionedStore { <nl> <nl> struct InternalPageModifier { <nl> InternalPageModifier ( ) { } <nl> - InternalPageModifier ( BTreePage : : BinaryTree : : Mirror * m , bool updating ) <nl> - : m ( m ) , updating ( updating ) , changesMade ( false ) { } <nl> + InternalPageModifier ( BTreePage * p , BTreePage : : BinaryTree : : Mirror * m , bool updating ) <nl> + : btPage ( p ) , m ( m ) , updating ( updating ) , changesMade ( false ) { } <nl> <nl> bool updating ; <nl> + BTreePage * btPage ; <nl> BTreePage : : BinaryTree : : Mirror * m ; <nl> Standalone < VectorRef < RedwoodRecordRef > > rebuild ; <nl> bool changesMade ; <nl> class VersionedBTree : public IVersionedStore { <nl> updating = false ; <nl> break ; <nl> } <nl> + btPage - > kvBytes + = rec . kvBytes ( ) ; <nl> + + i ; <nl> } <nl> } <nl> class VersionedBTree : public IVersionedStore { <nl> auto c = u . cBegin ; <nl> while ( c ! = u . cEnd ) { <nl> debug_printf ( " internal page ( updating ) erasing : % s \ n " , c . get ( ) . toString ( false ) . c_str ( ) ) ; <nl> + btPage - > kvBytes - = c . get ( ) . kvBytes ( ) ; <nl> c . erase ( ) ; <nl> } <nl> / / [ cBegin , cEnd ) is now erased , and cBegin is invalid , so cEnd represents the end <nl> class VersionedBTree : public IVersionedStore { <nl> debug_printf ( " % s mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - \ n " , context . c_str ( ) ) ; <nl> } <nl> <nl> - + + self - > counts . commitSubtreeStart ; <nl> state Version writeVersion = self - > getLastCommittedVersion ( ) + 1 ; <nl> state Reference < const IPage > page = <nl> wait ( readPage ( snapshot , rootID , update - > decodeLowerBound , update - > decodeUpperBound ) ) ; <nl> state BTreePage * btPage = ( BTreePage * ) page - > begin ( ) ; <nl> ASSERT ( isLeaf = = btPage - > isLeaf ( ) ) ; <nl> + g_redwoodMetrics . level ( btPage - > height ) . pageCommitStart + = 1 ; <nl> <nl> / / TODO : Decide if it is okay to update if the subtree boundaries are expanded . It can result in <nl> / / records in a DeltaTree being outside its decode boundary range , which isn ' t actually invalid <nl> class VersionedBTree : public IVersionedStore { <nl> if ( updating ) { <nl> debug_printf ( " % s Erasing % s [ existing , boundary start ] \ n " , context . c_str ( ) , <nl> cursor . get ( ) . toString ( ) . c_str ( ) ) ; <nl> + btPage - > kvBytes - = cursor . get ( ) . kvBytes ( ) ; <nl> cursor . erase ( ) ; <nl> } else { <nl> debug_printf ( " % s Skipped % s [ existing , boundary start ] \ n " , context . c_str ( ) , <nl> class VersionedBTree : public IVersionedStore { <nl> / / If updating , add to the page , else add to the output set <nl> if ( updating ) { <nl> if ( cursor . mirror - > insert ( rec , update - > skipLen , maxHeightAllowed ) ) { <nl> + btPage - > kvBytes + = rec . kvBytes ( ) ; <nl> debug_printf ( " % s Inserted % s [ mutation , boundary start ] \ n " , context . c_str ( ) , <nl> rec . toString ( ) . c_str ( ) ) ; <nl> } else { <nl> class VersionedBTree : public IVersionedStore { <nl> if ( updating ) { <nl> debug_printf ( " % s Erasing % s [ existing , boundary start ] \ n " , context . c_str ( ) , <nl> cursor . get ( ) . toString ( ) . c_str ( ) ) ; <nl> + btPage - > kvBytes - = cursor . get ( ) . kvBytes ( ) ; <nl> cursor . 
erase ( ) ; <nl> changesMade = true ; <nl> } else { <nl> class VersionedBTree : public IVersionedStore { <nl> debug_printf ( <nl> " % s Erasing % s and beyond [ existing , matches changed upper mutation boundary ] \ n " , <nl> context . c_str ( ) , cursor . get ( ) . toString ( ) . c_str ( ) ) ; <nl> + btPage - > kvBytes - = cursor . get ( ) . kvBytes ( ) ; <nl> cursor . erase ( ) ; <nl> } else { <nl> merged . push_back ( merged . arena ( ) , cursor . get ( ) ) ; <nl> class VersionedBTree : public IVersionedStore { <nl> BTreePageIDRef newID = wait ( self - > updateBtreePage ( self , rootID , & update - > newLinks . arena ( ) , <nl> page . castTo < IPage > ( ) , writeVersion ) ) ; <nl> <nl> - update - > updatedInPlace ( newID ) ; <nl> - + + counts . pageUpdates ; <nl> + update - > updatedInPlace ( newID , btPage , newID . size ( ) * self - > m_blockSize ) ; <nl> debug_printf ( " % s Page updated in - place , returning % s \ n " , context . c_str ( ) , <nl> toString ( * update ) . c_str ( ) ) ; <nl> } <nl> class VersionedBTree : public IVersionedStore { <nl> cursor . moveFirst ( ) ; <nl> <nl> bool first = true ; <nl> + <nl> while ( cursor . valid ( ) ) { <nl> InternalPageSliceUpdate & u = * new ( arena ) InternalPageSliceUpdate ( ) ; <nl> + slices . push_back ( & u ) ; <nl> <nl> / / At this point we should never be at a null child page entry because the first entry of a page <nl> / / can ' t be null and this loop will skip over null entries that come after non - null entries . <nl> class VersionedBTree : public IVersionedStore { <nl> if ( first ) { <nl> u . subtreeLowerBound = update - > subtreeLowerBound ; <nl> first = false ; <nl> + / / mbegin is already the first mutation that could affect this subtree described by update <nl> } else { <nl> u . subtreeLowerBound = u . decodeLowerBound ; <nl> + mBegin = mEnd ; <nl> + / / mBegin is either at or greater than subtreeLowerBound - > key , which was the subtreeUpperBound - > key <nl> + / / for the previous subtree slice . But we need it to be at or * before * subtreeLowerBound - > key <nl> + / / so if mBegin . key ( ) is not exactly the subtree lower bound key then decrement it . <nl> + if ( mBegin . key ( ) ! = u . subtreeLowerBound - > key ) { <nl> + - - mBegin ; <nl> + } <nl> } <nl> <nl> BTreePageIDRef pageID = cursor . get ( ) . getChildPage ( ) ; <nl> class VersionedBTree : public IVersionedStore { <nl> } <nl> u . subtreeUpperBound = cursor . valid ( ) ? & cursor . get ( ) : update - > subtreeUpperBound ; <nl> u . cEnd = cursor ; <nl> - <nl> u . skipLen = 0 ; / / TODO : set this <nl> <nl> - slices . push_back ( & u ) ; <nl> - <nl> / / Find the mutation buffer range that includes all changes to the range described by u <nl> - MutationBuffer : : const_iterator mBegin = mutationBuffer - > upper_bound ( u . subtreeLowerBound - > key ) ; <nl> - MutationBuffer : : const_iterator mEnd = mutationBuffer - > lower_bound ( u . subtreeUpperBound - > key ) ; <nl> - <nl> - / / If mutation boundaries are the same , the range is fully described by ( mBegin - 1 ) . mutation ( ) <nl> - bool fullyCovered = ( mBegin = = mEnd ) ; <nl> - - - mBegin ; <nl> - <nl> - / / If mBegin describes the entire subtree range , see if there are either no changes or if the entire <nl> - / / range is cleared . <nl> - if ( fullyCovered ) { <nl> + mEnd = mutationBuffer - > lower_bound ( u . 
subtreeUpperBound - > key ) ; <nl> + <nl> + / / If the mutation range described by mBegin extends to mEnd , then see if the part of that range <nl> + / / that overlaps with u ' s subtree range is being fully cleared or fully unchanged . <nl> + auto next = mBegin ; <nl> + + + next ; <nl> + if ( next = = mEnd ) { <nl> + / / Check for uniform clearedness or unchangedness for the range mutation where it overlaps u ' s <nl> + / / subtree <nl> + const KeyRef & mutationBoundaryKey = mBegin . key ( ) ; <nl> const RangeMutation & range = mBegin . mutation ( ) ; <nl> - <nl> - / / Check for uniform clearedness or unchangedness for the range mutation <nl> - KeyRef mutationBoundaryKey = mBegin . key ( ) ; <nl> bool uniform ; <nl> - <nl> if ( range . clearAfterBoundary ) { <nl> / / If the mutation range after the boundary key is cleared , then the mutation boundary key must <nl> / / be cleared or must be different than the subtree lower bound key so that it doesn ' t matter <nl> class VersionedBTree : public IVersionedStore { <nl> uniform = ! range . boundaryChanged | | mutationBoundaryKey ! = u . subtreeLowerBound - > key ; <nl> } <nl> <nl> - / / If the subtree range described by u is either uniformly changed or unchanged <nl> + / / If u ' s subtree is either all cleared or all unchanged <nl> if ( uniform ) { <nl> - / / See if we can expand the subtree range to include more subtrees which are also covered by the <nl> - / / same mutation range <nl> - if ( cursor . valid ( ) & & mEnd . key ( ) ! = cursor . get ( ) . key ) { <nl> + / / We do not need to recurse to this subtree . Next , let ' s see if we can embiggen u ' s range to <nl> + / / include sibling subtrees also covered by ( mBegin , mEnd ) so we can not recurse to those , too . <nl> + / / If the cursor is valid , u . subtreeUpperBound is the cursor ' s position , which is > = mEnd . key ( ) . <nl> + / / If equal , no range expansion is possible . <nl> + if ( cursor . valid ( ) & & mEnd . key ( ) ! = u . subtreeUpperBound - > key ) { <nl> cursor . seekLessThanOrEqual ( mEnd . key ( ) , update - > skipLen , & cursor , 1 ) ; <nl> <nl> / / If this seek moved us ahead , to something other than cEnd , then update subtree range <nl> class VersionedBTree : public IVersionedStore { <nl> } else { <nl> debug_printf ( " % s : queuing subtree deletion cleared subtree range : % s \ n " , <nl> context . c_str ( ) , : : toString ( rec . getChildPage ( ) ) . c_str ( ) ) ; <nl> - self - > m_lazyDeleteQueue . pushFront ( <nl> - LazyDeleteQueueEntry { writeVersion , rec . getChildPage ( ) } ) ; <nl> + self - > m_lazyClearQueue . pushFront ( <nl> + LazyClearQueueEntry { writeVersion , rec . getChildPage ( ) } ) ; <nl> } <nl> } <nl> c . moveNext ( ) ; <nl> class VersionedBTree : public IVersionedStore { <nl> / / Subtree range unchanged <nl> } <nl> <nl> - debug_printf ( " % s : MutationBuffer covers this range in a single mutation : % s \ n " , context . c_str ( ) , <nl> - u . toString ( ) . c_str ( ) ) ; <nl> + debug_printf ( " % s : MutationBuffer covers this range in a single mutation , not recursing : % s \ n " , <nl> + context . c_str ( ) , u . toString ( ) . c_str ( ) ) ; <nl> + <nl> + / / u has already been initialized with the correct result , no recursion needed , so restart the <nl> + / / loop . <nl> continue ; <nl> } <nl> } <nl> <nl> / / If this page has height of 2 then its children are leaf nodes <nl> - recursions . push_back ( self - > commitSubtree ( self , snapshot , mutationBuffer , pageID , btPage - > height = = 2 , <nl> - mBegin , mEnd , slices . 
back ( ) ) ) ; <nl> + recursions . push_back ( <nl> + self - > commitSubtree ( self , snapshot , mutationBuffer , pageID , btPage - > height = = 2 , mBegin , mEnd , & u ) ) ; <nl> } <nl> <nl> debug_printf ( <nl> class VersionedBTree : public IVersionedStore { <nl> wait ( waitForAll ( recursions ) ) ; <nl> debug_printf ( " % s Recursions done , processing slice updates . \ n " , context . c_str ( ) ) ; <nl> <nl> - state InternalPageModifier m ( cursor . mirror , tryToUpdate ) ; <nl> + state InternalPageModifier m ( btPage , cursor . mirror , tryToUpdate ) ; <nl> <nl> / / Apply the possible changes for each subtree range recursed to , except the last one . <nl> / / For each range , the expected next record , if any , is checked against the first boundary <nl> class VersionedBTree : public IVersionedStore { <nl> BTreePageIDRef newID = wait ( self - > updateBtreePage ( self , rootID , & update - > newLinks . arena ( ) , <nl> page . castTo < IPage > ( ) , writeVersion ) ) ; <nl> <nl> - update - > updatedInPlace ( newID ) ; <nl> - + + counts . pageUpdates ; <nl> + update - > updatedInPlace ( newID , btPage , newID . size ( ) * self - > m_blockSize ) ; <nl> debug_printf ( " % s Internal page updated in - place , returning % s \ n " , context . c_str ( ) , <nl> toString ( * update ) . c_str ( ) ) ; <nl> } else { <nl> class VersionedBTree : public IVersionedStore { <nl> debug_printf ( " % s : Beginning commit of version % " PRId64 " , new oldest version set to % " PRId64 " \ n " , <nl> self - > m_name . c_str ( ) , writeVersion , self - > m_newOldestVersion ) ; <nl> <nl> - state bool lazyDeleteStop = false ; <nl> - state Future < int > lazyDelete = incrementalSubtreeClear ( self , & lazyDeleteStop ) ; <nl> - <nl> / / Get the latest version from the pager , which is what we will read at <nl> state Version latestVersion = self - > m_pager - > getLatestVersion ( ) ; <nl> debug_printf ( " % s : pager latestVersion % " PRId64 " \ n " , self - > m_name . c_str ( ) , latestVersion ) ; <nl> class VersionedBTree : public IVersionedStore { <nl> <nl> self - > m_header . root . set ( rootPageID , sizeof ( headerSpace ) - sizeof ( m_header ) ) ; <nl> <nl> - lazyDeleteStop = true ; <nl> - wait ( success ( lazyDelete ) ) ; <nl> - debug_printf ( " Lazy delete freed % u pages \ n " , lazyDelete . get ( ) ) ; <nl> + self - > m_lazyClearStop = true ; <nl> + wait ( success ( self - > m_lazyClearActor ) ) ; <nl> + debug_printf ( " Lazy delete freed % u pages \ n " , self - > m_lazyClearActor . get ( ) ) ; <nl> <nl> self - > m_pager - > setCommitVersion ( writeVersion ) ; <nl> <nl> - wait ( self - > m_lazyDeleteQueue . flush ( ) ) ; <nl> - self - > m_header . lazyDeleteQueue = self - > m_lazyDeleteQueue . getState ( ) ; <nl> + wait ( self - > m_lazyClearQueue . flush ( ) ) ; <nl> + self - > m_header . lazyDeleteQueue = self - > m_lazyClearQueue . getState ( ) ; <nl> <nl> debug_printf ( " Setting metakey \ n " ) ; <nl> self - > m_pager - > setMetaKey ( self - > m_header . asKeyRef ( ) ) ; <nl> class VersionedBTree : public IVersionedStore { <nl> self - > m_mutationBuffers . erase ( self - > m_mutationBuffers . begin ( ) ) ; <nl> <nl> self - > m_lastCommittedVersion = writeVersion ; <nl> - + + counts . commits ; <nl> - committed . send ( Void ( ) ) ; <nl> + + + g_redwoodMetrics . opCommit ; <nl> + self - > m_lazyClearActor = incrementalLazyClear ( self ) ; <nl> <nl> + committed . 
send ( Void ( ) ) ; <nl> return Void ( ) ; <nl> } <nl> <nl> class VersionedBTree : public IVersionedStore { <nl> <nl> # include " fdbserver / art_impl . h " <nl> <nl> - RedwoodRecordRef VersionedBTree : : dbBegin ( StringRef ( ) , 0 ) ; <nl> + RedwoodRecordRef VersionedBTree : : dbBegin ( LiteralStringRef ( " " ) ) ; <nl> RedwoodRecordRef VersionedBTree : : dbEnd ( LiteralStringRef ( " \ xff \ xff \ xff \ xff \ xff " ) ) ; <nl> - VersionedBTree : : Counts VersionedBTree : : counts ; <nl> <nl> class KeyValueStoreRedwoodUnversioned : public IKeyValueStore { <nl> public : <nl> class KeyValueStoreRedwoodUnversioned : public IKeyValueStore { <nl> wait ( self - > m_concurrentReads . take ( ) ) ; <nl> state FlowLock : : Releaser releaser ( self - > m_concurrentReads ) ; <nl> <nl> - self - > m_tree - > counts . getRanges + + ; <nl> + + + g_redwoodMetrics . opGetRange ; <nl> state Standalone < RangeResultRef > result ; <nl> state int accumulatedBytes = 0 ; <nl> ASSERT ( byteLimit > 0 ) ; <nl> class KeyValueStoreRedwoodUnversioned : public IKeyValueStore { <nl> wait ( self - > m_concurrentReads . take ( ) ) ; <nl> state FlowLock : : Releaser releaser ( self - > m_concurrentReads ) ; <nl> <nl> - self - > m_tree - > counts . gets + + ; <nl> + + + g_redwoodMetrics . opGet ; <nl> state Reference < IStoreCursor > cur = self - > m_tree - > readAtVersion ( self - > m_tree - > getLastCommittedVersion ( ) ) ; <nl> <nl> wait ( cur - > findEqual ( key ) ) ; <nl> class KeyValueStoreRedwoodUnversioned : public IKeyValueStore { <nl> wait ( self - > m_concurrentReads . take ( ) ) ; <nl> state FlowLock : : Releaser releaser ( self - > m_concurrentReads ) ; <nl> <nl> - self - > m_tree - > counts . gets + + ; <nl> + + + g_redwoodMetrics . opGet ; <nl> state Reference < IStoreCursor > cur = self - > m_tree - > readAtVersion ( self - > m_tree - > getLastCommittedVersion ( ) ) ; <nl> <nl> wait ( cur - > findEqual ( key ) ) ; <nl> TEST_CASE ( " ! / redwood / performance / mutationBuffer " ) { <nl> } <nl> <nl> TEST_CASE ( " ! / redwood / correctness / btree " ) { <nl> + g_redwoodMetricsActor = Void ( ) ; / / Prevent trace event metrics from starting <nl> + g_redwoodMetrics . clear ( ) ; <nl> + <nl> state std : : string pagerFile = " unittest_pageFile . redwood " ; <nl> IPager2 * pager ; <nl> <nl> TEST_CASE ( " ! / redwood / correctness / btree " ) { <nl> <nl> printf ( " Initializing . . . \ n " ) ; <nl> state double startTime = now ( ) ; <nl> + <nl> pager = new DWALPager ( pageSize , pagerFile , cacheSizeBytes , pagerMemoryOnly ) ; <nl> state VersionedBTree * btree = new VersionedBTree ( pager , pagerFile ) ; <nl> wait ( btree - > init ( ) ) ; <nl> TEST_CASE ( " ! / redwood / correctness / btree " ) { <nl> } <nl> <nl> commit = map ( btree - > commit ( ) , [ = ] ( Void ) { <nl> - printf ( " Committed : % s \ n " , VersionedBTree : : counts . toString ( true ) . c_str ( ) ) ; <nl> + printf ( " Committed : \ n % s \ n " , g_redwoodMetrics . toString ( true ) . c_str ( ) ) ; <nl> / / Notify the background verifier that version is committed and therefore readable <nl> committedVersions . send ( v ) ; <nl> return Void ( ) ; <nl> TEST_CASE ( " ! / redwood / correctness / pager / cow " ) { <nl> <nl> TEST_CASE ( " ! / redwood / performance / set " ) { <nl> state SignalableActorCollection actors ; <nl> - VersionedBTree : : counts . clear ( ) ; <nl> + <nl> + g_redwoodMetricsActor = Void ( ) ; / / Prevent trace event metrics from starting <nl> + g_redwoodMetrics . 
clear ( ) ; <nl> <nl> / / If a test file is passed in by environment then don ' t write new data to it . <nl> state bool reload = getenv ( " TESTFILE " ) = = nullptr ; <nl> TEST_CASE ( " ! / redwood / performance / set " ) { <nl> deleteFile ( pagerFile ) ; <nl> } <nl> <nl> - state int pageSize = 4096 ; <nl> + state int pageSize = SERVER_KNOBS - > REDWOOD_DEFAULT_PAGE_SIZE ; <nl> state int64_t pageCacheBytes = FLOW_KNOBS - > PAGE_CACHE_4K ; <nl> DWALPager * pager = new DWALPager ( pageSize , pagerFile , pageCacheBytes ) ; <nl> state VersionedBTree * btree = new VersionedBTree ( pager , pagerFile ) ; <nl> TEST_CASE ( " ! / redwood / performance / set " ) { <nl> Version lastVer = btree - > getLatestVersion ( ) ; <nl> state Version version = lastVer + 1 ; <nl> btree - > setWriteVersion ( version ) ; <nl> - int changesThisVersion = deterministicRandom ( ) - > randomInt ( 0 , maxRecordsPerCommit - recordsThisCommit + 1 ) ; <nl> + state int changesThisVersion = <nl> + deterministicRandom ( ) - > randomInt ( 0 , maxRecordsPerCommit - recordsThisCommit + 1 ) ; <nl> <nl> while ( changesThisVersion > 0 & & kvBytesThisCommit < maxKVBytesPerCommit ) { <nl> KeyValue kv ; <nl> TEST_CASE ( " ! / redwood / performance / set " ) { <nl> kvBytesThisCommit + = kv . key . size ( ) + kv . value . size ( ) ; <nl> + + recordsThisCommit ; <nl> } <nl> + <nl> + wait ( yield ( ) ) ; <nl> } <nl> <nl> if ( kvBytesThisCommit > = maxKVBytesPerCommit | | recordsThisCommit > = maxRecordsPerCommit ) { <nl> TEST_CASE ( " ! / redwood / performance / set " ) { <nl> double * pIntervalStart = & intervalStart ; <nl> <nl> commit = map ( btree - > commit ( ) , [ = ] ( Void result ) { <nl> - printf ( " Committed : % s \ n " , VersionedBTree : : counts . toString ( true ) . c_str ( ) ) ; <nl> + printf ( " Committed : \ n % s \ n " , g_redwoodMetrics . toString ( true ) . c_str ( ) ) ; <nl> double elapsed = timer ( ) - * pIntervalStart ; <nl> printf ( " Committed % d keyValueBytes in % d records in % f seconds , % . 2f MB / s \ n " , kvb , recs , elapsed , <nl> kvb / elapsed / 1e6 ) ; <nl> TEST_CASE ( " ! / redwood / performance / set " ) { <nl> actors . add ( randomSeeks ( btree , seeks / 3 , firstKeyChar , lastKeyChar ) ) ; <nl> actors . add ( randomSeeks ( btree , seeks / 3 , firstKeyChar , lastKeyChar ) ) ; <nl> wait ( actors . signalAndReset ( ) ) ; <nl> - printf ( " Stats : % s \ n " , VersionedBTree : : counts . toString ( true ) . c_str ( ) ) ; <nl> + printf ( " Stats : \ n % s \ n " , g_redwoodMetrics . toString ( true ) . c_str ( ) ) ; <nl> <nl> state int ops = 10000 ; <nl> <nl> printf ( " Serial scans with adaptive readAhead . . . \ n " ) ; <nl> actors . add ( randomScans ( btree , ops , 50 , - 1 , firstKeyChar , lastKeyChar ) ) ; <nl> wait ( actors . signalAndReset ( ) ) ; <nl> - printf ( " Stats : % s \ n " , VersionedBTree : : counts . toString ( true ) . c_str ( ) ) ; <nl> + printf ( " Stats : \ n % s \ n " , g_redwoodMetrics . toString ( true ) . c_str ( ) ) ; <nl> <nl> printf ( " Serial scans with readAhead 3 pages . . . \ n " ) ; <nl> actors . add ( randomScans ( btree , ops , 50 , 12000 , firstKeyChar , lastKeyChar ) ) ; <nl> wait ( actors . signalAndReset ( ) ) ; <nl> - printf ( " Stats : % s \ n " , VersionedBTree : : counts . toString ( true ) . c_str ( ) ) ; <nl> + printf ( " Stats : \ n % s \ n " , g_redwoodMetrics . toString ( true ) . c_str ( ) ) ; <nl> <nl> printf ( " Serial scans with readAhead 2 pages . . . \ n " ) ; <nl> actors . 
add ( randomScans ( btree , ops , 50 , 8000 , firstKeyChar , lastKeyChar ) ) ; <nl> wait ( actors . signalAndReset ( ) ) ; <nl> - printf ( " Stats : % s \ n " , VersionedBTree : : counts . toString ( true ) . c_str ( ) ) ; <nl> + printf ( " Stats : \ n % s \ n " , g_redwoodMetrics . toString ( true ) . c_str ( ) ) ; <nl> <nl> printf ( " Serial scans with readAhead 1 page . . . \ n " ) ; <nl> actors . add ( randomScans ( btree , ops , 50 , 4000 , firstKeyChar , lastKeyChar ) ) ; <nl> wait ( actors . signalAndReset ( ) ) ; <nl> - printf ( " Stats : % s \ n " , VersionedBTree : : counts . toString ( true ) . c_str ( ) ) ; <nl> + printf ( " Stats : \ n % s \ n " , g_redwoodMetrics . toString ( true ) . c_str ( ) ) ; <nl> <nl> printf ( " Serial scans . . . \ n " ) ; <nl> actors . add ( randomScans ( btree , ops , 50 , 0 , firstKeyChar , lastKeyChar ) ) ; <nl> wait ( actors . signalAndReset ( ) ) ; <nl> - printf ( " Stats : % s \ n " , VersionedBTree : : counts . toString ( true ) . c_str ( ) ) ; <nl> + printf ( " Stats : \ n % s \ n " , g_redwoodMetrics . toString ( true ) . c_str ( ) ) ; <nl> <nl> printf ( " Serial seeks . . . \ n " ) ; <nl> actors . add ( randomSeeks ( btree , ops , firstKeyChar , lastKeyChar ) ) ; <nl> wait ( actors . signalAndReset ( ) ) ; <nl> - printf ( " Stats : % s \ n " , VersionedBTree : : counts . toString ( true ) . c_str ( ) ) ; <nl> + printf ( " Stats : \ n % s \ n " , g_redwoodMetrics . toString ( true ) . c_str ( ) ) ; <nl> <nl> printf ( " Parallel seeks . . . \ n " ) ; <nl> actors . add ( randomSeeks ( btree , ops , firstKeyChar , lastKeyChar ) ) ; <nl> actors . add ( randomSeeks ( btree , ops , firstKeyChar , lastKeyChar ) ) ; <nl> actors . add ( randomSeeks ( btree , ops , firstKeyChar , lastKeyChar ) ) ; <nl> wait ( actors . signalAndReset ( ) ) ; <nl> - printf ( " Stats : % s \ n " , VersionedBTree : : counts . toString ( true ) . c_str ( ) ) ; <nl> + printf ( " Stats : \ n % s \ n " , g_redwoodMetrics . toString ( true ) . c_str ( ) ) ; <nl> <nl> Future < Void > closedFuture = btree - > onClosed ( ) ; <nl> btree - > close ( ) ; <nl> Future < Void > closeKVS ( IKeyValueStore * kvs ) { <nl> <nl> ACTOR Future < Void > doPrefixInsertComparison ( int suffixSize , int valueSize , int recordCountTarget , <nl> bool usePrefixesInOrder , KVSource source ) { <nl> - VersionedBTree : : counts . clear ( ) ; <nl> <nl> deleteFile ( " test . redwood " ) ; <nl> wait ( delay ( 5 ) ) ; <nl> mmm a / fdbserver / fdbserver . actor . cpp <nl> ppp b / fdbserver / fdbserver . actor . cpp <nl> <nl> # include " fdbrpc / AsyncFileCached . actor . h " <nl> # include " fdbserver / CoroFlow . h " <nl> # include " flow / TLSConfig . actor . h " <nl> - # include " fdbclient / IncludeVersions . h " <nl> + # include " fdbclient / versions . h " <nl> <nl> # include " fdbmonitor / SimpleIni . h " <nl> <nl> mmm a / fdbserver / storageserver . actor . cpp <nl> ppp b / fdbserver / storageserver . actor . cpp <nl> void versionedMapTest ( ) { <nl> printf ( " Memory used : % f MB \ n " , <nl> ( after - before ) / 1e6 ) ; <nl> } <nl> + <nl> mmm a / fdbserver / workloads / BackupAndParallelRestoreCorrectness . actor . cpp <nl> ppp b / fdbserver / workloads / BackupAndParallelRestoreCorrectness . actor . cpp <nl> struct BackupAndParallelRestoreCorrectnessWorkload : TestWorkload { <nl> if ( ! self - > locked & & BUGGIFY ) { <nl> TraceEvent ( " BARW_SubmitBackup2 " , randomID ) . 
detail ( " Tag " , printable ( self - > backupTag ) ) ; <nl> try { <nl> + / / Note the " partitionedLog " must be false , because we change <nl> + / / the configuration to disable backup workers before restore . <nl> extraBackup = backupAgent . submitBackup ( <nl> cx , LiteralStringRef ( " file : / / simfdb / backups / " ) , deterministicRandom ( ) - > randomInt ( 0 , 100 ) , <nl> - self - > backupTag . toString ( ) , self - > backupRanges , true , self - > usePartitionedLogs ) ; <nl> + self - > backupTag . toString ( ) , self - > backupRanges , true , false ) ; <nl> } catch ( Error & e ) { <nl> TraceEvent ( " BARW_SubmitBackup2Exception " , randomID ) <nl> . error ( e ) <nl> mmm a / fdbserver / workloads / ClientTransactionProfileCorrectness . actor . cpp <nl> ppp b / fdbserver / workloads / ClientTransactionProfileCorrectness . actor . cpp <nl> bool checkTxInfoEntryFormat ( BinaryReader & reader ) { <nl> <nl> while ( ! reader . empty ( ) ) { <nl> / / Get EventType and timestamp <nl> - FdbClientLogEvents : : EventType event ; <nl> + FdbClientLogEvents : : Event event ; <nl> reader > > event ; <nl> - double timeStamp ; <nl> - reader > > timeStamp ; <nl> - switch ( event ) <nl> + switch ( event . type ) <nl> { <nl> case FdbClientLogEvents : : GET_VERSION_LATENCY : <nl> parser - > parseGetVersion ( reader ) ; <nl> bool checkTxInfoEntryFormat ( BinaryReader & reader ) { <nl> parser - > parseErrorCommit ( reader ) ; <nl> break ; <nl> default : <nl> - TraceEvent ( SevError , " ClientTransactionProfilingUnknownEvent " ) . detail ( " EventType " , event ) ; <nl> + TraceEvent ( SevError , " ClientTransactionProfilingUnknownEvent " ) . detail ( " EventType " , event . type ) ; <nl> return false ; <nl> } <nl> } <nl> mmm a / fdbserver / workloads / ConfigureDatabase . actor . cpp <nl> ppp b / fdbserver / workloads / ConfigureDatabase . actor . cpp <nl> static const char * logTypes [ ] = { <nl> " log_version : = 2 " , " log_version : = 3 " , " log_version : = 4 " <nl> } ; <nl> static const char * redundancies [ ] = { " single " , " double " , " triple " } ; <nl> + static const char * backupTypes [ ] = { " backup_worker_enabled : = 0 " , " backup_worker_enabled : = 1 " } ; <nl> <nl> std : : string generateRegions ( ) { <nl> std : : string result ; <nl> struct ConfigureDatabaseWorkload : TestWorkload { <nl> if ( g_simulator . speedUpSimulation ) { <nl> return Void ( ) ; <nl> } <nl> - state int randomChoice = deterministicRandom ( ) - > randomInt ( 0 , 7 ) ; <nl> + state int randomChoice = deterministicRandom ( ) - > randomInt ( 0 , 8 ) ; <nl> if ( randomChoice = = 0 ) { <nl> wait ( success ( <nl> runRYWTransaction ( cx , [ = ] ( Reference < ReadYourWritesTransaction > tr ) - > Future < Optional < Value > > <nl> struct ConfigureDatabaseWorkload : TestWorkload { <nl> else if ( randomChoice = = 6 ) { <nl> / / Some configurations will be invalid , and that ' s fine . <nl> wait ( success ( IssueConfigurationChange ( cx , logTypes [ deterministicRandom ( ) - > randomInt ( 0 , sizeof ( logTypes ) / sizeof ( logTypes [ 0 ] ) ) ] , false ) ) ) ; <nl> + } else if ( randomChoice = = 7 ) { <nl> + wait ( success ( IssueConfigurationChange ( <nl> + cx , backupTypes [ deterministicRandom ( ) - > randomInt ( 0 , sizeof ( backupTypes ) / sizeof ( backupTypes [ 0 ] ) ) ] , <nl> + false ) ) ) ; <nl> } else { <nl> ASSERT ( false ) ; <nl> } <nl> new file mode 100644 <nl> index 0000000000 . . 96a0d37510 <nl> mmm / dev / null <nl> ppp b / fdbserver / workloads / DataDistributionMetrics . actor . 
cpp <nl> <nl> + / * <nl> + * DataDistributionMetrics . actor . cpp <nl> + * <nl> + * This source file is part of the FoundationDB open source project <nl> + * <nl> + * Copyright 2013 - 2018 Apple Inc . and the FoundationDB project authors <nl> + * <nl> + * Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + * you may not use this file except in compliance with the License . <nl> + * You may obtain a copy of the License at <nl> + * <nl> + * http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + * <nl> + * Unless required by applicable law or agreed to in writing , software <nl> + * distributed under the License is distributed on an " AS IS " BASIS , <nl> + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + * See the License for the specific language governing permissions and <nl> + * limitations under the License . <nl> + * / <nl> + <nl> + # include < boost / lexical_cast . hpp > <nl> + <nl> + # include " fdbclient / ReadYourWrites . h " <nl> + # include " fdbserver / workloads / workloads . actor . h " <nl> + # include " flow / actorcompiler . h " / / This must be the last include <nl> + <nl> + struct DataDistributionMetricsWorkload : KVWorkload { <nl> + <nl> + int numTransactions ; <nl> + int writesPerTransaction ; <nl> + int transactionsCommitted ; <nl> + int numShards ; <nl> + int64_t avgBytes ; <nl> + <nl> + DataDistributionMetricsWorkload ( WorkloadContext const & wcx ) <nl> + : KVWorkload ( wcx ) , transactionsCommitted ( 0 ) , numShards ( 0 ) , avgBytes ( 0 ) { <nl> + numTransactions = getOption ( options , LiteralStringRef ( " numTransactions " ) , 100 ) ; <nl> + writesPerTransaction = getOption ( options , LiteralStringRef ( " writesPerTransaction " ) , 1000 ) ; <nl> + } <nl> + <nl> + static Value getRandomValue ( ) { <nl> + return Standalone < StringRef > ( format ( " Value / % 08d " , deterministicRandom ( ) - > randomInt ( 0 , 10e6 ) ) ) ; <nl> + } <nl> + <nl> + ACTOR static Future < Void > _start ( Database cx , DataDistributionMetricsWorkload * self ) { <nl> + state int tNum ; <nl> + for ( tNum = 0 ; tNum < self - > numTransactions ; + + tNum ) { <nl> + loop { <nl> + state ReadYourWritesTransaction tr ( cx ) ; <nl> + try { <nl> + state int i ; <nl> + for ( i = 0 ; i < self - > writesPerTransaction ; + + i ) { <nl> + tr . set ( StringRef ( format ( " Key / % 08d " , tNum * self - > writesPerTransaction + i ) ) , getRandomValue ( ) ) ; <nl> + } <nl> + wait ( tr . commit ( ) ) ; <nl> + + + self - > transactionsCommitted ; <nl> + break ; <nl> + } catch ( Error & e ) { <nl> + wait ( tr . onError ( e ) ) ; <nl> + } <nl> + } <nl> + } <nl> + return Void ( ) ; <nl> + } <nl> + <nl> + ACTOR static Future < bool > _check ( Database cx , DataDistributionMetricsWorkload * self ) { <nl> + if ( self - > transactionsCommitted = = 0 ) { <nl> + TraceEvent ( SevError , " NoTransactionsCommitted " ) ; <nl> + return false ; <nl> + } <nl> + state Reference < ReadYourWritesTransaction > tr = <nl> + Reference < ReadYourWritesTransaction > ( new ReadYourWritesTransaction ( cx ) ) ; <nl> + try { <nl> + state Standalone < RangeResultRef > result = wait ( tr - > getRange ( ddStatsRange , 100 ) ) ; <nl> + ASSERT ( ! result . more ) ; <nl> + self - > numShards = result . size ( ) ; <nl> + if ( self - > numShards < 1 ) return false ; <nl> + state int64_t totalBytes = 0 ; <nl> + for ( int i = 0 ; i < result . size ( ) ; + + i ) { <nl> + ASSERT ( result [ i ] . key . startsWith ( ddStatsRange . 
begin ) ) ; <nl> + totalBytes + = readJSONStrictly ( result [ i ] . value . toString ( ) ) . get_obj ( ) [ " ShardBytes " ] . get_int64 ( ) ; <nl> + } <nl> + self - > avgBytes = totalBytes / self - > numShards ; <nl> + / / fetch data - distribution stats for a smalller range <nl> + state int idx = deterministicRandom ( ) - > randomInt ( 0 , result . size ( ) ) ; <nl> + Standalone < RangeResultRef > res = wait ( tr - > getRange ( <nl> + KeyRangeRef ( result [ idx ] . key , idx + 1 < result . size ( ) ? result [ idx + 1 ] . key : ddStatsRange . end ) , 100 ) ) ; <nl> + ASSERT_WE_THINK ( res . size ( ) = = 1 & & <nl> + res [ 0 ] = = result [ idx ] ) ; / / It works good now . However , not sure in any case of data - distribution , the number changes <nl> + } catch ( Error & e ) { <nl> + TraceEvent ( SevError , " FailedToRetrieveDDMetrics " ) . detail ( " Error " , e . what ( ) ) ; <nl> + return false ; <nl> + } <nl> + return true ; <nl> + } <nl> + <nl> + virtual std : : string description ( ) { return " DataDistributionMetrics " ; } <nl> + virtual Future < Void > setup ( Database const & cx ) { return Void ( ) ; } <nl> + virtual Future < Void > start ( Database const & cx ) { return _start ( cx , this ) ; } <nl> + virtual Future < bool > check ( Database const & cx ) { return _check ( cx , this ) ; } <nl> + <nl> + virtual void getMetrics ( vector < PerfMetric > & m ) { <nl> + m . push_back ( PerfMetric ( " NumShards " , numShards , true ) ) ; <nl> + m . push_back ( PerfMetric ( " AvgBytes " , avgBytes , true ) ) ; <nl> + } <nl> + } ; <nl> + <nl> + WorkloadFactory < DataDistributionMetricsWorkload > DataDistributionMetricsWorkloadFactory ( " DataDistributionMetrics " ) ; <nl> mmm a / fdbserver / workloads / TagThrottleApi . actor . cpp <nl> ppp b / fdbserver / workloads / TagThrottleApi . actor . cpp <nl> struct TagThrottleApiWorkload : TestWorkload { <nl> <nl> virtual void getMetrics ( vector < PerfMetric > & m ) { } <nl> <nl> + static Optional < TagThrottleType > randomTagThrottleType ( ) { <nl> + Optional < TagThrottleType > throttleType ; <nl> + switch ( deterministicRandom ( ) - > randomInt ( 0 , 3 ) ) { <nl> + case 0 : <nl> + throttleType = TagThrottleType : : AUTO ; <nl> + break ; <nl> + case 1 : <nl> + throttleType = TagThrottleType : : MANUAL ; <nl> + break ; <nl> + default : <nl> + break ; <nl> + } <nl> + <nl> + return throttleType ; <nl> + } <nl> + <nl> ACTOR Future < Void > throttleTag ( Database cx , std : : map < std : : pair < TransactionTag , TransactionPriority > , TagThrottleInfo > * manuallyThrottledTags ) { <nl> state TransactionTag tag = TransactionTagRef ( deterministicRandom ( ) - > randomChoice ( DatabaseContext : : debugTransactionTagChoices ) ) ; <nl> state TransactionPriority priority = deterministicRandom ( ) - > randomChoice ( allTransactionPriorities ) ; <nl> struct TagThrottleApiWorkload : TestWorkload { <nl> tagSet . 
addTag ( tag ) ; <nl> <nl> try { <nl> - wait ( ThrottleApi : : throttleTags ( cx , tagSet , rate , duration , false , priority ) ) ; <nl> + wait ( ThrottleApi : : throttleTags ( cx , tagSet , rate , duration , TagThrottleType : : MANUAL , priority ) ) ; <nl> } <nl> catch ( Error & e ) { <nl> state Error err = e ; <nl> struct TagThrottleApiWorkload : TestWorkload { <nl> throw err ; <nl> } <nl> <nl> - manuallyThrottledTags - > insert_or_assign ( std : : make_pair ( tag , priority ) , TagThrottleInfo ( tag , false , priority , rate , now ( ) + duration , duration ) ) ; <nl> + manuallyThrottledTags - > insert_or_assign ( std : : make_pair ( tag , priority ) , TagThrottleInfo ( tag , TagThrottleType : : MANUAL , priority , rate , now ( ) + duration , duration ) ) ; <nl> <nl> return Void ( ) ; <nl> } <nl> struct TagThrottleApiWorkload : TestWorkload { <nl> TagSet tagSet ; <nl> tagSet . addTag ( tag ) ; <nl> <nl> - state bool autoThrottled = deterministicRandom ( ) - > coinflip ( ) ; <nl> - TransactionPriority priority = deterministicRandom ( ) - > randomChoice ( allTransactionPriorities ) ; <nl> + state Optional < TagThrottleType > throttleType = TagThrottleApiWorkload : : randomTagThrottleType ( ) ; <nl> + Optional < TransactionPriority > priority = deterministicRandom ( ) - > coinflip ( ) ? Optional < TransactionPriority > ( ) : deterministicRandom ( ) - > randomChoice ( allTransactionPriorities ) ; <nl> <nl> state bool erased = false ; <nl> - state double expiration = 0 ; <nl> - if ( ! autoThrottled ) { <nl> - auto itr = manuallyThrottledTags - > find ( std : : make_pair ( tag , priority ) ) ; <nl> - if ( itr ! = manuallyThrottledTags - > end ( ) ) { <nl> - expiration = itr - > second . expirationTime ; <nl> - erased = true ; <nl> - manuallyThrottledTags - > erase ( itr ) ; <nl> + state double maxExpiration = 0 ; <nl> + if ( ! throttleType . present ( ) | | throttleType . get ( ) = = TagThrottleType : : MANUAL ) { <nl> + for ( auto p : allTransactionPriorities ) { <nl> + if ( ! priority . present ( ) | | priority . get ( ) = = p ) { <nl> + auto itr = manuallyThrottledTags - > find ( std : : make_pair ( tag , p ) ) ; <nl> + if ( itr ! = manuallyThrottledTags - > end ( ) ) { <nl> + maxExpiration = std : : max ( maxExpiration , itr - > second . expirationTime ) ; <nl> + erased = true ; <nl> + manuallyThrottledTags - > erase ( itr ) ; <nl> + } <nl> + } <nl> } <nl> } <nl> <nl> - bool removed = wait ( ThrottleApi : : unthrottleTags ( cx , tagSet , autoThrottled , priority ) ) ; <nl> + bool removed = wait ( ThrottleApi : : unthrottleTags ( cx , tagSet , throttleType , priority ) ) ; <nl> if ( removed ) { <nl> - ASSERT ( erased | | autoThrottled ) ; <nl> + ASSERT ( erased | | ! throttleType . present ( ) | | throttleType . get ( ) = = TagThrottleType : : AUTO ) ; <nl> } <nl> else { <nl> - ASSERT ( expiration < now ( ) ) ; <nl> + ASSERT ( maxExpiration < now ( ) ) ; <nl> } <nl> <nl> return Void ( ) ; <nl> struct TagThrottleApiWorkload : TestWorkload { <nl> int manualThrottledTags = 0 ; <nl> int activeAutoThrottledTags = 0 ; <nl> for ( auto & tag : tags ) { <nl> - if ( ! tag . autoThrottled ) { <nl> + if ( tag . throttleType = = TagThrottleType : : MANUAL ) { <nl> ASSERT ( manuallyThrottledTags - > find ( std : : make_pair ( tag . tag , tag . priority ) ) ! 
= manuallyThrottledTags - > end ( ) ) ; <nl> + + manualThrottledTags ; <nl> } <nl> struct TagThrottleApiWorkload : TestWorkload { <nl> } <nl> <nl> ACTOR Future < Void > unthrottleTagGroup ( Database cx , std : : map < std : : pair < TransactionTag , TransactionPriority > , TagThrottleInfo > * manuallyThrottledTags ) { <nl> - state int choice = deterministicRandom ( ) - > randomInt ( 0 , 3 ) ; <nl> + state Optional < TagThrottleType > throttleType = TagThrottleApiWorkload : : randomTagThrottleType ( ) ; <nl> + state Optional < TransactionPriority > priority = deterministicRandom ( ) - > coinflip ( ) ? Optional < TransactionPriority > ( ) : deterministicRandom ( ) - > randomChoice ( allTransactionPriorities ) ; <nl> <nl> - if ( choice = = 0 ) { <nl> - bool unthrottled = wait ( ThrottleApi : : unthrottleAll ( cx ) ) ; <nl> + bool unthrottled = wait ( ThrottleApi : : unthrottleAll ( cx , throttleType , priority ) ) ; <nl> + if ( ! throttleType . present ( ) | | throttleType . get ( ) = = TagThrottleType : : MANUAL ) { <nl> bool unthrottleExpected = false ; <nl> - for ( auto itr = manuallyThrottledTags - > begin ( ) ; itr ! = manuallyThrottledTags - > end ( ) ; + + itr ) { <nl> - if ( itr - > second . expirationTime > now ( ) ) { <nl> - unthrottleExpected = true ; <nl> + bool empty = manuallyThrottledTags - > empty ( ) ; <nl> + for ( auto itr = manuallyThrottledTags - > begin ( ) ; itr ! = manuallyThrottledTags - > end ( ) ; ) { <nl> + if ( ! priority . present ( ) | | priority . get ( ) = = itr - > first . second ) { <nl> + if ( itr - > second . expirationTime > now ( ) ) { <nl> + unthrottleExpected = true ; <nl> + } <nl> + <nl> + itr = manuallyThrottledTags - > erase ( itr ) ; <nl> } <nl> - } <nl> - <nl> - ASSERT ( ! unthrottleExpected | | unthrottled ) ; <nl> - manuallyThrottledTags - > clear ( ) ; <nl> - } <nl> - else if ( choice = = 1 ) { <nl> - bool unthrottled = wait ( ThrottleApi : : unthrottleManual ( cx ) ) ; <nl> - bool unthrottleExpected = false ; <nl> - for ( auto itr = manuallyThrottledTags - > begin ( ) ; itr ! = manuallyThrottledTags - > end ( ) ; + + itr ) { <nl> - if ( itr - > second . expirationTime > now ( ) ) { <nl> - unthrottleExpected = true ; <nl> + else { <nl> + + + itr ; <nl> } <nl> } <nl> <nl> - ASSERT ( ( unthrottled & & ! manuallyThrottledTags - > empty ( ) ) | | ( ! unthrottled & & ! unthrottleExpected ) ) ; <nl> - manuallyThrottledTags - > clear ( ) ; <nl> - } <nl> - else { <nl> - bool unthrottled = wait ( ThrottleApi : : unthrottleAuto ( cx ) ) ; <nl> + if ( throttleType . present ( ) ) { <nl> + ASSERT ( ( unthrottled & & ! empty ) | | ( ! unthrottled & & ! unthrottleExpected ) ) ; <nl> + } <nl> + else { <nl> + ASSERT ( unthrottled | | ! unthrottleExpected ) ; <nl> + } <nl> } <nl> <nl> return Void ( ) ; <nl> struct TagThrottleApiWorkload : TestWorkload { <nl> if ( deterministicRandom ( ) - > coinflip ( ) ) { <nl> wait ( ThrottleApi : : enableAuto ( cx , true ) ) ; <nl> if ( deterministicRandom ( ) - > coinflip ( ) ) { <nl> - bool unthrottled = wait ( ThrottleApi : : unthrottleAuto ( cx ) ) ; <nl> + bool unthrottled = wait ( ThrottleApi : : unthrottleAll ( cx , TagThrottleType : : AUTO , Optional < TransactionPriority > ( ) ) ) ; <nl> } <nl> } <nl> else { <nl> mmm a / fdbservice / FDBService . cpp <nl> ppp b / fdbservice / FDBService . cpp <nl> <nl> <nl> # include " flow / SimpleOpt . h " <nl> # include " fdbmonitor / SimpleIni . h " <nl> - # include " fdbclient / IncludeVersions . h " <nl> + # include " fdbclient / versions . 
h " <nl> <nl> / / For PathFileExists <nl> # include " Shlwapi . h " <nl> mmm a / flow / IThreadPool . cpp <nl> ppp b / flow / IThreadPool . cpp <nl> class ThreadPool : public IThreadPool , public ReferenceCounted < ThreadPool > { <nl> void operator ( ) ( ) { Thread : : dispatch ( action ) ; action = NULL ; } <nl> ~ ActionWrapper ( ) { if ( action ) { action - > cancel ( ) ; } } <nl> private : <nl> - void operator = ( ActionWrapper const & ) ; <nl> + ActionWrapper & operator = ( ActionWrapper const & ) ; <nl> } ; <nl> public : <nl> ThreadPool ( ) : dontstop ( ios ) , mode ( Run ) { } <nl> mmm a / flow / TLSConfig . actor . cpp <nl> ppp b / flow / TLSConfig . actor . cpp <nl> ACTOR static Future < Void > readEntireFile ( std : : string filename , std : : string * des <nl> throw file_too_large ( ) ; <nl> } <nl> destination - > resize ( filesize ) ; <nl> - wait ( success ( file - > read ( const_cast < char * > ( destination - > c_str ( ) ) , filesize , 0 ) ) ) ; <nl> + wait ( success ( file - > read ( & destination [ 0 ] , filesize , 0 ) ) ) ; <nl> return Void ( ) ; <nl> } <nl> <nl> mmm a / flow / network . h <nl> ppp b / flow / network . h <nl> struct NetworkAddress { <nl> bool isTLS ( ) const { return ( flags & FLAG_TLS ) ! = 0 ; } <nl> bool isV6 ( ) const { return ip . isV6 ( ) ; } <nl> <nl> + size_t hash ( ) const { <nl> + size_t result = 0 ; <nl> + if ( ip . isV6 ( ) ) { <nl> + uint16_t * ptr = ( uint16_t * ) ip . toV6 ( ) . data ( ) ; <nl> + result = ( ( size_t ) ptr [ 5 ] < < 32 ) | ( ( size_t ) ptr [ 6 ] < < 16 ) | ptr [ 7 ] ; <nl> + } else { <nl> + result = ip . toV4 ( ) ; <nl> + } <nl> + return ( result < < 16 ) + port ; <nl> + } <nl> + <nl> static NetworkAddress parse ( std : : string const & ) ; / / May throw connection_string_invalid <nl> static Optional < NetworkAddress > parseOptional ( std : : string const & ) ; <nl> static std : : vector < NetworkAddress > parseList ( std : : string const & ) ; <nl> namespace std <nl> { <nl> size_t operator ( ) ( const NetworkAddress & na ) const <nl> { <nl> - size_t result = 0 ; <nl> - if ( na . ip . isV6 ( ) ) { <nl> - uint16_t * ptr = ( uint16_t * ) na . ip . toV6 ( ) . data ( ) ; <nl> - result = ( ( size_t ) ptr [ 5 ] < < 32 ) | ( ( size_t ) ptr [ 6 ] < < 16 ) | ptr [ 7 ] ; <nl> - } else { <nl> - result = na . ip . toV4 ( ) ; <nl> - } <nl> - return ( result < < 16 ) + na . port ; <nl> + return na . hash ( ) ; <nl> } <nl> } ; <nl> } <nl> mmm a / tests / CMakeLists . txt <nl> ppp b / tests / CMakeLists . txt <nl> if ( WITH_PYTHON ) <nl> add_fdb_test ( TEST_FILES BlobStore . txt IGNORE ) <nl> add_fdb_test ( TEST_FILES ConsistencyCheck . txt IGNORE ) <nl> add_fdb_test ( TEST_FILES DDMetricsExclude . txt IGNORE ) <nl> + add_fdb_test ( TEST_FILES DataDistributionMetrics . txt IGNORE ) <nl> add_fdb_test ( TEST_FILES DiskDurability . txt IGNORE ) <nl> add_fdb_test ( TEST_FILES FileSystem . txt IGNORE ) <nl> add_fdb_test ( TEST_FILES Happy . txt IGNORE ) <nl> new file mode 100644 <nl> index 0000000000 . . 77c83b0eb6 <nl> mmm / dev / null <nl> ppp b / tests / DataDistributionMetrics . txt <nl> <nl> + testTitle = DataDistributionMetrics <nl> + testName = Cycle <nl> + transactionsPerSecond = 2500 . 0 <nl> + testDuration = 10 . 0 <nl> + expectedRate = 0 . 025 <nl> + <nl> + testName = DataDistributionMetrics <nl> + numTransactions = 100 <nl> + writesPerTransaction = 1000 <nl> + <nl> + testName = Attrition <nl> + machinesToKill = 1 <nl> + machinesToLeave = 3 <nl> + reboot = true <nl> + testDuration = 10 . 
0 <nl> + <nl> + testName = Attrition <nl> + machinesToKill = 1 <nl> + machinesToLeave = 3 <nl> + reboot = true <nl> + testDuration = 10 . 0 <nl> \ No newline at end of file <nl>
|
Merge release-6.3 into master
|
apple/foundationdb
|
d128252e904f9cc0ee83c6218610d13daa10a399
|
2020-05-22T16:25:32Z
|
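The FoundationDB change above retires the flat VersionedBTree::counts struct in favor of a global g_redwoodMetrics object whose counters are bucketed by B-tree level via g_redwoodMetrics.level(btPage->height). A minimal sketch of that bucketing pattern follows, assuming a fixed level cap; the struct layout and field names are illustrative, not Redwood's actual definitions.

// A minimal sketch of per-level metric bucketing; names and layout are
// illustrative assumptions, not Redwood's actual API.
#include <array>
#include <cstdint>

struct LevelMetrics {
    uint64_t pageRead = 0;     // primary pages read at this level
    uint64_t pageReadExt = 0;  // extension pages beyond the first
    uint64_t pageBuild = 0;    // pages newly built at this level
    double buildFillPct = 0;   // accumulated fill ratios; divide by pageBuild to report
};

struct RedwoodStyleMetrics {
    static constexpr int kMaxLevels = 100;  // assumed cap, far above any real tree height
    uint64_t opCommit = 0;
    std::array<LevelMetrics, kMaxLevels> levels;

    // Height 1 is the leaf level, matching the diff's btPage->height convention.
    LevelMetrics& level(int height) { return levels[height - 1]; }

    void clear() { *this = RedwoodStyleMetrics{}; }
};

RedwoodStyleMetrics g_metrics;

// Mirrors the diff's accounting: a page spanning N blocks counts as one
// primary page plus N-1 extension pages at its level.
void recordPageRead(int height, int blockCount) {
    LevelMetrics& m = g_metrics.level(height);
    m.pageRead += 1;
    m.pageReadExt += blockCount - 1;
}

Keeping the lookup a plain array index keeps each metric update to a few instructions, which matters for counters that are bumped on every page read and write.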
mmm a / hphp / hack / src / utils / collections / hashSet . ml <nl> ppp b / hphp / hack / src / utils / collections / hashSet . ml <nl> let iter f set = Hashtbl . iter ( fun k _ - > f k ) set <nl> let fold f set acc = Hashtbl . fold ( fun k _ acc - > f k acc ) set acc <nl> <nl> let length set = Hashtbl . length set <nl> + <nl> + let is_empty set = length set = 0 <nl> mmm a / hphp / hack / src / utils / collections / hashSet . mli <nl> ppp b / hphp / hack / src / utils / collections / hashSet . mli <nl> val iter : ( ' a - > unit ) - > ' a t - > unit <nl> val fold : ( ' a - > ' b - > ' b ) - > ' a t - > ' b - > ' b <nl> <nl> val length : ' a t - > int <nl> + <nl> + val is_empty : ' a t - > bool <nl>
|
add is_empty method to hashSet
|
facebook/hhvm
|
6a49c0875fea5e099a91b725fe8519fe652d86cc
|
2019-11-20T00:41:05Z
|
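The hhvm change above derives is_empty from the existing length accessor instead of tracking emptiness separately, so the two can never disagree. The same idea in C++, as a sketch; the free-function form and container choice are assumptions for illustration.

#include <unordered_set>

// Define emptiness in terms of the existing size accessor rather than
// maintaining separate state, like the OCaml one-liner in the diff.
template <typename T>
bool is_empty(const std::unordered_set<T>& set) {
    return set.size() == 0;  // equivalently, set.empty()
}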
mmm a / src / crankshaft / s390 / lithium - codegen - s390 . cc <nl> ppp b / src / crankshaft / s390 / lithium - codegen - s390 . cc <nl> bool LCodeGen : : GeneratePrologue ( ) { <nl> / / Prologue logic requires its starting address in ip and the <nl> / / corresponding offset from the function entry . Need to add <nl> / / 4 bytes for the size of AHI / AGHI that AddP expands into . <nl> - __ AddP ( ip , ip , Operand ( prologue_offset + sizeof ( FourByteInstr ) ) ) ; <nl> + prologue_offset + = sizeof ( FourByteInstr ) ; <nl> + __ AddP ( ip , ip , Operand ( prologue_offset ) ) ; <nl> } <nl> info ( ) - > set_prologue_offset ( prologue_offset ) ; <nl> if ( NeedsEagerFrame ( ) ) { <nl>
|
S390: Fix prologue offset in GeneratePrologue
|
v8/v8
|
2e48dc018aa432ad02d37f41bc71a2367a33bc48
|
2016-03-29T17:25:32Z
|
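The v8 fix above folds the 4-byte size of the expanded AHI/AGHI instruction into prologue_offset before the value reaches set_prologue_offset, so the recorded offset can no longer drift from what was added to ip. A compiler-agnostic sketch of that bug class, with made-up values standing in for the real instruction stream:

// Sketch of the bug class: an adjustment applied to a derived value (ip)
// but not to the variable (prologue_offset) that is recorded afterwards.
// kFourByteInstr stands in for sizeof(FourByteInstr).
#include <cstdio>

constexpr int kFourByteInstr = 4;

int recordedOffsetBuggy(int prologue_offset) {
    int ip = 0;
    ip += prologue_offset + kFourByteInstr;  // adjustment applied here only
    (void)ip;
    return prologue_offset;                  // stale: missing the 4 bytes
}

int recordedOffsetFixed(int prologue_offset) {
    int ip = 0;
    prologue_offset += kFourByteInstr;       // fold the adjustment in first
    ip += prologue_offset;
    (void)ip;
    return prologue_offset;                  // agrees with what ip received
}

int main() {
    std::printf("buggy=%d fixed=%d\n", recordedOffsetBuggy(8), recordedOffsetFixed(8));
    // prints: buggy=8 fixed=12
}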
mmm a / libraries / chain / chain_controller . cpp <nl> ppp b / libraries / chain / chain_controller . cpp <nl> vector < transaction_trace > chain_controller : : _push_deferred_transactions ( bool fl <nl> candidates . emplace_back ( & gtrx ) ; <nl> } <nl> <nl> - auto deferred_transactions_deadline = fc : : time_point : : now ( ) + fc : : microseconds ( 20 * 1000 ) ; <nl> + auto deferred_transactions_deadline = fc : : time_point : : now ( ) + fc : : microseconds ( config : : deffered_transactions_max_time_per_block_us ) ; <nl> vector < transaction_trace > res ; <nl> for ( const auto * trx_p : candidates ) { <nl> if ( ! is_known_transaction ( trx_p - > trx_id ) ) { <nl>
|
deffered_transactions_max_time_per_block_us in config
|
EOSIO/eos
|
6b7524b9b952a6ca1186b7d09e3273de68106d3c
|
2018-03-28T20:27:51Z
|
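The EOSIO change above replaces a hard-coded 20 ms deadline for processing deferred transactions with a named constant in config. A sketch under stated assumptions: std::chrono stands in for fc::time_point and fc::microseconds, the 20,000 us value is assumed to match the old literal, and the identifier is spelled here without the "deffered" misspelling the real constant carries.

#include <chrono>
#include <cstdint>

namespace config {
// Assumed to match the old hard-coded 20 * 1000 us literal.
constexpr int64_t deferred_transactions_max_time_per_block_us = 20'000;
}

// Compute the per-block deadline from the named constant instead of a
// magic number, so the budget is discoverable and tunable in one place.
std::chrono::steady_clock::time_point deferredTransactionsDeadline() {
    return std::chrono::steady_clock::now() +
           std::chrono::microseconds(config::deferred_transactions_max_time_per_block_us);
}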
mmm a / src / qt / paymentserver . cpp <nl> ppp b / src / qt / paymentserver . cpp <nl> void PaymentServer : : handleURIOrFile ( const QString & s ) <nl> if ( uri . hasQueryItem ( " r " ) ) / / payment request URI <nl> { <nl> # ifdef ENABLE_BIP70 <nl> + Q_EMIT message ( tr ( " URI handling " ) , <nl> + tr ( " You are using a BIP70 URL which will be unsupported in the future . " ) , <nl> + CClientUIInterface : : ICON_WARNING ) ; <nl> QByteArray temp ; <nl> temp . append ( uri . queryItemValue ( " r " ) ) ; <nl> QString decoded = QUrl : : fromPercentEncoding ( temp ) ; <nl>
|
Add BIP70 deprecation warning
|
bitcoin/bitcoin
|
fbb643d2a55ade3c06593a7490601acd2e36dce8
|
2018-10-22T10:10:40Z
|
mmm a / bindings / go / src / fdb / tuple / tuple . go <nl> ppp b / bindings / go / src / fdb / tuple / tuple . go <nl> func decodeInt ( b [ ] byte ) ( interface { } , int ) { <nl> <nl> bp : = make ( [ ] byte , 8 ) <nl> copy ( bp [ 8 - n : ] , b [ 1 : n + 1 ] ) <nl> + buf : = bytes . NewBuffer ( bp ) <nl> <nl> + var retInt int64 <nl> if neg { <nl> - var retInt int64 <nl> - binary . Read ( bytes . NewBuffer ( bp ) , binary . BigEndian , & retInt ) <nl> + binary . Read ( buf , binary . BigEndian , & retInt ) <nl> return retInt - int64 ( sizeLimits [ n ] ) , n + 1 <nl> } <nl> <nl> - var retInt int64 <nl> - binary . Read ( bytes . NewBuffer ( bp ) , binary . BigEndian , & retInt ) <nl> + binary . Read ( buf , binary . BigEndian , & retInt ) <nl> if retInt > 0 { <nl> return retInt , n + 1 <nl> } <nl> <nl> var retUint uint64 <nl> - binary . Read ( bytes . NewBuffer ( bp ) , binary . BigEndian , & retUint ) <nl> + binary . Read ( buf , binary . BigEndian , & retUint ) <nl> return retUint , n + 1 <nl> } <nl> <nl>
|
Remove redundant calls to NewBuffer that use the bp slice; remove redundant declaration of retInt
|
apple/foundationdb
|
36b3818aef1874b70b527c82390bd07bebdfb974
|
2018-10-30T21:06:03Z
|
mmm a / src / objects . cc <nl> ppp b / src / objects . cc <nl> MaybeHandle < FixedArray > JSReceiver : : GetKeys ( Handle < JSReceiver > object , <nl> <nl> / / Check access rights if required . <nl> if ( current - > IsAccessCheckNeeded ( ) & & ! isolate - > MayAccess ( current ) ) { <nl> - isolate - > ReportFailedAccessCheck ( current ) ; <nl> - RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION ( isolate , FixedArray ) ; <nl> - break ; <nl> + return content ; <nl> } <nl> <nl> / / Compute the element keys . <nl> mmm a / test / cctest / test - api . cc <nl> ppp b / test / cctest / test - api . cc <nl> THREADED_TEST ( CrossDomainForIn ) { <nl> " for ( var p in obj ) { " <nl> " if ( p = = ' prop ' ) return false ; " <nl> " } " <nl> - " return false ; " <nl> - " } catch ( e ) { " <nl> " return true ; " <nl> + " } catch ( e ) { " <nl> + " return false ; " <nl> " } " <nl> " } ) ( ) " ) ; <nl> CHECK ( result - > IsTrue ( ) ) ; <nl> TEST ( AccessControl ) { <nl> " return false ; " <nl> " } " <nl> " } " <nl> - " return false ; " <nl> - " } catch ( e ) { " <nl> " return true ; " <nl> + " } catch ( e ) { " <nl> + " return false ; " <nl> " } " <nl> " } ) ( ) " ) ; <nl> CHECK ( value - > IsTrue ( ) ) ; <nl> TEST ( AccessCheckThrows ) { <nl> CheckCorrectThrow ( " % HasProperty ( other , ' x ' ) " ) ; <nl> CheckCorrectThrow ( " % HasElement ( other , 1 ) " ) ; <nl> CheckCorrectThrow ( " % IsPropertyEnumerable ( other , ' x ' ) " ) ; <nl> - CheckCorrectThrow ( " % GetPropertyNames ( other ) " ) ; <nl> / / PROPERTY_ATTRIBUTES_NONE = 0 <nl> CheckCorrectThrow ( " % DefineAccessorPropertyUnchecked ( " <nl> " other , ' x ' , null , null , 1 ) " ) ; <nl>
|
Fix object enumeration wrt access-checked objects
|
v8/v8
|
08827f55fb5bb7be232dc375f3a57d87f33f46e6
|
2015-07-17T12:57:39Z
|
mmm a / src / ia32 / stub - cache - ia32 . cc <nl> ppp b / src / ia32 / stub - cache - ia32 . cc <nl> bool StubCompiler : : GenerateLoadCallback ( JSObject * object , <nl> __ push ( other ) ; <nl> __ push ( receiver ) ; / / receiver <nl> __ push ( reg ) ; / / holder <nl> - __ mov ( other , Immediate ( callback_handle ) ) ; <nl> - __ push ( FieldOperand ( other , AccessorInfo : : kDataOffset ) ) ; / / data <nl> + / / Push data from AccessorInfo . <nl> + if ( Heap : : InNewSpace ( callback_handle - > data ( ) ) ) { <nl> + __ mov ( other , Immediate ( callback_handle ) ) ; <nl> + __ push ( FieldOperand ( other , AccessorInfo : : kDataOffset ) ) ; <nl> + } else { <nl> + __ push ( Immediate ( Handle < Object > ( callback_handle - > data ( ) ) ) ) ; <nl> + } <nl> __ push ( name_reg ) ; / / name <nl> / / Save a pointer to where we pushed the arguments pointer . <nl> / / This will be passed as the const AccessorInfo & to the C + + callback . <nl>
|
Push AccessorInfo data directly if it resides in old space.
|
v8/v8
|
078d28535384506be9b6d05b90e3a6c03fbfc7ad
|
2010-06-17T17:13:40Z
|
mmm a / torch / backends / cudnn / __init__ . py <nl> ppp b / torch / backends / cudnn / __init__ . py <nl> def version ( ) : <nl> } <nl> <nl> <nl> + def is_available ( ) : <nl> + r " " " Returns a bool indicating if CUDNN is currently available . " " " <nl> + return torch . _C . has_cudnn <nl> + <nl> + <nl> def is_acceptable ( tensor ) : <nl> if not torch . _C . _get_cudnn_enabled ( ) : <nl> return False <nl> if tensor . type ( ) not in CUDNN_TENSOR_TYPES : <nl> return False <nl> - if not torch . _C . has_cudnn : <nl> + if not is_available ( ) : <nl> warnings . warn ( <nl> " PyTorch was compiled without cuDNN support . To use cuDNN , rebuild " <nl> " PyTorch making sure the library is visible to the build system . " ) <nl>
|
Add torch.cuda.cudnn_is_available()
|
pytorch/pytorch
|
0acddd6cee32bc7c3715bc8b93d0a33ef19064b1
|
2018-06-20T21:18:03Z
|
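The PyTorch change above wraps the raw torch._C.has_cudnn flag in a public is_available() predicate and then reuses that predicate inside is_acceptable instead of testing the flag a second time. A hypothetical C++ sketch of the same refactor shape (the names below are stand-ins, not PyTorch's API):

// Stand-in for the backend capability flag (torch._C.has_cudnn).
constexpr bool has_cudnn = true;

// Public predicate: the one named place that answers the question.
bool is_available() { return has_cudnn; }

bool is_acceptable(bool backend_enabled) {
    if (!backend_enabled) return false;
    return is_available();  // reuse the predicate, not the raw flag
}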
mmm a / contrib / arrow - cmake / cpp / src / arrow / util / config . h <nl> ppp b / contrib / arrow - cmake / cpp / src / arrow / util / config . h <nl> <nl> # define ARROW_VERSION_PATCH <nl> # define ARROW_VERSION ( ( ARROW_VERSION_MAJOR * 1000 ) + ARROW_VERSION_MINOR ) * 1000 + ARROW_VERSION_PATCH <nl> <nl> - / * # undef DOUBLE_CONVERSION_HAS_CASE_INSENSIBILITY * / <nl> + # define ARROW_SO_VERSION " " <nl> + # define ARROW_FULL_SO_VERSION " " <nl> + <nl> / * # undef GRPCPP_PP_INCLUDE * / <nl>
|
Update config.h for arrow
|
ClickHouse/ClickHouse
|
0aa97af108e80f114be6542f8aaa2b07892ed4e6
|
2020-07-09T07:19:44Z
|
mmm a / tensorflow / BUILD <nl> ppp b / tensorflow / BUILD <nl> config_setting ( <nl> <nl> package_group ( <nl> name = " internal " , <nl> - packages = [ <nl> - " / / learning / vis / . . . " , <nl> - " / / tensorflow / . . . " , <nl> - ] , <nl> + packages = [ " / / tensorflow / . . . " ] , <nl> ) <nl> <nl> sh_binary ( <nl>
|
Internal change.
|
tensorflow/tensorflow
|
b25217e46971e9292e710d0396853435299ec5ea
|
2016-09-30T13:47:52Z
|
mmm a / xbmc / MusicInfoTagLoaderWMA . cpp <nl> ppp b / xbmc / MusicInfoTagLoaderWMA . cpp <nl> bool CMusicInfoTagLoaderWMA : : Load ( const CStdString & strFileName , CMusicInfoTag & <nl> <nl> tag . SetURL ( strFileName ) ; <nl> <nl> - / / Note that we ' re reading in a bit more than 64k , because the ' peek ' ing <nl> + / / Note that we ' re reading in a bit more than the buffer size , because the ' peek ' ing <nl> / / below is dealing with integers and reads off the end . Rather than change <nl> / / all the checks below , I ' ve simply allocated a bigger buffer . <nl> - auto_aptr < unsigned char > pData ( new unsigned char [ 65536 + 32 ] ) ; <nl> - file . Read ( pData . get ( ) , 65536 + 32 ) ; <nl> + const unsigned int bufferSize = 256 * 1024 ; <nl> + auto_aptr < unsigned char > pData ( new unsigned char [ bufferSize + 32 ] ) ; <nl> + file . Read ( pData . get ( ) , bufferSize + 32 ) ; <nl> file . Close ( ) ; <nl> <nl> - int iOffset ; <nl> + unsigned int iOffset ; <nl> unsigned int * pDataI ; <nl> CStdString16 utf16String ; <nl> <nl> / / Play time <nl> iOffset = 0 ; <nl> pDataI = ( unsigned int * ) pData . get ( ) ; <nl> - while ( ! ( pDataI [ 0 ] = = 0x75B22630 & & pDataI [ 1 ] = = 0x11CF668E & & pDataI [ 2 ] = = 0xAA00D9A6 & & pDataI [ 3 ] = = 0x6CCE6200 ) & & iOffset < = 65536 - 4 ) <nl> + while ( ! ( pDataI [ 0 ] = = 0x75B22630 & & pDataI [ 1 ] = = 0x11CF668E & & pDataI [ 2 ] = = 0xAA00D9A6 & & pDataI [ 3 ] = = 0x6CCE6200 ) & & iOffset < = bufferSize - 4 ) <nl> { <nl> iOffset + + ; <nl> pDataI = ( unsigned int * ) ( pData . get ( ) + iOffset ) ; <nl> } <nl> - if ( iOffset > 65536 - 4 ) <nl> + if ( iOffset > bufferSize - 4 ) <nl> return false ; <nl> <nl> / / Play time <nl> iOffset = 0 ; <nl> pDataI = ( unsigned int * ) pData . get ( ) ; <nl> - while ( ! ( pDataI [ 0 ] = = 0x8CABDCA1 & & pDataI [ 1 ] = = 0x11CFA947 & & pDataI [ 2 ] = = 0xC000E48E & & pDataI [ 3 ] = = 0x6553200C ) & & iOffset < = 65536 - 4 ) <nl> + while ( ! ( pDataI [ 0 ] = = 0x8CABDCA1 & & pDataI [ 1 ] = = 0x11CFA947 & & pDataI [ 2 ] = = 0xC000E48E & & pDataI [ 3 ] = = 0x6553200C ) & & iOffset < = bufferSize - 4 ) <nl> { <nl> iOffset + + ; <nl> pDataI = ( unsigned int * ) ( pData . get ( ) + iOffset ) ; <nl> } <nl> - if ( iOffset < = 65536 - 4 ) <nl> + if ( iOffset < = bufferSize - 4 ) <nl> { <nl> iOffset + = 64 ; <nl> pDataI = ( unsigned int * ) ( pData . get ( ) + iOffset ) ; <nl> bool CMusicInfoTagLoaderWMA : : Load ( const CStdString & strFileName , CMusicInfoTag & <nl> / / Description Title <nl> iOffset = 0 ; <nl> pDataI = ( unsigned int * ) pData . get ( ) ; <nl> - while ( ! ( pDataI [ 0 ] = = 0x75B22633 & & pDataI [ 1 ] = = 0x11CF668E & & pDataI [ 2 ] = = 0xAA00D9A6 & & pDataI [ 3 ] = = 0x6CCE6200 ) & & iOffset < = 65536 - 4 ) <nl> + while ( ! ( pDataI [ 0 ] = = 0x75B22633 & & pDataI [ 1 ] = = 0x11CF668E & & pDataI [ 2 ] = = 0xAA00D9A6 & & pDataI [ 3 ] = = 0x6CCE6200 ) & & iOffset < = bufferSize - 4 ) <nl> { <nl> iOffset + + ; <nl> pDataI = ( unsigned int * ) ( pData . get ( ) + iOffset ) ; <nl> } <nl> - if ( iOffset < = 65536 - 4 ) <nl> + if ( iOffset < = bufferSize - 4 ) <nl> { <nl> iOffset + = 24 ; <nl> int nTitleSize = pData [ iOffset + 0 ] + pData [ iOffset + 1 ] * 0x100 ; <nl> bool CMusicInfoTagLoaderWMA : : Load ( const CStdString & strFileName , CMusicInfoTag & <nl> / / Info audio <nl> / / iOffset = 0 ; <nl> / / pDataI = ( unsigned int * ) pData ; <nl> - / / while ( ! 
( pDataI [ 0 ] = = 0xF8699E40 & & pDataI [ 1 ] = = 0x11CF5B4D & & pDataI [ 2 ] = = 0x8000FDA8 & & pDataI [ 3 ] = = 0x2B445C5F ) & & iOffset < = 65536 - 4 ) <nl> + / / while ( ! ( pDataI [ 0 ] = = 0xF8699E40 & & pDataI [ 1 ] = = 0x11CF5B4D & & pDataI [ 2 ] = = 0x8000FDA8 & & pDataI [ 3 ] = = 0x2B445C5F ) & & iOffset < = bufferSize - 4 ) <nl> / / { <nl> / / iOffset + + ; <nl> / / pDataI = ( unsigned int * ) ( pData + iOffset ) ; <nl> / / } <nl> - / / if ( iOffset < = 65536 - 4 ) <nl> + / / if ( iOffset < = bufferSize - 4 ) <nl> / / { <nl> / / iOffset + = 54 ; <nl> / / / / Codec <nl> bool CMusicInfoTagLoaderWMA : : Load ( const CStdString & strFileName , CMusicInfoTag & <nl> / / Info video <nl> / / iOffset = 0 ; <nl> / / pDataI = ( unsigned int * ) pData ; <nl> - / / while ( ! ( pDataI [ 0 ] = = 0xBC19EFC0 & & pDataI [ 1 ] = = 0x11CF5B4D & & pDataI [ 2 ] = = 0x8000FDA8 & & pDataI [ 3 ] = = 0x2B445C5F ) & & iOffset < = 65536 - 4 ) <nl> + / / while ( ! ( pDataI [ 0 ] = = 0xBC19EFC0 & & pDataI [ 1 ] = = 0x11CF5B4D & & pDataI [ 2 ] = = 0x8000FDA8 & & pDataI [ 3 ] = = 0x2B445C5F ) & & iOffset < = bufferSize - 4 ) <nl> / / { <nl> / / iOffset + + ; <nl> / / pDataI = ( unsigned int * ) ( pData + iOffset ) ; <nl> / / } <nl> - / / if ( iOffset < = 65536 - 4 ) <nl> + / / if ( iOffset < = bufferSize - 4 ) <nl> / / { <nl> / / iOffset + = 54 ; <nl> / / iOffset + = 15 ; <nl> bool CMusicInfoTagLoaderWMA : : Load ( const CStdString & strFileName , CMusicInfoTag & <nl> / / Read extended metadata <nl> iOffset = 0 ; <nl> pDataI = ( unsigned int * ) pData . get ( ) ; <nl> - while ( ! ( pDataI [ 0 ] = = 0xD2D0A440 & & pDataI [ 1 ] = = 0x11D2E307 & & pDataI [ 2 ] = = 0xA000F097 & & pDataI [ 3 ] = = 0x50A85EC9 ) & & iOffset < = 65536 - 4 ) <nl> + while ( ! ( pDataI [ 0 ] = = 0xD2D0A440 & & pDataI [ 1 ] = = 0x11D2E307 & & pDataI [ 2 ] = = 0xA000F097 & & pDataI [ 3 ] = = 0x50A85EC9 ) & & iOffset < = bufferSize - 4 ) <nl> { <nl> iOffset + + ; <nl> pDataI = ( unsigned int * ) ( pData . get ( ) + iOffset ) ; <nl> } <nl> <nl> - if ( iOffset < = 65536 - 4 ) <nl> + if ( iOffset < = bufferSize - 4 ) <nl> { <nl> iOffset + = 24 ; <nl> <nl> bool CMusicInfoTagLoaderWMA : : Load ( const CStdString & strFileName , CMusicInfoTag & <nl> iOffset + = 2 ; <nl> <nl> / / Size of frame value <nl> - int iValueSize = pData [ iOffset ] + ( pData [ iOffset + 1 ] * 0x100 ) ; <nl> + unsigned int iValueSize = pData [ iOffset ] + ( pData [ iOffset + 1 ] * 0x100 ) ; <nl> iOffset + = 2 ; <nl> <nl> + / / Sanity check for buffer size <nl> + if ( iValueSize + iOffset > bufferSize ) <nl> + { <nl> + CLog : : Log ( LOGWARNING , " % s ( % s ) failed due to tag being larger than % ul " , __FUNCTION__ , strFileName . c_str ( ) , bufferSize ) ; <nl> + break ; <nl> + } <nl> + <nl> / / Parse frame value and fill <nl> / / tag with extended metadata <nl> if ( iFrameType = = WMT_TYPE_STRING & & iValueSize > 0 ) <nl> bool CMusicInfoTagLoaderWMA : : Load ( const CStdString & strFileName , CMusicInfoTag & <nl> / / Read extended metadata 2 <nl> iOffset = 0 ; <nl> pDataI = ( unsigned int * ) pData . get ( ) ; <nl> - while ( ! ( pDataI [ 0 ] = = 0x44231C94 & & pDataI [ 1 ] = = 0x49D19498 & & pDataI [ 2 ] = = 0x131D41A1 & & pDataI [ 3 ] = = 0x5470454E ) & & iOffset < = 65536 - 4 ) <nl> + while ( ! ( pDataI [ 0 ] = = 0x44231C94 & & pDataI [ 1 ] = = 0x49D19498 & & pDataI [ 2 ] = = 0x131D41A1 & & pDataI [ 3 ] = = 0x5470454E ) & & iOffset < = bufferSize - 4 ) <nl> { <nl> iOffset + + ; <nl> pDataI = ( unsigned int * ) ( pData . 
get ( ) + iOffset ) ; <nl> } <nl> <nl> - if ( iOffset < = 65536 - 4 ) <nl> + if ( iOffset < = bufferSize - 4 ) <nl> { <nl> iOffset + = 24 ; <nl> <nl> bool CMusicInfoTagLoaderWMA : : Load ( const CStdString & strFileName , CMusicInfoTag & <nl> iOffset + = 2 ; <nl> <nl> / / Size of frame value <nl> - int iValueSize = pData [ iOffset ] + ( pData [ iOffset + 1 ] * 0x100 ) ; <nl> + unsigned int iValueSize = pData [ iOffset ] + ( pData [ iOffset + 1 ] * 0x100 ) + ( pData [ iOffset + 2 ] * 0x10000 ) ; <nl> iOffset + = 4 ; <nl> <nl> / / Get frame name <nl> bool CMusicInfoTagLoaderWMA : : Load ( const CStdString & strFileName , CMusicInfoTag & <nl> g_charsetConverter . utf16LEtoUTF8 ( utf16String , strFrameName ) ; <nl> iOffset + = iFrameNameSize ; <nl> <nl> + / / Sanity check for buffer size <nl> + if ( iValueSize + iOffset > bufferSize ) <nl> + { <nl> + CLog : : Log ( LOGWARNING , " % s ( % s ) failed due to tag being larger than % ul " , __FUNCTION__ , strFileName . c_str ( ) , bufferSize ) ; <nl> + break ; <nl> + } <nl> + <nl> / / Parse frame value and fill <nl> / / tag with extended metadata <nl> if ( iFrameType = = WMT_TYPE_STRING & & iValueSize > 0 ) <nl> void CMusicInfoTagLoaderWMA : : SetTagValueBinary ( const CStdString & strFrameName , c <nl> picture . bPictureType = ( BYTE ) pValue [ iPicOffset ] ; <nl> iPicOffset + = 1 ; <nl> <nl> - picture . dwDataLen = ( DWORD ) pValue [ iPicOffset ] + ( pValue [ iPicOffset + 1 ] * 0x100 ) ; <nl> + picture . dwDataLen = ( DWORD ) pValue [ iPicOffset ] + ( pValue [ iPicOffset + 1 ] * 0x100 ) + ( pValue [ iPicOffset + 2 ] * 0x10000 ) ; <nl> iPicOffset + = 4 ; <nl> <nl> CStdStringW wString ; <nl>
|
fixed: Ticket - partial extraction of embedded images in wma files. We now use a 256k buffer rather than a 64k buffer, and (some) sanity checks have been added.
|
xbmc/xbmc
|
6132f44e0c687004d06d52eacd2fc1c0144cc4e3
|
2009-10-12T09:16:56Z
|
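The core of the WMA fix above is the added sanity check: before consuming a length-prefixed frame value, the loader now verifies that the declared size still fits inside the read buffer and bails out with a warning instead of reading past the end. A standalone C++ sketch of that guard, assuming the same offset/size bookkeeping as the diff:

#include <cstdint>
#include <cstdio>

bool readTagValue(const unsigned char* data, unsigned int bufferSize,
                  unsigned int offset, unsigned int valueSize) {
    // Sanity check from the diff: a hostile or truncated tag can declare
    // a value larger than what was actually read into the buffer.
    if (valueSize + offset > bufferSize) {
        std::fprintf(stderr, "tag larger than %u-byte buffer\n", bufferSize);
        return false;
    }
    // ... safe to parse data[offset .. offset + valueSize) here ...
    return true;
}

The commit adds the same guard to both extended-metadata loops, and it runs before the value is touched.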
mmm a / Documentation / RefManual / JSModuleGraph . md <nl> ppp b / Documentation / RefManual / JSModuleGraph . md <nl> examples assume <nl> @ verbinclude graph - setup <nl> <nl> @ anchor JSModuleGraphGraphConstructor <nl> - @ copydetails JSF_Graph <nl> + @ copydetails JSF_Graph_prototype_initialize <nl> <nl> @ CLEARPAGE <nl> @ anchor JSModuleGraphGraphAddEdge <nl>
|
fixed Doxygen error
|
arangodb/arangodb
|
0d535e89c71fb4f5a3496ca3e4f224f60fa7c72a
|
2013-06-07T14:41:18Z
|
mmm a / include / swift / SIL / SILInstruction . h <nl> ppp b / include / swift / SIL / SILInstruction . h <nl> class KeyPathPatternComponent { <nl> StoredProperty , <nl> GettableProperty , <nl> SettableProperty , <nl> + TupleElement , <nl> OptionalChain , <nl> OptionalForce , <nl> OptionalWrap , <nl> - TupleElement , <nl> } ; <nl> <nl> / / Description of a captured index value and its Hashable conformance for a <nl> class KeyPathPatternComponent { <nl> / / Value is the VarDecl * for StoredProperty , the SILFunction * of the <nl> / / Getter for computed properties , or the Kind for other kinds <nl> llvm : : PointerIntPair < void * , KindPackingBits , unsigned > ValueAndKind ; <nl> - llvm : : PointerIntPair < SILFunction * , 2 , <nl> + <nl> + / / Setter is the SILFunction * of the Setter for computed properties , or the <nl> + / / tuple index for tuple elements <nl> + llvm : : PointerIntPair < void * , 2 , <nl> ComputedPropertyId : : KindType > SetterAndIdKind ; <nl> ComputedPropertyId : : ValueType IdValue ; <nl> ArrayRef < Index > Indices ; <nl> class KeyPathPatternComponent { <nl> & & " not an optional component " ) ; <nl> } <nl> <nl> + / / / Constructor for tuple element . <nl> + KeyPathPatternComponent ( unsigned tupleIndex , CanType componentType ) <nl> + : ValueAndKind ( ( void * ) ( ( uintptr_t ) Kind : : TupleElement < < KindPackingBits ) , PackedStored ) , <nl> + SetterAndIdKind ( ( void * ) ( ( uintptr_t ) tupleIndex < < 2 ) , ( ComputedPropertyId : : KindType ) 0 ) , <nl> + ComponentType ( componentType ) <nl> + { <nl> + / / fixme : [ technicated ] magic < < 2 shift <nl> + } <nl> + <nl> public : <nl> KeyPathPatternComponent ( ) : ValueAndKind ( nullptr , 0 ) { } <nl> <nl> class KeyPathPatternComponent { <nl> auto packedKind = ValueAndKind . getInt ( ) ; <nl> switch ( ( PackedKind ) packedKind ) { <nl> case PackedStored : <nl> - return Kind : : StoredProperty ; <nl> + return SetterAndIdKind . getPointer ( ) <nl> + ? Kind : : TupleElement : Kind : : StoredProperty ; <nl> case PackedComputed : <nl> return SetterAndIdKind . getPointer ( ) <nl> ? Kind : : SettableProperty : Kind : : GettableProperty ; <nl> class KeyPathPatternComponent { <nl> case Kind : : TupleElement : <nl> llvm_unreachable ( " not a settable computed property " ) ; <nl> case Kind : : SettableProperty : <nl> - return SetterAndIdKind . getPointer ( ) ; <nl> + return static_cast < SILFunction * > ( SetterAndIdKind . getPointer ( ) ) ; <nl> } <nl> llvm_unreachable ( " unhandled kind " ) ; <nl> } <nl> class KeyPathPatternComponent { <nl> case Kind : : SettableProperty : <nl> llvm_unreachable ( " not a tuple element " ) ; <nl> case Kind : : TupleElement : <nl> - llvm_unreachable ( " [ technicated ] " ) ; <nl> + / / fixme : [ technicated ] magic > > 2 shift <nl> + return ( uintptr_t ) SetterAndIdKind . getPointer ( ) > > 2 ; <nl> } <nl> llvm_unreachable ( " unhandled kind " ) ; <nl> } <nl> class KeyPathPatternComponent { <nl> } <nl> return KeyPathPatternComponent ( kind , ty ) ; <nl> } <nl> + <nl> + static KeyPathPatternComponent forTupleElement ( unsigned tupleIndex , <nl> + CanType ty ) { <nl> + return KeyPathPatternComponent ( tupleIndex , ty ) ; <nl> + } <nl> <nl> void incrementRefCounts ( ) const ; <nl> void decrementRefCounts ( ) const ; <nl> mmm a / lib / IRGen / GenKeyPath . cpp <nl> ppp b / lib / IRGen / GenKeyPath . cpp <nl> <nl> # include " GenMeta . h " <nl> # include " GenProto . h " <nl> # include " GenStruct . h " <nl> + # include " GenTuple . h " <nl> # include " GenType . 
h " <nl> # include " GenericRequirement . h " <nl> # include " IRGenDebugInfo . h " <nl> emitKeyPathComponent ( IRGenModule & IGM , <nl> fields . addInt32 ( KeyPathComponentHeader : : forOptionalWrap ( ) . getData ( ) ) ; <nl> break ; <nl> case KeyPathPatternComponent : : Kind : : TupleElement : <nl> - llvm_unreachable ( " [ technicated ] " ) ; <nl> - break ; <nl> + if ( ! baseTy - > is < TupleType > ( ) ) { <nl> + llvm_unreachable ( " not a tuple " ) ; <nl> + } <nl> + <nl> + SILType loweredTy = IGM . getSILTypes ( ) . getLoweredType ( baseTy ) ; <nl> + <nl> + if ( auto offset = getFixedTupleElementOffset ( IGM , loweredTy , component . getTupleIndex ( ) ) ) { <nl> + auto header = KeyPathComponentHeader <nl> + : : forStructComponentWithInlineOffset ( / * isLet * / false , <nl> + offset - > getValue ( ) ) ; <nl> + <nl> + fields . addInt32 ( header . getData ( ) ) ; <nl> + break ; <nl> + } <nl> + <nl> + llvm_unreachable ( " could not get element offset " ) ; <nl> } <nl> } <nl> <nl> mmm a / lib / SIL / SILVerifier . cpp <nl> ppp b / lib / SIL / SILVerifier . cpp <nl> void verifyKeyPathComponent ( SILModule & M , <nl> break ; <nl> } <nl> case KeyPathPatternComponent : : Kind : : TupleElement : { <nl> - llvm_unreachable ( " [ technicated ] " ) ; <nl> + / / llvm_unreachable ( " [ technicated ] " ) ; <nl> break ; <nl> } <nl> } <nl> mmm a / lib / SILGen / SILGen . h <nl> ppp b / lib / SILGen / SILGen . h <nl> class LLVM_LIBRARY_VISIBILITY SILGenModule : public ASTVisitor < SILGenModule > { <nl> bool forPropertyDescriptor ) ; <nl> <nl> KeyPathPatternComponent <nl> - emitKeyPathComponentForTupleElement ( ) ; <nl> + emitKeyPathComponentForTupleElement ( unsigned tupleIndex , CanType baseTy ) ; <nl> <nl> / / / Known functions for bridging . <nl> SILDeclRef getStringToNSStringFn ( ) ; <nl> mmm a / lib / SILGen / SILGenExpr . cpp <nl> ppp b / lib / SILGen / SILGenExpr . cpp <nl> SILGenModule : : emitKeyPathComponentForDecl ( SILLocation loc , <nl> } <nl> <nl> KeyPathPatternComponent <nl> - SILGenModule : : emitKeyPathComponentForTupleElement ( ) { <nl> - llvm_unreachable ( " technicated " ) ; <nl> + SILGenModule : : emitKeyPathComponentForTupleElement ( unsigned tupleIndex , <nl> + CanType baseTy ) { <nl> + if ( ! baseTy - > is < TupleType > ( ) ) { <nl> + llvm_unreachable ( " baseTy is expected to be a TupleType " ) ; <nl> + } <nl> + <nl> + auto elementTy = baseTy - > getAs < TupleType > ( ) <nl> + - > getElementType ( tupleIndex ) <nl> + - > getCanonicalType ( ) ; <nl> + <nl> + return KeyPathPatternComponent : : forTupleElement ( tupleIndex , elementTy ) ; <nl> } <nl> <nl> RValue RValueEmitter : : visitKeyPathExpr ( KeyPathExpr * E , SGFContext C ) { <nl> RValue RValueEmitter : : visitKeyPathExpr ( KeyPathExpr * E , SGFContext C ) { <nl> } <nl> <nl> case KeyPathExpr : : Component : : Kind : : TupleElement : { <nl> + auto tupleIndex = component . getTupleIndex ( ) ; <nl> loweredComponents . push_back ( <nl> - SGF . SGM . emitKeyPathComponentForTupleElement ( ) ) ; <nl> + SGF . SGM . emitKeyPathComponentForTupleElement ( tupleIndex , <nl> + baseTy ) ) ; <nl> <nl> baseTy = loweredComponents . back ( ) . getComponentType ( ) ; <nl> <nl>
|
Very minimal POC of tuple KP feature
|
apple/swift
|
17cf1360c4249d92558a982bc04cf2966f807e70
|
2019-02-18T08:04:43Z
|
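The trickiest part of the Swift diff above is how a tuple index is smuggled through a field that otherwise holds a SILFunction*: the index is shifted left by 2 before being stored as a pointer, keeping the low two bits free for the PointerIntPair's own tag, and shifted right by 2 on the way out (the "<< 2 shift" fixme comments in the hunk). A dependency-free C++ sketch of that packing trick:

#include <cstdint>

// Pack a small integer into a pointer-typed slot whose low 2 bits must
// stay clear (e.g. because a tagged pointer stores its tag there).
inline void* packIndex(unsigned tupleIndex) {
    return reinterpret_cast<void*>(
        static_cast<std::uintptr_t>(tupleIndex) << 2);
}

inline unsigned unpackIndex(void* packed) {
    return static_cast<unsigned>(
        reinterpret_cast<std::uintptr_t>(packed) >> 2);
}

A nonzero packed value also doubles as the discriminator: the diff tests whether the pointer half is nonzero to tell TupleElement apart from StoredProperty under the same packed kind.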
mmm a / docs / ABI / Mangling . rst <nl> ppp b / docs / ABI / Mangling . rst <nl> Entities <nl> entity - spec : : = decl - name label - list ? type ' v ' ACCESSOR / / variable <nl> entity - spec : : = decl - name type ' fp ' / / generic type parameter <nl> entity - spec : : = decl - name type ' fo ' / / enum element ( currently not used ) <nl> - entity - spec : : = identifier ' Qa ' / / associated type declaration <nl> + entity - spec : : = identifier ' Qa ' / / associated type declaration <nl> <nl> ACCESSOR : : = ' m ' / / materializeForSet <nl> ACCESSOR : : = ' s ' / / setter <nl> Types <nl> any - generic - type : : = context decl - name ' a ' / / typealias type ( used in DWARF and USRs ) <nl> <nl> any - generic - type : : = standard - substitutions <nl> - <nl> + <nl> standard - substitutions : : = ' S ' KNOWN - TYPE - KIND / / known nominal type substitution <nl> standard - substitutions : : = ' S ' NATURAL KNOWN - TYPE - KIND / / repeated known type substitutions of the same kind <nl> <nl> Types <nl> type : : = type ' Xp ' / / existential metatype without representation <nl> type : : = type ' Xm ' METATYPE - REPR / / existential metatype with representation <nl> type : : = ' Xe ' / / error or unresolved type <nl> - <nl> + <nl> bound - generic - type : : = type ' y ' ( type * ' _ ' ) * type * retroactive - conformance * ' G ' / / one type - list per nesting level of type <nl> bound - generic - type : : = substitution <nl> <nl> FUNCTION - KIND : : = ' f ' / / @ thin function type <nl> - FUNCTION - KIND : : = ' U ' / / uncurried function type ( currently not used ) <nl> + FUNCTION - KIND : : = ' U ' / / uncurried function type ( currently not used ) <nl> FUNCTION - KIND : : = ' K ' / / @ auto_closure function type ( noescape ) <nl> FUNCTION - KIND : : = ' B ' / / objc block function type <nl> FUNCTION - KIND : : = ' C ' / / C function pointer type <nl> Types <nl> associated - type : : = substitution <nl> associated - type : : = protocol ' QP ' / / self type of protocol <nl> associated - type : : = archetype identifier ' Qa ' / / associated type <nl> - <nl> + <nl> assoc - type - name : : = identifier / / associated type name without protocol <nl> assoc - type - name : : = identifier protocol ' P ' / / <nl> <nl> Property behaviors are implemented using private protocol conformances . 
<nl> GENERIC - PARAM - INDEX : : = INDEX / / depth = 0 , idx = N + 1 <nl> GENERIC - PARAM - INDEX : : = ' d ' INDEX INDEX / / depth = M + 1 , idx = N <nl> <nl> - LAYOUT - CONSTRAINT : : = ' N ' / / NativeRefCountedObject <nl> - LAYOUT - CONSTRAINT : : = ' R ' / / RefCountedObject <nl> - LAYOUT - CONSTRAINT : : = ' T ' / / Trivial <nl> + LAYOUT - CONSTRAINT : : = ' N ' / / NativeRefCountedObject <nl> + LAYOUT - CONSTRAINT : : = ' R ' / / RefCountedObject <nl> + LAYOUT - CONSTRAINT : : = ' T ' / / Trivial <nl> LAYOUT - CONSTRAINT : : = ' C ' / / Class <nl> - LAYOUT - CONSTRAINT : : = ' D ' / / NativeClass <nl> - LAYOUT - CONSTRAINT : : = ' E ' LAYOUT - SIZE - AND - ALIGNMENT / / Trivial of exact size <nl> - LAYOUT - CONSTRAINT : : = ' e ' LAYOUT - SIZE / / Trivial of exact size <nl> - LAYOUT - CONSTRAINT : : = ' M ' LAYOUT - SIZE - AND - ALIGNMENT / / Trivial of size at most N bits <nl> - LAYOUT - CONSTRAINT : : = ' m ' LAYOUT - SIZE / / Trivial of size at most N bits <nl> + LAYOUT - CONSTRAINT : : = ' D ' / / NativeClass <nl> + LAYOUT - CONSTRAINT : : = ' E ' LAYOUT - SIZE - AND - ALIGNMENT / / Trivial of exact size <nl> + LAYOUT - CONSTRAINT : : = ' e ' LAYOUT - SIZE / / Trivial of exact size <nl> + LAYOUT - CONSTRAINT : : = ' M ' LAYOUT - SIZE - AND - ALIGNMENT / / Trivial of size at most N bits <nl> + LAYOUT - CONSTRAINT : : = ' m ' LAYOUT - SIZE / / Trivial of size at most N bits <nl> LAYOUT - CONSTRAINT : : = ' U ' / / Unknown layout <nl> <nl> LAYOUT - SIZE : : = INDEX / / Size only <nl>
|
[docs] Replace all spaces with U+0020 in "docs/ABI/Mangling.rst".
|
apple/swift
|
22e023f15972c9c2ceb9b0e19ae0514db9a43303
|
2018-08-28T05:43:14Z
|
mmm a / tensorflow / core / kernels / adjust_hue_op_gpu . cu . cc <nl> ppp b / tensorflow / core / kernels / adjust_hue_op_gpu . cu . cc <nl> void AdjustHueGPU < T > : : operator ( ) ( GPUDevice * device , <nl> const T * const input , const float * const delta , <nl> T * const output ) { <nl> const auto stream = device - > stream ( ) ; <nl> - const CudaLaunchConfig config = <nl> - GetCudaLaunchConfig ( number_of_elements , * device ) ; <nl> + const GpuLaunchConfig config = <nl> + GetGpuLaunchConfig ( number_of_elements , * device ) ; <nl> const int threads_per_block = config . thread_per_block ; <nl> const int block_count = <nl> ( number_of_elements + threads_per_block - 1 ) / threads_per_block ; <nl> mmm a / tensorflow / core / kernels / adjust_saturation_op_gpu . cu . cc <nl> ppp b / tensorflow / core / kernels / adjust_saturation_op_gpu . cu . cc <nl> void AdjustSaturationGPU < T > : : operator ( ) ( GPUDevice * device , <nl> const float * const scale , <nl> T * const output ) { <nl> const auto stream = device - > stream ( ) ; <nl> - const CudaLaunchConfig config = <nl> - GetCudaLaunchConfig ( number_of_elements , * device ) ; <nl> + const GpuLaunchConfig config = <nl> + GetGpuLaunchConfig ( number_of_elements , * device ) ; <nl> const int threads_per_block = config . thread_per_block ; <nl> const int block_count = <nl> ( number_of_elements + threads_per_block - 1 ) / threads_per_block ; <nl> mmm a / tensorflow / core / kernels / avgpooling_op_gpu . cu . cc <nl> ppp b / tensorflow / core / kernels / avgpooling_op_gpu . cu . cc <nl> bool RunAvePoolBackwardNHWC ( const T * const top_diff , const int num , <nl> const int pad_l , T * const bottom_diff , <nl> const GPUDevice & d ) { <nl> int x_size = num * height * width * channels ; <nl> - CudaLaunchConfig config = GetCudaLaunchConfig ( x_size , d ) ; <nl> + GpuLaunchConfig config = GetCudaLaunchConfig ( x_size , d ) ; <nl> TF_CHECK_OK ( CudaLaunchKernel ( <nl> AvePoolBackwardNHWC < T > , config . block_count , config . thread_per_block , 0 , <nl> d . stream ( ) , config . virtual_thread_count , top_diff , num , height , width , <nl> mmm a / tensorflow / core / kernels / bias_op_gpu . cu . cc <nl> ppp b / tensorflow / core / kernels / bias_op_gpu . cu . cc <nl> void BiasGPU < T > : : compute ( const GPUDevice & d , const T * input , const T * bias , <nl> if ( total_count = = 0 ) { <nl> return ; <nl> } <nl> - CudaLaunchConfig config = GetCudaLaunchConfig ( total_count , d ) ; <nl> + GpuLaunchConfig config = GetCudaLaunchConfig ( total_count , d ) ; <nl> if ( data_format = = FORMAT_NHWC ) { <nl> TF_CHECK_OK ( CudaLaunchKernel ( BiasNHWCKernel < T > , config . block_count , <nl> config . thread_per_block , 0 , d . stream ( ) , <nl> void BiasGradGPU < T > : : compute ( const GPUDevice & d , const T * output_backprop , <nl> return ; <nl> } <nl> static constexpr int32 kWarpSize = 32 ; <nl> - CudaLaunchConfig config = GetCudaLaunchConfig ( total_count , d ) ; <nl> + GpuLaunchConfig config = GetCudaLaunchConfig ( total_count , d ) ; <nl> <nl> const int max_shared_memory_size = d . sharedMemPerBlock ( ) / 2 ; <nl> int32 shared_memory_size = 0 ; <nl> mmm a / tensorflow / core / kernels / bucketize_op_gpu . cu . cc <nl> ppp b / tensorflow / core / kernels / bucketize_op_gpu . cu . cc <nl> struct BucketizeFunctor < GPUDevice , T > { <nl> } <nl> TF_RETURN_IF_ERROR ( boundaries_array . Finalize ( ) ) ; <nl> <nl> - CudaLaunchConfig config = GetCudaLaunchConfig ( input . size ( ) , d ) ; <nl> + GpuLaunchConfig config = GetCudaLaunchConfig ( input . 
size ( ) , d ) ; <nl> int32 shared_mem_size = sizeof ( float ) * boundaries_vector . size ( ) ; <nl> const int32 kMaxSharedMemBytes = 16384 ; <nl> if ( shared_mem_size < d . sharedMemPerBlock ( ) & & <nl> mmm a / tensorflow / core / kernels / compare_and_bitpack_op_gpu . cu . cc <nl> ppp b / tensorflow / core / kernels / compare_and_bitpack_op_gpu . cu . cc <nl> __global__ void CompareAndBitpackKernel < double > ( const int size , <nl> TTypes < uint8 > : : Matrix output ) { \ <nl> const GPUDevice & d = c - > eigen_device < GPUDevice > ( ) ; \ <nl> int64 total_count = output . size ( ) ; \ <nl> - CudaLaunchConfig config = GetCudaLaunchConfig ( total_count , d ) ; \ <nl> + GpuLaunchConfig config = GetCudaLaunchConfig ( total_count , d ) ; \ <nl> \ <nl> TF_CHECK_OK ( CudaLaunchKernel ( CompareAndBitpackKernel < T > , \ <nl> config . block_count , config . thread_per_block , \ <nl> mmm a / tensorflow / core / kernels / conv_2d_gpu . h <nl> ppp b / tensorflow / core / kernels / conv_2d_gpu . h <nl> struct TransformFilter < GPUDevice , T , int , NDIMS > { <nl> } <nl> combined_dims [ 1 ] = in . dimension ( NDIMS - 2 ) ; / / input filters <nl> combined_dims [ 2 ] = in . dimension ( NDIMS - 1 ) ; / / output filters <nl> - CudaLaunchConfig config = GetCudaLaunchConfig ( out . size ( ) , d ) ; <nl> + GpuLaunchConfig config = GetCudaLaunchConfig ( out . size ( ) , d ) ; <nl> <nl> if ( dst_filter_format = = FORMAT_OIHW ) { <nl> TF_CHECK_OK ( CudaLaunchKernel ( ShuffleInTensor3Simple < T , 2 , 1 , 0 > , <nl> struct ReverseTransformFilter < GPUDevice , T , NDIMS > { <nl> for ( int i = 3 ; i < NDIMS ; + + i ) { <nl> combined_dims [ 2 ] * = in . dimension ( i ) ; <nl> } <nl> - CudaLaunchConfig config = GetCudaLaunchConfig ( out . size ( ) , d ) ; <nl> + GpuLaunchConfig config = GetCudaLaunchConfig ( out . size ( ) , d ) ; <nl> TF_CHECK_OK ( CudaLaunchKernel ( ShuffleInTensor3Simple < T , 2 , 1 , 0 > , <nl> config . block_count , config . thread_per_block , 0 , <nl> d . stream ( ) , config . virtual_thread_count , <nl> struct PadInput < GPUDevice , T , int , NDIMS > { <nl> const std : : array < int , NDIMS - 2 > & padding_right , <nl> typename TTypes < T , NDIMS , int > : : Tensor out , <nl> TensorFormat format ) { <nl> - CudaLaunchConfig config = GetCudaLaunchConfig ( out . size ( ) , d ) ; <nl> + GpuLaunchConfig config = GetCudaLaunchConfig ( out . size ( ) , d ) ; <nl> Dimension < NDIMS > input_dims ; <nl> for ( int i = 0 ; i < NDIMS ; + + i ) { <nl> input_dims [ i ] = in . dimension ( i ) ; <nl> void RunSwapDimension1And2InTensor3 ( const GPUDevice & d , const T * input , <nl> d , input , input_dims , output , kMinDimensionToUseTiles ) ; <nl> } else { <nl> int total_element_count = input_dims [ 0 ] * input_dims [ 1 ] * input_dims [ 2 ] ; <nl> - CudaLaunchConfig config = GetCudaLaunchConfig ( total_element_count , d ) ; <nl> + GpuLaunchConfig config = GetCudaLaunchConfig ( total_element_count , d ) ; <nl> TF_CHECK_OK ( CudaLaunchKernel ( ShuffleInTensor3Simple < T , 0 , 2 , 1 , conjugate > , <nl> config . block_count , config . thread_per_block , 0 , <nl> d . stream ( ) , config . 
virtual_thread_count , input , <nl> struct SwapDimension0And2InTensor3 < GPUDevice , T , conjugate > { <nl> static_cast < int > ( combined_dims [ 1 ] ) , <nl> static_cast < int > ( combined_dims [ 2 ] ) } ; <nl> size_t total_size = combined_dims [ 0 ] * combined_dims [ 1 ] * combined_dims [ 2 ] ; <nl> - CudaLaunchConfig config = GetCudaLaunchConfig ( total_size , d ) ; <nl> + GpuLaunchConfig config = GetCudaLaunchConfig ( total_size , d ) ; <nl> TF_CHECK_OK ( CudaLaunchKernel ( ShuffleInTensor3Simple < T , 2 , 1 , 0 , conjugate > , <nl> config . block_count , config . thread_per_block , 0 , <nl> d . stream ( ) , config . virtual_thread_count , in , <nl> mmm a / tensorflow / core / kernels / crop_and_resize_op_gpu . cu . cc <nl> ppp b / tensorflow / core / kernels / crop_and_resize_op_gpu . cu . cc <nl> struct CropAndResize < GPUDevice , T > { <nl> } <nl> <nl> if ( total_count > 0 ) { <nl> - CudaLaunchConfig config = GetCudaLaunchConfig ( total_count , d ) ; <nl> + GpuLaunchConfig config = GetCudaLaunchConfig ( total_count , d ) ; <nl> TF_CHECK_OK ( CudaLaunchKernel ( <nl> CropAndResizeKernel < T > , config . block_count , config . thread_per_block , <nl> 0 , d . stream ( ) , config . virtual_thread_count , image . data ( ) , <nl> struct CropAndResizeBackpropImage < GPUDevice , T > { <nl> const GPUDevice & d = context - > eigen_device < GPUDevice > ( ) ; <nl> <nl> int total_count ; <nl> - CudaLaunchConfig config ; <nl> + GpuLaunchConfig config ; <nl> <nl> / / Initialize grads_image with all zeros . <nl> total_count = batch * image_height * image_width * depth ; <nl> if ( total_count > 0 ) { <nl> - config = GetCudaLaunchConfig ( total_count , d ) ; <nl> + config = GetGpuLaunchConfig ( total_count , d ) ; <nl> TF_CHECK_OK ( CudaLaunchKernel ( <nl> SetZero < T > , config . block_count , config . thread_per_block , 0 , <nl> d . stream ( ) , config . virtual_thread_count , grads_image . data ( ) ) ) ; <nl> struct CropAndResizeBackpropImage < GPUDevice , T > { <nl> / / Accumulate . <nl> total_count = num_boxes * crop_height * crop_width * depth ; <nl> if ( total_count > 0 ) { <nl> - config = GetCudaLaunchConfig ( total_count , d ) ; <nl> + config = GetGpuLaunchConfig ( total_count , d ) ; <nl> TF_CHECK_OK ( CudaLaunchKernel ( <nl> CropAndResizeBackpropImageKernel < T > , config . block_count , <nl> config . thread_per_block , 0 , d . stream ( ) , config . virtual_thread_count , <nl> struct CropAndResizeBackpropBoxes < GPUDevice , T > { <nl> const int depth = grads . dimension ( 3 ) ; <nl> <nl> int total_count ; <nl> - CudaLaunchConfig config ; <nl> + GpuLaunchConfig config ; <nl> <nl> / / Initialize grads_boxes with all zeros . <nl> total_count = num_boxes * 4 ; <nl> if ( total_count > 0 ) { <nl> - config = GetCudaLaunchConfig ( total_count , d ) ; <nl> + config = GetGpuLaunchConfig ( total_count , d ) ; <nl> TF_CHECK_OK ( CudaLaunchKernel ( <nl> SetZero < float > , config . block_count , config . thread_per_block , 0 , <nl> d . stream ( ) , config . virtual_thread_count , grads_boxes . data ( ) ) ) ; <nl> struct CropAndResizeBackpropBoxes < GPUDevice , T > { <nl> / / Accumulate . <nl> total_count = num_boxes * crop_height * crop_width * depth ; <nl> if ( total_count > 0 ) { <nl> - config = GetCudaLaunchConfig ( total_count , d ) ; <nl> + config = GetGpuLaunchConfig ( total_count , d ) ; <nl> TF_CHECK_OK ( CudaLaunchKernel ( <nl> CropAndResizeBackpropBoxesKernel < T > , config . block_count , <nl> config . thread_per_block , 0 , d . stream ( ) , config . 
virtual_thread_count , <nl> mmm a / tensorflow / core / kernels / cwise_op_clip_gpu . cu . cc <nl> ppp b / tensorflow / core / kernels / cwise_op_clip_gpu . cu . cc <nl> struct UnaryClipOp < GPUDevice , T > { <nl> typename TTypes < T > : : ConstFlat & in1_flat , <nl> typename TTypes < T > : : ConstFlat & in2_flat , <nl> typename TTypes < T > : : Flat & out_flat ) const { <nl> - CudaLaunchConfig config = GetCudaLaunchConfig ( in0_flat . size ( ) , d ) ; <nl> + GpuLaunchConfig config = GetCudaLaunchConfig ( in0_flat . size ( ) , d ) ; <nl> <nl> TF_CHECK_OK ( CudaLaunchKernel ( <nl> UnaryClipCustomKernel < T > , config . block_count , config . thread_per_block , <nl> struct BinaryRightClipOp < GPUDevice , T > { <nl> typename TTypes < T > : : ConstFlat & in1_flat , <nl> typename TTypes < T > : : ConstFlat & in2_flat , <nl> typename TTypes < T > : : Flat & out_flat ) const { <nl> - CudaLaunchConfig config = GetCudaLaunchConfig ( in0_flat . size ( ) , d ) ; <nl> + GpuLaunchConfig config = GetCudaLaunchConfig ( in0_flat . size ( ) , d ) ; <nl> <nl> TF_CHECK_OK ( CudaLaunchKernel ( <nl> BinaryRightClipCustomKernel < T > , config . block_count , <nl> struct BinaryLeftClipOp < GPUDevice , T > { <nl> typename TTypes < T > : : ConstFlat & in1_flat , <nl> typename TTypes < T > : : ConstFlat & in2_flat , <nl> typename TTypes < T > : : Flat & out_flat ) const { <nl> - CudaLaunchConfig config = GetCudaLaunchConfig ( in0_flat . size ( ) , d ) ; <nl> + GpuLaunchConfig config = GetCudaLaunchConfig ( in0_flat . size ( ) , d ) ; <nl> <nl> TF_CHECK_OK ( CudaLaunchKernel ( <nl> BinaryLeftClipCustomKernel < T > , config . block_count , <nl> mmm a / tensorflow / core / kernels / depthtospace_op_gpu . cu . cc <nl> ppp b / tensorflow / core / kernels / depthtospace_op_gpu . cu . cc <nl> struct DepthToSpaceOpFunctor < GPUDevice , T , FORMAT_NHWC > { <nl> if ( total_count = = 0 ) { <nl> return ; <nl> } <nl> - CudaLaunchConfig config = GetCudaLaunchConfig ( total_count , d ) ; <nl> + GpuLaunchConfig config = GetCudaLaunchConfig ( total_count , d ) ; <nl> TF_CHECK_OK ( CudaLaunchKernel ( <nl> D2S_NHWC < T > , config . block_count , config . thread_per_block , 0 , d . stream ( ) , <nl> config . virtual_thread_count , input . data ( ) , block_size , batch_size , <nl> struct DepthToSpaceOpFunctor < GPUDevice , T , FORMAT_NCHW > { <nl> if ( total_count = = 0 ) { <nl> return ; <nl> } <nl> - CudaLaunchConfig config = GetCudaLaunchConfig ( total_count , d ) ; <nl> + GpuLaunchConfig config = GetCudaLaunchConfig ( total_count , d ) ; <nl> switch ( block_size ) { <nl> case 2 : <nl> TF_CHECK_OK ( CudaLaunchKernel ( <nl> struct DepthToSpaceOpFunctor < GPUDevice , T , FORMAT_NCHW > { <nl> if ( total_count = = 0 ) { <nl> return ; <nl> } <nl> - auto config = GetCudaLaunchConfig ( total_count , d ) ; <nl> + auto config = GetGpuLaunchConfig ( total_count , d ) ; <nl> TF_CHECK_OK ( CudaLaunchKernel ( <nl> D2S_NCHW < T > , config . block_count , config . thread_per_block , 0 , d . stream ( ) , <nl> config . virtual_thread_count , input . data ( ) , block_size , input_width , <nl> mmm a / tensorflow / core / kernels / depthwise_conv_op_gpu . h <nl> ppp b / tensorflow / core / kernels / depthwise_conv_op_gpu . h <nl> Status LaunchDepthwiseConv2dGPUSmall ( OpKernelContext * ctx , <nl> kBlockDepth * ( tile_pixels + filter_pixels ) * sizeof ( S ) ; <nl> const int num_outputs = args . out_rows * args . 
out_cols * block_count ; <nl> auto device = ctx - > eigen_gpu_device ( ) ; <nl> - CudaLaunchConfig config = GetCudaLaunchConfigFixedBlockSize ( <nl> + GpuLaunchConfig config = GetCudaLaunchConfigFixedBlockSize ( <nl> num_outputs , device , kernel , shared_memory_size , <nl> block_dim . x * block_dim . y * block_dim . z ) ; <nl> TF_CHECK_OK ( CudaLaunchKernel ( kernel , config . block_count , block_dim , <nl> Status LaunchDepthwiseConv2dGPU ( OpKernelContext * ctx , const DepthwiseArgs & args , <nl> const int num_outputs = <nl> args . batch * args . out_rows * args . out_cols * args . out_depth ; <nl> auto device = ctx - > eigen_gpu_device ( ) ; <nl> - CudaLaunchConfig config = <nl> - GetCudaLaunchConfig ( num_outputs , device , kernel , 0 , 0 ) ; <nl> + GpuLaunchConfig config = <nl> + GetGpuLaunchConfig ( num_outputs , device , kernel , 0 , 0 ) ; <nl> / / The compile - time constant version runs faster with a single block . <nl> const int max_block_count = kKnownFilterWidth < 0 | | kKnownFilterHeight < 0 | | <nl> kKnownDepthMultiplier < 0 <nl> Status LaunchDepthwiseConv2dBackpropInputGPU ( OpKernelContext * ctx , <nl> const int num_in_backprop = <nl> args . batch * args . in_rows * args . in_cols * args . in_depth ; <nl> auto device = ctx - > eigen_gpu_device ( ) ; <nl> - CudaLaunchConfig config = <nl> - GetCudaLaunchConfig ( num_in_backprop , device , kernel , 0 , 0 ) ; <nl> + GpuLaunchConfig config = <nl> + GetGpuLaunchConfig ( num_in_backprop , device , kernel , 0 , 0 ) ; <nl> TF_CHECK_OK ( CudaLaunchKernel ( <nl> kernel , config . block_count , config . thread_per_block , 0 , device . stream ( ) , <nl> args , out_backprop , filter , in_backprop , num_in_backprop ) ) ; <nl> Status TryLaunchDepthwiseConv2dBackpropFilterGPUSmall ( <nl> " is not supported " ) ; <nl> } <nl> const int num_out_backprop = args . out_rows * args . out_cols * block_count ; <nl> - CudaLaunchConfig config = GetCudaLaunchConfigFixedBlockSize ( <nl> + GpuLaunchConfig config = GetCudaLaunchConfigFixedBlockSize ( <nl> num_out_backprop , device , kernel , shared_memory_size , <nl> block_dim . x * block_dim . y * block_dim . z ) ; <nl> TF_CHECK_OK ( CudaLaunchKernel ( kernel , config . block_count , block_dim , <nl> Status LaunchDepthwiseConv2dBackpropFilterGPU ( <nl> const int num_out_backprop = <nl> args . batch * args . out_rows * args . out_cols * args . out_depth ; <nl> auto device = ctx - > eigen_gpu_device ( ) ; <nl> - CudaLaunchConfig config = <nl> - GetCudaLaunchConfig ( num_out_backprop , device , kernel , 0 , 0 ) ; <nl> + GpuLaunchConfig config = <nl> + GetGpuLaunchConfig ( num_out_backprop , device , kernel , 0 , 0 ) ; <nl> TF_CHECK_OK ( CudaLaunchKernel ( <nl> kernel , config . block_count , config . thread_per_block , 0 , device . stream ( ) , <nl> args , out_backprop , input , filter_backprop , num_out_backprop ) ) ; <nl> mmm a / tensorflow / core / kernels / determinant_op_gpu . cu . cc <nl> ppp b / tensorflow / core / kernels / determinant_op_gpu . cu . cc <nl> struct DeterminantFromPivotedLUFunctor < GPUDevice , Scalar > { <nl> int * info ) { <nl> const int64 num_matrices = output . size ( ) ; <nl> const int64 n = lu_factor . 
dimension ( 2 ) ; <nl> - CudaLaunchConfig config = GetCudaLaunchConfig ( num_matrices , device ) ; <nl> + GpuLaunchConfig config = GetCudaLaunchConfig ( num_matrices , device ) ; <nl> <nl> TF_CHECK_OK ( CudaLaunchKernel ( <nl> DeterminantFromPivotedLUKernel < Scalar , / * compute_log_abs_det = * / false > , <nl> struct LogDeterminantFromPivotedLUFunctor < GPUDevice , Scalar > { <nl> typename TTypes < Scalar , 1 > : : Tensor log_abs_det ) { <nl> const int64 num_matrices = sign . size ( ) ; <nl> const int64 n = lu_factor . dimension ( 2 ) ; <nl> - CudaLaunchConfig config = GetCudaLaunchConfig ( num_matrices , device ) ; <nl> + GpuLaunchConfig config = GetCudaLaunchConfig ( num_matrices , device ) ; <nl> TF_CHECK_OK ( CudaLaunchKernel ( <nl> DeterminantFromPivotedLUKernel < Scalar , / * compute_log_abs_det = * / true > , <nl> config . block_count , config . thread_per_block , 0 , device . stream ( ) , <nl> mmm a / tensorflow / core / kernels / diag_op_gpu . cu . cc <nl> ppp b / tensorflow / core / kernels / diag_op_gpu . cu . cc <nl> struct DiagFunctor < GPUDevice , T > { <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> - / / CudaLaunchConfig uses an int for virtual_thread_count , <nl> + / / GpuLaunchConfig uses an int for virtual_thread_count , <nl> / / so this may overflow for ` size * size ` in extreme cases , <nl> / / here is checking the multiplication overflow for integer . <nl> if ( size & & ( int ( size * size ) / size ) ! = size ) { <nl> struct DiagFunctor < GPUDevice , T > { <nl> <nl> / / Launch the GPU kernel . <nl> const GPUDevice & device = context - > eigen_device < GPUDevice > ( ) ; <nl> - CudaLaunchConfig diag_config = <nl> - GetCudaLaunchConfig ( virtual_thread_count , device ) ; <nl> + GpuLaunchConfig diag_config = <nl> + GetGpuLaunchConfig ( virtual_thread_count , device ) ; <nl> TF_CHECK_OK ( <nl> CudaLaunchKernel ( DiagCudaKernel < T > , diag_config . block_count , <nl> diag_config . thread_per_block , 0 , device . stream ( ) , <nl> struct DiagPartFunctor < GPUDevice , T > { <nl> const GPUDevice & device = context - > eigen_device < GPUDevice > ( ) ; <nl> <nl> / / Extract the diagonal elements . <nl> - CudaLaunchConfig diag_config = GetCudaLaunchConfig ( size , device ) ; <nl> + GpuLaunchConfig diag_config = GetCudaLaunchConfig ( size , device ) ; <nl> TF_CHECK_OK ( <nl> CudaLaunchKernel ( DiagPartCudaKernel < T > , diag_config . block_count , <nl> diag_config . thread_per_block , 0 , device . stream ( ) , <nl> mmm a / tensorflow / core / kernels / dilation_ops_gpu . cu . cc <nl> ppp b / tensorflow / core / kernels / dilation_ops_gpu . cu . cc <nl> struct Dilation < GPUDevice , T > { <nl> const int output_cols = output . dimension ( 2 ) ; <nl> <nl> const int total_count = batch * output_rows * output_cols * depth ; <nl> - CudaLaunchConfig config = GetCudaLaunchConfig ( total_count , d ) ; <nl> + GpuLaunchConfig config = GetCudaLaunchConfig ( total_count , d ) ; <nl> <nl> TF_CHECK_OK ( CudaLaunchKernel ( <nl> DilationKernel < T > , config . block_count , config . thread_per_block , 0 , <nl> struct DilationBackpropInput < GPUDevice , T > { <nl> const int output_cols = out_backprop . dimension ( 2 ) ; <nl> <nl> int total_count ; <nl> - CudaLaunchConfig config ; <nl> + GpuLaunchConfig config ; <nl> <nl> / / Initialize in_backprop with all zeros . 
<nl> total_count = batch * input_rows * input_cols * depth ; <nl> - config = GetCudaLaunchConfig ( total_count , d ) ; <nl> + config = GetGpuLaunchConfig ( total_count , d ) ; <nl> TF_CHECK_OK ( CudaLaunchKernel ( SetZero < T > , config . block_count , <nl> config . thread_per_block , 0 , d . stream ( ) , <nl> total_count , in_backprop . data ( ) ) ) ; <nl> <nl> / / Accumulate . <nl> total_count = batch * output_rows * output_cols * depth ; <nl> - config = GetCudaLaunchConfig ( total_count , d ) ; <nl> + config = GetGpuLaunchConfig ( total_count , d ) ; <nl> TF_CHECK_OK ( CudaLaunchKernel ( <nl> DilationBackpropInputKernel < T > , config . block_count , <nl> config . thread_per_block , 0 , d . stream ( ) , config . virtual_thread_count , <nl> struct DilationBackpropFilter < GPUDevice , T > { <nl> const int output_cols = out_backprop . dimension ( 2 ) ; <nl> <nl> int total_count ; <nl> - CudaLaunchConfig config ; <nl> + GpuLaunchConfig config ; <nl> <nl> / / Initialize filter_backprop with all zeros . <nl> total_count = filter_rows * filter_cols * depth ; <nl> - config = GetCudaLaunchConfig ( total_count , d ) ; <nl> + config = GetGpuLaunchConfig ( total_count , d ) ; <nl> TF_CHECK_OK ( CudaLaunchKernel ( SetZero < T > , config . block_count , <nl> config . thread_per_block , 0 , d . stream ( ) , <nl> total_count , filter_backprop . data ( ) ) ) ; <nl> <nl> / / Accumulate . <nl> total_count = batch * output_rows * output_cols * depth ; <nl> - config = GetCudaLaunchConfig ( total_count , d ) ; <nl> + config = GetGpuLaunchConfig ( total_count , d ) ; <nl> TF_CHECK_OK ( CudaLaunchKernel ( <nl> DilationBackpropFilterKernel < T > , config . block_count , <nl> config . thread_per_block , 0 , d . stream ( ) , config . virtual_thread_count , <nl> mmm a / tensorflow / core / kernels / dynamic_partition_op_gpu . cu . cc <nl> ppp b / tensorflow / core / kernels / dynamic_partition_op_gpu . cu . cc <nl> __global__ void MoveValuesKernel ( const int32 * keys , const int32 * values , <nl> template < typename T > <nl> void RangeInit ( const GPUDevice & d , const T start , const T delta , <nl> const int32 size , typename TTypes < T > : : Flat out ) { <nl> - CudaLaunchConfig config = GetCudaLaunchConfig ( size , d ) ; <nl> + GpuLaunchConfig config = GetCudaLaunchConfig ( size , d ) ; <nl> TF_CHECK_OK ( CudaLaunchKernel ( RangeInitKernel < T > , config . block_count , <nl> config . thread_per_block , 0 , d . stream ( ) , start , <nl> delta , size , out . data ( ) ) ) ; <nl> void MoveValues ( const GPUDevice & d , int32 * keys , int32 * values , int32 * num_runs , <nl> / / This is valid for correct inputs , because then out_size > = * num_runs . <nl> / / For wrong inputs , we may have out_size < * num_runs . In this case we will <nl> / / only handle the first out_size values . <nl> - CudaLaunchConfig config = GetCudaLaunchConfig ( out_size , d ) ; <nl> + GpuLaunchConfig config = GetCudaLaunchConfig ( out_size , d ) ; <nl> TF_CHECK_OK ( CudaLaunchKernel ( MoveValuesKernel , config . block_count , <nl> config . thread_per_block , 0 , d . 
stream ( ) , keys , <nl> values , num_runs , out_size , out ) ) ; <nl> template < typename T > <nl> void CallGatherKernel ( const GPUDevice & d , const T * params , const int32 * indices , <nl> T * out , int64 gather_dim_size , int64 indices_size , <nl> int64 slice_size , int64 out_size ) { <nl> - CudaLaunchConfig config = GetCudaLaunchConfig ( out_size , d ) ; <nl> + GpuLaunchConfig config = GetCudaLaunchConfig ( out_size , d ) ; <nl> TF_CHECK_OK ( CudaLaunchKernel ( <nl> GatherOpKernel < T , int32 , true > , config . block_count , <nl> config . thread_per_block , 0 , d . stream ( ) , params , indices , out , <nl> mmm a / tensorflow / core / kernels / dynamic_stitch_op_gpu . cu . cc <nl> ppp b / tensorflow / core / kernels / dynamic_stitch_op_gpu . cu . cc <nl> void DynamicStitchGPUImpl ( const Eigen : : GpuDevice & gpu_device , <nl> const GpuDeviceArrayStruct < const T * > & input_ptrs , <nl> T * output ) { <nl> const int32 output_size = first_dim_size * slice_size ; <nl> - auto config = GetCudaLaunchConfig ( output_size , gpu_device ) ; <nl> + auto config = GetGpuLaunchConfig ( output_size , gpu_device ) ; <nl> <nl> TF_CHECK_OK ( CudaLaunchKernel ( DynamicStitchKernel < T > , config . block_count , <nl> config . thread_per_block , 0 , gpu_device . stream ( ) , <nl> mmm a / tensorflow / core / kernels / eye_functor_gpu . cu . cc <nl> ppp b / tensorflow / core / kernels / eye_functor_gpu . cu . cc <nl> struct EyeFunctor < GPUDevice , Scalar > { <nl> const int batch_size = matrix_batch . dimension ( 0 ) ; <nl> const int m = matrix_batch . dimension ( 1 ) ; <nl> const int n = matrix_batch . dimension ( 2 ) ; <nl> - CudaLaunchConfig config = GetCudaLaunchConfig ( batch_size * m * n , device ) ; <nl> + GpuLaunchConfig config = GetCudaLaunchConfig ( batch_size * m * n , device ) ; <nl> TF_CHECK_OK ( CudaLaunchKernel ( EyeKernel < Scalar > , config . block_count , <nl> config . thread_per_block , 0 , device . stream ( ) , <nl> config . virtual_thread_count , batch_size , m , n , <nl> mmm a / tensorflow / core / kernels / fused_batch_norm_op . cu . cc <nl> ppp b / tensorflow / core / kernels / fused_batch_norm_op . cu . cc <nl> template < class T > <nl> void VarianceToInvVariance < T > : : operator ( ) ( const Eigen : : GpuDevice & d , <nl> const T * variance , double epsilon , <nl> int channels , T * inv_variance ) { <nl> - CudaLaunchConfig config = GetCudaLaunchConfig ( channels , d ) ; <nl> + GpuLaunchConfig config = GetCudaLaunchConfig ( channels , d ) ; <nl> TF_CHECK_OK ( CudaLaunchKernel ( VarianceToInvVarianceKernel < T > , <nl> config . block_count , config . thread_per_block , 0 , <nl> d . stream ( ) , config . virtual_thread_count , <nl> template < class T > <nl> void InvVarianceToVariance < T > : : operator ( ) ( const Eigen : : GpuDevice & d , <nl> double epsilon , int sample_size , <nl> int channels , T * variance ) { <nl> - CudaLaunchConfig config = GetCudaLaunchConfig ( channels , d ) ; <nl> + GpuLaunchConfig config = GetCudaLaunchConfig ( channels , d ) ; <nl> TF_CHECK_OK ( CudaLaunchKernel ( InvVarianceToVarianceKernel < T > , <nl> config . block_count , config . thread_per_block , 0 , <nl> d . stream ( ) , config . virtual_thread_count , epsilon , <nl> mmm a / tensorflow / core / kernels / gather_functor_gpu . cu . h <nl> ppp b / tensorflow / core / kernels / gather_functor_gpu . cu . h <nl> struct GatherFunctor < GPUDevice , T , Index > { <nl> const int64 indices_size = indices . size ( ) ; <nl> const int64 slice_size = params . 
dimension ( 2 ) ; <nl> <nl> - CudaLaunchConfig config = GetCudaLaunchConfig ( out_size , d ) ; <nl> + GpuLaunchConfig config = GetCudaLaunchConfig ( out_size , d ) ; <nl> if ( is_axis_zero ) { <nl> TF_CHECK_OK ( CudaLaunchKernel ( <nl> GatherOpKernel < T , Index , true > , config . block_count , <nl> mmm a / tensorflow / core / kernels / gather_nd_op_gpu . cu . cc <nl> ppp b / tensorflow / core / kernels / gather_nd_op_gpu . cu . cc <nl> struct GatherNdSlice < GPUDevice , T , Index , IXDIM > { <nl> batch_indices [ i - 1 ] = Tparams . dimension ( i - 1 ) ; <nl> batch_strides [ i - 1 ] = batch_strides [ i ] * Tparams . dimension ( i ) ; <nl> } <nl> - CudaLaunchConfig config = GetCudaLaunchConfig ( out_size , d ) ; <nl> + GpuLaunchConfig config = GetCudaLaunchConfig ( out_size , d ) ; <nl> <nl> TF_CHECK_OK ( CudaLaunchKernel ( GatherSliceOpKernel < T , Index , IXDIM > , <nl> config . block_count , config . thread_per_block , 0 , <nl> mmm a / tensorflow / core / kernels / inplace_ops_functor_gpu . cu . cc <nl> ppp b / tensorflow / core / kernels / inplace_ops_functor_gpu . cu . cc <nl> template < typename T > <nl> Status DoParallelConcatUpdate ( const Device & d , const Tensor & value , int32 loc , <nl> Tensor * output ) { <nl> const int64 nelem = value . NumElements ( ) ; <nl> - CudaLaunchConfig cfg = GetCudaLaunchConfig ( nelem , d ) ; <nl> + GpuLaunchConfig cfg = GetCudaLaunchConfig ( nelem , d ) ; <nl> auto Toutput = output - > flat_outer_dims < T > ( ) ; <nl> const int64 nrows = Toutput . dimension ( 0 ) ; <nl> const int64 ncols = Toutput . dimension ( 1 ) ; <nl> template < typename T > <nl> void DoInplaceOp ( const Device & d , InplaceOpType op , const Tensor & i , <nl> const Tensor & v , Tensor * y ) { <nl> const int64 nelem = v . NumElements ( ) ; <nl> - CudaLaunchConfig cfg = GetCudaLaunchConfig ( nelem , d ) ; <nl> + GpuLaunchConfig cfg = GetCudaLaunchConfig ( nelem , d ) ; <nl> auto Ty = y - > flat_outer_dims < T > ( ) ; <nl> const int64 nrows = Ty . dimension ( 0 ) ; <nl> const int64 ncols = Ty . dimension ( 1 ) ; <nl> template < bool > <nl> void DoInplaceOp ( const Device & d , InplaceOpType op , const Tensor & i , <nl> const Tensor & v , Tensor * y ) { <nl> const int64 nelem = v . NumElements ( ) ; <nl> - CudaLaunchConfig cfg = GetCudaLaunchConfig ( nelem , d ) ; <nl> + GpuLaunchConfig cfg = GetCudaLaunchConfig ( nelem , d ) ; <nl> auto Ty = y - > flat_outer_dims < bool > ( ) ; <nl> const int64 nrows = Ty . dimension ( 0 ) ; <nl> const int64 ncols = Ty . dimension ( 1 ) ; <nl> mmm a / tensorflow / core / kernels / lu_op_gpu . cu . cc <nl> ppp b / tensorflow / core / kernels / lu_op_gpu . cu . cc <nl> __device__ void ComputePermutationFromTranspositions ( <nl> / / transpositions . <nl> template < typename Scalar > <nl> __global__ void ComputePermutationFromTranspositionsKernel ( <nl> - CudaLaunchConfig config , const int64 num_rows , const int * all_pivots , <nl> + GpuLaunchConfig config , const int64 num_rows , const int * all_pivots , <nl> Scalar * all_permutation_indices ) { <nl> / / We only parallelize over batches here . Performance is not critical , <nl> / / since this cheap O ( num_rows ) kernel always follows an O ( num_rows ^ 3 ) <nl> class LuOpGpu : public AsyncOpKernel { <nl> int * pivots_ptr = pivots . flat < int > ( ) . data ( ) ; <nl> Tidx * permutation_indices_ptr = <nl> permutation_indices - > template flat < Tidx > ( ) . 
data ( ) ; <nl> - CudaLaunchConfig cfgPivots = GetCudaLaunchConfig ( batch_size , device ) ; <nl> + GpuLaunchConfig cfgPivots = GetCudaLaunchConfig ( batch_size , device ) ; <nl> TF_CHECK_OK ( CudaLaunchKernel ( <nl> ComputePermutationFromTranspositionsKernel < Tidx > , cfgPivots . block_count , <nl> cfgPivots . thread_per_block , 0 , device . stream ( ) , cfgPivots , num_rows , <nl> mmm a / tensorflow / core / kernels / matrix_band_part_op_gpu . cu . cc <nl> ppp b / tensorflow / core / kernels / matrix_band_part_op_gpu . cu . cc <nl> struct MatrixBandPartFunctor < GPUDevice , Scalar > { <nl> const int batch_size = input . dimension ( 0 ) ; <nl> const int m = input . dimension ( 1 ) ; <nl> const int n = input . dimension ( 2 ) ; <nl> - CudaLaunchConfig config = GetCudaLaunchConfig ( batch_size * m * n , device ) ; <nl> + GpuLaunchConfig config = GetCudaLaunchConfig ( batch_size * m * n , device ) ; <nl> TF_CHECK_OK ( CudaLaunchKernel ( MatrixBandPartKernel < Scalar > , <nl> config . block_count , config . thread_per_block , 0 , <nl> device . stream ( ) , config . virtual_thread_count , <nl> mmm a / tensorflow / core / kernels / matrix_set_diag_op_gpu . cu . cc <nl> ppp b / tensorflow / core / kernels / matrix_set_diag_op_gpu . cu . cc <nl> struct MatrixSetDiag < GPUDevice , Scalar > { <nl> CHECK_EQ ( diag . dimension ( 1 ) , minsize ) ; <nl> if ( batch_size = = 0 | | minsize = = 0 ) return ; <nl> if ( input . data ( ) = = output . data ( ) ) { <nl> - CudaLaunchConfig config = <nl> - GetCudaLaunchConfig ( batch_size * minsize , device ) ; <nl> + GpuLaunchConfig config = GetGpuLaunchConfig ( batch_size * minsize , device ) ; <nl> TF_CHECK_OK ( CudaLaunchKernel ( MatrixSetDiagKernel < Scalar > , <nl> config . block_count , config . thread_per_block , <nl> 0 , device . stream ( ) , <nl> config . virtual_thread_count , m , n , minsize , <nl> diag . data ( ) , output . data ( ) ) ) ; <nl> } else { <nl> - CudaLaunchConfig config = GetCudaLaunchConfig ( batch_size * m * n , device ) ; <nl> + GpuLaunchConfig config = GetCudaLaunchConfig ( batch_size * m * n , device ) ; <nl> TF_CHECK_OK ( CudaLaunchKernel ( MatrixCopyInputAndSetDiagKernel < Scalar > , <nl> config . block_count , config . thread_per_block , <nl> 0 , device . stream ( ) , <nl> mmm a / tensorflow / core / kernels / maxpooling_op_gpu . cu . cc <nl> ppp b / tensorflow / core / kernels / maxpooling_op_gpu . cu . cc <nl> bool MaxPoolGradBackwardNoMask < T > : : operator ( ) ( <nl> const Eigen : : GpuDevice & d ) { <nl> const int num_kernels = batch * channels * pooled_height * pooled_width ; <nl> if ( num_kernels = = 0 ) return true ; <nl> - CudaLaunchConfig config = GetCudaLaunchConfig ( num_kernels , d ) ; <nl> + GpuLaunchConfig config = GetCudaLaunchConfig ( num_kernels , d ) ; <nl> <nl> if ( data_format = = FORMAT_NHWC ) { <nl> TF_CHECK_OK ( <nl> bool MaxPoolGradBackwardWithArgmax < T > : : operator ( ) ( <nl> T * bottom_diff , const Eigen : : GpuDevice & d , <nl> const bool include_batch_in_index ) { <nl> if ( input_size = = 0 ) return true ; <nl> - CudaLaunchConfig config = GetCudaLaunchConfig ( output_size , d ) ; <nl> + GpuLaunchConfig config = GetCudaLaunchConfig ( output_size , d ) ; <nl> TF_CHECK_OK ( CudaLaunchKernel ( <nl> MaxPoolGradBackward < T > , config . block_count , config . thread_per_block , 0 , <nl> d . stream ( ) , output_size , top_diff , mask , top_offset , bottom_offset , <nl> mmm a / tensorflow / core / kernels / multinomial_op_gpu . cu . cc <nl> ppp b / tensorflow / core / kernels / multinomial_op_gpu . cu . 
cc <nl> struct MultinomialFunctor < GPUDevice , T , OutputType > { <nl> output . device ( d ) = output . constant ( 0LL ) ; <nl> <nl> const int32 work_items = batch_size * num_samples * num_classes ; <nl> - CudaLaunchConfig config = GetCudaLaunchConfig ( work_items , d ) ; <nl> + GpuLaunchConfig config = GetCudaLaunchConfig ( work_items , d ) ; <nl> TF_CHECK_OK ( CudaLaunchKernel ( <nl> MultinomialKernel < OutputType > , config . block_count , <nl> config . thread_per_block , 0 , d . stream ( ) , config . virtual_thread_count , <nl> mmm a / tensorflow / core / kernels / parameterized_truncated_normal_op_gpu . cu . cc <nl> ppp b / tensorflow / core / kernels / parameterized_truncated_normal_op_gpu . cu . cc <nl> struct TruncatedNormalFunctor < GPUDevice , T > { <nl> typename TTypes < T > : : ConstFlat maxvals , <nl> const random : : PhiloxRandom & gen , <nl> typename TTypes < T > : : Flat output ) { <nl> - const auto config = GetCudaLaunchConfig ( num_elements , d ) ; <nl> + const auto config = GetGpuLaunchConfig ( num_elements , d ) ; <nl> <nl> TF_CHECK_OK ( CudaLaunchKernel ( <nl> TruncatedNormalKernel < T > , config . block_count , config . thread_per_block , <nl> mmm a / tensorflow / core / kernels / pooling_ops_3d_gpu . cu . cc <nl> ppp b / tensorflow / core / kernels / pooling_ops_3d_gpu . cu . cc <nl> bool MaxPool3dGradBackward < T > : : operator ( ) ( <nl> const T * top_diff , T * bottom_diff , const Eigen : : GpuDevice & d ) { <nl> int num_kernels = <nl> batch * channels * pooled_plane * pooled_height * pooled_width ; <nl> - CudaLaunchConfig config = GetCudaLaunchConfig ( num_kernels , d ) ; <nl> + GpuLaunchConfig config = GetCudaLaunchConfig ( num_kernels , d ) ; <nl> if ( data_format = = FORMAT_NHWC ) { <nl> TF_CHECK_OK ( CudaLaunchKernel ( <nl> MaxPoolGradBackwardNoMaskNDHWC < T > , config . block_count , <nl> mmm a / tensorflow / core / kernels / population_count_op_gpu . cu . cc <nl> ppp b / tensorflow / core / kernels / population_count_op_gpu . cu . cc <nl> __global__ void PopulationCountKernel < int64 > ( const int size , const int64 * input , <nl> TTypes < uint8 > : : Flat output ) { \ <nl> const GPUDevice & d = c - > eigen_device < GPUDevice > ( ) ; \ <nl> int64 total_count = input . size ( ) ; \ <nl> - CudaLaunchConfig config = GetCudaLaunchConfig ( total_count , d ) ; \ <nl> + GpuLaunchConfig config = GetCudaLaunchConfig ( total_count , d ) ; \ <nl> TF_CHECK_OK ( CudaLaunchKernel ( PopulationCountKernel < T > , config . block_count , \ <nl> config . thread_per_block , 0 , d . stream ( ) , \ <nl> total_count , input . data ( ) , output . data ( ) ) ) ; \ <nl> mmm a / tensorflow / core / kernels / relu_op_gpu . cu . cc <nl> ppp b / tensorflow / core / kernels / relu_op_gpu . cu . cc <nl> struct ReluGrad < Device , Eigen : : half > { <nl> if ( count = = 0 ) return ; <nl> int32 half2_count = Eigen : : divup ( count , 2 ) ; <nl> constexpr int32 kThreadInBlock = 512 ; <nl> - CudaLaunchConfig config = GetCudaLaunchConfigFixedBlockSize ( <nl> + GpuLaunchConfig config = GetCudaLaunchConfigFixedBlockSize ( <nl> half2_count , d , ReluGradHalfKernel , 0 , kThreadInBlock ) ; <nl> TF_CHECK_OK ( CudaLaunchKernel ( <nl> ReluGradHalfKernel , config . block_count , config . 
thread_per_block , 0 , <nl> struct Relu < Device , qint8 > { <nl> <nl> int32 vect_count = Eigen : : divup ( count , 4 ) ; <nl> constexpr int32 kThreadInBlock = 512 ; <nl> - CudaLaunchConfig config = GetCudaLaunchConfigFixedBlockSize ( <nl> + GpuLaunchConfig config = GetCudaLaunchConfigFixedBlockSize ( <nl> vect_count , d , Relu_int8x4_kernel , 0 , kThreadInBlock ) ; <nl> TF_CHECK_OK ( CudaLaunchKernel ( <nl> Relu_int8x4_kernel , config . block_count , config . thread_per_block , 0 , <nl> mmm a / tensorflow / core / kernels / resize_bilinear_op_gpu . cu . cc <nl> ppp b / tensorflow / core / kernels / resize_bilinear_op_gpu . cu . cc <nl> struct ResizeBilinear < GPUDevice , T > { <nl> const int total_count = batch * out_height * out_width * channels ; <nl> if ( total_count = = 0 ) return ; <nl> <nl> - CudaLaunchConfig config = GetCudaLaunchConfig ( total_count , d ) ; <nl> + GpuLaunchConfig config = GetCudaLaunchConfig ( total_count , d ) ; <nl> if ( half_pixel_centers ) { <nl> TF_CHECK_OK ( CudaLaunchKernel ( <nl> ResizeBilinearKernel < T > , config . block_count , config . thread_per_block , <nl> struct ResizeBilinearGrad < GPUDevice , T > { <nl> const int resized_width = input_grad . dimension ( 2 ) ; <nl> <nl> int total_count ; <nl> - CudaLaunchConfig config ; <nl> + GpuLaunchConfig config ; <nl> <nl> / / Initialize output_grad with all zeros . <nl> total_count = batch * original_height * original_width * channels ; <nl> if ( total_count = = 0 ) return ; <nl> - config = GetCudaLaunchConfig ( total_count , d ) ; <nl> + config = GetGpuLaunchConfig ( total_count , d ) ; <nl> TF_CHECK_OK ( CudaLaunchKernel ( <nl> SetZero < T > , config . block_count , config . thread_per_block , 0 , d . stream ( ) , <nl> config . virtual_thread_count , output_grad . data ( ) ) ) ; <nl> <nl> / / Accumulate . <nl> total_count = batch * resized_height * resized_width * channels ; <nl> - config = GetCudaLaunchConfig ( total_count , d ) ; <nl> + config = GetGpuLaunchConfig ( total_count , d ) ; <nl> if ( half_pixel_centers ) { <nl> TF_CHECK_OK ( CudaLaunchKernel ( <nl> ResizeBilinearGradKernel < T > , config . block_count , <nl> mmm a / tensorflow / core / kernels / resize_nearest_neighbor_op_gpu . cu . cc <nl> ppp b / tensorflow / core / kernels / resize_nearest_neighbor_op_gpu . cu . cc <nl> struct ResizeNearestNeighbor < GPUDevice , T , half_pixel_centers , align_corners > { <nl> const int output_size = batch_size * out_height * out_width * channels ; <nl> if ( output_size = = 0 ) return true ; <nl> <nl> - CudaLaunchConfig config = GetCudaLaunchConfig ( output_size , d ) ; <nl> + GpuLaunchConfig config = GetCudaLaunchConfig ( output_size , d ) ; <nl> if ( half_pixel_centers ) { <nl> TF_CHECK_OK ( CudaLaunchKernel ( <nl> ResizeNearestNeighborNHWC < T > , config . block_count , <nl> struct ResizeNearestNeighborGrad < GPUDevice , T , half_pixel_centers , <nl> <nl> const int output_size = batch_size * channels * out_height * out_width ; <nl> <nl> - CudaLaunchConfig output_config = GetCudaLaunchConfig ( output_size , d ) ; <nl> + GpuLaunchConfig output_config = GetCudaLaunchConfig ( output_size , d ) ; <nl> TF_CHECK_OK ( CudaLaunchKernel ( SetZero < T > , output_config . block_count , <nl> output_config . thread_per_block , 0 , d . stream ( ) , <nl> output_size , output . 
data ( ) ) ) ; <nl> struct ResizeNearestNeighborGrad < GPUDevice , T , half_pixel_centers , <nl> const int input_size = batch_size * channels * in_height * in_width ; <nl> if ( input_size = = 0 ) return true ; <nl> <nl> - CudaLaunchConfig input_config = GetCudaLaunchConfig ( input_size , d ) ; <nl> + GpuLaunchConfig input_config = GetCudaLaunchConfig ( input_size , d ) ; <nl> if ( half_pixel_centers ) { <nl> TF_CHECK_OK ( CudaLaunchKernel ( <nl> ResizeNearestNeighborBackwardNHWC < T > , input_config . block_count , <nl> mmm a / tensorflow / core / kernels / scatter_functor_gpu . cu . h <nl> ppp b / tensorflow / core / kernels / scatter_functor_gpu . cu . h <nl> struct ScatterFunctor < GPUDevice , T , Index , op > { <nl> const Index first_dim_size = params . dimension ( 0 ) ; <nl> const Index indices_size = indices . size ( ) ; <nl> const Index updates_size = updates . size ( ) ; <nl> - CudaLaunchConfig config = GetCudaLaunchConfig ( updates_size , d ) ; <nl> + GpuLaunchConfig config = GetCudaLaunchConfig ( updates_size , d ) ; <nl> TF_CHECK_OK ( CudaLaunchKernel ( <nl> scatter_op_gpu : : ScatterOpCustomKernel < T , Index , op > , config . block_count , <nl> config . thread_per_block , 0 , d . stream ( ) , params . data ( ) , updates . data ( ) , <nl> struct ScatterScalarFunctor < GPUDevice , T , Index , op > { <nl> const Index first_dim_size = params . dimension ( 0 ) ; <nl> const Index indices_size = indices . size ( ) ; <nl> const Index synthesized_updates_size = indices_size * params . dimension ( 1 ) ; <nl> - CudaLaunchConfig config = GetCudaLaunchConfig ( synthesized_updates_size , d ) ; <nl> + GpuLaunchConfig config = GetCudaLaunchConfig ( synthesized_updates_size , d ) ; <nl> TF_CHECK_OK ( CudaLaunchKernel ( <nl> scatter_op_gpu : : ScatterScalarOpCustomKernel < T , Index , op > , <nl> config . block_count , config . thread_per_block , 0 , d . stream ( ) , <nl> mmm a / tensorflow / core / kernels / scatter_nd_op_gpu . cu . cc <nl> ppp b / tensorflow / core / kernels / scatter_nd_op_gpu . cu . cc <nl> struct ScatterNdFunctor < GPUDevice , T , Index , op , IXDIM > { <nl> } <nl> } <nl> <nl> - CudaLaunchConfig config = GetCudaLaunchConfig ( Toutput . size ( ) , d ) ; <nl> + GpuLaunchConfig config = GetCudaLaunchConfig ( Toutput . size ( ) , d ) ; <nl> <nl> TF_CHECK_OK ( CudaLaunchKernel ( ScatterNdOpKernel < T , Index , op , IXDIM > , <nl> config . block_count , config . thread_per_block , 0 , <nl> mmm a / tensorflow / core / kernels / searchsorted_op_gpu . cu . cc <nl> ppp b / tensorflow / core / kernels / searchsorted_op_gpu . cu . cc <nl> struct UpperBoundFunctor < GPUDevice , T , OutType > { <nl> int batch_size , int num_inputs , int num_values , <nl> typename TTypes < OutType , 1 > : : Tensor * output ) { <nl> const cudaStream_t & stream = GetCudaStream ( context ) ; <nl> - CudaLaunchConfig config = <nl> - GetCudaLaunchConfig ( values . size ( ) , context - > eigen_gpu_device ( ) ) ; <nl> + GpuLaunchConfig config = <nl> + GetGpuLaunchConfig ( values . size ( ) , context - > eigen_gpu_device ( ) ) ; <nl> <nl> TF_CHECK_OK ( CudaLaunchKernel ( <nl> UpperBoundKernel < T , OutType > , config . block_count , <nl> struct LowerBoundFunctor < GPUDevice , T , OutType > { <nl> int batch_size , int num_inputs , int num_values , <nl> typename TTypes < OutType , 1 > : : Tensor * output ) { <nl> const cudaStream_t & stream = GetCudaStream ( context ) ; <nl> - CudaLaunchConfig config = <nl> - GetCudaLaunchConfig ( values . 
size ( ) , context - > eigen_gpu_device ( ) ) ; <nl> + GpuLaunchConfig config = <nl> + GetGpuLaunchConfig ( values . size ( ) , context - > eigen_gpu_device ( ) ) ; <nl> <nl> TF_CHECK_OK ( CudaLaunchKernel ( <nl> LowerBoundKernel < T , OutType > , config . block_count , <nl> mmm a / tensorflow / core / kernels / segment_reduction_ops_gpu . cu . cc <nl> ppp b / tensorflow / core / kernels / segment_reduction_ops_gpu . cu . cc <nl> void SegmentSumFunctor < T , Index > : : operator ( ) ( <nl> return ; <nl> } <nl> / / Set ' output ' to zeros . <nl> - CudaLaunchConfig config = GetCudaLaunchConfig ( output . size ( ) , d ) ; <nl> + GpuLaunchConfig config = GetCudaLaunchConfig ( output . size ( ) , d ) ; <nl> TF_CHECK_OK ( CudaLaunchKernel ( SetZero < T > , config . block_count , <nl> config . thread_per_block , 0 , d . stream ( ) , <nl> output . size ( ) , output . data ( ) ) ) ; <nl> void SegmentSumFunctor < T , Index > : : operator ( ) ( <nl> const Index total_stripe_count = <nl> input_inner_dim_size * input_outer_dim_num_stripe ; <nl> <nl> - config = GetCudaLaunchConfig ( total_stripe_count , d ) ; <nl> + config = GetGpuLaunchConfig ( total_stripe_count , d ) ; <nl> TF_CHECK_OK ( CudaLaunchKernel ( <nl> SortedSegmentSumCustomKernel < T , Index , OuterDimTileSize > , <nl> config . block_count , config . thread_per_block , 0 , d . stream ( ) , <nl> struct UnsortedSegmentFunctor < GPUDevice , T , Index , InitialValueF , ReductionF > { <nl> } <nl> / / Set ' output ' to initial value . <nl> GPUDevice d = ctx - > template eigen_device < GPUDevice > ( ) ; <nl> - CudaLaunchConfig config = GetCudaLaunchConfig ( output . size ( ) , d ) ; <nl> + GpuLaunchConfig config = GetCudaLaunchConfig ( output . size ( ) , d ) ; <nl> TF_CHECK_OK ( CudaLaunchKernel ( <nl> SetToValue < T > , config . block_count , config . thread_per_block , 0 , <nl> d . stream ( ) , output . size ( ) , output . data ( ) , InitialValueF ( ) ( ) ) ) ; <nl> struct UnsortedSegmentFunctor < GPUDevice , T , Index , InitialValueF , ReductionF > { <nl> / / * ) ' input_outer_dim_size ' is the total number of segments to process . <nl> const Index input_outer_dim_size = segment_ids . dimension ( 0 ) ; <nl> const Index input_inner_dim_size = data_size / input_outer_dim_size ; <nl> - config = GetCudaLaunchConfig ( data_size , d ) ; <nl> + config = GetGpuLaunchConfig ( data_size , d ) ; <nl> <nl> TF_CHECK_OK ( CudaLaunchKernel ( <nl> UnsortedSegmentCustomKernel < T , Index , ReductionF > , config . block_count , <nl> mmm a / tensorflow / core / kernels / spacetobatch_functor_gpu . cu . cc <nl> ppp b / tensorflow / core / kernels / spacetobatch_functor_gpu . cu . cc <nl> struct SpaceToBatchFunctor < GPUDevice , T , NUM_BLOCK_DIMS , B2S > { <nl> return errors : : InvalidArgument ( <nl> " number of batch_tensor elements exceeds 2 ^ 32 - 1 " ) ; <nl> } <nl> - CudaLaunchConfig config = <nl> - GetCudaLaunchConfig ( static_cast < int32 > ( total_count ) , d ) ; <nl> + GpuLaunchConfig config = <nl> + GetGpuLaunchConfig ( static_cast < int32 > ( total_count ) , d ) ; <nl> return CudaLaunchKernel ( S2B < T , NUM_BLOCK_DIMS , B2S > , config . block_count , <nl> config . thread_per_block , 0 , d . stream ( ) , <nl> config . virtual_thread_count , <nl> mmm a / tensorflow / core / kernels / spacetodepth_op_gpu . cu . cc <nl> ppp b / tensorflow / core / kernels / spacetodepth_op_gpu . cu . 
cc <nl> struct SpaceToDepthOpFunctor < GPUDevice , T , FORMAT_NHWC > { <nl> if ( total_count = = 0 ) { <nl> return ; <nl> } <nl> - CudaLaunchConfig config = GetCudaLaunchConfig ( total_count , d ) ; <nl> + GpuLaunchConfig config = GetCudaLaunchConfig ( total_count , d ) ; <nl> TF_CHECK_OK ( CudaLaunchKernel ( <nl> S2D_NHWC < T > , config . block_count , config . thread_per_block , 0 , d . stream ( ) , <nl> config . virtual_thread_count , input . data ( ) , block_size , batch_size , <nl> struct SpaceToDepthOpFunctor < GPUDevice , T , FORMAT_NCHW > { <nl> if ( total_count = = 0 ) { <nl> return ; <nl> } <nl> - CudaLaunchConfig config = GetCudaLaunchConfig ( total_count , d ) ; <nl> + GpuLaunchConfig config = GetCudaLaunchConfig ( total_count , d ) ; <nl> switch ( block_size ) { <nl> case 2 : <nl> TF_CHECK_OK ( CudaLaunchKernel ( <nl> struct SpaceToDepthOpFunctor < GPUDevice , T , FORMAT_NCHW > { <nl> if ( total_count = = 0 ) { <nl> return ; <nl> } <nl> - CudaLaunchConfig config = GetCudaLaunchConfig ( total_count , d ) ; <nl> + GpuLaunchConfig config = GetCudaLaunchConfig ( total_count , d ) ; <nl> TF_CHECK_OK ( CudaLaunchKernel ( <nl> S2D_NCHW < T > , config . block_count , config . thread_per_block , 0 , d . stream ( ) , <nl> config . virtual_thread_count , input . data ( ) , block_size , output_width , <nl> mmm a / tensorflow / core / kernels / sparse_tensor_dense_matmul_op_gpu . cu . cc <nl> ppp b / tensorflow / core / kernels / sparse_tensor_dense_matmul_op_gpu . cu . cc <nl> struct SparseTensorDenseMatMulFunctor < GPUDevice , T , Tindices , ADJ_A , ADJ_B > { <nl> <nl> / / TODO ( ebrevdo ) : Should this be alpha * nnz instead of <nl> / / out . size ( ) ? Perhaps p * nnz ? <nl> - CudaLaunchConfig config = GetCudaLaunchConfig ( p * nnz , d ) ; <nl> + GpuLaunchConfig config = GetCudaLaunchConfig ( p * nnz , d ) ; <nl> <nl> TF_CHECK_OK ( CudaLaunchKernel ( <nl> SparseTensorDenseMatMulKernel < T , Tindices , ADJ_A , ADJ_B > , <nl> mmm a / tensorflow / core / kernels / split_lib_gpu . cu . cc <nl> ppp b / tensorflow / core / kernels / split_lib_gpu . cu . cc <nl> void SplitOpGPULaunch < T > : : Run ( const Eigen : : GpuDevice & d , const T * input , <nl> int32 prefix_dim_size , int32 split_dim_size , <nl> int32 suffix_dim_size , <nl> const GpuDeviceArrayStruct < T * > & output_ptr_data ) { <nl> - CudaLaunchConfig config = GetCudaLaunchConfig ( <nl> + GpuLaunchConfig config = GetCudaLaunchConfig ( <nl> prefix_dim_size * split_dim_size * suffix_dim_size , d ) ; <nl> <nl> TF_CHECK_OK ( CudaLaunchKernel ( SplitOpKernel < T > , config . block_count , <nl> void SplitVOpGPULaunch < T , IntType > : : Run ( <nl> const GpuDeviceArrayStruct < IntType > & output_scan , <nl> const GpuDeviceArrayStruct < T * > & output_ptr_data ) { <nl> if ( fixed_size ) { <nl> - CudaLaunchConfig config = <nl> - GetCudaLaunchConfig ( total_rows * total_cols , gpu_device ) ; <nl> + GpuLaunchConfig config = <nl> + GetGpuLaunchConfig ( total_rows * total_cols , gpu_device ) ; <nl> <nl> TF_CHECK_OK ( CudaLaunchKernel ( SplitVOpKernel_fixed < T > , config . block_count , <nl> config . thread_per_block , 0 , <nl> mmm a / tensorflow / core / kernels / stateful_random_ops_gpu . cu . cc <nl> ppp b / tensorflow / core / kernels / stateful_random_ops_gpu . cu . 
cc <nl> void UpdateVariableAndFill_Philox < GPUDevice , Distribution > : : operator ( ) ( <nl> / / maximize occupancy <nl> const int kGroupSize = Distribution : : kResultElementCount ; <nl> int work_element_count = ( output_size + kGroupSize - 1 ) / kGroupSize ; <nl> - CudaLaunchConfig cfg = GetCudaLaunchConfig ( work_element_count , d , <nl> - FillKernel < Distribution > , 0 , 0 ) ; <nl> + GpuLaunchConfig cfg = GetCudaLaunchConfig ( work_element_count , d , <nl> + FillKernel < Distribution > , 0 , 0 ) ; <nl> <nl> int zero = 0 ; <nl> cudaMemcpyToSymbol ( thread_counter , & zero , sizeof ( int ) ) ; <nl> mmm a / tensorflow / core / kernels / svd_op_gpu . cu . cc <nl> ppp b / tensorflow / core / kernels / svd_op_gpu . cu . cc <nl> __global__ void ComputeValueOfVKernel ( Cuda2DLaunchConfig config , int64 m , <nl> / / Extracts the sign of V <nl> / / V [ i ] = V [ i ] > = 0 ? 1 : 0 <nl> template < class Scalar > <nl> - __global__ void ExtractSignOfVKernel ( CudaLaunchConfig config , Scalar * V ) { <nl> + __global__ void ExtractSignOfVKernel ( GpuLaunchConfig config , Scalar * V ) { <nl> CUDA_1D_KERNEL_LOOP ( i , config . virtual_thread_count ) { <nl> V [ i ] = V [ i ] > = 0 ? Scalar ( 1 ) : Scalar ( - 1 ) ; <nl> } <nl> class SvdOpGpu : public AsyncOpKernel { <nl> input_copy . flat < Scalar > ( ) . data ( ) , <nl> outputU_ptr , outputS_ptr , outputV_ptr ) ) ; <nl> / / 2 . clamp V to - 1 or + 1 <nl> - CudaLaunchConfig cfg1D = GetCudaLaunchConfig ( batch_size , d ) ; <nl> + GpuLaunchConfig cfg1D = GetCudaLaunchConfig ( batch_size , d ) ; <nl> TF_CHECK_OK ( CudaLaunchKernel ( ExtractSignOfVKernel < Scalar > , <nl> cfg1D . block_count , cfg1D . thread_per_block , 0 , <nl> d . stream ( ) , cfg1D , outputV_ptr ) ) ; <nl> mmm a / tensorflow / core / kernels / tile_functor_gpu . h <nl> ppp b / tensorflow / core / kernels / tile_functor_gpu . h <nl> void TileSimple ( const Eigen : : GpuDevice & d , Tensor * out , const Tensor & in ) { <nl> / / Launch kernel to q [ . . . ] = p [ . . . ] . <nl> const T * p = in . flat < T > ( ) . data ( ) ; <nl> T * q = out - > flat < T > ( ) . data ( ) ; <nl> - CudaLaunchConfig cfg = GetCudaLaunchConfig ( out_nelem , d ) ; <nl> + GpuLaunchConfig cfg = GetCudaLaunchConfig ( out_nelem , d ) ; <nl> TF_CHECK_OK ( <nl> CudaLaunchKernel ( TileKernel < T > , cfg . block_count , cfg . thread_per_block , 0 , <nl> d . stream ( ) , cfg . virtual_thread_count , p , <nl> mmm a / tensorflow / core / kernels / transpose_functor_gpu . cu . cc <nl> ppp b / tensorflow / core / kernels / transpose_functor_gpu . cu . cc <nl> void TransposeSimple ( const GPUDevice & d , const Tensor & in , <nl> / / Launch kernel to q [ . . . ] = p [ . . . ] . <nl> const T * p = reinterpret_cast < const T * > ( in . tensor_data ( ) . data ( ) ) ; <nl> T * q = reinterpret_cast < T * > ( const_cast < char * > ( ( out - > tensor_data ( ) . data ( ) ) ) ) ; <nl> - CudaLaunchConfig cfg = GetCudaLaunchConfig ( nelem , d ) ; <nl> + GpuLaunchConfig cfg = GetCudaLaunchConfig ( nelem , d ) ; <nl> TF_CHECK_OK ( CudaLaunchKernel ( <nl> TransposeKernel < T , conjugate > , cfg . block_count , cfg . thread_per_block , 0 , <nl> d . stream ( ) , cfg . virtual_thread_count , p , <nl> mmm a / tensorflow / core / kernels / tridiagonal_solve_op_gpu . cu . cc <nl> ppp b / tensorflow / core / kernels / tridiagonal_solve_op_gpu . cu . 
cc <nl> class TridiagonalSolveOpGpuLinalg : public LinearAlgebraOp < Scalar > { <nl> void SolveForSizeOneOrTwo ( OpKernelContext * context , const Scalar * diagonals , <nl> const Scalar * rhs , Scalar * output , int m , int k ) { <nl> const Eigen : : GpuDevice & device = context - > eigen_device < Eigen : : GpuDevice > ( ) ; <nl> - CudaLaunchConfig cfg = GetCudaLaunchConfig ( 1 , device ) ; <nl> + GpuLaunchConfig cfg = GetCudaLaunchConfig ( 1 , device ) ; <nl> bool * not_invertible_dev ; <nl> cudaMalloc ( & not_invertible_dev , sizeof ( bool ) ) ; <nl> TF_CHECK_OK ( CudaLaunchKernel ( SolveForSizeOneOrTwoKernel < Scalar > , <nl> mmm a / tensorflow / core / kernels / where_op_gpu . cu . h <nl> ppp b / tensorflow / core / kernels / where_op_gpu . cu . h <nl> struct Where < GPUDevice , NDIM , T , TIndex > { <nl> const Eigen : : array < TIndex , NDIM > strides = <nl> CalculateStrides < TIndex , T , NDIM > ( input ) ; <nl> const TIndex output_rows = output . dimension ( 0 ) ; <nl> - CudaLaunchConfig config = GetCudaLaunchConfig ( output_rows , d ) ; <nl> + GpuLaunchConfig config = GetCudaLaunchConfig ( output_rows , d ) ; <nl> TF_CHECK_OK ( CudaLaunchKernel ( PropagateWhereIndicesKernel < NDIM , TIndex > , <nl> config . block_count , config . thread_per_block , 0 , <nl> d . stream ( ) , output_rows , strides , <nl>
|
Part two of renaming CudaLaunchConfig to GpuLaunchConfig: fix call sites.
|
tensorflow/tensorflow
|
978532afa9d6d63b5dc0b4b57d00dd77bf8e628d
|
2019-05-11T07:20:47Z
|
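For reference, a converted call site follows this shape — a minimal C++ sketch built from the helpers named in the diff above (MyKernel, work_item_count, in_ptr and out_ptr are hypothetical placeholders, and GetCudaLaunchConfig is kept because this commit renames only the config type, not the factory at most sites):

    // Part-two rename pattern: the config type is GpuLaunchConfig, while the
    // factory and launcher keep their Cuda-prefixed names at most call sites.
    GpuLaunchConfig config = GetCudaLaunchConfig(work_item_count, device);
    TF_CHECK_OK(CudaLaunchKernel(MyKernel<float>, config.block_count,
                                 config.thread_per_block, 0, device.stream(),
                                 config.virtual_thread_count, in_ptr, out_ptr));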
new file mode 100644 <nl> index 00000000000 . . 7f856eaef93 <nl> mmm / dev / null <nl> ppp b / dbms / tests / queries / 0_stateless / 00226_deduplication_and_unexpected_parts . reference <nl> <nl> + 2015 - 01 - 01 1 <nl> + 2015 - 01 - 01 1 <nl> new file mode 100644 <nl> index 00000000000 . . 3f9c5b786fb <nl> mmm / dev / null <nl> ppp b / dbms / tests / queries / 0_stateless / 00226_deduplication_and_unexpected_parts . sql <nl> <nl> + DROP TABLE IF EXISTS test . deduplication ; <nl> + CREATE TABLE test . deduplication ( d Date DEFAULT ' 2015 - 01 - 01 ' , x Int8 ) ENGINE = ReplicatedMergeTree ( ' / clickhouse / tables / test / deduplication ' , ' r1 ' , d , x , 1 ) ; <nl> + <nl> + INSERT INTO test . deduplication ( x ) VALUES ( 1 ) ; <nl> + INSERT INTO test . deduplication ( x ) VALUES ( 1 ) ; <nl> + INSERT INTO test . deduplication ( x ) VALUES ( 1 ) ; <nl> + INSERT INTO test . deduplication ( x ) VALUES ( 1 ) ; <nl> + INSERT INTO test . deduplication ( x ) VALUES ( 1 ) ; <nl> + INSERT INTO test . deduplication ( x ) VALUES ( 1 ) ; <nl> + INSERT INTO test . deduplication ( x ) VALUES ( 1 ) ; <nl> + INSERT INTO test . deduplication ( x ) VALUES ( 1 ) ; <nl> + INSERT INTO test . deduplication ( x ) VALUES ( 1 ) ; <nl> + INSERT INTO test . deduplication ( x ) VALUES ( 1 ) ; <nl> + INSERT INTO test . deduplication ( x ) VALUES ( 1 ) ; <nl> + INSERT INTO test . deduplication ( x ) VALUES ( 1 ) ; <nl> + INSERT INTO test . deduplication ( x ) VALUES ( 1 ) ; <nl> + INSERT INTO test . deduplication ( x ) VALUES ( 1 ) ; <nl> + INSERT INTO test . deduplication ( x ) VALUES ( 1 ) ; <nl> + INSERT INTO test . deduplication ( x ) VALUES ( 1 ) ; <nl> + <nl> + SELECT * FROM test . deduplication ; <nl> + <nl> + DETACH TABLE test . deduplication ; <nl> + ATTACH TABLE test . deduplication ( d Date DEFAULT ' 2015 - 01 - 01 ' , x Int8 ) ENGINE = ReplicatedMergeTree ( ' / clickhouse / tables / test / deduplication ' , ' r1 ' , d , x , 1 ) ; <nl> + <nl> + SELECT * FROM test . deduplication ; <nl> + <nl> + DROP TABLE test . deduplication ; <nl>
|
dbms: added test [#METR-17988].
|
ClickHouse/ClickHouse
|
b34d3366fd899b001869ce2d3cb5a8655f378f46
|
2015-09-09T19:33:24Z
|
new file mode 100644 <nl> index 00000000000 . . 91977db7bf7 <nl> mmm / dev / null <nl> ppp b / Doxygen / Examples . AvocadoDB / rest_edge - create - edge <nl> <nl> + > curl - - data @ - - X POST - - dump - http : / / localhost : 8529 / edge ? collection = 7848004 & from = 7848004 / 9289796 & to = 7848004 / 9355332 <nl> + { " e " : 1 } <nl> + <nl> + HTTP / 1 . 1 201 Created <nl> + content - type : application / json ; charset = utf - 8 <nl> + location : / document / 7848004 / 9683012 <nl> + etag : " 9683012 " <nl> + <nl> + { <nl> + " _rev " : 9683012 , <nl> + " _id " : " 7848004 / 9683012 " , <nl> + " error " : false <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 0044b130180 <nl> mmm / dev / null <nl> ppp b / Doxygen / Examples . AvocadoDB / rest_edge - read - edge <nl> <nl> + > curl - X GET - - dump - http : / / localhost : 8529 / edge / 7848004 / 9683012 <nl> + <nl> + HTTP / 1 . 1 200 OK <nl> + content - type : application / json ; charset = utf - 8 <nl> + etag : " 9683012 " <nl> + <nl> + { <nl> + " _from " : " 7848004 / 9289796 " , <nl> + " _rev " : 9683012 , <nl> + " _to " : " 7848004 / 9355332 " , <nl> + " _id " : " 7848004 / 9683012 " , <nl> + " e " : 1 <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 6e8fb0f27be <nl> mmm / dev / null <nl> ppp b / UnitTests / HttpInterface / rest_edge_spec . rb <nl> <nl> + require ' rspec ' <nl> + require ' . / avocadodb . rb ' <nl> + <nl> + describe AvocadoDB do <nl> + prefix = " rest_edge " <nl> + <nl> + context " creating an edge : " do <nl> + <nl> + # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> + # # error handling <nl> + # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> + <nl> + context " error handling : " do <nl> + it " returns an error if url is missing from " do <nl> + cn = " UnitTestsCollectionEdge " <nl> + cmd = " / edge ? collection = # { cn } & createCollection = true " <nl> + body = " { } " <nl> + doc = AvocadoDB . log_post ( " # { prefix } - missing - from - to " , cmd , : body = > body ) <nl> + <nl> + doc . code . should eq ( 400 ) <nl> + doc . parsed_response [ ' error ' ] . should eq ( true ) <nl> + doc . parsed_response [ ' errorNum ' ] . should eq ( 400 ) <nl> + doc . parsed_response [ ' code ' ] . should eq ( 400 ) <nl> + doc . headers [ ' content - type ' ] . should eq ( " application / json ; charset = utf - 8 " ) <nl> + <nl> + AvocadoDB . drop_collection ( cn ) <nl> + end <nl> + <nl> + it " returns an error if from / to are malformed " do <nl> + cn = " UnitTestsCollectionEdge " <nl> + cmd = " / edge ? collection = # { cn } & createCollection = true & from = 1 & to = 1 " <nl> + body = " { } " <nl> + doc = AvocadoDB . log_post ( " # { prefix } - bad - from - to " , cmd , : body = > body ) <nl> + <nl> + doc . code . should eq ( 400 ) <nl> + doc . parsed_response [ ' error ' ] . should eq ( true ) <nl> + doc . parsed_response [ ' errorNum ' ] . should eq ( 400 ) <nl> + doc . parsed_response [ ' code ' ] . should eq ( 400 ) <nl> + doc . headers [ ' content - type ' ] . should eq ( " application / json ; charset = utf - 8 " ) <nl> + <nl> + AvocadoDB . 
drop_collection ( cn ) <nl> + end <nl> + end <nl> + <nl> + # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> + # # known collection name <nl> + # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> + <nl> + context " known collection name : " do <nl> + before do <nl> + @ cn = " UnitTestsCollectionEdge " <nl> + @ cid = AvocadoDB . create_collection ( @ cn ) <nl> + end <nl> + <nl> + after do <nl> + AvocadoDB . drop_collection ( @ cn ) <nl> + end <nl> + <nl> + it " creating an edge " do <nl> + cmd = " / document ? collection = # { @ cid } " <nl> + <nl> + # create first vertex <nl> + body = " { \ " a \ " : 1 } " <nl> + doc = AvocadoDB . log_post ( " # { prefix } - create - edge " , cmd , : body = > body ) <nl> + <nl> + doc . code . should eq ( 201 ) <nl> + doc . parsed_response [ ' _id ' ] . should be_kind_of ( String ) <nl> + doc . headers [ ' content - type ' ] . should eq ( " application / json ; charset = utf - 8 " ) <nl> + <nl> + id1 = doc . parsed_response [ ' _id ' ] <nl> + <nl> + # create second vertex <nl> + body = " { \ " a \ " : 2 } " <nl> + doc = AvocadoDB . log_post ( " # { prefix } - create - edge " , cmd , : body = > body ) <nl> + <nl> + doc . code . should eq ( 201 ) <nl> + doc . parsed_response [ ' _id ' ] . should be_kind_of ( String ) <nl> + doc . headers [ ' content - type ' ] . should eq ( " application / json ; charset = utf - 8 " ) <nl> + <nl> + id2 = doc . parsed_response [ ' _id ' ] <nl> + <nl> + # create edge <nl> + cmd = " / edge ? collection = # { @ cid } & from = # { id1 } & to = # { id2 } " <nl> + body = " { } " <nl> + doc = AvocadoDB . log_post ( " # { prefix } - create - edge " , cmd , : body = > body ) <nl> + <nl> + doc . code . should eq ( 201 ) <nl> + doc . parsed_response [ ' _id ' ] . should be_kind_of ( String ) <nl> + doc . headers [ ' content - type ' ] . should eq ( " application / json ; charset = utf - 8 " ) <nl> + <nl> + id3 = doc . parsed_response [ ' _id ' ] <nl> + <nl> + # check edge <nl> + <nl> + cmd = " / edge / # { id3 } " <nl> + doc = AvocadoDB . log_get ( " # { prefix } - read - edge " , cmd ) <nl> + <nl> + doc . code . should eq ( 200 ) <nl> + doc . parsed_response [ ' _id ' ] . should eq ( id3 ) <nl> + doc . parsed_response [ ' _from ' ] . should eq ( id1 ) <nl> + doc . parsed_response [ ' _to ' ] . should eq ( id2 ) <nl> + doc . headers [ ' content - type ' ] . should eq ( " application / json ; charset = utf - 8 " ) <nl> + <nl> + # create another edge <nl> + cmd = " / edge ? collection = # { @ cid } & from = # { id1 } & to = # { id2 } " <nl> + body = " { \ " e \ " : 1 } " <nl> + doc = AvocadoDB . log_post ( " # { prefix } - create - edge " , cmd , : body = > body ) <nl> + <nl> + doc . code . should eq ( 201 ) <nl> + doc . parsed_response [ ' _id ' ] . should be_kind_of ( String ) <nl> + doc . headers [ ' content - type ' ] . should eq ( " application / json ; charset = utf - 8 " ) <nl> + <nl> + id4 = doc . parsed_response [ ' _id ' ] <nl> + <nl> + # check edge <nl> + <nl> + cmd = " / edge / # { id4 } " <nl> + doc = AvocadoDB . log_get ( " # { prefix } - read - edge " , cmd ) <nl> + <nl> + doc . code . should eq ( 200 ) <nl> + doc . parsed_response [ ' _id ' ] . should eq ( id4 ) <nl> + doc . parsed_response [ ' _from ' ] . should eq ( id1 ) <nl> + doc . parsed_response [ ' _to ' ] . should eq ( id2 ) <nl> + doc . 
parsed_response [ ' e ' ] . should eq ( 1 ) <nl> + doc . headers [ ' content - type ' ] . should eq ( " application / json ; charset = utf - 8 " ) <nl> + end <nl> + end <nl> + <nl> + end <nl> + end <nl> + <nl> + <nl>
|
fixed edges
|
arangodb/arangodb
|
fd79635d21e3c22f6484d7b9de2b304ce7f16ae4
|
2012-04-03T16:36:50Z
|
mmm a / Tools / docker / CNTK - GPU - 1bit - Image / Dockerfile <nl> ppp b / Tools / docker / CNTK - GPU - 1bit - Image / Dockerfile <nl> RUN wget - q https : / / raw . githubusercontent . com / Microsoft / CNTK / master / Scripts / inst <nl> <nl> ENV PATH / root / anaconda3 / envs / cntk - py34 / bin : $ PATH <nl> <nl> + # NCCL <nl> + RUN git clone - - depth = 1 - b master https : / / github . com / NVIDIA / nccl . git & & \ <nl> + cd nccl & & \ <nl> + make CUDA_HOME = / usr / local / cuda PREFIX = / usr / local install <nl> + <nl> WORKDIR / cntk <nl> <nl> # Build CNTK <nl> RUN git clone - - depth = 1 - - recursive - b master https : / / github . com / Microsoft / CNTK . <nl> - - with - gdk - nvml - lib = / usr / local / cuda / lib64 / stubs \ <nl> - - with - kaldi = $ { KALDI_PATH } \ <nl> - - with - py34 - path = / root / anaconda3 / envs / cntk - py34 \ <nl> - - - with - cudnn = / usr / local / cudnn " & & \ <nl> + - - with - cudnn = / usr / local / cudnn \ <nl> + - - with - nccl = / usr / local " & & \ <nl> + git submodule update - - init Source / Multiverso & & \ <nl> mkdir - p build / gpu / release & & \ <nl> cd build / gpu / release & & \ <nl> . . / . . / . . / configure $ CONFIGURE_OPTS - - with - openblas = / usr / local / openblas & & \ <nl> mmm a / Tools / docker / CNTK - GPU - Image / Dockerfile <nl> ppp b / Tools / docker / CNTK - GPU - Image / Dockerfile <nl> RUN wget - q https : / / raw . githubusercontent . com / Microsoft / CNTK / master / Scripts / inst <nl> <nl> ENV PATH / root / anaconda3 / envs / cntk - py34 / bin : $ PATH <nl> <nl> + # NCCL <nl> + RUN git clone - - depth = 1 - b master https : / / github . com / NVIDIA / nccl . git & & \ <nl> + cd nccl & & \ <nl> + make CUDA_HOME = / usr / local / cuda PREFIX = / usr / local install <nl> + <nl> WORKDIR / cntk <nl> <nl> # Build CNTK <nl> RUN git clone - - depth = 1 - b master https : / / github . com / Microsoft / CNTK . git . & & \ <nl> - - with - gdk - nvml - lib = / usr / local / cuda / lib64 / stubs \ <nl> - - with - kaldi = $ { KALDI_PATH } \ <nl> - - with - py34 - path = / root / anaconda3 / envs / cntk - py34 \ <nl> - - - with - cudnn = / usr / local / cudnn " & & \ <nl> + - - with - cudnn = / usr / local / cudnn \ <nl> + - - with - nccl = / usr / local " & & \ <nl> git submodule update - - init Source / Multiverso & & \ <nl> mkdir - p build / gpu / release & & \ <nl> cd build / gpu / release & & \ <nl>
|
Support for NCCL in Dockerfiles.
|
microsoft/CNTK
|
7d2e72d023e24eb00f2e0b347bf23c174ba6656c
|
2016-11-24T01:05:58Z
|
mmm a / folly / io / async / EventBase . cpp <nl> ppp b / folly / io / async / EventBase . cpp <nl> bool EventBase : : runInEventBaseThreadAndWait ( const Cob & fn ) { <nl> return true ; <nl> } <nl> <nl> - bool EventBase : : runAfterDelay ( const Cob & cob , <nl> - int milliseconds , <nl> - TimeoutManager : : InternalEnum in ) { <nl> + void EventBase : : runAfterDelay ( const Cob & cob , <nl> + int milliseconds , <nl> + TimeoutManager : : InternalEnum in ) { <nl> + if ( ! tryRunAfterDelay ( cob , milliseconds , in ) ) { <nl> + folly : : throwSystemError ( <nl> + " error in EventBase : : runAfterDelay ( ) , failed to schedule timeout " ) ; <nl> + } <nl> + } <nl> + <nl> + bool EventBase : : tryRunAfterDelay ( const Cob & cob , <nl> + int milliseconds , <nl> + TimeoutManager : : InternalEnum in ) { <nl> CobTimeout * timeout = new CobTimeout ( this , cob , in ) ; <nl> if ( ! timeout - > scheduleTimeout ( milliseconds ) ) { <nl> delete timeout ; <nl> return false ; <nl> } <nl> - <nl> pendingCobTimeouts_ . push_back ( * timeout ) ; <nl> return true ; <nl> } <nl> mmm a / folly / io / async / EventBase . h <nl> ppp b / folly / io / async / EventBase . h <nl> class EventBase : private boost : : noncopyable , <nl> * Runs the given Cob at some time after the specified number of <nl> * milliseconds . ( No guarantees exactly when . ) <nl> * <nl> - * @ return true iff the cob was successfully registered . <nl> + * Throws a std : : system_error if an error occurs . <nl> * / <nl> - bool runAfterDelay ( <nl> + void runAfterDelay ( <nl> const Cob & c , <nl> int milliseconds , <nl> - TimeoutManager : : InternalEnum = TimeoutManager : : InternalEnum : : NORMAL ) ; <nl> + TimeoutManager : : InternalEnum in = TimeoutManager : : InternalEnum : : NORMAL ) ; <nl> + <nl> + / * * <nl> + * @ see tryRunAfterDelay for more details <nl> + * <nl> + * @ return true iff the cob was successfully registered . <nl> + * <nl> + * * / <nl> + bool tryRunAfterDelay ( <nl> + const Cob & cob , <nl> + int milliseconds , <nl> + TimeoutManager : : InternalEnum in = TimeoutManager : : InternalEnum : : NORMAL ) ; <nl> <nl> / * * <nl> * Set the maximum desired latency in us and provide a callback which will be <nl> mmm a / folly / io / async / test / EventBaseTest . cpp <nl> ppp b / folly / io / async / test / EventBaseTest . cpp <nl> struct ScheduledEvent { <nl> <nl> void scheduleEvents ( EventBase * eventBase , int fd , ScheduledEvent * events ) { <nl> for ( ScheduledEvent * ev = events ; ev - > milliseconds > 0 ; + + ev ) { <nl> - eventBase - > runAfterDelay ( std : : bind ( & ScheduledEvent : : perform , ev , fd ) , <nl> + eventBase - > tryRunAfterDelay ( std : : bind ( & ScheduledEvent : : perform , ev , fd ) , <nl> ev - > milliseconds ) ; <nl> } <nl> } <nl> TEST ( EventBaseTest , ReadPersist ) { <nl> scheduleEvents ( & eb , sp [ 1 ] , events ) ; <nl> <nl> / / Schedule a timeout to unregister the handler after the third write <nl> - eb . runAfterDelay ( std : : bind ( & TestHandler : : unregisterHandler , & handler ) , 85 ) ; <nl> + eb . tryRunAfterDelay ( std : : bind ( & TestHandler : : unregisterHandler , & handler ) , 85 ) ; <nl> <nl> / / Loop <nl> TimePoint start ; <nl> TEST ( EventBaseTest , ReadImmediate ) { <nl> scheduleEvents ( & eb , sp [ 1 ] , events ) ; <nl> <nl> / / Schedule a timeout to unregister the handler <nl> - eb . runAfterDelay ( std : : bind ( & TestHandler : : unregisterHandler , & handler ) , 20 ) ; <nl> + eb . 
tryRunAfterDelay ( std : : bind ( & TestHandler : : unregisterHandler , & handler ) , 20 ) ; <nl> <nl> / / Loop <nl> TimePoint start ; <nl> TEST ( EventBaseTest , WritePersist ) { <nl> scheduleEvents ( & eb , sp [ 1 ] , events ) ; <nl> <nl> / / Schedule a timeout to unregister the handler after the third read <nl> - eb . runAfterDelay ( std : : bind ( & TestHandler : : unregisterHandler , & handler ) , 85 ) ; <nl> + eb . tryRunAfterDelay ( std : : bind ( & TestHandler : : unregisterHandler , & handler ) , 85 ) ; <nl> <nl> / / Loop <nl> TimePoint start ; <nl> TEST ( EventBaseTest , WriteImmediate ) { <nl> <nl> / / Schedule a timeout to unregister the handler <nl> int64_t unregisterTimeout = 40 ; <nl> - eb . runAfterDelay ( std : : bind ( & TestHandler : : unregisterHandler , & handler ) , <nl> + eb . tryRunAfterDelay ( std : : bind ( & TestHandler : : unregisterHandler , & handler ) , <nl> unregisterTimeout ) ; <nl> <nl> / / Loop <nl> TEST ( EventBaseTest , ReadWritePersist ) { <nl> scheduleEvents ( & eb , sp [ 1 ] , events ) ; <nl> <nl> / / Schedule a timeout to unregister the handler <nl> - eb . runAfterDelay ( std : : bind ( & TestHandler : : unregisterHandler , & handler ) , 80 ) ; <nl> + eb . tryRunAfterDelay ( std : : bind ( & TestHandler : : unregisterHandler , & handler ) , 80 ) ; <nl> <nl> / / Loop <nl> TimePoint start ; <nl> TEST ( EventBaseTest , ReadPartial ) { <nl> scheduleEvents ( & eb , sp [ 1 ] , events ) ; <nl> <nl> / / Schedule a timeout to unregister the handler <nl> - eb . runAfterDelay ( std : : bind ( & TestHandler : : unregisterHandler , & handler ) , 30 ) ; <nl> + eb . tryRunAfterDelay ( std : : bind ( & TestHandler : : unregisterHandler , & handler ) , 30 ) ; <nl> <nl> / / Loop <nl> TimePoint start ; <nl> TEST ( EventBaseTest , WritePartial ) { <nl> scheduleEvents ( & eb , sp [ 1 ] , events ) ; <nl> <nl> / / Schedule a timeout to unregister the handler <nl> - eb . runAfterDelay ( std : : bind ( & TestHandler : : unregisterHandler , & handler ) , 30 ) ; <nl> + eb . tryRunAfterDelay ( std : : bind ( & TestHandler : : unregisterHandler , & handler ) , 30 ) ; <nl> <nl> / / Loop <nl> TimePoint start ; <nl> TEST ( EventBaseTest , DestroyHandler ) { <nl> <nl> / / After 10ms , read some data , so that the handler <nl> / / will be notified that it can write . <nl> - eb . runAfterDelay ( std : : bind ( checkReadUntilEmpty , sp [ 1 ] , initialBytesWritten ) , <nl> + eb . tryRunAfterDelay ( std : : bind ( checkReadUntilEmpty , sp [ 1 ] , initialBytesWritten ) , <nl> 10 ) ; <nl> <nl> / / Start a timer to destroy the handler after 25ms <nl> TEST ( EventBaseTest , RunAfterDelay ) { <nl> TimePoint timestamp1 ( false ) ; <nl> TimePoint timestamp2 ( false ) ; <nl> TimePoint timestamp3 ( false ) ; <nl> - eb . runAfterDelay ( std : : bind ( & TimePoint : : reset , & timestamp1 ) , 10 ) ; <nl> - eb . runAfterDelay ( std : : bind ( & TimePoint : : reset , & timestamp2 ) , 20 ) ; <nl> - eb . runAfterDelay ( std : : bind ( & TimePoint : : reset , & timestamp3 ) , 40 ) ; <nl> + eb . tryRunAfterDelay ( std : : bind ( & TimePoint : : reset , & timestamp1 ) , 10 ) ; <nl> + eb . tryRunAfterDelay ( std : : bind ( & TimePoint : : reset , & timestamp2 ) , 20 ) ; <nl> + eb . tryRunAfterDelay ( std : : bind ( & TimePoint : : reset , & timestamp3 ) , 40 ) ; <nl> <nl> TimePoint start ; <nl> eb . 
loop ( ) ; <nl> TEST ( EventBaseTest , RunAfterDelay ) { <nl> } <nl> <nl> / * * <nl> - * Test the behavior of runAfterDelay ( ) when some timeouts are <nl> + * Test the behavior of tryRunAfterDelay ( ) when some timeouts are <nl> * still scheduled when the EventBase is destroyed . <nl> * / <nl> TEST ( EventBaseTest , RunAfterDelayDestruction ) { <nl> TEST ( EventBaseTest , RunAfterDelayDestruction ) { <nl> EventBase eb ; <nl> <nl> / / Run two normal timeouts <nl> - eb . runAfterDelay ( std : : bind ( & TimePoint : : reset , & timestamp1 ) , 10 ) ; <nl> - eb . runAfterDelay ( std : : bind ( & TimePoint : : reset , & timestamp2 ) , 20 ) ; <nl> + eb . tryRunAfterDelay ( std : : bind ( & TimePoint : : reset , & timestamp1 ) , 10 ) ; <nl> + eb . tryRunAfterDelay ( std : : bind ( & TimePoint : : reset , & timestamp2 ) , 20 ) ; <nl> <nl> / / Schedule a timeout to stop the event loop after 40ms <nl> - eb . runAfterDelay ( std : : bind ( & EventBase : : terminateLoopSoon , & eb ) , 40 ) ; <nl> + eb . tryRunAfterDelay ( std : : bind ( & EventBase : : terminateLoopSoon , & eb ) , 40 ) ; <nl> <nl> / / Schedule 2 timeouts that would fire after the event loop stops <nl> - eb . runAfterDelay ( std : : bind ( & TimePoint : : reset , & timestamp3 ) , 80 ) ; <nl> - eb . runAfterDelay ( std : : bind ( & TimePoint : : reset , & timestamp4 ) , 160 ) ; <nl> + eb . tryRunAfterDelay ( std : : bind ( & TimePoint : : reset , & timestamp3 ) , 80 ) ; <nl> + eb . tryRunAfterDelay ( std : : bind ( & TimePoint : : reset , & timestamp4 ) , 160 ) ; <nl> <nl> start . reset ( ) ; <nl> eb . loop ( ) ; <nl> TEST ( EventBaseTest , RescheduleTimeout ) { <nl> & AsyncTimeout : : scheduleTimeout ) ; <nl> <nl> / / after 10ms , reschedule t2 to run sooner than originally scheduled <nl> - eb . runAfterDelay ( std : : bind ( f , & t2 , 10 ) , 10 ) ; <nl> + eb . tryRunAfterDelay ( std : : bind ( f , & t2 , 10 ) , 10 ) ; <nl> / / after 10ms , reschedule t3 to run later than originally scheduled <nl> - eb . runAfterDelay ( std : : bind ( f , & t3 , 40 ) , 10 ) ; <nl> + eb . tryRunAfterDelay ( std : : bind ( f , & t3 , 40 ) , 10 ) ; <nl> <nl> TimePoint start ; <nl> eb . loop ( ) ; <nl> TEST ( EventBaseTest , CancelTimeout ) { <nl> <nl> ReschedulingTimeout t ( & eb , timeouts ) ; <nl> t . start ( ) ; <nl> - eb . runAfterDelay ( std : : bind ( & AsyncTimeout : : cancelTimeout , & t ) , 50 ) ; <nl> + eb . tryRunAfterDelay ( std : : bind ( & AsyncTimeout : : cancelTimeout , & t ) , 50 ) ; <nl> <nl> TimePoint start ; <nl> eb . loop ( ) ; <nl> TEST ( EventBaseTest , RunInThread ) { <nl> / / Once the last thread exits , it will stop the loop ( ) . However , this <nl> / / timeout also stops the loop in case there is a bug performing the normal <nl> / / stop . <nl> - data . evb . runAfterDelay ( std : : bind ( & EventBase : : terminateLoopSoon , & data . evb ) , <nl> + data . evb . tryRunAfterDelay ( std : : bind ( & EventBase : : terminateLoopSoon , & data . evb ) , <nl> 3000 ) ; <nl> <nl> TimePoint start ; <nl> TEST ( EventBaseTest , RunBeforeLoop ) { <nl> TEST ( EventBaseTest , RunBeforeLoopWait ) { <nl> EventBase base ; <nl> CountedLoopCallback cb ( & base , 1 ) ; <nl> - base . runAfterDelay ( [ & ] ( ) { <nl> + base . tryRunAfterDelay ( [ & ] ( ) { <nl> base . terminateLoopSoon ( ) ; <nl> } , 500 ) ; <nl> base . runBeforeLoop ( & cb ) ; <nl> mmm a / folly / wangle / bootstrap / BootstrapTest . cpp <nl> ppp b / folly / wangle / bootstrap / BootstrapTest . 
cpp <nl> class TestClientPipelineFactory : public PipelineFactory < Pipeline > { <nl> CHECK ( sock - > good ( ) ) ; <nl> <nl> / / We probably aren ' t connected immedately , check after a small delay <nl> - EventBaseManager : : get ( ) - > getEventBase ( ) - > runAfterDelay ( [ sock ] ( ) { <nl> + EventBaseManager : : get ( ) - > getEventBase ( ) - > tryRunAfterDelay ( [ sock ] ( ) { <nl> CHECK ( sock - > readable ( ) ) ; <nl> } , 100 ) ; <nl> return nullptr ; <nl>
|
EventBase::runAfterDelay to throw an exception
|
facebook/folly
|
1a61493fec7ca4d8d64e8325ba7bbe82fa78116a
|
2015-03-03T03:30:14Z
|
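The diff above splits one API into two; a short usage sketch of the resulting contract (assuming folly's EventBase as shown in the header changes):

    #include <folly/io/async/EventBase.h>

    void scheduleWork(folly::EventBase& evb) {
      // Throws std::system_error if the timeout cannot be scheduled.
      evb.runAfterDelay([] { /* critical work */ }, 10);
      // The old bool-returning behavior lives on under a new name.
      if (!evb.tryRunAfterDelay([] { /* best-effort work */ }, 20)) {
        // scheduling failed; the caller decides how to recover
      }
    }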
mmm a / torch / csrc / utils / tuple_parser . cpp <nl> ppp b / torch / csrc / utils / tuple_parser . cpp <nl> auto TupleParser : : parse ( std : : shared_ptr < thpp : : Tensor > & x , const std : : string & par <nl> x . reset ( torch : : createTensor ( obj ) - > clone_shallow ( ) ) ; <nl> } <nl> <nl> + auto TupleParser : : parse ( at : : Tensor & x , const std : : string & param_name ) - > void { <nl> + PyObject * obj = next_arg ( ) ; <nl> + x = torch : : createTensorAT ( obj ) ; <nl> + } <nl> + <nl> auto TupleParser : : parse ( std : : vector < int > & x , const std : : string & param_name ) - > void { <nl> PyObject * obj = next_arg ( ) ; <nl> if ( ! PyTuple_Check ( obj ) ) { <nl> mmm a / torch / csrc / utils / tuple_parser . h <nl> ppp b / torch / csrc / utils / tuple_parser . h <nl> <nl> # include < memory > <nl> # include < vector > <nl> # include < THPP / THPP . h > <nl> + # include < ATen / ATen . h > <nl> <nl> namespace torch { <nl> <nl> struct TupleParser { <nl> void parse ( double & x , const std : : string & param_name ) ; <nl> void parse ( std : : unique_ptr < thpp : : Tensor > & x , const std : : string & param_name ) ; <nl> void parse ( std : : shared_ptr < thpp : : Tensor > & x , const std : : string & param_name ) ; <nl> + void parse ( at : : Tensor & x , const std : : string & param_name ) ; <nl> void parse ( std : : vector < int > & x , const std : : string & param_name ) ; <nl> void parse ( std : : string & x , const std : : string & param_name ) ; <nl> <nl>
|
add ATen tensor support to pytorch tuple_parser()
|
pytorch/pytorch
|
33ac9cdc10febdea36bdf8ab95f667e70416160f
|
2017-07-14T17:56:02Z
|
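A sketch of how the new overload is meant to be called alongside the pre-existing ones (the caller and the constructor arguments here are assumptions for illustration, not code from the commit):

    // Hypothetical caller: TupleParser dispatches on the C++ argument type,
    // so an at::Tensor can now be pulled straight from a Python tuple.
    at::Tensor weight;
    std::vector<int> stride;
    torch::TupleParser parser(args);  // args: a PyObject* tuple (assumed ctor shape)
    parser.parse(weight, "weight");   // new at::Tensor overload from this diff
    parser.parse(stride, "stride");   // pre-existing std::vector<int> overload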
mmm a / samples / opengl / opengl_interop . cpp <nl> ppp b / samples / opengl / opengl_interop . cpp <nl> class GLWinApp : public WinApp <nl> } <nl> # endif <nl> <nl> - static float getFps ( ) <nl> - { <nl> - static std : : queue < int64 > time_queue ; <nl> - <nl> - int64 now = cv : : getTickCount ( ) ; <nl> - int64 then = 0 ; <nl> - time_queue . push ( now ) ; <nl> - <nl> - if ( time_queue . size ( ) > = 2 ) <nl> - then = time_queue . front ( ) ; <nl> - <nl> - if ( time_queue . size ( ) > = 25 ) <nl> - time_queue . pop ( ) ; <nl> - <nl> - return time_queue . size ( ) * ( float ) cv : : getTickFrequency ( ) / ( now - then ) ; <nl> - } <nl> - <nl> # if defined ( __linux__ ) <nl> int handle_event ( XEvent & e ) <nl> { <nl> class GLWinApp : public WinApp <nl> return 0 ; <nl> } <nl> <nl> - void print_info ( int mode , float fps , cv : : String oclDevName ) <nl> + void print_info ( int mode , float time , cv : : String oclDevName ) <nl> { <nl> # if defined ( WIN32 ) | | defined ( _WIN32 ) <nl> HDC hDC = m_hDC ; <nl> class GLWinApp : public WinApp <nl> <nl> y + = tm . tmHeight ; <nl> buf [ 0 ] = 0 ; <nl> - sprintf_s ( buf , sizeof ( buf ) - 1 , " FPS : % 2 . 1f " , fps ) ; <nl> + sprintf_s ( buf , sizeof ( buf ) - 1 , " Time : % 2 . 1f " , time ) ; <nl> : : TextOut ( hDC , 0 , y , buf , ( int ) strlen ( buf ) ) ; <nl> <nl> y + = tm . tmHeight ; <nl> class GLWinApp : public WinApp <nl> # elif defined ( __linux__ ) <nl> <nl> char buf [ 256 + 1 ] ; <nl> - snprintf ( buf , sizeof ( buf ) - 1 , " FPS : % 2 . 1f Mode : % s Device : % s " , fps , m_modeStr [ mode ] . c_str ( ) , oclDevName . c_str ( ) ) ; <nl> + snprintf ( buf , sizeof ( buf ) - 1 , " Time : % 2 . 1f Mode : % s Device : % s " , time , m_modeStr [ mode ] . c_str ( ) , oclDevName . c_str ( ) ) ; <nl> XStoreName ( m_display , m_window , buf ) ; <nl> # endif <nl> } <nl> class GLWinApp : public WinApp <nl> } <nl> <nl> bool do_buffer = use_buffer ( ) ; <nl> + <nl> switch ( get_mode ( ) ) <nl> { <nl> - case 0 : <nl> - / / no processing <nl> + case 0 : / / no processing <nl> + m_timer . clear ( ) ; <nl> break ; <nl> <nl> - case 1 : <nl> - { <nl> - / / process video frame on CPU <nl> - cv : : Mat m ( m_height , m_width , CV_8UC4 ) ; <nl> - <nl> - if ( do_buffer ) <nl> - buffer . copyTo ( m ) ; <nl> - else <nl> - texture . copyTo ( m ) ; <nl> - <nl> - if ( ! m_disableProcessing ) <nl> - { <nl> - / / blur texture image with OpenCV on CPU <nl> - cv : : blur ( m , m , cv : : Size ( 15 , 15 ) , cv : : Point ( - 7 , - 7 ) ) ; <nl> - } <nl> - <nl> - if ( do_buffer ) <nl> - buffer . copyFrom ( m , cv : : ogl : : Buffer : : PIXEL_UNPACK_BUFFER , true ) ; <nl> - else <nl> - texture . copyFrom ( m , true ) ; <nl> - <nl> + case 1 : / / process frame on CPU <nl> + processFrameCPU ( texture , buffer ) ; <nl> break ; <nl> - } <nl> - <nl> - case 2 : <nl> - { <nl> - / / process video frame on GPU <nl> - cv : : UMat u ; <nl> - <nl> - if ( do_buffer ) <nl> - u = cv : : ogl : : mapGLBuffer ( buffer ) ; <nl> - else <nl> - cv : : ogl : : convertFromGLTexture2D ( texture , u ) ; <nl> - <nl> - if ( ! 
m_disableProcessing ) <nl> - { <nl> - / / blur texture image with OpenCV on GPU with OpenCL <nl> - cv : : blur ( u , u , cv : : Size ( 15 , 15 ) , cv : : Point ( - 7 , - 7 ) ) ; <nl> - } <nl> - <nl> - if ( do_buffer ) <nl> - cv : : ogl : : unmapGLBuffer ( u ) ; <nl> - else <nl> - cv : : ogl : : convertToGLTexture2D ( u , texture ) ; <nl> <nl> + case 2 : / / process frame on GPU <nl> + processFrameGPU ( texture , buffer ) ; <nl> break ; <nl> - } <nl> - <nl> } / / switch <nl> <nl> if ( do_buffer ) / / buffer - > texture <nl> class GLWinApp : public WinApp <nl> glXSwapBuffers ( m_display , m_window ) ; <nl> # endif <nl> <nl> - print_info ( m_mode , getFps ( ) , m_oclDevName ) ; <nl> + print_info ( m_mode , m_timer . time ( Timer : : UNITS : : MSEC ) , m_oclDevName ) ; <nl> } <nl> <nl> <nl> class GLWinApp : public WinApp <nl> <nl> protected : <nl> <nl> + void processFrameCPU ( cv : : ogl : : Texture2D & texture , cv : : ogl : : Buffer & buffer ) <nl> + { <nl> + cv : : Mat m ( m_height , m_width , CV_8UC4 ) ; <nl> + <nl> + bool do_buffer = use_buffer ( ) ; <nl> + <nl> + m_timer . start ( ) ; <nl> + <nl> + if ( do_buffer ) <nl> + buffer . copyTo ( m ) ; <nl> + else <nl> + texture . copyTo ( m ) ; <nl> + <nl> + if ( ! m_disableProcessing ) <nl> + { <nl> + / / blur texture image with OpenCV on CPU <nl> + cv : : blur ( m , m , cv : : Size ( 15 , 15 ) , cv : : Point ( - 7 , - 7 ) ) ; <nl> + } <nl> + <nl> + if ( do_buffer ) <nl> + buffer . copyFrom ( m , cv : : ogl : : Buffer : : PIXEL_UNPACK_BUFFER , true ) ; <nl> + else <nl> + texture . copyFrom ( m , true ) ; <nl> + <nl> + m_timer . stop ( ) ; <nl> + } <nl> + <nl> + void processFrameGPU ( cv : : ogl : : Texture2D & texture , cv : : ogl : : Buffer & buffer ) <nl> + { <nl> + cv : : UMat u ; <nl> + <nl> + bool do_buffer = use_buffer ( ) ; <nl> + <nl> + m_timer . start ( ) ; <nl> + <nl> + if ( do_buffer ) <nl> + u = cv : : ogl : : mapGLBuffer ( buffer ) ; <nl> + else <nl> + cv : : ogl : : convertFromGLTexture2D ( texture , u ) ; <nl> + <nl> + if ( ! m_disableProcessing ) <nl> + { <nl> + / / blur texture image with OpenCV on GPU with OpenCL <nl> + cv : : blur ( u , u , cv : : Size ( 15 , 15 ) , cv : : Point ( - 7 , - 7 ) ) ; <nl> + } <nl> + <nl> + if ( do_buffer ) <nl> + cv : : ogl : : unmapGLBuffer ( u ) ; <nl> + else <nl> + cv : : ogl : : convertToGLTexture2D ( u , texture ) ; <nl> + <nl> + m_timer . stop ( ) ; <nl> + } <nl> + <nl> # if defined ( WIN32 ) | | defined ( _WIN32 ) <nl> int setup_pixel_format ( ) <nl> { <nl> mmm a / samples / opengl / winapp . hpp <nl> ppp b / samples / opengl / winapp . 
hpp <nl> <nl> <nl> # define SAFE_RELEASE ( p ) if ( p ) { p - > Release ( ) ; p = NULL ; } <nl> <nl> + class Timer <nl> + { <nl> + public : <nl> + enum UNITS <nl> + { <nl> + USEC = 0 , <nl> + MSEC , <nl> + SEC <nl> + } ; <nl> + <nl> + Timer ( ) : m_t0 ( 0 ) , m_diff ( 0 ) <nl> + { <nl> + m_tick_frequency = ( float ) cv : : getTickFrequency ( ) ; <nl> + <nl> + m_unit_mul [ USEC ] = 1000000 ; <nl> + m_unit_mul [ MSEC ] = 1000 ; <nl> + m_unit_mul [ SEC ] = 1 ; <nl> + } <nl> + <nl> + void clear ( ) <nl> + { <nl> + m_t0 = m_diff = 0 ; <nl> + } <nl> + <nl> + void start ( ) <nl> + { <nl> + m_t0 = cv : : getTickCount ( ) ; <nl> + } <nl> + <nl> + void stop ( ) <nl> + { <nl> + m_diff = cv : : getTickCount ( ) - m_t0 ; <nl> + } <nl> + <nl> + float time ( UNITS u = UNITS : : MSEC ) <nl> + { <nl> + float sec = m_diff / m_tick_frequency ; <nl> + <nl> + return sec * m_unit_mul [ u ] ; <nl> + } <nl> + <nl> + public : <nl> + float m_tick_frequency ; <nl> + int64 m_t0 ; <nl> + int64 m_diff ; <nl> + int m_unit_mul [ 3 ] ; <nl> + } ; <nl> + <nl> class WinApp <nl> { <nl> public : <nl> class WinApp <nl> int m_width ; <nl> int m_height ; <nl> std : : string m_window_name ; <nl> + Timer m_timer ; <nl> } ; <nl>
|
change display of fps to time (ms)
|
opencv/opencv
|
ea102901530755948bb7c0ea83dddc36fdb5ab4b
|
2015-07-30T16:57:11Z
|
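The Timer class added in winapp.hpp above replaces the rolling FPS estimate with a per-frame measurement; the intended pattern, condensed from the diff:

    Timer timer;  // helper added in winapp.hpp
    timer.start();
    // ... process one frame on CPU or GPU ...
    timer.stop();
    // Per-frame cost in milliseconds, shown by print_info() instead of FPS.
    float ms = timer.time(Timer::UNITS::MSEC);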
mmm a / test / common / utils . py <nl> ppp b / test / common / utils . py <nl> def latest_build_dir ( check_executable = True ) : <nl> <nl> masterBuildDir = os . path . join ( project_root_dir ( ) , ' build ' ) <nl> <nl> + if not os . path . isdir ( masterBuildDir ) : <nl> + raise test_exceptions . NotBuiltException ( detail = ' no version of this project have yet been built ' ) <nl> + <nl> # - - find the build directory with the most recent mtime <nl> <nl> canidatePath = None <nl>
|
merging: polyglot test fails if not built
|
rethinkdb/rethinkdb
|
ad45dbc8566bac6dfd0d3db5b20109fd83c810b7
|
2014-06-07T00:46:54Z
|
mmm a / Makefile <nl> ppp b / Makefile <nl> ifdef ASSERT_STATUS_CHECKED <nl> column_family_test \ <nl> file_reader_writer_test \ <nl> corruption_test \ <nl> + db_universal_compaction_test \ <nl> <nl> ifeq ( $ ( USE_FOLLY_DISTRIBUTED_MUTEX ) , 1 ) <nl> TESTS_PASSING_ASC + = folly_synchronization_distributed_mutex_test <nl> mmm a / db / db_impl / db_impl_compaction_flush . cc <nl> ppp b / db / db_impl / db_impl_compaction_flush . cc <nl> Status DBImpl : : CompactFilesImpl ( <nl> & job_context - > superversion_contexts [ 0 ] , <nl> * c - > mutable_cf_options ( ) ) ; <nl> } <nl> + / / status above captures any error during compaction_job . Install , so its ok <nl> + / / not check compaction_job . io_status ( ) explicitly if we ' re not calling <nl> + / / SetBGError <nl> + compaction_job . io_status ( ) . PermitUncheckedError ( ) ; <nl> c - > ReleaseCompactionFiles ( s ) ; <nl> # ifndef ROCKSDB_LITE <nl> / / Need to make sure SstFileManager does its bookkeeping <nl> mmm a / db / db_test_util . cc <nl> ppp b / db / db_test_util . cc <nl> void DBTestBase : : Destroy ( const Options & options , bool delete_cf_paths ) { <nl> if ( delete_cf_paths ) { <nl> for ( size_t i = 0 ; i < handles_ . size ( ) ; + + i ) { <nl> ColumnFamilyDescriptor cfdescriptor ; <nl> - handles_ [ i ] - > GetDescriptor ( & cfdescriptor ) ; <nl> + / / GetDescriptor is not implemented for ROCKSDB_LITE <nl> + handles_ [ i ] - > GetDescriptor ( & cfdescriptor ) . PermitUncheckedError ( ) ; <nl> column_families . push_back ( cfdescriptor ) ; <nl> } <nl> } <nl> mmm a / db / db_universal_compaction_test . cc <nl> ppp b / db / db_universal_compaction_test . cc <nl> TEST_P ( DBTestUniversalCompaction , UniversalCompactionSingleSortedRun ) { <nl> for ( int num = 0 ; num < 16 ; num + + ) { <nl> / / Write 100KB file . And immediately it should be compacted to one file . <nl> GenerateNewFile ( & rnd , & key_idx ) ; <nl> - dbfull ( ) - > TEST_WaitForCompact ( ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ; <nl> ASSERT_EQ ( NumSortedRuns ( 0 ) , 1 ) ; <nl> } <nl> ASSERT_OK ( Put ( Key ( key_idx ) , " " ) ) ; <nl> - dbfull ( ) - > TEST_WaitForCompact ( ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ; <nl> ASSERT_EQ ( NumSortedRuns ( 0 ) , 1 ) ; <nl> } <nl> <nl> TEST_P ( DBTestUniversalCompaction , OptimizeFiltersForHits ) { <nl> Env : : Priority : : LOW ) ; <nl> <nl> for ( int num = 0 ; num < options . level0_file_num_compaction_trigger ; num + + ) { <nl> - Put ( Key ( num * 10 ) , " val " ) ; <nl> + ASSERT_OK ( Put ( Key ( num * 10 ) , " val " ) ) ; <nl> if ( num ) { <nl> - dbfull ( ) - > TEST_WaitForFlushMemTable ( ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForFlushMemTable ( ) ) ; <nl> } <nl> - Put ( Key ( 30 + num * 10 ) , " val " ) ; <nl> - Put ( Key ( 60 + num * 10 ) , " val " ) ; <nl> + ASSERT_OK ( Put ( Key ( 30 + num * 10 ) , " val " ) ) ; <nl> + ASSERT_OK ( Put ( Key ( 60 + num * 10 ) , " val " ) ) ; <nl> } <nl> - Put ( " " , " " ) ; <nl> - dbfull ( ) - > TEST_WaitForFlushMemTable ( ) ; <nl> + ASSERT_OK ( Put ( " " , " " ) ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForFlushMemTable ( ) ) ; <nl> <nl> / / Query set of non existing keys <nl> for ( int i = 5 ; i < 90 ; i + = 10 ) { <nl> TEST_P ( DBTestUniversalCompaction , OptimizeFiltersForHits ) { <nl> <nl> / / Unblock compaction and wait it for happening . <nl> sleeping_task_low . 
WakeUp ( ) ; <nl> - dbfull ( ) - > TEST_WaitForCompact ( ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ; <nl> <nl> / / The same queries will not trigger bloom filter <nl> for ( int i = 5 ; i < 90 ; i + = 10 ) { <nl> TEST_P ( DBTestUniversalCompaction , UniversalCompactionTrigger ) { <nl> / / Now we have 3 files at level 0 , with size 4 , 2 . 4 , 2 . Let ' s generate a <nl> / / new file of size 1 . <nl> GenerateNewFile ( 1 , & rnd , & key_idx ) ; <nl> - dbfull ( ) - > TEST_WaitForCompact ( ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ; <nl> / / Level - 0 compaction is triggered , but no file will be picked up . <nl> ASSERT_EQ ( NumSortedRuns ( 1 ) , 4 ) ; <nl> <nl> TEST_P ( DBTestUniversalCompaction , UniversalCompactionTrigger ) { <nl> / / a new file of size 1 . <nl> filter - > expect_full_compaction_ . store ( true ) ; <nl> GenerateNewFile ( 1 , & rnd , & key_idx ) ; <nl> - dbfull ( ) - > TEST_WaitForCompact ( ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ; <nl> / / All files at level 0 will be compacted into a single one . <nl> ASSERT_EQ ( NumSortedRuns ( 1 ) , 1 ) ; <nl> <nl> TEST_P ( DBTestUniversalCompaction , UniversalCompactionSizeAmplification ) { <nl> ASSERT_OK ( Put ( 1 , Key ( key_idx ) , rnd . RandomString ( 10000 ) ) ) ; <nl> key_idx + + ; <nl> } <nl> - dbfull ( ) - > TEST_WaitForFlushMemTable ( handles_ [ 1 ] ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForFlushMemTable ( handles_ [ 1 ] ) ) ; <nl> ASSERT_EQ ( NumSortedRuns ( 1 ) , num + 1 ) ; <nl> } <nl> ASSERT_EQ ( NumSortedRuns ( 1 ) , 2 ) ; <nl> TEST_P ( DBTestUniversalCompaction , UniversalCompactionSizeAmplification ) { <nl> / / but will instead trigger size amplification . <nl> ASSERT_OK ( Flush ( 1 ) ) ; <nl> <nl> - dbfull ( ) - > TEST_WaitForCompact ( ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ; <nl> <nl> / / Verify that size amplification did occur <nl> ASSERT_EQ ( NumSortedRuns ( 1 ) , 1 ) ; <nl> TEST_P ( DBTestUniversalCompaction , DynamicUniversalCompactionSizeAmplification ) { <nl> ASSERT_OK ( Put ( 1 , Key ( key_idx ) , rnd . RandomString ( 10000 ) ) ) ; <nl> key_idx + + ; <nl> } <nl> - dbfull ( ) - > TEST_WaitForFlushMemTable ( handles_ [ 1 ] ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForFlushMemTable ( handles_ [ 1 ] ) ) ; <nl> ASSERT_EQ ( NumSortedRuns ( 1 ) , num + 1 ) ; <nl> } <nl> ASSERT_EQ ( NumSortedRuns ( 1 ) , 2 ) ; <nl> TEST_P ( DBTestUniversalCompaction , DynamicUniversalCompactionSizeAmplification ) { <nl> / / but could instead trigger size amplification if it ' s set <nl> / / to 110 . <nl> ASSERT_OK ( Flush ( 1 ) ) ; <nl> - dbfull ( ) - > TEST_WaitForCompact ( ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ; <nl> / / Verify compaction did not happen <nl> ASSERT_EQ ( NumSortedRuns ( 1 ) , 3 ) ; <nl> <nl> TEST_P ( DBTestUniversalCompaction , DynamicUniversalCompactionSizeAmplification ) { <nl> ASSERT_EQ ( 110u , mutable_cf_options . compaction_options_universal <nl> . max_size_amplification_percent ) ; <nl> <nl> - dbfull ( ) - > TEST_WaitForCompact ( ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ; <nl> / / Verify that size amplification did happen <nl> ASSERT_EQ ( NumSortedRuns ( 1 ) , 1 ) ; <nl> ASSERT_EQ ( total_picked_compactions , 1 ) ; <nl> TEST_P ( DBTestUniversalCompaction , DynamicUniversalCompactionReadAmplification ) { <nl> ASSERT_OK ( Put ( 1 , Key ( key_idx ) , rnd . 
RandomString ( 10000 ) ) ) ; <nl> key_idx + + ; <nl> } <nl> - dbfull ( ) - > TEST_WaitForFlushMemTable ( handles_ [ 1 ] ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForFlushMemTable ( handles_ [ 1 ] ) ) ; <nl> ASSERT_EQ ( NumSortedRuns ( 1 ) , num + 1 ) ; <nl> } <nl> ASSERT_EQ ( NumSortedRuns ( 1 ) , options . level0_file_num_compaction_trigger ) ; <nl> TEST_P ( DBTestUniversalCompaction , DynamicUniversalCompactionReadAmplification ) { <nl> / / Flush whatever is remaining in memtable . This is typically small , about <nl> / / 30KB . <nl> ASSERT_OK ( Flush ( 1 ) ) ; <nl> - dbfull ( ) - > TEST_WaitForCompact ( ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ; <nl> / / Verify compaction did not happen <nl> ASSERT_EQ ( NumSortedRuns ( 1 ) , options . level0_file_num_compaction_trigger + 1 ) ; <nl> ASSERT_EQ ( total_picked_compactions , 0 ) ; <nl> TEST_P ( DBTestUniversalCompaction , DynamicUniversalCompactionReadAmplification ) { <nl> ASSERT_EQ ( mutable_cf_options . compaction_options_universal . max_merge_width , <nl> 2u ) ; <nl> <nl> - dbfull ( ) - > TEST_WaitForCompact ( ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ; <nl> <nl> / / Files in L0 are approx : 0 . 3 ( 30KB ) , 1 , 1 , 1 . <nl> / / On compaction : the files are below the size amp threshold , so we <nl> TEST_P ( DBTestUniversalCompaction , CompactFilesOnUniversalCompaction ) { <nl> for ( int key = 1024 * kEntriesPerBuffer ; key > = 0 ; - - key ) { <nl> ASSERT_OK ( Put ( 1 , ToString ( key ) , rnd . RandomString ( kTestValueSize ) ) ) ; <nl> } <nl> - dbfull ( ) - > TEST_WaitForFlushMemTable ( handles_ [ 1 ] ) ; <nl> - dbfull ( ) - > TEST_WaitForCompact ( ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForFlushMemTable ( handles_ [ 1 ] ) ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ; <nl> ColumnFamilyMetaData cf_meta ; <nl> dbfull ( ) - > GetColumnFamilyMetaData ( handles_ [ 1 ] , & cf_meta ) ; <nl> std : : vector < std : : string > compaction_input_file_names ; <nl> TEST_P ( DBTestUniversalCompaction , UniversalCompactionTargetLevel ) { <nl> compact_options . change_level = true ; <nl> compact_options . target_level = 4 ; <nl> compact_options . 
exclusive_manual_compaction = exclusive_manual_compaction_ ; <nl> - db_ - > CompactRange ( compact_options , nullptr , nullptr ) ; <nl> + ASSERT_OK ( db_ - > CompactRange ( compact_options , nullptr , nullptr ) ) ; <nl> ASSERT_EQ ( " 0 , 0 , 0 , 0 , 1 " , FilesPerLevel ( 0 ) ) ; <nl> } <nl> <nl> TEST_P ( DBTestUniversalCompactionMultiLevels , UniversalCompactionMultiLevels ) { <nl> ASSERT_OK ( Put ( 1 , Key ( i % num_keys ) , Key ( i ) ) ) ; <nl> } <nl> <nl> - dbfull ( ) - > TEST_WaitForCompact ( ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ; <nl> <nl> for ( int i = num_keys ; i < num_keys * 2 ; i + + ) { <nl> ASSERT_EQ ( Get ( 1 , Key ( i % num_keys ) ) , Key ( i ) ) ; <nl> TEST_P ( DBTestUniversalCompactionMultiLevels , UniversalCompactionTrivialMove ) { <nl> std : : vector < std : : string > values ; <nl> <nl> ASSERT_OK ( Flush ( 1 ) ) ; <nl> - dbfull ( ) - > TEST_WaitForCompact ( ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ; <nl> <nl> ASSERT_GT ( trivial_move , 0 ) ; <nl> ASSERT_GT ( non_trivial_move , 0 ) ; <nl> TEST_P ( DBTestUniversalCompactionParallel , UniversalCompactionParallel ) { <nl> for ( int i = 0 ; i < num_keys * 2 ; i + + ) { <nl> ASSERT_OK ( Put ( 1 , Key ( i % num_keys ) , Key ( i ) ) ) ; <nl> } <nl> - dbfull ( ) - > TEST_WaitForCompact ( ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ; <nl> <nl> ROCKSDB_NAMESPACE : : SyncPoint : : GetInstance ( ) - > DisableProcessing ( ) ; <nl> ASSERT_EQ ( num_compactions_running . load ( ) , 0 ) ; <nl> TEST_P ( DBTestUniversalCompactionParallel , PickByFileNumberBug ) { <nl> <nl> / / Hold the 1st compaction from finishing <nl> TEST_SYNC_POINT ( " DBTestUniversalCompactionParallel : : PickByFileNumberBug : 2 " ) ; <nl> - dbfull ( ) - > TEST_WaitForCompact ( ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ; <nl> <nl> / / There should only be one picked compaction as the score drops below one <nl> / / after the first one is picked . <nl> TEST_P ( DBTestUniversalCompactionParallel , PickByFileNumberBug ) { <nl> <nl> / / Hold the 1st and 2nd compaction from finishing <nl> TEST_SYNC_POINT ( " DBTestUniversalCompactionParallel : : PickByFileNumberBug : 2 " ) ; <nl> - dbfull ( ) - > TEST_WaitForCompact ( ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ; <nl> <nl> / / This time we will trigger a compaction because of size ratio and <nl> / / another compaction because of number of files that are not compacted <nl> TEST_P ( DBTestUniversalCompaction , UniversalCompactionOptions ) { <nl> ASSERT_OK ( Put ( 1 , Key ( key_idx ) , rnd . RandomString ( 990 ) ) ) ; <nl> key_idx + + ; <nl> } <nl> - dbfull ( ) - > TEST_WaitForFlushMemTable ( handles_ [ 1 ] ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForFlushMemTable ( handles_ [ 1 ] ) ) ; <nl> <nl> if ( num < options . level0_file_num_compaction_trigger - 1 ) { <nl> ASSERT_EQ ( NumSortedRuns ( 1 ) , num + 1 ) ; <nl> } <nl> } <nl> <nl> - dbfull ( ) - > TEST_WaitForCompact ( ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ; <nl> ASSERT_EQ ( NumSortedRuns ( 1 ) , 1 ) ; <nl> } <nl> <nl> TEST_P ( DBTestUniversalCompaction , UniversalCompactionStopStyleSimilarSize ) { <nl> ASSERT_OK ( Put ( Key ( key_idx ) , rnd . 
RandomString ( 990 ) ) ) ; <nl> key_idx + + ; <nl> } <nl> - dbfull ( ) - > TEST_WaitForFlushMemTable ( ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForFlushMemTable ( ) ) ; <nl> ASSERT_EQ ( NumSortedRuns ( ) , num + 1 ) ; <nl> } <nl> <nl> TEST_P ( DBTestUniversalCompaction , UniversalCompactionStopStyleSimilarSize ) { <nl> ASSERT_OK ( Put ( Key ( key_idx ) , rnd . RandomString ( 990 ) ) ) ; <nl> key_idx + + ; <nl> } <nl> - dbfull ( ) - > TEST_WaitForCompact ( ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ; <nl> / / Suppose each file flushed from mem table has size 1 . Now we compact <nl> / / ( level0_file_num_compaction_trigger + 1 ) = 4 files and should have a big <nl> / / file of size 4 . <nl> TEST_P ( DBTestUniversalCompaction , UniversalCompactionStopStyleSimilarSize ) { <nl> ASSERT_OK ( Put ( Key ( key_idx ) , rnd . RandomString ( 990 ) ) ) ; <nl> key_idx + + ; <nl> } <nl> - dbfull ( ) - > TEST_WaitForFlushMemTable ( ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForFlushMemTable ( ) ) ; <nl> ASSERT_EQ ( NumSortedRuns ( ) , num + 3 ) ; <nl> } <nl> <nl> TEST_P ( DBTestUniversalCompaction , UniversalCompactionStopStyleSimilarSize ) { <nl> ASSERT_OK ( Put ( Key ( key_idx ) , rnd . RandomString ( 990 ) ) ) ; <nl> key_idx + + ; <nl> } <nl> - dbfull ( ) - > TEST_WaitForCompact ( ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ; <nl> / / Before compaction , we have 4 files at level 0 , with size 4 , 0 . 4 , 1 , 1 . <nl> / / After compaction , we should have 3 files , with size 4 , 0 . 4 , 2 . <nl> ASSERT_EQ ( NumSortedRuns ( ) , 3 ) ; <nl> TEST_P ( DBTestUniversalCompaction , UniversalCompactionStopStyleSimilarSize ) { <nl> ASSERT_OK ( Put ( Key ( key_idx ) , rnd . RandomString ( 990 ) ) ) ; <nl> key_idx + + ; <nl> } <nl> - dbfull ( ) - > TEST_WaitForCompact ( ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ; <nl> / / Level - 0 compaction is triggered , but no file will be picked up . <nl> ASSERT_EQ ( NumSortedRuns ( ) , 4 ) ; <nl> } <nl> TEST_P ( DBTestUniversalCompaction , UniversalCompactionCompressRatio1 ) { <nl> ASSERT_OK ( Put ( Key ( key_idx ) , CompressibleString ( & rnd , 10000 ) ) ) ; <nl> key_idx + + ; <nl> } <nl> - dbfull ( ) - > TEST_WaitForFlushMemTable ( ) ; <nl> - dbfull ( ) - > TEST_WaitForCompact ( ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForFlushMemTable ( ) ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ; <nl> } <nl> ASSERT_LT ( TotalSize ( ) , 110000U * 2 * 0 . 9 ) ; <nl> <nl> TEST_P ( DBTestUniversalCompaction , UniversalCompactionCompressRatio1 ) { <nl> ASSERT_OK ( Put ( Key ( key_idx ) , CompressibleString ( & rnd , 10000 ) ) ) ; <nl> key_idx + + ; <nl> } <nl> - dbfull ( ) - > TEST_WaitForFlushMemTable ( ) ; <nl> - dbfull ( ) - > TEST_WaitForCompact ( ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForFlushMemTable ( ) ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ; <nl> } <nl> ASSERT_LT ( TotalSize ( ) , 110000 * 4 * 0 . 9 ) ; <nl> <nl> TEST_P ( DBTestUniversalCompaction , UniversalCompactionCompressRatio1 ) { <nl> ASSERT_OK ( Put ( Key ( key_idx ) , CompressibleString ( & rnd , 10000 ) ) ) ; <nl> key_idx + + ; <nl> } <nl> - dbfull ( ) - > TEST_WaitForFlushMemTable ( ) ; <nl> - dbfull ( ) - > TEST_WaitForCompact ( ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForFlushMemTable ( ) ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ; <nl> } <nl> ASSERT_LT ( TotalSize ( ) , 110000 * 6 * 0 . 
9 ) ; <nl> <nl> TEST_P ( DBTestUniversalCompaction , UniversalCompactionCompressRatio1 ) { <nl> ASSERT_OK ( Put ( Key ( key_idx ) , CompressibleString ( & rnd , 10000 ) ) ) ; <nl> key_idx + + ; <nl> } <nl> - dbfull ( ) - > TEST_WaitForFlushMemTable ( ) ; <nl> - dbfull ( ) - > TEST_WaitForCompact ( ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForFlushMemTable ( ) ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ; <nl> } <nl> ASSERT_GT ( TotalSize ( ) , 110000 * 11 * 0 . 8 + 110000 * 2 ) ; <nl> } <nl> TEST_P ( DBTestUniversalCompaction , UniversalCompactionCompressRatio2 ) { <nl> ASSERT_OK ( Put ( Key ( key_idx ) , CompressibleString ( & rnd , 10000 ) ) ) ; <nl> key_idx + + ; <nl> } <nl> - dbfull ( ) - > TEST_WaitForFlushMemTable ( ) ; <nl> - dbfull ( ) - > TEST_WaitForCompact ( ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForFlushMemTable ( ) ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ; <nl> } <nl> ASSERT_LT ( TotalSize ( ) , 120000U * 12 * 0 . 82 + 120000 * 2 ) ; <nl> } <nl> TEST_P ( DBTestUniversalCompaction , UniversalCompactionTrivialMoveTest1 ) { <nl> std : : vector < std : : string > values ; <nl> <nl> ASSERT_OK ( Flush ( 1 ) ) ; <nl> - dbfull ( ) - > TEST_WaitForCompact ( ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ; <nl> <nl> ASSERT_GT ( trivial_move , 0 ) ; <nl> ASSERT_GT ( non_trivial_move , 0 ) ; <nl> TEST_P ( DBTestUniversalCompaction , UniversalCompactionTrivialMoveTest2 ) { <nl> std : : vector < std : : string > values ; <nl> <nl> ASSERT_OK ( Flush ( 1 ) ) ; <nl> - dbfull ( ) - > TEST_WaitForCompact ( ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ; <nl> <nl> ASSERT_GT ( trivial_move , 0 ) ; <nl> <nl> TEST_P ( DBTestUniversalCompaction , UniversalCompactionFourPaths ) { <nl> options . num_levels = 1 ; <nl> <nl> std : : vector < std : : string > filenames ; <nl> - env_ - > GetChildren ( options . db_paths [ 1 ] . path , & filenames ) ; <nl> - / / Delete archival files . <nl> - for ( size_t i = 0 ; i < filenames . size ( ) ; + + i ) { <nl> - env_ - > DeleteFile ( options . db_paths [ 1 ] . path + " / " + filenames [ i ] ) ; <nl> + if ( env_ - > GetChildren ( options . db_paths [ 1 ] . path , & filenames ) . ok ( ) ) { <nl> + / / Delete archival files . <nl> + for ( size_t i = 0 ; i < filenames . size ( ) ; + + i ) { <nl> + ASSERT_OK ( <nl> + env_ - > DeleteFile ( options . db_paths [ 1 ] . path + " / " + filenames [ i ] ) ) ; <nl> + } <nl> + ASSERT_OK ( env_ - > DeleteDir ( options . db_paths [ 1 ] . path ) ) ; <nl> } <nl> - env_ - > DeleteDir ( options . db_paths [ 1 ] . path ) ; <nl> Reopen ( options ) ; <nl> <nl> Random rnd ( 301 ) ; <nl> TEST_P ( DBTestUniversalCompaction , IncreaseUniversalCompactionNumLevels ) { <nl> for ( int i = 0 ; i < = max_key1 ; i + + ) { <nl> / / each value is 10K <nl> ASSERT_OK ( Put ( 1 , Key ( i ) , rnd . RandomString ( 10000 ) ) ) ; <nl> - dbfull ( ) - > TEST_WaitForFlushMemTable ( handles_ [ 1 ] ) ; <nl> - dbfull ( ) - > TEST_WaitForCompact ( ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForFlushMemTable ( handles_ [ 1 ] ) ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ; <nl> } <nl> ASSERT_OK ( Flush ( 1 ) ) ; <nl> - dbfull ( ) - > TEST_WaitForCompact ( ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ; <nl> <nl> / / Stage 2 : reopen with universal compaction , num_levels = 4 <nl> options . 
compaction_style = kCompactionStyleUniversal ; <nl> TEST_P ( DBTestUniversalCompaction , IncreaseUniversalCompactionNumLevels ) { <nl> for ( int i = max_key1 + 1 ; i < = max_key2 ; i + + ) { <nl> / / each value is 10K <nl> ASSERT_OK ( Put ( 1 , Key ( i ) , rnd . RandomString ( 10000 ) ) ) ; <nl> - dbfull ( ) - > TEST_WaitForFlushMemTable ( handles_ [ 1 ] ) ; <nl> - dbfull ( ) - > TEST_WaitForCompact ( ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForFlushMemTable ( handles_ [ 1 ] ) ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ; <nl> } <nl> ASSERT_OK ( Flush ( 1 ) ) ; <nl> - dbfull ( ) - > TEST_WaitForCompact ( ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ; <nl> <nl> verify_func ( max_key2 ) ; <nl> / / Compaction to non - L0 has happened . <nl> TEST_P ( DBTestUniversalCompaction , IncreaseUniversalCompactionNumLevels ) { <nl> compact_options . change_level = true ; <nl> compact_options . target_level = 0 ; <nl> compact_options . exclusive_manual_compaction = exclusive_manual_compaction_ ; <nl> - dbfull ( ) - > CompactRange ( compact_options , handles_ [ 1 ] , nullptr , nullptr ) ; <nl> + ASSERT_OK ( <nl> + dbfull ( ) - > CompactRange ( compact_options , handles_ [ 1 ] , nullptr , nullptr ) ) ; <nl> / / Need to restart it once to remove higher level records in manifest . <nl> ReopenWithColumnFamilies ( { " default " , " pikachu " } , options ) ; <nl> / / Final reopen <nl> TEST_P ( DBTestUniversalCompaction , IncreaseUniversalCompactionNumLevels ) { <nl> for ( int i = max_key2 + 1 ; i < = max_key3 ; i + + ) { <nl> / / each value is 10K <nl> ASSERT_OK ( Put ( 1 , Key ( i ) , rnd . RandomString ( 10000 ) ) ) ; <nl> - dbfull ( ) - > TEST_WaitForFlushMemTable ( handles_ [ 1 ] ) ; <nl> - dbfull ( ) - > TEST_WaitForCompact ( ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForFlushMemTable ( handles_ [ 1 ] ) ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ; <nl> } <nl> ASSERT_OK ( Flush ( 1 ) ) ; <nl> - dbfull ( ) - > TEST_WaitForCompact ( ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ; <nl> verify_func ( max_key3 ) ; <nl> } <nl> <nl> TEST_P ( DBTestUniversalCompaction , UniversalCompactionSecondPathRatio ) { <nl> new SpecialSkipListFactory ( KNumKeysByGenerateNewFile - 1 ) ) ; <nl> <nl> std : : vector < std : : string > filenames ; <nl> - env_ - > GetChildren ( options . db_paths [ 1 ] . path , & filenames ) ; <nl> - / / Delete archival files . <nl> - for ( size_t i = 0 ; i < filenames . size ( ) ; + + i ) { <nl> - env_ - > DeleteFile ( options . db_paths [ 1 ] . path + " / " + filenames [ i ] ) ; <nl> + if ( env_ - > GetChildren ( options . db_paths [ 1 ] . path , & filenames ) . ok ( ) ) { <nl> + / / Delete archival files . <nl> + for ( size_t i = 0 ; i < filenames . size ( ) ; + + i ) { <nl> + ASSERT_OK ( <nl> + env_ - > DeleteFile ( options . db_paths [ 1 ] . path + " / " + filenames [ i ] ) ) ; <nl> + } <nl> + ASSERT_OK ( env_ - > DeleteDir ( options . db_paths [ 1 ] . path ) ) ; <nl> } <nl> - env_ - > DeleteDir ( options . db_paths [ 1 ] . path ) ; <nl> Reopen ( options ) ; <nl> <nl> Random rnd ( 301 ) ; <nl> TEST_P ( DBTestUniversalCompaction , ConcurrentBottomPriLowPriCompactions ) { <nl> / / use no_wait above because that one waits for flush and compaction . We <nl> / / don ' t want to wait for compaction because the full compaction is <nl> / / intentionally blocked while more files are flushed . 
<nl> - dbfull ( ) - > TEST_WaitForFlushMemTable ( ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForFlushMemTable ( ) ) ; <nl> } <nl> if ( i = = 0 ) { <nl> TEST_SYNC_POINT ( <nl> " DBTestUniversalCompaction : ConcurrentBottomPriLowPriCompactions : 0 " ) ; <nl> } <nl> } <nl> - dbfull ( ) - > TEST_WaitForCompact ( ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ; <nl> <nl> / / First compaction should output to bottom level . Second should output to L0 <nl> / / since older L0 files pending compaction prevent it from being placed lower . <nl> TEST_P ( DBTestUniversalCompaction , RecalculateScoreAfterPicking ) { <nl> int key_idx = 0 ; <nl> GenerateNewFile ( & rnd , & key_idx ) ; <nl> } <nl> - dbfull ( ) - > TEST_WaitForCompact ( ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ; <nl> / / Compacting the first four files was enough to bring the score below one so <nl> / / there ' s no need to schedule any more compactions . <nl> ASSERT_EQ ( 1 , num_compactions_attempted ) ; <nl> TEST_P ( DBTestUniversalCompaction , FinalSortedRunCompactFilesConflict ) { <nl> auto stop_token = <nl> dbfull ( ) - > TEST_write_controler ( ) . GetCompactionPressureToken ( ) ; <nl> <nl> - Put ( " key " , " val " ) ; <nl> + ASSERT_OK ( Put ( " key " , " val " ) ) ; <nl> Flush ( ) ; <nl> - dbfull ( ) - > CompactRange ( CompactRangeOptions ( ) , nullptr , nullptr ) ; <nl> + ASSERT_OK ( dbfull ( ) - > CompactRange ( CompactRangeOptions ( ) , nullptr , nullptr ) ) ; <nl> ASSERT_EQ ( NumTableFilesAtLevel ( num_levels_ - 1 ) , 1 ) ; <nl> ColumnFamilyMetaData cf_meta ; <nl> ColumnFamilyHandle * default_cfh = db_ - > DefaultColumnFamily ( ) ; <nl> TEST_P ( DBTestUniversalCompaction , FinalSortedRunCompactFilesConflict ) { <nl> TEST_SYNC_POINT ( <nl> " DBTestUniversalCompaction : FinalSortedRunCompactFilesConflict : 0 " ) ; <nl> for ( int i = 0 ; i < 2 ; + + i ) { <nl> - Put ( " key " , " val " ) ; <nl> + ASSERT_OK ( Put ( " key " , " val " ) ) ; <nl> Flush ( ) ; <nl> } <nl> - dbfull ( ) - > TEST_WaitForCompact ( ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ; <nl> <nl> compact_files_thread . join ( ) ; <nl> } <nl> TEST_P ( DBTestUniversalManualCompactionOutputPathId , <nl> DestroyAndReopen ( options ) ; <nl> CreateAndReopenWithCF ( { " pikachu " } , options ) ; <nl> MakeTables ( 3 , " p " , " q " , 1 ) ; <nl> - dbfull ( ) - > TEST_WaitForCompact ( ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ; <nl> ASSERT_EQ ( 2 , TotalLiveFiles ( 1 ) ) ; <nl> ASSERT_EQ ( 2 , GetSstFileCount ( options . db_paths [ 0 ] . path ) ) ; <nl> ASSERT_EQ ( 0 , GetSstFileCount ( options . db_paths [ 1 ] . path ) ) ; <nl> TEST_P ( DBTestUniversalManualCompactionOutputPathId , <nl> CompactRangeOptions compact_options ; <nl> compact_options . target_path_id = 1 ; <nl> compact_options . exclusive_manual_compaction = exclusive_manual_compaction_ ; <nl> - db_ - > CompactRange ( compact_options , handles_ [ 1 ] , nullptr , nullptr ) ; <nl> + ASSERT_OK ( db_ - > CompactRange ( compact_options , handles_ [ 1 ] , nullptr , nullptr ) ) ; <nl> ASSERT_EQ ( 1 , TotalLiveFiles ( 1 ) ) ; <nl> ASSERT_EQ ( 0 , GetSstFileCount ( options . db_paths [ 0 ] . path ) ) ; <nl> ASSERT_EQ ( 1 , GetSstFileCount ( options . db_paths [ 1 ] . path ) ) ; <nl> TEST_P ( DBTestUniversalManualCompactionOutputPathId , <nl> / / Full compaction to DB path 0 <nl> compact_options . target_path_id = 0 ; <nl> compact_options . 
exclusive_manual_compaction = exclusive_manual_compaction_ ; <nl> - db_ - > CompactRange ( compact_options , handles_ [ 1 ] , nullptr , nullptr ) ; <nl> + ASSERT_OK ( db_ - > CompactRange ( compact_options , handles_ [ 1 ] , nullptr , nullptr ) ) ; <nl> ASSERT_EQ ( 1 , TotalLiveFiles ( 1 ) ) ; <nl> ASSERT_EQ ( 1 , GetSstFileCount ( options . db_paths [ 0 ] . path ) ) ; <nl> ASSERT_EQ ( 0 , GetSstFileCount ( options . db_paths [ 1 ] . path ) ) ; <nl> TEST_F ( DBTestUniversalCompaction2 , BasicL0toL1 ) { <nl> / / during flush <nl> int i ; <nl> for ( i = 0 ; i < 2000 ; + + i ) { <nl> - Put ( Key ( i ) , " val " ) ; <nl> + ASSERT_OK ( Put ( Key ( i ) , " val " ) ) ; <nl> } <nl> Flush ( ) ; <nl> / / MoveFilesToLevel ( 6 ) ; <nl> - dbfull ( ) - > CompactRange ( CompactRangeOptions ( ) , nullptr , nullptr ) ; <nl> + ASSERT_OK ( dbfull ( ) - > CompactRange ( CompactRangeOptions ( ) , nullptr , nullptr ) ) ; <nl> <nl> for ( i = 1999 ; i < kNumKeys ; + + i ) { <nl> if ( i > = kNumKeys - kWindowSize & & <nl> i < kNumKeys - kWindowSize + kNumDelsTrigger ) { <nl> - Delete ( Key ( i ) ) ; <nl> + ASSERT_OK ( Delete ( Key ( i ) ) ) ; <nl> } else { <nl> - Put ( Key ( i ) , " val " ) ; <nl> + ASSERT_OK ( Put ( Key ( i ) , " val " ) ) ; <nl> } <nl> } <nl> Flush ( ) ; <nl> <nl> - dbfull ( ) - > TEST_WaitForCompact ( ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ; <nl> ASSERT_EQ ( 0 , NumTableFilesAtLevel ( 0 ) ) ; <nl> ASSERT_GT ( NumTableFilesAtLevel ( 6 ) , 0 ) ; <nl> } <nl> TEST_F ( DBTestUniversalCompaction2 , SingleLevel ) { <nl> / / during flush <nl> int i ; <nl> for ( i = 0 ; i < 2000 ; + + i ) { <nl> - Put ( Key ( i ) , " val " ) ; <nl> + ASSERT_OK ( Put ( Key ( i ) , " val " ) ) ; <nl> } <nl> Flush ( ) ; <nl> <nl> for ( i = 1999 ; i < kNumKeys ; + + i ) { <nl> if ( i > = kNumKeys - kWindowSize & & <nl> i < kNumKeys - kWindowSize + kNumDelsTrigger ) { <nl> - Delete ( Key ( i ) ) ; <nl> + ASSERT_OK ( Delete ( Key ( i ) ) ) ; <nl> } else { <nl> - Put ( Key ( i ) , " val " ) ; <nl> + ASSERT_OK ( Put ( Key ( i ) , " val " ) ) ; <nl> } <nl> } <nl> Flush ( ) ; <nl> <nl> - dbfull ( ) - > TEST_WaitForCompact ( ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ; <nl> ASSERT_EQ ( 1 , NumTableFilesAtLevel ( 0 ) ) ; <nl> } <nl> # endif / / ENABLE_SINGLE_LEVEL_DTC <nl> TEST_F ( DBTestUniversalCompaction2 , MultipleLevels ) { <nl> / / during flush <nl> int i ; <nl> for ( i = 0 ; i < 500 ; + + i ) { <nl> - Put ( Key ( i ) , " val " ) ; <nl> + ASSERT_OK ( Put ( Key ( i ) , " val " ) ) ; <nl> } <nl> Flush ( ) ; <nl> for ( i = 500 ; i < 1000 ; + + i ) { <nl> - Put ( Key ( i ) , " val " ) ; <nl> + ASSERT_OK ( Put ( Key ( i ) , " val " ) ) ; <nl> } <nl> Flush ( ) ; <nl> for ( i = 1000 ; i < 1500 ; + + i ) { <nl> - Put ( Key ( i ) , " val " ) ; <nl> + ASSERT_OK ( Put ( Key ( i ) , " val " ) ) ; <nl> } <nl> Flush ( ) ; <nl> for ( i = 1500 ; i < 2000 ; + + i ) { <nl> - Put ( Key ( i ) , " val " ) ; <nl> + ASSERT_OK ( Put ( Key ( i ) , " val " ) ) ; <nl> } <nl> Flush ( ) ; <nl> <nl> - dbfull ( ) - > TEST_WaitForCompact ( ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ; <nl> ASSERT_EQ ( 0 , NumTableFilesAtLevel ( 0 ) ) ; <nl> ASSERT_GT ( NumTableFilesAtLevel ( 6 ) , 0 ) ; <nl> <nl> for ( i = 1999 ; i < 2333 ; + + i ) { <nl> - Put ( Key ( i ) , " val " ) ; <nl> + ASSERT_OK ( Put ( Key ( i ) , " val " ) ) ; <nl> } <nl> Flush ( ) ; <nl> for ( i = 2333 ; i < 2666 ; + + i ) { <nl> - Put ( Key ( i ) , " val " ) ; <nl> + ASSERT_OK ( Put ( Key ( i ) , " val " ) ) ; <nl> } <nl> Flush ( 
) ; <nl> for ( i = 2666 ; i < 2999 ; + + i ) { <nl> - Put ( Key ( i ) , " val " ) ; <nl> + ASSERT_OK ( Put ( Key ( i ) , " val " ) ) ; <nl> } <nl> Flush ( ) ; <nl> <nl> - dbfull ( ) - > TEST_WaitForCompact ( ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ; <nl> ASSERT_EQ ( 0 , NumTableFilesAtLevel ( 0 ) ) ; <nl> ASSERT_GT ( NumTableFilesAtLevel ( 6 ) , 0 ) ; <nl> ASSERT_GT ( NumTableFilesAtLevel ( 5 ) , 0 ) ; <nl> <nl> for ( i = 1900 ; i < 2100 ; + + i ) { <nl> - Delete ( Key ( i ) ) ; <nl> + ASSERT_OK ( Delete ( Key ( i ) ) ) ; <nl> } <nl> Flush ( ) ; <nl> <nl> - dbfull ( ) - > TEST_WaitForCompact ( ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ; <nl> ASSERT_EQ ( 0 , NumTableFilesAtLevel ( 0 ) ) ; <nl> ASSERT_EQ ( 0 , NumTableFilesAtLevel ( 1 ) ) ; <nl> ASSERT_EQ ( 0 , NumTableFilesAtLevel ( 2 ) ) ; <nl> TEST_F ( DBTestUniversalCompaction2 , OverlappingL0 ) { <nl> / / during flush <nl> int i ; <nl> for ( i = 0 ; i < 2000 ; + + i ) { <nl> - Put ( Key ( i ) , " val " ) ; <nl> + ASSERT_OK ( Put ( Key ( i ) , " val " ) ) ; <nl> } <nl> Flush ( ) ; <nl> for ( i = 2000 ; i < 3000 ; + + i ) { <nl> - Put ( Key ( i ) , " val " ) ; <nl> + ASSERT_OK ( Put ( Key ( i ) , " val " ) ) ; <nl> } <nl> Flush ( ) ; <nl> for ( i = 3500 ; i < 4000 ; + + i ) { <nl> - Put ( Key ( i ) , " val " ) ; <nl> + ASSERT_OK ( Put ( Key ( i ) , " val " ) ) ; <nl> } <nl> Flush ( ) ; <nl> for ( i = 2900 ; i < 3100 ; + + i ) { <nl> - Delete ( Key ( i ) ) ; <nl> + ASSERT_OK ( Delete ( Key ( i ) ) ) ; <nl> } <nl> Flush ( ) ; <nl> <nl> - dbfull ( ) - > TEST_WaitForCompact ( ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ; <nl> ASSERT_EQ ( 2 , NumTableFilesAtLevel ( 0 ) ) ; <nl> ASSERT_GT ( NumTableFilesAtLevel ( 6 ) , 0 ) ; <nl> } <nl> TEST_F ( DBTestUniversalCompaction2 , IngestBehind ) { <nl> / / during flush <nl> int i ; <nl> for ( i = 0 ; i < 2000 ; + + i ) { <nl> - Put ( Key ( i ) , " val " ) ; <nl> + ASSERT_OK ( Put ( Key ( i ) , " val " ) ) ; <nl> } <nl> Flush ( ) ; <nl> / / MoveFilesToLevel ( 6 ) ; <nl> - dbfull ( ) - > CompactRange ( CompactRangeOptions ( ) , nullptr , nullptr ) ; <nl> + ASSERT_OK ( dbfull ( ) - > CompactRange ( CompactRangeOptions ( ) , nullptr , nullptr ) ) ; <nl> <nl> for ( i = 1999 ; i < kNumKeys ; + + i ) { <nl> if ( i > = kNumKeys - kWindowSize & & <nl> i < kNumKeys - kWindowSize + kNumDelsTrigger ) { <nl> - Delete ( Key ( i ) ) ; <nl> + ASSERT_OK ( Delete ( Key ( i ) ) ) ; <nl> } else { <nl> - Put ( Key ( i ) , " val " ) ; <nl> + ASSERT_OK ( Put ( Key ( i ) , " val " ) ) ; <nl> } <nl> } <nl> Flush ( ) ; <nl> <nl> - dbfull ( ) - > TEST_WaitForCompact ( ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ; <nl> ASSERT_EQ ( 0 , NumTableFilesAtLevel ( 0 ) ) ; <nl> ASSERT_EQ ( 0 , NumTableFilesAtLevel ( 6 ) ) ; <nl> ASSERT_GT ( NumTableFilesAtLevel ( 5 ) , 0 ) ; <nl> TEST_F ( DBTestUniversalCompaction2 , PeriodicCompaction ) { <nl> / / Another flush would trigger compaction the oldest file . 
<nl> ASSERT_OK ( Put ( " foo " , " bar2 " ) ) ; <nl> Flush ( ) ; <nl> - dbfull ( ) - > TEST_WaitForCompact ( ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ; <nl> <nl> ASSERT_EQ ( 1 , periodic_compactions ) ; <nl> ASSERT_EQ ( 0 , start_level ) ; <nl> TEST_F ( DBTestUniversalCompaction2 , PeriodicCompaction ) { <nl> / / A flush doesn ' t trigger a periodic compaction when threshold not hit <nl> ASSERT_OK ( Put ( " foo " , " bar2 " ) ) ; <nl> Flush ( ) ; <nl> - dbfull ( ) - > TEST_WaitForCompact ( ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ; <nl> ASSERT_EQ ( 0 , periodic_compactions ) ; <nl> <nl> / / After periodic compaction threshold hits , a flush will trigger <nl> TEST_F ( DBTestUniversalCompaction2 , PeriodicCompaction ) { <nl> ASSERT_OK ( Put ( " foo " , " bar2 " ) ) ; <nl> env_ - > MockSleepForSeconds ( 48 * 60 * 60 + 100 ) ; <nl> Flush ( ) ; <nl> - dbfull ( ) - > TEST_WaitForCompact ( ) ; <nl> + ASSERT_OK ( dbfull ( ) - > TEST_WaitForCompact ( ) ) ; <nl> ASSERT_EQ ( 1 , periodic_compactions ) ; <nl> ASSERT_EQ ( 0 , start_level ) ; <nl> ASSERT_EQ ( 4 , output_level ) ; <nl>
|
Enable ASSERT_STATUS_CHECKED for db_universal_compaction_test ( )
|
facebook/rocksdb
|
a242a5830116c2fdd476af79873297a7e0d02410
|
2020-10-06T21:42:12Z
|
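The RocksDB commit above is part of enabling ASSERT_STATUS_CHECKED: in such builds, a Status that is destroyed without ever being inspected trips an assertion, so tests must either observe every return value (hence the mechanical ASSERT_OK wrapping) or waive it explicitly with PermitUncheckedError(), as CompactFilesImpl does for an io_status whose error is already captured elsewhere. The following is a toy sketch of how a must-check status type can enforce this discipline; it illustrates the idea under simplified assumptions and is not RocksDB's actual implementation.

#include <cassert>
#include <string>
#include <utility>

// Toy must-check status: a debug build aborts if a Status is
// destroyed without anyone having looked at it.
class Status {
 public:
  static Status OK() { return Status(0, ""); }
  static Status IOError(std::string msg) { return Status(1, std::move(msg)); }

  Status(Status&& other) noexcept
      : code_(other.code_), msg_(std::move(other.msg_)),
        checked_(other.checked_) {
    other.checked_ = true;  // a moved-from Status no longer needs checking
  }

  ~Status() { assert(checked_ && "Status was never checked"); }

  bool ok() const {  // any inspection marks the status as checked
    checked_ = true;
    return code_ == 0;
  }

  // Explicit waiver for cases where the failure is already captured elsewhere.
  void PermitUncheckedError() const { checked_ = true; }

 private:
  Status(int code, std::string msg) : code_(code), msg_(std::move(msg)) {}
  int code_;
  std::string msg_;
  mutable bool checked_ = false;
};

// Hypothetical operation standing in for a DB call.
Status DoWrite() { return Status::IOError("disk full"); }

int main() {
  Status s = DoWrite();
  if (!s.ok()) { /* handled: ok() marked the status as checked */ }
  DoWrite().PermitUncheckedError();  // deliberate waiver; destructor stays quiet
  // DoWrite();                      // would assert when the temporary dies
  return 0;
}

This is why the test diff is so repetitive: every bare TEST_WaitForCompact(), Put(), or CompactRange() call becomes a site where the returned status must be consumed one way or the other.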
mmm a / lib / SILOptimizer / PassManager / Passes . cpp <nl> ppp b / lib / SILOptimizer / PassManager / Passes . cpp <nl> llvm : : cl : : opt < bool > <nl> SILViewCFG ( " sil - view - cfg " , llvm : : cl : : init ( false ) , <nl> llvm : : cl : : desc ( " Enable the sil cfg viewer pass " ) ) ; <nl> <nl> - llvm : : cl : : opt < bool > <nl> - SILViewGuaranteedCFG ( " sil - view - guaranteed - cfg " , llvm : : cl : : init ( false ) , <nl> - llvm : : cl : : desc ( " Enable the sil cfg viewer pass after diagnostics " ) ) ; <nl> + llvm : : cl : : opt < bool > SILViewGuaranteedCFG ( <nl> + " sil - view - guaranteed - cfg " , llvm : : cl : : init ( false ) , <nl> + llvm : : cl : : desc ( " Enable the sil cfg viewer pass after diagnostics " ) ) ; <nl> <nl> - llvm : : cl : : opt < bool > <nl> - SILViewSILGenCFG ( " sil - view - silgen - cfg " , llvm : : cl : : init ( false ) , <nl> - llvm : : cl : : desc ( " Enable the sil cfg viewer pass before diagnostics " ) ) ; <nl> + llvm : : cl : : opt < bool > SILViewSILGenCFG ( <nl> + " sil - view - silgen - cfg " , llvm : : cl : : init ( false ) , <nl> + llvm : : cl : : desc ( " Enable the sil cfg viewer pass before diagnostics " ) ) ; <nl> <nl> using namespace swift ; <nl> <nl> descriptorsForFile ( StringRef Filename , <nl> <nl> auto * RootList = cast < yaml : : SequenceNode > ( N ) ; <nl> <nl> - for ( auto & PMDescriptorIter : make_range ( RootList - > begin ( ) , RootList - > end ( ) ) ) { <nl> + for ( auto & PMDescriptorIter : <nl> + make_range ( RootList - > begin ( ) , RootList - > end ( ) ) ) { <nl> PMDescriptor PM ( cast < yaml : : SequenceNode > ( & PMDescriptorIter ) ) ; <nl> Descriptors . push_back ( std : : move ( PM ) ) ; <nl> } <nl>
|
Fix 80 - column violations .
|
apple/swift
|
149e1e4059053039f14150f04a369bcc87a6f4e3
|
2016-01-03T21:15:56Z
|
mmm a / include / swift / AST / AccessScope . h <nl> ppp b / include / swift / AST / AccessScope . h <nl> class AccessScope { <nl> } <nl> <nl> / / / Returns the associated access level for diagnostic purposes . <nl> - Accessibility accessibilityForDiagnostics ( ) const ; <nl> + Accessibility accessLevelForDiagnostics ( ) const ; <nl> <nl> / / / Returns the minimum access level required to access <nl> / / / associated DeclContext for diagnostic purposes . <nl> - Accessibility requiredAccessibilityForDiagnostics ( ) const { <nl> + Accessibility requiredAccessForDiagnostics ( ) const { <nl> return isFileScope ( ) <nl> ? Accessibility : : FilePrivate <nl> - : accessibilityForDiagnostics ( ) ; <nl> + : accessLevelForDiagnostics ( ) ; <nl> } <nl> <nl> / / / Returns the narrowest access scope if this and the specified access scope <nl> mmm a / include / swift / AST / Decl . h <nl> ppp b / include / swift / AST / Decl . h <nl> class ExtensionDecl final : public Decl , public GenericContext , <nl> ExtensionDeclBits . CheckedInheritanceClause = checked ; <nl> } <nl> <nl> - bool hasDefaultAccessibility ( ) const { <nl> + bool hasDefaultAccessLevel ( ) const { <nl> return ExtensionDeclBits . DefaultAndMaxAccessLevel ! = 0 ; <nl> } <nl> <nl> - Accessibility getDefaultAccessibility ( ) const { <nl> - assert ( hasDefaultAccessibility ( ) & & " not computed yet " ) ; <nl> + Accessibility getDefaultAccessLevel ( ) const { <nl> + assert ( hasDefaultAccessLevel ( ) & & " not computed yet " ) ; <nl> if ( ExtensionDeclBits . DefaultAndMaxAccessLevel & <nl> ( 1 < < ( static_cast < unsigned > ( Accessibility : : FilePrivate ) - 1 ) ) ) <nl> return Accessibility : : FilePrivate ; <nl> class ExtensionDecl final : public Decl , public GenericContext , <nl> return Accessibility : : Public ; <nl> } <nl> <nl> - Accessibility getMaxAccessibility ( ) const { <nl> - assert ( hasDefaultAccessibility ( ) & & " not computed yet " ) ; <nl> + Accessibility getMaxAccessLevel ( ) const { <nl> + assert ( hasDefaultAccessLevel ( ) & & " not computed yet " ) ; <nl> if ( ExtensionDeclBits . DefaultAndMaxAccessLevel & <nl> ( 1 < < ( static_cast < unsigned > ( Accessibility : : Public ) - 1 ) ) ) <nl> return Accessibility : : Public ; <nl> class ExtensionDecl final : public Decl , public GenericContext , <nl> return Accessibility : : FilePrivate ; <nl> } <nl> <nl> - void setDefaultAndMaxAccessibility ( Accessibility defaultAccess , <nl> - Accessibility maxAccess ) { <nl> - assert ( ! hasDefaultAccessibility ( ) & & " default accessibility already set " ) ; <nl> + void setDefaultAndMaxAccess ( Accessibility defaultAccess , <nl> + Accessibility maxAccess ) { <nl> + assert ( ! hasDefaultAccessLevel ( ) & & " default accessibility already set " ) ; <nl> assert ( maxAccess > = defaultAccess ) ; <nl> assert ( maxAccess ! = Accessibility : : Private & & " private not valid " ) ; <nl> assert ( defaultAccess ! = Accessibility : : Private & & " private not valid " ) ; <nl> ExtensionDeclBits . 
DefaultAndMaxAccessLevel = <nl> ( 1 < < ( static_cast < unsigned > ( defaultAccess ) - 1 ) ) | <nl> ( 1 < < ( static_cast < unsigned > ( maxAccess ) - 1 ) ) ; <nl> - assert ( getDefaultAccessibility ( ) = = defaultAccess & & " not enough bits " ) ; <nl> - assert ( getMaxAccessibility ( ) = = maxAccess & & " not enough bits " ) ; <nl> + assert ( getDefaultAccessLevel ( ) = = defaultAccess & & " not enough bits " ) ; <nl> + assert ( getMaxAccessLevel ( ) = = maxAccess & & " not enough bits " ) ; <nl> } <nl> <nl> void setConformanceLoader ( LazyMemberLoader * resolver , uint64_t contextData ) ; <nl> class ValueDecl : public Decl { <nl> SourceLoc getNameLoc ( ) const { return NameLoc ; } <nl> SourceLoc getLoc ( ) const { return NameLoc ; } <nl> <nl> - bool hasAccessibility ( ) const { <nl> + bool hasAccess ( ) const { <nl> return TypeAndAccess . getInt ( ) . hasValue ( ) ; <nl> } <nl> <nl> class ValueDecl : public Decl { <nl> / / / \ sa getFormalAccessScope <nl> Accessibility getFormalAccess ( const DeclContext * useDC = nullptr , <nl> bool respectVersionedAttr = false ) const { <nl> - assert ( hasAccessibility ( ) & & " accessibility not computed yet " ) ; <nl> + assert ( hasAccess ( ) & & " accessibility not computed yet " ) ; <nl> Accessibility result = TypeAndAccess . getInt ( ) . getValue ( ) ; <nl> if ( respectVersionedAttr & & <nl> result = = Accessibility : : Internal & & <nl> class ValueDecl : public Decl { <nl> / / / decisions . It should not be used at the AST or semantic level . <nl> Accessibility getEffectiveAccess ( ) const ; <nl> <nl> - void setAccessibility ( Accessibility access ) { <nl> - assert ( ! hasAccessibility ( ) & & " accessibility already set " ) ; <nl> - overwriteAccessibility ( access ) ; <nl> + void setAccess ( Accessibility access ) { <nl> + assert ( ! hasAccess ( ) & & " accessibility already set " ) ; <nl> + overwriteAccess ( access ) ; <nl> } <nl> <nl> / / / Overwrite the accessibility of this declaration . <nl> / / This is needed in the LLDB REPL . <nl> - void overwriteAccessibility ( Accessibility access ) { <nl> + void overwriteAccess ( Accessibility access ) { <nl> TypeAndAccess . setInt ( access ) ; <nl> } <nl> <nl> class AbstractStorageDecl : public ValueDecl { <nl> return nullptr ; <nl> } <nl> <nl> - Accessibility getSetterAccessibility ( ) const { <nl> - assert ( hasAccessibility ( ) ) ; <nl> + Accessibility getSetterFormalAccess ( ) const { <nl> + assert ( hasAccess ( ) ) ; <nl> assert ( GetSetInfo . getInt ( ) . hasValue ( ) ) ; <nl> return GetSetInfo . getInt ( ) . getValue ( ) ; <nl> } <nl> <nl> - void setSetterAccessibility ( Accessibility accessLevel ) { <nl> + void setSetterAccess ( Accessibility accessLevel ) { <nl> assert ( ! GetSetInfo . getInt ( ) . hasValue ( ) ) ; <nl> - overwriteSetterAccessibility ( accessLevel ) ; <nl> + overwriteSetterAccess ( accessLevel ) ; <nl> } <nl> <nl> - void overwriteSetterAccessibility ( Accessibility accessLevel ) ; <nl> + void overwriteSetterAccess ( Accessibility accessLevel ) ; <nl> <nl> / / / \ brief Retrieve the materializeForSet function , if this <nl> / / / declaration has one . <nl> NominalTypeDecl : : ToStoredProperty : : operator ( ) ( Decl * decl ) const { <nl> } <nl> <nl> inline void <nl> - AbstractStorageDecl : : overwriteSetterAccessibility ( Accessibility accessLevel ) { <nl> + AbstractStorageDecl : : overwriteSetterAccess ( Accessibility accessLevel ) { <nl> GetSetInfo . 
setInt ( accessLevel ) ; <nl> if ( auto setter = getSetter ( ) ) <nl> - setter - > overwriteAccessibility ( accessLevel ) ; <nl> + setter - > overwriteAccess ( accessLevel ) ; <nl> if ( auto materializeForSet = getMaterializeForSetFunc ( ) ) <nl> - materializeForSet - > overwriteAccessibility ( accessLevel ) ; <nl> + materializeForSet - > overwriteAccess ( accessLevel ) ; <nl> } <nl> <nl> inline bool AbstractStorageDecl : : isStatic ( ) const { <nl> mmm a / include / swift / AST / LazyResolver . h <nl> ppp b / include / swift / AST / LazyResolver . h <nl> class LazyResolver { <nl> / / / Resolve the accessibility of a value . <nl> / / / <nl> / / / It does no type - checking . <nl> - virtual void resolveAccessibility ( ValueDecl * VD ) = 0 ; <nl> + virtual void resolveAccessControl ( ValueDecl * VD ) = 0 ; <nl> <nl> / / / Resolve the type and declaration attributes of a value . <nl> / / / <nl> class DelegatingLazyResolver : public LazyResolver { <nl> Principal . resolveWitness ( conformance , requirement ) ; <nl> } <nl> <nl> - void resolveAccessibility ( ValueDecl * VD ) override { <nl> - Principal . resolveAccessibility ( VD ) ; <nl> + void resolveAccessControl ( ValueDecl * VD ) override { <nl> + Principal . resolveAccessControl ( VD ) ; <nl> } <nl> <nl> void resolveDeclSignature ( ValueDecl * VD ) override { <nl> mmm a / include / swift / AST / LookupKinds . h <nl> ppp b / include / swift / AST / LookupKinds . h <nl> enum NLOptions : unsigned { <nl> / / / Don ' t check accessibility when doing lookup into a type . <nl> / / / <nl> / / / This option is not valid when performing lookup into a module . <nl> - NL_IgnoreAccessibility = 0x20 , <nl> + NL_IgnoreAccessControl = 0x20 , <nl> <nl> / / / This lookup is known to be a non - cascading dependency , i . e . one that does <nl> / / / not affect downstream files . <nl> mmm a / include / swift / AST / PrintOptions . h <nl> ppp b / include / swift / AST / PrintOptions . h <nl> struct PrintOptions { <nl> bool PrintOverrideKeyword = true ; <nl> <nl> / / / Whether to print accessibility information on all value decls . <nl> - bool PrintAccessibility = false ; <nl> + bool PrintAccess = false ; <nl> <nl> - / / / If \ c PrintAccessibility is true , this determines whether to print <nl> + / / / If \ c PrintAccess is true , this determines whether to print <nl> / / / ' internal ' keyword . <nl> - bool PrintInternalAccessibilityKeyword = true ; <nl> + bool PrintInternalAccessKeyword = true ; <nl> <nl> / / / Print all decls that have at least this level of access . <nl> - Accessibility AccessibilityFilter = Accessibility : : Private ; <nl> + Accessibility AccessFilter = Accessibility : : Private ; <nl> <nl> / / / Print IfConfigDecls . <nl> bool PrintIfConfig = true ; <nl> struct PrintOptions { <nl> / / / Retrieve the set of options suitable for diagnostics printing . <nl> static PrintOptions printForDiagnostics ( ) { <nl> PrintOptions result = printVerbose ( ) ; <nl> - result . PrintAccessibility = true ; <nl> + result . PrintAccess = true ; <nl> result . Indent = 4 ; <nl> result . FullyQualifiedTypesIfAmbiguous = true ; <nl> result . SynthesizeSugarOnTypes = true ; <nl> struct PrintOptions { <nl> result . ExcludeAttrList . push_back ( DAK_Inline ) ; <nl> result . ExcludeAttrList . push_back ( DAK_Rethrows ) ; <nl> result . PrintOverrideKeyword = false ; <nl> - result . AccessibilityFilter = Accessibility : : Public ; <nl> + result . AccessFilter = Accessibility : : Public ; <nl> result . PrintIfConfig = false ; <nl> result . 
ShouldQualifyNestedDeclarations = <nl> QualifyNestedDeclarations : : TypesOnly ; <nl> struct PrintOptions { <nl> / / / Retrieve the print options that are suitable to print the testable interface . <nl> static PrintOptions printTestableInterface ( ) { <nl> PrintOptions result = printInterface ( ) ; <nl> - result . AccessibilityFilter = Accessibility : : Internal ; <nl> + result . AccessFilter = Accessibility : : Internal ; <nl> return result ; <nl> } <nl> <nl> struct PrintOptions { <nl> / / / swift file . <nl> static PrintOptions printSwiftFileInterface ( ) { <nl> PrintOptions result = printInterface ( ) ; <nl> - result . AccessibilityFilter = Accessibility : : Internal ; <nl> + result . AccessFilter = Accessibility : : Internal ; <nl> result . EmptyLineBetweenMembers = true ; <nl> return result ; <nl> } <nl> struct PrintOptions { <nl> result . ExcludeAttrList . push_back ( DAK_FixedLayout ) ; <nl> result . PrintStorageRepresentationAttrs = true ; <nl> result . AbstractAccessors = false ; <nl> - result . PrintAccessibility = true ; <nl> + result . PrintAccess = true ; <nl> result . SkipEmptyExtensionDecls = false ; <nl> result . SkipMissingMemberPlaceholders = false ; <nl> return result ; <nl> mmm a / lib / AST / ASTDumper . cpp <nl> ppp b / lib / AST / ASTDumper . cpp <nl> static StringRef getImportKindString ( ImportKind value ) { <nl> <nl> llvm_unreachable ( " Unhandled ImportKind in switch . " ) ; <nl> } <nl> - static StringRef getAccessibilityString ( Accessibility value ) { <nl> + static StringRef getAccessLevelString ( Accessibility value ) { <nl> switch ( value ) { <nl> case Accessibility : : Private : return " private " ; <nl> case Accessibility : : FilePrivate : return " fileprivate " ; <nl> namespace { <nl> PrintWithColorRAII ( OS , InterfaceTypeColor ) < < " ' " ; <nl> } <nl> <nl> - if ( VD - > hasAccessibility ( ) ) { <nl> + if ( VD - > hasAccess ( ) ) { <nl> PrintWithColorRAII ( OS , AccessibilityColor ) < < " access = " <nl> - < < getAccessibilityString ( VD - > getFormalAccess ( ) ) ; <nl> + < < getAccessLevelString ( VD - > getFormalAccess ( ) ) ; <nl> } <nl> <nl> if ( auto Overridden = VD - > getOverriddenDecl ( ) ) { <nl> mmm a / lib / AST / ASTMangler . cpp <nl> ppp b / lib / AST / ASTMangler . cpp <nl> std : : string ASTMangler : : mangleDeclType ( const ValueDecl * decl ) { <nl> <nl> # ifdef USE_NEW_MANGLING_FOR_OBJC_RUNTIME_NAMES <nl> static bool isPrivate ( const NominalTypeDecl * Nominal ) { <nl> - return Nominal - > hasAccessibility ( ) & & <nl> + return Nominal - > hasAccess ( ) & & <nl> Nominal - > getFormalAccess ( ) < = Accessibility : : FilePrivate ; <nl> } <nl> # endif <nl> static unsigned getUnnamedParamIndex ( const ParamDecl * D ) { <nl> } <nl> <nl> static StringRef getPrivateDiscriminatorIfNecessary ( const ValueDecl * decl ) { <nl> - if ( ! decl - > hasAccessibility ( ) | | <nl> + if ( ! decl - > hasAccess ( ) | | <nl> decl - > getFormalAccess ( ) > Accessibility : : FilePrivate | | <nl> isInPrivateOrLocalContext ( decl ) ) { <nl> return StringRef ( ) ; <nl> mmm a / lib / AST / ASTPrinter . cpp <nl> ppp b / lib / AST / ASTPrinter . 
cpp <nl> class PrintAST : public ASTVisitor < PrintAST > { <nl> } <nl> } <nl> <nl> - void printAccessibility ( Accessibility access , StringRef suffix = " " ) { <nl> + void printAccess ( Accessibility access , StringRef suffix = " " ) { <nl> switch ( access ) { <nl> case Accessibility : : Private : <nl> Printer < < tok : : kw_private ; <nl> class PrintAST : public ASTVisitor < PrintAST > { <nl> Printer < < tok : : kw_fileprivate ; <nl> break ; <nl> case Accessibility : : Internal : <nl> - if ( ! Options . PrintInternalAccessibilityKeyword ) <nl> + if ( ! Options . PrintInternalAccessKeyword ) <nl> return ; <nl> Printer < < tok : : kw_internal ; <nl> break ; <nl> class PrintAST : public ASTVisitor < PrintAST > { <nl> Printer < < suffix < < " " ; <nl> } <nl> <nl> - void printAccessibility ( const ValueDecl * D ) { <nl> - if ( ! Options . PrintAccessibility | | ! D - > hasAccessibility ( ) | | <nl> + void printAccess ( const ValueDecl * D ) { <nl> + if ( ! Options . PrintAccess | | ! D - > hasAccess ( ) | | <nl> D - > getAttrs ( ) . hasAttribute < AccessibilityAttr > ( ) ) <nl> return ; <nl> <nl> - printAccessibility ( D - > getFormalAccess ( ) ) ; <nl> + printAccess ( D - > getFormalAccess ( ) ) ; <nl> <nl> if ( auto storageDecl = dyn_cast < AbstractStorageDecl > ( D ) ) { <nl> if ( auto setter = storageDecl - > getSetter ( ) ) { <nl> Accessibility setterAccess = setter - > getFormalAccess ( ) ; <nl> if ( setterAccess ! = D - > getFormalAccess ( ) ) <nl> - printAccessibility ( setterAccess , " ( set ) " ) ; <nl> + printAccess ( setterAccess , " ( set ) " ) ; <nl> } <nl> } <nl> } <nl> bool ShouldPrintChecker : : shouldPrint ( const Decl * D , PrintOptions & Options ) { <nl> <nl> / / Skip declarations that are not accessible . <nl> if ( auto * VD = dyn_cast < ValueDecl > ( D ) ) { <nl> - if ( Options . AccessibilityFilter > Accessibility : : Private & & <nl> - VD - > hasAccessibility ( ) & & <nl> - VD - > getFormalAccess ( ) < Options . AccessibilityFilter ) <nl> + if ( Options . AccessFilter > Accessibility : : Private & & <nl> + VD - > hasAccess ( ) & & VD - > getFormalAccess ( ) < Options . AccessFilter ) <nl> return false ; <nl> } <nl> <nl> void PrintAST : : visitPatternBindingDecl ( PatternBindingDecl * decl ) { <nl> / / after type - checking , but it ' s close enough for now . <nl> if ( anyVar ) { <nl> printAttributes ( anyVar ) ; <nl> - printAccessibility ( anyVar ) ; <nl> + printAccess ( anyVar ) ; <nl> } <nl> <nl> if ( decl - > isStatic ( ) ) <nl> void PrintAST : : visitIfConfigDecl ( IfConfigDecl * ICD ) { <nl> void PrintAST : : visitTypeAliasDecl ( TypeAliasDecl * decl ) { <nl> printDocumentationComment ( decl ) ; <nl> printAttributes ( decl ) ; <nl> - printAccessibility ( decl ) ; <nl> + printAccess ( decl ) ; <nl> if ( ! Options . SkipIntroducerKeywords ) <nl> Printer < < tok : : kw_typealias < < " " ; <nl> printContextIfNeeded ( decl ) ; <nl> void PrintAST : : visitAssociatedTypeDecl ( AssociatedTypeDecl * decl ) { <nl> void PrintAST : : visitEnumDecl ( EnumDecl * decl ) { <nl> printDocumentationComment ( decl ) ; <nl> printAttributes ( decl ) ; <nl> - printAccessibility ( decl ) ; <nl> + printAccess ( decl ) ; <nl> <nl> if ( Options . PrintOriginalSourceText & & decl - > getStartLoc ( ) . 
isValid ( ) ) { <nl> ASTContext & Ctx = decl - > getASTContext ( ) ; <nl> void PrintAST : : visitEnumDecl ( EnumDecl * decl ) { <nl> void PrintAST : : visitStructDecl ( StructDecl * decl ) { <nl> printDocumentationComment ( decl ) ; <nl> printAttributes ( decl ) ; <nl> - printAccessibility ( decl ) ; <nl> + printAccess ( decl ) ; <nl> <nl> if ( Options . PrintOriginalSourceText & & decl - > getStartLoc ( ) . isValid ( ) ) { <nl> ASTContext & Ctx = decl - > getASTContext ( ) ; <nl> void PrintAST : : visitStructDecl ( StructDecl * decl ) { <nl> void PrintAST : : visitClassDecl ( ClassDecl * decl ) { <nl> printDocumentationComment ( decl ) ; <nl> printAttributes ( decl ) ; <nl> - printAccessibility ( decl ) ; <nl> + printAccess ( decl ) ; <nl> <nl> if ( Options . PrintOriginalSourceText & & decl - > getStartLoc ( ) . isValid ( ) ) { <nl> ASTContext & Ctx = decl - > getASTContext ( ) ; <nl> void PrintAST : : visitClassDecl ( ClassDecl * decl ) { <nl> void PrintAST : : visitProtocolDecl ( ProtocolDecl * decl ) { <nl> printDocumentationComment ( decl ) ; <nl> printAttributes ( decl ) ; <nl> - printAccessibility ( decl ) ; <nl> + printAccess ( decl ) ; <nl> <nl> if ( Options . PrintOriginalSourceText & & decl - > getStartLoc ( ) . isValid ( ) ) { <nl> ASTContext & Ctx = decl - > getASTContext ( ) ; <nl> void PrintAST : : visitVarDecl ( VarDecl * decl ) { <nl> ! decl - > getAttrs ( ) . hasAttribute < SILStoredAttr > ( ) ) <nl> Printer < < " @ sil_stored " ; <nl> printAttributes ( decl ) ; <nl> - printAccessibility ( decl ) ; <nl> + printAccess ( decl ) ; <nl> if ( ! Options . SkipIntroducerKeywords ) { <nl> if ( decl - > isStatic ( ) ) <nl> printStaticKeyword ( decl - > getCorrectStaticSpelling ( ) ) ; <nl> void PrintAST : : visitFuncDecl ( FuncDecl * decl ) { <nl> } else { <nl> printDocumentationComment ( decl ) ; <nl> printAttributes ( decl ) ; <nl> - printAccessibility ( decl ) ; <nl> + printAccess ( decl ) ; <nl> <nl> if ( Options . PrintOriginalSourceText & & decl - > getStartLoc ( ) . isValid ( ) ) { <nl> ASTContext & Ctx = decl - > getASTContext ( ) ; <nl> void PrintAST : : visitEnumElementDecl ( EnumElementDecl * decl ) { <nl> void PrintAST : : visitSubscriptDecl ( SubscriptDecl * decl ) { <nl> printDocumentationComment ( decl ) ; <nl> printAttributes ( decl ) ; <nl> - printAccessibility ( decl ) ; <nl> + printAccess ( decl ) ; <nl> printContextIfNeeded ( decl ) ; <nl> recordDeclLoc ( decl , [ & ] { <nl> Printer < < " subscript " ; <nl> void PrintAST : : visitSubscriptDecl ( SubscriptDecl * decl ) { <nl> void PrintAST : : visitConstructorDecl ( ConstructorDecl * decl ) { <nl> printDocumentationComment ( decl ) ; <nl> printAttributes ( decl ) ; <nl> - printAccessibility ( decl ) ; <nl> + printAccess ( decl ) ; <nl> <nl> if ( ( decl - > getInitKind ( ) = = CtorInitializerKind : : Convenience | | <nl> decl - > getInitKind ( ) = = CtorInitializerKind : : ConvenienceFactory ) & & <nl> mmm a / lib / AST / ASTVerifier . cpp <nl> ppp b / lib / AST / ASTVerifier . cpp <nl> class Verifier : public ASTWalker { <nl> if ( D - > hasInterfaceType ( ) ) <nl> verifyChecked ( D - > getInterfaceType ( ) ) ; <nl> <nl> - if ( D - > hasAccessibility ( ) ) { <nl> + if ( D - > hasAccess ( ) ) { <nl> PrettyStackTraceDecl debugStack ( " verifying access " , D ) ; <nl> if ( D - > getFormalAccessScope ( ) . isPublic ( ) & & <nl> D - > getFormalAccess ( ) < Accessibility : : Public ) { <nl> class Verifier : public ASTWalker { <nl> } <nl> <nl> void verifyChecked ( ValueDecl * VD ) { <nl> - if ( ! 
VD - > hasAccessibility ( ) & & ! VD - > getDeclContext ( ) - > isLocalContext ( ) & & <nl> + if ( ! VD - > hasAccess ( ) & & ! VD - > getDeclContext ( ) - > isLocalContext ( ) & & <nl> ! isa < GenericTypeParamDecl > ( VD ) & & ! isa < ParamDecl > ( VD ) ) { <nl> dumpRef ( VD ) ; <nl> Out < < " does not have accessibility " ; <nl> class Verifier : public ASTWalker { <nl> } <nl> <nl> void verifyChecked ( AbstractStorageDecl * ASD ) { <nl> - if ( ASD - > hasAccessibility ( ) & & ASD - > isSettable ( nullptr ) ) { <nl> - auto setterAccess = ASD - > getSetterAccessibility ( ) ; <nl> + if ( ASD - > hasAccess ( ) & & ASD - > isSettable ( nullptr ) ) { <nl> + auto setterAccess = ASD - > getSetterFormalAccess ( ) ; <nl> if ( ASD - > getSetter ( ) & & <nl> ASD - > getSetter ( ) - > getFormalAccess ( ) ! = setterAccess ) { <nl> Out < < " AbstractStorageDecl ' s setter accessibility is out of sync " <nl> mmm a / lib / AST / Builtins . cpp <nl> ppp b / lib / AST / Builtins . cpp <nl> getBuiltinFunction ( Identifier Id , ArrayRef < Type > argTypes , Type ResType , <nl> TypeLoc : : withoutLoc ( ResType ) , DC ) ; <nl> FD - > setInterfaceType ( FnType ) ; <nl> FD - > setImplicit ( ) ; <nl> - FD - > setAccessibility ( Accessibility : : Public ) ; <nl> + FD - > setAccess ( Accessibility : : Public ) ; <nl> return FD ; <nl> } <nl> <nl> getBuiltinGenericFunction ( Identifier Id , <nl> func - > setInterfaceType ( InterfaceType ) ; <nl> func - > setGenericEnvironment ( Env ) ; <nl> func - > setImplicit ( ) ; <nl> - func - > setAccessibility ( Accessibility : : Public ) ; <nl> + func - > setAccess ( Accessibility : : Public ) ; <nl> <nl> return func ; <nl> } <nl> mmm a / lib / AST / Decl . cpp <nl> ppp b / lib / AST / Decl . cpp <nl> void AbstractStorageDecl : : configureSetRecord ( GetSetRecord * getSetInfo , <nl> <nl> auto setSetterAccess = [ & ] ( FuncDecl * fn ) { <nl> if ( auto setterAccess = GetSetInfo . getInt ( ) ) { <nl> - assert ( ! fn - > hasAccessibility ( ) | | <nl> + assert ( ! fn - > hasAccess ( ) | | <nl> fn - > getFormalAccess ( ) = = setterAccess . getValue ( ) ) ; <nl> - fn - > overwriteAccessibility ( setterAccess . getValue ( ) ) ; <nl> + fn - > overwriteAccess ( setterAccess . getValue ( ) ) ; <nl> } <nl> } ; <nl> <nl> void AbstractStorageDecl : : setComputedSetter ( FuncDecl * Set ) { <nl> GetSetInfo . getPointer ( ) - > Set = Set ; <nl> Set - > makeAccessor ( this , AccessorKind : : IsSetter ) ; <nl> if ( auto setterAccess = GetSetInfo . getInt ( ) ) { <nl> - assert ( ! Set - > hasAccessibility ( ) | | <nl> + assert ( ! Set - > hasAccess ( ) | | <nl> Set - > getFormalAccess ( ) = = setterAccess . getValue ( ) ) ; <nl> - Set - > overwriteAccessibility ( setterAccess . getValue ( ) ) ; <nl> + Set - > overwriteAccess ( setterAccess . getValue ( ) ) ; <nl> } <nl> } <nl> <nl> void AbstractStorageDecl : : setMaterializeForSetFunc ( FuncDecl * accessor ) { <nl> GetSetInfo . getPointer ( ) - > MaterializeForSet = accessor ; <nl> accessor - > makeAccessor ( this , AccessorKind : : IsMaterializeForSet ) ; <nl> if ( auto setterAccess = GetSetInfo . getInt ( ) ) { <nl> - assert ( ! accessor - > hasAccessibility ( ) | | <nl> + assert ( ! accessor - > hasAccess ( ) | | <nl> accessor - > getFormalAccess ( ) = = setterAccess . getValue ( ) ) ; <nl> - accessor - > overwriteAccessibility ( setterAccess . getValue ( ) ) ; <nl> + accessor - > overwriteAccess ( setterAccess . getValue ( ) ) ; <nl> } <nl> } <nl> <nl> mmm a / lib / AST / DeclContext . cpp <nl> ppp b / lib / AST / DeclContext . 
cpp <nl> ResilienceExpansion DeclContext : : getResilienceExpansion ( ) const { <nl> <nl> / / FIXME : Make sure this method is never called on decls that have not <nl> / / been fully validated . <nl> - if ( ! AFD - > hasAccessibility ( ) ) <nl> + if ( ! AFD - > hasAccess ( ) ) <nl> break ; <nl> <nl> / / If the function is not externally visible , we will not be serializing <nl> DeclContext : : isCascadingContextForLookup ( bool functionsAreNonCascading ) const { <nl> if ( functionsAreNonCascading ) <nl> return false ; <nl> auto * AFD = cast < AbstractFunctionDecl > ( this ) ; <nl> - if ( AFD - > hasAccessibility ( ) ) <nl> + if ( AFD - > hasAccess ( ) ) <nl> return AFD - > getFormalAccess ( ) > Accessibility : : FilePrivate ; <nl> break ; <nl> } <nl> <nl> case DeclContextKind : : SubscriptDecl : { <nl> auto * SD = cast < SubscriptDecl > ( this ) ; <nl> - if ( SD - > hasAccessibility ( ) ) <nl> + if ( SD - > hasAccess ( ) ) <nl> return SD - > getFormalAccess ( ) > Accessibility : : FilePrivate ; <nl> break ; <nl> } <nl> DeclContext : : isCascadingContextForLookup ( bool functionsAreNonCascading ) const { <nl> <nl> case DeclContextKind : : GenericTypeDecl : { <nl> auto * nominal = cast < GenericTypeDecl > ( this ) ; <nl> - if ( nominal - > hasAccessibility ( ) ) <nl> + if ( nominal - > hasAccess ( ) ) <nl> return nominal - > getFormalAccess ( ) > Accessibility : : FilePrivate ; <nl> break ; <nl> } <nl> <nl> case DeclContextKind : : ExtensionDecl : { <nl> auto * extension = cast < ExtensionDecl > ( this ) ; <nl> - if ( extension - > hasDefaultAccessibility ( ) ) <nl> - return extension - > getDefaultAccessibility ( ) > Accessibility : : FilePrivate ; <nl> - / / FIXME : duplicated from computeDefaultAccessibility in TypeCheckDecl . cpp . <nl> + if ( extension - > hasDefaultAccessLevel ( ) ) <nl> + return extension - > getDefaultAccessLevel ( ) > Accessibility : : FilePrivate ; <nl> + / / FIXME : duplicated from computeDefaultAccessLevel in TypeCheckDecl . cpp . <nl> if ( auto * AA = extension - > getAttrs ( ) . getAttribute < AccessibilityAttr > ( ) ) <nl> return AA - > getAccess ( ) > Accessibility : : FilePrivate ; <nl> if ( Type extendedTy = extension - > getExtendedType ( ) ) { <nl> bool AccessScope : : isFileScope ( ) const { <nl> return DC & & isa < FileUnit > ( DC ) ; <nl> } <nl> <nl> - Accessibility AccessScope : : accessibilityForDiagnostics ( ) const { <nl> + Accessibility AccessScope : : accessLevelForDiagnostics ( ) const { <nl> if ( isPublic ( ) ) <nl> return Accessibility : : Public ; <nl> if ( isa < ModuleDecl > ( getDeclContext ( ) ) ) <nl> mmm a / lib / AST / LookupVisibleDecls . cpp <nl> ppp b / lib / AST / LookupVisibleDecls . cpp <nl> static bool isDeclVisibleInLookupMode ( ValueDecl * Member , LookupState LS , <nl> LazyResolver * TypeResolver ) { <nl> if ( TypeResolver ) { <nl> TypeResolver - > resolveDeclSignature ( Member ) ; <nl> - TypeResolver - > resolveAccessibility ( Member ) ; <nl> + TypeResolver - > resolveAccessControl ( Member ) ; <nl> } <nl> <nl> / / Check accessibility when relevant . <nl> if ( ! Member - > getDeclContext ( ) - > isLocalContext ( ) & & <nl> ! isa < GenericTypeParamDecl > ( Member ) & & ! isa < ParamDecl > ( Member ) & & <nl> FromContext - > getASTContext ( ) . LangOpts . EnableAccessControl ) { <nl> - if ( Member - > isInvalid ( ) & & ! Member - > hasAccessibility ( ) ) <nl> + if ( Member - > isInvalid ( ) & & ! Member - > hasAccess ( ) ) <nl> return false ; <nl> if ( ! 
Member - > isAccessibleFrom ( FromContext ) ) <nl> return false ; <nl> class OverrideFilteringConsumer : public VisibleDeclConsumer { <nl> <nl> if ( TypeResolver ) { <nl> TypeResolver - > resolveDeclSignature ( VD ) ; <nl> - TypeResolver - > resolveAccessibility ( VD ) ; <nl> + TypeResolver - > resolveAccessControl ( VD ) ; <nl> } <nl> <nl> if ( VD - > isInvalid ( ) ) { <nl> mmm a / lib / AST / Module . cpp <nl> ppp b / lib / AST / Module . cpp <nl> void BuiltinUnit : : LookupCache : : lookupValue ( <nl> / * genericparams * / nullptr , <nl> const_cast < BuiltinUnit * > ( & M ) ) ; <nl> TAD - > setUnderlyingType ( Ty ) ; <nl> - TAD - > setAccessibility ( Accessibility : : Public ) ; <nl> + TAD - > setAccess ( Accessibility : : Public ) ; <nl> Entry = TAD ; <nl> } <nl> } <nl> ModuleDecl : : ModuleDecl ( Identifier name , ASTContext & ctx ) <nl> ctx . addDestructorCleanup ( * this ) ; <nl> setImplicit ( ) ; <nl> setInterfaceType ( ModuleType : : get ( this ) ) ; <nl> - setAccessibility ( Accessibility : : Public ) ; <nl> + setAccess ( Accessibility : : Public ) ; <nl> } <nl> <nl> void ModuleDecl : : addFile ( FileUnit & newFile ) { <nl> mmm a / lib / AST / ModuleNameLookup . cpp <nl> ppp b / lib / AST / ModuleNameLookup . cpp <nl> static void lookupInModule ( ModuleDecl * module , ModuleDecl : : AccessPathTy accessPa <nl> auto newEndIter = std : : remove_if ( localDecls . begin ( ) , localDecls . end ( ) , <nl> [ = ] ( ValueDecl * VD ) { <nl> if ( typeResolver ) { <nl> - typeResolver - > resolveAccessibility ( VD ) ; <nl> + typeResolver - > resolveAccessControl ( VD ) ; <nl> } <nl> - if ( ! VD - > hasAccessibility ( ) ) <nl> + if ( ! VD - > hasAccess ( ) ) <nl> return false ; <nl> return ! VD - > isAccessibleFrom ( moduleScopeContext ) ; <nl> } ) ; <nl> mmm a / lib / AST / NameLookup . cpp <nl> ppp b / lib / AST / NameLookup . cpp <nl> void AccessFilteringDeclConsumer : : foundDecl ( ValueDecl * D , <nl> DeclVisibilityKind reason ) { <nl> if ( D - > getASTContext ( ) . LangOpts . EnableAccessControl ) { <nl> if ( TypeResolver ) <nl> - TypeResolver - > resolveAccessibility ( D ) ; <nl> - if ( D - > isInvalid ( ) & & ! D - > hasAccessibility ( ) ) <nl> + TypeResolver - > resolveAccessControl ( D ) ; <nl> + if ( D - > isInvalid ( ) & & ! D - > hasAccess ( ) ) <nl> return ; <nl> if ( ! D - > isAccessibleFrom ( DC ) ) <nl> return ; <nl> UnqualifiedLookup : : UnqualifiedLookup ( DeclName Name , DeclContext * DC , <nl> if ( IsTypeLookup ) <nl> options | = NL_OnlyTypes ; <nl> if ( IgnoreAccessControl ) <nl> - options | = NL_IgnoreAccessibility ; <nl> + options | = NL_IgnoreAccessControl ; <nl> <nl> SmallVector < ValueDecl * , 4 > lookup ; <nl> dc - > lookupQualified ( lookupType , Name , options , TypeResolver , lookup ) ; <nl> UnqualifiedLookup : : UnqualifiedLookup ( DeclName Name , DeclContext * DC , <nl> if ( IsTypeLookup ) <nl> options | = NL_OnlyTypes ; <nl> if ( IgnoreAccessControl ) <nl> - options | = NL_IgnoreAccessibility ; <nl> + options | = NL_IgnoreAccessControl ; <nl> <nl> SmallVector < ValueDecl * , 4 > Lookup ; <nl> DC - > lookupQualified ( ExtendedType , Name , options , TypeResolver , Lookup ) ; <nl> void ClassDecl : : recordObjCMethod ( AbstractFunctionDecl * method ) { <nl> vec . 
push_back ( method ) ; <nl> } <nl> <nl> - static bool checkAccessibility ( const DeclContext * useDC , <nl> - const DeclContext * sourceDC , <nl> - Accessibility access ) { <nl> + static bool checkAccess ( const DeclContext * useDC , const DeclContext * sourceDC , <nl> + Accessibility access ) { <nl> if ( ! useDC ) <nl> return access > = Accessibility : : Public ; <nl> <nl> static bool checkAccessibility ( const DeclContext * useDC , <nl> } <nl> <nl> bool ValueDecl : : isAccessibleFrom ( const DeclContext * DC ) const { <nl> - return checkAccessibility ( DC , getDeclContext ( ) , getFormalAccess ( ) ) ; <nl> + return checkAccess ( DC , getDeclContext ( ) , getFormalAccess ( ) ) ; <nl> } <nl> <nl> bool AbstractStorageDecl : : isSetterAccessibleFrom ( const DeclContext * DC ) const { <nl> bool AbstractStorageDecl : : isSetterAccessibleFrom ( const DeclContext * DC ) const { <nl> if ( hasStorage ( ) & & ! isSettable ( nullptr ) ) <nl> return true ; <nl> <nl> - return checkAccessibility ( DC , getDeclContext ( ) , getSetterAccessibility ( ) ) ; <nl> + return checkAccess ( DC , getDeclContext ( ) , getSetterFormalAccess ( ) ) ; <nl> } <nl> <nl> bool DeclContext : : lookupQualified ( Type type , <nl> bool DeclContext : : lookupQualified ( Type type , <nl> <nl> auto & ctx = getASTContext ( ) ; <nl> if ( ! ctx . LangOpts . EnableAccessControl ) <nl> - options | = NL_IgnoreAccessibility ; <nl> + options | = NL_IgnoreAccessControl ; <nl> <nl> / / The set of nominal type declarations we should ( and have ) visited . <nl> SmallVector < NominalTypeDecl * , 4 > stack ; <nl> bool DeclContext : : lookupQualified ( Type type , <nl> } <nl> <nl> / / Check access . <nl> - if ( ! ( options & NL_IgnoreAccessibility ) ) <nl> + if ( ! ( options & NL_IgnoreAccessControl ) ) <nl> return decl - > isAccessibleFrom ( this ) ; <nl> <nl> return true ; <nl> mmm a / lib / AST / SwiftNameTranslation . cpp <nl> ppp b / lib / AST / SwiftNameTranslation . cpp <nl> isVisibleToObjC ( const ValueDecl * VD , Accessibility minRequiredAccess , <nl> bool checkParent ) { <nl> if ( ! ( VD - > isObjC ( ) | | VD - > getAttrs ( ) . hasAttribute < CDeclAttr > ( ) ) ) <nl> return false ; <nl> - if ( VD - > hasAccessibility ( ) & & VD - > getFormalAccess ( ) > = minRequiredAccess ) { <nl> + if ( VD - > hasAccess ( ) & & VD - > getFormalAccess ( ) > = minRequiredAccess ) { <nl> return true ; <nl> } else if ( checkParent ) { <nl> if ( auto ctor = dyn_cast < ConstructorDecl > ( VD ) ) { <nl> mmm a / lib / ClangImporter / ImportDecl . cpp <nl> ppp b / lib / ClangImporter / ImportDecl . cpp <nl> static bool isInSystemModule ( DeclContext * D ) { <nl> return cast < ClangModuleUnit > ( D - > getModuleScopeContext ( ) ) - > isSystemModule ( ) ; <nl> } <nl> <nl> - static Accessibility getOverridableAccessibility ( DeclContext * dc ) { <nl> + static Accessibility getOverridableAccessLevel ( DeclContext * dc ) { <nl> return ( dc - > getAsProtocolOrProtocolExtensionContext ( ) <nl> ? Accessibility : : Public : Accessibility : : Open ) ; <nl> } <nl> static Pattern * createTypedNamedPattern ( VarDecl * decl ) { <nl> static std : : pair < VarDecl * , PatternBindingDecl * > <nl> createVarWithPattern ( ASTContext & cxt , DeclContext * dc , Identifier name , Type ty , <nl> VarDecl : : Specifier specifier , bool isImplicit , <nl> - Accessibility accessibility , <nl> - Accessibility setterAccessibility ) { <nl> + Accessibility access , <nl> + Accessibility setterAccess ) { <nl> / / Create a variable to store the underlying value . 
<nl> auto var = new ( cxt ) VarDecl ( <nl> / * IsStatic * / false , <nl> createVarWithPattern ( ASTContext & cxt , DeclContext * dc , Identifier name , Type ty , <nl> if ( isImplicit ) <nl> var - > setImplicit ( ) ; <nl> var - > setInterfaceType ( ty ) ; <nl> - var - > setAccessibility ( accessibility ) ; <nl> - var - > setSetterAccessibility ( setterAccessibility ) ; <nl> + var - > setAccess ( access ) ; <nl> + var - > setSetterAccess ( setterAccess ) ; <nl> <nl> / / Create a pattern binding to describe the variable . <nl> Pattern * varPattern = createTypedNamedPattern ( var ) ; <nl> makeEnumRawValueConstructor ( ClangImporter : : Implementation & Impl , <nl> selfDecl , paramPL , <nl> / * GenericParams = * / nullptr , enumDecl ) ; <nl> ctorDecl - > setImplicit ( ) ; <nl> - ctorDecl - > setAccessibility ( Accessibility : : Public ) ; <nl> + ctorDecl - > setAccess ( Accessibility : : Public ) ; <nl> <nl> auto optEnumTy = OptionalType : : get ( enumTy ) ; <nl> <nl> static FuncDecl * makeEnumRawValueGetter ( ClangImporter : : Implementation & Impl , <nl> <nl> getterDecl - > setInterfaceType ( type ) ; <nl> <nl> - getterDecl - > setAccessibility ( Accessibility : : Public ) ; <nl> + getterDecl - > setAccess ( Accessibility : : Public ) ; <nl> <nl> rawValueDecl - > makeComputed ( SourceLoc ( ) , getterDecl , nullptr , nullptr , <nl> SourceLoc ( ) ) ; <nl> static FuncDecl * makeNewtypeBridgedRawValueGetter ( <nl> <nl> getterDecl - > setInterfaceType ( type ) ; <nl> <nl> - getterDecl - > setAccessibility ( Accessibility : : Public ) ; <nl> + getterDecl - > setAccess ( Accessibility : : Public ) ; <nl> <nl> computedVar - > makeComputed ( SourceLoc ( ) , getterDecl , nullptr , nullptr , <nl> SourceLoc ( ) ) ; <nl> static FuncDecl * makeFieldGetterDecl ( ClangImporter : : Implementation & Impl , <nl> / * AccessorKeywordLoc = * / SourceLoc ( ) , <nl> / * GenericParams = * / nullptr , params , <nl> TypeLoc : : withoutLoc ( getterType ) , importedDecl , clangNode ) ; <nl> - getterDecl - > setAccessibility ( Accessibility : : Public ) ; <nl> + getterDecl - > setAccess ( Accessibility : : Public ) ; <nl> <nl> auto type = ParameterList : : getFullInterfaceType ( getterType , params , C ) ; <nl> getterDecl - > setInterfaceType ( type ) ; <nl> static FuncDecl * makeFieldSetterDecl ( ClangImporter : : Implementation & Impl , <nl> auto type = ParameterList : : getFullInterfaceType ( voidTy , params , C ) ; <nl> setterDecl - > setInterfaceType ( type ) ; <nl> <nl> - setterDecl - > setAccessibility ( Accessibility : : Public ) ; <nl> + setterDecl - > setAccess ( Accessibility : : Public ) ; <nl> setterDecl - > setSelfAccessKind ( SelfAccessKind : : Mutating ) ; <nl> <nl> - <nl> return setterDecl ; <nl> } <nl> <nl> createDefaultConstructor ( ClangImporter : : Implementation & Impl , <nl> constructor - > setInterfaceType ( allocFnTy ) ; <nl> constructor - > setInitializerInterfaceType ( initFnTy ) ; <nl> <nl> - constructor - > setAccessibility ( Accessibility : : Public ) ; <nl> + constructor - > setAccess ( Accessibility : : Public ) ; <nl> <nl> / / Mark the constructor transparent so that we inline it away completely . <nl> constructor - > getAttrs ( ) . 
add ( new ( context ) TransparentAttr ( / * implicit * / true ) ) ; <nl> createValueConstructor ( ClangImporter : : Implementation & Impl , <nl> constructor - > setInterfaceType ( allocFnTy ) ; <nl> constructor - > setInitializerInterfaceType ( initFnTy ) ; <nl> <nl> - constructor - > setAccessibility ( Accessibility : : Public ) ; <nl> + constructor - > setAccess ( Accessibility : : Public ) ; <nl> <nl> / / Make the constructor transparent so we inline it away completely . <nl> constructor - > getAttrs ( ) . add ( new ( context ) TransparentAttr ( / * implicit * / true ) ) ; <nl> static void addSynthesizedTypealias ( NominalTypeDecl * nominal , Identifier name , <nl> nullptr , nominal ) ; <nl> typealias - > setUnderlyingType ( underlyingType ) ; <nl> typealias - > setEarlyAttrValidation ( true ) ; <nl> - typealias - > setAccessibility ( Accessibility : : Public ) ; <nl> + typealias - > setAccess ( Accessibility : : Public ) ; <nl> typealias - > setValidationStarted ( ) ; <nl> typealias - > setImplicit ( ) ; <nl> <nl> static void addSynthesizedTypealias ( NominalTypeDecl * nominal , Identifier name , <nl> / / / \ param underlyingType the type of the raw value <nl> / / / \ param synthesizedProtocolAttrs synthesized protocol attributes to add <nl> / / / \ param protocols the protocols to make this struct conform to <nl> - / / / \ param setterAccessibility the accessibility of the raw value ' s setter <nl> + / / / \ param setterAccess the access level of the raw value ' s setter <nl> / / / <nl> / / / This will perform most of the work involved in making a new Swift struct <nl> / / / be backed by a raw value . This will populate derived protocols and <nl> static void makeStructRawValued ( <nl> Type underlyingType , ArrayRef < KnownProtocolKind > synthesizedProtocolAttrs , <nl> ArrayRef < ProtocolDecl * > protocols , <nl> MakeStructRawValuedOptions options = getDefaultMakeStructRawValuedOptions ( ) , <nl> - Accessibility setterAccessibility = Accessibility : : Private ) { <nl> + Accessibility setterAccess = Accessibility : : Private ) { <nl> auto & cxt = Impl . SwiftContext ; <nl> addProtocolsToStruct ( Impl , structDecl , synthesizedProtocolAttrs , protocols ) ; <nl> <nl> static void makeStructRawValued ( <nl> specifier , <nl> options . contains ( MakeStructRawValuedFlags : : IsImplicit ) , <nl> Accessibility : : Public , <nl> - setterAccessibility ) ; <nl> + setterAccess ) ; <nl> <nl> structDecl - > setHasDelayedMembers ( ) ; <nl> <nl> static void makeStructRawValuedWithBridge ( <nl> SourceLoc ( ) , computedVarName , bridgedType , structDecl ) ; <nl> computedVar - > setInterfaceType ( bridgedType ) ; <nl> computedVar - > setImplicit ( ) ; <nl> - computedVar - > setAccessibility ( Accessibility : : Public ) ; <nl> - computedVar - > setSetterAccessibility ( Accessibility : : Private ) ; <nl> + computedVar - > setAccess ( Accessibility : : Public ) ; <nl> + computedVar - > setSetterAccess ( Accessibility : : Private ) ; <nl> <nl> / / Create the getter for the computed value variable . <nl> auto computedVarGetter = makeNewtypeBridgedRawValueGetter ( <nl> static FuncDecl * buildSubscriptGetterDecl ( ClangImporter : : Implementation & Impl , <nl> thunk - > setInterfaceType ( interfaceType ) ; <nl> thunk - > setGenericEnvironment ( dc - > getGenericEnvironmentOfContext ( ) ) ; <nl> <nl> - thunk - > setAccessibility ( getOverridableAccessibility ( dc ) ) ; <nl> + thunk - > setAccess ( getOverridableAccessLevel ( dc ) ) ; <nl> <nl> auto objcAttr = getter - > getAttrs ( ) . 
getAttribute < ObjCAttr > ( ) ; <nl> assert ( objcAttr ) ; <nl> static FuncDecl * buildSubscriptSetterDecl ( ClangImporter : : Implementation & Impl , <nl> thunk - > setInterfaceType ( interfaceType ) ; <nl> thunk - > setGenericEnvironment ( dc - > getGenericEnvironmentOfContext ( ) ) ; <nl> <nl> - thunk - > setAccessibility ( getOverridableAccessibility ( dc ) ) ; <nl> + thunk - > setAccess ( getOverridableAccessLevel ( dc ) ) ; <nl> <nl> auto objcAttr = setter - > getAttrs ( ) . getAttribute < ObjCAttr > ( ) ; <nl> assert ( objcAttr ) ; <nl> static bool addErrorDomain ( NominalTypeDecl * swiftDecl , <nl> / * IsStatic * / isStatic , VarDecl : : Specifier : : Var , / * IsCaptureList * / false , <nl> SourceLoc ( ) , C . Id_nsErrorDomain , stringTy , swiftDecl ) ; <nl> errorDomainPropertyDecl - > setInterfaceType ( stringTy ) ; <nl> - errorDomainPropertyDecl - > setAccessibility ( Accessibility : : Public ) ; <nl> + errorDomainPropertyDecl - > setAccess ( Accessibility : : Public ) ; <nl> <nl> swiftDecl - > addMember ( errorDomainPropertyDecl ) ; <nl> swiftDecl - > addMember ( getterDecl ) ; <nl> static bool addErrorDomain ( NominalTypeDecl * swiftDecl , <nl> <nl> getterDecl - > setImplicit ( ) ; <nl> getterDecl - > setStatic ( isStatic ) ; <nl> - getterDecl - > setAccessibility ( Accessibility : : Public ) ; <nl> + getterDecl - > setAccess ( Accessibility : : Public ) ; <nl> <nl> auto ret = new ( C ) ReturnStmt ( SourceLoc ( ) , domainDeclRef ) ; <nl> getterDecl - > setBody ( <nl> namespace { <nl> <nl> makeStructRawValued ( Impl , structDecl , underlyingType , <nl> { KnownProtocolKind : : RawRepresentable } , protocols , <nl> - options , <nl> - / * setterAccessibility = * / Accessibility : : Public ) ; <nl> + options , / * setterAccess = * / Accessibility : : Public ) ; <nl> <nl> result = structDecl ; <nl> break ; <nl> namespace { <nl> / / Create the wrapper struct . <nl> errorWrapper = new ( C ) StructDecl ( loc , name , loc , None , nullptr , dc ) ; <nl> errorWrapper - > computeType ( ) ; <nl> - errorWrapper - > setAccessibility ( Accessibility : : Public ) ; <nl> + errorWrapper - > setAccess ( Accessibility : : Public ) ; <nl> <nl> / / Add inheritance clause . <nl> TypeLoc inheritedTypes [ 1 ] = { <nl> namespace { <nl> loc , C . Id_nsError , nsErrorType , <nl> errorWrapper ) ; <nl> nsErrorProp - > setImplicit ( ) ; <nl> - nsErrorProp - > setAccessibility ( Accessibility : : Public ) ; <nl> + nsErrorProp - > setAccess ( Accessibility : : Public ) ; <nl> nsErrorProp - > setInterfaceType ( nsErrorType ) ; <nl> <nl> / / Create a pattern binding to describe the variable . <nl> namespace { <nl> SourceLoc ( ) , varName , underlyingType , <nl> enumDecl ) ; <nl> rawValue - > setImplicit ( ) ; <nl> - rawValue - > setAccessibility ( Accessibility : : Public ) ; <nl> - rawValue - > setSetterAccessibility ( Accessibility : : Private ) ; <nl> + rawValue - > setAccess ( Accessibility : : Public ) ; <nl> + rawValue - > setSetterAccess ( Accessibility : : Private ) ; <nl> rawValue - > setInterfaceType ( underlyingType ) ; <nl> <nl> / / Create a pattern binding to describe the variable . <nl> namespace { <nl> result - > setInterfaceType ( type ) ; <nl> <nl> / / Someday , maybe this will need to be ' open ' for C + + virtual methods . <nl> - result - > setAccessibility ( Accessibility : : Public ) ; <nl> + result - > setAccess ( Accessibility : : Public ) ; <nl> finishFuncDecl ( decl , result ) ; <nl> <nl> / / If this is a compatibility stub , mark it as such . 
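The ImportDecl.cpp hunks above are a mechanical rename: every declaration the importer synthesizes gets its access level stamped through setAccess (formerly setAccessibility) and setSetterAccess (formerly setSetterAccessibility). A condensed sketch of that recurring synthesis pattern follows; the VarDecl constructor arguments are abridged from the hunks above, not quoted verbatim:

    // Sketch of the pattern repeated throughout ImportDecl.cpp above;
    // constructor arguments are abridged -- see the hunks for the real calls.
    auto *var = new (ctx) VarDecl(/*IsStatic*/ false, VarDecl::Specifier::Var,
                                  /*IsCaptureList*/ false, SourceLoc(),
                                  name, type, dc);
    var->setImplicit();                           // compiler-generated member
    var->setInterfaceType(type);
    var->setAccess(Accessibility::Public);        // was setAccessibility(...)
    var->setSetterAccess(Accessibility::Private); // was setSetterAccessibility(...)

The one place the level is computed rather than hard-coded is getOverridableAccessLevel(dc) (formerly getOverridableAccessibility), which, per the hunk above, yields Public inside protocol or protocol-extension contexts and Open everywhere else.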
<nl> namespace { <nl> / * ThrowsLoc = * / SourceLoc ( ) , / * AccessorKeywordLoc = * / SourceLoc ( ) , <nl> / * GenericParams = * / nullptr , bodyParams , TypeLoc ( ) , dc , decl ) ; <nl> <nl> - result - > setAccessibility ( getOverridableAccessibility ( dc ) ) ; <nl> + result - > setAccess ( getOverridableAccessLevel ( dc ) ) ; <nl> <nl> auto resultTy = type - > castTo < FunctionType > ( ) - > getResult ( ) ; <nl> <nl> namespace { <nl> } <nl> <nl> auto result = Impl . createDeclWithClangNode < VarDecl > ( decl , <nl> - getOverridableAccessibility ( dc ) , <nl> + getOverridableAccessLevel ( dc ) , <nl> / * IsStatic * / decl - > isClassProperty ( ) , VarDecl : : Specifier : : Var , <nl> / * IsCaptureList * / false , Impl . importSourceLoc ( decl - > getLocation ( ) ) , <nl> name , dc - > mapTypeIntoContext ( type ) , dc ) ; <nl> Decl * SwiftDeclConverter : : importGlobalAsMethod ( <nl> result - > setInterfaceType ( interfaceType ) ; <nl> result - > setGenericEnvironment ( dc - > getGenericEnvironmentOfContext ( ) ) ; <nl> <nl> - result - > setAccessibility ( Accessibility : : Public ) ; <nl> + result - > setAccess ( Accessibility : : Public ) ; <nl> if ( selfIsInOut ) <nl> result - > setSelfAccessKind ( SelfAccessKind : : Mutating ) ; <nl> if ( selfIdx ) { <nl> SwiftDeclConverter : : importSubscript ( Decl * decl , <nl> auto bodyParams = getterThunk - > getParameterList ( 1 ) - > clone ( C ) ; <nl> DeclName name ( C , DeclBaseName : : createSubscript ( ) , { Identifier ( ) } ) ; <nl> auto subscript = Impl . createDeclWithClangNode < SubscriptDecl > ( <nl> - getter - > getClangNode ( ) , getOverridableAccessibility ( dc ) , name , <nl> + getter - > getClangNode ( ) , getOverridableAccessLevel ( dc ) , name , <nl> decl - > getLoc ( ) , bodyParams , decl - > getLoc ( ) , <nl> TypeLoc : : withoutLoc ( elementContextTy ) , dc , <nl> / * GenericParams = * / nullptr ) ; <nl> ClangImporter : : Implementation : : createConstant ( Identifier name , DeclContext * dc , <nl> TypeLoc : : withoutLoc ( type ) , dc ) ; <nl> func - > setStatic ( isStatic ) ; <nl> func - > setInterfaceType ( getterType ) ; <nl> - func - > setAccessibility ( getOverridableAccessibility ( dc ) ) ; <nl> + func - > setAccess ( getOverridableAccessLevel ( dc ) ) ; <nl> func - > setImplicit ( ) ; <nl> <nl> / / If we ' re not done type checking , build the getter body . <nl> mmm a / lib / ClangImporter / ImporterImpl . h <nl> ppp b / lib / ClangImporter / ImporterImpl . h <nl> class LLVM_LIBRARY_VISIBILITY ClangImporter : : Implementation <nl> auto D = : : new ( DeclPtr ) DeclTy ( std : : forward < Targs > ( Args ) . . . ) ; <nl> D - > setClangNode ( ClangN ) ; <nl> D - > setEarlyAttrValidation ( true ) ; <nl> - D - > setAccessibility ( access ) ; <nl> + D - > setAccess ( access ) ; <nl> if ( auto ASD = dyn_cast < AbstractStorageDecl > ( D ) ) <nl> - ASD - > setSetterAccessibility ( access ) ; <nl> + ASD - > setSetterAccess ( access ) ; <nl> / / All imported decls are constructed fully validated . <nl> D - > setValidationStarted ( ) ; <nl> if ( auto AFD = dyn_cast < AbstractFunctionDecl > ( static_cast < Decl * > ( D ) ) ) <nl> mmm a / lib / FrontendTool / FrontendTool . cpp <nl> ppp b / lib / FrontendTool / FrontendTool . cpp <nl> static bool dumpAPI ( ModuleDecl * Mod , StringRef OutDir ) { <nl> PrintOptions PO = PrintOptions : : printInterface ( ) ; <nl> PO . PrintOriginalSourceText = true ; <nl> PO . Indent = 2 ; <nl> - PO . PrintAccessibility = false ; <nl> + PO . PrintAccess = false ; <nl> PO . 
SkipUnderscoredStdlibProtocols = true ; <nl> SF - > print ( TempOS , PO ) ; <nl> if ( TempOS . str ( ) . trim ( ) . empty ( ) ) <nl> mmm a / lib / FrontendTool / ReferenceDependencies . cpp <nl> ppp b / lib / FrontendTool / ReferenceDependencies . cpp <nl> static void findNominalsAndOperators ( <nl> if ( ! VD ) <nl> continue ; <nl> <nl> - if ( VD - > hasAccessibility ( ) & & <nl> + if ( VD - > hasAccess ( ) & & <nl> VD - > getFormalAccess ( ) < = Accessibility : : FilePrivate ) { <nl> continue ; <nl> } <nl> bool swift : : emitReferenceDependencies ( DiagnosticEngine & diags , <nl> auto * NTD = ED - > getExtendedType ( ) - > getAnyNominal ( ) ; <nl> if ( ! NTD ) <nl> break ; <nl> - if ( NTD - > hasAccessibility ( ) & & <nl> + if ( NTD - > hasAccess ( ) & & <nl> NTD - > getFormalAccess ( ) < = Accessibility : : FilePrivate ) { <nl> break ; <nl> } <nl> bool swift : : emitReferenceDependencies ( DiagnosticEngine & diags , <nl> auto * NTD = cast < NominalTypeDecl > ( D ) ; <nl> if ( ! NTD - > hasName ( ) ) <nl> break ; <nl> - if ( NTD - > hasAccessibility ( ) & & <nl> + if ( NTD - > hasAccess ( ) & & <nl> NTD - > getFormalAccess ( ) < = Accessibility : : FilePrivate ) { <nl> break ; <nl> } <nl> bool swift : : emitReferenceDependencies ( DiagnosticEngine & diags , <nl> auto * VD = cast < ValueDecl > ( D ) ; <nl> if ( ! VD - > hasName ( ) ) <nl> break ; <nl> - if ( VD - > hasAccessibility ( ) & & <nl> + if ( VD - > hasAccess ( ) & & <nl> VD - > getFormalAccess ( ) < = Accessibility : : FilePrivate ) { <nl> break ; <nl> } <nl> bool swift : : emitReferenceDependencies ( DiagnosticEngine & diags , <nl> <nl> for ( auto & entry : sortedMembers ) { <nl> assert ( entry . first . first ! = nullptr ) ; <nl> - if ( entry . first . first - > hasAccessibility ( ) & & <nl> + if ( entry . first . first - > hasAccess ( ) & & <nl> entry . first . first - > getFormalAccess ( ) < = Accessibility : : FilePrivate ) <nl> continue ; <nl> <nl> bool swift : : emitReferenceDependencies ( DiagnosticEngine & diags , <nl> isCascading | = i - > second ; <nl> } <nl> <nl> - if ( i - > first . first - > hasAccessibility ( ) & & <nl> + if ( i - > first . first - > hasAccess ( ) & & <nl> i - > first . first - > getFormalAccess ( ) < = Accessibility : : FilePrivate ) <nl> continue ; <nl> <nl> mmm a / lib / IDE / CodeCompletion . cpp <nl> ppp b / lib / IDE / CodeCompletion . cpp <nl> class CompletionLookup final : public swift : : VisibleDeclConsumer { <nl> <nl> void addVarDeclRef ( const VarDecl * VD , DeclVisibilityKind Reason ) { <nl> if ( ! VD - > hasName ( ) | | <nl> - ( VD - > hasAccessibility ( ) & & ! VD - > isAccessibleFrom ( CurrDeclContext ) ) | | <nl> + ( VD - > hasAccess ( ) & & ! VD - > isAccessibleFrom ( CurrDeclContext ) ) | | <nl> shouldHideDeclFromCompletionResults ( VD ) ) <nl> return ; <nl> <nl> class CompletionLookup final : public swift : : VisibleDeclConsumer { <nl> DeclVisibilityKind Reason , <nl> bool HasTypeContext ) { <nl> if ( ! EED - > hasName ( ) | | <nl> - ( EED - > hasAccessibility ( ) & & ! EED - > isAccessibleFrom ( CurrDeclContext ) ) | | <nl> + ( EED - > hasAccess ( ) & & ! EED - > isAccessibleFrom ( CurrDeclContext ) ) | | <nl> shouldHideDeclFromCompletionResults ( EED ) ) <nl> return ; <nl> <nl> mmm a / lib / IDE / CommentConversion . cpp <nl> ppp b / lib / IDE / CommentConversion . cpp <nl> void CommentToXMLConverter : : visitDocComment ( const DocComment * DC ) { <nl> <nl> { <nl> PrintOptions PO = PrintOptions : : printInterface ( ) ; <nl> - PO . PrintAccessibility = false ; <nl> - PO . 
AccessibilityFilter = Accessibility : : Private ; <nl> + PO . PrintAccess = false ; <nl> + PO . AccessFilter = Accessibility : : Private ; <nl> PO . PrintDocumentationComments = false ; <nl> PO . TypeDefinitions = false ; <nl> PO . VarInitializers = false ; <nl> mmm a / lib / IDE / IDETypeChecking . cpp <nl> ppp b / lib / IDE / IDETypeChecking . cpp <nl> PrintOptions PrintOptions : : printTypeInterface ( Type T ) { <nl> <nl> PrintOptions PrintOptions : : printDocInterface ( ) { <nl> PrintOptions result = PrintOptions : : printModuleInterface ( ) ; <nl> - result . PrintAccessibility = false ; <nl> + result . PrintAccess = false ; <nl> result . SkipUnavailable = false ; <nl> result . ExcludeAttrList . push_back ( DAK_Available ) ; <nl> result . ArgAndParamPrinting = <nl> - PrintOptions : : ArgAndParamPrintingMode : : BothAlways ; <nl> + PrintOptions : : ArgAndParamPrintingMode : : BothAlways ; <nl> result . PrintDocumentationComments = false ; <nl> result . PrintRegularClangComments = false ; <nl> - result . PrintAccessibility = false ; <nl> result . PrintFunctionRepresentationAttrs = false ; <nl> return result ; <nl> } <nl> mmm a / lib / IDE / ModuleInterfacePrinting . cpp <nl> ppp b / lib / IDE / ModuleInterfacePrinting . cpp <nl> void swift : : ide : : printSubmoduleInterface ( <nl> <nl> / / Skip declarations that are not accessible . <nl> if ( auto * VD = dyn_cast < ValueDecl > ( D ) ) { <nl> - if ( Options . AccessibilityFilter > Accessibility : : Private & & <nl> - VD - > hasAccessibility ( ) & & <nl> - VD - > getFormalAccess ( ) < Options . AccessibilityFilter ) <nl> + if ( Options . AccessFilter > Accessibility : : Private & & <nl> + VD - > hasAccess ( ) & & <nl> + VD - > getFormalAccess ( ) < Options . AccessFilter ) <nl> continue ; <nl> } <nl> <nl> mmm a / lib / IDE / Refactoring . cpp <nl> ppp b / lib / IDE / Refactoring . cpp <nl> swift : : ide : : collectRenameAvailabilityInfo ( const ValueDecl * VD , <nl> AvailKind = RenameAvailableKind : : Unavailable_has_no_location ; <nl> } else if ( ! VD - > hasName ( ) ) { <nl> AvailKind = RenameAvailableKind : : Unavailable_has_no_name ; <nl> - } else if ( ! VD - > hasAccessibility ( ) ) { <nl> + } else if ( ! VD - > hasAccess ( ) ) { <nl> return llvm : : makeArrayRef ( Scratch ) ; <nl> } <nl> <nl> mmm a / lib / IRGen / GenClass . cpp <nl> ppp b / lib / IRGen / GenClass . cpp <nl> ClassDecl * IRGenModule : : getObjCRuntimeBaseClass ( Identifier name , <nl> SwiftRootClass - > getAttrs ( ) . add ( ObjCAttr : : createNullary ( Context , objcName , <nl> / * isNameImplicit = * / true ) ) ; <nl> SwiftRootClass - > setImplicit ( ) ; <nl> - SwiftRootClass - > setAccessibility ( Accessibility : : Open ) ; <nl> + SwiftRootClass - > setAccess ( Accessibility : : Open ) ; <nl> <nl> SwiftRootClasses . insert ( { name , SwiftRootClass } ) ; <nl> return SwiftRootClass ; <nl> mmm a / lib / PrintAsObjC / PrintAsObjC . cpp <nl> ppp b / lib / PrintAsObjC / PrintAsObjC . cpp <nl> class ObjCPrinter : private DeclVisitor < ObjCPrinter > , <nl> ASTContext & ctx = M . getASTContext ( ) ; <nl> bool isSettable = VD - > isSettable ( nullptr ) ; <nl> if ( isSettable & & ctx . LangOpts . EnableAccessControl ) <nl> - isSettable = ( VD - > getSetterAccessibility ( ) > = minRequiredAccess ) ; <nl> + isSettable = ( VD - > getSetterFormalAccess ( ) > = minRequiredAccess ) ; <nl> if ( ! isSettable ) <nl> os < < " , readonly " ; <nl> <nl> mmm a / lib / SIL / SIL . cpp <nl> ppp b / lib / SIL / SIL . 
cpp <nl> FormalLinkage swift : : getDeclLinkage ( const ValueDecl * D ) { <nl> if ( isa < ClangModuleUnit > ( fileContext ) ) <nl> return FormalLinkage : : PublicNonUnique ; <nl> <nl> - if ( ! D - > hasAccessibility ( ) ) { <nl> + if ( ! D - > hasAccess ( ) ) { <nl> assert ( D - > getDeclContext ( ) - > isLocalContext ( ) ) ; <nl> return FormalLinkage : : Private ; <nl> } <nl> mmm a / lib / SILOptimizer / IPO / LetPropertiesOpts . cpp <nl> ppp b / lib / SILOptimizer / IPO / LetPropertiesOpts . cpp <nl> static bool isAssignableExternally ( VarDecl * Property , SILModule * Module ) { <nl> / / it is a whole module compilation . In this case , no external initializer <nl> / / may exist . <nl> for ( auto SP : Ty - > getStoredProperties ( ) ) { <nl> - auto storedPropertyAccessibility = SP - > getEffectiveAccess ( ) ; <nl> - if ( storedPropertyAccessibility < = Accessibility : : FilePrivate | | <nl> - ( storedPropertyAccessibility < = Accessibility : : Internal & & <nl> + auto storedPropertyAccess = SP - > getEffectiveAccess ( ) ; <nl> + if ( storedPropertyAccess < = Accessibility : : FilePrivate | | <nl> + ( storedPropertyAccess < = Accessibility : : Internal & & <nl> Module - > isWholeModule ( ) ) ) { <nl> DEBUG ( llvm : : dbgs ( ) < < " Property " < < * Property <nl> < < " cannot be set externally \ n " ) ; <nl> mmm a / lib / SILOptimizer / Transforms / SpeculativeDevirtualizer . cpp <nl> ppp b / lib / SILOptimizer / Transforms / SpeculativeDevirtualizer . cpp <nl> static bool isDefaultCaseKnown ( ClassHierarchyAnalysis * CHA , <nl> if ( ! CD - > isChildContextOf ( DC ) ) <nl> return false ; <nl> <nl> - if ( ! CD - > hasAccessibility ( ) ) <nl> + if ( ! CD - > hasAccess ( ) ) <nl> return false ; <nl> <nl> / / Only consider ' private ' members , unless we are in whole - module compilation . <nl> mmm a / lib / SILOptimizer / Utils / Devirtualize . cpp <nl> ppp b / lib / SILOptimizer / Utils / Devirtualize . cpp <nl> static bool isKnownFinalClass ( ClassDecl * CD , SILModule & M , <nl> if ( ! CD - > isChildContextOf ( DC ) ) <nl> return false ; <nl> <nl> - if ( ! CD - > hasAccessibility ( ) ) <nl> + if ( ! CD - > hasAccess ( ) ) <nl> return false ; <nl> <nl> / / Only consider ' private ' members , unless we are in whole - module compilation . <nl> mmm a / lib / SILOptimizer / Utils / Local . cpp <nl> ppp b / lib / SILOptimizer / Utils / Local . cpp <nl> bool swift : : calleesAreStaticallyKnowable ( SILModule & M , SILDeclRef Decl ) { <nl> if ( AFD - > isDynamic ( ) ) <nl> return false ; <nl> <nl> - if ( ! AFD - > hasAccessibility ( ) ) <nl> + if ( ! AFD - > hasAccess ( ) ) <nl> return false ; <nl> <nl> / / Only consider ' private ' members , unless we are in whole - module compilation . <nl> mmm a / lib / Sema / CSDiag . cpp <nl> ppp b / lib / Sema / CSDiag . cpp <nl> void CalleeCandidateInfo : : filterList ( ClosenessPredicate predicate ) { <nl> <nl> / / Likewise , if the candidate is inaccessible from the scope it is being <nl> / / accessed from , mark it as inaccessible or a general mismatch . <nl> - if ( VD - > hasAccessibility ( ) & & ! VD - > isAccessibleFrom ( CS . DC ) ) { <nl> + if ( VD - > hasAccess ( ) & & ! VD - > isAccessibleFrom ( CS . DC ) ) { <nl> / / If this was an exact match , downgrade it to inaccessible , so that <nl> / / accessible decls that are also an exact match will take precedence . 
<nl> / / Otherwise consider it to be a general mismatch so we only list it in <nl> void CalleeCandidateInfo : : collectCalleeCandidates ( Expr * fn , <nl> / / TODO : figure out right value for isKnownPrivate <nl> if ( instanceType - > mayHaveMembers ( ) ) { <nl> auto ctors = CS . TC . lookupConstructors ( <nl> - CS . DC , instanceType , NameLookupFlags : : IgnoreAccessibility ) ; <nl> + CS . DC , instanceType , NameLookupFlags : : IgnoreAccessControl ) ; <nl> for ( auto ctor : ctors ) <nl> if ( ctor . getValueDecl ( ) - > hasInterfaceType ( ) ) <nl> candidates . push_back ( { ctor . getValueDecl ( ) , 1 } ) ; <nl> mmm a / lib / Sema / CSSimplify . cpp <nl> ppp b / lib / Sema / CSSimplify . cpp <nl> performMemberLookup ( ConstraintKind constraintKind , DeclName memberName , <nl> <nl> / / Ignore accessibility so we get candidates that might have been missed <nl> / / before . <nl> - lookupOptions | = NameLookupFlags : : IgnoreAccessibility ; <nl> + lookupOptions | = NameLookupFlags : : IgnoreAccessControl ; <nl> / / This is only used for diagnostics , so always use KnownPrivate . <nl> lookupOptions | = NameLookupFlags : : KnownPrivate ; <nl> <nl> mmm a / lib / Sema / CodeSynthesis . cpp <nl> ppp b / lib / Sema / CodeSynthesis . cpp <nl> void TypeChecker : : completePropertyBehaviorStorage ( VarDecl * VD , <nl> if ( VD - > getDeclContext ( ) - > getAsClassOrClassExtensionContext ( ) ) <nl> makeFinal ( Context , Storage ) ; <nl> Storage - > setImplicit ( ) ; <nl> - Storage - > setAccessibility ( Accessibility : : Private ) ; <nl> - Storage - > setSetterAccessibility ( Accessibility : : Private ) ; <nl> + Storage - > setAccess ( Accessibility : : Private ) ; <nl> + Storage - > setSetterAccess ( Accessibility : : Private ) ; <nl> <nl> addMemberToContextIfNeeded ( Storage , DC ) ; <nl> <nl> void TypeChecker : : completePropertyBehaviorParameter ( VarDecl * VD , <nl> if ( DC - > getAsClassOrClassExtensionContext ( ) ) <nl> makeFinal ( Context , Parameter ) ; <nl> Parameter - > setImplicit ( ) ; <nl> - Parameter - > setAccessibility ( Accessibility : : Private ) ; <nl> + Parameter - > setAccess ( Accessibility : : Private ) ; <nl> <nl> / / Recontextualize any closure declcontexts nested in the initializer to <nl> / / realize that they are in the parameter function . <nl> void TypeChecker : : completeLazyVarImplementation ( VarDecl * VD ) { <nl> if ( VD - > getDeclContext ( ) - > getAsClassOrClassExtensionContext ( ) ) <nl> makeFinal ( Context , Storage ) ; <nl> Storage - > setImplicit ( ) ; <nl> - Storage - > setAccessibility ( Accessibility : : Private ) ; <nl> - Storage - > setSetterAccessibility ( Accessibility : : Private ) ; <nl> + Storage - > setAccess ( Accessibility : : Private ) ; <nl> + Storage - > setSetterAccess ( Accessibility : : Private ) ; <nl> } <nl> <nl> / / / Consider adding a materializeForSet accessor to the given storage <nl> void swift : : maybeAddAccessorsToVariable ( VarDecl * var , TypeChecker & TC ) { <nl> if ( mightBeMutating & & valueProp - > getGetter ( ) - > isMutating ( ) ) <nl> getter - > setSelfAccessKind ( SelfAccessKind : : Mutating ) ; <nl> <nl> - getter - > setAccessibility ( var - > getFormalAccess ( ) ) ; <nl> - <nl> + getter - > setAccess ( var - > getFormalAccess ( ) ) ; <nl> + <nl> / / Make a setter if the behavior property has one . 
<nl> if ( auto valueSetter = valueProp - > getSetter ( ) ) { <nl> ParamDecl * newValueParam = nullptr ; <nl> void swift : : maybeAddAccessorsToVariable ( VarDecl * var , TypeChecker & TC ) { <nl> if ( mightBeMutating & & valueSetter - > isMutating ( ) ) <nl> setter - > setSelfAccessKind ( SelfAccessKind : : Mutating ) ; <nl> / / TODO : max of property and implementation setter visibility ? <nl> - setter - > setAccessibility ( var - > getFormalAccess ( ) ) ; <nl> + setter - > setAccess ( var - > getFormalAccess ( ) ) ; <nl> } <nl> } else { <nl> / / Even if we couldn ' t find a value property , still make up a stub <nl> / / getter and setter , so that subsequent diagnostics make sense for a <nl> / / computed - ish property . <nl> getter = createGetterPrototype ( var , TC ) ; <nl> - getter - > setAccessibility ( var - > getFormalAccess ( ) ) ; <nl> + getter - > setAccess ( var - > getFormalAccess ( ) ) ; <nl> ParamDecl * newValueParam = nullptr ; <nl> setter = createSetterPrototype ( var , newValueParam , TC ) ; <nl> setter - > setSelfAccessKind ( SelfAccessKind : : NonMutating ) ; <nl> - setter - > setAccessibility ( var - > getFormalAccess ( ) ) ; <nl> + setter - > setAccess ( var - > getFormalAccess ( ) ) ; <nl> } <nl> <nl> var - > makeComputed ( SourceLoc ( ) , getter , setter , nullptr , SourceLoc ( ) ) ; <nl> void swift : : maybeAddAccessorsToVariable ( VarDecl * var , TypeChecker & TC ) { <nl> / / lazy getters are mutating on an enclosing value type . <nl> if ( ! dc - > getAsClassOrClassExtensionContext ( ) ) <nl> getter - > setSelfAccessKind ( SelfAccessKind : : Mutating ) ; <nl> - getter - > setAccessibility ( var - > getFormalAccess ( ) ) ; <nl> + getter - > setAccess ( var - > getFormalAccess ( ) ) ; <nl> <nl> ParamDecl * newValueParam = nullptr ; <nl> auto * setter = createSetterPrototype ( var , newValueParam , TC ) ; <nl> ConstructorDecl * swift : : createImplicitConstructor ( TypeChecker & tc , <nl> <nl> / / Mark implicit . <nl> ctor - > setImplicit ( ) ; <nl> - ctor - > setAccessibility ( accessLevel ) ; <nl> + ctor - > setAccess ( accessLevel ) ; <nl> <nl> if ( ICK = = ImplicitConstructorKind : : Memberwise ) <nl> ctor - > setIsMemberwiseInitializer ( ) ; <nl> swift : : createDesignatedInitOverride ( TypeChecker & tc , <nl> Accessibility access = classDecl - > getFormalAccess ( ) ; <nl> access = std : : max ( access , Accessibility : : Internal ) ; <nl> access = std : : min ( access , superclassCtor - > getFormalAccess ( ) ) ; <nl> - ctor - > setAccessibility ( access ) ; <nl> + ctor - > setAccess ( access ) ; <nl> <nl> / / Make sure the constructor is only as available as its superclass ' s <nl> / / constructor . <nl> mmm a / lib / Sema / DerivedConformanceCodable . cpp <nl> ppp b / lib / Sema / DerivedConformanceCodable . cpp <nl> static EnumDecl * synthesizeCodingKeysEnum ( TypeChecker & tc , <nl> auto * enumDecl = new ( C ) EnumDecl ( SourceLoc ( ) , C . Id_CodingKeys , SourceLoc ( ) , <nl> inherited , nullptr , target ) ; <nl> enumDecl - > setImplicit ( ) ; <nl> - enumDecl - > setAccessibility ( Accessibility : : Private ) ; <nl> + enumDecl - > setAccess ( Accessibility : : Private ) ; <nl> <nl> / / For classes which inherit from something Encodable or Decodable , we <nl> / / provide case ` super ` as the first key ( to be used in encoding super ) . 
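Before the derived-conformance hunks that follow, note the access pattern they share: the synthesized CodingKeys enum above is deliberately private, while each derived requirement below is clamped to at least internal, since a derived encode(to:) or == may need to be referenced from outside the nominal's own declaration. A minimal sketch of that clamp; clampDerivedAccess is an illustrative name introduced here, not a helper that exists in the compiler, which instead inlines the std::max expression at every call site:

    #include <algorithm>  // std::max

    // Illustrative only: the hunks below write this std::max inline.
    static Accessibility clampDerivedAccess(const ValueDecl *target) {
      // Never drop below 'internal', so the derived member stays callable
      // from the file that triggered the derivation.
      return std::max(target->getFormalAccess(), Accessibility::Internal);
    }

    // e.g. encodeDecl->setAccess(clampDerivedAccess(target));
    // The diff itself spells this setAccess(std::max(target->getFormalAccess(),
    // Accessibility::Internal)), renamed from setAccessibility.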
<nl> static FuncDecl * deriveEncodable_encode ( TypeChecker & tc , Decl * parentDecl , <nl> } <nl> <nl> encodeDecl - > setInterfaceType ( interfaceType ) ; <nl> - encodeDecl - > setAccessibility ( std : : max ( target - > getFormalAccess ( ) , <nl> - Accessibility : : Internal ) ) ; <nl> + encodeDecl - > setAccess ( std : : max ( target - > getFormalAccess ( ) , <nl> + Accessibility : : Internal ) ) ; <nl> <nl> / / If the type was not imported , the derived conformance is either from the <nl> / / type itself or an extension , in which case we will emit the declaration <nl> static ValueDecl * deriveDecodable_init ( TypeChecker & tc , Decl * parentDecl , <nl> <nl> initDecl - > setInterfaceType ( interfaceType ) ; <nl> initDecl - > setInitializerInterfaceType ( initializerType ) ; <nl> - initDecl - > setAccessibility ( std : : max ( target - > getFormalAccess ( ) , <nl> - Accessibility : : Internal ) ) ; <nl> + initDecl - > setAccess ( std : : max ( target - > getFormalAccess ( ) , <nl> + Accessibility : : Internal ) ) ; <nl> <nl> / / If the type was not imported , the derived conformance is either from the <nl> / / type itself or an extension , in which case we will emit the declaration <nl> static bool canSynthesize ( TypeChecker & tc , NominalTypeDecl * target , <nl> auto accessScope = initializer - > getFormalAccessScope ( target ) ; <nl> tc . diagnose ( initializer , diag : : decodable_inaccessible_super_init_here , <nl> requirement - > getFullName ( ) , memberName , <nl> - accessScope . accessibilityForDiagnostics ( ) ) ; <nl> + accessScope . accessLevelForDiagnostics ( ) ) ; <nl> return false ; <nl> } else if ( initializer - > getFailability ( ) ! = OTK_None ) { <nl> / / We can ' t call super . init ( ) if it ' s failable , since init ( from : ) <nl> mmm a / lib / Sema / DerivedConformanceCodingKey . cpp <nl> ppp b / lib / Sema / DerivedConformanceCodingKey . cpp <nl> static ValueDecl * deriveInitDecl ( TypeChecker & tc , Decl * parentDecl , <nl> } <nl> initDecl - > setInterfaceType ( allocIfaceType ) ; <nl> initDecl - > setInitializerInterfaceType ( initIfaceType ) ; <nl> - initDecl - > setAccessibility ( std : : max ( Accessibility : : Internal , <nl> - enumDecl - > getFormalAccess ( ) ) ) ; <nl> + initDecl - > setAccess ( std : : max ( Accessibility : : Internal , <nl> + enumDecl - > getFormalAccess ( ) ) ) ; <nl> <nl> / / If the enum was not imported , the derived conformance is either from the <nl> / / enum itself or an extension , in which case we will emit the declaration <nl> mmm a / lib / Sema / DerivedConformanceEquatableHashable . cpp <nl> ppp b / lib / Sema / DerivedConformanceEquatableHashable . cpp <nl> deriveEquatable_enum_eq ( TypeChecker & tc , Decl * parentDecl , EnumDecl * enumDecl ) { <nl> <nl> / / Since we can ' t insert the = = operator into the same FileUnit as the enum <nl> / / itself , we have to give it at least internal access . 
<nl> - eqDecl - > setAccessibility ( std : : max ( enumDecl - > getFormalAccess ( ) , <nl> - Accessibility : : Internal ) ) ; <nl> + eqDecl - > setAccess ( std : : max ( enumDecl - > getFormalAccess ( ) , <nl> + Accessibility : : Internal ) ) ; <nl> <nl> / / If the enum was not imported , the derived conformance is either from the <nl> / / enum itself or an extension , in which case we will emit the declaration <nl> deriveHashable_enum_hashValue ( TypeChecker & tc , Decl * parentDecl , <nl> AnyFunctionType : : ExtInfo ( ) ) ; <nl> <nl> getterDecl - > setInterfaceType ( interfaceType ) ; <nl> - getterDecl - > setAccessibility ( std : : max ( Accessibility : : Internal , <nl> - enumDecl - > getFormalAccess ( ) ) ) ; <nl> + getterDecl - > setAccess ( std : : max ( Accessibility : : Internal , <nl> + enumDecl - > getFormalAccess ( ) ) ) ; <nl> <nl> / / If the enum was not imported , the derived conformance is either from the <nl> / / enum itself or an extension , in which case we will emit the declaration <nl> deriveHashable_enum_hashValue ( TypeChecker & tc , Decl * parentDecl , <nl> hashValueDecl - > setInterfaceType ( intType ) ; <nl> hashValueDecl - > makeComputed ( SourceLoc ( ) , getterDecl , <nl> nullptr , nullptr , SourceLoc ( ) ) ; <nl> - hashValueDecl - > setAccessibility ( getterDecl - > getFormalAccess ( ) ) ; <nl> + hashValueDecl - > setAccess ( getterDecl - > getFormalAccess ( ) ) ; <nl> <nl> Pattern * hashValuePat = new ( C ) NamedPattern ( hashValueDecl , / * implicit * / true ) ; <nl> hashValuePat - > setType ( intType ) ; <nl> mmm a / lib / Sema / DerivedConformanceRawRepresentable . cpp <nl> ppp b / lib / Sema / DerivedConformanceRawRepresentable . cpp <nl> static ConstructorDecl * deriveRawRepresentable_init ( TypeChecker & tc , <nl> } <nl> initDecl - > setInterfaceType ( allocIfaceType ) ; <nl> initDecl - > setInitializerInterfaceType ( initIfaceType ) ; <nl> - initDecl - > setAccessibility ( std : : max ( Accessibility : : Internal , <nl> - enumDecl - > getFormalAccess ( ) ) ) ; <nl> + initDecl - > setAccess ( std : : max ( Accessibility : : Internal , <nl> + enumDecl - > getFormalAccess ( ) ) ) ; <nl> <nl> / / If the enum was not imported , the derived conformance is either from the <nl> / / enum itself or an extension , in which case we will emit the declaration <nl> mmm a / lib / Sema / DerivedConformances . cpp <nl> ppp b / lib / Sema / DerivedConformances . 
cpp <nl> FuncDecl * DerivedConformance : : declareDerivedPropertyGetter ( TypeChecker & tc , <nl> interfaceType = FunctionType : : get ( { selfParam } , interfaceType , <nl> FunctionType : : ExtInfo ( ) ) ; <nl> getterDecl - > setInterfaceType ( interfaceType ) ; <nl> - getterDecl - > setAccessibility ( std : : max ( typeDecl - > getFormalAccess ( ) , <nl> - Accessibility : : Internal ) ) ; <nl> + getterDecl - > setAccess ( std : : max ( typeDecl - > getFormalAccess ( ) , <nl> + Accessibility : : Internal ) ) ; <nl> <nl> / / If the enum was not imported , the derived conformance is either from the <nl> / / enum itself or an extension , in which case we will emit the declaration <nl> DerivedConformance : : declareDerivedReadOnlyProperty ( TypeChecker & tc , <nl> propDecl - > setImplicit ( ) ; <nl> propDecl - > makeComputed ( SourceLoc ( ) , getterDecl , nullptr , nullptr , <nl> SourceLoc ( ) ) ; <nl> - propDecl - > setAccessibility ( getterDecl - > getFormalAccess ( ) ) ; <nl> + propDecl - > setAccess ( getterDecl - > getFormalAccess ( ) ) ; <nl> propDecl - > setInterfaceType ( propertyInterfaceType ) ; <nl> <nl> / / If this is supposed to be a final property , mark it as such . <nl> mmm a / lib / Sema / MiscDiagnostics . cpp <nl> ppp b / lib / Sema / MiscDiagnostics . cpp <nl> void swift : : performStmtDiagnostics ( TypeChecker & TC , const Stmt * S ) { <nl> / / Utility functions <nl> / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> <nl> - void swift : : fixItAccessibility ( InFlightDiagnostic & diag , ValueDecl * VD , <nl> - Accessibility desiredAccess , bool isForSetter ) { <nl> + void swift : : fixItAccess ( InFlightDiagnostic & diag , ValueDecl * VD , <nl> + Accessibility desiredAccess , bool isForSetter ) { <nl> StringRef fixItString ; <nl> switch ( desiredAccess ) { <nl> case Accessibility : : Private : fixItString = " private " ; break ; <nl> void swift : : fixItAccessibility ( InFlightDiagnostic & diag , ValueDecl * VD , <nl> AbstractAccessibilityAttr * attr ; <nl> if ( isForSetter ) { <nl> attr = attrs . getAttribute < SetterAccessibilityAttr > ( ) ; <nl> - cast < AbstractStorageDecl > ( VD ) - > overwriteSetterAccessibility ( desiredAccess ) ; <nl> + cast < AbstractStorageDecl > ( VD ) - > overwriteSetterAccess ( desiredAccess ) ; <nl> } else { <nl> attr = attrs . getAttribute < AccessibilityAttr > ( ) ; <nl> - VD - > overwriteAccessibility ( desiredAccess ) ; <nl> + VD - > overwriteAccess ( desiredAccess ) ; <nl> <nl> if ( auto * ASD = dyn_cast < AbstractStorageDecl > ( VD ) ) { <nl> if ( auto * getter = ASD - > getGetter ( ) ) <nl> - getter - > overwriteAccessibility ( desiredAccess ) ; <nl> + getter - > overwriteAccess ( desiredAccess ) ; <nl> <nl> if ( auto * setterAttr = attrs . getAttribute < SetterAccessibilityAttr > ( ) ) { <nl> if ( setterAttr - > getAccess ( ) > desiredAccess ) <nl> - fixItAccessibility ( diag , VD , desiredAccess , true ) ; <nl> + fixItAccess ( diag , VD , desiredAccess , true ) ; <nl> } else { <nl> - ASD - > overwriteSetterAccessibility ( desiredAccess ) ; <nl> + ASD - > overwriteSetterAccess ( desiredAccess ) ; <nl> } <nl> } <nl> } <nl> mmm a / lib / Sema / MiscDiagnostics . h <nl> ppp b / lib / Sema / MiscDiagnostics . h <nl> void performTopLevelDeclDiagnostics ( TypeChecker & TC , TopLevelCodeDecl * TLCD ) ; <nl> / / / Emit a fix - it to set the accessibility of \ p VD to \ p desiredAccess . <nl> / / / <nl> / / / This actually updates \ p VD as well . 
<nl> - void fixItAccessibility ( InFlightDiagnostic & diag , ValueDecl * VD , <nl> - Accessibility desiredAccess , bool isForSetter = false ) ; <nl> + void fixItAccess ( InFlightDiagnostic & diag , ValueDecl * VD , <nl> + Accessibility desiredAccess , bool isForSetter = false ) ; <nl> <nl> / / / Emit fix - its to correct the argument labels in \ p expr , which is the <nl> / / / argument tuple or single argument of a call . <nl> mmm a / lib / Sema / ResilienceDiagnostics . cpp <nl> ppp b / lib / Sema / ResilienceDiagnostics . cpp <nl> bool TypeChecker : : diagnoseInlineableDeclRef ( SourceLoc loc , <nl> <nl> diagnose ( loc , diag : : resilience_decl_unavailable , <nl> D - > getDescriptiveKind ( ) , D - > getFullName ( ) , <nl> - D - > getFormalAccessScope ( ) . accessibilityForDiagnostics ( ) , <nl> + D - > getFormalAccessScope ( ) . accessLevelForDiagnostics ( ) , <nl> getFragileFunctionKind ( DC ) ) ; <nl> <nl> bool isDefaultArgument = false ; <nl> mmm a / lib / Sema / TypeCheckAttr . cpp <nl> ppp b / lib / Sema / TypeCheckAttr . cpp <nl> void AttributeChecker : : visitAccessibilityAttr ( AccessibilityAttr * attr ) { <nl> } <nl> <nl> } else if ( auto extension = dyn_cast < ExtensionDecl > ( D - > getDeclContext ( ) ) ) { <nl> - TC . computeDefaultAccessibility ( extension ) ; <nl> - Accessibility maxAccess = extension - > getMaxAccessibility ( ) ; <nl> + TC . computeDefaultAccessLevel ( extension ) ; <nl> + Accessibility maxAccess = extension - > getMaxAccessLevel ( ) ; <nl> if ( std : : min ( attr - > getAccess ( ) , Accessibility : : Public ) > maxAccess ) { <nl> / / FIXME : It would be nice to say what part of the requirements actually <nl> / / end up being problematic . <nl> void AttributeChecker : : visitAccessibilityAttr ( AccessibilityAttr * attr ) { <nl> attr - > getAccess ( ) , <nl> D - > getDescriptiveKind ( ) , <nl> maxAccess ) ; <nl> - swift : : fixItAccessibility ( diag , cast < ValueDecl > ( D ) , maxAccess ) ; <nl> + swift : : fixItAccess ( diag , cast < ValueDecl > ( D ) , maxAccess ) ; <nl> return ; <nl> } <nl> <nl> void AttributeChecker : : visitAccessibilityAttr ( AccessibilityAttr * attr ) { <nl> attr - > getAccess ( ) , <nl> D - > getDescriptiveKind ( ) , <nl> extAttr - > getAccess ( ) ) ; <nl> - swift : : fixItAccessibility ( diag , cast < ValueDecl > ( D ) , extAttr - > getAccess ( ) ) ; <nl> + swift : : fixItAccess ( diag , cast < ValueDecl > ( D ) , extAttr - > getAccess ( ) ) ; <nl> return ; <nl> } <nl> } <nl> mmm a / lib / Sema / TypeCheckConstraints . cpp <nl> ppp b / lib / Sema / TypeCheckConstraints . cpp <nl> resolveDeclRefExpr ( UnresolvedDeclRefExpr * UDRE , DeclContext * DC ) { <nl> / / Try ignoring access control . <nl> NameLookupOptions relookupOptions = lookupOptions ; <nl> relookupOptions | = NameLookupFlags : : KnownPrivate ; <nl> - relookupOptions | = NameLookupFlags : : IgnoreAccessibility ; <nl> + relookupOptions | = NameLookupFlags : : IgnoreAccessControl ; <nl> LookupResult inaccessibleResults = lookupUnqualified ( DC , Name , Loc , <nl> relookupOptions ) ; <nl> if ( inaccessibleResults ) { <nl> mmm a / lib / Sema / TypeCheckDecl . cpp <nl> ppp b / lib / Sema / TypeCheckDecl . 
cpp <nl> static void checkRedeclaration ( TypeChecker & tc , ValueDecl * current ) { <nl> <nl> ReferencedNameTracker * tracker = currentFile - > getReferencedNameTracker ( ) ; <nl> bool isCascading = true ; <nl> - if ( current - > hasAccessibility ( ) ) <nl> + if ( current - > hasAccess ( ) ) <nl> isCascading = ( current - > getFormalAccess ( ) > Accessibility : : FilePrivate ) ; <nl> <nl> / / Find other potential definitions . <nl> class AccessScopeChecker { <nl> <nl> / / FIXME : Figure out why AssociatedTypeDecls don ' t always have <nl> / / accessibility here . <nl> - if ( ! VD - > hasAccessibility ( ) ) { <nl> + if ( ! VD - > hasAccess ( ) ) { <nl> if ( isa < AssociatedTypeDecl > ( VD ) ) <nl> return true ; <nl> } <nl> class TypeAccessScopeChecker : private TypeWalker , AccessScopeChecker { <nl> } / / end anonymous namespace <nl> <nl> <nl> - void TypeChecker : : computeDefaultAccessibility ( ExtensionDecl * ED ) { <nl> - if ( ED - > hasDefaultAccessibility ( ) ) <nl> + void TypeChecker : : computeDefaultAccessLevel ( ExtensionDecl * ED ) { <nl> + if ( ED - > hasDefaultAccessLevel ( ) ) <nl> return ; <nl> <nl> validateExtension ( ED ) ; <nl> <nl> - if ( ED - > hasDefaultAccessibility ( ) ) <nl> + if ( ED - > hasDefaultAccessLevel ( ) ) <nl> return ; <nl> <nl> Accessibility maxAccess = Accessibility : : Public ; <nl> void TypeChecker : : computeDefaultAccessibility ( ExtensionDecl * ED ) { <nl> ! ED - > getExtendedType ( ) - > hasError ( ) ) { <nl> if ( NominalTypeDecl * nominal = ED - > getExtendedType ( ) - > getAnyNominal ( ) ) { <nl> validateDeclForNameLookup ( nominal ) ; <nl> - if ( ED - > hasDefaultAccessibility ( ) ) <nl> + if ( ED - > hasDefaultAccessLevel ( ) ) <nl> return ; <nl> maxAccess = std : : max ( nominal - > getFormalAccess ( ) , <nl> Accessibility : : FilePrivate ) ; <nl> void TypeChecker : : computeDefaultAccessibility ( ExtensionDecl * ED ) { <nl> else <nl> maxAccess = Accessibility : : Public ; <nl> <nl> - ED - > setDefaultAndMaxAccessibility ( defaultAccess , maxAccess ) ; <nl> + ED - > setDefaultAndMaxAccess ( defaultAccess , maxAccess ) ; <nl> } <nl> <nl> - void TypeChecker : : computeAccessibility ( ValueDecl * D ) { <nl> - if ( D - > hasAccessibility ( ) ) <nl> + void TypeChecker : : computeAccessLevel ( ValueDecl * D ) { <nl> + if ( D - > hasAccess ( ) ) <nl> return ; <nl> <nl> / / Check if the decl has an explicit accessibility attribute . <nl> if ( auto * AA = D - > getAttrs ( ) . getAttribute < AccessibilityAttr > ( ) ) { <nl> - D - > setAccessibility ( AA - > getAccess ( ) ) ; <nl> + D - > setAccess ( AA - > getAccess ( ) ) ; <nl> <nl> } else if ( auto fn = dyn_cast < FuncDecl > ( D ) ) { <nl> / / Special case for accessors , which inherit the access of their storage . <nl> / / decl . A setter attribute can also override this . <nl> if ( AbstractStorageDecl * storage = fn - > getAccessorStorageDecl ( ) ) { <nl> - if ( storage - > hasAccessibility ( ) ) { <nl> + if ( storage - > hasAccess ( ) ) { <nl> if ( fn - > getAccessorKind ( ) = = AccessorKind : : IsSetter | | <nl> fn - > getAccessorKind ( ) = = AccessorKind : : IsMaterializeForSet ) <nl> - fn - > setAccessibility ( storage - > getSetterAccessibility ( ) ) ; <nl> + fn - > setAccess ( storage - > getSetterFormalAccess ( ) ) ; <nl> else <nl> - fn - > setAccessibility ( storage - > getFormalAccess ( ) ) ; <nl> + fn - > setAccess ( storage - > getFormalAccess ( ) ) ; <nl> } else { <nl> - computeAccessibility ( storage ) ; <nl> + computeAccessLevel ( storage ) ; <nl> } <nl> } <nl> } <nl> <nl> - if ( ! 
D - > hasAccessibility ( ) ) { <nl> + if ( ! D - > hasAccess ( ) ) { <nl> DeclContext * DC = D - > getDeclContext ( ) ; <nl> switch ( DC - > getContextKind ( ) ) { <nl> case DeclContextKind : : TopLevelCodeDecl : <nl> / / Variables declared in a top - level ' guard ' statement can be accessed in <nl> / / later top - level code . <nl> - D - > setAccessibility ( Accessibility : : FilePrivate ) ; <nl> + D - > setAccess ( Accessibility : : FilePrivate ) ; <nl> break ; <nl> case DeclContextKind : : AbstractClosureExpr : <nl> if ( isa < ParamDecl > ( D ) ) { <nl> / / Closure parameters may need to be accessible to the enclosing <nl> / / context , for single - expression closures . <nl> - D - > setAccessibility ( Accessibility : : FilePrivate ) ; <nl> + D - > setAccess ( Accessibility : : FilePrivate ) ; <nl> } else { <nl> - D - > setAccessibility ( Accessibility : : Private ) ; <nl> + D - > setAccess ( Accessibility : : Private ) ; <nl> } <nl> break ; <nl> case DeclContextKind : : SerializedLocal : <nl> case DeclContextKind : : Initializer : <nl> case DeclContextKind : : AbstractFunctionDecl : <nl> case DeclContextKind : : SubscriptDecl : <nl> - D - > setAccessibility ( Accessibility : : Private ) ; <nl> + D - > setAccess ( Accessibility : : Private ) ; <nl> break ; <nl> case DeclContextKind : : Module : <nl> case DeclContextKind : : FileUnit : <nl> - D - > setAccessibility ( Accessibility : : Internal ) ; <nl> + D - > setAccess ( Accessibility : : Internal ) ; <nl> break ; <nl> case DeclContextKind : : GenericTypeDecl : { <nl> auto generic = cast < GenericTypeDecl > ( DC ) ; <nl> - validateAccessibility ( generic ) ; <nl> + validateAccessControl ( generic ) ; <nl> Accessibility access = Accessibility : : Internal ; <nl> if ( isa < ProtocolDecl > ( generic ) ) <nl> access = std : : max ( Accessibility : : FilePrivate , <nl> generic - > getFormalAccess ( ) ) ; <nl> - D - > setAccessibility ( access ) ; <nl> + D - > setAccess ( access ) ; <nl> break ; <nl> } <nl> case DeclContextKind : : ExtensionDecl : { <nl> auto extension = cast < ExtensionDecl > ( DC ) ; <nl> - computeDefaultAccessibility ( extension ) ; <nl> - if ( ! D - > hasAccessibility ( ) ) { <nl> - auto access = extension - > getDefaultAccessibility ( ) ; <nl> - D - > setAccessibility ( access ) ; <nl> + computeDefaultAccessLevel ( extension ) ; <nl> + if ( ! D - > hasAccess ( ) ) { <nl> + auto access = extension - > getDefaultAccessLevel ( ) ; <nl> + D - > setAccess ( access ) ; <nl> } <nl> } <nl> } <nl> void TypeChecker : : computeAccessibility ( ValueDecl * D ) { <nl> <nl> if ( auto ASD = dyn_cast < AbstractStorageDecl > ( D ) ) { <nl> if ( auto * AA = D - > getAttrs ( ) . getAttribute < SetterAccessibilityAttr > ( ) ) <nl> - ASD - > setSetterAccessibility ( AA - > getAccess ( ) ) ; <nl> + ASD - > setSetterAccess ( AA - > getAccess ( ) ) ; <nl> else <nl> - ASD - > setSetterAccessibility ( ASD - > getFormalAccess ( ) ) ; <nl> + ASD - > setSetterAccess ( ASD - > getFormalAccess ( ) ) ; <nl> <nl> if ( auto getter = ASD - > getGetter ( ) ) <nl> - computeAccessibility ( getter ) ; <nl> + computeAccessLevel ( getter ) ; <nl> if ( auto setter = ASD - > getSetter ( ) ) <nl> - computeAccessibility ( setter ) ; <nl> + computeAccessLevel ( setter ) ; <nl> } <nl> } <nl> <nl> class TypeAccessScopeDiagnoser : private ASTWalker { <nl> / / / A uniquely - typed boolean to reduce the chances of accidentally inverting <nl> / / / a check . 
<nl> / / / <nl> - / / / \ see checkTypeAccessibility <nl> + / / / \ see checkTypeAccess <nl> enum class DowngradeToWarning : bool { <nl> No , <nl> Yes <nl> } ; <nl> <nl> - / / / \ see checkTypeAccessibility <nl> + / / / \ see checkTypeAccess <nl> using CheckTypeAccessCallback = <nl> void ( AccessScope , const TypeRepr * , DowngradeToWarning ) ; <nl> <nl> using CheckTypeAccessCallback = <nl> / / / The TypeRepr passed to \ p diagnose may be null , in which case a particular <nl> / / / part of the type that caused the problem could not be found . The DeclContext <nl> / / / is never null . <nl> - static void checkTypeAccessibilityImpl ( <nl> + static void checkTypeAccessImpl ( <nl> TypeChecker & TC , TypeLoc TL , AccessScope contextAccessScope , <nl> const DeclContext * useDC , <nl> llvm : : function_ref < CheckTypeAccessCallback > diagnose ) { <nl> static void checkTypeAccessibilityImpl ( <nl> / / / part of the type that caused the problem could not be found . The DeclContext <nl> / / / is never null . The DowngradeToWarning parameter is a hack to deal with <nl> / / / early versions of Swift 3 not diagnosing certain access violations . <nl> - static void checkTypeAccessibility ( <nl> + static void checkTypeAccess ( <nl> TypeChecker & TC , TypeLoc TL , const ValueDecl * context , <nl> llvm : : function_ref < CheckTypeAccessCallback > diagnose ) { <nl> const DeclContext * DC = context - > getDeclContext ( ) ; <nl> static void checkTypeAccessibility ( <nl> } <nl> <nl> AccessScope contextAccessScope = context - > getFormalAccessScope ( ) ; <nl> - checkTypeAccessibilityImpl ( TC , TL , contextAccessScope , DC , <nl> - [ = , & TC ] ( AccessScope requiredAccessScope , <nl> - const TypeRepr * offendingTR , <nl> - DowngradeToWarning downgradeToWarning ) { <nl> + checkTypeAccessImpl ( TC , TL , contextAccessScope , DC , <nl> + [ = , & TC ] ( AccessScope requiredAccessScope , <nl> + const TypeRepr * offendingTR , <nl> + DowngradeToWarning downgradeToWarning ) { <nl> if ( ! contextAccessScope . isPublic ( ) & & <nl> ! isa < ModuleDecl > ( contextAccessScope . getDeclContext ( ) ) & & <nl> TC . getLangOpts ( ) . isSwiftVersion3 ( ) ) { <nl> static void highlightOffendingType ( TypeChecker & TC , InFlightDiagnostic & diag , <nl> } <nl> } <nl> <nl> - static void checkGenericParamAccessibility ( TypeChecker & TC , <nl> - const GenericParamList * params , <nl> - const Decl * owner , <nl> - AccessScope accessScope , <nl> - Accessibility contextAccess ) { <nl> + static void checkGenericParamAccess ( TypeChecker & TC , <nl> + const GenericParamList * params , <nl> + const Decl * owner , <nl> + AccessScope accessScope , <nl> + Accessibility contextAccess ) { <nl> if ( ! params ) <nl> return ; <nl> <nl> static void checkGenericParamAccessibility ( TypeChecker & TC , <nl> if ( param - > getInherited ( ) . empty ( ) ) <nl> continue ; <nl> assert ( param - > getInherited ( ) . size ( ) = = 1 ) ; <nl> - checkTypeAccessibilityImpl ( TC , param - > getInherited ( ) . front ( ) , accessScope , <nl> - owner - > getDeclContext ( ) , <nl> - [ & ] ( AccessScope typeAccessScope , <nl> - const TypeRepr * thisComplainRepr , <nl> - DowngradeToWarning thisDowngrade ) { <nl> + checkTypeAccessImpl ( TC , param - > getInherited ( ) . front ( ) , accessScope , <nl> + owner - > getDeclContext ( ) , <nl> + [ & ] ( AccessScope typeAccessScope , <nl> + const TypeRepr * thisComplainRepr , <nl> + DowngradeToWarning thisDowngrade ) { <nl> if ( typeAccessScope . 
@@ ... @@ static void checkGenericParamAccessibility(TypeChecker &TC,
     if (param->getInherited().empty())
       continue;
     assert(param->getInherited().size() == 1);
-    checkTypeAccessibilityImpl(TC, param->getInherited().front(), accessScope,
-                               owner->getDeclContext(),
-                               [&](AccessScope typeAccessScope,
-                                   const TypeRepr *thisComplainRepr,
-                                   DowngradeToWarning thisDowngrade) {
+    checkTypeAccessImpl(TC, param->getInherited().front(), accessScope,
+                        owner->getDeclContext(),
+                        [&](AccessScope typeAccessScope,
+                            const TypeRepr *thisComplainRepr,
+                            DowngradeToWarning thisDowngrade) {
       if (typeAccessScope.isChildOf(minAccessScope) ||
           (thisDowngrade == DowngradeToWarning::No &&
            downgradeToWarning == DowngradeToWarning::Yes) ||
@@ ... @@ static void checkGenericParamAccessibility(TypeChecker &TC,
     };
     switch (requirement.getKind()) {
     case RequirementReprKind::TypeConstraint:
-      checkTypeAccessibilityImpl(TC, requirement.getSubjectLoc(),
-                                 accessScope, owner->getDeclContext(),
-                                 callback);
-      checkTypeAccessibilityImpl(TC, requirement.getConstraintLoc(),
-                                 accessScope, owner->getDeclContext(),
-                                 callback);
+      checkTypeAccessImpl(TC, requirement.getSubjectLoc(),
+                          accessScope, owner->getDeclContext(),
+                          callback);
+      checkTypeAccessImpl(TC, requirement.getConstraintLoc(),
+                          accessScope, owner->getDeclContext(),
+                          callback);
       break;
     case RequirementReprKind::LayoutConstraint:
-      checkTypeAccessibilityImpl(TC, requirement.getSubjectLoc(),
-                                 accessScope, owner->getDeclContext(),
-                                 callback);
+      checkTypeAccessImpl(TC, requirement.getSubjectLoc(),
+                          accessScope, owner->getDeclContext(),
+                          callback);
       break;
     case RequirementReprKind::SameType:
-      checkTypeAccessibilityImpl(TC, requirement.getFirstTypeLoc(),
-                                 accessScope, owner->getDeclContext(),
-                                 callback);
-      checkTypeAccessibilityImpl(TC, requirement.getSecondTypeLoc(),
-                                 accessScope, owner->getDeclContext(),
-                                 callback);
+      checkTypeAccessImpl(TC, requirement.getFirstTypeLoc(),
+                          accessScope, owner->getDeclContext(),
+                          callback);
+      checkTypeAccessImpl(TC, requirement.getSecondTypeLoc(),
+                          accessScope, owner->getDeclContext(),
+                          callback);
       break;
     }
   }
@@ ... @@ static void checkGenericParamAccessibility(TypeChecker &TC,
   }
 }

-  auto minAccess = minAccessScope.accessibilityForDiagnostics();
+  auto minAccess = minAccessScope.accessLevelForDiagnostics();

   bool isExplicit =
       owner->getAttrs().hasAttribute<AccessibilityAttr>() ||
@@ ... @@ static void checkGenericParamAccessibility(TypeChecker &TC,
   highlightOffendingType(TC, diag, complainRepr);
 }

-static void checkGenericParamAccessibility(TypeChecker &TC,
-                                           const GenericParamList *params,
-                                           const ValueDecl *owner) {
-  checkGenericParamAccessibility(TC, params, owner,
-                                 owner->getFormalAccessScope(),
-                                 owner->getFormalAccess());
+static void checkGenericParamAccess(TypeChecker &TC,
+                                    const GenericParamList *params,
+                                    const ValueDecl *owner) {
+  checkGenericParamAccess(TC, params, owner, owner->getFormalAccessScope(),
+                          owner->getFormalAccess());
 }

 /// Checks the given declaration's accessibility to make sure it is valid given
 /// the way it is defined.
 ///
 /// \p D must be a ValueDecl or a Decl that can appear in a type context.
-static void checkAccessibility(TypeChecker &TC, const Decl *D) {
+static void checkAccessControl(TypeChecker &TC, const Decl *D) {
   if (D->isInvalid() || D->isImplicit())
     return;
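For pattern bindings, checkAccessControl walks each bound variable and, separately, any written type annotation. A hypothetical example covering both diagnostics (wording approximate):

    private struct Point {}

    // pattern_type_access: the annotated type is less visible than the var
    public var origin: Point = Point()  // error: variable cannot be declared
                                        // public because its type uses a
                                        // private type

    // pattern_type_access_inferred: the same problem found via inference
    public let start = Point()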
@@ ... @@ static void checkAccessibility(TypeChecker &TC, const Decl *D) {
     if (seenVars.count(theVar) || theVar->isInvalid())
       return;

-    checkTypeAccessibility(TC, TypeLoc::withoutLoc(theVar->getType()),
-                           theVar,
-                           [&](AccessScope typeAccessScope,
-                               const TypeRepr *complainRepr,
-                               DowngradeToWarning downgradeToWarning) {
-      auto typeAccess = typeAccessScope.accessibilityForDiagnostics();
+    checkTypeAccess(TC, TypeLoc::withoutLoc(theVar->getType()),
+                    theVar,
+                    [&](AccessScope typeAccessScope,
+                        const TypeRepr *complainRepr,
+                        DowngradeToWarning downgradeToWarning) {
+      auto typeAccess = typeAccessScope.accessLevelForDiagnostics();
       bool isExplicit =
           theVar->getAttrs().hasAttribute<AccessibilityAttr>();
       auto theVarAccess = isExplicit
           ? theVar->getFormalAccess()
-          : typeAccessScope.requiredAccessibilityForDiagnostics();
+          : typeAccessScope.requiredAccessForDiagnostics();
       auto diagID = diag::pattern_type_access_inferred;
       if (downgradeToWarning == DowngradeToWarning::Yes)
         diagID = diag::pattern_type_access_inferred_warn;
@@ ... @@ static void checkAccessibility(TypeChecker &TC, const Decl *D) {
     if (!anyVar)
       return;

-    checkTypeAccessibility(TC, TP->getTypeLoc(), anyVar,
-                           [&](AccessScope typeAccessScope,
-                               const TypeRepr *complainRepr,
-                               DowngradeToWarning downgradeToWarning) {
-      auto typeAccess = typeAccessScope.accessibilityForDiagnostics();
+    checkTypeAccess(TC, TP->getTypeLoc(), anyVar,
+                    [&](AccessScope typeAccessScope,
+                        const TypeRepr *complainRepr,
+                        DowngradeToWarning downgradeToWarning) {
+      auto typeAccess = typeAccessScope.accessLevelForDiagnostics();
       bool isExplicit =
           anyVar->getAttrs().hasAttribute<AccessibilityAttr>() ||
           anyVar->getDeclContext()->getAsProtocolOrProtocolExtensionContext();
@@ ... @@ static void checkAccessibility(TypeChecker &TC, const Decl *D) {
         diagID = diag::pattern_type_access_warn;
       auto anyVarAccess = isExplicit
           ? anyVar->getFormalAccess()
-          : typeAccessScope.requiredAccessibilityForDiagnostics();
+          : typeAccessScope.requiredAccessForDiagnostics();
       auto diag = TC.diagnose(P->getLoc(), diagID,
                               anyVar->isLet(),
                               isTypeContext,
@@ ... @@ static void checkAccessibility(TypeChecker &TC, const Decl *D) {
   case DeclKind::TypeAlias: {
     auto TAD = cast<TypeAliasDecl>(D);

-    checkTypeAccessibility(TC, TAD->getUnderlyingTypeLoc(), TAD,
-                           [&](AccessScope typeAccessScope,
-                               const TypeRepr *complainRepr,
-                               DowngradeToWarning downgradeToWarning) {
-      auto typeAccess = typeAccessScope.accessibilityForDiagnostics();
+    checkTypeAccess(TC, TAD->getUnderlyingTypeLoc(), TAD,
+                    [&](AccessScope typeAccessScope,
+                        const TypeRepr *complainRepr,
+                        DowngradeToWarning downgradeToWarning) {
+      auto typeAccess = typeAccessScope.accessLevelForDiagnostics();
       bool isExplicit = TAD->getAttrs().hasAttribute<AccessibilityAttr>();
       auto diagID = diag::type_alias_underlying_type_access;
       if (downgradeToWarning == DowngradeToWarning::Yes)
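The TypeAlias case checks the underlying type the alias would expose (diag::type_alias_underlying_type_access). A hypothetical example (wording approximate):

    private struct Implementation {}

    // error: type alias cannot be declared public because its underlying
    // type uses a private type
    public typealias Handle = Implementation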
@@ ... @@ static void checkAccessibility(TypeChecker &TC, const Decl *D) {
     std::for_each(assocType->getInherited().begin(),
                   assocType->getInherited().end(),
                   [&](TypeLoc requirement) {
-      checkTypeAccessibility(TC, requirement, assocType,
-                             [&](AccessScope typeAccessScope,
-                                 const TypeRepr *thisComplainRepr,
-                                 DowngradeToWarning downgradeDiag) {
+      checkTypeAccess(TC, requirement, assocType,
+                      [&](AccessScope typeAccessScope,
+                          const TypeRepr *thisComplainRepr,
+                          DowngradeToWarning downgradeDiag) {
        if (typeAccessScope.isChildOf(minAccessScope) ||
            (!complainRepr &&
             typeAccessScope.hasEqualDeclContextWith(minAccessScope))) {
@@ ... @@ static void checkAccessibility(TypeChecker &TC, const Decl *D) {
       }
     });
   });
-  checkTypeAccessibility(TC, assocType->getDefaultDefinitionLoc(), assocType,
-                         [&](AccessScope typeAccessScope,
-                             const TypeRepr *thisComplainRepr,
-                             DowngradeToWarning downgradeDiag) {
+  checkTypeAccess(TC, assocType->getDefaultDefinitionLoc(), assocType,
+                  [&](AccessScope typeAccessScope,
+                      const TypeRepr *thisComplainRepr,
+                      DowngradeToWarning downgradeDiag) {
     if (typeAccessScope.isChildOf(minAccessScope) ||
         (!complainRepr &&
          typeAccessScope.hasEqualDeclContextWith(minAccessScope))) {
@@ ... @@ static void checkAccessibility(TypeChecker &TC, const Decl *D) {
   });

   if (!minAccessScope.isPublic()) {
-    auto minAccess = minAccessScope.accessibilityForDiagnostics();
+    auto minAccess = minAccessScope.accessLevelForDiagnostics();
     auto diagID = diag::associated_type_access;
     if (downgradeToWarning == DowngradeToWarning::Yes)
       diagID = diag::associated_type_access_warn;
@@ ... @@ static void checkAccessibility(TypeChecker &TC, const Decl *D) {
   case DeclKind::Enum: {
     auto ED = cast<EnumDecl>(D);

-    checkGenericParamAccessibility(TC, ED->getGenericParams(), ED);
+    checkGenericParamAccess(TC, ED->getGenericParams(), ED);

     if (ED->hasRawType()) {
       Type rawType = ED->getRawType();
@@ ... @@ static void checkAccessibility(TypeChecker &TC, const Decl *D) {
       });
       if (rawTypeLocIter == ED->getInherited().end())
         return;
-      checkTypeAccessibility(TC, *rawTypeLocIter, ED,
-                             [&](AccessScope typeAccessScope,
-                                 const TypeRepr *complainRepr,
-                                 DowngradeToWarning downgradeToWarning) {
-        auto typeAccess = typeAccessScope.accessibilityForDiagnostics();
+      checkTypeAccess(TC, *rawTypeLocIter, ED,
+                      [&](AccessScope typeAccessScope,
+                          const TypeRepr *complainRepr,
+                          DowngradeToWarning downgradeToWarning) {
+        auto typeAccess = typeAccessScope.accessLevelForDiagnostics();
         bool isExplicit = ED->getAttrs().hasAttribute<AccessibilityAttr>();
         auto diagID = diag::enum_raw_type_access;
         if (downgradeToWarning == DowngradeToWarning::Yes)
@@ ... @@ static void checkAccessibility(TypeChecker &TC, const Decl *D) {

   case DeclKind::Struct: {
     auto SD = cast<StructDecl>(D);
-    checkGenericParamAccessibility(TC, SD->getGenericParams(), SD);
+    checkGenericParamAccess(TC, SD->getGenericParams(), SD);
     return;
   }

   case DeclKind::Class: {
     auto CD = cast<ClassDecl>(D);

-    checkGenericParamAccessibility(TC, CD->getGenericParams(), CD);
+    checkGenericParamAccess(TC, CD->getGenericParams(), CD);

     if (CD->hasSuperclass()) {
       Type superclass = CD->getSuperclass();
@@ ... @@ static void checkAccessibility(TypeChecker &TC, const Decl *D) {
       });
       if (superclassLocIter == CD->getInherited().end())
         return;
-      checkTypeAccessibility(TC, *superclassLocIter, CD,
-                             [&](AccessScope typeAccessScope,
-                                 const TypeRepr *complainRepr,
-                                 DowngradeToWarning downgradeToWarning) {
-        auto typeAccess = typeAccessScope.accessibilityForDiagnostics();
+      checkTypeAccess(TC, *superclassLocIter, CD,
+                      [&](AccessScope typeAccessScope,
+                          const TypeRepr *complainRepr,
+                          DowngradeToWarning downgradeToWarning) {
+        auto typeAccess = typeAccessScope.accessLevelForDiagnostics();
         bool isExplicit = CD->getAttrs().hasAttribute<AccessibilityAttr>();
         auto diagID = diag::class_super_access;
         if (downgradeToWarning == DowngradeToWarning::Yes)
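diag::class_super_access fires when a class is more visible than its superclass. A hypothetical example (wording approximate):

    private class Base {}

    // error: class cannot be declared public because its superclass
    // is private
    public class Widget: Base {}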
@@ ... @@ static void checkAccessibility(TypeChecker &TC, const Decl *D) {
     std::for_each(proto->getInherited().begin(),
                   proto->getInherited().end(),
                   [&](TypeLoc requirement) {
-      checkTypeAccessibility(TC, requirement, proto,
-                             [&](AccessScope typeAccessScope,
-                                 const TypeRepr *thisComplainRepr,
-                                 DowngradeToWarning downgradeDiag) {
+      checkTypeAccess(TC, requirement, proto,
+                      [&](AccessScope typeAccessScope,
+                          const TypeRepr *thisComplainRepr,
+                          DowngradeToWarning downgradeDiag) {
        if (typeAccessScope.isChildOf(minAccessScope) ||
            (!complainRepr &&
             typeAccessScope.hasEqualDeclContextWith(minAccessScope))) {
@@ ... @@ static void checkAccessibility(TypeChecker &TC, const Decl *D) {
   });

   if (!minAccessScope.isPublic()) {
-    auto minAccess = minAccessScope.accessibilityForDiagnostics();
+    auto minAccess = minAccessScope.accessLevelForDiagnostics();
     bool isExplicit = proto->getAttrs().hasAttribute<AccessibilityAttr>();
     auto diagID = diag::protocol_refine_access;
     if (downgradeToWarning == DowngradeToWarning::Yes)
@@ ... @@ static void checkAccessibility(TypeChecker &TC, const Decl *D) {
   bool problemIsElement = false;

   for (auto &P : *SD->getIndices()) {
-    checkTypeAccessibility(TC, P->getTypeLoc(), P,
-                           [&](AccessScope typeAccessScope,
-                               const TypeRepr *thisComplainRepr,
-                               DowngradeToWarning downgradeDiag) {
+    checkTypeAccess(TC, P->getTypeLoc(), P,
+                    [&](AccessScope typeAccessScope,
+                        const TypeRepr *thisComplainRepr,
+                        DowngradeToWarning downgradeDiag) {
       if (typeAccessScope.isChildOf(minAccessScope) ||
           (!complainRepr &&
            typeAccessScope.hasEqualDeclContextWith(minAccessScope))) {
@@ ... @@ static void checkAccessibility(TypeChecker &TC, const Decl *D) {
     });
   }

-  checkTypeAccessibility(TC, SD->getElementTypeLoc(), SD,
-                         [&](AccessScope typeAccessScope,
-                             const TypeRepr *thisComplainRepr,
-                             DowngradeToWarning downgradeDiag) {
+  checkTypeAccess(TC, SD->getElementTypeLoc(), SD,
+                  [&](AccessScope typeAccessScope,
+                      const TypeRepr *thisComplainRepr,
+                      DowngradeToWarning downgradeDiag) {
     if (typeAccessScope.isChildOf(minAccessScope) ||
         (!complainRepr &&
          typeAccessScope.hasEqualDeclContextWith(minAccessScope))) {
@@ ... @@ static void checkAccessibility(TypeChecker &TC, const Decl *D) {
   });

   if (!minAccessScope.isPublic()) {
-    auto minAccess = minAccessScope.accessibilityForDiagnostics();
+    auto minAccess = minAccessScope.accessLevelForDiagnostics();
     bool isExplicit =
         SD->getAttrs().hasAttribute<AccessibilityAttr>() ||
         SD->getDeclContext()->getAsProtocolOrProtocolExtensionContext();
@@ ... @@ static void checkAccessibility(TypeChecker &TC, const Decl *D) {
       diagID = diag::subscript_type_access_warn;
     auto subscriptDeclAccess = isExplicit
         ? SD->getFormalAccess()
-        : minAccessScope.requiredAccessibilityForDiagnostics();
+        : minAccessScope.requiredAccessForDiagnostics();
     auto diag = TC.diagnose(SD, diagID,
                             isExplicit,
                             subscriptDeclAccess,
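For subscripts, the index parameters and the element type all feed the same minAccessScope, so one diagnostic reports whichever part is too restrictive. A hypothetical example (wording approximate):

    private struct Key {}

    public struct Table {
        // error: subscript cannot be declared public because its index
        // uses a private type
        public subscript(key: Key) -> Int { return 0 }
    }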
@@ ... @@ static void checkAccessibility(TypeChecker &TC, const Decl *D) {
   auto fn = cast<AbstractFunctionDecl>(D);
   bool isTypeContext = fn->getDeclContext()->isTypeContext();

-  checkGenericParamAccessibility(TC, fn->getGenericParams(), fn);
+  checkGenericParamAccess(TC, fn->getGenericParams(), fn);

   // This must stay in sync with diag::function_type_access.
   enum {
@@ ... @@ static void checkAccessibility(TypeChecker &TC, const Decl *D) {
   for (auto *PL : fn->getParameterLists().slice(isTypeContext)) {
     for (auto &P : *PL) {
-      checkTypeAccessibility(TC, P->getTypeLoc(), P,
-                             [&](AccessScope typeAccessScope,
-                                 const TypeRepr *thisComplainRepr,
-                                 DowngradeToWarning downgradeDiag) {
+      checkTypeAccess(TC, P->getTypeLoc(), P,
+                      [&](AccessScope typeAccessScope,
+                          const TypeRepr *thisComplainRepr,
+                          DowngradeToWarning downgradeDiag) {
         if (typeAccessScope.isChildOf(minAccessScope) ||
             (!complainRepr &&
              typeAccessScope.hasEqualDeclContextWith(minAccessScope))) {
@@ ... @@ static void checkAccessibility(TypeChecker &TC, const Decl *D) {

   bool problemIsResult = false;
   if (auto FD = dyn_cast<FuncDecl>(fn)) {
-    checkTypeAccessibility(TC, FD->getBodyResultTypeLoc(), FD,
-                           [&](AccessScope typeAccessScope,
-                               const TypeRepr *thisComplainRepr,
-                               DowngradeToWarning downgradeDiag) {
+    checkTypeAccess(TC, FD->getBodyResultTypeLoc(), FD,
+                    [&](AccessScope typeAccessScope,
+                        const TypeRepr *thisComplainRepr,
+                        DowngradeToWarning downgradeDiag) {
       if (typeAccessScope.isChildOf(minAccessScope) ||
           (!complainRepr &&
            typeAccessScope.hasEqualDeclContextWith(minAccessScope))) {
@@ ... @@ static void checkAccessibility(TypeChecker &TC, const Decl *D) {
   }

   if (!minAccessScope.isPublic()) {
-    auto minAccess = minAccessScope.accessibilityForDiagnostics();
+    auto minAccess = minAccessScope.accessLevelForDiagnostics();
     auto functionKind = isa<ConstructorDecl>(fn)
         ? FK_Initializer
         : isTypeContext ? FK_Method : FK_Function;
@@ ... @@ static void checkAccessibility(TypeChecker &TC, const Decl *D) {
       diagID = diag::function_type_access_warn;
     auto fnAccess = isExplicit
         ? fn->getFormalAccess()
-        : minAccessScope.requiredAccessibilityForDiagnostics();
+        : minAccessScope.requiredAccessForDiagnostics();
     auto diag = TC.diagnose(fn, diagID,
                             isExplicit,
                             fnAccess,
@@ ... @@ static void checkAccessibility(TypeChecker &TC, const Decl *D) {
   if (!EED->getArgumentTypeLoc().getType())
     return;
-  checkTypeAccessibility(TC, EED->getArgumentTypeLoc(), EED,
-                         [&](AccessScope typeAccessScope,
-                             const TypeRepr *complainRepr,
-                             DowngradeToWarning downgradeToWarning) {
-    auto typeAccess = typeAccessScope.accessibilityForDiagnostics();
+  checkTypeAccess(TC, EED->getArgumentTypeLoc(), EED,
+                  [&](AccessScope typeAccessScope,
+                      const TypeRepr *complainRepr,
+                      DowngradeToWarning downgradeToWarning) {
+    auto typeAccess = typeAccessScope.accessLevelForDiagnostics();
     auto diagID = diag::enum_case_access;
     if (downgradeToWarning == DowngradeToWarning::Yes)
       diagID = diag::enum_case_access_warn;
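The same accumulation covers function parameters, the result type, and enum case payloads. A hypothetical example of the latter two (wording approximate):

    private struct Token {}

    // error: function cannot be declared public because its result
    // uses a private type
    public func currentToken() -> Token { return Token() }

    public enum Event {
        // error: enum case in a public enum uses a private type
        case login(Token)
    }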
@@ ... @@ class DeclChecker : public DeclVisitor<DeclChecker> {
     }

     if (!IsFirstPass)
-      checkAccessibility(TC, PBD);
+      checkAccessControl(TC, PBD);

     TC.checkDeclAttributes(PBD);
   }

   void visitSubscriptDecl(SubscriptDecl *SD) {
     if (IsSecondPass) {
-      checkAccessibility(TC, SD);
+      checkAccessControl(TC, SD);
       return;
     }
@@ ... @@ class DeclChecker : public DeclVisitor<DeclChecker> {
     SD->setIsBeingValidated(false);

     TC.checkDeclAttributesEarly(SD);
-    TC.computeAccessibility(SD);
+    TC.computeAccessLevel(SD);

     validateAttributes(TC, SD);
@@ ... @@ class DeclChecker : public DeclVisitor<DeclChecker> {

   void visitTypeAliasDecl(TypeAliasDecl *TAD) {
     TC.checkDeclAttributesEarly(TAD);
-    TC.computeAccessibility(TAD);
+    TC.computeAccessLevel(TAD);

     if (!IsSecondPass)
       TC.validateDecl(TAD);

     if (IsSecondPass)
-      checkAccessibility(TC, TAD);
+      checkAccessControl(TC, TAD);

     TC.checkDeclAttributes(TAD);
   }
@@ ... @@ class DeclChecker : public DeclVisitor<DeclChecker> {
   void visitEnumDecl(EnumDecl *ED) {
     TC.checkDeclAttributesEarly(ED);
-    TC.computeAccessibility(ED);
+    TC.computeAccessLevel(ED);

     if (!IsSecondPass) {
       checkUnsupportedNestedType(ED);
@@ ... @@ class DeclChecker : public DeclVisitor<DeclChecker> {
     }

     if (!IsFirstPass) {
-      checkAccessibility(TC, ED);
+      checkAccessControl(TC, ED);

       if (ED->hasRawType() && !ED->isObjC()) {
         // ObjC enums have already had their raw values checked, but pure Swift
@@ ... @@ class DeclChecker : public DeclVisitor<DeclChecker> {
   void visitStructDecl(StructDecl *SD) {
     TC.checkDeclAttributesEarly(SD);
-    TC.computeAccessibility(SD);
+    TC.computeAccessLevel(SD);

     if (!IsSecondPass) {
       checkUnsupportedNestedType(SD);
@@ ... @@ class DeclChecker : public DeclVisitor<DeclChecker> {
     }

     if (!IsFirstPass) {
-      checkAccessibility(TC, SD);
+      checkAccessControl(TC, SD);

       if (!SD->isInvalid())
         TC.checkConformancesInContext(SD, SD);
@@ ... @@ class DeclChecker : public DeclVisitor<DeclChecker> {
   void visitClassDecl(ClassDecl *CD) {
     TC.checkDeclAttributesEarly(CD);
-    TC.computeAccessibility(CD);
+    TC.computeAccessLevel(CD);

     if (!IsSecondPass) {
       checkUnsupportedNestedType(CD);
@@ ... @@ class DeclChecker : public DeclVisitor<DeclChecker> {
      }

-      checkAccessibility(TC, CD);
+      checkAccessControl(TC, CD);
     }

     TC.checkDeclAttributes(CD);
@@ ... @@ class DeclChecker : public DeclVisitor<DeclChecker> {
   void visitProtocolDecl(ProtocolDecl *PD) {
     TC.checkDeclAttributesEarly(PD);
-    TC.computeAccessibility(PD);
+    TC.computeAccessLevel(PD);

     if (!IsSecondPass) {
       checkUnsupportedNestedType(PD);
@@ ... @@ class DeclChecker : public DeclVisitor<DeclChecker> {
     }

     if (IsSecondPass) {
-      checkAccessibility(TC, PD);
+      checkAccessControl(TC, PD);
       for (auto member : PD->getMembers()) {
         TC.checkUnsupportedProtocolType(member);
-        checkAccessibility(TC, member);
+        checkAccessControl(TC, member);
       }
       TC.checkInheritanceClause(PD);
       return;
@@ ... @@ class DeclChecker : public DeclVisitor<DeclChecker> {
     }

     if (IsSecondPass) {
-      checkAccessibility(TC, FD);
+      checkAccessControl(TC, FD);
       return;
     }

     TC.checkDeclAttributesEarly(FD);
-    TC.computeAccessibility(FD);
+    TC.computeAccessLevel(FD);

     if (FD->hasInterfaceType() || FD->isBeingValidated())
       return;
@@ ... @@ class DeclChecker : public DeclVisitor<DeclChecker> {
                               /*setter*/ false,
                               decl->getDescriptiveKind(),
                               /*fromOverridden*/ true);
-      fixItAccessibility(diag, decl, Accessibility::Open);
+      fixItAccess(diag, decl, Accessibility::Open);
     }
     TC.diagnose(matchDecl, diag::overridden_here);
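The override path requires an override to be as accessible as the declaration it overrides, and fixItAccess supplies the fix-it, including an upgrade to 'open' where the overridden member demands it. A hypothetical example (wording approximate):

    public class Base {
        public func render() {}
    }

    public class Child: Base {
        // error: overriding instance method must be as accessible as
        // the declaration it overrides
        internal override func render() {}
    }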
@@ ... @@ class DeclChecker : public DeclVisitor<DeclChecker> {
         (requiredAccessScope->hasEqualDeclContextWith(matchAccessScope) &&
          matchAccess != Accessibility::Open);
     Accessibility requiredAccess =
-        requiredAccessScope->requiredAccessibilityForDiagnostics();
+        requiredAccessScope->requiredAccessForDiagnostics();
     {
       auto diag = TC.diagnose(decl, diag::override_not_accessible,
                               shouldDiagnoseSetter,
                               decl->getDescriptiveKind(),
                               overriddenForcesAccess);
-      fixItAccessibility(diag, decl, requiredAccess,
+      fixItAccess(diag, decl, requiredAccess,
                          shouldDiagnoseSetter);
     }
     TC.diagnose(matchDecl, diag::overridden_here);
@@ ... @@ class DeclChecker : public DeclVisitor<DeclChecker> {

   void visitEnumElementDecl(EnumElementDecl *EED) {
     if (IsSecondPass) {
-      checkAccessibility(TC, EED);
+      checkAccessControl(TC, EED);
       return;
     }
     if (EED->hasInterfaceType() || EED->isBeingValidated())
       return;

     TC.checkDeclAttributesEarly(EED);
-    TC.validateAccessibility(EED);
+    TC.validateAccessControl(EED);

     // Only attempt to validate the argument type or raw value if the element
     // is not currently being validated.
@@ ... @@ class DeclChecker : public DeclVisitor<DeclChecker> {
     // Check conformances before visiting members, since we might
     // synthesize bodies for derived conformances
     if (!IsFirstPass) {
-      TC.computeDefaultAccessibility(ED);
+      TC.computeDefaultAccessLevel(ED);
       if (auto *AA = ED->getAttrs().getAttribute<AccessibilityAttr>()) {
         const auto access = AA->getAccess();
         AccessScope desiredAccessScope = AccessScope::getPublic();
@@ ... @@ class DeclChecker : public DeclVisitor<DeclChecker> {
         case Accessibility::Open:
           break;
         }
-        checkGenericParamAccessibility(TC, ED->getGenericParams(), ED,
-                                       desiredAccessScope, access);
+        checkGenericParamAccess(TC, ED->getGenericParams(), ED,
+                                desiredAccessScope, access);
       }
       TC.checkConformancesInContext(ED, ED);
     }
@@ ... @@ class DeclChecker : public DeclVisitor<DeclChecker> {
     }

     if (IsSecondPass) {
-      checkAccessibility(TC, CD);
+      checkAccessControl(TC, CD);
       return;
     }
     if (CD->hasInterfaceType() || CD->isBeingValidated())
@@ ... @@ class DeclChecker : public DeclVisitor<DeclChecker> {
     CD->setIsBeingValidated();

     TC.checkDeclAttributesEarly(CD);
-    TC.computeAccessibility(CD);
+    TC.computeAccessLevel(CD);

     // convenience initializers are only allowed on classes and in
     // extensions thereof.
@@ ... @@ class DeclChecker : public DeclVisitor<DeclChecker> {
         if (CD->getFormalAccess() < requiredAccess) {
           auto diag = TC.diagnose(CD,
                                   diag::required_initializer_not_accessible);
-          fixItAccessibility(diag, CD, requiredAccess);
+          fixItAccess(diag, CD, requiredAccess);
         }
       }
     }
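diag::required_initializer_not_accessible covers 'required' initializers, which have to be visible wherever the class can be subclassed; the fix-it raises their access level. A hypothetical example (wording approximate):

    public class Controller {
        // error (approx.): 'required' initializer must be accessible
        // wherever class 'Controller' can be subclassed
        fileprivate required init() {}
    }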
@@ ... @@ class DeclChecker : public DeclVisitor<DeclChecker> {
            && "Decl parsing must prevent destructors outside of types!");

     TC.checkDeclAttributesEarly(DD);
-    if (!DD->hasAccessibility()) {
+    if (!DD->hasAccess()) {
       auto enclosingClass = cast<ClassDecl>(DD->getParent());
-      DD->setAccessibility(enclosingClass->getFormalAccess());
+      DD->setAccess(enclosingClass->getFormalAccess());
     }

     configureImplicitSelf(TC, DD);
@@ ... @@ void TypeChecker::validateDecl(ValueDecl *D) {
   if (hasEnabledForbiddenTypecheckPrefix())
     checkForForbiddenPrefix(D);

-  validateAccessibility(D);
+  validateAccessControl(D);

   // Validate the context.
   auto dc = D->getDeclContext();
@@ ... @@ void TypeChecker::validateDecl(ValueDecl *D) {
     assocType->setIsBeingValidated();
     SWIFT_DEFER { assocType->setIsBeingValidated(false); };

-    validateAccessibility(assocType);
+    validateAccessControl(assocType);

     checkDeclAttributesEarly(assocType);
     checkInheritanceClause(assocType);
@@ ... @@ void TypeChecker::validateDecl(ValueDecl *D) {
             ->findUnresolvedDependentMemberType()) {
       aliasDecl->getUnderlyingTypeLoc().setType(Type(),
                                                 /*validated=*/false);
-      validateAccessibility(aliasDecl);
+      validateAccessControl(aliasDecl);

       // Check generic parameters, if needed.
       aliasDecl->setIsBeingValidated();
@@ ... @@ void TypeChecker::validateDeclForNameLookup(ValueDecl *D) {
     for (auto paramDecl : *gp)
       paramDecl->setDepth(depth);

-    validateAccessibility(proto);
+    validateAccessControl(proto);

     // Record inherited protocols.
     resolveInheritedProtocols(proto);
@@ ... @@ void TypeChecker::validateDeclForNameLookup(ValueDecl *D) {
     if (assocType->hasInterfaceType())
       return;
     assocType->computeType();
-    validateAccessibility(assocType);
+    validateAccessControl(assocType);
     break;
   }
   case DeclKind::TypeAlias: {
@@ ... @@ void TypeChecker::validateDeclForNameLookup(ValueDecl *D) {
     typealias->setIsBeingValidated();
     SWIFT_DEFER { typealias->setIsBeingValidated(false); };

-    validateAccessibility(typealias);
+    validateAccessControl(typealias);
     if (typealias->getFormalAccess() <= Accessibility::FilePrivate)
       options |= TR_KnownNonCascadingDependency;
@@ ... @@ void TypeChecker::validateDeclForNameLookup(ValueDecl *D) {
   }
 }

-void TypeChecker::validateAccessibility(ValueDecl *D) {
-  if (D->hasAccessibility())
+void TypeChecker::validateAccessControl(ValueDecl *D) {
+  if (D->hasAccess())
     return;

-  // FIXME: Encapsulate the following in computeAccessibility()?
+  // FIXME: Encapsulate the following in computeAccessLevel()?

   switch (D->getKind()) {
   case DeclKind::Import:
@@ ... @@ void TypeChecker::validateAccessibility(ValueDecl *D) {
     break;

   case DeclKind::TypeAlias:
-    computeAccessibility(D);
+    computeAccessLevel(D);
     break;

   case DeclKind::GenericTypeParam:
@@ ... @@ void TypeChecker::validateAccessibility(ValueDecl *D) {
   case DeclKind::AssociatedType: {
     auto assocType = cast<AssociatedTypeDecl>(D);
     auto prot = assocType->getProtocol();
-    validateAccessibility(prot);
-    assocType->setAccessibility(std::max(prot->getFormalAccess(),
-                                         Accessibility::Internal));
+    validateAccessControl(prot);
+    assocType->setAccess(std::max(prot->getFormalAccess(),
+                                  Accessibility::Internal));
     break;
   }
@@ ... @@ void TypeChecker::validateAccessibility(ValueDecl *D) {
   case DeclKind::Func:
   case DeclKind::Subscript:
   case DeclKind::Constructor:
-    computeAccessibility(D);
+    computeAccessLevel(D);
     break;

   case DeclKind::Destructor:
   case DeclKind::EnumElement: {
     if (D->isInvalid()) {
-      D->setAccessibility(Accessibility::Private);
+      D->setAccess(Accessibility::Private);
     } else {
       auto container = cast<NominalTypeDecl>(D->getDeclContext());
-      validateAccessibility(container);
-      D->setAccessibility(std::max(container->getFormalAccess(),
-                                   Accessibility::Internal));
+      validateAccessControl(container);
+      D->setAccess(std::max(container->getFormalAccess(),
+                            Accessibility::Internal));
     }
     break;
   }
   }

-  assert(D->hasAccessibility());
+  assert(D->hasAccess());
 }

 /// Form the interface type of an extension from the raw type and the
@@ ... @@ static void diagnoseClassWithoutInitializers(TypeChecker &tc,
     auto initFrom = DeclName(C, C.Id_init, C.Id_from);
     auto result = tc.lookupMember(superclassDecl, superclassType, initFrom,
                                   NameLookupFlags::ProtocolMembers |
-                                  NameLookupFlags::IgnoreAccessibility);
+                                  NameLookupFlags::IgnoreAccessControl);

     if (!result.empty() && !result.front().getValueDecl()->isImplicit())
       diagDest = result.front().getValueDecl();
@@ ... @@ void TypeChecker::addImplicitConstructors(NominalTypeDecl *decl) {
       addImplicitConstructors(superclassDecl);

     auto ctors = lookupConstructors(classDecl, superclassTy,
-                                    NameLookupFlags::IgnoreAccessibility);
+                                    NameLookupFlags::IgnoreAccessControl);

     for (auto memberResult : ctors) {
       auto member = memberResult.getValueDecl();
--- a/lib/Sema/TypeCheckGeneric.cpp
+++ b/lib/Sema/TypeCheckGeneric.cpp
@@ ... @@ TypeChecker::prepareGenericParamList(GenericParamList *gp,
   unsigned depth = gp->getDepth();
   for (auto paramDecl : *gp) {
     paramDecl->setDepth(depth);
-    if (!paramDecl->hasAccessibility())
-      paramDecl->setAccessibility(access);
+    if (!paramDecl->hasAccess())
+      paramDecl->setAccess(access);
   }
 }

--- a/lib/Sema/TypeCheckNameLookup.cpp
+++ b/lib/Sema/TypeCheckNameLookup.cpp
@@ ... @@ LookupResult TypeChecker::lookupUnqualified(DeclContext *dc, DeclName name,
                            loc,
                            /*IsTypeLookup=*/false,
                            options.contains(NameLookupFlags::ProtocolMembers),
-                           options.contains(NameLookupFlags::IgnoreAccessibility));
+                           options.contains(NameLookupFlags::IgnoreAccessControl));

   LookupResult result;
   LookupResultBuilder builder(*this, result, dc, options,
@@ ... @@ TypeChecker::lookupUnqualifiedType(DeclContext *dc, DeclName name,
                              loc,
                              /*IsTypeLookup=*/true,
                              /*AllowProtocolMembers=*/false,
-                             options.contains(NameLookupFlags::IgnoreAccessibility));
+                             options.contains(NameLookupFlags::IgnoreAccessControl));

   if (!lookup.Results.empty() ||
       !options.contains(NameLookupFlags::ProtocolMembers)) {
@@ ... @@ TypeChecker::lookupUnqualifiedType(DeclContext *dc, DeclName name,
                              loc,
                              /*IsTypeLookup=*/true,
                              /*AllowProtocolMembers=*/true,
-                             options.contains(NameLookupFlags::IgnoreAccessibility));
+                             options.contains(NameLookupFlags::IgnoreAccessControl));

   return LookupResult(lookup.Results);
 }
@@ ... @@ LookupResult TypeChecker::lookupMember(DeclContext *dc,
     subOptions |= NL_KnownNonCascadingDependency;
   if (options.contains(NameLookupFlags::DynamicLookup))
     subOptions |= NL_DynamicLookup;
-  if (options.contains(NameLookupFlags::IgnoreAccessibility))
-    subOptions |= NL_IgnoreAccessibility;
+  if (options.contains(NameLookupFlags::IgnoreAccessControl))
+    subOptions |= NL_IgnoreAccessControl;

   if (options.contains(NameLookupFlags::ProtocolMembers))
     subOptions |= NL_ProtocolMembers;
@@ ... @@ LookupTypeResult TypeChecker::lookupMemberType(DeclContext *dc,
     subOptions |= NL_KnownNonCascadingDependency;
   if (options.contains(NameLookupFlags::ProtocolMembers))
     subOptions |= NL_ProtocolMembers;
-  if (options.contains(NameLookupFlags::IgnoreAccessibility))
-    subOptions |= NL_IgnoreAccessibility;
+  if (options.contains(NameLookupFlags::IgnoreAccessControl))
+    subOptions |= NL_IgnoreAccessControl;

   if (!dc->lookupQualified(type, name, subOptions, this, decls))
     return result;
--- a/lib/Sema/TypeCheckProtocol.cpp
+++ b/lib/Sema/TypeCheckProtocol.cpp
@@ ... @@ namespace {
                            unsigned &bestIdx,
                            bool &doNotDiagnoseMatches);

-    bool checkWitnessAccessibility(AccessScope &requiredAccessScope,
-                                   ValueDecl *requirement,
-                                   ValueDecl *witness,
-                                   bool *isSetter);
+    bool checkWitnessAccess(AccessScope &requiredAccessScope,
+                            ValueDecl *requirement,
+                            ValueDecl *witness,
+                            bool *isSetter);

     bool checkWitnessAvailability(ValueDecl *requirement,
                                   ValueDecl *witness,
@@ ... @@ bool WitnessChecker::findBestWitness(
   return isReallyBest;
 }

-bool WitnessChecker::
-checkWitnessAccessibility(AccessScope &requiredAccessScope,
-                          ValueDecl *requirement,
-                          ValueDecl *witness,
-                          bool *isSetter) {
+bool WitnessChecker::checkWitnessAccess(AccessScope &requiredAccessScope,
+                                        ValueDecl *requirement,
+                                        ValueDecl *witness,
+                                        bool *isSetter) {
   *isSetter = false;

   auto scopeIntersection =
@@ ... @@ checkWitness(AccessScope requiredAccessScope,
     return CheckKind::OptionalityConflict;

   bool isSetter = false;
-  if (checkWitnessAccessibility(requiredAccessScope, requirement,
-                                match.Witness, &isSetter)) {
+  if (checkWitnessAccess(requiredAccessScope, requirement, match.Witness,
+                         &isSetter)) {
     CheckKind kind = (isSetter
                       ? CheckKind::AccessibilityOfSetter
                       : CheckKind::Accessibility);
@@ ... @@ void ConformanceChecker::recordTypeWitness(AssociatedTypeDecl *assocType,
     AccessScope requiredAccessScope =
         Adoptee->getAnyNominal()->getFormalAccessScope(DC);
     bool isSetter = false;
-    if (checkWitnessAccessibility(requiredAccessScope, assocType, typeDecl,
-                                  &isSetter)) {
+    if (checkWitnessAccess(requiredAccessScope, assocType, typeDecl,
+                           &isSetter)) {
       assert(!isSetter);

       // Avoid relying on the lifetime of 'this'.
@@ ... @@ void ConformanceChecker::recordTypeWitness(AssociatedTypeDecl *assocType,
           [DC, typeDecl, requiredAccessScope](
             NormalProtocolConformance *conformance) {
         Accessibility requiredAccess =
-            requiredAccessScope.requiredAccessibilityForDiagnostics();
+            requiredAccessScope.requiredAccessForDiagnostics();
         auto proto = conformance->getProtocol();
         auto protoAccessScope = proto->getFormalAccessScope(DC);
         bool protoForcesAccess =
@@ ... @@ void ConformanceChecker::recordTypeWitness(AssociatedTypeDecl *assocType,
                                  typeDecl->getFullName(),
                                  requiredAccess,
                                  proto->getName());
-        fixItAccessibility(diag, typeDecl, requiredAccess);
+        fixItAccess(diag, typeDecl, requiredAccess);
       });
     }
   } else {
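checkWitnessAccess requires a witness to be at least as visible as the protocol requirement it satisfies; the delayed diagnostic later offers a fix-it via fixItAccess. A hypothetical example (wording approximate):

    public protocol Greeter {
        func greet()
    }

    public struct English: Greeter {
        // error: method 'greet()' must be declared public because it
        // matches a requirement in public protocol 'Greeter'
        fileprivate func greet() {}
    }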
@@ ... @@ void ConformanceChecker::recordTypeWitness(AssociatedTypeDecl *assocType,

   // Inject the typealias into the nominal decl that conforms to the protocol.
   if (auto nominal = DC->getAsNominalTypeOrNominalTypeExtensionContext()) {
-    TC.computeAccessibility(nominal);
+    TC.computeAccessLevel(nominal);
     // FIXME: Ideally this would use the protocol's access too---that is,
     // a typealias added for an internal protocol shouldn't need to be
     // public---but that can be problematic if the same type conforms to two
     // protocols with different access levels.
     Accessibility aliasAccess = nominal->getFormalAccess();
     aliasAccess = std::max(aliasAccess, Accessibility::Internal);
-    aliasDecl->setAccessibility(aliasAccess);
+    aliasDecl->setAccess(aliasAccess);

     if (nominal == DC) {
       nominal->addMember(aliasDecl);
@@ ... @@ printRequirementStub(ValueDecl *Requirement, DeclContext *Adopter,

   PrintOptions Options = PrintOptions::printForDiagnostics();
   Options.PrintDocumentationComments = false;
-  Options.AccessibilityFilter = Accessibility::Private;
-  Options.PrintAccessibility = false;
+  Options.AccessFilter = Accessibility::Private;
+  Options.PrintAccess = false;
   Options.SkipAttributes = true;
   Options.FunctionBody = [](const ValueDecl *VD) { return getCodePlaceholder(); };
   Options.setBaseType(AdopterTy);
@@ ... @@ ConformanceChecker::resolveWitnessViaLookup(ValueDecl *requirement) {
               NormalProtocolConformance *conformance) {
             auto requiredAccessScope = check.RequiredAccessScope;
             Accessibility requiredAccess =
-                requiredAccessScope.requiredAccessibilityForDiagnostics();
+                requiredAccessScope.requiredAccessForDiagnostics();
             auto proto = conformance->getProtocol();
             auto protoAccessScope = proto->getFormalAccessScope(DC);
             bool protoForcesAccess =
@@ ... @@ ConformanceChecker::resolveWitnessViaLookup(ValueDecl *requirement) {
                                      witness->getFullName(),
                                      isSetter,
                                      requiredAccess,
-                                     protoAccessScope.accessibilityForDiagnostics(),
+                                     protoAccessScope.accessLevelForDiagnostics(),
                                      proto->getName());
-            fixItAccessibility(diag, witness, requiredAccess, isSetter);
+            fixItAccess(diag, witness, requiredAccess, isSetter);
           });
           break;
         }
@@ ... @@ void TypeChecker::checkConformancesInContext(DeclContext *dc,

   // Determine the accessibility of this conformance.
   Decl *currentDecl = nullptr;
-  Accessibility defaultAccessibility;
+  Accessibility defaultAccess;
   if (auto ext = dyn_cast<ExtensionDecl>(dc)) {
     Type extendedTy = ext->getExtendedType();
     if (!extendedTy)
@@ ... @@ void TypeChecker::checkConformancesInContext(DeclContext *dc,
     const NominalTypeDecl *nominal = extendedTy->getAnyNominal();
     if (!nominal)
       return;
-    defaultAccessibility = nominal->getFormalAccess();
+    defaultAccess = nominal->getFormalAccess();
     currentDecl = ext;
   } else {
-    defaultAccessibility = cast<NominalTypeDecl>(dc)->getFormalAccess();
+    defaultAccess = cast<NominalTypeDecl>(dc)->getFormalAccess();
     currentDecl = cast<NominalTypeDecl>(dc);
   }
@@ ... @@ void TypeChecker::checkConformancesInContext(DeclContext *dc,

   if (tracker)
     tracker->addUsedMember({conformance->getProtocol(), Identifier()},
-                           defaultAccessibility > Accessibility::FilePrivate);
+                           defaultAccess > Accessibility::FilePrivate);

   // Diagnose @NSCoding on file/fileprivate/nested/generic classes, which
   // have unstable archival names.
@@ ... @@ void TypeChecker::checkConformancesInContext(DeclContext *dc,
                          bestOptionalReqs.begin(),
                          bestOptionalReqs.end(),
                          [&](ValueDecl *req) {
-                           return !shouldWarnAboutPotentialWitness(req, value,
-                                                                   defaultAccessibility,
+                           return !shouldWarnAboutPotentialWitness(req, value, defaultAccess,
                                                                    bestScore);
                          }),
           bestOptionalReqs.end());
@@ ... @@ void TypeChecker::checkConformancesInContext(DeclContext *dc,
         if (conformance->getProtocol() == req->getDeclContext()) {
           diagnosePotentialWitness(*this,
                                    conformance->getRootNormalConformance(),
-                                   req, value, defaultAccessibility);
+                                   req, value, defaultAccess);
           diagnosed = true;
           break;
         }
--- a/lib/Sema/TypeCheckType.cpp
+++ b/lib/Sema/TypeCheckType.cpp
@@ ... @@ static Type diagnoseUnknownType(TypeChecker &tc, DeclContext *dc,

     NameLookupOptions relookupOptions = lookupOptions;
     relookupOptions |= NameLookupFlags::KnownPrivate;
-    relookupOptions |= NameLookupFlags::IgnoreAccessibility;
+    relookupOptions |= NameLookupFlags::IgnoreAccessControl;
     auto inaccessibleResults =
         tc.lookupUnqualifiedType(lookupDC, comp->getIdentifier(), comp->getIdLoc(),
                                  relookupOptions);
@@ ... @@ static Type diagnoseUnknownType(TypeChecker &tc, DeclContext *dc,
     // Try ignoring access control.
     NameLookupOptions relookupOptions = lookupOptions;
     relookupOptions |= NameLookupFlags::KnownPrivate;
-    relookupOptions |= NameLookupFlags::IgnoreAccessibility;
+    relookupOptions |= NameLookupFlags::IgnoreAccessControl;
     auto inaccessibleMembers = tc.lookupMemberType(dc, parentType,
                                                    comp->getIdentifier(),
                                                    relookupOptions);
@@ ... @@ static Type diagnoseUnknownType(TypeChecker &tc, DeclContext *dc,
   // this allows for more precise diagnostic, which distinguishes between
   // identifier not found as a member type vs. not found at all.
   NameLookupOptions memberLookupOptions = lookupOptions;
-  memberLookupOptions |= NameLookupFlags::IgnoreAccessibility;
+  memberLookupOptions |= NameLookupFlags::IgnoreAccessControl;
   memberLookupOptions |= NameLookupFlags::KnownPrivate;

   memberLookup = tc.lookupMember(dc, parentType, comp->getIdentifier(),
--- a/lib/Sema/TypeChecker.h
+++ b/lib/Sema/TypeChecker.h
@@ ... @@ enum class NameLookupFlags {
   DynamicLookup = 0x08,
   /// Whether to ignore access control for this lookup, allowing inaccessible
   /// results to be returned.
-  IgnoreAccessibility = 0x10,
+  IgnoreAccessControl = 0x10,
 };

 /// A set of options that control name lookup.
@@ ... @@ class TypeChecker final : public LazyResolver {
   void validateDeclForNameLookup(ValueDecl *D);

   /// Resolves the accessibility of the given declaration.
-  void validateAccessibility(ValueDecl *D);
+  void validateAccessControl(ValueDecl *D);

   /// Validate the given extension declaration, ensuring that it
   /// properly extends the nominal type it names.
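The IgnoreAccessControl flag exists so diagnostics can re-run a failed lookup without access filtering: if the inaccessible declaration is found on the second pass, the compiler can explain why the name is unusable instead of claiming it does not exist. A hypothetical example (wording approximate):

    struct Vault {
        private var combination = 42
    }

    let v = Vault()
    // Thanks to the relaxed re-lookup this is reported as an access
    // violation rather than a missing member:
    // error: 'combination' is inaccessible due to 'private' protection level
    print(v.combination)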
@@ ... @@ class TypeChecker final : public LazyResolver {

   void checkOwnershipAttr(VarDecl *D, OwnershipAttr *attr);

-  void computeAccessibility(ValueDecl *D);
-  void computeDefaultAccessibility(ExtensionDecl *D);
+  void computeAccessLevel(ValueDecl *D);
+  void computeDefaultAccessLevel(ExtensionDecl *D);

-  virtual void resolveAccessibility(ValueDecl *VD) override {
-    validateAccessibility(VD);
+  virtual void resolveAccessControl(ValueDecl *VD) override {
+    validateAccessControl(VD);
   }

   virtual void resolveDeclSignature(ValueDecl *VD) override {
--- a/lib/Serialization/Deserialization.cpp
+++ b/lib/Serialization/Deserialization.cpp
@@ ... @@ static bool isDeclAttrRecord(unsigned ID) {
 }

 static Optional<swift::Accessibility>
-getActualAccessibility(uint8_t raw) {
+getActualAccessLevel(uint8_t raw) {
   switch (serialization::AccessibilityKind(raw)) {
 #define CASE(NAME) \
   case serialization::AccessibilityKind::NAME: \
@@ ... @@ ModuleFile::getDeclChecked(DeclID DID, Optional<DeclContext *> ForcedContext) {

     alias->setUnderlyingType(getType(underlyingTypeID));

-    if (auto accessLevel = getActualAccessibility(rawAccessLevel)) {
-      alias->setAccessibility(*accessLevel);
+    if (auto accessLevel = getActualAccessLevel(rawAccessLevel)) {
+      alias->setAccess(*accessLevel);
     } else {
       error();
       return nullptr;
@@ ... @@ ModuleFile::getDeclChecked(DeclID DID, Optional<DeclContext *> ForcedContext) {
            "erroneous associated type");

     Accessibility parentAccess = cast<ProtocolDecl>(DC)->getFormalAccess();
-    assocType->setAccessibility(std::max(parentAccess, Accessibility::Internal));
+    assocType->setAccess(std::max(parentAccess, Accessibility::Internal));
     if (isImplicit)
       assocType->setImplicit();
@@ ... @@ ModuleFile::getDeclChecked(DeclID DID, Optional<DeclContext *> ForcedContext) {
     // Read the generic environment.
     configureGenericEnvironment(theStruct, genericEnvID);

-    if (auto accessLevel = getActualAccessibility(rawAccessLevel)) {
-      theStruct->setAccessibility(*accessLevel);
+    if (auto accessLevel = getActualAccessLevel(rawAccessLevel)) {
+      theStruct->setAccess(*accessLevel);
     } else {
       error();
       return nullptr;
@@ ... @@ ModuleFile::getDeclChecked(DeclID DID, Optional<DeclContext *> ForcedContext) {

     configureGenericEnvironment(ctor, genericEnvID);

-    if (auto accessLevel = getActualAccessibility(rawAccessLevel)) {
-      ctor->setAccessibility(*accessLevel);
+    if (auto accessLevel = getActualAccessLevel(rawAccessLevel)) {
+      ctor->setAccess(*accessLevel);
     } else {
       error();
       return nullptr;
@@ ... @@ ModuleFile::getDeclChecked(DeclID DID, Optional<DeclContext *> ForcedContext) {
     configureStorage(var, storageKind, getterID, setterID, materializeForSetID,
                      addressorID, mutableAddressorID, willSetID, didSetID);

-    if (auto accessLevel = getActualAccessibility(rawAccessLevel)) {
-      var->setAccessibility(*accessLevel);
+    if (auto accessLevel = getActualAccessLevel(rawAccessLevel)) {
+      var->setAccess(*accessLevel);
     } else {
       error();
       return nullptr;
     }

     if (var->isSettable(nullptr)) {
-      if (auto setterAccess = getActualAccessibility(rawSetterAccessLevel)) {
-        var->setSetterAccessibility(*setterAccess);
+      if (auto setterAccess = getActualAccessLevel(rawSetterAccessLevel)) {
+        var->setSetterAccess(*setterAccess);
       } else {
         error();
         return nullptr;
@@ ... @@ ModuleFile::getDeclChecked(DeclID DID, Optional<DeclContext *> ForcedContext) {

     configureGenericEnvironment(fn, genericEnvID);

-    if (auto accessLevel = getActualAccessibility(rawAccessLevel)) {
-      fn->setAccessibility(*accessLevel);
+    if (auto accessLevel = getActualAccessLevel(rawAccessLevel)) {
+      fn->setAccess(*accessLevel);
     } else {
       error();
       return nullptr;
@@ ... @@ ModuleFile::getDeclChecked(DeclID DID, Optional<DeclContext *> ForcedContext) {

     proto->setRequiresClass(isClassBounded);

-    if (auto accessLevel = getActualAccessibility(rawAccessLevel)) {
-      proto->setAccessibility(*accessLevel);
+    if (auto accessLevel = getActualAccessLevel(rawAccessLevel)) {
+      proto->setAccess(*accessLevel);
     } else {
       error();
       return nullptr;
@@ ... @@ ModuleFile::getDeclChecked(DeclID DID, Optional<DeclContext *> ForcedContext) {

     configureGenericEnvironment(theClass, genericEnvID);

-    if (auto accessLevel = getActualAccessibility(rawAccessLevel)) {
-      theClass->setAccessibility(*accessLevel);
+    if (auto accessLevel = getActualAccessLevel(rawAccessLevel)) {
+      theClass->setAccess(*accessLevel);
     } else {
       error();
       return nullptr;
@@ ... @@ ModuleFile::getDeclChecked(DeclID DID, Optional<DeclContext *> ForcedContext) {

     configureGenericEnvironment(theEnum, genericEnvID);

-    if (auto accessLevel = getActualAccessibility(rawAccessLevel)) {
-      theEnum->setAccessibility(*accessLevel);
+    if (auto accessLevel = getActualAccessLevel(rawAccessLevel)) {
+      theEnum->setAccess(*accessLevel);
     } else {
       error();
       return nullptr;
@@ ... @@ ModuleFile::getDeclChecked(DeclID DID, Optional<DeclContext *> ForcedContext) {

     if (isImplicit)
       elem->setImplicit();
-    elem->setAccessibility(std::max(cast<EnumDecl>(DC)->getFormalAccess(),
-                                    Accessibility::Internal));
+    elem->setAccess(std::max(cast<EnumDecl>(DC)->getFormalAccess(),
+                             Accessibility::Internal));

     break;
   }
@@ ... @@ ModuleFile::getDeclChecked(DeclID DID, Optional<DeclContext *> ForcedContext) {
                      getterID, setterID, materializeForSetID,
                      addressorID, mutableAddressorID, willSetID, didSetID);

-    if (auto accessLevel = getActualAccessibility(rawAccessLevel)) {
-      subscript->setAccessibility(*accessLevel);
+    if (auto accessLevel = getActualAccessLevel(rawAccessLevel)) {
+      subscript->setAccess(*accessLevel);
     } else {
       error();
       return nullptr;
     }

     if (subscript->isSettable()) {
-      if (auto setterAccess = getActualAccessibility(rawSetterAccessLevel)) {
-        subscript->setSetterAccessibility(*setterAccess);
+      if (auto setterAccess = getActualAccessLevel(rawSetterAccessLevel)) {
+        subscript->setSetterAccess(*setterAccess);
       } else {
         error();
         return nullptr;
@@ ... @@ ModuleFile::getDeclChecked(DeclID DID, Optional<DeclContext *> ForcedContext) {

     configureGenericEnvironment(dtor, genericEnvID);

-    dtor->setAccessibility(std::max(cast<ClassDecl>(DC)->getFormalAccess(),
-                                    Accessibility::Internal));
+    dtor->setAccess(std::max(cast<ClassDecl>(DC)->getFormalAccess(),
+                             Accessibility::Internal));
     auto *selfParams = readParameterList();
     selfParams->get(0)->setImplicit();  // self is implicit.

--- a/lib/Serialization/ModuleFile.cpp
+++ b/lib/Serialization/ModuleFile.cpp
@@ ... @@ void ModuleFile::loadExtensions(NominalTypeDecl *nominal) {
   if (iter == ExtensionDecls->end())
     return;

-  if (nominal->hasAccessibility() &&
+  if (nominal->hasAccess() &&
       nominal->getEffectiveAccess() < Accessibility::Internal) {
     if (nominal->getModuleScopeContext() != getFile())
       return;
--- a/lib/Serialization/Serialization.cpp
+++ b/lib/Serialization/Serialization.cpp
@@ ... @@ getStableStaticSpelling(swift::StaticSpellingKind SS) {
   llvm_unreachable("Unhandled StaticSpellingKind in switch.");
 }

-static uint8_t getRawStableAccessibility(Accessibility access) {
+static uint8_t getRawStableAccessLevel(Accessibility access) {
   switch (access) {
 #define CASE(NAME) \
   case Accessibility::NAME: \
@@ ... @@ void Serializer::writeDecl(const Decl *D) {
   }

   if (auto *value = dyn_cast<ValueDecl>(D)) {
-    if (value->hasAccessibility() &&
+    if (value->hasAccess() &&
         value->getFormalAccess() <= Accessibility::FilePrivate &&
         !value->getDeclContext()->isLocalContext()) {
       // FIXME: We shouldn't need to encode this for /all/ private decls.
@@ ... @@ void Serializer::writeDecl(const Decl *D) {
     auto underlying = typeAlias->getUnderlyingTypeLoc().getType();

     uint8_t rawAccessLevel =
-        getRawStableAccessibility(typeAlias->getFormalAccess());
+        getRawStableAccessLevel(typeAlias->getFormalAccess());

     unsigned abbrCode = DeclTypeAbbrCodes[TypeAliasLayout::Code];
     TypeAliasLayout::emitRecord(Out, ScratchRecord, abbrCode,
@@ ... @@ void Serializer::writeDecl(const Decl *D) {
       inheritedTypes.push_back(addTypeRef(inherited.getType()));

     uint8_t rawAccessLevel =
-        getRawStableAccessibility(theStruct->getFormalAccess());
+        getRawStableAccessLevel(theStruct->getFormalAccess());

     unsigned abbrCode = DeclTypeAbbrCodes[StructLayout::Code];
     StructLayout::emitRecord(Out, ScratchRecord, abbrCode,
@@ ... @@ void Serializer::writeDecl(const Decl *D) {
       inheritedAndDependencyTypes.push_back(addTypeRef(ty));

     uint8_t rawAccessLevel =
-        getRawStableAccessibility(theEnum->getFormalAccess());
+        getRawStableAccessLevel(theEnum->getFormalAccess());

     unsigned abbrCode = DeclTypeAbbrCodes[EnumLayout::Code];
     EnumLayout::emitRecord(Out, ScratchRecord, abbrCode,
@@ ... @@ void Serializer::writeDecl(const Decl *D) {
       inheritedTypes.push_back(addTypeRef(inherited.getType()));

     uint8_t rawAccessLevel =
-        getRawStableAccessibility(theClass->getFormalAccess());
+        getRawStableAccessLevel(theClass->getFormalAccess());

     unsigned abbrCode = DeclTypeAbbrCodes[ClassLayout::Code];
     ClassLayout::emitRecord(Out, ScratchRecord, abbrCode,
@@ ... @@ void Serializer::writeDecl(const Decl *D) {
     for (auto element : proto->getInherited())
       inherited.push_back(addTypeRef(element.getType()));

-    uint8_t rawAccessLevel =
-        getRawStableAccessibility(proto->getFormalAccess());
+    uint8_t rawAccessLevel = getRawStableAccessLevel(proto->getFormalAccess());

     unsigned abbrCode = DeclTypeAbbrCodes[ProtocolLayout::Code];
     ProtocolLayout::emitRecord(Out, ScratchRecord, abbrCode,
@@ ... @@ void Serializer::writeDecl(const Decl *D) {
     auto contextID = addDeclContextRef(var->getDeclContext());

     Accessors accessors = getAccessors(var);
-    uint8_t rawAccessLevel =
-        getRawStableAccessibility(var->getFormalAccess());
+    uint8_t rawAccessLevel = getRawStableAccessLevel(var->getFormalAccess());
     uint8_t rawSetterAccessLevel = rawAccessLevel;
     if (var->isSettable(nullptr))
       rawSetterAccessLevel =
-          getRawStableAccessibility(var->getSetterAccessibility());
+          getRawStableAccessLevel(var->getSetterFormalAccess());

     Type ty = var->getInterfaceType();
     SmallVector<TypeID, 2> dependencies;
@@ ... @@ void Serializer::writeDecl(const Decl *D) {
     for (auto argName : fn->getFullName().getArgumentNames())
push_back ( addDeclBaseNameRef ( argName ) ) ; <nl> <nl> - uint8_t rawAccessLevel = <nl> - getRawStableAccessibility ( fn - > getFormalAccess ( ) ) ; <nl> + uint8_t rawAccessLevel = getRawStableAccessLevel ( fn - > getFormalAccess ( ) ) ; <nl> uint8_t rawAddressorKind = <nl> getRawStableAddressorKind ( fn - > getAddressorKind ( ) ) ; <nl> Type ty = fn - > getInterfaceType ( ) ; <nl> void Serializer : : writeDecl ( const Decl * D ) { <nl> <nl> Accessors accessors = getAccessors ( subscript ) ; <nl> uint8_t rawAccessLevel = <nl> - getRawStableAccessibility ( subscript - > getFormalAccess ( ) ) ; <nl> + getRawStableAccessLevel ( subscript - > getFormalAccess ( ) ) ; <nl> uint8_t rawSetterAccessLevel = rawAccessLevel ; <nl> if ( subscript - > isSettable ( ) ) <nl> rawSetterAccessLevel = <nl> - getRawStableAccessibility ( subscript - > getSetterAccessibility ( ) ) ; <nl> + getRawStableAccessLevel ( subscript - > getSetterFormalAccess ( ) ) ; <nl> <nl> unsigned abbrCode = DeclTypeAbbrCodes [ SubscriptLayout : : Code ] ; <nl> SubscriptLayout : : emitRecord ( Out , ScratchRecord , abbrCode , <nl> void Serializer : : writeDecl ( const Decl * D ) { <nl> for ( Type dependency : collectDependenciesFromType ( ty - > getCanonicalType ( ) ) ) <nl> nameComponentsAndDependencies . push_back ( addTypeRef ( dependency ) ) ; <nl> <nl> - uint8_t rawAccessLevel = <nl> - getRawStableAccessibility ( ctor - > getFormalAccess ( ) ) ; <nl> + uint8_t rawAccessLevel = getRawStableAccessLevel ( ctor - > getFormalAccess ( ) ) ; <nl> <nl> bool firstTimeRequired = ctor - > isRequired ( ) ; <nl> if ( auto * overridden = ctor - > getOverriddenDecl ( ) ) <nl> mmm a / test / attr / accessibility_print . swift <nl> ppp b / test / attr / accessibility_print . swift <nl> <nl> / / RUN : % empty - directory ( % t ) <nl> - / / RUN : % target - swift - ide - test - skip - deinit = false - print - ast - typechecked - print - accessibility - source - filename = % s | % FileCheck % s - check - prefix = CHECK - check - prefix = CHECK - SRC <nl> + / / RUN : % target - swift - ide - test - skip - deinit = false - print - ast - typechecked - print - access - source - filename = % s | % FileCheck % s - check - prefix = CHECK - check - prefix = CHECK - SRC <nl> / / RUN : % target - swift - frontend - emit - module - path % t / accessibility_print . swiftmodule % s <nl> - / / RUN : % target - swift - ide - test - skip - deinit = false - print - module - print - accessibility - module - to - print = accessibility_print - I % t - source - filename = % s | % FileCheck % s <nl> + / / RUN : % target - swift - ide - test - skip - deinit = false - print - module - print - access - module - to - print = accessibility_print - I % t - source - filename = % s | % FileCheck % s <nl> <nl> / / This file uses alphabetic prefixes on its declarations because swift - ide - test <nl> / / sorts decls in a module before printing them . <nl> mmm a / tools / SourceKit / include / SourceKit / Core / ProtocolUIDs . def <nl> ppp b / tools / SourceKit / include / SourceKit / Core / ProtocolUIDs . def <nl> KEY ( Results , " key . results " ) <nl> KEY ( Request , " key . request " ) <nl> KEY ( Notification , " key . notification " ) <nl> KEY ( Kind , " key . kind " ) <nl> - KEY ( Accessibility , " key . accessibility " ) <nl> - KEY ( SetterAccessibility , " key . setter_accessibility " ) <nl> + KEY ( AccessLevel , " key . accessibility " ) <nl> + KEY ( SetterAccessLevel , " key . setter_accessibility " ) <nl> KEY ( Keyword , " key . keyword " ) <nl> KEY ( Name , " key . 
name " ) <nl> KEY ( USR , " key . usr " ) <nl> mmm a / tools / SourceKit / lib / SwiftLang / SwiftEditor . cpp <nl> ppp b / tools / SourceKit / lib / SwiftLang / SwiftEditor . cpp <nl> void SwiftEditorDocument : : Implementation : : buildSwiftInv ( <nl> <nl> namespace { <nl> <nl> - static UIdent getAccessibilityUID ( Accessibility Access ) { <nl> + static UIdent getAccessLevelUID ( Accessibility Access ) { <nl> static UIdent AccessOpen ( " source . lang . swift . accessibility . open " ) ; <nl> static UIdent AccessPublic ( " source . lang . swift . accessibility . public " ) ; <nl> static UIdent AccessInternal ( " source . lang . swift . accessibility . internal " ) ; <nl> static UIdent getAccessibilityUID ( Accessibility Access ) { <nl> llvm_unreachable ( " Unhandled Accessibility in switch . " ) ; <nl> } <nl> <nl> - static Optional < Accessibility > getAccessibilityStrictly ( const ExtensionDecl * ED ) { <nl> - if ( ED - > hasDefaultAccessibility ( ) ) <nl> - return ED - > getDefaultAccessibility ( ) ; <nl> + static Optional < Accessibility > getAccessLevelStrictly ( const ExtensionDecl * ED ) { <nl> + if ( ED - > hasDefaultAccessLevel ( ) ) <nl> + return ED - > getDefaultAccessLevel ( ) ; <nl> <nl> / / Check if the decl has an explicit accessibility attribute . <nl> if ( auto * AA = ED - > getAttrs ( ) . getAttribute < AccessibilityAttr > ( ) ) <nl> static Optional < Accessibility > getAccessibilityStrictly ( const ExtensionDecl * ED ) <nl> return None ; <nl> } <nl> <nl> - static Accessibility inferDefaultAccessibility ( const ExtensionDecl * ED ) { <nl> - if ( auto StrictAccess = getAccessibilityStrictly ( ED ) ) <nl> + static Accessibility inferDefaultAccessLevel ( const ExtensionDecl * ED ) { <nl> + if ( auto StrictAccess = getAccessLevelStrictly ( ED ) ) <nl> return StrictAccess . getValue ( ) ; <nl> <nl> / / Assume " internal " , which is the most common thing anyway . <nl> static Accessibility inferDefaultAccessibility ( const ExtensionDecl * ED ) { <nl> / / / If typechecking was performed we use the computed accessibility , otherwise <nl> / / / we fallback to inferring accessibility syntactically . This may not be as <nl> / / / accurate but it ' s only until we have typechecked the AST . <nl> - static Accessibility inferAccessibility ( const ValueDecl * D ) { <nl> + static Accessibility inferAccessLevel ( const ValueDecl * D ) { <nl> assert ( D ) ; <nl> - if ( D - > hasAccessibility ( ) ) <nl> + if ( D - > hasAccess ( ) ) <nl> return D - > getFormalAccess ( ) ; <nl> <nl> / / Check if the decl has an explicit accessibility attribute . <nl> static Accessibility inferAccessibility ( const ValueDecl * D ) { <nl> return Accessibility : : Internal ; <nl> case DeclContextKind : : GenericTypeDecl : { <nl> auto Nominal = cast < GenericTypeDecl > ( DC ) ; <nl> - Accessibility Access = inferAccessibility ( Nominal ) ; <nl> + Accessibility Access = inferAccessLevel ( Nominal ) ; <nl> if ( ! isa < ProtocolDecl > ( Nominal ) ) <nl> Access = std : : min ( Access , Accessibility : : Internal ) ; <nl> return Access ; <nl> } <nl> case DeclContextKind : : ExtensionDecl : <nl> - return inferDefaultAccessibility ( cast < ExtensionDecl > ( DC ) ) ; <nl> + return inferDefaultAccessLevel ( cast < ExtensionDecl > ( DC ) ) ; <nl> } <nl> <nl> llvm_unreachable ( " Unhandled DeclContextKind in switch . 
" ) ; <nl> } <nl> <nl> static Optional < Accessibility > <nl> - inferSetterAccessibility ( const AbstractStorageDecl * D ) { <nl> + inferSetterAccessLevel ( const AbstractStorageDecl * D ) { <nl> if ( auto * VD = dyn_cast < VarDecl > ( D ) ) { <nl> if ( VD - > isLet ( ) ) <nl> return None ; <nl> inferSetterAccessibility ( const AbstractStorageDecl * D ) { <nl> if ( auto * AA = D - > getAttrs ( ) . getAttribute < SetterAccessibilityAttr > ( ) ) <nl> return AA - > getAccess ( ) ; <nl> else <nl> - return inferAccessibility ( D ) ; <nl> + return inferAccessLevel ( D ) ; <nl> } <nl> <nl> class SwiftDocumentStructureWalker : public ide : : SyntaxModelWalker { <nl> class SwiftDocumentStructureWalker : public ide : : SyntaxModelWalker { <nl> if ( Node . Kind ! = SyntaxStructureKind : : Parameter & & <nl> Node . Kind ! = SyntaxStructureKind : : LocalVariable ) { <nl> if ( auto * VD = dyn_cast_or_null < ValueDecl > ( Node . Dcl ) ) { <nl> - AccessLevel = getAccessibilityUID ( inferAccessibility ( VD ) ) ; <nl> + AccessLevel = getAccessLevelUID ( inferAccessLevel ( VD ) ) ; <nl> } else if ( auto * ED = dyn_cast_or_null < ExtensionDecl > ( Node . Dcl ) ) { <nl> - if ( auto StrictAccess = getAccessibilityStrictly ( ED ) ) <nl> - AccessLevel = getAccessibilityUID ( StrictAccess . getValue ( ) ) ; <nl> + if ( auto StrictAccess = getAccessLevelStrictly ( ED ) ) <nl> + AccessLevel = getAccessLevelUID ( StrictAccess . getValue ( ) ) ; <nl> } <nl> if ( auto * ASD = dyn_cast_or_null < AbstractStorageDecl > ( Node . Dcl ) ) { <nl> - Optional < Accessibility > SetAccess = inferSetterAccessibility ( ASD ) ; <nl> + Optional < Accessibility > SetAccess = inferSetterAccessLevel ( ASD ) ; <nl> if ( SetAccess . hasValue ( ) ) { <nl> - SetterAccessLevel = getAccessibilityUID ( SetAccess . getValue ( ) ) ; <nl> + SetterAccessLevel = getAccessLevelUID ( SetAccess . getValue ( ) ) ; <nl> } <nl> } <nl> } <nl> mmm a / tools / SourceKit / tools / sourcekitd / lib / API / DocStructureArray . cpp <nl> ppp b / tools / SourceKit / tools / sourcekitd / lib / API / DocStructureArray . cpp <nl> struct DocStructureReader { <nl> APPLY ( KeyLength , Int , node . Length ) ; <nl> APPLY ( KeyKind , UID , node . Kind ) ; <nl> if ( node . AccessLevel ) <nl> - APPLY ( KeyAccessibility , UID , node . AccessLevel ) ; <nl> + APPLY ( KeyAccessLevel , UID , node . AccessLevel ) ; <nl> if ( node . SetterAccessLevel ) <nl> - APPLY ( KeySetterAccessibility , UID , node . SetterAccessLevel ) ; <nl> + APPLY ( KeySetterAccessLevel , UID , node . SetterAccessLevel ) ; <nl> APPLY ( KeyNameOffset , Int , node . NameOffset ) ; <nl> APPLY ( KeyNameLength , Int , node . NameLength ) ; <nl> if ( node . BodyOffset | | node . BodyLength ) { <nl> mmm a / tools / swift - ide - test / swift - ide - test . cpp <nl> ppp b / tools / swift - ide - test / swift - ide - test . 
cpp <nl> PrintImplicitAttrs ( " print - implicit - attrs " , <nl> llvm : : cl : : init ( false ) ) ; <nl> <nl> static llvm : : cl : : opt < bool > <nl> - PrintAccessibility ( " print - accessibility " , <nl> - llvm : : cl : : desc ( " Print accessibility for all values " ) , <nl> - llvm : : cl : : cat ( Category ) , <nl> - llvm : : cl : : init ( false ) ) ; <nl> + PrintAccess ( " print - access " , <nl> + llvm : : cl : : desc ( " Print access keywords for all values " ) , <nl> + llvm : : cl : : cat ( Category ) , <nl> + llvm : : cl : : init ( false ) ) ; <nl> <nl> static llvm : : cl : : opt < bool > <nl> SkipUnavailable ( " skip - unavailable " , <nl> SkipUnavailable ( " skip - unavailable " , <nl> llvm : : cl : : init ( false ) ) ; <nl> <nl> static llvm : : cl : : opt < Accessibility > <nl> - AccessibilityFilter ( <nl> + AccessFilter ( <nl> llvm : : cl : : desc ( " Accessibility filter : " ) , <nl> llvm : : cl : : cat ( Category ) , <nl> llvm : : cl : : init ( Accessibility : : Private ) , <nl> static int doPrintLocalTypes ( const CompilerInvocation & InitInvok , <nl> llvm : : outs ( ) < < remangled < < " \ n " ; <nl> <nl> auto Options = PrintOptions : : printEverything ( ) ; <nl> - Options . PrintAccessibility = false ; <nl> + Options . PrintAccess = false ; <nl> LTD - > print ( llvm : : outs ( ) , Options ) ; <nl> llvm : : outs ( ) < < " \ n " ; <nl> } <nl> int main ( int argc , char * argv [ ] ) { <nl> PrintOpts . PreferTypeRepr = options : : PreferTypeRepr ; <nl> PrintOpts . ExplodePatternBindingDecls = options : : ExplodePatternBindingDecls ; <nl> PrintOpts . PrintImplicitAttrs = options : : PrintImplicitAttrs ; <nl> - PrintOpts . PrintAccessibility = options : : PrintAccessibility ; <nl> - PrintOpts . AccessibilityFilter = options : : AccessibilityFilter ; <nl> + PrintOpts . PrintAccess = options : : PrintAccess ; <nl> + PrintOpts . AccessFilter = options : : AccessFilter ; <nl> PrintOpts . PrintDocumentationComments = ! options : : SkipDocumentationComments ; <nl> PrintOpts . PrintRegularClangComments = options : : PrintRegularComments ; <nl> PrintOpts . SkipPrivateStdlibDecls = options : : SkipPrivateStdlibDecls ; <nl> mmm a / unittests / AST / TestContext . h <nl> ppp b / unittests / AST / TestContext . h <nl> class TestContext : public TestContextBase { <nl> auto result = new ( Ctx ) Nominal ( SourceLoc ( ) , Ctx . getIdentifier ( name ) , <nl> SourceLoc ( ) , / * inherited * / { } , <nl> genericParams , FileForLookups ) ; <nl> - result - > setAccessibility ( Accessibility : : Internal ) ; <nl> + result - > setAccess ( Accessibility : : Internal ) ; <nl> return result ; <nl> } <nl> } ; <nl>
|
Excise " Accessibility " from the compiler ( 1 / 3 )
|
apple/swift
|
5f30eac288d0d8a1e2491e9256e489f1fe608026
|
2017-08-28T18:11:57Z
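The rename above keeps the same serialization scheme: each access level is written as a stable raw byte (getRawStableAccessLevel) and read back through a checked decoder (getActualAccessLevel) that can reject corrupt input. A minimal self-contained C++ sketch of that round-trip, with hypothetical names (Access, toRaw, fromRaw) standing in for the compiler's types:

#include <cstdint>
#include <optional>

enum class Access : uint8_t { Private, FilePrivate, Internal, Public, Open };

// Writer side: an explicit switch, like getRawStableAccessLevel above, so the
// on-disk byte stays stable even if the in-memory enum is reordered later.
static uint8_t toRaw(Access a) {
  switch (a) {
  case Access::Private:     return 0;
  case Access::FilePrivate: return 1;
  case Access::Internal:    return 2;
  case Access::Public:      return 3;
  case Access::Open:        return 4;
  }
  return 0;  // unreachable; keeps compilers quiet
}

// Reader side: never trust a raw byte from a serialized module; returning
// nullopt lets the caller report a malformed file instead of crashing, which
// is what the error() paths above do when getActualAccessLevel fails.
static std::optional<Access> fromRaw(uint8_t raw) {
  switch (raw) {
  case 0: return Access::Private;
  case 1: return Access::FilePrivate;
  case 2: return Access::Internal;
  case 3: return Access::Public;
  case 4: return Access::Open;
  default: return std::nullopt;
  }
}

Keeping the writer an explicit switch rather than a static_cast is the point of the "stable" in the name: the serialized value is a file-format constant, not the enum's current ordinal.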
|
mmm a / project / BuildDependencies / scripts / 0_package . list <nl> ppp b / project / BuildDependencies / scripts / 0_package . list <nl> libxslt - 1 . 1 . 26_1 - win32 . 7z <nl> mysqlclient - 6 . 1 . 3 - win32 - vc120 . 7z <nl> pcre - 8 . 34 - win32 - vc120 . 7z <nl> python - 2 . 7 . 5 - 2 - win32 . 7z <nl> - sqlite - 3 . 7 . 16 . 2 - win32 - vc120 . 7z <nl> + sqlite - 3 . 8 . 6 - win32 - vc120 . 7z <nl> taglib - 1 . 9 . 1 - win32 - vc120 . 7z <nl> tinyxml - 2 . 6 . 2_3 - win32 - vc120 . 7z <nl>
|
[win32] update sqlite to 3080600 (aka 3.8.6)
|
xbmc/xbmc
|
35a22972885c58f0916f6df2a4ecb9568c1cc99a
|
2014-08-27T17:25:35Z
|
mmm a / hphp / runtime / ext / fb / ext_fb . php <nl> ppp b / hphp / runtime / ext / fb / ext_fb . php <nl> function fb_compact_unserialize ( mixed $ thing , <nl> * handler returns FALSE , code will continue with original function . <nl> * Otherwise , it will return what handler tells . The handler function looks <nl> * like " intercept_handler ( $ name , $ obj , $ params , $ data , & $ done ) " , where $ name <nl> - * is original function ' s name , $ obj is $ this for an instance method call or <nl> - * null for static method call or function calls , and $ params are original <nl> - * call ' s parameters . $ data is what ' s passed to fb_intercept ( ) and set $ done <nl> - * to false to indicate function should continue its execution with old <nl> - * function as if interception did not happen . By default $ done is true so it <nl> - * will return handler ' s return immediately without executing old function ' s <nl> - * code . Note that built - in functions are not interceptable . <nl> + * is original function ' s fully - qualified name ( ' Class : : method ' ) , $ obj is $ this <nl> + * for an instance method call or null for static method call or function calls , <nl> + * and $ params are original call ' s parameters . $ data is what ' s passed to <nl> + * fb_intercept ( ) and set $ done to false to indicate function should continue its <nl> + * execution with old function as if interception did not happen . By default $ done <nl> + * is true so it will return handler ' s return immediately without executing old <nl> + * function ' s code . Note that built - in functions are not interceptable . <nl> * @ param string $ name - The function or class method name to intercept . Use <nl> * " class : : method " for method name . If empty , all functions will be <nl> * intercepted by the specified handler and registered individual handlers <nl>
|
Clarify documentation around $name in fb_intercept callbacks.
|
facebook/hhvm
|
8ef2e1d017338d1a177446097111eed8da7a46e6
|
2015-11-05T05:02:22Z
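The doc change above pins down the $done contract. A hypothetical C++ rendering of that contract (not HHVM's API; all names here are invented) may make the fall-through semantics clearer:

#include <functional>
#include <map>
#include <string>

using Fn = std::function<int()>;
// The handler sees the fully-qualified name ("Class::method") and may set
// done = false to continue with the original function, as documented above.
using Handler = std::function<int(const std::string& name, bool& done)>;

static std::map<std::string, Handler> g_intercepts;

static int call(const std::string& name, const Fn& original) {
  auto it = g_intercepts.find(name);
  if (it != g_intercepts.end()) {
    bool done = true;  // default: the handler's result replaces the call
    int result = it->second(name, done);
    if (done) return result;
  }
  return original();  // no handler, or the handler opted out via done = false
}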
|
mmm a / AUTHORS <nl> ppp b / AUTHORS <nl> a license to everyone to use it as detailed in LICENSE . ) <nl> * Louis Lagrange < lagrange . louis @ gmail . com > <nl> * Ying - Ruei Liang < thumbd03803 @ gmail . com > <nl> * Stuart Geipel < lapimlu @ gmail . com > <nl> + * Yeonjun Lim < yjroot @ gmail . com > <nl> <nl> mmm a / site / source / docs / api_reference / preamble . js . rst <nl> ppp b / site / source / docs / api_reference / preamble . js . rst <nl> The : ref : ` emscripten - memory - model ` uses a typed array buffer ( ` ` ArrayBuffer ` ` ) t <nl> <nl> . . js : data : : HEAPU8 <nl> <nl> - View for 32 - bit unsigned memory . <nl> - <nl> - <nl> - . . js : data : : HEAPU8 <nl> - <nl> - View for 32 - bit unsigned memory . <nl> + View for 8 - bit unsigned memory . <nl> <nl> <nl> . . js : data : : HEAPU16 <nl>
|
Merge pull request from yjroot/api_reference
|
emscripten-core/emscripten
|
faf9b4b2060747b027fdd5f395ab7f19455fe5c9
|
2015-10-21T13:19:15Z
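The corrected entry matters in practice: HEAPU8 is the 8-bit unsigned view of the single linear memory, so a C pointer value can be used directly as a byte index from JS. A minimal sketch, compiled with em++ (EM_ASM is Emscripten's inline-JS macro):

#include <emscripten.h>
#include <stdint.h>

int main() {
  uint8_t buf[4] = {0, 0, 0, 0};
  // A pointer passed into EM_ASM arrives in JS as its address, and HEAPU8
  // indexes single bytes of the same linear memory the C++ side uses.
  EM_ASM({ HEAPU8[$0 + 3] = 0xff; }, buf);
  return buf[3] == 0xff ? 0 : 1;  // exits 0: JS wrote the byte C++ reads
}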
|
mmm a / docs / CHANGELOG . txt <nl> ppp b / docs / CHANGELOG . txt <nl> Breaking Changes : <nl> <nl> Other Changes : <nl> <nl> + - Added GetBackgroundDrawList ( ) helper to quickly get access to a ImDrawList that will be rendered <nl> + behind every other windows . ( # 2391 ) <nl> - Nav : Fixed a tap on AltGR ( e . g . German keyboard ) from navigating to the menu layer . <nl> - DragScalar , InputScalar , SliderScalar : Added support for u8 / s8 / u16 / s16 data types . <nl> We are reusing function instances for larger types to reduce code size . ( # 643 , # 320 , # 708 , # 1011 ) <nl> mmm a / imgui . cpp <nl> ppp b / imgui . cpp <nl> CODE <nl> A : - You can create a dummy window . Call Begin ( ) with the NoBackground | NoDecoration | NoSavedSettings | NoInputs flags . <nl> ( The ImGuiWindowFlags_NoDecoration flag itself is a shortcut for NoTitleBar | NoResize | NoScrollbar | NoCollapse ) <nl> Then you can retrieve the ImDrawList * via GetWindowDrawList ( ) and draw to it in any way you like . <nl> - - You can call ImGui : : GetOverlayDrawList ( ) and use this draw list to display contents over every other imgui windows . <nl> + - You can call ImGui : : GetBackgroundDrawList ( ) or ImGui : : GetOverlayDrawList ( ) and use those draw list to display contents <nl> + behind or over every other imgui windows . <nl> - You can create your own ImDrawList instance . You ' ll need to initialize them ImGui : : GetDrawListSharedData ( ) , or create <nl> your own ImDrawListSharedData , and then call your rendered code with your own ImDrawList or ImDrawData data . <nl> <nl> int ImGui : : GetFrameCount ( ) <nl> return GImGui - > FrameCount ; <nl> } <nl> <nl> + ImDrawList * ImGui : : GetBackgroundDrawList ( ) <nl> + { <nl> + return & GImGui - > BackgroundDrawList ; <nl> + } <nl> + <nl> static ImDrawList * GetOverlayDrawList ( ImGuiWindow * ) <nl> { <nl> - / / This seemingly unnecessary wrapper simplifies compatibility between the ' master ' and ' viewport ' branches . <nl> + / / This seemingly unnecessary wrapper simplifies compatibility between the ' master ' and ' docking ' branches . <nl> return & GImGui - > OverlayDrawList ; <nl> } <nl> <nl> void ImGui : : NewFrame ( ) <nl> g . DrawListSharedData . ClipRectFullscreen = ImVec4 ( 0 . 0f , 0 . 0f , g . IO . DisplaySize . x , g . IO . DisplaySize . y ) ; <nl> g . DrawListSharedData . CurveTessellationTol = g . Style . CurveTessellationTol ; <nl> <nl> + g . BackgroundDrawList . Clear ( ) ; <nl> + g . BackgroundDrawList . PushTextureID ( g . IO . Fonts - > TexID ) ; <nl> + g . BackgroundDrawList . PushClipRectFullScreen ( ) ; <nl> + g . BackgroundDrawList . Flags = ( g . Style . AntiAliasedLines ? ImDrawListFlags_AntiAliasedLines : 0 ) | ( g . Style . AntiAliasedFill ? ImDrawListFlags_AntiAliasedFill : 0 ) ; <nl> + <nl> g . OverlayDrawList . Clear ( ) ; <nl> g . OverlayDrawList . PushTextureID ( g . IO . Fonts - > TexID ) ; <nl> g . OverlayDrawList . PushClipRectFullScreen ( ) ; <nl> void ImGui : : Shutdown ( ImGuiContext * context ) <nl> g . OpenPopupStack . clear ( ) ; <nl> g . BeginPopupStack . clear ( ) ; <nl> g . DrawDataBuilder . ClearFreeMemory ( ) ; <nl> + g . BackgroundDrawList . ClearFreeMemory ( ) ; <nl> g . OverlayDrawList . ClearFreeMemory ( ) ; <nl> g . PrivateClipboard . clear ( ) ; <nl> g . InputTextState . ClearFreeMemory ( ) ; <nl> void ImGui : : Render ( ) <nl> / / Gather ImDrawList to render ( for each active window ) <nl> g . IO . MetricsRenderVertices = g . IO . MetricsRenderIndices = g . IO . MetricsRenderWindows = 0 ; <nl> g . 
DrawDataBuilder . Clear ( ) ; <nl> + if ( ! g . BackgroundDrawList . VtxBuffer . empty ( ) ) <nl> + AddDrawListToDrawData ( & g . DrawDataBuilder . Layers [ 0 ] , & g . BackgroundDrawList ) ; <nl> + <nl> ImGuiWindow * windows_to_render_front_most [ 2 ] ; <nl> windows_to_render_front_most [ 0 ] = ( g . NavWindowingTarget & & ! ( g . NavWindowingTarget - > Flags & ImGuiWindowFlags_NoBringToFrontOnFocus ) ) ? g . NavWindowingTarget - > RootWindow : NULL ; <nl> windows_to_render_front_most [ 1 ] = g . NavWindowingTarget ? g . NavWindowingList : NULL ; <nl> mmm a / imgui . h <nl> ppp b / imgui . h <nl> namespace ImGui <nl> IMGUI_API bool IsRectVisible ( const ImVec2 & rect_min , const ImVec2 & rect_max ) ; / / test if rectangle ( in screen space ) is visible / not clipped . to perform coarse clipping on user ' s side . <nl> IMGUI_API double GetTime ( ) ; / / get global imgui time . incremented by io . DeltaTime every frame . <nl> IMGUI_API int GetFrameCount ( ) ; / / get global imgui frame count . incremented by 1 every frame . <nl> - IMGUI_API ImDrawList * GetOverlayDrawList ( ) ; / / this draw list will be the last rendered one , useful to quickly draw overlays shapes / text <nl> + IMGUI_API ImDrawList * GetBackgroundDrawList ( ) ; / / this draw list will be the first rendering one . Useful to quickly draw shapes / text behind dear imgui contents . <nl> + IMGUI_API ImDrawList * GetOverlayDrawList ( ) ; / / this draw list will be the last rendered one . Useful to quickly draw shapes / text over dear imgui contents . <nl> IMGUI_API ImDrawListSharedData * GetDrawListSharedData ( ) ; / / you may use this when creating your own ImDrawList instances . <nl> IMGUI_API const char * GetStyleColorName ( ImGuiCol idx ) ; / / get a string corresponding to the enum value ( for display , saving , etc . ) . <nl> IMGUI_API void SetStateStorage ( ImGuiStorage * storage ) ; / / replace current window storage with our own ( if you want to manipulate it yourself , typically clear subsection of it ) <nl> mmm a / imgui_internal . h <nl> ppp b / imgui_internal . h <nl> struct ImGuiContext <nl> ImDrawData DrawData ; / / Main ImDrawData instance to pass render information to the user <nl> ImDrawDataBuilder DrawDataBuilder ; <nl> float DimBgRatio ; / / 0 . 0 . . 1 . 0 animation when fading in a dimming background ( for modal window and CTRL + TAB list ) <nl> + ImDrawList BackgroundDrawList ; <nl> ImDrawList OverlayDrawList ; / / Optional software render of mouse cursors , if io . MouseDrawCursor is set + a few debug overlays <nl> ImGuiMouseCursor MouseCursor ; <nl> <nl> struct ImGuiContext <nl> int WantTextInputNextFrame ; <nl> char TempBuffer [ 1024 * 3 + 1 ] ; / / Temporary text buffer <nl> <nl> - ImGuiContext ( ImFontAtlas * shared_font_atlas ) : OverlayDrawList ( NULL ) <nl> + ImGuiContext ( ImFontAtlas * shared_font_atlas ) : BackgroundDrawList ( NULL ) , OverlayDrawList ( NULL ) <nl> { <nl> Initialized = false ; <nl> FrameScopeActive = FrameScopePushedImplicitWindow = false ; <nl> struct ImGuiContext <nl> NavMoveDir = NavMoveDirLast = NavMoveClipDir = ImGuiDir_None ; <nl> <nl> DimBgRatio = 0 . 0f ; <nl> + BackgroundDrawList . _Data = & DrawListSharedData ; <nl> + BackgroundDrawList . _OwnerName = " # # Background " ; / / Give it a name for debugging <nl> OverlayDrawList . _Data = & DrawListSharedData ; <nl> OverlayDrawList . _OwnerName = " # # Overlay " ; / / Give it a name for debugging <nl> MouseCursor = ImGuiMouseCursor_Arrow ; <nl>
|
Added GetBackgroundDrawList() helper to quickly get access to an ImDrawList that will be rendered behind every other window. ( )
|
ocornut/imgui
|
96b13760d450b251e823a09ba2409f9f83ec2124
|
2019-03-04T15:10:51Z
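Grounded in the API added above: both lists are reset in NewFrame() and submitted in Render(), background first and overlay last, so the only difference to the caller is draw order. A minimal per-frame usage sketch:

// Somewhere between ImGui::NewFrame() and ImGui::Render():
ImDrawList* bg = ImGui::GetBackgroundDrawList();
bg->AddRectFilled(ImVec2(0, 0), ImVec2(200, 200),
                  IM_COL32(0, 64, 0, 255));       // appears behind all windows

ImDrawList* fg = ImGui::GetOverlayDrawList();
fg->AddText(ImVec2(8, 8), IM_COL32(255, 255, 0, 255),
            "drawn over everything");             // appears over all windows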
|
mmm a / README . md <nl> ppp b / README . md <nl> The tools make low - level operating system analytics and monitoring both performa <nl> CentOS 6 . 5 | [ ! [ Build Status ] ( https : / / jenkins . osquery . io / job / osqueryMasterBuildCentOS6 / badge / icon ) ] ( https : / / jenkins . osquery . io / job / osqueryMasterBuildCentOS6 / ) | | * * Homepage : * * | https : / / osquery . io <nl> CentOS 7 . 0 | [ ! [ Build Status ] ( https : / / jenkins . osquery . io / job / osqueryMasterBuildCentOS7 / badge / icon ) ] ( https : / / jenkins . osquery . io / job / osqueryMasterBuildCentOS7 / ) | | * * Downloads : * * | https : / / osquery . io / downloads <nl> Ubuntu 12 . 04 | [ ! [ Build Status ] ( https : / / jenkins . osquery . io / job / osqueryMasterBuildUbuntu12 / badge / icon ) ] ( https : / / jenkins . osquery . io / job / osqueryMasterBuildUbuntu12 / ) | | * * Tables : * * | https : / / osquery . io / tables <nl> - Ubuntu 14 . 04 | [ ! [ Build Status ] ( https : / / jenkins . osquery . io / job / osqueryMasterBuildUbuntu14 / badge / icon ) ] ( https : / / jenkins . osquery . io / job / osqueryMasterBuildUbuntu14 / ) | | * * Guide : * * | http : / / osquery . rtfd . org <nl> + Ubuntu 14 . 04 | [ ! [ Build Status ] ( https : / / jenkins . osquery . io / job / osqueryMasterBuildUbuntu14 / badge / icon ) ] ( https : / / jenkins . osquery . io / job / osqueryMasterBuildUbuntu14 / ) | | * * Guide : * * | https : / / osquery . readthedocs . org <nl> OS X 10 . 10 | [ ! [ Build Status ] ( https : / / jenkins . osquery . io / job / osqueryMasterBuildOSX / badge / icon ) ] ( https : / / jenkins . osquery . io / job / osqueryMasterBuildOSX / ) | | * * Homebrew : * * | ` brew install osquery ` <nl> <nl> # # # # What is osquery ? <nl>
|
Merge pull request from marpaia/readme
|
osquery/osquery
|
b409049050af789ead11769dab1db5531283c24a
|
2015-04-20T04:59:52Z
|
mmm a / lib / ClangImporter / ClangImporter . cpp <nl> ppp b / lib / ClangImporter / ClangImporter . cpp <nl> ClangImporter : : Implementation : : getWrapperForModule ( ClangImporter & importer , <nl> return file ; <nl> } <nl> <nl> - static clang : : Module * getBestOwningModule ( const clang : : Decl * D , <nl> - bool allowForwardDeclaration ) { <nl> + clang : : Module * ClangImporter : : Implementation : : getClangSubmoduleForDecl ( <nl> + const clang : : Decl * D , <nl> + bool allowForwardDeclaration ) { <nl> const clang : : Decl * actual = nullptr ; <nl> if ( auto OID = dyn_cast < clang : : ObjCInterfaceDecl > ( D ) ) { <nl> / / Put the Objective - C class into the module that contains the @ interface <nl> static clang : : Module * getBestOwningModule ( const clang : : Decl * D , <nl> ClangModuleUnit * ClangImporter : : Implementation : : getClangModuleForDecl ( <nl> const clang : : Decl * D , <nl> bool allowForwardDeclaration ) { <nl> - clang : : Module * M = getBestOwningModule ( D , allowForwardDeclaration ) ; <nl> + clang : : Module * M = getClangSubmoduleForDecl ( D , allowForwardDeclaration ) ; <nl> if ( ! M ) <nl> return nullptr ; <nl> <nl> class FilteringVisibleDeclConsumer : public swift : : VisibleDeclConsumer { <nl> const ClangModuleUnit * CMU ) <nl> : NextConsumer ( consumer ) , ModuleFilter ( CMU ) { } <nl> <nl> - virtual void foundDecl ( ValueDecl * VD , DeclVisibilityKind Reason ) override { <nl> + void foundDecl ( ValueDecl * VD , DeclVisibilityKind Reason ) override { <nl> if ( isVisibleFromModule ( ModuleFilter , VD ) ) <nl> NextConsumer . foundDecl ( VD , Reason ) ; <nl> } <nl> class FilteringDeclaredDeclConsumer : public swift : : VisibleDeclConsumer { <nl> const ClangModuleUnit * CMU ) <nl> : NextConsumer ( consumer ) , ModuleFilter ( CMU ) { } <nl> <nl> - virtual void foundDecl ( ValueDecl * VD , DeclVisibilityKind Reason ) override { <nl> + void foundDecl ( ValueDecl * VD , DeclVisibilityKind Reason ) override { <nl> if ( isDeclaredInModule ( ModuleFilter , VD ) ) <nl> NextConsumer . foundDecl ( VD , Reason ) ; <nl> } <nl> void ClangImporter : : lookupVisibleDecls ( VisibleDeclConsumer & Consumer ) const { <nl> / / through the list . <nl> } while ( Impl . CurrentCacheState ! = Implementation : : CacheState : : InProgress ) ; <nl> <nl> + auto & ClangPP = Impl . getClangPreprocessor ( ) ; <nl> + for ( auto I = ClangPP . macro_begin ( ) , E = ClangPP . macro_end ( ) ; I ! = E ; + + I ) { <nl> + if ( ! I - > first - > hasMacroDefinition ( ) ) <nl> + continue ; <nl> + auto Name = Impl . importName ( I - > first ) ; <nl> + if ( Name . empty ( ) ) <nl> + continue ; <nl> + if ( auto * Imported = <nl> + Impl . importMacro ( Name , I - > second - > getMacroInfo ( ) ) ) { <nl> + Impl . CachedVisibleDecls . push_back ( Imported ) ; <nl> + } <nl> + } <nl> + <nl> Impl . CurrentCacheState = Implementation : : CacheState : : Valid ; <nl> } <nl> <nl> mmm a / lib / ClangImporter / ImportMacro . cpp <nl> ppp b / lib / ClangImporter / ImportMacro . cpp <nl> <nl> <nl> using namespace swift ; <nl> <nl> + clang : : Module * ClangImporter : : Implementation : : getClangSubmoduleForMacro ( <nl> + const clang : : MacroInfo * MI ) { <nl> + auto * ExternalSource = getClangASTContext ( ) . 
getExternalSource ( ) ; <nl> + return ExternalSource - > getModule ( MI - > getOwningModuleID ( ) ) ; <nl> + } <nl> + <nl> + ClangModuleUnit * ClangImporter : : Implementation : : getClangModuleForMacro ( <nl> + const clang : : MacroInfo * MI ) { <nl> + clang : : Module * M = getClangSubmoduleForMacro ( MI ) ; <nl> + if ( ! M ) <nl> + return nullptr ; <nl> + <nl> + / / Get the parent module because currently we don ' t represent submodules with <nl> + / / ClangModule . <nl> + / / FIXME : this is just a workaround until we can import submodules . <nl> + M = M - > getTopLevelModule ( ) ; <nl> + <nl> + auto & importer = <nl> + static_cast < ClangImporter & > ( * SwiftContext . getClangModuleLoader ( ) ) ; <nl> + return getWrapperForModule ( importer , M ) ; <nl> + } <nl> + <nl> static ValueDecl * importNumericLiteral ( ClangImporter : : Implementation & Impl , <nl> + const clang : : MacroInfo * MI , <nl> Identifier name , <nl> clang : : Token const * signTok , <nl> clang : : Token const & tok ) { <nl> - / / FIXME : This constant should live in the correct module for the macro . <nl> - DeclContext * dc = Impl . firstClangModule ; <nl> - <nl> + DeclContext * dc = Impl . getClangModuleForMacro ( MI ) ; <nl> + if ( ! dc ) <nl> + return nullptr ; <nl> + <nl> assert ( tok . getKind ( ) = = clang : : tok : : numeric_constant & & <nl> " not a numeric token " ) ; <nl> clang : : ActionResult < clang : : Expr * > result = <nl> static bool isStringToken ( const clang : : Token & tok ) { <nl> } <nl> <nl> static ValueDecl * importStringLiteral ( ClangImporter : : Implementation & Impl , <nl> + const clang : : MacroInfo * MI , <nl> Identifier name , <nl> clang : : Token const & tok , <nl> bool isObjC ) { <nl> - / / FIXME : This constant should live in the correct module for the macro . <nl> - DeclContext * dc = Impl . firstClangModule ; <nl> + DeclContext * dc = Impl . getClangModuleForMacro ( MI ) ; <nl> + if ( ! dc ) <nl> + return nullptr ; <nl> <nl> assert ( isStringToken ( tok ) ) ; <nl> <nl> static ValueDecl * importStringLiteral ( ClangImporter : : Implementation & Impl , <nl> } <nl> <nl> static ValueDecl * importLiteral ( ClangImporter : : Implementation & Impl , <nl> + const clang : : MacroInfo * MI , <nl> Identifier name , <nl> clang : : Token const & tok ) { <nl> switch ( tok . getKind ( ) ) { <nl> case clang : : tok : : numeric_constant : <nl> - return importNumericLiteral ( Impl , name , / * signTok * / nullptr , tok ) ; <nl> + return importNumericLiteral ( Impl , MI , name , / * signTok * / nullptr , tok ) ; <nl> <nl> case clang : : tok : : string_literal : <nl> case clang : : tok : : utf8_string_literal : <nl> - return importStringLiteral ( Impl , name , tok , / * isObjC * / false ) ; <nl> + return importStringLiteral ( Impl , MI , name , tok , / * isObjC * / false ) ; <nl> <nl> / / TODO : char literals . <nl> default : <nl> static ValueDecl * importMacro ( ClangImporter : : Implementation & impl , <nl> <nl> / / If it ' s a literal token , we might be able to translate the literal . <nl> if ( tok . isLiteral ( ) ) { <nl> - return importLiteral ( impl , name , tok ) ; <nl> + return importLiteral ( impl , macro , name , tok ) ; <nl> } <nl> <nl> if ( tok . is ( clang : : tok : : identifier ) ) { <nl> static ValueDecl * importMacro ( ClangImporter : : Implementation & impl , <nl> clang : : Token const & second = macro - > tokens_begin ( ) [ 1 ] ; <nl> <nl> if ( isSignToken ( first ) & & second . 
is ( clang : : tok : : numeric_constant ) ) <nl> - return importNumericLiteral ( impl , name , & first , second ) ; <nl> + return importNumericLiteral ( impl , macro , name , & first , second ) ; <nl> <nl> / / We also allow @ " string " . <nl> if ( first . is ( clang : : tok : : at ) & & isStringToken ( second ) ) <nl> - return importStringLiteral ( impl , name , second , / * objc * / true ) ; <nl> + return importStringLiteral ( impl , macro , name , second , / * objc * / true ) ; <nl> <nl> return nullptr ; <nl> } <nl> static ValueDecl * importMacro ( ClangImporter : : Implementation & impl , <nl> rparenTok . is ( clang : : tok : : r_paren ) & & <nl> isSignToken ( signTok ) & & <nl> litTok . is ( clang : : tok : : numeric_constant ) ) { <nl> - return importNumericLiteral ( impl , name , & signTok , litTok ) ; <nl> + return importNumericLiteral ( impl , macro , name , & signTok , litTok ) ; <nl> } <nl> } <nl> <nl> mmm a / lib / ClangImporter / ImporterImpl . h <nl> ppp b / lib / ClangImporter / ImporterImpl . h <nl> class ClangImporter : : Implementation : public LazyMemberLoader { <nl> return Instance - > getPreprocessor ( ) ; <nl> } <nl> <nl> + clang : : Module * getClangSubmoduleForDecl ( const clang : : Decl * D , <nl> + bool allowForwardDeclaration = false ) ; <nl> + <nl> / / / \ brief Retrieve the imported module that should contain the given <nl> / / / Clang decl . <nl> ClangModuleUnit * getClangModuleForDecl ( const clang : : Decl * D , <nl> bool allowForwardDeclaration = false ) ; <nl> <nl> + clang : : Module * getClangSubmoduleForMacro ( const clang : : MacroInfo * MI ) ; <nl> + <nl> + ClangModuleUnit * getClangModuleForMacro ( const clang : : MacroInfo * MI ) ; <nl> + <nl> / / / \ brief Import the given Swift identifier into Clang . <nl> clang : : DeclarationName importName ( Identifier name ) ; <nl> <nl> mmm a / test / IDE / Inputs / custom - modules / foo_clang_module . h <nl> ppp b / test / IDE / Inputs / custom - modules / foo_clang_module . h <nl> enum BarforwardDeclaredEnum ; <nl> # define FOO_MACRO_4 0xffffffffu <nl> # define FOO_MACRO_5 0xffffffffffffffffull <nl> <nl> + # define FOO_MACRO_UNDEF_1 0 <nl> + # undef FOO_MACRO_UNDEF_1 <nl> + <nl> + # define FOO_MACRO_REDEF_1 0 <nl> + # undef FOO_MACRO_REDEF_1 <nl> + # define FOO_MACRO_REDEF_1 1 <nl> + <nl> + # define FOO_MACRO_REDEF_2 0 <nl> + # define FOO_MACRO_REDEF_2 1 <nl> + <nl> + void theLastDeclInFooClangModuleH ( ) ; <nl> + <nl>
|
Clang importer: put macros into correct module and return them in visible decl
|
apple/swift
|
962c924ab1136e2b199551d8cba7f824506f5d11
|
2014-03-26T10:27:44Z
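The importer above only translates macros whose bodies match a few literal shapes: a lone literal, a signed number, an Objective-C @-string, or a parenthesized signed number. A hypothetical C++ sketch of that shape check, with a stand-in Tok type instead of clang's token classes:

#include <vector>

enum class Tok { Number, String, Plus, Minus, At, LParen, RParen, Other };

static bool isImportableMacroBody(const std::vector<Tok>& t) {
  auto sign = [](Tok k) { return k == Tok::Plus || k == Tok::Minus; };
  if (t.size() == 1)                                    // FOO 1  /  FOO "x"
    return t[0] == Tok::Number || t[0] == Tok::String;
  if (t.size() == 2) {
    if (sign(t[0]) && t[1] == Tok::Number) return true;     // FOO -1
    if (t[0] == Tok::At && t[1] == Tok::String) return true;  // FOO @"x"
  }
  if (t.size() == 4)                                    // FOO (-1)
    return t[0] == Tok::LParen && sign(t[1]) &&
           t[2] == Tok::Number && t[3] == Tok::RParen;
  return false;
}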
|
mmm a / src / debug / debug . js <nl> ppp b / src / debug / debug . js <nl> Debug . StepAction = { StepOut : 0 , <nl> / / The different types of scripts matching enum ScriptType in objects . h . <nl> Debug . ScriptType = { Native : 0 , <nl> Extension : 1 , <nl> - Normal : 2 } ; <nl> + Normal : 2 , <nl> + Wasm : 3 } ; <nl> <nl> / / The different types of script compilations matching enum <nl> / / Script : : CompilationType in objects . h . <nl> Debug . findScript = function ( func_or_script_name ) { <nl> if ( IS_FUNCTION ( func_or_script_name ) ) { <nl> return % FunctionGetScript ( func_or_script_name ) ; <nl> } else if ( IS_REGEXP ( func_or_script_name ) ) { <nl> - var scripts = Debug . scripts ( ) ; <nl> + var scripts = this . scripts ( ) ; <nl> var last_result = null ; <nl> var result_count = 0 ; <nl> for ( var i in scripts ) { <nl> Debug . setBreakPointByScriptIdAndPosition = function ( script_id , position , <nl> if ( ! enabled ) { <nl> break_point . disable ( ) ; <nl> } <nl> - var scripts = this . scripts ( ) ; <nl> - var position_alignment = IS_UNDEFINED ( opt_position_alignment ) <nl> - ? Debug . BreakPositionAlignment . Statement : opt_position_alignment ; <nl> - for ( var i = 0 ; i < scripts . length ; i + + ) { <nl> - if ( script_id = = scripts [ i ] . id ) { <nl> - break_point . actual_position = % SetScriptBreakPoint ( scripts [ i ] , position , <nl> - position_alignment , break_point ) ; <nl> - break ; <nl> - } <nl> + var script = scriptById ( script_id ) ; <nl> + if ( script ) { <nl> + var position_alignment = IS_UNDEFINED ( opt_position_alignment ) <nl> + ? Debug . BreakPositionAlignment . Statement : opt_position_alignment ; <nl> + break_point . actual_position = % SetScriptBreakPoint ( script , position , <nl> + position_alignment , break_point ) ; <nl> } <nl> return break_point ; <nl> } ; <nl> Debug . scripts = function ( ) { <nl> } ; <nl> <nl> <nl> + / / Get a specific script currently loaded . This is based on scanning the heap . <nl> + / / TODO ( clemensh ) : Create a runtime function for this . <nl> + function scriptById ( scriptId ) { <nl> + var scripts = Debug . scripts ( ) ; <nl> + for ( var script of scripts ) { <nl> + if ( script . id = = scriptId ) return script ; <nl> + } <nl> + return UNDEFINED ; <nl> + } ; <nl> + <nl> + <nl> Debug . debuggerFlags = function ( ) { <nl> return debugger_flags ; <nl> } ; <nl> <nl> + Debug . getWasmFunctionOffsetTable = function ( scriptId ) { <nl> + var script = scriptById ( scriptId ) ; <nl> + return script ? % GetWasmFunctionOffsetTable ( script ) : UNDEFINED ; <nl> + } <nl> + <nl> + Debug . disassembleWasmFunction = function ( scriptId ) { <nl> + var script = scriptById ( scriptId ) ; <nl> + return script ? % DisassembleWasmFunction ( script ) : UNDEFINED ; <nl> + } <nl> + <nl> Debug . MakeMirror = MakeMirror ; <nl> <nl> function MakeExecutionState ( break_id ) { <nl> DebugCommandProcessor . prototype . scriptsRequest_ = function ( request , response ) { <nl> } <nl> <nl> / / Collect all scripts in the heap . <nl> - var scripts = % DebugGetLoadedScripts ( ) ; <nl> + var scripts = Debug . scripts ( ) ; <nl> <nl> response . body = [ ] ; <nl> <nl> DebugCommandProcessor . prototype . changeLiveRequest_ = function ( <nl> var script_id = request . arguments . script_id ; <nl> var preview_only = ! ! request . arguments . preview_only ; <nl> <nl> - var scripts = % DebugGetLoadedScripts ( ) ; <nl> - <nl> - var the_script = null ; <nl> - for ( var i = 0 ; i < scripts . length ; i + + ) { <nl> - if ( scripts [ i ] . 
id = = script_id ) { <nl> - the_script = scripts [ i ] ; <nl> - } <nl> - } <nl> + var the_script = scriptById ( script_id ) ; <nl> if ( ! the_script ) { <nl> response . failed ( ' Script not found ' ) ; <nl> return ; <nl> mmm a / src / objects - inl . h <nl> ppp b / src / objects - inl . h <nl> int PropertyDetails : : field_width_in_words ( ) const { <nl> int holder : : name ( ) const { return READ_INT_FIELD ( this , offset ) ; } \ <nl> void holder : : set_ # # name ( int value ) { WRITE_INT_FIELD ( this , offset , value ) ; } <nl> <nl> - <nl> - # define ACCESSORS ( holder , name , type , offset ) \ <nl> - type * holder : : name ( ) const { return type : : cast ( READ_FIELD ( this , offset ) ) ; } \ <nl> - void holder : : set_ # # name ( type * value , WriteBarrierMode mode ) { \ <nl> - WRITE_FIELD ( this , offset , value ) ; \ <nl> - CONDITIONAL_WRITE_BARRIER ( GetHeap ( ) , this , offset , value , mode ) ; \ <nl> + # define ACCESSORS_CHECKED ( holder , name , type , offset , condition ) \ <nl> + type * holder : : name ( ) const { \ <nl> + DCHECK ( condition ) ; \ <nl> + return type : : cast ( READ_FIELD ( this , offset ) ) ; \ <nl> + } \ <nl> + void holder : : set_ # # name ( type * value , WriteBarrierMode mode ) { \ <nl> + DCHECK ( condition ) ; \ <nl> + WRITE_FIELD ( this , offset , value ) ; \ <nl> + CONDITIONAL_WRITE_BARRIER ( GetHeap ( ) , this , offset , value , mode ) ; \ <nl> } <nl> <nl> + # define ACCESSORS ( holder , name , type , offset ) \ <nl> + ACCESSORS_CHECKED ( holder , name , type , offset , true ) <nl> <nl> / / Getter that returns a Smi as an int and writes an int as a Smi . <nl> - # define SMI_ACCESSORS ( holder , name , offset ) \ <nl> - int holder : : name ( ) const { \ <nl> - Object * value = READ_FIELD ( this , offset ) ; \ <nl> - return Smi : : cast ( value ) - > value ( ) ; \ <nl> - } \ <nl> - void holder : : set_ # # name ( int value ) { \ <nl> - WRITE_FIELD ( this , offset , Smi : : FromInt ( value ) ) ; \ <nl> + # define SMI_ACCESSORS_CHECKED ( holder , name , offset , condition ) \ <nl> + int holder : : name ( ) const { \ <nl> + DCHECK ( condition ) ; \ <nl> + Object * value = READ_FIELD ( this , offset ) ; \ <nl> + return Smi : : cast ( value ) - > value ( ) ; \ <nl> + } \ <nl> + void holder : : set_ # # name ( int value ) { \ <nl> + DCHECK ( condition ) ; \ <nl> + WRITE_FIELD ( this , offset , Smi : : FromInt ( value ) ) ; \ <nl> } <nl> <nl> + # define SMI_ACCESSORS ( holder , name , offset ) \ <nl> + SMI_ACCESSORS_CHECKED ( holder , name , offset , true ) <nl> + <nl> # define SYNCHRONIZED_SMI_ACCESSORS ( holder , name , offset ) \ <nl> int holder : : synchronized_ # # name ( ) const { \ <nl> Object * value = ACQUIRE_READ_FIELD ( this , offset ) ; \ <nl> ACCESSORS ( Script , context_data , Object , kContextOffset ) <nl> ACCESSORS ( Script , wrapper , HeapObject , kWrapperOffset ) <nl> SMI_ACCESSORS ( Script , type , kTypeOffset ) <nl> ACCESSORS ( Script , line_ends , Object , kLineEndsOffset ) <nl> - ACCESSORS ( Script , eval_from_shared , Object , kEvalFromSharedOffset ) <nl> - SMI_ACCESSORS ( Script , eval_from_position , kEvalFromPositionOffset ) <nl> + ACCESSORS_CHECKED ( Script , eval_from_shared , Object , kEvalFromSharedOffset , <nl> + this - > type ( ) ! = TYPE_WASM ) <nl> + SMI_ACCESSORS_CHECKED ( Script , eval_from_position , kEvalFromPositionOffset , <nl> + this - > type ( ) ! 
= TYPE_WASM ) <nl> ACCESSORS ( Script , shared_function_infos , Object , kSharedFunctionInfosOffset ) <nl> SMI_ACCESSORS ( Script , flags , kFlagsOffset ) <nl> ACCESSORS ( Script , source_url , Object , kSourceUrlOffset ) <nl> ACCESSORS ( Script , source_mapping_url , Object , kSourceMappingUrlOffset ) <nl> + ACCESSORS_CHECKED ( Script , wasm_object , JSObject , kEvalFromSharedOffset , <nl> + this - > type ( ) = = TYPE_WASM ) <nl> + SMI_ACCESSORS_CHECKED ( Script , wasm_function_index , kEvalFromPositionOffset , <nl> + this - > type ( ) = = TYPE_WASM ) <nl> <nl> Script : : CompilationType Script : : compilation_type ( ) { <nl> return BooleanBit : : get ( flags ( ) , kCompilationTypeBit ) ? <nl> mmm a / src / objects . h <nl> ppp b / src / objects . h <nl> class Script : public Struct { <nl> / / [ source_url ] : sourceURL from magic comment <nl> DECL_ACCESSORS ( source_url , Object ) <nl> <nl> - / / [ source_url ] : sourceMappingURL magic comment <nl> + / / [ source_mapping_url ] : sourceMappingURL magic comment <nl> DECL_ACCESSORS ( source_mapping_url , Object ) <nl> <nl> + / / [ wasm_object ] : the wasm object this script belongs to . <nl> + / / This must only be called if the type of this script is TYPE_WASM . <nl> + DECL_ACCESSORS ( wasm_object , JSObject ) <nl> + <nl> + / / [ wasm_function_index ] : the wasm function index this script belongs to . <nl> + / / This must only be called if the type of this script is TYPE_WASM . <nl> + DECL_INT_ACCESSORS ( wasm_function_index ) <nl> + <nl> / / [ compilation_type ] : how the the script was compiled . Encoded in the <nl> / / ' flags ' field . <nl> inline CompilationType compilation_type ( ) ; <nl> mmm a / src / runtime / runtime - debug . cc <nl> ppp b / src / runtime / runtime - debug . cc <nl> <nl> # include " src / interpreter / interpreter . h " <nl> # include " src / isolate - inl . h " <nl> # include " src / runtime / runtime . h " <nl> + # include " src / wasm / wasm - debug . h " <nl> # include " src / wasm / wasm - module . h " <nl> <nl> namespace v8 { <nl> RUNTIME_FUNCTION ( Runtime_DebugBreakInOptimizedCode ) { <nl> UNIMPLEMENTED ( ) ; <nl> return NULL ; <nl> } <nl> + <nl> + RUNTIME_FUNCTION ( Runtime_GetWasmFunctionOffsetTable ) { <nl> + DCHECK ( args . length ( ) = = 1 ) ; <nl> + HandleScope scope ( isolate ) ; <nl> + CONVERT_ARG_CHECKED ( JSValue , script_val , 0 ) ; <nl> + <nl> + RUNTIME_ASSERT ( script_val - > value ( ) - > IsScript ( ) ) ; <nl> + Handle < Script > script = Handle < Script > ( Script : : cast ( script_val - > value ( ) ) ) ; <nl> + <nl> + Handle < wasm : : WasmDebugInfo > debug_info ( <nl> + wasm : : GetDebugInfo ( script - > wasm_object ( ) ) , isolate ) ; <nl> + Handle < FixedArray > elements = wasm : : WasmDebugInfo : : GetFunctionOffsetTable ( <nl> + debug_info , script - > wasm_function_index ( ) ) ; <nl> + return * isolate - > factory ( ) - > NewJSArrayWithElements ( elements ) ; <nl> + } <nl> + <nl> + RUNTIME_FUNCTION ( Runtime_DisassembleWasmFunction ) { <nl> + DCHECK ( args . 
length ( ) = = 1 ) ; <nl> + HandleScope scope ( isolate ) ; <nl> + CONVERT_ARG_CHECKED ( JSValue , script_val , 0 ) ; <nl> + <nl> + RUNTIME_ASSERT ( script_val - > value ( ) - > IsScript ( ) ) ; <nl> + Handle < Script > script = Handle < Script > ( Script : : cast ( script_val - > value ( ) ) ) ; <nl> + <nl> + Handle < wasm : : WasmDebugInfo > debug_info ( <nl> + wasm : : GetDebugInfo ( script - > wasm_object ( ) ) , isolate ) ; <nl> + return * wasm : : WasmDebugInfo : : DisassembleFunction ( <nl> + debug_info , script - > wasm_function_index ( ) ) ; <nl> + } <nl> + <nl> } / / namespace internal <nl> } / / namespace v8 <nl> mmm a / src / runtime / runtime . h <nl> ppp b / src / runtime / runtime . h <nl> namespace internal { <nl> F ( DebugPopPromise , 0 , 1 ) \ <nl> F ( DebugAsyncTaskEvent , 1 , 1 ) \ <nl> F ( DebugIsActive , 0 , 1 ) \ <nl> - F ( DebugBreakInOptimizedCode , 0 , 1 ) <nl> + F ( DebugBreakInOptimizedCode , 0 , 1 ) \ <nl> + F ( GetWasmFunctionOffsetTable , 1 , 1 ) \ <nl> + F ( DisassembleWasmFunction , 1 , 1 ) <nl> <nl> # define FOR_EACH_INTRINSIC_FORIN ( F ) \ <nl> F ( ForInDone , 2 , 1 ) \ <nl> mmm a / src / wasm / ast - decoder . cc <nl> ppp b / src / wasm / ast - decoder . cc <nl> bool PrintAst ( base : : AccountingAllocator * allocator , const FunctionBody & body , <nl> os < < " " < < count < < " " < < WasmOpcodes : : TypeName ( type ) ; <nl> } <nl> os < < std : : endl ; <nl> + + + line_nr ; <nl> <nl> for ( const byte * locals = body . start ; locals < pc ; locals + + ) { <nl> os < < ( locals = = body . start ? " 0x " : " 0x " ) < < AsHex ( * locals , 2 ) < < " , " ; <nl> mmm a / src / wasm / wasm - debug . cc <nl> ppp b / src / wasm / wasm - debug . cc <nl> JSObject * WasmDebugInfo : : wasm_object ( ) { <nl> return JSObject : : cast ( get ( kWasmDebugInfoWasmObj ) ) ; <nl> } <nl> <nl> - bool WasmDebugInfo : : SetBreakPoint ( int byte_offset ) { <nl> - / / TODO ( clemensh ) : Implement this . <nl> - return false ; <nl> - } <nl> - <nl> Script * WasmDebugInfo : : GetFunctionScript ( Handle < WasmDebugInfo > debug_info , <nl> int func_index ) { <nl> Isolate * isolate = debug_info - > GetIsolate ( ) ; <nl> Script * WasmDebugInfo : : GetFunctionScript ( Handle < WasmDebugInfo > debug_info , <nl> scripts - > set ( func_index , * script ) ; <nl> <nl> script - > set_type ( Script : : TYPE_WASM ) ; <nl> + script - > set_wasm_object ( debug_info - > wasm_object ( ) ) ; <nl> + script - > set_wasm_function_index ( func_index ) ; <nl> + <nl> + int hash = 0 ; <nl> + debug_info - > get ( kWasmDebugInfoWasmBytesHash ) - > ToInt32 ( & hash ) ; <nl> + char buffer [ 32 ] ; <nl> + SNPrintF ( ArrayVector ( buffer ) , " wasm : / / % 08x / % d " , hash , func_index ) ; <nl> + Handle < String > source_url = <nl> + isolate - > factory ( ) - > NewStringFromAsciiChecked ( buffer , TENURED ) ; <nl> + script - > set_source_url ( * source_url ) ; <nl> <nl> int func_bytes_len = <nl> GetFunctionOffsetAndLength ( debug_info , func_index ) . second ; <nl> Script * WasmDebugInfo : : GetFunctionScript ( Handle < WasmDebugInfo > debug_info , <nl> line_ends - > set_map ( isolate - > heap ( ) - > fixed_cow_array_map ( ) ) ; <nl> script - > set_line_ends ( * line_ends ) ; <nl> <nl> - / / TODO ( clemensh ) : Register this new script at the debugger . <nl> + isolate - > debug ( ) - > OnAfterCompile ( script ) ; <nl> <nl> return * script ; <nl> } <nl> new file mode 100644 <nl> index 00000000000 . . 13b93cb29bf <nl> mmm / dev / null <nl> ppp b / test / mjsunit / wasm / debug - disassembly . 
js <nl> <nl> + / / Copyright 2016 the V8 project authors . All rights reserved . <nl> + / / Use of this source code is governed by a BSD - style license that can be <nl> + / / found in the LICENSE file . <nl> + <nl> + / / Flags : - - expose - wasm - - expose - debug - as debug <nl> + <nl> + load ( " test / mjsunit / wasm / wasm - constants . js " ) ; <nl> + load ( " test / mjsunit / wasm / wasm - module - builder . js " ) ; <nl> + <nl> + Debug = debug . Debug <nl> + <nl> + / / Initialized in setup ( ) . <nl> + var exception ; <nl> + var break_count ; <nl> + var num_wasm_scripts ; <nl> + var module ; <nl> + <nl> + function listener ( event , exec_state , event_data , data ) { <nl> + try { <nl> + if ( event = = Debug . DebugEvent . Break ) { <nl> + + + break_count ; <nl> + / / Request frame details . This should trigger creation of the Script <nl> + / / objects for all frames on the stack . <nl> + var num_frames = exec_state . frameCount ( ) ; <nl> + for ( var i = 0 ; i < num_frames ; + + i ) { <nl> + var frame = exec_state . frame ( i ) ; <nl> + var details = frame . details ( ) ; <nl> + var script = details . script ( ) ; <nl> + if ( script . type = = Debug . ScriptType . Wasm ) { <nl> + var pos = frame . sourcePosition ( ) ; <nl> + var name = script . nameOrSourceURL ( ) ; <nl> + var disassembly = Debug . disassembleWasmFunction ( script . id ) ; <nl> + var offset_table = Debug . getWasmFunctionOffsetTable ( script . id ) ; <nl> + assertEquals ( 0 , offset_table . length % 3 ) ; <nl> + var lineNr = null ; <nl> + var columnNr = null ; <nl> + for ( var p = 0 ; p < offset_table . length ; p + = 3 ) { <nl> + if ( offset_table [ p ] ! = pos ) continue ; <nl> + lineNr = offset_table [ p + 1 ] ; <nl> + columnNr = offset_table [ p + 2 ] ; <nl> + } <nl> + assertNotNull ( lineNr , " position should occur in offset table " ) ; <nl> + assertNotNull ( columnNr , " position should occur in offset table " ) ; <nl> + var line = disassembly . split ( " \ n " ) [ lineNr ] ; <nl> + assertTrue ( ! ! line , " line number must occur in disassembly " ) ; <nl> + assertTrue ( line . length > columnNr , " column number must be valid " ) ; <nl> + var expected_string ; <nl> + if ( name . endsWith ( " / 0 " ) ) { <nl> + / / Function 0 calls the imported function . <nl> + expected_string = " kExprCallImport , " ; <nl> + } else if ( name . endsWith ( " / 1 " ) ) { <nl> + / / Function 1 calls function 0 . <nl> + expected_string = " kExprCallFunction , " ; <nl> + } else { <nl> + assertTrue ( false , " Unexpected wasm script : " + name ) ; <nl> + } <nl> + assertTrue ( line . substr ( columnNr ) . startsWith ( expected_string ) , <nl> + " offset " + columnNr + " should start with ' " + expected_string <nl> + + " ' : " + line ) ; <nl> + } <nl> + } <nl> + } else if ( event = = Debug . DebugEvent . AfterCompile ) { <nl> + var script = event_data . script ( ) ; <nl> + if ( script . scriptType ( ) = = Debug . ScriptType . Wasm ) { <nl> + + + num_wasm_scripts ; <nl> + } <nl> + } <nl> + } catch ( e ) { <nl> + print ( " exception : " + e ) ; <nl> + exception = e ; <nl> + } <nl> + } ; <nl> + <nl> + var builder = new WasmModuleBuilder ( ) ; <nl> + <nl> + builder . addImport ( " func " , kSig_v_v ) ; <nl> + <nl> + builder . addFunction ( " call_import " , kSig_v_v ) <nl> + . addBody ( [ kExprCallImport , kArity0 , 0 ] ) <nl> + . exportFunc ( ) ; <nl> + <nl> + / / Add a bit of unneccessary code to increase the byte offset . <nl> + builder . addFunction ( " call_call_import " , kSig_v_v ) <nl> + . 
addLocals ( { i32_count : 2 } ) <nl> + . addBody ( [ <nl> + kExprI32Const , 27 , kExprSetLocal , 0 , <nl> + kExprI32Const , ( - 7 & 0x7f ) , kExprSetLocal , 1 , <nl> + kExprGetLocal , 0 , kExprGetLocal , 1 , kExprI32Add , kExprI64UConvertI32 , <nl> + kExprI64Const , 0 , <nl> + kExprI64Ne , kExprIf , <nl> + kExprCallFunction , kArity0 , 0 , <nl> + kExprEnd <nl> + ] ) <nl> + . exportFunc ( ) ; <nl> + <nl> + function call_debugger ( ) { <nl> + debugger ; <nl> + } <nl> + <nl> + function setup ( ) { <nl> + module = builder . instantiate ( { func : call_debugger } ) ; <nl> + exception = null ; <nl> + break_count = 0 ; <nl> + num_wasm_scripts = 0 ; <nl> + } <nl> + <nl> + ( function testRegisteredWasmScripts1 ( ) { <nl> + setup ( ) ; <nl> + Debug . setListener ( listener ) ; <nl> + / / Initially 0 scripts . <nl> + assertEquals ( 0 , num_wasm_scripts ) ; <nl> + / / Call the " call_import " function - > 1 script . <nl> + module . exports . call_import ( ) ; <nl> + assertEquals ( 1 , num_wasm_scripts ) ; <nl> + / / Call " call_import " again - > still just 1 script . <nl> + module . exports . call_import ( ) ; <nl> + assertEquals ( 1 , num_wasm_scripts ) ; <nl> + / / Call " call_call_import " - > now 2 scripts . <nl> + module . exports . call_call_import ( ) ; <nl> + assertEquals ( 2 , num_wasm_scripts ) ; <nl> + Debug . setListener ( null ) ; <nl> + <nl> + assertEquals ( 3 , break_count ) ; <nl> + if ( exception ) throw exception ; <nl> + } ) ( ) ; <nl> + <nl> + ( function testRegisteredWasmScripts2 ( ) { <nl> + setup ( ) ; <nl> + Debug . setListener ( listener ) ; <nl> + / / Initially 0 scripts . <nl> + assertEquals ( 0 , num_wasm_scripts ) ; <nl> + / / Call the " call_call_import " function - > 2 scripts should be registered . <nl> + module . exports . call_call_import ( ) ; <nl> + assertEquals ( 2 , num_wasm_scripts ) ; <nl> + Debug . setListener ( null ) ; <nl> + <nl> + assertEquals ( 1 , break_count ) ; <nl> + if ( exception ) throw exception ; <nl> + } ) ( ) ; <nl>
|
[wasm] Disassemble wasm code from script
|
v8/v8
|
d249efd705cd851e8441ee3cc804669812ba1694
|
2016-06-30T09:57:07Z
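A detail worth calling out in the object-layout change above: ACCESSORS_CHECKED lets Script reuse the eval_from_shared and eval_from_position slots for wasm_object and wasm_function_index, with the script type deciding which accessor is legal. A stripped-down C++ sketch of that slot-sharing idea (hypothetical types, plain assert in place of DCHECK):

#include <cassert>

enum class ScriptType { Native, Extension, Normal, Wasm };

struct Script {
  ScriptType type = ScriptType::Normal;
  void* shared_slot = nullptr;  // one word, two meanings

  void* eval_from_shared() const {      // legal for non-wasm scripts only
    assert(type != ScriptType::Wasm);
    return shared_slot;
  }
  void* wasm_object() const {           // same storage, wasm scripts only
    assert(type == ScriptType::Wasm);
    return shared_slot;
  }
};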
|
mmm a / src / core / lib / security / security_connector / ssl / ssl_security_connector . cc <nl> ppp b / src / core / lib / security / security_connector / ssl / ssl_security_connector . cc <nl> namespace { <nl> grpc_error * ssl_check_peer ( <nl> const char * peer_name , const tsi_peer * peer , <nl> grpc_core : : RefCountedPtr < grpc_auth_context > * auth_context ) { <nl> - # if TSI_OPENSSL_ALPN_SUPPORT <nl> - / * Check the ALPN if ALPN is supported . * / <nl> - const tsi_peer_property * p = <nl> - tsi_peer_get_property_by_name ( peer , TSI_SSL_ALPN_SELECTED_PROTOCOL ) ; <nl> - if ( p = = nullptr ) { <nl> - return GRPC_ERROR_CREATE_FROM_STATIC_STRING ( <nl> - " Cannot check peer : missing selected ALPN property . " ) ; <nl> - } <nl> - if ( ! grpc_chttp2_is_alpn_version_supported ( p - > value . data , p - > value . length ) ) { <nl> - return GRPC_ERROR_CREATE_FROM_STATIC_STRING ( <nl> - " Cannot check peer : invalid ALPN value . " ) ; <nl> + grpc_error * error = grpc_ssl_check_alpn ( peer ) ; <nl> + if ( error ! = GRPC_ERROR_NONE ) { <nl> + return error ; <nl> } <nl> - # endif / * TSI_OPENSSL_ALPN_SUPPORT * / <nl> / * Check the peer name if specified . * / <nl> if ( peer_name ! = nullptr & & ! grpc_ssl_host_matches_name ( peer , peer_name ) ) { <nl> char * msg ; <nl> gpr_asprintf ( & msg , " Peer name % s is not in peer certificate " , peer_name ) ; <nl> - grpc_error * error = GRPC_ERROR_CREATE_FROM_COPIED_STRING ( msg ) ; <nl> + error = GRPC_ERROR_CREATE_FROM_COPIED_STRING ( msg ) ; <nl> gpr_free ( msg ) ; <nl> return error ; <nl> } <nl> mmm a / test / core / security / security_connector_test . cc <nl> ppp b / test / core / security / security_connector_test . cc <nl> <nl> # include " src / core / tsi / transport_security . h " <nl> # include " test / core / util / test_config . h " <nl> <nl> + # ifndef TSI_OPENSSL_ALPN_SUPPORT <nl> + # define TSI_OPENSSL_ALPN_SUPPORT 1 <nl> + # endif <nl> + <nl> static int check_transport_security_type ( const grpc_auth_context * ctx ) { <nl> grpc_auth_property_iterator it = grpc_auth_context_find_properties_by_name ( <nl> ctx , GRPC_TRANSPORT_SECURITY_TYPE_PROPERTY_NAME ) ; <nl> static void test_default_ssl_roots ( void ) { <nl> gpr_free ( roots_env_var_file_path ) ; <nl> } <nl> <nl> + static void test_peer_alpn_check ( void ) { <nl> + # if TSI_OPENSSL_ALPN_SUPPORT <nl> + tsi_peer peer ; <nl> + const char * alpn = " grpc " ; <nl> + const char * wrong_alpn = " wrong " ; <nl> + / / peer does not have a TSI_SSL_ALPN_SELECTED_PROTOCOL property . <nl> + GPR_ASSERT ( tsi_construct_peer ( 1 , & peer ) = = TSI_OK ) ; <nl> + GPR_ASSERT ( tsi_construct_string_peer_property ( " wrong peer property name " , <nl> + alpn , strlen ( alpn ) , <nl> + & peer . properties [ 0 ] ) = = TSI_OK ) ; <nl> + grpc_error * error = grpc_ssl_check_alpn ( & peer ) ; <nl> + GPR_ASSERT ( error ! = GRPC_ERROR_NONE ) ; <nl> + tsi_peer_destruct ( & peer ) ; <nl> + GRPC_ERROR_UNREF ( error ) ; <nl> + / / peer has a TSI_SSL_ALPN_SELECTED_PROTOCOL property but with an incorrect <nl> + / / property value . <nl> + GPR_ASSERT ( tsi_construct_peer ( 1 , & peer ) = = TSI_OK ) ; <nl> + GPR_ASSERT ( tsi_construct_string_peer_property ( TSI_SSL_ALPN_SELECTED_PROTOCOL , <nl> + wrong_alpn , strlen ( wrong_alpn ) , <nl> + & peer . properties [ 0 ] ) = = TSI_OK ) ; <nl> + error = grpc_ssl_check_alpn ( & peer ) ; <nl> + GPR_ASSERT ( error ! 
= GRPC_ERROR_NONE ) ; <nl> + tsi_peer_destruct ( & peer ) ; <nl> + GRPC_ERROR_UNREF ( error ) ; <nl> + / / peer has a TSI_SSL_ALPN_SELECTED_PROTOCOL property with a correct property <nl> + / / value . <nl> + GPR_ASSERT ( tsi_construct_peer ( 1 , & peer ) = = TSI_OK ) ; <nl> + GPR_ASSERT ( tsi_construct_string_peer_property ( TSI_SSL_ALPN_SELECTED_PROTOCOL , <nl> + alpn , strlen ( alpn ) , <nl> + & peer . properties [ 0 ] ) = = TSI_OK ) ; <nl> + GPR_ASSERT ( grpc_ssl_check_alpn ( & peer ) = = GRPC_ERROR_NONE ) ; <nl> + tsi_peer_destruct ( & peer ) ; <nl> + # else <nl> + GPR_ASSERT ( grpc_ssl_check_alpn ( nullptr ) = = GRPC_ERROR_NONE ) ; <nl> + # endif <nl> + } <nl> + <nl> int main ( int argc , char * * argv ) { <nl> grpc : : testing : : TestEnvironment env ( argc , argv ) ; <nl> grpc_init ( ) ; <nl> int main ( int argc , char * * argv ) { <nl> test_cn_and_multiple_sans_and_others_ssl_peer_to_auth_context ( ) ; <nl> test_ipv6_address_san ( ) ; <nl> test_default_ssl_roots ( ) ; <nl> - <nl> + test_peer_alpn_check ( ) ; <nl> grpc_shutdown ( ) ; <nl> return 0 ; <nl> } <nl>
|
fix ALPN issues .
|
grpc/grpc
|
1fb7a841333658eee482801f0428fc297e979042
|
2019-04-05T17:23:29Z
|
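The grpc diff above collapses an #ifdef-guarded, inlined ALPN check into a single call to a shared grpc_ssl_check_alpn helper, and adds a test covering the three interesting peers: missing property, wrong property value, correct value. As a rough, self-contained C++ sketch of that helper's contract, not the real implementation: Peer, PeerProperty, kAlpnProperty, and is_alpn_supported are hypothetical stand-ins for the tsi_peer types, TSI_SSL_ALPN_SELECTED_PROTOCOL, and grpc_chttp2_is_alpn_version_supported.

    // Minimal sketch of the factored-out ALPN check. std::nullopt plays the
    // role of GRPC_ERROR_NONE; a string plays the role of grpc_error*.
    #include <algorithm>
    #include <optional>
    #include <string>
    #include <vector>

    struct PeerProperty {
      std::string name;
      std::string value;
    };

    struct Peer {
      std::vector<PeerProperty> properties;
    };

    // Stands in for TSI_SSL_ALPN_SELECTED_PROTOCOL.
    const std::string kAlpnProperty = "alpn_selected_protocol";

    // Stands in for grpc_chttp2_is_alpn_version_supported; the accepted list
    // here is illustrative (the test in the diff treats "grpc" as valid).
    bool is_alpn_supported(const std::string& value) {
      return value == "grpc" || value == "h2";
    }

    std::optional<std::string> check_alpn(const Peer& peer) {
      auto it = std::find_if(
          peer.properties.begin(), peer.properties.end(),
          [](const PeerProperty& p) { return p.name == kAlpnProperty; });
      if (it == peer.properties.end()) {
        return "Cannot check peer: missing selected ALPN property.";
      }
      if (!is_alpn_supported(it->value)) {
        return "Cannot check peer: invalid ALPN value.";
      }
      return std::nullopt;  // success
    }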
mmm a / aten / src / ATen / core / ivalue . cpp <nl> ppp b / aten / src / ATen / core / ivalue . cpp <nl> void IValue : : dump ( ) const { <nl> <nl> <nl> std : : string ivalue : : Object : : name ( ) const { <nl> - return this - > type_ . type_ - > qualname ( ) ; <nl> + return this - > type_ . type_ - > name ( ) - > qualifiedName ( ) ; <nl> } <nl> <nl> IValue ivalue : : Object : : getAttr ( const std : : string & name ) const { <nl> mmm a / aten / src / ATen / core / jit_type . h <nl> ppp b / aten / src / ATen / core / jit_type . h <nl> struct CAFFE2_API NamedType : public Type { <nl> NamedType ( TypeKind tk , c10 : : optional < c10 : : QualifiedName > qualifiedName ) <nl> : Type ( tk ) , name_ ( std : : move ( qualifiedName ) ) { } <nl> <nl> - std : : string python_str ( ) const ; <nl> - std : : string qualname ( ) const ; <nl> - std : : string qualifier ( ) const ; <nl> - std : : string basename ( ) const ; <nl> - <nl> - const c10 : : optional < QualifiedName > & qualified_name_obj ( ) const { <nl> + const c10 : : optional < QualifiedName > & name ( ) const { <nl> return name_ ; <nl> } <nl> <nl> struct CAFFE2_API ClassType : public NamedType { <nl> DEFINE_IS_SUBCLASS ( ClassType ) ; <nl> bool operator = = ( const Type & rhs ) const override { <nl> if ( auto user_rhs = rhs . cast < ClassType > ( ) ) { <nl> - return qualname ( ) = = user_rhs - > qualname ( ) ; <nl> + return name ( ) - > qualifiedName ( ) = = user_rhs - > name ( ) - > qualifiedName ( ) ; <nl> } <nl> return false ; <nl> } <nl> <nl> std : : string str ( ) const override { <nl> - return std : : string ( " ClassType < " ) + basename ( ) + " > " ; <nl> + return std : : string ( " ClassType < " ) + name ( ) - > name ( ) + " > " ; <nl> } <nl> <nl> std : : string python_str ( ) const override { <nl> - return qualname ( ) ; <nl> + return name ( ) - > qualifiedName ( ) ; <nl> } <nl> <nl> TypePtr getAttribute ( const std : : string & name ) const { <nl> mmm a / aten / src / ATen / core / type . cpp <nl> ppp b / aten / src / ATen / core / type . cpp <nl> std : : ostream & operator < < ( std : : ostream & out , const VaryingShape & vs ) { <nl> return out ; <nl> } <nl> <nl> - std : : string NamedType : : python_str ( ) const { <nl> - TORCH_INTERNAL_ASSERT ( name_ ) ; <nl> - return name_ - > qualifiedName ( ) ; <nl> - } <nl> - <nl> - std : : string NamedType : : qualname ( ) const { <nl> - TORCH_INTERNAL_ASSERT ( name_ ) ; <nl> - return name_ - > qualifiedName ( ) ; <nl> - } <nl> - <nl> - std : : string NamedType : : qualifier ( ) const { <nl> - TORCH_INTERNAL_ASSERT ( name_ ) ; <nl> - return name_ - > prefix ( ) ; <nl> - } <nl> - <nl> - std : : string NamedType : : basename ( ) const { <nl> - TORCH_INTERNAL_ASSERT ( name_ ) ; <nl> - return name_ - > name ( ) ; <nl> - } <nl> - <nl> std : : shared_ptr < FunctionSchema > TupleType : : namedTupleSchemaFromNamesAndTypes ( <nl> c10 : : QualifiedName qualName , <nl> std : : vector < std : : string > field_names , <nl> bool TupleType : : operator = = ( const Type & rhs ) const { <nl> std : : string TupleType : : str ( ) const { <nl> std : : stringstream ss ; <nl> if ( schema_ & & name_ ) { <nl> - ss < < qualname ( ) ; <nl> + ss < < name_ - > qualifiedName ( ) ; <nl> } else { <nl> ss < < " ( " ; <nl> for ( size_t i = 0 ; i < elements ( ) . 
size ( ) ; + + i ) { <nl> std : : string TupleType : : str ( ) const { <nl> std : : string TupleType : : python_str ( ) const { <nl> std : : stringstream ss ; <nl> if ( schema_ & & name_ ) { <nl> - ss < < qualname ( ) ; <nl> + ss < < name_ - > qualifiedName ( ) ; <nl> } else { <nl> ss < < " Tuple [ " ; <nl> for ( size_t i = 0 ; i < elements ( ) . size ( ) ; + + i ) { <nl> mmm a / torch / csrc / jit / argument_spec . cpp <nl> ppp b / torch / csrc / jit / argument_spec . cpp <nl> void ArgumentSpecCreator : : scan ( <nl> size_t pos = instructions_ . size ( ) ; <nl> instructions_ . emplace_back ( ENTER_OBJECT ) ; <nl> for ( size_t i = 0 ; i < cls - > numAttributes ( ) ; + + i ) { <nl> - auto key = cls - > qualname ( ) + cls - > attributeNames ( ) . at ( i ) ; <nl> + auto key = cls - > name ( ) - > qualifiedName ( ) + cls - > attributeNames ( ) . at ( i ) ; <nl> / / it is only safe to specialize because someone might have written to it <nl> if ( ! written_slots . count ( key ) ) { <nl> scan ( cls - > containedTypes ( ) . at ( i ) , depth + 1 , written_slots ) ; <nl> static void scanWrittenSlots ( <nl> for ( Node * n : block - > nodes ( ) ) { <nl> if ( n - > kind ( ) = = prim : : SetAttr ) { <nl> if ( auto cls = n - > inputs ( ) . at ( 0 ) - > type ( ) - > cast < ClassType > ( ) ) { <nl> - written_slots . insert ( cls - > qualname ( ) + n - > s ( attr : : name ) ) ; <nl> + written_slots . insert ( cls - > name ( ) - > qualifiedName ( ) + n - > s ( attr : : name ) ) ; <nl> } <nl> } <nl> for ( Block * subblock : n - > blocks ( ) ) { <nl> mmm a / torch / csrc / jit / export . cpp <nl> ppp b / torch / csrc / jit / export . cpp <nl> class ScriptModuleSerializer2 : public ScriptModuleSerializer { <nl> <nl> / / For the type , foo . bar . Baz <nl> const std : : string filename = ImportExportHelpers : : qualifierToPath ( <nl> - class_type - > qualifier ( ) , torch : : PROTO_VERSION_NEWEST ) ; <nl> + class_type - > name ( ) - > prefix ( ) , torch : : PROTO_VERSION_NEWEST ) ; <nl> / / End state : filename is " foo / bar . py " , in which we will define a class <nl> / / named Baz <nl> auto & stream = fileToSrc [ filename ] ; <nl> void ScriptModuleSerializer : : writeLibs ( torch : : ModelDef * model_def ) { <nl> <nl> / / For the type , foo . bar . Baz <nl> const std : : string filename = <nl> - ImportExportHelpers : : qualifierToPath ( class_type - > qualifier ( ) , 5 ) ; <nl> + ImportExportHelpers : : qualifierToPath ( class_type - > name ( ) - > prefix ( ) , 5 ) ; <nl> / / End state : filename is " foo / bar . py " , in which we will define a class <nl> / / named Baz <nl> fileToSrc [ filename ] < < class_src ; <nl> void ScriptModuleSerializer : : writeLibs ( torch : : ModelDef * model_def ) { <nl> for ( const auto & item : converted_classes_ ) { <nl> const c10 : : NamedTypePtr & class_type = item . key ( ) ; <nl> const std : : string filename = <nl> - ImportExportHelpers : : qualifierToPath ( class_type - > qualifier ( ) , 5 ) ; <nl> + ImportExportHelpers : : qualifierToPath ( class_type - > name ( ) - > prefix ( ) , 5 ) ; <nl> if ( written_files . count ( filename ) ) { <nl> continue ; <nl> } <nl> mmm a / torch / csrc / jit / passes / python_print . cpp <nl> ppp b / torch / csrc / jit / passes / python_print . 
cpp <nl> struct PythonPrintPass { <nl> if ( const auto classType = type - > cast < ClassType > ( ) ) { <nl> addToClassTable ( classType ) ; <nl> } else if ( const auto tupleType = type - > cast < TupleType > ( ) ) { <nl> - if ( tupleType - > qualified_name_obj ( ) ) { <nl> + if ( tupleType - > name ( ) ) { <nl> addToClassTable ( tupleType ) ; <nl> } <nl> } <nl> struct PythonPrintPass { <nl> if ( auto qualname = node - > output ( ) <nl> - > type ( ) <nl> - > expect < TupleType > ( ) <nl> - - > qualified_name_obj ( ) ) { <nl> + - > name ( ) ) { <nl> stmt < < qualname - > qualifiedName ( ) ; <nl> } <nl> printValueList ( <nl> struct PythonPrintPass { <nl> std : : ostringstream ret ; <nl> std : : unordered_set < std : : string > already_printed ; <nl> for ( const auto & c : direct_class_deps_ ) { <nl> - if ( already_printed . count ( c - > qualifier ( ) ) ) { <nl> + if ( already_printed . count ( c - > name ( ) - > prefix ( ) ) ) { <nl> continue ; <nl> } <nl> / / TODO we try to print a def for TestLinear in TestLinear . forward <nl> - ret < < " import " < < c - > qualifier ( ) < < " \ n " ; <nl> - already_printed . insert ( c - > qualifier ( ) ) ; <nl> + ret < < " import " < < c - > name ( ) - > prefix ( ) < < " \ n " ; <nl> + already_printed . insert ( c - > name ( ) - > prefix ( ) ) ; <nl> } <nl> return ret . str ( ) ; <nl> } <nl> struct PythonPrintPass { <nl> if ( legacy_module_printing_ ) { <nl> is_module = false ; <nl> } <nl> - body_ < < " class " < < classType - > basename ( ) ; <nl> + body_ < < " class " < < classType - > name ( ) - > name ( ) ; <nl> if ( is_module ) { <nl> body_ < < " ( Module ) " ; <nl> } <nl> struct PythonPrintPass { <nl> } <nl> } else if ( auto tupleType = type - > cast < TupleType > ( ) ) { <nl> TORCH_INTERNAL_ASSERT ( tupleType - > schema ( ) ) ; <nl> - body_ < < " class " < < tupleType - > basename ( ) ; <nl> + body_ < < " class " < < tupleType - > name ( ) - > name ( ) ; <nl> body_ < < " ( NamedTuple ) : \ n " ; <nl> { <nl> const auto guard = WithIndented ( ) ; <nl> mmm a / torch / csrc / jit / passes / shape_analysis . cpp <nl> ppp b / torch / csrc / jit / passes / shape_analysis . cpp <nl> class ShapePropagator { <nl> auto orig_type = node - > output ( ) - > type ( ) - > expect < TupleType > ( ) ; <nl> node - > output ( ) - > setType ( TupleType : : create ( <nl> fmap ( node - > inputs ( ) , [ ] ( Value * v ) { return v - > type ( ) ; } ) , <nl> - orig_type - > qualified_name_obj ( ) , <nl> + orig_type - > name ( ) , <nl> orig_type - > schema ( ) ) ) ; <nl> return ; <nl> } <nl> mmm a / torch / csrc / jit / pickler . cpp <nl> ppp b / torch / csrc / jit / pickler . cpp <nl> void Pickler : : pushIValueImpl ( const IValue & ivalue ) { <nl> } else if ( ivalue . isObject ( ) ) { <nl> auto obj = ivalue . toObject ( ) ; <nl> auto type = obj - > type ( ) ; <nl> - pushGlobal ( type - > qualifier ( ) , type - > basename ( ) ) ; <nl> + pushGlobal ( type - > name ( ) - > prefix ( ) , type - > name ( ) - > name ( ) ) ; <nl> push < OpCode > ( OpCode : : EMPTY_TUPLE ) ; <nl> push < OpCode > ( OpCode : : NEWOBJ ) ; <nl> if ( checkHasValidSetGetState ( type ) ) { <nl> mmm a / torch / csrc / jit / pybind_utils . h <nl> ppp b / torch / csrc / jit / pybind_utils . h <nl> inline py : : object toPyObject ( IValue & & ivalue ) { <nl> } <nl> if ( tuple - > type & & tuple - > type - > schema ( ) & & <nl> tuple - > type - > schema ( ) - > name ( ) ! 
= " " ) { <nl> - auto unqualName = tuple - > type - > basename ( ) ; <nl> + auto unqualName = tuple - > type - > name ( ) - > name ( ) ; <nl> auto fieldNames = fmap ( tuple - > type - > schema ( ) - > arguments ( ) , [ ] ( const Argument & arg ) { <nl> return arg . name ( ) ; <nl> } ) ; <nl> mmm a / torch / csrc / jit / script / compilation_unit . h <nl> ppp b / torch / csrc / jit / script / compilation_unit . h <nl> struct TORCH_API CompilationUnit { <nl> / / of invalidating their methods . NamedTuples are fine though , since they <nl> / / don ' t have methods . <nl> TORCH_CHECK ( <nl> - 0 = = classDict_ . count ( * namedType - > qualified_name_obj ( ) ) , <nl> + 0 = = classDict_ . count ( * namedType - > name ( ) ) , <nl> " class ' " , <nl> - namedType - > qualname ( ) , <nl> + namedType - > name ( ) - > qualifiedName ( ) , <nl> " ' already defined . " ) ; <nl> classes_ . push_back ( std : : move ( namedType ) ) ; <nl> - classDict_ [ * classes_ . back ( ) - > qualified_name_obj ( ) ] = classes_ . size ( ) - 1 ; <nl> + classDict_ [ * classes_ . back ( ) - > name ( ) ] = classes_ . size ( ) - 1 ; <nl> } ; <nl> <nl> c10 : : ClassTypePtr get_class ( const c10 : : QualifiedName & name ) const { <nl> struct TORCH_API CompilationUnit { <nl> <nl> c10 : : TupleTypePtr get_named_tuple ( const c10 : : QualifiedName & name ) const { <nl> for ( const auto & cls : classes_ ) { <nl> - if ( cls - > qualname ( ) = = name . qualifiedName ( ) ) { <nl> + if ( cls - > name ( ) - > qualifiedName ( ) = = name . qualifiedName ( ) ) { <nl> return cls - > expect < TupleType > ( ) ; <nl> } <nl> } <nl> mmm a / torch / csrc / jit / script / module . h <nl> ppp b / torch / csrc / jit / script / module . h <nl> struct TORCH_API Module { <nl> ~ Module ( ) { } <nl> <nl> const c10 : : QualifiedName & name ( ) const { <nl> - return * module_object ( ) - > type ( ) - > qualified_name_obj ( ) ; <nl> + return * module_object ( ) - > type ( ) - > name ( ) ; <nl> } <nl> <nl> void set_optimized ( bool o ) { <nl> mmm a / torch / csrc / jit / script / sugared_value . cpp <nl> ppp b / torch / csrc / jit / script / sugared_value . cpp <nl> std : : shared_ptr < SugaredValue > ClassValue : : call ( <nl> auto & g = * m . graph ( ) ; <nl> auto self = g . insertNode ( g . createObject ( type_ ) ) - > output ( ) ; <nl> if ( ! type_ - > getMethod ( " __init__ " ) ) { <nl> - throw ErrorReport ( loc ) < < " Class " < < type_ - > basename ( ) <nl> + throw ErrorReport ( loc ) < < " Class " < < type_ - > name ( ) - > name ( ) <nl> < < " does not have an __init__ function defined " ; <nl> } <nl> <nl> std : : shared_ptr < SugaredValue > NamedTupleConstructor : : call ( <nl> <nl> auto schema = type_ - > schema ( ) ; <nl> TORCH_INTERNAL_ASSERT ( schema ) ; <nl> - auto qualname = type_ - > qualified_name_obj ( ) ; <nl> + auto qualname = type_ - > name ( ) ; <nl> auto matched_schema = matchSchema ( * schema , loc , g , inputs , attributes ) ; <nl> <nl> auto self = <nl>
|
simplify NamedType interface ( )
|
pytorch/pytorch
|
a0836cb8da5cb6011a7252209dde86f9e477de7b
|
2019-08-13T03:29:49Z
|
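The pytorch diff above is a pure interface consolidation: NamedType loses its python_str/qualname/qualifier/basename convenience accessors, and every call site instead navigates the optional QualifiedName returned by name(). A minimal sketch of the resulting call-site shape, with toy stand-ins for QualifiedName and NamedType:

    #include <cassert>
    #include <optional>
    #include <string>

    struct QualifiedName {
      std::string prefix_;  // e.g. "foo.bar"
      std::string name_;    // e.g. "Baz"
      std::string qualifiedName() const {
        return prefix_.empty() ? name_ : prefix_ + "." + name_;
      }
      const std::string& prefix() const { return prefix_; }
      const std::string& name() const { return name_; }
    };

    struct NamedType {
      std::optional<QualifiedName> name_;
      // The single accessor that survives the diff.
      const std::optional<QualifiedName>& name() const { return name_; }
    };

    int main() {
      NamedType t{QualifiedName{"foo.bar", "Baz"}};
      // Old call sites: t.qualname(), t.qualifier(), t.basename().
      // New call sites spell out the navigation:
      assert(t.name()->qualifiedName() == "foo.bar.Baz");
      assert(t.name()->prefix() == "foo.bar");
      assert(t.name()->name() == "Baz");
    }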
mmm a / doc / developer - notes . md <nl> ppp b / doc / developer - notes . md <nl> CXXFLAGS = " - g - ggdb - O0 " or whatever debug flags you need . <nl> If the code is behaving strangely , take a look in the debug . log file in the data directory ; <nl> error and debugging messages are written there . <nl> <nl> - The - debug = . . . command - line option controls debugging ; running with just - debug will turn <nl> + The - debug = . . . command - line option controls debugging ; running with just - debug or - debug = 1 will turn <nl> on all categories ( and give you a very large debug . log file ) . <nl> <nl> The Qt code routes qDebug ( ) output to debug . log under category " qt " : run with - debug = qt <nl> mmm a / src / init . cpp <nl> ppp b / src / init . cpp <nl> std : : string HelpMessage ( HelpMessageMode mode ) <nl> if ( mode = = HMM_BITCOIN_QT ) <nl> debugCategories + = " , qt " ; <nl> strUsage + = HelpMessageOpt ( " - debug = < category > " , strprintf ( _ ( " Output debugging information ( default : % u , supplying < category > is optional ) " ) , 0 ) + " . " + <nl> - _ ( " If < category > is not supplied , output all debugging information . " ) + _ ( " < category > can be : " ) + " " + debugCategories + " . " ) ; <nl> + _ ( " If < category > is not supplied or if < category > = 1 , output all debugging information . " ) + _ ( " < category > can be : " ) + " " + debugCategories + " . " ) ; <nl> # ifdef ENABLE_WALLET <nl> strUsage + = HelpMessageOpt ( " - gen " , strprintf ( _ ( " Generate coins ( default : % u ) " ) , 0 ) ) ; <nl> strUsage + = HelpMessageOpt ( " - genproclimit = < n > " , strprintf ( _ ( " Set the number of threads for coin generation if enabled ( - 1 = all cores , default : % d ) " ) , 1 ) ) ; <nl> mmm a / src / util . cpp <nl> ppp b / src / util . cpp <nl> bool LogAcceptCategory ( const char * category ) <nl> <nl> / / if not debugging everything and not debugging specific category , LogPrint does nothing . <nl> if ( setCategories . count ( string ( " " ) ) = = 0 & & <nl> + setCategories . count ( string ( " 1 " ) ) = = 0 & & <nl> setCategories . count ( string ( category ) ) = = 0 ) <nl> return false ; <nl> } <nl>
|
Merge pull request
|
bitcoin/bitcoin
|
8d05ec7bda41a720da16901c510b4cb75e6ab558
|
2015-06-01T09:26:59Z
|
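The bitcoin patch above makes -debug=1 behave like a bare -debug. The whole behavioral change is one extra wildcard in the category lookup; a self-contained sketch of that matching rule, where log_accept_category is a simplified stand-in for LogAcceptCategory in util.cpp:

    #include <iostream>
    #include <set>
    #include <string>

    bool log_accept_category(const std::set<std::string>& categories,
                             const std::string& category) {
      // "" comes from a bare -debug; "1" comes from -debug=1. Either one
      // means "log everything"; otherwise the category must be enabled.
      return categories.count("") || categories.count("1") ||
             categories.count(category);
    }

    int main() {
      std::set<std::string> from_debug_eq_1{"1"};
      std::cout << log_accept_category(from_debug_eq_1, "net") << "\n";  // 1
      std::set<std::string> from_debug_net{"net"};
      std::cout << log_accept_category(from_debug_net, "qt") << "\n";    // 0
    }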
mmm a / lib / IRGen / GenProto . cpp <nl> ppp b / lib / IRGen / GenProto . cpp <nl> namespace { <nl> SmallVector < ProtocolDecl * , 4 > baseProtos ; <nl> baseType . getType ( ) - > isExistentialType ( baseProtos ) ; <nl> for ( auto baseProto : baseProtos ) { <nl> + / / ObjC protocols do not have witnesses . <nl> + if ( baseProto - > isObjC ( ) ) <nl> + continue ; <nl> asDerived ( ) . addOutOfLineBaseProtocol ( baseProto ) ; <nl> } <nl> } <nl>
|
IRGen : Leave inherited ObjC protocols out of protocol witness tables .
|
apple/swift
|
54898bf8fd7bb05867f260b87788924bcf9a81cb
|
2013-06-19T04:51:57Z
|
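The swift fix above is a single guard: when collecting inherited protocols for a witness table, @objc protocols are skipped because they carry no witness entries. A toy restatement of that filter, with hypothetical stand-in types:

    #include <iostream>
    #include <string>
    #include <vector>

    struct ProtocolDecl {
      std::string name;
      bool is_objc;
    };

    void add_out_of_line_base_protocol(const ProtocolDecl& p) {
      std::cout << "base entry: " << p.name << "\n";
    }

    void visit_base_protocols(const std::vector<ProtocolDecl>& bases) {
      for (const auto& base : bases) {
        if (base.is_objc) continue;  // ObjC protocols have no witnesses.
        add_out_of_line_base_protocol(base);
      }
    }

    int main() {
      visit_base_protocols({{"NSObjectProtocol", true}, {"Equatable", false}});
    }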
mmm a / samples / gpu / stereo_match . cpp <nl> ppp b / samples / gpu / stereo_match . cpp <nl> void App : : run ( ) <nl> imshow ( " left " , left ) ; <nl> imshow ( " right " , right ) ; <nl> <nl> - / / Create stero method descriptors <nl> + / / Set common parameters <nl> bm . ndisp = p . ndisp ; <nl> bp . ndisp = p . ndisp ; <nl> csbp . ndisp = p . ndisp ; <nl>
|
fixed comment
|
opencv/opencv
|
b102299dfac496381389b87bd4732579d2e92c66
|
2010-12-21T09:55:56Z
|
mmm a / hphp / runtime / base / heap - collect . cpp <nl> ppp b / hphp / runtime / base / heap - collect . cpp <nl> StructuredLogEntry logCommon ( ) { <nl> sample . setInt ( " memory_limit " , t_pre_stats . limit ) ; <nl> sample . setInt ( " usage " , t_pre_stats . usage ( ) ) ; <nl> sample . setInt ( " mm_usage " , t_pre_stats . mmUsage ) ; <nl> - sample . setInt ( " aux_usage " , t_pre_stats . auxUsage ( ) ) ; <nl> + sample . setInt ( " aux_usage " , t_pre_stats . auxUsage ) ; <nl> sample . setInt ( " mm_capacity " , t_pre_stats . capacity ) ; <nl> sample . setInt ( " peak_usage " , t_pre_stats . peakUsage ) ; <nl> sample . setInt ( " peak_capacity " , t_pre_stats . peakCap ) ; <nl> void MemoryManager : : requestGC ( ) { <nl> } <nl> <nl> void MemoryManager : : updateNextGc ( ) { <nl> - auto mm_limit = m_stats . limit - m_stats . auxUsage ( ) ; <nl> + auto mm_limit = m_stats . limit - m_stats . auxUsage ; <nl> int64_t delta = ( mm_limit - m_stats . mmUsage ) * <nl> RuntimeOption : : EvalGCTriggerPct ; <nl> delta = std : : max ( delta , RuntimeOption : : EvalGCMinTrigger ) ; <nl> mmm a / hphp / runtime / base / memory - manager . cpp <nl> ppp b / hphp / runtime / base / memory - manager . cpp <nl> void MemoryManager : : resetStatsImpl ( bool isInternalCall ) { <nl> m_stats . peakCap ) ; <nl> FTRACE ( 1 , " total alloc : { } \ nje alloc : { } \ nje dealloc : { } \ n " , <nl> m_stats . totalAlloc , m_prevAllocated , m_prevDeallocated ) ; <nl> + FTRACE ( 1 , " je debt : { } \ n \ n " , m_stats . mallocDebt ) ; <nl> # else <nl> FTRACE ( 1 , " resetStatsImpl ( { } ) pre : \ n " <nl> " usage : { } \ ncapacity : { } \ npeak usage : { } \ npeak capacity : { } \ n \ n " , <nl> void MemoryManager : : resetStatsImpl ( bool isInternalCall ) { <nl> if ( isInternalCall ) { <nl> m_statsIntervalActive = false ; <nl> m_stats . mmUsage = 0 ; <nl> - m_stats . threadUsage = 0 ; <nl> + m_stats . auxUsage = 0 ; <nl> m_stats . capacity = 0 ; <nl> m_stats . peakUsage = 0 ; <nl> m_stats . peakCap = 0 ; <nl> void MemoryManager : : resetStatsImpl ( bool isInternalCall ) { <nl> / / Anything that was definitively allocated by the MemoryManager allocator <nl> / / should be counted in this number even if we ' re otherwise zeroing out <nl> / / the count for each thread . <nl> - m_stats . totalAlloc = s_statsEnabled ? m_stats . capacity : 0 ; <nl> + m_stats . totalAlloc = s_statsEnabled ? m_stats . mallocDebt : 0 ; <nl> <nl> m_enableStatsSync = s_statsEnabled ; <nl> # else <nl> void MemoryManager : : resetStatsImpl ( bool isInternalCall ) { <nl> } <nl> # ifdef USE_JEMALLOC <nl> if ( s_statsEnabled ) { <nl> + m_stats . mallocDebt = 0 ; <nl> m_prevDeallocated = * m_deallocated ; <nl> m_prevAllocated = * m_allocated ; <nl> } <nl> void MemoryManager : : resetStatsImpl ( bool isInternalCall ) { <nl> m_stats . peakCap ) ; <nl> FTRACE ( 1 , " total alloc : { } \ nje alloc : { } \ nje dealloc : { } \ n " , <nl> m_stats . totalAlloc , m_prevAllocated , m_prevDeallocated ) ; <nl> + FTRACE ( 1 , " je debt : { } \ n \ n " , m_stats . mallocDebt ) ; <nl> # else <nl> FTRACE ( 1 , " resetStatsImpl ( { } ) post : \ n " <nl> " usage : { } \ ncapacity : { } \ npeak usage : { } \ npeak capacity : { } \ n \ n " , <nl> void MemoryManager : : setMemThresholdCallback ( size_t threshold ) { <nl> * <nl> * The stats parameter allows the updates to be applied to either <nl> * m_stats as in refreshStats ( ) or to a separate MemoryUsageStats <nl> - * struct as in getStatsCopy ( ) . <nl> + * struct as in getStatsSafe ( ) . 
<nl> * <nl> * The template variable live controls whether or not MemoryManager <nl> * member variables are updated and whether or not to call helper <nl> void MemoryManager : : refreshStatsImpl ( MemoryUsageStats & stats ) { <nl> / / <nl> / / int64 musage = delta - delta0 ; <nl> / / <nl> - / / This includes memory allocated by the request heap ( BigHeap ) , <nl> - / / which is recorded in m_stats . capacity , so it can be subtracted <nl> - / / when necessary to avoid double - counting . <nl> + / / Note however , the slab allocator adds to m_stats . mallocDebt <nl> + / / when it calls malloc ( ) , so that this function can avoid <nl> + / / double - counting the malloced memory . Thus musage in the example <nl> + / / code may well substantially exceed m_stats . usage . <nl> if ( m_enableStatsSync ) { <nl> / / We can ' t currently handle wrapping so make sure this isn ' t happening . <nl> assert ( * m_allocated < = uint64_t ( std : : numeric_limits < int64_t > : : max ( ) ) ) ; <nl> void MemoryManager : : refreshStatsImpl ( MemoryUsageStats & stats ) { <nl> curAllocated , m_prevAllocated , curAllocated - m_prevAllocated ) ; <nl> FTRACE ( 1 , " je dealloc : \ ncurrent : { } \ nprevious : { } \ nchange : { } \ n " , <nl> curDeallocated , m_prevDeallocated , curDeallocated - m_prevDeallocated ) ; <nl> - FTRACE ( 1 , " usage : { } \ ntotalAlloc : { } \ n " , stats . usage ( ) , stats . totalAlloc ) ; <nl> + FTRACE ( 1 , " usage : { } \ ntotalAlloc : { } \ nmallocDebt : { } \ n " , <nl> + stats . usage ( ) , stats . totalAlloc , stats . mallocDebt ) ; <nl> <nl> / / Since these deltas potentially include memory allocated from another <nl> / / thread but deallocated on this one , it is possible for these numbers to <nl> void MemoryManager : : refreshStatsImpl ( MemoryUsageStats & stats ) { <nl> <nl> / / Subtract the old usage adjustment ( prevUsage ) and add the current one <nl> / / ( curUsage ) to arrive at the new combined usage number . <nl> - stats . threadUsage + = curUsage - prevUsage ; <nl> + stats . auxUsage + = curUsage - prevUsage ; <nl> + <nl> + / / Remove the " debt " accrued from request - heap allocating slabs and big <nl> + / / objects , so we don ' t double count them . <nl> + stats . auxUsage - = stats . mallocDebt ; <nl> + stats . mallocDebt = 0 ; <nl> <nl> / / Accumulate the increase in allocation volume since the last refresh . <nl> / / We need to do the calculation instead of just setting it to curAllocated <nl> / / because of the MaskAlloc capability . <nl> stats . totalAlloc + = int64_t ( curAllocated ) - int64_t ( m_prevAllocated ) ; <nl> - <nl> if ( live ) { <nl> m_prevAllocated = curAllocated ; <nl> m_prevDeallocated = curDeallocated ; <nl> NEVER_INLINE void * MemoryManager : : newSlab ( uint32_t nbytes ) { <nl> storeTail ( m_front , ( char * ) m_limit - ( char * ) m_front ) ; <nl> auto slab = m_heap . allocSlab ( kSlabSize ) ; <nl> assert ( ( uintptr_t ( slab . ptr ) & kSmallSizeAlignMask ) = = 0 ) ; <nl> + m_stats . mallocDebt + = slab . size ; <nl> m_stats . capacity + = slab . size ; <nl> m_stats . peakCap = std : : max ( m_stats . peakCap , m_stats . capacity ) ; <nl> m_front = ( void * ) ( uintptr_t ( slab . ptr ) + nbytes ) ; <nl> MemBlock MemoryManager : : mallocBigSize ( size_t bytes , HeaderKind kind , <nl> / / NB : We don ' t report the SweepNode size in the stats . <nl> auto const delta = Mode = = FreeRequested ? bytes : block . size ; <nl> m_stats . mmUsage + = delta ; <nl> + / / Adjust jemalloc otherwise we ' ll double count the direct allocation . 
<nl> + m_stats . mallocDebt + = delta ; <nl> m_stats . capacity + = block . size + sizeof ( MallocNode ) ; <nl> updateBigStats ( ) ; <nl> FTRACE ( 3 , " mallocBigSize : { } ( { } requested , { } usable ) \ n " , <nl> MemBlock MemoryManager : : resizeBig ( MallocNode * n , size_t nbytes ) { <nl> auto old_size = n - > nbytes - sizeof ( MallocNode ) ; <nl> auto block = m_heap . resizeBig ( n + 1 , nbytes ) ; <nl> m_stats . mmUsage + = block . size - old_size ; <nl> + m_stats . mallocDebt + = block . size - old_size ; <nl> m_stats . capacity + = block . size - old_size ; <nl> updateBigStats ( ) ; <nl> return block ; <nl> void MemoryManager : : freeBigSize ( void * vp , size_t bytes ) { <nl> / / Since we account for these direct allocations in our usage and adjust for <nl> / / them on allocation , we also need to adjust for them negatively on free . <nl> m_stats . mmUsage - = bytes ; <nl> + m_stats . mallocDebt - = bytes ; <nl> auto actual = static_cast < MallocNode * > ( vp ) [ - 1 ] . nbytes ; <nl> assert ( bytes < = actual ) ; <nl> m_stats . capacity - = actual ; <nl> mmm a / hphp / runtime / base / memory - usage - stats . h <nl> ppp b / hphp / runtime / base / memory - usage - stats . h <nl> namespace HPHP { <nl> * totalAlloc will also be maintained , otherwise it will be 0 . <nl> * / <nl> struct MemoryUsageStats { <nl> - int64_t mmUsage ; / / bytes currently in use via MM apis <nl> - int64_t threadUsage ; / / bytes currently in use via jemalloc apis <nl> + int64_t mmUsage ; / / bytes are currently in use via MM apis <nl> + int64_t auxUsage ; / / adjustment for allocations via jemalloc <nl> <nl> int64_t capacity ; / / sum of slabs & big objects ( MM ' s capacity ) <nl> int64_t limit ; / / the max bytes allowed for a request before it is <nl> / / terminated for exceeding the memory limit <nl> + int64_t mallocDebt ; / / how many bytes of malloced memory have not <nl> + / / been processed by MemoryManager : : refreshStats <nl> int64_t peakUsage ; / / how many bytes have been used at maximum <nl> int64_t peakCap ; / / peak bytes owned by MemoryManager ( slabs and big ) <nl> int64_t peakIntervalUsage ; / / peakUsage during userland interval <nl> struct MemoryUsageStats { <nl> int64_t totalAlloc ; / / how many bytes have cumulatively been allocated <nl> / / by the underlying allocator <nl> <nl> - / * <nl> - * Current malloc usage for this thread , minus the large objects <nl> - * and slabs owned by MemoryManager . <nl> - * / <nl> - int64_t auxUsage ( ) const { return threadUsage - capacity ; } <nl> - <nl> - / * <nl> - * Current usage for this thread as the sum of MemoryManager usage <nl> - * ( allocated but not yet freed ) plus direct jemalloc usage <nl> - * ( allocated - deallocated ) that bypasses MemoryManager . <nl> - * / <nl> - int64_t usage ( ) const { return mmUsage + auxUsage ( ) ; } <nl> + int64_t usage ( ) const { return mmUsage + auxUsage ; } <nl> <nl> friend struct MemoryManager ; <nl> } ; <nl>
|
Backed out changeset b12102109701 D5139382 Kill MemoryUsageStats : : mallocDebt
|
facebook/hhvm
|
d3e1924f3d23a08ee3727f108f82c8d39b504825
|
2017-06-20T15:31:03Z
|
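The hhvm backout above restores the mallocDebt bookkeeping: request-heap slab and big-object allocations are recorded as "debt" against the jemalloc counters, and refreshStats subtracts that debt from auxUsage so the same bytes are not counted twice. A toy model of that flow, using the field names from the diff but nothing else from the real MemoryManager:

    #include <cstdint>
    #include <iostream>

    struct MemoryUsageStats {
      int64_t mmUsage = 0;     // bytes in use via MM APIs
      int64_t auxUsage = 0;    // adjustment for allocations via jemalloc
      int64_t capacity = 0;    // slabs + big objects
      int64_t mallocDebt = 0;  // malloced bytes not yet seen by refreshStats
      int64_t usage() const { return mmUsage + auxUsage; }
    };

    void alloc_slab(MemoryUsageStats& s, int64_t size) {
      s.capacity += size;
      s.mallocDebt += size;  // jemalloc will also report this allocation
    }

    void refresh_stats(MemoryUsageStats& s, int64_t jemalloc_delta) {
      s.auxUsage += jemalloc_delta;  // raw allocated-minus-deallocated delta
      s.auxUsage -= s.mallocDebt;    // remove bytes already counted as slabs
      s.mallocDebt = 0;
    }

    int main() {
      MemoryUsageStats s;
      alloc_slab(s, 4096);
      refresh_stats(s, /*jemalloc_delta=*/4096);  // the slab's own malloc
      // usage stays 0; the slab shows up only in capacity.
      std::cout << s.usage() << " capacity=" << s.capacity << "\n";
    }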
mmm a / lib / IDE / CodeCompletion . cpp <nl> ppp b / lib / IDE / CodeCompletion . cpp <nl> class CompletionLookup : swift : : VisibleDeclConsumer , <nl> IsSuperRefExpr = true ; <nl> } <nl> <nl> + void addTypeAnnotation ( CodeCompletionResultBuilder & Builder , Type T ) { <nl> + if ( T - > isVoid ( ) ) <nl> + Builder . addTypeAnnotation ( " Void " ) ; <nl> + else <nl> + Builder . addTypeAnnotation ( T . getString ( ) ) ; <nl> + } <nl> + <nl> void addSwiftVarDeclRef ( const VarDecl * VD ) { <nl> StringRef Name = VD - > getName ( ) . get ( ) ; <nl> assert ( ! Name . empty ( ) & & " name should not be empty " ) ; <nl> class CompletionLookup : swift : : VisibleDeclConsumer , <nl> if ( ! HaveDot ) <nl> Builder . addDot ( ) ; <nl> Builder . addTextChunk ( Name ) ; <nl> - Builder . addTypeAnnotation ( VD - > getType ( ) . getString ( ) ) ; <nl> + addTypeAnnotation ( Builder , VD - > getType ( ) ) ; <nl> } <nl> <nl> void addTuplePatternParameters ( CodeCompletionResultBuilder & Builder , <nl> class CompletionLookup : swift : : VisibleDeclConsumer , <nl> NeedComma = true ; <nl> } <nl> Builder . addRightParen ( ) ; <nl> - Builder . addTypeAnnotation ( AFT - > getResult ( ) . getString ( ) ) ; <nl> + addTypeAnnotation ( Builder , AFT - > getResult ( ) ) ; <nl> } <nl> <nl> void addSwiftMethodCall ( const FuncDecl * FD ) { <nl> class CompletionLookup : swift : : VisibleDeclConsumer , <nl> addTuplePatternParameters ( Builder , cast < TuplePattern > ( Patterns [ FirstIndex ] ) ) ; <nl> Builder . addRightParen ( ) ; <nl> / / FIXME : Pattern should pretty - print itself . <nl> - llvm : : SmallString < 32 > Type ; <nl> + llvm : : SmallString < 32 > TypeStr ; <nl> for ( unsigned i = FirstIndex + 1 , e = Patterns . size ( ) ; i ! = e ; + + i ) { <nl> - Type + = " ( " ; <nl> + TypeStr + = " ( " ; <nl> for ( auto TupleElt : cast < TuplePattern > ( Patterns [ i ] ) - > getFields ( ) ) { <nl> - Type + = TupleElt . getPattern ( ) - > getBoundName ( ) . str ( ) ; <nl> - Type + = " : " ; <nl> - Type + = TupleElt . getPattern ( ) - > getType ( ) . getString ( ) ; <nl> + TypeStr + = TupleElt . getPattern ( ) - > getBoundName ( ) . str ( ) ; <nl> + TypeStr + = " : " ; <nl> + TypeStr + = TupleElt . getPattern ( ) - > getType ( ) . getString ( ) ; <nl> } <nl> - Type + = " ) - > " ; <nl> + TypeStr + = " ) - > " ; <nl> } <nl> - Type + = FE - > getResultType ( SwiftContext ) . getString ( ) ; <nl> - Builder . addTypeAnnotation ( Type ) ; <nl> + Type ResultType = FE - > getResultType ( SwiftContext ) ; <nl> + if ( ResultType - > isVoid ( ) ) <nl> + TypeStr + = " Void " ; <nl> + else <nl> + TypeStr + = ResultType . getString ( ) ; <nl> + Builder . addTypeAnnotation ( TypeStr ) ; <nl> <nl> / / TODO : skip arguments with default parameters ? <nl> } <nl> class CompletionLookup : swift : : VisibleDeclConsumer , <nl> Builder . addLeftParen ( ) ; <nl> addTuplePatternParameters ( Builder , cast < TuplePattern > ( CD - > getArguments ( ) ) ) ; <nl> Builder . addRightParen ( ) ; <nl> - Builder . addTypeAnnotation ( CD - > getResultType ( ) . getString ( ) ) ; <nl> + addTypeAnnotation ( Builder , CD - > getResultType ( ) ) ; <nl> } <nl> <nl> void addSwiftSubscriptCall ( const SubscriptDecl * SD ) { <nl> class CompletionLookup : swift : : VisibleDeclConsumer , <nl> Builder . addLeftBracket ( ) ; <nl> addTuplePatternParameters ( Builder , cast < TuplePattern > ( SD - > getIndices ( ) ) ) ; <nl> Builder . addRightBracket ( ) ; <nl> - Builder . addTypeAnnotation ( SD - > getElementType ( ) . 
getString ( ) ) ; <nl> + addTypeAnnotation ( Builder , SD - > getElementType ( ) ) ; <nl> } <nl> <nl> void addClangDecl ( const clang : : NamedDecl * ND ) { <nl> class CompletionLookup : swift : : VisibleDeclConsumer , <nl> Builder . addDot ( ) ; <nl> Builder . addTextChunk ( Name ) ; <nl> if ( ! TypeAnnotation . isNull ( ) ) <nl> - Builder . addTypeAnnotation ( TypeAnnotation . getString ( ) ) ; <nl> + addTypeAnnotation ( Builder , TypeAnnotation ) ; <nl> } <nl> <nl> / / Implement swift : : VisibleDeclConsumer <nl>
|
Code completion : use ' Void ' instead of ' ( ) ' in type annotations
|
apple/swift
|
6a0c6f487926eabcabd3e3788627f0b06a312225
|
2013-07-15T18:28:51Z
|
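The swift change above routes every completion-result type annotation through one helper so that void results render as "Void" instead of "()". The helper in miniature, where Type is a hypothetical stand-in for swift::Type:

    #include <iostream>
    #include <string>

    struct Type {
      std::string str;
      bool isVoid() const { return str == "()"; }
    };

    std::string type_annotation(const Type& t) {
      return t.isVoid() ? "Void" : t.str;
    }

    int main() {
      std::cout << type_annotation({"()"}) << "\n";   // Void
      std::cout << type_annotation({"Int"}) << "\n";  // Int
    }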
mmm a / cmake / templates / OpenCVConfig . cmake . in <nl> ppp b / cmake / templates / OpenCVConfig . cmake . in <nl> foreach ( __cvcomponent $ { OpenCV_FIND_COMPONENTS } ) <nl> get_target_property ( __implib_release opencv_world IMPORTED_IMPLIB_RELEASE ) <nl> get_target_property ( __location_dbg opencv_world IMPORTED_LOCATION_DEBUG ) <nl> get_target_property ( __location_release opencv_world IMPORTED_LOCATION_RELEASE ) <nl> + get_target_property ( __include_dir opencv_world INTERFACE_INCLUDE_DIRECTORIES ) <nl> add_library ( $ { __cvcomponent } SHARED IMPORTED ) <nl> + set_target_properties ( $ { __cvcomponent } PROPERTIES INTERFACE_INCLUDE_DIRECTORIES " $ { __include_dir } " ) <nl> if ( __location_dbg ) <nl> set_property ( TARGET $ { __cvcomponent } APPEND PROPERTY IMPORTED_CONFIGURATIONS DEBUG ) <nl> set_target_properties ( $ { __cvcomponent } PROPERTIES <nl>
|
Merge pull request from mshabunin : fix - world - interface - include
|
opencv/opencv
|
403c3c3d032d2be204890ffc728e0fee307b9da4
|
2018-02-02T16:25:17Z
|
deleted file mode 100644 <nl> index 7dc519fdf . . 000000000 <nl> mmm a / appendix / hid_system_client / CMakeLists . txt <nl> ppp / dev / null <nl> <nl> - cmake_minimum_required ( VERSION 3 . 9 ) <nl> - <nl> - include ( . . / . . / src / common . cmake ) <nl> - <nl> - project ( a . out ) <nl> - <nl> - add_executable ( <nl> - a . out <nl> - main . cpp <nl> - ) <nl> - <nl> - target_link_libraries ( <nl> - a . out <nl> - " - framework CoreFoundation " <nl> - " - framework IOKit " <nl> - ) <nl> deleted file mode 100644 <nl> index 4612efecb . . 000000000 <nl> mmm a / appendix / hid_system_client / Makefile <nl> ppp / dev / null <nl> <nl> - all : build_make <nl> - <nl> - clean : clean_builds <nl> - <nl> - run : <nl> - . / build / a . out <nl> - <nl> - include . . / . . / src / Makefile . rules <nl> deleted file mode 100644 <nl> index c5a66f790 . . 000000000 <nl> mmm a / appendix / hid_system_client / main . cpp <nl> ppp / dev / null <nl> <nl> - # include " dispatcher_utility . hpp " <nl> - # include " hid_system_client . hpp " <nl> - # include " logger . hpp " <nl> - <nl> - int main ( int argc , const char * argv [ ] ) { <nl> - krbn : : dispatcher_utility : : initialize_dispatchers ( ) ; <nl> - <nl> - signal ( SIGINT , [ ] ( int ) { <nl> - CFRunLoopStop ( CFRunLoopGetMain ( ) ) ; <nl> - } ) ; <nl> - <nl> - auto client = std : : make_unique < krbn : : hid_system_client > ( ) ; <nl> - client - > caps_lock_state_changed . connect ( [ ] ( auto & & state ) { <nl> - if ( ! state ) { <nl> - krbn : : logger : : get_logger ( ) . info ( " caps_lock_state_changed : boost : : none " ) ; <nl> - } else { <nl> - krbn : : logger : : get_logger ( ) . info ( " caps_lock_state_changed : { 0 } " , * state ) ; <nl> - } <nl> - } ) ; <nl> - client - > async_start_caps_lock_check_timer ( std : : chrono : : milliseconds ( 100 ) ) ; <nl> - <nl> - std : : this_thread : : sleep_for ( std : : chrono : : milliseconds ( 500 ) ) ; <nl> - <nl> - client - > async_set_caps_lock_state ( true ) ; <nl> - <nl> - std : : this_thread : : sleep_for ( std : : chrono : : milliseconds ( 500 ) ) ; <nl> - <nl> - client - > async_set_caps_lock_state ( true ) ; <nl> - <nl> - std : : this_thread : : sleep_for ( std : : chrono : : milliseconds ( 500 ) ) ; <nl> - <nl> - std : : this_thread : : sleep_for ( std : : chrono : : milliseconds ( 500 ) ) ; <nl> - <nl> - client - > async_set_caps_lock_state ( false ) ; <nl> - <nl> - std : : this_thread : : sleep_for ( std : : chrono : : milliseconds ( 500 ) ) ; <nl> - <nl> - CFRunLoopRun ( ) ; <nl> - <nl> - client = nullptr ; <nl> - <nl> - krbn : : dispatcher_utility : : terminate_dispatchers ( ) ; <nl> - <nl> - return 0 ; <nl> - } <nl> deleted file mode 100644 <nl> index 9bc45c3c7 . . 000000000 <nl> mmm a / src / share / hid_system_client . hpp <nl> ppp / dev / null <nl> <nl> - # pragma once <nl> - <nl> - # include " boost_defs . hpp " <nl> - <nl> - # include " iokit_utility . hpp " <nl> - # include " logger . hpp " <nl> - # include " types . hpp " <nl> - # include < boost / signals2 . hpp > <nl> - # include < pqrs / osx / iokit_return . hpp > <nl> - # include < pqrs / osx / iokit_service_monitor . 
hpp > <nl> - <nl> - namespace krbn { <nl> - class hid_system_client final : pqrs : : dispatcher : : extra : : dispatcher_client { <nl> - public : <nl> - / / Signals ( invoked from the shared dispatcher thread ) <nl> - <nl> - boost : : signals2 : : signal < void ( boost : : optional < bool > ) > caps_lock_state_changed ; <nl> - <nl> - / / Methods <nl> - <nl> - hid_system_client ( const hid_system_client & ) = delete ; <nl> - <nl> - / / Note : <nl> - / / OS X shares IOHIDSystem among all input devices even the serial_number of IOHIDSystem is same with the one of the input device . <nl> - / / <nl> - / / Example : <nl> - / / The matched_callback always contains only one IOHIDSystem even if the following devices are connected . <nl> - / / * Apple Internal Keyboard / Track <nl> - / / * HHKB - BT <nl> - / / * org . pqrs . driver . VirtualHIDKeyboard <nl> - / / <nl> - / / The IOHIDSystem object ' s serial_number is one of the connected devices . <nl> - / / <nl> - / / But the IOHIDSystem object is shared by all input devices . <nl> - / / Thus , the IOHIDGetModifierLockState returns true if caps lock is on in one device . <nl> - <nl> - hid_system_client ( void ) : dispatcher_client ( ) , <nl> - service_ ( IO_OBJECT_NULL ) , <nl> - connect_ ( IO_OBJECT_NULL ) , <nl> - caps_lock_state_check_timer_ ( * this ) { <nl> - if ( auto matching_dictionary = IOServiceNameMatching ( kIOHIDSystemClass ) ) { <nl> - service_monitor_ = std : : make_unique < pqrs : : osx : : iokit_service_monitor > ( weak_dispatcher_ , <nl> - matching_dictionary ) ; <nl> - <nl> - service_monitor_ - > service_detected . connect ( [ this ] ( auto & & registry_entry_id , auto & & service_ptr ) { <nl> - close_connection ( ) ; <nl> - <nl> - / / Use the last matched service . <nl> - open_connection ( * service_ptr ) ; <nl> - } ) ; <nl> - <nl> - service_monitor_ - > service_removed . connect ( [ this ] ( auto & & registry_entry_id ) { <nl> - close_connection ( ) ; <nl> - <nl> - / / Use the next service <nl> - service_monitor_ - > async_invoke_service_detected ( ) ; <nl> - } ) ; <nl> - <nl> - service_monitor_ - > async_start ( ) ; <nl> - <nl> - CFRelease ( matching_dictionary ) ; <nl> - } <nl> - } <nl> - <nl> - virtual ~ hid_system_client ( void ) { <nl> - detach_from_dispatcher ( [ this ] { <nl> - caps_lock_state_check_timer_ . stop ( ) ; <nl> - close_connection ( ) ; <nl> - <nl> - service_monitor_ = nullptr ; <nl> - } ) ; <nl> - } <nl> - <nl> - void async_start_caps_lock_check_timer ( std : : chrono : : milliseconds interval ) { <nl> - enqueue_to_dispatcher ( [ this , interval ] { <nl> - last_caps_lock_state_ = boost : : none ; <nl> - <nl> - caps_lock_state_check_timer_ . start ( <nl> - [ this ] { <nl> - auto s = get_modifier_lock_state ( kIOHIDCapsLockState ) ; <nl> - if ( last_caps_lock_state_ ! = s ) { <nl> - last_caps_lock_state_ = s ; <nl> - enqueue_to_dispatcher ( [ this , s ] { <nl> - caps_lock_state_changed ( s ) ; <nl> - } ) ; <nl> - } <nl> - } , <nl> - interval ) ; <nl> - } ) ; <nl> - } <nl> - <nl> - void async_set_caps_lock_state ( bool state ) { <nl> - enqueue_to_dispatcher ( [ this , state ] { <nl> - set_modifier_lock_state ( kIOHIDCapsLockState , state ) ; <nl> - } ) ; <nl> - } <nl> - <nl> - private : <nl> - / / This method is executed in the dispatcher thread . 
<nl> - void open_connection ( io_service_t s ) { <nl> - service_ = s ; <nl> - IOObjectRetain ( service_ ) ; <nl> - <nl> - pqrs : : osx : : iokit_return r = IOServiceOpen ( service_ , mach_task_self ( ) , kIOHIDParamConnectType , & connect_ ) ; <nl> - if ( r ) { <nl> - logger : : get_logger ( ) . info ( " hid_system_client is opened . " ) ; <nl> - <nl> - } else { <nl> - logger : : get_logger ( ) . error ( " hid_system_client : : open_connection is failed : { 0 } " , <nl> - r . to_string ( ) ) ; <nl> - connect_ = IO_OBJECT_NULL ; <nl> - } <nl> - } <nl> - <nl> - / / This method is executed in the dispatcher thread . <nl> - void close_connection ( void ) { <nl> - if ( connect_ ) { <nl> - pqrs : : osx : : iokit_return r = IOServiceClose ( connect_ ) ; <nl> - if ( ! r ) { <nl> - logger : : get_logger ( ) . error ( " hid_system_client : : close_connection error : { 0 } " , <nl> - r . to_string ( ) ) ; <nl> - } <nl> - connect_ = IO_OBJECT_NULL ; <nl> - } <nl> - <nl> - logger : : get_logger ( ) . info ( " hid_system_client is closed . " ) ; <nl> - <nl> - if ( service_ ) { <nl> - IOObjectRelease ( service_ ) ; <nl> - service_ = IO_OBJECT_NULL ; <nl> - } <nl> - } <nl> - <nl> - / / This method is executed in the dispatcher thread . <nl> - boost : : optional < bool > get_modifier_lock_state ( int selector ) { <nl> - if ( ! connect_ ) { <nl> - logger : : get_logger ( ) . error ( " hid_system_client : : get_modifier_lock_state connect_ is null . " ) ; <nl> - return boost : : none ; <nl> - } <nl> - <nl> - bool state = false ; <nl> - pqrs : : osx : : iokit_return r = IOHIDGetModifierLockState ( connect_ , selector , & state ) ; <nl> - if ( ! r ) { <nl> - logger : : get_logger ( ) . error ( " IOHIDGetModifierLockState is failed : { 0 } " , <nl> - r . to_string ( ) ) ; <nl> - return boost : : none ; <nl> - } <nl> - <nl> - return state ; <nl> - } <nl> - <nl> - / / This method is executed in the dispatcher thread . <nl> - void set_modifier_lock_state ( int selector , bool state ) { <nl> - if ( ! connect_ ) { <nl> - logger : : get_logger ( ) . error ( " hid_system_client : : set_modifier_lock_state connect_ is null . " ) ; <nl> - return ; <nl> - } <nl> - <nl> - pqrs : : osx : : iokit_return r = IOHIDSetModifierLockState ( connect_ , selector , state ) ; <nl> - if ( ! r ) { <nl> - logger : : get_logger ( ) . error ( " IOHIDSetModifierLockState is failed : { 0 } " , <nl> - r . to_string ( ) ) ; <nl> - } <nl> - } <nl> - <nl> - std : : unique_ptr < pqrs : : osx : : iokit_service_monitor > service_monitor_ ; <nl> - io_service_t service_ ; <nl> - io_connect_t connect_ ; <nl> - <nl> - pqrs : : dispatcher : : extra : : timer caps_lock_state_check_timer_ ; <nl> - boost : : optional < bool > last_caps_lock_state_ ; <nl> - } ; <nl> - } / / namespace krbn <nl>
|
remove hid_system_client
|
pqrs-org/Karabiner-Elements
|
b3d6c3a29818b11ed113b1d3424d290777631667
|
2018-11-14T00:05:41Z
|
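The deleted Karabiner-Elements class polled IOHIDGetModifierLockState on a dispatcher timer and emitted caps_lock_state_changed only when the observed value differed from the last one. A stripped-down sketch of that poll-and-compare pattern, with std::function standing in for the boost::signals2 signal and a fake reader instead of the IOKit call:

    #include <functional>
    #include <iostream>
    #include <optional>

    class StatePoller {
     public:
      std::function<void(std::optional<bool>)> state_changed;

      // In the real class this runs on a periodic dispatcher timer.
      void poll(const std::function<std::optional<bool>()>& read_state) {
        auto s = read_state();
        if (s != last_state_) {
          last_state_ = s;
          if (state_changed) state_changed(s);
        }
      }

     private:
      std::optional<bool> last_state_;
    };

    int main() {
      StatePoller p;
      p.state_changed = [](std::optional<bool> s) {
        std::cout << "caps_lock_state_changed: "
                  << (s ? (*s ? "true" : "false") : "none") << "\n";
      };
      p.poll([] { return std::optional<bool>(true); });   // fires
      p.poll([] { return std::optional<bool>(true); });   // no change, silent
      p.poll([] { return std::optional<bool>(false); });  // fires
    }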
mmm a / src / app / commands / cmd_invert_mask . cpp <nl> ppp b / src / app / commands / cmd_invert_mask . cpp <nl> void InvertMaskCommand : : onExecute ( Context * context ) <nl> if ( undo . isEnabled ( ) ) <nl> undo . pushUndoer ( new undoers : : SetMask ( undo . getObjects ( ) , document ) ) ; <nl> <nl> - / * create a new mask * / <nl> + / / Select all the sprite area <nl> base : : UniquePtr < Mask > mask ( new Mask ( ) ) ; <nl> + mask - > replace ( sprite - > bounds ( ) ) ; <nl> <nl> - / * select all the sprite area * / <nl> - mask - > replace ( 0 , 0 , sprite - > width ( ) , sprite - > height ( ) ) ; <nl> - <nl> - / * remove in the new mask the current sprite marked region * / <nl> + / / Remove in the new mask the current sprite marked region <nl> const gfx : : Rect & maskBounds = document - > mask ( ) - > bounds ( ) ; <nl> doc : : fill_rect ( mask - > bitmap ( ) , <nl> - maskBounds . x , maskBounds . y , <nl> - maskBounds . x + maskBounds . w - 1 , <nl> - maskBounds . y + maskBounds . h - 1 , 0 ) ; <nl> + maskBounds . x , maskBounds . y , <nl> + maskBounds . x + maskBounds . w - 1 , <nl> + maskBounds . y + maskBounds . h - 1 , 0 ) ; <nl> <nl> / / Invert the current mask in the sprite <nl> document - > mask ( ) - > invert ( ) ; <nl> mmm a / src / app / commands / cmd_mask_all . cpp <nl> ppp b / src / app / commands / cmd_mask_all . cpp <nl> void MaskAllCommand : : onExecute ( Context * context ) <nl> undo . pushUndoer ( new undoers : : SetMask ( undo . getObjects ( ) , document ) ) ; <nl> <nl> / / Change the selection <nl> - document - > mask ( ) - > replace ( 0 , 0 , sprite - > width ( ) , sprite - > height ( ) ) ; <nl> + document - > mask ( ) - > replace ( sprite - > bounds ( ) ) ; <nl> document - > setMaskVisible ( true ) ; <nl> document - > resetTransformation ( ) ; <nl> <nl> mmm a / src / app / commands / cmd_rotate . cpp <nl> ppp b / src / app / commands / cmd_rotate . cpp <nl> class RotateJob : public Job <nl> } <nl> <nl> / / create the new rotated mask <nl> - new_mask - > replace ( x , y , <nl> - m_angle = = 180 ? origBounds . w : origBounds . h , <nl> - m_angle = = 180 ? origBounds . h : origBounds . w ) ; <nl> + new_mask - > replace ( <nl> + gfx : : Rect ( x , y , <nl> + m_angle = = 180 ? origBounds . w : origBounds . h , <nl> + m_angle = = 180 ? origBounds . h : origBounds . w ) ) ; <nl> doc : : rotate_image ( origMask - > bitmap ( ) , new_mask - > bitmap ( ) , m_angle ) ; <nl> <nl> / / Copy new mask <nl> mmm a / src / app / commands / cmd_sprite_size . cpp <nl> ppp b / src / app / commands / cmd_sprite_size . cpp <nl> class SpriteSizeJob : public Job { <nl> int w = scale_x ( old_bitmap - > width ( ) ) ; <nl> int h = scale_y ( old_bitmap - > height ( ) ) ; <nl> base : : UniquePtr < Mask > new_mask ( new Mask ) ; <nl> - new_mask - > replace ( scale_x ( m_document - > mask ( ) - > bounds ( ) . x - 1 ) , <nl> - scale_y ( m_document - > mask ( ) - > bounds ( ) . y - 1 ) , MAX ( 1 , w ) , MAX ( 1 , h ) ) ; <nl> + new_mask - > replace ( <nl> + gfx : : Rect ( <nl> + scale_x ( m_document - > mask ( ) - > bounds ( ) . x - 1 ) , <nl> + scale_y ( m_document - > mask ( ) - > bounds ( ) . y - 1 ) , MAX ( 1 , w ) , MAX ( 1 , h ) ) ) ; <nl> algorithm : : resize_image ( old_bitmap , new_mask - > bitmap ( ) , <nl> m_resize_method , <nl> m_sprite - > getPalette ( FrameNumber ( 0 ) ) , / / Ignored <nl> mmm a / src / app / commands / filters / filter_manager_impl . cpp <nl> ppp b / src / app / commands / filters / filter_manager_impl . 
cpp <nl> void FilterManagerImpl : : beginForPreview ( ) <nl> m_preview_mask . reset ( new Mask ( * document - > mask ( ) ) ) ; <nl> else { <nl> m_preview_mask . reset ( new Mask ( ) ) ; <nl> - m_preview_mask - > replace ( m_offset_x , m_offset_y , <nl> - m_src - > width ( ) , <nl> - m_src - > height ( ) ) ; <nl> + m_preview_mask - > replace ( <nl> + gfx : : Rect ( m_offset_x , m_offset_y , <nl> + m_src - > width ( ) , <nl> + m_src - > height ( ) ) ) ; <nl> } <nl> <nl> m_row = 0 ; <nl> mmm a / src / app / file / ase_format . cpp <nl> ppp b / src / app / file / ase_format . cpp <nl> static Mask * ase_file_read_mask_chunk ( FILE * f ) <nl> <nl> mask = new Mask ( ) ; <nl> mask - > setName ( name . c_str ( ) ) ; <nl> - mask - > replace ( x , y , w , h ) ; <nl> + mask - > replace ( gfx : : Rect ( x , y , w , h ) ) ; <nl> <nl> / / Read image data <nl> for ( v = 0 ; v < h ; v + + ) <nl> mmm a / src / app / tools / inks . h <nl> ppp b / src / app / tools / inks . h <nl> class SelectionInk : public Ink { <nl> switch ( loop - > getSelectionMode ( ) ) { <nl> case kDefaultSelectionMode : <nl> case kAddSelectionMode : <nl> - loop - > getMask ( ) - > add ( x1 - offset . x , y - offset . y , x2 - x1 + 1 , 1 ) ; <nl> + loop - > getMask ( ) - > add ( <nl> + gfx : : Rect ( x1 - offset . x , y - offset . y , x2 - x1 + 1 , 1 ) ) ; <nl> break ; <nl> case kSubtractSelectionMode : <nl> - loop - > getMask ( ) - > subtract ( x1 - offset . x , y - offset . y , x2 - x1 + 1 , 1 ) ; <nl> + loop - > getMask ( ) - > subtract ( <nl> + gfx : : Rect ( x1 - offset . x , y - offset . y , x2 - x1 + 1 , 1 ) ) ; <nl> break ; <nl> } <nl> } <nl> class SelectionInk : public Ink { <nl> undo - > pushUndoer ( new undoers : : SetMask ( undo - > getObjects ( ) , loop - > getDocument ( ) ) ) ; <nl> <nl> loop - > getMask ( ) - > freeze ( ) ; <nl> - loop - > getMask ( ) - > reserve ( 0 , 0 , loop - > sprite ( ) - > width ( ) , loop - > sprite ( ) - > height ( ) ) ; <nl> + loop - > getMask ( ) - > reserve ( loop - > sprite ( ) - > bounds ( ) ) ; <nl> } <nl> else { <nl> loop - > getMask ( ) - > unfreeze ( ) ; <nl> mmm a / src / app / ui / editor / pixels_movement . cpp <nl> ppp b / src / app / ui / editor / pixels_movement . cpp <nl> void PixelsMovement : : redrawCurrentMask ( ) <nl> <nl> / / Transform mask <nl> <nl> - m_currentMask - > replace ( 0 , 0 , m_sprite - > width ( ) , m_sprite - > height ( ) ) ; <nl> + m_currentMask - > replace ( m_sprite - > bounds ( ) ) ; <nl> m_currentMask - > freeze ( ) ; <nl> clear_image ( m_currentMask - > bitmap ( ) , 0 ) ; <nl> drawParallelogram ( m_currentMask - > bitmap ( ) , m_initialMask - > bitmap ( ) , <nl> mmm a / src / app / util / msk_file . cpp <nl> ppp b / src / app / util / msk_file . cpp <nl> Mask * load_msk_file ( const char * filename ) <nl> / / Animator MSK format <nl> else if ( orig_size = = 8000 ) { <nl> mask = new Mask ( ) ; <nl> - mask - > replace ( 0 , 0 , 320 , 200 ) ; <nl> + mask - > replace ( gfx : : Rect ( 0 , 0 , 320 , 200 ) ) ; <nl> <nl> u = v = 0 ; <nl> for ( i = 0 ; i < 8000 ; i + + ) { <nl> mmm a / src / doc / mask . cpp <nl> ppp b / src / doc / mask . cpp <nl> void Mask : : invert ( ) <nl> } <nl> } <nl> <nl> - void Mask : : replace ( int x , int y , int w , int h ) <nl> + void Mask : : replace ( const gfx : : Rect & bounds ) <nl> { <nl> - m_bounds = gfx : : Rect ( x , y , w , h ) ; <nl> + m_bounds = bounds ; <nl> <nl> delete m_bitmap ; <nl> - m_bitmap = Image : : create ( IMAGE_BITMAP , w , h ) ; <nl> + m_bitmap = Image : : create ( IMAGE_BITMAP , bounds . w , bounds . 
h ) ; <nl> <nl> clear_image ( m_bitmap , 1 ) ; <nl> } <nl> <nl> - void Mask : : replace ( const gfx : : Rect & bounds ) <nl> - { <nl> - replace ( bounds . x , bounds . y , bounds . w , bounds . h ) ; <nl> - } <nl> - <nl> - void Mask : : add ( int x , int y , int w , int h ) <nl> + void Mask : : add ( const gfx : : Rect & bounds ) <nl> { <nl> if ( m_freeze_count = = 0 ) <nl> - reserve ( x , y , w , h ) ; <nl> + reserve ( bounds ) ; <nl> <nl> fill_rect ( m_bitmap , <nl> - x - m_bounds . x , y - m_bounds . y , <nl> - x - m_bounds . x + w - 1 , y - m_bounds . y + h - 1 , 1 ) ; <nl> + bounds . x - m_bounds . x , <nl> + bounds . y - m_bounds . y , <nl> + bounds . x - m_bounds . x + bounds . w - 1 , <nl> + bounds . y - m_bounds . y + bounds . h - 1 , 1 ) ; <nl> } <nl> <nl> - void Mask : : add ( const gfx : : Rect & bounds ) <nl> - { <nl> - add ( bounds . x , bounds . y , bounds . w , bounds . h ) ; <nl> - } <nl> - <nl> - void Mask : : subtract ( int x , int y , int w , int h ) <nl> + void Mask : : subtract ( const gfx : : Rect & bounds ) <nl> { <nl> if ( m_bitmap ) { <nl> fill_rect ( m_bitmap , <nl> - x - m_bounds . x , <nl> - y - m_bounds . y , <nl> - x - m_bounds . x + w - 1 , <nl> - y - m_bounds . y + h - 1 , 0 ) ; <nl> + bounds . x - m_bounds . x , <nl> + bounds . y - m_bounds . y , <nl> + bounds . x - m_bounds . x + bounds . w - 1 , <nl> + bounds . y - m_bounds . y + bounds . h - 1 , 0 ) ; <nl> + <nl> shrink ( ) ; <nl> } <nl> } <nl> <nl> - void Mask : : subtract ( const gfx : : Rect & bounds ) <nl> - { <nl> - subtract ( bounds . x , bounds . y , bounds . w , bounds . h ) ; <nl> - } <nl> - <nl> - void Mask : : intersect ( int x , int y , int w , int h ) <nl> + void Mask : : intersect ( const gfx : : Rect & bounds ) <nl> { <nl> if ( m_bitmap ) { <nl> - int x1 = m_bounds . x ; <nl> - int y1 = m_bounds . y ; <nl> - int x2 = MIN ( m_bounds . x + m_bounds . w - 1 , x + w - 1 ) ; <nl> - int y2 = MIN ( m_bounds . y + m_bounds . h - 1 , y + h - 1 ) ; <nl> + gfx : : Rect newBounds = m_bounds . createIntersect ( bounds ) ; <nl> <nl> - m_bounds . x = MAX ( x , x1 ) ; <nl> - m_bounds . y = MAX ( y , y1 ) ; <nl> - m_bounds . w = x2 - m_bounds . x + 1 ; <nl> - m_bounds . h = y2 - m_bounds . y + 1 ; <nl> + Image * image = NULL ; <nl> + <nl> + if ( ! newBounds . isEmpty ( ) ) { <nl> + image = crop_image ( m_bitmap , <nl> + newBounds . x - m_bounds . x , <nl> + newBounds . y - m_bounds . y , <nl> + newBounds . w , <nl> + newBounds . h , 0 ) ; <nl> + } <nl> <nl> - Image * image = crop_image ( m_bitmap , m_bounds . x - x1 , m_bounds . y - y1 , m_bounds . w , m_bounds . h , 0 ) ; <nl> delete m_bitmap ; <nl> m_bitmap = image ; <nl> + m_bounds = newBounds ; <nl> <nl> shrink ( ) ; <nl> } <nl> } <nl> <nl> - void Mask : : intersect ( const gfx : : Rect & bounds ) <nl> - { <nl> - intersect ( bounds . x , bounds . y , bounds . w , bounds . 
h ) ; <nl> - } <nl> - <nl> void Mask : : byColor ( const Image * src , int color , int fuzziness ) <nl> { <nl> - replace ( 0 , 0 , src - > width ( ) , src - > height ( ) ) ; <nl> + replace ( src - > bounds ( ) ) ; <nl> <nl> Image * dst = m_bitmap ; <nl> <nl> void Mask : : crop ( const Image * image ) <nl> get_pixel ( image , c , y2 ) ) ; <nl> <nl> if ( done_count < 4 ) <nl> - intersect ( x1 , y1 , x2 - x1 + 1 , y2 - y1 + 1 ) ; <nl> + intersect ( gfx : : Rect ( x1 , y1 , x2 - x1 + 1 , y2 - y1 + 1 ) ) ; <nl> else <nl> clear ( ) ; <nl> <nl> # undef ADVANCE <nl> } <nl> <nl> - void Mask : : reserve ( int x , int y , int w , int h ) <nl> + void Mask : : reserve ( const gfx : : Rect & bounds ) <nl> { <nl> - ASSERT ( w > 0 & & h > 0 ) ; <nl> + ASSERT ( ! bounds . isEmpty ( ) ) ; <nl> <nl> if ( ! m_bitmap ) { <nl> - m_bounds . x = x ; <nl> - m_bounds . y = y ; <nl> - m_bounds . w = w ; <nl> - m_bounds . h = h ; <nl> - m_bitmap = Image : : create ( IMAGE_BITMAP , w , h ) ; <nl> + m_bounds = bounds ; <nl> + m_bitmap = Image : : create ( IMAGE_BITMAP , bounds . w , bounds . h ) ; <nl> clear_image ( m_bitmap , 0 ) ; <nl> } <nl> else { <nl> - int x1 = m_bounds . x ; <nl> - int y1 = m_bounds . y ; <nl> - int x2 = MAX ( m_bounds . x + m_bounds . w - 1 , x + w - 1 ) ; <nl> - int y2 = MAX ( m_bounds . y + m_bounds . h - 1 , y + h - 1 ) ; <nl> - int new_mask_x = MIN ( x , x1 ) ; <nl> - int new_mask_y = MIN ( y , y1 ) ; <nl> - int new_mask_w = x2 - new_mask_x + 1 ; <nl> - int new_mask_h = y2 - new_mask_y + 1 ; <nl> - <nl> - if ( m_bounds . x ! = new_mask_x | | <nl> - m_bounds . y ! = new_mask_y | | <nl> - m_bounds . w ! = new_mask_w | | <nl> - m_bounds . h ! = new_mask_h ) { <nl> - m_bounds . x = new_mask_x ; <nl> - m_bounds . y = new_mask_y ; <nl> - m_bounds . w = new_mask_w ; <nl> - m_bounds . h = new_mask_h ; <nl> - <nl> - Image * image = crop_image ( m_bitmap , m_bounds . x - x1 , m_bounds . y - y1 , m_bounds . w , m_bounds . h , 0 ) ; <nl> + gfx : : Rect newBounds = m_bounds . createUnion ( bounds ) ; <nl> + <nl> + if ( m_bounds ! = newBounds ) { <nl> + Image * image = crop_image ( m_bitmap , <nl> + newBounds . x - m_bounds . x , <nl> + newBounds . y - m_bounds . y , <nl> + newBounds . w , <nl> + newBounds . h , 0 ) ; <nl> delete m_bitmap ; / / image <nl> m_bitmap = image ; <nl> + m_bounds = newBounds ; <nl> } <nl> } <nl> } <nl> mmm a / src / doc / mask . h <nl> ppp b / src / doc / mask . h <nl> namespace doc { <nl> void copyFrom ( const Mask * sourceMask ) ; <nl> <nl> / / Replace the whole mask with the given region . <nl> - void replace ( int x , int y , int w , int h ) ; <nl> void replace ( const gfx : : Rect & bounds ) ; <nl> <nl> / / Inverts the mask . 
<nl> void invert ( ) ; <nl> <nl> / / Adds the specified rectangle in the mask / selection <nl> - void add ( int x , int y , int w , int h ) ; <nl> void add ( const gfx : : Rect & bounds ) ; <nl> - <nl> - void subtract ( int x , int y , int w , int h ) ; <nl> void subtract ( const gfx : : Rect & bounds ) ; <nl> - void intersect ( int x , int y , int w , int h ) ; <nl> void intersect ( const gfx : : Rect & bounds ) ; <nl> void byColor ( const Image * image , int color , int fuzziness ) ; <nl> void crop ( const Image * image ) ; <nl> <nl> / / Reserves a rectangle to draw onto the bitmap ( you should call <nl> / / shrink after you draw in the bitmap ) <nl> - void reserve ( int x , int y , int w , int h ) ; <nl> + void reserve ( const gfx : : Rect & bounds ) ; <nl> <nl> / / Shrinks all sides of the mask to the minimum possible looking at <nl> / / empty pixels in the bitmap <nl> mmm a / src / doc / mask_io . cpp <nl> ppp b / src / doc / mask_io . cpp <nl> Mask * read_mask ( std : : istream & is ) <nl> if ( w > 0 & & h > 0 ) { <nl> int size = BitmapTraits : : getRowStrideBytes ( w ) ; <nl> <nl> - mask - > add ( x , y , w , h ) ; <nl> + mask - > add ( gfx : : Rect ( x , y , w , h ) ) ; <nl> for ( int c = 0 ; c < mask - > bounds ( ) . h ; c + + ) <nl> is . read ( ( char * ) mask - > bitmap ( ) - > getPixelAddress ( 0 , c ) , size ) ; <nl> } <nl>
|
Change Mask member functions to use gfx : : Rect instead of x , y , w , h args
|
aseprite/aseprite
|
f66b48c69850aa90052149429442eecbdb687971
|
2014-12-13T22:10:54Z
|
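The aseprite refactor above deletes the (x, y, w, h) overloads of Mask::replace/add/subtract/intersect/reserve and moves the rectangle arithmetic onto gfx::Rect, so reserve() becomes a createUnion and intersect() a createIntersect. A tiny stand-in Rect showing why that shrinks the call sites:

    #include <algorithm>
    #include <cassert>

    struct Rect {
      int x = 0, y = 0, w = 0, h = 0;
      bool isEmpty() const { return w <= 0 || h <= 0; }
      Rect createUnion(const Rect& o) const {
        int x1 = std::min(x, o.x), y1 = std::min(y, o.y);
        int x2 = std::max(x + w, o.x + o.w), y2 = std::max(y + h, o.y + o.h);
        return {x1, y1, x2 - x1, y2 - y1};
      }
      Rect createIntersect(const Rect& o) const {
        int x1 = std::max(x, o.x), y1 = std::max(y, o.y);
        int x2 = std::min(x + w, o.x + o.w), y2 = std::min(y + h, o.y + o.h);
        return (x2 > x1 && y2 > y1) ? Rect{x1, y1, x2 - x1, y2 - y1} : Rect{};
      }
    };

    int main() {
      Rect mask{0, 0, 10, 10};
      Rect reserved = mask.createUnion({5, 5, 10, 10});     // grow to cover both
      Rect clipped = mask.createIntersect({5, 5, 10, 10});  // shrink to overlap
      assert(reserved.w == 15 && reserved.h == 15);
      assert(clipped.x == 5 && clipped.w == 5);
    }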
mmm a / hphp / hack / src / typing / typing . ml <nl> ppp b / hphp / hack / src / typing / typing . ml <nl> and condition ? lhs_of_null_coalesce env tparamet = <nl> when ( List . length ivar_tyl ) = ( List . length hint_tyl ) - > <nl> let env , tyl = List . map2_env env ivar_tyl hint_tyl safely_refine_type in <nl> env , ( reason , Ttuple tyl ) <nl> - | _ , ( Tany | Tmixed | Tnonnull | Tprim _ | Toption _ | Ttuple _ <nl> + | _ , Tnonnull - > <nl> + TUtils . non_null env ivar_ty <nl> + | _ , ( Tany | Tmixed | Tprim _ | Toption _ | Ttuple _ <nl> | Tshape _ | Tvar _ | Tabstract _ | Tarraykind _ | Tanon _ <nl> | Tunresolved _ | Tobject | Terr | Tfun _ | Tdynamic ) - > <nl> ( * TODO ( kunalm ) Implement the type refinement for each type * ) <nl> new file mode 100644 <nl> index 00000000000 . . 69f4f6186e0 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / is_expression / nonnull1 . php <nl> <nl> + < ? hh / / strict <nl> + <nl> + function f ( ? int $ x ) : void { <nl> + if ( $ x is nonnull ) { <nl> + expect_nonnull ( $ x ) ; <nl> + expect_int ( $ x ) ; <nl> + } <nl> + } <nl> + <nl> + function expect_nonnull ( nonnull $ x ) : void { } <nl> + function expect_int ( int $ x ) : void { } <nl> new file mode 100644 <nl> index 00000000000 . . 4269126fceb <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / is_expression / nonnull1 . php . exp <nl> @ @ - 0 , 0 + 1 @ @ <nl> + No errors <nl> new file mode 100644 <nl> index 00000000000 . . 9b1ca3bfb1b <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / is_expression / nonnull2 . php <nl> <nl> + < ? hh / / strict <nl> + <nl> + function f ( mixed $ x ) : void { <nl> + if ( $ x is nonnull ) { <nl> + expect_nonnull ( $ x ) ; <nl> + expect_int ( $ x ) ; <nl> + } <nl> + } <nl> + <nl> + function expect_nonnull ( nonnull $ x ) : void { } <nl> + function expect_int ( int $ x ) : void { } <nl> new file mode 100644 <nl> index 00000000000 . . 8b18408a895 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / is_expression / nonnull2 . php . exp <nl> <nl> + File " nonnull2 . php " , line 6 , characters 16 - 17 : <nl> + Invalid argument ( Typing [ 4110 ] ) <nl> + File " nonnull2 . php " , line 11 , characters 21 - 23 : <nl> + This is an int <nl> + File " nonnull2 . php " , line 3 , characters 12 - 16 : <nl> + It is incompatible with a nonnull value <nl> new file mode 100644 <nl> index 00000000000 . . 6f5d4bcaef6 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / is_expression / nonnull3 . php <nl> <nl> + < ? hh / / strict <nl> + <nl> + function f ( bool $ option ) : void { <nl> + $ x = $ option ? return_int ( ) : return_nstring ( ) ; <nl> + if ( $ x is nonnull ) { <nl> + expect_nonnull ( $ x ) ; <nl> + expect_arraykey ( $ x ) ; <nl> + } <nl> + } <nl> + <nl> + function return_int ( ) : int { <nl> + return 1 ; <nl> + } <nl> + function return_nstring ( ) : ? string { <nl> + return ' foo ' ; <nl> + } <nl> + <nl> + function expect_nonnull ( nonnull $ x ) : void { } <nl> + function expect_arraykey ( arraykey $ x ) : void { } <nl> new file mode 100644 <nl> index 00000000000 . . 4269126fceb <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / is_expression / nonnull3 . php . exp <nl> @ @ - 0 , 0 + 1 @ @ <nl> + No errors <nl>
|
Typecheck nonnull in is - expressions
|
facebook/hhvm
|
e660cfd8075b13257a2fa7c1f5109510c904ccb3
|
2018-04-11T17:09:03Z
|
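The hhvm commit above makes Hack's `$x is nonnull` check refine the checked variable's static type via TUtils.non_null, so a ?int narrows to int inside the branch (nonnull1.php) while mixed narrows only to nonnull, not to int (nonnull2.php). C++ has no flow-sensitive refinement in its type checker, but the runtime shape of the pattern maps loosely onto unwrapping std::optional after a null check; the sketch below is that analogy, not Hack's implementation.

```cpp
#include <iostream>
#include <optional>

void expect_nonnull(int x) { std::cout << x << "\n"; }

// Loose C++ analogue of the refinement above: after a null check, the
// value inside the optional is used as a plain (non-null) int. Unlike
// Hack, the narrowing here is manual, not performed by the type checker.
void f(std::optional<int> x) {
  if (x.has_value()) {   // roughly "$x is nonnull"
    expect_nonnull(*x);  // *x is an int; no null case remains
  }
}

int main() {
  f(std::nullopt);  // no output
  f(42);            // prints 42
  return 0;
}
```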
mmm a / tensorflow / api_template . __init__ . py <nl> ppp b / tensorflow / api_template . __init__ . py <nl> <nl> # does not have ' python ' , ' core ' directories . Then , it will be copied <nl> # to tensorflow / which does have these two directories . <nl> pass <nl> + # Similarly for compiler . Do it separately to make sure we do this even if the <nl> + # others don ' t exist . <nl> + try : <nl> + del compiler <nl> + except NameError : <nl> + pass <nl> # pylint : enable = undefined - variable <nl> mmm a / tensorflow / api_template_v1 . __init__ . py <nl> ppp b / tensorflow / api_template_v1 . __init__ . py <nl> <nl> # does not have ' python ' , ' core ' directories . Then , it will be copied <nl> # to tensorflow / which does have these two directories . <nl> pass <nl> + # Similarly for compiler . Do it separately to make sure we do this even if the <nl> + # others don ' t exist . <nl> + try : <nl> + del compiler <nl> + except NameError : <nl> + pass <nl> # pylint : enable = undefined - variable <nl> mmm a / tensorflow / contrib / lite / python / convert . py <nl> ppp b / tensorflow / contrib / lite / python / convert . py <nl> <nl> from tensorflow . python . platform import resource_loader as _resource_loader <nl> from tensorflow . python . util import deprecation <nl> from tensorflow . python . util . lazy_loader import LazyLoader <nl> + from tensorflow . python . util . tf_export import tf_export as _tf_export <nl> <nl> # Lazy load since some of the performance benchmark skylark rules <nl> # break dependencies . <nl> class ConverterError ( Exception ) : <nl> pass <nl> <nl> <nl> + # Don ' t expose these for now . <nl> + # @ _tf_export ( " lite . toco_convert_protos " ) <nl> def toco_convert_protos ( model_flags_str , toco_flags_str , input_data_str ) : <nl> " " " Convert ` input_data_str ` according to model and toco parameters . <nl> <nl> def tensor_name ( x ) : <nl> return x . name . split ( " : " ) [ 0 ] <nl> <nl> <nl> + # Don ' t expose these for now . <nl> + # @ _tf_export ( " lite . build_toco_convert_protos " ) <nl> def build_toco_convert_protos ( input_tensors , <nl> output_tensors , <nl> inference_type = lite_constants . FLOAT , <nl> def toco_convert_impl ( input_data , input_tensors , output_tensors , * args , <nl> return data <nl> <nl> <nl> + @ _tf_export ( " lite . toco_convert " ) <nl> @ deprecation . deprecated ( None , " Use ` lite . TFLiteConverter ` instead . " ) <nl> def toco_convert ( input_data , input_tensors , output_tensors , * args , * * kwargs ) : <nl> " " " Convert a model using TOCO . <nl> mmm a / tensorflow / contrib / lite / python / interpreter . py <nl> ppp b / tensorflow / contrib / lite / python / interpreter . py <nl> <nl> import sys <nl> import numpy as np <nl> from tensorflow . python . util . lazy_loader import LazyLoader <nl> + from tensorflow . python . util . tf_export import tf_export as _tf_export <nl> <nl> # Lazy load since some of the performance benchmark skylark rules <nl> # break dependencies . Must use double quotes to match code internal rewrite <nl> <nl> del LazyLoader <nl> <nl> <nl> + @ _tf_export ( ' lite . Interpreter ' ) <nl> class Interpreter ( object ) : <nl> " " " Interpreter inferace for TF - Lite Models . " " " <nl> <nl> mmm a / tensorflow / contrib / lite / python / lite . py <nl> ppp b / tensorflow / contrib / lite / python / lite . py <nl> <nl> from tensorflow . python . saved_model import signature_constants as _signature_constants <nl> from tensorflow . python . 
saved_model import tag_constants as _tag_constants <nl> from tensorflow . python . util import deprecation as _deprecation <nl> + from tensorflow . python . util . tf_export import tf_export as _tf_export <nl> <nl> <nl> + @ _tf_export ( " lite . TFLiteConverter " ) <nl> class TFLiteConverter ( object ) : <nl> " " " Convert a TensorFlow model into ` output_format ` using TOCO . <nl> <nl> def _set_batch_size ( self , batch_size ) : <nl> tensor . set_shape ( shape ) <nl> <nl> <nl> + @ _tf_export ( " lite . TocoConverter " ) <nl> class TocoConverter ( object ) : <nl> " " " Convert a TensorFlow model into ` output_format ` using TOCO . <nl> <nl> mmm a / tensorflow / contrib / lite / python / lite_constants . py <nl> ppp b / tensorflow / contrib / lite / python / lite_constants . py <nl> <nl> from tensorflow . contrib . lite . toco import toco_flags_pb2 as _toco_flags_pb2 <nl> from tensorflow . contrib . lite . toco import types_pb2 as _types_pb2 <nl> from tensorflow . python . util . all_util import remove_undocumented <nl> + from tensorflow . python . util . tf_export import tf_export as _tf_export <nl> <nl> # Enum types from the protobuf promoted to the API <nl> FLOAT = _types_pb2 . FLOAT <nl> <nl> TFLITE = _toco_flags_pb2 . TFLITE <nl> GRAPHVIZ_DOT = _toco_flags_pb2 . GRAPHVIZ_DOT <nl> <nl> + _tf_export ( " lite . constants . FLOAT " ) . export_constant ( __name__ , " FLOAT " ) <nl> + _tf_export ( " lite . constants . INT32 " ) . export_constant ( __name__ , " INT32 " ) <nl> + _tf_export ( " lite . constants . INT64 " ) . export_constant ( __name__ , " INT64 " ) <nl> + _tf_export ( " lite . constants . STRING " ) . export_constant ( __name__ , " STRING " ) <nl> + _tf_export ( " lite . constants . QUANTIZED_UINT8 " ) . export_constant ( <nl> + __name__ , " QUANTIZED_UINT8 " ) <nl> + _tf_export ( " lite . constants . TFLITE " ) . export_constant ( __name__ , " TFLITE " ) <nl> + _tf_export ( " lite . constants . GRAPHVIZ_DOT " ) . export_constant ( <nl> + __name__ , " GRAPHVIZ_DOT " ) <nl> + <nl> # Currently the default mode of operation is to shell to another python process <nl> # to protect against crashes . However , it breaks some dependent targets because <nl> # it forces us to depend on an external py_binary . The experimental API doesn ' t <nl> mmm a / tensorflow / contrib / lite / python / op_hint . py <nl> ppp b / tensorflow / contrib / lite / python / op_hint . py <nl> def tflite_cool_activation ( input ) : <nl> from tensorflow . python . ops import array_ops as _array_ops <nl> from tensorflow . python . util import compat as _compat <nl> from tensorflow . python . util . all_util import remove_undocumented <nl> + from tensorflow . python . util . tf_export import tf_export as _tf_export <nl> <nl> <nl> + @ _tf_export ( " lite . OpHint " ) <nl> class OpHint ( object ) : <nl> " " " A class that helps build tflite function invocations . <nl> <nl> class OpHint ( object ) : <nl> # Types of aggregations <nl> # stack : stacks all ophints with matching tags . i . e . for a static rnn . <nl> # specifically , this is good for an input or output to a static rnn cell . <nl> - AGGREGATE_STACK = _compat . as_bytes ( " stack " ) <nl> + AGGREGATE_STACK = " stack " <nl> # first : only takes the first output ( one with lowest sort index ) <nl> # of matching tags . This is good for the input state to an RNN . <nl> - AGGREGATE_FIRST = _compat . as_bytes ( " first " ) <nl> + AGGREGATE_FIRST = " first " <nl> # aggregation last takes only the last tag ( one with highest sort index ) . 
<nl> # This is good for an output value on the last stack item of a <nl> # static rnn . <nl> - AGGREGATE_LAST = _compat . as_bytes ( " last " ) <nl> + AGGREGATE_LAST = " last " <nl> <nl> class OpHintArgumentTracker ( object ) : <nl> " " " Conceptually tracks indices of arguments of " OpHint functions " . <nl> def _find_all_hints_in_graph_def ( graphdef ) : <nl> if sort = = - 1 : sort = None <nl> aggregation = None <nl> if OpHint . FUNCTION_AGGREGATE_ATTR in attr : <nl> - aggregation = attr [ OpHint . FUNCTION_AGGREGATE_ATTR ] . s <nl> + aggregation = _compat . as_text ( attr [ OpHint . FUNCTION_AGGREGATE_ATTR ] . s ) <nl> <nl> # Add the input or output <nl> def put_operand ( stuff , index , sort , operand , aggregation ) : <nl> def _remove_redundant_stack_unstack ( graph_def ) : <nl> return curr <nl> <nl> <nl> + @ _tf_export ( " lite . convert_op_hints_to_stubs " ) <nl> def _convert_op_hints_to_stubs_helper ( <nl> graph_def , write_callback = lambda sess , graph_def : None ) : <nl> " " " Converts a graph_def to a new graph_def where all op hints are stubbed . <nl> mmm a / tensorflow / python / BUILD <nl> ppp b / tensorflow / python / BUILD <nl> py_library ( <nl> srcs_version = " PY2AND3 " , <nl> visibility = [ <nl> " / / tensorflow : __pkg__ " , <nl> + " / / tensorflow / python / estimator : __subpackages__ " , <nl> " / / tensorflow / python / tools : __pkg__ " , <nl> " / / tensorflow / python / tools / api / generator : __pkg__ " , <nl> " / / tensorflow / tools / api / tests : __pkg__ " , <nl> py_library ( <nl> " : util " , <nl> " : weights_broadcast_ops " , <nl> " : while_v2 " , <nl> + " / / tensorflow / contrib / lite / python : lite " , <nl> " / / tensorflow / core : protos_all_py " , <nl> " / / tensorflow / python / compat " , <nl> " / / tensorflow / python / data " , <nl> mmm a / tensorflow / python / tools / api / generator / api_gen . bzl <nl> ppp b / tensorflow / python / tools / api / generator / api_gen . bzl <nl> def gen_api_init_files ( <nl> api_version = 2 , <nl> compat_api_versions = [ ] , <nl> compat_init_templates = [ ] , <nl> - packages = [ " tensorflow . python " ] , <nl> + packages = [ " tensorflow . python " , " tensorflow . contrib . lite . python . lite " ] , <nl> package_deps = [ " / / tensorflow / python : no_contrib " ] , <nl> output_package = " tensorflow " , <nl> output_dir = " " ) : <nl> mmm a / tensorflow / python / tools / api / generator / api_init_files . bzl <nl> ppp b / tensorflow / python / tools / api / generator / api_init_files . bzl <nl> TENSORFLOW_API_INIT_FILES = [ <nl> " keras / wrappers / __init__ . py " , <nl> " keras / wrappers / scikit_learn / __init__ . py " , <nl> " linalg / __init__ . py " , <nl> + " lite / __init__ . py " , <nl> + " lite / constants / __init__ . py " , <nl> " logging / __init__ . py " , <nl> " losses / __init__ . py " , <nl> " manip / __init__ . py " , <nl> mmm a / tensorflow / python / tools / api / generator / api_init_files_v1 . bzl <nl> ppp b / tensorflow / python / tools / api / generator / api_init_files_v1 . bzl <nl> TENSORFLOW_API_INIT_FILES_V1 = [ <nl> " layers / __init__ . py " , <nl> " layers / experimental / __init__ . py " , <nl> " linalg / __init__ . py " , <nl> + " lite / __init__ . py " , <nl> + " lite / constants / __init__ . py " , <nl> " logging / __init__ . py " , <nl> " losses / __init__ . py " , <nl> " manip / __init__ . py " , <nl> mmm a / tensorflow / python / tools / api / generator / create_python_api . py <nl> ppp b / tensorflow / python / tools / api / generator / create_python_api . 
py <nl> def in_packages ( m ) : <nl> module . __name__ is None or not in_packages ( module . __name__ ) ) : <nl> continue <nl> # Do not generate __init__ . py files for contrib modules for now . <nl> - if ' . contrib . ' in module . __name__ or module . __name__ . endswith ( ' . contrib ' ) : <nl> + if ( ( ' . contrib . ' in module . __name__ or module . __name__ . endswith ( ' . contrib ' ) ) <nl> + and ' . lite ' not in module . __name__ ) : <nl> continue <nl> <nl> for module_contents_name in dir ( module ) : <nl> mmm a / tensorflow / python / tools / api / generator / output_init_files_test . py <nl> ppp b / tensorflow / python / tools / api / generator / output_init_files_test . py <nl> <nl> # available in sys . modules <nl> # pylint : disable = unused - import <nl> from tensorflow import python as _tf_for_api_traversal <nl> + from tensorflow . contrib . lite . python import lite as _tflite_for_api_traversal <nl> # pylint : enable = unused - import <nl> from tensorflow . python . platform import test <nl> from tensorflow . python . util import tf_decorator <nl> new file mode 100644 <nl> index 0000000000000 . . 9a22ee1f879f6 <nl> mmm / dev / null <nl> ppp b / tensorflow / tools / api / golden / v1 / tensorflow . lite . - interpreter . pbtxt <nl> <nl> + path : " tensorflow . lite . Interpreter " <nl> + tf_class { <nl> + is_instance : " < class \ ' tensorflow . contrib . lite . python . interpreter . Interpreter \ ' > " <nl> + is_instance : " < type \ ' object \ ' > " <nl> + member_method { <nl> + name : " __init__ " <nl> + argspec : " args = [ \ ' self \ ' , \ ' model_path \ ' , \ ' model_content \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' ] , " <nl> + } <nl> + member_method { <nl> + name : " allocate_tensors " <nl> + argspec : " args = [ \ ' self \ ' ] , varargs = None , keywords = None , defaults = None " <nl> + } <nl> + member_method { <nl> + name : " get_input_details " <nl> + argspec : " args = [ \ ' self \ ' ] , varargs = None , keywords = None , defaults = None " <nl> + } <nl> + member_method { <nl> + name : " get_output_details " <nl> + argspec : " args = [ \ ' self \ ' ] , varargs = None , keywords = None , defaults = None " <nl> + } <nl> + member_method { <nl> + name : " get_tensor " <nl> + argspec : " args = [ \ ' self \ ' , \ ' tensor_index \ ' ] , varargs = None , keywords = None , defaults = None " <nl> + } <nl> + member_method { <nl> + name : " get_tensor_details " <nl> + argspec : " args = [ \ ' self \ ' ] , varargs = None , keywords = None , defaults = None " <nl> + } <nl> + member_method { <nl> + name : " invoke " <nl> + argspec : " args = [ \ ' self \ ' ] , varargs = None , keywords = None , defaults = None " <nl> + } <nl> + member_method { <nl> + name : " reset_all_variables " <nl> + argspec : " args = [ \ ' self \ ' ] , varargs = None , keywords = None , defaults = None " <nl> + } <nl> + member_method { <nl> + name : " resize_tensor_input " <nl> + argspec : " args = [ \ ' self \ ' , \ ' input_index \ ' , \ ' tensor_size \ ' ] , varargs = None , keywords = None , defaults = None " <nl> + } <nl> + member_method { <nl> + name : " set_tensor " <nl> + argspec : " args = [ \ ' self \ ' , \ ' tensor_index \ ' , \ ' value \ ' ] , varargs = None , keywords = None , defaults = None " <nl> + } <nl> + member_method { <nl> + name : " tensor " <nl> + argspec : " args = [ \ ' self \ ' , \ ' tensor_index \ ' ] , varargs = None , keywords = None , defaults = None " <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 0000000000000 . . 
66bf5256f6bc9 <nl> mmm / dev / null <nl> ppp b / tensorflow / tools / api / golden / v1 / tensorflow . lite . - op - hint . - op - hint - argument - tracker . pbtxt <nl> <nl> + path : " tensorflow . lite . OpHint . OpHintArgumentTracker " <nl> + tf_class { <nl> + is_instance : " < class \ ' tensorflow . contrib . lite . python . op_hint . OpHintArgumentTracker \ ' > " <nl> + is_instance : " < type \ ' object \ ' > " <nl> + member_method { <nl> + name : " __init__ " <nl> + argspec : " args = [ \ ' self \ ' , \ ' function_name \ ' , \ ' unique_function_id \ ' , \ ' node_name_prefix \ ' , \ ' attr_name \ ' ] , varargs = None , keywords = None , defaults = None " <nl> + } <nl> + member_method { <nl> + name : " add " <nl> + argspec : " args = [ \ ' self \ ' , \ ' arg \ ' , \ ' tag \ ' , \ ' name \ ' , \ ' aggregate \ ' , \ ' index_override \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 0000000000000 . . 8c8e55f6cfb58 <nl> mmm / dev / null <nl> ppp b / tensorflow / tools / api / golden / v1 / tensorflow . lite . - op - hint . pbtxt <nl> <nl> + path : " tensorflow . lite . OpHint " <nl> + tf_class { <nl> + is_instance : " < class \ ' tensorflow . contrib . lite . python . op_hint . OpHint \ ' > " <nl> + is_instance : " < type \ ' object \ ' > " <nl> + member { <nl> + name : " AGGREGATE_FIRST " <nl> + mtype : " < type \ ' str \ ' > " <nl> + } <nl> + member { <nl> + name : " AGGREGATE_LAST " <nl> + mtype : " < type \ ' str \ ' > " <nl> + } <nl> + member { <nl> + name : " AGGREGATE_STACK " <nl> + mtype : " < type \ ' str \ ' > " <nl> + } <nl> + member { <nl> + name : " FUNCTION_AGGREGATE_ATTR " <nl> + mtype : " < type \ ' str \ ' > " <nl> + } <nl> + member { <nl> + name : " FUNCTION_INPUT_INDEX_ATTR " <nl> + mtype : " < type \ ' str \ ' > " <nl> + } <nl> + member { <nl> + name : " FUNCTION_NAME_ATTR " <nl> + mtype : " < type \ ' str \ ' > " <nl> + } <nl> + member { <nl> + name : " FUNCTION_OUTPUT_INDEX_ATTR " <nl> + mtype : " < type \ ' str \ ' > " <nl> + } <nl> + member { <nl> + name : " FUNCTION_SORT_INDEX_ATTR " <nl> + mtype : " < type \ ' str \ ' > " <nl> + } <nl> + member { <nl> + name : " FUNCTION_UUID_ATTR " <nl> + mtype : " < type \ ' str \ ' > " <nl> + } <nl> + member { <nl> + name : " OpHintArgumentTracker " <nl> + mtype : " < type \ ' type \ ' > " <nl> + } <nl> + member { <nl> + name : " TFLITE_INPUT_INDICES " <nl> + mtype : " < type \ ' str \ ' > " <nl> + } <nl> + member_method { <nl> + name : " __init__ " <nl> + argspec : " args = [ \ ' self \ ' , \ ' function_name \ ' ] , varargs = None , keywords = kwargs , defaults = None " <nl> + } <nl> + member_method { <nl> + name : " add_input " <nl> + argspec : " args = [ \ ' self \ ' ] , varargs = args , keywords = kwargs , defaults = None " <nl> + } <nl> + member_method { <nl> + name : " add_inputs " <nl> + argspec : " args = [ \ ' self \ ' ] , varargs = args , keywords = kwargs , defaults = None " <nl> + } <nl> + member_method { <nl> + name : " add_output " <nl> + argspec : " args = [ \ ' self \ ' ] , varargs = args , keywords = kwargs , defaults = None " <nl> + } <nl> + member_method { <nl> + name : " add_outputs " <nl> + argspec : " args = [ \ ' self \ ' ] , varargs = args , keywords = kwargs , defaults = None " <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 0000000000000 . . b695c6cdf60ee <nl> mmm / dev / null <nl> ppp b / tensorflow / tools / api / golden / v1 / tensorflow . lite . 
- t - f - lite - converter . pbtxt <nl> <nl> + path : " tensorflow . lite . TFLiteConverter " <nl> + tf_class { <nl> + is_instance : " < class \ ' tensorflow . contrib . lite . python . lite . TFLiteConverter \ ' > " <nl> + is_instance : " < type \ ' object \ ' > " <nl> + member_method { <nl> + name : " __init__ " <nl> + argspec : " args = [ \ ' self \ ' , \ ' graph_def \ ' , \ ' input_tensors \ ' , \ ' output_tensors \ ' , \ ' input_arrays_with_shape \ ' , \ ' output_arrays \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' ] , " <nl> + } <nl> + member_method { <nl> + name : " convert " <nl> + argspec : " args = [ \ ' self \ ' ] , varargs = None , keywords = None , defaults = None " <nl> + } <nl> + member_method { <nl> + name : " from_frozen_graph " <nl> + argspec : " args = [ \ ' cls \ ' , \ ' graph_def_file \ ' , \ ' input_arrays \ ' , \ ' output_arrays \ ' , \ ' input_shapes \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' ] , " <nl> + } <nl> + member_method { <nl> + name : " from_keras_model_file " <nl> + argspec : " args = [ \ ' cls \ ' , \ ' model_file \ ' , \ ' input_arrays \ ' , \ ' input_shapes \ ' , \ ' output_arrays \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> + } <nl> + member_method { <nl> + name : " from_saved_model " <nl> + argspec : " args = [ \ ' cls \ ' , \ ' saved_model_dir \ ' , \ ' input_arrays \ ' , \ ' input_shapes \ ' , \ ' output_arrays \ ' , \ ' tag_set \ ' , \ ' signature_key \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> + } <nl> + member_method { <nl> + name : " from_session " <nl> + argspec : " args = [ \ ' cls \ ' , \ ' sess \ ' , \ ' input_tensors \ ' , \ ' output_tensors \ ' ] , varargs = None , keywords = None , defaults = None " <nl> + } <nl> + member_method { <nl> + name : " get_input_arrays " <nl> + argspec : " args = [ \ ' self \ ' ] , varargs = None , keywords = None , defaults = None " <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 0000000000000 . . da46b3f031dba <nl> mmm / dev / null <nl> ppp b / tensorflow / tools / api / golden / v1 / tensorflow . lite . - toco - converter . pbtxt <nl> <nl> + path : " tensorflow . lite . TocoConverter " <nl> + tf_class { <nl> + is_instance : " < class \ ' tensorflow . contrib . lite . python . lite . 
TocoConverter \ ' > " <nl> + is_instance : " < type \ ' object \ ' > " <nl> + member_method { <nl> + name : " __init__ " <nl> + } <nl> + member_method { <nl> + name : " from_frozen_graph " <nl> + argspec : " args = [ \ ' cls \ ' , \ ' graph_def_file \ ' , \ ' input_arrays \ ' , \ ' output_arrays \ ' , \ ' input_shapes \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' ] , " <nl> + } <nl> + member_method { <nl> + name : " from_keras_model_file " <nl> + argspec : " args = [ \ ' cls \ ' , \ ' model_file \ ' , \ ' input_arrays \ ' , \ ' input_shapes \ ' , \ ' output_arrays \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> + } <nl> + member_method { <nl> + name : " from_saved_model " <nl> + argspec : " args = [ \ ' cls \ ' , \ ' saved_model_dir \ ' , \ ' input_arrays \ ' , \ ' input_shapes \ ' , \ ' output_arrays \ ' , \ ' tag_set \ ' , \ ' signature_key \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> + } <nl> + member_method { <nl> + name : " from_session " <nl> + argspec : " args = [ \ ' cls \ ' , \ ' sess \ ' , \ ' input_tensors \ ' , \ ' output_tensors \ ' ] , varargs = None , keywords = None , defaults = None " <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 0000000000000 . . 08845553e55d3 <nl> mmm / dev / null <nl> ppp b / tensorflow / tools / api / golden / v1 / tensorflow . lite . constants . pbtxt <nl> <nl> + path : " tensorflow . lite . constants " <nl> + tf_module { <nl> + member { <nl> + name : " FLOAT " <nl> + mtype : " < type \ ' int \ ' > " <nl> + } <nl> + member { <nl> + name : " GRAPHVIZ_DOT " <nl> + mtype : " < type \ ' int \ ' > " <nl> + } <nl> + member { <nl> + name : " INT32 " <nl> + mtype : " < type \ ' int \ ' > " <nl> + } <nl> + member { <nl> + name : " INT64 " <nl> + mtype : " < type \ ' int \ ' > " <nl> + } <nl> + member { <nl> + name : " QUANTIZED_UINT8 " <nl> + mtype : " < type \ ' int \ ' > " <nl> + } <nl> + member { <nl> + name : " STRING " <nl> + mtype : " < type \ ' int \ ' > " <nl> + } <nl> + member { <nl> + name : " TFLITE " <nl> + mtype : " < type \ ' int \ ' > " <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 0000000000000 . . f5013c250be84 <nl> mmm / dev / null <nl> ppp b / tensorflow / tools / api / golden / v1 / tensorflow . lite . pbtxt <nl> <nl> + path : " tensorflow . lite " <nl> + tf_module { <nl> + member { <nl> + name : " Interpreter " <nl> + mtype : " < type \ ' type \ ' > " <nl> + } <nl> + member { <nl> + name : " OpHint " <nl> + mtype : " < type \ ' type \ ' > " <nl> + } <nl> + member { <nl> + name : " TFLiteConverter " <nl> + mtype : " < type \ ' type \ ' > " <nl> + } <nl> + member { <nl> + name : " TocoConverter " <nl> + mtype : " < type \ ' type \ ' > " <nl> + } <nl> + member { <nl> + name : " constants " <nl> + mtype : " < type \ ' module \ ' > " <nl> + } <nl> + member_method { <nl> + name : " toco_convert " <nl> + argspec : " args = [ \ ' input_data \ ' , \ ' input_tensors \ ' , \ ' output_tensors \ ' ] , varargs = args , keywords = kwargs , defaults = None " <nl> + } <nl> + } <nl> mmm a / tensorflow / tools / api / golden / v1 / tensorflow . pbtxt <nl> ppp b / tensorflow / tools / api / golden / v1 / tensorflow . 
pbtxt <nl> tf_module { <nl> name : " linalg " <nl> mtype : " < type \ ' module \ ' > " <nl> } <nl> + member { <nl> + name : " lite " <nl> + mtype : " < type \ ' module \ ' > " <nl> + } <nl> member { <nl> name : " logging " <nl> mtype : " < type \ ' module \ ' > " <nl> new file mode 100644 <nl> index 0000000000000 . . 9a22ee1f879f6 <nl> mmm / dev / null <nl> ppp b / tensorflow / tools / api / golden / v2 / tensorflow . lite . - interpreter . pbtxt <nl> <nl> + path : " tensorflow . lite . Interpreter " <nl> + tf_class { <nl> + is_instance : " < class \ ' tensorflow . contrib . lite . python . interpreter . Interpreter \ ' > " <nl> + is_instance : " < type \ ' object \ ' > " <nl> + member_method { <nl> + name : " __init__ " <nl> + argspec : " args = [ \ ' self \ ' , \ ' model_path \ ' , \ ' model_content \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' ] , " <nl> + } <nl> + member_method { <nl> + name : " allocate_tensors " <nl> + argspec : " args = [ \ ' self \ ' ] , varargs = None , keywords = None , defaults = None " <nl> + } <nl> + member_method { <nl> + name : " get_input_details " <nl> + argspec : " args = [ \ ' self \ ' ] , varargs = None , keywords = None , defaults = None " <nl> + } <nl> + member_method { <nl> + name : " get_output_details " <nl> + argspec : " args = [ \ ' self \ ' ] , varargs = None , keywords = None , defaults = None " <nl> + } <nl> + member_method { <nl> + name : " get_tensor " <nl> + argspec : " args = [ \ ' self \ ' , \ ' tensor_index \ ' ] , varargs = None , keywords = None , defaults = None " <nl> + } <nl> + member_method { <nl> + name : " get_tensor_details " <nl> + argspec : " args = [ \ ' self \ ' ] , varargs = None , keywords = None , defaults = None " <nl> + } <nl> + member_method { <nl> + name : " invoke " <nl> + argspec : " args = [ \ ' self \ ' ] , varargs = None , keywords = None , defaults = None " <nl> + } <nl> + member_method { <nl> + name : " reset_all_variables " <nl> + argspec : " args = [ \ ' self \ ' ] , varargs = None , keywords = None , defaults = None " <nl> + } <nl> + member_method { <nl> + name : " resize_tensor_input " <nl> + argspec : " args = [ \ ' self \ ' , \ ' input_index \ ' , \ ' tensor_size \ ' ] , varargs = None , keywords = None , defaults = None " <nl> + } <nl> + member_method { <nl> + name : " set_tensor " <nl> + argspec : " args = [ \ ' self \ ' , \ ' tensor_index \ ' , \ ' value \ ' ] , varargs = None , keywords = None , defaults = None " <nl> + } <nl> + member_method { <nl> + name : " tensor " <nl> + argspec : " args = [ \ ' self \ ' , \ ' tensor_index \ ' ] , varargs = None , keywords = None , defaults = None " <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 0000000000000 . . 66bf5256f6bc9 <nl> mmm / dev / null <nl> ppp b / tensorflow / tools / api / golden / v2 / tensorflow . lite . - op - hint . - op - hint - argument - tracker . pbtxt <nl> <nl> + path : " tensorflow . lite . OpHint . OpHintArgumentTracker " <nl> + tf_class { <nl> + is_instance : " < class \ ' tensorflow . contrib . lite . python . op_hint . 
OpHintArgumentTracker \ ' > " <nl> + is_instance : " < type \ ' object \ ' > " <nl> + member_method { <nl> + name : " __init__ " <nl> + argspec : " args = [ \ ' self \ ' , \ ' function_name \ ' , \ ' unique_function_id \ ' , \ ' node_name_prefix \ ' , \ ' attr_name \ ' ] , varargs = None , keywords = None , defaults = None " <nl> + } <nl> + member_method { <nl> + name : " add " <nl> + argspec : " args = [ \ ' self \ ' , \ ' arg \ ' , \ ' tag \ ' , \ ' name \ ' , \ ' aggregate \ ' , \ ' index_override \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 0000000000000 . . 8c8e55f6cfb58 <nl> mmm / dev / null <nl> ppp b / tensorflow / tools / api / golden / v2 / tensorflow . lite . - op - hint . pbtxt <nl> <nl> + path : " tensorflow . lite . OpHint " <nl> + tf_class { <nl> + is_instance : " < class \ ' tensorflow . contrib . lite . python . op_hint . OpHint \ ' > " <nl> + is_instance : " < type \ ' object \ ' > " <nl> + member { <nl> + name : " AGGREGATE_FIRST " <nl> + mtype : " < type \ ' str \ ' > " <nl> + } <nl> + member { <nl> + name : " AGGREGATE_LAST " <nl> + mtype : " < type \ ' str \ ' > " <nl> + } <nl> + member { <nl> + name : " AGGREGATE_STACK " <nl> + mtype : " < type \ ' str \ ' > " <nl> + } <nl> + member { <nl> + name : " FUNCTION_AGGREGATE_ATTR " <nl> + mtype : " < type \ ' str \ ' > " <nl> + } <nl> + member { <nl> + name : " FUNCTION_INPUT_INDEX_ATTR " <nl> + mtype : " < type \ ' str \ ' > " <nl> + } <nl> + member { <nl> + name : " FUNCTION_NAME_ATTR " <nl> + mtype : " < type \ ' str \ ' > " <nl> + } <nl> + member { <nl> + name : " FUNCTION_OUTPUT_INDEX_ATTR " <nl> + mtype : " < type \ ' str \ ' > " <nl> + } <nl> + member { <nl> + name : " FUNCTION_SORT_INDEX_ATTR " <nl> + mtype : " < type \ ' str \ ' > " <nl> + } <nl> + member { <nl> + name : " FUNCTION_UUID_ATTR " <nl> + mtype : " < type \ ' str \ ' > " <nl> + } <nl> + member { <nl> + name : " OpHintArgumentTracker " <nl> + mtype : " < type \ ' type \ ' > " <nl> + } <nl> + member { <nl> + name : " TFLITE_INPUT_INDICES " <nl> + mtype : " < type \ ' str \ ' > " <nl> + } <nl> + member_method { <nl> + name : " __init__ " <nl> + argspec : " args = [ \ ' self \ ' , \ ' function_name \ ' ] , varargs = None , keywords = kwargs , defaults = None " <nl> + } <nl> + member_method { <nl> + name : " add_input " <nl> + argspec : " args = [ \ ' self \ ' ] , varargs = args , keywords = kwargs , defaults = None " <nl> + } <nl> + member_method { <nl> + name : " add_inputs " <nl> + argspec : " args = [ \ ' self \ ' ] , varargs = args , keywords = kwargs , defaults = None " <nl> + } <nl> + member_method { <nl> + name : " add_output " <nl> + argspec : " args = [ \ ' self \ ' ] , varargs = args , keywords = kwargs , defaults = None " <nl> + } <nl> + member_method { <nl> + name : " add_outputs " <nl> + argspec : " args = [ \ ' self \ ' ] , varargs = args , keywords = kwargs , defaults = None " <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 0000000000000 . . b695c6cdf60ee <nl> mmm / dev / null <nl> ppp b / tensorflow / tools / api / golden / v2 / tensorflow . lite . - t - f - lite - converter . pbtxt <nl> <nl> + path : " tensorflow . lite . TFLiteConverter " <nl> + tf_class { <nl> + is_instance : " < class \ ' tensorflow . contrib . lite . python . lite . 
TFLiteConverter \ ' > " <nl> + is_instance : " < type \ ' object \ ' > " <nl> + member_method { <nl> + name : " __init__ " <nl> + argspec : " args = [ \ ' self \ ' , \ ' graph_def \ ' , \ ' input_tensors \ ' , \ ' output_tensors \ ' , \ ' input_arrays_with_shape \ ' , \ ' output_arrays \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' ] , " <nl> + } <nl> + member_method { <nl> + name : " convert " <nl> + argspec : " args = [ \ ' self \ ' ] , varargs = None , keywords = None , defaults = None " <nl> + } <nl> + member_method { <nl> + name : " from_frozen_graph " <nl> + argspec : " args = [ \ ' cls \ ' , \ ' graph_def_file \ ' , \ ' input_arrays \ ' , \ ' output_arrays \ ' , \ ' input_shapes \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' ] , " <nl> + } <nl> + member_method { <nl> + name : " from_keras_model_file " <nl> + argspec : " args = [ \ ' cls \ ' , \ ' model_file \ ' , \ ' input_arrays \ ' , \ ' input_shapes \ ' , \ ' output_arrays \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> + } <nl> + member_method { <nl> + name : " from_saved_model " <nl> + argspec : " args = [ \ ' cls \ ' , \ ' saved_model_dir \ ' , \ ' input_arrays \ ' , \ ' input_shapes \ ' , \ ' output_arrays \ ' , \ ' tag_set \ ' , \ ' signature_key \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> + } <nl> + member_method { <nl> + name : " from_session " <nl> + argspec : " args = [ \ ' cls \ ' , \ ' sess \ ' , \ ' input_tensors \ ' , \ ' output_tensors \ ' ] , varargs = None , keywords = None , defaults = None " <nl> + } <nl> + member_method { <nl> + name : " get_input_arrays " <nl> + argspec : " args = [ \ ' self \ ' ] , varargs = None , keywords = None , defaults = None " <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 0000000000000 . . da46b3f031dba <nl> mmm / dev / null <nl> ppp b / tensorflow / tools / api / golden / v2 / tensorflow . lite . - toco - converter . pbtxt <nl> <nl> + path : " tensorflow . lite . TocoConverter " <nl> + tf_class { <nl> + is_instance : " < class \ ' tensorflow . contrib . lite . python . lite . 
TocoConverter \ ' > " <nl> + is_instance : " < type \ ' object \ ' > " <nl> + member_method { <nl> + name : " __init__ " <nl> + } <nl> + member_method { <nl> + name : " from_frozen_graph " <nl> + argspec : " args = [ \ ' cls \ ' , \ ' graph_def_file \ ' , \ ' input_arrays \ ' , \ ' output_arrays \ ' , \ ' input_shapes \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' ] , " <nl> + } <nl> + member_method { <nl> + name : " from_keras_model_file " <nl> + argspec : " args = [ \ ' cls \ ' , \ ' model_file \ ' , \ ' input_arrays \ ' , \ ' input_shapes \ ' , \ ' output_arrays \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> + } <nl> + member_method { <nl> + name : " from_saved_model " <nl> + argspec : " args = [ \ ' cls \ ' , \ ' saved_model_dir \ ' , \ ' input_arrays \ ' , \ ' input_shapes \ ' , \ ' output_arrays \ ' , \ ' tag_set \ ' , \ ' signature_key \ ' ] , varargs = None , keywords = None , defaults = [ \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' None \ ' , \ ' None \ ' ] , " <nl> + } <nl> + member_method { <nl> + name : " from_session " <nl> + argspec : " args = [ \ ' cls \ ' , \ ' sess \ ' , \ ' input_tensors \ ' , \ ' output_tensors \ ' ] , varargs = None , keywords = None , defaults = None " <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 0000000000000 . . 08845553e55d3 <nl> mmm / dev / null <nl> ppp b / tensorflow / tools / api / golden / v2 / tensorflow . lite . constants . pbtxt <nl> <nl> + path : " tensorflow . lite . constants " <nl> + tf_module { <nl> + member { <nl> + name : " FLOAT " <nl> + mtype : " < type \ ' int \ ' > " <nl> + } <nl> + member { <nl> + name : " GRAPHVIZ_DOT " <nl> + mtype : " < type \ ' int \ ' > " <nl> + } <nl> + member { <nl> + name : " INT32 " <nl> + mtype : " < type \ ' int \ ' > " <nl> + } <nl> + member { <nl> + name : " INT64 " <nl> + mtype : " < type \ ' int \ ' > " <nl> + } <nl> + member { <nl> + name : " QUANTIZED_UINT8 " <nl> + mtype : " < type \ ' int \ ' > " <nl> + } <nl> + member { <nl> + name : " STRING " <nl> + mtype : " < type \ ' int \ ' > " <nl> + } <nl> + member { <nl> + name : " TFLITE " <nl> + mtype : " < type \ ' int \ ' > " <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 0000000000000 . . f5013c250be84 <nl> mmm / dev / null <nl> ppp b / tensorflow / tools / api / golden / v2 / tensorflow . lite . pbtxt <nl> <nl> + path : " tensorflow . lite " <nl> + tf_module { <nl> + member { <nl> + name : " Interpreter " <nl> + mtype : " < type \ ' type \ ' > " <nl> + } <nl> + member { <nl> + name : " OpHint " <nl> + mtype : " < type \ ' type \ ' > " <nl> + } <nl> + member { <nl> + name : " TFLiteConverter " <nl> + mtype : " < type \ ' type \ ' > " <nl> + } <nl> + member { <nl> + name : " TocoConverter " <nl> + mtype : " < type \ ' type \ ' > " <nl> + } <nl> + member { <nl> + name : " constants " <nl> + mtype : " < type \ ' module \ ' > " <nl> + } <nl> + member_method { <nl> + name : " toco_convert " <nl> + argspec : " args = [ \ ' input_data \ ' , \ ' input_tensors \ ' , \ ' output_tensors \ ' ] , varargs = args , keywords = kwargs , defaults = None " <nl> + } <nl> + } <nl> mmm a / tensorflow / tools / api / golden / v2 / tensorflow . pbtxt <nl> ppp b / tensorflow / tools / api / golden / v2 / tensorflow . 
pbtxt <nl> tf_module { <nl> name : " linalg " <nl> mtype : " < type \ ' module \ ' > " <nl> } <nl> + member { <nl> + name : " lite " <nl> + mtype : " < type \ ' module \ ' > " <nl> + } <nl> member { <nl> name : " logging " <nl> mtype : " < type \ ' module \ ' > " <nl> mmm a / tensorflow / tools / api / lib / python_object_to_proto_visitor . py <nl> ppp b / tensorflow / tools / api / lib / python_object_to_proto_visitor . py <nl> <nl> _NORMALIZE_TYPE [ " < class ' abc . ABCMeta ' > " ] = " < type ' type ' > " <nl> _NORMALIZE_ISINSTANCE = { <nl> " < class " <nl> + " ' tensorflow . contrib . lite . python . op_hint . OpHint . OpHintArgumentTracker ' > " : # pylint : disable = line - too - long <nl> + " < class " <nl> + " ' tensorflow . contrib . lite . python . op_hint . OpHintArgumentTracker ' > " , <nl> + " < class " <nl> " ' tensorflow . python . training . monitored_session . _MonitoredSession . StepContext ' > " : # pylint : disable = line - too - long <nl> " < class " <nl> " ' tensorflow . python . training . monitored_session . StepContext ' > " , <nl>
|
Move TensorFlow Lite Python into tensorflow . lite
|
tensorflow/tensorflow
|
4427005495404b944fafd3b15b92050cf419d443
|
2018-10-31T16:25:19Z
|
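The TensorFlow commit above exposes the TF Lite Python modules through @tf_export decorators, which register each symbol under a public path such as tensorflow.lite.Interpreter; the API generator then emits the matching __init__.py files, and the golden .pbtxt snapshots pin the resulting public surface. As a loose C++ analogue of that decorator-driven export registry, the sketch below registers a symbol under a public path at static-initialization time; the ApiRegistry class and all names here are hypothetical, not TensorFlow's real machinery.

```cpp
#include <functional>
#include <iostream>
#include <map>
#include <string>

// Hypothetical registry mapping a public API path (e.g. "lite.toco_convert")
// to an implementation, the way @tf_export maps Python symbols into the
// generated tensorflow.* namespace.
class ApiRegistry {
 public:
  static ApiRegistry& instance() {
    static ApiRegistry registry;
    return registry;
  }
  void add(const std::string& path, std::function<void()> fn) {
    exported_[path] = std::move(fn);
  }
  void call(const std::string& path) const {
    auto it = exported_.find(path);
    if (it != exported_.end()) it->second();
  }

 private:
  std::map<std::string, std::function<void()>> exported_;
};

// Registration happens at static-initialization time, so merely linking
// the translation unit makes the symbol visible under its public path.
struct ApiRegistrar {
  ApiRegistrar(const std::string& path, std::function<void()> fn) {
    ApiRegistry::instance().add(path, std::move(fn));
  }
};

static void TocoConvert() { std::cout << "toco_convert called\n"; }
static ApiRegistrar toco_registrar("lite.toco_convert", TocoConvert);

int main() {
  ApiRegistry::instance().call("lite.toco_convert");
  return 0;
}
```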
mmm a / tensorflow / core / kernels / matrix_set_diag_op . cc <nl> ppp b / tensorflow / core / kernels / matrix_set_diag_op . cc <nl> limitations under the License . <nl> <nl> # define EIGEN_USE_THREADS <nl> <nl> - # if GOOGLE_CUDA <nl> + # if GOOGLE_CUDA | | TENSORFLOW_USE_ROCM <nl> # define EIGEN_USE_GPU <nl> - # endif / / GOOGLE_CUDA <nl> + # endif / / GOOGLE_CUDA | | TENSORFLOW_USE_ROCM <nl> <nl> # include " tensorflow / core / kernels / matrix_set_diag_op . h " <nl> <nl> struct MatrixSetDiag < CPUDevice , T > { <nl> <nl> } / / namespace functor <nl> <nl> - # if GOOGLE_CUDA <nl> + # if GOOGLE_CUDA | | TENSORFLOW_USE_ROCM <nl> <nl> / / Forward declarations of the functor specializations for GPU . <nl> namespace functor { <nl> TF_CALL_complex128 ( REGISTER_MATRIX_SET_DIAG_GPU ) ; <nl> TF_CALL_GPU_NUMBER_TYPES ( REGISTER_BATCH_MATRIX_SET_DIAG_GPU ) ; <nl> # undef REGISTER_BATCH_MATRIX_SET_DIAG_GPU <nl> <nl> - # endif / / GOOGLE_CUDA <nl> + # endif / / GOOGLE_CUDA | | TENSORFLOW_USE_ROCM <nl> <nl> } / / namespace tensorflow <nl> mmm a / tensorflow / core / kernels / matrix_set_diag_op_gpu . cu . cc <nl> ppp b / tensorflow / core / kernels / matrix_set_diag_op_gpu . cu . cc <nl> See the License for the specific language governing permissions and <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> - # if GOOGLE_CUDA <nl> + # if GOOGLE_CUDA | | TENSORFLOW_USE_ROCM <nl> <nl> # define EIGEN_USE_GPU <nl> <nl> __global__ void MatrixSetDiagKernel ( const int num_threads , const int m , <nl> const int upper_diag_index , <nl> const Scalar * diag_ptr , <nl> Scalar * output_ptr ) { <nl> - CUDA_1D_KERNEL_LOOP ( index , num_threads ) { <nl> + GPU_1D_KERNEL_LOOP ( index , num_threads ) { <nl> const int batch_and_diag_index = index / max_diag_len ; <nl> const int index_in_the_diagonal = <nl> index - batch_and_diag_index * max_diag_len ; <nl> __global__ void MatrixCopyInputAndSetDiagKernel ( <nl> const int max_diag_len , const int lower_diag_index , <nl> const int upper_diag_index , const Scalar * input_ptr , const Scalar * diag_ptr , <nl> Scalar * output_ptr ) { <nl> - CUDA_1D_KERNEL_LOOP ( index , num_threads ) { <nl> + GPU_1D_KERNEL_LOOP ( index , num_threads ) { <nl> const int batch_and_row_index = index / n ; <nl> const int col = index - batch_and_row_index * n ; <nl> const int batch = batch_and_row_index / m ; <nl> struct MatrixSetDiag < GPUDevice , Scalar > { <nl> if ( input . data ( ) = = output . data ( ) ) { <nl> GpuLaunchConfig config = <nl> GetGpuLaunchConfig ( batch_size * num_diags * max_diag_len , device ) ; <nl> - TF_CHECK_OK ( CudaLaunchKernel ( <nl> + TF_CHECK_OK ( GpuLaunchKernel ( <nl> MatrixSetDiagKernel < Scalar > , config . block_count , <nl> config . thread_per_block , 0 , device . stream ( ) , <nl> config . virtual_thread_count , m , n , num_diags , max_diag_len , <nl> upper_diag_index , diag . data ( ) , output . data ( ) ) ) ; <nl> } else { <nl> - GpuLaunchConfig config = GetCudaLaunchConfig ( batch_size * m * n , device ) ; <nl> - TF_CHECK_OK ( CudaLaunchKernel ( <nl> + GpuLaunchConfig config = GetGpuLaunchConfig ( batch_size * m * n , device ) ; <nl> + TF_CHECK_OK ( GpuLaunchKernel ( <nl> MatrixCopyInputAndSetDiagKernel < Scalar > , config . block_count , <nl> config . thread_per_block , 0 , device . stream ( ) , <nl> config . 
virtual_thread_count , m , n , num_diags , max_diag_len , <nl> TF_CALL_complex128 ( DEFINE_GPU_SPEC ) ; <nl> } / / namespace functor <nl> } / / namespace tensorflow <nl> <nl> - # endif / / GOOGLE_CUDA <nl> + # endif / / GOOGLE_CUDA | | TENSORFLOW_USE_ROCM <nl>
|
Merge pull request from ROCmSoftwarePlatform : google_upstream_matrix_set_diag
|
tensorflow/tensorflow
|
0ff7955a0c1a42e2767afb0a5cc202dfe4d6ff19
|
2019-06-18T08:42:03Z
|
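The ROCm commit above widens each #if GOOGLE_CUDA guard to GOOGLE_CUDA || TENSORFLOW_USE_ROCM and swaps CUDA-specific helper names (CUDA_1D_KERNEL_LOOP, CudaLaunchKernel, GetCudaLaunchConfig) for vendor-neutral Gpu equivalents. A header-style sketch of the aliasing pattern that makes such renames possible is below; the MY_USE_GPU macro and gpu* alias names are illustrative, not TensorFlow's actual gpu_kernel_helper.h.

```cpp
// One macro gates GPU-only code for both toolchains, and neutral gpu*
// names alias whichever runtime is active.
#if defined(GOOGLE_CUDA) || defined(TENSORFLOW_USE_ROCM)
#define MY_USE_GPU 1
#endif

#if MY_USE_GPU

#if defined(GOOGLE_CUDA)
#include <cuda_runtime.h>
using gpuStream_t = cudaStream_t;
using gpuError_t = cudaError_t;
#define gpuGetLastError cudaGetLastError
#elif defined(TENSORFLOW_USE_ROCM)
#include <hip/hip_runtime.h>
using gpuStream_t = hipStream_t;
using gpuError_t = hipError_t;
#define gpuGetLastError hipGetLastError
#endif

// Kernel code written against the gpu* aliases compiles unchanged for
// either backend, which is why the diff can rename CUDA_1D_KERNEL_LOOP
// to GPU_1D_KERNEL_LOOP and CudaLaunchKernel to GpuLaunchKernel without
// touching the kernel bodies.

#endif  // MY_USE_GPU
```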
mmm a / libraries / chain / include / eosio / chain / authority . hpp <nl> ppp b / libraries / chain / include / eosio / chain / authority . hpp <nl> struct authority { <nl> } <nl> } <nl> <nl> + authority ( permission_level p , uint32_t delay_sec = 0 ) <nl> + : threshold ( 1 ) , accounts ( { { p , 1 } } ) <nl> + { <nl> + if ( delay_sec > 0 ) { <nl> + threshold = 2 ; <nl> + waits . push_back ( wait_weight { delay_sec , 1 } ) ; <nl> + } <nl> + } <nl> + <nl> authority ( uint32_t t , vector < key_weight > k , vector < permission_level_weight > p = { } , vector < wait_weight > w = { } ) <nl> : threshold ( t ) , keys ( move ( k ) ) , accounts ( move ( p ) ) , waits ( move ( w ) ) { } <nl> authority ( ) { } <nl> mmm a / programs / cleos / main . cpp <nl> ppp b / programs / cleos / main . cpp <nl> Usage : . / cleos create account [ OPTIONS ] creator name OwnerKey ActiveKey <nl> # include < fc / io / console . hpp > <nl> # include < fc / exception / exception . hpp > <nl> # include < fc / variant_object . hpp > <nl> + # include < fc / static_variant . hpp > <nl> <nl> # include < eosio / chain / name . hpp > <nl> # include < eosio / chain / config . hpp > <nl> using namespace eosio : : client : : http ; <nl> using namespace eosio : : client : : localize ; <nl> using namespace eosio : : client : : config ; <nl> using namespace boost : : filesystem ; <nl> + using auth_type = fc : : static_variant < public_key_type , permission_level > ; <nl> <nl> FC_DECLARE_EXCEPTION ( explained_exception , 9000000 , " explained exception , see error log " ) ; <nl> FC_DECLARE_EXCEPTION ( localized_exception , 10000000 , " an error occured " ) ; <nl> void send_transaction ( signed_transaction & trx , int32_t extra_kcpu , packed_trans <nl> } <nl> } <nl> <nl> - chain : : action create_newaccount ( const name & creator , const name & newaccount , public_key_type owner , public_key_type active ) { <nl> + chain : : permission_level to_permission_level ( const std : : string & s ) { <nl> + auto at_pos = s . find ( ' @ ' ) ; <nl> + return permission_level { s . substr ( 0 , at_pos ) , s . substr ( at_pos + 1 ) } ; <nl> + } <nl> + <nl> + chain : : action create_newaccount ( const name & creator , const name & newaccount , auth_type owner , auth_type active ) { <nl> return action { <nl> get_account_permissions ( tx_permission , { creator , config : : active_name } ) , <nl> eosio : : chain : : newaccount { <nl> . creator = creator , <nl> . name = newaccount , <nl> - . owner = eosio : : chain : : authority { 1 , { { owner , 1 } } , { } } , <nl> - . active = eosio : : chain : : authority { 1 , { { active , 1 } } , { } } <nl> + . owner = owner . contains < public_key_type > ( ) ? authority ( owner . get < public_key_type > ( ) ) : authority ( owner . get < permission_level > ( ) ) , <nl> + . active = active . contains < public_key_type > ( ) ? authority ( active . get < public_key_type > ( ) ) : authority ( active . 
get < permission_level > ( ) ) <nl> } <nl> } ; <nl> } <nl> struct create_account_subcommand { <nl> ) ; <nl> createAccount - > add_option ( " creator " , creator , localized ( " The name of the account creating the new account " ) ) - > required ( ) ; <nl> createAccount - > add_option ( " name " , account_name , localized ( " The name of the new account " ) ) - > required ( ) ; <nl> - createAccount - > add_option ( " OwnerKey " , owner_key_str , localized ( " The owner public key for the new account " ) ) - > required ( ) ; <nl> - createAccount - > add_option ( " ActiveKey " , active_key_str , localized ( " The active public key for the new account " ) ) ; <nl> + createAccount - > add_option ( " OwnerKey " , owner_key_str , localized ( " The owner public key or permission level for the new account " ) ) - > required ( ) ; <nl> + createAccount - > add_option ( " ActiveKey " , active_key_str , localized ( " The active public key or permission level for the new account " ) ) ; <nl> <nl> if ( ! simple ) { <nl> createAccount - > add_option ( " - - stake - net " , stake_net , <nl> struct create_account_subcommand { <nl> add_standard_transaction_options ( createAccount , " creator @ active " ) ; <nl> <nl> createAccount - > set_callback ( [ this ] { <nl> - if ( ! active_key_str . size ( ) ) <nl> - active_key_str = owner_key_str ; <nl> - public_key_type owner_key , active_key ; <nl> - try { <nl> - owner_key = public_key_type ( owner_key_str ) ; <nl> - } EOS_RETHROW_EXCEPTIONS ( public_key_type_exception , " Invalid owner public key : $ { public_key } " , ( " public_key " , owner_key_str ) ) ; <nl> - try { <nl> - active_key = public_key_type ( active_key_str ) ; <nl> - } EOS_RETHROW_EXCEPTIONS ( public_key_type_exception , " Invalid active public key : $ { public_key } " , ( " public_key " , active_key_str ) ) ; <nl> - auto create = create_newaccount ( creator , account_name , owner_key , active_key ) ; <nl> + auth_type owner , active ; <nl> + <nl> + if ( owner_key_str . find ( ' @ ' ) ! = string : : npos ) { <nl> + try { <nl> + owner = to_permission_level ( owner_key_str ) ; <nl> + } EOS_RETHROW_EXCEPTIONS ( explained_exception , " Invalid owner permission level : $ { permission } " , ( " permission " , owner_key_str ) ) <nl> + } else { <nl> + try { <nl> + owner = public_key_type ( owner_key_str ) ; <nl> + } EOS_RETHROW_EXCEPTIONS ( public_key_type_exception , " Invalid owner public key : $ { public_key } " , ( " public_key " , owner_key_str ) ) ; <nl> + } <nl> + <nl> + if ( active_key_str . empty ( ) ) { <nl> + active = owner ; <nl> + } else if ( active_key_str . find ( ' @ ' ) ! = string : : npos ) { <nl> + try { <nl> + active = to_permission_level ( active_key_str ) ; <nl> + } EOS_RETHROW_EXCEPTIONS ( explained_exception , " Invalid active permission level : $ { permission } " , ( " permission " , active_key_str ) ) <nl> + } else { <nl> + try { <nl> + active = public_key_type ( active_key_str ) ; <nl> + } EOS_RETHROW_EXCEPTIONS ( public_key_type_exception , " Invalid active public key : $ { public_key } " , ( " public_key " , active_key_str ) ) ; <nl> + } <nl> + <nl> + auto create = create_newaccount ( creator , account_name , owner , active ) ; <nl> if ( ! simple ) { <nl> EOSC_ASSERT ( buy_ram_eos . size ( ) | | buy_ram_bytes_in_kbytes | | buy_ram_bytes , " ERROR : One of - - buy - ram , - - buy - ram - kbytes or - - buy - ram - bytes should have non - zero value " ) ; <nl> EOSC_ASSERT ( ! buy_ram_bytes_in_kbytes | | ! 
buy_ram_bytes , " ERROR : - - buy - ram - kbytes and - - buy - ram - bytes cannot be set at the same time " ) ; <nl>
|
Merge pull request from conr2d / newaccount
|
EOSIO/eos
|
4a5378e19676880c40e865560066a207c100facd
|
2019-01-15T23:05:38Z
|
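The EOSIO commit above lets cleos create account accept either a public key or an account@permission string for the owner and active authorities, and adds an authority constructor that, given a delay, raises the threshold to 2 and adds a wait_weight so the permission weight (1) alone cannot satisfy the authority before the delay elapses. The sketch below mirrors the parsing rule and the constructor with simplified stand-ins for the eosio::chain types.

```cpp
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

// Simplified stand-ins for the chain types in the diff above.
struct permission_level { std::string actor, permission; };
struct permission_level_weight { permission_level perm; uint16_t weight; };
struct wait_weight { uint32_t wait_sec; uint16_t weight; };

struct authority {
  uint32_t threshold = 1;
  std::vector<permission_level_weight> accounts;
  std::vector<wait_weight> waits;

  // Mirrors the new constructor: with a delay, the threshold rises to 2
  // so the account weight (1) is only satisfied together with the wait
  // weight (1), i.e. after delay_sec has elapsed.
  explicit authority(permission_level p, uint32_t delay_sec = 0) {
    accounts.push_back({std::move(p), 1});
    if (delay_sec > 0) {
      threshold = 2;
      waits.push_back({delay_sec, 1});
    }
  }
};

// Same parsing rule as cleos: everything before '@' is the actor,
// everything after is the permission name.
permission_level to_permission_level(const std::string& s) {
  const auto at_pos = s.find('@');
  return {s.substr(0, at_pos), s.substr(at_pos + 1)};
}

int main() {
  const auto level = to_permission_level("alice@active");
  const authority auth(level, 3600);
  std::cout << level.actor << "@" << level.permission
            << " threshold=" << auth.threshold << "\n";  // threshold=2
  return 0;
}
```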
mmm a / configure . py <nl> ppp b / configure . py <nl> def check_bazel_version ( min_version ) : <nl> print ( ' Make sure you are running at least bazel % s ' % min_version ) <nl> return curr_version <nl> <nl> + print ( " You have bazel % s installed . " % curr_version ) <nl> + <nl> if curr_version_int < min_version_int : <nl> print ( ' Please upgrade your bazel installation to version % s or higher to ' <nl> ' build TensorFlow ! ' % min_version ) <nl> mmm a / tensorflow / core / kernels / BUILD <nl> ppp b / tensorflow / core / kernels / BUILD <nl> tf_kernel_library ( <nl> " strided_slice_op . h " , <nl> " strided_slice_op_impl . h " , <nl> " strided_slice_op_gpu . cu . cc " , <nl> - " slice_op_gpu . cu . cc " , <nl> ] , <nl> deps = [ <nl> " : bounds_check " , <nl> mmm a / tensorflow / python / BUILD <nl> ppp b / tensorflow / python / BUILD <nl> cuda_py_test ( <nl> " : math_ops " , <nl> " / / third_party / py / numpy " , <nl> ] , <nl> + tags = [ " no_windows_gpu " ] , <nl> ) <nl> <nl> cuda_py_test ( <nl> cuda_py_test ( <nl> " : variables " , <nl> " / / third_party / py / numpy " , <nl> ] , <nl> + tags = [ " no_windows_gpu " ] , <nl> ) <nl> <nl> cuda_py_test ( <nl> cuda_py_test ( <nl> " : special_math_ops " , <nl> " / / third_party / py / numpy " , <nl> ] , <nl> + tags = [ " no_windows_gpu " ] , <nl> ) <nl> <nl> py_library ( <nl> mmm a / tensorflow / python / kernel_tests / BUILD <nl> ppp b / tensorflow / python / kernel_tests / BUILD <nl> cuda_py_test ( <nl> " / / tensorflow / python : platform " , <nl> ] , <nl> shard_count = 5 , <nl> + tags = [ " no_windows_gpu " ] , <nl> ) <nl> <nl> tf_py_test ( <nl> cuda_py_test ( <nl> " / / tensorflow / python : linalg_ops " , <nl> " / / tensorflow / python : math_ops " , <nl> ] , <nl> + tags = [ " no_windows_gpu " ] , <nl> ) <nl> <nl> cuda_py_test ( <nl> cuda_py_test ( <nl> " / / tensorflow / python : math_ops " , <nl> ] , <nl> shard_count = 4 , <nl> + tags = [ " no_windows_gpu " ] , <nl> ) <nl> <nl> cuda_py_test ( <nl> cuda_py_test ( <nl> " / / tensorflow / python : linalg_ops " , <nl> ] , <nl> shard_count = 20 , <nl> + tags = [ " no_windows_gpu " ] , <nl> ) <nl> <nl> cuda_py_test ( <nl> tf_py_test ( <nl> " / / tensorflow / python : variables " , <nl> ] , <nl> shard_count = 3 , <nl> + tags = [ " no_windows_gpu " ] , <nl> ) <nl> <nl> tf_py_test ( <nl> mmm a / tensorflow / stream_executor / BUILD <nl> ppp b / tensorflow / stream_executor / BUILD <nl> cc_library ( <nl> exclude = [ " cuda / cuda_platform_id . cc " ] , <nl> ) , <nl> ) , <nl> + copts = select ( { <nl> + " / / tensorflow : windows " : [ " / DNOGDI " ] , <nl> + " / / conditions : default " : [ ] , <nl> + } ) , <nl> linkopts = select ( { <nl> " / / tensorflow : freebsd " : [ ] , <nl> " / / conditions : default " : [ " - ldl " ] , <nl> mmm a / tensorflow / tensorflow . bzl <nl> ppp b / tensorflow / tensorflow . bzl <nl> WIN_COPTS = [ <nl> " / Iexternal / gemmlowp " , <nl> " / wd4018 " , # - Wno - sign - compare <nl> " / U_HAS_EXCEPTIONS " , " / D_HAS_EXCEPTIONS = 1 " , " / EHsc " , # - fno - exceptions <nl> + " / DNOGDI " , <nl> ] <nl> <nl> # LINT . IfChange <nl> mmm a / tensorflow / tools / ci_build / windows / bazel / bazel_test_lib . sh <nl> ppp b / tensorflow / tools / ci_build / windows / bazel / bazel_test_lib . sh <nl> function run_configure_for_gpu_build { <nl> export TF_NEED_CUDA = 1 <nl> export TF_CUDA_VERSION = 8 . 0 <nl> export CUDA_TOOLKIT_PATH = " C : / Program Files / NVIDIA GPU Computing Toolkit / CUDA / v8 . 
0 " <nl> - export TF_CUDNN_VERSION = 5 <nl> + export TF_CUDNN_VERSION = 6 . 0 <nl> export CUDNN_INSTALL_PATH = " C : / tools / cuda " <nl> - export TF_CUDA_COMPUTE_CAPABILITIES = " 3 . 5 , 5 . 2 " <nl> + export TF_CUDA_COMPUTE_CAPABILITIES = " 3 . 7 " <nl> if [ - z " $ TF_ENABLE_XLA " ] ; then <nl> export TF_ENABLE_XLA = 0 <nl> fi <nl> function run_configure_for_gpu_build { <nl> export TF_NEED_GCP = 0 <nl> export TF_NEED_HDFS = 0 <nl> export TF_NEED_OPENCL = 0 <nl> + <nl> + # TODO ( pcloudy ) : Remove this after TensorFlow uses its own CRSOOTOOL <nl> + # for GPU build on Windows <nl> + export USE_MSVC_WRAPPER = 1 <nl> + <nl> echo " " | . / configure <nl> } <nl> <nl> mmm a / tensorflow / tools / ci_build / windows / bazel / common_env . sh <nl> ppp b / tensorflow / tools / ci_build / windows / bazel / common_env . sh <nl> mkdir - p " $ TMPDIR " <nl> export BAZEL_SH = $ { BAZEL_SH : - " C : / tools / msys64 / usr / bin / bash " } <nl> <nl> # Set Python path for . / configure <nl> - export PYTHON_BIN_PATH = " C : / Program Files / Anaconda3 / python " <nl> + export PYTHON_BIN_PATH = " C : / Program Files / Anaconda3 / python . exe " <nl> export PYTHON_LIB_PATH = " C : / Program Files / Anaconda3 / lib / site - packages " <nl> <nl> # Set Python path for cc_configure . bzl <nl> - export BAZEL_PYTHON = " C : / Program Files / Anaconda3 / python " <nl> + export BAZEL_PYTHON = " C : / Program Files / Anaconda3 / python . exe " <nl> <nl> # Set Visual Studio path <nl> export BAZEL_VS = " C : / Program Files ( x86 ) / Microsoft Visual Studio 14 . 0 " <nl> mmm a / tensorflow / tools / ci_build / windows / gpu / pip / build_tf_windows . sh <nl> ppp b / tensorflow / tools / ci_build / windows / gpu / pip / build_tf_windows . sh <nl> run_configure_for_gpu_build <nl> <nl> clean_output_base <nl> <nl> - bazel build - c opt - - config = win - cuda $ BUILD_OPTS tensorflow / tools / pip_package : build_pip_package | | exit $ ? <nl> + bazel build - c opt $ BUILD_OPTS tensorflow / tools / pip_package : build_pip_package | | exit $ ? <nl> <nl> # Create a python test directory to avoid package name conflict <nl> PY_TEST_DIR = " py_test_dir " <nl> reinstall_tensorflow_pip $ { PIP_NAME } <nl> # Define no_tensorflow_py_deps = true so that every py_test has no deps anymore , <nl> # which will result testing system installed tensorflow <nl> # GPU tests are very flaky when running concurrently , so set local_test_jobs = 1 <nl> - bazel test - c opt - - config = win - cuda $ BUILD_OPTS - k - - test_output = errors \ <nl> + bazel test - c opt $ BUILD_OPTS - k - - test_output = errors \ <nl> - - define = no_tensorflow_py_deps = true - - test_lang_filters = py \ <nl> - - - test_tag_filters = - no_pip , - no_windows , - no_windows_gpu \ <nl> - - - build_tag_filters = - no_pip , - no_windows , - no_windows_gpu \ <nl> + - - test_tag_filters = - no_pip , - no_windows , - no_windows_gpu , - no_gpu , - no_pip_gpu \ <nl> + - - build_tag_filters = - no_pip , - no_windows , - no_windows_gpu , - no_gpu , - no_pip_gpu \ <nl> - - local_test_jobs = 1 - - build_tests_only / / $ { PY_TEST_DIR } / tensorflow / python / . . . <nl>
|
Make Windows Bazel GPU build work again ( )
|
tensorflow/tensorflow
|
d74f65bac566d78a781116789f7262d94f929830
|
2017-08-08T20:37:02Z
|
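Among the Windows fixes above, the /DNOGDI define is the subtle one: <windows.h> otherwise drags in <wingdi.h>, whose ERROR macro (defined to 0) clobbers any identifier named ERROR, such as a logging severity. The snippet below is a minimal illustration, assuming that ERROR clash is the motivation here; the enum is a stand-in, not TensorFlow's logging code.

```cpp
// What /DNOGDI does on the compiler command line: defining NOGDI before
// <windows.h> keeps the GDI headers (and their ERROR macro) out.
#define NOGDI
#ifdef _WIN32
#include <windows.h>
#endif

#include <iostream>

// Without NOGDI on Windows, the next line would expand to "... 0 = 2 ..."
// and fail to compile, because wingdi.h #defines ERROR as 0.
enum LogSeverity { INFO = 0, WARNING = 1, ERROR = 2, FATAL = 3 };

int main() {
  std::cout << "ERROR severity = " << ERROR << "\n";  // prints 2
  return 0;
}
```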
mmm a / tests / src / cf_utility / test . cpp <nl> ppp b / tests / src / cf_utility / test . cpp <nl> TEST_CASE ( " initialize " ) { <nl> krbn : : thread_utility : : register_main_thread ( ) ; <nl> } <nl> <nl> - TEST_CASE ( " run_loop_thread " ) { <nl> - for ( int i = 0 ; i < 5000 ; + + i ) { <nl> - __block int count1 = 0 ; <nl> - __block int count2 = 0 ; <nl> + namespace { <nl> + class run_loop_thread_test final { <nl> + public : <nl> + run_loop_thread_test ( void ) : count1_ ( 0 ) , <nl> + count2_ ( 0 ) { <nl> + for ( int i = 0 ; i < 5000 ; + + i ) { <nl> + count1_ = 0 ; <nl> + count2_ = 0 ; <nl> <nl> - auto thread1 = std : : make_shared < krbn : : cf_utility : : run_loop_thread > ( ) ; <nl> - auto thread2 = std : : make_shared < krbn : : cf_utility : : run_loop_thread > ( ) ; <nl> + thread1_ = std : : make_shared < krbn : : cf_utility : : run_loop_thread > ( ) ; <nl> + thread2_ = std : : make_shared < krbn : : cf_utility : : run_loop_thread > ( ) ; <nl> <nl> - / / thread1 <nl> + / / thread1 ( loop ) <nl> <nl> - for ( int j = 0 ; j < 5 ; + + j ) { <nl> - thread1 - > enqueue ( ^ { <nl> - + + count1 ; <nl> - / / krbn : : logger : : get_logger ( ) . info ( " thread1 { 0 } { 1 } " , j , count1 ) ; <nl> - } ) ; <nl> - } <nl> + for ( int j = 0 ; j < 5 ; + + j ) { <nl> + thread1_ - > enqueue ( ^ { <nl> + + + count1_ ; <nl> + / / krbn : : logger : : get_logger ( ) . info ( " thread1 { 0 } { 1 } " , j , count1 ) ; <nl> + } ) ; <nl> + } <nl> <nl> - / / thread2 <nl> + / / thread2 ( recursive ) <nl> <nl> - thread2 - > enqueue ( ^ { <nl> - + + count2 ; <nl> - / / krbn : : logger : : get_logger ( ) . info ( " thread2 { 0 } " , count2 ) ; <nl> - } ) ; <nl> + enqueue2 ( ) ; <nl> <nl> - thread1 = nullptr ; <nl> - thread2 = nullptr ; <nl> + / / Verify counts <nl> + <nl> + thread1_ = nullptr ; <nl> + thread2_ = nullptr ; <nl> + <nl> + REQUIRE ( count1_ = = 5 ) ; <nl> + REQUIRE ( count2_ = = 3 ) ; <nl> + } <nl> + } <nl> <nl> - REQUIRE ( count1 = = 5 ) ; <nl> - REQUIRE ( count2 = = 1 ) ; <nl> + private : <nl> + void enqueue2 ( void ) { <nl> + thread2_ - > enqueue ( ^ { <nl> + + + count2_ ; <nl> + if ( count2_ < 3 ) { <nl> + enqueue2 ( ) ; <nl> + } <nl> + } ) ; <nl> } <nl> + <nl> + private : <nl> + std : : shared_ptr < krbn : : cf_utility : : run_loop_thread > thread1_ ; <nl> + std : : shared_ptr < krbn : : cf_utility : : run_loop_thread > thread2_ ; <nl> + <nl> + int count1_ ; <nl> + int count2_ ; <nl> + } ; <nl> + } / / namespace <nl> + <nl> + TEST_CASE ( " run_loop_thread " ) { <nl> + run_loop_thread_test ( ) ; <nl> } <nl>
|
update tests
|
pqrs-org/Karabiner-Elements
|
7c90a15a627cee1fac6bc5477af9086a6743051d
|
2018-08-08T14:50:37Z
|
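The Karabiner commit above rewrites the run_loop_thread test as a class so a task on thread2 can re-enqueue itself via a named member function until a counter reaches 3, and the counts are verified after the threads are released. Below is a minimal C++ sketch of the same recursive-enqueue pattern on a serial queue; the drain-on-destroy semantics are an assumption chosen to match the original's wait-for-pending behavior, not cf_utility's actual implementation.

```cpp
#include <condition_variable>
#include <deque>
#include <functional>
#include <iostream>
#include <mutex>
#include <thread>

// Minimal stand-in for cf_utility::run_loop_thread: a serial queue whose
// worker drains remaining tasks (including ones enqueued by running tasks)
// before destruction completes.
class serial_queue {
 public:
  serial_queue() : worker_([this] { run(); }) {}
  ~serial_queue() {
    {
      std::lock_guard<std::mutex> lock(mutex_);
      stopped_ = true;
    }
    cv_.notify_one();
    worker_.join();  // like the test, destruction waits for pending tasks
  }
  void enqueue(std::function<void()> task) {
    {
      std::lock_guard<std::mutex> lock(mutex_);
      tasks_.push_back(std::move(task));
    }
    cv_.notify_one();
  }

 private:
  void run() {
    for (;;) {
      std::function<void()> task;
      {
        std::unique_lock<std::mutex> lock(mutex_);
        cv_.wait(lock, [this] { return stopped_ || !tasks_.empty(); });
        if (tasks_.empty()) return;  // stopped and fully drained
        task = std::move(tasks_.front());
        tasks_.pop_front();
      }
      task();
    }
  }

  std::mutex mutex_;
  std::condition_variable cv_;
  std::deque<std::function<void()>> tasks_;
  bool stopped_ = false;
  std::thread worker_;
};

int count = 0;
serial_queue* queue = nullptr;

// Mirrors enqueue2() in the test: the task re-enqueues itself until
// count reaches 3, exercising enqueue-from-inside-a-task.
void enqueue_recursive() {
  queue->enqueue([] {
    if (++count < 3) enqueue_recursive();
  });
}

int main() {
  {
    serial_queue q;
    queue = &q;
    enqueue_recursive();
  }  // destructor joins after the re-enqueued tasks have run
  std::cout << "count = " << count << "\n";  // expect 3
  return count == 3 ? 0 : 1;
}
```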
new file mode 100644 <nl> index 000000000000 . . 8b99e43dc45a <nl> mmm / dev / null <nl> ppp b / validation - test / compiler_crashers / 28260 - swift - constraints - constraintgraphnode - getmembertype . swift <nl> <nl> + / / RUN : not - - crash % target - swift - frontend % s - parse <nl> + <nl> + / / Distributed under the terms of the MIT license <nl> + / / Test case submitted to project by https : / / github . com / practicalswift ( practicalswift ) <nl> + / / Test case found by fuzzing <nl> + <nl> + protocol A { func < <nl> + typealias f : A <nl> + protocol A { <nl> + class a <nl> + typealias f : a <nl>
|
[ swiftc ] Add test case for crash triggered in swift : : constraints : : ConstraintGraphNode : : getMemberType ( swift : : Identifier , std : : function < swift : : TypeVariableType * ( ) > )
|
apple/swift
|
a183c5fc9d054560788074a8952c2b857df9cd83
|
2016-03-08T10:36:24Z
|
mmm a / . travis . yml <nl> ppp b / . travis . yml <nl> <nl> language : cpp <nl> env : <nl> matrix : <nl> + - GEN_JSB = YES <nl> - PLATFORM = linux DEBUG = 1 <nl> - PLATFORM = nacl DEBUG = 1 <nl> - PLATFORM = android <nl>
|
Adding GEN_JSB env for Travis - ci to auto - generate jsbinding glue code .
|
cocos2d/cocos2d-x
|
9257c4f4e816dd9fde4a9709a571962171c4e10e
|
2013-05-21T05:48:53Z
|
mmm a / dbms / src / Core / Defines . h <nl> ppp b / dbms / src / Core / Defines . h <nl> <nl> # elif defined ( __SANITIZE_THREAD__ ) <nl> # define THREAD_SANITIZER 1 <nl> # endif <nl> + <nl> + / / / Explicitly allow undefined behaviour for certain functions . Use it as a function attribute . <nl> + / / / It is useful in case when compiler cannot see ( and exploit ) it , but UBSan can . <nl> + / / / Example : multiplication of signed integers with possibility of overflow when both sides are from user input . <nl> + # if defined ( __clang__ ) <nl> + # define NO_UNDEFINED_SANITIZER __attribute__ ( ( __no_sanitize__ ( " undefined " ) ) ) <nl> + # else <nl> + / / / It does not work in GCC . GCC 7 cannot recognize this attribute and GCC 8 simply ignores it . <nl> + # define NO_UNDEFINED_SANITIZER <nl> + # endif <nl>
|
Added attribute
|
ClickHouse/ClickHouse
|
37a2d51e0ca7a53e50a733d763724a6012aa41fc
|
2018-12-26T23:17:04Z
|
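The NO_UNDEFINED_SANITIZER macro above is meant to be applied per function, so one deliberate overflow does not force disabling UBSan for the whole binary. A minimal usage sketch under that assumption (unchecked_mul is a hypothetical function, not ClickHouse code; the overflow remains undefined behaviour, the attribute only stops UBSan from reporting it, and only under Clang):

#include <cstdint>
#include <iostream>

#if defined(__clang__)
#define NO_UNDEFINED_SANITIZER __attribute__((__no_sanitize__("undefined")))
#else
#define NO_UNDEFINED_SANITIZER  // GCC 7 rejects the attribute; GCC 8 ignores it
#endif

// Both operands may come from user input; overflow is accepted by design.
NO_UNDEFINED_SANITIZER int64_t unchecked_mul(int64_t a, int64_t b) {
  return a * b;
}

int main() {
  // Under clang -fsanitize=undefined this overflows silently instead of
  // producing a UBSan report.
  std::cout << unchecked_mul(INT64_MAX, 2) << "\n";
}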
mmm a / Telegram / SourceFiles / chat_helpers / spellchecker_common . cpp <nl> ppp b / Telegram / SourceFiles / chat_helpers / spellchecker_common . cpp <nl> void DownloadDictionaryInBackground ( <nl> const auto id = langs [ counter ] ; <nl> counter + + ; <nl> const auto destroyer = [ = ] { <nl> - / / This is a temporary workaround . <nl> - const auto copyId = id ; <nl> - const auto copyLangs = langs ; <nl> - const auto copySession = session ; <nl> - const auto copyCounter = counter ; <nl> BackgroundLoader = nullptr ; <nl> BackgroundLoaderChanged . fire ( 0 ) ; <nl> <nl> - if ( DictionaryExists ( copyId ) ) { <nl> - auto dicts = copySession - > settings ( ) . dictionariesEnabled ( ) ; <nl> - if ( ! ranges : : contains ( dicts , copyId ) ) { <nl> - dicts . push_back ( copyId ) ; <nl> - copySession - > settings ( ) . setDictionariesEnabled ( std : : move ( dicts ) ) ; <nl> - copySession - > saveSettingsDelayed ( ) ; <nl> + if ( DictionaryExists ( id ) ) { <nl> + auto dicts = session - > settings ( ) . dictionariesEnabled ( ) ; <nl> + if ( ! ranges : : contains ( dicts , id ) ) { <nl> + dicts . push_back ( id ) ; <nl> + session - > settings ( ) . setDictionariesEnabled ( std : : move ( dicts ) ) ; <nl> + session - > saveSettingsDelayed ( ) ; <nl> } <nl> } <nl> <nl> - if ( copyCounter > = copyLangs . size ( ) ) { <nl> + if ( counter > = langs . size ( ) ) { <nl> return ; <nl> } <nl> - DownloadDictionaryInBackground ( copySession , copyCounter , copyLangs ) ; <nl> + DownloadDictionaryInBackground ( session , counter , langs ) ; <nl> } ; <nl> if ( DictionaryExists ( id ) ) { <nl> destroyer ( ) ; <nl> DictLoader : : DictLoader ( <nl> } <nl> <nl> void DictLoader : : unpack ( const QString & path ) { <nl> - Expects ( _destroyCallback ) ; <nl> crl : : async ( [ = ] { <nl> const auto success = Spellchecker : : UnpackDictionary ( path , id ( ) ) ; <nl> if ( success ) { <nl> QFile ( path ) . remove ( ) ; <nl> + destroy ( ) ; <nl> + return ; <nl> } <nl> - crl : : on_main ( success ? _destroyCallback : [ = ] { fail ( ) ; } ) ; <nl> + crl : : on_main ( [ = ] { fail ( ) ; } ) ; <nl> } ) ; <nl> } <nl> <nl> void DictLoader : : destroy ( ) { <nl> Expects ( _destroyCallback ) ; <nl> <nl> - _destroyCallback ( ) ; <nl> + crl : : on_main ( _destroyCallback ) ; <nl> } <nl> <nl> void DictLoader : : fail ( ) { <nl> mmm a / Telegram / SourceFiles / chat_helpers / spellchecker_common . h <nl> ppp b / Telegram / SourceFiles / chat_helpers / spellchecker_common . h <nl> class DictLoader : public Storage : : CloudBlob : : BlobLoader { <nl> void unpack ( const QString & path ) override ; <nl> void fail ( ) override ; <nl> <nl> + / / Be sure to always call it in the main thread . <nl> Fn < void ( ) > _destroyCallback ; <nl> <nl> rpl : : lifetime _lifetime ; <nl>
|
Moved all destruction calls of DictLoader to main thread .
|
telegramdesktop/tdesktop
|
a0584ea7a19b377cd0a578bee2c2610aa8533cd3
|
2020-02-27T11:18:00Z
|
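The fix above routes every invocation of _destroyCallback through crl::on_main, so the loader is destroyed on the main thread no matter which thread finished the work. A minimal sketch of that hand-off, assuming only the standard library (on_main below is a toy stand-in for crl::on_main, and the explicit drain loop models the main thread's event loop):

#include <functional>
#include <iostream>
#include <mutex>
#include <queue>
#include <thread>

std::queue<std::function<void()>> main_queue;
std::mutex main_mutex;

// Toy stand-in for crl::on_main: queue work for the main thread.
void on_main(std::function<void()> fn) {
  std::lock_guard<std::mutex> lock(main_mutex);
  main_queue.push(std::move(fn));
}

int main() {
  std::function<void()> destroy_callback = [] {
    std::cout << "destroyed on main thread\n";
  };
  // A worker finishes its async task, then hands destruction back to main.
  std::thread worker([&] { on_main(destroy_callback); });
  worker.join();
  // The main thread drains its queue; the callback runs here, not on the worker.
  while (!main_queue.empty()) {
    main_queue.front()();
    main_queue.pop();
  }
}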
mmm a / src / flag - definitions . h <nl> ppp b / src / flag - definitions . h <nl> DEFINE_BOOL ( future , FUTURE_BOOL , <nl> " not - too - far future " ) <nl> <nl> DEFINE_IMPLICATION ( future , preparser_scope_analysis ) <nl> + DEFINE_IMPLICATION ( future , lazy_deserialization ) <nl> <nl> / / Flags for experimental implementation features . <nl> DEFINE_BOOL ( allocation_site_pretenuring , true , <nl> DEFINE_INT ( runtime_stats , 0 , <nl> DEFINE_VALUE_IMPLICATION ( runtime_call_stats , runtime_stats , 1 ) <nl> <nl> / / snapshot - common . cc <nl> - DEFINE_BOOL ( lazy_deserialization , true , <nl> + DEFINE_BOOL ( lazy_deserialization , false , <nl> " Deserialize code lazily from the snapshot . " ) <nl> DEFINE_BOOL ( trace_lazy_deserialization , false , " Trace lazy deserialization . " ) <nl> DEFINE_BOOL ( profile_deserialization , false , <nl>
|
Revert " [ snapshot ] Ship lazy TFJ builtins "
|
v8/v8
|
47fabf362b813277cab9f1f42f51911ace502b48
|
2017-10-16T13:29:52Z
|
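DEFINE_IMPLICATION keeps a feature reachable even after its default is flipped off: the revert above sets lazy_deserialization back to false, yet --future still implies it on. A minimal sketch of that flag-implication idea, assuming nothing about V8's actual macro machinery (Flags and ApplyImplications are toy stand-ins):

#include <iostream>

struct Flags {
  bool future = false;
  bool lazy_deserialization = false;   // default now false, per the revert
  bool preparser_scope_analysis = false;
};

// Models DEFINE_IMPLICATION: run after flag parsing so an umbrella flag
// switches its dependents on.
void ApplyImplications(Flags& f) {
  if (f.future) f.preparser_scope_analysis = true;  // existing implication
  if (f.future) f.lazy_deserialization = true;      // implication added above
}

int main() {
  Flags flags;
  flags.future = true;  // as if --future were passed on the command line
  ApplyImplications(flags);
  std::cout << flags.lazy_deserialization << "\n";  // 1: implied back on
}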
mmm a / dlib / image_transforms / draw . h <nl> ppp b / dlib / image_transforms / draw . h <nl> <nl> <nl> # include " draw_abstract . h " <nl> # include " . . / algs . h " <nl> + # include " . . / pixel . h " <nl> # include < cmath > <nl> <nl> namespace dlib <nl> namespace dlib <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> <nl> template < <nl> - typename image_type <nl> + typename image_type , <nl> + typename pixel_type <nl> > <nl> void draw_line ( <nl> long x1 , <nl> namespace dlib <nl> long x2 , <nl> long y2 , <nl> image_type & c , <nl> - typename image_type : : type val <nl> + const pixel_type & val <nl> ) <nl> { <nl> if ( x1 = = x2 ) <nl> namespace dlib <nl> if ( y < 0 | | y > = c . nr ( ) ) <nl> continue ; <nl> <nl> - c [ y ] [ x1 ] = val ; <nl> + assign_pixel ( c [ y ] [ x1 ] , val ) ; <nl> } <nl> } <nl> else if ( y1 = = y2 ) <nl> namespace dlib <nl> if ( x < 0 | | x > = c . nc ( ) ) <nl> continue ; <nl> <nl> - c [ y1 ] [ x ] = val ; <nl> + assign_pixel ( c [ y1 ] [ x ] , val ) ; <nl> } <nl> } <nl> else <nl> namespace dlib <nl> continue ; <nl> <nl> <nl> - c [ y ] [ x ] = val ; <nl> + assign_pixel ( c [ y ] [ x ] , val ) ; <nl> } <nl> } <nl> else <nl> namespace dlib <nl> if ( y < 0 | | y > = c . nr ( ) ) <nl> continue ; <nl> <nl> - c [ y ] [ x ] = val ; <nl> + assign_pixel ( c [ y ] [ x ] , val ) ; <nl> } <nl> } <nl> } <nl> <nl> } <nl> <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> + <nl> + template < <nl> + typename image_type , <nl> + typename pixel_type <nl> + > <nl> + void draw_line ( <nl> + image_type & c , <nl> + const point & p1 , <nl> + const point & p2 , <nl> + const pixel_type & val <nl> + ) <nl> + { <nl> + draw_line ( p1 . x ( ) , p1 . y ( ) , p2 . x ( ) , p2 . y ( ) , c , val ) ; <nl> + } <nl> + <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> <nl> template < <nl> mmm a / dlib / image_transforms / draw_abstract . h <nl> ppp b / dlib / image_transforms / draw_abstract . h <nl> namespace dlib <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> <nl> template < <nl> - typename image_type <nl> + typename image_type , <nl> + typename pixel_type <nl> + > <nl> + void draw_line ( <nl> + image_type & c , <nl> + const point & p1 , <nl> + const point & p2 , <nl> + const pixel_type & val <nl> + ) ; <nl> + / * ! <nl> + requires <nl> + - image_type = = is an implementation of array2d / array2d_kernel_abstract . h <nl> + - pixel_traits < pixel_type > is defined <nl> + ensures <nl> + - # img . nr ( ) = = img . nr ( ) & & # img . nc ( ) = = img . nc ( ) <nl> + ( i . e . the dimensions of the input image are not changed ) <nl> + - for all valid r and c that are on the line between point p1 and p2 : <nl> + - performs assign_pixel ( img [ r ] [ c ] , val ) <nl> + ( i . e . it draws the line from p1 to p2 onto the image ) <nl> + ! * / <nl> + <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> + <nl> + template < <nl> + typename image_type , <nl> + typename pixel_type <nl> > <nl> void draw_line ( <nl> long x1 , <nl> namespace dlib <nl> long x2 , <nl> long y2 , <nl> image_type & img , <nl> - typename image_type : : type val <nl> + const pixel_type & val <nl> ) ; <nl> / * ! <nl> requires <nl> - image_type = = is an implementation of array2d / array2d_kernel_abstract . 
h <nl> + - pixel_traits < pixel_type > is defined <nl> ensures <nl> - - # img . nr ( ) = = img . nr ( ) & & # img . nc ( ) = = img . nc ( ) <nl> - ( i . e . the dimensions of the input image are not changed ) <nl> - - for all valid r and c that are on the line between point ( x1 , y1 ) <nl> - and point ( x2 , y2 ) : <nl> - - performs img [ r ] [ c ] = val <nl> - ( i . e . it draws the line from ( x1 , y1 ) to ( x2 , y2 ) onto the image ) <nl> + - performs draw_line ( img , point ( x1 , y1 ) , point ( x2 , y2 ) , val ) <nl> ! * / <nl> <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> namespace dlib <nl> - fills the area defined by rect in the given image with the given pixel value . <nl> ! * / <nl> <nl> - } <nl> - <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> <nl> } <nl>
|
Cleaned up the interface to draw_line ( )
|
davisking/dlib
|
13d7eaaf682be1a1aa1ec1b66e0e3a86f932bede
|
2011-08-24T23:38:59Z
|
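The cleanup above templates draw_line() on the pixel type and funnels every write through assign_pixel(), so callers can draw any pixel value into any image type and get a proper conversion instead of a raw assignment. A minimal sketch of that design choice under a trivial conversion rule (dlib's real assign_pixel also handles color-space and range conversion; the version below is a stand-in):

#include <iostream>
#include <vector>

// Stand-in conversion point: every pixel write goes through here, so
// changing the conversion rule changes all drawing code at once.
template <typename dest_t, typename src_t>
void assign_pixel(dest_t& dest, const src_t& src) {
  dest = static_cast<dest_t>(src);
}

// Generic over both the image and the pixel value, like the new draw_line().
template <typename image_type, typename pixel_type>
void draw_hline(image_type& img, long row, const pixel_type& val) {
  for (auto& p : img[row]) assign_pixel(p, val);  // was: img[row][x] = val
}

int main() {
  std::vector<std::vector<unsigned char>> img(4, std::vector<unsigned char>(4, 0));
  draw_hline(img, 1, 255);              // int literal converted per pixel
  std::cout << int(img[1][2]) << "\n";  // 255
}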
mmm a / ios / sdk / WeexSDK / Sources / Display / WXComponent + Display . m <nl> ppp b / ios / sdk / WeexSDK / Sources / Display / WXComponent + Display . m <nl> - ( void ) _handleBorders : ( NSDictionary * ) styles isUpdating : ( BOOL ) updating <nl> previousNeedsDrawBorder = [ self _needsDrawBorder ] ; <nl> } <nl> <nl> - # define WX_CHECK_BORDER_PROP ( prop , direction1 , direction2 , direction3 , direction4 , type , setLayerProp ) \ <nl> + # define WX_CHECK_BORDER_PROP ( prop , direction1 , direction2 , direction3 , direction4 , type ) \ <nl> do { \ <nl> BOOL needsDisplay = NO ; \ <nl> NSString * styleProp = WX_NSSTRING ( WX_CONCAT ( border , prop ) ) ; \ <nl> - ( void ) _handleBorders : ( NSDictionary * ) styles isUpdating : ( BOOL ) updating <nl> needsDisplay = YES ; \ <nl> } \ <nl> if ( needsDisplay & & updating ) { \ <nl> - if ( ! [ self _needsDrawBorder ] ) { \ <nl> - setLayerProp ; \ <nl> - } \ <nl> [ self setNeedsDisplay ] ; \ <nl> } \ <nl> } while ( 0 ) ; <nl> <nl> - WX_CHECK_BORDER_PROP ( Style , Top , Left , Bottom , Right , WXBorderStyle , <nl> - _layer . borderWidth = _borderTopWidth ; <nl> - _layer . borderColor = _borderTopColor . CGColor ; <nl> - _layer . cornerRadius = _borderTopLeftRadius ; <nl> - _layer . backgroundColor = _backgroundColor . CGColor ) <nl> - WX_CHECK_BORDER_PROP ( Color , Top , Left , Bottom , Right , UIColor , <nl> - _layer . borderColor = _borderTopColor . CGColor ) <nl> - WX_CHECK_BORDER_PROP ( Width , Top , Left , Bottom , Right , WXPixelType , <nl> - _layer . borderWidth = _borderTopWidth ) <nl> - WX_CHECK_BORDER_PROP ( Radius , TopLeft , TopRight , BottomLeft , BottomRight , WXPixelType , <nl> - _layer . cornerRadius = _borderTopLeftRadius ) <nl> + WX_CHECK_BORDER_PROP ( Style , Top , Left , Bottom , Right , WXBorderStyle ) <nl> + WX_CHECK_BORDER_PROP ( Color , Top , Left , Bottom , Right , UIColor ) <nl> + WX_CHECK_BORDER_PROP ( Width , Top , Left , Bottom , Right , WXPixelType ) <nl> + WX_CHECK_BORDER_PROP ( Radius , TopLeft , TopRight , BottomLeft , BottomRight , WXPixelType ) <nl> <nl> if ( updating ) { <nl> BOOL nowNeedsDrawBorder = [ self _needsDrawBorder ] ; <nl> - ( void ) _handleBorders : ( NSDictionary * ) styles isUpdating : ( BOOL ) updating <nl> _layer . borderWidth = 0 ; <nl> _layer . backgroundColor = NULL ; <nl> } <nl> + <nl> + if ( ! nowNeedsDrawBorder ) { <nl> + _layer . cornerRadius = _borderTopLeftRadius ; <nl> + _layer . borderWidth = _borderTopWidth ; <nl> + _layer . borderColor = _borderTopColor . CGColor ; <nl> + _layer . backgroundColor = _backgroundColor . CGColor ; <nl> + } <nl> } <nl> } <nl> <nl>
|
* [ ios ] fix border render bug while updating radius
|
apache/incubator-weex
|
c24dc984b5ceb1fb74d1d6b0a2741aaf195f0b44
|
2016-07-27T08:25:49Z
|
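The fix above stops pushing CALayer properties piecemeal from inside the per-property macro and instead applies them once, after all border styles are processed and the needs-custom-drawing decision is final. A minimal sketch of that decide-once shape in plain C++ (layer_props and needs_draw_border are toy stand-ins for the CALayer state and [self _needsDrawBorder]):

#include <iostream>

struct layer_props { float corner_radius = 0, border_width = 0; };

// Toy rule standing in for _needsDrawBorder: asymmetric borders cannot be
// expressed with plain layer properties and need custom drawing.
bool needs_draw_border(float top_w, float bottom_w) {
  return top_w != bottom_w;
}

int main() {
  float top = 2, bottom = 2, radius = 8;
  layer_props layer;
  // Old shape: set layer props inside each property update, while later
  // updates may still flip the custom-drawing decision.
  // New shape: decide once, after every style has been handled.
  if (!needs_draw_border(top, bottom)) {
    layer.corner_radius = radius;
    layer.border_width = top;
  }
  std::cout << layer.corner_radius << "\n";  // 8: the simple path applies
}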
mmm a / tensorflow / core / common_runtime / BUILD <nl> ppp b / tensorflow / core / common_runtime / BUILD <nl> cc_library ( <nl> " / / tensorflow / core : lib_internal " , <nl> " / / tensorflow / core : protos_all_cc " , <nl> " / / tensorflow / core / profiler / lib : annotated_traceme " , <nl> + " / / tensorflow / core / profiler / lib : connected_traceme " , <nl> " / / tensorflow / core / profiler / lib : scoped_annotation " , <nl> " / / tensorflow / core / profiler / lib : traceme " , <nl> + " / / tensorflow / core / profiler / lib : traceme_encode " , <nl> " @ com_google_absl / / absl / memory " , <nl> ] , <nl> alwayslink = 1 , <nl> mmm a / tensorflow / core / common_runtime / eager / BUILD <nl> ppp b / tensorflow / core / common_runtime / eager / BUILD <nl> KERNEL_AND_DEVICE_DEPS = [ <nl> " / / tensorflow / core : lib_internal " , <nl> " / / tensorflow / core : protos_all_cc " , <nl> " / / tensorflow / core / profiler / lib : annotated_traceme " , <nl> + " / / tensorflow / core / profiler / lib : connected_traceme " , <nl> " / / tensorflow / core / profiler / lib : traceme " , <nl> + " / / tensorflow / core / profiler / lib : traceme_encode " , <nl> " / / tensorflow / core / grappler / optimizers : meta_optimizer " , <nl> ] <nl> <nl> mmm a / tensorflow / core / common_runtime / eager / kernel_and_device . cc <nl> ppp b / tensorflow / core / common_runtime / eager / kernel_and_device . cc <nl> limitations under the License . <nl> # include " tensorflow / core / platform / fingerprint . h " <nl> # include " tensorflow / core / platform / setround . h " <nl> # include " tensorflow / core / profiler / lib / annotated_traceme . h " <nl> + # include " tensorflow / core / profiler / lib / connected_traceme . h " <nl> # include " tensorflow / core / profiler / lib / traceme . h " <nl> + # include " tensorflow / core / profiler / lib / traceme_encode . h " <nl> # include " tensorflow / core / public / version . h " <nl> # include " tensorflow / core / util / tensor_slice_reader_cache . h " <nl> # if ! defined ( IS_MOBILE_PLATFORM ) <nl> void KernelAndDeviceFunc : : RunAsync ( <nl> <nl> outputs - > clear ( ) ; <nl> <nl> - profiler : : TraceMe * activity = new profiler : : TraceMe ( <nl> + profiler : : TraceMeProducer activity ( <nl> + / / To TraceMeConsumers in ExecutorState : : Process / Finish . <nl> [ & ] { <nl> - return absl : : StrCat ( " FunctionRun # name = " , name ( ) , " , id = " , opts - > step_id , <nl> - " # " ) ; <nl> + return profiler : : TraceMeEncode ( <nl> + " FunctionRun " , { { " id " , opts - > step_id } , { " $ r " , 1 } / * root_event * / } ) ; <nl> } , <nl> + profiler : : ContextType : : kTfExecutor , opts - > step_id , <nl> profiler : : TraceMeLevel : : kInfo ) ; <nl> pflr_ - > Run ( * opts , handle_ , inputs , outputs , <nl> - [ opts , rendezvous , local_cm , step_container , this , activity , <nl> + [ opts , rendezvous , local_cm , step_container , this , <nl> done = std : : move ( done ) ] ( const Status & s ) { <nl> - delete activity ; <nl> rendezvous - > Unref ( ) ; <nl> if ( step_container = = nullptr ) { <nl> this - > step_container_ . CleanUp ( ) ; <nl> mmm a / tensorflow / core / common_runtime / executor . cc <nl> ppp b / tensorflow / core / common_runtime / executor . cc <nl> limitations under the License . <nl> # include " tensorflow / core / platform / tracing . h " <nl> # include " tensorflow / core / platform / types . h " <nl> # include " tensorflow / core / profiler / lib / annotated_traceme . 
h " <nl> + # include " tensorflow / core / profiler / lib / connected_traceme . h " <nl> # include " tensorflow / core / profiler / lib / scoped_annotation . h " <nl> # include " tensorflow / core / profiler / lib / traceme . h " <nl> + # include " tensorflow / core / profiler / lib / traceme_encode . h " <nl> # include " tensorflow / core / protobuf / error_codes . pb . h " <nl> # include " tensorflow / core / util / tensor_slice_reader_cache . h " <nl> <nl> void ExecutorState < PropagatorStateType > : : ProcessConstTensor ( <nl> template < class PropagatorStateType > <nl> void ExecutorState < PropagatorStateType > : : Process ( TaggedNode tagged_node , <nl> int64 scheduled_nsec ) { <nl> - profiler : : TraceMe activity ( <nl> + profiler : : TraceMeConsumer activity ( <nl> + / / From TraceMeProducer in KernelAndDeviceFunc : : RunAsync . <nl> [ & ] { <nl> / / NOTE : This tracing uses the iteration number from the first tagged <nl> / / node that executes during this call to ` Process ( ) ` . In principle , <nl> / / subsequent nodes could have different values of ` iter_num ` that <nl> / / will not be traced . <nl> - return absl : : StrCat ( " ExecutorState : : Process # id = " , step_id_ , <nl> - " , iter_num = " , tagged_node . get_iter_num ( ) , " # " ) ; <nl> + return profiler : : TraceMeEncode ( <nl> + " ExecutorState : : Process " , <nl> + { { " id " , step_id_ } , { " iter_num " , tagged_node . get_iter_num ( ) } } ) ; <nl> } , <nl> - 2 ) ; <nl> + step_id_ , profiler : : ContextType : : kTfExecutor , <nl> + profiler : : TraceMeLevel : : kInfo ) ; <nl> WithContext wc ( context_ ) ; <nl> TaggedNodeSeq ready ; <nl> TaggedNodeReadyQueue inline_ready ; <nl> void ExecutorState < PropagatorStateType > : : Finish ( ) { <nl> } <nl> delete this ; <nl> runner ( [ step_id , status , done_cb = std : : move ( done_cb ) ] ( ) { <nl> - profiler : : TraceMe traceme ( <nl> + profiler : : TraceMeConsumer activity ( <nl> + / / From TraceMeProducer in KernelAndDeviceFunc : : RunAsync . <nl> [ & ] { <nl> - return absl : : StrCat ( " ExecutorDoneCallback # id = " , step_id , " # " ) ; <nl> + return profiler : : TraceMeEncode ( " ExecutorDoneCallback " , <nl> + { { " id " , step_id } } ) ; <nl> } , <nl> - 2 ) ; <nl> + step_id , profiler : : ContextType : : kTfExecutor , <nl> + profiler : : TraceMeLevel : : kInfo ) ; <nl> done_cb ( status ) ; <nl> } ) ; <nl> return ; <nl> mmm a / tensorflow / core / profiler / lib / BUILD <nl> ppp b / tensorflow / core / profiler / lib / BUILD <nl> filegroup ( <nl> name = " mobile_srcs " , <nl> srcs = [ <nl> " annotated_traceme . h " , <nl> + " connected_traceme . h " , <nl> " profiler_session . cc " , <nl> " profiler_session . h " , <nl> " scoped_annotation . h " , <nl>
|
Fix FunctionRun ' s TraceMe to capture the scheduling of the function ( instead of async execution of it ) and use new TraceMe APIs .
|
tensorflow/tensorflow
|
ad027e2469cd23bba89b9a489d6cc7477ef48519
|
2020-06-09T23:49:17Z
|
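TraceMeProducer/TraceMeConsumer connect events recorded on different threads by stamping both with the same (context type, id) pair; above, the producer is the function dispatch and the consumers are executor processing and the done callback. A minimal sketch of that correlation idea, assuming nothing about the profiler's real encoding (TraceEvent, Produce, and Consume are toy stand-ins):

#include <cstdint>
#include <iostream>

enum class ContextType { kTfExecutor };

struct TraceEvent { const char* name; ContextType type; int64_t id; };

// Scheduling side, e.g. KernelAndDeviceFunc::RunAsync.
TraceEvent Produce(int64_t step_id) {
  return {"FunctionRun", ContextType::kTfExecutor, step_id};
}
// Execution side, e.g. ExecutorState::Process.
TraceEvent Consume(int64_t step_id) {
  return {"ExecutorState::Process", ContextType::kTfExecutor, step_id};
}

int main() {
  auto p = Produce(42);
  auto c = Consume(42);
  // A profiler would join on (type, id) to draw the arrow from the
  // scheduling event to the execution event.
  std::cout << (p.id == c.id && p.type == c.type) << "\n";  // 1: connected
}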
mmm a / ports / hdf5 / CONTROL <nl> ppp b / ports / hdf5 / CONTROL <nl> <nl> Source : hdf5 <nl> - Version : 1 . 10 . 0 - patch1 <nl> + Version : 1 . 10 . 0 - patch1 - 1 <nl> Description : HDF5 is a data model , library , and file format for storing and managing data <nl> Build - Depends : zlib , szip , msmpi <nl> \ No newline at end of file <nl> new file mode 100644 <nl> index 00000000000 . . 15db78a2d00 <nl> mmm / dev / null <nl> ppp b / ports / hdf5 / link - libraries - private . patch <nl> <nl> pppmmm a / src / CMakeLists . txt <nl> ppp + b / src / CMakeLists . txt <nl> + if ( BUILD_SHARED_LIBS ) <nl> + set ( shared_gen_SRCS $ { HDF5_BINARY_DIR } / shared / H5Tinit . c $ { HDF5_BINARY_DIR } / shared / H5lib_settings . c ) <nl> + add_library ( $ { HDF5_LIBSH_TARGET } SHARED $ { common_SRCS } $ { shared_gen_SRCS } $ { H5_PUBLIC_HEADERS } $ { H5_PRIVATE_HEADERS } $ { H5_GENERATED_HEADERS } ) <nl> + TARGET_C_PROPERTIES ( $ { HDF5_LIBSH_TARGET } SHARED " " " " ) <nl> + - target_link_libraries ( $ { HDF5_LIBSH_TARGET } $ { LINK_SHARED_LIBS } ) <nl> + + target_link_libraries ( $ { HDF5_LIBSH_TARGET } PRIVATE $ { LINK_SHARED_LIBS } ) <nl> + if ( NOT WIN32 ) <nl> + - target_link_libraries ( $ { HDF5_LIBSH_TARGET } dl ) <nl> + + target_link_libraries ( $ { HDF5_LIBSH_TARGET } PRIVATE dl ) <nl> + endif ( NOT WIN32 ) <nl> + if ( H5_HAVE_PARALLEL AND MPI_C_FOUND ) <nl> + - target_link_libraries ( $ { HDF5_LIBSH_TARGET } $ { MPI_C_LIBRARIES } ) <nl> + + target_link_libraries ( $ { HDF5_LIBSH_TARGET } PRIVATE $ { MPI_C_LIBRARIES } ) <nl> + endif ( H5_HAVE_PARALLEL AND MPI_C_FOUND ) <nl> + set_global_variable ( HDF5_LIBRARIES_TO_EXPORT " $ { HDF5_LIBRARIES_TO_EXPORT } ; $ { HDF5_LIBSH_TARGET } " ) <nl> + H5_SET_LIB_OPTIONS ( $ { HDF5_LIBSH_TARGET } $ { HDF5_LIB_NAME } SHARED $ { HDF5_PACKAGE_SOVERSION } ) <nl> + if ( BUILD_SHARED_LIBS ) <nl> + APPEND PROPERTY COMPILE_DEFINITIONS <nl> + " H5_HAVE_THREADSAFE " <nl> + ) <nl> + - target_link_libraries ( $ { HDF5_LIBSH_TARGET } Threads : : Threads ) <nl> + + target_link_libraries ( $ { HDF5_LIBSH_TARGET } PRIVATE Threads : : Threads ) <nl> + endif ( HDF5_ENABLE_THREADSAFE ) <nl> + <nl> + if ( HDF5_ENABLE_DEBUG_APIS ) <nl> mmm a / ports / hdf5 / portfile . cmake <nl> ppp b / ports / hdf5 / portfile . cmake <nl> vcpkg_apply_patches ( <nl> PATCHES <nl> $ { CMAKE_CURRENT_LIST_DIR } / use - szip - config . patch <nl> $ { CMAKE_CURRENT_LIST_DIR } / disable - static - libs . patch <nl> + $ { CMAKE_CURRENT_LIST_DIR } / link - libraries - private . patch <nl> ) <nl> <nl> set ( DISABLE_STATIC_LIBS OFF ) <nl>
|
[ hdf5 ] Mark linked libraries as private
|
microsoft/vcpkg
|
2c9cf2a520ca704e40f51756497f3bd833d45359
|
2017-02-28T09:48:24Z
|
mmm a / src / core / arm / skyeye_common / vfp / vfpinstr . cpp <nl> ppp b / src / core / arm / skyeye_common / vfp / vfpinstr . cpp <nl> static ARM_INST_PTR INTERPRETER_TRANSLATE ( vpush ) ( unsigned int inst , int index ) <nl> <nl> addr = cpu - > Reg [ R13 ] - inst_cream - > imm32 ; <nl> <nl> - for ( int i = 0 ; i < inst_cream - > regs ; i + + ) <nl> + for ( unsigned int i = 0 ; i < inst_cream - > regs ; i + + ) <nl> { <nl> if ( inst_cream - > single ) <nl> { <nl> static ARM_INST_PTR INTERPRETER_TRANSLATE ( vstm ) ( unsigned int inst , int index ) <nl> <nl> addr = ( inst_cream - > add ? cpu - > Reg [ inst_cream - > n ] : cpu - > Reg [ inst_cream - > n ] - inst_cream - > imm32 ) ; <nl> <nl> - for ( int i = 0 ; i < inst_cream - > regs ; i + + ) <nl> + for ( unsigned int i = 0 ; i < inst_cream - > regs ; i + + ) <nl> { <nl> if ( inst_cream - > single ) <nl> { <nl> static ARM_INST_PTR INTERPRETER_TRANSLATE ( vpop ) ( unsigned int inst , int index ) <nl> <nl> addr = cpu - > Reg [ R13 ] ; <nl> <nl> - for ( int i = 0 ; i < inst_cream - > regs ; i + + ) <nl> + for ( unsigned int i = 0 ; i < inst_cream - > regs ; i + + ) <nl> { <nl> if ( inst_cream - > single ) <nl> { <nl> static ARM_INST_PTR INTERPRETER_TRANSLATE ( vldm ) ( unsigned int inst , int index ) <nl> <nl> addr = ( inst_cream - > add ? cpu - > Reg [ inst_cream - > n ] : cpu - > Reg [ inst_cream - > n ] - inst_cream - > imm32 ) ; <nl> <nl> - for ( int i = 0 ; i < inst_cream - > regs ; i + + ) <nl> + for ( unsigned int i = 0 ; i < inst_cream - > regs ; i + + ) <nl> { <nl> if ( inst_cream - > single ) <nl> { <nl>
|
vfpinstr : Fix trivial signed / unsigned mismatch warnings
|
yuzu-emu/yuzu
|
4f910bb1a119d1791bfbcdd41242f827b5079691
|
2015-02-17T23:53:50Z
|
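The warnings fixed above all have the same shape: a signed int loop index compared against an unsigned count such as inst_cream->regs. A minimal reproduction, assuming a compiler with -Wsign-compare enabled (regs stands in for the instruction's register count):

#include <iostream>

int main() {
  unsigned int regs = 5;
  int sum = 0;
  // for (int i = 0; i < regs; ++i)        // warns: signed/unsigned mismatch
  for (unsigned int i = 0; i < regs; ++i)  // fixed: index matches bound type
    ++sum;
  std::cout << sum << "\n";  // 5
}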
mmm a / tensorflow / core / kernels / data / prefetch_dataset_op_test . cc <nl> ppp b / tensorflow / core / kernels / data / prefetch_dataset_op_test . cc <nl> namespace { <nl> <nl> constexpr char kNodeName [ ] = " prefetch_dataset " ; <nl> <nl> - class PrefetchDatasetOpTest : public DatasetOpsTestBase { <nl> - protected : <nl> - / / Creates ` TensorSliceDataset ` variant tensor from the input vector of <nl> - / / tensors . <nl> - Status CreateTensorSliceDatasetTensor ( <nl> - std : : vector < Tensor > * const tensor_vector , Tensor * dataset_tensor ) { <nl> - DatasetBase * tensor_slice_dataset ; <nl> - TF_RETURN_IF_ERROR ( CreateTensorSliceDataset ( <nl> - " tensor_slice_node " , tensor_vector , & tensor_slice_dataset ) ) ; <nl> - TF_RETURN_IF_ERROR ( <nl> - StoreDatasetInVariantTensor ( tensor_slice_dataset , dataset_tensor ) ) ; <nl> - return Status : : OK ( ) ; <nl> + class PrefetchDatasetOpTest : public DatasetOpsTestBaseV2 { } ; <nl> + <nl> + class PrefetchDatasetParams : public DatasetParams { <nl> + public : <nl> + template < typename T > <nl> + PrefetchDatasetParams ( T input_dataset_params , int64 buffer_size , <nl> + DataTypeVector output_dtypes , <nl> + std : : vector < PartialTensorShape > output_shapes , <nl> + int slack_period , bool legacy_autotune , <nl> + string node_name ) <nl> + : DatasetParams ( std : : move ( output_dtypes ) , std : : move ( output_shapes ) , <nl> + std : : move ( node_name ) ) , <nl> + buffer_size_ ( buffer_size ) , <nl> + slack_period_ ( slack_period ) , <nl> + legacy_autotune_ ( legacy_autotune ) { <nl> + input_dataset_params_ . push_back ( absl : : make_unique < T > ( input_dataset_params ) ) ; <nl> + iterator_prefix_ = <nl> + name_utils : : IteratorPrefix ( input_dataset_params . dataset_type ( ) , <nl> + input_dataset_params . iterator_prefix ( ) ) ; <nl> + } <nl> + <nl> + std : : vector < Tensor > GetInputTensors ( ) const override { <nl> + return { CreateTensor < int64 > ( TensorShape ( { } ) , { buffer_size_ } ) } ; <nl> } <nl> <nl> - / / Create a new ` PrefetchDataset ` op kernel . <nl> - Status CreatePrefetchDatasetKernel ( <nl> - const DataTypeVector & output_types , <nl> - const std : : vector < PartialTensorShape > & output_shapes , <nl> - std : : unique_ptr < OpKernel > * op_kernel ) { <nl> - NodeDef node_def = test : : function : : NDef ( <nl> - kNodeName , name_utils : : OpName ( PrefetchDatasetOp : : kDatasetType ) , <nl> - { PrefetchDatasetOp : : kInputDataset , PrefetchDatasetOp : : kBufferSize } , <nl> - { { PrefetchDatasetOp : : kOutputTypes , output_types } , <nl> - { PrefetchDatasetOp : : kOutputShapes , output_shapes } , <nl> - { PrefetchDatasetOp : : kSlackPeriod , 0 } , <nl> - { PrefetchDatasetOp : : kLegacyAutotune , true } } ) ; <nl> - TF_RETURN_IF_ERROR ( CreateOpKernel ( node_def , op_kernel ) ) ; <nl> + Status GetInputNames ( std : : vector < string > * input_names ) const override { <nl> + input_names - > clear ( ) ; <nl> + input_names - > emplace_back ( PrefetchDatasetOp : : kInputDataset ) ; <nl> + input_names - > emplace_back ( PrefetchDatasetOp : : kBufferSize ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> - / / Create a new ` PrefetchDataset ` op kernel context . 
<nl> - Status CreatePrefetchDatasetContext ( <nl> - OpKernel * op_kernel , gtl : : InlinedVector < TensorValue , 4 > * const inputs , <nl> - std : : unique_ptr < OpKernelContext > * context ) { <nl> - TF_RETURN_IF_ERROR ( CheckOpKernelInput ( * op_kernel , * inputs ) ) ; <nl> - TF_RETURN_IF_ERROR ( CreateOpKernelContext ( op_kernel , inputs , context ) ) ; <nl> + Status GetAttributes ( AttributeVector * attr_vector ) const override { <nl> + attr_vector - > clear ( ) ; <nl> + attr_vector - > emplace_back ( PrefetchDatasetOp : : kOutputTypes , output_dtypes_ ) ; <nl> + attr_vector - > emplace_back ( PrefetchDatasetOp : : kOutputShapes , output_shapes_ ) ; <nl> + attr_vector - > emplace_back ( PrefetchDatasetOp : : kSlackPeriod , slack_period_ ) ; <nl> + attr_vector - > emplace_back ( PrefetchDatasetOp : : kLegacyAutotune , <nl> + legacy_autotune_ ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> - } ; <nl> <nl> - struct TestCase { <nl> - std : : vector < Tensor > input_tensors ; <nl> - int64 buffer_size ; <nl> - std : : vector < Tensor > expected_outputs ; <nl> - DataTypeVector expected_output_dtypes ; <nl> - std : : vector < PartialTensorShape > expected_output_shapes ; <nl> - int64 expected_cardinality ; <nl> - std : : vector < int > breakpoints ; <nl> + string dataset_type ( ) const override { <nl> + return PrefetchDatasetOp : : kDatasetType ; <nl> + } <nl> + <nl> + private : <nl> + int64 buffer_size_ ; <nl> + int slack_period_ ; <nl> + bool legacy_autotune_ ; <nl> } ; <nl> <nl> - TestCase PositiveBufferSizeTestCase ( ) { <nl> - return { <nl> - / * input_tensors * / <nl> - { CreateTensor < int64 > ( TensorShape { 10 , 1 } , { 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 } ) } , <nl> - / * buffer_size * / 5 , <nl> - / * expected_outputs * / <nl> - { CreateTensor < int64 > ( TensorShape { 1 } , { 0 } ) , <nl> - CreateTensor < int64 > ( TensorShape { 1 } , { 1 } ) , <nl> - CreateTensor < int64 > ( TensorShape { 1 } , { 2 } ) , <nl> - CreateTensor < int64 > ( TensorShape { 1 } , { 3 } ) , <nl> - CreateTensor < int64 > ( TensorShape { 1 } , { 4 } ) , <nl> - CreateTensor < int64 > ( TensorShape { 1 } , { 5 } ) , <nl> - CreateTensor < int64 > ( TensorShape { 1 } , { 6 } ) , <nl> - CreateTensor < int64 > ( TensorShape { 1 } , { 7 } ) , <nl> - CreateTensor < int64 > ( TensorShape { 1 } , { 8 } ) , <nl> - CreateTensor < int64 > ( TensorShape { 1 } , { 9 } ) } , <nl> - / * expected_output_dtypes * / { DT_INT64 } , <nl> - / * expected_output_shapes * / { PartialTensorShape ( { 1 } ) } , <nl> - / * expected_cardinality * / 10 , <nl> - / * breakpoints * / { 0 , 4 , 11 } } ; <nl> + / / Test case 1 : positive buffer size . 
<nl> + PrefetchDatasetParams PrefetchDatasetParams1 ( ) { <nl> + auto tensor_slice_dataset_params = TensorSliceDatasetParams ( <nl> + / * components = * / { CreateTensor < int64 > ( TensorShape { 10 , 1 } , <nl> + { 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 } ) } , <nl> + / * node_name = * / " tensor_slice " ) ; <nl> + return PrefetchDatasetParams ( <nl> + / * input_dataset_params = * / tensor_slice_dataset_params , <nl> + / * buffer_size = * / 5 , <nl> + / * output_dtypes = * / { DT_INT64 } , <nl> + / * output_shapes = * / { PartialTensorShape ( { 1 } ) } , <nl> + / * slack_period = * / 0 , <nl> + / * legacy_autotune = * / true , <nl> + / * node_name = * / kNodeName ) ; <nl> } <nl> <nl> - TestCase ZeroBufferSizeTestCase ( ) { <nl> - return { <nl> - / * input_tensors * / <nl> - { CreateTensor < int64 > ( TensorShape { 10 , 1 } , { 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 } ) } , <nl> - / * buffer_size * / 0 , <nl> - / * expected_outputs * / <nl> - { CreateTensor < int64 > ( TensorShape { 1 } , { 0 } ) , <nl> - CreateTensor < int64 > ( TensorShape { 1 } , { 1 } ) , <nl> - CreateTensor < int64 > ( TensorShape { 1 } , { 2 } ) , <nl> - CreateTensor < int64 > ( TensorShape { 1 } , { 3 } ) , <nl> - CreateTensor < int64 > ( TensorShape { 1 } , { 4 } ) , <nl> - CreateTensor < int64 > ( TensorShape { 1 } , { 5 } ) , <nl> - CreateTensor < int64 > ( TensorShape { 1 } , { 6 } ) , <nl> - CreateTensor < int64 > ( TensorShape { 1 } , { 7 } ) , <nl> - CreateTensor < int64 > ( TensorShape { 1 } , { 8 } ) , <nl> - CreateTensor < int64 > ( TensorShape { 1 } , { 9 } ) } , <nl> - / * expected_output_dtypes * / { DT_INT64 } , <nl> - / * expected_output_shapes * / { PartialTensorShape ( { 1 } ) } , <nl> - / * expected_cardinality * / 10 , <nl> - / * breakpoints * / { 0 , 4 , 11 } } ; <nl> + / / Test case 2 : zero buffer size . 
<nl> + PrefetchDatasetParams PrefetchDatasetParams2 ( ) { <nl> + auto tensor_slice_dataset_params = TensorSliceDatasetParams ( <nl> + / * components = * / { CreateTensor < int64 > ( TensorShape { 10 , 1 } , <nl> + { 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 } ) } , <nl> + / * node_name = * / " tensor_slice " ) ; <nl> + return PrefetchDatasetParams ( <nl> + / * input_dataset_params = * / tensor_slice_dataset_params , <nl> + / * buffer_size = * / 0 , <nl> + / * output_dtypes = * / { DT_INT64 } , <nl> + / * output_shapes = * / { PartialTensorShape ( { 1 } ) } , <nl> + / * slack_period = * / 0 , <nl> + / * legacy_autotune = * / true , <nl> + / * node_name = * / kNodeName ) ; <nl> } <nl> <nl> - TestCase AutoTuneTestCase ( ) { <nl> - return { <nl> - / * input_tensors * / <nl> - { CreateTensor < int64 > ( TensorShape { 10 , 1 } , { 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 } ) } , <nl> - / * buffer_size * / - 1 , <nl> - / * expected_outputs * / <nl> - { CreateTensor < int64 > ( TensorShape { 1 } , { 0 } ) , <nl> - CreateTensor < int64 > ( TensorShape { 1 } , { 1 } ) , <nl> - CreateTensor < int64 > ( TensorShape { 1 } , { 2 } ) , <nl> - CreateTensor < int64 > ( TensorShape { 1 } , { 3 } ) , <nl> - CreateTensor < int64 > ( TensorShape { 1 } , { 4 } ) , <nl> - CreateTensor < int64 > ( TensorShape { 1 } , { 5 } ) , <nl> - CreateTensor < int64 > ( TensorShape { 1 } , { 6 } ) , <nl> - CreateTensor < int64 > ( TensorShape { 1 } , { 7 } ) , <nl> - CreateTensor < int64 > ( TensorShape { 1 } , { 8 } ) , <nl> - CreateTensor < int64 > ( TensorShape { 1 } , { 9 } ) } , <nl> - / * expected_output_dtypes * / { DT_INT64 } , <nl> - / * expected_output_shapes * / { PartialTensorShape ( { 1 } ) } , <nl> - / * expected_cardinality * / 10 , <nl> - / * breakpoints * / { 0 , 4 , 11 } } ; <nl> + / / Test case 3 : autotune buffer size . <nl> + PrefetchDatasetParams PrefetchDatasetParams3 ( ) { <nl> + auto tensor_slice_dataset_params = TensorSliceDatasetParams ( <nl> + / * components = * / { CreateTensor < int64 > ( TensorShape { 10 , 1 } , <nl> + { 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 } ) } , <nl> + / * node_name = * / " tensor_slice " ) ; <nl> + return PrefetchDatasetParams ( <nl> + / * input_dataset_params = * / tensor_slice_dataset_params , <nl> + / * buffer_size = * / - 1 , <nl> + / * output_dtypes = * / { DT_INT64 } , <nl> + / * output_shapes = * / { PartialTensorShape ( { 1 } ) } , <nl> + / * slack_period = * / 0 , <nl> + / * legacy_autotune = * / true , <nl> + / * node_name = * / kNodeName ) ; <nl> } <nl> <nl> - TestCase InvalidBufferSizeTestCase ( ) { <nl> - return { <nl> - / * input_tensors * / <nl> - { CreateTensor < int64 > ( TensorShape { 10 , 1 } , { 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 } ) } , <nl> - / * buffer_size * / - 2 , <nl> - / * expected_outputs * / { } , <nl> - / * expected_output_dtypes * / { DT_INT64 } , <nl> - / * expected_output_shapes * / { PartialTensorShape ( { 1 } ) } , <nl> - / * expected_cardinality * / 0 , <nl> - / * breakpoints * / { 0 , 4 , 11 } } ; <nl> + / / Test case 4 : slack_period > 0 . 
<nl> + PrefetchDatasetParams PrefetchDatasetParams4 ( ) { <nl> + auto tensor_slice_dataset_params = TensorSliceDatasetParams ( <nl> + / * components = * / { CreateTensor < int64 > ( TensorShape { 10 , 1 } , <nl> + { 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 } ) } , <nl> + / * node_name = * / " tensor_slice " ) ; <nl> + return PrefetchDatasetParams ( <nl> + / * input_dataset_params = * / tensor_slice_dataset_params , <nl> + / * buffer_size = * / - 1 , <nl> + / * output_dtypes = * / { DT_INT64 } , <nl> + / * output_shapes = * / { PartialTensorShape ( { 1 } ) } , <nl> + / * slack_period = * / 5 , <nl> + / * legacy_autotune = * / true , <nl> + / * node_name = * / kNodeName ) ; <nl> } <nl> <nl> - class ParameterizedPrefetchDatasetOpTest <nl> - : public PrefetchDatasetOpTest , <nl> - public : : testing : : WithParamInterface < TestCase > { } ; <nl> - <nl> - TEST_P ( ParameterizedPrefetchDatasetOpTest , GetNext ) { <nl> - int thread_num = 2 , cpu_num = 2 ; <nl> - TF_ASSERT_OK ( InitThreadPool ( thread_num ) ) ; <nl> - TF_ASSERT_OK ( InitFunctionLibraryRuntime ( { } , cpu_num ) ) ; <nl> - <nl> - const TestCase & test_case = GetParam ( ) ; <nl> - Tensor tensor_slice_dataset_tensor ( DT_VARIANT , TensorShape ( { } ) ) ; <nl> - std : : vector < Tensor > inputs_for_tensor_slice_dataset = test_case . input_tensors ; <nl> - TF_ASSERT_OK ( CreateTensorSliceDatasetTensor ( & inputs_for_tensor_slice_dataset , <nl> - & tensor_slice_dataset_tensor ) ) ; <nl> - Tensor buffer_size = <nl> - CreateTensor < int64 > ( TensorShape { } , { test_case . buffer_size } ) ; <nl> - gtl : : InlinedVector < TensorValue , 4 > inputs_for_prefetch_dataset ( <nl> - { TensorValue ( & tensor_slice_dataset_tensor ) , TensorValue ( & buffer_size ) } ) ; <nl> - <nl> - std : : unique_ptr < OpKernel > prefetch_dataset_kernel ; <nl> - TF_ASSERT_OK ( CreatePrefetchDatasetKernel ( test_case . expected_output_dtypes , <nl> - test_case . expected_output_shapes , <nl> - & prefetch_dataset_kernel ) ) ; <nl> - std : : unique_ptr < OpKernelContext > prefetch_dataset_context ; <nl> - TF_ASSERT_OK ( CreatePrefetchDatasetContext ( prefetch_dataset_kernel . get ( ) , <nl> - & inputs_for_prefetch_dataset , <nl> - & prefetch_dataset_context ) ) ; <nl> - DatasetBase * prefetch_dataset ; <nl> - TF_ASSERT_OK ( CreateDataset ( prefetch_dataset_kernel . get ( ) , <nl> - prefetch_dataset_context . get ( ) , <nl> - & prefetch_dataset ) ) ; <nl> - core : : ScopedUnref scoped_unref ( prefetch_dataset ) ; <nl> - <nl> - std : : unique_ptr < IteratorContext > iterator_ctx ; <nl> - TF_ASSERT_OK ( <nl> - CreateIteratorContext ( prefetch_dataset_context . get ( ) , & iterator_ctx ) ) ; <nl> - std : : unique_ptr < IteratorBase > iterator ; <nl> - TF_ASSERT_OK ( prefetch_dataset - > MakeIterator ( iterator_ctx . get ( ) , " Iterator " , <nl> - & iterator ) ) ; <nl> - <nl> - auto expected_outputs_it = test_case . expected_outputs . begin ( ) ; <nl> - bool end_of_sequence = false ; <nl> - std : : vector < Tensor > out_tensors ; <nl> - while ( ! end_of_sequence ) { <nl> - TF_EXPECT_OK ( <nl> - iterator - > GetNext ( iterator_ctx . get ( ) , & out_tensors , & end_of_sequence ) ) ; <nl> - if ( ! end_of_sequence ) { <nl> - for ( const auto & tensor : out_tensors ) { <nl> - EXPECT_NE ( expected_outputs_it , test_case . expected_outputs . end ( ) ) ; <nl> - TF_EXPECT_OK ( ExpectEqual ( tensor , * expected_outputs_it ) ) ; <nl> - expected_outputs_it + + ; <nl> - } <nl> - } <nl> - } <nl> - EXPECT_EQ ( expected_outputs_it , test_case . expected_outputs . 
end ( ) ) ; <nl> + / / Test case 5 : legacy_autotune = false . <nl> + PrefetchDatasetParams PrefetchDatasetParams5 ( ) { <nl> + auto tensor_slice_dataset_params = TensorSliceDatasetParams ( <nl> + / * components = * / { CreateTensor < int64 > ( TensorShape { 10 , 1 } , <nl> + { 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 } ) } , <nl> + / * node_name = * / " tensor_slice " ) ; <nl> + return PrefetchDatasetParams ( <nl> + / * input_dataset_params = * / tensor_slice_dataset_params , <nl> + / * buffer_size = * / - 1 , <nl> + / * output_dtypes = * / { DT_INT64 } , <nl> + / * output_shapes = * / { PartialTensorShape ( { 1 } ) } , <nl> + / * slack_period = * / 5 , <nl> + / * legacy_autotune = * / false , <nl> + / * node_name = * / kNodeName ) ; <nl> } <nl> <nl> - TEST_F ( PrefetchDatasetOpTest , InvalidBufferSize ) { <nl> - int thread_num = 2 , cpu_num = 2 ; <nl> - TF_ASSERT_OK ( InitThreadPool ( thread_num ) ) ; <nl> - TF_ASSERT_OK ( InitFunctionLibraryRuntime ( { } , cpu_num ) ) ; <nl> - <nl> - const TestCase & test_case = InvalidBufferSizeTestCase ( ) ; <nl> - Tensor tensor_slice_dataset_tensor ( DT_VARIANT , TensorShape ( { } ) ) ; <nl> - std : : vector < Tensor > inputs_for_tensor_slice_dataset = test_case . input_tensors ; <nl> - TF_ASSERT_OK ( CreateTensorSliceDatasetTensor ( & inputs_for_tensor_slice_dataset , <nl> - & tensor_slice_dataset_tensor ) ) ; <nl> - Tensor buffer_size = <nl> - CreateTensor < int64 > ( TensorShape { } , { test_case . buffer_size } ) ; <nl> - gtl : : InlinedVector < TensorValue , 4 > inputs_for_prefetch_dataset ( <nl> - { TensorValue ( & tensor_slice_dataset_tensor ) , TensorValue ( & buffer_size ) } ) ; <nl> - <nl> - std : : unique_ptr < OpKernel > prefetch_dataset_kernel ; <nl> - TF_ASSERT_OK ( CreatePrefetchDatasetKernel ( test_case . expected_output_dtypes , <nl> - test_case . expected_output_shapes , <nl> - & prefetch_dataset_kernel ) ) ; <nl> - std : : unique_ptr < OpKernelContext > prefetch_dataset_context ; <nl> - TF_ASSERT_OK ( CreatePrefetchDatasetContext ( prefetch_dataset_kernel . get ( ) , <nl> - & inputs_for_prefetch_dataset , <nl> - & prefetch_dataset_context ) ) ; <nl> - DatasetBase * prefetch_dataset ; <nl> - EXPECT_EQ ( CreateDataset ( prefetch_dataset_kernel . get ( ) , <nl> - prefetch_dataset_context . get ( ) , & prefetch_dataset ) <nl> - . 
code ( ) , <nl> - tensorflow : : error : : INVALID_ARGUMENT ) ; <nl> + PrefetchDatasetParams InvalidBufferSizePrefetchDatasetParams ( ) { <nl> + auto tensor_slice_dataset_params = TensorSliceDatasetParams ( <nl> + / * components = * / { CreateTensor < int64 > ( TensorShape { 10 , 1 } , <nl> + { 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 } ) } , <nl> + / * node_name = * / " tensor_slice " ) ; <nl> + return PrefetchDatasetParams ( <nl> + / * input_dataset_params = * / tensor_slice_dataset_params , <nl> + / * buffer_size = * / - 2 , <nl> + / * output_dtypes = * / { DT_INT64 } , <nl> + / * output_shapes = * / { PartialTensorShape ( { 1 } ) } , <nl> + / * slack_period = * / 0 , <nl> + / * legacy_autotune = * / true , <nl> + / * node_name = * / kNodeName ) ; <nl> + } <nl> + <nl> + std : : vector < GetNextTestCase < PrefetchDatasetParams > > GetNextTestCases ( ) { <nl> + return { <nl> + { / * dataset_params = * / PrefetchDatasetParams1 ( ) , <nl> + / * expected_outputs = * / <nl> + CreateTensors < int64 > ( <nl> + TensorShape { 1 } , { { 0 } , { 1 } , { 2 } , { 3 } , { 4 } , { 5 } , { 6 } , { 7 } , { 8 } , { 9 } } ) } , <nl> + { / * dataset_params = * / PrefetchDatasetParams2 ( ) , <nl> + / * expected_outputs = * / <nl> + CreateTensors < int64 > ( <nl> + TensorShape { 1 } , { { 0 } , { 1 } , { 2 } , { 3 } , { 4 } , { 5 } , { 6 } , { 7 } , { 8 } , { 9 } } ) } , <nl> + { / * dataset_params = * / <nl> + PrefetchDatasetParams3 ( ) , <nl> + / * expected_outputs = * / <nl> + CreateTensors < int64 > ( <nl> + TensorShape { 1 } , { { 0 } , { 1 } , { 2 } , { 3 } , { 4 } , { 5 } , { 6 } , { 7 } , { 8 } , { 9 } } ) } , <nl> + { / * dataset_params = * / <nl> + PrefetchDatasetParams4 ( ) , <nl> + / * expected_outputs = * / <nl> + CreateTensors < int64 > ( <nl> + TensorShape { 1 } , { { 0 } , { 1 } , { 2 } , { 3 } , { 4 } , { 5 } , { 6 } , { 7 } , { 8 } , { 9 } } ) } , <nl> + { / * dataset_params = * / <nl> + PrefetchDatasetParams5 ( ) , <nl> + / * expected_outputs = * / <nl> + CreateTensors < int64 > ( <nl> + TensorShape { 1 } , <nl> + { { 0 } , { 1 } , { 2 } , { 3 } , { 4 } , { 5 } , { 6 } , { 7 } , { 8 } , { 9 } } ) } } ; <nl> } <nl> <nl> + ITERATOR_GET_NEXT_TEST_P ( PrefetchDatasetOpTest , PrefetchDatasetParams , <nl> + GetNextTestCases ( ) ) <nl> + <nl> TEST_F ( PrefetchDatasetOpTest , DatasetNodeName ) { <nl> - int thread_num = 2 , cpu_num = 2 ; <nl> - TF_ASSERT_OK ( InitThreadPool ( thread_num ) ) ; <nl> - TF_ASSERT_OK ( InitFunctionLibraryRuntime ( { } , cpu_num ) ) ; <nl> - <nl> - const TestCase & test_case = PositiveBufferSizeTestCase ( ) ; <nl> - Tensor tensor_slice_dataset_tensor ( DT_VARIANT , TensorShape ( { } ) ) ; <nl> - std : : vector < Tensor > inputs_for_tensor_slice_dataset = test_case . input_tensors ; <nl> - TF_ASSERT_OK ( CreateTensorSliceDatasetTensor ( & inputs_for_tensor_slice_dataset , <nl> - & tensor_slice_dataset_tensor ) ) ; <nl> - Tensor buffer_size = <nl> - CreateTensor < int64 > ( TensorShape { } , { test_case . buffer_size } ) ; <nl> - gtl : : InlinedVector < TensorValue , 4 > inputs_for_prefetch_dataset ( <nl> - { TensorValue ( & tensor_slice_dataset_tensor ) , TensorValue ( & buffer_size ) } ) ; <nl> - <nl> - std : : unique_ptr < OpKernel > prefetch_dataset_kernel ; <nl> - TF_ASSERT_OK ( CreatePrefetchDatasetKernel ( test_case . expected_output_dtypes , <nl> - test_case . 
expected_output_shapes , <nl> - & prefetch_dataset_kernel ) ) ; <nl> - std : : unique_ptr < OpKernelContext > prefetch_dataset_context ; <nl> - TF_ASSERT_OK ( CreatePrefetchDatasetContext ( prefetch_dataset_kernel . get ( ) , <nl> - & inputs_for_prefetch_dataset , <nl> - & prefetch_dataset_context ) ) ; <nl> - DatasetBase * prefetch_dataset ; <nl> - TF_ASSERT_OK ( CreateDataset ( prefetch_dataset_kernel . get ( ) , <nl> - prefetch_dataset_context . get ( ) , <nl> - & prefetch_dataset ) ) ; <nl> - core : : ScopedUnref scoped_unref ( prefetch_dataset ) ; <nl> - <nl> - EXPECT_EQ ( prefetch_dataset - > node_name ( ) , kNodeName ) ; <nl> + auto dataset_params = PrefetchDatasetParams1 ( ) ; <nl> + TF_ASSERT_OK ( Initialize ( dataset_params ) ) ; <nl> + TF_ASSERT_OK ( CheckDatasetNodeName ( dataset_params . node_name ( ) ) ) ; <nl> } <nl> <nl> TEST_F ( PrefetchDatasetOpTest , DatasetTypeString ) { <nl> - int thread_num = 2 , cpu_num = 2 ; <nl> - TF_ASSERT_OK ( InitThreadPool ( thread_num ) ) ; <nl> - TF_ASSERT_OK ( InitFunctionLibraryRuntime ( { } , cpu_num ) ) ; <nl> - <nl> - const TestCase & test_case = PositiveBufferSizeTestCase ( ) ; <nl> - Tensor tensor_slice_dataset_tensor ( DT_VARIANT , TensorShape ( { } ) ) ; <nl> - std : : vector < Tensor > inputs_for_tensor_slice_dataset = test_case . input_tensors ; <nl> - TF_ASSERT_OK ( CreateTensorSliceDatasetTensor ( & inputs_for_tensor_slice_dataset , <nl> - & tensor_slice_dataset_tensor ) ) ; <nl> - Tensor buffer_size = <nl> - CreateTensor < int64 > ( TensorShape { } , { test_case . buffer_size } ) ; <nl> - gtl : : InlinedVector < TensorValue , 4 > inputs_for_prefetch_dataset ( <nl> - { TensorValue ( & tensor_slice_dataset_tensor ) , TensorValue ( & buffer_size ) } ) ; <nl> - <nl> - std : : unique_ptr < OpKernel > prefetch_dataset_kernel ; <nl> - TF_ASSERT_OK ( CreatePrefetchDatasetKernel ( test_case . expected_output_dtypes , <nl> - test_case . expected_output_shapes , <nl> - & prefetch_dataset_kernel ) ) ; <nl> - std : : unique_ptr < OpKernelContext > prefetch_dataset_context ; <nl> - TF_ASSERT_OK ( CreatePrefetchDatasetContext ( prefetch_dataset_kernel . get ( ) , <nl> - & inputs_for_prefetch_dataset , <nl> - & prefetch_dataset_context ) ) ; <nl> - DatasetBase * prefetch_dataset ; <nl> - TF_ASSERT_OK ( CreateDataset ( prefetch_dataset_kernel . get ( ) , <nl> - prefetch_dataset_context . get ( ) , <nl> - & prefetch_dataset ) ) ; <nl> - core : : ScopedUnref scoped_unref ( prefetch_dataset ) ; <nl> - <nl> - EXPECT_EQ ( prefetch_dataset - > type_string ( ) , <nl> - name_utils : : OpName ( PrefetchDatasetOp : : kDatasetType ) ) ; <nl> + auto dataset_params = PrefetchDatasetParams1 ( ) ; <nl> + TF_ASSERT_OK ( Initialize ( dataset_params ) ) ; <nl> + TF_ASSERT_OK ( CheckDatasetTypeString ( <nl> + name_utils : : OpName ( PrefetchDatasetOp : : kDatasetType ) ) ) ; <nl> } <nl> <nl> TEST_F ( PrefetchDatasetOpTest , DatasetOutputDtypes ) { <nl> - int thread_num = 2 , cpu_num = 2 ; <nl> - TF_ASSERT_OK ( InitThreadPool ( thread_num ) ) ; <nl> - TF_ASSERT_OK ( InitFunctionLibraryRuntime ( { } , cpu_num ) ) ; <nl> - <nl> - const TestCase & test_case = PositiveBufferSizeTestCase ( ) ; <nl> - Tensor tensor_slice_dataset_tensor ( DT_VARIANT , TensorShape ( { } ) ) ; <nl> - std : : vector < Tensor > inputs_for_tensor_slice_dataset = test_case . 
input_tensors ; <nl> - TF_ASSERT_OK ( CreateTensorSliceDatasetTensor ( & inputs_for_tensor_slice_dataset , <nl> - & tensor_slice_dataset_tensor ) ) ; <nl> - Tensor buffer_size = <nl> - CreateTensor < int64 > ( TensorShape { } , { test_case . buffer_size } ) ; <nl> - gtl : : InlinedVector < TensorValue , 4 > inputs_for_prefetch_dataset ( <nl> - { TensorValue ( & tensor_slice_dataset_tensor ) , TensorValue ( & buffer_size ) } ) ; <nl> - <nl> - std : : unique_ptr < OpKernel > prefetch_dataset_kernel ; <nl> - TF_ASSERT_OK ( CreatePrefetchDatasetKernel ( test_case . expected_output_dtypes , <nl> - test_case . expected_output_shapes , <nl> - & prefetch_dataset_kernel ) ) ; <nl> - std : : unique_ptr < OpKernelContext > prefetch_dataset_context ; <nl> - TF_ASSERT_OK ( CreatePrefetchDatasetContext ( prefetch_dataset_kernel . get ( ) , <nl> - & inputs_for_prefetch_dataset , <nl> - & prefetch_dataset_context ) ) ; <nl> - DatasetBase * prefetch_dataset ; <nl> - TF_ASSERT_OK ( CreateDataset ( prefetch_dataset_kernel . get ( ) , <nl> - prefetch_dataset_context . get ( ) , <nl> - & prefetch_dataset ) ) ; <nl> - core : : ScopedUnref scoped_unref ( prefetch_dataset ) ; <nl> - <nl> - TF_EXPECT_OK ( VerifyTypesMatch ( prefetch_dataset - > output_dtypes ( ) , <nl> - test_case . expected_output_dtypes ) ) ; <nl> + auto dataset_params = PrefetchDatasetParams1 ( ) ; <nl> + TF_ASSERT_OK ( Initialize ( dataset_params ) ) ; <nl> + TF_ASSERT_OK ( CheckDatasetOutputDtypes ( { DT_INT64 } ) ) ; <nl> } <nl> <nl> TEST_F ( PrefetchDatasetOpTest , DatasetOutputShapes ) { <nl> - int thread_num = 2 , cpu_num = 2 ; <nl> - TF_ASSERT_OK ( InitThreadPool ( thread_num ) ) ; <nl> - TF_ASSERT_OK ( InitFunctionLibraryRuntime ( { } , cpu_num ) ) ; <nl> - <nl> - const TestCase & test_case = PositiveBufferSizeTestCase ( ) ; <nl> - Tensor tensor_slice_dataset_tensor ( DT_VARIANT , TensorShape ( { } ) ) ; <nl> - std : : vector < Tensor > inputs_for_tensor_slice_dataset = test_case . input_tensors ; <nl> - TF_ASSERT_OK ( CreateTensorSliceDatasetTensor ( & inputs_for_tensor_slice_dataset , <nl> - & tensor_slice_dataset_tensor ) ) ; <nl> - Tensor buffer_size = <nl> - CreateTensor < int64 > ( TensorShape { } , { test_case . buffer_size } ) ; <nl> - gtl : : InlinedVector < TensorValue , 4 > inputs_for_prefetch_dataset ( <nl> - { TensorValue ( & tensor_slice_dataset_tensor ) , TensorValue ( & buffer_size ) } ) ; <nl> - <nl> - std : : unique_ptr < OpKernel > prefetch_dataset_kernel ; <nl> - TF_ASSERT_OK ( CreatePrefetchDatasetKernel ( test_case . expected_output_dtypes , <nl> - test_case . expected_output_shapes , <nl> - & prefetch_dataset_kernel ) ) ; <nl> - std : : unique_ptr < OpKernelContext > prefetch_dataset_context ; <nl> - TF_ASSERT_OK ( CreatePrefetchDatasetContext ( prefetch_dataset_kernel . get ( ) , <nl> - & inputs_for_prefetch_dataset , <nl> - & prefetch_dataset_context ) ) ; <nl> - DatasetBase * prefetch_dataset ; <nl> - TF_ASSERT_OK ( CreateDataset ( prefetch_dataset_kernel . get ( ) , <nl> - prefetch_dataset_context . get ( ) , <nl> - & prefetch_dataset ) ) ; <nl> - core : : ScopedUnref scoped_unref ( prefetch_dataset ) ; <nl> - <nl> - TF_EXPECT_OK ( VerifyShapesCompatible ( prefetch_dataset - > output_shapes ( ) , <nl> - test_case . expected_output_shapes ) ) ; <nl> + auto dataset_params = PrefetchDatasetParams1 ( ) ; <nl> + TF_ASSERT_OK ( Initialize ( dataset_params ) ) ; <nl> + TF_ASSERT_OK ( CheckDatasetOutputShapes ( dataset_params . 
output_shapes ( ) ) ) ; <nl> } <nl> <nl> - TEST_P ( ParameterizedPrefetchDatasetOpTest , Cardinality ) { <nl> - int thread_num = 2 , cpu_num = 2 ; <nl> - TF_ASSERT_OK ( InitThreadPool ( thread_num ) ) ; <nl> - TF_ASSERT_OK ( InitFunctionLibraryRuntime ( { } , cpu_num ) ) ; <nl> - <nl> - const TestCase & test_case = PositiveBufferSizeTestCase ( ) ; <nl> - Tensor tensor_slice_dataset_tensor ( DT_VARIANT , TensorShape ( { } ) ) ; <nl> - std : : vector < Tensor > inputs_for_tensor_slice_dataset = test_case . input_tensors ; <nl> - TF_ASSERT_OK ( CreateTensorSliceDatasetTensor ( & inputs_for_tensor_slice_dataset , <nl> - & tensor_slice_dataset_tensor ) ) ; <nl> - Tensor buffer_size = <nl> - CreateTensor < int64 > ( TensorShape { } , { test_case . buffer_size } ) ; <nl> - gtl : : InlinedVector < TensorValue , 4 > inputs_for_prefetch_dataset ( <nl> - { TensorValue ( & tensor_slice_dataset_tensor ) , TensorValue ( & buffer_size ) } ) ; <nl> - <nl> - std : : unique_ptr < OpKernel > prefetch_dataset_kernel ; <nl> - TF_ASSERT_OK ( CreatePrefetchDatasetKernel ( test_case . expected_output_dtypes , <nl> - test_case . expected_output_shapes , <nl> - & prefetch_dataset_kernel ) ) ; <nl> - std : : unique_ptr < OpKernelContext > prefetch_dataset_context ; <nl> - TF_ASSERT_OK ( CreatePrefetchDatasetContext ( prefetch_dataset_kernel . get ( ) , <nl> - & inputs_for_prefetch_dataset , <nl> - & prefetch_dataset_context ) ) ; <nl> - DatasetBase * prefetch_dataset ; <nl> - TF_ASSERT_OK ( CreateDataset ( prefetch_dataset_kernel . get ( ) , <nl> - prefetch_dataset_context . get ( ) , <nl> - & prefetch_dataset ) ) ; <nl> - core : : ScopedUnref scoped_unref ( prefetch_dataset ) ; <nl> - <nl> - EXPECT_EQ ( prefetch_dataset - > Cardinality ( ) , test_case . expected_cardinality ) ; <nl> + std : : vector < CardinalityTestCase < PrefetchDatasetParams > > CardinalityTestCases ( ) { <nl> + return { { / * dataset_params = * / PrefetchDatasetParams1 ( ) , <nl> + / * expected_cardinality = * / 10 } , <nl> + { / * dataset_params = * / PrefetchDatasetParams2 ( ) , <nl> + / * expected_cardinality = * / 10 } , <nl> + { / * dataset_params = * / PrefetchDatasetParams3 ( ) , <nl> + / * expected_cardinality = * / 10 } , <nl> + { / * dataset_params = * / PrefetchDatasetParams4 ( ) , <nl> + / * expected_cardinality = * / 10 } , <nl> + { / * dataset_params = * / PrefetchDatasetParams5 ( ) , <nl> + / * expected_cardinality = * / 10 } } ; <nl> } <nl> <nl> + DATASET_CARDINALITY_TEST_P ( PrefetchDatasetOpTest , PrefetchDatasetParams , <nl> + CardinalityTestCases ( ) ) <nl> + <nl> TEST_F ( PrefetchDatasetOpTest , IteratorOutputDtypes ) { <nl> - int thread_num = 2 , cpu_num = 2 ; <nl> - TF_ASSERT_OK ( InitThreadPool ( thread_num ) ) ; <nl> - TF_ASSERT_OK ( InitFunctionLibraryRuntime ( { } , cpu_num ) ) ; <nl> - <nl> - const TestCase & test_case = PositiveBufferSizeTestCase ( ) ; <nl> - Tensor tensor_slice_dataset_tensor ( DT_VARIANT , TensorShape ( { } ) ) ; <nl> - std : : vector < Tensor > inputs_for_tensor_slice_dataset = test_case . input_tensors ; <nl> - TF_ASSERT_OK ( CreateTensorSliceDatasetTensor ( & inputs_for_tensor_slice_dataset , <nl> - & tensor_slice_dataset_tensor ) ) ; <nl> - Tensor buffer_size = <nl> - CreateTensor < int64 > ( TensorShape { } , { test_case . 
buffer_size } ) ; <nl> - gtl : : InlinedVector < TensorValue , 4 > inputs_for_prefetch_dataset ( <nl> - { TensorValue ( & tensor_slice_dataset_tensor ) , TensorValue ( & buffer_size ) } ) ; <nl> - <nl> - std : : unique_ptr < OpKernel > prefetch_dataset_kernel ; <nl> - TF_ASSERT_OK ( CreatePrefetchDatasetKernel ( test_case . expected_output_dtypes , <nl> - test_case . expected_output_shapes , <nl> - & prefetch_dataset_kernel ) ) ; <nl> - std : : unique_ptr < OpKernelContext > prefetch_dataset_context ; <nl> - TF_ASSERT_OK ( CreatePrefetchDatasetContext ( prefetch_dataset_kernel . get ( ) , <nl> - & inputs_for_prefetch_dataset , <nl> - & prefetch_dataset_context ) ) ; <nl> - DatasetBase * prefetch_dataset ; <nl> - TF_ASSERT_OK ( CreateDataset ( prefetch_dataset_kernel . get ( ) , <nl> - prefetch_dataset_context . get ( ) , <nl> - & prefetch_dataset ) ) ; <nl> - core : : ScopedUnref scoped_unref ( prefetch_dataset ) ; <nl> - <nl> - std : : unique_ptr < IteratorContext > iterator_ctx ; <nl> - TF_ASSERT_OK ( <nl> - CreateIteratorContext ( prefetch_dataset_context . get ( ) , & iterator_ctx ) ) ; <nl> - std : : unique_ptr < IteratorBase > iterator ; <nl> - TF_ASSERT_OK ( prefetch_dataset - > MakeIterator ( iterator_ctx . get ( ) , " Iterator " , <nl> - & iterator ) ) ; <nl> - <nl> - TF_EXPECT_OK ( VerifyTypesMatch ( iterator - > output_dtypes ( ) , <nl> - test_case . expected_output_dtypes ) ) ; <nl> + auto dataset_params = PrefetchDatasetParams1 ( ) ; <nl> + TF_ASSERT_OK ( Initialize ( dataset_params ) ) ; <nl> + TF_ASSERT_OK ( CheckIteratorOutputDtypes ( { DT_INT64 } ) ) ; <nl> } <nl> <nl> TEST_F ( PrefetchDatasetOpTest , IteratorOutputShapes ) { <nl> - int thread_num = 2 , cpu_num = 2 ; <nl> - TF_ASSERT_OK ( InitThreadPool ( thread_num ) ) ; <nl> - TF_ASSERT_OK ( InitFunctionLibraryRuntime ( { } , cpu_num ) ) ; <nl> - <nl> - const TestCase & test_case = PositiveBufferSizeTestCase ( ) ; <nl> - Tensor tensor_slice_dataset_tensor ( DT_VARIANT , TensorShape ( { } ) ) ; <nl> - std : : vector < Tensor > inputs_for_tensor_slice_dataset = test_case . input_tensors ; <nl> - TF_ASSERT_OK ( CreateTensorSliceDatasetTensor ( & inputs_for_tensor_slice_dataset , <nl> - & tensor_slice_dataset_tensor ) ) ; <nl> - Tensor buffer_size = <nl> - CreateTensor < int64 > ( TensorShape { } , { test_case . buffer_size } ) ; <nl> - gtl : : InlinedVector < TensorValue , 4 > inputs_for_prefetch_dataset ( <nl> - { TensorValue ( & tensor_slice_dataset_tensor ) , TensorValue ( & buffer_size ) } ) ; <nl> - <nl> - std : : unique_ptr < OpKernel > prefetch_dataset_kernel ; <nl> - TF_ASSERT_OK ( CreatePrefetchDatasetKernel ( test_case . expected_output_dtypes , <nl> - test_case . expected_output_shapes , <nl> - & prefetch_dataset_kernel ) ) ; <nl> - std : : unique_ptr < OpKernelContext > prefetch_dataset_context ; <nl> - TF_ASSERT_OK ( CreatePrefetchDatasetContext ( prefetch_dataset_kernel . get ( ) , <nl> - & inputs_for_prefetch_dataset , <nl> - & prefetch_dataset_context ) ) ; <nl> - DatasetBase * prefetch_dataset ; <nl> - TF_ASSERT_OK ( CreateDataset ( prefetch_dataset_kernel . get ( ) , <nl> - prefetch_dataset_context . get ( ) , <nl> - & prefetch_dataset ) ) ; <nl> - core : : ScopedUnref scoped_unref ( prefetch_dataset ) ; <nl> - <nl> - std : : unique_ptr < IteratorContext > iterator_ctx ; <nl> - TF_ASSERT_OK ( <nl> - CreateIteratorContext ( prefetch_dataset_context . get ( ) , & iterator_ctx ) ) ; <nl> - std : : unique_ptr < IteratorBase > iterator ; <nl> - TF_ASSERT_OK ( prefetch_dataset - > MakeIterator ( iterator_ctx . 
get ( ) , " Iterator " , <nl> - & iterator ) ) ; <nl> - <nl> - TF_EXPECT_OK ( VerifyShapesCompatible ( iterator - > output_shapes ( ) , <nl> - test_case . expected_output_shapes ) ) ; <nl> + auto dataset_params = PrefetchDatasetParams1 ( ) ; <nl> + TF_ASSERT_OK ( Initialize ( dataset_params ) ) ; <nl> + TF_ASSERT_OK ( CheckIteratorOutputShapes ( dataset_params . output_shapes ( ) ) ) ; <nl> } <nl> <nl> TEST_F ( PrefetchDatasetOpTest , IteratorOutputPrefix ) { <nl> - int thread_num = 2 , cpu_num = 2 ; <nl> - TF_ASSERT_OK ( InitThreadPool ( thread_num ) ) ; <nl> - TF_ASSERT_OK ( InitFunctionLibraryRuntime ( { } , cpu_num ) ) ; <nl> - <nl> - const TestCase & test_case = PositiveBufferSizeTestCase ( ) ; <nl> - Tensor tensor_slice_dataset_tensor ( DT_VARIANT , TensorShape ( { } ) ) ; <nl> - std : : vector < Tensor > inputs_for_tensor_slice_dataset = test_case . input_tensors ; <nl> - TF_ASSERT_OK ( CreateTensorSliceDatasetTensor ( & inputs_for_tensor_slice_dataset , <nl> - & tensor_slice_dataset_tensor ) ) ; <nl> - Tensor buffer_size = <nl> - CreateTensor < int64 > ( TensorShape { } , { test_case . buffer_size } ) ; <nl> - gtl : : InlinedVector < TensorValue , 4 > inputs_for_prefetch_dataset ( <nl> - { TensorValue ( & tensor_slice_dataset_tensor ) , TensorValue ( & buffer_size ) } ) ; <nl> - <nl> - std : : unique_ptr < OpKernel > prefetch_dataset_kernel ; <nl> - TF_ASSERT_OK ( CreatePrefetchDatasetKernel ( test_case . expected_output_dtypes , <nl> - test_case . expected_output_shapes , <nl> - & prefetch_dataset_kernel ) ) ; <nl> - std : : unique_ptr < OpKernelContext > prefetch_dataset_context ; <nl> - TF_ASSERT_OK ( CreatePrefetchDatasetContext ( prefetch_dataset_kernel . get ( ) , <nl> - & inputs_for_prefetch_dataset , <nl> - & prefetch_dataset_context ) ) ; <nl> - DatasetBase * prefetch_dataset ; <nl> - TF_ASSERT_OK ( CreateDataset ( prefetch_dataset_kernel . get ( ) , <nl> - prefetch_dataset_context . get ( ) , <nl> - & prefetch_dataset ) ) ; <nl> - core : : ScopedUnref scoped_unref ( prefetch_dataset ) ; <nl> - <nl> - std : : unique_ptr < IteratorContext > iterator_ctx ; <nl> - TF_ASSERT_OK ( <nl> - CreateIteratorContext ( prefetch_dataset_context . get ( ) , & iterator_ctx ) ) ; <nl> - std : : unique_ptr < IteratorBase > iterator ; <nl> - TF_ASSERT_OK ( prefetch_dataset - > MakeIterator ( iterator_ctx . get ( ) , " Iterator " , <nl> - & iterator ) ) ; <nl> - <nl> - EXPECT_EQ ( <nl> - iterator - > prefix ( ) , <nl> - name_utils : : IteratorPrefix ( PrefetchDatasetOp : : kDatasetType , " Iterator " ) ) ; <nl> + auto dataset_params = PrefetchDatasetParams1 ( ) ; <nl> + TF_ASSERT_OK ( Initialize ( dataset_params ) ) ; <nl> + TF_ASSERT_OK ( CheckIteratorPrefix ( name_utils : : IteratorPrefix ( <nl> + PrefetchDatasetOp : : kDatasetType , dataset_params . iterator_prefix ( ) ) ) ) ; <nl> } <nl> <nl> - TEST_P ( ParameterizedPrefetchDatasetOpTest , Roundtrip ) { <nl> - int thread_num = 2 , cpu_num = 2 ; <nl> - TF_ASSERT_OK ( InitThreadPool ( thread_num ) ) ; <nl> - TF_ASSERT_OK ( InitFunctionLibraryRuntime ( { } , cpu_num ) ) ; <nl> - <nl> - const TestCase & test_case = PositiveBufferSizeTestCase ( ) ; <nl> - Tensor tensor_slice_dataset_tensor ( DT_VARIANT , TensorShape ( { } ) ) ; <nl> - std : : vector < Tensor > inputs_for_tensor_slice_dataset = test_case . input_tensors ; <nl> - TF_ASSERT_OK ( CreateTensorSliceDatasetTensor ( & inputs_for_tensor_slice_dataset , <nl> - & tensor_slice_dataset_tensor ) ) ; <nl> - Tensor buffer_size = <nl> - CreateTensor < int64 > ( TensorShape { } , { test_case . 
buffer_size } ) ; <nl> - gtl : : InlinedVector < TensorValue , 4 > inputs_for_prefetch_dataset ( <nl> - { TensorValue ( & tensor_slice_dataset_tensor ) , TensorValue ( & buffer_size ) } ) ; <nl> - <nl> - std : : unique_ptr < OpKernel > prefetch_dataset_kernel ; <nl> - TF_ASSERT_OK ( CreatePrefetchDatasetKernel ( test_case . expected_output_dtypes , <nl> - test_case . expected_output_shapes , <nl> - & prefetch_dataset_kernel ) ) ; <nl> - std : : unique_ptr < OpKernelContext > prefetch_dataset_context ; <nl> - TF_ASSERT_OK ( CreatePrefetchDatasetContext ( prefetch_dataset_kernel . get ( ) , <nl> - & inputs_for_prefetch_dataset , <nl> - & prefetch_dataset_context ) ) ; <nl> - DatasetBase * prefetch_dataset ; <nl> - TF_ASSERT_OK ( CreateDataset ( prefetch_dataset_kernel . get ( ) , <nl> - prefetch_dataset_context . get ( ) , <nl> - & prefetch_dataset ) ) ; <nl> - core : : ScopedUnref scoped_unref ( prefetch_dataset ) ; <nl> - <nl> - std : : unique_ptr < IteratorContext > iterator_ctx ; <nl> - TF_ASSERT_OK ( <nl> - CreateIteratorContext ( prefetch_dataset_context . get ( ) , & iterator_ctx ) ) ; <nl> - std : : unique_ptr < IteratorBase > iterator ; <nl> - TF_ASSERT_OK ( prefetch_dataset - > MakeIterator ( iterator_ctx . get ( ) , " Iterator " , <nl> - & iterator ) ) ; <nl> - <nl> - std : : unique_ptr < SerializationContext > serialization_ctx ; <nl> - TF_ASSERT_OK ( CreateSerializationContext ( & serialization_ctx ) ) ; <nl> - bool end_of_sequence = false ; <nl> - std : : vector < Tensor > out_tensors ; <nl> - int cur_iteration = 0 ; <nl> - auto expected_outputs_it = test_case . expected_outputs . begin ( ) ; <nl> - const std : : vector < int > & breakpoints = test_case . breakpoints ; <nl> - for ( int breakpoint : breakpoints ) { <nl> - VariantTensorData data ; <nl> - VariantTensorDataWriter writer ( & data ) ; <nl> - TF_EXPECT_OK ( iterator - > Save ( serialization_ctx . get ( ) , & writer ) ) ; <nl> - TF_EXPECT_OK ( writer . Flush ( ) ) ; <nl> - VariantTensorDataReader reader ( & data ) ; <nl> - TF_EXPECT_OK ( RestoreIterator ( iterator_ctx . get ( ) , & reader , " Iterator " , <nl> - * prefetch_dataset , & iterator ) ) ; <nl> - <nl> - while ( cur_iteration < = breakpoint ) { <nl> - TF_EXPECT_OK ( iterator - > GetNext ( iterator_ctx . get ( ) , & out_tensors , <nl> - & end_of_sequence ) ) ; <nl> - if ( ! end_of_sequence ) { <nl> - for ( auto & tensor : out_tensors ) { <nl> - EXPECT_NE ( expected_outputs_it , test_case . expected_outputs . end ( ) ) ; <nl> - TF_EXPECT_OK ( ExpectEqual ( tensor , * expected_outputs_it ) ) ; <nl> - expected_outputs_it + + ; <nl> - } <nl> - } <nl> - cur_iteration + + ; <nl> - } <nl> - <nl> - if ( breakpoint > = test_case . expected_outputs . size ( ) ) { <nl> - EXPECT_TRUE ( end_of_sequence ) ; <nl> - EXPECT_EQ ( expected_outputs_it , test_case . expected_outputs . 
end ( ) ) ; <nl> - } else { <nl> - EXPECT_FALSE ( end_of_sequence ) ; <nl> - } <nl> - } <nl> + std : : vector < IteratorSaveAndRestoreTestCase < PrefetchDatasetParams > > <nl> + IteratorSaveAndRestoreTestCases ( ) { <nl> + return { <nl> + { / * dataset_params = * / PrefetchDatasetParams1 ( ) , <nl> + / * breakpoints = * / { 0 , 4 , 11 } , <nl> + / * expected_outputs = * / <nl> + CreateTensors < int64 > ( <nl> + TensorShape { 1 } , { { 0 } , { 1 } , { 2 } , { 3 } , { 4 } , { 5 } , { 6 } , { 7 } , { 8 } , { 9 } } ) } , <nl> + { / * dataset_params = * / PrefetchDatasetParams2 ( ) , <nl> + / * breakpoints = * / { 0 , 4 , 11 } , <nl> + / * expected_outputs = * / <nl> + CreateTensors < int64 > ( <nl> + TensorShape { 1 } , { { 0 } , { 1 } , { 2 } , { 3 } , { 4 } , { 5 } , { 6 } , { 7 } , { 8 } , { 9 } } ) } , <nl> + { / * dataset_params = * / <nl> + PrefetchDatasetParams3 ( ) , <nl> + / * breakpoints = * / { 0 , 4 , 11 } , <nl> + / * expected_outputs = * / <nl> + CreateTensors < int64 > ( <nl> + TensorShape { 1 } , { { 0 } , { 1 } , { 2 } , { 3 } , { 4 } , { 5 } , { 6 } , { 7 } , { 8 } , { 9 } } ) } , <nl> + { / * dataset_params = * / <nl> + PrefetchDatasetParams4 ( ) , <nl> + / * breakpoints = * / { 0 , 4 , 11 } , <nl> + / * expected_outputs = * / <nl> + CreateTensors < int64 > ( <nl> + TensorShape { 1 } , { { 0 } , { 1 } , { 2 } , { 3 } , { 4 } , { 5 } , { 6 } , { 7 } , { 8 } , { 9 } } ) } , <nl> + { / * dataset_params = * / <nl> + PrefetchDatasetParams5 ( ) , <nl> + / * breakpoints = * / { 0 , 4 , 11 } , <nl> + / * expected_outputs = * / <nl> + CreateTensors < int64 > ( <nl> + TensorShape { 1 } , <nl> + { { 0 } , { 1 } , { 2 } , { 3 } , { 4 } , { 5 } , { 6 } , { 7 } , { 8 } , { 9 } } ) } } ; <nl> } <nl> <nl> - INSTANTIATE_TEST_SUITE_P ( PreFetchDatasetOpTest , <nl> - ParameterizedPrefetchDatasetOpTest , <nl> - : : testing : : ValuesIn ( std : : vector < TestCase > ( <nl> - { PositiveBufferSizeTestCase ( ) , <nl> - ZeroBufferSizeTestCase ( ) , AutoTuneTestCase ( ) } ) ) ) ; <nl> + ITERATOR_SAVE_AND_RESTORE_TEST_P ( PrefetchDatasetOpTest , PrefetchDatasetParams , <nl> + IteratorSaveAndRestoreTestCases ( ) ) <nl> + <nl> + TEST_F ( PrefetchDatasetOpTest , InvalidBufferSize ) { <nl> + auto dataset_params = InvalidBufferSizePrefetchDatasetParams ( ) ; <nl> + EXPECT_EQ ( Initialize ( dataset_params ) . code ( ) , error : : INVALID_ARGUMENT ) ; <nl> + } <nl> <nl> } / / namespace <nl> } / / namespace data <nl>
|
Refactor PrefetchDatasetOpTest
|
tensorflow/tensorflow
|
402c69253836ff7b1fc6db19f5d8fb0af4e3cc3b
|
2019-10-15T17:08:21Z
|
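The refactor above folds per-test boilerplate into data-driven test cases that are fed to shared test macros. A minimal sketch of the same table-driven pattern with GoogleTest follows; `CardinalityCase`, `FakeDataset`, and the suite names are illustrative stand-ins, not the TensorFlow test fixtures from the diff.

```cpp
// Table-driven test sketch: one parameterized test body, many cases.
#include <vector>
#include "gtest/gtest.h"

struct FakeDataset {
  int buffer_size;
  int cardinality() const { return 10; }  // pretend prefetch keeps cardinality
};

struct CardinalityCase {
  FakeDataset params;
  int expected_cardinality;
};

class CardinalityTest : public ::testing::TestWithParam<CardinalityCase> {};

TEST_P(CardinalityTest, MatchesExpectation) {
  const CardinalityCase& tc = GetParam();
  EXPECT_EQ(tc.params.cardinality(), tc.expected_cardinality);
}

INSTANTIATE_TEST_SUITE_P(
    PrefetchLike, CardinalityTest,
    ::testing::ValuesIn(std::vector<CardinalityCase>{
        {{/*buffer_size=*/1}, /*expected_cardinality=*/10},
        {{/*buffer_size=*/0}, /*expected_cardinality=*/10},
        {{/*buffer_size=*/-1}, /*expected_cardinality=*/10},  // autotune-style
    }));
```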
mmm a / PowerEditor / src / Notepad_plus . cpp <nl> ppp b / PowerEditor / src / Notepad_plus . cpp <nl> void Notepad_plus : : notifyBufferChanged ( Buffer * buffer , int mask ) <nl> <nl> / / Then we ask user to update <nl> if ( doReloadOrNot ( buffer - > getFullPathName ( ) , buffer - > isDirty ( ) ) ! = IDYES ) <nl> + { <nl> + / / Since the file content has changed but the user doesn ' t want to reload it , set state to dirty <nl> + buffer - > setDirty ( true ) ; <nl> + <nl> break ; / / abort <nl> + } <nl> } <nl> / / Set _isLoadedDirty false so when the document clean state is reached the icon will be set to blue <nl> buffer - > setLoadedDirty ( false ) ; <nl>
|
Fix file not being marked dirty while its content changed on the hard drive.
|
notepad-plus-plus/notepad-plus-plus
|
7aef4a6b6df10923c3ea44ee135f32d0f25cee26
|
2017-07-14T09:53:33Z
|
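The Notepad++ fix above closes a state-tracking gap: the file changed on disk, the user declined the reload, yet the in-memory buffer kept claiming to be clean. A minimal sketch of that pattern, with `Buffer`, `ReloadChoice`, and `onFileChangedOnDisk` as hypothetical stand-ins for the real Notepad++ types:

```cpp
// If the on-disk copy diverged and the user keeps the old in-memory copy,
// the buffer must be flagged dirty so the UI stops showing it as clean.
struct Buffer {
  bool dirty = false;
  void setDirty(bool d) { dirty = d; }
};

enum class ReloadChoice { Yes, No };

void onFileChangedOnDisk(Buffer& buffer, ReloadChoice choice) {
  if (choice != ReloadChoice::Yes) {
    // Content changed on disk but the user declined the reload:
    // mark the buffer dirty instead of silently pretending it is clean.
    buffer.setDirty(true);
    return;  // abort the reload
  }
  // ... otherwise reload the buffer from disk ...
}
```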
new file mode 100644 <nl> index 00000000000 . . cef5148a8d9 <nl> mmm / dev / null <nl> ppp b / js / common / test - data / apps / minimal - working - setup - teardown / controller . js <nl> <nl> + var FoxxApplication = require ( " org / arangodb / foxx " ) . Controller ; <nl> + var controller = new FoxxApplication ( applicationContext ) ; <nl> + <nl> + controller . get ( ' / test ' , function ( req , res ) { <nl> + " use strict " ; <nl> + res . json ( true ) ; <nl> + } ) ; <nl> new file mode 100644 <nl> index 00000000000 . . 238c90850f0 <nl> mmm / dev / null <nl> ppp b / js / common / test - data / apps / minimal - working - setup - teardown / manifest . json <nl> <nl> + { <nl> + " name " : " minimal - working - manifest " , <nl> + " version " : " 0 . 0 . 0 " , <nl> + " setup " : " setup . js " , <nl> + " teardown " : " teardown . js " , <nl> + " controllers " : { <nl> + " / " : " controller . js " <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 6c9449f1b43 <nl> mmm / dev / null <nl> ppp b / js / common / test - data / apps / minimal - working - setup - teardown / setup . js <nl> <nl> + var db = require ( " internal " ) . db ; <nl> + var col = applicationContext . collectionName ( " setup_teardown " ) ; <nl> + if ( db . _collection ( col ) ) { <nl> + db . _create ( col ) ; <nl> + } <nl> + <nl> new file mode 100644 <nl> index 00000000000 . . c5abe44a867 <nl> mmm / dev / null <nl> ppp b / js / common / test - data / apps / minimal - working - setup - teardown / teardown . js <nl> <nl> + var db = require ( " internal " ) . db ; <nl> + var col = applicationContext . collectionName ( " setup_teardown " ) ; <nl> + db . _drop ( col ) ; <nl> new file mode 100644 <nl> index 00000000000 . . 3afa215d0ed <nl> mmm / dev / null <nl> ppp b / js / common / test - data / apps / minimal - working - setup / manifest . json <nl> <nl> + { <nl> + " name " : " minimal - working - manifest " , <nl> + " version " : " 0 . 0 . 0 " , <nl> + " setup " : " setup . js " <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 1fc7e992c7b <nl> mmm / dev / null <nl> ppp b / js / common / test - data / apps / minimal - working - setup / setup . js <nl> <nl> + var db = require ( " internal " ) . db ; <nl> + var col = applicationContext . collectionName ( " setup " ) ; <nl> + if ( db . _collection ( col ) ) { <nl> + db . _create ( col ) ; <nl> + } <nl>
|
Added minimal working test apps
|
arangodb/arangodb
|
f2ea8b006849a09e418fb742a61155aba7de9910
|
2015-03-06T13:23:58Z
|
mmm a / src / google / protobuf / generated_message_util . h <nl> ppp b / src / google / protobuf / generated_message_util . h <nl> class ExplicitlyConstructed { <nl> <nl> / / Default empty string object . Don ' t use this directly . Instead , call <nl> / / GetEmptyString ( ) to get the reference . <nl> - extern ExplicitlyConstructed < : : std : : string > fixed_address_empty_string ; <nl> + LIBPROTOBUF_EXPORT extern ExplicitlyConstructed < : : std : : string > fixed_address_empty_string ; <nl> LIBPROTOBUF_EXPORT extern ProtobufOnceType empty_string_once_init_ ; <nl> LIBPROTOBUF_EXPORT void InitEmptyString ( ) ; <nl> <nl>
|
Export symbols used in inline functions
|
protocolbuffers/protobuf
|
9094bf0f7e638f68cae72581ae8416e7c2721b41
|
2017-06-12T15:09:55Z
|
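The protobuf change above adds the export annotation to an `extern` variable that inline header code references. A hedged sketch of why that matters on Windows DLL builds; `MYLIB_EXPORT` and the build macros are illustrative, not protobuf's actual `LIBPROTOBUF_EXPORT` definition:

```cpp
#include <string>

// Illustrative export macro; real libraries derive this from their build
// system. Not protobuf's actual LIBPROTOBUF_EXPORT definition.
#if defined(_WIN32) && defined(MYLIB_BUILDING_DLL)
#  define MYLIB_EXPORT __declspec(dllexport)
#elif defined(_WIN32)
#  define MYLIB_EXPORT __declspec(dllimport)
#else
#  define MYLIB_EXPORT
#endif

// Header: the inline function makes every client TU reference the symbol
// directly, so the symbol itself must be visible across the DLL boundary.
MYLIB_EXPORT extern const std::string fixed_address_empty_string;

inline const std::string& GetEmptyString() {
  return fixed_address_empty_string;  // links only if the extern is exported
}

// In exactly one source file of the library:
// const std::string fixed_address_empty_string;
```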
mmm a / test / type / types . swift <nl> ppp b / test / type / types . swift <nl> class C { <nl> let _ : inout @ convention ( c ) Int - > Int / / expected - error { { ' inout ' may only be used on parameters } } <nl> func foo3 ( inout a : Int - > Void ) { } / / expected - error { { ' inout ' before a parameter name is not allowed , place it before the parameter type instead } } { { 11 - 16 = } } { { 20 - 20 = inout } } <nl> / / expected - error @ - 1 { { single argument function types require parentheses } } { { 20 - 20 = ( } } { { 23 - 23 = ) } } <nl> + <nl> + func sr5505 ( arg : Int ) - > String { <nl> + return " hello " <nl> + } <nl> + var _ : sr5505 = sr5505 / / expected - error { { use of undeclared type ' sr5505 ' } } <nl>
|
[QoI] Add test-case for SR-5505 (attempting to use function name as a type)
|
apple/swift
|
c431d5bbc0fc2f087f9b789efaac901d3c72e5d8
|
2017-07-19T22:05:44Z
|
mmm a / g3doc / Passes . md <nl> ppp b / g3doc / Passes . md <nl> This document describes the available MLIR passes and their contracts . <nl> <nl> Convert ML functions to equivalent CFG functions . <nl> <nl> - Individual operations are preserved . Loops are converted to a subgraph of basic <nl> - blocks ( initialization , condition checking , subgraph of body blocks ) with loop <nl> - induction variable being passed as the basic block argument of the condition <nl> - checking block . <nl> + Individual operations are preserved . Loops are converted to a subgraph of blocks <nl> + ( initialization , condition checking , subgraph of body blocks ) with loop <nl> + induction variable being passed as the block argument of the condition checking <nl> + block . <nl> <nl> # # # Input IR <nl> <nl> addition to the operations present in the source ML functions . <nl> - Individual operations other than control flow from the source ML functions <nl> are replicated in the produced CFG functions ; their arguments may be updated <nl> to capture the corresponding SSA values after conversion ( e . g . , loop <nl> - iterators become basic block arguments ) . <nl> + iterators become block arguments ) . <nl> <nl> # # ` affine_apply ` lowering ( ` - lower - affine - apply ` ) { # lower - affine - apply } <nl> <nl> mmm a / include / mlir / Analysis / Dominance . h <nl> ppp b / include / mlir / Analysis / Dominance . h <nl> class DominanceInfo : public DominatorTreeBase { <nl> return ( Instruction * ) a - > getDefiningInst ( ) = = b | | properlyDominates ( a , b ) ; <nl> } <nl> <nl> - / / dominates / properlyDominates for basic blocks . <nl> + / / dominates / properlyDominates for blocks . <nl> using DominatorTreeBase : : dominates ; <nl> using DominatorTreeBase : : properlyDominates ; <nl> } ; <nl> mmm a / include / mlir / IR / Block . h <nl> ppp b / include / mlir / IR / Block . h <nl> class Block : public IRObjectWithUseList , <nl> / / / Unlink this Block from its Function and delete it . <nl> void eraseFromFunction ( ) ; <nl> <nl> - / / / Split the basic block into two basic blocks before the specified <nl> - / / / instruction or iterator . <nl> + / / / Split the block into two blocks before the specified instruction or <nl> + / / / iterator . <nl> / / / <nl> / / / Note that all instructions BEFORE the specified iterator stay as part of <nl> / / / the original basic block , an unconditional branch is added to the original <nl> class Block : public IRObjectWithUseList , <nl> void print ( raw_ostream & os ) const ; <nl> void dump ( ) const ; <nl> <nl> - / / / Print out the name of the basic block without printing its body . <nl> + / / / Print out the name of the block without printing its body . <nl> / / / NOTE : The printType argument is ignored . We keep it for compatibility <nl> / / / with LLVM dominator machinery that expects it to exist . <nl> void printAsOperand ( raw_ostream & os , bool printType = true ) ; <nl> mmm a / include / mlir / IR / BuiltinOps . h <nl> ppp b / include / mlir / IR / BuiltinOps . h <nl> class AffineApplyOp <nl> / / / The " br " operation represents a branch instruction in a CFG function . <nl> / / / The operation takes variable number of operands and produces no results . <nl> / / / The operand number and types for each successor must match the <nl> - / / / arguments of the basic block successor . For example : <nl> + / / / arguments of the block successor . 
For example : <nl> / / / <nl> / / / bb2 : <nl> / / / % 2 = call @ someFn ( ) <nl> class BranchOp : public Op < BranchOp , OpTrait : : VariadicOperands , <nl> / / / The " cond_br " operation represents a conditional branch instruction in a <nl> / / / CFG function . The operation takes variable number of operands and produces <nl> / / / no results . The operand number and types for each successor must match the <nl> - / / arguments of the basic block successor . For example : <nl> + / / arguments of the block successor . For example : <nl> / / / <nl> / / / bb0 : <nl> / / / % 0 = extract_element % arg0 [ ] : tensor < i1 > <nl> mmm a / include / mlir / IR / Instruction . h <nl> ppp b / include / mlir / IR / Instruction . h <nl> class Instruction : public IROperandOwner , <nl> / / / function . <nl> void moveBefore ( Instruction * existingInst ) ; <nl> <nl> - / / / Unlink this operation instruction from its current basic block and insert <nl> - / / / it right before ` iterator ` in the specified basic block . <nl> + / / / Unlink this operation instruction from its current block and insert it <nl> + / / / right before ` iterator ` in the specified block . <nl> void moveBefore ( Block * block , llvm : : iplist < Instruction > : : iterator iterator ) ; <nl> <nl> / / Returns whether the Instruction is a terminator . <nl> mmm a / include / mlir / StandardOps / StandardOps . h <nl> ppp b / include / mlir / StandardOps / StandardOps . h <nl> class CallOp <nl> <nl> / / / The " call_indirect " operation represents an indirect call to a value of <nl> / / / function type . Functions are first class types in MLIR , and may be passed <nl> - / / / as arguments and merged together with basic block arguments . The operands <nl> + / / / as arguments and merged together with block arguments . The operands <nl> / / / and result types of the call must match the specified function type . <nl> / / / <nl> / / / % 31 = call_indirect % 15 ( % 0 , % 1 ) <nl> mmm a / lib / Analysis / Verifier . cpp <nl> ppp b / lib / Analysis / Verifier . cpp <nl> bool CFGFuncVerifier : : verify ( ) { <nl> / / we have uses and defs . <nl> <nl> if ( fn . empty ( ) ) <nl> - return failure ( " cfgfunc must have at least one basic block " , fn ) ; <nl> + return failure ( " cfgfunc must have at least one block " , fn ) ; <nl> <nl> / / Verify the first block has no predecessors . <nl> auto * firstBB = & fn . front ( ) ; <nl> bool CFGFuncVerifier : : verifyInstOperands ( const Instruction & inst ) { <nl> <nl> bool CFGFuncVerifier : : verifyBlock ( const Block & block ) { <nl> if ( ! block . getTerminator ( ) ) <nl> - return failure ( " basic block with no terminator " , block ) ; <nl> + return failure ( " block with no terminator " , block ) ; <nl> <nl> for ( auto * arg : block . getArguments ( ) ) { <nl> if ( arg - > getOwner ( ) ! = & block ) <nl> - return failure ( " basic block argument not owned by block " , block ) ; <nl> + return failure ( " block argument not owned by block " , block ) ; <nl> } <nl> <nl> for ( auto & inst : block ) { <nl> mmm a / lib / IR / AsmPrinter . cpp <nl> ppp b / lib / IR / AsmPrinter . cpp <nl> void Block : : print ( raw_ostream & os ) const { <nl> <nl> void Block : : dump ( ) const { print ( llvm : : errs ( ) ) ; } <nl> <nl> - / / / Print out the name of the basic block without printing its body . <nl> + / / / Print out the name of the block without printing its body . <nl> void Block : : printAsOperand ( raw_ostream & os , bool printType ) { <nl> if ( ! 
getFunction ( ) ) { <nl> os < < " < < UNLINKED BLOCK > > \ n " ; <nl> mmm a / lib / IR / Builders . cpp <nl> ppp b / lib / IR / Builders . cpp <nl> AffineMap Builder : : getShiftedAffineMap ( AffineMap map , int64_t shift ) { <nl> / / Instructions . <nl> / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> <nl> - / / / Add new basic block and set the insertion point to the end of it . If an <nl> - / / / ' insertBefore ' basic block is passed , the block will be placed before the <nl> + / / / Add new block and set the insertion point to the end of it . If an <nl> + / / / ' insertBefore ' block is passed , the block will be placed before the <nl> / / / specified block . If not , the block will be appended to the end of the <nl> / / / current function . <nl> Block * FuncBuilder : : createBlock ( Block * insertBefore ) { <nl> mmm a / lib / IR / Operation . cpp <nl> ppp b / lib / IR / Operation . cpp <nl> bool OpTrait : : impl : : verifyIsTerminator ( const OperationInst * op ) { <nl> const Block * block = op - > getBlock ( ) ; <nl> if ( ! block | | & block - > back ( ) ! = op ) <nl> return op - > emitOpError ( <nl> - " must be the last instruction in the parent basic block . " ) ; <nl> + " must be the last instruction in the parent block " ) ; <nl> } <nl> <nl> / / Verify the state of the successor blocks . <nl> mmm a / lib / Parser / Parser . cpp <nl> ppp b / lib / Parser / Parser . cpp <nl> class FunctionParser : public Parser { <nl> <nl> / / Block references . <nl> <nl> - / / / Get the basic block with the specified name , creating it if it doesn ' t <nl> + / / / Get the block with the specified name , creating it if it doesn ' t <nl> / / / already exist . The location specified is the point of use , which allows <nl> / / / us to diagnose references to blocks that are not defined precisely . <nl> Block * getBlockNamed ( StringRef name , SMLoc loc ) ; <nl> mmm a / lib / Target / LLVMIR / ConvertToLLVMIR . cpp <nl> ppp b / lib / Target / LLVMIR / ConvertToLLVMIR . cpp <nl> bool ModuleLowerer : : convertBlock ( const Block & bb , bool ignoreArguments ) { <nl> / / value remapping and PHI nodes , but do not add incoming edges for the PHI <nl> / / nodes just yet : those values may be defined by this or following blocks . <nl> / / This step is omitted if " ignoreArguments " is set . The arguments of the <nl> - / / first basic block have been already made available through the remapping of <nl> + / / first block have been already made available through the remapping of <nl> / / LLVM function arguments . <nl> if ( ! ignoreArguments ) { <nl> auto predecessors = bb . getPredecessors ( ) ; <nl> static const Value * getPHISourceValue ( const Block * current , const Block * pred , <nl> / / through the " true " or the " false " branch and take the relevant operands . <nl> auto condBranchOp = terminator . dyn_cast < CondBranchOp > ( ) ; <nl> assert ( condBranchOp & & <nl> - " only branch instructions can be terminators of a basic block that " <nl> + " only branch instructions can be terminators of a block that " <nl> " has successors " ) ; <nl> <nl> condBranchOp - > emitError ( " NYI : conditional branches with arguments " ) ; <nl> mmm a / lib / Transforms / ConvertToCFG . cpp <nl> ppp b / lib / Transforms / ConvertToCFG . cpp <nl> Value * FunctionConverter : : buildMinMaxReductionSeq ( <nl> return value ; <nl> } <nl> <nl> - / / Convert a " for " loop to a flow of basic blocks . <nl> + / / Convert a " for " loop to a flow of blocks . 
<nl> / / <nl> / / Create an SESE region for the loop ( including its body ) and append it to the <nl> / / end of the current region . The loop region consists of the initialization <nl> void FunctionConverter : : visitForInst ( ForInst * forInst ) { <nl> builder . create < BranchOp > ( builder . getUnknownLoc ( ) , loopInitBlock ) ; <nl> <nl> / / The loop condition block has an argument for loop induction variable . <nl> - / / Create it upfront and make the loop induction variable - > basic block <nl> + / / Create it upfront and make the loop induction variable - > block <nl> / / argument remapping available to the following instructions . ForInstruction <nl> / / is - a Value corresponding to the loop induction variable . <nl> builder . setInsertionPointToEnd ( loopConditionBlock ) ; <nl> mmm a / test / Target / llvmir . mlir <nl> ppp b / test / Target / llvmir . mlir <nl> <nl> <nl> <nl> / / <nl> - / / Basic functionality : function and basic block conversion , function calls , <nl> + / / Basic functionality : function and block conversion , function calls , <nl> / / phi nodes , scalar type conversion , arithmetic operations . <nl> / / <nl> <nl>
|
Tidy up references to "basic blocks" that should refer to blocks now. NFC.
|
tensorflow/tensorflow
|
ebf251e8e62397f5d80d5d68cd04fd63a8cf4817
|
2019-03-29T21:44:59Z
|
mmm a / src / core / ext / transport / chttp2 / transport / chttp2_transport . c <nl> ppp b / src / core / ext / transport / chttp2 / transport / chttp2_transport . c <nl> static void parsing_action ( grpc_exec_ctx * exec_ctx , void * arg , <nl> grpc_chttp2_transport * t = arg ; <nl> GPR_TIMER_BEGIN ( " reading_action . parse " , 0 ) ; <nl> size_t i = 0 ; <nl> - grpc_error * errors [ 2 ] = { error , GRPC_ERROR_NONE } ; <nl> + grpc_error * errors [ 2 ] = { GRPC_ERROR_REF ( error ) , GRPC_ERROR_NONE } ; <nl> for ( ; i < t - > read_buffer . count & & errors [ 1 ] = = GRPC_ERROR_NONE ; i + + ) { <nl> errors [ 1 ] = grpc_chttp2_perform_read ( exec_ctx , & t - > parsing , <nl> t - > read_buffer . slices [ i ] ) ; <nl>
|
Fix refcounting bug
|
grpc/grpc
|
06a5dc4d5d44156d1d115b905ac4d64ecbae858a
|
2016-05-10T19:37:27Z
|
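The one-line grpc fix above takes an extra reference on `error` before storing it in an array whose slots are consumed later: if you place a borrowed pointer somewhere that will be unreffed, take your own reference first. A minimal sketch of that ownership rule, with a hypothetical ref-counted `Error` type standing in for `grpc_error` and `GRPC_ERROR_REF`/`GRPC_ERROR_UNREF`:

```cpp
#include <cassert>

struct Error { int refs = 1; };

Error* error_ref(Error* e) { if (e) ++e->refs; return e; }
void error_unref(Error* e) { if (e && --e->refs == 0) delete e; }

// Drains one reference per array slot, like the parse loop in the diff.
void consume(Error* borrowed) {
  // Bug: storing `borrowed` directly would give away a reference we do
  // not own. Fix: take our own reference before the array is drained.
  Error* errors[2] = { error_ref(borrowed), nullptr };
  for (Error* e : errors) error_unref(e);
}

int main() {
  Error* err = new Error;   // caller owns one reference
  consume(err);
  assert(err->refs == 1);   // caller's reference is still valid
  error_unref(err);
}
```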
mmm a / Makefile . am <nl> ppp b / Makefile . am <nl> dist - hook : <nl> # Need to remove . svn directories from directories <nl> # added using EXTRA_DIST . $ ( distdir ) / tessdata would in <nl> # theory suffice . <nl> - rm - rf $ ( find $ ( distdir ) - name . svn ) <nl> - rm - rf $ ( find $ ( distdir ) - name . git ) <nl> - rm - rf $ ( find $ ( distdir ) - name . deps ) <nl> - rm - rf $ ( find $ ( distdir ) - name . libs ) <nl> - rm - rf $ ( find $ ( distdir ) - name * . o ) <nl> - rm - rf $ ( find $ ( distdir ) - name * . lo ) <nl> - rm - rf $ ( find $ ( distdir ) - name * . la ) <nl> - rm - rf $ ( find $ ( distdir ) / training - executable - type f ) <nl> - rm - rf $ ( distdir ) / doc / html / * <nl> + rm - rf ` find $ ( distdir ) - name . deps - type d ` <nl> + - rm - f $ ( distdir ) / * / Makefile $ ( distdir ) / * / * / Makefile <nl> + rm - f ` find $ ( distdir ) - name ' * ~ ' ` <nl> + rm - rf $ ( find $ ( distdir ) / src / training - executable - type f ) <nl> + rm - rf $ ( distdir ) / doc / html / * $ ( distdir ) / doc / * . log <nl> <nl> ScrollView . jar : <nl> @ cd " $ ( top_builddir ) / java " & & $ ( MAKE ) $ @ <nl>
|
update dist-hook
|
tesseract-ocr/tesseract
|
90403ef3716c4a287b537b6fcceca9b6e805833b
|
2018-10-12T18:10:39Z
|
mmm a / README . md <nl> ppp b / README . md <nl> $ python <nl> > > > import tensorflow as tf <nl> > > > hello = tf . constant ( ' Hello , TensorFlow ! ' ) <nl> > > > sess = tf . Session ( ) <nl> - > > > print sess . run ( hello ) <nl> + > > > sess . run ( hello ) <nl> Hello , TensorFlow ! <nl> > > > a = tf . constant ( 10 ) <nl> > > > b = tf . constant ( 32 ) <nl> - > > > print sess . run ( a + b ) <nl> + > > > sess . run ( a + b ) <nl> 42 <nl> > > > <nl> <nl>
|
TensorFlow: Upstream changes to git.
|
tensorflow/tensorflow
|
3972c791b9f4d9a61b9ad6399b481df396f359ff
|
2015-11-25T20:13:27Z
|
mmm a / UnitTests / Makefile . unittests <nl> ppp b / UnitTests / Makefile . unittests <nl> unittests - brief : \ <nl> jslint \ <nl> unittests - config \ <nl> unittests - boost \ <nl> + unittests - shell - client - readonly \ <nl> unittests - shell - server \ <nl> unittests - shell - server - ahuacatl \ <nl> unittests - http - server \ <nl> unittests - shell - server - ahuacatl : <nl> # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> # # # @ brief SHELL CLIENT TESTS <nl> # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> + UNITTESTS_READONLY = $ ( addprefix - - javascript . unit - tests , @ top_srcdir @ / js / client / tests / shell - changeMode . js ) <nl> + UNITTESTS_NO_READONLY = $ ( addprefix - - javascript . unit - tests , @ top_srcdir @ / js / client / tests / shell - noChangeMode . js ) <nl> + . PHONY : unittests - shell - client - readonly <nl> + unittests - shell - client - readonly : <nl> + $ ( MAKE ) start - server PID = $ ( PID ) SERVER_START = " - - server . endpoint unix : / / $ ( VOCDIR ) / arango . sock - - server . disable - authentication true " PROTO = unix <nl> + @ echo <nl> + @ echo " = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = " <nl> + @ echo " < < SHELL CLIENT READONLY > > " <nl> + @ echo " = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = " <nl> + @ echo <nl> + <nl> + $ ( VALGRIND ) @ builddir @ / bin / arangosh $ ( CLIENT_OPT ) - - server . username " $ ( USERNAME ) " - - server . password " $ ( PASSWORD ) " - - server . endpoint unix : / / $ ( VOCDIR ) / arango . sock $ ( UNITTESTS_READONLY ) | | test " x $ ( FORCE ) " = = " x1 " <nl> + sleep 2 <nl> + kill ` cat $ ( PIDFILE ) ` <nl> + <nl> + while test - f $ ( PIDFILE ) ; do sleep 1 ; done <nl> + @ if [ " $ ( VALGRIND ) " ! = " " ] ; then sleep 60 ; fi <nl> + <nl> + @ rm - rf " $ ( VOCDIR ) " <nl> + @ echo <nl> + <nl> + $ ( MAKE ) start - server PID = $ ( PID ) SERVER_START = " - - server . endpoint tcp : / / $ ( VOCHOST ) : $ ( VOCPORT ) - - server . disable - authentication true " PROTO = http <nl> + $ ( VALGRIND ) @ builddir @ / bin / arangosh $ ( CLIENT_OPT ) - - server . username " $ ( USERNAME ) " - - server . password " $ ( PASSWORD ) " - - server . endpoint tcp : / / $ ( VOCHOST ) : $ ( VOCPORT ) $ ( UNITTESTS_NO_READONLY ) | | test " x $ ( FORCE ) " = = " x1 " <nl> + sleep 2 <nl> + kill ` cat $ ( PIDFILE ) ` <nl> + <nl> + while test - f $ ( PIDFILE ) ; do sleep 1 ; done <nl> + @ if [ " $ ( VALGRIND ) " ! = " " ] ; then sleep 60 ; fi <nl> + <nl> + @ rm - rf " $ ( VOCDIR ) " <nl> + @ echo <nl> <nl> SHELL_CLIENT_ONLY = \ <nl> @ top_srcdir @ / js / client / tests / shell - endpoints . js \ <nl> SHELL_CLIENT_ONLY = \ <nl> SHELL_CLIENT = $ ( SHELL_COMMON ) $ ( SHELL_CLIENT_ONLY ) <nl> <nl> . PHONY : unittests - shell - client <nl> - <nl> UNITTESTS_CLIENT = $ ( addprefix - - javascript . unit - tests , $ ( SHELL_CLIENT ) ) <nl> <nl> unittests - shell - client : <nl> new file mode 100644 <nl> index 00000000000 . . 3ea8f5f01b7 <nl> mmm / dev / null <nl> ppp b / js / client / tests / shell - changeMode . 
js <nl> <nl> + / * jslint indent : 2 , maxlen : 120 , vars : true , white : true , plusplus : true , nonpropdel : true , nomen : true , sloppy : true * / <nl> + / * global require , assertEqual , assertNotEqual , <nl> + print , print_plain , COMPARE_STRING , NORMALIZE_STRING , <nl> + help , start_pager , stop_pager , start_pretty_print , stop_pretty_print , start_color_print , stop_color_print * / <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief tests for client - specific functionality <nl> + / / / <nl> + / / / @ file <nl> + / / / <nl> + / / / DISCLAIMER <nl> + / / / <nl> + / / / Copyright 2010 - 2012 triagens GmbH , Cologne , Germany <nl> + / / / <nl> + / / / Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + / / / you may not use this file except in compliance with the License . <nl> + / / / You may obtain a copy of the License at <nl> + / / / <nl> + / / / http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + / / / <nl> + / / / Unless required by applicable law or agreed to in writing , software <nl> + / / / distributed under the License is distributed on an " AS IS " BASIS , <nl> + / / / WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + / / / See the License for the specific language governing permissions and <nl> + / / / limitations under the License . <nl> + / / / <nl> + / / / Copyright holder is triAGENS GmbH , Cologne , Germany <nl> + / / / <nl> + / / / @ author Esteban Lombeyda <nl> + / / / @ author Copyright 2014 , triAGENS GmbH , Cologne , Germany <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + var jsunity = require ( " jsunity " ) ; <nl> + var db = require ( " org / arangodb " ) . 
db ; <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief test suite <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + function changeOperationModePositiveCaseTestSuite ( ) { <nl> + <nl> + return { <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief set up <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + setUp : function ( ) { <nl> + } , <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief tear down <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + tearDown : function ( ) { <nl> + } , <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief tests if the change of the operation mode of the arango server <nl> + / / / can be done . <nl> + / / / Note : this test needs an arango server with endpoint unix : . . . <nl> + / / / See target unittests - shell - client - readonly <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + testChangeMode : function ( ) { <nl> + var result = <nl> + db . _executeTransaction ( { collections : { } , <nl> + action : function ( ) { <nl> + var db = require ( ' internal ' ) . db ; <nl> + var result = db . _changeMode ( ' ReadOnly ' ) ; <nl> + return result ; <nl> + } <nl> + } ) ; <nl> + assertTrue ( result ) ; <nl> + } <nl> + <nl> + } ; <nl> + } <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief executes the test suite <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + jsunity . run ( changeOperationModePositiveCaseTestSuite ) ; <nl> + <nl> + return jsunity . done ( ) ; <nl> + <nl> + / / Local Variables : <nl> + / / mode : outline - minor <nl> + / / outline - regexp : " ^ \ \ ( / / / @ brief \ \ | / / / @ addtogroup \ \ | / / - - SECTION - - \ \ | / / / @ page \ \ | / / / @ } \ \ ) " <nl> + / / End : <nl> new file mode 100644 <nl> index 00000000000 . . d8aaa96ec61 <nl> mmm / dev / null <nl> ppp b / js / client / tests / shell - noChangeMode . 
js <nl> <nl> + / * jslint indent : 2 , maxlen : 120 , vars : true , white : true , plusplus : true , nonpropdel : true , nomen : true , sloppy : true * / <nl> + / * global require , assertEqual , assertNotEqual , <nl> + print , print_plain , COMPARE_STRING , NORMALIZE_STRING , <nl> + help , start_pager , stop_pager , start_pretty_print , stop_pretty_print , start_color_print , stop_color_print * / <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief tests for client - specific functionality <nl> + / / / <nl> + / / / @ file <nl> + / / / <nl> + / / / DISCLAIMER <nl> + / / / <nl> + / / / Copyright 2010 - 2012 triagens GmbH , Cologne , Germany <nl> + / / / <nl> + / / / Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + / / / you may not use this file except in compliance with the License . <nl> + / / / You may obtain a copy of the License at <nl> + / / / <nl> + / / / http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + / / / <nl> + / / / Unless required by applicable law or agreed to in writing , software <nl> + / / / distributed under the License is distributed on an " AS IS " BASIS , <nl> + / / / WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + / / / See the License for the specific language governing permissions and <nl> + / / / limitations under the License . <nl> + / / / <nl> + / / / Copyright holder is triAGENS GmbH , Cologne , Germany <nl> + / / / <nl> + / / / @ author Esteban Lombeyda <nl> + / / / @ author Copyright 2014 , triAGENS GmbH , Cologne , Germany <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + var jsunity = require ( " jsunity " ) ; <nl> + var arangodb = require ( " org / arangodb " ) ; <nl> + var db = arangodb . 
db ; <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief test suite <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + function changeOperationModeNegativeCaseTestSuite ( ) { <nl> + <nl> + return { <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief set up <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + setUp : function ( ) { <nl> + } , <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief tear down <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + tearDown : function ( ) { <nl> + } , <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief tests if the change of the operation mode of the arango server <nl> + / / / can be done . <nl> + / / / Note : this test needs an arango server with endpoint unix : . . . <nl> + / / / See target unittests - shell - client - readonly <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + testTryChangeMode : function ( ) { <nl> + var modified = true ; <nl> + try { <nl> + db . _executeTransaction ( { collections : { } , <nl> + action : function ( ) { <nl> + var db = require ( ' internal ' ) . db ; <nl> + var result = db . _changeMode ( ' ReadOnly ' ) ; <nl> + return result ; <nl> + } <nl> + } ) ; } catch ( e ) { <nl> + assertEqual ( arangodb . errors . ERROR_FORBIDDEN . code , e . errorNum ) ; <nl> + modified = false ; <nl> + } <nl> + assertFalse ( modified ) ; <nl> + } <nl> + <nl> + } ; <nl> + } <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief executes the test suite <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + jsunity . run ( changeOperationModeNegativeCaseTestSuite ) ; <nl> + <nl> + return jsunity . done ( ) ; <nl> + <nl> + / / Local Variables : <nl> + / / mode : outline - minor <nl> + / / outline - regexp : " ^ \ \ ( / / / @ brief \ \ | / / / @ addtogroup \ \ | / / - - SECTION - - \ \ | / / / @ page \ \ | / / / @ } \ \ ) " <nl> + / / End : <nl> mmm a / js / server / tests / shell - readonly . js <nl> ppp b / js / server / tests / shell - readonly . js <nl> <nl> / / / <nl> / / / Copyright holder is triAGENS GmbH , Cologne , Germany <nl> / / / <nl> - / / / @ author Dr . 
Frank Celler <nl> + / / / @ author Esteban Lombeyda <nl> / / / @ author Copyright 2014 , triAGENS GmbH , Cologne , Germany <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl>
|
read-only: unit tests for client and server side
|
arangodb/arangodb
|
2405a5ffa2839cdc07ead00caac7b7664f8b2aec
|
2014-06-16T13:24:12Z
|
mmm a / xbmc / video / VideoDatabase . cpp <nl> ppp b / xbmc / video / VideoDatabase . cpp <nl> bool CVideoDatabase : : GetStreamDetails ( CVideoInfoTag & tag ) const <nl> auto_ptr < Dataset > pDS ( m_pDB - > CreateDataset ( ) ) ; <nl> try <nl> { <nl> - CStdString strSQL = PrepareSQL ( " SELECT * FROM streamdetails WHERE idFile = % i " , tag . m_iFileId ) ; <nl> - pDS - > query ( strSQL ) ; <nl> + CStdString strSQL = PrepareSQL ( " SELECT * FROM streamdetails WHERE idFile = % i " , tag . m_iFileId ) ; <nl> + pDS - > query ( strSQL ) ; <nl> <nl> - while ( ! pDS - > eof ( ) ) <nl> - { <nl> - CStreamDetail : : StreamType e = ( CStreamDetail : : StreamType ) pDS - > fv ( 1 ) . get_asInt ( ) ; <nl> - switch ( e ) <nl> + while ( ! pDS - > eof ( ) ) <nl> { <nl> - case CStreamDetail : : VIDEO : <nl> - { <nl> - CStreamDetailVideo * p = new CStreamDetailVideo ( ) ; <nl> - p - > m_strCodec = pDS - > fv ( 2 ) . get_asString ( ) ; <nl> - p - > m_fAspect = pDS - > fv ( 3 ) . get_asFloat ( ) ; <nl> - p - > m_iWidth = pDS - > fv ( 4 ) . get_asInt ( ) ; <nl> - p - > m_iHeight = pDS - > fv ( 5 ) . get_asInt ( ) ; <nl> - p - > m_iDuration = pDS - > fv ( 10 ) . get_asInt ( ) ; <nl> - details . AddStream ( p ) ; <nl> - retVal = true ; <nl> - break ; <nl> - } <nl> - case CStreamDetail : : AUDIO : <nl> - { <nl> - CStreamDetailAudio * p = new CStreamDetailAudio ( ) ; <nl> - p - > m_strCodec = pDS - > fv ( 6 ) . get_asString ( ) ; <nl> - if ( pDS - > fv ( 7 ) . get_isNull ( ) ) <nl> - p - > m_iChannels = - 1 ; <nl> - else <nl> - p - > m_iChannels = pDS - > fv ( 7 ) . get_asInt ( ) ; <nl> - p - > m_strLanguage = pDS - > fv ( 8 ) . get_asString ( ) ; <nl> - details . AddStream ( p ) ; <nl> - retVal = true ; <nl> - break ; <nl> - } <nl> - case CStreamDetail : : SUBTITLE : <nl> + CStreamDetail : : StreamType e = ( CStreamDetail : : StreamType ) pDS - > fv ( 1 ) . get_asInt ( ) ; <nl> + switch ( e ) <nl> { <nl> - CStreamDetailSubtitle * p = new CStreamDetailSubtitle ( ) ; <nl> - p - > m_strLanguage = pDS - > fv ( 9 ) . get_asString ( ) ; <nl> - details . AddStream ( p ) ; <nl> - retVal = true ; <nl> - break ; <nl> + case CStreamDetail : : VIDEO : <nl> + { <nl> + CStreamDetailVideo * p = new CStreamDetailVideo ( ) ; <nl> + p - > m_strCodec = pDS - > fv ( 2 ) . get_asString ( ) ; <nl> + p - > m_fAspect = pDS - > fv ( 3 ) . get_asFloat ( ) ; <nl> + p - > m_iWidth = pDS - > fv ( 4 ) . get_asInt ( ) ; <nl> + p - > m_iHeight = pDS - > fv ( 5 ) . get_asInt ( ) ; <nl> + p - > m_iDuration = pDS - > fv ( 10 ) . get_asInt ( ) ; <nl> + details . AddStream ( p ) ; <nl> + retVal = true ; <nl> + break ; <nl> + } <nl> + case CStreamDetail : : AUDIO : <nl> + { <nl> + CStreamDetailAudio * p = new CStreamDetailAudio ( ) ; <nl> + p - > m_strCodec = pDS - > fv ( 6 ) . get_asString ( ) ; <nl> + if ( pDS - > fv ( 7 ) . get_isNull ( ) ) <nl> + p - > m_iChannels = - 1 ; <nl> + else <nl> + p - > m_iChannels = pDS - > fv ( 7 ) . get_asInt ( ) ; <nl> + p - > m_strLanguage = pDS - > fv ( 8 ) . get_asString ( ) ; <nl> + details . AddStream ( p ) ; <nl> + retVal = true ; <nl> + break ; <nl> + } <nl> + case CStreamDetail : : SUBTITLE : <nl> + { <nl> + CStreamDetailSubtitle * p = new CStreamDetailSubtitle ( ) ; <nl> + p - > m_strLanguage = pDS - > fv ( 9 ) . get_asString ( ) ; <nl> + details . AddStream ( p ) ; <nl> + retVal = true ; <nl> + break ; <nl> + } <nl> } <nl> - } <nl> <nl> - pDS - > next ( ) ; <nl> - } <nl> + pDS - > next ( ) ; <nl> + } <nl> <nl> - pDS - > close ( ) ; <nl> + pDS - > close ( ) ; <nl> } <nl> catch ( . . . ) <nl> { <nl>
|
[cosmetic] indenting after try/catch in GetStreamDetails
|
xbmc/xbmc
|
f8772de1060fd4a51f4a5f77b8051cb6b86cd7f0
|
2012-11-18T07:58:41Z
|
new file mode 100644 <nl> index 000000000000 . . 422f46b5579b <nl> mmm / dev / null <nl> ppp b / validation - test / compiler_crashers_2_fixed / sr9199 . swift <nl> <nl> + / / RUN : % target - swift - frontend - emit - ir - o % t . ll % s <nl> + <nl> + / / Just make sure we don ' t crash . <nl> + <nl> + protocol Publicable { <nl> + associatedtype PublicModel <nl> + <nl> + func publicized ( ) - > PublicModel <nl> + } <nl> + <nl> + <nl> + protocol WithReturnType { <nl> + associatedtype MainType <nl> + associatedtype ReturnType <nl> + <nl> + func returnTheThing ( ) <nl> + } <nl> + <nl> + extension WithReturnType where MainType : Publicable { <nl> + typealias ReturnType = MainType . PublicModel <nl> + <nl> + func returnTheThing ( ) { <nl> + print ( " publicable " ) <nl> + } <nl> + } <nl> + <nl> + extension WithReturnType { <nl> + func returnTheThing ( ) { <nl> + print ( " not publicable " ) <nl> + } <nl> + } <nl> + <nl> + extension String : Publicable { <nl> + struct PublicString { <nl> + let inner : String <nl> + <nl> + init ( str : String ) { <nl> + self . inner = " Public : \ ( str ) " <nl> + } <nl> + } <nl> + <nl> + func publicized ( ) - > PublicString { <nl> + return PublicString ( str : self ) <nl> + } <nl> + } <nl> + <nl> + struct Controller < T > { <nl> + <nl> + } <nl> + <nl> + extension Controller : WithReturnType { <nl> + typealias MainType = T <nl> + } <nl> + <nl> + let controller = Controller < String > ( ) <nl> + <nl> + controller . returnTheThing ( ) <nl> \ No newline at end of file <nl>
|
[test] Add a fixed crasher test for SR-9199 ()
|
apple/swift
|
bc1839745634e8cd35251333ed2793910e98975f
|
2018-11-07T17:16:47Z
|
mmm a / dbms / src / Storages / MergeTree / MergeTreeDataSelectExecutor . cpp <nl> ppp b / dbms / src / Storages / MergeTree / MergeTreeDataSelectExecutor . cpp <nl> MarkRanges MergeTreeDataSelectExecutor : : filterMarksUsingIndex ( <nl> continue ; <nl> } <nl> <nl> - if ( res . empty ( ) | | res . back ( ) . end - data_range . begin > = min_marks_for_seek ) <nl> + if ( res . empty ( ) | | res . back ( ) . end - data_range . begin > min_marks_for_seek ) <nl> res . push_back ( data_range ) ; <nl> else <nl> res . back ( ) . end = data_range . end ; <nl> mmm a / dbms / src / Storages / MergeTree / MergedBlockOutputStream . cpp <nl> ppp b / dbms / src / Storages / MergeTree / MergedBlockOutputStream . cpp <nl> void MergedBlockOutputStream : : writeImpl ( const Block & block , const IColumn : : Perm <nl> WrittenOffsetColumns offset_columns ; <nl> <nl> auto primary_key_column_names = storage . primary_key_columns ; <nl> - Names skip_indexes_column_names ; <nl> + std : : set < String > skip_indexes_column_names_set ; <nl> for ( const auto & index : storage . skip_indices ) <nl> - std : : copy ( index - > columns . cbegin ( ) , index - > columns . cend ( ) , std : : back_inserter ( skip_indexes_column_names ) ) ; <nl> + std : : copy ( index - > columns . cbegin ( ) , index - > columns . cend ( ) , <nl> + std : : inserter ( skip_indexes_column_names_set , skip_indexes_column_names_set . end ( ) ) ) ; <nl> + Names skip_indexes_column_names ( skip_indexes_column_names_set . begin ( ) , skip_indexes_column_names_set . end ( ) ) ; <nl> <nl> / / / Here we will add the columns related to the Primary Key , then write the index . <nl> std : : vector < ColumnWithTypeAndName > primary_key_columns ( primary_key_column_names . size ( ) ) ; <nl> void MergedBlockOutputStream : : writeImpl ( const Block & block , const IColumn : : Perm <nl> rows_count + = rows ; <nl> <nl> { <nl> + / / / Creating block for update <nl> + Block indices_update_block ( skip_indexes_columns ) ; <nl> / / / Filling and writing skip indices like in IMergedBlockOutputStream : : writeData <nl> for ( size_t i = 0 ; i < storage . skip_indices . size ( ) ; + + i ) <nl> { <nl> void MergedBlockOutputStream : : writeImpl ( const Block & block , const IColumn : : Perm <nl> } <nl> <nl> size_t pos = prev_pos ; <nl> - skip_indices_granules [ i ] - > update ( block , & pos , limit ) ; <nl> + skip_indices_granules [ i ] - > update ( indices_update_block , & pos , limit ) ; <nl> <nl> if ( pos = = prev_pos + limit ) <nl> { <nl>
|
fix
|
ClickHouse/ClickHouse
|
610534e5ff3a507020aa131a4224c964acfb3ea3
|
2019-02-14T16:59:26Z
|
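The second hunk of the ClickHouse fix above routes skip-index column names through a `std::set`, so a column shared by several indices is requested only once instead of being copied into the list repeatedly. A minimal sketch of that deduplication, with `Index` as an illustrative stand-in for the storage's skip-index descriptor:

```cpp
#include <set>
#include <string>
#include <vector>

struct Index { std::vector<std::string> columns; };

// Collect names through a set before building the ordered list handed on,
// so each column appears once even if several indices reference it.
std::vector<std::string> uniqueIndexColumns(const std::vector<Index>& indices) {
  std::set<std::string> names;
  for (const auto& index : indices)
    names.insert(index.columns.begin(), index.columns.end());
  return {names.begin(), names.end()};  // sorted, duplicate-free
}
```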
mmm a / dbms / src / Interpreters / ExpressionActions . cpp <nl> ppp b / dbms / src / Interpreters / ExpressionActions . cpp <nl> void ExpressionActionsChain : : finalize ( ) <nl> std : : unordered_map < String , size_t > required_output_indexes ; <nl> for ( size_t j = 0 ; j < required_output . size ( ) ; + + j ) <nl> required_output_indexes [ required_output [ j ] ] = j ; <nl> - auto & can_remove_required_output = steps [ i ] . can_remove_required_output ; <nl> + auto & can_remove_required_output = steps [ i ] . not_need_in_future_steps ; <nl> <nl> if ( i + 1 < static_cast < int > ( steps . size ( ) ) ) <nl> { <nl> mmm a / dbms / src / Interpreters / ExpressionActions . h <nl> ppp b / dbms / src / Interpreters / ExpressionActions . h <nl> struct ExpressionActionsChain <nl> ExpressionActionsPtr actions ; <nl> NameSet additional_input ; <nl> Names required_output ; <nl> - std : : vector < std : : shared_ptr < bool > > can_remove_required_output ; / / / Has the same size with required_output , is filled in finalize ( ) <nl> + / / / Columns which are used only for current steps and not used in next actions ( and can be removed from block ) . <nl> + / / / Example : filter column for where actions . <nl> + / / / If not empty , has the same size with required_output ; is filled in finalize ( ) . <nl> + std : : vector < bool * > not_need_in_future_steps ; <nl> <nl> Step ( const ExpressionActionsPtr & actions_ = nullptr , const Names & required_output_ = Names ( ) ) <nl> : actions ( actions_ ) , required_output ( required_output_ ) { } <nl> mmm a / dbms / src / Interpreters / ExpressionAnalyzer . cpp <nl> ppp b / dbms / src / Interpreters / ExpressionAnalyzer . cpp <nl> bool ExpressionAnalyzer : : appendJoin ( ExpressionActionsChain & chain , bool only_ty <nl> return true ; <nl> } <nl> <nl> - bool ExpressionAnalyzer : : appendPrewhere ( ExpressionActionsChain & chain , bool only_types , std : : shared_ptr < bool > & remove_filter ) <nl> + bool ExpressionAnalyzer : : appendPrewhere ( ExpressionActionsChain & chain , bool only_types , bool & remove_filter ) <nl> { <nl> assertSelect ( ) ; <nl> <nl> bool ExpressionAnalyzer : : appendPrewhere ( ExpressionActionsChain & chain , bool onl <nl> getRootActions ( select_query - > prewhere_expression , only_types , false , step . actions ) ; <nl> String prewhere_column_name = select_query - > prewhere_expression - > getColumnName ( ) ; <nl> step . required_output . push_back ( prewhere_column_name ) ; <nl> - step . can_remove_required_output = { remove_filter = std : : make_shared < bool > ( true ) } ; <nl> + step . not_need_in_future_steps = { & remove_filter } ; <nl> <nl> { <nl> / / / Remove unused source_columns from prewhere actions . <nl> bool ExpressionAnalyzer : : appendPrewhere ( ExpressionActionsChain & chain , bool onl <nl> return true ; <nl> } <nl> <nl> - bool ExpressionAnalyzer : : appendWhere ( ExpressionActionsChain & chain , bool only_types , std : : shared_ptr < bool > & remove_filter ) <nl> + bool ExpressionAnalyzer : : appendWhere ( ExpressionActionsChain & chain , bool only_types , bool & remove_filter ) <nl> { <nl> assertSelect ( ) ; <nl> <nl> bool ExpressionAnalyzer : : appendWhere ( ExpressionActionsChain & chain , bool only_t <nl> ExpressionActionsChain : : Step & step = chain . steps . back ( ) ; <nl> <nl> step . required_output . push_back ( select_query - > where_expression - > getColumnName ( ) ) ; <nl> - step . can_remove_required_output = { remove_filter = std : : make_shared < bool > ( true ) } ; <nl> + step . 
not_need_in_future_steps = { & remove_filter } ; <nl> getRootActions ( select_query - > where_expression , only_types , false , step . actions ) ; <nl> <nl> return true ; <nl> mmm a / dbms / src / Interpreters / ExpressionAnalyzer . h <nl> ppp b / dbms / src / Interpreters / ExpressionAnalyzer . h <nl> class ExpressionAnalyzer : private boost : : noncopyable <nl> / / / Before aggregation : <nl> bool appendArrayJoin ( ExpressionActionsChain & chain , bool only_types ) ; <nl> bool appendJoin ( ExpressionActionsChain & chain , bool only_types ) ; <nl> - bool appendPrewhere ( ExpressionActionsChain & chain , bool only_types , std : : shared_ptr < bool > & remove_filter ) ; <nl> - bool appendWhere ( ExpressionActionsChain & chain , bool only_types , std : : shared_ptr < bool > & remove_filter ) ; <nl> + bool appendPrewhere ( ExpressionActionsChain & chain , bool only_types , bool & remove_filter ) ; <nl> + bool appendWhere ( ExpressionActionsChain & chain , bool only_types , bool & remove_filter ) ; <nl> bool appendGroupBy ( ExpressionActionsChain & chain , bool only_types ) ; <nl> void appendAggregateFunctionsArguments ( ExpressionActionsChain & chain , bool only_types ) ; <nl> <nl> mmm a / dbms / src / Interpreters / InterpreterSelectQuery . cpp <nl> ppp b / dbms / src / Interpreters / InterpreterSelectQuery . cpp <nl> InterpreterSelectQuery : : AnalysisResult InterpreterSelectQuery : : analyzeExpression <nl> * throw out unnecessary columns based on the entire query . In unnecessary parts of the query , we will not execute subqueries . <nl> * / <nl> <nl> - std : : shared_ptr < bool > remove_where_filter ; <nl> - std : : shared_ptr < bool > remove_prewhere_filter ; <nl> + bool remove_prewhere_filter ; <nl> <nl> { <nl> ExpressionActionsChain chain ; <nl> <nl> - if ( query_analyzer - > appendPrewhere ( chain , false , remove_prewhere_filter ) ) <nl> + if ( query_analyzer - > appendPrewhere ( chain , ! res . first_stage , remove_prewhere_filter ) ) <nl> { <nl> res . prewhere_info = std : : make_shared < PrewhereInfo > ( <nl> chain . steps . front ( ) . actions , query . prewhere_expression - > getColumnName ( ) ) ; <nl> InterpreterSelectQuery : : AnalysisResult InterpreterSelectQuery : : analyzeExpression <nl> chain . addStep ( ) ; <nl> } <nl> <nl> - if ( query_analyzer - > appendWhere ( chain , ! res . first_stage , remove_where_filter ) ) <nl> + if ( query_analyzer - > appendWhere ( chain , ! res . first_stage , res . remove_where_filter ) ) <nl> { <nl> res . has_where = true ; <nl> res . before_where = chain . getLastActions ( ) ; <nl> InterpreterSelectQuery : : AnalysisResult InterpreterSelectQuery : : analyzeExpression <nl> } <nl> <nl> if ( res . prewhere_info ) <nl> - res . prewhere_info - > remove_prewhere_column = * remove_prewhere_filter ; <nl> + res . prewhere_info - > remove_prewhere_column = remove_prewhere_filter ; <nl> <nl> / / / Before executing WHERE and HAVING , remove the extra columns from the block ( mostly the aggregation keys ) . <nl> if ( res . has_where ) <nl> - { <nl> res . before_where - > prependProjectInput ( ) ; <nl> - res . remove_where_filter = * remove_where_filter ; <nl> - } <nl> if ( res . has_having ) <nl> res . before_having - > prependProjectInput ( ) ; <nl> <nl>
|
style fixes
|
ClickHouse/ClickHouse
|
233ac6c5999df84d3064fbf7de6ea155cfbcec8f
|
2018-04-20T19:38:34Z
|
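The ClickHouse diff above drops per-flag heap allocation: instead of `std::shared_ptr<bool>`, each step holds raw `bool*` into flags owned by the caller, and `finalize()` writes the verdict through them. A minimal sketch of that out-parameter pattern — member names echo the diff, but the removability rule here is a toy (a column is droppable if no later step requires it):

```cpp
#include <iostream>
#include <string>
#include <vector>

struct Step {
    std::vector<std::string> required_output;
    // If non-empty, parallel to required_output; the pointees are owned by
    // whoever built the chain (e.g. a local bool in the analyzer), so no
    // heap allocation is needed for the flags.
    std::vector<bool*> not_need_in_future_steps;
};

// Toy stand-in for finalize(): a column can be removed from the block if no
// later step lists it in its required_output.
void finalize(std::vector<Step>& steps) {
    for (size_t i = 0; i < steps.size(); ++i) {
        for (size_t j = 0; j < steps[i].not_need_in_future_steps.size(); ++j) {
            const std::string& col = steps[i].required_output[j];
            bool used_later = false;
            for (size_t k = i + 1; k < steps.size(); ++k)
                for (const auto& name : steps[k].required_output)
                    if (name == col) used_later = true;
            *steps[i].not_need_in_future_steps[j] = !used_later;
        }
    }
}

int main() {
    bool remove_filter = false;  // caller-owned, like remove_prewhere_filter
    std::vector<Step> chain;
    chain.push_back({{"_where_result"}, {&remove_filter}});
    chain.push_back({{"x"}, {}});  // the projection never reads the filter column
    finalize(chain);
    std::cout << std::boolalpha << remove_filter << "\n";  // true
}
```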
mmm a / tensorflow / core / ops / array_ops . cc <nl> ppp b / tensorflow / core / ops / array_ops . cc <nl> REGISTER_OP ( " SplitV " ) <nl> for ( const auto size : data ) { <nl> if ( size = = - 1 ) { <nl> if ( has_neg_one ) { <nl> - return errors : : InvalidArgument ( " size_splits can only have one - 1 " ) ; <nl> + return errors : : InvalidArgument ( <nl> + " size_splits can only have one - 1 " ) ; <nl> } <nl> has_neg_one = true ; <nl> } else { <nl> REGISTER_OP ( " SplitV " ) <nl> if ( data [ i ] = = - 1 & & c - > ValueKnown ( split_dim_size ) ) { <nl> size = split_dim_size - total_size ; <nl> } <nl> - TF_RETURN_IF_ERROR ( c - > ReplaceDim ( input , split_dim , <nl> - c - > MakeDim ( size ) , & output_shape ) ) ; <nl> + TF_RETURN_IF_ERROR ( <nl> + c - > ReplaceDim ( input , split_dim , c - > MakeDim ( size ) , & output_shape ) ) ; <nl> c - > set_output ( i , output_shape ) ; <nl> } <nl> if ( c - > ValueKnown ( split_dim_size ) ) { <nl>
|
Fix clang format errors
|
tensorflow/tensorflow
|
3910e6e0d09aa622f77888271cc33d8e20b138ae
|
2018-08-09T20:02:14Z
|
mmm a / hphp / hack / src / annotated_ast / aast_defs . ml <nl> ppp b / hphp / hack / src / annotated_ast / aast_defs . ml <nl> let pp_og_null_flavor fmt flavor = <nl> match flavor with <nl> | OG_nullthrows - > " OG_nullthrows " <nl> | OG_nullsafe - > " OG_nullsafe " <nl> - <nl> - let pp_kvc_kind fmt _ = Format . pp_print_string fmt " < kvc_kind > " <nl> - <nl> - let pp_vc_kind fmt _ = Format . pp_print_string fmt " < vc_kind > " <nl> mmm a / hphp / hack / test / decl / classes_const_keyword . php . exp <nl> ppp b / hphp / hack / test / decl / classes_const_keyword . php . exp <nl> Parsed decls : <nl> { Shallow_decl_defs . scc_abstract = false ; <nl> scc_expr = <nl> ( Some ( [ 25 : 34 - 50 ] , <nl> - ( ValCollection ( < vc_kind > , None , <nl> + ( ValCollection ( Keyset , None , <nl> [ ( [ 25 : 41 - 44 ] , ( String " a " ) ) ; ( [ 25 : 46 - 49 ] , ( String " b " ) ) ] <nl> ) ) ) ) ; <nl> scc_name = ( [ 25 : 24 - 31 ] , " CKEYSET " ) ; <nl> Parsed decls : <nl> ) ) ) <nl> } ; <nl> { Shallow_decl_defs . scc_abstract = false ; <nl> - scc_expr = <nl> - ( Some ( [ 26 : 47 - 55 ] , ( ValCollection ( < vc_kind > , None , [ ] ) ) ) ) ; <nl> + scc_expr = ( Some ( [ 26 : 47 - 55 ] , ( ValCollection ( Keyset , None , [ ] ) ) ) ) ; <nl> scc_name = ( [ 26 : 27 - 44 ] , " CCLASSNAME_KEYSET " ) ; <nl> scc_type = <nl> ( Rhint ( root | classes_const_keyword . php line 26 , characters 9 - 25 ) , <nl> Parsed decls : <nl> [ { Shallow_decl_defs . scc_abstract = false ; <nl> scc_expr = <nl> ( Some ( [ 5 : 35 - 53 ] , <nl> - ( KeyValCollection ( < kvc_kind > , None , <nl> + ( KeyValCollection ( Dict , None , <nl> [ ( ( [ 5 : 40 - 41 ] , ( Int " 2 " ) ) , ( [ 5 : 45 - 52 ] , ( String " folly " ) ) ) ] <nl> ) ) ) ) ; <nl> scc_name = ( [ 5 : 27 - 32 ] , " CDICT " ) ; <nl> Parsed decls : <nl> { Shallow_decl_defs . scc_abstract = false ; <nl> scc_expr = <nl> ( Some ( [ 7 : 5 - 34 ] , <nl> - ( KeyValCollection ( < kvc_kind > , None , <nl> + ( KeyValCollection ( Dict , None , <nl> [ ( ( [ 7 : 10 - 11 ] , ( Int " 2 " ) ) , <nl> ( [ 7 : 15 - 33 ] , <nl> - ( KeyValCollection ( < kvc_kind > , None , <nl> + ( KeyValCollection ( Dict , None , <nl> [ ( ( [ 7 : 20 - 21 ] , ( Int " 4 " ) ) , <nl> ( [ 7 : 25 - 32 ] , ( String " folly " ) ) ) ] <nl> ) ) ) ) <nl> Parsed decls : <nl> { Shallow_decl_defs . scc_abstract = false ; <nl> scc_expr = <nl> ( Some ( [ 9 : 5 - 44 ] , <nl> - ( KeyValCollection ( < kvc_kind > , None , <nl> + ( KeyValCollection ( Dict , None , <nl> [ ( ( [ 9 : 10 - 11 ] , ( Int " 2 " ) ) , <nl> ( [ 9 : 15 - 43 ] , <nl> - ( KeyValCollection ( < kvc_kind > , None , <nl> + ( KeyValCollection ( Dict , None , <nl> [ ( ( [ 9 : 20 - 21 ] , ( Int " 4 " ) ) , <nl> ( [ 9 : 25 - 32 ] , <nl> ( Unop ( Uminus , ( [ 9 : 26 - 32 ] , ( Float " 4 . 5e10 " ) ) ) ) ) ) ; <nl> Parsed decls : <nl> [ { Shallow_decl_defs . scc_abstract = false ; <nl> scc_expr = <nl> ( Some ( [ 17 : 25 - 37 ] , <nl> - ( ValCollection ( < vc_kind > , None , <nl> + ( ValCollection ( Vec , None , <nl> [ ( [ 17 : 29 - 30 ] , ( Int " 1 " ) ) ; ( [ 17 : 32 - 33 ] , ( Int " 2 " ) ) ; <nl> ( [ 17 : 35 - 36 ] , ( Int " 3 " ) ) ] <nl> ) ) ) ) ; <nl> Parsed decls : <nl> { Shallow_decl_defs . 
scc_abstract = false ; <nl> scc_expr = <nl> ( Some ( [ 18 : 37 - 64 ] , <nl> - ( ValCollection ( < vc_kind > , None , <nl> + ( ValCollection ( Vec , None , <nl> [ ( [ 18 : 41 - 47 ] , <nl> - ( ValCollection ( < vc_kind > , None , <nl> - [ ( [ 18 : 45 - 46 ] , ( Int " 1 " ) ) ] ) ) ) ; <nl> + ( ValCollection ( Vec , None , [ ( [ 18 : 45 - 46 ] , ( Int " 1 " ) ) ] ) ) ) ; <nl> ( [ 18 : 49 - 55 ] , <nl> - ( ValCollection ( < vc_kind > , None , <nl> - [ ( [ 18 : 53 - 54 ] , ( Int " 2 " ) ) ] ) ) ) ; <nl> + ( ValCollection ( Vec , None , [ ( [ 18 : 53 - 54 ] , ( Int " 2 " ) ) ] ) ) ) ; <nl> ( [ 18 : 57 - 63 ] , <nl> - ( ValCollection ( < vc_kind > , None , <nl> - [ ( [ 18 : 61 - 62 ] , ( Int " 3 " ) ) ] ) ) ) <nl> + ( ValCollection ( Vec , None , [ ( [ 18 : 61 - 62 ] , ( Int " 3 " ) ) ] ) ) ) <nl> ] <nl> ) ) ) ) ; <nl> scc_name = ( [ 18 : 23 - 34 ] , " CNESTED_VEC " ) ; <nl> mmm a / hphp / hack / test / tast / add_vector . php . exp <nl> ppp b / hphp / hack / test / tast / add_vector . php . exp <nl> <nl> ( ( [ 16 : 12 - 13 ] , Vector < T > ) , ( Id ( [ 16 : 12 - 13 ] , " x " ) ) ) , <nl> OG_nullthrows ) ) ) , <nl> ( ( [ 16 : 16 - 25 ] , Vector < T > ) , <nl> - ( ValCollection ( < vc_kind > , None , [ ] ) ) ) <nl> + ( ValCollection ( Vector , None , [ ] ) ) ) <nl> ) ) ) ) ) <nl> ] ; <nl> fb_annotation = ( ) } ; <nl> mmm a / hphp / hack / test / tast / class_const . php . exp <nl> ppp b / hphp / hack / test / tast / class_const . php . exp <nl> <nl> [ { cc_type = None ; cc_id = ( [ 5 : 9 - 12 ] , " FOO " ) ; <nl> cc_expr = <nl> ( Some ( ( [ 5 : 15 - 8 : 4 ] , dict < string , int > ) , <nl> - ( KeyValCollection ( < kvc_kind > , None , <nl> + ( KeyValCollection ( Dict , None , <nl> [ ( ( ( [ 6 : 5 - 8 ] , string ) , ( String " a " ) ) , <nl> ( ( [ 6 : 12 - 13 ] , int ) , ( Int " 1 " ) ) ) ; <nl> ( ( ( [ 7 : 5 - 8 ] , string ) , ( String " b " ) ) , <nl> mmm a / hphp / hack / test / tast / construct_unknown_class . php . exp <nl> ppp b / hphp / hack / test / tast / construct_unknown_class . php . exp <nl> Errors : <nl> [ ( ( [ 4 : 22 - 23 ] , int ) , ( Int " 3 " ) ) ; <nl> ( ( [ 4 : 25 - 28 ] , string ) , ( String " s " ) ) ; <nl> ( ( [ 4 : 30 - 36 ] , vec < int > ) , <nl> - ( ValCollection ( < vc_kind > , None , <nl> + ( ValCollection ( Vec , None , <nl> [ ( ( [ 4 : 34 - 35 ] , int ) , ( Int " 3 " ) ) ] ) ) ) <nl> ] , <nl> ( Some ( ( [ 4 : 41 - 43 ] , vec < string > ) , <nl> mmm a / hphp / hack / test / tast / dict_attribute . php . exp <nl> ppp b / hphp / hack / test / tast / dict_attribute . php . 
exp <nl> <nl> [ { ua_name = ( [ 3 : 9 - 11 ] , " \ \ Fi " ) ; <nl> ua_params = <nl> [ ( ( [ 3 : 12 - 36 ] , dict < string , int > ) , <nl> - ( KeyValCollection ( < kvc_kind > , None , <nl> + ( KeyValCollection ( Dict , None , <nl> [ ( ( ( [ 3 : 17 - 20 ] , string ) , ( String " a " ) ) , <nl> ( ( [ 3 : 24 - 25 ] , int ) , ( Int " 1 " ) ) ) ; <nl> ( ( ( [ 3 : 27 - 30 ] , string ) , ( String " b " ) ) , <nl> <nl> [ { ua_name = ( [ 3 : 9 - 11 ] , " \ \ Fi " ) ; <nl> ua_params = <nl> [ ( ( [ 3 : 12 - 36 ] , dict < string , int > ) , <nl> - ( KeyValCollection ( < kvc_kind > , None , <nl> + ( KeyValCollection ( Dict , None , <nl> [ ( ( ( [ 3 : 17 - 20 ] , string ) , ( String " a " ) ) , <nl> ( ( [ 3 : 24 - 25 ] , int ) , ( Int " 1 " ) ) ) ; <nl> ( ( ( [ 3 : 27 - 30 ] , string ) , ( String " b " ) ) , <nl> <nl> [ { ua_name = ( [ 3 : 9 - 11 ] , " \ \ Fi " ) ; <nl> ua_params = <nl> [ ( ( [ 3 : 12 - 36 ] , dict < string , int > ) , <nl> - ( KeyValCollection ( < kvc_kind > , None , <nl> + ( KeyValCollection ( Dict , None , <nl> [ ( ( ( [ 3 : 17 - 20 ] , string ) , ( String " a " ) ) , <nl> ( ( [ 3 : 24 - 25 ] , int ) , ( Int " 1 " ) ) ) ; <nl> ( ( ( [ 3 : 27 - 30 ] , string ) , ( String " b " ) ) , <nl> <nl> [ { ua_name = ( [ 3 : 9 - 11 ] , " \ \ Fi " ) ; <nl> ua_params = <nl> [ ( ( [ 3 : 12 - 36 ] , dict < string , int > ) , <nl> - ( KeyValCollection ( < kvc_kind > , None , <nl> + ( KeyValCollection ( Dict , None , <nl> [ ( ( ( [ 3 : 17 - 20 ] , string ) , ( String " a " ) ) , <nl> ( ( [ 3 : 24 - 25 ] , int ) , ( Int " 1 " ) ) ) ; <nl> ( ( ( [ 3 : 27 - 30 ] , string ) , ( String " b " ) ) , <nl> <nl> [ { ua_name = ( [ 3 : 9 - 11 ] , " \ \ Fi " ) ; <nl> ua_params = <nl> [ ( ( [ 3 : 12 - 36 ] , dict < string , int > ) , <nl> - ( KeyValCollection ( < kvc_kind > , None , <nl> + ( KeyValCollection ( Dict , None , <nl> [ ( ( ( [ 3 : 17 - 20 ] , string ) , ( String " a " ) ) , <nl> ( ( [ 3 : 24 - 25 ] , int ) , ( Int " 1 " ) ) ) ; <nl> ( ( ( [ 3 : 27 - 30 ] , string ) , ( String " b " ) ) , <nl> <nl> [ { ua_name = ( [ 3 : 9 - 11 ] , " \ \ Fi " ) ; <nl> ua_params = <nl> [ ( ( [ 3 : 12 - 36 ] , dict < string , int > ) , <nl> - ( KeyValCollection ( < kvc_kind > , None , <nl> + ( KeyValCollection ( Dict , None , <nl> [ ( ( ( [ 3 : 17 - 20 ] , string ) , ( String " a " ) ) , <nl> ( ( [ 3 : 24 - 25 ] , int ) , ( Int " 1 " ) ) ) ; <nl> ( ( ( [ 3 : 27 - 30 ] , string ) , ( String " b " ) ) , <nl> <nl> [ { ua_name = ( [ 3 : 9 - 11 ] , " \ \ Fi " ) ; <nl> ua_params = <nl> [ ( ( [ 3 : 12 - 36 ] , dict < string , int > ) , <nl> - ( KeyValCollection ( < kvc_kind > , None , <nl> + ( KeyValCollection ( Dict , None , <nl> [ ( ( ( [ 3 : 17 - 20 ] , string ) , ( String " a " ) ) , <nl> ( ( [ 3 : 24 - 25 ] , int ) , ( Int " 1 " ) ) ) ; <nl> ( ( ( [ 3 : 27 - 30 ] , string ) , ( String " b " ) ) , <nl> <nl> [ { ua_name = ( [ 3 : 9 - 11 ] , " \ \ Fi " ) ; <nl> ua_params = <nl> [ ( ( [ 3 : 12 - 36 ] , dict < string , int > ) , <nl> - ( KeyValCollection ( < kvc_kind > , None , <nl> + ( KeyValCollection ( Dict , None , <nl> [ ( ( ( [ 3 : 17 - 20 ] , string ) , ( String " a " ) ) , <nl> ( ( [ 3 : 24 - 25 ] , int ) , ( Int " 1 " ) ) ) ; <nl> ( ( ( [ 3 : 27 - 30 ] , string ) , ( String " b " ) ) , <nl> <nl> [ { ua_name = ( [ 3 : 9 - 11 ] , " \ \ Fi " ) ; <nl> ua_params = <nl> [ ( ( [ 3 : 12 - 36 ] , dict < string , int > ) , <nl> - ( KeyValCollection ( < kvc_kind > , None , <nl> + ( KeyValCollection ( Dict , None , <nl> [ ( ( ( [ 3 : 17 - 20 ] , string ) , ( String " a " ) ) , <nl> ( ( [ 3 : 24 - 25 ] , int ) , ( Int " 1 " ) ) ) ; 
<nl> ( ( ( [ 3 : 27 - 30 ] , string ) , ( String " b " ) ) , <nl> <nl> [ { ua_name = ( [ 3 : 9 - 11 ] , " \ \ Fi " ) ; <nl> ua_params = <nl> [ ( ( [ 3 : 12 - 36 ] , dict < string , int > ) , <nl> - ( KeyValCollection ( < kvc_kind > , None , <nl> + ( KeyValCollection ( Dict , None , <nl> [ ( ( ( [ 3 : 17 - 20 ] , string ) , ( String " a " ) ) , <nl> ( ( [ 3 : 24 - 25 ] , int ) , ( Int " 1 " ) ) ) ; <nl> ( ( ( [ 3 : 27 - 30 ] , string ) , ( String " b " ) ) , <nl> <nl> [ { ua_name = ( [ 20 : 15 - 16 ] , " \ \ P " ) ; <nl> ua_params = <nl> [ ( ( [ 20 : 17 - 41 ] , dict < string , int > ) , <nl> - ( KeyValCollection ( < kvc_kind > , None , <nl> + ( KeyValCollection ( Dict , None , <nl> [ ( ( ( [ 20 : 22 - 25 ] , string ) , ( String " a " ) ) , <nl> ( ( [ 20 : 29 - 30 ] , int ) , ( Int " 1 " ) ) ) ; <nl> ( ( ( [ 20 : 32 - 35 ] , string ) , ( String " b " ) ) , <nl> <nl> [ { ua_name = ( [ 19 : 3 - 4 ] , " \ \ F " ) ; <nl> ua_params = <nl> [ ( ( [ 19 : 5 - 29 ] , dict < string , int > ) , <nl> - ( KeyValCollection ( < kvc_kind > , None , <nl> + ( KeyValCollection ( Dict , None , <nl> [ ( ( ( [ 19 : 10 - 13 ] , string ) , ( String " a " ) ) , <nl> ( ( [ 19 : 17 - 18 ] , int ) , ( Int " 1 " ) ) ) ; <nl> ( ( ( [ 19 : 20 - 23 ] , string ) , ( String " b " ) ) , <nl> <nl> [ { ua_name = ( [ 26 : 5 - 10 ] , " \ \ SProp " ) ; <nl> ua_params = <nl> [ ( ( [ 26 : 11 - 35 ] , dict < string , int > ) , <nl> - ( KeyValCollection ( < kvc_kind > , None , <nl> + ( KeyValCollection ( Dict , None , <nl> [ ( ( ( [ 26 : 16 - 19 ] , string ) , ( String " a " ) ) , <nl> ( ( [ 26 : 23 - 24 ] , int ) , ( Int " 1 " ) ) ) ; <nl> ( ( ( [ 26 : 26 - 29 ] , string ) , ( String " b " ) ) , <nl> <nl> [ { ua_name = ( [ 24 : 5 - 10 ] , " \ \ IProp " ) ; <nl> ua_params = <nl> [ ( ( [ 24 : 11 - 35 ] , dict < string , int > ) , <nl> - ( KeyValCollection ( < kvc_kind > , None , <nl> + ( KeyValCollection ( Dict , None , <nl> [ ( ( ( [ 24 : 16 - 19 ] , string ) , ( String " a " ) ) , <nl> ( ( [ 24 : 23 - 24 ] , int ) , ( Int " 1 " ) ) ) ; <nl> ( ( ( [ 24 : 26 - 29 ] , string ) , ( String " b " ) ) , <nl> <nl> [ { ua_name = ( [ 28 : 5 - 8 ] , " \ \ Met " ) ; <nl> ua_params = <nl> [ ( ( [ 28 : 9 - 33 ] , dict < string , int > ) , <nl> - ( KeyValCollection ( < kvc_kind > , None , <nl> + ( KeyValCollection ( Dict , None , <nl> [ ( ( ( [ 28 : 14 - 17 ] , string ) , ( String " a " ) ) , <nl> ( ( [ 28 : 21 - 22 ] , int ) , ( Int " 1 " ) ) ) ; <nl> ( ( ( [ 28 : 24 - 27 ] , string ) , ( String " b " ) ) , <nl> <nl> [ { ua_name = ( [ 22 : 3 - 4 ] , " \ \ C " ) ; <nl> ua_params = <nl> [ ( ( [ 22 : 5 - 29 ] , dict < string , int > ) , <nl> - ( KeyValCollection ( < kvc_kind > , None , <nl> + ( KeyValCollection ( Dict , None , <nl> [ ( ( ( [ 22 : 10 - 13 ] , string ) , ( String " a " ) ) , <nl> ( ( [ 22 : 17 - 18 ] , int ) , ( Int " 1 " ) ) ) ; <nl> ( ( ( [ 22 : 20 - 23 ] , string ) , ( String " b " ) ) , <nl> <nl> [ { ua_name = ( [ 3 : 9 - 11 ] , " \ \ Fi " ) ; <nl> ua_params = <nl> [ ( ( [ 3 : 12 - 36 ] , dict < string , int > ) , <nl> - ( KeyValCollection ( < kvc_kind > , None , <nl> + ( KeyValCollection ( Dict , None , <nl> [ ( ( ( [ 3 : 17 - 20 ] , string ) , ( String " a " ) ) , <nl> ( ( [ 3 : 24 - 25 ] , int ) , ( Int " 1 " ) ) ) ; <nl> ( ( ( [ 3 : 27 - 30 ] , string ) , ( String " b " ) ) , <nl> <nl> [ { ua_name = ( [ 32 : 3 - 4 ] , " \ \ E " ) ; <nl> ua_params = <nl> [ ( ( [ 32 : 5 - 29 ] , dict < string , int > ) , <nl> - ( KeyValCollection ( < kvc_kind > , None , <nl> + ( KeyValCollection ( Dict , None , <nl> [ ( ( ( [ 32 : 10 - 13 ] , 
string ) , ( String " a " ) ) , <nl> ( ( [ 32 : 17 - 18 ] , int ) , ( Int " 1 " ) ) ) ; <nl> ( ( ( [ 32 : 20 - 23 ] , string ) , ( String " b " ) ) , <nl> <nl> [ { ua_name = ( [ 3 : 9 - 11 ] , " \ \ Fi " ) ; <nl> ua_params = <nl> [ ( ( [ 3 : 12 - 36 ] , dict < string , int > ) , <nl> - ( KeyValCollection ( < kvc_kind > , None , <nl> + ( KeyValCollection ( Dict , None , <nl> [ ( ( ( [ 3 : 17 - 20 ] , string ) , ( String " a " ) ) , <nl> ( ( [ 3 : 24 - 25 ] , int ) , ( Int " 1 " ) ) ) ; <nl> ( ( ( [ 3 : 27 - 30 ] , string ) , ( String " b " ) ) , <nl> <nl> [ { ua_name = ( [ 35 : 3 - 9 ] , " \ \ TAlias " ) ; <nl> ua_params = <nl> [ ( ( [ 35 : 10 - 34 ] , dict < string , int > ) , <nl> - ( KeyValCollection ( < kvc_kind > , None , <nl> + ( KeyValCollection ( Dict , None , <nl> [ ( ( ( [ 35 : 15 - 18 ] , string ) , ( String " a " ) ) , <nl> ( ( [ 35 : 22 - 23 ] , int ) , ( Int " 1 " ) ) ) ; <nl> ( ( ( [ 35 : 25 - 28 ] , string ) , ( String " b " ) ) , <nl> mmm a / hphp / hack / test / tast / initializer . php . exp <nl> ppp b / hphp / hack / test / tast / initializer . php . exp <nl> <nl> cv_id = ( [ 12 : 18 - 20 ] , " s " ) ; <nl> cv_expr = <nl> ( Some ( ( [ 12 : 23 - 34 ] , Set < string > ) , <nl> - ( ValCollection ( < vc_kind > , None , <nl> + ( ValCollection ( Set , None , <nl> [ ( ( [ 12 : 28 - 33 ] , string ) , ( String " foo " ) ) ] ) ) ) ) ; <nl> cv_user_attributes = [ ] ; cv_doc_comment = None ; <nl> cv_is_promoted_variadic = false ; cv_is_static = true ; <nl> mmm a / hphp / hack / test / tast / mixed_mixed . php . exp <nl> ppp b / hphp / hack / test / tast / mixed_mixed . php . exp <nl> <nl> ( Id ( [ 7 : 3 - 15 ] , " \ \ expect_mixed " ) ) ) , <nl> [ ] , <nl> [ ( ( [ 8 : 5 - 11 : 6 ] , dict < arraykey , mixed > ) , <nl> - ( KeyValCollection ( < kvc_kind > , None , <nl> + ( KeyValCollection ( Dict , None , <nl> [ ( ( ( [ 9 : 7 - 10 ] , string ) , ( String " a " ) ) , <nl> ( ( [ 9 : 14 - 16 ] , int ) , ( Lvar ( [ 9 : 14 - 16 ] , $ a ) ) ) ) ; <nl> ( ( ( [ 10 : 7 - 10 ] , string ) , ( String " b " ) ) , <nl> mmm a / hphp / hack / test / tast / unresolved_grown_after_lambda . php . exp <nl> ppp b / hphp / hack / test / tast / unresolved_grown_after_lambda . php . exp <nl> Errors : <nl> ( Binop ( ( Eq None ) , <nl> ( ( [ 4 : 3 - 9 ] , Vector < int > ) , ( Lvar ( [ 4 : 3 - 9 ] , $ items ) ) ) , <nl> ( ( [ 4 : 12 - 25 ] , Vector < int > ) , <nl> - ( ValCollection ( < vc_kind > , None , <nl> + ( ValCollection ( Vector , None , <nl> [ ( ( [ 4 : 21 - 23 ] , int ) , ( Lvar ( [ 4 : 21 - 23 ] , $ i ) ) ) ] ) ) ) <nl> ) ) ) ) ) ; <nl> ( [ 5 : 3 - 12 : 5 ] , <nl>
|
Stop overriding automatic pp for collection kinds
|
facebook/hhvm
|
23fa6d3c1dbce70a8534d7bda2e15dd82e498a49
|
2020-07-01T01:11:54Z
|
mmm a / tensorflow / python / eager / pywrap_tfe_src . cc <nl> ppp b / tensorflow / python / eager / pywrap_tfe_src . cc <nl> using tensorflow : : string ; <nl> using tensorflow : : strings : : Printf ; <nl> <nl> namespace { <nl> - <nl> + / / NOTE : Items are retrieved from and returned to these unique_ptrs , and they <nl> + / / act as arenas . This is important if the same thread requests 2 items without <nl> + / / releasing one . <nl> + / / The following sequence of events on the same thread will still succeed : <nl> + / / - GetOp < - Returns existing . <nl> + / / - GetOp < - Allocates and returns a new pointer . <nl> + / / - ReleaseOp < - Sets the item in the unique_ptr . <nl> + / / - ReleaseOp < - Sets the item in the unique_ptr , deleting the old one . <nl> + / / This occurs when a PyFunc kernel is run . This behavior makes it safe in that <nl> + / / case , as well as the case where python decides to reuse the underlying <nl> + / / C + + thread in 2 python threads case . <nl> thread_local std : : unique_ptr < TFE_Op > thread_local_eager_operation = / / NOLINT <nl> nullptr ; <nl> thread_local std : : unique_ptr < TF_Status > thread_local_tf_status = / / NOLINT <nl>
|
Some documentation of the thread_local unique_ptrs .
|
tensorflow/tensorflow
|
49e8e1fe0c2eb096045e4806acf9ad4ba98317a0
|
2019-10-03T23:29:30Z
|
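The comment added in the commit above describes a one-slot, per-thread cache that tolerates the same thread acquiring twice before releasing. A compilable miniature of that contract — `Op` stands in for `TFE_Op`, and the function names are hypothetical:

```cpp
#include <cassert>
#include <memory>

struct Op { int payload = 0; };  // stand-in for TFE_Op

thread_local std::unique_ptr<Op> thread_local_op;  // one-slot per-thread arena

// Returns the cached Op if present, otherwise a freshly allocated one.
Op* GetOp() {
    if (thread_local_op) return thread_local_op.release();
    return new Op();
}

// Stores the Op back in the slot; if the slot was refilled in the meantime
// (nested GetOp/ReleaseOp, as when a PyFunc kernel runs), the old occupant
// is deleted by unique_ptr::reset.
void ReleaseOp(Op* op) {
    thread_local_op.reset(op);
}

int main() {
    Op* a = GetOp();   // slot empty: allocates
    Op* b = GetOp();   // slot still empty: allocates again
    ReleaseOp(a);      // slot now holds a
    ReleaseOp(b);      // deletes a, slot now holds b
    assert(thread_local_op.get() == b);
}
```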
mmm a / tools / depends / xbmc - addons . include <nl> ppp b / tools / depends / xbmc - addons . include <nl> export PKG_CONFIG_LIBDIR = $ ( ADDON_DEPS_DIR ) / lib / pkgconfig <nl> ifeq ( $ ( CROSS_COMPILING ) , yes ) <nl> DEPS = $ ( TOOLCHAIN_FILE ) $ ( abs_top_srcdir ) / target / config - binaddons . site $ ( abs_top_srcdir ) / target / Toolchain_binaddons . cmake $ ( CONFIG_SUB ) $ ( CONFIG_GUESS ) <nl> TOOLCHAIN = - DCMAKE_TOOLCHAIN_FILE = $ ( TOOLCHAIN_FILE ) <nl> + ifeq ( $ ( OS ) , linux ) <nl> + ifneq ( $ ( TARGET_PLATFORM ) , gbm ) <nl> + DEPS + = linux - system - libs <nl> + endif <nl> + endif <nl> endif <nl> <nl> ifeq ( $ ( PLATFORM ) , ) <nl>
|
tools / depends : fix linux add - on build
|
xbmc/xbmc
|
6e8bd60b52f6100abcdbacebb1557c9210076998
|
2020-10-04T17:05:17Z
|
mmm a / src / mongo / base / generate_error_codes . py <nl> ppp b / src / mongo / base / generate_error_codes . py <nl> def generate_error_class_predicate_definition ( self , class_name , code_names ) : <nl> # pragma once <nl> # include < string > <nl> # include < cstdint > <nl> + # include < iosfwd > <nl> # include " mongo / base / string_data . h " <nl> namespace mongo { <nl> / * * <nl> class ErrorCodes { <nl> * that the result of a call to fromInt ( ) may not be one of the values in the <nl> * Error enumeration . <nl> * / <nl> - static Error fromInt ( int code ) ; <nl> + static Error fromInt ( int code ) { <nl> + return static_cast < Error > ( code ) ; <nl> + } <nl> % ( error_code_class_predicate_declarations ) s ; <nl> } ; <nl> + <nl> + std : : ostream & operator < < ( std : : ostream & stream , ErrorCodes : : Error code ) ; <nl> } / / namespace mongo <nl> ' ' ' <nl> <nl> class ErrorCodes { <nl> std : : string ErrorCodes : : errorString ( Error err ) { <nl> switch ( err ) { <nl> % ( symbol_to_string_cases ) s ; <nl> - default : return mongoutils : : str : : stream ( ) < < " Location " < < err ; <nl> + default : return mongoutils : : str : : stream ( ) < < " Location " < < int ( err ) ; <nl> } <nl> } <nl> ErrorCodes : : Error ErrorCodes : : fromString ( StringData name ) { <nl> % ( string_to_symbol_cases ) s ; <nl> return UnknownError ; <nl> } <nl> - ErrorCodes : : Error ErrorCodes : : fromInt ( int code ) { <nl> - return static_cast < Error > ( code ) ; <nl> + std : : ostream & operator < < ( std : : ostream & stream , ErrorCodes : : Error code ) { <nl> + return stream < < ErrorCodes : : errorString ( code ) ; <nl> } <nl> % ( error_code_class_predicate_definitions ) s <nl> namespace { <nl> mmm a / src / mongo / base / status . cpp <nl> ppp b / src / mongo / base / status . cpp <nl> std : : ostream & operator < < ( std : : ostream & os , const Status & status ) { <nl> return os < < status . codeString ( ) < < " " < < status . reason ( ) ; <nl> } <nl> <nl> - std : : ostream & operator < < ( std : : ostream & os , ErrorCodes : : Error code ) { <nl> - return os < < ErrorCodes : : errorString ( code ) ; <nl> - } <nl> - <nl> std : : string Status : : toString ( ) const { <nl> std : : ostringstream ss ; <nl> ss < < codeString ( ) ; <nl> mmm a / src / mongo / base / status . h <nl> ppp b / src / mongo / base / status . h <nl> inline bool operator = = ( const ErrorCodes : : Error lhs , const Status & rhs ) ; <nl> <nl> inline bool operator ! = ( const ErrorCodes : : Error lhs , const Status & rhs ) ; <nl> <nl> - / / <nl> - / / Convenience method for unittest code . Please use accessors otherwise . <nl> - / / <nl> - <nl> std : : ostream & operator < < ( std : : ostream & os , const Status & status ) ; <nl> - std : : ostream & operator < < ( std : : ostream & os , ErrorCodes : : Error ) ; <nl> <nl> } / / namespace mongo <nl> <nl> mmm a / src / mongo / bson / util / builder . h <nl> ppp b / src / mongo / bson / util / builder . h <nl> class StringBuilderImpl { <nl> append ( typeName ( type ) ) ; <nl> return * this ; <nl> } <nl> + StringBuilderImpl & operator < < ( ErrorCodes : : Error code ) { <nl> + append ( ErrorCodes : : errorString ( code ) ) ; <nl> + return * this ; <nl> + } <nl> <nl> void appendDoubleNice ( double x ) { <nl> const int prev = _buf . l ; <nl>
|
SERVER - 30580 Always use errorString ( ) when streaming an ErrorCode : : Error
|
mongodb/mongo
|
fe72cc35ff8af7bf421d29c668e4d50c048d141b
|
2017-08-16T20:28:02Z
|
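The mongo commit inlines `fromInt()`, moves `operator<<` for `ErrorCodes::Error` next to the enum, and casts to `int` for unlisted codes inside `errorString()`. A self-contained miniature of the generated shape — hand-written here with a tiny subset of codes:

```cpp
#include <iostream>
#include <string>

// Miniature of the generated ErrorCodes class.
struct ErrorCodes {
    enum Error : int { OK = 0, BadValue = 2, UnknownError = 8 };
    static std::string errorString(Error err) {
        switch (err) {
            case OK:           return "OK";
            case BadValue:     return "BadValue";
            case UnknownError: return "UnknownError";
            // Build the fallback from the raw value; in the generated code
            // the int(err) cast also keeps str::stream from re-entering the
            // new stream operator for Error.
            default: return "Location " + std::to_string(int(err));
        }
    }
    // Header-inlined, as in the commit; the result may not be a named code.
    static Error fromInt(int code) { return static_cast<Error>(code); }
};

std::ostream& operator<<(std::ostream& stream, ErrorCodes::Error code) {
    return stream << ErrorCodes::errorString(code);
}

int main() {
    std::cout << ErrorCodes::BadValue << "\n";        // BadValue
    std::cout << ErrorCodes::fromInt(30580) << "\n";  // Location 30580
}
```

Without the custom operator, streaming the enum would print its numeric value; with it, every sink that goes through `std::ostream` gets the symbolic name for free.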
mmm a / emlink . py <nl> ppp b / emlink . py <nl> def __init__ ( self , filename ) : <nl> <nl> # imports <nl> self . imports_js = self . js [ self . start_asm : self . start_funcs ] <nl> - self . imports = [ m . group ( 0 ) for m in js_optimizer . import_sig . finditer ( self . imports_js ) ] <nl> - # print ' imports ' , self . imports <nl> + self . imports = { } <nl> + for imp in js_optimizer . import_sig . finditer ( self . imports_js ) : <nl> + key , value = imp . group ( 0 ) . split ( ' var ' ) [ 1 ] [ : - 1 ] . replace ( ' ' , ' ' ) . split ( ' = ' ) <nl> + self . imports [ key ] = value <nl> + # print > > sys . stderr , ' imports ' , self . imports <nl> <nl> # funcs <nl> self . funcs_js = self . js [ self . start_funcs : self . end_funcs ] <nl> def relocate_into ( self , main ) : <nl> # print > > sys . stderr , ' replacements : ' , replacements <nl> <nl> # imports <nl> - main_imports = set ( main . imports ) <nl> - new_imports = [ imp for imp in self . imports if imp not in main_imports and <nl> - imp . split ( ' var ' ) [ 1 ] . split ( ' = ' ) [ 0 ] not in main_funcs ] # a previous import may now be implemented # TODO : reverse <nl> + new_imports = [ ' var % s = % s ; ' % ( imp , self . imports [ imp ] ) for imp in self . imports if imp not in main . imports and imp not in main_funcs ] <nl> main . imports_js + = ' \ n ' . join ( new_imports ) + ' \ n ' <nl> <nl> # sendings : add invokes for new tables <nl> mmm a / tests / runner . py <nl> ppp b / tests / runner . py <nl> def test ( name , header , main , side , expected , first = True ) : <nl> out = run_js ( ' together . js ' , engine = SPIDERMONKEY_ENGINE , stderr = PIPE , full_output = True ) <nl> self . assertContained ( expected , out ) <nl> self . validate_asmjs ( out ) <nl> - if first : test ( name + ' ( reverse ) ' , header , side , main , expected , False ) # test reverse order <nl> + if first : <nl> + shutil . copyfile ( ' together . js ' , ' first . js ' ) <nl> + test ( name + ' ( reverse ) ' , header , side , main , expected , False ) # test reverse order <nl> <nl> # test a simple call from one module to another . only one has a string ( and constant memory initialization for it ) <nl> test ( ' basics ' , ' ' , ' ' ' <nl> mmm a / tools / js_optimizer . py <nl> ppp b / tools / js_optimizer . py <nl> def path_from_root ( * pathelems ) : <nl> <nl> DEBUG = os . environ . get ( ' EMCC_DEBUG ' ) <nl> <nl> - import_sig = re . compile ( ' ( * ) var ( [ _ \ w $ ] + ) * = . * ; ' ) <nl> + import_sig = re . compile ( ' var ( [ _ \ w $ ] + ) * = [ ^ , ; ] + ; ' ) <nl> func_sig = re . compile ( ' ( * ) function ( [ _ \ w $ ] + ) \ ( ' ) <nl> <nl> class Minifier : <nl>
|
refactor linking of imports
|
emscripten-core/emscripten
|
4f9dfa2025f92130036dc7ca83ea91ebc8275e89
|
2013-07-03T22:31:04Z
|
mmm a / modules / prediction / scenario / feature_extractor / feature_extractor . cc <nl> ppp b / modules / prediction / scenario / feature_extractor / feature_extractor . cc <nl> void FeatureExtractor : : ExtractEgoLaneFeatures ( <nl> EnvironmentFeatures * ptr_environment_features , <nl> const LaneInfoPtr & ptr_ego_lane , const common : : math : : Vec2d & ego_position ) { <nl> if ( ptr_ego_lane = = nullptr ) { <nl> - AERROR < < " Ego vehicle is not on any lane . " ; <nl> + ADEBUG < < " Ego vehicle is not on any lane . " ; <nl> return ; <nl> } <nl> ADEBUG < < " Ego vehicle is on lane [ " < < ptr_ego_lane - > id ( ) . id ( ) < < " ] " ; <nl> void FeatureExtractor : : ExtractNeighborLaneFeatures ( <nl> EnvironmentFeatures * ptr_environment_features , <nl> const LaneInfoPtr & ptr_ego_lane , const Vec2d & ego_position ) { <nl> if ( ptr_ego_lane = = nullptr ) { <nl> - AERROR < < " Ego vehicle is not on any lane . " ; <nl> + ADEBUG < < " Ego vehicle is not on any lane . " ; <nl> return ; <nl> } <nl> <nl>
|
Prediction : mute error message and reduce to debug level
|
ApolloAuto/apollo
|
46cd2ec0d59a892e8ec42438ac3f34d4eaba10e9
|
2019-06-24T06:13:54Z
|
mmm a / src / clustering / administration / issues / log_write . cc <nl> ppp b / src / clustering / administration / issues / log_write . cc <nl> void log_write_issue_tracker_t : : combine ( <nl> if ( combined_it = = combined_issues . end ( ) ) { <nl> combined_issues . insert ( std : : make_pair ( issue . message , & issue ) ) ; <nl> } else { <nl> - rassert ( issue . affected_server_ids . size ( ) = = 0 ) ; <nl> + rassert ( issue . affected_server_ids . size ( ) = = 1 ) ; <nl> combined_it - > second - > add_server ( issue . affected_server_ids [ 0 ] ) ; <nl> } <nl> } <nl>
|
fixing assert in log_write_issue_tracker_t : : combine
|
rethinkdb/rethinkdb
|
1886ec2f65828db8bc720571c7f7dc5c959eb5a8
|
2014-10-06T18:45:47Z
|
mmm a / libs / image / include / image / ColorTransform . h <nl> ppp b / libs / image / include / image / ColorTransform . h <nl> inline filament : : math : : float3 linearToSRGB ( const filament : : math : : float3 & color ) <nl> } <nl> <nl> / / Creates a n - channel sRGB image from a linear floating - point image . <nl> - / / The source image can have more than N channels , but only the first N are honored . <nl> - template < typename T , int N = 3 > <nl> + / / The source image can have more than N channels , but only the first N are converted to sRGB . <nl> + template < typename T , int N = 3 > <nl> std : : unique_ptr < uint8_t [ ] > fromLinearTosRGB ( const LinearImage & image ) { <nl> const size_t w = image . getWidth ( ) ; <nl> const size_t h = image . getHeight ( ) ; <nl> std : : unique_ptr < uint8_t [ ] > fromLinearTosRGB ( const LinearImage & image ) { <nl> } <nl> <nl> / / Creates a N - channel RGB u8 image from a f32 image . <nl> - / / The source image can have three or more channels , but only the first N are honored . <nl> - template < typename T , int N = 3 > <nl> + template < typename T , int N = 3 > <nl> std : : unique_ptr < uint8_t [ ] > fromLinearToRGB ( const LinearImage & image ) { <nl> - using filament : : math : : float3 ; <nl> size_t w = image . getWidth ( ) ; <nl> size_t h = image . getHeight ( ) ; <nl> size_t channels = image . getChannels ( ) ; <nl> mmm a / libs / image / src / ImageSampler . cpp <nl> ppp b / libs / image / src / ImageSampler . cpp <nl> void generateMipmaps ( const LinearImage & source , Filter filter , LinearImage * resu <nl> uint32_t width = source . getWidth ( ) ; <nl> uint32_t height = source . getHeight ( ) ; <nl> for ( uint32_t n = 0 ; n < mips ; + + n ) { <nl> - width = std : : max ( width > > 1u , 1u ) ; <nl> - height = std : : max ( height > > 1u , 1u ) ; <nl> - result [ n ] = resampleImage ( source , width , height , filter ) ; <nl> + width = std : : max ( width > > 1u , 1u ) ; <nl> + height = std : : max ( height > > 1u , 1u ) ; <nl> + result [ n ] = resampleImage ( source , width , height , filter ) ; <nl> } <nl> } <nl> <nl> mmm a / libs / imageio / src / ImageDecoder . cpp <nl> ppp b / libs / imageio / src / ImageDecoder . 
cpp <nl> LinearImage PNGDecoder : : decode ( ) { <nl> if ( colorType = = PNG_COLOR_TYPE_PALETTE ) { <nl> png_set_palette_to_rgb ( mPNG ) ; <nl> } <nl> - if ( colorType = = PNG_COLOR_TYPE_GRAY ) { <nl> + if ( colorType = = PNG_COLOR_TYPE_GRAY | | colorType = = PNG_COLOR_TYPE_GRAY_ALPHA ) { <nl> + if ( bitDepth < 8 ) { <nl> + png_set_expand_gray_1_2_4_to_8 ( mPNG ) ; <nl> + } <nl> png_set_gray_to_rgb ( mPNG ) ; <nl> } <nl> + if ( png_get_valid ( mPNG , mInfo , PNG_INFO_tRNS ) ) { <nl> + png_set_tRNS_to_alpha ( mPNG ) ; <nl> + } <nl> if ( getColorSpace ( ) = = ImageDecoder : : ColorSpace : : SRGB ) { <nl> png_set_alpha_mode ( mPNG , PNG_ALPHA_PNG , PNG_DEFAULT_sRGB ) ; <nl> } else { <nl> LinearImage PNGDecoder : : decode ( ) { <nl> } <nl> <nl> png_read_update_info ( mPNG , mInfo ) ; <nl> + <nl> + / / Read updated color type since we may have asked for a conversion before <nl> + colorType = png_get_color_type ( mPNG , mInfo ) ; <nl> + <nl> uint32_t width = png_get_image_width ( mPNG , mInfo ) ; <nl> uint32_t height = png_get_image_height ( mPNG , mInfo ) ; <nl> size_t rowBytes = png_get_rowbytes ( mPNG , mInfo ) ; <nl> LinearImage PNGDecoder : : decode ( ) { <nl> if ( colorType = = PNG_COLOR_TYPE_RGBA ) { <nl> if ( getColorSpace ( ) = = ImageDecoder : : ColorSpace : : SRGB ) { <nl> return toLinearWithAlpha < uint16_t > ( width , height , rowBytes , imageData , <nl> - [ ] ( uint16_t v ) - > uint16_t { return ntohs ( v ) ; } , <nl> - sRGBToLinear < filament : : math : : float4 > ) ; <nl> + [ ] ( uint16_t v ) - > uint16_t { return ntohs ( v ) ; } , <nl> + sRGBToLinear < filament : : math : : float4 > ) ; <nl> } else { <nl> return toLinearWithAlpha < uint16_t > ( width , height , rowBytes , imageData , <nl> - [ ] ( uint16_t v ) - > uint16_t { return ntohs ( v ) ; } , <nl> - [ ] ( const filament : : math : : float4 & color ) - > filament : : math : : float4 { return color ; } ) ; <nl> + [ ] ( uint16_t v ) - > uint16_t { return ntohs ( v ) ; } , <nl> + [ ] ( const filament : : math : : float4 & color ) - > filament : : math : : float4 { return color ; } ) ; <nl> } <nl> } else { <nl> / / Convert to linear float ( PNG 16 stores data in network order ( big endian ) . <nl> if ( getColorSpace ( ) = = ImageDecoder : : ColorSpace : : SRGB ) { <nl> return toLinear < uint16_t > ( width , height , rowBytes , imageData , <nl> - [ ] ( uint16_t v ) - > uint16_t { return ntohs ( v ) ; } , <nl> + [ ] ( uint16_t v ) - > uint16_t { return ntohs ( v ) ; } , <nl> sRGBToLinear < filament : : math : : float3 > ) ; <nl> } else { <nl> return toLinear < uint16_t > ( width , height , rowBytes , imageData , <nl> - [ ] ( uint16_t v ) - > uint16_t { return ntohs ( v ) ; } , <nl> - [ ] ( const filament : : math : : float3 & color ) - > filament : : math : : float3 { return color ; } ) ; <nl> + [ ] ( uint16_t v ) - > uint16_t { return ntohs ( v ) ; } , <nl> + [ ] ( const filament : : math : : float3 & color ) - > filament : : math : : float3 { return color ; } ) ; <nl> } <nl> } <nl> } catch ( std : : runtime_error & e ) { <nl> mmm a / libs / imageio / src / ImageEncoder . cpp <nl> ppp b / libs / imageio / src / ImageEncoder . 
cpp <nl> class PNGEncoder : public ImageEncoder : : Encoder { <nl> bool encode ( const LinearImage & image ) override ; <nl> <nl> int chooseColorType ( const LinearImage & image ) const ; <nl> - uint32_t getChannelsCount ( ) const ; <nl> + uint32_t getChannelsCount ( int colorType ) const ; <nl> <nl> static void cb_error ( png_structp png , png_const_charp error ) ; <nl> static void cb_stream ( png_structp png , png_bytep buffer , png_size_t size ) ; <nl> int PNGEncoder : : chooseColorType ( const LinearImage & image ) const { <nl> switch ( channels ) { <nl> case 1 : <nl> return PNG_COLOR_TYPE_GRAY ; <nl> - default : <nl> - std : : cerr < < " Warning : strange number of channels in PNG " < < std : : endl ; <nl> case 3 : <nl> switch ( mFormat ) { <nl> case PixelFormat : : RGBM : <nl> int PNGEncoder : : chooseColorType ( const LinearImage & image ) const { <nl> default : <nl> return PNG_COLOR_TYPE_RGB ; <nl> } <nl> + case 4 : <nl> + return PNG_COLOR_TYPE_RGBA ; <nl> + default : <nl> + std : : cerr < < " Warning : strange number of channels in PNG " < < std : : endl ; <nl> + return PNG_COLOR_TYPE_RGB ; <nl> } <nl> } <nl> <nl> - uint32_t PNGEncoder : : getChannelsCount ( ) const { <nl> + uint32_t PNGEncoder : : getChannelsCount ( int colorType ) const { <nl> switch ( mFormat ) { <nl> case PixelFormat : : RGBM : <nl> case PixelFormat : : RGB_10_11_11_REV : <nl> return 4 ; <nl> default : <nl> + switch ( colorType ) { <nl> + case PNG_COLOR_TYPE_GRAY : return 1 ; <nl> + case PNG_COLOR_TYPE_RGB : return 3 ; <nl> + case PNG_COLOR_TYPE_RGBA : return 4 ; <nl> + } <nl> return 3 ; <nl> } <nl> } <nl> bool PNGEncoder : : encode ( const LinearImage & image ) { <nl> } <nl> break ; <nl> default : <nl> - if ( srcChannels ! = 1 & & srcChannels ! = 3 ) { <nl> + if ( srcChannels ! = 1 & & srcChannels ! = 3 & & srcChannels ! = 4 ) { <nl> std : : cerr < < " Cannot encode PNG : " < < srcChannels < < " channels . " < < std : : endl ; <nl> return false ; <nl> } <nl> bool PNGEncoder : : encode ( const LinearImage & image ) { <nl> / / Write header ( 8 bit colour depth ) <nl> size_t width = image . getWidth ( ) ; <nl> size_t height = image . getHeight ( ) ; <nl> + int colorType = chooseColorType ( image ) ; <nl> + <nl> png_set_IHDR ( mPNG , mInfo , width , height , <nl> - 8 , chooseColorType ( image ) , PNG_INTERLACE_NONE , <nl> - PNG_COMPRESSION_TYPE_BASE , PNG_FILTER_TYPE_BASE ) ; <nl> + 8 , colorType , PNG_INTERLACE_NONE , <nl> + PNG_COMPRESSION_TYPE_BASE , PNG_FILTER_TYPE_BASE ) ; <nl> <nl> if ( mFormat = = PixelFormat : : LINEAR_RGB | | mFormat = = PixelFormat : : RGB_10_11_11_REV ) { <nl> png_set_gAMA ( mPNG , mInfo , 1 . 
0 ) ; <nl> bool PNGEncoder : : encode ( const LinearImage & image ) { <nl> dstChannels = 1 ; <nl> data = fromLinearToGrayscale < uint8_t > ( image ) ; <nl> } else { <nl> - dstChannels = getChannelsCount ( ) ; <nl> + dstChannels = getChannelsCount ( colorType ) ; <nl> switch ( mFormat ) { <nl> case PixelFormat : : RGBM : <nl> data = fromLinearToRGBM < uint8_t > ( image ) ; <nl> break ; <nl> + case PixelFormat : : RGB_10_11_11_REV : <nl> + data = fromLinearToRGB_10_11_11_REV ( image ) ; <nl> + break ; <nl> case PixelFormat : : sRGB : <nl> - data = fromLinearTosRGB < uint8_t > ( image ) ; <nl> + if ( dstChannels = = 4 ) { <nl> + data = fromLinearTosRGB < uint8_t , 4 > ( image ) ; <nl> + } else { <nl> + data = fromLinearTosRGB < uint8_t , 3 > ( image ) ; <nl> + } <nl> break ; <nl> case PixelFormat : : LINEAR_RGB : <nl> - data = fromLinearToRGB < uint8_t > ( image ) ; <nl> - break ; <nl> - case PixelFormat : : RGB_10_11_11_REV : <nl> - data = fromLinearToRGB_10_11_11_REV ( image ) ; <nl> + if ( dstChannels = = 4 ) { <nl> + data = fromLinearToRGB < uint8_t , 4 > ( image ) ; <nl> + } else { <nl> + data = fromLinearToRGB < uint8_t , 3 > ( image ) ; <nl> + } <nl> break ; <nl> } <nl> } <nl> <nl> for ( size_t y = 0 ; y < height ; y + + ) { <nl> - row_pointers [ y ] = reinterpret_cast < png_bytep > ( & data [ y * width * dstChannels * <nl> - sizeof ( uint8_t ) ] ) ; <nl> + row_pointers [ y ] = reinterpret_cast < png_bytep > <nl> + ( & data [ y * width * dstChannels * sizeof ( uint8_t ) ] ) ; <nl> } <nl> <nl> png_write_image ( mPNG , row_pointers . get ( ) ) ; <nl> mmm a / tools / mipgen / src / main . cpp <nl> ppp b / tools / mipgen / src / main . cpp <nl> using namespace image ; <nl> using namespace std ; <nl> using namespace utils ; <nl> <nl> - static ImageEncoder : : Format g_format = ImageEncoder : : Format : : PNG_LINEAR ; <nl> + static ImageEncoder : : Format g_format = ImageEncoder : : Format : : PNG ; <nl> static bool g_formatSpecified = false ; <nl> static bool g_createGallery = false ; <nl> static std : : string g_compression = " " ; <nl> int main ( int argc , char * argv [ ] ) { <nl> g_ktxContainer = true ; <nl> g_formatSpecified = true ; <nl> } else if ( ! g_formatSpecified ) { <nl> - g_format = ImageEncoder : : chooseFormat ( outputPattern , ! g_linearized ) ; <nl> + g_format = ImageEncoder : : chooseFormat ( outputPattern , g_linearized ) ; <nl> } <nl> <nl> puts ( " Reading image . . . " ) ; <nl>
|
Fix support of paletted images and alpha channel in imageio ( )
|
google/filament
|
c28d9ddb9d672c361a4f07b5c4c462724cf99f96
|
2020-04-18T23:32:05Z
|
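Part of the filament fix above reorders a switch: the old `chooseColorType` placed `default:` before `case 3`, so a 4-channel image warned and fell through to RGB, silently dropping alpha. A runnable toy of the corrected mapping — the `PixelFormat` special cases are omitted, and the constants are local stand-ins mirroring libpng's `PNG_COLOR_TYPE_*` values:

```cpp
#include <iostream>

enum ColorType { Gray = 0, RGB = 2, RGBA = 6 };  // stand-ins for PNG_COLOR_TYPE_*

ColorType chooseColorType(int channels) {
    switch (channels) {
        case 1:  return Gray;
        case 3:  return RGB;
        case 4:  return RGBA;  // the fix: 4 channels now keep their alpha
        default:
            std::cerr << "Warning: strange number of channels in PNG\n";
            return RGB;
    }
}

// After the fix, the channel count is derived from the chosen color type
// rather than assumed to be 3.
int channelCount(ColorType t) {
    switch (t) {
        case Gray: return 1;
        case RGB:  return 3;
        case RGBA: return 4;
    }
    return 3;
}

int main() {
    std::cout << channelCount(chooseColorType(4)) << "\n";  // 4
}
```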
mmm a / tensorflow / python / ops / bincount_ops . py <nl> ppp b / tensorflow / python / ops / bincount_ops . py <nl> <nl> from __future__ import division <nl> from __future__ import print_function <nl> <nl> - from tensorflow . python . framework import constant_op <nl> from tensorflow . python . framework import dtypes <nl> from tensorflow . python . framework import ops <nl> from tensorflow . python . framework import sparse_tensor <nl> def bincount ( arr , <nl> " " " <nl> name = " bincount " if name is None else name <nl> with ops . name_scope ( name ) : <nl> - # Somehow forward compatible needs to be False . <nl> - if not binary_output and axis is None : <nl> - arr = ops . convert_to_tensor ( arr , name = " arr " , dtype = dtypes . int32 ) <nl> - array_is_nonempty = math_ops . reduce_prod ( array_ops . shape ( arr ) ) > 0 <nl> - output_size = math_ops . cast ( array_is_nonempty , dtypes . int32 ) * ( <nl> - math_ops . reduce_max ( arr ) + 1 ) <nl> - if minlength is not None : <nl> - minlength = ops . convert_to_tensor ( <nl> - minlength , name = " minlength " , dtype = dtypes . int32 ) <nl> - output_size = gen_math_ops . maximum ( minlength , output_size ) <nl> - if maxlength is not None : <nl> - maxlength = ops . convert_to_tensor ( <nl> - maxlength , name = " maxlength " , dtype = dtypes . int32 ) <nl> - output_size = gen_math_ops . minimum ( maxlength , output_size ) <nl> - if weights is not None : <nl> - weights = ops . convert_to_tensor ( weights , name = " weights " ) <nl> - return gen_math_ops . unsorted_segment_sum ( weights , arr , output_size ) <nl> - weights = constant_op . constant ( [ ] , dtype ) <nl> - return gen_math_ops . bincount ( arr , output_size , weights ) <nl> - <nl> if not isinstance ( arr , sparse_tensor . SparseTensor ) : <nl> arr = ragged_tensor . convert_to_tensor_or_ragged_tensor ( arr , name = " arr " ) <nl> if weights is not None : <nl>
|
Remove forward compatibility check for bincount .
|
tensorflow/tensorflow
|
38718b4ed5feabbd4019fa24c828bdc1733289c9
|
2020-10-12T16:49:58Z
|
mmm a / test / unittests / compiler / js - typed - lowering - unittest . cc <nl> ppp b / test / unittests / compiler / js - typed - lowering - unittest . cc <nl> TEST_F ( JSTypedLoweringTest , JSStorePropertyToExternalTypedArrayWithConversion ) { <nl> Node * context = UndefinedConstant ( ) ; <nl> Node * effect = graph ( ) - > start ( ) ; <nl> Node * control = graph ( ) - > start ( ) ; <nl> - / / TODO ( mstarzinger ) : Once the effect - control - linearizer provides a frame <nl> - / / state we can get rid of this checkpoint again . The reducer won ' t care . <nl> - Node * checkpoint = graph ( ) - > NewNode ( common ( ) - > Checkpoint ( ) , <nl> - EmptyFrameState ( ) , effect , control ) ; <nl> VectorSlotPair feedback ; <nl> const Operator * op = javascript ( ) - > StoreProperty ( language_mode , feedback ) ; <nl> Node * node = graph ( ) - > NewNode ( op , base , key , value , context , <nl> - EmptyFrameState ( ) , checkpoint , control ) ; <nl> + EmptyFrameState ( ) , effect , control ) ; <nl> Reduction r = Reduce ( node ) ; <nl> <nl> Matcher < Node * > offset_matcher = <nl> TEST_F ( JSTypedLoweringTest , JSStorePropertyToExternalTypedArrayWithConversion ) { <nl> BufferAccess ( type ) , <nl> IsPointerConstant ( bit_cast < intptr_t > ( & backing_store [ 0 ] ) ) , <nl> offset_matcher , IsNumberConstant ( array - > byte_length ( ) - > Number ( ) ) , <nl> - value_matcher , checkpoint , control ) ) ; <nl> + value_matcher , effect , control ) ) ; <nl> } <nl> } <nl> } <nl>
|
[ unittests ] Remove redundant checkpoint in JSTypedLoweringTest .
|
v8/v8
|
e432f07db4829e247fb19b64110944b50a5be289
|
2017-02-14T13:36:37Z
|
mmm a / modules / highgui / src / cap_vfw . cpp <nl> ppp b / modules / highgui / src / cap_vfw . cpp <nl> bool CvCaptureCAM_VFW : : open ( int wIndex ) <nl> fourcc = ( DWORD ) - 1 ; <nl> <nl> memset ( & caps , 0 , sizeof ( caps ) ) ; <nl> - capDriverGetCaps ( hWndC , & caps , sizeof ( & caps ) ) ; <nl> + capDriverGetCaps ( hWndC , & caps , sizeof ( caps ) ) ; <nl> : : MoveWindow ( hWndC , 0 , 0 , 320 , 240 , TRUE ) ; <nl> capSetUserData ( hWndC , ( size_t ) this ) ; <nl> capSetCallbackOnFrame ( hWndC , frameCallback ) ; <nl>
|
fixed incorrect sizeof ( ) expression in CvCaptureCAM_VFW : : open
|
opencv/opencv
|
3c86788b1f424acd30bf0ea491b23408d9d07bb7
|
2013-04-01T07:35:33Z
|
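The OpenCV fix is the classic sizeof-of-a-pointer bug: `sizeof(&caps)` evaluates to the size of a pointer, so `capDriverGetCaps` was told the structure was only 4 (or 8) bytes long. A minimal demonstration with a stand-in struct of arbitrary size:

```cpp
#include <cstdio>

struct CAPDRIVERCAPS { char data[64]; };  // arbitrary stand-in for the vfw.h struct

int main() {
    CAPDRIVERCAPS caps;
    // The bug: sizeof(&caps) is the size of a pointer, not of the structure,
    // so the capture driver believed the caps buffer was pointer-sized.
    std::printf("sizeof(&caps) = %zu\n", sizeof(&caps));  // 4 or 8
    std::printf("sizeof(caps)  = %zu\n", sizeof(caps));   // 64
}
```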
mmm a / build / deps / github_hashes / facebook / fbthrift - rev . txt <nl> ppp b / build / deps / github_hashes / facebook / fbthrift - rev . txt <nl> @ @ - 1 + 1 @ @ <nl> - Subproject commit d92f4e3e1e610cac714916e210c96c9b053a84ca <nl> + Subproject commit f50d35e5b308ecf86f0b18760c8439074e2a0b9b <nl> mmm a / build / deps / github_hashes / facebook / wangle - rev . txt <nl> ppp b / build / deps / github_hashes / facebook / wangle - rev . txt <nl> @ @ - 1 + 1 @ @ <nl> - Subproject commit 622abbcbb3d4a600dacc6bd4f7c4705209534b82 <nl> + Subproject commit 09a7ff22e38739c59729cfb14209ff4f196053dc <nl>
|
Updating submodules
|
facebook/watchman
|
10682bcac06a2d136efe92f8bf45f1d02773d523
|
2020-02-21T05:56:08Z
|
mmm a / lib / Sema / CSDiagnostics . cpp <nl> ppp b / lib / Sema / CSDiagnostics . cpp <nl> bool InaccessibleMemberFailure : : diagnoseAsError ( ) { <nl> auto & cs = getConstraintSystem ( ) ; <nl> auto * locator = <nl> cs . getConstraintLocator ( baseExpr , ConstraintLocator : : Member ) ; <nl> - if ( llvm : : any_of ( cs . getFixes ( ) , [ & ] ( const ConstraintFix * fix ) { <nl> - return fix - > getLocator ( ) = = locator ; <nl> - } ) ) <nl> + if ( cs . hasFixFor ( locator ) ) <nl> return false ; <nl> } <nl> <nl> mmm a / lib / Sema / CSDiagnostics . h <nl> ppp b / lib / Sema / CSDiagnostics . h <nl> class ContextualFailure : public FailureDiagnostic { <nl> / / / Diagnose failed conversion in a ` CoerceExpr ` . <nl> bool diagnoseCoercionToUnrelatedType ( ) const ; <nl> <nl> - / / If we ' re trying to convert something of type " ( ) - > T " to T , <nl> - / / then we probably meant to call the value . <nl> + / / / If we ' re trying to convert something of type " ( ) - > T " to T , <nl> + / / / then we probably meant to call the value . <nl> bool diagnoseMissingFunctionCall ( ) const ; <nl> <nl> / / / Produce a specialized diagnostic if this is an invalid conversion to Bool . <nl> class MutatingMemberRefOnImmutableBase final : public FailureDiagnostic { <nl> bool diagnoseAsError ( ) override ; <nl> } ; <nl> <nl> - / / Diagnose an attempt to use AnyObject as the root type of a KeyPath <nl> - / / <nl> - / / ` ` ` swift <nl> - / / let keyPath = \ AnyObject . bar <nl> - / / ` ` ` <nl> + / / / Diagnose an attempt to use AnyObject as the root type of a KeyPath <nl> + / / / <nl> + / / / ` ` ` swift <nl> + / / / let keyPath = \ AnyObject . bar <nl> + / / / ` ` ` <nl> class AnyObjectKeyPathRootFailure final : public FailureDiagnostic { <nl> <nl> public : <nl>
|
Merge remote - tracking branch ' origin / master ' into master - rebranch
|
apple/swift
|
cf405482f5472836d16ac572231fa3893d1ff526
|
2020-01-21T08:22:55Z
|
mmm a / code / sorting / src / bogo_sort / README . md <nl> ppp b / code / sorting / src / bogo_sort / README . md <nl> <nl> # Bogosort <nl> + Bogosort or * * permutation sort * * is an extremely inefficient sorting algorithm . This is due to it ' s random nature : it randomly generates permutations of it ' s input until it finds one that is sorted . It has no use in practical applications . <nl> <nl> - Bogosort is an extremely inefficient sorting algorithm . This is due to it ' s random nature : it randomly generates permutations of it ' s input until sorted . <nl> + # # Explanation <nl> + Consider an array : [ 2 3 5 0 1 ] <nl> <nl> - # # Sources and more info : <nl> + 5 3 2 0 1 ( 1st shuffling ) <nl> + 1 3 2 5 0 ( 2nd shuffling ) <nl> + 1 0 2 5 3 ( 2nd shuffling ) <nl> + . <nl> + . <nl> + . <nl> + 0 1 2 3 5 ( nth shuffling ) - Sorted Array <nl> <nl> - - https : / / en . wikipedia . org / wiki / Bogosort <nl> + Where , n is unknown as this algorithm does not tell , in which step the resultant permutation will be sorted . <nl> <nl> mmm - <nl> + # # Algorithm <nl> + ` ` ` <nl> + while not in_order ( list ) do <nl> + shuffle ( list ) <nl> + done <nl> + ` ` ` <nl> + <nl> + # # Complexity <nl> + * * Time complexity * * <nl> + - Worst case : O ( & # 8734 ; ) <nl> + - Average case : O ( n * n ! ) <nl> + - Best case : O ( n ) <nl> + <nl> + * * Space complexity * * : O ( 1 ) <nl> <nl> + mmm <nl> < p align = " center " > <nl> A massive collaborative effort by < a href = " https : / / github . com / OpenGenus / cosmos " > OpenGenus Foundation < / a > <nl> < / p > <nl> - <nl> mmm - <nl> + mmm <nl> \ No newline at end of file <nl>
|
Bogosort - readme modified
|
OpenGenus/cosmos
|
638b8d13df83875b4b5349fbd9a065f4733aed26
|
2018-04-12T17:50:57Z
|
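The README diff above specifies bogosort completely (shuffle until `in_order`). A direct, runnable C++ rendering of that pseudocode, using the array from the README:

```cpp
#include <algorithm>
#include <iostream>
#include <random>
#include <vector>

// Shuffle until sorted: O(n * n!) on average, unbounded in the worst case.
void bogo_sort(std::vector<int>& v) {
    std::mt19937 rng(std::random_device{}());
    while (!std::is_sorted(v.begin(), v.end()))
        std::shuffle(v.begin(), v.end(), rng);
}

int main() {
    std::vector<int> v{2, 3, 5, 0, 1};  // the example array from the README
    bogo_sort(v);
    for (int x : v) std::cout << x << ' ';  // 0 1 2 3 5
    std::cout << '\n';
}
```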
mmm a / src / unity / lib / unity_sframe . cpp <nl> ppp b / src / unity / lib / unity_sframe . cpp <nl> void unity_sframe : : swap_columns ( size_t i , size_t j ) { <nl> Dlog_func_entry ( ) ; <nl> logstream ( LOG_DEBUG ) < < " Args : " < < i < < " , " < < j < < std : : endl ; <nl> if ( i > = num_columns ( ) ) { <nl> - log_and_throw ( " Column index value of " < < i < < " is out of bound . " ) ; <nl> + log_and_throw ( " Column index value of " + std : : to_string ( i ) + " is out of bound . " ) ; <nl> } <nl> if ( j > = num_columns ( ) ) { <nl> - log_and_throw ( " Column index value of " < < j < < " is out of bound . " ) ; <nl> + log_and_throw ( " Column index value of " + std : : to_string ( j ) + " is out of bound . " ) ; <nl> } <nl> <nl> std : : vector < std : : string > new_column_names = column_names ( ) ; <nl>
|
Fixed build break in 4 . 2a1 build . ( )
|
apple/turicreate
|
dbcf5356803c8e13d41ef12c806b3f3b273e142a
|
2018-03-02T18:37:17Z
|
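The turicreate fix swaps stream-style message building for string concatenation: `log_and_throw` evidently accepts a string, and `"literal" << i` does not compile for a `const char*`. A sketch under that assumption — the real `log_and_throw` is turicreate's logging macro, simplified here to a throwing function:

```cpp
#include <stdexcept>
#include <string>

// Assumed interface: takes a ready-made std::string, so pieces must be
// concatenated (std::to_string), not streamed with <<.
[[noreturn]] void log_and_throw(const std::string& msg) {
    throw std::runtime_error(msg);  // the real macro also logs
}

int main() {
    size_t i = 7;
    try {
        log_and_throw("Column index value of " + std::to_string(i) +
                      " is out of bound.");
    } catch (const std::runtime_error& e) {
        // e.what() == "Column index value of 7 is out of bound."
    }
}
```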
mmm a / src / containers / archive / archive . cc <nl> ppp b / src / containers / archive / archive . cc <nl> int64_t force_read ( read_stream_t * s , void * p , int64_t n ) { <nl> } <nl> <nl> write_message_t : : ~ write_message_t ( ) { <nl> - for ( write_buffer_t * buffer = buffers_ . head ( ) ; buffer ; buffer = buffers_ . next ( buffer ) ) { <nl> + write_buffer_t * buffer ; <nl> + while ( ( buffer = buffers_ . head ( ) ) ) { <nl> buffers_ . remove ( buffer ) ; <nl> - delete buffer ; <nl> } <nl> } <nl> <nl>
|
Made the write_message_t destructor be not horribly broken . I am a bad person .
|
rethinkdb/rethinkdb
|
12c44f90e6fe69b9d494019f53d9eee32d8207bd
|
2012-04-30T21:49:22Z
|
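The old rethinkdb destructor advanced with `buffers_.next(buffer)` after the node had already been removed and deleted, i.e. it walked freed links; the fix keeps taking the head until the list is empty. A sketch of that drain-from-the-head shape, using a `std::list` of owned pointers in place of the intrusive list (whether `remove()` also frees the node is internal to the real list type):

```cpp
#include <list>

struct write_buffer_t { char data[4096]; };

// Safe drain: never touch a node's links after unlinking it. Popping the
// head each iteration avoids the use-after-free of the old
//   for (b = head(); b; b = next(b)) { remove(b); delete b; }
void drain(std::list<write_buffer_t*>& buffers) {
    while (!buffers.empty()) {
        write_buffer_t* buffer = buffers.front();
        buffers.pop_front();
        delete buffer;
    }
}

int main() {
    std::list<write_buffer_t*> buffers{new write_buffer_t, new write_buffer_t};
    drain(buffers);
}
```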
mmm a / single_include / catch . hpp <nl> ppp b / single_include / catch . hpp <nl> <nl> / * <nl> - * Generated : 2012 - 10 - 12 07 : 57 : 48 . 487873 <nl> + * Generated : 2012 - 10 - 12 08 : 05 : 21 . 766821 <nl> * mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> * This file has been merged from multiple headers . Please don ' t edit it directly <nl> * Copyright ( c ) 2012 Two Blue Cubes Ltd . All rights reserved . <nl>
|
Regenerated to test branch
|
catchorg/Catch2
|
813a13caeddd8072f9992492c5aea84226ed8f03
|
2012-10-12T07:05:45Z
|
mmm a / doc / faq / a01 - espcomm_sync - failed . rst <nl> ppp b / doc / faq / a01 - espcomm_sync - failed . rst <nl> example at 921600 baud , but with two reset retries . <nl> <nl> Reset Method : nodemcu , reset retries <nl> <nl> - If you are interested how noodemcu reset method is implemented , then <nl> + If you are interested how nodemcu reset method is implemented , then <nl> check circuit below . As indicated it does not pull to ground RTS and DTR <nl> lines once you open Serial Monitor in Arduino IDE . <nl> <nl> . . figure : : pictures / a01 - nodemcu - reset - implementation . png <nl> - : alt : Implementation of noodemcu reset <nl> + : alt : Implementation of nodemcu reset <nl> <nl> - Implementation of noodemcu reset <nl> + Implementation of nodemcu reset <nl> <nl> It consists of two transistors and resistors that you can locate on <nl> NodeMCU board on right . On left you can see complete circuit and the <nl> mmm a / doc / faq / a02 - my - esp - crashes . rst <nl> ppp b / doc / faq / a02 - my - esp - crashes . rst <nl> If you don ' t have any code for troubleshooting , use the example below : <nl> Serial . println ( ) ; <nl> Serial . println ( " Let ' s provoke the s / w wdt firing . . . " ) ; <nl> / / <nl> - / / provoke an OOM , will be recorded as the last occured one <nl> + / / provoke an OOM , will be recorded as the last occurred one <nl> char * out_of_memory_failure = ( char * ) malloc ( 1000000 ) ; <nl> / / <nl> / / wait for s / w wdt in infinite loop below <nl> Memory , memory , memory <nl> * If you use std libs like std : : vector , make sure to call its : : reserve ( ) method before filling it . This allows allocating only once , which reduces mem fragmentation , and makes sure that there are no empty unused slots left over in the container at the end . <nl> <nl> Stack <nl> - The amount of stack in the ESP is tiny at only 4KB . For normal developement in large systems , it <nl> + The amount of stack in the ESP is tiny at only 4KB . For normal development in large systems , it <nl> is good practice to use and abuse the stack , because it is faster for allocation / deallocation , the scope of the object is well defined , and deallocation automatically happens in reverse order as allocation , which means no mem fragmentation . However , with the tiny amount of stack available in the ESP , that practice is not really viable , at least not for big objects . <nl> * Large objects that have internally managed memory , such as String , std : : string , std : : vector , etc , are ok on the stack , because they internally allocate their buffers on the heap . <nl> * Large arrays on the stack , such as uint8_t buffer [ 2048 ] should be avoided on the stack and be dynamically allocated ( consider smart pointers ) . <nl> - * Objects that have large data members , such as large arrays , should be avoided on the stack , and be dynamicaly allocated ( consider smart pointers ) . <nl> + * Objects that have large data members , such as large arrays , should be avoided on the stack , and be dynamically allocated ( consider smart pointers ) . <nl> <nl> <nl> If at the Wall , Enter an Issue Report <nl> mmm a / doc / faq / a05 - board - generator . rst <nl> ppp b / doc / faq / a05 - board - generator . rst <nl> Additional Notes : <nl> <nl> 1 . The boards . txt file will always contain the generic and esp8285 boards . <nl> <nl> - 2 . If boards . txt file exist and no backup copy named boards . txt . orig exist , the current boards . txt will be renamed to boards . txt . orig . 
Otherwise , the existing boards . txt is over - writen when you generate a new boards . txt file . Similar behavior for when generating a new boards . local . txt . <nl> + 2 . If boards . txt file exist and no backup copy named boards . txt . orig exist , the current boards . txt will be renamed to boards . txt . orig . Otherwise , the existing boards . txt is over - written when you generate a new boards . txt file . Similar behavior for when generating a new boards . local . txt . <nl> <nl> 3 . The boards in the boards . txt file will be in the order they were listed in your favorites file , specified by option ` ` - - filter < file > ` ` . <nl> <nl> mmm a / doc / faq / readme . rst <nl> ppp b / doc / faq / readme . rst <nl> How can I get some extra KBs in flash ? <nl> About WPS <nl> ~ ~ ~ ~ ~ ~ ~ ~ ~ <nl> <nl> - From release 2 . 4 . 2 and ahead , not using WPS will give an exra ~ 4 . 5KB in <nl> + From release 2 . 4 . 2 and ahead , not using WPS will give an extra ~ 4 . 5KB in <nl> heap . <nl> <nl> In release 2 . 4 . 2 only , WPS is disabled by default and the board generator is <nl>
|
Fix spelling typo under doc / faq ( )
|
esp8266/Arduino
|
1ff927d04caa28e1294e94ec10edf758bec6edf1
|
2020-08-18T14:45:37Z
|
mmm a / lib / SILPasses / PassManager . cpp <nl> ppp b / lib / SILPasses / PassManager . cpp <nl> llvm : : cl : : opt < std : : string > <nl> SILPrintOnlyFun ( " sil - print - only - function " , llvm : : cl : : init ( " " ) , <nl> llvm : : cl : : desc ( " Only print out the sil for this function " ) ) ; <nl> <nl> + llvm : : cl : : opt < std : : string > <nl> + SILPrintBefore ( " sil - print - before " , llvm : : cl : : init ( " " ) , llvm : : cl : : desc ( <nl> + " Print out the sil before passes which contain this string " ) ) ; <nl> + <nl> + llvm : : cl : : opt < std : : string > <nl> + SILPrintAfter ( " sil - print - after " , llvm : : cl : : init ( " " ) , llvm : : cl : : desc ( <nl> + " Print out the sil after passes which contain this string " ) ) ; <nl> + <nl> + llvm : : cl : : opt < std : : string > <nl> + SILPrintAround ( " sil - print - around " , llvm : : cl : : init ( " " ) , llvm : : cl : : desc ( <nl> + " Print out the sil before and after passes which contain this string " ) ) ; <nl> + <nl> + static bool doPrintBefore ( SILTransform * T , SILFunction * F ) { <nl> + if ( ! SILPrintOnlyFun . empty ( ) & & ( ! F | | F - > getName ( ) ! = SILPrintOnlyFun ) ) <nl> + return false ; <nl> + <nl> + if ( ! SILPrintBefore . empty ( ) & & <nl> + T - > getName ( ) . find ( SILPrintBefore ) ! = StringRef : : npos ) <nl> + return true ; <nl> + <nl> + if ( ! SILPrintAround . empty ( ) & & <nl> + T - > getName ( ) . find ( SILPrintAround ) ! = StringRef : : npos ) <nl> + return true ; <nl> + <nl> + return false ; <nl> + } <nl> + <nl> + static bool doPrintAfter ( SILTransform * T , SILFunction * F , bool Default ) { <nl> + if ( ! SILPrintOnlyFun . empty ( ) & & ( ! F | | F - > getName ( ) ! = SILPrintOnlyFun ) ) <nl> + return false ; <nl> + <nl> + if ( ! SILPrintAfter . empty ( ) & & <nl> + T - > getName ( ) . find ( SILPrintAfter ) ! = StringRef : : npos ) <nl> + return true ; <nl> + <nl> + if ( ! SILPrintAround . empty ( ) & & <nl> + T - > getName ( ) . find ( SILPrintAround ) ! = StringRef : : npos ) <nl> + return true ; <nl> + <nl> + return Default ; <nl> + } <nl> + <nl> bool SILPassManager : : <nl> runFunctionPasses ( llvm : : ArrayRef < SILFunctionTransform * > FuncTransforms ) { <nl> CompleteFunctions * CompleteFuncs = getAnalysis < CompleteFunctions > ( ) ; <nl> runFunctionPasses ( llvm : : ArrayRef < SILFunctionTransform * > FuncTransforms ) { <nl> llvm : : dbgs ( ) < < " # " < < NumPassesRun < < " Pass : " < < SFT - > getName ( ) <nl> < < " , Function : " < < F . getName ( ) < < " \ n " ; <nl> <nl> + if ( doPrintBefore ( SFT , & F ) ) { <nl> + llvm : : dbgs ( ) < < " * * * SIL function before " < < SFT - > getName ( ) < < " ( " <nl> + < < NumOptimizationIterations < < " ) * * * \ n " ; <nl> + F . dump ( ) ; <nl> + } <nl> + <nl> llvm : : sys : : TimeValue StartTime = llvm : : sys : : TimeValue : : now ( ) ; <nl> SFT - > run ( ) ; <nl> <nl> runFunctionPasses ( llvm : : ArrayRef < SILFunctionTransform * > FuncTransforms ) { <nl> } <nl> <nl> / / If this pass invalidated anything , print and verify . <nl> - if ( CompleteFuncs - > hasChanged ( ) ) { <nl> - if ( Options . PrintAll ) { <nl> - if ( SILPrintOnlyFun . empty ( ) | | F . getName ( ) . str ( ) = = SILPrintOnlyFun ) { <nl> - llvm : : dbgs ( ) < < " * * * SIL function after " < < SFT - > getName ( ) < < " ( " <nl> - < < NumOptimizationIterations < < " ) * * * \ n " ; <nl> - F . dump ( ) ; <nl> - } <nl> - } <nl> - if ( Options . VerifyAll ) { <nl> - F . 
verify ( ) ; <nl> - } <nl> + if ( doPrintAfter ( SFT , & F , <nl> + CompleteFuncs - > hasChanged ( ) & & Options . PrintAll ) ) { <nl> + llvm : : dbgs ( ) < < " * * * SIL function after " < < SFT - > getName ( ) < < " ( " <nl> + < < NumOptimizationIterations < < " ) * * * \ n " ; <nl> + F . dump ( ) ; <nl> + } <nl> + if ( CompleteFuncs - > hasChanged ( ) & & Options . VerifyAll ) { <nl> + F . verify ( ) ; <nl> } <nl> } <nl> } <nl> void SILPassManager : : runOneIteration ( ) { <nl> llvm : : dbgs ( ) < < " # " < < NumPassesRun < < " Pass : " < < SMT - > getName ( ) <nl> < < " ( module pass ) \ n " ; <nl> <nl> + if ( doPrintBefore ( SMT , nullptr ) ) { <nl> + llvm : : dbgs ( ) < < " * * * SIL module before " < < SMT - > getName ( ) < < " ( " <nl> + < < NumOptimizationIterations < < " ) * * * \ n " ; <nl> + Mod - > dump ( ) ; <nl> + } <nl> + <nl> llvm : : sys : : TimeValue StartTime = llvm : : sys : : TimeValue : : now ( ) ; <nl> SMT - > run ( ) ; <nl> + + NumPassesRun ; <nl> void SILPassManager : : runOneIteration ( ) { <nl> } <nl> <nl> / / If this pass invalidated anything , print and verify . <nl> - if ( CompleteFuncs - > hasChanged ( ) ) { <nl> - if ( Options . PrintAll & & SILPrintOnlyFun . empty ( ) ) { <nl> - llvm : : dbgs ( ) < < " * * * SIL module after " < < SMT - > getName ( ) < < " ( " <nl> - < < NumOptimizationIterations < < " ) * * * \ n " ; <nl> - Mod - > dump ( ) ; <nl> - } <nl> - if ( Options . VerifyAll ) { <nl> - Mod - > verify ( ) ; <nl> - } <nl> + if ( doPrintAfter ( SMT , nullptr , <nl> + CompleteFuncs - > hasChanged ( ) & & Options . PrintAll ) ) { <nl> + llvm : : dbgs ( ) < < " * * * SIL module after " < < SMT - > getName ( ) < < " ( " <nl> + < < NumOptimizationIterations < < " ) * * * \ n " ; <nl> + Mod - > dump ( ) ; <nl> + } <nl> + if ( CompleteFuncs - > hasChanged ( ) & & Options . VerifyAll ) { <nl> + Mod - > verify ( ) ; <nl> } <nl> <nl> continue ; <nl>
|
Add more options to dump the SIL during optimizations.
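For context, the three new flags select passes by substring match against the pass name, so a single flag can target a whole family of passes. Below is a minimal, self-contained C++ sketch of just that selection logic, not the pass manager itself; the pass names, filter values, and the nameMatches helper are illustrative assumptions, while the matching rule mirrors the T->getName().find(...) checks in doPrintBefore()/doPrintAfter() above.

#include <iostream>
#include <string>
#include <vector>

// Mirrors the substring checks in doPrintBefore()/doPrintAfter():
// a pass qualifies when the user-supplied filter is non-empty and
// occurs anywhere in the pass name. (Helper name is illustrative.)
static bool nameMatches(const std::string &PassName,
                        const std::string &Filter) {
  return !Filter.empty() && PassName.find(Filter) != std::string::npos;
}

int main() {
  // Assumed values for -sil-print-before and -sil-print-around.
  const std::string PrintBefore = "Inline";
  const std::string PrintAround = "DCE";

  // Hypothetical pass pipeline, for illustration only.
  const std::vector<std::string> Passes = {"EarlyInliner", "CSE", "DCE"};

  for (const auto &P : Passes) {
    // Print before the pass if either filter names a substring of it.
    if (nameMatches(P, PrintBefore) || nameMatches(P, PrintAround))
      std::cout << "*** SIL before " << P << " ***\n";
    // ... the pass itself would run here ...
    if (nameMatches(P, PrintAround))
      std::cout << "*** SIL after " << P << " ***\n";
  }
  return 0;
}

With these assumed values, the sketch prints before EarlyInliner (it contains "Inline") and both before and after DCE, matching the before/after/around semantics of the real flags.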
|
apple/swift
|
72f0cc2ab48cbf5903b94ff41d9fca1f1fc5e1e2
|
2014-12-05T16:52:23Z
|
--- a/db/jsobj.cpp
+++ b/db/jsobj.cpp
@@ ... @@ namespace mongo {
             const char *f = e.fieldName();
             try {
                 unsigned u = stringToNum(f);
+                assert(u < 1000000);
                 if (u >= v.size())
                     v.resize(u + 1);
                 v[u] = e;
|
Put the Array check back, but at 1M instead of 4k.
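The guard matters because BSON stores array elements as fields whose names are the decimal indices; a corrupt or hostile document with a field name like "999999999" would otherwise force an enormous resize. A minimal sketch of the guarded conversion, with a simplified stand-in for mongo's stringToNum() and the BSONElement plumbing omitted:

#include <cassert>
#include <cstdlib>
#include <string>
#include <vector>

// Simplified stand-in for mongo's stringToNum(): parse a decimal
// array index out of a BSON field name.
static unsigned stringToNum(const char *f) {
  return static_cast<unsigned>(std::strtoul(f, nullptr, 10));
}

int main() {
  std::vector<std::string> v;    // stand-in for the element vector
  const char *fieldName = "42";  // an array element's field name
  unsigned u = stringToNum(fieldName);
  assert(u < 1000000);           // reject absurd indices before resizing
  if (u >= v.size())
    v.resize(u + 1);             // grow the array to hold index u
  v[u] = "element";              // slot the element into place
  return 0;
}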
|
mongodb/mongo
|
d782a78fc75721a4548e28ca3b2adba36470747e
|
2011-01-20T22:06:20Z
|
--- a/src/library_pthread.js
+++ b/src/library_pthread.js
@@ ... @@
 var LibraryPThread = {
   $PThread__postset: 'if (!ENVIRONMENT_IS_PTHREAD) PThread.initMainThreadBlock();',
-  $PThread__deps: ['$PROCINFO'],
+  $PThread__deps: ['$PROCINFO', '_register_pthread_ptr'],
   $PThread: {
     MAIN_THREAD_ID: 1, // A special constant that identifies the main JS thread ID.
     mainThreadInfo: {
@@ ... @@ var LibraryPThread = {
     initMainThreadBlock: function() {
       if (ENVIRONMENT_IS_PTHREAD) return undefined;
       PThread.mainThreadBlock = allocate({{{ C_STRUCTS.pthread.__size__ }}}, "i32*", ALLOC_STATIC);
+      __register_pthread_ptr(PThread.mainThreadBlock, /*isMainBrowserThread=*/!ENVIRONMENT_IS_WORKER, /*isMainRuntimeThread=*/1); // Pass the thread address inside the asm.js scope to store it for fast access that avoids the need for a FFI out.
+
       for (var i = 0; i < {{{ C_STRUCTS.pthread.__size__ }}}/4; ++i) HEAPU32[PThread.mainThreadBlock/4 + i] = 0;
 
       // The pthread struct has a field that points to itself - this is used as a magic ID to detect whether the pthread_t
@@ ... @@ var LibraryPThread = {
       PThread.runExitHandlers();
 
       _emscripten_futex_wake(tb + {{{ C_STRUCTS.pthread.threadStatus }}}, {{{ cDefine('INT_MAX') }}});
+      __register_pthread_ptr(0, 0, 0); // Unregister the thread block also inside the asm.js scope.
       threadInfoStruct = 0;
       if (ENVIRONMENT_IS_PTHREAD) {
         postMessage({ cmd: 'exit' });
@@ ... @@ var LibraryPThread = {
       Atomics.store(HEAPU32, (threadInfoStruct + {{{ C_STRUCTS.pthread.threadStatus }}}) >> 2, 1); // Mark the thread as no longer running.
       _emscripten_futex_wake(threadInfoStruct + {{{ C_STRUCTS.pthread.threadStatus }}}, {{{ cDefine('INT_MAX') }}}); // wake all threads
       threadInfoStruct = selfThreadId = 0; // Not hosting a pthread anymore in this worker, reset the info structures to null.
+      __register_pthread_ptr(0, 0, 0); // Unregister the thread block also inside the asm.js scope.
       postMessage({ cmd: 'cancelDone' });
     },
@@ ... @@ var LibraryPThread = {
     else PThread.threadExit(status);
   },
 
+  _pthread_ptr: 0,
+  _pthread_is_main_runtime_thread: 0,
+  _pthread_is_main_browser_thread: 0,
+
+  _register_pthread_ptr__asm: true,
+  _register_pthread_ptr__deps: ['_pthread_ptr', '_pthread_is_main_runtime_thread', '_pthread_is_main_browser_thread'],
+  _register_pthread_ptr: function(pthreadPtr, isMainBrowserThread, isMainRuntimeThread) {
+    pthreadPtr = pthreadPtr|0;
+    isMainBrowserThread = isMainBrowserThread|0;
+    isMainRuntimeThread = isMainRuntimeThread|0;
+    __pthread_ptr = pthreadPtr;
+    __pthread_is_main_browser_thread = isMainBrowserThread;
+    __pthread_is_main_runtime_thread = isMainRuntimeThread;
+  },
+
   // Public pthread_self() function which returns a unique ID for the thread.
+  pthread_self__asm: true,
+  pthread_self__deps: ['_pthread_ptr'],
   pthread_self: function() {
-    if (ENVIRONMENT_IS_PTHREAD) return threadInfoStruct;
-    return PThread.mainThreadBlock; // Main JS thread.
+    return __pthread_ptr|0;
   },
 
+  emscripten_is_main_runtime_thread__asm: true,
+  emscripten_is_main_runtime_thread__deps: ['_pthread_is_main_runtime_thread'],
   emscripten_is_main_runtime_thread: function() {
-    return !ENVIRONMENT_IS_PTHREAD;
+    return __pthread_is_main_runtime_thread|0; // Semantically the same as testing "!ENVIRONMENT_IS_PTHREAD" outside the asm.js scope
   },
 
+  emscripten_is_main_browser_thread__asm: true,
+  emscripten_is_main_browser_thread__deps: ['_pthread_is_main_browser_thread'],
   emscripten_is_main_browser_thread: function() {
-    return !ENVIRONMENT_IS_WORKER;
+    return __pthread_is_main_browser_thread|0; // Semantically the same as testing "!ENVIRONMENT_IS_WORKER" outside the asm.js scope
   },
 
   pthread_getschedparam: function(thread, policy, schedparam) {
--- a/src/pthread-main.js
+++ b/src/pthread-main.js
@@ ... @@ this.onmessage = function(e) {
     postMessage({ cmd: 'loaded' });
   } else if (e.data.cmd === 'run') { // This worker was idle, and now should start executing its pthread entry point.
     threadInfoStruct = e.data.threadInfoStruct;
+    __register_pthread_ptr(threadInfoStruct, /*isMainBrowserThread=*/0, /*isMainRuntimeThread=*/0); // Pass the thread address inside the asm.js scope to store it for fast access that avoids the need for a FFI out.
     assert(threadInfoStruct);
     selfThreadId = e.data.selfThreadId;
     assert(selfThreadId);
|
Optimize potentially hot functions pthread_self(), emscripten_is_main_runtime_thread(), and emscripten_is_main_browser_thread() to run inside the asm.js scope to avoid an FFI transition. Implements.
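The underlying pattern is to register state once from the slow side of a boundary so that hot getters on the fast side become plain loads. The following C++ sketch shows the shape of that optimization under illustrative names; it is not emscripten's API, just an analogue of how __register_pthread_ptr() caches the thread block inside the asm.js scope.

#include <cassert>
#include <cstdint>

// Values registered once from the "slow" side of the boundary so that
// hot getters on the "fast" side are plain loads, mirroring how
// __register_pthread_ptr() stores the thread block inside the asm.js
// scope. All names here are illustrative.
static uintptr_t g_pthread_ptr = 0;
static int g_is_main_runtime_thread = 0;
static int g_is_main_browser_thread = 0;

void register_pthread_ptr(uintptr_t ptr, int isMainBrowserThread,
                          int isMainRuntimeThread) {
  g_pthread_ptr = ptr;
  g_is_main_browser_thread = isMainBrowserThread;
  g_is_main_runtime_thread = isMainRuntimeThread;
}

// Hot-path getters: no cross-boundary call, just a load.
uintptr_t pthread_self_fast() { return g_pthread_ptr; }
int is_main_runtime_thread() { return g_is_main_runtime_thread; }

int main() {
  register_pthread_ptr(0x1000, /*isMainBrowserThread=*/1,
                       /*isMainRuntimeThread=*/1);
  assert(pthread_self_fast() == 0x1000);
  assert(is_main_runtime_thread() == 1);
  return 0;
}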
|
emscripten-core/emscripten
|
78214c48f8cd3f8e6afb905355beff6359ca5f4d
|
2016-01-04T22:05:10Z
|