diff (string, 41-2.03M chars) | msg (string, 1-1.5k chars, nullable) | repo (string, 5-40 chars) | sha (string, 40 chars) | time (string, 20 chars) |
---|---|---|---|---|
mmm a / src / core / lib / surface / api_trace . h <nl> ppp b / src / core / lib / surface / api_trace . h <nl> <nl> <nl> extern grpc_core : : TraceFlag grpc_api_trace ; <nl> <nl> - # ifdef __cplusplus <nl> - extern " C " { <nl> - # endif <nl> - <nl> / * Provide unwrapping macros because we ' re in C89 and variadic macros weren ' t <nl> introduced until C99 . . . * / <nl> # define GRPC_API_TRACE_UNWRAP0 ( ) <nl> extern " C " { <nl> gpr_log ( GPR_INFO , fmt GRPC_API_TRACE_UNWRAP # # nargs args ) ; \ <nl> } <nl> <nl> - # ifdef __cplusplus <nl> - } <nl> - # endif <nl> - <nl> # endif / * GRPC_CORE_LIB_SURFACE_API_TRACE_H * / <nl>
|
Fix window compile
|
grpc/grpc
|
dabd7c0ad9e76dbed10b57cb16195e643d1bf511
|
2017-11-15T16:25:30Z
|
mmm a / src / gui / shutdownconfirm . cpp <nl> ppp b / src / gui / shutdownconfirm . cpp <nl> ShutdownConfirmDlg : : ShutdownConfirmDlg ( const ShutdownAction & action ) <nl> / / Set ' Cancel ' as default button . <nl> setDefaultButton ( QMessageBox : : Cancel ) ; <nl> m_timer . setInterval ( 1000 ) ; / / 1sec <nl> - connect ( & m_timer , SIGNAL ( m_timeout ( ) ) , this , SLOT ( updateSeconds ( ) ) ) ; <nl> + connect ( & m_timer , SIGNAL ( timeout ( ) ) , this , SLOT ( updateSeconds ( ) ) ) ; <nl> show ( ) ; <nl> / / Move to center <nl> move ( Utils : : Misc : : screenCenter ( this ) ) ; <nl>
|
Fix shutdown dialog . Closes .
|
qbittorrent/qBittorrent
|
88abe2baff0198ab76982518e5953d6350692e4f
|
2015-11-25T16:30:43Z
|
mmm a / lib / SILPasses / SILCombinerVisitors . cpp <nl> ppp b / lib / SILPasses / SILCombinerVisitors . cpp <nl> SILCombiner : : optimizeApplyOfPartialApply ( ApplyInst * AI , PartialApplyInst * PAI ) { <nl> } <nl> <nl> ApplyInst * NAI = Builder - > createApply ( AI - > getLoc ( ) , FRI , FnType , ResultTy , <nl> - Subs , Args , AI - > isTransparent ( ) ) ; <nl> + Subs , Args , <nl> + FRI - > getReferencedFunction ( ) - > isTransparent ( ) ) ; <nl> NAI - > setDebugScope ( AI - > getDebugScope ( ) ) ; <nl> <nl> / / We also need to release the partial_apply instruction itself because it <nl> mmm a / test / SILPasses / sil_combine . sil <nl> ppp b / test / SILPasses / sil_combine . sil <nl> bb0 ( % 0 : $ Int ) : <nl> sil @ some_closure : $ @ thin ( Int ) - > Int <nl> sil @ print_a_number : $ @ thin ( Int ) - > ( ) <nl> <nl> + / / CHECK - LABEL : sil @ applied <nl> + sil @ applied : $ @ thin ( Builtin . Int32 ) - > Builtin . Int32 { <nl> + bb0 ( % 0 : $ Builtin . Int32 ) : <nl> + / / CHECK : return <nl> + return % 0 : $ Builtin . Int32 <nl> + } <nl> + <nl> + / / CHECK - LABEL : sil @ combine_partial_apply_with_apply <nl> + sil @ combine_partial_apply_with_apply : $ @ thin ( Builtin . Int32 ) - > Builtin . Int32 { <nl> + bb0 ( % 0 : $ Builtin . Int32 ) : <nl> + / / CHECK : [ [ APPLIED : % . * ] ] = function_ref @ applied <nl> + % 2 = function_ref @ applied : $ @ thin ( Builtin . Int32 ) - > Builtin . Int32 <nl> + / / CHECK : [ [ THICK : % . * ] ] = thin_to_thick_function [ [ APPLIED ] ] <nl> + % 3 = thin_to_thick_function % 2 : $ @ thin ( Builtin . Int32 ) - > Builtin . Int32 to $ @ callee_owned ( Builtin . Int32 ) - > Builtin . Int32 <nl> + / / CHECK : [ [ REABSTRACT : % . * ] ] = function_ref @ reabstract <nl> + % 4 = function_ref @ reabstract : $ @ thin ( @ out Builtin . Int32 , Builtin . Int32 , @ owned @ callee_owned ( Builtin . Int32 ) - > Builtin . Int32 ) - > ( ) <nl> + / / CHECK - NOT : partial_apply <nl> + % 5 = partial_apply % 4 ( % 3 ) : $ @ thin ( @ out Builtin . Int32 , Builtin . Int32 , @ owned @ callee_owned ( Builtin . Int32 ) - > Builtin . Int32 ) - > ( ) <nl> + / / CHECK : [ [ TMP : % . * ] ] = alloc_stack $ Builtin . Int32 <nl> + % 6 = alloc_stack $ Builtin . Int32 <nl> + / / CHECK - NOT : strong_retain <nl> + strong_retain % 5 : $ @ callee_owned ( @ out Builtin . Int32 , Builtin . Int32 ) - > ( ) <nl> + / / CHECK : apply [ transparent ] [ [ REABSTRACT ] ] ( [ [ TMP ] ] # 1 , % 0 , [ [ THICK ] ] ) <nl> + % 8 = apply % 5 ( % 6 # 1 , % 0 ) : $ @ callee_owned ( @ out Builtin . Int32 , Builtin . Int32 ) - > ( ) <nl> + / / CHECK - NOT : strong_release <nl> + strong_release % 5 : $ @ callee_owned ( @ out Builtin . Int32 , Builtin . Int32 ) - > ( ) <nl> + / / CHECK - NOT : tuple <nl> + % 10 = tuple ( ) <nl> + / / CHECK : [ [ RESULT : % . * ] ] = load [ [ TMP ] ] # 1 <nl> + % 11 = load % 6 # 1 : $ * Builtin . Int32 <nl> + / / CHECK : dealloc_stack [ [ TMP ] ] # 0 <nl> + dealloc_stack % 6 # 0 : $ * @ local_storage Builtin . Int32 <nl> + / / CHECK : return [ [ RESULT ] ] <nl> + return % 11 : $ Builtin . Int32 <nl> + } <nl> + <nl> + / / CHECK - LABEL : sil shared [ transparent ] @ reabstract <nl> + sil shared [ transparent ] @ reabstract : $ @ thin ( @ out Builtin . Int32 , Builtin . Int32 , @ owned @ callee_owned ( Builtin . Int32 ) - > Builtin . Int32 ) - > ( ) { <nl> + bb0 ( % 0 : $ * Builtin . Int32 , % 1 : $ Builtin . Int32 , % 2 : $ @ callee_owned ( Builtin . Int32 ) - > Builtin . Int32 ) : <nl> + % 3 = apply % 2 ( % 1 ) : $ @ callee_owned ( Builtin . Int32 ) - > Builtin . 
Int32 <nl> + store % 3 to % 0 : $ * Builtin . Int32 <nl> + % 5 = tuple ( ) <nl> + / / CHECK : return <nl> + return % 5 : $ ( ) <nl> + } <nl> + <nl> / / CHECK - LABEL : remove_init_ex <nl> / / CHECK : alloc_stack $ Bool <nl> / / CHECK - NOT : init_existential <nl>
|
Set the transparent bit properly when combining apply and partial_apply .
|
apple/swift
|
6dafd410e9cf5b67750e41a9cfdbed7596b09401
|
2015-01-16T23:13:52Z
|
mmm a / modules / gdscript / language_server / gdscript_language_protocol . cpp <nl> ppp b / modules / gdscript / language_server / gdscript_language_protocol . cpp <nl> Dictionary GDScriptLanguageProtocol : : initialize ( const Dictionary & p_params ) { <nl> params [ " path " ] = workspace - > root ; <nl> Dictionary request = make_notification ( " gdscript_client / changeWorkspace " , params ) ; <nl> <nl> - ERR_FAIL_COND_V_MSG ( latest_client_id = = - 1 , ret . to_json ( ) , <nl> - " GDScriptLanguageProtocol : Can ' t initialize as no client is connected . " ) ; <nl> - ERR_FAIL_INDEX_V_MSG ( ( uint64_t ) latest_client_id , clients . size ( ) , ret . to_json ( ) , <nl> + ERR_FAIL_COND_V_MSG ( ! clients . has ( latest_client_id ) , ret . to_json ( ) , <nl> vformat ( " GDScriptLanguageProtocol : Can ' t initialize invalid peer ' % d ' . " , latest_client_id ) ) ; <nl> Ref < LSPeer > peer = clients . get ( latest_client_id ) ; <nl> if ( peer ! = nullptr ) { <nl> void GDScriptLanguageProtocol : : notify_client ( const String & p_method , const Varia <nl> " GDScript LSP : Can ' t notify client as none was connected . " ) ; <nl> p_client_id = latest_client_id ; <nl> } <nl> - ERR_FAIL_INDEX ( ( uint64_t ) p_client_id , clients . size ( ) ) ; <nl> + ERR_FAIL_COND ( ! clients . has ( p_client_id ) ) ; <nl> Ref < LSPeer > peer = clients . get ( p_client_id ) ; <nl> ERR_FAIL_COND ( peer = = nullptr ) ; <nl> <nl> mmm a / modules / gdscript / language_server / gdscript_language_protocol . h <nl> ppp b / modules / gdscript / language_server / gdscript_language_protocol . h <nl> class GDScriptLanguageProtocol : public JSONRPC { <nl> <nl> HashMap < int , Ref < LSPeer > > clients ; <nl> Ref < TCP_Server > server ; <nl> - int latest_client_id = - 1 ; <nl> + int latest_client_id = 0 ; <nl> int next_client_id = 0 ; <nl> <nl> Ref < GDScriptTextDocument > text_document ; <nl>
|
Merge pull request from akien - mga / lsp - fix - 39548
|
godotengine/godot
|
e720118a756ce193142362546c71ecb82a700f20
|
2020-06-15T12:27:41Z
|
mmm a / tools / js - optimizer . js <nl> ppp b / tools / js - optimizer . js <nl> function simplifyExpressionsPre ( ast ) { <nl> var rerun = true ; <nl> while ( rerun ) { <nl> rerun = false ; <nl> - traverseGenerated ( ast , function ( node , type , stack ) { <nl> + traverseGenerated ( ast , function process ( node , type , stack ) { <nl> if ( type = = ' binary ' & & node [ 1 ] = = ' | ' & & ( jsonCompare ( node [ 2 ] , ZERO ) | | jsonCompare ( node [ 3 ] , ZERO ) ) ) { <nl> - stack . push ( 1 ) ; / / From here on up , no need for this kind of correction , it ' s done at the top <nl> - <nl> / / We might be able to remove this correction <nl> - for ( var i = stack . length - 2 ; i > = 0 ; i - - ) { <nl> + for ( var i = stack . length - 1 ; i > = 0 ; i - - ) { <nl> if ( stack [ i ] = = 1 ) { <nl> / / Great , we can eliminate <nl> rerun = true ; <nl> - return jsonCompare ( node [ 2 ] , ZERO ) ? node [ 3 ] : node [ 2 ] ; <nl> + / / we will replace ourselves with the non - zero side . Recursively process that node . <nl> + var result = jsonCompare ( node [ 2 ] , ZERO ) ? node [ 3 ] : node [ 2 ] , other ; <nl> + while ( other = process ( result , result [ 0 ] , stack ) ) { <nl> + result = other ; <nl> + } <nl> + return result ; <nl> } else if ( stack [ i ] = = - 1 ) { <nl> break ; / / Too bad , we can ' t <nl> } <nl> } <nl> + stack . push ( 1 ) ; / / From here on up , no need for this kind of correction , it ' s done at the top <nl> + / / ( Add this at the end , so it is only added if we did not remove it ) <nl> } else if ( type = = ' binary ' & & node [ 1 ] in USEFUL_BINARY_OPS ) { <nl> stack . push ( 1 ) ; <nl> } else if ( ( type = = ' binary ' & & node [ 1 ] in SAFE_BINARY_OPS ) | | type = = ' num ' | | type = = ' name ' ) { <nl> mmm a / tools / test - js - optimizer - output . js <nl> ppp b / tools / test - js - optimizer - output . js <nl> function asmy ( ) { <nl> f ( ( HEAPU8 [ _buf + i6 & 16777215 ] & 1 ) + i5 | 0 ) ; <nl> f ( ( HEAP8 [ _buf + i6 & 16777215 ] & 1 ) + i5 | 0 ) ; <nl> f ( ( HEAPU8 [ _buf + i6 & 16777215 ] & 1 ) + i5 | 0 ) ; <nl> + if ( ( _sbrk ( $ 419 | 0 ) | 0 ) = = - 1 ) { <nl> + print ( " fleefl " ) ; <nl> + } <nl> } <nl> <nl> mmm a / tools / test - js - optimizer . js <nl> ppp b / tools / test - js - optimizer . js <nl> function asmy ( ) { <nl> f ( ( HEAPU8 [ _buf + i6 & 16777215 ] & 255 & 1 ) + i5 | 0 ) ; <nl> f ( ( HEAP8 [ _buf + i6 & 16777215 ] & 1 & 255 ) + i5 | 0 ) ; <nl> f ( ( HEAPU8 [ _buf + i6 & 16777215 ] & 1 & 255 ) + i5 | 0 ) ; <nl> + if ( ( _sbrk ( $ 419 | 0 ) | 0 | 0 ) = = - 1 ) { <nl> + print ( ' fleefl ' ) ; <nl> + } <nl> } <nl> / / EMSCRIPTEN_GENERATED_FUNCTIONS : [ " abc " , " xyz " , " xyz2 " , " expr " , " loopy " , " bits " , " maths " , " hoisting " , " demangle " , " lua " , " moreLabels " , " notComps " , " tricky " , " asmy " ] <nl>
|
fix bug with lack of recursion in simplifyBitops
|
emscripten-core/emscripten
|
486db79e93244b743b8ce8bef83ce93ef9470e16
|
2013-01-08T23:12:12Z
|
mmm a / android / android . toolchain . cmake <nl> ppp b / android / android . toolchain . cmake <nl> elseif ( ANDROID_STANDALONE_TOOLCHAIN ) <nl> set ( BUILD_WITH_STANDALONE_TOOLCHAIN True ) <nl> else ( ) <nl> list ( GET ANDROID_NDK_SEARCH_PATHS 0 ANDROID_NDK_SEARCH_PATH ) <nl> - message ( FATAL_ERROR " Could not find neither Android NDK nor Android standalone toolcahin . <nl> + message ( FATAL_ERROR " Could not find neither Android NDK nor Android standalone toolchain . <nl> You should either set an environment variable : <nl> export ANDROID_NDK = ~ / my - android - ndk <nl> or <nl> You are strongly recommended to switch to another NDK release . <nl> " ) <nl> endif ( ) <nl> <nl> + if ( NOT _CMAKE_IN_TRY_COMPILE AND X86 AND ANDROID_STL MATCHES " gnustl " AND ANDROID_NDK_RELEASE STREQUAL " r6 " ) <nl> + message ( WARNING " The x86 system header file from NDK r6 has incorrect definition for ptrdiff_t . You are recommended to upgrade to a newer NDK release or manually patch the header : <nl> + See https : / / android . googlesource . com / platform / development . git f907f4f9d4e56ccc8093df6fee54454b8bcab6c2 <nl> + + + mmm a / ndk / platforms / android - 9 / arch - x86 / include / machine / _types . h <nl> + ppp b / ndk / platforms / android - 9 / arch - x86 / include / machine / _types . h <nl> + typedef long int ssize_t ; <nl> + # endif <nl> + # ifndef _PTRDIFF_T <nl> + # define _PTRDIFF_T <nl> + - typedef long ptrdiff_t ; <nl> + + # ifdef __ANDROID__ <nl> + + typedef int ptrdiff_t ; <nl> + + # else <nl> + + typedef long ptrdiff_t ; <nl> + + # endif <nl> + # endif <nl> + " ) <nl> + endif ( ) <nl> + <nl> # setup paths and STL for NDK <nl> if ( BUILD_WITH_ANDROID_NDK ) <nl> set ( ANDROID_TOOLCHAIN_ROOT " $ { ANDROID_NDK } / toolchains / $ { ANDROID_TOOLCHAIN_NAME } / prebuilt / $ { ANDROID_NDK_HOST_SYSTEM_NAME } " ) <nl> if ( EXISTS " $ { __libstl } " OR EXISTS " $ { __libsupcxx } " ) <nl> set ( CMAKE_CXX_CREATE_SHARED_MODULE " < CMAKE_CXX_COMPILER > < CMAKE_SHARED_LIBRARY_CXX_FLAGS > < LANGUAGE_COMPILE_FLAGS > < LINK_FLAGS > < CMAKE_SHARED_LIBRARY_CREATE_CXX_FLAGS > < CMAKE_SHARED_LIBRARY_SONAME_CXX_FLAG > < TARGET_SONAME > - o < TARGET > < OBJECTS > < LINK_LIBRARIES > " ) <nl> set ( CMAKE_CXX_LINK_EXECUTABLE " < CMAKE_CXX_COMPILER > < FLAGS > < CMAKE_CXX_LINK_FLAGS > < LINK_FLAGS > < OBJECTS > - o < TARGET > < LINK_LIBRARIES > " ) <nl> endif ( ) <nl> + if ( X86 AND ANDROID_STL MATCHES " gnustl " AND ANDROID_NDK_RELEASE STREQUAL " r6 " ) <nl> + # workaround " undefined reference to ` __dso_handle ' " problem <nl> + set ( CMAKE_CXX_CREATE_SHARED_LIBRARY " $ { CMAKE_CXX_CREATE_SHARED_LIBRARY } \ " $ { ANDROID_SYSROOT } / usr / lib / crtbegin_so . o \ " " ) <nl> + set ( CMAKE_CXX_CREATE_SHARED_MODULE " $ { CMAKE_CXX_CREATE_SHARED_MODULE } \ " $ { ANDROID_SYSROOT } / usr / lib / crtbegin_so . o \ " " ) <nl> + endif ( ) <nl> if ( EXISTS " $ { __libstl } " ) <nl> set ( CMAKE_CXX_CREATE_SHARED_LIBRARY " $ { CMAKE_CXX_CREATE_SHARED_LIBRARY } \ " $ { __libstl } \ " " ) <nl> set ( CMAKE_CXX_CREATE_SHARED_MODULE " $ { CMAKE_CXX_CREATE_SHARED_MODULE } \ " $ { __libstl } \ " " ) <nl> if ( ANDROID_FUNCTION_LEVEL_LINKING ) <nl> set ( ANDROID_LINKER_FLAGS " $ { ANDROID_LINKER_FLAGS } - Wl , - - gc - sections " ) <nl> endif ( ) <nl> <nl> - if ( CMAKE_HOST_UNIX AND ( ARMEABI_V7A OR X86 ) AND ANDROID_COMPILER_VERSION VERSION_EQUAL " 4 . 
6 " ) <nl> - if ( ANDROID_GOLD_LINKER ) <nl> - set ( ANDROID_LINKER_FLAGS " $ { ANDROID_LINKER_FLAGS } - fuse - ld = gold " ) <nl> - endif ( ) <nl> + if ( ANDROID_GOLD_LINKER AND CMAKE_HOST_UNIX AND ( ARMEABI OR ARMEABI_V7A OR X86 ) AND ANDROID_COMPILER_VERSION VERSION_EQUAL " 4 . 6 " ) <nl> + set ( ANDROID_LINKER_FLAGS " $ { ANDROID_LINKER_FLAGS } - fuse - ld = gold " ) <nl> + elseif ( ANDROID_NDK_RELEASE STREQUAL " r8b " AND ARMEABI AND ANDROID_COMPILER_VERSION VERSION_EQUAL " 4 . 6 " AND NOT _CMAKE_IN_TRY_COMPILE ) <nl> + message ( WARNING " The default bfd linker from arm GCC 4 . 6 toolchain can fail with ' unresolvable R_ARM_THM_CALL relocation ' error message . See https : / / code . google . com / p / android / issues / detail ? id = 35342 <nl> + On Linux and OS X host platform you can workaround this problem using gold linker ( default ) . <nl> + Rerun cmake with - DANDROID_GOLD_LINKER = ON option . <nl> + " ) <nl> endif ( ) <nl> <nl> if ( ANDROID_NOEXECSTACK ) <nl>
|
Android toolchain : added fixes / workarounds for NDK r8b and NDK r6 bugs
|
opencv/opencv
|
4bd2c6b50da08e9863fb09fa335b682ce1968ba5
|
2012-09-07T14:27:47Z
|
mmm a / tests / test_other . py <nl> ppp b / tests / test_other . py <nl> def test_js_optimizer ( self ) : <nl> [ ' asm ' , ' asmPreciseF32 ' , ' optimizeFrounds ' ] ) , <nl> ( path_from_root ( ' tools ' , ' test - js - optimizer - asm - last . js ' ) , [ open ( path_from_root ( ' tools ' , ' test - js - optimizer - asm - lastOpts - output . js ' ) ) . read ( ) , open ( path_from_root ( ' tools ' , ' test - js - optimizer - asm - lastOpts - output2 . js ' ) ) . read ( ) ] , <nl> [ ' asm ' , ' asmLastOpts ' ] ) , <nl> - ( path_from_root ( ' tools ' , ' test - js - optimizer - asm - last . js ' ) , open ( path_from_root ( ' tools ' , ' test - js - optimizer - asm - last - output . js ' ) ) . read ( ) , <nl> + ( path_from_root ( ' tools ' , ' test - js - optimizer - asm - last . js ' ) , [ open ( path_from_root ( ' tools ' , ' test - js - optimizer - asm - last - output . js ' ) ) . read ( ) , open ( path_from_root ( ' tools ' , ' test - js - optimizer - asm - last - output2 . js ' ) ) . read ( ) ] , <nl> [ ' asm ' , ' asmLastOpts ' , ' last ' ] ) , <nl> ( path_from_root ( ' tools ' , ' test - js - optimizer - asm - relocate . js ' ) , open ( path_from_root ( ' tools ' , ' test - js - optimizer - asm - relocate - output . js ' ) ) . read ( ) , <nl> [ ' asm ' , ' relocate ' ] ) , <nl> def test_js_optimizer ( self ) : <nl> [ ' asm ' , ' ensureLabelSet ' ] ) , <nl> ] : <nl> print input , passes <nl> + <nl> + if type ( expected ) = = str : expected = [ expected ] <nl> + expected = map ( lambda out : out . replace ( ' \ n \ n ' , ' \ n ' ) . replace ( ' \ n \ n ' , ' \ n ' ) , expected ) <nl> + <nl> # test calling js optimizer <nl> print ' js ' <nl> output = Popen ( NODE_JS + [ path_from_root ( ' tools ' , ' js - optimizer . js ' ) , input ] + passes , stdin = PIPE , stdout = PIPE ) . communicate ( ) [ 0 ] <nl> <nl> def check_js ( js ) : <nl> - self . assertIdentical ( expected , js . replace ( ' \ r \ n ' , ' \ n ' ) . replace ( ' \ n \ n ' , ' \ n ' ) ) <nl> + self . assertIdentical ( expected , js . replace ( ' \ r \ n ' , ' \ n ' ) . replace ( ' \ n \ n ' , ' \ n ' ) . replace ( ' \ n \ n ' , ' \ n ' ) ) <nl> check_js ( output ) <nl> <nl> if js_optimizer . use_native ( passes ) : <nl> def check_js ( js ) : <nl> def check_json ( ) : <nl> Popen ( listify ( NODE_JS ) + [ path_from_root ( ' tools ' , ' js - optimizer . js ' ) , output_temp , ' receiveJSON ' ] , stdin = PIPE , stdout = open ( output_temp + ' . js ' , ' w ' ) ) . communicate ( ) <nl> output = open ( output_temp + ' . js ' ) . read ( ) <nl> - self . assertIdentical ( expected , output . replace ( ' \ r \ n ' , ' \ n ' ) . replace ( ' \ n \ n ' , ' \ n ' ) ) <nl> + self . assertIdentical ( expected , output . replace ( ' \ r \ n ' , ' \ n ' ) . replace ( ' \ n \ n ' , ' \ n ' ) . replace ( ' \ n \ n ' , ' \ n ' ) ) <nl> <nl> - print ' native ( receiveJSON ) ' <nl> self . clear ( ) <nl> input_temp = ' temp . js ' <nl> output_temp = ' output . js ' <nl> def check_json ( ) : <nl> json = open ( input_temp + ' . js ' ) . read ( ) <nl> json + = ' \ n ' + original [ original . find ( ' / / EXTRA_INFO : ' ) : ] <nl> open ( input_temp + ' . js ' , ' w ' ) . write ( json ) <nl> - output = Popen ( [ js_optimizer . get_native_optimizer ( ) , input_temp + ' . js ' ] + passes + [ ' receiveJSON ' , ' emitJSON ' ] , stdin = PIPE , stdout = open ( output_temp , ' w ' ) ) . communicate ( ) [ 0 ] <nl> - check_json ( ) <nl> <nl> - print ' native ( parsing JS ) ' <nl> - output = Popen ( [ js_optimizer . 
get_native_optimizer ( ) , input ] + passes + [ ' emitJSON ' ] , stdin = PIPE , stdout = open ( output_temp , ' w ' ) ) . communicate ( ) [ 0 ] <nl> - check_json ( ) <nl> + if ' last ' not in passes : # last is only relevant when we emit JS <nl> + print ' native ( receiveJSON ) ' <nl> + output = Popen ( [ js_optimizer . get_native_optimizer ( ) , input_temp + ' . js ' ] + passes + [ ' receiveJSON ' , ' emitJSON ' ] , stdin = PIPE , stdout = open ( output_temp , ' w ' ) ) . communicate ( ) [ 0 ] <nl> + check_json ( ) <nl> + <nl> + print ' native ( parsing JS ) ' <nl> + output = Popen ( [ js_optimizer . get_native_optimizer ( ) , input ] + passes + [ ' emitJSON ' ] , stdin = PIPE , stdout = open ( output_temp , ' w ' ) ) . communicate ( ) [ 0 ] <nl> + check_json ( ) <nl> <nl> print ' native ( emitting JS ) ' <nl> output = Popen ( [ js_optimizer . get_native_optimizer ( ) , input ] + passes , stdin = PIPE , stdout = PIPE ) . communicate ( ) [ 0 ] <nl> mmm a / tools / js_optimizer . py <nl> ppp b / tools / js_optimizer . py <nl> <nl> def path_from_root ( * pathelems ) : <nl> return os . path . join ( __rootpath__ , * pathelems ) <nl> <nl> - NATIVE_PASSES = set ( [ ' asm ' , ' asmPreciseF32 ' , ' receiveJSON ' , ' emitJSON ' , ' eliminate ' , ' eliminateMemSafe ' , ' simplifyExpressions ' , ' simplifyIfs ' , ' optimizeFrounds ' , ' registerize ' , ' minifyNames ' , ' minifyLocals ' , ' minifyWhitespace ' , ' cleanup ' , ' asmLastOpts ' ] ) <nl> + NATIVE_PASSES = set ( [ ' asm ' , ' asmPreciseF32 ' , ' receiveJSON ' , ' emitJSON ' , ' eliminate ' , ' eliminateMemSafe ' , ' simplifyExpressions ' , ' simplifyIfs ' , ' optimizeFrounds ' , ' registerize ' , ' minifyNames ' , ' minifyLocals ' , ' minifyWhitespace ' , ' cleanup ' , ' asmLastOpts ' , ' last ' ] ) <nl> <nl> JS_OPTIMIZER = path_from_root ( ' tools ' , ' js - optimizer . js ' ) <nl> <nl> mmm a / tools / optimizer / optimizer . cpp <nl> ppp b / tools / optimizer / optimizer . cpp <nl> int main ( int argc , char * * argv ) { <nl> else if ( str = = " minifyLocals " ) minifyLocals ( doc ) ; <nl> else if ( str = = " minifyWhitespace " ) { } <nl> else if ( str = = " asmLastOpts " ) asmLastOpts ( doc ) ; <nl> + else if ( str = = " last " ) { } <nl> else { <nl> fprintf ( stderr , " unrecognized argument : % s \ n " , str . c_str ( ) ) ; <nl> assert ( 0 ) ; <nl> int main ( int argc , char * * argv ) { <nl> } else { <nl> JSPrinter jser ( ! minifyWhitespace , last , doc ) ; <nl> jser . printAst ( ) ; <nl> - std : : cout < < jser . buffer < < " \ n \ n " ; <nl> + std : : cout < < jser . buffer < < " \ n " ; <nl> } <nl> <nl> return 0 ; <nl> mmm a / tools / optimizer / simple_ast . h <nl> ppp b / tools / optimizer / simple_ast . h <nl> struct JSPrinter { <nl> } <nl> <nl> void emit ( char c ) { <nl> + if ( ! pretty & & c = = ' } ' & & buffer [ used - 1 ] = = ' ; ' ) used - - ; / / optimize ; } into } , the ; is not separating anything <nl> ensure ( 1 ) ; <nl> buffer [ used + + ] = c ; <nl> } <nl> struct JSPrinter { <nl> assert ( d > = 0 ) ; <nl> unsigned long long uu = ( unsigned long long ) d ; <nl> if ( uu = = d ) { <nl> - snprintf ( buffer , 45 , e ? " 0x % llx " : " % llu " , uu ) ; <nl> + snprintf ( buffer , 45 , ( e & & ! finalize ) ? 
" 0x % llx " : " % llu " , uu ) ; <nl> sscanf ( buffer , " % lf " , & temp ) ; <nl> } else { <nl> / / too large for a machine integer , just use floats <nl> struct JSPrinter { <nl> } <nl> <nl> void printUnaryPrefix ( Ref node ) { <nl> + if ( ( buffer [ used - 1 ] = = ' - ' & & node [ 1 ] = = MINUS ) | | <nl> + ( buffer [ used - 1 ] = = ' + ' & & node [ 1 ] = = PLUS & & ! finalize ) ) { <nl> + emit ( ' ' ) ; / / cannot join - and - to - - , looks like the - - operator <nl> + } <nl> + if ( finalize & & node [ 1 ] = = PLUS & & ( node [ 2 ] [ 0 ] = = NUM | | <nl> + ( node [ 2 ] [ 0 ] = = UNARY_PREFIX & & node [ 2 ] [ 1 ] = = MINUS & & node [ 2 ] [ 2 ] [ 0 ] = = NUM ) ) ) { <nl> + / / emit a finalized number <nl> + char * curr = buffer + used ; <nl> + print ( node [ 2 ] ) ; <nl> + buffer [ used ] = 0 ; <nl> + if ( strchr ( curr , ' . ' ) ) return ; / / already a decimal point , all good <nl> + char * e = strchr ( curr , ' e ' ) ; <nl> + if ( ! e ) { <nl> + emit ( " . 0 " ) ; <nl> + return ; <nl> + } <nl> + ensure ( 3 ) ; <nl> + char * end = strchr ( curr , 0 ) ; <nl> + while ( end > = e ) { <nl> + end [ 2 ] = end [ 0 ] ; <nl> + end - - ; <nl> + } <nl> + e [ 0 ] = ' . ' ; <nl> + e [ 1 ] = ' 0 ' ; <nl> + used + = 2 ; <nl> + return ; <nl> + } <nl> emit ( node [ 1 ] - > getCString ( ) ) ; <nl> printChild ( node [ 2 ] , node , 1 ) ; <nl> } <nl> new file mode 100644 <nl> index 00000000000 . . ee736a4e71a <nl> mmm / dev / null <nl> ppp b / tools / test - js - optimizer - asm - last - output2 . js <nl> <nl> + function finall ( x ) { <nl> + x = + x ; <nl> + var a = 5 . 0 ; <nl> + a = + x ; <nl> + a = 17 ; <nl> + a = 44 . 0 ; <nl> + a = 44 . 0 ; <nl> + a = 44 . 9 ; <nl> + a = 1278 . 0e3 ; <nl> + a = 12 . 0e10 ; <nl> + a = - x ; <nl> + a = - 17 ; <nl> + a = - 44 ; <nl> + a = - 44 ; <nl> + a = - 44 . 9 ; <nl> + a = - 1278e3 ; <nl> + a = - 12e10 ; <nl> + a = + - x ; <nl> + a = - 17 . 0 ; <nl> + a = - 44 . 0 ; <nl> + a = - 44 . 0 ; <nl> + a = - 44 . 9 ; <nl> + a = - 1278 . 0e3 ; <nl> + a = - 12 . 0e10 ; <nl> + a = 9223372036854775808 . 0 ; <nl> + a = - 9223372036854775808 . 0 ; <nl> + a = - 9223372036854775808 . 0 ; <nl> + a = - 9223372036854775808 ; <nl> + a = 999999984306749440 . 0 ; <nl> + a = - 999999984306749440 . 0 ; <nl> + a = - 999999984306749440 . 0 ; <nl> + a = - 999999984306749440 ; <nl> + a = 1123456789012345651200 . 0 ; <nl> + f ( g ( ) | 0 ) ; <nl> + return 12 . 0e10 ; <nl> + } <nl> + function looop ( ) { <nl> + do do_it ( ) ; while ( ! condition ( ) ) ; <nl> + do do_it ( ) ; while ( ! ( a > b ) ) ; <nl> + do do_it ( ) ; while ( x ( ) ) ; <nl> + while ( 1 ) { <nl> + do_it ( ) ; <nl> + if ( a ( ) ) continue ; <nl> + if ( ! x ( ) ) break ; <nl> + } <nl> + do { <nl> + do_it ( ) ; <nl> + do if ( a ( ) ) continue ; while ( b ( ) ) ; <nl> + } while ( x ( ) ) ; <nl> + do { <nl> + do_it ( ) ; <nl> + while ( b ( ) ) if ( a ( ) ) continue ; <nl> + } while ( x ( ) ) ; <nl> + X : while ( 1 ) { <nl> + do_it ( ) ; <nl> + while ( b ( ) ) if ( a ( ) ) continue X ; <nl> + if ( ! x ( ) ) break ; <nl> + } <nl> + do blah ( ) ; while ( ! shah ( ) ) ; <nl> + a = b ; <nl> + LABELED : while ( 1 ) { <nl> + blah ( ) ; <nl> + if ( shah ( ) ) { <nl> + c = d ; <nl> + break ; <nl> + } <nl> + } <nl> + while ( 1 ) { <nl> + blah ( ) ; <nl> + if ( check ) break ; <nl> + if ( shah ( ) ) { <nl> + e = f ; <nl> + break ; <nl> + } <nl> + } <nl> + do { <nl> + blah ( ) ; <nl> + while ( 1 ) if ( check ) break ; <nl> + } while ( ! 
shah ( ) ) ; <nl> + g = h ; <nl> + if ( a ) waka ( ) ; <nl> + if ( a ) waka ( ) ; else wala ( ) ; <nl> + if ( a ) if ( a ) waka ( ) ; else wala ( ) ; <nl> + if ( a ) { <nl> + if ( a ) waka ( ) ; <nl> + } else other ( ) ; <nl> + if ( a ) if ( a ) waka ( ) ; else wala ( ) ; else other ( ) ; <nl> + } <nl> + function conditions ( ) { <nl> + if ( HEAP32 [ $ incdec_ptr71_i + 8 > > 2 ] | 0 ) shoo ( ) ; <nl> + if ( x = = 0 ) y ( ) ; <nl> + if ( ! x ) y ( ) ; <nl> + if ( ! y ) z ( ) ; <nl> + if ( x ! = 0 ) y ( ) ; <nl> + if ( x ) y ( ) ; <nl> + if ( y ) z ( ) ; <nl> + if ( x ) y ( ) ; <nl> + if ( ! x ) y ( ) ; <nl> + if ( ! ( s ( ) | 0 ) ) z ( ) ; <nl> + if ( x + 4 | 0 ) y ( ) ; <nl> + if ( x & 4 ) y ( ) ; <nl> + } <nl> + <nl>
|
update cashew and enable last in native optimizer
|
emscripten-core/emscripten
|
facac7def455da6d4868fc5acb57f0cdb216e6e5
|
2014-12-06T20:01:50Z
|
mmm a / HelloWorld / ios / AppController . mm <nl> ppp b / HelloWorld / ios / AppController . mm <nl> - ( BOOL ) application : ( UIApplication * ) application didFinishLaunchingWithOptions : ( <nl> window = [ [ UIWindow alloc ] initWithFrame : [ [ UIScreen mainScreen ] bounds ] ] ; <nl> EAGLView * __glView = [ EAGLView viewWithFrame : [ window bounds ] <nl> pixelFormat : kEAGLColorFormatRGBA8 <nl> - depthFormat : 0 <nl> + depthFormat : GL_DEPTH_COMPONENT16_OES <nl> preserveBackbuffer : NO <nl> sharegroup : nil <nl> multiSampling : NO <nl> mmm a / template / xcode3 / cocos2d - x_app / ios / ___PROJECTNAMEASIDENTIFIER___AppController . mm <nl> ppp b / template / xcode3 / cocos2d - x_app / ios / ___PROJECTNAMEASIDENTIFIER___AppController . mm <nl> - ( BOOL ) application : ( UIApplication * ) application didFinishLaunchingWithOptions : ( <nl> window = [ [ UIWindow alloc ] initWithFrame : [ [ UIScreen mainScreen ] bounds ] ] ; <nl> EAGLView * __glView = [ EAGLView viewWithFrame : [ window bounds ] <nl> pixelFormat : kEAGLColorFormatRGBA8 <nl> - depthFormat : 0 <nl> + depthFormat : GL_DEPTH_COMPONENT16_OES <nl> preserveBackbuffer : NO <nl> sharegroup : nil <nl> multiSampling : NO <nl> mmm a / template / xcode3 / cocos2d - x_box2d_app / ios / ___PROJECTNAMEASIDENTIFIER___AppController . mm <nl> ppp b / template / xcode3 / cocos2d - x_box2d_app / ios / ___PROJECTNAMEASIDENTIFIER___AppController . mm <nl> - ( BOOL ) application : ( UIApplication * ) application didFinishLaunchingWithOptions : ( <nl> window = [ [ UIWindow alloc ] initWithFrame : [ [ UIScreen mainScreen ] bounds ] ] ; <nl> EAGLView * __glView = [ EAGLView viewWithFrame : [ window bounds ] <nl> pixelFormat : kEAGLColorFormatRGBA8 <nl> - depthFormat : 0 <nl> + depthFormat : GL_DEPTH_COMPONENT16_OES <nl> preserveBackbuffer : NO <nl> sharegroup : nil <nl> multiSampling : NO <nl> mmm a / template / xcode3 / cocos2d - x_chipmunk_app / ios / ___PROJECTNAMEASIDENTIFIER___AppController . mm <nl> ppp b / template / xcode3 / cocos2d - x_chipmunk_app / ios / ___PROJECTNAMEASIDENTIFIER___AppController . mm <nl> - ( BOOL ) application : ( UIApplication * ) application didFinishLaunchingWithOptions : ( <nl> window = [ [ UIWindow alloc ] initWithFrame : [ [ UIScreen mainScreen ] bounds ] ] ; <nl> EAGLView * __glView = [ EAGLView viewWithFrame : [ window bounds ] <nl> pixelFormat : kEAGLColorFormatRGBA8 <nl> - depthFormat : 0 <nl> + depthFormat : GL_DEPTH_COMPONENT16_OES <nl> preserveBackbuffer : NO <nl> sharegroup : nil <nl> multiSampling : NO <nl> mmm a / tests / test . ios / Classes / testsAppDelegate . mm <nl> ppp b / tests / test . ios / Classes / testsAppDelegate . mm <nl> - ( BOOL ) application : ( UIApplication * ) application didFinishLaunchingWithOptions : ( <nl> window = [ [ UIWindow alloc ] initWithFrame : [ [ UIScreen mainScreen ] bounds ] ] ; <nl> EAGLView * __glView = [ EAGLView viewWithFrame : [ window bounds ] <nl> pixelFormat : kEAGLColorFormatRGBA8 <nl> - depthFormat : 0 <nl> + depthFormat : GL_DEPTH_COMPONENT16_OES <nl> preserveBackbuffer : NO <nl> sharegroup : nil <nl> multiSampling : NO <nl>
|
[ ios ] fix , Enable the depth test when EAGLView is created .
|
cocos2d/cocos2d-x
|
09be3e8fe5919335a31e82e72de6a0c040aa8ba0
|
2011-03-21T09:15:20Z
|
mmm a / api / envoy / config / filter / http / health_check / v2 / health_check . proto <nl> ppp b / api / envoy / config / filter / http / health_check / v2 / health_check . proto <nl> message HealthCheck { <nl> / / Specifies whether the filter operates in pass through mode or not . <nl> google . protobuf . BoolValue pass_through_mode = 1 [ ( validate . rules ) . message . required = true ] ; <nl> <nl> - / / Specifies the incoming HTTP endpoint that should be considered the <nl> - / / health check endpoint . For example * / healthcheck * . <nl> - / / Note that this field is deprecated in favor of <nl> - / / : ref : ` headers < envoy_api_field_config . filter . http . health_check . v2 . HealthCheck . headers > ` . <nl> - string endpoint = 2 [ deprecated = true ] ; <nl> + reserved 2 ; <nl> + reserved " endpoint " ; <nl> <nl> / / If operating in pass through mode , the amount of time in milliseconds <nl> / / that the filter should cache the upstream response . <nl> message HealthCheck { <nl> <nl> / / Specifies a set of health check request headers to match on . The health check filter will <nl> / / check a request ’ s headers against all the specified headers . To specify the health check <nl> - / / endpoint , set the ` ` : path ` ` header to match on . Note that if the <nl> - / / : ref : ` endpoint < envoy_api_field_config . filter . http . health_check . v2 . HealthCheck . endpoint > ` <nl> - / / field is set , it will overwrite any ` ` : path ` ` header to match . <nl> + / / endpoint , set the ` ` : path ` ` header to match on . <nl> repeated envoy . api . v2 . route . HeaderMatcher headers = 5 ; <nl> } <nl> mmm a / docs / root / intro / version_history . rst <nl> ppp b / docs / root / intro / version_history . rst <nl> Version history <nl> * health check : health check connections can now be configured to use http / 2 . <nl> * health check http filter : added <nl> : ref : ` generic header matching < envoy_api_field_config . filter . http . health_check . v2 . HealthCheck . headers > ` <nl> - to trigger health check response . Deprecated the <nl> - : ref : ` endpoint option < envoy_api_field_config . filter . http . health_check . v2 . HealthCheck . endpoint > ` . <nl> + to trigger health check response . Deprecated the endpoint option . <nl> * http : filters can now optionally support <nl> : ref : ` virtual host < envoy_api_field_route . VirtualHost . per_filter_config > ` , <nl> : ref : ` route < envoy_api_field_route . Route . per_filter_config > ` , and <nl> mmm a / source / common / config / filter_json . cc <nl> ppp b / source / common / config / filter_json . cc <nl> void FilterJson : : translateHealthCheckFilter ( <nl> <nl> JSON_UTIL_SET_BOOL ( json_config , proto_config , pass_through_mode ) ; <nl> JSON_UTIL_SET_DURATION ( json_config , proto_config , cache_time ) ; <nl> - JSON_UTIL_SET_STRING ( json_config , proto_config , endpoint ) ; <nl> + std : : string endpoint = json_config . getString ( " endpoint " ) ; <nl> + auto & header = * proto_config . add_headers ( ) ; <nl> + header . set_name ( " : path " ) ; <nl> + header . set_exact_match ( endpoint ) ; <nl> } <nl> <nl> void FilterJson : : translateGrpcJsonTranscoder ( <nl> mmm a / source / extensions / filters / http / health_check / config . cc <nl> ppp b / source / extensions / filters / http / health_check / config . cc <nl> Http : : FilterFactoryCb HealthCheckFilterConfig : : createFilterFactoryFromProtoTyped <nl> <nl> const bool pass_through_mode = proto_config . pass_through_mode ( ) . 
value ( ) ; <nl> const int64_t cache_time_ms = PROTOBUF_GET_MS_OR_DEFAULT ( proto_config , cache_time , 0 ) ; <nl> - const std : : string hc_endpoint = proto_config . endpoint ( ) ; <nl> <nl> auto header_match_data = std : : make_shared < std : : vector < Http : : HeaderUtility : : HeaderData > > ( ) ; <nl> <nl> - / / TODO ( mrice32 ) : remove endpoint field at the end of the 1 . 7 . 0 deprecation cycle . <nl> - const bool endpoint_set = ! proto_config . endpoint ( ) . empty ( ) ; <nl> - if ( endpoint_set ) { <nl> - envoy : : api : : v2 : : route : : HeaderMatcher matcher ; <nl> - matcher . set_name ( Http : : Headers : : get ( ) . Path . get ( ) ) ; <nl> - matcher . set_exact_match ( proto_config . endpoint ( ) ) ; <nl> - header_match_data - > emplace_back ( matcher ) ; <nl> - } <nl> - <nl> for ( const envoy : : api : : v2 : : route : : HeaderMatcher & matcher : proto_config . headers ( ) ) { <nl> Http : : HeaderUtility : : HeaderData single_header_match ( matcher ) ; <nl> - / / Ignore any path header matchers if the endpoint field has been set . <nl> - if ( ! ( endpoint_set & & single_header_match . name_ = = Http : : Headers : : get ( ) . Path ) ) { <nl> - header_match_data - > push_back ( std : : move ( single_header_match ) ) ; <nl> - } <nl> + header_match_data - > push_back ( std : : move ( single_header_match ) ) ; <nl> } <nl> <nl> if ( ! pass_through_mode & & cache_time_ms ) { <nl> mmm a / test / extensions / filters / http / health_check / config_test . cc <nl> ppp b / test / extensions / filters / http / health_check / config_test . cc <nl> TEST ( HealthCheckFilterConfig , FailsWhenNotPassThroughButTimeoutSetProto ) { <nl> NiceMock < Server : : Configuration : : MockFactoryContext > context ; <nl> <nl> config . mutable_pass_through_mode ( ) - > set_value ( false ) ; <nl> - config . set_endpoint ( " foo " ) ; <nl> config . mutable_cache_time ( ) - > set_seconds ( 10 ) ; <nl> + envoy : : api : : v2 : : route : : HeaderMatcher & header = * config . add_headers ( ) ; <nl> + header . set_name ( " : path " ) ; <nl> + header . set_exact_match ( " foo " ) ; <nl> <nl> EXPECT_THROW ( <nl> healthCheckFilterConfig . createFilterFactoryFromProto ( config , " dummy_stats_prefix " , context ) , <nl> TEST ( HealthCheckFilterConfig , NotFailingWhenNotPassThroughAndTimeoutNotSetProto ) <nl> NiceMock < Server : : Configuration : : MockFactoryContext > context ; <nl> <nl> config . mutable_pass_through_mode ( ) - > set_value ( false ) ; <nl> - config . set_endpoint ( " foo " ) ; <nl> + envoy : : api : : v2 : : route : : HeaderMatcher & header = * config . add_headers ( ) ; <nl> + header . set_name ( " : path " ) ; <nl> + header . set_exact_match ( " foo " ) ; <nl> healthCheckFilterConfig . createFilterFactoryFromProto ( config , " dummy_stats_prefix " , context ) ; <nl> } <nl> <nl> TEST ( HealthCheckFilterConfig , HealthCheckFilterWithEmptyProto ) { <nl> healthCheckFilterConfig . createEmptyConfigProto ( ) . get ( ) ) ; <nl> <nl> config . mutable_pass_through_mode ( ) - > set_value ( false ) ; <nl> - config . set_endpoint ( " foo " ) ; <nl> + envoy : : api : : v2 : : route : : HeaderMatcher & header = * config . add_headers ( ) ; <nl> + header . set_name ( " : path " ) ; <nl> + header . set_exact_match ( " foo " ) ; <nl> healthCheckFilterConfig . 
createFilterFactoryFromProto ( config , " dummy_stats_prefix " , context ) ; <nl> } <nl> <nl> TEST ( HealthCheckFilterConfig , HealthCheckFilterHeaderMatchMissingHeader ) { <nl> testHealthCheckHeaderMatch ( config , headers , false ) ; <nl> } <nl> <nl> - / / If an endpoint is specified and the path matches , it should match regardless of any : path <nl> - / / conditions given in the headers field . <nl> - TEST ( HealthCheckFilterConfig , HealthCheckFilterEndpoint ) { <nl> - envoy : : config : : filter : : http : : health_check : : v2 : : HealthCheck config ; <nl> - <nl> - config . mutable_pass_through_mode ( ) - > set_value ( false ) ; <nl> - <nl> - config . set_endpoint ( " foo " ) ; <nl> - <nl> - envoy : : api : : v2 : : route : : HeaderMatcher & header = * config . add_headers ( ) ; <nl> - header . set_name ( Http : : Headers : : get ( ) . Path . get ( ) ) ; <nl> - header . set_exact_match ( " bar " ) ; <nl> - <nl> - Http : : TestHeaderMapImpl headers { { Http : : Headers : : get ( ) . Path . get ( ) , " foo " } } ; <nl> - <nl> - testHealthCheckHeaderMatch ( config , headers , true ) ; <nl> - } <nl> - <nl> - / / If an endpoint is specified and the path does not match , the filter should not match regardless <nl> - / / of any : path conditions given in the headers field . <nl> - TEST ( HealthCheckFilterConfig , HealthCheckFilterEndpointOverride ) { <nl> - envoy : : config : : filter : : http : : health_check : : v2 : : HealthCheck config ; <nl> - <nl> - config . mutable_pass_through_mode ( ) - > set_value ( false ) ; <nl> - <nl> - config . set_endpoint ( " foo " ) ; <nl> - <nl> - envoy : : api : : v2 : : route : : HeaderMatcher & header = * config . add_headers ( ) ; <nl> - header . set_name ( Http : : Headers : : get ( ) . Path . get ( ) ) ; <nl> - header . set_exact_match ( " bar " ) ; <nl> - <nl> - Http : : TestHeaderMapImpl headers { { Http : : Headers : : get ( ) . Path . get ( ) , " bar " } } ; <nl> - <nl> - testHealthCheckHeaderMatch ( config , headers , false ) ; <nl> - } <nl> - <nl> / / Conditions for the same header should match if they are both satisfied . <nl> TEST ( HealthCheckFilterConfig , HealthCheckFilterDuplicateMatch ) { <nl> envoy : : config : : filter : : http : : health_check : : v2 : : HealthCheck config ; <nl>
|
health_check : remove deprecated endpoint field ( )
|
envoyproxy/envoy
|
8b3aae8c6c7c68eb0f05c43a729a7e415cca3e55
|
2018-07-24T16:33:46Z
|
mmm a / bindings / csharp / CNTKLibraryManagedDll / CNTKLibraryManagedDll . csproj <nl> ppp b / bindings / csharp / CNTKLibraryManagedDll / CNTKLibraryManagedDll . csproj <nl> <nl> < Compile Include = " SwigProxyClasses \ Axis . cs " / > <nl> < Compile Include = " SwigProxyClasses \ AxisVector . cs " / > <nl> < Compile Include = " SwigProxyClasses \ BoolVector . cs " / > <nl> - < Compile Include = " SwigProxyClasses \ Common . cs " / > <nl> - < Compile Include = " SwigProxyClasses \ CommonPINVOKE . cs " / > <nl> + < Compile Include = " SwigProxyClasses \ Utils . cs " / > <nl> + < Compile Include = " SwigProxyClasses \ UtilsPINVOKE . cs " / > <nl> < Compile Include = " SwigProxyClasses \ DataType . cs " / > <nl> < Compile Include = " SwigProxyClasses \ DeviceDescriptor . cs " / > <nl> < Compile Include = " SwigProxyClasses \ DeviceDescriptorVector . cs " / > <nl> mmm a / bindings / csharp / Swig / cntk_cs . i <nl> ppp b / bindings / csharp / Swig / cntk_cs . i <nl> <nl> - % module ( directors = " 1 " ) Common <nl> + % module ( directors = " 1 " ) Utils <nl> / / % feature ( " autodoc " , " 1 " ) ; <nl> <nl> % include < stl . i > <nl> SWIG_STD_VECTOR_ENHANCED ( CNTK : : DeviceDescriptor ) <nl> } <nl> <nl> / / Return true if the fields match : <nl> - return Common . AreEqualDeviceDescriptor ( this , p ) ; <nl> + return Utils . AreEqualDeviceDescriptor ( this , p ) ; <nl> } <nl> <nl> public bool Equals ( DeviceDescriptor p ) <nl> SWIG_STD_VECTOR_ENHANCED ( CNTK : : DeviceDescriptor ) <nl> } <nl> <nl> / / Return true if the fields match : <nl> - return Common . AreEqualDeviceDescriptor ( this , p ) ; <nl> + return Utils . AreEqualDeviceDescriptor ( this , p ) ; <nl> } <nl> <nl> public static bool operator = = ( DeviceDescriptor first , DeviceDescriptor second ) <nl> SWIG_STD_VECTOR_ENHANCED ( CNTK : : DeviceDescriptor ) <nl> } <nl> <nl> / / Return true if the fields match : <nl> - return Common . AreEqualDeviceDescriptor ( first , second ) ; <nl> + return Utils . AreEqualDeviceDescriptor ( first , second ) ; <nl> } <nl> <nl> public static bool operator ! = ( DeviceDescriptor first , DeviceDescriptor second ) <nl> SWIG_STD_VECTOR_ENHANCED ( CNTK : : DeviceDescriptor ) <nl> } <nl> <nl> / / Return true if the fields match : <nl> - return Common . AreEqualAxis ( this , p ) ; <nl> + return Utils . AreEqualAxis ( this , p ) ; <nl> } <nl> <nl> public bool Equals ( Axis p ) <nl> SWIG_STD_VECTOR_ENHANCED ( CNTK : : DeviceDescriptor ) <nl> } <nl> <nl> / / Return true if the fields match : <nl> - return Common . AreEqualAxis ( this , p ) ; <nl> + return Utils . AreEqualAxis ( this , p ) ; <nl> } <nl> <nl> public static bool operator = = ( Axis first , Axis second ) <nl> SWIG_STD_VECTOR_ENHANCED ( CNTK : : DeviceDescriptor ) <nl> } <nl> <nl> / / Return true if the fields match : <nl> - return Common . AreEqualAxis ( first , second ) ; <nl> + return Utils . AreEqualAxis ( first , second ) ; <nl> } <nl> <nl> public static bool operator ! = ( Axis first , Axis second ) <nl> SWIG_STD_VECTOR_ENHANCED ( CNTK : : DeviceDescriptor ) <nl> { <nl> varVect . Add ( v ) ; <nl> } <nl> - return Common . Combine ( varVect ) ; <nl> + return Utils . Combine ( varVect ) ; <nl> } <nl> <nl> public static Function AsComposite ( Function rootFunction , string name = " " ) <nl> { <nl> - return Common . AsComposite ( rootFunction , name ) ; <nl> + return Utils . AsComposite ( rootFunction , name ) ; <nl> } <nl> <nl> public static Function Alias ( Variable operand , string name = " " ) <nl> { <nl> - return Common . 
Alias ( operand , name ) ; <nl> + return Utils . Alias ( operand , name ) ; <nl> } <nl> <nl> / / For C # Eval , default ParameterCloningMethod is share . <nl> SWIG_STD_VECTOR_ENHANCED ( CNTK : : DeviceDescriptor ) <nl> } <nl> <nl> / / Return true if the fields match : <nl> - return Common . AreEqualVariable ( this , p ) ; <nl> + return Utils . AreEqualVariable ( this , p ) ; <nl> } <nl> <nl> public bool Equals ( Variable p ) <nl> SWIG_STD_VECTOR_ENHANCED ( CNTK : : DeviceDescriptor ) <nl> } <nl> <nl> / / Return true if the fields match : <nl> - return Common . AreEqualVariable ( this , p ) ; <nl> + return Utils . AreEqualVariable ( this , p ) ; <nl> } <nl> <nl> public static bool operator = = ( Variable first , Variable second ) <nl> SWIG_STD_VECTOR_ENHANCED ( CNTK : : DeviceDescriptor ) <nl> } <nl> <nl> / / Return true if the fields match : <nl> - return Common . AreEqualVariable ( first , second ) ; <nl> + return Utils . AreEqualVariable ( first , second ) ; <nl> } <nl> <nl> public static bool operator ! = ( Variable first , Variable second ) <nl> SWIG_STD_VECTOR_ENHANCED ( CNTK : : DeviceDescriptor ) <nl> } <nl> <nl> / / Return true if the fields match : <nl> - return Common . AreEqualShape ( this , p ) ; <nl> + return Utils . AreEqualShape ( this , p ) ; <nl> } <nl> <nl> public bool Equals ( NDShape p ) <nl> SWIG_STD_VECTOR_ENHANCED ( CNTK : : DeviceDescriptor ) <nl> } <nl> <nl> / / Return true if the fields match : <nl> - return Common . AreEqualShape ( this , p ) ; <nl> + return Utils . AreEqualShape ( this , p ) ; <nl> } <nl> <nl> public static bool operator = = ( NDShape first , NDShape second ) <nl> SWIG_STD_VECTOR_ENHANCED ( CNTK : : DeviceDescriptor ) <nl> } <nl> <nl> / / Return true if the fields match : <nl> - return Common . AreEqualShape ( first , second ) ; <nl> + return Utils . AreEqualShape ( first , second ) ; <nl> } <nl> <nl> public static bool operator ! = ( NDShape first , NDShape second ) <nl>
|
change Common to Utils
|
microsoft/CNTK
|
d952f8b242b6a151a4f8caaa18aec13d762437ae
|
2017-03-28T07:46:40Z
|
mmm a / src / runtime / base / frame_injection . cpp <nl> ppp b / src / runtime / base / frame_injection . cpp <nl> FrameInjection * FrameInjection : : GetStackFrame ( int level ) { <nl> <nl> String FrameInjection : : getFileName ( ) { <nl> if ( m_flags & PseudoMain ) { <nl> - return m_name + 10 ; <nl> + return m_name [ 0 ] = = ' _ ' ? m_name : m_name + 10 ; <nl> } <nl> const char * c = strstr ( m_name , " : : " ) ; <nl> const char * f = NULL ; <nl> mmm a / src / runtime / base / program_functions . cpp <nl> ppp b / src / runtime / base / program_functions . cpp <nl> static int execute_program_impl ( int argc , char * * argv ) { <nl> try { <nl> execute_command_line_begin ( new_argc , new_argv , po . xhprofFlags ) ; <nl> <nl> - DECLARE_THREAD_INFO_NOINIT ; <nl> - FRAME_INJECTION_FLAGS ( empty_string , _ , FrameInjection : : PseudoMain ) ; <nl> - <nl> if ( po . debugger_options . extension . empty ( ) ) { <nl> / / even if it ' s empty , still need to call for warmup <nl> hphp_invoke_simple ( " " , true ) ; / / not to run the 1st file if compiled <nl> mmm a / src / runtime / eval / eval . cpp <nl> ppp b / src / runtime / eval / eval . cpp <nl> Variant eval ( LVariableTable * vars , CObjRef self , CStrRef code_str , <nl> RequestEvalState : : addCodeContainer ( scc ) ; <nl> / / todo : pass in params <nl> NestedVariableEnvironment env ( vars , blk , Array ( ) , self ) ; <nl> + EvalFrameInjection fi ( empty_string , " _ " , env , NULL , NULL , <nl> + FrameInjection : : PseudoMain ) ; <nl> s - > eval ( env ) ; <nl> if ( env . isReturning ( ) ) { <nl> return env . getRet ( ) ; <nl> bool eval_get_call_info_hook ( const CallInfo * & ci , void * & extra , const char * s , <nl> EvalFrameInjection * efi = NULL ; <nl> for ( FrameInjection * fi = info - > m_top ; fi ; fi = fi - > getPrev ( ) ) { <nl> efi = dynamic_cast < EvalFrameInjection * > ( fi ) ; <nl> - if ( efi ) break ; <nl> + if ( efi ) break ; <nl> } <nl> ASSERT ( efi ) ; <nl> efi - > getEnv ( ) . setClosure ( extra ) ; <nl>
|
[ Fix ] Fix hphpi ( hphpd ) crash when eval a closure
|
facebook/hhvm
|
b8ca324b8f61462533604e2f6af6f000b099677e
|
2011-04-12T18:55:46Z
|
mmm a / Telegram / SourceFiles / media / player / media_player . style <nl> ppp b / Telegram / SourceFiles / media / player / media_player . style <nl> mediaPlayerPlayback : FilledSlider { <nl> lineWidth : 2px ; <nl> activeFg : mediaPlayerActiveFg ; <nl> inactiveFg : mediaPlayerInactiveFg ; <nl> + disabledFg : # 9dd1ef ; <nl> duration : 150 ; <nl> } <nl> <nl> mediaPlayerPanelPlayback : MediaSlider { <nl> width : 3px ; <nl> activeFg : mediaPlayerActiveFg ; <nl> inactiveFg : mediaPlayerInactiveFg ; <nl> + disabledActiveFg : mediaPlayerInactiveFg ; <nl> + disabledInactiveFg : windowBg ; <nl> activeOpacity : 1 . ; <nl> inactiveOpacity : 1 . ; <nl> seekSize : size ( 9px , 9px ) ; <nl> mmm a / Telegram / SourceFiles / media / player / media_player_cover . cpp <nl> ppp b / Telegram / SourceFiles / media / player / media_player_cover . cpp <nl> void CoverWidget : : handleSongUpdate ( const UpdatedEvent & e ) { <nl> return ; <nl> } <nl> <nl> - _playback - > updateState ( * e . playbackState ) ; <nl> + if ( audioId . audio ( ) - > loading ( ) ) { <nl> + _playback - > updateLoadingState ( audioId . audio ( ) - > progress ( ) ) ; <nl> + } else { <nl> + _playback - > updateState ( * e . playbackState ) ; <nl> + } <nl> <nl> auto stopped = ( ( playbackState . state & AudioPlayerStoppedMask ) | | playbackState . state = = AudioPlayerFinishing ) ; <nl> auto showPause = ! stopped & & ( playbackState . state = = AudioPlayerPlaying | | playbackState . state = = AudioPlayerResuming | | playbackState . state = = AudioPlayerStarting ) ; <nl> void CoverWidget : : updateTimeText ( const AudioMsgId & audioId , const AudioPlaybackS <nl> _lastDurationMs = ( playbackState . duration * 1000LL ) / frequency ; <nl> <nl> if ( audioId . audio ( ) - > loading ( ) ) { <nl> - auto loaded = audioId . audio ( ) - > loadOffset ( ) ; <nl> - auto loadProgress = snap ( float64 ( loaded ) / qMax ( audioId . audio ( ) - > size , 1 ) , 0 . , 1 . ) ; <nl> - _time = QString : : number ( qRound ( loadProgress * 100 ) ) + ' % ' ; <nl> + _time = QString : : number ( qRound ( audioId . audio ( ) - > progress ( ) * 100 ) ) + ' % ' ; <nl> _playback - > setDisabled ( true ) ; <nl> } else { <nl> display = display / frequency ; <nl> mmm a / Telegram / SourceFiles / media / player / media_player_widget . cpp <nl> ppp b / Telegram / SourceFiles / media / player / media_player_widget . cpp <nl> void Widget : : handleSongUpdate ( const UpdatedEvent & e ) { <nl> return ; <nl> } <nl> <nl> - _playback - > updateState ( * e . playbackState ) ; <nl> + if ( audioId . audio ( ) - > loading ( ) ) { <nl> + _playback - > updateLoadingState ( audioId . audio ( ) - > progress ( ) ) ; <nl> + } else { <nl> + _playback - > updateState ( * e . playbackState ) ; <nl> + } <nl> <nl> auto stopped = ( ( playbackState . state & AudioPlayerStoppedMask ) | | playbackState . state = = AudioPlayerFinishing ) ; <nl> auto showPause = ! stopped & & ( playbackState . state = = AudioPlayerPlaying | | playbackState . state = = AudioPlayerResuming | | playbackState . state = = AudioPlayerStarting ) ; <nl> void Widget : : updateTimeText ( const AudioMsgId & audioId , const AudioPlaybackState <nl> _lastDurationMs = ( playbackState . duration * 1000LL ) / frequency ; <nl> <nl> if ( audioId . audio ( ) - > loading ( ) ) { <nl> - auto loaded = audioId . audio ( ) - > loadOffset ( ) ; <nl> - auto loadProgress = snap ( float64 ( loaded ) / qMax ( audioId . audio ( ) - > size , 1 ) , 0 . , 1 . 
) ; <nl> - _time = QString : : number ( qRound ( loadProgress * 100 ) ) + ' % ' ; <nl> + _time = QString : : number ( qRound ( audioId . audio ( ) - > progress ( ) * 100 ) ) + ' % ' ; <nl> _playback - > setDisabled ( true ) ; <nl> } else { <nl> display = display / frequency ; <nl> mmm a / Telegram / SourceFiles / media / view / media_clip_playback . cpp <nl> ppp b / Telegram / SourceFiles / media / view / media_clip_playback . cpp <nl> Playback : : Playback ( Ui : : ContinuousSlider * slider ) : _slider ( slider ) { <nl> void Playback : : updateState ( const AudioPlaybackState & playbackState ) { <nl> qint64 position = 0 , duration = playbackState . duration ; <nl> <nl> + setDisabled ( false ) ; <nl> _playing = ! ( playbackState . state & AudioPlayerStoppedMask ) ; <nl> if ( _playing | | playbackState . state = = AudioPlayerStopped ) { <nl> position = playbackState . position ; <nl> void Playback : : updateState ( const AudioPlaybackState & playbackState ) { <nl> _slider - > update ( ) ; <nl> } <nl> <nl> + void Playback : : updateLoadingState ( float64 progress ) { <nl> + setDisabled ( true ) ; <nl> + auto animated = progress > _slider - > value ( ) ; <nl> + _slider - > setValue ( progress , animated ) ; <nl> + } <nl> + <nl> } / / namespace Clip <nl> } / / namespace Media <nl> mmm a / Telegram / SourceFiles / media / view / media_clip_playback . h <nl> ppp b / Telegram / SourceFiles / media / view / media_clip_playback . h <nl> class Playback { <nl> Playback ( Ui : : ContinuousSlider * slider ) ; <nl> <nl> void updateState ( const AudioPlaybackState & playbackState ) ; <nl> + void updateLoadingState ( float64 progress ) ; <nl> <nl> void setFadeOpacity ( float64 opacity ) { <nl> _slider - > setFadeOpacity ( opacity ) ; <nl> mmm a / Telegram / SourceFiles / media / view / mediaview . style <nl> ppp b / Telegram / SourceFiles / media / view / mediaview . style <nl> mediaviewPlayback : MediaSlider { <nl> width : 3px ; <nl> activeFg : mediaviewPlaybackActive ; <nl> inactiveFg : mediaviewPlaybackInactive ; <nl> + disabledActiveFg : mediaviewPlaybackActive ; <nl> + disabledInactiveFg : mediaviewPlaybackInactive ; <nl> activeOpacity : mediaviewActiveOpacity ; <nl> inactiveOpacity : mediaviewInactiveOpacity ; <nl> seekSize : size ( 11px , 11px ) ; <nl> mmm a / Telegram / SourceFiles / mtproto / file_download . cpp <nl> ppp b / Telegram / SourceFiles / mtproto / file_download . cpp <nl> void FileLoader : : readImage ( const QSize & shrinkBox ) const { <nl> } <nl> <nl> float64 FileLoader : : currentProgress ( ) const { <nl> - if ( _complete ) return 1 ; <nl> - if ( ! fullSize ( ) ) return 0 ; <nl> - return float64 ( currentOffset ( ) ) / fullSize ( ) ; <nl> + if ( _complete ) return 1 . ; <nl> + if ( ! fullSize ( ) ) return 0 . ; <nl> + return snap ( float64 ( currentOffset ( ) ) / fullSize ( ) , 0 . , 1 . ) ; <nl> } <nl> <nl> int32 FileLoader : : fullSize ( ) const { <nl> mmm a / Telegram / SourceFiles / structs . cpp <nl> ppp b / Telegram / SourceFiles / structs . cpp <nl> bool DocumentData : : displayLoading ( ) const { <nl> <nl> float64 DocumentData : : progress ( ) const { <nl> if ( uploading ( ) ) { <nl> - if ( size > 0 ) { <nl> - return float64 ( uploadOffset ) / size ; <nl> - } <nl> - return 0 ; <nl> + return snap ( ( size > 0 ) ? float64 ( uploadOffset ) / size : 0 . , 0 . , 1 . ) ; <nl> } <nl> - return loading ( ) ? _loader - > currentProgress ( ) : ( loaded ( ) ? 1 : 0 ) ; <nl> + return loading ( ) ? _loader - > currentProgress ( ) : ( loaded ( ) ? 1 . : 0 . 
) ; <nl> } <nl> <nl> int32 DocumentData : : loadOffset ( ) const { <nl> mmm a / Telegram / SourceFiles / ui / widgets / continuous_slider . cpp <nl> ppp b / Telegram / SourceFiles / ui / widgets / continuous_slider . cpp <nl> void ContinuousSlider : : wheelEvent ( QWheelEvent * e ) { <nl> # else / / OS_MAC_OLD <nl> constexpr auto step = static_cast < int > ( QWheelEvent : : DefaultDeltasPerStep ) ; <nl> # endif / / OS_MAC_OLD <nl> - constexpr auto coef = 1 . / ( step * 5 . ) ; <nl> + constexpr auto coef = 1 . / ( step * 10 . ) ; <nl> <nl> auto deltaX = e - > angleDelta ( ) . x ( ) , deltaY = e - > angleDelta ( ) . y ( ) ; <nl> if ( cPlatform ( ) = = dbipMac | | cPlatform ( ) = = dbipMacOld ) { <nl> mmm a / Telegram / SourceFiles / ui / widgets / continuous_slider . h <nl> ppp b / Telegram / SourceFiles / ui / widgets / continuous_slider . h <nl> class ContinuousSlider : public TWidget { <nl> return _mouseDown ? _downValue : a_value . current ( ) ; <nl> } <nl> float64 getCurrentOverFactor ( uint64 ms ) { <nl> - return _a_over . current ( ms , _over ? 1 . : 0 . ) ; <nl> + return _disabled ? 0 . : _a_over . current ( ms , _over ? 1 . : 0 . ) ; <nl> } <nl> bool isDisabled ( ) const { <nl> return _disabled ; <nl> mmm a / Telegram / SourceFiles / ui / widgets / filled_slider . cpp <nl> ppp b / Telegram / SourceFiles / ui / widgets / filled_slider . cpp <nl> void FilledSlider : : paintEvent ( QPaintEvent * e ) { <nl> auto lineWidthPartial = lineWidth - lineWidthRounded ; <nl> auto seekRect = getSeekRect ( ) ; <nl> auto value = getCurrentValue ( ms ) ; <nl> - auto from = seekRect . x ( ) , mid = disabled ? from : qRound ( from + value * seekRect . width ( ) ) , end = from + seekRect . width ( ) ; <nl> + auto from = seekRect . x ( ) , mid = qRound ( from + value * seekRect . width ( ) ) , end = from + seekRect . width ( ) ; <nl> if ( mid > from ) { <nl> p . setOpacity ( masterOpacity ) ; <nl> - p . fillRect ( from , height ( ) - lineWidthRounded , ( mid - from ) , lineWidthRounded , _st . activeFg ) ; <nl> + p . fillRect ( from , height ( ) - lineWidthRounded , ( mid - from ) , lineWidthRounded , disabled ? _st . disabledFg : _st . activeFg ) ; <nl> if ( lineWidthPartial > 0 . 01 ) { <nl> p . setOpacity ( masterOpacity * lineWidthPartial ) ; <nl> - p . fillRect ( from , height ( ) - lineWidthRounded - 1 , ( mid - from ) , 1 , _st . activeFg ) ; <nl> + p . fillRect ( from , height ( ) - lineWidthRounded - 1 , ( mid - from ) , 1 , disabled ? _st . disabledFg : _st . activeFg ) ; <nl> } <nl> } <nl> if ( end > mid & & over > 0 ) { <nl> mmm a / Telegram / SourceFiles / ui / widgets / media_slider . cpp <nl> ppp b / Telegram / SourceFiles / ui / widgets / media_slider . cpp <nl> void MediaSlider : : paintEvent ( QPaintEvent * e ) { <nl> auto markerLength = ( horizontal ? seekRect . width ( ) : seekRect . height ( ) ) ; <nl> auto from = _alwaysDisplayMarker ? 0 : markerFrom ; <nl> auto length = _alwaysDisplayMarker ? ( horizontal ? width ( ) : height ( ) ) : markerLength ; <nl> - auto mid = disabled ? from : qRound ( from + value * length ) ; <nl> + auto mid = qRound ( from + value * length ) ; <nl> auto end = from + length ; <nl> + auto & activeFg = disabled ? _st . disabledActiveFg : _st . activeFg ; <nl> + auto & inactiveFg = disabled ? _st . disabledInactiveFg : _st . inactiveFg ; <nl> if ( mid > from ) { <nl> auto fromClipRect = horizontal ? 
QRect ( 0 , 0 , mid , height ( ) ) : QRect ( 0 , 0 , width ( ) , mid ) ; <nl> auto fromRect = horizontal <nl> void MediaSlider : : paintEvent ( QPaintEvent * e ) { <nl> : QRect ( ( width ( ) - _st . width ) / 2 , from , _st . width , mid + radius - from ) ; <nl> p . setClipRect ( fromClipRect ) ; <nl> p . setOpacity ( masterOpacity * ( over * _st . activeOpacity + ( 1 . - over ) * _st . inactiveOpacity ) ) ; <nl> - p . setBrush ( horizontal ? _st . activeFg : _st . inactiveFg ) ; <nl> + p . setBrush ( horizontal ? activeFg : inactiveFg ) ; <nl> p . drawRoundedRect ( fromRect , radius , radius ) ; <nl> } <nl> if ( end > mid ) { <nl> void MediaSlider : : paintEvent ( QPaintEvent * e ) { <nl> : QRect ( ( width ( ) - _st . width ) / 2 , mid - radius , _st . width , end - ( mid - radius ) ) ; <nl> p . setClipRect ( endClipRect ) ; <nl> p . setOpacity ( masterOpacity ) ; <nl> - p . setBrush ( horizontal ? _st . inactiveFg : _st . activeFg ) ; <nl> + p . setBrush ( horizontal ? inactiveFg : activeFg ) ; <nl> p . drawRoundedRect ( endRect , radius , radius ) ; <nl> } <nl> auto markerSizeRatio = disabled ? 0 . : ( _alwaysDisplayMarker ? 1 . : over ) ; <nl> void MediaSlider : : paintEvent ( QPaintEvent * e ) { <nl> if ( remove * 2 < size ) { <nl> p . setClipRect ( rect ( ) ) ; <nl> p . setOpacity ( masterOpacity * _st . activeOpacity ) ; <nl> - p . setBrush ( _st . activeFg ) ; <nl> + p . setBrush ( activeFg ) ; <nl> p . drawEllipse ( seekButton . marginsRemoved ( QMargins ( remove , remove , remove , remove ) ) ) ; <nl> } <nl> } <nl> mmm a / Telegram / SourceFiles / ui / widgets / widgets . style <nl> ppp b / Telegram / SourceFiles / ui / widgets / widgets . style <nl> MediaSlider { <nl> width : pixels ; <nl> activeFg : color ; <nl> inactiveFg : color ; <nl> + disabledActiveFg : color ; <nl> + disabledInactiveFg : color ; <nl> activeOpacity : double ; <nl> inactiveOpacity : double ; <nl> seekSize : size ; <nl> FilledSlider { <nl> lineWidth : pixels ; <nl> activeFg : color ; <nl> inactiveFg : color ; <nl> + disabledFg : color ; <nl> duration : int ; <nl> } <nl> <nl>
|
Display download progress in the media player playback widget .
|
telegramdesktop/tdesktop
|
d5430736320d084afb17aab22e5e922dcf6faa09
|
2016-10-13T09:12:12Z
|
mmm a / doc / g_stands_for . md <nl> ppp b / doc / g_stands_for . md <nl> <nl> - 1 . 28 ' g ' stands for [ ' galactic ' ] ( https : / / github . com / grpc / grpc / tree / v1 . 28 . x ) <nl> - 1 . 29 ' g ' stands for [ ' gringotts ' ] ( https : / / github . com / grpc / grpc / tree / v1 . 29 . x ) <nl> - 1 . 30 ' g ' stands for [ ' gradius ' ] ( https : / / github . com / grpc / grpc / tree / v1 . 30 . x ) <nl> - - 1 . 31 ' g ' stands for [ ' galore ' ] ( https : / / github . com / grpc / grpc / tree / master ) <nl> + - 1 . 31 ' g ' stands for [ ' galore ' ] ( https : / / github . com / grpc / grpc / tree / v1 . 31 . x ) <nl> + - 1 . 32 ' g ' stands for [ ' giggle ' ] ( https : / / github . com / grpc / grpc / tree / master ) <nl>
|
add g - stands - for for the next release
|
grpc/grpc
|
f9b68bb01e14a8487ab5516e6bbc9c3ca519ce4e
|
2020-07-22T10:53:08Z
|
mmm a / caffe2 / python / control_ops_grad_test . py <nl> ppp b / caffe2 / python / control_ops_grad_test . py <nl> <nl> from __future__ import print_function <nl> from __future__ import unicode_literals <nl> <nl> + import unittest <nl> from caffe2 . python import core , test_util , workspace <nl> from caffe2 . python . control_ops_grad import disambiguate_grad_if_op_output <nl> from caffe2 . python . model_helper import ModelHelper <nl> def test_disambiguate_grad_if_op_output ( self ) : <nl> new_grad_output = " input_grad " + " _autosplit_ " + " 0 " <nl> disambiguate_grad_if_op_output ( grad_op , 0 , new_grad_output ) <nl> self . assertEqual ( grad_op . output [ 0 ] , new_grad_output ) <nl> - self . assertEqual ( grad_op . arg [ 1 ] . n . op [ 1 ] . output [ 0 ] , new_grad_output ) <nl> + for arg in grad_op . arg : <nl> + if arg . name = = " else_net " : <nl> + self . assertEqual ( arg . n . op [ 1 ] . output [ 0 ] , new_grad_output ) <nl> + else : <nl> + self . assertEqual ( arg . name , " then_net " ) <nl> + <nl> + <nl> + if __name__ = = ' __main__ ' : <nl> + unittest . main ( ) <nl>
|
Fix the weird bug in control_flow_op_test . py ( )
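The fix above stops indexing the gradient If-op's arguments positionally (arg[1]) and instead matches then_net / else_net by name, since their order is not guaranteed. A generic C++ sketch of the same lookup-by-name pattern; the Arg struct and field names are illustrative, not Caffe2's protobuf types.

#include <algorithm>
#include <string>
#include <vector>

// Stand-in for an operator argument; real Caffe2 args carry protobuf payloads.
struct Arg {
  std::string name;
  int payload = 0;
};

// Look an argument up by name instead of trusting a fixed position.
const Arg* FindArgByName(const std::vector<Arg>& args, const std::string& name) {
  auto it = std::find_if(args.begin(), args.end(),
                         [&](const Arg& a) { return a.name == name; });
  return it == args.end() ? nullptr : &*it;
}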
|
pytorch/pytorch
|
7163bfdf589605a96e46f4ec5ff1a5de9c502433
|
2019-09-27T03:44:03Z
|
mmm a / tools / optimizer / wasm . h <nl> ppp b / tools / optimizer / wasm . h <nl> class Loop : public Expression { <nl> public : <nl> Var out , in ; <nl> Expression * body ; <nl> + <nl> + std : : ostream & print ( std : : ostream & o , unsigned indent ) override { <nl> + o < < " ( loop " ; <nl> + if ( out . is ( ) ) { <nl> + o < < " " ; <nl> + out . print ( o ) ; <nl> + if ( in . is ( ) ) { <nl> + o < < " " ; <nl> + in . print ( o ) ; <nl> + } <nl> + } <nl> + incIndent ( o , indent ) ; <nl> + printFullLine ( o , indent , body ) ; <nl> + decIndent ( o , indent ) ; <nl> + return o ; <nl> + } <nl> } ; <nl> <nl> class Label : public Expression { <nl> class Break : public Expression { <nl> public : <nl> Var var ; <nl> Expression * condition , * value ; <nl> + <nl> + std : : ostream & print ( std : : ostream & o , unsigned indent ) override { <nl> + o < < " ( break " ; <nl> + var . print ( o ) ; <nl> + incIndent ( o , indent ) ; <nl> + if ( condition ) printFullLine ( o , indent , condition ) ; <nl> + if ( value ) printFullLine ( o , indent , value ) ; <nl> + decIndent ( o , indent ) ; <nl> + return o ; <nl> + } <nl> } ; <nl> <nl> class Switch : public Expression { <nl> class Switch : public Expression { <nl> Expression * value ; <nl> std : : vector < Case > cases ; <nl> Expression * default_ ; <nl> + <nl> + std : : ostream & print ( std : : ostream & o , unsigned indent ) override { <nl> + o < < " ( switch " ; <nl> + var . print ( o ) ; <nl> + incIndent ( o , indent ) ; <nl> + printFullLine ( o , indent , value ) ; <nl> + o < < " TODO : cases / default \ n " ; <nl> + decIndent ( o , indent ) ; <nl> + return o ; <nl> + } <nl> + <nl> } ; <nl> <nl> class Call : public Expression { <nl> public : <nl> Var target ; <nl> ExpressionList operands ; <nl> + <nl> + std : : ostream & print ( std : : ostream & o , unsigned indent ) override { <nl> + o < < " ( call " ; <nl> + target . print ( o ) ; <nl> + incIndent ( o , indent ) ; <nl> + for ( auto operand : operands ) { <nl> + printFullLine ( o , indent , operand ) ; <nl> + } <nl> + decIndent ( o , indent ) ; <nl> + return o ; <nl> + } <nl> } ; <nl> <nl> class CallImport : public Call { <nl> class CallIndirect : public Expression { <nl> class GetLocal : public Expression { <nl> public : <nl> Var id ; <nl> + <nl> + std : : ostream & print ( std : : ostream & o , unsigned indent ) override { <nl> + o < < " ( setlocal " ; <nl> + id . print ( o ) < < ' ) ' ; <nl> + return o ; <nl> + } <nl> } ; <nl> <nl> class SetLocal : public Expression { <nl> public : <nl> Var id ; <nl> Expression * value ; <nl> + <nl> + std : : ostream & print ( std : : ostream & o , unsigned indent ) override { <nl> + o < < " ( setlocal " ; <nl> + id . 
print ( o ) ; <nl> + incIndent ( o , indent ) ; <nl> + printFullLine ( o , indent , value ) ; <nl> + decIndent ( o , indent ) ; <nl> + return o ; <nl> + } <nl> } ; <nl> <nl> class Load : public Expression { <nl> class Load : public Expression { <nl> int offset ; <nl> unsigned align ; <nl> Expression * ptr ; <nl> + <nl> + std : : ostream & print ( std : : ostream & o , unsigned indent ) override { <nl> + o < < " ( load " < < bytes < < ' ' < < signed_ < < ' ' < < offset < < ' ' < < align ; <nl> + incIndent ( o , indent ) ; <nl> + printFullLine ( o , indent , ptr ) ; <nl> + decIndent ( o , indent ) ; <nl> + return o ; <nl> + } <nl> } ; <nl> <nl> class Store : public Expression { <nl> class Store : public Expression { <nl> int offset ; <nl> unsigned align ; <nl> Expression * ptr , * value ; <nl> + <nl> + std : : ostream & print ( std : : ostream & o , unsigned indent ) override { <nl> + o < < " ( load " < < bytes < < ' ' < < ' ' < < offset < < ' ' < < align ; <nl> + incIndent ( o , indent ) ; <nl> + printFullLine ( o , indent , ptr ) ; <nl> + printFullLine ( o , indent , value ) ; <nl> + decIndent ( o , indent ) ; <nl> + return o ; <nl> + } <nl> } ; <nl> <nl> class Const : public Expression { <nl> public : <nl> Literal value ; <nl> + <nl> + std : : ostream & print ( std : : ostream & o , unsigned indent ) override { <nl> + o < < " ( literal " ; <nl> + value . print ( o ) ; <nl> + o < < ' ) ' ; <nl> + } <nl> } ; <nl> <nl> class Unary : public Expression { <nl> class Binary : public Expression { <nl> public : <nl> BinaryOp op ; <nl> Expression * left , * right ; <nl> + <nl> + std : : ostream & print ( std : : ostream & o , unsigned indent ) override { <nl> + o < < " ( binary " ; <nl> + switch ( op ) { <nl> + case Add : o < < " add " ; break ; <nl> + case Sub : o < < " sub " ; break ; <nl> + case Mul : o < < " mul " ; break ; <nl> + case DivS : o < < " divs " ; break ; <nl> + case DivU : o < < " divu " ; break ; <nl> + case RemS : o < < " rems " ; break ; <nl> + case RemU : o < < " remu " ; break ; <nl> + case And : o < < " and " ; break ; <nl> + case Or : o < < " or " ; break ; <nl> + case Xor : o < < " xor " ; break ; <nl> + case Shl : o < < " shl " ; break ; <nl> + case ShrU : o < < " shru " ; break ; <nl> + case ShrS : o < < " shrs " ; break ; <nl> + case Div : o < < " div " ; break ; <nl> + case CopySign : o < < " copysign " ; break ; <nl> + case Min : o < < " min " ; break ; <nl> + case Max : o < < " max " ; break ; <nl> + default : abort ( ) ; <nl> + } <nl> + incIndent ( o , indent ) ; <nl> + printFullLine ( o , indent , left ) ; <nl> + printFullLine ( o , indent , right ) ; <nl> + decIndent ( o , indent ) ; <nl> + return o ; <nl> + } <nl> } ; <nl> <nl> class Compare : public Expression { <nl> public : <nl> RelationalOp op ; <nl> Expression * left , * right ; <nl> + <nl> + std : : ostream & print ( std : : ostream & o , unsigned indent ) override { <nl> + o < < " ( compare " ; <nl> + switch ( op ) { <nl> + case Eq : o < < " eq " ; break ; <nl> + case Ne : o < < " ne " ; break ; <nl> + case LtS : o < < " lts " ; break ; <nl> + case LtU : o < < " ltu " ; break ; <nl> + case LeS : o < < " les " ; break ; <nl> + case LeU : o < < " leu " ; break ; <nl> + case GtS : o < < " gts " ; break ; <nl> + case GtU : o < < " gtu " ; break ; <nl> + case GeS : o < < " ges " ; break ; <nl> + case GeU : o < < " geu " ; break ; <nl> + case Lt : o < < " lt " ; break ; <nl> + case Le : o < < " le " ; break ; <nl> + case Gt : o < < " gt " ; break ; <nl> + case Ge : o < < " ge " ; break ; <nl> + default : abort ( 
) ; <nl> + } <nl> + incIndent ( o , indent ) ; <nl> + printFullLine ( o , indent , left ) ; <nl> + printFullLine ( o , indent , right ) ; <nl> + decIndent ( o , indent ) ; <nl> + return o ; <nl> + } <nl> } ; <nl> <nl> class Convert : public Expression { <nl>
|
more printing
|
emscripten-core/emscripten
|
b6c38ea66ec18311ab1a0507c7df7f6dc087a523
|
2015-11-04T00:14:55Z
|
mmm a / hphp / runtime / base / variable - unserializer . cpp <nl> ppp b / hphp / runtime / base / variable - unserializer . cpp <nl> namespace HPHP { <nl> <nl> static void unserializeVariant ( Variant & , VariableUnserializer * unserializer , <nl> UnserializeMode mode = UnserializeMode : : Value ) ; <nl> - static void unserializeArray ( Array & , VariableUnserializer * ) ; <nl> + static Array unserializeArray ( VariableUnserializer * ) ; <nl> static String unserializeString ( VariableUnserializer * , char delimiter0 = ' " ' , <nl> char delimiter1 = ' " ' ) ; <nl> static void unserializeCollection ( ObjectData * obj , VariableUnserializer * uns , <nl> void unserializeVariant ( Variant & self , VariableUnserializer * uns , <nl> UnserializeMode mode / * = UnserializeMode : : Value * / ) { <nl> <nl> / / NOTE : If you make changes to how serialization and unserialization work , <nl> - / / make sure to update the reserialize ( ) method in " runtime / ext / ext_apc . cpp " <nl> - / / and to update test_apc_reserialize ( ) in " test / ext / test_ext_apc . cpp " . <nl> + / / make sure to update reserialize ( ) here and test_apc_reserialize ( ) <nl> + / / in " test / ext / test_ext_apc . cpp " . <nl> <nl> char type = uns - > readChar ( ) ; <nl> char sep = uns - > readChar ( ) ; <nl> void unserializeVariant ( Variant & self , VariableUnserializer * uns , <nl> { <nl> / / Check stack depth to avoid overflow . <nl> check_recursion_throw ( ) ; <nl> - auto v = Array : : Create ( ) ; <nl> - unserializeArray ( v , uns ) ; <nl> - tvMove ( make_tv < KindOfArray > ( v . detach ( ) ) , * self . asTypedValue ( ) ) ; <nl> + auto a = unserializeArray ( uns ) ; <nl> + tvMove ( make_tv < KindOfArray > ( a . detach ( ) ) , * self . asTypedValue ( ) ) ; <nl> } <nl> return ; / / array has ' } ' terminating <nl> case ' L ' : <nl> void unserializeVariant ( Variant & self , VariableUnserializer * uns , <nl> uns - > expectChar ( ' ; ' ) ; <nl> } <nl> <nl> - void unserializeArray ( Array & arr , VariableUnserializer * uns ) { <nl> + Array unserializeArray ( VariableUnserializer * uns ) { <nl> int64_t size = uns - > readInt ( ) ; <nl> uns - > expectChar ( ' : ' ) ; <nl> uns - > expectChar ( ' { ' ) ; <nl> - <nl> if ( size = = 0 ) { <nl> - arr = Array : : Create ( ) ; <nl> - } else { <nl> - if ( UNLIKELY ( size < 0 | | size > std : : numeric_limits < int > : : max ( ) ) ) { <nl> - throwArraySizeOutOfBounds ( ) ; <nl> - } <nl> - auto const scale = computeScaleFromSize ( size ) ; <nl> - auto const allocsz = computeAllocBytes ( scale ) ; <nl> + uns - > expectChar ( ' } ' ) ; <nl> + return Array : : Create ( ) ; / / static empty array <nl> + } <nl> + if ( UNLIKELY ( size < 0 | | size > std : : numeric_limits < int > : : max ( ) ) ) { <nl> + throwArraySizeOutOfBounds ( ) ; <nl> + } <nl> + auto const scale = computeScaleFromSize ( size ) ; <nl> + auto const allocsz = computeAllocBytes ( scale ) ; <nl> <nl> - / / For large arrays , do a naive pre - check for OOM . <nl> - if ( UNLIKELY ( allocsz > kMaxSmallSize & & MM ( ) . preAllocOOM ( allocsz ) ) ) { <nl> - check_request_surprise_unlikely ( ) ; <nl> - } <nl> + / / For large arrays , do a naive pre - check for OOM . <nl> + if ( UNLIKELY ( allocsz > kMaxSmallSize & & MM ( ) . preAllocOOM ( allocsz ) ) ) { <nl> + check_request_surprise_unlikely ( ) ; <nl> + } <nl> <nl> - / / Pre - allocate an ArrayData of the given size , to avoid escalation in the <nl> - / / middle , which breaks references . <nl> - arr = ArrayInit ( size , ArrayInit : : Mixed { } ) . 
toArray ( ) ; <nl> - for ( int64_t i = 0 ; i < size ; i + + ) { <nl> - Variant key ; <nl> - unserializeVariant ( key , uns , UnserializeMode : : Key ) ; <nl> - if ( ! key . isString ( ) & & ! key . isInteger ( ) ) { <nl> - throwInvalidKey ( ) ; <nl> - } <nl> - / / for apc , we know the key can ' t exist , but ignore that optimization <nl> - assert ( uns - > type ( ) ! = VariableUnserializer : : Type : : APCSerialize | | <nl> - ! arr . exists ( key , true ) ) ; <nl> + / / Pre - allocate an ArrayData of the given size , to avoid escalation in the <nl> + / / middle , which breaks references . <nl> + Array arr = ArrayInit ( size , ArrayInit : : Mixed { } ) . toArray ( ) ; <nl> + for ( int64_t i = 0 ; i < size ; i + + ) { <nl> + Variant key ; <nl> + unserializeVariant ( key , uns , UnserializeMode : : Key ) ; <nl> + if ( ! key . isString ( ) & & ! key . isInteger ( ) ) { <nl> + throwInvalidKey ( ) ; <nl> + } <nl> + / / for apc , we know the key can ' t exist , but ignore that optimization <nl> + assert ( uns - > type ( ) ! = VariableUnserializer : : Type : : APCSerialize | | <nl> + ! arr . exists ( key , true ) ) ; <nl> <nl> - Variant & value = arr . lvalAt ( key , AccessFlags : : Key ) ; <nl> - if ( UNLIKELY ( isRefcountedType ( value . getRawType ( ) ) ) ) { <nl> - uns - > putInOverwrittenList ( value ) ; <nl> - } <nl> - unserializeVariant ( value , uns ) ; <nl> + Variant & value = arr . lvalAt ( key , AccessFlags : : Key ) ; <nl> + if ( UNLIKELY ( isRefcountedType ( value . getRawType ( ) ) ) ) { <nl> + uns - > putInOverwrittenList ( value ) ; <nl> + } <nl> + unserializeVariant ( value , uns ) ; <nl> <nl> - if ( i < ( size - 1 ) ) { <nl> - auto lastChar = uns - > peekBack ( ) ; <nl> - if ( ( lastChar ! = ' ; ' & & lastChar ! = ' } ' ) ) { <nl> - throwUnterminatedElement ( ) ; <nl> - } <nl> + if ( i < ( size - 1 ) ) { <nl> + auto lastChar = uns - > peekBack ( ) ; <nl> + if ( ( lastChar ! = ' ; ' & & lastChar ! = ' } ' ) ) { <nl> + throwUnterminatedElement ( ) ; <nl> } <nl> } <nl> } <nl> - <nl> check_request_surprise_unlikely ( ) ; <nl> - <nl> uns - > expectChar ( ' } ' ) ; <nl> + return arr ; <nl> } <nl> <nl> static <nl>
|
have unserializeArray return the array
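The refactor above has unserializeArray build the array locally and return it, instead of filling a caller-supplied Array&. A minimal generic C++ sketch of that shape, with std::vector standing in for HPHP::Array; copy elision / move makes the hand-off free and the caller no longer pre-constructs an empty container.

#include <vector>

// Before: void buildSquares(std::vector<int>& out, int n);
// After:  build locally and return by value; NRVO or a move avoids an extra
//         copy, and there is no default-construct-then-fill at the call site.
std::vector<int> buildSquares(int n) {
  std::vector<int> out;
  out.reserve(n);
  for (int i = 0; i < n; ++i) out.push_back(i * i);
  return out;
}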
|
facebook/hhvm
|
688cebbb0ab506386c22e2658ae5f29eb26e9b35
|
2015-08-26T16:30:27Z
|
mmm a / RELEASE . md <nl> ppp b / RELEASE . md <nl> <nl> acceleration . Known limitations include : It is not currently possible to load <nl> a custom op library . The GCS and HDFS file systems are not currently <nl> supported . The following ops are not currently implemented : <nl> - DepthwiseConv2dNative , DepthwiseConv2dNativeBackpropFilter , <nl> - DepthwiseConv2dNativeBackpropInput , Dequantize , Digamma , Erf , Erfc , Igamma , <nl> - Igammac , Lgamma , Polygamma , QuantizeAndDequantize , QuantizedAvgPool , <nl> + Dequantize , QuantizeAndDequantize , QuantizedAvgPool , <nl> QuantizedBatchNomWithGlobalNormalization , QuantizedBiasAdd , QuantizedConcat , <nl> QuantizedConv2D , QuantizedMatmul , QuantizedMaxPool , <nl> QuantizeDownAndShrinkRange , QuantizedRelu , QuantizedRelu6 , QuantizedReshape , <nl> mmm a / tensorflow / contrib / cmake / tf_tests . cmake <nl> ppp b / tensorflow / contrib / cmake / tf_tests . cmake <nl> if ( tensorflow_BUILD_PYTHON_TESTS ) <nl> " $ { tensorflow_source_dir } / tensorflow / python / kernel_tests / string_to_number_op_test . py " <nl> " $ { tensorflow_source_dir } / tensorflow / python / kernel_tests / clip_ops_test . py " <nl> # misc <nl> - " $ { tensorflow_source_dir } / tensorflow / python / kernel_tests / cwise_ops_test . py " <nl> " $ { tensorflow_source_dir } / tensorflow / python / kernel_tests / variable_scope_test . py " <nl> " $ { tensorflow_source_dir } / tensorflow / python / kernel_tests / reshape_op_test . py " <nl> " $ { tensorflow_source_dir } / tensorflow / tensorboard / backend / server_test . py " <nl> mmm a / tensorflow / core / kernels / cwise_op_digamma . cc <nl> ppp b / tensorflow / core / kernels / cwise_op_digamma . cc <nl> limitations under the License . <nl> # include " tensorflow / core / kernels / cwise_ops_common . h " <nl> <nl> namespace tensorflow { <nl> - # if EIGEN_HAS_C99_MATH <nl> REGISTER3 ( UnaryOp , CPU , " Digamma " , functor : : digamma , float , Eigen : : half , <nl> double ) ; <nl> # if GOOGLE_CUDA <nl> REGISTER3 ( UnaryOp , GPU , " Digamma " , functor : : digamma , float , Eigen : : half , <nl> double ) ; <nl> # endif / / GOOGLE_CUDA <nl> - # endif / / EIGEN_HAS_C99_MATH <nl> } / / namespace tensorflow <nl> mmm a / tensorflow / core / kernels / cwise_op_erf . cc <nl> ppp b / tensorflow / core / kernels / cwise_op_erf . cc <nl> limitations under the License . <nl> # include " tensorflow / core / kernels / cwise_ops_common . h " <nl> <nl> namespace tensorflow { <nl> - # if EIGEN_HAS_C99_MATH <nl> REGISTER3 ( UnaryOp , CPU , " Erf " , functor : : erf , float , Eigen : : half , double ) ; <nl> # if GOOGLE_CUDA <nl> REGISTER3 ( UnaryOp , GPU , " Erf " , functor : : erf , float , Eigen : : half , double ) ; <nl> # endif / / GOOGLE_CUDA <nl> - # endif / / EIGEN_HAS_C99_MATH <nl> } / / namespace tensorflow <nl> mmm a / tensorflow / core / kernels / cwise_op_erfc . cc <nl> ppp b / tensorflow / core / kernels / cwise_op_erfc . cc <nl> limitations under the License . <nl> # include " tensorflow / core / kernels / cwise_ops_common . h " <nl> <nl> namespace tensorflow { <nl> - # if EIGEN_HAS_C99_MATH <nl> REGISTER3 ( UnaryOp , CPU , " Erfc " , functor : : erfc , float , Eigen : : half , double ) ; <nl> # if GOOGLE_CUDA <nl> REGISTER3 ( UnaryOp , GPU , " Erfc " , functor : : erfc , float , Eigen : : half , double ) ; <nl> # endif / / GOOGLE_CUDA <nl> - # endif / / EIGEN_HAS_C99_MATH <nl> } / / namespace tensorflow <nl> mmm a / tensorflow / core / kernels / cwise_op_igammas . 
cc <nl> ppp b / tensorflow / core / kernels / cwise_op_igammas . cc <nl> limitations under the License . <nl> # include " tensorflow / core / kernels / cwise_ops_common . h " <nl> <nl> namespace tensorflow { <nl> - # if EIGEN_HAS_C99_MATH <nl> REGISTER2 ( BinaryOp , CPU , " Igamma " , functor : : igamma , float , double ) ; <nl> REGISTER2 ( BinaryOp , CPU , " Igammac " , functor : : igammac , float , double ) ; <nl> - # endif / / EIGEN_HAS_C99_MATH <nl> } / / namespace tensorflow <nl> mmm a / tensorflow / core / kernels / cwise_op_lgamma . cc <nl> ppp b / tensorflow / core / kernels / cwise_op_lgamma . cc <nl> limitations under the License . <nl> # include " tensorflow / core / kernels / cwise_ops_common . h " <nl> <nl> namespace tensorflow { <nl> - # if EIGEN_HAS_C99_MATH <nl> REGISTER3 ( UnaryOp , CPU , " Lgamma " , functor : : lgamma , float , Eigen : : half , double ) ; <nl> # if GOOGLE_CUDA <nl> REGISTER3 ( UnaryOp , GPU , " Lgamma " , functor : : lgamma , float , Eigen : : half , double ) ; <nl> # endif <nl> - # endif / / EIGEN_HAS_C99_MATH <nl> <nl> } / / namespace tensorflow <nl> mmm a / tensorflow / core / kernels / cwise_op_zeta . cc <nl> ppp b / tensorflow / core / kernels / cwise_op_zeta . cc <nl> limitations under the License . <nl> <nl> namespace tensorflow { <nl> REGISTER2 ( BinaryOp , CPU , " Zeta " , functor : : zeta , float , double ) ; <nl> - # if EIGEN_HAS_C99_MATH <nl> REGISTER2 ( BinaryOp , CPU , " Polygamma " , functor : : polygamma , float , double ) ; <nl> - # endif / / EIGEN_HAS_C99_MATH <nl> } / / namespace tensorflow <nl>
|
[ Windows ] Re - enable the special function ops .
|
tensorflow/tensorflow
|
62a4578b6bf9e6830a59fed0e4e45768be93b6da
|
2016-11-29T23:39:21Z
|
mmm a / contrib / openssl - cmake / CMakeLists . txt <nl> ppp b / contrib / openssl - cmake / CMakeLists . txt <nl> set ( OPENSSL_MODULESDIR " / usr / local / lib / ossl - modules " CACHE PATH " Set the default <nl> add_definitions ( - DOPENSSLDIR = " $ { OPENSSLDIR } " - DENGINESDIR = " $ { OPENSSL_ENGINESDIR } " - DMODULESDIR = " $ { OPENSSL_MODULESDIR } " ) <nl> <nl> if ( ARCH_AMD64 ) <nl> - add_definitions ( - DAES_ASM - DBSAES_ASM - DECP_NISTZ256_ASM - DGHASH_ASM - DKECCAK1600_ASM - DMD5_ASM - DOPENSSL_BN_ASM_GF2m - DOPENSSL_BN_ASM_MONT - DOPENSSL_BN_ASM_MONT5 - DOPENSSL_CPUID_OBJ - DOPENSSL_IA32_SSE2 - DPOLY1305_ASM - DSHA1_ASM - DSHA256_ASM - DSHA512_ASM - DVPAES_ASM - DWHIRLPOOL_ASM - DX25519_ASM - DOPENSSL_USE_NODELETE - DL_ENDIAN ) <nl> + # - DSHA256_ASM was removed because it doesn ' t work with asynchronous unwind ( query profiler ) for unknown reason . <nl> + add_definitions ( - DAES_ASM - DBSAES_ASM - DECP_NISTZ256_ASM - DGHASH_ASM - DKECCAK1600_ASM - DMD5_ASM - DOPENSSL_BN_ASM_GF2m - DOPENSSL_BN_ASM_MONT - DOPENSSL_BN_ASM_MONT5 - DOPENSSL_CPUID_OBJ - DOPENSSL_IA32_SSE2 - DPOLY1305_ASM - DSHA1_ASM - DSHA512_ASM - DVPAES_ASM - DWHIRLPOOL_ASM - DX25519_ASM - DOPENSSL_USE_NODELETE - DL_ENDIAN ) <nl> elseif ( ARCH_AARCH64 ) <nl> add_definitions ( - DECP_NISTZ256_ASM - DKECCAK1600_ASM - DOPENSSL_BN_ASM_MONT - DOPENSSL_CPUID_OBJ - DPOLY1305_ASM - DSHA1_ASM - DSHA256_ASM - DSHA512_ASM - DVPAES_ASM - DOPENSSL_USE_NODELETE - DL_ENDIAN ) <nl> endif ( ) <nl> if ( ARCH_AMD64 ) <nl> $ { OPENSSL_BINARY_DIR } / crypto / sha / sha1 - mb - x86_64 . s <nl> $ { OPENSSL_BINARY_DIR } / crypto / sha / sha1 - x86_64 . s <nl> $ { OPENSSL_BINARY_DIR } / crypto / sha / sha256 - mb - x86_64 . s <nl> - $ { OPENSSL_BINARY_DIR } / crypto / sha / sha256 - x86_64 . s <nl> + <nl> + # $ { OPENSSL_BINARY_DIR } / crypto / sha / sha256 - x86_64 . s <nl> + $ { OPENSSL_SOURCE_DIR } / crypto / sha / sha256 . c <nl> + <nl> $ { OPENSSL_BINARY_DIR } / crypto / sha / sha512 - x86_64 . s <nl> $ { OPENSSL_BINARY_DIR } / crypto / whrlpool / wp - x86_64 . s ) <nl> elseif ( ARCH_AARCH64 ) <nl>
|
Fixed performance test
|
ClickHouse/ClickHouse
|
49fd9c41d0269aa75ff40992f60a4a253bc9b852
|
2019-12-16T01:21:09Z
|
mmm a / src / objects / backing - store . cc <nl> ppp b / src / objects / backing - store . cc <nl> std : : unique_ptr < BackingStore > BackingStore : : AllocateWasmMemory ( <nl> / / Enforce engine limitation on the maximum number of pages . <nl> if ( initial_pages > wasm : : kV8MaxWasmMemoryPages ) return nullptr ; <nl> <nl> + / / Trying to allocate 4 GiB on a 32 - bit platform is guaranteed to fail . <nl> + / / We don ' t lower the official max_maximum_mem_pages ( ) limit because that <nl> + / / would be observable upon instantiation ; this way the effective limit <nl> + / / on 32 - bit platforms is defined by the allocator . <nl> + constexpr size_t kPlatformMax = <nl> + std : : numeric_limits < size_t > : : max ( ) / wasm : : kWasmPageSize ; <nl> + if ( initial_pages > kPlatformMax ) return nullptr ; <nl> + <nl> auto backing_store = <nl> TryAllocateWasmMemory ( isolate , initial_pages , maximum_pages , shared ) ; <nl> if ( ! backing_store & & maximum_pages > initial_pages ) { <nl> std : : unique_ptr < BackingStore > BackingStore : : AllocateWasmMemory ( <nl> <nl> std : : unique_ptr < BackingStore > BackingStore : : CopyWasmMemory ( Isolate * isolate , <nl> size_t new_pages ) { <nl> - / / Trying to allocate 4 GiB on a 32 - bit platform is guaranteed to fail . <nl> - / / We don ' t lower the official max_maximum_mem_pages ( ) limit because that <nl> - / / would be observable upon instantiation ; this way the effective limit <nl> - / / on 32 - bit platforms is defined by the allocator . <nl> - if ( new_pages > std : : numeric_limits < size_t > : : max ( ) / wasm : : kWasmPageSize ) { <nl> - return { } ; <nl> - } <nl> - DCHECK_GE ( new_pages * wasm : : kWasmPageSize , byte_length_ ) ; <nl> / / Note that we could allocate uninitialized to save initialization cost here , <nl> / / but since Wasm memories are allocated by the page allocator , the zeroing <nl> / / cost is already built - in . <nl> std : : unique_ptr < BackingStore > BackingStore : : CopyWasmMemory ( Isolate * isolate , <nl> } <nl> <nl> if ( byte_length_ > 0 ) { <nl> + / / If the allocation was successful , then the new buffer must be at least <nl> + / / as big as the old one . <nl> + DCHECK_GE ( new_pages * wasm : : kWasmPageSize , byte_length_ ) ; <nl> memcpy ( new_backing_store - > buffer_start ( ) , buffer_start_ , byte_length_ ) ; <nl> } <nl> <nl> new file mode 100644 <nl> index 00000000000 . . dc1703178ce <nl> mmm / dev / null <nl> ppp b / test / mjsunit / regress / wasm / regress - crbug - 1057094 . js <nl> <nl> + / / Copyright 2020 the V8 project authors . All rights reserved . <nl> + / / Use of this source code is governed by a BSD - style license that can be <nl> + / / found in the LICENSE file . <nl> + <nl> + / / Flags : - - wasm - max - mem - pages = 65536 <nl> + <nl> + try { <nl> + var __v_50189 = new WebAssembly . Memory ( { <nl> + initial : 65536 <nl> + } ) ; <nl> + } catch ( e ) { <nl> + / / 32 - bit builds will throw a RangeError , that ' s okay . <nl> + assertTrue ( e instanceof RangeError ) ; <nl> + } <nl>
|
[ wasm ] Fix memory limit check with custom flags
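The commit above moves the 32-bit overflow guard from CopyWasmMemory into AllocateWasmMemory, so oversized page counts are rejected before the allocator runs. A minimal sketch of the guard, assuming the spec's 64 KiB page size; the constant and function names are illustrative, not V8's.

#include <cstddef>
#include <limits>

constexpr size_t kWasmPageSize = 64 * 1024;  // 64 KiB pages, per the Wasm spec

// True when initial_pages * kWasmPageSize is representable in size_t.
// On a 32-bit target this rejects a 4 GiB (65536-page) request up front.
bool FitsPlatformLimit(size_t initial_pages) {
  constexpr size_t kPlatformMax =
      std::numeric_limits<size_t>::max() / kWasmPageSize;
  return initial_pages <= kPlatformMax;
}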
|
v8/v8
|
27538aa3610a2fd2496409053fd1ee945a68cd1a
|
2020-03-03T16:17:24Z
|
mmm a / include / swift / AST / PrettyStackTrace . h <nl> ppp b / include / swift / AST / PrettyStackTrace . h <nl> class PrettyStackTraceSelector : public llvm : : PrettyStackTraceEntry { <nl> void print ( llvm : : raw_ostream & OS ) const override ; <nl> } ; <nl> <nl> + / / / PrettyStackTraceDifferentiabilityWitness - Observe that we are processing a <nl> + / / / specific differentiability witness . <nl> + class PrettyStackTraceDifferentiabilityWitness <nl> + : public llvm : : PrettyStackTraceEntry { <nl> + const SILDifferentiabilityWitnessKey Key ; <nl> + const char * Action ; <nl> + <nl> + public : <nl> + PrettyStackTraceDifferentiabilityWitness ( <nl> + const char * action , const SILDifferentiabilityWitnessKey key ) <nl> + : Key ( key ) , Action ( action ) { } <nl> + virtual void print ( llvm : : raw_ostream & OS ) const ; <nl> + } ; <nl> + <nl> + void printDifferentiabilityWitnessDescription ( <nl> + llvm : : raw_ostream & out , const SILDifferentiabilityWitnessKey key , <nl> + bool addNewline = true ) ; <nl> + <nl> } / / end namespace swift <nl> <nl> # endif <nl> mmm a / include / swift / IRGen / Linking . h <nl> ppp b / include / swift / IRGen / Linking . h <nl> class LinkEntity { <nl> / / / ProtocolConformance * . <nl> ProtocolWitnessTableLazyCacheVariable , <nl> <nl> + / / / A SIL differentiability witness . <nl> + DifferentiabilityWitness , <nl> + <nl> / / Everything following this is a type kind . <nl> <nl> / / / A value witness for a type . <nl> class LinkEntity { <nl> return getAssociatedConformanceByIndex ( conformance - > getProtocol ( ) , index ) ; <nl> } <nl> <nl> + void <nl> + setForDifferentiabilityWitness ( Kind kind , <nl> + const SILDifferentiabilityWitness * witness ) { <nl> + Pointer = const_cast < void * > ( static_cast < const void * > ( witness ) ) ; <nl> + SecondaryPointer = nullptr ; <nl> + Data = LINKENTITY_SET_FIELD ( Kind , unsigned ( kind ) ) ; <nl> + } <nl> + <nl> void setForType ( Kind kind , CanType type ) { <nl> assert ( isTypeKind ( kind ) ) ; <nl> Pointer = type . getPointer ( ) ; <nl> class LinkEntity { <nl> return entity ; <nl> } <nl> <nl> + static LinkEntity <nl> + forDifferentiabilityWitness ( const SILDifferentiabilityWitness * witness ) { <nl> + LinkEntity entity ; <nl> + entity . setForDifferentiabilityWitness ( Kind : : DifferentiabilityWitness , <nl> + witness ) ; <nl> + return entity ; <nl> + } <nl> + <nl> static LinkEntity forProtocolWitnessTable ( const RootProtocolConformance * C ) { <nl> LinkEntity entity ; <nl> entity . setForProtocolConformance ( Kind : : ProtocolWitnessTable , C ) ; <nl> class LinkEntity { <nl> return reinterpret_cast < SILGlobalVariable * > ( Pointer ) ; <nl> } <nl> <nl> + SILDifferentiabilityWitness * getSILDifferentiabilityWitness ( ) const { <nl> + assert ( getKind ( ) = = Kind : : DifferentiabilityWitness ) ; <nl> + return reinterpret_cast < SILDifferentiabilityWitness * > ( Pointer ) ; <nl> + } <nl> + <nl> const RootProtocolConformance * getRootProtocolConformance ( ) const { <nl> assert ( isRootProtocolConformanceKind ( getKind ( ) ) ) ; <nl> return cast < RootProtocolConformance > ( getProtocolConformance ( ) ) ; <nl> mmm a / lib / AST / AutoDiff . cpp <nl> ppp b / lib / AST / AutoDiff . 
cpp <nl> <nl> <nl> using namespace swift ; <nl> <nl> + void AutoDiffConfig : : print ( llvm : : raw_ostream & s ) const { <nl> + s < < " ( parameters = " ; <nl> + parameterIndices - > print ( s ) ; <nl> + s < < " results = " ; <nl> + resultIndices - > print ( s ) ; <nl> + if ( derivativeGenericSignature ) { <nl> + s < < " where = " ; <nl> + derivativeGenericSignature - > print ( s ) ; <nl> + } <nl> + s < < ' ) ' ; <nl> + } <nl> + <nl> / / TODO ( TF - 874 ) : This helper is inefficient and should be removed . Unwrapping at <nl> / / most once ( for curried method types ) is sufficient . <nl> static void unwrapCurryLevels ( AnyFunctionType * fnTy , <nl> mmm a / lib / AST / PrettyStackTrace . cpp <nl> ppp b / lib / AST / PrettyStackTrace . cpp <nl> void PrettyStackTraceGenericSignature : : print ( llvm : : raw_ostream & out ) const { <nl> void PrettyStackTraceSelector : : print ( llvm : : raw_ostream & out ) const { <nl> out < < " While " < < Action < < " ' " < < Selector < < " ' " ; <nl> } <nl> + <nl> + void PrettyStackTraceDifferentiabilityWitness : : print ( <nl> + llvm : : raw_ostream & out ) const { <nl> + out < < " While " < < Action < < ' ' ; <nl> + printDifferentiabilityWitnessDescription ( out , Key ) ; <nl> + } <nl> + <nl> + void swift : : printDifferentiabilityWitnessDescription ( <nl> + llvm : : raw_ostream & out , const SILDifferentiabilityWitnessKey key , <nl> + bool addNewline ) { <nl> + out < < key . first < < " " ; <nl> + key . second . print ( out ) ; <nl> + if ( addNewline ) <nl> + out < < ' \ n ' ; <nl> + } <nl> mmm a / lib / IRGen / CMakeLists . txt <nl> ppp b / lib / IRGen / CMakeLists . txt <nl> add_swift_host_library ( swiftIRGen STATIC <nl> GenControl . cpp <nl> GenCoverage . cpp <nl> GenDecl . cpp <nl> + GenDiffWitness . cpp <nl> GenEnum . cpp <nl> GenExistential . cpp <nl> GenFunc . cpp <nl> mmm a / lib / IRGen / GenDecl . cpp <nl> ppp b / lib / IRGen / GenDecl . cpp <nl> void IRGenerator : : emitGlobalTopLevel ( llvm : : StringSet < > * linkerDirectives ) { <nl> CurrentIGMPtr IGM = getGenModule ( prop . getDecl ( ) - > getInnermostDeclContext ( ) ) ; <nl> IGM - > emitSILProperty ( & prop ) ; <nl> } <nl> - <nl> + <nl> + / / Emit differentiability witnesses . <nl> + for ( auto & dw : <nl> + PrimaryIGM - > getSILModule ( ) . getDifferentiabilityWitnessList ( ) ) { <nl> + / / Emit into same IRGenModule as the original function . <nl> + / / NOTE ( TF - 894 ) : Investigate whether ` getGenModule ( dw . getVJP ( ) ) ` is <nl> + / / significant / desirable ; ` getGenModule ` seems relevant for multi - threaded <nl> + / / compilation . When the differentiation transform canonicalizes all <nl> + / / differentiability witnesses to have JVP / VJP functions , we can assert <nl> + / / that JVP / VJP functions exist and use ` getGenModule ( dw . getVJP ( ) ) ` . <nl> + CurrentIGMPtr IGM = getGenModule ( dw . getOriginalFunction ( ) ) ; <nl> + <nl> + IGM - > emitSILDifferentiabilityWitness ( & dw ) ; <nl> + } <nl> + <nl> / / Emit code coverage mapping data . <nl> PrimaryIGM - > emitCoverageMapping ( ) ; <nl> <nl> IRGenModule : : getAddrOfWitnessTablePattern ( const NormalProtocolConformance * conf , <nl> return getAddrOfLLVMVariable ( entity , definition , DebugTypeInfo ( ) ) ; <nl> } <nl> <nl> + / / / Look up the address of a differentiability witness . 
<nl> + llvm : : Constant * IRGenModule : : getAddrOfDifferentiabilityWitness ( <nl> + const SILDifferentiabilityWitness * witness , ConstantInit definition ) { <nl> + auto entity = LinkEntity : : forDifferentiabilityWitness ( witness ) ; <nl> + return getAddrOfLLVMVariable ( entity , definition , DebugTypeInfo ( ) ) ; <nl> + } <nl> + <nl> llvm : : Function * <nl> IRGenModule : : getAddrOfAssociatedTypeWitnessTableAccessFunction ( <nl> const NormalProtocolConformance * conformance , <nl> new file mode 100644 <nl> index 000000000000 . . f27a5861a884 <nl> mmm / dev / null <nl> ppp b / lib / IRGen / GenDiffWitness . cpp <nl> <nl> + / / = = = mmm GenDiffWitness . cpp - IRGen for differentiability witnesses mmmmmm - = = = / / <nl> + / / <nl> + / / This source file is part of the Swift . org open source project <nl> + / / <nl> + / / Copyright ( c ) 2020 Apple Inc . and the Swift project authors <nl> + / / Licensed under Apache License v2 . 0 with Runtime Library Exception <nl> + / / <nl> + / / See https : / / swift . org / LICENSE . txt for license information <nl> + / / See https : / / swift . org / CONTRIBUTORS . txt for the list of Swift project authors <nl> + / / <nl> + / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> + / / <nl> + / / This file implements IR generation for SIL differentiability witnesses . <nl> + / / <nl> + / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> + <nl> + # include " swift / AST / PrettyStackTrace . h " <nl> + # include " swift / SIL / SILDifferentiabilityWitness . h " <nl> + <nl> + # include " ConstantBuilder . h " <nl> + # include " IRGenModule . h " <nl> + <nl> + using namespace swift ; <nl> + using namespace irgen ; <nl> + <nl> + void IRGenModule : : emitSILDifferentiabilityWitness ( <nl> + SILDifferentiabilityWitness * dw ) { <nl> + PrettyStackTraceDifferentiabilityWitness _st ( <nl> + " emitting differentiability witness for " , dw - > getKey ( ) ) ; <nl> + <nl> + / / Don ' t emit declarations . <nl> + if ( dw - > isDeclaration ( ) ) <nl> + return ; <nl> + <nl> + / / Don ' t emit ` public_external ` witnesses . <nl> + if ( dw - > getLinkage ( ) = = SILLinkage : : PublicExternal ) <nl> + return ; <nl> + <nl> + ConstantInitBuilder builder ( * this ) ; <nl> + auto diffWitnessContents = builder . beginStruct ( ) ; <nl> + <nl> + assert ( dw - > getJVP ( ) & & <nl> + " Differentiability witness definition should have JVP " ) ; <nl> + assert ( dw - > getVJP ( ) & & <nl> + " Differentiability witness definition should have VJP " ) ; <nl> + <nl> + diffWitnessContents . addBitCast ( <nl> + getAddrOfSILFunction ( dw - > getJVP ( ) , NotForDefinition ) , Int8PtrTy ) ; <nl> + diffWitnessContents . addBitCast ( <nl> + getAddrOfSILFunction ( dw - > getVJP ( ) , NotForDefinition ) , Int8PtrTy ) ; <nl> + <nl> + getAddrOfDifferentiabilityWitness ( <nl> + dw , diffWitnessContents . finishAndCreateFuture ( ) ) ; <nl> + } <nl> mmm a / lib / IRGen / IRGenModule . cpp <nl> ppp b / lib / IRGen / IRGenModule . cpp <nl> IRGenModule : : IRGenModule ( IRGenerator & irgen , <nl> <nl> DynamicReplacementKeyTy = createStructType ( * this , " swift . dyn_repl_key " , <nl> { RelativeAddressTy , Int32Ty } ) ; <nl> + <nl> + DifferentiabilityWitnessTy = createStructType ( <nl> + * this , " swift . differentiability_witness " , { Int8PtrTy , Int8PtrTy } ) ; <nl> } <nl> <nl> IRGenModule : : ~ IRGenModule ( ) { <nl> mmm a / lib / IRGen / IRGenModule . h <nl> ppp b / lib / IRGen / IRGenModule . 
h <nl> namespace swift { <nl> class RootProtocolConformance ; <nl> struct SILDeclRef ; <nl> class SILDefaultWitnessTable ; <nl> + class SILDifferentiabilityWitness ; <nl> class SILGlobalVariable ; <nl> class SILModule ; <nl> class SILProperty ; <nl> class IRGenModule { <nl> * DynamicReplacementLinkEntryPtrTy ; / / % link_entry * <nl> llvm : : StructType * DynamicReplacementKeyTy ; / / { i32 , i32 } <nl> <nl> + llvm : : StructType * DifferentiabilityWitnessTy ; / / { i8 * , i8 * } <nl> + <nl> llvm : : GlobalVariable * TheTrivialPropertyDescriptor = nullptr ; <nl> <nl> / / / Used to create unique names for class layout types with tail allocated <nl> private : \ <nl> void emitSILFunction ( SILFunction * f ) ; <nl> void emitSILWitnessTable ( SILWitnessTable * wt ) ; <nl> void emitSILProperty ( SILProperty * prop ) ; <nl> + void emitSILDifferentiabilityWitness ( SILDifferentiabilityWitness * dw ) ; <nl> void emitSILStaticInitializers ( ) ; <nl> llvm : : Constant * emitFixedTypeLayout ( CanType t , const FixedTypeInfo & ti ) ; <nl> void emitProtocolConformance ( const ConformanceDescription & record ) ; <nl> private : \ <nl> llvm : : Function * getAddrOfDefaultAssociatedConformanceAccessor ( <nl> AssociatedConformance requirement ) ; <nl> <nl> + llvm : : Constant * <nl> + getAddrOfDifferentiabilityWitness ( const SILDifferentiabilityWitness * witness , <nl> + ConstantInit definition = ConstantInit ( ) ) ; <nl> + <nl> Address getAddrOfObjCISAMask ( ) ; <nl> <nl> / / / Retrieve the generic signature for the current generic context , or null if no <nl> mmm a / lib / IRGen / Linking . cpp <nl> ppp b / lib / IRGen / Linking . cpp <nl> std : : string LinkEntity : : mangleAsString ( ) const { <nl> case Kind : : ReflectionAssociatedTypeDescriptor : <nl> return mangler . mangleReflectionAssociatedTypeDescriptor ( <nl> getProtocolConformance ( ) ) ; <nl> + case Kind : : DifferentiabilityWitness : <nl> + return mangler . mangleSILDifferentiabilityWitnessKey ( <nl> + { getSILDifferentiabilityWitness ( ) - > getOriginalFunction ( ) - > getName ( ) , <nl> + getSILDifferentiabilityWitness ( ) - > getConfig ( ) } ) ; <nl> } <nl> llvm_unreachable ( " bad entity kind ! " ) ; <nl> } <nl> SILLinkage LinkEntity : : getLinkage ( ForDefinition_t forDefinition ) const { <nl> case Kind : : ExtensionDescriptor : <nl> case Kind : : AnonymousDescriptor : <nl> return SILLinkage : : Shared ; <nl> + case Kind : : DifferentiabilityWitness : <nl> + return getSILDifferentiabilityWitness ( ) - > getLinkage ( ) ; <nl> } <nl> llvm_unreachable ( " bad link entity kind " ) ; <nl> } <nl> bool LinkEntity : : isAvailableExternally ( IRGenModule & IGM ) const { <nl> - > getDeclContext ( ) <nl> - > getInnermostTypeContext ( ) ) ; <nl> } <nl> + <nl> + case Kind : : DifferentiabilityWitness : <nl> + return true ; <nl> <nl> case Kind : : ObjCMetadataUpdateFunction : <nl> case Kind : : ObjCResilientClassStub : <nl> llvm : : Type * LinkEntity : : getDefaultDeclarationType ( IRGenModule & IGM ) const { <nl> return IGM . ObjCResilientClassStubTy ; <nl> } <nl> llvm_unreachable ( " invalid metadata address " ) ; <nl> + case Kind : : DifferentiabilityWitness : <nl> + return IGM . 
DifferentiabilityWitnessTy ; <nl> default : <nl> llvm_unreachable ( " declaration LLVM type not specified " ) ; <nl> } <nl> Alignment LinkEntity : : getAlignment ( IRGenModule & IGM ) const { <nl> case Kind : : OpaqueTypeDescriptorAccessorKey : <nl> case Kind : : OpaqueTypeDescriptorAccessorVar : <nl> case Kind : : ObjCResilientClassStub : <nl> + case Kind : : DifferentiabilityWitness : <nl> return IGM . getPointerAlignment ( ) ; <nl> case Kind : : TypeMetadataDemanglingCacheVariable : <nl> return Alignment ( 8 ) ; <nl> bool LinkEntity : : isWeakImported ( ModuleDecl * module ) const { <nl> case Kind : : ReflectionBuiltinDescriptor : <nl> case Kind : : ReflectionFieldDescriptor : <nl> case Kind : : CoroutineContinuationPrototype : <nl> + case Kind : : DifferentiabilityWitness : <nl> return false ; <nl> } <nl> <nl> const SourceFile * LinkEntity : : getSourceFileForEmission ( ) const { <nl> case Kind : : ReflectionBuiltinDescriptor : <nl> case Kind : : ValueWitness : <nl> case Kind : : ValueWitnessTable : <nl> + case Kind : : DifferentiabilityWitness : <nl> return nullptr ; <nl> } <nl> <nl> mmm a / test / AutoDiff / SIL / Serialization / sil_differentiability_witness . sil <nl> ppp b / test / AutoDiff / SIL / Serialization / sil_differentiability_witness . sil <nl> <nl> / / RUN : % empty - directory ( % t ) <nl> / / RUN : % target - sil - opt % s - emit - sib - o % t / tmp . sib - module - name main <nl> / / RUN : % target - sil - opt % t / tmp . sib - o % t / tmp . sil - module - name main <nl> - <nl> / / NOTE ( SR - 12090 ) : Workaround because import declarations are not preserved in . sib files . <nl> / / RUN : sed - e ' s / import Swift $ / import Swift ; import _Differentiation / ' % t / tmp . sil > % t / tmp_fixed . sil <nl> / / RUN : % target - sil - opt % t / tmp_fixed . sil - module - name main - emit - sorted - sil | % FileCheck - - check - prefix = ROUNDTRIP % s <nl> <nl> + / / IRGen test . <nl> + <nl> + / / RUN : % target - swift - frontend - emit - ir % s | % FileCheck - - check - prefix = IRGEN % s <nl> + <nl> / / REQUIRES : differentiable_programming <nl> / / NOTE ( SR - 12090 ) : ` shell ` is required only to run ` sed ` as a SR - 12090 workaround . <nl> / / REQUIRES : shell <nl> sil_differentiability_witness [ parameters 0 ] [ results 0 ] @ externalFn1 : $ @ conven <nl> / / ROUNDTRIP : vjp : @ AD__externalFn1__vjp_src_0_wrt_0 : $ @ convention ( thin ) ( Float ) - > ( Float , @ owned @ callee_guaranteed ( Float ) - > Float ) <nl> / / ROUNDTRIP : } <nl> <nl> + / / IRGEN - LABEL : @ AD__externalFn1_PSRS = { { ( protected ) ? } } global { i8 * , i8 * } { <nl> + / / IRGEN - SAME : @ AD__externalFn1__jvp_src_0_wrt_0 <nl> + / / IRGEN - SAME : @ AD__externalFn1__vjp_src_0_wrt_0 <nl> + / / IRGEN - SAME : } <nl> + <nl> / / Test SIL differentiability witness for bodiless original function , with bodiless jvp / vjp . <nl> <nl> sil @ externalFn2 : $ @ convention ( thin ) ( Float ) - > Float <nl> sil_differentiability_witness [ parameters 0 ] [ results 0 ] @ externalFn2 : $ @ conven <nl> / / ROUNDTRIP : vjp : @ AD__externalFn2__vjp_src_0_wrt_0 : $ @ convention ( thin ) ( Float ) - > ( Float , @ owned @ callee_guaranteed ( Float ) - > Float ) <nl> / / ROUNDTRIP : } <nl> <nl> + / / IRGEN - LABEL : @ AD__externalFn2_PSRS = { { ( protected ) ? } } global { i8 * , i8 * } { <nl> + / / IRGEN - SAME : @ AD__externalFn2__jvp_src_0_wrt_0 <nl> + / / IRGEN - SAME : @ AD__externalFn2__vjp_src_0_wrt_0 <nl> + / / IRGEN - SAME : } <nl> + <nl> / / Test SIL differentiability witness declaration . 
<nl> <nl> sil @ externalFn3 : $ @ convention ( thin ) ( Float ) - > Float <nl> sil_differentiability_witness [ parameters 0 ] [ results 0 ] @ externalFn3 : $ @ conven <nl> / / ROUNDTRIP - LABEL : / / differentiability witness for externalFn3 <nl> / / ROUNDTRIP : sil_differentiability_witness { { ( public_external ) ? } } [ parameters 0 ] [ results 0 ] @ externalFn3 : $ @ convention ( thin ) ( Float ) - > Float { { [ ^ { ] * $ } } <nl> <nl> + / / IRGEN - NOT : @ AD__externalFn3 { { . * } } = { { . * } } { i8 * , i8 * } <nl> + <nl> / / Test public non - generic function . <nl> / / SIL differentiability witness : <nl> / / - Has public linkage ( implicit ) . <nl> sil_differentiability_witness [ parameters 0 ] [ results 0 ] @ foo : $ @ convention ( thi <nl> / / ROUNDTRIP : vjp : @ AD__foo__vjp_src_0_wrt_0 : $ @ convention ( thin ) ( Float ) - > ( Float , @ owned @ callee_guaranteed ( Float ) - > Float ) <nl> / / ROUNDTRIP : } <nl> <nl> + / / IRGEN - LABEL : @ AD__foo_PSRS = { { ( protected ) ? } } global { i8 * , i8 * } { <nl> + / / IRGEN - SAME : @ AD__foo__jvp_src_0_wrt_0 <nl> + / / IRGEN - SAME : @ AD__foo__vjp_src_0_wrt_0 <nl> + / / IRGEN - SAME : } <nl> + <nl> / / Test internal generic function . <nl> / / SIL differentiability witness : <nl> / / - Has hidden linkage . <nl> sil_differentiability_witness hidden [ parameters 0 1 ] [ results 0 ] < τ_0_0 where <nl> / / ROUNDTRIP : jvp : @ AD__generic__jvp_src_0_wrt_0_1 : $ @ convention ( thin ) < τ_0_0 where τ_0_0 : Differentiable > ( @ in_guaranteed τ_0_0 , Float ) - > ( @ out τ_0_0 , @ owned @ callee_guaranteed ( @ in_guaranteed τ_0_0 . TangentVector , Float ) - > @ out τ_0_0 . TangentVector ) <nl> / / ROUNDTRIP : vjp : @ AD__generic__vjp_src_0_wrt_0_1 : $ @ convention ( thin ) < τ_0_0 where τ_0_0 : Differentiable > ( @ in_guaranteed τ_0_0 , Float ) - > ( @ out τ_0_0 , @ owned @ callee_guaranteed ( @ in_guaranteed τ_0_0 . TangentVector ) - > ( @ out τ_0_0 . TangentVector , Float ) ) <nl> / / ROUNDTRIP : } <nl> + <nl> + / / IRGEN - LABEL : @ AD__generic_PSSRS16_Differentiation14DifferentiableRzl = hidden global { i8 * , i8 * } { <nl> + / / IRGEN - SAME : @ AD__generic__jvp_src_0_wrt_0_1 <nl> + / / IRGEN - SAME : @ AD__generic__vjp_src_0_wrt_0_1 <nl> + / / IRGEN - SAME : } <nl>
|
Merge remote - tracking branch ' origin / master ' into master - rebranch
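The IRGen changes in the diff above emit each SIL differentiability witness as a global of LLVM type { i8*, i8* } holding the JVP and VJP entry points. As a conceptual mirror only (not a Swift runtime declaration), the emitted record is just a pair of code pointers:

// Conceptual layout of the emitted swift.differentiability_witness global:
// two untyped function pointers, JVP first, then VJP.
struct DifferentiabilityWitnessRecord {
  void* jvp;  // derivative function returning (value, differential)
  void* vjp;  // derivative function returning (value, pullback)
};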
|
apple/swift
|
fda8e977e1f09699bb3b4d8f50d15503edb6c0b1
|
2020-02-07T22:24:32Z
|
mmm a / src / library_memfs . js <nl> ppp b / src / library_memfs . js <nl> mergeInto ( LibraryManager . library , { <nl> } else if ( FS . isFile ( node . mode ) ) { <nl> node . node_ops = MEMFS . ops_table . file . node ; <nl> node . stream_ops = MEMFS . ops_table . file . stream ; <nl> - node . usedBytes = 0 ; / / The actual number of bytes used in the typed array , as opposed to contents . buffer . byteLength which gives the whole capacity . <nl> + node . usedBytes = 0 ; / / The actual number of bytes used in the typed array , as opposed to contents . length which gives the whole capacity . <nl> / / When the byte data of the file is populated , this will point to either a typed array , or a normal JS array . Typed arrays are preferred <nl> / / for performance , and used by default . However , typed arrays are not resizable like normal JS arrays are , so there is a small disk size <nl> / / penalty involved for appending file writes that continuously grow a file similar to std : : vector capacity vs used - scheme . <nl> mergeInto ( LibraryManager . library , { <nl> # endif <nl> <nl> if ( ! node . contents | | node . contents . subarray ) { / / Keep using a typed array if creating a new storage , or if old one was a typed array as well . <nl> - var prevCapacity = node . contents ? node . contents . buffer . byteLength : 0 ; <nl> + var prevCapacity = node . contents ? node . contents . length : 0 ; <nl> if ( prevCapacity > = newCapacity ) return ; / / No need to expand , the storage was already large enough . <nl> / / Don ' t expand strictly to the given requested limit if it ' s only a very small increase , but instead geometrically grow capacity . <nl> / / For small filesizes ( < 1MB ) , perform size * 2 geometric increase , but for large sizes , do a much more conservative size * 1 . 125 increase to <nl> mergeInto ( LibraryManager . library , { <nl> } , <nl> <nl> / / Writes the byte range ( buffer [ offset ] , buffer [ offset + length ] ) to offset ' position ' into the file pointed by ' stream ' <nl> + / / canOwn : A boolean that tells if this function can take ownership of the passed in buffer from the subbuffer portion <nl> + / / that the typed array view ' buffer ' points to . The underlying ArrayBuffer can be larger than that , but <nl> + / / canOwn = true will not take ownership of the portion outside the bytes addressed by the view . <nl> write : function ( stream , buffer , offset , length , position , canOwn ) { <nl> if ( ! length ) return 0 ; <nl> var node = stream . node ; <nl>
|
When canOwn = true is passed to MEMFS write functions , a view can be passed in that only addresses a subportion of the underlying ArrayBuffer . Define the semantics of that scenario to mean that only the bytes pointed to by the subview can be taken ownership of , and not the bytes outside the view . ( Previously passing a view to a subportion of an ArrayBuffer would throw an exception when later appending to it . )
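The rule stated above — ownership granted by canOwn covers only the bytes the view addresses, never the rest of the backing ArrayBuffer — can be pictured with a small C++ analogue; FileNode and memfsWrite are illustrative names, not Emscripten's FS API.

#include <cstddef>
#include <cstdint>
#include <vector>

// Either alias the caller's bytes (canOwn) or keep a private copy.
struct FileNode {
  const uint8_t* aliased = nullptr;  // view into caller memory when canOwn
  size_t aliased_len = 0;
  std::vector<uint8_t> owned;        // private copy otherwise
};

void memfsWrite(FileNode& node, const uint8_t* buffer, size_t offset,
                size_t length, bool can_own) {
  if (can_own) {
    // Ownership extends only to [buffer+offset, buffer+offset+length);
    // bytes outside the view are never touched or retained.
    node.aliased = buffer + offset;
    node.aliased_len = length;
    node.owned.clear();
  } else {
    node.owned.assign(buffer + offset, buffer + offset + length);
    node.aliased = nullptr;
    node.aliased_len = 0;
  }
}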
|
emscripten-core/emscripten
|
6c22952045691e94041bff838e6d20d3f9c9654e
|
2016-11-08T14:00:09Z
|
mmm a / src / mongo / shell / collection . js <nl> ppp b / src / mongo / shell / collection . js <nl> DBCollection . prototype . hashAllDocs = function ( ) { <nl> * / <nl> DBCollection . prototype . dropIndex = function ( index ) { <nl> assert ( index , " need to specify index to dropIndex " ) ; <nl> + <nl> + / / Need an extra check for array because ' Array ' is an ' object ' , but not every ' object ' is an <nl> + / / ' Array ' . <nl> + if ( typeof index ! = " string " & & typeof index ! = " object " | | index instanceof Array ) { <nl> + throw new Error ( <nl> + " The index to drop must be either the index name or the index specification document " ) ; <nl> + } <nl> + <nl> + if ( typeof index = = " string " & & index = = = " * " ) { <nl> + throw new Error ( <nl> + " To drop indexes in the collection using ' * ' , use db . collection . dropIndexes ( ) " ) ; <nl> + } <nl> + <nl> var res = this . _dbCommand ( " dropIndexes " , { index : index } ) ; <nl> return res ; <nl> } ; <nl>
|
SERVER - 39663 db . collection . dropIndex ( ) should not accept multiple index names
|
mongodb/mongo
|
ac49ef4c2dcbee1317c3c1f5bad102eef70ce16c
|
2019-04-06T01:41:43Z
|
mmm a / README . md <nl> ppp b / README . md <nl> LeetCode <nl> | 347 | [ Top K Frequent Elements ] ( https : / / leetcode . com / problems / top - k - frequent - elements / ) | [ C + + ] ( . / algorithms / cpp / topKFrequentElements / topKFrequentElements . cpp ) | Medium | <nl> | 345 | [ Reverse Vowels of a String ] ( https : / / leetcode . com / problems / reverse - vowels - of - a - string / ) | [ C + + ] ( . / algorithms / cpp / reverseVowelsOfAString / reverseVowelsOfAString . cpp ) | Easy | <nl> | 344 | [ Reverse String ] ( https : / / leetcode . com / problems / reverse - string / ) | [ C + + ] ( . / algorithms / cpp / reverseString / ReverseString . cpp ) | Easy | <nl> + | 343 | [ Integer Break ] ( https : / / leetcode . com / problems / integer - break / ) | [ C + + ] ( . / algorithms / cpp / integerBreak / IntegerBreak . cpp ) | Medium | <nl> | 342 | [ Power of Four ] ( https : / / leetcode . com / problems / power - of - four / ) | [ C + + ] ( . / algorithms / cpp / powerOfFour / PowerOfFour . cpp ) | Easy | <nl> | 337 | [ House Robber III ] ( https : / / leetcode . com / problems / house - robber - iii / ) | [ C + + ] ( . / algorithms / cpp / houseRobber / houseRobberIII . cpp ) | Medium | <nl> | 334 | [ Increasing Triplet Subsequence ] ( https : / / leetcode . com / problems / increasing - triplet - subsequence / ) | [ C + + ] ( . / algorithms / cpp / increasingTripletSubsequence / increasingTripletSubsequence . cpp ) | Medium | <nl> new file mode 100644 <nl> index 00000000 . . 21f78bb8 <nl> mmm / dev / null <nl> ppp b / algorithms / cpp / integerBreak / IntegerBreak . cpp <nl> <nl> + / / Source : https : / / leetcode . com / problems / integer - break / <nl> + / / Author : Hao Chen <nl> + / / Date : 2016 - 05 - 29 <nl> + <nl> + / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + * <nl> + * Given a positive integer n , break it into the sum of at least two positive integers <nl> + * and maximize the product of those integers . Return the maximum product you can get . <nl> + * <nl> + * For example , given n = 2 , return 1 ( 2 = 1 + 1 ) ; given n = 10 , return 36 ( 10 = 3 + 3 <nl> + * + 4 ) . <nl> + * <nl> + * Note : you may assume that n is not less than 2 . <nl> + * <nl> + * There is a simple O ( n ) solution to this problem . <nl> + * You may check the breaking results of n ranging from 7 to 10 to discover the <nl> + * regularities . <nl> + * <nl> + * Credits : Special thanks to @ jianchao . li . fighter for adding this problem and creating <nl> + * all test cases . <nl> + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> + <nl> + class Solution { <nl> + public : <nl> + / / As the hint said , checking the n with ranging from 7 to 10 to discover the regularities . <nl> + / / n = 7 , 3 * 4 = 12 <nl> + / / n = 8 , 3 * 3 * 2 = 18 <nl> + / / n = 9 , 3 * 3 * 3 = 27 <nl> + / / n = 10 , 3 * 3 * 4 = 36 <nl> + / / n = 11 , 3 * 3 * 3 * 2 = 54 <nl> + / / <nl> + / / we can see we can break the number by 3 if it is greater than 4 ; <nl> + / / <nl> + int integerBreak ( int n ) { <nl> + if ( n = = 2 ) return 1 ; <nl> + if ( n = = 3 ) return 2 ; <nl> + int result = 1 ; <nl> + while ( n > 4 ) { <nl> + result * = 3 ; <nl> + n - = 3 ; <nl> + } <nl> + result * = n ; <nl> + return result ; <nl> + } <nl> + } ; <nl> + <nl>
|
the solution of problem " Integer Break "
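The comment in the diff above argues from the cases n = 7 through 11 that the best split keeps peeling off 3s while n > 4. That regularity can be cross-checked against a straightforward DP; this harness is illustrative and not part of the repository.

#include <algorithm>
#include <cassert>
#include <vector>

// dp[i] = best product when i is split into at least two positive parts.
int integerBreakDP(int n) {
  std::vector<int> dp(n + 1, 0);
  dp[1] = 1;
  for (int i = 2; i <= n; ++i)
    for (int j = 1; j < i; ++j)
      dp[i] = std::max(dp[i], std::max(j * (i - j), j * dp[i - j]));
  return dp[n];
}

int main() {
  assert(integerBreakDP(2) == 1);    // 1 + 1
  assert(integerBreakDP(10) == 36);  // 3 + 3 + 4
  // Greedy "peel off 3s" answer for n = 11: 3 * 3 * 3 * 2 = 54.
  assert(integerBreakDP(11) == 54);
}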
|
haoel/leetcode
|
726d8d3bd636e46bc2bad090b935cad60e6eec84
|
2016-05-29T08:27:19Z
|
mmm a / tensorflow / compiler / xla / service / cpu / BUILD <nl> ppp b / tensorflow / compiler / xla / service / cpu / BUILD <nl> filegroup ( <nl> srcs = [ <nl> " runtime_fp16 . cc " , <nl> " runtime_key_value_sort . cc " , <nl> + " runtime_pow . cc " , <nl> " runtime_single_threaded_conv2d . cc " , <nl> " runtime_single_threaded_fft . cc " , <nl> " runtime_single_threaded_matmul . cc " , <nl> filegroup ( <nl> " runtime_fft_impl . h " , <nl> " runtime_fp16 . h " , <nl> " runtime_key_value_sort . h " , <nl> + " runtime_pow . h " , <nl> " runtime_single_threaded_conv2d . h " , <nl> " runtime_single_threaded_fft . h " , <nl> " runtime_single_threaded_matmul . h " , <nl> cc_library ( <nl> " : cpu_runtime " , <nl> " : orc_jit_memory_mapper " , <nl> " : runtime_fp16 " , <nl> + " : runtime_pow " , <nl> " : runtime_conv2d " , <nl> " : runtime_conv2d_mkl " , <nl> " : runtime_fft " , <nl> cc_library ( <nl> ] , <nl> ) <nl> <nl> + cc_library ( <nl> + name = " runtime_pow " , <nl> + srcs = [ <nl> + " runtime_pow . cc " , <nl> + ] , <nl> + hdrs = [ <nl> + " runtime_pow . h " , <nl> + ] , <nl> + copts = runtime_copts ( ) , <nl> + deps = [ <nl> + " / / tensorflow / core / platform : macros " , <nl> + " / / tensorflow / core / platform : types " , <nl> + ] , <nl> + ) <nl> + <nl> cc_library ( <nl> name = " cpu_executable " , <nl> srcs = [ " cpu_executable . cc " ] , <nl> mmm a / tensorflow / compiler / xla / service / cpu / compiler_functor . cc <nl> ppp b / tensorflow / compiler / xla / service / cpu / compiler_functor . cc <nl> void CompilerFunctor : : AddTargetInfoPasses ( <nl> target_library_info_impl - > addVectorizableFunctions ( <nl> VectorFunctionsForTargetLibraryInfoImpl ( ) ) ; <nl> <nl> - / / TODO ( b / 136651482 ) : Disable pow ( f ) so LLVM doesn ' t transform it into powi . <nl> - / / It would be better to provide our own powi . <nl> - target_library_info_impl - > setUnavailable ( llvm : : LibFunc_pow ) ; <nl> - target_library_info_impl - > setUnavailable ( llvm : : LibFunc_powf ) ; <nl> - <nl> passes - > add ( <nl> new llvm : : TargetLibraryInfoWrapperPass ( * target_library_info_impl ) ) ; <nl> passes - > add ( createTargetTransformInfoWrapperPass ( <nl> new file mode 100644 <nl> index 0000000000000 . . 08308b4ce57f7 <nl> mmm / dev / null <nl> ppp b / tensorflow / compiler / xla / service / cpu / runtime_pow . cc <nl> <nl> + / * Copyright 2020 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + # include " tensorflow / compiler / xla / service / cpu / runtime_pow . h " <nl> + <nl> + # include " tensorflow / core / platform / macros . 
h " <nl> + <nl> + template < typename T > <nl> + static T Powi ( T a , tensorflow : : int32 b ) { <nl> + const bool recip = b < 0 ; <nl> + T r = 1 ; <nl> + while ( true ) { <nl> + if ( b & 1 ) r * = a ; <nl> + b / = 2 ; <nl> + if ( b = = 0 ) break ; <nl> + a * = a ; <nl> + } <nl> + return recip ? 1 / r : r ; <nl> + } <nl> + <nl> + float TF_ATTRIBUTE_WEAK __powisf2 ( float a , tensorflow : : int32 b ) { <nl> + return Powi ( a , b ) ; <nl> + } <nl> + <nl> + double TF_ATTRIBUTE_WEAK __powidf2 ( double a , tensorflow : : int32 b ) { <nl> + return Powi ( a , b ) ; <nl> + } <nl> new file mode 100644 <nl> index 0000000000000 . . 53f8094256d13 <nl> mmm / dev / null <nl> ppp b / tensorflow / compiler / xla / service / cpu / runtime_pow . h <nl> <nl> + / * Copyright 2020 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + # ifndef TENSORFLOW_COMPILER_XLA_SERVICE_CPU_RUNTIME_POW_H_ <nl> + # define TENSORFLOW_COMPILER_XLA_SERVICE_CPU_RUNTIME_POW_H_ <nl> + <nl> + # include " tensorflow / core / platform / types . h " <nl> + <nl> + / / Raises F32 value a to the power of b . <nl> + extern " C " float __powisf2 ( float a , tensorflow : : int32 b ) ; <nl> + <nl> + / / Raises F64 value a to the power of b . <nl> + extern " C " double __powidf2 ( double a , tensorflow : : int32 b ) ; <nl> + <nl> + # endif / / TENSORFLOW_COMPILER_XLA_SERVICE_CPU_RUNTIME_POW_H_ <nl> mmm a / tensorflow / compiler / xla / service / cpu / simple_orc_jit . cc <nl> ppp b / tensorflow / compiler / xla / service / cpu / simple_orc_jit . cc <nl> limitations under the License . <nl> # include " tensorflow / compiler / xla / service / cpu / runtime_key_value_sort . h " <nl> # include " tensorflow / compiler / xla / service / cpu / runtime_matmul . h " <nl> # include " tensorflow / compiler / xla / service / cpu / runtime_matmul_mkl . h " <nl> + # include " tensorflow / compiler / xla / service / cpu / runtime_pow . h " <nl> # include " tensorflow / compiler / xla / service / cpu / runtime_single_threaded_conv2d . h " <nl> # include " tensorflow / compiler / xla / service / cpu / runtime_single_threaded_fft . h " <nl> # include " tensorflow / compiler / xla / service / cpu / runtime_single_threaded_matmul . h " <nl> bool RegisterKnownJITSymbols ( ) { <nl> " Host " ) ; <nl> registry - > Register ( " __truncdfhf2 " , reinterpret_cast < void * > ( __truncdfhf2 ) , <nl> " Host " ) ; <nl> + registry - > Register ( " __powisf2 " , reinterpret_cast < void * > ( __powisf2 ) , " Host " ) ; <nl> + registry - > Register ( " __powidf2 " , reinterpret_cast < void * > ( __powidf2 ) , " Host " ) ; <nl> <nl> # undef REGISTER_CPU_RUNTIME_SYMBOL <nl> <nl>
|
[ XLA : CPU ] Add our own __powisf2 and __powidf2 which can be generated by LLVM
|
tensorflow/tensorflow
|
25e51cdbacfd0badf48ed16a6f942195d94832fe
|
2020-05-05T18:53:15Z
|
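Editor's aside (not part of either recorded commit): the __powisf2 / __powidf2 builtins registered with the JIT in the tensorflow/tensorflow row above compute a floating-point base raised to a signed integer exponent via exponentiation by squaring. The standalone C++ sketch below mirrors that scheme for illustration; the names PowiSketch and the small demo in main are assumptions introduced here, and the only precondition is that the exponent fits in int32_t.

    #include <cstdint>
    #include <cstdio>

    // Exponentiation by squaring for an integer exponent, in the same style
    // as the Powi helper added in the diff above. The low-bit test (b & 1)
    // and truncating division (b /= 2) behave correctly for negative
    // exponents in two's complement, so only the final result needs the
    // reciprocal when the exponent is negative.
    template <typename T>
    static T PowiSketch(T a, int32_t b) {
      const bool recip = b < 0;   // a^-n == 1 / a^n
      T r = 1;
      while (true) {
        if (b & 1) r *= a;        // fold in the current exponent bit
        b /= 2;                   // drop that bit (truncates toward zero)
        if (b == 0) break;
        a *= a;                   // square the base for the next bit
      }
      return recip ? 1 / r : r;
    }

    int main() {
      // Expected output: 1024 0.125
      std::printf("%g %g\n", PowiSketch(2.0, 10), PowiSketch(2.0, -3));
      return 0;
    }

A design point worth noting: working directly on the negative exponent and taking the reciprocal at the end avoids negating b up front, which would be undefined behaviour for INT32_MIN.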
mmm a / core / rid_owner . h <nl> ppp b / core / rid_owner . h <nl> class RID_Alloc : public RID_AllocBase { <nl> free_list_chunks [ alloc_count / elements_in_chunk ] [ alloc_count % elements_in_chunk ] = idx ; <nl> } <nl> <nl> + _FORCE_INLINE_ uint32_t get_rid_count ( ) const { <nl> + return alloc_count ; <nl> + } <nl> + <nl> + _FORCE_INLINE_ T * get_rid_by_index ( uint32_t p_index ) { <nl> + ERR_FAIL_INDEX_V ( p_index , alloc_count , NULL ) ; <nl> + uint64_t idx = free_list_chunks [ p_index / elements_in_chunk ] [ p_index % elements_in_chunk ] ; <nl> + return & chunks [ idx / elements_in_chunk ] [ idx % elements_in_chunk ] ; <nl> + } <nl> + <nl> void get_owned_list ( List < RID > * p_owned ) { <nl> for ( size_t i = 0 ; i < alloc_count ; i + + ) { <nl> uint64_t idx = free_list_chunks [ i / elements_in_chunk ] [ i % elements_in_chunk ] ; <nl> class RID_Owner { <nl> alloc . free ( p_rid ) ; <nl> } <nl> <nl> + _FORCE_INLINE_ uint32_t get_rid_count ( ) const { <nl> + return alloc . get_rid_count ( ) ; <nl> + } <nl> + <nl> + _FORCE_INLINE_ T * get_rid_by_index ( uint32_t p_index ) { <nl> + return alloc . get_rid_by_index ( p_index ) ; <nl> + } <nl> + <nl> _FORCE_INLINE_ void get_owned_list ( List < RID > * p_owned ) { <nl> return alloc . get_owned_list ( p_owned ) ; <nl> } <nl> mmm a / drivers / vulkan / rendering_device_vulkan . cpp <nl> ppp b / drivers / vulkan / rendering_device_vulkan . cpp <nl> RID RenderingDeviceVulkan : : texture_create ( const TextureFormat & p_format , const T <nl> } <nl> <nl> if ( p_format . usage_bits & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT & & ! ( flags & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT ) ) { <nl> + printf ( " vkformat : % x \ n " , image_create_info . format ) ; <nl> ERR_FAIL_V_MSG ( RID ( ) , " Format " + format_text + " does not support usage as depth - stencil attachment . " ) ; <nl> } <nl> <nl> RID RenderingDeviceVulkan : : texture_create ( const TextureFormat & p_format , const T <nl> <nl> / / set bound and unbound layouts <nl> if ( p_format . usage_bits & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT ) { <nl> - texture . aspect_mask = TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT ; <nl> + texture . aspect_mask = VK_IMAGE_ASPECT_DEPTH_BIT ; <nl> <nl> if ( p_format . usage_bits & TEXTURE_USAGE_SAMPLING_BIT ) { <nl> texture . unbound_layout = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL ; <nl> bool RenderingDeviceVulkan : : _uniform_add_binding ( Vector < Vector < VkDescriptorSetLa <nl> } break ; * / <nl> default : { <nl> <nl> - if ( reflection . getType ( ) - > getQualifier ( ) . hasOffset ( ) ) { <nl> + if ( reflection . getType ( ) - > getQualifier ( ) . hasOffset ( ) | | reflection . name . find ( " . " ) ! = std : : string : : npos ) { <nl> / / member of uniform block ? <nl> return true ; <nl> } <nl> bool RenderingDeviceVulkan : : uniform_set_is_valid ( RID p_uniform_set ) { <nl> return uniform_set_owner . owns ( p_uniform_set ) ; <nl> } <nl> <nl> - Error RenderingDeviceVulkan : : buffer_update ( RID p_buffer , uint32_t p_offset , uint32_t p_size , void * p_data , bool p_sync_with_draw ) { <nl> + Error RenderingDeviceVulkan : : buffer_update ( RID p_buffer , uint32_t p_offset , uint32_t p_size , const void * p_data , bool p_sync_with_draw ) { <nl> _THREAD_SAFE_METHOD_ <nl> <nl> ERR_FAIL_COND_V_MSG ( draw_list & & p_sync_with_draw , ERR_INVALID_PARAMETER , <nl> mmm a / drivers / vulkan / rendering_device_vulkan . h <nl> ppp b / drivers / vulkan / rendering_device_vulkan . 
h <nl> class RenderingDeviceVulkan : public RenderingDevice { <nl> virtual RID uniform_set_create ( const Vector < Uniform > & p_uniforms , RID p_shader , uint32_t p_shader_set ) ; <nl> virtual bool uniform_set_is_valid ( RID p_uniform_set ) ; <nl> <nl> - virtual Error buffer_update ( RID p_buffer , uint32_t p_offset , uint32_t p_size , void * p_data , bool p_sync_with_draw = false ) ; / / works for any buffer <nl> + virtual Error buffer_update ( RID p_buffer , uint32_t p_offset , uint32_t p_size , const void * p_data , bool p_sync_with_draw = false ) ; / / works for any buffer <nl> <nl> / * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> / * * * * RENDER PIPELINE * * * * / <nl> mmm a / scene / 2d / animated_sprite . cpp <nl> ppp b / scene / 2d / animated_sprite . cpp <nl> <nl> # include " scene / scene_string_names . h " <nl> <nl> # define NORMAL_SUFFIX " _normal " <nl> + # define SPECULAR_SUFFIX " _specular " <nl> <nl> # ifdef TOOLS_ENABLED <nl> Dictionary AnimatedSprite : : _edit_get_state ( ) const { <nl> void SpriteFrames : : add_animation ( const StringName & p_anim ) { <nl> <nl> animations [ p_anim ] = Anim ( ) ; <nl> animations [ p_anim ] . normal_name = String ( p_anim ) + NORMAL_SUFFIX ; <nl> + animations [ p_anim ] . specular_name = String ( p_anim ) + SPECULAR_SUFFIX ; <nl> } <nl> <nl> bool SpriteFrames : : has_animation ( const StringName & p_anim ) const { <nl> void SpriteFrames : : rename_animation ( const StringName & p_prev , const StringName & <nl> animations . erase ( p_prev ) ; <nl> animations [ p_next ] = anim ; <nl> animations [ p_next ] . normal_name = String ( p_next ) + NORMAL_SUFFIX ; <nl> + animations [ p_next ] . specular_name = String ( p_next ) + SPECULAR_SUFFIX ; <nl> } <nl> <nl> Vector < String > SpriteFrames : : _get_animation_list ( ) const { <nl> void AnimatedSprite : : _notification ( int p_what ) { <nl> return ; <nl> <nl> Ref < Texture2D > normal = frames - > get_normal_frame ( animation , frame ) ; <nl> + Ref < Texture2D > specular = frames - > get_specular_frame ( animation , frame ) ; <nl> <nl> RID ci = get_canvas_item ( ) ; <nl> <nl> void AnimatedSprite : : _notification ( int p_what ) { <nl> if ( vflip ) <nl> dst_rect . size . y = - dst_rect . size . y ; <nl> <nl> - texture - > draw_rect_region ( ci , dst_rect , Rect2 ( Vector2 ( ) , texture - > get_size ( ) ) , Color ( 1 , 1 , 1 ) , false , normal ) ; <nl> + texture - > draw_rect_region ( ci , dst_rect , Rect2 ( Vector2 ( ) , texture - > get_size ( ) ) , Color ( 1 , 1 , 1 ) , false , normal , specular , Color ( specular_color . r , specular_color . g , specular_color . b , shininess ) ) ; <nl> <nl> } break ; <nl> } <nl> String AnimatedSprite : : get_configuration_warning ( ) const { <nl> return String ( ) ; <nl> } <nl> <nl> + void AnimatedSprite : : set_specular_color ( const Color & p_color ) { <nl> + specular_color = p_color ; <nl> + update ( ) ; <nl> + } <nl> + <nl> + Color AnimatedSprite : : get_specular_color ( ) const { <nl> + return specular_color ; <nl> + } <nl> + <nl> + void AnimatedSprite : : set_shininess ( float p_shininess ) { <nl> + shininess = CLAMP ( p_shininess , 0 . 0 , 1 . 
0 ) ; <nl> + update ( ) ; <nl> + } <nl> + <nl> + float AnimatedSprite : : get_shininess ( ) const { <nl> + return shininess ; <nl> + } <nl> + <nl> void AnimatedSprite : : _bind_methods ( ) { <nl> <nl> ClassDB : : bind_method ( D_METHOD ( " set_sprite_frames " , " sprite_frames " ) , & AnimatedSprite : : set_sprite_frames ) ; <nl> void AnimatedSprite : : _bind_methods ( ) { <nl> ClassDB : : bind_method ( D_METHOD ( " set_speed_scale " , " speed_scale " ) , & AnimatedSprite : : set_speed_scale ) ; <nl> ClassDB : : bind_method ( D_METHOD ( " get_speed_scale " ) , & AnimatedSprite : : get_speed_scale ) ; <nl> <nl> + ClassDB : : bind_method ( D_METHOD ( " set_specular_color " , " color " ) , & AnimatedSprite : : set_specular_color ) ; <nl> + ClassDB : : bind_method ( D_METHOD ( " get_specular_color " ) , & AnimatedSprite : : get_specular_color ) ; <nl> + <nl> + ClassDB : : bind_method ( D_METHOD ( " set_shininess " , " shininess " ) , & AnimatedSprite : : set_shininess ) ; <nl> + ClassDB : : bind_method ( D_METHOD ( " get_shininess " ) , & AnimatedSprite : : get_shininess ) ; <nl> + <nl> ClassDB : : bind_method ( D_METHOD ( " _res_changed " ) , & AnimatedSprite : : _res_changed ) ; <nl> <nl> ADD_SIGNAL ( MethodInfo ( " frame_changed " ) ) ; <nl> ADD_SIGNAL ( MethodInfo ( " animation_finished " ) ) ; <nl> <nl> + ADD_GROUP ( " Animation " , " " ) ; <nl> ADD_PROPERTY ( PropertyInfo ( Variant : : OBJECT , " frames " , PROPERTY_HINT_RESOURCE_TYPE , " SpriteFrames " ) , " set_sprite_frames " , " get_sprite_frames " ) ; <nl> ADD_PROPERTY ( PropertyInfo ( Variant : : STRING , " animation " ) , " set_animation " , " get_animation " ) ; <nl> ADD_PROPERTY ( PropertyInfo ( Variant : : INT , " frame " ) , " set_frame " , " get_frame " ) ; <nl> ADD_PROPERTY ( PropertyInfo ( Variant : : REAL , " speed_scale " ) , " set_speed_scale " , " get_speed_scale " ) ; <nl> ADD_PROPERTY ( PropertyInfo ( Variant : : BOOL , " playing " ) , " _set_playing " , " _is_playing " ) ; <nl> + ADD_GROUP ( " Lighting " , " " ) ; <nl> + ADD_PROPERTY ( PropertyInfo ( Variant : : COLOR , " specular_color " , PROPERTY_HINT_COLOR_NO_ALPHA ) , " set_specular_color " , " get_specular_color " ) ; <nl> + ADD_PROPERTY ( PropertyInfo ( Variant : : REAL , " shininess " , PROPERTY_HINT_RANGE , " 0 , 1 , 0 . 01 " ) , " set_shininess " , " get_shininess " ) ; <nl> + ADD_GROUP ( " Offset " , " " ) ; <nl> ADD_PROPERTY ( PropertyInfo ( Variant : : BOOL , " centered " ) , " set_centered " , " is_centered " ) ; <nl> ADD_PROPERTY ( PropertyInfo ( Variant : : VECTOR2 , " offset " ) , " set_offset " , " get_offset " ) ; <nl> ADD_PROPERTY ( PropertyInfo ( Variant : : BOOL , " flip_h " ) , " set_flip_h " , " is_flipped_h " ) ; <nl> AnimatedSprite : : AnimatedSprite ( ) { <nl> animation = " default " ; <nl> timeout = 0 ; <nl> is_over = false ; <nl> + specular_color = Color ( 1 , 1 , 1 , 1 ) ; <nl> + shininess = 1 . 0 ; <nl> } <nl> mmm a / scene / 2d / animated_sprite . h <nl> ppp b / scene / 2d / animated_sprite . h <nl> class SpriteFrames : public Resource { <nl> } <nl> <nl> StringName normal_name ; <nl> + StringName specular_name ; <nl> } ; <nl> <nl> + Color specular_color ; <nl> + float shininess ; <nl> + <nl> Map < StringName , Anim > animations ; <nl> <nl> Array _get_frames ( ) const ; <nl> class SpriteFrames : public Resource { <nl> return EN - > get ( ) . 
frames [ p_idx ] ; <nl> } <nl> <nl> + _FORCE_INLINE_ Ref < Texture2D > get_specular_frame ( const StringName & p_anim , int p_idx ) const { <nl> + <nl> + const Map < StringName , Anim > : : Element * E = animations . find ( p_anim ) ; <nl> + ERR_FAIL_COND_V ( ! E , Ref < Texture2D > ( ) ) ; <nl> + ERR_FAIL_COND_V ( p_idx < 0 , Ref < Texture2D > ( ) ) ; <nl> + <nl> + const Map < StringName , Anim > : : Element * EN = animations . find ( E - > get ( ) . specular_name ) ; <nl> + <nl> + if ( ! EN | | p_idx > = EN - > get ( ) . frames . size ( ) ) <nl> + return Ref < Texture2D > ( ) ; <nl> + <nl> + return EN - > get ( ) . frames [ p_idx ] ; <nl> + } <nl> + <nl> void set_frame ( const StringName & p_anim , int p_idx , const Ref < Texture2D > & p_frame ) { <nl> Map < StringName , Anim > : : Element * E = animations . find ( p_anim ) ; <nl> ERR_FAIL_COND_MSG ( ! E , " Animation ' " + String ( p_anim ) + " ' doesn ' t exist . " ) ; <nl> class AnimatedSprite : public Node2D { <nl> bool _is_playing ( ) const ; <nl> Rect2 _get_rect ( ) const ; <nl> <nl> + Color specular_color ; <nl> + float shininess ; <nl> + <nl> protected : <nl> static void _bind_methods ( ) ; <nl> void _notification ( int p_what ) ; <nl> class AnimatedSprite : public Node2D { <nl> void set_flip_v ( bool p_flip ) ; <nl> bool is_flipped_v ( ) const ; <nl> <nl> - void set_modulate ( const Color & p_color ) ; <nl> - Color get_modulate ( ) const ; <nl> + void set_specular_color ( const Color & p_color ) ; <nl> + Color get_specular_color ( ) const ; <nl> + <nl> + void set_shininess ( float p_shininess ) ; <nl> + float get_shininess ( ) const ; <nl> <nl> virtual String get_configuration_warning ( ) const ; <nl> AnimatedSprite ( ) ; <nl> mmm a / scene / 2d / polygon_2d . cpp <nl> ppp b / scene / 2d / polygon_2d . cpp <nl> void Polygon2D : : _notification ( int p_what ) { <nl> if ( invert | | polygons . size ( ) = = 0 ) { <nl> Vector < int > indices = Geometry : : triangulate_polygon ( points ) ; <nl> if ( indices . size ( ) ) { <nl> - VS : : get_singleton ( ) - > canvas_item_add_triangle_array ( get_canvas_item ( ) , indices , points , colors , uvs , bones , weights , texture . is_valid ( ) ? texture - > get_rid ( ) : RID ( ) ) ; <nl> + VS : : get_singleton ( ) - > canvas_item_add_triangle_array ( get_canvas_item ( ) , indices , points , colors , uvs , bones , weights , texture . is_valid ( ) ? texture - > get_rid ( ) : RID ( ) , - 1 , normal_map . is_valid ( ) ? normal_map - > get_rid ( ) : RID ( ) , specular_map . is_valid ( ) ? specular_map - > get_rid ( ) : RID ( ) , Color ( specular_color . r , specular_color . g , specular_color . 
b , shininess ) ) ; <nl> } <nl> } else { <nl> / / draw individual polygons <nl> Ref < Texture2D > Polygon2D : : get_texture ( ) const { <nl> return texture ; <nl> } <nl> <nl> + void Polygon2D : : set_normal_map ( const Ref < Texture2D > & p_normal_map ) { <nl> + normal_map = p_normal_map ; <nl> + update ( ) ; <nl> + } <nl> + <nl> + Ref < Texture2D > Polygon2D : : get_normal_map ( ) const { <nl> + return normal_map ; <nl> + } <nl> + <nl> + void Polygon2D : : set_specular_map ( const Ref < Texture2D > & p_specular_map ) { <nl> + specular_map = p_specular_map ; <nl> + update ( ) ; <nl> + } <nl> + <nl> + Ref < Texture2D > Polygon2D : : get_specular_map ( ) const { <nl> + return specular_map ; <nl> + } <nl> + <nl> + void Polygon2D : : set_specular_color ( const Color & p_specular_color ) { <nl> + specular_color = p_specular_color ; <nl> + update ( ) ; <nl> + } <nl> + <nl> + Color Polygon2D : : get_specular_color ( ) const { <nl> + return specular_color ; <nl> + } <nl> + <nl> + void Polygon2D : : set_shininess ( float p_shininess ) { <nl> + shininess = CLAMP ( p_shininess , 0 . 0 , 1 . 0 ) ; <nl> + update ( ) ; <nl> + } <nl> + <nl> + float Polygon2D : : get_shininess ( ) const { <nl> + return shininess ; <nl> + } <nl> + <nl> void Polygon2D : : set_texture_offset ( const Vector2 & p_offset ) { <nl> <nl> tex_ofs = p_offset ; <nl> void Polygon2D : : _bind_methods ( ) { <nl> ClassDB : : bind_method ( D_METHOD ( " set_texture " , " texture " ) , & Polygon2D : : set_texture ) ; <nl> ClassDB : : bind_method ( D_METHOD ( " get_texture " ) , & Polygon2D : : get_texture ) ; <nl> <nl> + ClassDB : : bind_method ( D_METHOD ( " set_normal_map " , " normal_map " ) , & Polygon2D : : set_normal_map ) ; <nl> + ClassDB : : bind_method ( D_METHOD ( " get_normal_map " ) , & Polygon2D : : get_normal_map ) ; <nl> + <nl> + ClassDB : : bind_method ( D_METHOD ( " set_specular_map " , " specular_map " ) , & Polygon2D : : set_specular_map ) ; <nl> + ClassDB : : bind_method ( D_METHOD ( " get_specular_map " ) , & Polygon2D : : get_specular_map ) ; <nl> + <nl> + ClassDB : : bind_method ( D_METHOD ( " set_specular_color " , " specular_color " ) , & Polygon2D : : set_specular_color ) ; <nl> + ClassDB : : bind_method ( D_METHOD ( " get_specular_color " ) , & Polygon2D : : get_specular_color ) ; <nl> + <nl> + ClassDB : : bind_method ( D_METHOD ( " set_shininess " , " shininess " ) , & Polygon2D : : set_shininess ) ; <nl> + ClassDB : : bind_method ( D_METHOD ( " get_shininess " ) , & Polygon2D : : get_shininess ) ; <nl> + <nl> ClassDB : : bind_method ( D_METHOD ( " set_texture_offset " , " texture_offset " ) , & Polygon2D : : set_texture_offset ) ; <nl> ClassDB : : bind_method ( D_METHOD ( " get_texture_offset " ) , & Polygon2D : : get_texture_offset ) ; <nl> <nl> void Polygon2D : : _bind_methods ( ) { <nl> ADD_PROPERTY ( PropertyInfo ( Variant : : VECTOR2 , " texture_scale " ) , " set_texture_scale " , " get_texture_scale " ) ; <nl> ADD_PROPERTY ( PropertyInfo ( Variant : : REAL , " texture_rotation_degrees " , PROPERTY_HINT_RANGE , " - 360 , 360 , 0 . 
1 , or_lesser , or_greater " ) , " set_texture_rotation_degrees " , " get_texture_rotation_degrees " ) ; <nl> ADD_PROPERTY ( PropertyInfo ( Variant : : REAL , " texture_rotation " , PROPERTY_HINT_NONE , " " , 0 ) , " set_texture_rotation " , " get_texture_rotation " ) ; <nl> + ADD_GROUP ( " Lighting " , " " ) ; <nl> + ADD_PROPERTY ( PropertyInfo ( Variant : : OBJECT , " normal_map " , PROPERTY_HINT_RESOURCE_TYPE , " Texture2D " ) , " set_normal_map " , " get_normal_map " ) ; <nl> + ADD_PROPERTY ( PropertyInfo ( Variant : : OBJECT , " specular_map " , PROPERTY_HINT_RESOURCE_TYPE , " Texture2D " ) , " set_specular_map " , " get_specular_map " ) ; <nl> + ADD_PROPERTY ( PropertyInfo ( Variant : : COLOR , " specular_color " , PROPERTY_HINT_COLOR_NO_ALPHA ) , " set_specular_color " , " get_specular_color " ) ; <nl> + ADD_PROPERTY ( PropertyInfo ( Variant : : REAL , " shininess " , PROPERTY_HINT_RANGE , " 0 , 1 , 0 . 01 " ) , " set_shininess " , " get_shininess " ) ; <nl> ADD_GROUP ( " Skeleton " , " " ) ; <nl> ADD_PROPERTY ( PropertyInfo ( Variant : : NODE_PATH , " skeleton " , PROPERTY_HINT_NODE_PATH_VALID_TYPES , " Skeleton2D " ) , " set_skeleton " , " get_skeleton " ) ; <nl> <nl> Polygon2D : : Polygon2D ( ) { <nl> rect_cache_dirty = true ; <nl> internal_vertices = 0 ; <nl> current_skeleton_id = 0 ; <nl> + specular_color = Color ( 1 , 1 , 1 , 1 ) ; <nl> + shininess = 1 . 0 ; <nl> } <nl> mmm a / scene / 2d / polygon_2d . h <nl> ppp b / scene / 2d / polygon_2d . h <nl> class Polygon2D : public Node2D { <nl> <nl> Color color ; <nl> Ref < Texture2D > texture ; <nl> + Ref < Texture2D > normal_map ; <nl> + Ref < Texture2D > specular_map ; <nl> + Color specular_color ; <nl> + float shininess ; <nl> + <nl> Size2 tex_scale ; <nl> Vector2 tex_ofs ; <nl> bool tex_tile ; <nl> class Polygon2D : public Node2D { <nl> void set_texture ( const Ref < Texture2D > & p_texture ) ; <nl> Ref < Texture2D > get_texture ( ) const ; <nl> <nl> + void set_normal_map ( const Ref < Texture2D > & p_normal_map ) ; <nl> + Ref < Texture2D > get_normal_map ( ) const ; <nl> + <nl> + void set_specular_map ( const Ref < Texture2D > & p_specular_map ) ; <nl> + Ref < Texture2D > get_specular_map ( ) const ; <nl> + <nl> + void set_specular_color ( const Color & p_specular_color ) ; <nl> + Color get_specular_color ( ) const ; <nl> + <nl> + void set_shininess ( float p_shininess ) ; <nl> + float get_shininess ( ) const ; <nl> + <nl> void set_texture_offset ( const Vector2 & p_offset ) ; <nl> Vector2 get_texture_offset ( ) const ; <nl> <nl> mmm a / scene / 2d / sprite . cpp <nl> ppp b / scene / 2d / sprite . cpp <nl> void Sprite : : _notification ( int p_what ) { <nl> Rect2 src_rect , dst_rect ; <nl> bool filter_clip ; <nl> _get_rects ( src_rect , dst_rect , filter_clip ) ; <nl> - texture - > draw_rect_region ( ci , dst_rect , src_rect , Color ( 1 , 1 , 1 ) , false , normal_map , Ref < Texture2D > ( ) , Color ( 1 , 1 , 1 , 1 ) , VS : : CANVAS_ITEM_TEXTURE_FILTER_DEFAULT , VS : : CANVAS_ITEM_TEXTURE_REPEAT_DEFAULT , filter_clip ) ; <nl> + texture - > draw_rect_region ( ci , dst_rect , src_rect , Color ( 1 , 1 , 1 ) , false , normal_map , specular , Color ( specular_color . r , specular_color . g , specular_color . 
b , shininess ) , VS : : CANVAS_ITEM_TEXTURE_FILTER_DEFAULT , VS : : CANVAS_ITEM_TEXTURE_REPEAT_DEFAULT , filter_clip ) ; <nl> <nl> } break ; <nl> } <nl> Ref < Texture2D > Sprite : : get_normal_map ( ) const { <nl> return normal_map ; <nl> } <nl> <nl> + void Sprite : : set_specular_map ( const Ref < Texture2D > & p_texture ) { <nl> + <nl> + specular = p_texture ; <nl> + update ( ) ; <nl> + } <nl> + <nl> + Ref < Texture2D > Sprite : : get_specular_map ( ) const { <nl> + <nl> + return specular ; <nl> + } <nl> + <nl> + void Sprite : : set_specular_color ( const Color & p_color ) { <nl> + specular_color = p_color ; <nl> + update ( ) ; <nl> + } <nl> + <nl> + Color Sprite : : get_specular_color ( ) const { <nl> + return specular_color ; <nl> + } <nl> + <nl> + void Sprite : : set_shininess ( float p_shininess ) { <nl> + shininess = CLAMP ( p_shininess , 0 . 0 , 1 . 0 ) ; <nl> + update ( ) ; <nl> + } <nl> + <nl> + float Sprite : : get_shininess ( ) const { <nl> + return shininess ; <nl> + } <nl> + <nl> Ref < Texture2D > Sprite : : get_texture ( ) const { <nl> <nl> return texture ; <nl> bool Sprite : : is_pixel_opaque ( const Point2 & p_point ) const { <nl> if ( vflip ) <nl> q . y = 1 . 0f - q . y ; <nl> q = q * src_rect . size + src_rect . position ; <nl> - # warning this need to be obtained from CanvasItem repeat mode when I add it <nl> + # warning this need to be obtained from CanvasItem new repeat mode ( but it needs to guess it from hierarchy , need to add a function for that ) <nl> bool is_repeat = false ; <nl> bool is_mirrored_repeat = false ; <nl> if ( is_repeat ) { <nl> void Sprite : : _bind_methods ( ) { <nl> ClassDB : : bind_method ( D_METHOD ( " set_normal_map " , " normal_map " ) , & Sprite : : set_normal_map ) ; <nl> ClassDB : : bind_method ( D_METHOD ( " get_normal_map " ) , & Sprite : : get_normal_map ) ; <nl> <nl> + ClassDB : : bind_method ( D_METHOD ( " set_specular_map " , " specular_map " ) , & Sprite : : set_specular_map ) ; <nl> + ClassDB : : bind_method ( D_METHOD ( " get_specular_map " ) , & Sprite : : get_specular_map ) ; <nl> + <nl> + ClassDB : : bind_method ( D_METHOD ( " set_specular_color " , " specular_color " ) , & Sprite : : set_specular_color ) ; <nl> + ClassDB : : bind_method ( D_METHOD ( " get_specular_color " ) , & Sprite : : get_specular_color ) ; <nl> + <nl> + ClassDB : : bind_method ( D_METHOD ( " set_shininess " , " shininess " ) , & Sprite : : set_shininess ) ; <nl> + ClassDB : : bind_method ( D_METHOD ( " get_shininess " ) , & Sprite : : get_shininess ) ; <nl> + <nl> ClassDB : : bind_method ( D_METHOD ( " set_centered " , " centered " ) , & Sprite : : set_centered ) ; <nl> ClassDB : : bind_method ( D_METHOD ( " is_centered " ) , & Sprite : : is_centered ) ; <nl> <nl> void Sprite : : _bind_methods ( ) { <nl> ADD_SIGNAL ( MethodInfo ( " texture_changed " ) ) ; <nl> <nl> ADD_PROPERTY ( PropertyInfo ( Variant : : OBJECT , " texture " , PROPERTY_HINT_RESOURCE_TYPE , " Texture2D " ) , " set_texture " , " get_texture " ) ; <nl> + ADD_GROUP ( " Lighting " , " " ) ; <nl> ADD_PROPERTY ( PropertyInfo ( Variant : : OBJECT , " normal_map " , PROPERTY_HINT_RESOURCE_TYPE , " Texture2D " ) , " set_normal_map " , " get_normal_map " ) ; <nl> + ADD_PROPERTY ( PropertyInfo ( Variant : : OBJECT , " specular_map " , PROPERTY_HINT_RESOURCE_TYPE , " Texture2D " ) , " set_specular_map " , " get_specular_map " ) ; <nl> + ADD_PROPERTY ( PropertyInfo ( Variant : : COLOR , " specular_color " , PROPERTY_HINT_COLOR_NO_ALPHA ) , " set_specular_color " , " get_specular_color " ) ; <nl> + 
ADD_PROPERTY ( PropertyInfo ( Variant : : REAL , " shininess " , PROPERTY_HINT_RANGE , " 0 , 1 , 0 . 01 " ) , " set_shininess " , " get_shininess " ) ; <nl> ADD_GROUP ( " Offset " , " " ) ; <nl> ADD_PROPERTY ( PropertyInfo ( Variant : : BOOL , " centered " ) , " set_centered " , " is_centered " ) ; <nl> ADD_PROPERTY ( PropertyInfo ( Variant : : VECTOR2 , " offset " ) , " set_offset " , " get_offset " ) ; <nl> Sprite : : Sprite ( ) { <nl> vflip = false ; <nl> region = false ; <nl> region_filter_clip = false ; <nl> + shininess = 1 . 0 ; <nl> + specular_color = Color ( 1 , 1 , 1 , 1 ) ; <nl> <nl> frame = 0 ; <nl> <nl> mmm a / scene / 2d / sprite . h <nl> ppp b / scene / 2d / sprite . h <nl> class Sprite : public Node2D { <nl> <nl> Ref < Texture2D > texture ; <nl> Ref < Texture2D > normal_map ; <nl> + Ref < Texture2D > specular ; <nl> + Color specular_color ; <nl> + float shininess ; <nl> <nl> bool centered ; <nl> Point2 offset ; <nl> class Sprite : public Node2D { <nl> void set_normal_map ( const Ref < Texture2D > & p_texture ) ; <nl> Ref < Texture2D > get_normal_map ( ) const ; <nl> <nl> + void set_specular_map ( const Ref < Texture2D > & p_texture ) ; <nl> + Ref < Texture2D > get_specular_map ( ) const ; <nl> + <nl> + void set_specular_color ( const Color & p_color ) ; <nl> + Color get_specular_color ( ) const ; <nl> + <nl> + void set_shininess ( float p_shininess ) ; <nl> + float get_shininess ( ) const ; <nl> + <nl> void set_centered ( bool p_center ) ; <nl> bool is_centered ( ) const ; <nl> <nl> mmm a / servers / visual / rasterizer / rasterizer . h <nl> ppp b / servers / visual / rasterizer / rasterizer . h <nl> class RasterizerStorage { <nl> virtual void render_target_disable_clear_request ( RID p_render_target ) = 0 ; <nl> virtual void render_target_do_clear_request ( RID p_render_target ) = 0 ; <nl> <nl> - / * CANVAS SHADOW * / <nl> - <nl> - virtual RID canvas_light_shadow_buffer_create ( int p_width ) = 0 ; <nl> - <nl> - / * LIGHT SHADOW MAPPING * / <nl> - <nl> - virtual RID canvas_light_occluder_create ( ) = 0 ; <nl> - virtual void canvas_light_occluder_set_polylines ( RID p_occluder , const PoolVector < Vector2 > & p_lines ) = 0 ; <nl> - <nl> virtual VS : : InstanceType get_base_type ( RID p_rid ) const = 0 ; <nl> virtual bool free ( RID p_rid ) = 0 ; <nl> <nl> class RasterizerCanvas { <nl> RID texture ; <nl> Vector2 texture_offset ; <nl> RID canvas ; <nl> - RID shadow_buffer ; <nl> + bool use_shadow ; <nl> int shadow_buffer_size ; <nl> float shadow_gradient_length ; <nl> VS : : CanvasLightShadowFilter shadow_filter ; <nl> Color shadow_color ; <nl> float shadow_smooth ; <nl> <nl> - void * texture_cache ; / / implementation dependent <nl> + / / void * texture_cache ; / / implementation dependent <nl> Rect2 rect_cache ; <nl> Transform2D xform_cache ; <nl> float radius_cache ; / / used for shadow far plane <nl> - CameraMatrix shadow_matrix_cache ; <nl> + / / CameraMatrix shadow_matrix_cache ; <nl> <nl> Transform2D light_shader_xform ; <nl> - Vector2 light_shader_pos ; <nl> + / / Vector2 light_shader_pos ; <nl> <nl> Light * shadows_next_ptr ; <nl> Light * filter_next_ptr ; <nl> class RasterizerCanvas { <nl> <nl> RID light_internal ; <nl> <nl> + int32_t render_index_cache ; <nl> + <nl> Light ( ) { <nl> enabled = true ; <nl> color = Color ( 1 , 1 , 1 ) ; <nl> class RasterizerCanvas { <nl> energy = 1 . 
0 ; <nl> item_shadow_mask = - 1 ; <nl> mode = VS : : CANVAS_LIGHT_MODE_ADD ; <nl> - texture_cache = NULL ; <nl> + / / texture_cache = NULL ; <nl> next_ptr = NULL ; <nl> mask_next_ptr = NULL ; <nl> filter_next_ptr = NULL ; <nl> + use_shadow = false ; <nl> shadow_buffer_size = 2048 ; <nl> shadow_gradient_length = 0 ; <nl> shadow_filter = VS : : CANVAS_LIGHT_FILTER_NONE ; <nl> shadow_smooth = 0 . 0 ; <nl> + render_index_cache = - 1 ; <nl> } <nl> } ; <nl> <nl> - virtual RID light_internal_create ( ) = 0 ; <nl> - virtual void light_internal_update ( RID p_rid , Light * p_light ) = 0 ; <nl> - virtual void light_internal_free ( RID p_rid ) = 0 ; <nl> - <nl> typedef uint64_t TextureBindingID ; <nl> <nl> virtual TextureBindingID request_texture_binding ( RID p_texture , RID p_normalmap , RID p_specular , VS : : CanvasItemTextureFilter p_filter , VS : : CanvasItemTextureRepeat p_repeat , RID p_multimesh ) = 0 ; <nl> class RasterizerCanvas { <nl> case Item : : Command : : TYPE_PRIMITIVE : { <nl> <nl> const Item : : CommandPrimitive * primitive = static_cast < const Item : : CommandPrimitive * > ( c ) ; <nl> - for ( int j = 0 ; j < primitive - > point_count ; j + + ) { <nl> + for ( uint32_t j = 0 ; j < primitive - > point_count ; j + + ) { <nl> if ( j = = 0 ) { <nl> r . position = primitive - > points [ 0 ] ; <nl> } else { <nl> class RasterizerCanvas { <nl> c = n ; <nl> } <nl> { <nl> - uint32_t cbc = MIN ( ( current_block + 1 ) , blocks . size ( ) ) ; <nl> + uint32_t cbc = MIN ( ( current_block + 1 ) , ( uint32_t ) blocks . size ( ) ) ; <nl> CommandBlock * blockptr = blocks . ptrw ( ) ; <nl> for ( uint32_t i = 0 ; i < cbc ; i + + ) { <nl> blockptr [ i ] . usage = 0 ; <nl> class RasterizerCanvas { <nl> bool enabled ; <nl> RID canvas ; <nl> RID polygon ; <nl> - RID polygon_buffer ; <nl> + RID occluder ; <nl> Rect2 aabb_cache ; <nl> Transform2D xform ; <nl> Transform2D xform_cache ; <nl> class RasterizerCanvas { <nl> } <nl> } ; <nl> <nl> - virtual void canvas_light_shadow_buffer_update ( RID p_buffer , const Transform2D & p_light_xform , int p_light_mask , float p_near , float p_far , LightOccluderInstance * p_occluders , CameraMatrix * p_xform_cache ) = 0 ; <nl> + virtual RID light_create ( ) = 0 ; <nl> + virtual void light_set_texture ( RID p_rid , RID p_texture ) = 0 ; <nl> + virtual void light_set_use_shadow ( RID p_rid , bool p_enable , int p_resolution ) = 0 ; <nl> + virtual void light_update_shadow ( RID p_rid , const Transform2D & p_light_xform , int p_light_mask , float p_near , float p_far , LightOccluderInstance * p_occluders ) = 0 ; <nl> <nl> - virtual void reset_canvas ( ) = 0 ; <nl> + virtual RID occluder_polygon_create ( ) = 0 ; <nl> + virtual void occluder_polygon_set_shape_as_lines ( RID p_occluder , const PoolVector < Vector2 > & p_lines ) = 0 ; <nl> + virtual void occluder_polygon_set_cull_mode ( RID p_occluder , VS : : CanvasOccluderPolygonCullMode p_mode ) = 0 ; <nl> <nl> virtual void draw_window_margins ( int * p_margins , RID * p_margin_textures ) = 0 ; <nl> <nl> + virtual bool free ( RID p_rid ) = 0 ; <nl> virtual void update ( ) = 0 ; <nl> <nl> RasterizerCanvas ( ) { singleton = this ; } <nl> mmm a / servers / visual / rasterizer / rasterizer_canvas_rd . cpp <nl> ppp b / servers / visual / rasterizer / rasterizer_canvas_rd . 
cpp <nl> void RasterizerCanvasRD : : _update_transform_to_mat4 ( const Transform & p_transform , <nl> p_mat4 [ 15 ] = 1 ; <nl> } <nl> <nl> + void RasterizerCanvasRD : : _update_specular_shininess ( const Color & p_transform , uint32_t * r_ss ) { <nl> + <nl> + * r_ss = uint32_t ( CLAMP ( p_transform . a * 255 . 0 , 0 , 255 ) ) < < 24 ; <nl> + * r_ss | = uint32_t ( CLAMP ( p_transform . b * 255 . 0 , 0 , 255 ) ) < < 16 ; <nl> + * r_ss | = uint32_t ( CLAMP ( p_transform . g * 255 . 0 , 0 , 255 ) ) < < 8 ; <nl> + * r_ss | = uint32_t ( CLAMP ( p_transform . r * 255 . 0 , 0 , 255 ) ) ; <nl> + } <nl> + <nl> RID RasterizerCanvasRD : : _create_texture_binding ( RID p_texture , RID p_normalmap , RID p_specular , VisualServer : : CanvasItemTextureFilter p_filter , VisualServer : : CanvasItemTextureRepeat p_repeat , RID p_multimesh ) { <nl> <nl> Vector < RD : : Uniform > uniform_set ; <nl> void RasterizerCanvasRD : : free_polygon ( PolygonID p_polygon ) { <nl> polygon_buffers . polygons . erase ( p_polygon ) ; <nl> } <nl> <nl> - Size2i RasterizerCanvasRD : : _bind_texture_binding ( TextureBindingID p_binding , RD : : DrawListID p_draw_list ) { <nl> + Size2i RasterizerCanvasRD : : _bind_texture_binding ( TextureBindingID p_binding , RD : : DrawListID p_draw_list , uint32_t & flags ) { <nl> <nl> TextureBinding * * texture_binding_ptr = bindings . texture_bindings . getptr ( p_binding ) ; <nl> ERR_FAIL_COND_V ( ! texture_binding_ptr , Size2i ( ) ) ; <nl> TextureBinding * texture_binding = * texture_binding_ptr ; <nl> <nl> + if ( texture_binding - > key . normalmap . is_valid ( ) ) { <nl> + flags | = FLAGS_DEFAULT_NORMAL_MAP_USED ; <nl> + } <nl> + if ( texture_binding - > key . specular . is_valid ( ) ) { <nl> + flags | = FLAGS_DEFAULT_SPECULAR_MAP_USED ; <nl> + } <nl> + <nl> if ( ! RD : : get_singleton ( ) - > uniform_set_is_valid ( texture_binding - > uniform_set ) ) { <nl> / / texture may have changed ( erased or replaced , see if we can fix ) <nl> texture_binding - > uniform_set = _create_texture_binding ( texture_binding - > key . texture , texture_binding - > key . normalmap , texture_binding - > key . specular , texture_binding - > key . texture_filter , texture_binding - > key . texture_repeat , texture_binding - > key . multimesh ) ; <nl> Size2i RasterizerCanvasRD : : _bind_texture_binding ( TextureBindingID p_binding , RD : <nl> } <nl> <nl> / / / / / / / / / / / / / / / / / / / / <nl> - void RasterizerCanvasRD : : _render_item ( RD : : DrawListID p_draw_list , const Item * p_item , RenderTargetFormat p_render_target_format , RD : : TextureSamples p_samples , const Color & p_modulate , const Transform2D & p_canvas_transform_inverse , Item * & current_clip ) { <nl> + void RasterizerCanvasRD : : _render_item ( RD : : DrawListID p_draw_list , const Item * p_item , RenderTargetFormat p_render_target_format , RD : : TextureSamples p_samples , const Transform2D & p_canvas_transform_inverse , Item * & current_clip , Light * p_lights ) { <nl> <nl> / / create an empty push constant <nl> PushConstant push_constant ; <nl> Transform2D base_transform = p_canvas_transform_inverse * p_item - > final_transform ; <nl> _update_transform_2d_to_mat2x3 ( base_transform , push_constant . world ) ; <nl> + <nl> + Color base_color = p_item - > final_modulate ; <nl> + <nl> for ( int i = 0 ; i < 4 ; i + + ) { <nl> push_constant . modulation [ i ] = 0 ; <nl> push_constant . ninepatch_margins [ i ] = 0 ; <nl> void RasterizerCanvasRD : : _render_item ( RD : : DrawListID p_draw_list , const Item * p_ <nl> push_constant . 
dst_rect [ i ] = 0 ; <nl> } <nl> push_constant . flags = 0 ; <nl> - push_constant . specular_shininess = 0xFFFFFFFF ; <nl> push_constant . color_texture_pixel_size [ 0 ] = 0 ; <nl> push_constant . color_texture_pixel_size [ 1 ] = 0 ; <nl> <nl> void RasterizerCanvasRD : : _render_item ( RD : : DrawListID p_draw_list , const Item * p_ <nl> push_constant . pad [ 2 ] = 0 ; <nl> push_constant . pad [ 3 ] = 0 ; <nl> <nl> + push_constant . lights [ 0 ] = 0 ; <nl> + push_constant . lights [ 1 ] = 0 ; <nl> + push_constant . lights [ 2 ] = 0 ; <nl> + push_constant . lights [ 3 ] = 0 ; <nl> + <nl> + uint32_t base_flags = 0 ; <nl> + <nl> + { <nl> + Light * light = p_lights ; <nl> + <nl> + uint16_t light_count = 0 ; <nl> + while ( light ) { <nl> + <nl> + if ( light - > render_index_cache > = 0 & & p_item - > light_mask & light - > item_mask & & p_item - > z_final > = light - > z_min & & p_item - > z_final < = light - > z_max & & p_item - > global_rect_cache . intersects_transformed ( light - > xform_cache , light - > rect_cache ) ) { <nl> + <nl> + uint32_t light_index = light - > render_index_cache ; <nl> + push_constant . lights [ light_count > > 2 ] | = light_index < < ( ( light_count & 3 ) * 8 ) ; <nl> + light_count + + ; <nl> + if ( light - > mode = = VS : : CANVAS_LIGHT_MODE_MASK ) { <nl> + base_flags | = FLAGS_USING_LIGHT_MASK ; <nl> + } <nl> + if ( light_count = = MAX_LIGHTS_PER_ITEM ) { <nl> + break ; <nl> + } <nl> + } <nl> + light = light - > next_ptr ; <nl> + } <nl> + <nl> + base_flags | = light_count < < FLAGS_LIGHT_COUNT_SHIFT ; <nl> + } <nl> + <nl> PipelineVariants * pipeline_variants = & shader . pipeline_variants ; <nl> <nl> bool reclip = false ; <nl> void RasterizerCanvasRD : : _render_item ( RD : : DrawListID p_draw_list , const Item * p_ <nl> const Item : : Command * c = p_item - > commands ; <nl> while ( c ) { <nl> <nl> - push_constant . flags = 0 ; / / reset on each command for sanity <nl> + push_constant . flags = base_flags ; / / reset on each command for sanity <nl> + push_constant . specular_shininess = 0xFFFFFFFF ; <nl> <nl> switch ( c - > type ) { <nl> case Item : : Command : : TYPE_RECT : { <nl> void RasterizerCanvasRD : : _render_item ( RD : : DrawListID p_draw_list , const Item * p_ <nl> <nl> Size2 texpixel_size ; <nl> { <nl> - texpixel_size = _bind_texture_binding ( rect - > texture_binding . binding_id , p_draw_list ) ; <nl> + texpixel_size = _bind_texture_binding ( rect - > texture_binding . binding_id , p_draw_list , push_constant . flags ) ; <nl> texpixel_size . x = 1 . 0 / texpixel_size . x ; <nl> texpixel_size . y = 1 . 0 / texpixel_size . y ; <nl> } <nl> <nl> + if ( rect - > specular_shininess . a < 0 . 999 ) { <nl> + push_constant . flags | = FLAGS_DEFAULT_SPECULAR_MAP_USED ; <nl> + } <nl> + <nl> + _update_specular_shininess ( rect - > specular_shininess , & push_constant . specular_shininess ) ; <nl> + <nl> Rect2 src_rect ; <nl> Rect2 dst_rect ; <nl> <nl> void RasterizerCanvasRD : : _render_item ( RD : : DrawListID p_draw_list , const Item * p_ <nl> texpixel_size = Vector2 ( 1 , 1 ) ; <nl> } <nl> <nl> - push_constant . modulation [ 0 ] = rect - > modulate . r * p_modulate . r ; <nl> - push_constant . modulation [ 1 ] = rect - > modulate . g * p_modulate . g ; <nl> - push_constant . modulation [ 2 ] = rect - > modulate . b * p_modulate . b ; <nl> - push_constant . modulation [ 3 ] = rect - > modulate . a ; <nl> + push_constant . modulation [ 0 ] = rect - > modulate . r * base_color . r ; <nl> + push_constant . modulation [ 1 ] = rect - > modulate . g * base_color . 
g ; <nl> + push_constant . modulation [ 2 ] = rect - > modulate . b * base_color . b ; <nl> + push_constant . modulation [ 3 ] = rect - > modulate . a * base_color . a ; <nl> <nl> push_constant . src_rect [ 0 ] = src_rect . position . x ; <nl> push_constant . src_rect [ 1 ] = src_rect . position . y ; <nl> void RasterizerCanvasRD : : _render_item ( RD : : DrawListID p_draw_list , const Item * p_ <nl> <nl> Size2 texpixel_size ; <nl> { <nl> - texpixel_size = _bind_texture_binding ( np - > texture_binding . binding_id , p_draw_list ) ; <nl> + texpixel_size = _bind_texture_binding ( np - > texture_binding . binding_id , p_draw_list , push_constant . flags ) ; <nl> texpixel_size . x = 1 . 0 / texpixel_size . x ; <nl> texpixel_size . y = 1 . 0 / texpixel_size . y ; <nl> } <nl> <nl> + if ( np - > specular_shininess . a < 0 . 999 ) { <nl> + push_constant . flags | = FLAGS_DEFAULT_SPECULAR_MAP_USED ; <nl> + } <nl> + <nl> + _update_specular_shininess ( np - > specular_shininess , & push_constant . specular_shininess ) ; <nl> + <nl> Rect2 src_rect ; <nl> Rect2 dst_rect ( np - > rect . position . x , np - > rect . position . y , np - > rect . size . x , np - > rect . size . y ) ; <nl> <nl> void RasterizerCanvasRD : : _render_item ( RD : : DrawListID p_draw_list , const Item * p_ <nl> } <nl> } <nl> <nl> - push_constant . modulation [ 0 ] = np - > color . r * p_modulate . r ; <nl> - push_constant . modulation [ 1 ] = np - > color . g * p_modulate . g ; <nl> - push_constant . modulation [ 2 ] = np - > color . b * p_modulate . b ; <nl> - push_constant . modulation [ 3 ] = np - > color . a * p_modulate . a ; <nl> + push_constant . modulation [ 0 ] = np - > color . r * base_color . r ; <nl> + push_constant . modulation [ 1 ] = np - > color . g * base_color . g ; <nl> + push_constant . modulation [ 2 ] = np - > color . b * base_color . b ; <nl> + push_constant . modulation [ 3 ] = np - > color . a * base_color . a ; <nl> <nl> push_constant . src_rect [ 0 ] = src_rect . position . x ; <nl> push_constant . src_rect [ 1 ] = src_rect . position . y ; <nl> void RasterizerCanvasRD : : _render_item ( RD : : DrawListID p_draw_list , const Item * p_ <nl> <nl> Size2 texpixel_size ; <nl> { <nl> - texpixel_size = _bind_texture_binding ( polygon - > texture_binding . binding_id , p_draw_list ) ; <nl> + texpixel_size = _bind_texture_binding ( polygon - > texture_binding . binding_id , p_draw_list , push_constant . flags ) ; <nl> texpixel_size . x = 1 . 0 / texpixel_size . x ; <nl> texpixel_size . y = 1 . 0 / texpixel_size . y ; <nl> } <nl> <nl> - push_constant . modulation [ 0 ] = p_modulate . r ; <nl> - push_constant . modulation [ 1 ] = p_modulate . g ; <nl> - push_constant . modulation [ 2 ] = p_modulate . b ; <nl> - push_constant . modulation [ 3 ] = p_modulate . a ; <nl> + if ( polygon - > specular_shininess . a < 0 . 999 ) { <nl> + push_constant . flags | = FLAGS_DEFAULT_SPECULAR_MAP_USED ; <nl> + } <nl> + <nl> + _update_specular_shininess ( polygon - > specular_shininess , & push_constant . specular_shininess ) ; <nl> + <nl> + push_constant . modulation [ 0 ] = base_color . r ; <nl> + push_constant . modulation [ 1 ] = base_color . g ; <nl> + push_constant . modulation [ 2 ] = base_color . b ; <nl> + push_constant . modulation [ 3 ] = base_color . a ; <nl> <nl> for ( int j = 0 ; j < 4 ; j + + ) { <nl> push_constant . 
src_rect [ j ] = 0 ; <nl> void RasterizerCanvasRD : : _render_item ( RD : : DrawListID p_draw_list , const Item * p_ <nl> / / bind textures <nl> <nl> { <nl> - _bind_texture_binding ( primitive - > texture_binding . binding_id , p_draw_list ) ; <nl> + _bind_texture_binding ( primitive - > texture_binding . binding_id , p_draw_list , push_constant . flags ) ; <nl> } <nl> <nl> + if ( primitive - > specular_shininess . a < 0 . 999 ) { <nl> + push_constant . flags | = FLAGS_DEFAULT_SPECULAR_MAP_USED ; <nl> + } <nl> + <nl> + _update_specular_shininess ( primitive - > specular_shininess , & push_constant . specular_shininess ) ; <nl> + <nl> RD : : get_singleton ( ) - > draw_list_bind_index_array ( p_draw_list , primitive_arrays . index_array [ MIN ( 3 , primitive - > point_count ) - 1 ] ) ; <nl> <nl> for ( uint32_t j = 0 ; j < MIN ( 3 , primitive - > point_count ) ; j + + ) { <nl> void RasterizerCanvasRD : : _render_item ( RD : : DrawListID p_draw_list , const Item * p_ <nl> push_constant . points [ j * 2 + 1 ] = primitive - > points [ j ] . y ; <nl> push_constant . uvs [ j * 2 + 0 ] = primitive - > uvs [ j ] . x ; <nl> push_constant . uvs [ j * 2 + 1 ] = primitive - > uvs [ j ] . y ; <nl> - Color col = primitive - > colors [ j ] * p_modulate ; <nl> + Color col = primitive - > colors [ j ] * base_color ; <nl> push_constant . colors [ j * 2 + 0 ] = ( uint32_t ( Math : : make_half_float ( col . g ) ) < < 16 ) | Math : : make_half_float ( col . r ) ; <nl> push_constant . colors [ j * 2 + 1 ] = ( uint32_t ( Math : : make_half_float ( col . a ) ) < < 16 ) | Math : : make_half_float ( col . b ) ; <nl> } <nl> void RasterizerCanvasRD : : _render_item ( RD : : DrawListID p_draw_list , const Item * p_ <nl> push_constant . points [ j * 2 + 1 ] = primitive - > points [ j + 1 ] . y ; <nl> push_constant . uvs [ j * 2 + 0 ] = primitive - > uvs [ j + 1 ] . x ; <nl> push_constant . uvs [ j * 2 + 1 ] = primitive - > uvs [ j + 1 ] . y ; <nl> - Color col = primitive - > colors [ j + 1 ] * p_modulate ; <nl> + Color col = primitive - > colors [ j + 1 ] * base_color ; <nl> push_constant . colors [ j * 2 + 0 ] = ( uint32_t ( Math : : make_half_float ( col . g ) ) < < 16 ) | Math : : make_half_float ( col . r ) ; <nl> push_constant . colors [ j * 2 + 1 ] = ( uint32_t ( Math : : make_half_float ( col . a ) ) < < 16 ) | Math : : make_half_float ( col . b ) ; <nl> } <nl> void RasterizerCanvasRD : : _render_item ( RD : : DrawListID p_draw_list , const Item * p_ <nl> } <nl> } <nl> <nl> - void RasterizerCanvasRD : : _render_items ( RID p_to_render_target , int p_item_count , const Color & p_modulate , const Transform2D & p_transform ) { <nl> + void RasterizerCanvasRD : : _render_items ( RID p_to_render_target , int p_item_count , const Transform2D & p_canvas_transform_inverse , Light * p_lights ) { <nl> <nl> Item * current_clip = NULL ; <nl> <nl> RenderTargetFormat render_target_format = RENDER_TARGET_FORMAT_8_BIT_INT ; <nl> - Transform2D canvas_transform_inverse = p_transform . affine_inverse ( ) ; <nl> + Transform2D canvas_transform_inverse = p_canvas_transform_inverse ; <nl> <nl> RID framebuffer = storage - > render_target_get_rd_framebuffer ( p_to_render_target ) ; <nl> <nl> void RasterizerCanvasRD : : _render_items ( RID p_to_render_target , int p_item_count , <nl> RD : : get_singleton ( ) - > draw_list_bind_uniform_set ( draw_list , shader . 
default_skeleton_uniform_set , 1 ) ; <nl> } <nl> <nl> - _render_item ( draw_list , ci , render_target_format , texture_samples , p_modulate , canvas_transform_inverse , current_clip ) ; <nl> + _render_item ( draw_list , ci , render_target_format , texture_samples , canvas_transform_inverse , current_clip , p_lights ) ; <nl> } <nl> <nl> RD : : get_singleton ( ) - > draw_list_end ( ) ; <nl> void RasterizerCanvasRD : : _update_canvas_state_uniform_set ( ) { <nl> <nl> Vector < RD : : Uniform > uniforms ; <nl> <nl> - RD : : Uniform u ; <nl> - u . type = RD : : UNIFORM_TYPE_UNIFORM_BUFFER ; <nl> - u . binding = 0 ; <nl> - u . ids . push_back ( state . canvas_state_buffer ) ; <nl> - uniforms . push_back ( u ) ; <nl> + { <nl> + RD : : Uniform u ; <nl> + u . type = RD : : UNIFORM_TYPE_UNIFORM_BUFFER ; <nl> + u . binding = 0 ; <nl> + u . ids . push_back ( state . canvas_state_buffer ) ; <nl> + uniforms . push_back ( u ) ; <nl> + } <nl> + <nl> + { <nl> + RD : : Uniform u ; <nl> + u . type = RD : : UNIFORM_TYPE_UNIFORM_BUFFER ; <nl> + u . binding = 1 ; <nl> + u . ids . push_back ( state . lights_uniform_buffer ) ; <nl> + uniforms . push_back ( u ) ; <nl> + } <nl> + <nl> + { <nl> + <nl> + RD : : Uniform u_lights ; <nl> + u_lights . type = RD : : UNIFORM_TYPE_TEXTURE ; <nl> + u_lights . binding = 2 ; <nl> + <nl> + RD : : Uniform u_shadows ; <nl> + u_shadows . type = RD : : UNIFORM_TYPE_TEXTURE ; <nl> + u_shadows . binding = 3 ; <nl> + <nl> + / / lights <nl> + for ( uint32_t i = 0 ; i < MAX_LIGHT_TEXTURES ; i + + ) { <nl> + if ( i < canvas_light_owner . get_rid_count ( ) ) { <nl> + CanvasLight * cl = canvas_light_owner . get_rid_by_index ( i ) ; <nl> + cl - > texture_index = i ; <nl> + RID rd_texture ; <nl> + <nl> + if ( cl - > texture . is_valid ( ) ) { <nl> + rd_texture = storage - > texture_get_rd_texture ( cl - > texture ) ; <nl> + } <nl> + if ( rd_texture . is_valid ( ) ) { <nl> + u_lights . ids . push_back ( rd_texture ) ; <nl> + } else { <nl> + u_lights . ids . push_back ( default_textures . white_texture ) ; <nl> + } <nl> + if ( cl - > shadow . texture . is_valid ( ) ) { <nl> + u_shadows . ids . push_back ( cl - > shadow . texture ) ; <nl> + } else { <nl> + u_shadows . ids . push_back ( default_textures . black_texture ) ; <nl> + } <nl> + } else { <nl> + u_lights . ids . push_back ( default_textures . white_texture ) ; <nl> + u_shadows . ids . push_back ( default_textures . black_texture ) ; <nl> + } <nl> + } <nl> + <nl> + / / in case there are more <nl> + for ( uint32_t i = MAX_LIGHT_TEXTURES ; i < canvas_light_owner . get_rid_count ( ) ; i + + ) { <nl> + CanvasLight * cl = canvas_light_owner . get_rid_by_index ( i ) ; <nl> + cl - > texture_index = - 1 ; / / make invalid ( no texture ) <nl> + } <nl> + <nl> + uniforms . push_back ( u_lights ) ; <nl> + uniforms . push_back ( u_shadows ) ; <nl> + } <nl> <nl> state . canvas_state_uniform_set = RD : : get_singleton ( ) - > uniform_set_create ( uniforms , shader . default_version_rd_shader , 3 ) ; / / uses index 3 <nl> } <nl> void RasterizerCanvasRD : : canvas_render_items ( RID p_to_render_target , Item * p_ite <nl> <nl> / / setup canvas state uniforms if needed <nl> _update_canvas_state_uniform_set ( ) ; <nl> + Transform2D canvas_transform_inverse = p_canvas_transform . affine_inverse ( ) ; <nl> <nl> { <nl> / / update canvas state uniform buffer <nl> void RasterizerCanvasRD : : canvas_render_items ( RID p_to_render_target , Item * p_ite <nl> screen_transform . scale ( Vector3 ( 2 . 0f / ssize . width , 2 . 0f / ssize . height , 1 . 
0f ) ) ; <nl> _update_transform_to_mat4 ( screen_transform , state_buffer . screen_transform ) ; <nl> _update_transform_2d_to_mat4 ( p_canvas_transform , state_buffer . canvas_transform ) ; <nl> + <nl> + Transform2D normal_transform = p_canvas_transform ; <nl> + normal_transform . elements [ 0 ] . normalize ( ) ; <nl> + normal_transform . elements [ 1 ] . normalize ( ) ; <nl> + normal_transform . elements [ 2 ] = Vector2 ( ) ; <nl> + _update_transform_2d_to_mat4 ( normal_transform , state_buffer . canvas_normal_transform ) ; <nl> + <nl> + state_buffer . canvas_modulate [ 0 ] = p_modulate . r ; <nl> + state_buffer . canvas_modulate [ 1 ] = p_modulate . g ; <nl> + state_buffer . canvas_modulate [ 2 ] = p_modulate . b ; <nl> + state_buffer . canvas_modulate [ 3 ] = p_modulate . a ; <nl> + <nl> RD : : get_singleton ( ) - > buffer_update ( state . canvas_state_buffer , 0 , sizeof ( State : : Buffer ) , & state_buffer , true ) ; <nl> } <nl> <nl> + / / setup lights if exist <nl> + <nl> + { <nl> + <nl> + Light * l = p_light_list ; <nl> + uint32_t index = 0 ; <nl> + <nl> + while ( l ) { <nl> + <nl> + if ( index = = MAX_RENDER_LIGHTS ) { <nl> + l - > render_index_cache = - 1 ; <nl> + l = l - > next_ptr ; <nl> + continue ; <nl> + } <nl> + <nl> + CanvasLight * clight = canvas_light_owner . getornull ( l - > light_internal ) ; <nl> + if ( ! clight | | clight - > texture_index < 0 ) { / / unused or invalid texture <nl> + l - > render_index_cache = - 1 ; <nl> + l = l - > next_ptr ; <nl> + ERR_CONTINUE ( ! clight ) ; <nl> + } <nl> + Transform2D to_light_xform = ( p_canvas_transform * l - > light_shader_xform ) . affine_inverse ( ) ; <nl> + <nl> + Vector2 canvas_light_pos = p_canvas_transform . xform ( l - > xform . get_origin ( ) ) ; / / convert light position to canvas coordinates , as all computation is done in canvas coords to avoid precision loss <nl> + state . light_uniforms [ index ] . position [ 0 ] = canvas_light_pos . x ; <nl> + state . light_uniforms [ index ] . position [ 1 ] = canvas_light_pos . y ; <nl> + <nl> + _update_transform_2d_to_mat2x4 ( to_light_xform , state . light_uniforms [ index ] . matrix ) ; <nl> + <nl> + state . light_uniforms [ index ] . height = l - > height * ( p_canvas_transform . elements [ 0 ] . length ( ) + p_canvas_transform . elements [ 1 ] . length ( ) ) * 0 . 5 ; / / approximate height conversion to the canvas size , since all calculations are done in canvas coords to avoid precision loss <nl> + for ( int i = 0 ; i < 4 ; i + + ) { <nl> + state . light_uniforms [ index ] . shadow_color [ i ] = l - > shadow_color [ i ] ; <nl> + state . light_uniforms [ index ] . color [ i ] = l - > color [ i ] ; <nl> + } <nl> + <nl> + state . light_uniforms [ index ] . color [ 3 ] = l - > energy ; / / use alpha for energy , so base color can go separate <nl> + <nl> + if ( clight - > shadow . texture . is_valid ( ) ) { <nl> + state . light_uniforms [ index ] . shadow_pixel_size = 1 . 0 / clight - > shadow . size ; <nl> + } else { <nl> + state . light_uniforms [ index ] . shadow_pixel_size = 1 . 0 ; <nl> + } <nl> + state . light_uniforms [ index ] . flags = clight - > texture_index ; <nl> + state . light_uniforms [ index ] . flags | = l - > mode < < LIGHT_FLAGS_BLEND_SHIFT ; <nl> + <nl> + l - > render_index_cache = index ; <nl> + <nl> + index + + ; <nl> + l = l - > next_ptr ; <nl> + } <nl> + <nl> + if ( index > 0 ) { <nl> + RD : : get_singleton ( ) - > buffer_update ( state . lights_uniform_buffer , 0 , sizeof ( LightUniform ) * index , & state . 
light_uniforms [ 0 ] , true ) ; <nl> + } <nl> + } <nl> + <nl> / / fill the list until rendering is possible . <nl> Item * ci = p_item_list ; <nl> while ( ci ) { <nl> void RasterizerCanvasRD : : canvas_render_items ( RID p_to_render_target , Item * p_ite <nl> <nl> bool backbuffer_copy = ci - > copy_back_buffer ; / / | | shader uses SCREEN_TEXTURE <nl> if ( ! ci - > next | | backbuffer_copy | | item_count = = MAX_RENDER_ITEMS - 1 ) { <nl> - _render_items ( p_to_render_target , item_count , p_modulate , p_canvas_transform ) ; <nl> + _render_items ( p_to_render_target , item_count , canvas_transform_inverse , p_light_list ) ; <nl> / / then reset <nl> item_count = 0 ; <nl> } <nl> void RasterizerCanvasRD : : canvas_render_items ( RID p_to_render_target , Item * p_ite <nl> } <nl> } <nl> <nl> + RID RasterizerCanvasRD : : light_create ( ) { <nl> + <nl> + CanvasLight canvas_light ; <nl> + canvas_light . shadow . size = 0 ; <nl> + canvas_light . texture_index = - 1 ; <nl> + return canvas_light_owner . make_rid ( canvas_light ) ; <nl> + } <nl> + <nl> + void RasterizerCanvasRD : : light_set_texture ( RID p_rid , RID p_texture ) { <nl> + CanvasLight * cl = canvas_light_owner . getornull ( p_rid ) ; <nl> + ERR_FAIL_COND ( ! cl ) ; <nl> + if ( cl - > texture = = p_texture ) { <nl> + return ; <nl> + } <nl> + <nl> + cl - > texture = p_texture ; <nl> + <nl> + / / canvas state uniform set needs updating <nl> + if ( state . canvas_state_uniform_set . is_valid ( ) & & RD : : get_singleton ( ) - > uniform_set_is_valid ( state . canvas_state_uniform_set ) ) { <nl> + RD : : get_singleton ( ) - > free ( state . canvas_state_uniform_set ) ; <nl> + } <nl> + } <nl> + void RasterizerCanvasRD : : light_set_use_shadow ( RID p_rid , bool p_enable , int p_resolution ) { <nl> + CanvasLight * cl = canvas_light_owner . getornull ( p_rid ) ; <nl> + ERR_FAIL_COND ( ! cl ) ; <nl> + ERR_FAIL_COND ( p_resolution < 64 ) ; <nl> + if ( cl - > shadow . texture . is_valid ( ) = = p_enable & & p_resolution = = cl - > shadow . size ) { <nl> + return ; <nl> + } <nl> + <nl> + if ( cl - > shadow . texture . is_valid ( ) ) { <nl> + <nl> + RD : : get_singleton ( ) - > free ( cl - > shadow . uniform_set ) ; <nl> + cl - > shadow . uniform_set = RID ( ) ; <nl> + <nl> + for ( int i = 0 ; i < 4 ; i + + ) { <nl> + RD : : get_singleton ( ) - > free ( cl - > shadow . render_fb [ i ] ) ; <nl> + RD : : get_singleton ( ) - > free ( cl - > shadow . render_textures [ i ] ) ; <nl> + cl - > shadow . render_fb [ i ] = RID ( ) ; <nl> + cl - > shadow . render_textures [ i ] = RID ( ) ; <nl> + } <nl> + RD : : get_singleton ( ) - > free ( cl - > shadow . fix_fb ) ; <nl> + RD : : get_singleton ( ) - > free ( cl - > shadow . texture ) ; <nl> + <nl> + cl - > shadow . fix_fb = RID ( ) ; <nl> + cl - > shadow . texture = RID ( ) ; <nl> + } <nl> + <nl> + if ( p_enable ) { <nl> + <nl> + { / / texture <nl> + RD : : TextureFormat tf ; <nl> + tf . type = RD : : TEXTURE_TYPE_2D ; <nl> + tf . width = p_resolution ; <nl> + tf . height = 1 ; <nl> + tf . usage_bits = RD : : TEXTURE_USAGE_COLOR_ATTACHMENT_BIT | RD : : TEXTURE_USAGE_SAMPLING_BIT ; <nl> + tf . format = RD : : DATA_FORMAT_R32_SFLOAT ; <nl> + <nl> + cl - > shadow . texture = RD : : get_singleton ( ) - > texture_create ( tf , RD : : TextureView ( ) ) ; <nl> + <nl> + Vector < RID > fb_textures ; <nl> + fb_textures . push_back ( cl - > shadow . texture ) ; <nl> + cl - > shadow . fix_fb = RD : : get_singleton ( ) - > framebuffer_create ( fb_textures ) ; <nl> + } <nl> + { <nl> + RD : : TextureFormat tf ; <nl> + tf . 
type = RD : : TEXTURE_TYPE_2D ; <nl> + tf . width = p_resolution / 2 ; <nl> + tf . height = 1 ; <nl> + tf . usage_bits = RD : : TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT ; <nl> + tf . format = RD : : get_singleton ( ) - > texture_is_format_supported_for_usage ( RD : : DATA_FORMAT_X8_D24_UNORM_PACK32 , RD : : TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT ) ? RD : : DATA_FORMAT_X8_D24_UNORM_PACK32 : RD : : DATA_FORMAT_D32_SFLOAT ; <nl> + / / chunks to write <nl> + cl - > shadow . render_depth = RD : : get_singleton ( ) - > texture_create ( tf , RD : : TextureView ( ) ) ; <nl> + <nl> + RD : : Uniform tex_uniforms ; <nl> + tex_uniforms . type = RD : : UNIFORM_TYPE_SAMPLER_WITH_TEXTURE ; <nl> + tex_uniforms . binding = 0 ; <nl> + <nl> + for ( int i = 0 ; i < 4 ; i + + ) { <nl> + tf . usage_bits = RD : : TEXTURE_USAGE_SAMPLING_BIT | RD : : TEXTURE_USAGE_COLOR_ATTACHMENT_BIT ; <nl> + tf . format = RD : : DATA_FORMAT_R32_SFLOAT ; <nl> + cl - > shadow . render_textures [ i ] = RD : : get_singleton ( ) - > texture_create ( tf , RD : : TextureView ( ) ) ; <nl> + Vector < RID > textures ; <nl> + textures . push_back ( cl - > shadow . render_textures [ i ] ) ; <nl> + textures . push_back ( cl - > shadow . render_depth ) ; <nl> + cl - > shadow . render_fb [ i ] = RD : : get_singleton ( ) - > framebuffer_create ( textures ) ; <nl> + <nl> + tex_uniforms . ids . push_back ( default_samplers . samplers [ VS : : CANVAS_ITEM_TEXTURE_FILTER_NEAREST ] [ VS : : CANVAS_ITEM_TEXTURE_REPEAT_DISABLED ] ) ; <nl> + tex_uniforms . ids . push_back ( cl - > shadow . render_textures [ i ] ) ; <nl> + } <nl> + <nl> + Vector < RD : : Uniform > tex_uniforms_set ; <nl> + tex_uniforms_set . push_back ( tex_uniforms ) ; <nl> + cl - > shadow . uniform_set = RD : : get_singleton ( ) - > uniform_set_create ( tex_uniforms_set , shadow_render . shader_fix . version_get_shader ( shadow_render . shader_fix_version , 0 ) , 0 ) ; <nl> + } <nl> + } <nl> + <nl> + / / canvas state uniform set needs updating <nl> + if ( state . canvas_state_uniform_set . is_valid ( ) & & RD : : get_singleton ( ) - > uniform_set_is_valid ( state . canvas_state_uniform_set ) ) { <nl> + RD : : get_singleton ( ) - > free ( state . canvas_state_uniform_set ) ; <nl> + } <nl> + } <nl> + <nl> + void RasterizerCanvasRD : : light_update_shadow ( RID p_rid , const Transform2D & p_light_xform , int p_light_mask , float p_near , float p_far , LightOccluderInstance * p_occluders ) { <nl> + <nl> + CanvasLight * cl = canvas_light_owner . getornull ( p_rid ) ; <nl> + ERR_FAIL_COND ( cl - > shadow . texture . is_null ( ) ) ; <nl> + <nl> + for ( int i = 0 ; i < 4 ; i + + ) { <nl> + <nl> + / / make sure it remains orthogonal , makes easy to read angle later <nl> + <nl> + / / light . basis . scale ( Vector3 ( to_light . elements [ 0 ] . length ( ) , to_light . elements [ 1 ] . length ( ) , 1 ) ) ; <nl> + <nl> + Vector < Color > cc ; <nl> + cc . push_back ( Color ( p_far , p_far , p_far , 1 . 0 ) ) ; <nl> + RD : : DrawListID draw_list = RD : : get_singleton ( ) - > draw_list_begin ( cl - > shadow . render_fb [ i ] , RD : : INITIAL_ACTION_CLEAR , RD : : FINAL_ACTION_READ_COLOR_DISCARD_DEPTH , cc ) ; <nl> + <nl> + CameraMatrix projection ; <nl> + { <nl> + real_t fov = 90 ; <nl> + real_t nearp = p_near ; <nl> + real_t farp = p_far ; <nl> + real_t aspect = 1 . 0 ; <nl> + <nl> + real_t ymax = nearp * Math : : tan ( Math : : deg2rad ( fov * 0 . 5 ) ) ; <nl> + real_t ymin = - ymax ; <nl> + real_t xmin = ymin * aspect ; <nl> + real_t xmax = ymax * aspect ; <nl> + <nl> + projection . 
set_frustum ( xmin , xmax , ymin , ymax , nearp , farp ) ; <nl> + } <nl> + <nl> + Vector3 cam_target = Basis ( Vector3 ( 0 , 0 , Math_PI * 2 * ( i / 4 . 0 ) ) ) . xform ( Vector3 ( 0 , 1 , 0 ) ) ; <nl> + projection = projection * CameraMatrix ( Transform ( ) . looking_at ( cam_target , Vector3 ( 0 , 0 , - 1 ) ) . affine_inverse ( ) ) ; <nl> + <nl> + ShadowRenderPushConstant push_constant ; <nl> + for ( int y = 0 ; y < 4 ; y + + ) { <nl> + for ( int x = 0 ; x < 4 ; x + + ) { <nl> + push_constant . projection [ y * 4 + x ] = projection . matrix [ y ] [ x ] ; <nl> + } <nl> + } <nl> + <nl> + / * if ( i = = 0 ) <nl> + * p_xform_cache = projection ; * / <nl> + <nl> + LightOccluderInstance * instance = p_occluders ; <nl> + <nl> + while ( instance ) { <nl> + <nl> + OccluderPolygon * co = occluder_polygon_owner . getornull ( instance - > polygon ) ; <nl> + <nl> + if ( ! co | | co - > index_array . is_null ( ) | | ! ( p_light_mask & instance - > light_mask ) ) { <nl> + <nl> + instance = instance - > next ; <nl> + continue ; <nl> + } <nl> + <nl> + _update_transform_2d_to_mat4 ( p_light_xform * instance - > xform_cache , push_constant . modelview ) ; <nl> + <nl> + RD : : get_singleton ( ) - > draw_list_bind_render_pipeline ( draw_list , shadow_render . render_pipelines [ co - > cull_mode ] ) ; <nl> + RD : : get_singleton ( ) - > draw_list_bind_vertex_array ( draw_list , co - > vertex_array ) ; <nl> + RD : : get_singleton ( ) - > draw_list_bind_index_array ( draw_list , co - > index_array ) ; <nl> + RD : : get_singleton ( ) - > draw_list_set_push_constant ( draw_list , & push_constant , sizeof ( ShadowRenderPushConstant ) ) ; <nl> + <nl> + RD : : get_singleton ( ) - > draw_list_draw ( draw_list , true ) ; <nl> + <nl> + instance = instance - > next ; <nl> + } <nl> + <nl> + RD : : get_singleton ( ) - > draw_list_end ( ) ; <nl> + } <nl> + <nl> + Vector < Color > cc ; <nl> + cc . push_back ( Color ( p_far , p_far , p_far , 1 . 0 ) ) ; <nl> + RD : : DrawListID draw_list = RD : : get_singleton ( ) - > draw_list_begin ( cl - > shadow . fix_fb , RD : : INITIAL_ACTION_CLEAR , RD : : FINAL_ACTION_READ_COLOR_DISCARD_DEPTH , cc ) ; <nl> + RD : : get_singleton ( ) - > draw_list_bind_render_pipeline ( draw_list , shadow_render . shader_fix_pipeline ) ; <nl> + RD : : get_singleton ( ) - > draw_list_bind_index_array ( draw_list , primitive_arrays . index_array [ 3 ] ) ; <nl> + RD : : get_singleton ( ) - > draw_list_bind_uniform_set ( draw_list , cl - > shadow . uniform_set , 0 ) ; <nl> + RD : : get_singleton ( ) - > draw_list_draw ( draw_list , true ) ; <nl> + RD : : get_singleton ( ) - > draw_list_end ( ) ; <nl> + } <nl> + <nl> + RID RasterizerCanvasRD : : occluder_polygon_create ( ) { <nl> + <nl> + OccluderPolygon occluder ; <nl> + occluder . point_count = 0 ; <nl> + occluder . cull_mode = VS : : CANVAS_OCCLUDER_POLYGON_CULL_DISABLED ; <nl> + return occluder_polygon_owner . make_rid ( occluder ) ; <nl> + } <nl> + <nl> + void RasterizerCanvasRD : : occluder_polygon_set_shape_as_lines ( RID p_occluder , const PoolVector < Vector2 > & p_lines ) { <nl> + <nl> + OccluderPolygon * oc = occluder_polygon_owner . getornull ( p_occluder ) ; <nl> + ERR_FAIL_COND ( ! oc ) ; <nl> + <nl> + if ( oc - > point_count ! = p_lines . size ( ) & & oc - > vertex_array . 
is_valid ( ) ) { <nl> + <nl> + RD : : get_singleton ( ) - > free ( oc - > vertex_array ) ; <nl> + RD : : get_singleton ( ) - > free ( oc - > vertex_buffer ) ; <nl> + RD : : get_singleton ( ) - > free ( oc - > index_array ) ; <nl> + RD : : get_singleton ( ) - > free ( oc - > index_buffer ) ; <nl> + <nl> + oc - > vertex_array = RID ( ) ; <nl> + oc - > vertex_buffer = RID ( ) ; <nl> + oc - > index_array = RID ( ) ; <nl> + oc - > index_buffer = RID ( ) ; <nl> + } <nl> + <nl> + if ( p_lines . size ( ) ) { <nl> + <nl> + PoolVector < uint8_t > geometry ; <nl> + PoolVector < uint8_t > indices ; <nl> + int lc = p_lines . size ( ) ; <nl> + <nl> + geometry . resize ( lc * 6 * sizeof ( float ) ) ; <nl> + indices . resize ( lc * 3 * sizeof ( uint16_t ) ) ; <nl> + <nl> + { <nl> + PoolVector < uint8_t > : : Write vw = geometry . write ( ) ; <nl> + float * vwptr = ( float * ) vw . ptr ( ) ; <nl> + PoolVector < uint8_t > : : Write iw = indices . write ( ) ; <nl> + uint16_t * iwptr = ( uint16_t * ) iw . ptr ( ) ; <nl> + <nl> + PoolVector < Vector2 > : : Read lr = p_lines . read ( ) ; <nl> + <nl> + const int POLY_HEIGHT = 16384 ; <nl> + <nl> + for ( int i = 0 ; i < lc / 2 ; i + + ) { <nl> + <nl> + vwptr [ i * 12 + 0 ] = lr [ i * 2 + 0 ] . x ; <nl> + vwptr [ i * 12 + 1 ] = lr [ i * 2 + 0 ] . y ; <nl> + vwptr [ i * 12 + 2 ] = POLY_HEIGHT ; <nl> + <nl> + vwptr [ i * 12 + 3 ] = lr [ i * 2 + 1 ] . x ; <nl> + vwptr [ i * 12 + 4 ] = lr [ i * 2 + 1 ] . y ; <nl> + vwptr [ i * 12 + 5 ] = POLY_HEIGHT ; <nl> + <nl> + vwptr [ i * 12 + 6 ] = lr [ i * 2 + 1 ] . x ; <nl> + vwptr [ i * 12 + 7 ] = lr [ i * 2 + 1 ] . y ; <nl> + vwptr [ i * 12 + 8 ] = - POLY_HEIGHT ; <nl> + <nl> + vwptr [ i * 12 + 9 ] = lr [ i * 2 + 0 ] . x ; <nl> + vwptr [ i * 12 + 10 ] = lr [ i * 2 + 0 ] . y ; <nl> + vwptr [ i * 12 + 11 ] = - POLY_HEIGHT ; <nl> + <nl> + iwptr [ i * 6 + 0 ] = i * 4 + 0 ; <nl> + iwptr [ i * 6 + 1 ] = i * 4 + 1 ; <nl> + iwptr [ i * 6 + 2 ] = i * 4 + 2 ; <nl> + <nl> + iwptr [ i * 6 + 3 ] = i * 4 + 2 ; <nl> + iwptr [ i * 6 + 4 ] = i * 4 + 3 ; <nl> + iwptr [ i * 6 + 5 ] = i * 4 + 0 ; <nl> + } <nl> + } <nl> + <nl> + / / if same buffer len is being set , just use BufferSubData to avoid a pipeline flush <nl> + <nl> + if ( oc - > vertex_array . is_null ( ) ) { <nl> + / / create from scratch <nl> + / / vertices <nl> + oc - > vertex_buffer = RD : : get_singleton ( ) - > vertex_buffer_create ( lc * 6 * sizeof ( real_t ) , geometry ) ; <nl> + <nl> + Vector < RID > buffer ; <nl> + buffer . push_back ( oc - > vertex_buffer ) ; <nl> + oc - > vertex_array = RD : : get_singleton ( ) - > vertex_array_create ( 4 * lc / 2 , shadow_render . vertex_format , buffer ) ; <nl> + / / indices <nl> + <nl> + oc - > index_buffer = RD : : get_singleton ( ) - > index_buffer_create ( 3 * lc , RD : : INDEX_BUFFER_FORMAT_UINT16 , indices ) ; <nl> + oc - > index_array = RD : : get_singleton ( ) - > index_array_create ( oc - > index_buffer , 0 , 3 * lc ) ; <nl> + <nl> + } else { <nl> + / / update existing <nl> + PoolVector < uint8_t > : : Read vr = geometry . read ( ) ; <nl> + RD : : get_singleton ( ) - > buffer_update ( oc - > vertex_buffer , 0 , geometry . size ( ) , vr . ptr ( ) ) ; <nl> + PoolVector < uint8_t > : : Read ir = indices . read ( ) ; <nl> + RD : : get_singleton ( ) - > buffer_update ( oc - > index_buffer , 0 , indices . size ( ) , ir . 
ptr ( ) ) ; <nl> + } <nl> + } <nl> + } <nl> + void RasterizerCanvasRD : : occluder_polygon_set_cull_mode ( RID p_occluder , VS : : CanvasOccluderPolygonCullMode p_mode ) { <nl> + OccluderPolygon * oc = occluder_polygon_owner . getornull ( p_occluder ) ; <nl> + ERR_FAIL_COND ( ! oc ) ; <nl> + oc - > cull_mode = p_mode ; <nl> + } <nl> + <nl> void RasterizerCanvasRD : : update ( ) { <nl> _dispose_bindings ( ) ; <nl> } <nl> RasterizerCanvasRD : : RasterizerCanvasRD ( RasterizerStorageRD * p_storage ) { <nl> shader . default_version_rd_shader = shader . canvas_shader . version_get_shader ( shader . default_version , 0 ) ; <nl> } <nl> <nl> + { / / shadow rendering <nl> + Vector < String > versions ; <nl> + versions . push_back ( String ( ) ) ; / / no versions <nl> + shadow_render . shader . initialize ( versions ) ; <nl> + <nl> + { <nl> + Vector < RD : : AttachmentFormat > attachments ; <nl> + <nl> + RD : : AttachmentFormat af_color ; <nl> + af_color . format = RD : : DATA_FORMAT_R32_SFLOAT ; <nl> + af_color . usage_flags = RD : : TEXTURE_USAGE_SAMPLING_BIT | RD : : TEXTURE_USAGE_COLOR_ATTACHMENT_BIT ; <nl> + <nl> + attachments . push_back ( af_color ) ; <nl> + <nl> + shadow_render . framebuffer_fix_format = RD : : get_singleton ( ) - > framebuffer_format_create ( attachments ) ; <nl> + <nl> + RD : : AttachmentFormat af_depth ; <nl> + af_depth . format = RD : : DATA_FORMAT_D24_UNORM_S8_UINT ; <nl> + af_depth . format = RD : : get_singleton ( ) - > texture_is_format_supported_for_usage ( RD : : DATA_FORMAT_X8_D24_UNORM_PACK32 , RD : : TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT ) ? RD : : DATA_FORMAT_X8_D24_UNORM_PACK32 : RD : : DATA_FORMAT_D32_SFLOAT ; <nl> + af_depth . usage_flags = RD : : TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT ; <nl> + <nl> + attachments . push_back ( af_depth ) ; <nl> + <nl> + shadow_render . framebuffer_format = RD : : get_singleton ( ) - > framebuffer_format_create ( attachments ) ; <nl> + } <nl> + <nl> + / / pipelines <nl> + Vector < RD : : VertexDescription > vf ; <nl> + RD : : VertexDescription vd ; <nl> + vd . format = RD : : DATA_FORMAT_R32G32B32_SFLOAT ; <nl> + vd . location = 0 ; <nl> + vd . offset = 0 ; <nl> + vd . stride = sizeof ( float ) * 3 ; <nl> + vf . push_back ( vd ) ; <nl> + shadow_render . vertex_format = RD : : get_singleton ( ) - > vertex_format_create ( vf ) ; <nl> + <nl> + shadow_render . shader_version = shadow_render . shader . version_create ( ) ; <nl> + <nl> + for ( int i = 0 ; i < 3 ; i + + ) { <nl> + RD : : PipelineRasterizationState rs ; <nl> + rs . cull_mode = i = = 0 ? RD : : POLYGON_CULL_DISABLED : ( i = = 1 ? RD : : POLYGON_CULL_FRONT : RD : : POLYGON_CULL_BACK ) ; <nl> + RD : : PipelineDepthStencilState ds ; <nl> + ds . enable_depth_write = true ; <nl> + ds . enable_depth_test = true ; <nl> + ds . depth_compare_operator = RD : : COMPARE_OP_LESS ; <nl> + shadow_render . render_pipelines [ i ] = RD : : get_singleton ( ) - > render_pipeline_create ( shadow_render . shader . version_get_shader ( shadow_render . shader_version , 0 ) , shadow_render . framebuffer_format , shadow_render . vertex_format , RD : : RENDER_PRIMITIVE_TRIANGLES , rs , RD : : PipelineMultisampleState ( ) , ds , RD : : PipelineColorBlendState : : create_disabled ( ) , 0 ) ; <nl> + } <nl> + <nl> + shadow_render . shader_fix . initialize ( versions ) ; <nl> + shadow_render . shader_fix_version = shadow_render . shader_fix . version_create ( ) ; <nl> + shadow_render . shader_fix_pipeline = RD : : get_singleton ( ) - > render_pipeline_create ( shadow_render . shader_fix . 
version_get_shader ( shadow_render . shader_fix_version , 0 ) , shadow_render . framebuffer_fix_format , RD : : INVALID_ID , RD : : RENDER_PRIMITIVE_TRIANGLES , RD : : PipelineRasterizationState ( ) , RD : : PipelineMultisampleState ( ) , RD : : PipelineDepthStencilState ( ) , RD : : PipelineColorBlendState : : create_disabled ( ) , 0 ) ; <nl> + } <nl> + <nl> { / / bindings <nl> bindings . id_generator = 0 ; <nl> / / generate for 0 <nl> RasterizerCanvasRD : : RasterizerCanvasRD ( RasterizerStorageRD * p_storage ) { <nl> <nl> { / / state allocate <nl> state . canvas_state_buffer = RD : : get_singleton ( ) - > uniform_buffer_create ( sizeof ( State : : Buffer ) ) ; <nl> + state . lights_uniform_buffer = RD : : get_singleton ( ) - > uniform_buffer_create ( sizeof ( LightUniform ) * MAX_RENDER_LIGHTS ) ; <nl> } <nl> } <nl> <nl> RasterizerCanvasRD : : RasterizerCanvasRD ( RasterizerStorageRD * p_storage ) { <nl> ERR_FAIL_COND ( sizeof ( PushConstant ) ! = 128 ) ; <nl> } <nl> <nl> + bool RasterizerCanvasRD : : free ( RID p_rid ) { <nl> + <nl> + if ( canvas_light_owner . owns ( p_rid ) ) { <nl> + CanvasLight * cl = canvas_light_owner . getornull ( p_rid ) ; <nl> + ERR_FAIL_COND_V ( ! cl , false ) ; <nl> + light_set_use_shadow ( p_rid , false , 64 ) ; <nl> + canvas_light_owner . free ( p_rid ) ; <nl> + / / canvas state uniform set needs updating <nl> + if ( state . canvas_state_uniform_set . is_valid ( ) & & RD : : get_singleton ( ) - > uniform_set_is_valid ( state . canvas_state_uniform_set ) ) { <nl> + RD : : get_singleton ( ) - > free ( state . canvas_state_uniform_set ) ; <nl> + } <nl> + } else if ( occluder_polygon_owner . owns ( p_rid ) ) { <nl> + occluder_polygon_set_shape_as_lines ( p_rid , PoolVector < Vector2 > ( ) ) ; <nl> + occluder_polygon_owner . free ( p_rid ) ; <nl> + } else { <nl> + return false ; <nl> + } <nl> + <nl> + return true ; <nl> + } <nl> + <nl> RasterizerCanvasRD : : ~ RasterizerCanvasRD ( ) { <nl> <nl> / / canvas state <nl> mmm a / servers / visual / rasterizer / rasterizer_canvas_rd . h <nl> ppp b / servers / visual / rasterizer / rasterizer_canvas_rd . h <nl> <nl> # include " servers / visual / rasterizer / rasterizer_storage_rd . h " <nl> # include " servers / visual / rasterizer / render_pipeline_vertex_format_cache_rd . h " <nl> # include " servers / visual / rasterizer / shaders / canvas . glsl . gen . h " <nl> + # include " servers / visual / rasterizer / shaders / canvas_occlusion . glsl . gen . h " <nl> + # include " servers / visual / rasterizer / shaders / canvas_occlusion_fix . glsl . gen . h " <nl> # include " servers / visual / rendering_device . 
h " <nl> <nl> class RasterizerCanvasRD : public RasterizerCanvas { <nl> class RasterizerCanvasRD : public RasterizerCanvas { <nl> <nl> FLAGS_CLIP_RECT_UV = ( 1 < < 9 ) , <nl> FLAGS_TRANSPOSE_RECT = ( 1 < < 10 ) , <nl> + FLAGS_USING_LIGHT_MASK = ( 1 < < 11 ) , <nl> + <nl> FLAGS_NINEPACH_DRAW_CENTER = ( 1 < < 12 ) , <nl> FLAGS_USING_PARTICLES = ( 1 < < 13 ) , <nl> FLAGS_USE_PIXEL_SNAP = ( 1 < < 14 ) , <nl> <nl> FLAGS_USE_SKELETON = ( 1 < < 15 ) , <nl> FLAGS_NINEPATCH_H_MODE_SHIFT = 16 , <nl> - FLAGS_NINEPATCH_V_MODE_SHIFT = 18 <nl> + FLAGS_NINEPATCH_V_MODE_SHIFT = 18 , <nl> + FLAGS_LIGHT_COUNT_SHIFT = 20 , <nl> + <nl> + FLAGS_DEFAULT_NORMAL_MAP_USED = ( 1 < < 26 ) , <nl> + FLAGS_DEFAULT_SPECULAR_MAP_USED = ( 1 < < 27 ) <nl> + <nl> + } ; <nl> + <nl> + enum { <nl> + LIGHT_FLAGS_TEXTURE_MASK = 0xFFFF , <nl> + LIGHT_FLAGS_BLEND_SHIFT = 16 , <nl> + LIGHT_FLAGS_BLEND_MASK = ( 3 < < 16 ) , <nl> + LIGHT_FLAGS_BLEND_MODE_ADD = ( 0 < < 16 ) , <nl> + LIGHT_FLAGS_BLEND_MODE_SUB = ( 1 < < 16 ) , <nl> + LIGHT_FLAGS_BLEND_MODE_MIX = ( 2 < < 16 ) , <nl> + LIGHT_FLAGS_BLEND_MODE_MASK = ( 3 < < 16 ) <nl> + } ; <nl> + <nl> + enum { <nl> + MAX_RENDER_ITEMS = 256 * 1024 , <nl> + MAX_LIGHT_TEXTURES = 1024 , <nl> + MAX_LIGHTS_PER_ITEM = 16 , <nl> + MAX_RENDER_LIGHTS = 256 <nl> } ; <nl> <nl> / * * * * * * * * * * * * * * * * / <nl> class RasterizerCanvasRD : public RasterizerCanvas { <nl> / * * * * LIGHTING * * * * / <nl> / * * * * * * * * * * * * * * * * * * / <nl> <nl> - enum { <nl> - LIGHT_GRID_WIDTH = 16 , <nl> - LIGHT_GRID_HEIGHT = 16 , <nl> - MAX_LIGHTS = 128 <nl> + struct CanvasLight { <nl> + <nl> + int32_t texture_index ; <nl> + RID texture ; <nl> + struct { <nl> + int size ; <nl> + RID texture ; <nl> + <nl> + RID render_depth ; <nl> + RID render_fb [ 4 ] ; <nl> + RID render_textures [ 4 ] ; <nl> + RID fix_fb ; <nl> + RID uniform_set ; <nl> + <nl> + } shadow ; <nl> } ; <nl> <nl> + RID_Owner < CanvasLight > canvas_light_owner ; <nl> + <nl> + struct ShadowRenderPushConstant { <nl> + float projection [ 16 ] ; <nl> + float modelview [ 16 ] ; <nl> + } ; <nl> + <nl> + struct OccluderPolygon { <nl> + <nl> + VS : : CanvasOccluderPolygonCullMode cull_mode ; <nl> + int point_count ; <nl> + RID vertex_buffer ; <nl> + RID vertex_array ; <nl> + RID index_buffer ; <nl> + RID index_array ; <nl> + } ; <nl> + <nl> + struct LightUniform { <nl> + float matrix [ 8 ] ; / / light to texture coordinate matrix <nl> + float color [ 4 ] ; <nl> + float shadow_color [ 4 ] ; <nl> + float position [ 2 ] ; <nl> + uint32_t flags ; / / index to light texture <nl> + float height ; <nl> + float shadow_softness ; <nl> + float shadow_pixel_size ; <nl> + float pad [ 2 ] ; <nl> + } ; <nl> + <nl> + RID_Owner < OccluderPolygon > occluder_polygon_owner ; <nl> + <nl> struct { <nl> - RID grid_texture ; <nl> - RID grid_buffer ; <nl> - PoolVector < uint8_t > grid_texture_data ; <nl> - PoolVector < uint8_t > grid_buffer_data ; <nl> - } lighting ; <nl> + CanvasOcclusionShaderRD shader ; <nl> + RID shader_version ; <nl> + RID render_pipelines [ 3 ] ; <nl> + RD : : VertexFormatID vertex_format ; <nl> + RD : : FramebufferFormatID framebuffer_format ; <nl> + <nl> + CanvasOcclusionFixShaderRD shader_fix ; <nl> + RD : : FramebufferFormatID framebuffer_fix_format ; <nl> + RID shader_fix_version ; <nl> + RID shader_fix_pipeline ; <nl> + } shadow_render ; <nl> <nl> / * * * * * * * * * * * * * * * / <nl> / * * * * STATE * * * * / <nl> class RasterizerCanvasRD : public RasterizerCanvas { <nl> struct Buffer { <nl> float canvas_transform [ 16 ] ; <nl> float 
screen_transform [ 16 ] ; <nl> + float canvas_normal_transform [ 16 ] ; <nl> + float canvas_modulate [ 4 ] ; <nl> / / uint32_t light_count ; <nl> / / uint32_t pad [ 3 ] ; <nl> } ; <nl> + <nl> + LightUniform light_uniforms [ MAX_RENDER_LIGHTS ] ; <nl> + <nl> + RID lights_uniform_buffer ; <nl> RID canvas_state_buffer ; <nl> - / / light buffer <nl> - RID canvas_state_light_buffer ; <nl> <nl> / / uniform set for all the above <nl> RID canvas_state_uniform_set ; <nl> class RasterizerCanvasRD : public RasterizerCanvas { <nl> } ; <nl> / / primitive <nl> struct { <nl> - float points [ 6 ] ; / / vec2 points [ 4 ] <nl> - float uvs [ 6 ] ; / / vec2 points [ 4 ] <nl> + float points [ 6 ] ; / / vec2 points [ 3 ] <nl> + float uvs [ 6 ] ; / / vec2 points [ 3 ] <nl> uint32_t colors [ 6 ] ; / / colors encoded as half <nl> } ; <nl> } ; <nl> class RasterizerCanvasRD : public RasterizerCanvas { <nl> float skeleton_inverse [ 16 ] ; <nl> } ; <nl> <nl> - enum { <nl> - MAX_RENDER_ITEMS = 256 * 1024 <nl> - } ; <nl> - <nl> Item * items [ MAX_RENDER_ITEMS ] ; <nl> <nl> - Size2i _bind_texture_binding ( TextureBindingID p_binding , RenderingDevice : : DrawListID p_draw_list ) ; <nl> - void _render_item ( RenderingDevice : : DrawListID p_draw_list , const Item * p_item , RenderTargetFormat p_render_target_format , RenderingDevice : : TextureSamples p_samples , const Color & p_modulate , const Transform2D & p_canvas_transform_inverse , Item * & current_clip ) ; <nl> - void _render_items ( RID p_to_render_target , int p_item_count , const Color & p_modulate , const Transform2D & p_transform ) ; <nl> + Size2i _bind_texture_binding ( TextureBindingID p_binding , RenderingDevice : : DrawListID p_draw_list , uint32_t & flags ) ; <nl> + void _render_item ( RenderingDevice : : DrawListID p_draw_list , const Item * p_item , RenderTargetFormat p_render_target_format , RenderingDevice : : TextureSamples p_samples , const Transform2D & p_canvas_transform_inverse , Item * & current_clip , Light * p_lights ) ; <nl> + void _render_items ( RID p_to_render_target , int p_item_count , const Transform2D & p_canvas_transform_inverse , Light * p_lights ) ; <nl> <nl> - void _update_transform_2d_to_mat2x4 ( const Transform2D & p_transform , float * p_mat2x4 ) ; <nl> - void _update_transform_2d_to_mat2x3 ( const Transform2D & p_transform , float * p_mat2x3 ) ; <nl> + _FORCE_INLINE_ void _update_transform_2d_to_mat2x4 ( const Transform2D & p_transform , float * p_mat2x4 ) ; <nl> + _FORCE_INLINE_ void _update_transform_2d_to_mat2x3 ( const Transform2D & p_transform , float * p_mat2x3 ) ; <nl> <nl> - void _update_transform_2d_to_mat4 ( const Transform2D & p_transform , float * p_mat4 ) ; <nl> - void _update_transform_to_mat4 ( const Transform & p_transform , float * p_mat4 ) ; <nl> + _FORCE_INLINE_ void _update_transform_2d_to_mat4 ( const Transform2D & p_transform , float * p_mat4 ) ; <nl> + _FORCE_INLINE_ void _update_transform_to_mat4 ( const Transform & p_transform , float * p_mat4 ) ; <nl> + <nl> + _FORCE_INLINE_ void _update_specular_shininess ( const Color & p_transform , uint32_t * r_ss ) ; <nl> <nl> void _update_canvas_state_uniform_set ( ) ; <nl> <nl> class RasterizerCanvasRD : public RasterizerCanvas { <nl> PolygonID request_polygon ( const Vector < int > & p_indices , const Vector < Point2 > & p_points , const Vector < Color > & p_colors , const Vector < Point2 > & p_uvs = Vector < Point2 > ( ) , const Vector < int > & p_bones = Vector < int > ( ) , const Vector < float > & p_weights = Vector < float > ( ) ) ; <nl> void free_polygon 
( PolygonID p_polygon ) ; <nl> <nl> - RID light_internal_create ( ) { return RID ( ) ; } <nl> - void light_internal_update ( RID p_rid , Light * p_light ) { } <nl> - void light_internal_free ( RID p_rid ) { } <nl> + RID light_create ( ) ; <nl> + void light_set_texture ( RID p_rid , RID p_texture ) ; <nl> + void light_set_use_shadow ( RID p_rid , bool p_enable , int p_resolution ) ; <nl> + void light_update_shadow ( RID p_rid , const Transform2D & p_light_xform , int p_light_mask , float p_near , float p_far , LightOccluderInstance * p_occluders ) ; <nl> + <nl> + RID occluder_polygon_create ( ) ; <nl> + void occluder_polygon_set_shape_as_lines ( RID p_occluder , const PoolVector < Vector2 > & p_lines ) ; <nl> + void occluder_polygon_set_cull_mode ( RID p_occluder , VS : : CanvasOccluderPolygonCullMode p_mode ) ; <nl> <nl> void canvas_render_items ( RID p_to_render_target , Item * p_item_list , const Color & p_modulate , Light * p_light_list , const Transform2D & p_canvas_transform ) ; <nl> <nl> void canvas_debug_viewport_shadows ( Light * p_lights_with_shadow ) { } ; <nl> <nl> - void canvas_light_shadow_buffer_update ( RID p_buffer , const Transform2D & p_light_xform , int p_light_mask , float p_near , float p_far , LightOccluderInstance * p_occluders , CameraMatrix * p_xform_cache ) { } <nl> - <nl> - void reset_canvas ( ) { } <nl> - <nl> void draw_window_margins ( int * p_margins , RID * p_margin_textures ) { } <nl> <nl> void update ( ) ; <nl> + bool free ( RID p_rid ) ; <nl> RasterizerCanvasRD ( RasterizerStorageRD * p_storage ) ; <nl> ~ RasterizerCanvasRD ( ) ; <nl> } ; <nl> mmm a / servers / visual / rasterizer / rasterizer_storage_rd . h <nl> ppp b / servers / visual / rasterizer / rasterizer_storage_rd . h <nl> class RasterizerStorageRD : public RasterizerStorage { <nl> Size2 render_target_get_size ( RID p_render_target ) ; <nl> RID render_target_get_rd_framebuffer ( RID p_render_target ) ; <nl> <nl> - / * CANVAS SHADOW * / <nl> - <nl> - RID canvas_light_shadow_buffer_create ( int p_width ) { return RID ( ) ; } <nl> - <nl> - / * LIGHT SHADOW MAPPING * / <nl> - <nl> - RID canvas_light_occluder_create ( ) { return RID ( ) ; } <nl> - void canvas_light_occluder_set_polylines ( RID p_occluder , const PoolVector < Vector2 > & p_lines ) { } <nl> - <nl> VS : : InstanceType get_base_type ( RID p_rid ) const { <nl> if ( mesh_owner . owns ( p_rid ) ) { <nl> return VS : : INSTANCE_MESH ; <nl> mmm a / servers / visual / rasterizer / shaders / SCsub <nl> ppp b / servers / visual / rasterizer / shaders / SCsub <nl> Import ( ' env ' ) <nl> <nl> if ' RD_GLSL ' in env [ ' BUILDERS ' ] : <nl> env . RD_GLSL ( ' canvas . glsl ' ) ; <nl> + env . RD_GLSL ( ' canvas_occlusion . glsl ' ) ; <nl> + env . RD_GLSL ( ' canvas_occlusion_fix . glsl ' ) ; <nl> mmm a / servers / visual / rasterizer / shaders / canvas . glsl <nl> ppp b / servers / visual / rasterizer / shaders / canvas . glsl <nl> layout ( location = 7 ) in vec4 bone_weights_attrib ; <nl> <nl> layout ( location = 0 ) out vec2 uv_interp ; <nl> layout ( location = 1 ) out vec4 color_interp ; <nl> + layout ( location = 2 ) out vec2 vertex_interp ; <nl> <nl> # ifdef USE_NINEPATCH <nl> <nl> VERTEX_SHADER_CODE <nl> # endif <nl> # endif <nl> <nl> + vertex = ( canvas_data . canvas_transform * vec4 ( vertex , 0 . 0 , 1 . 0 ) ) . xy ; <nl> + <nl> + vertex_interp = vertex ; <nl> uv_interp = uv ; <nl> - # if ! defined ( SKIP_TRANSFORM_USED ) <nl> - gl_Position = ( canvas_data . screen_transform * canvas_data . canvas_transform ) * vec4 ( vertex , 0 . 0 , 1 . 
0 ) ; <nl> - # else <nl> - gl_Position = vec4 ( vertex , 0 . 0 , 1 . 0 ) ; <nl> - # endif <nl> + <nl> + gl_Position = canvas_data . screen_transform * vec4 ( vertex , 0 . 0 , 1 . 0 ) ; <nl> <nl> # ifdef USE_POINT_SIZE <nl> gl_PointSize = point_size ; <nl> VERSION_DEFINES <nl> <nl> layout ( location = 0 ) in vec2 uv_interp ; <nl> layout ( location = 1 ) in vec4 color_interp ; <nl> + layout ( location = 2 ) in vec2 vertex_interp ; <nl> <nl> # ifdef USE_NINEPATCH <nl> <nl> void main ( ) { <nl> <nl> vec4 color = color_interp ; <nl> vec2 uv = uv_interp ; <nl> + vec2 vertex = vertex_interp ; <nl> <nl> # if ! defined ( USE_ATTRIBUTES ) & & ! defined ( USE_PRIMITIVE ) <nl> <nl> void main ( ) { <nl> # endif <nl> <nl> <nl> + uint light_count = ( draw_data . flags > > FLAGS_LIGHT_COUNT_SHIFT ) & 0xF ; / / max 16 lights <nl> + <nl> <nl> vec3 normal ; <nl> <nl> void main ( ) { <nl> bool normal_used = false ; <nl> # endif <nl> <nl> - # if 0 <nl> - if ( false / * normal_used | | canvas_data . light_count > 0 * / ) { <nl> - normal . xy = texture ( sampler2D ( normal_texture , texture_sampler ) , uv ) . xy * 2 . 0 - 1 . 0 ; <nl> + <nl> + if ( normal_used | | ( light_count > 0 & & bool ( draw_data . flags & FLAGS_DEFAULT_NORMAL_MAP_USED ) ) ) { <nl> + normal . xy = texture ( sampler2D ( normal_texture , texture_sampler ) , uv ) . xy * vec2 ( 2 . 0 , - 2 . 0 ) - vec2 ( 1 . 0 , - 1 . 0 ) ; <nl> normal . z = sqrt ( 1 . 0 - dot ( normal . xy , normal . xy ) ) ; <nl> normal_used = true ; <nl> } else { <nl> - # endif <nl> normal = vec3 ( 0 . 0 , 0 . 0 , 1 . 0 ) ; <nl> - # if 0 <nl> } <nl> + <nl> + vec4 specular_shininess ; <nl> + <nl> + # if defined ( SPECULAR_SHININESS_USED ) <nl> + <nl> + bool specular_shininess_used = true ; <nl> + # else <nl> + bool specular_shininess_used = false ; <nl> # endif <nl> <nl> + if ( specular_shininess_used | | ( light_count > 0 & & normal_used & & bool ( draw_data . flags & FLAGS_DEFAULT_SPECULAR_MAP_USED ) ) ) { <nl> + specular_shininess = texture ( sampler2D ( specular_texture , texture_sampler ) , uv ) ; <nl> + specular_shininess * = unpackUnorm4x8 ( draw_data . specular_shininess ) ; <nl> + specular_shininess_used = true ; <nl> + } else { <nl> + specular_shininess = vec4 ( 1 . 0 ) ; <nl> + } <nl> + <nl> + <nl> # if defined ( SCREEN_UV_USED ) <nl> vec2 screen_uv = gl_FragCoord . xy * screen_pixel_size ; <nl> # endif <nl> FRAGMENT_SHADER_CODE <nl> # endif <nl> } <nl> <nl> - # if 0 <nl> - if ( canvas_data . light_count > 0 ) { <nl> - / / do lighting <nl> + if ( normal_used ) { <nl> + / / convert by item transform <nl> + normal . xy = mat2 ( normalize ( draw_data . world_x ) , normalize ( draw_data . world_y ) ) * normal . xy ; <nl> + / / convert by canvas transform <nl> + normal = normalize ( ( canvas_data . canvas_normal_transform * vec4 ( normal , 0 . 0 ) ) . xyz ) ; <nl> + } <nl> <nl> + <nl> + vec4 base_color = color ; <nl> + if ( bool ( draw_data . flags & FLAGS_USING_LIGHT_MASK ) ) { <nl> + color = vec4 ( 0 . 0 ) ; / / inivisible by default due to using light mask <nl> } <nl> - # endif <nl> - / / color . rgb * = color . a ; <nl> + <nl> + color * = canvas_data . canvas_modulation ; <nl> + <nl> + for ( uint i = 0 ; i < light_count ; i + + ) { <nl> + uint light_base ; <nl> + if ( i < 8 ) { <nl> + if ( i < 4 ) { <nl> + light_base = draw_data . lights [ 0 ] ; <nl> + } else { <nl> + light_base = draw_data . lights [ 1 ] ; <nl> + } <nl> + } else { <nl> + if ( i < 12 ) { <nl> + light_base = draw_data . lights [ 2 ] ; <nl> + } else { <nl> + light_base = draw_data . 
lights [ 3 ] ; <nl> + } <nl> + } <nl> + light_base > > = ( i & 3 ) * 8 ; <nl> + light_base & = 0xFF ; <nl> + <nl> + # define LIGHT_FLAGS_BLEND_MASK ( 3 < < 16 ) <nl> + # define LIGHT_FLAGS_BLEND_MODE_ADD ( 0 < < 16 ) <nl> + # define LIGHT_FLAGS_BLEND_MODE_SUB ( 1 < < 16 ) <nl> + # define LIGHT_FLAGS_BLEND_MODE_MIX ( 2 < < 16 ) <nl> + # define LIGHT_FLAGS_BLEND_MODE_MASK ( 3 < < 16 ) <nl> + <nl> + <nl> + vec2 tex_uv = ( vec4 ( vertex , 0 . 0 , 1 . 0 ) * mat4 ( light_array . data [ light_base ] . matrix [ 0 ] , light_array . data [ light_base ] . matrix [ 1 ] , vec4 ( 0 . 0 , 0 . 0 , 1 . 0 , 0 . 0 ) , vec4 ( 0 . 0 , 0 . 0 , 0 . 0 , 1 . 0 ) ) ) . xy ; / / multiply inverse given its transposed . Optimizer removes useless operations . <nl> + uint texture_idx = light_array . data [ light_base ] . flags & LIGHT_FLAGS_TEXTURE_MASK ; <nl> + vec4 light_color = texture ( sampler2D ( light_textures [ texture_idx ] , texture_sampler ) , tex_uv ) ; <nl> + vec4 light_base_color = light_array . data [ light_base ] . color ; <nl> + light_color . rgb * = light_base_color . rgb * light_base_color . a ; <nl> + <nl> + if ( normal_used ) { <nl> + <nl> + vec3 light_pos = vec3 ( light_array . data [ light_base ] . position , light_array . data [ light_base ] . height ) ; <nl> + vec3 pos = vec3 ( vertex , 0 . 0 ) ; <nl> + vec3 light_vec = normalize ( light_pos - pos ) ; <nl> + float cNdotL = max ( 0 . 0 , dot ( normal , light_vec ) ) ; <nl> + <nl> + if ( specular_shininess_used ) { <nl> + / / blinn <nl> + vec3 view = vec3 ( 0 . 0 , 0 . 0 , 1 . 0 ) ; / / not great but good enough <nl> + vec3 half_vec = normalize ( view + light_vec ) ; <nl> + <nl> + float cNdotV = max ( dot ( normal , view ) , 0 . 0 ) ; <nl> + float cNdotH = max ( dot ( normal , half_vec ) , 0 . 0 ) ; <nl> + float cVdotH = max ( dot ( view , half_vec ) , 0 . 0 ) ; <nl> + float cLdotH = max ( dot ( light_vec , half_vec ) , 0 . 0 ) ; <nl> + float shininess = exp2 ( 15 . 0 * specular_shininess . a + 1 . 0 ) * 0 . 25 ; <nl> + float blinn = pow ( cNdotH , shininess ) ; <nl> + blinn * = ( shininess + 8 . 0 ) * ( 1 . 0 / ( 8 . 0 * M_PI ) ) ; <nl> + float s = ( blinn ) / max ( 4 . 0 * cNdotV * cNdotL , 0 . 75 ) ; <nl> + <nl> + light_color . rgb = specular_shininess . rgb * light_base_color . rgb * s + light_color . rgb * cNdotL ; <nl> + } else { <nl> + light_color . rgb * = cNdotL ; <nl> + } <nl> + <nl> + } <nl> + <nl> + if ( any ( lessThan ( tex_uv , vec2 ( 0 . 0 , 0 . 0 ) ) ) | | any ( greaterThanEqual ( tex_uv , vec2 ( 1 . 0 , 1 . 0 ) ) ) ) { <nl> + / / if outside the light texture , light color is zero <nl> + light_color . a = 0 . 0 ; <nl> + } <nl> + <nl> + uint blend_mode = light_array . data [ light_base ] . flags & LIGHT_FLAGS_BLEND_MASK ; <nl> + <nl> + switch ( blend_mode ) { <nl> + case LIGHT_FLAGS_BLEND_MODE_ADD : { <nl> + color . rgb + = light_color . rgb * light_color . a ; <nl> + } break ; <nl> + case LIGHT_FLAGS_BLEND_MODE_SUB : { <nl> + color . rgb - = light_color . rgb * light_color . a ; <nl> + } break ; <nl> + case LIGHT_FLAGS_BLEND_MODE_MIX : { <nl> + color . rgb = mix ( color . rgb , light_color . rgb , light_color . a ) ; <nl> + } break ; <nl> + case LIGHT_FLAGS_BLEND_MODE_MASK : { <nl> + light_color . a * = base_color . a ; <nl> + color . rgb = mix ( color . rgb , light_color . rgb , light_color . a ) ; <nl> + } break ; <nl> + } <nl> + } <nl> + <nl> frag_color = color ; <nl> <nl> } <nl> new file mode 100644 <nl> index 00000000000 . . 
fb35f169713 <nl> mmm / dev / null <nl> ppp b / servers / visual / rasterizer / shaders / canvas_occlusion . glsl <nl> <nl> + / * clang - format off * / <nl> + [ vertex ] <nl> + / * clang - format on * / <nl> + <nl> + # version 450 <nl> + <nl> + layout ( location = 0 ) in highp vec3 vertex ; <nl> + <nl> + layout ( push_constant , binding = 0 , std430 ) uniform Constants { <nl> + <nl> + mat4 modelview ; <nl> + mat4 projection ; <nl> + } constants ; <nl> + <nl> + layout ( location = 0 ) out highp float depth ; <nl> + <nl> + void main ( ) { <nl> + <nl> + highp vec4 vtx = ( constants . modelview * vec4 ( vertex , 1 . 0 ) ) ; <nl> + depth = length ( vtx . xy ) ; <nl> + <nl> + gl_Position = constants . projection * vtx ; <nl> + <nl> + } <nl> + <nl> + / * clang - format off * / <nl> + [ fragment ] <nl> + / * clang - format on * / <nl> + <nl> + # version 450 <nl> + <nl> + layout ( location = 0 ) in highp float depth ; <nl> + layout ( location = 0 ) out highp float distance_buf ; <nl> + <nl> + void main ( ) { <nl> + <nl> + distance_buf = depth ; <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 48757bb68a0 <nl> mmm / dev / null <nl> ppp b / servers / visual / rasterizer / shaders / canvas_occlusion_fix . glsl <nl> <nl> + / * clang - format off * / <nl> + [ vertex ] <nl> + / * clang - format on * / <nl> + <nl> + # version 450 <nl> + <nl> + layout ( location = 0 ) out highp float u ; <nl> + <nl> + void main ( ) { <nl> + <nl> + if ( gl_VertexIndex = = 0 ) { <nl> + u = 0 . 0 ; <nl> + gl_Position = vec4 ( - 1 . 0 , - 1 . 0 , 0 . 0 , 1 . 0 ) ; <nl> + } else if ( gl_VertexIndex = = 1 ) { <nl> + u = 0 . 0 ; <nl> + gl_Position = vec4 ( - 1 . 0 , 1 . 0 , 0 . 0 , 1 . 0 ) ; <nl> + } else if ( gl_VertexIndex = = 2 ) { <nl> + u = 1 . 0 ; <nl> + gl_Position = vec4 ( 1 . 0 , 1 . 0 , 0 . 0 , 1 . 0 ) ; <nl> + } else { <nl> + u = 1 . 0 ; <nl> + gl_Position = vec4 ( 1 . 0 , - 1 . 0 , 0 . 0 , 1 . 0 ) ; <nl> + } <nl> + } <nl> + <nl> + / * clang - format off * / <nl> + [ fragment ] <nl> + / * clang - format on * / <nl> + <nl> + # version 450 <nl> + <nl> + # define PI 3 . 14159265359 <nl> + <nl> + layout ( set = 0 , binding = 0 ) uniform sampler2D textures [ 4 ] ; <nl> + layout ( location = 0 ) in highp float u ; <nl> + layout ( location = 0 ) out highp float distance ; <nl> + <nl> + void main ( ) { <nl> + <nl> + / / 0 - 1 in the texture we are writing to represents a circle , 0 - 2PI ) <nl> + / / obtain the quarter circle from the source textures <nl> + highp float sub_angle = ( ( mod ( u , 0 . 25 ) / 0 . 25 ) * 2 . 0 - 1 . 0 ) * ( PI / 4 . 0 ) ; <nl> + highp float x = tan ( sub_angle ) * 0 . 5 + 0 . 5 ; <nl> + <nl> + float depth ; <nl> + if ( u < 0 . 25 ) { <nl> + depth = texture ( textures [ 0 ] , vec2 ( x , 0 . 0 ) ) . x ; <nl> + } else if ( u < 0 . 50 ) { <nl> + depth = texture ( textures [ 1 ] , vec2 ( x , 0 . 0 ) ) . x ; <nl> + } else if ( u < 0 . 75 ) { <nl> + depth = texture ( textures [ 2 ] , vec2 ( x , 0 . 0 ) ) . x ; <nl> + } else { <nl> + depth = texture ( textures [ 3 ] , vec2 ( x , 0 . 0 ) ) . x ; <nl> + } <nl> + distance = depth ; <nl> + } <nl> mmm a / servers / visual / rasterizer / shaders / canvas_uniforms_inc . glsl <nl> ppp b / servers / visual / rasterizer / shaders / canvas_uniforms_inc . glsl <nl> <nl> <nl> / * SET0 : Per draw primitive settings * / <nl> <nl> + # define M_PI 3 . 
14159265359 <nl> <nl> - # define MAX_LIGHTS 128 <nl> + # define MAX_LIGHT_TEXTURES 1024 <nl> + # define MAX_RENDER_LIGHTS 256 <nl> <nl> # define FLAGS_INSTANCING_STRIDE_MASK 0xF <nl> # define FLAGS_INSTANCING_ENABLED ( 1 < < 4 ) <nl> <nl> <nl> # define FLAGS_CLIP_RECT_UV ( 1 < < 9 ) <nl> # define FLAGS_TRANSPOSE_RECT ( 1 < < 10 ) <nl> + # define FLAGS_USING_LIGHT_MASK ( 1 < < 11 ) <nl> # define FLAGS_NINEPACH_DRAW_CENTER ( 1 < < 12 ) <nl> # define FLAGS_USING_PARTICLES ( 1 < < 13 ) <nl> # define FLAGS_USE_PIXEL_SNAP ( 1 < < 14 ) <nl> <nl> - <nl> # define FLAGS_NINEPATCH_H_MODE_SHIFT 16 <nl> # define FLAGS_NINEPATCH_V_MODE_SHIFT 18 <nl> <nl> # define FLAGS_LIGHT_COUNT_SHIFT 20 <nl> <nl> + # define FLAGS_DEFAULT_NORMAL_MAP_USED ( 1 < < 26 ) <nl> + # define FLAGS_DEFAULT_SPECULAR_MAP_USED ( 1 < < 27 ) <nl> + <nl> layout ( push_constant , binding = 0 , std430 ) uniform DrawData { <nl> vec2 world_x ; <nl> vec2 world_y ; <nl> layout ( set = 2 , binding = 1 , std140 ) uniform SkeletonData { <nl> layout ( set = 3 , binding = 0 , std140 ) uniform CanvasData { <nl> mat4 canvas_transform ; <nl> mat4 screen_transform ; <nl> + mat4 canvas_normal_transform ; <nl> + vec4 canvas_modulation ; <nl> / / uint light_count ; <nl> } canvas_data ; <nl> <nl> + # define LIGHT_FLAGS_TEXTURE_MASK 0xFFFF <nl> + # define LIGHT_FLAGS_BLEND_MASK ( 3 < < 16 ) <nl> + # define LIGHT_FLAGS_BLEND_MODE_ADD ( 0 < < 16 ) <nl> + # define LIGHT_FLAGS_BLEND_MODE_SUB ( 1 < < 16 ) <nl> + # define LIGHT_FLAGS_BLEND_MODE_MIX ( 2 < < 16 ) <nl> + # define LIGHT_FLAGS_BLEND_MODE_MASK ( 3 < < 16 ) <nl> + <nl> + <nl> struct Light { <nl> - / / light matrices <nl> - mat4 light_matrix ; <nl> - mat4 light_local_matrix ; <nl> - mat4 shadow_matrix ; <nl> - vec4 light_color ; <nl> - vec4 light_shadow_color ; <nl> - vec2 light_pos ; <nl> - float shadowpixel_size ; <nl> - float shadow_gradient ; <nl> - float light_height ; <nl> - float light_outside_alpha ; <nl> - float shadow_distance_mult ; <nl> + mat2x4 matrix ; / / light to texture coordinate matrix <nl> + vec4 color ; <nl> + vec4 shadow_color ; <nl> + vec2 position ; <nl> + uint flags ; / / index to light texture <nl> + float height ; <nl> + float shadow_softness ; <nl> + float shadow_pixel_size ; <nl> + float pad0 ; <nl> + float pad1 ; <nl> } ; <nl> <nl> layout ( set = 3 , binding = 1 , std140 ) uniform LightData { <nl> - Light lights [ MAX_LIGHTS ] ; <nl> - } light_data ; <nl> + Light data [ MAX_RENDER_LIGHTS ] ; <nl> + } light_array ; <nl> <nl> - layout ( set = 3 , binding = 2 ) uniform texture2D light_textures [ MAX_LIGHTS ] ; <nl> + layout ( set = 3 , binding = 2 ) uniform texture2D light_textures [ MAX_LIGHT_TEXTURES ] ; <nl> + layout ( set = 3 , binding = 3 ) uniform texture2D shadow_textures [ MAX_LIGHT_TEXTURES ] ; <nl> mmm a / servers / visual / rendering_device . h <nl> ppp b / servers / visual / rendering_device . 
h <nl> class RenderingDevice : public Object { <nl> virtual RID uniform_set_create ( const Vector < Uniform > & p_uniforms , RID p_shader , uint32_t p_shader_set ) = 0 ; <nl> virtual bool uniform_set_is_valid ( RID p_uniform_set ) = 0 ; <nl> <nl> - virtual Error buffer_update ( RID p_buffer , uint32_t p_offset , uint32_t p_size , void * p_data , bool p_sync_with_draw = false ) = 0 ; / / this function can be used from any thread and it takes effect at the begining of the frame , unless sync with draw is used , which is used to mix updates with draw calls <nl> + virtual Error buffer_update ( RID p_buffer , uint32_t p_offset , uint32_t p_size , const void * p_data , bool p_sync_with_draw = false ) = 0 ; / / this function can be used from any thread and it takes effect at the begining of the frame , unless sync with draw is used , which is used to mix updates with draw calls <nl> <nl> / * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> / * * * * RENDER PIPELINE * * * * / <nl> mmm a / servers / visual / visual_server_canvas . cpp <nl> ppp b / servers / visual / visual_server_canvas . cpp <nl> void VisualServerCanvas : : canvas_item_add_line ( RID p_item , const Point2 & p_from , <nl> line - > points [ 0 ] = p_from ; <nl> line - > points [ 1 ] = p_to ; <nl> } <nl> - for ( int i = 0 ; i < line - > point_count ; i + + ) { <nl> + for ( uint32_t i = 0 ; i < line - > point_count ; i + + ) { <nl> line - > colors [ i ] = p_color ; <nl> } <nl> line - > specular_shininess = Color ( 1 , 1 , 1 , 1 ) ; <nl> void VisualServerCanvas : : canvas_item_set_use_parent_material ( RID p_item , bool p_ <nl> RID VisualServerCanvas : : canvas_light_create ( ) { <nl> <nl> RasterizerCanvas : : Light * clight = memnew ( RasterizerCanvas : : Light ) ; <nl> - clight - > light_internal = VSG : : canvas_render - > light_internal_create ( ) ; <nl> + clight - > light_internal = VSG : : canvas_render - > light_create ( ) ; <nl> return canvas_light_owner . make_rid ( clight ) ; <nl> } <nl> void VisualServerCanvas : : canvas_light_attach_to_canvas ( RID p_light , RID p_canvas ) { <nl> void VisualServerCanvas : : canvas_light_set_texture ( RID p_light , RID p_texture ) { <nl> ERR_FAIL_COND ( ! clight ) ; <nl> <nl> clight - > texture = p_texture ; <nl> + VSG : : canvas_render - > light_set_texture ( clight - > light_internal , p_texture ) ; <nl> } <nl> void VisualServerCanvas : : canvas_light_set_texture_offset ( RID p_light , const Vector2 & p_offset ) { <nl> <nl> void VisualServerCanvas : : canvas_light_set_shadow_enabled ( RID p_light , bool p_ena <nl> RasterizerCanvas : : Light * clight = canvas_light_owner . getornull ( p_light ) ; <nl> ERR_FAIL_COND ( ! clight ) ; <nl> <nl> - if ( clight - > shadow_buffer . 
is_valid ( ) = = p_enabled ) <nl> + if ( clight - > use_shadow = = p_enabled ) { <nl> return ; <nl> - if ( p_enabled ) { <nl> - clight - > shadow_buffer = VSG : : storage - > canvas_light_shadow_buffer_create ( clight - > shadow_buffer_size ) ; <nl> - } else { <nl> - VSG : : storage - > free ( clight - > shadow_buffer ) ; <nl> - clight - > shadow_buffer = RID ( ) ; <nl> } <nl> + clight - > use_shadow = p_enabled ; <nl> + <nl> + VSG : : canvas_render - > light_set_use_shadow ( clight - > light_internal , clight - > use_shadow , clight - > shadow_buffer_size ) ; <nl> } <nl> + <nl> void VisualServerCanvas : : canvas_light_set_shadow_buffer_size ( RID p_light , int p_size ) { <nl> <nl> ERR_FAIL_COND ( p_size < 32 | | p_size > 16384 ) ; <nl> void VisualServerCanvas : : canvas_light_set_shadow_buffer_size ( RID p_light , int p_ <nl> <nl> clight - > shadow_buffer_size = next_power_of_2 ( p_size ) ; <nl> <nl> - if ( clight - > shadow_buffer . is_valid ( ) ) { <nl> - VSG : : storage - > free ( clight - > shadow_buffer ) ; <nl> - clight - > shadow_buffer = VSG : : storage - > canvas_light_shadow_buffer_create ( clight - > shadow_buffer_size ) ; <nl> - } <nl> + VSG : : canvas_render - > light_set_use_shadow ( clight - > light_internal , clight - > use_shadow , clight - > shadow_buffer_size ) ; <nl> } <nl> <nl> void VisualServerCanvas : : canvas_light_set_shadow_gradient_length ( RID p_light , float p_length ) { <nl> void VisualServerCanvas : : canvas_light_occluder_set_polygon ( RID p_occluder , RID p <nl> } <nl> <nl> occluder - > polygon = p_polygon ; <nl> - occluder - > polygon_buffer = RID ( ) ; <nl> + occluder - > occluder = RID ( ) ; <nl> <nl> if ( occluder - > polygon . is_valid ( ) ) { <nl> LightOccluderPolygon * occluder_poly = canvas_light_occluder_polygon_owner . getornull ( p_polygon ) ; <nl> void VisualServerCanvas : : canvas_light_occluder_set_polygon ( RID p_occluder , RID p <nl> ERR_FAIL_COND ( ! occluder_poly ) ; <nl> } else { <nl> occluder_poly - > owners . insert ( occluder ) ; <nl> - occluder - > polygon_buffer = occluder_poly - > occluder ; <nl> + occluder - > occluder = occluder_poly - > occluder ; <nl> occluder - > aabb_cache = occluder_poly - > aabb ; <nl> occluder - > cull_cache = occluder_poly - > cull_mode ; <nl> } <nl> void VisualServerCanvas : : canvas_light_occluder_set_light_mask ( RID p_occluder , in <nl> RID VisualServerCanvas : : canvas_occluder_polygon_create ( ) { <nl> <nl> LightOccluderPolygon * occluder_poly = memnew ( LightOccluderPolygon ) ; <nl> - occluder_poly - > occluder = VSG : : storage - > canvas_light_occluder_create ( ) ; <nl> + occluder_poly - > occluder = VSG : : canvas_render - > occluder_polygon_create ( ) ; <nl> return canvas_light_occluder_polygon_owner . make_rid ( occluder_poly ) ; <nl> } <nl> void VisualServerCanvas : : canvas_occluder_polygon_set_shape ( RID p_occluder_polygon , const PoolVector < Vector2 > & p_shape , bool p_closed ) { <nl> void VisualServerCanvas : : canvas_occluder_polygon_set_shape_as_lines ( RID p_occlud <nl> } <nl> } <nl> <nl> - VSG : : storage - > canvas_light_occluder_set_polylines ( occluder_poly - > occluder , p_shape ) ; <nl> + VSG : : canvas_render - > occluder_polygon_set_shape_as_lines ( occluder_poly - > occluder , p_shape ) ; <nl> for ( Set < RasterizerCanvas : : LightOccluderInstance * > : : Element * E = occluder_poly - > owners . 
front ( ) ; E ; E = E - > next ( ) ) { <nl> E - > get ( ) - > aabb_cache = occluder_poly - > aabb ; <nl> } <nl> void VisualServerCanvas : : canvas_occluder_polygon_set_cull_mode ( RID p_occluder_po <nl> LightOccluderPolygon * occluder_poly = canvas_light_occluder_polygon_owner . getornull ( p_occluder_polygon ) ; <nl> ERR_FAIL_COND ( ! occluder_poly ) ; <nl> occluder_poly - > cull_mode = p_mode ; <nl> + VSG : : canvas_render - > occluder_polygon_set_cull_mode ( occluder_poly - > occluder , p_mode ) ; <nl> for ( Set < RasterizerCanvas : : LightOccluderInstance * > : : Element * E = occluder_poly - > owners . front ( ) ; E ; E = E - > next ( ) ) { <nl> E - > get ( ) - > cull_cache = p_mode ; <nl> } <nl> bool VisualServerCanvas : : free ( RID p_rid ) { <nl> canvas - > lights . erase ( canvas_light ) ; <nl> } <nl> <nl> - if ( canvas_light - > shadow_buffer . is_valid ( ) ) <nl> - VSG : : storage - > free ( canvas_light - > shadow_buffer ) ; <nl> - <nl> - VSG : : canvas_render - > light_internal_free ( canvas_light - > light_internal ) ; <nl> + VSG : : canvas_render - > free ( canvas_light - > light_internal ) ; <nl> <nl> canvas_light_owner . free ( p_rid ) ; <nl> memdelete ( canvas_light ) ; <nl> bool VisualServerCanvas : : free ( RID p_rid ) { <nl> <nl> LightOccluderPolygon * occluder_poly = canvas_light_occluder_polygon_owner . getornull ( p_rid ) ; <nl> ERR_FAIL_COND_V ( ! occluder_poly , true ) ; <nl> - VSG : : storage - > free ( occluder_poly - > occluder ) ; <nl> + VSG : : canvas_render - > free ( occluder_poly - > occluder ) ; <nl> <nl> while ( occluder_poly - > owners . size ( ) ) { <nl> <nl> mmm a / servers / visual / visual_server_viewport . cpp <nl> ppp b / servers / visual / visual_server_viewport . cpp <nl> void VisualServerViewport : : _draw_viewport ( Viewport * p_viewport , ARVRInterface : : E <nl> <nl> cl - > filter_next_ptr = lights ; <nl> lights = cl ; <nl> - cl - > texture_cache = NULL ; <nl> + / / cl - > texture_cache = NULL ; <nl> Transform2D scale ; <nl> scale . scale ( cl - > rect_cache . size ) ; <nl> scale . elements [ 2 ] = cl - > rect_cache . position ; <nl> - cl - > light_shader_xform = ( cl - > xform_cache * scale ) . affine_inverse ( ) ; <nl> - cl - > light_shader_pos = cl - > xform_cache [ 2 ] ; <nl> - if ( cl - > shadow_buffer . is_valid ( ) ) { <nl> + cl - > light_shader_xform = cl - > xform * scale ; <nl> + / / cl - > light_shader_pos = cl - > xform_cache [ 2 ] ; <nl> + if ( cl - > use_shadow ) { <nl> <nl> cl - > shadows_next_ptr = lights_with_shadow ; <nl> if ( lights_with_shadow = = NULL ) { <nl> void VisualServerViewport : : _draw_viewport ( Viewport * p_viewport , ARVRInterface : : E <nl> light_count + + ; <nl> } <nl> <nl> - VSG : : canvas_render - > light_internal_update ( cl - > light_internal , cl ) ; <nl> + / / guess this is not needed , but keeping because it may be <nl> + / / VSG : : canvas_render - > light_internal_update ( cl - > light_internal , cl ) ; <nl> } <nl> } <nl> <nl> void VisualServerViewport : : _draw_viewport ( Viewport * p_viewport , ARVRInterface : : E <nl> RasterizerCanvas : : Light * light = lights_with_shadow ; <nl> while ( light ) { <nl> <nl> - VSG : : canvas_render - > canvas_light_shadow_buffer_update ( light - > shadow_buffer , light - > xform_cache . affine_inverse ( ) , light - > item_shadow_mask , light - > radius_cache / 1000 . 0 , light - > radius_cache * 1 . 1 , occluders , & light - > shadow_matrix_cache ) ; <nl> + VSG : : canvas_render - > light_update_shadow ( light - > light_internal , light - > xform_cache . 
affine_inverse ( ) , light - > item_shadow_mask , light - > radius_cache / 1000 . 0 , light - > radius_cache * 1 . 1 , occluders ) ; <nl> light = light - > shadows_next_ptr ; <nl> } <nl> <nl>
|
Normal mapping and specular mapping working in 2D engine
|
godotengine/godot
|
a7b2ac7bb1b5f9155866382f14138e86e52efded
|
2020-02-11T10:53:28Z
|
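The commit above adds 2D normal and specular mapping to the RenderingDevice canvas renderer: the fragment shader decodes a normal-map sample, computes a Lambert term per light, and optionally a Blinn-Phong specular term. Below is a minimal C++ sketch of that per-pixel math for one light; it is not Godot code, and all type and function names are illustrative assumptions that only mirror the shader logic shown in the diff.

// Minimal sketch (not Godot code): per-pixel 2D normal mapping + Blinn-Phong
// specular, following the canvas fragment shader in the diff above.
#include <cmath>
#include <algorithm>

struct Vec3 { float x, y, z; };

static Vec3 normalize3(Vec3 v) {
    float l = std::sqrt(v.x * v.x + v.y * v.y + v.z * v.z);
    return { v.x / l, v.y / l, v.z / l };
}
static float dot3(Vec3 a, Vec3 b) { return a.x * b.x + a.y * b.y + a.z * b.z; }

// nx_sample / ny_sample: the .xy channels of the normal map sample in [0,1].
// Writes the diffuse (Lambert) and specular (Blinn) weights for one light.
void light_weights(float nx_sample, float ny_sample,
                   Vec3 pixel_pos, Vec3 light_pos,
                   float shininess_alpha,
                   float &diffuse, float &specular) {
    Vec3 n;
    n.x = nx_sample * 2.0f - 1.0f;
    n.y = ny_sample * -2.0f + 1.0f;                        // Y flipped, as in the shader
    n.z = std::sqrt(std::max(0.0f, 1.0f - (n.x * n.x + n.y * n.y)));

    Vec3 light_vec = normalize3({ light_pos.x - pixel_pos.x,
                                  light_pos.y - pixel_pos.y,
                                  light_pos.z - pixel_pos.z });
    diffuse = std::max(0.0f, dot3(n, light_vec));          // Lambert term

    const Vec3 view = { 0.0f, 0.0f, 1.0f };                // fixed screen-facing view, as in the shader
    Vec3 half_vec = normalize3({ view.x + light_vec.x,
                                 view.y + light_vec.y,
                                 view.z + light_vec.z });
    float ndotv = std::max(0.0f, dot3(n, view));
    float ndoth = std::max(0.0f, dot3(n, half_vec));
    float shininess = std::exp2(15.0f * shininess_alpha + 1.0f) * 0.25f;
    float blinn = std::pow(ndoth, shininess) * (shininess + 8.0f) / (8.0f * 3.14159265f);
    specular = blinn / std::max(4.0f * ndotv * diffuse, 0.75f);
}

The resulting light contribution is then combined with the item color using the ADD / SUB / MIX / MASK blend modes selected by the light's flags, exactly as in the switch at the end of the fragment shader.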
mmm a / src / frontend / mosh - server . cc <nl> ppp b / src / frontend / mosh - server . cc <nl> void serve ( int host_fd , Terminal : : Complete & terminal , ServerConnection & network <nl> / * tell child process of resize * / <nl> const Parser : : Resize * res = static_cast < const Parser : : Resize * > ( us . get_action ( i ) ) ; <nl> struct winsize window_size ; <nl> + if ( ioctl ( host_fd , TIOCGWINSZ , & window_size ) < 0 ) { <nl> + perror ( " ioctl TIOCGWINSZ " ) ; <nl> + return ; <nl> + } <nl> window_size . ws_col = res - > width ; <nl> window_size . ws_row = res - > height ; <nl> if ( ioctl ( host_fd , TIOCSWINSZ , & window_size ) < 0 ) { <nl>
|
Fully initialize the argument to TIOCSWINSZ
|
mobile-shell/mosh
|
ba9b16aafa39882c9a5b127489b89349f08e8b2f
|
2012-03-31T19:41:34Z
|
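The mosh-server change above avoids passing a partially initialized struct winsize to TIOCSWINSZ: the current size is fetched first so ws_xpixel / ws_ypixel are not sent to the kernel uninitialized, and only the columns and rows are overwritten. A minimal standalone C++ sketch of the same pattern follows; the helper name and signature are hypothetical, not mosh's actual code.

// Minimal sketch (not mosh code): resize a pty without leaking uninitialized
// winsize fields to the kernel.
#include <sys/ioctl.h>
#include <cstdio>

bool resize_pty(int pty_fd, unsigned short cols, unsigned short rows) {
    struct winsize window_size;
    if (ioctl(pty_fd, TIOCGWINSZ, &window_size) < 0) {   // fetch current size, filling all fields
        perror("ioctl TIOCGWINSZ");
        return false;
    }
    window_size.ws_col = cols;                            // overwrite only what changed
    window_size.ws_row = rows;
    if (ioctl(pty_fd, TIOCSWINSZ, &window_size) < 0) {    // push the new size to the pty
        perror("ioctl TIOCSWINSZ");
        return false;
    }
    return true;
}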
mmm a / Code / CryEngine / CryFlowGraph / FlowSystem / Nodes / FlowEntityNode . cpp <nl> ppp b / Code / CryEngine / CryFlowGraph / FlowSystem / Nodes / FlowEntityNode . cpp <nl> class CFlowNode_SpawnEntity : public CFlowBaseNode < eNCT_Singleton > <nl> break ; <nl> case eFE_PrecacheResources : <nl> { <nl> - if ( IGame : : IResourcesPreCache * pResourceCache = gEnv - > pGameFramework - > GetIGame ( ) - > GetResourceCache ( ) ) <nl> + if ( IGame * pGame = gEnv - > pGameFramework - > GetIGame ( ) ) <nl> { <nl> - pResourceCache - > QueueEntityClass ( GetPortString ( pActInfo , EIP_ClassName ) ) ; <nl> + if ( IGame : : IResourcesPreCache * pResourceCache = pGame - > GetResourceCache ( ) ) <nl> + { <nl> + pResourceCache - > QueueEntityClass ( GetPortString ( pActInfo , EIP_ClassName ) ) ; <nl> + } <nl> } <nl> } <nl> break ; <nl> class CFlowNode_SpawnArchetypeEntity : public CFlowBaseNode < eNCT_Singleton > <nl> break ; <nl> case eFE_PrecacheResources : <nl> { <nl> - if ( IGame : : IResourcesPreCache * pResourceCache = gEnv - > pGameFramework - > GetIGame ( ) - > GetResourceCache ( ) ) <nl> + if ( IGame * pGame = gEnv - > pGameFramework - > GetIGame ( ) ) <nl> { <nl> - pResourceCache - > QueueEntityArchetype ( GetPortString ( pActInfo , EIP_ArchetypeName ) ) ; <nl> + if ( IGame : : IResourcesPreCache * pResourceCache = pGame - > GetResourceCache ( ) ) <nl> + { <nl> + pResourceCache - > QueueEntityArchetype ( GetPortString ( pActInfo , EIP_ArchetypeName ) ) ; <nl> + } <nl> } <nl> } <nl> break ; <nl> class CFlowNode_GetGameRulesEntityId : public CFlowBaseNode < eNCT_Singleton > <nl> { <nl> if ( eFE_Activate = = event & & IsPortActive ( pActInfo , IN_GET ) ) <nl> { <nl> - IEntity * pGameRules = gEnv - > pGameFramework - > GetIGameRulesSystem ( ) - > GetCurrentGameRulesEntity ( ) ; <nl> - ActivateOutput ( pActInfo , OUT_ID , pGameRules - > GetId ( ) ) ; <nl> + if ( IEntity * pGameRules = gEnv - > pGameFramework - > GetIGameRulesSystem ( ) - > GetCurrentGameRulesEntity ( ) ) <nl> + { <nl> + ActivateOutput ( pActInfo , OUT_ID , pGameRules - > GetId ( ) ) ; <nl> + } <nl> + else <nl> + { <nl> + ActivateOutput ( pActInfo , OUT_ID , INVALID_ENTITYID ) ; <nl> + } <nl> } <nl> } <nl> <nl> mmm a / Code / CryEngine / CryFlowGraph / FlowSystem / Nodes / GameTokenNode . cpp <nl> ppp b / Code / CryEngine / CryFlowGraph / FlowSystem / Nodes / GameTokenNode . cpp <nl> IGameToken * GetGameToken ( const char * callingNodeName , IFlowNode : : SActivationInfo <nl> <nl> IGameTokenSystem * pGTS = GetIGameTokenSystem ( ) ; <nl> IGameToken * pToken = pGTS - > FindToken ( tokenName . c_str ( ) ) ; <nl> - if ( ! pToken ) <nl> + if ( ! pToken & & pActInfo - > pGraph ! = nullptr ) <nl> { <nl> / / try graph token instead : <nl> const char * name = pActInfo - > pGraph - > GetGlobalNameForGraphToken ( tokenName . c_str ( ) ) ; <nl> IGameToken * GetGameToken ( const char * callingNodeName , IFlowNode : : SActivationInfo <nl> CryWarning ( VALIDATOR_MODULE_FLOWGRAPH , VALIDATOR_ERROR , <nl> " [ FG ] Cannot find GameToken : ' % s ' Node : % s Graph : % s " , <nl> tokenName . c_str ( ) , <nl> - callingNodeName , pActInfo - > pGraph - > GetDebugName ( ) <nl> + callingNodeName , pActInfo - > pGraph ! = nullptr ? pActInfo - > pGraph - > GetDebugName ( ) : " Unknown ! " <nl> ) ; <nl> } <nl> <nl> mmm a / Code / CryEngine / CryInput / CMakeLists . txt <nl> ppp b / Code / CryEngine / CryInput / CMakeLists . 
txt <nl> <nl> + # Note : By default , the include files required are located at C : \ Program Files ( x86 ) \ SCE \ Common \ External Tools \ libScePad for PC Games <nl> + # Copy the \ include and \ lib folder from there to < ROOT > \ Code \ SDKs \ OrbisPad \ include and \ lib <nl> if ( EXISTS $ { SDK_DIR } / OrbisPad ) <nl> set ( HAS_ORBISPAD TRUE ) <nl> endif ( ) <nl> mmm a / Code / CryEngine / CryInput / OrbisPadWin . h <nl> ppp b / Code / CryEngine / CryInput / OrbisPadWin . h <nl> <nl> # define __ORBISPADWIN_H__ <nl> # pragma once <nl> <nl> - / / TOOLS_SUPPORT_ORBIS is defined only when the developer is supposed to have Orbis SDK access <nl> - / / Note : By default , the include files required are located at C : \ Program Files ( x86 ) \ SCE \ Common \ External Tools \ libScePad for PC Games <nl> - / / Copy the \ include and \ lib folder from there to < ROOT > \ Code \ SDKs \ OrbisPad \ include and \ lib <nl> / / If you don ' t want this feature at all , just : <nl> / / # undef WANT_ORBISPAD_WIN <nl> <nl> <nl> # pragma message ( " Unable to use OrbisPad on Windows with MSVC newer than 2015 due to missing libs . " ) <nl> # endif <nl> <nl> - # if defined ( TOOLS_SUPPORT_ORBIS ) & & defined ( USE_DXINPUT ) & & ( CRY_PLATFORM_WINDOWS & & CRY_PLATFORM_64BIT ) & & defined ( WANT_ORBISPAD_WIN ) <nl> + # if defined ( USE_DXINPUT ) & & ( CRY_PLATFORM_WINDOWS & & CRY_PLATFORM_64BIT ) & & defined ( WANT_ORBISPAD_WIN ) <nl> # ifndef USE_ORBISPAD_WIN <nl> # define USE_ORBISPAD_WIN <nl> # endif <nl> mmm a / Code / CryEngine / CrySchematyc / Core / Impl / CMakeLists . txt <nl> ppp b / Code / CryEngine / CrySchematyc / Core / Impl / CMakeLists . txt <nl> add_sources ( " Schematyc_Uber_Core_4 . cpp " <nl> " Script / Graph / Nodes / ScriptGraphFormatStringNode . h " <nl> " Script / Graph / Nodes / ScriptGraphFunctionNode . cpp " <nl> " Script / Graph / Nodes / ScriptGraphFunctionNode . h " <nl> - " Script / Graph / Nodes / ScriptGraphFlowGraphNode . cpp " <nl> - " Script / Graph / Nodes / ScriptGraphFlowGraphNode . h " <nl> " Script / Graph / Nodes / ScriptGraphGetNode . cpp " <nl> " Script / Graph / Nodes / ScriptGraphGetNode . h " <nl> " Script / Graph / Nodes / ScriptGraphGetObjectIdNode . cpp " <nl> deleted file mode 100644 <nl> index 9282b8bd8c . . 0000000000 <nl> mmm a / Code / CryEngine / CrySchematyc / Core / Impl / Script / Graph / Nodes / ScriptGraphFlowGraphNode . cpp <nl> ppp / dev / null <nl> <nl> - / / Copyright 2001 - 2016 Crytek GmbH / Crytek Group . All rights reserved . <nl> - <nl> - # include " StdAfx . h " <nl> - # include " Script / Graph / Nodes / ScriptGraphFlowGraphNode . h " <nl> - <nl> - # include < CrySerialization / Decorators / ActionButton . h > <nl> - # include < CrySchematyc / Compiler / CompilerContext . h > <nl> - # include < CrySchematyc / Compiler / IGraphNodeCompiler . h > <nl> - # include < CrySchematyc / Env / IEnvRegistry . h > <nl> - # include < CrySchematyc / Env / Elements / IEnvComponent . h > <nl> - # include < CrySchematyc / Env / Elements / IEnvFunction . h > <nl> - # include < CrySchematyc / Script / IScriptRegistry . h > <nl> - # include < CrySchematyc / Script / Elements / IScriptComponentInstance . h > <nl> - # include < CrySchematyc / Script / Elements / IScriptFunction . h > <nl> - # include < CrySchematyc / Utils / Any . h > <nl> - # include < CrySchematyc / Utils / IGUIDRemapper . h > <nl> - # include < CrySchematyc / Utils / StackString . h > <nl> - # include < CrySchematyc / Utils / SharedString . h > <nl> - <nl> - # include " Object . 
h " <nl> - # include " CVars . h " <nl> - # include " Runtime / RuntimeClass . h " <nl> - # include " Script / ScriptView . h " <nl> - # include " Script / Graph / ScriptGraphNode . h " <nl> - # include " Script / Graph / ScriptGraphNodeFactory . h " <nl> - # include " SerializationUtils / SerializationContext . h " <nl> - <nl> - # include < CryFlowGraph / IFlowBaseNode . h > <nl> - <nl> - namespace Schematyc { <nl> - <nl> - const CryGUID CScriptGraphFlowGraphNode : : ms_typeGUID = " 7067329D - AFED - 4321 - 9D18 - D4B1CA433B3A " _cry_guid ; <nl> - <nl> - namespace FlowGraph { <nl> - <nl> - struct CFlowGraphDummyClass : public IFlowGraph <nl> - { <nl> - EntityId m_entityId ; <nl> - <nl> - / / NFlowSystemUtils : : IFlowSystemTyped <nl> - virtual void DoActivatePort ( const SFlowAddress , const NFlowSystemUtils : : Wrapper < SFlowSystemVoid > & value ) final { } ; <nl> - virtual void DoActivatePort ( const SFlowAddress , const NFlowSystemUtils : : Wrapper < int > & value ) final { } ; <nl> - virtual void DoActivatePort ( const SFlowAddress , const NFlowSystemUtils : : Wrapper < float > & value ) final { } ; <nl> - virtual void DoActivatePort ( const SFlowAddress , const NFlowSystemUtils : : Wrapper < EntityId > & value ) final { } ; <nl> - virtual void DoActivatePort ( const SFlowAddress , const NFlowSystemUtils : : Wrapper < Vec3 > & value ) final { } ; <nl> - virtual void DoActivatePort ( const SFlowAddress , const NFlowSystemUtils : : Wrapper < string > & value ) final { } ; <nl> - virtual void DoActivatePort ( const SFlowAddress , const NFlowSystemUtils : : Wrapper < bool > & value ) final { } ; <nl> - / / ~ NFlowSystemUtils : : IFlowSystemTyped <nl> - <nl> - <nl> - / / IFlowGraph <nl> - virtual void AddRef ( ) final { } ; <nl> - virtual void Release ( ) final { } ; <nl> - virtual IFlowGraphPtr Clone ( ) final { return this ; } ; <nl> - virtual void Clear ( ) final { } ; <nl> - virtual void RegisterHook ( IFlowGraphHookPtr ) final { } ; <nl> - virtual void UnregisterHook ( IFlowGraphHookPtr ) final { } ; <nl> - virtual IFlowNodeIteratorPtr CreateNodeIterator ( ) final { return nullptr ; } ; <nl> - virtual IFlowEdgeIteratorPtr CreateEdgeIterator ( ) final { return nullptr ; } ; <nl> - virtual void SetGraphEntity ( EntityId id , int nIndex = 0 ) final { m_entityId = id ; } ; <nl> - virtual EntityId GetGraphEntity ( int nIndex ) const final { return m_entityId ; } ; <nl> - virtual void SetEnabled ( bool bEnable ) final { } ; <nl> - virtual bool IsEnabled ( ) const final { return true ; } ; <nl> - virtual void SetActive ( bool bActive ) final { } ; <nl> - virtual bool IsActive ( ) const final { return true ; } ; <nl> - virtual void UnregisterFromFlowSystem ( ) final { } ; <nl> - virtual void SetType ( IFlowGraph : : EFlowGraphType type ) final { } ; <nl> - virtual IFlowGraph : : EFlowGraphType GetType ( ) const final { return eFGT_Default ; } ; <nl> - virtual const char * GetDebugName ( ) const final { return " " ; } ; <nl> - virtual void SetDebugName ( const char * sName ) final { } ; <nl> - virtual void Update ( ) final { } ; <nl> - virtual bool SerializeXML ( const XmlNodeRef & root , bool reading ) final { return false ; } ; <nl> - virtual void Serialize ( TSerialize ser ) final { } ; <nl> - virtual void PostSerialize ( ) final { } ; <nl> - virtual void InitializeValues ( ) final { } ; <nl> - virtual void PrecacheResources ( ) final { } ; <nl> - virtual void EnsureSortedEdges ( ) final { } ; <nl> - virtual SFlowAddress ResolveAddress ( const char * addr , bool isOutput ) final { return 
SFlowAddress ( 0 , 0 , false ) ; } ; <nl> - virtual TFlowNodeId ResolveNode ( const char * name ) final { assert ( 0 ) ; return 0 ; } ; <nl> - virtual TFlowNodeId CreateNode ( TFlowNodeTypeId typeId , const char * name , void * pUserData = 0 ) final { assert ( 0 ) ; return 0 ; } ; <nl> - virtual TFlowNodeId CreateNode ( const char * typeName , const char * name , void * pUserData = 0 ) final { assert ( 0 ) ; return 0 ; } ; <nl> - virtual IFlowNodeData * GetNodeData ( TFlowNodeId id ) final { assert ( 0 ) ; return nullptr ; } ; <nl> - virtual bool SetNodeName ( TFlowNodeId id , const char * sName ) final { return true ; } ; <nl> - virtual const char * GetNodeName ( TFlowNodeId id ) final { return " " ; } ; <nl> - virtual TFlowNodeTypeId GetNodeTypeId ( TFlowNodeId id ) final { return 0 ; } ; <nl> - virtual const char * GetNodeTypeName ( TFlowNodeId id ) final { return " " ; } ; <nl> - virtual void RemoveNode ( const char * name ) final { } ; <nl> - virtual void RemoveNode ( TFlowNodeId id ) final { } ; <nl> - virtual void SetUserData ( TFlowNodeId id , const XmlNodeRef & data ) final { } ; <nl> - virtual XmlNodeRef GetUserData ( TFlowNodeId id ) final { return nullptr ; } ; <nl> - virtual bool LinkNodes ( SFlowAddress from , SFlowAddress to ) final { return false ; } ; <nl> - virtual void UnlinkNodes ( SFlowAddress from , SFlowAddress to ) final { } ; <nl> - virtual void RegisterFlowNodeActivationListener ( SFlowNodeActivationListener * listener ) final { } ; <nl> - virtual void RemoveFlowNodeActivationListener ( SFlowNodeActivationListener * listener ) final { } ; <nl> - virtual bool NotifyFlowNodeActivationListeners ( TFlowNodeId srcNode , TFlowPortId srcPort , TFlowNodeId toNode , TFlowPortId toPort , const char * value ) final { return false ; } ; <nl> - virtual void SetEntityId ( TFlowNodeId , EntityId ) final { } ; <nl> - virtual EntityId GetEntityId ( TFlowNodeId ) final { return m_entityId ; } ; <nl> - virtual IFlowGraphPtr GetClonedFlowGraph ( ) const final { return const_cast < CFlowGraphDummyClass * > ( this ) ; } ; <nl> - virtual void GetNodeConfiguration ( TFlowNodeId id , SFlowNodeConfig & ) final { } ; <nl> - virtual void SetRegularlyUpdated ( TFlowNodeId , bool ) final { } ; <nl> - virtual void RequestFinalActivation ( TFlowNodeId ) final { } ; <nl> - virtual void ActivateNode ( TFlowNodeId ) final { } ; <nl> - virtual void ActivatePortAny ( SFlowAddress output , const TFlowInputData & ) final { } ; <nl> - virtual void ActivatePortCString ( SFlowAddress output , const char * cstr ) final { } ; <nl> - virtual bool SetInputValue ( TFlowNodeId node , TFlowPortId port , const TFlowInputData & ) final { return false ; } ; <nl> - virtual bool IsOutputConnected ( SFlowAddress output ) final { return false ; } ; <nl> - virtual const TFlowInputData * GetInputValue ( TFlowNodeId node , TFlowPortId port ) final { return nullptr ; } ; <nl> - virtual bool GetActivationInfo ( const char * nodeName , IFlowNode : : SActivationInfo & actInfo ) final { return false ; } ; <nl> - virtual void SetSuspended ( bool suspend = true ) final { } ; <nl> - virtual bool IsSuspended ( ) const final { return false ; } ; <nl> - virtual bool IsInInitializationPhase ( ) const final { return false ; } ; <nl> - virtual void SetAIAction ( IAIAction * pAIAction ) final { } ; <nl> - virtual IAIAction * GetAIAction ( ) const final { return nullptr ; } ; <nl> - virtual void SetCustomAction ( ICustomAction * pCustomAction ) final { } ; <nl> - virtual ICustomAction * GetCustomAction ( ) const final { return nullptr ; 
} ; <nl> - virtual void GetMemoryUsage ( ICrySizer * s ) const final { } ; <nl> - <nl> - virtual void RemoveGraphTokens ( ) final { } ; <nl> - virtual bool AddGraphToken ( const SGraphToken & token ) final { return false ; } ; <nl> - virtual size_t GetGraphTokenCount ( ) const final { return 0 ; } ; <nl> - virtual const IFlowGraph : : SGraphToken * GetGraphToken ( size_t index ) const final { return nullptr ; } ; <nl> - virtual const char * GetGlobalNameForGraphToken ( const char * tokenName ) const final { return " " ; } ; <nl> - <nl> - virtual TFlowGraphId GetGraphId ( ) const final { return 0 ; } ; <nl> - / / ~ IFlowGraph <nl> - } ; <nl> - <nl> - CAnyValuePtr FlowGraphTypeToAny ( int flowGraphTypeIndex ) <nl> - { <nl> - static CAnyValuePtr s_flowTypesTable [ eFDT_Bool + 1 ] ; / / Assumes eFDT_Bool is the last type in EFlowDataTypes <nl> - assert ( flowGraphTypeIndex - eFDT_Any > = 0 & & flowGraphTypeIndex - eFDT_Any < CRY_ARRAY_COUNT ( s_flowTypesTable ) ) ; <nl> - <nl> - int arrayIndex = flowGraphTypeIndex - eFDT_Any ; <nl> - if ( s_flowTypesTable [ arrayIndex ] ) <nl> - { <nl> - return s_flowTypesTable [ arrayIndex ] ; <nl> - } <nl> - <nl> - CAnyValuePtr pAny ; <nl> - switch ( flowGraphTypeIndex ) <nl> - { <nl> - case eFDT_Any : <nl> - break ; <nl> - <nl> - case eFDT_Void : <nl> - break ; <nl> - <nl> - case eFDT_Int : <nl> - pAny = CAnyValue : : MakeShared < int > ( 0 ) ; <nl> - break ; <nl> - <nl> - case eFDT_Float : <nl> - pAny = CAnyValue : : MakeShared < float > ( 0 . 0f ) ; <nl> - break ; <nl> - <nl> - case eFDT_EntityId : <nl> - pAny = CAnyValue : : MakeShared < EntityId > ( 0 ) ; <nl> - break ; <nl> - <nl> - case eFDT_Vec3 : <nl> - pAny = CAnyValue : : MakeShared < Vec3 > ( Vec3 ( 0 , 0 , 0 ) ) ; <nl> - break ; <nl> - <nl> - case eFDT_String : <nl> - pAny = CAnyValue : : MakeShared < CSharedString > ( " " ) ; <nl> - break ; <nl> - <nl> - case eFDT_Bool : <nl> - pAny = CAnyValue : : MakeShared < bool > ( 0 ) ; <nl> - break ; <nl> - } <nl> - s_flowTypesTable [ arrayIndex ] = pAny ; <nl> - return pAny ; <nl> - } <nl> - <nl> - <nl> - CAnyValuePtr FlowGraphVariantToAny ( const TFlowInputDataVariant & variant ) <nl> - { <nl> - int flowGraphTypeIndex = variant . index ( ) ; <nl> - <nl> - CAnyValuePtr pAny ; <nl> - switch ( flowGraphTypeIndex ) <nl> - { <nl> - case eFDT_Any : <nl> - break ; <nl> - <nl> - case eFDT_Void : <nl> - break ; <nl> - <nl> - case eFDT_Int : <nl> - pAny = CAnyValue : : MakeShared < int > ( stl : : get < int > ( variant ) ) ; <nl> - break ; <nl> - <nl> - case eFDT_Float : <nl> - pAny = CAnyValue : : MakeShared < float > ( stl : : get < float > ( variant ) ) ; <nl> - break ; <nl> - <nl> - case eFDT_EntityId : <nl> - pAny = CAnyValue : : MakeShared < EntityId > ( stl : : get < EntityId > ( variant ) ) ; <nl> - break ; <nl> - <nl> - case eFDT_Vec3 : <nl> - pAny = CAnyValue : : MakeShared < Vec3 > ( stl : : get < Vec3 > ( variant ) ) ; <nl> - break ; <nl> - <nl> - case eFDT_String : <nl> - pAny = CAnyValue : : MakeShared < CSharedString > ( stl : : get < string > ( variant ) . c_str ( ) ) ; <nl> - break ; <nl> - <nl> - case eFDT_Bool : <nl> - pAny = CAnyValue : : MakeShared < bool > ( stl : : get < bool > ( variant ) ) ; <nl> - break ; <nl> - } <nl> - return pAny ; <nl> - } <nl> - <nl> - void AssignAnyToFlowGraphInputData ( const CAnyConstPtr & any , TFlowInputData & flowInputData ) <nl> - { <nl> - EFlowDataTypes flowType = flowInputData . 
GetType ( ) ; <nl> - CAnyConstPtr pAny ; <nl> - switch ( flowType ) <nl> - { <nl> - case eFDT_Any : <nl> - break ; <nl> - <nl> - case eFDT_Void : <nl> - break ; <nl> - <nl> - case eFDT_Int : <nl> - flowInputData . Set < int > ( * DynamicCast < int > ( any ) ) ; <nl> - break ; <nl> - <nl> - case eFDT_Float : <nl> - flowInputData . Set < float > ( * DynamicCast < float > ( any ) ) ; <nl> - break ; <nl> - <nl> - case eFDT_EntityId : <nl> - flowInputData . Set < EntityId > ( * DynamicCast < EntityId > ( any ) ) ; <nl> - break ; <nl> - <nl> - case eFDT_Vec3 : <nl> - flowInputData . Set < Vec3 > ( * DynamicCast < Vec3 > ( any ) ) ; <nl> - break ; <nl> - <nl> - case eFDT_String : <nl> - { <nl> - const CSharedString * pStr = DynamicCast < CSharedString > ( any ) ; <nl> - if ( pStr ) <nl> - { <nl> - flowInputData . Set < string > ( pStr - > c_str ( ) ) ; <nl> - } <nl> - } <nl> - break ; <nl> - <nl> - case eFDT_Bool : <nl> - flowInputData . Set < bool > ( * DynamicCast < bool > ( any ) ) ; <nl> - break ; <nl> - } <nl> - } <nl> - <nl> - void AssignFlowGraphVariantToAny ( const TFlowInputDataVariant & variant , const CAnyPtr & any ) <nl> - { <nl> - int flowGraphTypeIndex = variant . index ( ) ; <nl> - <nl> - switch ( flowGraphTypeIndex ) <nl> - { <nl> - case eFDT_Any : <nl> - break ; <nl> - <nl> - case eFDT_Void : <nl> - break ; <nl> - <nl> - case eFDT_Int : <nl> - * DynamicCast < int > ( any ) = stl : : get < int > ( variant ) ; <nl> - break ; <nl> - <nl> - case eFDT_Float : <nl> - * DynamicCast < float > ( any ) = stl : : get < float > ( variant ) ; <nl> - break ; <nl> - <nl> - case eFDT_EntityId : <nl> - * DynamicCast < EntityId > ( any ) = stl : : get < EntityId > ( variant ) ; <nl> - break ; <nl> - <nl> - case eFDT_Vec3 : <nl> - * DynamicCast < Vec3 > ( any ) = stl : : get < Vec3 > ( variant ) ; <nl> - break ; <nl> - <nl> - case eFDT_String : <nl> - * DynamicCast < CSharedString > ( any ) = stl : : get < string > ( variant ) . c_str ( ) ; <nl> - break ; <nl> - <nl> - case eFDT_Bool : <nl> - * DynamicCast < bool > ( any ) = stl : : get < bool > ( variant ) ; <nl> - break ; <nl> - } <nl> - } <nl> - } <nl> - <nl> - struct SEnvFlowGraphRuntimeData <nl> - { <nl> - static void ReflectType ( CTypeDesc < SEnvFlowGraphRuntimeData > & desc ) <nl> - { <nl> - desc . SetGUID ( " 89A6C32B - 5070 - 4155 - 9329 - 1B79C8022A31 " _cry_guid ) ; <nl> - } <nl> - <nl> - struct SFlowGraphNode <nl> - { <nl> - int inputCount = 0 ; <nl> - int outputCount = 0 ; <nl> - SFlowNodeConfig config ; <nl> - IFlowNodePtr pNode = nullptr ; <nl> - std : : vector < TFlowInputData > inputData ; <nl> - bool hasEntity = false ; <nl> - bool hasInputVoidPorts = false ; <nl> - <nl> - SFlowGraphNode ( TFlowNodeTypeId typeId ) <nl> - { <nl> - FlowGraph : : CFlowGraphDummyClass flowGraph ; <nl> - IFlowNode : : SActivationInfo actInfo ( & flowGraph ) ; <nl> - <nl> - pNode = gEnv - > pFlowSystem - > CreateNodeOfType ( & actInfo , typeId ) ; <nl> - <nl> - pNode - > GetConfiguration ( config ) ; <nl> - <nl> - hasEntity = 0 ! = ( config . nFlags & EFLN_TARGET_ENTITY ) ; <nl> - if ( ! config . pInputPorts ) <nl> - inputCount = 0 ; <nl> - else <nl> - for ( inputCount = 0 ; config . pInputPorts [ inputCount ] . name ; inputCount + + ) <nl> - ; <nl> - if ( 0 ! = ( config . nFlags & EFLN_DYNAMIC_OUTPUT ) ) <nl> - outputCount = 64 ; / / Allow for so many output ports to be made <nl> - else if ( ! config . pOutputPorts ) <nl> - outputCount = 0 ; <nl> - else <nl> - for ( outputCount = 0 ; config . pOutputPorts [ outputCount ] . 
name ; outputCount + + ) <nl> - ; <nl> - <nl> - inputData . resize ( inputCount ) ; <nl> - for ( int i = 0 ; i < inputCount ; i + + ) <nl> - { <nl> - inputData [ i ] = config . pInputPorts [ i ] . defaultData ; <nl> - if ( config . pInputPorts [ i ] . defaultData . GetType ( ) = = eFDT_Void ) <nl> - { <nl> - hasInputVoidPorts = true ; <nl> - } <nl> - } <nl> - } <nl> - } ; <nl> - <nl> - <nl> - SEnvFlowGraphRuntimeData ( TFlowNodeTypeId typeId ) <nl> - { <nl> - m_pNode = std : : make_shared < SFlowGraphNode > ( typeId ) ; <nl> - } <nl> - <nl> - std : : shared_ptr < SFlowGraphNode > m_pNode ; <nl> - } ; <nl> - <nl> - struct STempFGNodeActivationUserData <nl> - { <nl> - CRuntimeGraphNodeInstance * pNode ; <nl> - SEnvFlowGraphRuntimeData : : SFlowGraphNode * pFlowGraphNode ; <nl> - int outputPort ; <nl> - } ; <nl> - <nl> - CScriptGraphFlowGraphNode : : CScriptGraphFlowGraphNode ( ) { } <nl> - <nl> - CScriptGraphFlowGraphNode : : CScriptGraphFlowGraphNode ( const string & flowNodeTypeName , const CryGUID & objectGUID ) <nl> - : m_objectGUID ( objectGUID ) <nl> - , m_flowNodeTypeName ( flowNodeTypeName ) <nl> - { <nl> - <nl> - if ( gEnv - > pFlowSystem & & m_flowNodeTypeName ) <nl> - { <nl> - m_flowNodeTypeId = gEnv - > pFlowSystem - > GetTypeId ( m_flowNodeTypeName ) ; <nl> - } <nl> - } <nl> - <nl> - CryGUID CScriptGraphFlowGraphNode : : GetTypeGUID ( ) const <nl> - { <nl> - return ms_typeGUID ; <nl> - } <nl> - <nl> - void CScriptGraphFlowGraphNode : : CreateLayout ( CScriptGraphNodeLayout & layout ) <nl> - { <nl> - layout . SetStyleId ( " Node : : FlowGraph " ) ; <nl> - <nl> - stack_string subject ; <nl> - <nl> - CreateInputsAndOutputs ( layout ) ; <nl> - <nl> - subject = " FG : " ; <nl> - subject + = m_flowNodeTypeName . c_str ( ) ; <nl> - <nl> - layout . SetName ( nullptr , subject . c_str ( ) ) ; <nl> - } <nl> - <nl> - void CScriptGraphFlowGraphNode : : Compile ( SCompilerContext & context , IGraphNodeCompiler & compiler ) const <nl> - { <nl> - compiler . BindCallback ( & ExecuteFlowGraphNode ) ; <nl> - compiler . BindData ( SEnvFlowGraphRuntimeData ( m_flowNodeTypeId ) ) ; <nl> - } <nl> - <nl> - void CScriptGraphFlowGraphNode : : LoadDependencies ( Serialization : : IArchive & archive , const ISerializationContext & context ) <nl> - { <nl> - archive ( m_flowNodeTypeName , " flowNodeType " ) ; <nl> - archive ( m_objectGUID , " objectGUID " ) ; <nl> - <nl> - if ( archive . isInput ( ) ) <nl> - { <nl> - if ( gEnv - > pFlowSystem & & m_flowNodeTypeName ) <nl> - { <nl> - m_flowNodeTypeId = gEnv - > pFlowSystem - > GetTypeId ( m_flowNodeTypeName ) ; <nl> - } <nl> - } <nl> - } <nl> - <nl> - void CScriptGraphFlowGraphNode : : Save ( Serialization : : IArchive & archive , const ISerializationContext & context ) <nl> - { <nl> - archive ( m_flowNodeTypeName , " flowNodeType " ) ; <nl> - archive ( m_objectGUID , " objectGUID " ) ; <nl> - } <nl> - <nl> - void CScriptGraphFlowGraphNode : : Edit ( Serialization : : IArchive & archive , const ISerializationContext & context ) <nl> - { <nl> - Validate ( archive , context ) ; <nl> - } <nl> - <nl> - void CScriptGraphFlowGraphNode : : Validate ( Serialization : : IArchive & archive , const ISerializationContext & context ) <nl> - { <nl> - } <nl> - <nl> - void CScriptGraphFlowGraphNode : : RemapDependencies ( IGUIDRemapper & guidRemapper ) <nl> - { <nl> - m_objectGUID = guidRemapper . 
Remap ( m_objectGUID ) ; <nl> - } <nl> - <nl> - void CScriptGraphFlowGraphNode : : Register ( CScriptGraphNodeFactory & factory ) <nl> - { <nl> - class CCreator : public IScriptGraphNodeCreator <nl> - { <nl> - private : <nl> - <nl> - class CCreationCommand : public IScriptGraphNodeCreationCommand <nl> - { <nl> - public : <nl> - <nl> - CCreationCommand ( const char * szSubject , const char * szDescription , const string & flowNodeTypeName , const CryGUID & objectGUID = CryGUID ( ) ) <nl> - : m_subject ( szSubject ) <nl> - , m_description ( szDescription ) <nl> - , m_flowNodeTypeName ( flowNodeTypeName ) <nl> - , m_objectGUID ( objectGUID ) <nl> - { } <nl> - <nl> - / / IScriptGraphNodeCreationCommand <nl> - <nl> - virtual const char * GetBehavior ( ) const override <nl> - { <nl> - return " Function " ; <nl> - } <nl> - <nl> - virtual const char * GetSubject ( ) const override <nl> - { <nl> - return m_subject . c_str ( ) ; <nl> - } <nl> - <nl> - virtual const char * GetDescription ( ) const override <nl> - { <nl> - return m_description . c_str ( ) ; <nl> - } <nl> - <nl> - virtual const char * GetStyleId ( ) const override <nl> - { <nl> - return " Node : : FlowGraph " ; <nl> - } <nl> - <nl> - virtual IScriptGraphNodePtr Execute ( const Vec2 & pos ) override <nl> - { <nl> - return std : : make_shared < CScriptGraphNode > ( gEnv - > pSchematyc - > CreateGUID ( ) , stl : : make_unique < CScriptGraphFlowGraphNode > ( m_flowNodeTypeName , m_objectGUID ) , pos ) ; <nl> - } <nl> - <nl> - / / ~ IScriptGraphNodeCreationCommand <nl> - <nl> - private : <nl> - <nl> - string m_subject ; <nl> - string m_description ; <nl> - string m_flowNodeTypeName ; <nl> - CryGUID m_objectGUID ; <nl> - } ; <nl> - <nl> - public : <nl> - <nl> - / / IScriptGraphNodeCreator <nl> - <nl> - virtual CryGUID GetTypeGUID ( ) const override <nl> - { <nl> - return CScriptGraphFlowGraphNode : : ms_typeGUID ; <nl> - } <nl> - <nl> - virtual IScriptGraphNodePtr CreateNode ( const CryGUID & guid ) override <nl> - { <nl> - return std : : make_shared < CScriptGraphNode > ( guid , stl : : make_unique < CScriptGraphFlowGraphNode > ( ) ) ; <nl> - } <nl> - <nl> - virtual void PopulateNodeCreationMenu ( IScriptGraphNodeCreationMenu & nodeCreationMenu , const IScriptView & scriptView , const IScriptGraph & graph ) override <nl> - { <nl> - if ( CVars : : sc_allowFlowGraphNodes ) <nl> - { <nl> - CryGUID objectGUID = CryGUID : : Null ( ) ; <nl> - <nl> - IFlowNodeTypeIteratorPtr pTypeIterator = gEnv - > pFlowSystem - > CreateNodeTypeIterator ( ) ; <nl> - IFlowNodeTypeIterator : : SNodeType nodeType ; <nl> - while ( pTypeIterator - > Next ( nodeType ) ) <nl> - { <nl> - string nodeTypeName = nodeType . typeName ; <nl> - stack_string nodeName = " FlowGraph : : " ; <nl> - nodeName + = nodeTypeName ; <nl> - SEnvFlowGraphRuntimeData data ( nodeType . typeId ) ; <nl> - <nl> - nodeCreationMenu . AddCommand ( std : : make_shared < CCreationCommand > ( nodeName . c_str ( ) , data . m_pNode - > config . sDescription , nodeTypeName , objectGUID ) ) ; <nl> - } <nl> - } <nl> - } <nl> - <nl> - / / ~ IScriptGraphNodeCreator <nl> - } ; <nl> - <nl> - factory . RegisterCreator ( std : : make_shared < CCreator > ( ) ) ; <nl> - } <nl> - <nl> - void CScriptGraphFlowGraphNode : : CreateInputsAndOutputs ( CScriptGraphNodeLayout & layout ) <nl> - { <nl> - SEnvFlowGraphRuntimeData data ( m_flowNodeTypeId ) ; <nl> - if ( ! data . m_pNode ) <nl> - return ; <nl> - <nl> - int inputCount = data . m_pNode - > inputCount ; <nl> - int outputCount = data . 
m_pNode - > outputCount ; <nl> - <nl> - SEnvFlowGraphRuntimeData : : SFlowGraphNode * fgNode = data . m_pNode . get ( ) ; <nl> - <nl> - if ( ! fgNode - > hasInputVoidPorts ) <nl> - { <nl> - layout . AddInput ( " In " , CryGUID ( ) , { EScriptGraphPortFlags : : Flow , EScriptGraphPortFlags : : MultiLink } ) ; <nl> - } <nl> - layout . AddOutput ( " Out " , CryGUID ( ) , EScriptGraphPortFlags : : Flow ) ; <nl> - <nl> - for ( uint32 inputIdx = 0 ; inputIdx < inputCount ; + + inputIdx ) <nl> - { <nl> - const char * szPortName = data . m_pNode - > config . pInputPorts [ inputIdx ] . name ; <nl> - const char * szPortHumanName = data . m_pNode - > config . pInputPorts [ inputIdx ] . humanName ; <nl> - if ( ! szPortHumanName ) <nl> - szPortHumanName = szPortName ; <nl> - <nl> - CAnyValuePtr pData = FlowGraph : : FlowGraphVariantToAny ( fgNode - > inputData [ inputIdx ] . GetVariant ( ) ) ; <nl> - if ( pData ) <nl> - { <nl> - layout . AddInputWithData ( <nl> - CUniqueId : : FromString ( szPortName ) , <nl> - szPortHumanName , <nl> - pData - > GetTypeDesc ( ) . GetGUID ( ) , <nl> - { EScriptGraphPortFlags : : Data , EScriptGraphPortFlags : : Persistent , EScriptGraphPortFlags : : Editable } , <nl> - * pData <nl> - ) ; <nl> - } <nl> - else <nl> - { <nl> - layout . AddInput ( <nl> - CUniqueId : : FromString ( szPortName ) , <nl> - szPortHumanName , <nl> - CryGUID ( ) , <nl> - { EScriptGraphPortFlags : : Flow , EScriptGraphPortFlags : : MultiLink } ) ; <nl> - } <nl> - } <nl> - <nl> - for ( uint32 outputIdx = 0 ; outputIdx < outputCount ; + + outputIdx ) <nl> - { <nl> - const char * szPortName = data . m_pNode - > config . pOutputPorts [ outputIdx ] . name ; <nl> - const char * szPortHumanName = data . m_pNode - > config . pOutputPorts [ outputIdx ] . humanName ; <nl> - if ( ! szPortHumanName ) <nl> - szPortHumanName = szPortName ; <nl> - <nl> - CAnyValuePtr pData = FlowGraph : : FlowGraphTypeToAny ( data . m_pNode - > config . pOutputPorts [ outputIdx ] . type ) ; <nl> - if ( pData ) <nl> - { <nl> - layout . AddOutputWithData ( <nl> - CUniqueId : : FromString ( szPortName ) , <nl> - szPortHumanName , <nl> - pData - > GetTypeDesc ( ) . GetGUID ( ) , <nl> - { EScriptGraphPortFlags : : Data , EScriptGraphPortFlags : : MultiLink } , <nl> - * pData <nl> - ) ; <nl> - } <nl> - else <nl> - { <nl> - layout . AddOutput ( <nl> - CUniqueId : : FromString ( szPortName ) , <nl> - szPortHumanName , <nl> - CryGUID ( ) , <nl> - { EScriptGraphPortFlags : : Flow } <nl> - ) ; <nl> - <nl> - } <nl> - } <nl> - } <nl> - <nl> - SRuntimeResult CScriptGraphFlowGraphNode : : ExecuteFlowGraphNode ( SRuntimeContext & context , const SRuntimeActivationParams & activationParams ) <nl> - { <nl> - SEnvFlowGraphRuntimeData & data = DynamicCast < SEnvFlowGraphRuntimeData > ( * context . node . GetData ( ) ) ; <nl> - <nl> - CObject * pObject = static_cast < CObject * > ( context . pObject ) ; <nl> - <nl> - SEnvFlowGraphRuntimeData : : SFlowGraphNode * pFGNode = data . m_pNode . get ( ) ; <nl> - <nl> - int inputPortOffset = context . node . GetInputCount ( ) - pFGNode - > inputCount ; <nl> - <nl> - / / Copy Schematyc node input data to the FlowGraph node input data <nl> - for ( int inputIdx = 0 , count = context . node . GetInputCount ( ) ; inputIdx < count ; inputIdx + + ) <nl> - { <nl> - int flowNodeInputIdx = inputIdx - inputPortOffset ; / / 0 input port id can be Flow " In " <nl> - assert ( flowNodeInputIdx < pFGNode - > inputCount ) ; <nl> - <nl> - if ( flowNodeInputIdx > = 0 ) <nl> - { <nl> - if ( context . node . 
IsDataInput ( inputIdx ) ) <nl> - { <nl> - CAnyConstPtr pAny = context . node . GetInputData ( inputIdx ) ; <nl> - FlowGraph : : AssignAnyToFlowGraphInputData ( pAny , pFGNode - > inputData [ flowNodeInputIdx ] ) ; <nl> - } <nl> - / / Activate all linked inputs <nl> - if ( inputIdx = = activationParams . portIdx | | context . node . IsInputLinked ( inputIdx ) ) <nl> - { <nl> - / / Mark this flow graph node input as Active <nl> - pFGNode - > inputData [ flowNodeInputIdx ] . SetUserFlag ( true ) ; <nl> - } <nl> - } <nl> - } <nl> - <nl> - auto activateOutputLambda = [ ] ( IFlowNode : : SActivationInfo * pActInfo , int nOutputPort , const TFlowInputData & value ) <nl> - { <nl> - / / Copy FlowGraph node output data to Schematyc node output data <nl> - STempFGNodeActivationUserData * pUserData = static_cast < STempFGNodeActivationUserData * > ( pActInfo - > m_pUserData ) ; <nl> - <nl> - if ( pUserData - > pFlowGraphNode - > config . pOutputPorts [ nOutputPort ] . type ! = eFDT_Void ) <nl> - { <nl> - CAnyPtr pAnyOutput = pUserData - > pNode - > GetOutputData ( nOutputPort + 1 ) ; <nl> - FlowGraph : : AssignFlowGraphVariantToAny ( value . GetVariant ( ) , pAnyOutput ) ; <nl> - } <nl> - else <nl> - { <nl> - / / Only void outputs can be out flow triggers <nl> - pUserData - > outputPort = nOutputPort ; <nl> - } <nl> - } ; <nl> - <nl> - RuntimeGraphPortIdx outputPortId = EOutputIdx : : Out ; <nl> - <nl> - if ( ! pFGNode - > inputData . empty ( ) ) <nl> - { <nl> - IEntity * pNodeEntity = nullptr ; <nl> - FlowGraph : : CFlowGraphDummyClass flowGraph ; <nl> - if ( pObject & & pObject - > GetEntity ( ) ) <nl> - { <nl> - pNodeEntity = pObject - > GetEntity ( ) ; <nl> - flowGraph . m_entityId = pNodeEntity - > GetId ( ) ; <nl> - } <nl> - TFlowInputData * pInputData = & pFGNode - > inputData [ 0 ] ; <nl> - <nl> - STempFGNodeActivationUserData tempUserData ; <nl> - tempUserData . pNode = & context . node ; <nl> - tempUserData . outputPort = - 1 ; <nl> - tempUserData . pFlowGraphNode = pFGNode ; <nl> - <nl> - IFlowNode : : SActivationInfo fgActivateInfo ( nullptr , 0 , & tempUserData , pInputData ) ; <nl> - fgActivateInfo . pEntity = pNodeEntity ; <nl> - fgActivateInfo . activateOutputCallback = activateOutputLambda ; <nl> - pFGNode - > pNode - > ProcessEvent ( IFlowNode : : EFlowEvent : : eFE_Activate , & fgActivateInfo ) ; <nl> - <nl> - if ( tempUserData . outputPort ! = - 1 ) <nl> - { <nl> - / / Adjust output portid from this node . <nl> - outputPortId = EOutputIdx : : Out + 1 + tempUserData . outputPort ; <nl> - } <nl> - } <nl> - <nl> - return SRuntimeResult ( ERuntimeStatus : : Continue , outputPortId ) ; <nl> - } <nl> - <nl> - } / / Schematyc <nl> - <nl> - SCHEMATYC_REGISTER_SCRIPT_GRAPH_NODE ( Schematyc : : CScriptGraphFlowGraphNode : : Register ) <nl> deleted file mode 100644 <nl> index 44e4ad08a0 . . 0000000000 <nl> mmm a / Code / CryEngine / CrySchematyc / Core / Impl / Script / Graph / Nodes / ScriptGraphFlowGraphNode . h <nl> ppp / dev / null <nl> <nl> - / / Copyright 2001 - 2016 Crytek GmbH / Crytek Group . All rights reserved . <nl> - <nl> - # pragma once <nl> - <nl> - # include < CrySchematyc / FundamentalTypes . h > <nl> - # include < CrySchematyc / Reflection / TypeDesc . h > <nl> - # include < CrySchematyc / Runtime / RuntimeGraph . h > <nl> - # include < CrySchematyc / Utils / GUID . h > <nl> - <nl> - # include " Script / Graph / ScriptGraphNodeModel . h " <nl> - <nl> - # include < CryFlowGraph / IFlowSystem . 
h > <nl> - <nl> - namespace Schematyc <nl> - { <nl> - <nl> - / / Forward declare interfaces . <nl> - struct IEnvFunction ; <nl> - struct IScriptFunction ; <nl> - <nl> - class CScriptGraphFlowGraphNode : public CScriptGraphNodeModel <nl> - { <nl> - public : <nl> - <nl> - struct EInputIdx <nl> - { <nl> - enum : uint32 <nl> - { <nl> - In = 0 , <nl> - FirstParam <nl> - } ; <nl> - } ; <nl> - <nl> - struct EOutputIdx <nl> - { <nl> - enum : uint32 <nl> - { <nl> - Out = 0 , <nl> - FirstParam <nl> - } ; <nl> - } ; <nl> - <nl> - public : <nl> - <nl> - CScriptGraphFlowGraphNode ( ) ; <nl> - CScriptGraphFlowGraphNode ( const string & flowNodeTypeName , const CryGUID & objectGUID ) ; <nl> - <nl> - / / CScriptGraphNodeModel <nl> - virtual CryGUID GetTypeGUID ( ) const override ; <nl> - virtual void CreateLayout ( CScriptGraphNodeLayout & layout ) override ; <nl> - virtual void Compile ( SCompilerContext & context , IGraphNodeCompiler & compiler ) const override ; <nl> - virtual void LoadDependencies ( Serialization : : IArchive & archive , const ISerializationContext & context ) override ; <nl> - virtual void Save ( Serialization : : IArchive & archive , const ISerializationContext & context ) override ; <nl> - virtual void Edit ( Serialization : : IArchive & archive , const ISerializationContext & context ) override ; <nl> - virtual void Validate ( Serialization : : IArchive & archive , const ISerializationContext & context ) override ; <nl> - virtual void RemapDependencies ( IGUIDRemapper & guidRemapper ) override ; <nl> - / / ~ CScriptGraphNodeModel <nl> - <nl> - static void Register ( CScriptGraphNodeFactory & factory ) ; <nl> - <nl> - private : <nl> - <nl> - void CreateInputsAndOutputs ( CScriptGraphNodeLayout & layout ) ; <nl> - <nl> - static SRuntimeResult ExecuteFlowGraphNode ( SRuntimeContext & context , const SRuntimeActivationParams & activationParams ) ; <nl> - <nl> - public : <nl> - <nl> - static const CryGUID ms_typeGUID ; <nl> - <nl> - private : <nl> - <nl> - / / SElementId m_functionId ; <nl> - CryGUID m_objectGUID ; <nl> - <nl> - TFlowNodeTypeId m_flowNodeTypeId = 0 ; <nl> - string m_flowNodeTypeName ; <nl> - } ; <nl> - <nl> - } / / Schematyc <nl> mmm a / Code / CryManaged / CESharp / Core . UI / Button . cs <nl> ppp b / Code / CryManaged / CESharp / Core . UI / Button . cs <nl> public ImageSource Image <nl> } <nl> } <nl> <nl> - <nl> + / / / < summary > <nl> + / / / The path to the image that will be used for the background of this button . <nl> + / / / < / summary > <nl> + / / / < value > The background image URL . < / value > <nl> public string BackgroundImageUrl <nl> { <nl> get { return _backgroundImageUrl ; } <nl> public string BackgroundImageUrl <nl> } <nl> } <nl> <nl> - <nl> + / / / < summary > <nl> + / / / The path to the image that will be used for the background of this button if it is pressed . <nl> + / / / < / summary > <nl> + / / / < value > The background image inverted URL . < / value > <nl> public string BackgroundImageInvertedUrl <nl> { <nl> get { return _backgroundImageInvertedUrl ; } <nl> public string BackgroundImageInvertedUrl <nl> } <nl> } <nl> <nl> - <nl> + / / / < summary > <nl> + / / / The path to the image that will be shown when the button is pressed . <nl> + / / / < / summary > <nl> + / / / < value > The frame image URL . < / value > <nl> public string FrameImageUrl <nl> { <nl> get { return _frameImageUrl ; } <nl> mmm a / Code / CryManaged / CESharp / Core . UI / Components / ButtonCtrl . cs <nl> ppp b / Code / CryManaged / CESharp / Core . 
UI / Components / ButtonCtrl . cs <nl> namespace CryEngine . UI . Components <nl> / / / < / summary > <nl> public class ButtonCtrl : UIComponent <nl> { <nl> + / / / < summary > <nl> + / / / Occurs when the mouse enters this ButtonCtrl . <nl> + / / / < / summary > <nl> public event Action OnEnterMouse ; <nl> + <nl> + / / / < summary > <nl> + / / / Occurs when the mouse leaves this ButtonCtrl . <nl> + / / / < / summary > <nl> public event Action OnLeaveMouse ; <nl> <nl> / / / < summary > <nl> public class ButtonCtrl : UIComponent <nl> / / / < / summary > <nl> public override void OnAwake ( ) <nl> { <nl> - Text = ( Owner as Button ) . AddComponent < Text > ( ) ; <nl> - Text . Alignment = Alignment . Center ; <nl> + var button = Owner as Button ; <nl> + if ( button ! = null ) <nl> + { <nl> + Text = button . AddComponent < Text > ( ) ; <nl> + Text . Alignment = Alignment . Center ; <nl> + } <nl> } <nl> <nl> + / / / < summary > <nl> + / / / Called when the mouse enters the rectangle of this button . <nl> + / / / < / summary > <nl> + / / / < param name = " x " > The x coordinate . < / param > <nl> + / / / < param name = " y " > The y coordinate . < / param > <nl> public override void OnMouseEnter ( int x , int y ) <nl> { <nl> if ( OnEnterMouse ! = null ) <nl> OnEnterMouse ( ) ; <nl> } <nl> <nl> + / / / < summary > <nl> + / / / Called when the mouse leaves the rectangle of this button . <nl> + / / / < / summary > <nl> + / / / < param name = " x " > The x coordinate . < / param > <nl> + / / / < param name = " y " > The y coordinate . < / param > <nl> public override void OnMouseLeave ( int x , int y ) <nl> { <nl> if ( OnLeaveMouse ! = null ) <nl> public override void OnLeaveFocus ( ) <nl> / / / < / summary > <nl> public override bool HitTest ( int x , int y ) <nl> { <nl> - return ( Owner as UIElement ) . RectTransform . ClampRect . Contains ( x , y ) ; <nl> + var rect = Owner . GetComponent < RectTransform > ( ) ; <nl> + return rect ! = null & & rect . ClampRect . Contains ( x , y ) ; <nl> } <nl> <nl> / / / < summary > <nl> public override bool HitTest ( int x , int y ) <nl> / / / < / summary > <nl> public override void OnLeftMouseDown ( int x , int y ) <nl> { <nl> - ( Owner as Button ) . SetDown ( ) ; <nl> + ( Owner as Button ) ? . SetDown ( ) ; <nl> } <nl> <nl> / / / < summary > <nl> public override void OnLeftMouseDown ( int x , int y ) <nl> / / / < / summary > <nl> public override void OnLeftMouseUp ( int x , int y , bool inside ) <nl> { <nl> - ( Owner as Button ) . SetUp ( ) ; <nl> + ( Owner as Button ) ? . SetUp ( ) ; <nl> if ( inside & & OnPressed ! = null ) <nl> + { <nl> OnPressed ( ) ; <nl> + } <nl> } <nl> <nl> + / / / < summary > <nl> + / / / Called when a key is pressed while this button is focused . If the key is the Space , Enter or XInput - A key , <nl> + / / / it will call OnPressed ( ) . <nl> + / / / < / summary > <nl> + / / / < param name = " e " > E . < / param > <nl> public override void OnKey ( InputEvent e ) <nl> { <nl> if ( e . KeyPressed ( KeyId . Space ) | | e . KeyPressed ( KeyId . Enter ) | | e . KeyPressed ( KeyId . XI_A ) ) <nl> mmm a / Code / CryManaged / CESharp / Core . UI / Components / ComboBoxCtrl . cs <nl> ppp b / Code / CryManaged / CESharp / Core . UI / Components / ComboBoxCtrl . cs <nl> public object SelectedItem <nl> / / / < / summary > <nl> public override void OnAwake ( ) <nl> { <nl> - _text = ( Owner as ComboBox ) . BgPanel . AddComponent < Text > ( ) ; <nl> - _text . 
Offset = new Point ( 5 , 1 ) ; <nl> + var comboBox = Owner as ComboBox ; <nl> + if ( comboBox ! = null ) <nl> + { <nl> + _text = comboBox . BgPanel . AddComponent < Text > ( ) ; <nl> + _text . Offset = new Point ( 5 , 1 ) ; <nl> + } <nl> <nl> _choiceFrame = SceneObject . Instantiate < UIElement > ( Owner ) ; <nl> _choiceFrame . RectTransform . Alignment = Alignment . TopHStretch ; <nl> <nl> _choiceRoot = SceneObject . Instantiate ( null , " ChoiceRoot " ) ; <nl> var canvas = SceneObject . Instantiate < Canvas > ( _choiceRoot ) ; <nl> - var pc = ( Owner as UIElement ) . FindParentCanvas ( ) ; <nl> - canvas . SetupTargetEntity ( pc . TargetEntity , pc . TargetTexture ) ; <nl> + <nl> + var pc = Owner . GetParentWithType < Canvas > ( ) ; <nl> + if ( pc ! = null ) <nl> + { <nl> + canvas . SetupTargetEntity ( pc . TargetEntity , pc . TargetTexture ) ; <nl> + } <nl> + <nl> _choice = SceneObject . Instantiate < Panel > ( canvas ) ; <nl> _choice . RectTransform . Alignment = Alignment . TopLeft ; <nl> _choice . Background . Source = ResourceManager . ImageFromFile ( Path . Combine ( UIElement . DataDirectory , " button . png " ) ) ; <nl> mmm a / Code / CryManaged / CESharp / Core . UI / Components / Image . cs <nl> ppp b / Code / CryManaged / CESharp / Core . UI / Components / Image . cs <nl> public override void OnAwake ( ) <nl> / / / < / summary > <nl> public override void OnUpdate ( ) <nl> { <nl> - if ( _source ! = null ) <nl> + if ( _source ! = null & & _texture ! = null ) <nl> { <nl> if ( ! IgnoreClamping ) <nl> - _texture . ClampRect = ( ( Owner as UIElement ) . RectTransform ) . ClampRect ; <nl> + { <nl> + var rect = Owner . GetComponent < RectTransform > ( ) ; <nl> + if ( rect ! = null ) <nl> + { <nl> + _texture . ClampRect = rect . ClampRect ; <nl> + } <nl> + } <nl> else <nl> - _texture . ClampRect = null ; <nl> + { <nl> + _texture . ClampRect = new Rect ( ) ; <nl> + } <nl> if ( Color . A > 0 . 05f ) <nl> { <nl> - _texture . Angle = ( Owner as UIElement ) . RectTransform . Angle ; <nl> + var rect = Owner . GetComponent < RectTransform > ( ) ; <nl> + if ( rect ! = null ) <nl> + { <nl> + _texture . Angle = rect . Angle ; <nl> + } <nl> + <nl> _texture . Color = _color ; <nl> _texture . TargetCanvas = ParentCanvas ; <nl> <nl> public override Rect GetAlignedRect ( ) <nl> if ( _source = = null ) <nl> return new Rect ( ) ; <nl> <nl> - var rt = ( Owner as UIElement ) . RectTransform ; <nl> + var rt = Owner . GetComponent < RectTransform > ( ) ; <nl> + if ( rt = = null ) <nl> + { <nl> + return new Rect ( ) ; <nl> + } <nl> var tl = rt . TopLeft ; <nl> if ( KeepRatio ) <nl> { <nl> mmm a / Code / CryManaged / CESharp / Core . UI / Components / RectTransform . cs <nl> ppp b / Code / CryManaged / CESharp / Core . UI / Components / RectTransform . cs <nl> public class RectTransform : UIComponent <nl> / / / Defines how clamping should be computed in ClampRect . <nl> / / / < / summary > <nl> / / / < value > The clamp mode . < / value > <nl> - public ClampMode ClampMode { set { _clampMode = value ; _clampRect = null ; } get { return _clampMode ; } } <nl> + public ClampMode ClampMode <nl> + { <nl> + set <nl> + { <nl> + _clampMode = value ; <nl> + } <nl> + get <nl> + { <nl> + return _clampMode ; <nl> + } <nl> + } <nl> <nl> / / / < summary > <nl> / / / Defines the center of the element , relative to its size . <nl> public class RectTransform : UIComponent <nl> / / / Returns Parent elements RectTransform , if available . <nl> / / / < / summary > <nl> / / / < value > The prt . 
< / value > <nl> - public RectTransform PRT { get { return ( Transform . Parent ! = null & & Transform . Parent . Owner is UIElement ) ? ( Transform . Parent . Owner as UIElement ) . RectTransform : null ; } } <nl> + public RectTransform PRT <nl> + { <nl> + get <nl> + { <nl> + var parent = Transform . Parent ; <nl> + return parent = = null ? null : parent . Owner . GetComponent < RectTransform > ( ) ; <nl> + } <nl> + } <nl> <nl> / / / < summary > <nl> / / / Returns center of owning element . <nl> void ComputeClampRect ( ) <nl> switch ( ClampMode ) <nl> { <nl> case ClampMode . Full : <nl> - _clampRect = Spacing ! = null ? _bounds . Pad ( Spacing ) : _bounds ; <nl> + _clampRect = _bounds . Pad ( Spacing ) ; <nl> <nl> / / Use intersection if parent and own rect exist <nl> - if ( prt ! = null & & prt . ClampRect ! = null & & _clampRect ! = null ) <nl> + if ( prt ! = null & & prt . ClampRect . Size > 0 & & _clampRect . Size > 0 ) <nl> + { <nl> _clampRect = prt . _clampRect & _clampRect ; <nl> + } <nl> <nl> / / Take over Parent CR if self null <nl> - if ( _clampRect = = null & & prt ! = null ) <nl> + if ( MathHelpers . Approximately ( _clampRect . Size , 0 ) & & prt ! = null ) <nl> + { <nl> _clampRect = prt . _clampRect ; <nl> + } <nl> break ; <nl> <nl> case ClampMode . Self : <nl> - _clampRect = Spacing ! = null ? Bounds . Pad ( Spacing ) : _bounds ; <nl> + _clampRect = Bounds . Pad ( Spacing ) ; <nl> break ; <nl> <nl> case ClampMode . Parent : <nl> - _clampRect = prt ! = null ? prt . _clampRect : null ; <nl> + _clampRect = prt ! = null ? prt . _clampRect : new Rect ( ) ; <nl> break ; <nl> <nl> case ClampMode . None : <nl> - _clampRect = null ; <nl> + _clampRect = new Rect ( ) ; <nl> break ; <nl> } <nl> } <nl> <nl> RectTransform GetOutdatedAncestor ( ) <nl> { <nl> - if ( Transform . Parent = = null | | ! ( Transform . Parent . Owner is UIElement ) ) <nl> + var parent = Transform . Parent ; <nl> + if ( parent = = null ) <nl> + { <nl> return null ; <nl> - var prt = ( Transform . Parent . Owner as UIElement ) . RectTransform ; <nl> + } <nl> + <nl> + var prt = parent . Owner . GetComponent < RectTransform > ( ) ; <nl> + if ( prt = = null ) <nl> + { <nl> + return null ; <nl> + } <nl> + <nl> if ( prt . NeedsRefresh ) <nl> + { <nl> return prt ; <nl> + } <nl> + <nl> return prt . GetOutdatedAncestor ( ) ; <nl> } <nl> <nl> public void PerformLayout ( bool forceRefresh = false ) <nl> ComputeClampRect ( ) ; <nl> <nl> if ( DeltaChanged ) <nl> + { <nl> Transform . Children . Where ( t = > t . Owner is UIElement ) . ToList ( ) . ForEach ( t = > ( t . Owner as UIElement ) . RectTransform . PerformLayout ( true ) ) ; <nl> + } <nl> <nl> if ( LayoutChanged ! = null ) <nl> LayoutChanged ( ) ; <nl> mmm a / Code / CryManaged / CESharp / Core . UI / Components / SplitBarCtrl . cs <nl> ppp b / Code / CryManaged / CESharp / Core . UI / Components / SplitBarCtrl . cs <nl> public override void OnLeftMouseUp ( int x , int y , bool inside ) <nl> / / / < / summary > <nl> public override bool HitTest ( int x , int y ) <nl> { <nl> - var ort = ( Owner as UIElement ) . RectTransform ; <nl> - return ort . Bounds . Contains ( x , y ) ; <nl> + var ort = Owner . GetComponent < RectTransform > ( ) ; <nl> + return ort ! = null & & ort . Bounds . Contains ( x , y ) ; <nl> } <nl> } <nl> } <nl> mmm a / Code / CryManaged / CESharp / Core . UI / Components / Text . cs <nl> ppp b / Code / CryManaged / CESharp / Core . UI / Components / Text . 
cs <nl> public bool DropsShadow <nl> / / / < / summary > <nl> public override void OnAwake ( ) <nl> { <nl> - var rt = ( Owner as UIElement ) . RectTransform ; <nl> + var rt = Owner . GetComponent < RectTransform > ( ) ; <nl> + if ( rt = = null ) <nl> + { <nl> + rt = Owner . AddComponent < RectTransform > ( ) ; <nl> + } <nl> rt . LayoutChanged + = UpdateLayout ; <nl> } <nl> <nl> public override void OnAwake ( ) <nl> public override void OnUpdate ( ) <nl> { <nl> UpdateContent ( ) ; <nl> - var rt = ( Owner as UIElement ) . RectTransform ; <nl> - if ( _texture ! = null ) <nl> + var rt = Owner . GetComponent < RectTransform > ( ) ; <nl> + if ( rt ! = null & & _texture ! = null ) <nl> { <nl> - _texture . ClampRect = ( ( Owner as UIElement ) . RectTransform ) . ClampRect ; <nl> + _texture . ClampRect = rt . ClampRect ; <nl> _texture . Color = Color ; <nl> _texture . TargetCanvas = ParentCanvas ; <nl> _texture . Draw ( _alignedOffset . x , _alignedOffset . y , _alignedSize . x , _alignedSize . y ) ; <nl> public void UpdateLayout ( ) <nl> var font = new Font ( FontName , GetSizeForHeight ( Height ) , _fontStyle ) ; <nl> var nullSize = g . MeasureString ( " XX " , font ) ; <nl> var fillSize = g . MeasureString ( " X " + _content + " X " , font ) ; <nl> - var rt = ( Owner as UIElement ) . RectTransform ; <nl> + var rt = Owner . GetComponent < RectTransform > ( ) ; <nl> + if ( rt = = null ) <nl> + { <nl> + rt = Owner . AddComponent < RectTransform > ( ) ; <nl> + } <nl> + <nl> var tl = rt . TopLeft ; <nl> var width = fillSize . Width - nullSize . Width * 0 . 8f ; <nl> var height = fillSize . Height ; <nl> mmm a / Code / CryManaged / CESharp / Core . UI / Components / TextCtrl . cs <nl> ppp b / Code / CryManaged / CESharp / Core . UI / Components / TextCtrl . cs <nl> void PositionContent ( bool updateTextLayout = false ) <nl> if ( updateTextLayout ) <nl> _text . UpdateLayout ( ) ; <nl> var cursorOffset = _text . GetOffsetAt ( _cursorIndex ) ; <nl> - var fieldWidth = ( Owner as UIElement ) . RectTransform . Bounds . w - 2 ; <nl> + var rect = Owner . GetComponent < RectTransform > ( ) ; <nl> + var fieldWidth = rect = = null ? 0 : rect . Bounds . w - 2 ; <nl> if ( cursorOffset < fieldWidth ) <nl> { <nl> _cursor . RectTransform . Padding = new Padding ( cursorOffset , 0 ) ; <nl> void PositionContent ( bool updateTextLayout = false ) <nl> / / / < / summary > <nl> public override bool HitTest ( int x , int y ) <nl> { <nl> - return ( Owner as UIElement ) . RectTransform . ClampRect . Contains ( x , y ) ; <nl> + var rect = Owner . GetComponent < RectTransform > ( ) ; <nl> + return rect ! = null & & rect . ClampRect . Contains ( x , y ) ; <nl> } <nl> } <nl> } <nl> mmm a / Code / CryManaged / CESharp / Core . UI / Components / UIComponent . cs <nl> ppp b / Code / CryManaged / CESharp / Core . UI / Components / UIComponent . cs <nl> <nl> / / Copyright 2001 - 2016 Crytek GmbH / Crytek Group . All rights reserved . <nl> <nl> using System ; <nl> + using CryEngine . UI . Components ; <nl> <nl> namespace CryEngine . UI <nl> { <nl> namespace CryEngine . UI <nl> / / / < / summary > <nl> public class UIComponent : IUpdateReceiver <nl> { <nl> - protected bool _isActive = false ; <nl> - protected bool _isActiveByHierarchy = true ; <nl> + private bool _isActive = false ; <nl> + private bool _isActiveByHierarchy = true ; <nl> <nl> + / / / < summary > <nl> + / / / Called when the component is added to a SceneObject . 
<nl> + / / / < / summary > <nl> public virtual void OnAwake ( ) { } <nl> + <nl> + / / / < summary > <nl> + / / / Called every frame . <nl> + / / / < / summary > <nl> public virtual void OnUpdate ( ) { } <nl> + <nl> + / / / < summary > <nl> + / / / Called when this component is destroyed . <nl> + / / / < / summary > <nl> public virtual void OnDestroy ( ) { } <nl> + <nl> + / / / < summary > <nl> + / / / Called when the UIElement of this component gets focused . <nl> + / / / < / summary > <nl> public virtual void OnEnterFocus ( ) { } <nl> + <nl> + / / / < summary > <nl> + / / / Called when the UIElement of this component loses focus . <nl> + / / / < / summary > <nl> public virtual void OnLeaveFocus ( ) { } <nl> + <nl> + / / / < summary > <nl> + / / / Called when the mouse is pressed down on the UIElement of this component . <nl> + / / / < / summary > <nl> + / / / < param name = " x " > The x coordinate . < / param > <nl> + / / / < param name = " y " > The y coordinate . < / param > <nl> public virtual void OnLeftMouseDown ( int x , int y ) { } <nl> + <nl> + / / / < summary > <nl> + / / / Called when the mouse button is let go on the UIElement of this component . <nl> + / / / < / summary > <nl> + / / / < param name = " x " > The x coordinate . < / param > <nl> + / / / < param name = " y " > The y coordinate . < / param > <nl> + / / / < param name = " inside " > If set to < c > true < / c > inside . < / param > <nl> public virtual void OnLeftMouseUp ( int x , int y , bool inside ) { } <nl> + <nl> + / / / < summary > <nl> + / / / Called when the mouse enters the rectangle of the UIElement of this component . <nl> + / / / < / summary > <nl> + / / / < param name = " x " > The x coordinate . < / param > <nl> + / / / < param name = " y " > The y coordinate . < / param > <nl> public virtual void OnMouseEnter ( int x , int y ) { } <nl> + <nl> + / / / < summary > <nl> + / / / Called when the mouse leaves the rectangle of the UIElement of this component . <nl> + / / / < / summary > <nl> + / / / < param name = " x " > The x coordinate . < / param > <nl> + / / / < param name = " y " > The y coordinate . < / param > <nl> public virtual void OnMouseLeave ( int x , int y ) { } <nl> + <nl> + / / / < summary > <nl> + / / / Called when the mouse hovers over the UIElement of this component . <nl> + / / / < / summary > <nl> + / / / < param name = " x " > The x coordinate . < / param > <nl> + / / / < param name = " y " > The y coordinate . < / param > <nl> public virtual void OnMouseMove ( int x , int y ) { } <nl> + <nl> + / / / < summary > <nl> + / / / Called when a key is pressed while this UIElement is focused . <nl> + / / / < / summary > <nl> + / / / < param name = " e " > The input event . < / param > <nl> public virtual void OnKey ( InputEvent e ) { } <nl> <nl> / / / < summary > <nl> public class UIComponent : IUpdateReceiver <nl> / / / Determines whether object is individually focusable . <nl> / / / < / summary > <nl> / / / < value > < c > true < / c > if enabled ; otherwise , < c > false < / c > . < / value > <nl> - public bool Enabled { get ; set ; } <nl> + public bool Enabled { get ; set ; } = true ; <nl> <nl> / / / < summary > <nl> / / / Owning SceneObject . <nl> public bool ActiveByHierarchy <nl> } <nl> } <nl> <nl> - protected UIComponent ( ) <nl> - { <nl> - Enabled = true ; <nl> - } <nl> - <nl> / / / < summary > <nl> / / / Creates an instance of T and wires it into the scene hierarchy . <nl> / / / < / summary > <nl> public string ToJSON ( ) <nl> return Tools . 
ToJSON ( this ) ; <nl> } <nl> <nl> + / / / < summary > <nl> + / / / Invokes the OnEnterFocus message on this component . <nl> + / / / < / summary > <nl> public void InvokeOnEnterFocus ( ) <nl> { <nl> HasFocus = true ; <nl> OnEnterFocus ( ) ; <nl> } <nl> <nl> + / / / < summary > <nl> + / / / Invokes the OnLeaveFocus message on this component . <nl> + / / / < / summary > <nl> public void InvokeOnLeaveFocus ( ) <nl> { <nl> HasFocus = false ; <nl> OnLeaveFocus ( ) ; <nl> } <nl> <nl> + / / / < summary > <nl> + / / / Invokes the OnLeftMouseDown message on this component . <nl> + / / / < / summary > <nl> + / / / < param name = " x " > The x coordinate . < / param > <nl> + / / / < param name = " y " > The y coordinate . < / param > <nl> public void InvokeOnLeftMouseDown ( int x , int y ) <nl> { <nl> OnLeftMouseDown ( x , y ) ; <nl> } <nl> <nl> + / / / < summary > <nl> + / / / Invokes the OnLeftMouseUp message on this component . <nl> + / / / < / summary > <nl> + / / / < param name = " x " > The x coordinate . < / param > <nl> + / / / < param name = " y " > The y coordinate . < / param > <nl> + / / / < param name = " wasInside " > If set to < c > true < / c > was inside . < / param > <nl> public void InvokeOnLeftMouseUp ( int x , int y , bool wasInside ) <nl> { <nl> OnLeftMouseUp ( x , y , wasInside ) ; <nl> } <nl> <nl> + / / / < summary > <nl> + / / / Invokes the OnMouseEnter message on this component . <nl> + / / / < / summary > <nl> + / / / < param name = " x " > The x coordinate . < / param > <nl> + / / / < param name = " y " > The y coordinate . < / param > <nl> public void InvokeOnMouseEnter ( int x , int y ) <nl> { <nl> OnMouseEnter ( x , y ) ; <nl> } <nl> <nl> + / / / < summary > <nl> + / / / Invokes the OnMouseLeave message on this component . <nl> + / / / < / summary > <nl> + / / / < param name = " x " > The x coordinate . < / param > <nl> + / / / < param name = " y " > The y coordinate . < / param > <nl> public void InvokeOnMouseLeave ( int x , int y ) <nl> { <nl> OnMouseLeave ( x , y ) ; <nl> } <nl> <nl> + / / / < summary > <nl> + / / / Invokes the OnMouseMove message on this component . <nl> + / / / < / summary > <nl> + / / / < param name = " x " > The x coordinate . < / param > <nl> + / / / < param name = " y " > The y coordinate . < / param > <nl> public void InvokeOnMouseMove ( int x , int y ) <nl> { <nl> OnMouseMove ( x , y ) ; <nl> } <nl> <nl> + / / / < summary > <nl> + / / / Invokes the OnKey message on this component . <nl> + / / / < / summary > <nl> + / / / < param name = " e " > E . < / param > <nl> public void InvokeOnKey ( InputEvent e ) <nl> { <nl> OnKey ( e ) ; <nl> public Canvas ParentCanvas <nl> { <nl> if ( _parentCanvas = = null ) <nl> { <nl> - _parentCanvas = ( Owner as UIElement ) . FindParentCanvas ( ) ; <nl> + _parentCanvas = Owner . GetParentWithType < Canvas > ( ) ; <nl> } <nl> return _parentCanvas ; <nl> } <nl> public Canvas ParentCanvas <nl> / / / < / summary > <nl> public virtual Rect GetAlignedRect ( ) <nl> { <nl> - return ( Owner as UIElement ) . RectTransform . Bounds ; <nl> + var rect = Owner . GetComponent < RectTransform > ( ) ; <nl> + if ( rect ! = null ) <nl> + { <nl> + return rect . Bounds ; <nl> + } <nl> + <nl> + return new Rect ( ) ; <nl> } <nl> <nl> / / / < summary > <nl> public virtual Rect GetAlignedRect ( ) <nl> / / / < param name = " y " > The y coordinate . < / param > <nl> public virtual bool HitTest ( int x , int y ) <nl> { <nl> - var prt = ( Owner as UIElement ) . RectTransform ; <nl> - return prt . ClampRect = = null ? prt . Bounds . 
Contains ( x , y ) : prt . ClampRect . Contains ( x , y ) ; <nl> + var prt = Owner . GetComponent < RectTransform > ( ) ; <nl> + <nl> + if ( prt = = null ) <nl> + { <nl> + return false ; <nl> + } <nl> + <nl> + return prt . ClampRect . Size > 0 ? prt . Bounds . Contains ( x , y ) : prt . ClampRect . Contains ( x , y ) ; <nl> } <nl> } <nl> } <nl> mmm a / Code / CryManaged / CESharp / Core . UI / Resources / Graphic . cs <nl> ppp b / Code / CryManaged / CESharp / Core . UI / Resources / Graphic . cs <nl> namespace CryEngine . UI <nl> / / / < / summary > <nl> public class Graphic : Texture <nl> { <nl> + / / / < summary > <nl> + / / / The Canvas that this Graphic is targeting . <nl> + / / / < / summary > <nl> + / / / < value > The target canvas . < / value > <nl> public Canvas TargetCanvas { get ; set ; } <nl> <nl> / / / < summary > <nl> public Graphic ( int width , int height , byte [ ] data , bool isFiltered = true , bool <nl> private void DrawSection ( float x , float y , float w , float h , float u0 , float v0 , float u1 , float v1 ) <nl> { <nl> float crx = x , cry = y , crw = w , crh = h ; <nl> - if ( ClampRect ! = null ) <nl> + if ( ClampRect . Size > 0 ) <nl> { <nl> crx = ClampRect . x ; <nl> cry = ClampRect . y ; <nl> private void DrawSection ( float x , float y , float w , float h , float u0 , float v0 , <nl> crh = ClampRect . h ; <nl> } <nl> <nl> - if ( ClampRect = = null | | ( crx < = x & & cry < = y & & crx + crw > = x + w & & cry + crh > = y + h ) ) <nl> + if ( MathHelpers . Approximately ( ClampRect . Size , 0 ) | | ( crx < = x & & cry < = y & & crx + crw > = x + w & & cry + crh > = y + h ) ) <nl> { <nl> - if ( RoundLocation ) { x = ( int ) x ; y = ( int ) y ; } <nl> + if ( RoundLocation ) <nl> + { <nl> + x = ( int ) x ; <nl> + y = ( int ) y ; <nl> + } <nl> + <nl> TargetCanvas . PushTexturePart ( x , y , w , h , ID , u0 , v0 , u1 , v1 , Angle , Color ) ; <nl> } <nl> else <nl> mmm a / Code / CryManaged / CESharp / Core . UI / SceneObject . cs <nl> ppp b / Code / CryManaged / CESharp / Core . UI / SceneObject . cs <nl> namespace CryEngine . UI <nl> [ DebuggerDisplay ( " SceneObject ( { Name } ) " ) ] <nl> public class SceneObject : IUpdateReceiver <nl> { <nl> + private static int _updateOrder ; <nl> + <nl> + private List < UIComponent > _components = new List < UIComponent > ( ) ; <nl> + private bool _isActive = true ; <nl> + private bool _isActiveByHierarchy = true ; <nl> + <nl> / / / < summary > <nl> / / / Called if Active property was changed <nl> / / / < / summary > <nl> public class SceneObject : IUpdateReceiver <nl> / / / < value > < c > true < / c > if is updateable ; otherwise , < c > false < / c > . < / value > <nl> public bool IsUpdateable { get ; private set ; } = false ; <nl> <nl> - static int _updateOrder ; <nl> - List < UIComponent > _components = new List < UIComponent > ( ) ; <nl> - protected bool _isActive = true ; <nl> - protected bool _isActiveByHierarchy = true ; <nl> - <nl> + / / / < summary > <nl> + / / / Called when this SceneObject is instantiated . <nl> + / / / < / summary > <nl> public virtual void OnAwake ( ) { } <nl> + <nl> + / / / < summary > <nl> + / / / Called once every frame . <nl> + / / / < / summary > <nl> public virtual void OnUpdate ( ) { } <nl> + <nl> + / / / < summary > <nl> + / / / Called when this SceneObject is destroyed . <nl> + / / / < / summary > <nl> public virtual void OnDestroy ( ) { } <nl> <nl> / / / < summary > <nl> public void RefreshUpdateOrder ( ) <nl> return Components . 
FirstOrDefault ( x = > x is T ) as T ; <nl> } <nl> <nl> + / / / < summary > <nl> + / / / Get the first parent SceneObject of type T . Returns null if no parent is of type T . <nl> + / / / < / summary > <nl> + / / / < returns > The parent with type T , or null if none is found . < / returns > <nl> + / / / < param name = " includeSelf " > If set to < c > true < / c > includes itself while searching for the type . < / param > <nl> + / / / < typeparam name = " T " > The type of the parent . < / typeparam > <nl> + public T GetParentWithType < T > ( bool includeSelf = true ) where T : SceneObject <nl> + { <nl> + SceneObject element = includeSelf ? this : Parent ; <nl> + while ( element ! = null ) <nl> + { <nl> + var foundType = element as T ; <nl> + if ( foundType ! = null ) <nl> + { <nl> + return foundType ; <nl> + } <nl> + element = element . Parent ; <nl> + } <nl> + return null ; <nl> + } <nl> + <nl> / / / < summary > <nl> / / / Adds a component of type T . <nl> / / / < / summary > <nl> public bool ForEachComponentReverse ( Func < UIComponent , bool > fkt , bool testForAct <nl> return false ; <nl> } <nl> <nl> - public void ForEach < T > ( Action < T > a ) where T : SceneObject <nl> + / / / < summary > <nl> + / / / Execute an action on all child objects of Type < typeparamref name = " T " / > on this SceneObject . <nl> + / / / < / summary > <nl> + / / / < param name = " action " > The action that will be run . < / param > <nl> + / / / < typeparam name = " T " > The type of SceneObjects it will run on . < / typeparam > <nl> + public void ForEach < T > ( Action < T > action ) where T : SceneObject <nl> { <nl> foreach ( var t in Transform . Children ) <nl> { <nl> if ( t . Owner is T ) <nl> { <nl> - a ( t . Owner as T ) ; <nl> + action ( t . Owner as T ) ; <nl> } <nl> - t . Owner . ForEach ( a ) ; <nl> + t . Owner . ForEach ( action ) ; <nl> } <nl> } <nl> <nl> - public void ForEachComponent < C > ( Action < C > a ) where C : UIComponent <nl> + / / / < summary > <nl> + / / / Execute an action on all components of Type < typeparamref name = " C " / > on this SceneObject , and run it also on all child SceneObjects . <nl> + / / / < / summary > <nl> + / / / < param name = " action " > The action that will be run . < / param > <nl> + / / / < typeparam name = " C " > The type of components it will be run on . < / typeparam > <nl> + public void ForEachComponent < C > ( Action < C > action ) where C : UIComponent <nl> { <nl> - foreach ( var c in Components ) <nl> + foreach ( var component in Components ) <nl> { <nl> - if ( c is C ) <nl> + C castComponent = component as C ; <nl> + if ( castComponent ! = null ) <nl> { <nl> - a ( c as C ) ; <nl> + action ( castComponent ) ; <nl> } <nl> } <nl> <nl> foreach ( var t in Transform . Children ) <nl> { <nl> - t . Owner . ForEachComponent ( a ) ; <nl> + t . Owner . ForEachComponent ( action ) ; <nl> } <nl> } <nl> <nl> mmm a / Code / CryManaged / CESharp / Core . UI / UIElement . cs <nl> ppp b / Code / CryManaged / CESharp / Core . UI / UIElement . cs <nl> public override string ToString ( ) <nl> / / / < / summary > <nl> public enum Alignment <nl> { <nl> + / / / < summary > <nl> + / / / Center the element . <nl> + / / / < / summary > <nl> Center , <nl> + / / / < summary > <nl> + / / / Align the element to the top . <nl> + / / / < / summary > <nl> Top , <nl> + / / / < summary > <nl> + / / / Align the element to the top - right . <nl> + / / / < / summary > <nl> TopRight , <nl> + / / / < summary > <nl> + / / / Align the element to the right . 
<nl> + / / / < / summary > <nl> Right , <nl> + / / / < summary > <nl> + / / / Align the element to the bottom - right . <nl> + / / / < / summary > <nl> BottomRight , <nl> + / / / < summary > <nl> + / / / Align the element to the bottom . <nl> + / / / < / summary > <nl> Bottom , <nl> + / / / < summary > <nl> + / / / Align the element to the bottom - left . <nl> + / / / < / summary > <nl> BottomLeft , <nl> + / / / < summary > <nl> + / / / Align the element to the left . <nl> + / / / < / summary > <nl> Left , <nl> + / / / < summary > <nl> + / / / Align the element to the top - left . <nl> + / / / < / summary > <nl> TopLeft , <nl> + / / / < summary > <nl> + / / / Align the element to the top and stretch it horizontally . <nl> + / / / < / summary > <nl> TopHStretch , <nl> / / CenterHStretch , <nl> + / / / < summary > <nl> + / / / Align the element to the bottom and stretch it horizontally . <nl> + / / / < / summary > <nl> BottomHStretch , <nl> / / LeftVStretch , <nl> / / CenterVStretch , <nl> + / / / < summary > <nl> + / / / Align the element to the right , and stretch it vertically . <nl> + / / / < / summary > <nl> RightVStretch , <nl> + / / / < summary > <nl> + / / / Stretch the element in all directions . <nl> + / / / < / summary > <nl> Stretch <nl> } <nl> <nl> public UIElement ( ) <nl> { <nl> RectTransform = AddComponent < RectTransform > ( ) ; <nl> } <nl> - <nl> - / / / < summary > <nl> - / / / Returns ths hierarchically predecessing Canvas object for this element . <nl> - / / / < / summary > <nl> - / / / < returns > The parent Canvas . < / returns > <nl> - public Canvas FindParentCanvas ( ) <nl> - { <nl> - var canvas = this as Canvas ; <nl> - if ( canvas ! = null ) <nl> - { <nl> - return canvas ; <nl> - } <nl> - <nl> - return ( Parent as UIElement ) ? . FindParentCanvas ( ) ; <nl> - } <nl> } <nl> } <nl> mmm a / Code / CryManaged / CESharp / Core . UI / Window . cs <nl> ppp b / Code / CryManaged / CESharp / Core . UI / Window . cs <nl> public class Window : Panel <nl> / / / < value > The caption . < / value > <nl> public string Caption { set { _caption . Content = value ; } } <nl> <nl> + / / / < summary > <nl> + / / / The height of the caption of this window . <nl> + / / / < / summary > <nl> + / / / < value > The height of the caption . < / value > <nl> public byte CaptionHeight { set { _caption . Height = value ; } } <nl> <nl> / / / < summary > <nl> mmm a / Code / CryManaged / CESharp / Core / Engine . cs <nl> ppp b / Code / CryManaged / CESharp / Core / Engine . cs <nl> public static class Engine <nl> / / / < value > The engine root directory . < / value > <nl> public static string EngineRootDirectory = > Global . GetEnginePath ( ) . c_str ( ) ; <nl> <nl> - / / / < summary > <nl> - / / / Path where application data should be stored . <nl> - / / / < / summary > <nl> - / / / < value > The data directory . < / value > <nl> - public static string DataDirectory = > Global . GetGameFolder ( ) . c_str ( ) + " / " ; <nl> + / / / < summary > <nl> + / / / Path where application data should be stored . <nl> + / / / < / summary > <nl> + / / / < value > The data directory . < / value > <nl> + public static string DataDirectory = > Global . GetGameFolder ( ) . c_str ( ) + " / " ; <nl> <nl> - internal static string MonoDirectory = > Path . Combine ( EngineRootDirectory , " bin " , " common " , " Mono " ) ; <nl> + internal static string MonoDirectory = > Path . Combine ( EngineRootDirectory , " bin " , " common " , " Mono " ) ; <nl> <nl> - internal static string GlobalAssemblyCacheDirectory = > Path . 
Combine ( MonoDirectory , " lib " , " mono " , " gac " ) ; <nl> + internal static string GlobalAssemblyCacheDirectory = > Path . Combine ( MonoDirectory , " lib " , " mono " , " gac " ) ; <nl> <nl> - internal static event Action StartReload ; <nl> + internal static event Action StartReload ; <nl> internal static event Action EndReload ; <nl> <nl> / / / < summary > <nl> internal static void OnEngineStart ( ) <nl> <nl> if ( ! IsDedicatedServer ) <nl> { <nl> - Input . Initialize ( ) ; <nl> - Renderer . Instance = new Renderer ( ) ; <nl> - Mouse . Instance = new Mouse ( ) ; <nl> + Input . Initialize ( ) ; <nl> + Renderer . Instance = new Renderer ( ) ; <nl> + Mouse . Instance = new Mouse ( ) ; <nl> } <nl> <nl> CryEngine . GameFramework . Instance = new GameFramework ( ) ; <nl> internal static void ScanAssembly ( Assembly assembly ) <nl> var registeredTypes = new List < Type > ( ) ; <nl> foreach ( Type t in assembly . GetTypes ( ) ) <nl> { <nl> - if ( typeof ( EntityComponent ) . IsAssignableFrom ( t ) & & t ! = typeof ( object ) ) <nl> + if ( typeof ( EntityComponent ) . IsAssignableFrom ( t ) & & t ! = typeof ( object ) ) <nl> { <nl> if ( registeredTypes . Contains ( t ) ) <nl> { <nl> public static void Shutdown ( ) <nl> } <nl> } <nl> <nl> - internal static string TypeToHash ( Type type ) <nl> - { <nl> - string result = string . Empty ; <nl> - string input = type . FullName ; <nl> - using ( SHA384 hashGenerator = SHA384 . Create ( ) ) <nl> - { <nl> - var hash = hashGenerator . ComputeHash ( Encoding . Default . GetBytes ( input ) ) ; <nl> - var shortHash = new byte [ 16 ] ; <nl> - for ( int i = 0 , j = 0 ; i < hash . Length ; + + i , + + j ) <nl> - { <nl> - if ( j > = shortHash . Length ) <nl> - { <nl> - j = 0 ; <nl> - } <nl> - unchecked <nl> - { <nl> - shortHash [ j ] + = hash [ i ] ; <nl> - } <nl> - } <nl> - result = BitConverter . ToString ( shortHash ) ; <nl> - result = result . Replace ( " - " , string . Empty ) ; <nl> - } <nl> - return result ; <nl> - } <nl> - <nl> - internal static string GetPluginGuid ( Type type ) <nl> - { <nl> - var guidAttribute = ( GuidAttribute ) type . GetCustomAttributes ( typeof ( GuidAttribute ) , false ) . FirstOrDefault ( ) ; <nl> - if ( guidAttribute ! = null ) <nl> - { <nl> - return guidAttribute . Value ; <nl> - } <nl> - <nl> - / / Fall back to generating GUID based on type <nl> - return ( new Guid ( TypeToHash ( type ) ) ) . ToString ( ) ; <nl> - } <nl> + internal static string TypeToHash ( Type type ) <nl> + { <nl> + string result = string . Empty ; <nl> + string input = type . FullName ; <nl> + using ( SHA384 hashGenerator = SHA384 . Create ( ) ) <nl> + { <nl> + var hash = hashGenerator . ComputeHash ( Encoding . Default . GetBytes ( input ) ) ; <nl> + var shortHash = new byte [ 16 ] ; <nl> + for ( int i = 0 , j = 0 ; i < hash . Length ; + + i , + + j ) <nl> + { <nl> + if ( j > = shortHash . Length ) <nl> + { <nl> + j = 0 ; <nl> + } <nl> + unchecked <nl> + { <nl> + shortHash [ j ] + = hash [ i ] ; <nl> + } <nl> + } <nl> + result = BitConverter . ToString ( shortHash ) ; <nl> + result = result . Replace ( " - " , string . Empty ) ; <nl> + } <nl> + return result ; <nl> + } <nl> + <nl> + internal static string GetPluginGuid ( Type type ) <nl> + { <nl> + var guidAttribute = ( GuidAttribute ) type . GetCustomAttributes ( typeof ( GuidAttribute ) , false ) . FirstOrDefault ( ) ; <nl> + if ( guidAttribute ! = null ) <nl> + { <nl> + return guidAttribute . 
Value ; <nl> + } <nl> + <nl> + / / Fall back to generating GUID based on type <nl> + return ( new Guid ( TypeToHash ( type ) ) ) . ToString ( ) ; <nl> + } <nl> } <nl> } <nl> mmm a / Code / CryManaged / CryMonoBridge / ManagedPlugin . cpp <nl> ppp b / Code / CryManaged / CryMonoBridge / ManagedPlugin . cpp <nl> CManagedPlugin : : ~ CManagedPlugin ( ) <nl> <nl> if ( m_pMonoObject ! = nullptr ) <nl> { <nl> - m_pMonoObject - > GetClass ( ) - > FindMethod ( " Shutdown " ) - > Invoke ( m_pMonoObject . get ( ) ) ; <nl> + if ( CMonoClass * pClass = m_pMonoObject - > GetClass ( ) ) <nl> + { <nl> + if ( std : : shared_ptr < CMonoMethod > pShutdownMethod = pClass - > FindMethod ( " Shutdown " ) ) <nl> + { <nl> + pShutdownMethod - > Invoke ( m_pMonoObject . get ( ) ) ; <nl> + } <nl> + } <nl> } <nl> } <nl> <nl> void CManagedPlugin : : OnSystemEvent ( ESystemEvent event , UINT_PTR wparam , UINT_PTR <nl> { <nl> if ( m_pMonoObject ! = nullptr ) <nl> { <nl> - if ( std : : shared_ptr < CMonoMethod > pMethod = m_pMonoObject - > GetClass ( ) - > FindMethod ( " OnLevelLoaded " ) ) <nl> + if ( CMonoClass * pClass = m_pMonoObject - > GetClass ( ) ) <nl> { <nl> - pMethod - > Invoke ( m_pMonoObject . get ( ) ) ; <nl> + if ( std : : shared_ptr < CMonoMethod > pMethod = pClass - > FindMethod ( " OnLevelLoaded " ) ) <nl> + { <nl> + pMethod - > Invoke ( m_pMonoObject . get ( ) ) ; <nl> + } <nl> } <nl> } <nl> } <nl> void CManagedPlugin : : OnSystemEvent ( ESystemEvent event , UINT_PTR wparam , UINT_PTR <nl> { <nl> if ( m_pMonoObject ! = nullptr ) <nl> { <nl> - if ( std : : shared_ptr < CMonoMethod > pMethod = m_pMonoObject - > GetClass ( ) - > FindMethod ( " OnGameStart " ) ) <nl> + if ( CMonoClass * pClass = m_pMonoObject - > GetClass ( ) ) <nl> { <nl> - pMethod - > Invoke ( m_pMonoObject . get ( ) ) ; <nl> + if ( std : : shared_ptr < CMonoMethod > pMethod = pClass - > FindMethod ( " OnGameStart " ) ) <nl> + { <nl> + pMethod - > Invoke ( m_pMonoObject . get ( ) ) ; <nl> + } <nl> } <nl> } <nl> } <nl> else if ( m_pMonoObject ! = nullptr ) <nl> { <nl> - if ( std : : shared_ptr < CMonoMethod > pMethod = m_pMonoObject - > GetClass ( ) - > FindMethod ( " OnGameStop " ) ) <nl> + if ( CMonoClass * pClass = m_pMonoObject - > GetClass ( ) ) <nl> { <nl> - pMethod - > Invoke ( m_pMonoObject . get ( ) ) ; <nl> + if ( std : : shared_ptr < CMonoMethod > pMethod = pClass - > FindMethod ( " OnGameStop " ) ) <nl> + { <nl> + pMethod - > Invoke ( m_pMonoObject . get ( ) ) ; <nl> + } <nl> } <nl> } <nl> } <nl> bool CManagedPlugin : : OnClientConnectionReceived ( int channelId , bool bIsReset ) <nl> { <nl> if ( m_pMonoObject ! = nullptr ) <nl> { <nl> - if ( std : : shared_ptr < CMonoMethod > pMethod = m_pMonoObject - > GetClass ( ) - > FindMethod ( " OnClientConnectionReceived " ) ) <nl> + if ( CMonoClass * pClass = m_pMonoObject - > GetClass ( ) ) <nl> { <nl> - void * pParameters [ 1 ] ; <nl> - pParameters [ 0 ] = & channelId ; <nl> + if ( std : : shared_ptr < CMonoMethod > pMethod = pClass - > FindMethod ( " OnClientConnectionReceived " ) ) <nl> + { <nl> + void * pParameters [ 1 ] ; <nl> + pParameters [ 0 ] = & channelId ; <nl> <nl> - pMethod - > Invoke ( m_pMonoObject . get ( ) , pParameters ) ; <nl> + pMethod - > Invoke ( m_pMonoObject . get ( ) , pParameters ) ; <nl> + } <nl> } <nl> + <nl> } <nl> <nl> return true ; <nl> bool CManagedPlugin : : OnClientReadyForGameplay ( int channelId , bool bIsReset ) <nl> { <nl> if ( m_pMonoObject ! 
= nullptr ) <nl> { <nl> - if ( std : : shared_ptr < CMonoMethod > pMethod = m_pMonoObject - > GetClass ( ) - > FindMethod ( " OnClientReadyForGameplay " ) ) <nl> + if ( CMonoClass * pClass = m_pMonoObject - > GetClass ( ) ) <nl> { <nl> - void * pParameters [ 1 ] ; <nl> - pParameters [ 0 ] = & channelId ; <nl> + if ( std : : shared_ptr < CMonoMethod > pMethod = pClass - > FindMethod ( " OnClientReadyForGameplay " ) ) <nl> + { <nl> + void * pParameters [ 1 ] ; <nl> + pParameters [ 0 ] = & channelId ; <nl> <nl> - pMethod - > Invoke ( m_pMonoObject . get ( ) , pParameters ) ; <nl> + pMethod - > Invoke ( m_pMonoObject . get ( ) , pParameters ) ; <nl> + } <nl> } <nl> } <nl> <nl> void CManagedPlugin : : OnClientDisconnected ( int channelId , EDisconnectionCause cau <nl> { <nl> if ( m_pMonoObject ! = nullptr ) <nl> { <nl> - if ( std : : shared_ptr < CMonoMethod > pMethod = m_pMonoObject - > GetClass ( ) - > FindMethod ( " OnClientDisconnected " ) ) <nl> + if ( CMonoClass * pClass = m_pMonoObject - > GetClass ( ) ) <nl> { <nl> - void * pParameters [ 1 ] ; <nl> - pParameters [ 0 ] = & channelId ; <nl> - pMethod - > Invoke ( m_pMonoObject . get ( ) , pParameters ) ; <nl> + if ( std : : shared_ptr < CMonoMethod > pMethod = pClass - > FindMethod ( " OnClientDisconnected " ) ) <nl> + { <nl> + void * pParameters [ 1 ] ; <nl> + pParameters [ 0 ] = & channelId ; <nl> + pMethod - > Invoke ( m_pMonoObject . get ( ) , pParameters ) ; <nl> + } <nl> } <nl> } <nl> } <nl> mmm a / Code / CryPlugins / CryDefaultEntities / Module / DefaultComponents / Input / InputComponent . cpp <nl> ppp b / Code / CryPlugins / CryDefaultEntities / Module / DefaultComponents / Input / InputComponent . cpp <nl> SERIALIZATION_ENUM ( EKeyId : : eKI_XI_ThumbRY , " xi_thumbry " , " Pad_RightThumb_Y - Axis " <nl> / / SERIALIZATION_ENUM ( EKeyId : : eKI_XI_ThumbRDown , " Pad_ThumbRDown " , " Pad_ThumbRDown " ) <nl> / / SERIALIZATION_ENUM ( EKeyId : : eKI_XI_ThumbRLeft , " Pad_ThumbRLeft " , " Pad_ThumbRLeft " ) <nl> / / SERIALIZATION_ENUM ( EKeyId : : eKI_XI_ThumbRRight , " Pad_ThumbRRight " , " Pad_ThumbRRight " ) <nl> - SERIALIZATION_ENUM ( EKeyId : : eKI_XI_TriggerLBtn , " xi_triggerl_btn " , " Pad_LeftTriggerBtn " ) <nl> - SERIALIZATION_ENUM ( EKeyId : : eKI_XI_TriggerRBtn , " xi_triggerr_btn " , " Pad_RightTriggerBtn " ) <nl> <nl> SERIALIZATION_ENUM ( EKeyId : : eKI_Orbis_Options , " pad_start " , " Orbis_Options " ) <nl> SERIALIZATION_ENUM ( EKeyId : : eKI_Orbis_L3 , " pad_l3 " , " Orbis_L3 " ) <nl> SERIALIZATION_ENUM ( EKeyId : : eKI_Orbis_Up , " pad_up " , " Orbis_Up " ) <nl> SERIALIZATION_ENUM ( EKeyId : : eKI_Orbis_Right , " pad_right " , " Orbis_Right " ) <nl> SERIALIZATION_ENUM ( EKeyId : : eKI_Orbis_Down , " pad_down " , " Orbis_Down " ) <nl> SERIALIZATION_ENUM ( EKeyId : : eKI_Orbis_Left , " pad_left " , " Orbis_Left " ) <nl> - SERIALIZATION_ENUM ( EKeyId : : eKI_Orbis_L2 , " pad_l2 " , " Orbis_L2 " ) <nl> - SERIALIZATION_ENUM ( EKeyId : : eKI_Orbis_R2 , " pad_r2 " , " Orbis_R2 " ) <nl> - SERIALIZATION_ENUM ( EKeyId : : eKI_Orbis_L1 , " pad_l1 " , " Orbis_L1 " ) <nl> - SERIALIZATION_ENUM ( EKeyId : : eKI_Orbis_R1 , " pad_r1 " , " Orbis_R1 " ) <nl> + SERIALIZATION_ENUM ( EKeyId : : eKI_Orbis_LeftTrigger , " pad_ltrigger " , " Orbis_LeftTrigger " ) <nl> + SERIALIZATION_ENUM ( EKeyId : : eKI_Orbis_RightTrigger , " pad_rtrigger " , " Orbis_RightTrigger " ) <nl> + SERIALIZATION_ENUM ( EKeyId : : eKI_Orbis_L1 , " pad_l1 " , " Orbis_LeftShoulderButton " ) <nl> + SERIALIZATION_ENUM ( EKeyId : : eKI_Orbis_R1 , " pad_r1 " , " 
Orbis_RightShoulderButton " ) <nl> SERIALIZATION_ENUM ( EKeyId : : eKI_Orbis_Triangle , " pad_triangle " , " Orbis_Triangle " ) <nl> SERIALIZATION_ENUM ( EKeyId : : eKI_Orbis_Circle , " pad_circle " , " Orbis_Circle " ) <nl> SERIALIZATION_ENUM ( EKeyId : : eKI_Orbis_Cross , " pad_cross " , " Orbis_Cross " ) <nl> SERIALIZATION_ENUM ( EKeyId : : eKI_Orbis_StickRY , " pad_stickry " , " Orbis_StickRY " ) <nl> / / SERIALIZATION_ENUM ( EKeyId : : eKI_Orbis_RotX_KeyR , " Orbis_RotX_KeyR " , " Orbis_RotX_KeyR " ) <nl> / / SERIALIZATION_ENUM ( EKeyId : : eKI_Orbis_RotZ_KeyD , " Orbis_RotZ_KeyD " , " Orbis_RotZ_KeyD " ) <nl> / / SERIALIZATION_ENUM ( EKeyId : : eKI_Orbis_RotZ_KeyU , " Orbis_RotZ_KeyU " , " Orbis_RotZ_KeyU " ) <nl> - SERIALIZATION_ENUM ( EKeyId : : eKI_Orbis_LeftTrigger , " pad_ltrigger " , " Orbis_LeftTrigger " ) <nl> - SERIALIZATION_ENUM ( EKeyId : : eKI_Orbis_RightTrigger , " pad_rtrigger " , " Orbis_RightTrigger " ) <nl> SERIALIZATION_ENUM_END ( ) <nl> <nl> namespace Cry <nl> void ReflectType ( Schematyc : : CTypeDesc < CInputComponent : : EXboxInputId > & desc ) <nl> desc . SetDescription ( " Input Key Identifier " ) ; <nl> desc . SetFlags ( Schematyc : : ETypeFlags : : Switchable ) ; <nl> desc . SetDefaultValue ( ( CInputComponent : : EXboxInputId ) EKeyId : : eKI_XI_X ) ; <nl> - desc . AddConstant ( ( CInputComponent : : EXboxInputId ) EKeyId : : eKI_XI_DPadUp , " DUp " , " D - Pad Up " ) ; <nl> - desc . AddConstant ( ( CInputComponent : : EXboxInputId ) EKeyId : : eKI_XI_DPadDown , " DDown " , " D - Pad Down " ) ; <nl> - desc . AddConstant ( ( CInputComponent : : EXboxInputId ) EKeyId : : eKI_XI_DPadLeft , " DLeft " , " D - Pad Left " ) ; <nl> - desc . AddConstant ( ( CInputComponent : : EXboxInputId ) EKeyId : : eKI_XI_DPadRight , " DRight " , " D - Pad Right " ) ; <nl> - desc . AddConstant ( ( CInputComponent : : EXboxInputId ) EKeyId : : eKI_XI_Start , " Start " , " Start " ) ; <nl> - desc . AddConstant ( ( CInputComponent : : EXboxInputId ) EKeyId : : eKI_XI_Back , " Back " , " Back " ) ; <nl> - desc . AddConstant ( ( CInputComponent : : EXboxInputId ) EKeyId : : eKI_XI_ThumbL , " LeftThumbPress " , " Left Thumb Press " ) ; <nl> - desc . AddConstant ( ( CInputComponent : : EXboxInputId ) EKeyId : : eKI_XI_ThumbR , " RightThumbPress " , " Right Thumb Press " ) ; <nl> - desc . AddConstant ( ( CInputComponent : : EXboxInputId ) EKeyId : : eKI_XI_ShoulderL , " LeftShoulder " , " Left Shoulder " ) ; <nl> - desc . AddConstant ( ( CInputComponent : : EXboxInputId ) EKeyId : : eKI_XI_ShoulderR , " RightShoulder " , " Right Shoulder " ) ; <nl> - desc . AddConstant ( ( CInputComponent : : EXboxInputId ) EKeyId : : eKI_XI_A , " Button_A " , " A " ) ; <nl> - desc . AddConstant ( ( CInputComponent : : EXboxInputId ) EKeyId : : eKI_XI_B , " Button_B " , " B " ) ; <nl> - desc . AddConstant ( ( CInputComponent : : EXboxInputId ) EKeyId : : eKI_XI_X , " Button_X " , " X " ) ; <nl> - desc . AddConstant ( ( CInputComponent : : EXboxInputId ) EKeyId : : eKI_XI_Y , " Button_Y " , " Y " ) ; <nl> - desc . AddConstant ( ( CInputComponent : : EXboxInputId ) EKeyId : : eKI_XI_TriggerL , " LeftTrigger " , " Left Trigger " ) ; <nl> - desc . AddConstant ( ( CInputComponent : : EXboxInputId ) EKeyId : : eKI_XI_TriggerR , " RightTrigger " , " Right Trigger " ) ; <nl> + desc . AddConstant ( ( CInputComponent : : EXboxInputId ) EKeyId : : eKI_XI_DPadUp , " xi_dpad_up " , " D - Pad Up " ) ; <nl> + desc . 
AddConstant ( ( CInputComponent : : EXboxInputId ) EKeyId : : eKI_XI_DPadDown , " xi_dpad_down " , " D - Pad Down " ) ; <nl> + desc . AddConstant ( ( CInputComponent : : EXboxInputId ) EKeyId : : eKI_XI_DPadLeft , " xi_dpad_left " , " D - Pad Left " ) ; <nl> + desc . AddConstant ( ( CInputComponent : : EXboxInputId ) EKeyId : : eKI_XI_DPadRight , " xi_dpad_right " , " D - Pad Right " ) ; <nl> + desc . AddConstant ( ( CInputComponent : : EXboxInputId ) EKeyId : : eKI_XI_Start , " xi_start " , " Start " ) ; <nl> + desc . AddConstant ( ( CInputComponent : : EXboxInputId ) EKeyId : : eKI_XI_Back , " xi_back " , " Back " ) ; <nl> + desc . AddConstant ( ( CInputComponent : : EXboxInputId ) EKeyId : : eKI_XI_ThumbL , " xi_thumbl " , " Left Thumb Press " ) ; <nl> + desc . AddConstant ( ( CInputComponent : : EXboxInputId ) EKeyId : : eKI_XI_ThumbR , " xi_thumbr " , " Right Thumb Press " ) ; <nl> + desc . AddConstant ( ( CInputComponent : : EXboxInputId ) EKeyId : : eKI_XI_ShoulderL , " xi_shoulderl " , " Left Shoulder " ) ; <nl> + desc . AddConstant ( ( CInputComponent : : EXboxInputId ) EKeyId : : eKI_XI_ShoulderR , " xi_shoulderr " , " Right Shoulder " ) ; <nl> + desc . AddConstant ( ( CInputComponent : : EXboxInputId ) EKeyId : : eKI_XI_A , " xi_a " , " A " ) ; <nl> + desc . AddConstant ( ( CInputComponent : : EXboxInputId ) EKeyId : : eKI_XI_B , " xi_b " , " B " ) ; <nl> + desc . AddConstant ( ( CInputComponent : : EXboxInputId ) EKeyId : : eKI_XI_X , " xi_x " , " X " ) ; <nl> + desc . AddConstant ( ( CInputComponent : : EXboxInputId ) EKeyId : : eKI_XI_Y , " xi_y " , " Y " ) ; <nl> + desc . AddConstant ( ( CInputComponent : : EXboxInputId ) EKeyId : : eKI_XI_TriggerL , " xi_triggerl " , " Left Trigger " ) ; <nl> + desc . AddConstant ( ( CInputComponent : : EXboxInputId ) EKeyId : : eKI_XI_TriggerR , " xi_triggerr " , " Right Trigger " ) ; <nl> desc . AddConstant ( ( CInputComponent : : EXboxInputId ) EKeyId : : eKI_XI_ThumbLX , " xi_thumblx " , " Left Thumb X Axis " ) ; <nl> desc . AddConstant ( ( CInputComponent : : EXboxInputId ) EKeyId : : eKI_XI_ThumbLY , " xi_thumbly " , " Left Thumb Y Axis " ) ; <nl> / / desc . AddConstant ( ( CInputComponent : : EXboxInputId ) EKeyId : : eKI_XI_ThumbLUp , " ThumbLUp " , " ThumbLUp " ) ; <nl> void ReflectType ( Schematyc : : CTypeDesc < CInputComponent : : EXboxInputId > & desc ) <nl> / / desc . AddConstant ( ( CInputComponent : : EXboxInputId ) EKeyId : : eKI_XI_ThumbRDown , " ThumbRDown " , " ThumbRDown " ) ; <nl> / / desc . AddConstant ( ( CInputComponent : : EXboxInputId ) EKeyId : : eKI_XI_ThumbRLeft , " ThumbRLeft " , " ThumbRLeft " ) ; <nl> / / desc . AddConstant ( ( CInputComponent : : EXboxInputId ) EKeyId : : eKI_XI_ThumbRRight , " ThumbRRight " , " ThumbRRight " ) ; <nl> - desc . AddConstant ( ( CInputComponent : : EXboxInputId ) EKeyId : : eKI_XI_TriggerLBtn , " LeftTriggerBtn " , " Left Trigger Button " ) ; <nl> - desc . AddConstant ( ( CInputComponent : : EXboxInputId ) EKeyId : : eKI_XI_TriggerRBtn , " RightTriggerBtn " , " Right Trigger Button " ) ; <nl> } <nl> <nl> void ReflectType ( Schematyc : : CTypeDesc < CInputComponent : : EPS4InputId > & desc ) <nl> void ReflectType ( Schematyc : : CTypeDesc < CInputComponent : : EPS4InputId > & desc ) <nl> desc . SetDescription ( " Input Key Identifier " ) ; <nl> desc . SetFlags ( Schematyc : : ETypeFlags : : Switchable ) ; <nl> desc . SetDefaultValue ( ( CInputComponent : : EPS4InputId ) EKeyId : : eKI_XI_X ) ; <nl> - desc . 
AddConstant ( ( CInputComponent : : EPS4InputId ) EKeyId : : eKI_Orbis_Options , " Options " , " Options " ) ; <nl> - desc . AddConstant ( ( CInputComponent : : EPS4InputId ) EKeyId : : eKI_Orbis_L3 , " L3 " , " L3 " ) ; <nl> - desc . AddConstant ( ( CInputComponent : : EPS4InputId ) EKeyId : : eKI_Orbis_R3 , " R3 " , " R3 " ) ; <nl> - desc . AddConstant ( ( CInputComponent : : EPS4InputId ) EKeyId : : eKI_Orbis_Up , " Up " , " D - Pad Up " ) ; <nl> - desc . AddConstant ( ( CInputComponent : : EPS4InputId ) EKeyId : : eKI_Orbis_Right , " Right " , " D - Pad Right " ) ; <nl> - desc . AddConstant ( ( CInputComponent : : EPS4InputId ) EKeyId : : eKI_Orbis_Down , " Down " , " D - Pad Down " ) ; <nl> - desc . AddConstant ( ( CInputComponent : : EPS4InputId ) EKeyId : : eKI_Orbis_Left , " Left " , " D - Pad Left " ) ; <nl> - desc . AddConstant ( ( CInputComponent : : EPS4InputId ) EKeyId : : eKI_Orbis_L2 , " L2 " , " L2 " ) ; <nl> - desc . AddConstant ( ( CInputComponent : : EPS4InputId ) EKeyId : : eKI_Orbis_R2 , " R2 " , " R2 " ) ; <nl> - desc . AddConstant ( ( CInputComponent : : EPS4InputId ) EKeyId : : eKI_Orbis_L1 , " L1 " , " L1 " ) ; <nl> - desc . AddConstant ( ( CInputComponent : : EPS4InputId ) EKeyId : : eKI_Orbis_R1 , " R1 " , " R1 " ) ; <nl> - desc . AddConstant ( ( CInputComponent : : EPS4InputId ) EKeyId : : eKI_Orbis_Triangle , " Triangle " , " Triangle " ) ; <nl> - desc . AddConstant ( ( CInputComponent : : EPS4InputId ) EKeyId : : eKI_Orbis_Circle , " Circle " , " Circle " ) ; <nl> - desc . AddConstant ( ( CInputComponent : : EPS4InputId ) EKeyId : : eKI_Orbis_Cross , " Cross " , " Cross " ) ; <nl> - desc . AddConstant ( ( CInputComponent : : EPS4InputId ) EKeyId : : eKI_Orbis_Square , " Square " , " Square " ) ; <nl> - desc . AddConstant ( ( CInputComponent : : EPS4InputId ) EKeyId : : eKI_Orbis_StickLX , " StickLX " , " Left Stick X " ) ; <nl> - desc . AddConstant ( ( CInputComponent : : EPS4InputId ) EKeyId : : eKI_Orbis_StickLY , " StickLY " , " Left Stick Y " ) ; <nl> - desc . AddConstant ( ( CInputComponent : : EPS4InputId ) EKeyId : : eKI_Orbis_StickRX , " StickRX " , " Right Stick X " ) ; <nl> - desc . AddConstant ( ( CInputComponent : : EPS4InputId ) EKeyId : : eKI_Orbis_StickRY , " StickRY " , " Right Stick Y " ) ; <nl> + desc . AddConstant ( ( CInputComponent : : EPS4InputId ) EKeyId : : eKI_Orbis_Options , " pad_start " , " Options " ) ; <nl> + desc . AddConstant ( ( CInputComponent : : EPS4InputId ) EKeyId : : eKI_Orbis_L3 , " pad_l3 " , " Left Thumb Press " ) ; <nl> + desc . AddConstant ( ( CInputComponent : : EPS4InputId ) EKeyId : : eKI_Orbis_R3 , " pad_r3 " , " Right Thumb Press " ) ; <nl> + desc . AddConstant ( ( CInputComponent : : EPS4InputId ) EKeyId : : eKI_Orbis_Up , " pad_up " , " D - Pad Up " ) ; <nl> + desc . AddConstant ( ( CInputComponent : : EPS4InputId ) EKeyId : : eKI_Orbis_Right , " pad_right " , " D - Pad Right " ) ; <nl> + desc . AddConstant ( ( CInputComponent : : EPS4InputId ) EKeyId : : eKI_Orbis_Down , " pad_down " , " D - Pad Down " ) ; <nl> + desc . AddConstant ( ( CInputComponent : : EPS4InputId ) EKeyId : : eKI_Orbis_Left , " pad_left " , " D - Pad Left " ) ; <nl> + desc . AddConstant ( ( CInputComponent : : EPS4InputId ) EKeyId : : eKI_Orbis_LeftTrigger , " pad_ltrigger " , " Left Trigger " ) ; <nl> + desc . AddConstant ( ( CInputComponent : : EPS4InputId ) EKeyId : : eKI_Orbis_RightTrigger , " pad_rtrigger " , " Right Trigger " ) ; <nl> + desc . 
AddConstant ( ( CInputComponent : : EPS4InputId ) EKeyId : : eKI_Orbis_L1 , " pad_l1 " , " Left Shoulder " ) ; <nl> + desc . AddConstant ( ( CInputComponent : : EPS4InputId ) EKeyId : : eKI_Orbis_R1 , " pad_r1 " , " Right Shoulder " ) ; <nl> + desc . AddConstant ( ( CInputComponent : : EPS4InputId ) EKeyId : : eKI_Orbis_Triangle , " pad_triangle " , " Triangle " ) ; <nl> + desc . AddConstant ( ( CInputComponent : : EPS4InputId ) EKeyId : : eKI_Orbis_Circle , " pad_circle " , " Circle " ) ; <nl> + desc . AddConstant ( ( CInputComponent : : EPS4InputId ) EKeyId : : eKI_Orbis_Cross , " pad_cross " , " Cross " ) ; <nl> + desc . AddConstant ( ( CInputComponent : : EPS4InputId ) EKeyId : : eKI_Orbis_Square , " pad_square " , " Square " ) ; <nl> + desc . AddConstant ( ( CInputComponent : : EPS4InputId ) EKeyId : : eKI_Orbis_StickLX , " pad_sticklx " , " Left Stick X " ) ; <nl> + desc . AddConstant ( ( CInputComponent : : EPS4InputId ) EKeyId : : eKI_Orbis_StickLY , " pad_stickly " , " Left Stick Y " ) ; <nl> + desc . AddConstant ( ( CInputComponent : : EPS4InputId ) EKeyId : : eKI_Orbis_StickRX , " pad_stickrx " , " Right Stick X " ) ; <nl> + desc . AddConstant ( ( CInputComponent : : EPS4InputId ) EKeyId : : eKI_Orbis_StickRY , " pad_stickry " , " Right Stick Y " ) ; <nl> / / desc . AddConstant ( ( CInputComponent : : EPS4InputId ) EKeyId : : eKI_Orbis_RotX , " RotX " , " RotX " ) ; <nl> / / desc . AddConstant ( ( CInputComponent : : EPS4InputId ) EKeyId : : eKI_Orbis_RotY , " RotY " , " RotY " ) ; <nl> / / desc . AddConstant ( ( CInputComponent : : EPS4InputId ) EKeyId : : eKI_Orbis_RotZ , " RotZ " , " RotZ " ) ; <nl> void ReflectType ( Schematyc : : CTypeDesc < CInputComponent : : EPS4InputId > & desc ) <nl> / / desc . AddConstant ( ( CInputComponent : : EPS4InputId ) EKeyId : : eKI_Orbis_RotX_KeyR , " RotX_KeyR " , " RotX_KeyR " ) ; <nl> / / desc . AddConstant ( ( CInputComponent : : EPS4InputId ) EKeyId : : eKI_Orbis_RotZ_KeyD , " RotZ_KeyD " , " RotZ_KeyD " ) ; <nl> / / desc . AddConstant ( ( CInputComponent : : EPS4InputId ) EKeyId : : eKI_Orbis_RotZ_KeyU , " RotZ_KeyU " , " RotZ_KeyU " ) ; <nl> - desc . AddConstant ( ( CInputComponent : : EPS4InputId ) EKeyId : : eKI_Orbis_LeftTrigger , " LeftTrigger " , " Left Trigger " ) ; <nl> - desc . AddConstant ( ( CInputComponent : : EPS4InputId ) EKeyId : : eKI_Orbis_RightTrigger , " RightTrigger " , " Right Trigger " ) ; <nl> } <nl> <nl> struct SActionPressedSignal <nl> static void ReflectType ( Schematyc : : CTypeDesc < SActionChangedSignal > & desc ) <nl> void CInputComponent : : Initialize ( ) <nl> { <nl> IActionMapManager * pActionMapManager = gEnv - > pGameFramework - > GetIActionMapManager ( ) ; <nl> - <nl> + <nl> pActionMapManager - > ClearInputDevicesMappings ( ) ; <nl> <nl> pActionMapManager - > AddInputDeviceMapping ( eAID_KeyboardMouse , " keyboard " ) ; <nl> void InternalBindAction ( EntityId id , Schematyc : : CSharedString groupName , Schemat <nl> pActionMap - > AddAndBindActionInput ( ActionId ( name . c_str ( ) ) , input ) ; <nl> <nl> pActionMapManager - > EnableActionMap ( groupName . c_str ( ) , true ) ; <nl> - <nl> - if ( IActionMap * pActionMap = pActionMapManager - > GetActionMap ( groupName . 
c_str ( ) ) ) <nl> - { <nl> - pActionMap - > SetActionListener ( id ) ; <nl> - } <nl> } <nl> <nl> void CInputComponent : : BindKeyboardAction ( Schematyc : : CSharedString groupName , Schematyc : : CSharedString name , EKeyboardInputId keyId , bool bOnPress , bool bOnRelease , bool bOnHold ) <nl> mmm a / Code / CryPlugins / CryDefaultEntities / Module / DefaultComponents / Input / InputComponent . h <nl> ppp b / Code / CryPlugins / CryDefaultEntities / Module / DefaultComponents / Input / InputComponent . h <nl> namespace Cry <nl> { <nl> if ( ! strcmp ( group . m_szName , szGroupName ) ) <nl> { <nl> - group . m_actions . emplace_back ( SGroup : : SAction { szName , callback } ) ; <nl> + group . AddAction ( szName , callback ) ; <nl> } <nl> } <nl> } <nl> namespace Cry <nl> } ; <nl> <nl> SGroup ( const char * szName ) <nl> - : m_szName ( szName ) <nl> + : m_szName ( szName ) { } <nl> + <nl> + SGroup ( const SGroup & other ) <nl> + : m_szName ( other . m_szName ) <nl> + , m_actions ( other . m_actions ) <nl> { <nl> - gEnv - > pGameFramework - > GetIActionMapManager ( ) - > AddExtraActionListener ( this ) ; <nl> + if ( m_actions . size ( ) > 0 ) <nl> + { <nl> + gEnv - > pGameFramework - > GetIActionMapManager ( ) - > AddExtraActionListener ( this ) ; <nl> + } <nl> } <nl> <nl> virtual ~ SGroup ( ) <nl> { <nl> - gEnv - > pGameFramework - > GetIActionMapManager ( ) - > RemoveExtraActionListener ( this ) ; <nl> + if ( m_actions . size ( ) > 0 ) <nl> + { <nl> + gEnv - > pGameFramework - > GetIActionMapManager ( ) - > RemoveExtraActionListener ( this ) ; <nl> + } <nl> + } <nl> + <nl> + void AddAction ( const char * szName , TActionCallback callback ) <nl> + { <nl> + / / Delete already existing entries <nl> + for ( auto it = m_actions . begin ( ) ; it ! = m_actions . end ( ) ; it + + ) <nl> + { <nl> + if ( strcmp ( it - > szName , szName ) = = 0 ) <nl> + { <nl> + m_actions . erase ( it ) ; <nl> + break ; <nl> + } <nl> + } <nl> + <nl> + if ( m_actions . size ( ) = = 0 ) <nl> + { <nl> + gEnv - > pGameFramework - > GetIActionMapManager ( ) - > AddExtraActionListener ( this ) ; <nl> + } <nl> + <nl> + m_actions . emplace_back ( SGroup : : SAction { szName , callback } ) ; <nl> } <nl> <nl> / / IActionListener <nl> namespace Cry <nl> / / ~ IActionListener <nl> <nl> const char * m_szName ; <nl> + <nl> + protected : <nl> DynArray < SAction > m_actions ; <nl> } ; <nl> <nl> mmm a / Code / CryPlugins / CryUQS / Core / core / QueryHistory . cpp <nl> ppp b / Code / CryPlugins / CryUQS / Core / core / QueryHistory . cpp <nl> namespace UQS <nl> m_queryBlueprintName . c_str ( ) , <nl> numGeneratedItems , <nl> numItemsInFinalResultSet , <nl> - elapsedTime ) ; <nl> + elapsedTime , <nl> + m_queryCreatedTimestamp , <nl> + m_queryDestroyedTimestamp ) ; <nl> consumer . AddOrUpdateHistoricQuery ( overview ) ; <nl> } <nl> <nl> namespace UQS <nl> } <nl> } <nl> <nl> - / / elapsed frames and time <nl> + / / elapsed frames and time , and timestamps of creation + destruction of the query <nl> { <nl> / / elapsed frames <nl> consumer . AddTextLineToCurrentHistoricQuery ( color , " elapsed frames until result : % i " , ( int ) m_finalStatistics . totalElapsedFrames ) ; <nl> namespace UQS <nl> <nl> / / consumed time ( this is the accumulation of the granted and consumed amounts of time per update call while the query was running ) <nl> consumer . AddTextLineToCurrentHistoricQuery ( color , " consumed seconds : % f ( % . 2f milliseconds ) " , m_finalStatistics . totalConsumedTime . GetSeconds ( ) , m_finalStatistics . 
totalConsumedTime . GetMilliSeconds ( ) ) ; <nl> + <nl> + / / timestamps of when the query was created and destroyed ( notice : if the query was canceled prematurely it will miss the timestamp of query destruction ) <nl> + / / - > " h : mm : ss : mmm " <nl> + <nl> + int hours , minutes , seconds , milliseconds ; <nl> + <nl> + UQS : : Shared : : CTimeValueUtil : : Split ( m_queryCreatedTimestamp , & hours , & minutes , & seconds , & milliseconds ) ; <nl> + consumer . AddTextLineToCurrentHistoricQuery ( color , " timestamp query created : % i : % 02i : % 02i : % 03i " , hours , minutes , seconds , milliseconds ) ; <nl> + <nl> + UQS : : Shared : : CTimeValueUtil : : Split ( m_queryDestroyedTimestamp , & hours , & minutes , & seconds , & milliseconds ) ; <nl> + consumer . AddTextLineToCurrentHistoricQuery ( color , " timestamp query destroyed : % i : % 02i : % 02i : % 03i " , hours , minutes , seconds , milliseconds ) ; <nl> } <nl> <nl> / / canceled : yes / no <nl> mmm a / Code / CryPlugins / CryUQS / EditorPlugin_HistoryInspector / Editor / MainEditorWindow . cpp <nl> ppp b / Code / CryPlugins / CryUQS / EditorPlugin_HistoryInspector / Editor / MainEditorWindow . cpp <nl> struct SQuery <nl> / / TODO : column for displaying the itemType of the items in the result set <nl> Column_ItemCounts , / / number of resulting items vs . generated items <nl> Column_ElapsedTime , <nl> + Column_TimestampQueryCreated , <nl> + Column_TimestampQueryDestroyed , <nl> <nl> ColumnCount <nl> } ; <nl> struct SQuery <nl> stack_string queryIdAndQuerierName ; <nl> stack_string itemCountsAsString ; <nl> stack_string elapsedTimeAsString ; <nl> + stack_string timestampQueryCreatedAsString ; <nl> + stack_string timestampQueryDestroyedAsString ; <nl> + <nl> + int hours , minutes , seconds , milliseconds ; <nl> <nl> overview . queryID . ToString ( queryIdAsString ) ; <nl> queryIdAndQuerierName . Format ( " # % s : % s " , queryIdAsString . c_str ( ) , overview . szQuerierName ) ; <nl> itemCountsAsString . Format ( " % i / % i " , ( int ) overview . numResultingItems , ( int ) overview . numGeneratedItems ) ; <nl> elapsedTimeAsString . Format ( " % . 2f ms " , overview . timeElapsedUntilResult . GetMilliSeconds ( ) ) ; <nl> + UQS : : Shared : : CTimeValueUtil : : Split ( overview . timestampQueryCreated , & hours , & minutes , & seconds , & milliseconds ) ; <nl> + timestampQueryCreatedAsString . Format ( " % i : % 02i : % 02i : % 03i " , hours , minutes , seconds , milliseconds ) ; <nl> + UQS : : Shared : : CTimeValueUtil : : Split ( overview . timestampQueryDestroyed , & hours , & minutes , & seconds , & milliseconds ) ; <nl> + timestampQueryDestroyedAsString . Format ( " % i : % 02i : % 02i : % 03i " , hours , minutes , seconds , milliseconds ) ; <nl> <nl> this - > dataPerColumn [ Column_QueryIdAndQuerierName ] = QtUtil : : ToQString ( queryIdAndQuerierName . c_str ( ) ) ; <nl> this - > dataPerColumn [ Column_QueryBlueprintName ] = QtUtil : : ToQString ( overview . szQueryBlueprintName ) ; <nl> this - > dataPerColumn [ Column_ItemCounts ] = QtUtil : : ToQString ( itemCountsAsString . c_str ( ) ) ; <nl> this - > dataPerColumn [ Column_ElapsedTime ] = QtUtil : : ToQString ( elapsedTimeAsString . c_str ( ) ) ; <nl> + this - > dataPerColumn [ Column_TimestampQueryCreated ] = QtUtil : : ToQString ( timestampQueryCreatedAsString . c_str ( ) ) ; <nl> + this - > dataPerColumn [ Column_TimestampQueryDestroyed ] = QtUtil : : ToQString ( timestampQueryDestroyedAsString . 
c_str ( ) ) ; <nl> } <nl> <nl> static void HelpSerializeEvaluatorsBitfield ( Serialization : : IArchive & ar , UQS : : Core : : evaluatorsBitfield_t & bitfieldToSerialize , const std : : vector < string > & evaluatorNames , const std : : vector < string > & evaluatorLabelsForUI ) <nl> const char * SQuery : : headers [ SQuery : : ColumnCount ] = <nl> " Query ID + querier " , / / Column_QueryIdAndQuerierName <nl> " Query Blueprint " , / / Column_QueryBlueprintName <nl> " Items ( accepted / generated ) " , / / Column_ItemCounts <nl> - " Elapsed time " / / Column_ElapsedTime , <nl> + " Elapsed time " , / / Column_ElapsedTime <nl> + " Timestamp query created " , / / Column_TimestampQueryCreated <nl> + " Timestamp query destroyed " , / / Column_TimestampQueryDestroyed <nl> } ; <nl> <nl> const char * SQuery : : toolTips [ SQuery : : ColumnCount ] = <nl> const char * SQuery : : toolTips [ SQuery : : ColumnCount ] = <nl> " Unique id of the query instance and the name of who started that query " , / / Column_QueryIdAndQuerierName <nl> " Name of the blueprint from which the live query was instantiated " , / / Column_QueryBlueprintName <nl> " Number of items that were generated and ended up in the final result set . Notice : a hierarchical query may not necessarily generate items , yet grab the resulting items from one of its children " , / / Column_ItemCounts <nl> - " Elapsed time from start to finish of the query . Notice : don ' t confuse with * consumed * time . " / / Column_ElapsedTime , <nl> + " Elapsed time from start to finish of the query . Notice : don ' t confuse with * consumed * time . " , / / Column_ElapsedTime <nl> + " Timestamp of when the query was created in h : mm : ss : mmm " , / / Column_TimestampQueryCreated <nl> + " Timestamp of when the query was destroyed in h : mm : ss : mmm . Notice : might show some weird value if the query was canceled prematurely . " , / / Column_TimestampQueryDestroyed <nl> } ; <nl> <nl> / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> mmm a / Code / CryPlugins / CryUQS / Interfaces / core / IQueryHistoryConsumer . h <nl> ppp b / Code / CryPlugins / CryUQS / Interfaces / core / IQueryHistoryConsumer . 
h <nl> namespace UQS <nl> / / passed in to AddOrUpdateHistoricQuery ( ) <nl> struct SHistoricQueryOverview <nl> { <nl> - explicit SHistoricQueryOverview ( const ColorF & _color , const char * _szQuerierName , const CQueryID & _queryID , const CQueryID & _parentQueryID , const char * _szQueryBlueprintName , size_t _numGeneratedItems , size_t _numResultingItems , CTimeValue _timeElapsedUntilResult ) ; <nl> + explicit SHistoricQueryOverview ( const ColorF & _color , const char * _szQuerierName , const CQueryID & _queryID , const CQueryID & _parentQueryID , const char * _szQueryBlueprintName , size_t _numGeneratedItems , size_t _numResultingItems , const CTimeValue & _timeElapsedUntilResult , const CTimeValue & _timestampQueryCreated , const CTimeValue & _timestampQueryDestroyed ) ; <nl> <nl> / / TODO : itemType of the generated items <nl> <nl> namespace UQS <nl> size_t numGeneratedItems ; <nl> size_t numResultingItems ; <nl> CTimeValue timeElapsedUntilResult ; <nl> + CTimeValue timestampQueryCreated ; <nl> + CTimeValue timestampQueryDestroyed ; <nl> } ; <nl> <nl> virtual ~ IQueryHistoryConsumer ( ) { } <nl> namespace UQS <nl> virtual void AddDeferredEvaluatorName ( const char * szDeferredEvaluatorName ) = 0 ; <nl> } ; <nl> <nl> - inline IQueryHistoryConsumer : : SHistoricQueryOverview : : SHistoricQueryOverview ( const ColorF & _color , const char * _szQuerierName , const CQueryID & _queryID , const CQueryID & _parentQueryID , const char * _szQueryBlueprintName , size_t _numGeneratedItems , size_t _numResultingItems , CTimeValue _timeElapsedUntilResult ) <nl> + inline IQueryHistoryConsumer : : SHistoricQueryOverview : : SHistoricQueryOverview ( const ColorF & _color , const char * _szQuerierName , const CQueryID & _queryID , const CQueryID & _parentQueryID , const char * _szQueryBlueprintName , size_t _numGeneratedItems , size_t _numResultingItems , const CTimeValue & _timeElapsedUntilResult , const CTimeValue & _timestampQueryCreated , const CTimeValue & _timestampQueryDestroyed ) <nl> : color ( _color ) <nl> , szQuerierName ( _szQuerierName ) <nl> , queryID ( _queryID ) <nl> namespace UQS <nl> , numGeneratedItems ( _numGeneratedItems ) <nl> , numResultingItems ( _numResultingItems ) <nl> , timeElapsedUntilResult ( _timeElapsedUntilResult ) <nl> + , timestampQueryCreated ( _timestampQueryCreated ) <nl> + , timestampQueryDestroyed ( _timestampQueryDestroyed ) <nl> { <nl> / / nothing <nl> } <nl> mmm a / Code / CryPlugins / CryUQS / Shared / CMakeLists . txt <nl> ppp b / Code / CryPlugins / CryUQS / Shared / CMakeLists . txt <nl> add_sources ( " UQS_uber_shared . cpp " <nl> " shared / VariantDict . h " <nl> " shared / VariantDict . cpp " <nl> " shared / FactoryBase . h " <nl> + " shared / TimeValueUtil . h " <nl> + " shared / TimeValueUtil . cpp " <nl> ) <nl> <nl> end_sources ( ) <nl> mmm a / Code / CryPlugins / CryUQS / Shared / SharedIncludes . h <nl> ppp b / Code / CryPlugins / CryUQS / Shared / SharedIncludes . h <nl> <nl> # include " . . / Interfaces / InterfacesIncludes . h " <nl> <nl> # include " shared / UqsString . h " <nl> + # include " shared / TimeValueUtil . h " <nl> # include " shared / VariantDict . h " <nl> # include " shared / FactoryBase . h " <nl> new file mode 100644 <nl> index 0000000000 . . fe1f43ab94 <nl> mmm / dev / null <nl> ppp b / Code / CryPlugins / CryUQS / Shared / shared / TimeValueUtil . cpp <nl> <nl> + / / Copyright 2001 - 2016 Crytek GmbH / Crytek Group . All rights reserved . <nl> + <nl> + # include " StdAfx . h " <nl> + # include " TimeValueUtil . 
h " <nl> + <nl> + / / * INDENT - OFF * - < hard to read code and declarations due to inconsistent indentation > <nl> + <nl> + namespace UQS <nl> + { <nl> + namespace Shared <nl> + { <nl> + <nl> + void CTimeValueUtil : : Split ( const CTimeValue & time , int * pHours , int * pMinutes , int * pSeconds , int * pMilliseconds ) <nl> + { <nl> + const int64 totalMilliseconds = time . GetMilliSecondsAsInt64 ( ) ; <nl> + <nl> + if ( pMilliseconds ) <nl> + { <nl> + * pMilliseconds = totalMilliseconds % 1000 ; <nl> + } <nl> + <nl> + if ( pSeconds ) <nl> + { <nl> + * pSeconds = ( totalMilliseconds / 1000 ) % 60 ; <nl> + } <nl> + <nl> + if ( pMinutes ) <nl> + { <nl> + * pMinutes = ( totalMilliseconds / ( 1000 * 60 ) ) % 60 ; <nl> + } <nl> + <nl> + if ( pHours ) <nl> + { <nl> + * pHours = ( int ) ( totalMilliseconds / ( 1000 * 60 * 60 ) ) ; <nl> + } <nl> + } <nl> + <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 0000000000 . . 071ed900cf <nl> mmm / dev / null <nl> ppp b / Code / CryPlugins / CryUQS / Shared / shared / TimeValueUtil . h <nl> <nl> + / / Copyright 2001 - 2016 Crytek GmbH / Crytek Group . All rights reserved . <nl> + <nl> + # pragma once <nl> + <nl> + / / * INDENT - OFF * - < hard to read code and declarations due to inconsistent indentation > <nl> + <nl> + namespace UQS <nl> + { <nl> + namespace Shared <nl> + { <nl> + <nl> + class CTimeValueUtil <nl> + { <nl> + public : <nl> + <nl> + static void Split ( const CTimeValue & time , int * pHours , int * pMinutes , int * pSeconds , int * pMilliseconds ) ; <nl> + <nl> + } ; <nl> + <nl> + } <nl> + } <nl> mmm a / Code / CryPlugins / CryUQS / Shared / uqs_shared . waf_files <nl> ppp b / Code / CryPlugins / CryUQS / Shared / uqs_shared . waf_files <nl> <nl> " shared / UqsString . cpp " , <nl> " shared / VariantDict . h " , <nl> " shared / VariantDict . cpp " , <nl> - " shared / FactoryBase . h " <nl> + " shared / FactoryBase . h " , <nl> + " shared / TimeValueUtil . h " , <nl> + " shared / TimeValueUtil . cpp " <nl> ] <nl> } <nl> } <nl> \ No newline at end of file <nl> mmm a / Code / GameTemplates / cs / RollingBall / Game . cryproject <nl> ppp b / Code / GameTemplates / cs / RollingBall / Game . cryproject <nl> <nl> " name " : " sys_target_platforms " , <nl> " value " : " pc , ps4 , xboxone , linux " <nl> } , <nl> - { <nl> - " name " : " sys_spec " , <nl> - " value " : " 4 " <nl> - } , <nl> { <nl> " name " : " r_displayinfo " , <nl> " value " : " 0 " <nl> <nl> " name " : " e_waterocean " , <nl> " value " : " 0 " <nl> } , <nl> - { <nl> - " name " : " e_permanentrenderobjects " , <nl> - " value " : " 0 " <nl> - } , <nl> { <nl> " name " : " ca_useIMG_CAF " , <nl> " value " : " 0 " <nl> mmm a / Code / GameTemplates / cs / ThirdPersonShooter / Game . cryproject <nl> ppp b / Code / GameTemplates / cs / ThirdPersonShooter / Game . cryproject <nl> <nl> " name " : " sys_target_platforms " , <nl> " value " : " pc , ps4 , xboxone , linux " <nl> } , <nl> - { <nl> - " name " : " sys_spec " , <nl> - " value " : " 4 " <nl> - } , <nl> { <nl> " name " : " r_displayinfo " , <nl> " value " : " 0 " <nl> <nl> " name " : " e_waterocean " , <nl> " value " : " 0 " <nl> } , <nl> - { <nl> - " name " : " e_permanentrenderobjects " , <nl> - " value " : " 0 " <nl> - } , <nl> { <nl> " name " : " ca_useIMG_CAF " , <nl> " value " : " 0 " <nl> new file mode 100755 <nl> index 0000000000 . . 65755a575b <nl> mmm / dev / null <nl> ppp b / Tools / CryVersionSelector / Utility / InstallEngine . 
bat <nl> <nl> + @ ECHO OFF <nl> + <nl> + REM Initialize values <nl> + SET CRYSELECT = % ~ dp0tools \ CryVersionSelector \ cryselect . exe <nl> + SET ENGINE = % ~ dp0cryengine . cryengine <nl> + SET PAUSE_ON_END = 0 <nl> + <nl> + REM Install CryVersionSelector <nl> + @ ECHO Installing CryVersionSelector . . . <nl> + IF EXIST " % CRYSELECT % " ( <nl> + " % CRYSELECT % " install <nl> + ) ELSE ( <nl> + @ ECHO Unable to find cryselect at " % CRYSELECT % " ! <nl> + SET PAUSE_ON_END = 1 <nl> + ) <nl> + <nl> + REM Register the engine <nl> + @ ECHO Registering engine . . . <nl> + IF NOT " % PAUSE_ON_END % " = = " 1 " ( <nl> + IF EXIST " % ENGINE % " ( <nl> + " % ENGINE % " <nl> + ) ELSE ( <nl> + @ ECHO Unable to register engine at " % ENGINE % " ! <nl> + SET PAUSE_ON_END = 1 <nl> + ) <nl> + ) <nl> + <nl> + REM Pause at the end if an error was detected during execution . <nl> + IF " % PAUSE_ON_END % " = = " 1 " ( <nl> + @ ECHO An error occurred while installing the engine ! <nl> + PAUSE <nl> + ) ELSE ( <nl> + @ ECHO Engine installed successfully . <nl> + PAUSE <nl> + ) <nl> \ No newline at end of file <nl>
|
! I integrate from / / ce / main / . . .
|
CRYTEK/CRYENGINE
|
6d6da0abbf4c032b4df4651c110e2610e2a38c9b
|
2017-08-02T22:05:13Z
|
mmm a / addons / xbmc . python / addon . xml <nl> ppp b / addons / xbmc . python / addon . xml <nl> <nl> < ? xml version = " 1 . 0 " encoding = " UTF - 8 " ? > <nl> - < addon id = " xbmc . python " version = " 2 . 12 . 0 " provider - name = " Team XBMC " > <nl> + < addon id = " xbmc . python " version = " 2 . 13 . 0 " provider - name = " Team XBMC " > <nl> < backwards - compatibility abi = " 2 . 0 " / > <nl> < requires > <nl> < import addon = " xbmc . core " version = " 0 . 1 . 0 " / > <nl> mmm a / xbmc / interfaces / python / PythonSwig . cpp . template <nl> ppp b / xbmc / interfaces / python / PythonSwig . cpp . template <nl> namespace PythonBindings <nl> / / constants <nl> PyModule_AddStringConstant ( module , ( char * ) " __author__ " , ( char * ) " Team XBMC < http : / / xbmc . org > " ) ; <nl> PyModule_AddStringConstant ( module , ( char * ) " __date__ " , ( char * ) " $ { new Date ( ) . toString ( ) } " ) ; <nl> - PyModule_AddStringConstant ( module , ( char * ) " __version__ " , ( char * ) " 2 . 12 . 0 " ) ; <nl> + PyModule_AddStringConstant ( module , ( char * ) " __version__ " , ( char * ) " 2 . 13 . 0 " ) ; <nl> PyModule_AddStringConstant ( module , ( char * ) " __credits__ " , ( char * ) " Team XBMC " ) ; <nl> PyModule_AddStringConstant ( module , ( char * ) " __platform__ " , ( char * ) " ALL " ) ; <nl> <nl>
|
[ python ] bump API version after b1a867653afe37080918bc4bf8109a694f6bb9a2
|
xbmc/xbmc
|
b501042aa999702a755168c23863259aa0ca699c
|
2013-11-01T21:26:18Z
|
mmm a / src / main . cpp <nl> ppp b / src / main . cpp <nl> void static EraseOrphanTx ( uint256 hash ) <nl> BOOST_FOREACH ( const CTxIn & txin , it - > second . vin ) <nl> { <nl> map < uint256 , set < uint256 > > : : iterator itPrev = mapOrphanTransactionsByPrev . find ( txin . prevout . hash ) ; <nl> + if ( itPrev = = mapOrphanTransactionsByPrev . end ( ) ) <nl> + continue ; <nl> itPrev - > second . erase ( hash ) ; <nl> if ( itPrev - > second . empty ( ) ) <nl> mapOrphanTransactionsByPrev . erase ( itPrev ) ; <nl>
|
Fix crashing bug caused by orphan ( s ) with duplicate prevout . hash
|
bitcoin/bitcoin
|
def2fdb4b9b52fa908c11fe2f5a42ea04f8e9f11
|
2014-09-10T14:53:54Z
|
mmm a / . gitattributes <nl> ppp b / . gitattributes <nl> <nl> <nl> # Keep the single include header with LFs to make sure it is uploaded , <nl> # hashed etc with LF <nl> - single_include / * . hpp eol = lf <nl> + single_include / * * / * . hpp eol = lf <nl> # Also keep the LICENCE file with LFs for the same reason <nl> LICENCE . txt eol = lf <nl>
|
Update gitattributes
|
catchorg/Catch2
|
b93284716e1cc6bcd01f4626e985c368333c9374
|
2018-07-23T08:15:52Z
|
mmm a / dbms / src / DataStreams / RemoteBlockInputStream . h <nl> ppp b / dbms / src / DataStreams / RemoteBlockInputStream . h <nl> <nl> namespace DB <nl> { <nl> <nl> - / * * This class allowes one to launch queries on remote replicas of one shard and get results <nl> + / * * This class allows one to launch queries on remote replicas of one shard and get results <nl> * / <nl> class RemoteBlockInputStream : public IProfilingBlockInputStream <nl> { <nl> mmm a / dbms / src / Server / ClusterCopier . cpp <nl> ppp b / dbms / src / Server / ClusterCopier . cpp <nl> class ClusterCopier <nl> <nl> try <nl> { <nl> - / / / CREATE TABLE and DROP PARTITION return empty block <nl> - RemoteBlockInputStream stream ( * connection , query , Block ( ) , context , & current_settings ) ; <nl> - NullBlockOutputStream output ( Block ( ) ) ; <nl> + / / / CREATE TABLE and DROP PARTITION queries return empty block <nl> + RemoteBlockInputStream stream { * connection , query , Block { } , context , & current_settings } ; <nl> + NullBlockOutputStream output { Block { } } ; <nl> copyData ( stream , output ) ; <nl> <nl> if ( increment_and_check_exit ( ) ) <nl>
|
Fixed build after merge . [ # CLICKHOUSE - 3606 ]
|
ClickHouse/ClickHouse
|
07d01d2dfb5f501314fb5d546254dd482940abc7
|
2018-02-22T11:40:23Z
|
mmm a / hphp / hack / src / hh_single_type_check . ml <nl> ppp b / hphp / hack / src / hh_single_type_check . ml <nl> let parse_options ( ) = <nl> let out_extension = ref " . out " in <nl> let like_types = ref false in <nl> let pessimize_types = ref false in <nl> + let coercion_from_dynamic = ref false in <nl> let search_provider = ref " TrieIndex " in <nl> let rust = ref false in <nl> let symbolindex_file = ref None in <nl> let parse_options ( ) = <nl> " - - pessimize - types " , <nl> Arg . Set pessimize_types , <nl> " When unenforceable types are encountered , convert them to like types " ; <nl> + " - - coercion - from - dynamic " , <nl> + Arg . Set coercion_from_dynamic , <nl> + " Allows coercion from dynamic and like types to enforceable types at positions that \ <nl> + HHVM enforces " ; <nl> " - - search - provider " , <nl> Arg . String ( fun str - > search_provider : = str ) , <nl> " Configure the symbol index search provider " ; <nl> let parse_options ( ) = <nl> ~ tco_shallow_class_decl : ( ! shallow_class_decl ) <nl> ~ tco_like_types : ( ! like_types ) <nl> ~ tco_pessimize_types : ( ! pessimize_types ) <nl> + ~ tco_coercion_from_dynamic : ( ! coercion_from_dynamic ) <nl> ~ log_levels : ( ! log_levels ) <nl> ~ po_rust : ! rust <nl> ( ) <nl> mmm a / hphp / hack / src / options / globalOptions . ml <nl> ppp b / hphp / hack / src / options / globalOptions . ml <nl> type t = { <nl> po_rust : bool ; <nl> tco_like_types : bool ; <nl> tco_pessimize_types : bool ; <nl> + tco_coercion_from_dynamic : bool ; <nl> error_codes_treated_strictly : ISet . t ; <nl> tco_check_xhp_attribute : bool ; <nl> } [ @ @ deriving show ] <nl> let default = { <nl> po_rust = false ; <nl> tco_like_types = false ; <nl> tco_pessimize_types = false ; <nl> + tco_coercion_from_dynamic = false ; <nl> error_codes_treated_strictly = ISet . of_list [ ] ; <nl> tco_check_xhp_attribute = false ; <nl> } <nl> let make <nl> ? ( po_rust = default . po_rust ) <nl> ? ( tco_like_types = default . tco_like_types ) <nl> ? ( tco_pessimize_types = default . tco_pessimize_types ) <nl> + ? ( tco_coercion_from_dynamic = default . tco_coercion_from_dynamic ) <nl> ? ( error_codes_treated_strictly = default . error_codes_treated_strictly ) <nl> ? ( tco_check_xhp_attribute = default . tco_check_xhp_attribute ) <nl> ( ) <nl> let make <nl> po_rust ; <nl> tco_like_types ; <nl> tco_pessimize_types ; <nl> + tco_coercion_from_dynamic ; <nl> error_codes_treated_strictly ; <nl> tco_check_xhp_attribute ; <nl> } <nl> let tco_shallow_class_decl t = t . tco_shallow_class_decl <nl> let po_rust t = t . po_rust <nl> let tco_like_types t = t . tco_like_types <nl> let tco_pessimize_types t = t . tco_pessimize_types <nl> + let tco_coercion_from_dynamic t = t . tco_coercion_from_dynamic <nl> let error_codes_treated_strictly t = t . error_codes_treated_strictly <nl> <nl> let tco_ignore_collection_expr_type_arguments t = t . tco_ignore_collection_expr_type_arguments <nl> mmm a / hphp / hack / src / options / globalOptions . mli <nl> ppp b / hphp / hack / src / options / globalOptions . mli <nl> type t = { <nl> i . e . vec < string > = > vec < ~ string > * ) <nl> tco_pessimize_types : bool ; <nl> <nl> + ( * Enables coercion from dynamic and like types to enforceable types <nl> + i . e . dynamic ~ > int , ~ string ~ > string * ) <nl> + tco_coercion_from_dynamic : bool ; <nl> + <nl> ( * Set of codes to be treated as if they were in strict mode files * ) <nl> error_codes_treated_strictly : ISet . t ; <nl> <nl> val make : <nl> ? po_rust : bool - > <nl> ? 
tco_like_types : bool - > <nl> ? tco_pessimize_types : bool - > <nl> + ? tco_coercion_from_dynamic : bool - > <nl> ? error_codes_treated_strictly : ISet . t - > <nl> ? tco_check_xhp_attribute : bool - > <nl> unit - > <nl> val tco_shallow_class_decl : t - > bool <nl> val po_rust : t - > bool <nl> val tco_like_types : t - > bool <nl> val tco_pessimize_types : t - > bool <nl> + val tco_coercion_from_dynamic : t - > bool <nl> val error_codes_treated_strictly : t - > ISet . t <nl> val tco_check_xhp_attribute : t - > bool <nl> mmm a / hphp / hack / src / options / typecheckerOptions . ml <nl> ppp b / hphp / hack / src / options / typecheckerOptions . ml <nl> let ignore_collection_expr_type_arguments = GlobalOptions . tco_ignore_collection_ <nl> let shallow_class_decl = GlobalOptions . tco_shallow_class_decl <nl> let like_types = GlobalOptions . tco_like_types <nl> let pessimize_types = GlobalOptions . tco_pessimize_types <nl> + let coercion_from_dynamic = GlobalOptions . tco_coercion_from_dynamic <nl> <nl> let check_xhp_attribute = GlobalOptions . tco_check_xhp_attribute <nl> mmm a / hphp / hack / src / server / serverConfig . ml <nl> ppp b / hphp / hack / src / server / serverConfig . ml <nl> let load config_filename options = <nl> ~ po_rust : ( local_config . ServerLocalConfig . rust ) <nl> ? tco_like_types : ( bool_opt " like_types " config ) <nl> ? tco_pessimize_types : ( bool_opt " pessimize_types " config ) <nl> + ? tco_coercion_from_dynamic : ( bool_opt " coercion_from_dynamic " config ) <nl> ~ error_codes_treated_strictly : ( prepare_error_codes_treated_strictly config ) <nl> ? tco_check_xhp_attribute : ( bool_opt " check_xhp_attribute " config ) <nl> ( ) <nl> mmm a / hphp / hack / src / typing / dune <nl> ppp b / hphp / hack / src / typing / dune <nl> <nl> type_visitor <nl> typing_arrays <nl> typing_async <nl> - typing_coercion <nl> typing_continuations <nl> typing_deferred_members <nl> typing_dependent_type <nl> <nl> typing_alias <nl> typing_array_access <nl> typing_attributes <nl> + typing_coercion <nl> typing_debug <nl> typing_disposable <nl> typing_enum <nl> mmm a / hphp / hack / src / typing / tast_check / type_test_hint_check . ml <nl> ppp b / hphp / hack / src / typing / tast_check / type_test_hint_check . ml <nl> let visitor = object ( this ) <nl> let bounds = TySet . elements ( Env . get_upper_bounds acc . env name ) in <nl> List . fold_left bounds ~ f : this # on_type ~ init : acc <nl> | AKgeneric name - > <nl> - begin match Env . get_reified acc . env name , Env . get_enforceable acc . env name with <nl> - | Nast . Erased , _ - > update acc @ @ <nl> - Invalid ( r , " an erased generic type parameter " ) <nl> - | Nast . SoftReified , _ - > update acc @ @ <nl> - Invalid ( r , " a soft reified generic type parameter " ) <nl> - | Nast . Reified , false - > update acc @ @ <nl> - Invalid ( r , " a reified type parameter that is not marked < < __Enforceable > > " ) <nl> - | Nast . Reified , true - > <nl> - acc end <nl> + this # check_generic acc r name <nl> | AKnewtype _ - > update acc @ @ Invalid ( r , " a newtype " ) <nl> | AKdependent _ - > update acc @ @ Invalid ( r , " an expression dependent type " ) <nl> method ! on_tanon acc r _arity _id = <nl> let visitor = object ( this ) <nl> update acc @ @ Invalid ( r , " a union " ) <nl> method ! on_tobject acc r = update acc @ @ Invalid ( r , " the object type " ) <nl> method ! on_tclass acc r cls _ tyl = <nl> - match Env . get_class acc . env ( snd cls ) with <nl> + this # check_class_targs acc r ( snd cls ) tyl <nl> + method ! 
on_tapply acc r ( _ , name ) tyl = <nl> + if tyl < > [ ] & & Typing_env . is_typedef name <nl> + then update acc @ @ Invalid ( r , " a type with generics , because generics are erased at runtime " ) <nl> + else acc <nl> + method ! on_tarraykind acc r _array_kind = <nl> + update acc @ @ Invalid ( r , " an array type " ) <nl> + <nl> + method is_wildcard : type a . a ty - > bool = function <nl> + | _ , Tabstract ( AKgeneric name , _ ) - > <nl> + Env . is_fresh_generic_parameter name <nl> + | _ - > false <nl> + method check_generic acc r name = <nl> + match Env . get_reified acc . env name , Env . get_enforceable acc . env name with <nl> + | Nast . Erased , _ - > update acc @ @ <nl> + Invalid ( r , " an erased generic type parameter " ) <nl> + | Nast . SoftReified , _ - > update acc @ @ <nl> + Invalid ( r , " a soft reified generic type parameter " ) <nl> + | Nast . Reified , false - > update acc @ @ <nl> + Invalid ( r , " a reified type parameter that is not marked < < __Enforceable > > " ) <nl> + | Nast . Reified , true - > <nl> + acc <nl> + method check_class_targs : type a . _ - > _ - > _ - > a ty list - > _ = fun acc r c_name targs - > <nl> + match Env . get_class acc . env c_name with <nl> | Some tc - > <nl> let tparams = Cls . tparams tc in <nl> - begin match tyl with <nl> + begin match targs with <nl> | [ ] - > acc ( * this case should really be handled by the fold2 , <nl> but we still allow class hints without args in certain places * ) <nl> - | tyl - > <nl> + | targs - > <nl> let open List . Or_unequal_lengths in <nl> - begin match List . fold2 ~ init : acc tyl tparams ~ f : ( fun acc targ tparam - > <nl> + begin match List . fold2 ~ init : acc targs tparams ~ f : ( fun acc targ tparam - > <nl> if this # is_wildcard targ <nl> then acc <nl> else <nl> let visitor = object ( this ) <nl> end <nl> end <nl> | None - > acc <nl> - method ! on_tapply acc r ( _ , name ) tyl = <nl> - if tyl < > [ ] & & Typing_env . is_typedef name <nl> - then update acc @ @ Invalid ( r , " a type with generics , because generics are erased at runtime " ) <nl> - else acc <nl> - method ! on_tarraykind acc r _array_kind = <nl> - update acc @ @ Invalid ( r , " an array type " ) <nl> - method is_wildcard = function <nl> - | _ , Tabstract ( AKgeneric name , _ ) - > <nl> - Env . is_fresh_generic_parameter name <nl> - | _ - > false <nl> end <nl> <nl> let validate_type env root_ty emit_error = <nl> mmm a / hphp / hack / src / typing / tast_env . mli <nl> ppp b / hphp / hack / src / typing / tast_env . mli <nl> val get_anonymous_lambda_types : env - > int - > Tast . ty list <nl> val typing_env_as_tast_env : Typing_env . env - > env <nl> val tast_env_as_typing_env : env - > Typing_env . env <nl> <nl> - val can_coerce : env - > Tast . ty - > Tast . ty - > env option <nl> + val can_coerce : env - > Tast . ty - > <nl> + ? ty_expect_decl : Typing_defs . decl Typing_defs . ty - > Tast . ty - > env option <nl> ( * * Return None when coercion cannot occur from the second arg to the third , <nl> otherwise return Some env where env is the first arg updated with coercion <nl> constraints . * ) <nl> mmm a / hphp / hack / src / typing / typing . ml <nl> ppp b / hphp / hack / src / typing / typing . ml <nl> and stmt_ env pos st = <nl> end <nl> else env in <nl> let return_type = TR . strip_condition_type_in_return env return_type in <nl> - let env = Type . coerce_type expr_pos Reason . URreturn env rty return_type in <nl> + let env = Type . coerce_type expr_pos Reason . URreturn env rty ? 
ty_expect_decl : return_type_decl return_type in <nl> let env = LEnv . move_and_merge_next_in_cont env C . Exit in <nl> env , T . Return ( Some te ) <nl> | Do ( b , e ) as st - > <nl> and check_expected_ty message env inferred_ty ( expected : ExpectedTy . t option ) = <nl> pos = p ; <nl> reason = ur ; <nl> locl_ty = expected_ty ; <nl> - decl_ty = _ ; <nl> + decl_ty = ty_expect_decl ; <nl> } ) - > <nl> Typing_log . ( log_with_level env " typing " 1 ( fun ( ) - > <nl> log_types p env <nl> [ Log_head ( Printf . sprintf " Typing . check_expected_ty % s " message , <nl> [ Log_type ( " inferred_ty " , inferred_ty ) ; <nl> Log_type ( " expected_ty " , expected_ty ) ] ) ] ) ) ; <nl> - Type . coerce_type p ur env inferred_ty expected_ty <nl> + Type . coerce_type p ur env inferred_ty ? ty_expect_decl expected_ty <nl> <nl> and new_object <nl> ~ ( expected : ExpectedTy . t option ) <nl> mmm a / hphp / hack / src / typing / typing_coercion . ml <nl> ppp b / hphp / hack / src / typing / typing_coercion . ml <nl> open Typing_defs <nl> module Env = Typing_env <nl> module SubType = Typing_subtype <nl> <nl> + <nl> + let validator = <nl> + let open Type_test_hint_check in <nl> + object ( _this ) <nl> + inherit [ validation_state ] Type_visitor . type_visitor as _super <nl> + <nl> + method ! on_tthis acc r = update acc @ @ Invalid ( r , " the this type " ) <nl> + method ! on_tapply acc r ( _ , name ) tyl = <nl> + ( * TODO ( T45690473 ) : follow type aliases in the ` type ` case and allow enforceable targets * ) <nl> + if Typing_env . is_typedef name | | Typing_env . is_enum ( Env . tast_env_as_typing_env acc . env ) name <nl> + then update acc @ @ Invalid ( r , " a typedef or enum " ) <nl> + else visitor # check_class_targs acc r name tyl <nl> + method ! on_tgeneric acc r name = visitor # check_generic acc r name <nl> + method ! on_taccess acc r _ = update acc @ @ Invalid ( r , " a type const " ) <nl> + method ! on_tarray acc r ty1_opt ty2_opt = <nl> + match ty1_opt , ty2_opt with <nl> + | None , None - > acc <nl> + | _ - > update acc @ @ Invalid ( r , " an array type " ) <nl> + ( * Optimization , don ' t visit type in dynamic ~ > ~ T case , fall back to subtyping * ) <nl> + method ! on_tlike acc r _ = update acc @ @ Invalid ( r , " a like type " ) <nl> + method ! on_tprim acc r prim = visitor # on_tprim acc r prim <nl> + method ! on_tfun acc r _ = update acc @ @ Invalid ( r , " a function type " ) <nl> + method ! on_ttuple acc r _ = update acc @ @ Invalid ( r , " a tuple type " ) <nl> + method ! on_tshape acc r _ _ = update acc @ @ Invalid ( r , " a shape type " ) <nl> + end <nl> + <nl> + let supports_coercion_from_dynamic env ( ty_expect_decl : decl ty ) = <nl> + let open Type_test_hint_check in <nl> + let { validity ; env ; _ } = validator # on_type { <nl> + env = Tast_env . typing_env_as_tast_env env ; <nl> + validity = Valid ; <nl> + } ty_expect_decl in <nl> + match validity with <nl> + | Valid - > Some ( Tast_env . tast_env_as_typing_env env ) <nl> + | Invalid ( _reason , _kind ) - > None <nl> + <nl> ( * <nl> * These are the main coercion functions . <nl> * <nl> module SubType = Typing_subtype <nl> * ( you can coerce in Awaitable ) <nl> * 3 . t1 ~ > t2 | - t1 ~ > ? t2 <nl> * ( you can coerce t1 to optional type if the inner type is a valid coercion target ) <nl> - * 4 . t1 < : t2 | - t1 ~ > t2 <nl> + * 4 . t is enforceable | - dynamic ~ > t <nl> + * ( coercion from dynamic to enforceable types is permitted ) <nl> + * 5 . 
t1 < : t2 | - t1 ~ > t2 <nl> * ( you can coerce t1 to any of its supertypes ) <nl> * <nl> * This boils down to running the normal sub_type procedure whenever possible , <nl> module SubType = Typing_subtype <nl> * ) <nl> <nl> ( * checks coercion that isn ' t just subtyping * ) <nl> - let rec can_coerce env ty_have ty_expect = <nl> + let rec can_coerce env ty_have ? ty_expect_decl ty_expect = <nl> let env , ety_expect = Env . expand_type env ty_expect in <nl> let env , ety_have = Env . expand_type env ty_have in <nl> match ety_have , ety_expect with <nl> <nl> | _ , ( _ , Tdynamic ) - > Some env <nl> <nl> - | _ , ( _ , Toption ty ) - > can_coerce env ty_have ty <nl> + | _ , ( _ , Toption ty ) - > can_coerce env ty_have ? ty_expect_decl ty <nl> + <nl> + ( * dynamic ~ > T if T is enforceable <nl> + * <nl> + * We only allow coercion if the coercion function was provided a decl ty <nl> + * target for coercion . The reason is because locl tys lose information about <nl> + * their origin , which can distinguish enforceable and unenforceable types . * ) <nl> + | ( _ , Tdynamic ) , _ <nl> + when ( TypecheckerOptions . coercion_from_dynamic ( Env . get_tcopt env ) ) - > <nl> + let open Option in <nl> + ty_expect_decl > > = ( supports_coercion_from_dynamic env ) <nl> <nl> | _ - > None <nl> <nl> ( * does coercion , including subtyping * ) <nl> - let coerce_type p ? sub_fn : ( sub = Typing_ops . sub_type ) ur env ty_have ty_expect = <nl> - match can_coerce env ty_have ty_expect with <nl> + let coerce_type p ? sub_fn : ( sub = Typing_ops . sub_type ) ur env ty_have ? ty_expect_decl ty_expect = <nl> + match can_coerce env ty_have ? ty_expect_decl ty_expect with <nl> | Some e - > e <nl> | None - > sub p ur env ty_have ty_expect <nl> <nl> mmm a / hphp / hack / src / typing / typing_ops . ml <nl> ppp b / hphp / hack / src / typing / typing_ops . ml <nl> let sub_type p ur env ty_sub ty_super = <nl> ( fun ( ) - > Typing_utils . sub_type env ty_sub ty_super ) <nl> ( fun ( ) - > env ) <nl> <nl> - let coerce_type ? sub_fn : ( sub = sub_type ) p ur env ty_have ty_expect = <nl> - Typing_utils . coerce_type ~ sub_fn : sub p ur env ty_have ty_expect <nl> + let coerce_type ? sub_fn : ( sub = sub_type ) p ur env ty_have ? ty_expect_decl ty_expect = <nl> + Typing_utils . coerce_type ~ sub_fn : sub p ur env ty_have ? ty_expect_decl ty_expect <nl> <nl> - let can_coerce env ty_have ty_expect = <nl> - Typing_utils . can_coerce env ty_have ty_expect <nl> + let can_coerce env ty_have ? ty_expect_decl ty_expect = <nl> + Typing_utils . can_coerce env ty_have ? ty_expect_decl ty_expect <nl> <nl> let sub_type_decl p ur env ty_sub ty_super = <nl> let env , ty_super = Typing_utils . localize_with_self env ty_super in <nl> mmm a / hphp / hack / src / typing / typing_utils . ml <nl> ppp b / hphp / hack / src / typing / typing_utils . ml <nl> let localize_with_self x = ! localize_with_self_ref x <nl> type coerce_type = <nl> Pos . t - > <nl> ? sub_fn : ( Pos . t - > Reason . ureason - > Env . env - > locl ty - > locl ty - > Env . env ) - > <nl> - Reason . ureason - > Env . env - > locl ty - > locl ty - > Env . env <nl> + Reason . ureason - > Env . env - > locl ty - > ? ty_expect_decl : decl ty - > locl ty - > Env . env <nl> let ( coerce_type_ref : coerce_type ref ) = ref not_implemented <nl> let coerce_type x = ! coerce_type_ref x <nl> <nl> - type can_coerce = Env . env - > locl ty - > locl ty - > Env . env option <nl> + type can_coerce = Env . env - > locl ty - > ? ty_expect_decl : decl ty - > locl ty - > Env . 
env option <nl> let ( can_coerce_ref : can_coerce ref ) = ref not_implemented <nl> let can_coerce x = ! can_coerce_ref x <nl> <nl> new file mode 100644 <nl> index 00000000000 . . 6a3ba4c6205 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / HH_FLAGS <nl> <nl> + - - all - errors <nl> + - - coercion - from - dynamic <nl> + - - like - types <nl> new file mode 100644 <nl> index 00000000000 . . 6a3ba4c6205 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / decl_types / HH_FLAGS <nl> <nl> + - - all - errors <nl> + - - coercion - from - dynamic <nl> + - - like - types <nl> new file mode 100644 <nl> index 00000000000 . . f0dabcb5b05 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / decl_types / taccess . php <nl> <nl> + < ? hh / / strict <nl> + / / Copyright 2004 - present Facebook . All Rights Reserved . <nl> + <nl> + function dyn ( ) : dynamic { return 4 ; } <nl> + <nl> + final class C { <nl> + const type T = int ; <nl> + <nl> + public function f ( ) : this : : T { <nl> + return dyn ( ) ; / / error <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 9fc16616463 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / decl_types / taccess . php . exp <nl> <nl> + File " taccess . php " , line 10 , characters 12 - 16 : <nl> + Invalid return type ( Typing [ 4110 ] ) <nl> + File " taccess . php " , line 9 , characters 24 - 30 : <nl> + This is an int <nl> + File " taccess . php " , line 7 , characters 18 - 20 : <nl> + resulting from expanding the type constant C : : T <nl> + File " taccess . php " , line 4 , characters 17 - 23 : <nl> + It is incompatible with a dynamic value <nl> new file mode 100644 <nl> index 00000000000 . . bf5af56ba38 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / decl_types / tany . php <nl> <nl> + < ? hh / / partial <nl> + / / Copyright 2004 - present Facebook . All Rights Reserved . <nl> + <nl> + function dyn ( ) : dynamic { return 4 ; } <nl> + <nl> + function f ( ) { <nl> + return dyn ( ) ; / / ok <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 4269126fceb <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / decl_types / tany . php . exp <nl> @ @ - 0 , 0 + 1 @ @ <nl> + No errors <nl> new file mode 100644 <nl> index 00000000000 . . 5d3903a3454 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / decl_types / tapply_class . php <nl> <nl> + < ? hh / / strict <nl> + / / Copyright 2004 - present Facebook . All Rights Reserved . <nl> + <nl> + function dyn ( ) : dynamic { return 4 ; } <nl> + <nl> + class C { } <nl> + class E < T > { } <nl> + class R < reify T > { } <nl> + <nl> + function concrete ( ) : C { <nl> + return dyn ( ) ; / / ok <nl> + } <nl> + <nl> + function erased ( ) : E < int > { <nl> + return dyn ( ) ; / / error <nl> + } <nl> + <nl> + function reified ( ) : R < int > { <nl> + return dyn ( ) ; / / ok <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . baf15639e2a <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / decl_types / tapply_class . php . exp <nl> <nl> + File " tapply_class . php " , line 15 , characters 10 - 14 : <nl> + Invalid return type ( Typing [ 4110 ] ) <nl> + File " tapply_class . 
php " , line 14 , characters 20 - 25 : <nl> + This is an object of type E < int > <nl> + File " tapply_class . php " , line 4 , characters 17 - 23 : <nl> + It is incompatible with a dynamic value <nl> new file mode 100644 <nl> index 00000000000 . . 8ad416106c9 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / decl_types / tapply_enum . php <nl> <nl> + < ? hh / / strict <nl> + / / Copyright 2004 - present Facebook . All Rights Reserved . <nl> + <nl> + function dyn ( ) : dynamic { return 4 ; } <nl> + <nl> + enum Pos : int { } <nl> + <nl> + function f ( ) : Pos { <nl> + return dyn ( ) ; / / error <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . b3d67a24599 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / decl_types / tapply_enum . php . exp <nl> <nl> + File " tapply_enum . php " , line 9 , characters 10 - 14 : <nl> + Invalid return type ( Typing [ 4110 ] ) <nl> + File " tapply_enum . php " , line 8 , characters 15 - 17 : <nl> + This is a value of Pos <nl> + File " tapply_enum . php " , line 4 , characters 17 - 23 : <nl> + It is incompatible with a dynamic value <nl> new file mode 100644 <nl> index 00000000000 . . 5235d29241f <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / decl_types / tapply_typedef . php <nl> <nl> + < ? hh / / strict <nl> + / / Copyright 2004 - present Facebook . All Rights Reserved . <nl> + <nl> + function dyn ( ) : dynamic { return 4 ; } <nl> + <nl> + type X = int ; <nl> + newtype Y = int ; <nl> + <nl> + / / TODO ( T45690473 ) : being overly conservative in this case <nl> + function f ( ) : X { <nl> + return dyn ( ) ; / / error <nl> + } <nl> + <nl> + function g ( ) : Y { <nl> + return dyn ( ) ; / / error <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 8a82d6739a0 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / decl_types / tapply_typedef . php . exp <nl> <nl> + File " tapply_typedef . php " , line 11 , characters 10 - 14 : <nl> + Invalid return type ( Typing [ 4110 ] ) <nl> + File " tapply_typedef . php " , line 10 , characters 15 - 15 : <nl> + This is an int <nl> + File " tapply_typedef . php " , line 4 , characters 17 - 23 : <nl> + It is incompatible with a dynamic value <nl> + File " tapply_typedef . php " , line 15 , characters 10 - 14 : <nl> + Invalid return type ( Typing [ 4110 ] ) <nl> + File " tapply_typedef . php " , line 14 , characters 15 - 15 : <nl> + This is an int <nl> + File " tapply_typedef . php " , line 4 , characters 17 - 23 : <nl> + It is incompatible with a dynamic value <nl> new file mode 100644 <nl> index 00000000000 . . deca0234684 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / decl_types / tarray . php <nl> <nl> + < ? hh / / partial <nl> + / / Copyright 2004 - present Facebook . All Rights Reserved . <nl> + <nl> + function dyn ( ) : dynamic { return 4 ; } <nl> + <nl> + function akany ( ) : array { <nl> + return dyn ( ) ; / / ok <nl> + } <nl> + <nl> + function akvec ( ) : array < int > { <nl> + return dyn ( ) ; / / error <nl> + } <nl> + <nl> + function akmap ( ) : array < int , string > { <nl> + return dyn ( ) ; / / error <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 9b9a474e8ce <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / decl_types / tarray . php . exp <nl> <nl> + File " tarray . 
php " , line 11 , characters 10 - 14 : <nl> + Invalid return type ( Typing [ 4110 ] ) <nl> + File " tarray . php " , line 10 , characters 19 - 28 : <nl> + This is an array ( used like a vector ) <nl> + File " tarray . php " , line 4 , characters 17 - 23 : <nl> + It is incompatible with a dynamic value <nl> + File " tarray . php " , line 15 , characters 10 - 14 : <nl> + Invalid return type ( Typing [ 4110 ] ) <nl> + File " tarray . php " , line 14 , characters 19 - 36 : <nl> + This is an array ( used like a hashtable ) <nl> + File " tarray . php " , line 4 , characters 17 - 23 : <nl> + It is incompatible with a dynamic value <nl> new file mode 100644 <nl> index 00000000000 . . 52c6cd5b1f9 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / decl_types / tdarray . php <nl> <nl> + < ? hh / / partial <nl> + / / Copyright 2004 - present Facebook . All Rights Reserved . <nl> + <nl> + function dyn ( ) : dynamic { return array ( 3 , 4 ) ; } <nl> + <nl> + / / darrays are only enforced as arrays , so it is not safe to allow dynamic to <nl> + / / coerce to darray <nl> + <nl> + function without_targs ( ) : darray { <nl> + return dyn ( ) ; / / error <nl> + } <nl> + <nl> + function with_targs ( ) : darray < int , string > { <nl> + return dyn ( ) ; / / error <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 099e0435e82 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / decl_types / tdarray . php . exp <nl> <nl> + File " tdarray . php " , line 10 , characters 10 - 14 : <nl> + Invalid return type ( Typing [ 4110 ] ) <nl> + File " tdarray . php " , line 9 , characters 27 - 32 : <nl> + This is a darray <nl> + File " tdarray . php " , line 4 , characters 17 - 23 : <nl> + It is incompatible with a dynamic value <nl> + File " tdarray . php " , line 14 , characters 10 - 14 : <nl> + Invalid return type ( Typing [ 4110 ] ) <nl> + File " tdarray . php " , line 13 , characters 24 - 42 : <nl> + This is a darray <nl> + File " tdarray . php " , line 4 , characters 17 - 23 : <nl> + It is incompatible with a dynamic value <nl> new file mode 100644 <nl> index 00000000000 . . 4eaead96495 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / decl_types / tdynamic . php <nl> <nl> + < ? hh / / strict <nl> + / / Copyright 2004 - present Facebook . All Rights Reserved . <nl> + <nl> + function dyn ( ) : dynamic { return 4 ; } <nl> + <nl> + function f ( ) : dynamic { <nl> + return dyn ( ) ; / / ok <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 4269126fceb <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / decl_types / tdynamic . php . exp <nl> @ @ - 0 , 0 + 1 @ @ <nl> + No errors <nl> new file mode 100644 <nl> index 00000000000 . . f555a16442a <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / decl_types / tfun . php <nl> <nl> + < ? hh / / strict <nl> + / / Copyright 2004 - present Facebook . All Rights Reserved . <nl> + <nl> + function dyn ( ) : dynamic { return ( ) = = > 4 ; } <nl> + <nl> + function f ( ) : ( function ( ) : string ) { <nl> + return dyn ( ) ; / / error <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 47ab7e34bac <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / decl_types / tfun . php . exp <nl> <nl> + File " tfun . 
php " , line 7 , characters 10 - 14 : <nl> + Invalid return type ( Typing [ 4110 ] ) <nl> + File " tfun . php " , line 6 , characters 15 - 35 : <nl> + This is a function <nl> + File " tfun . php " , line 4 , characters 17 - 23 : <nl> + It is incompatible with a dynamic value <nl> new file mode 100644 <nl> index 00000000000 . . 9f2f673cbd6 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / decl_types / tgeneric . php <nl> <nl> + < ? hh / / strict <nl> + / / Copyright 2004 - present Facebook . All Rights Reserved . <nl> + <nl> + function dyn ( ) : dynamic { return 4 ; } <nl> + <nl> + function erase < Terase > ( ) : Terase { <nl> + return dyn ( ) ; / / error ; <nl> + } <nl> + <nl> + function reify < reify Treify > ( ) : Treify { <nl> + return dyn ( ) ; / / error <nl> + } <nl> + <nl> + function enforce < < < __Enforceable > > reify Tenforce > ( ) : Tenforce { <nl> + return dyn ( ) ; / / ok <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 582884fe750 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / decl_types / tgeneric . php . exp <nl> <nl> + File " tgeneric . php " , line 7 , characters 10 - 14 : <nl> + Invalid return type ( Typing [ 4110 ] ) <nl> + File " tgeneric . php " , line 6 , characters 27 - 32 : <nl> + This is a value of generic type Terase <nl> + File " tgeneric . php " , line 4 , characters 17 - 23 : <nl> + It is incompatible with a dynamic value <nl> + File " tgeneric . php " , line 11 , characters 10 - 14 : <nl> + Invalid return type ( Typing [ 4110 ] ) <nl> + File " tgeneric . php " , line 10 , characters 33 - 38 : <nl> + This is a value of generic type Treify <nl> + File " tgeneric . php " , line 4 , characters 17 - 23 : <nl> + It is incompatible with a dynamic value <nl> new file mode 100644 <nl> index 00000000000 . . d0c94a83d8c <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / decl_types / tlike . php <nl> <nl> + < ? hh / / strict <nl> + / / Copyright 2004 - present Facebook . All Rights Reserved . <nl> + <nl> + / / this is just basic subtyping <nl> + <nl> + function dyn ( ) : dynamic { return 4 ; } <nl> + <nl> + function f ( ) : ~ int { <nl> + return dyn ( ) ; / / ok <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 4269126fceb <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / decl_types / tlike . php . exp <nl> @ @ - 0 , 0 + 1 @ @ <nl> + No errors <nl> new file mode 100644 <nl> index 00000000000 . . ce8982f3be7 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / decl_types / tmixed . php <nl> <nl> + < ? hh / / strict <nl> + / / Copyright 2004 - present Facebook . All Rights Reserved . <nl> + <nl> + function dyn ( ) : dynamic { return 4 ; } <nl> + <nl> + function f ( ) : mixed { <nl> + return dyn ( ) ; / / ok <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 4269126fceb <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / decl_types / tmixed . php . exp <nl> @ @ - 0 , 0 + 1 @ @ <nl> + No errors <nl> new file mode 100644 <nl> index 00000000000 . . d99ddc4386b <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / decl_types / tnonnull . php <nl> <nl> + < ? hh / / strict <nl> + / / Copyright 2004 - present Facebook . All Rights Reserved . 
<nl> + <nl> + function dyn ( ) : dynamic { return null ; } <nl> + <nl> + function f ( ) : nonnull { <nl> + return dyn ( ) ; / / ok <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 4269126fceb <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / decl_types / tnonnull . php . exp <nl> @ @ - 0 , 0 + 1 @ @ <nl> + No errors <nl> new file mode 100644 <nl> index 00000000000 . . a6eb262eeb5 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / decl_types / tnothing . php <nl> <nl> + < ? hh / / strict <nl> + / / Copyright 2004 - present Facebook . All Rights Reserved . <nl> + <nl> + function dyn ( ) : dynamic { return null ; } <nl> + <nl> + function f ( ) : nothing { <nl> + return dyn ( ) ; / / ok <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 4269126fceb <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / decl_types / tnothing . php . exp <nl> @ @ - 0 , 0 + 1 @ @ <nl> + No errors <nl> new file mode 100644 <nl> index 00000000000 . . e82838165aa <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / decl_types / toption . php <nl> <nl> + < ? hh / / strict <nl> + / / Copyright 2004 - present Facebook . All Rights Reserved . <nl> + <nl> + function dyn ( ) : dynamic { return varray [ ] ; } <nl> + <nl> + function enforceable ( ) : ? int { <nl> + return dyn ( ) ; / / ok <nl> + } <nl> + <nl> + function unenforceable ( ) : ? ( int , int ) { <nl> + return dyn ( ) ; / / error <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . d97542506f5 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / decl_types / toption . php . exp <nl> <nl> + File " toption . php " , line 11 , characters 10 - 14 : <nl> + Invalid return type ( Typing [ 4110 ] ) <nl> + File " toption . php " , line 10 , characters 28 - 37 : <nl> + This is a tuple of size 2 <nl> + File " toption . php " , line 4 , characters 17 - 23 : <nl> + It is incompatible with a dynamic value <nl> new file mode 100644 <nl> index 00000000000 . . 3404d1a568b <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / decl_types / tprim . php <nl> <nl> + < ? hh / / strict <nl> + / / Copyright 2004 - present Facebook . All Rights Reserved . <nl> + <nl> + function dyn ( ) : dynamic { return 4 ; } <nl> + <nl> + function tnull ( ) : null { <nl> + return dyn ( ) ; / / ok <nl> + } <nl> + <nl> + function tvoid ( ) : void { <nl> + return dyn ( ) ; / / cannot return <nl> + } <nl> + <nl> + function tint ( ) : int { <nl> + return dyn ( ) ; / / ok <nl> + } <nl> + <nl> + function tbool ( ) : bool { <nl> + return dyn ( ) ; / / ok <nl> + } <nl> + <nl> + function tstring ( ) : string { <nl> + return dyn ( ) ; / / ok <nl> + } <nl> + <nl> + function tresource ( ) : resource { <nl> + return dyn ( ) ; / / ok <nl> + } <nl> + <nl> + function tnum ( ) : num { <nl> + return dyn ( ) ; / / ok <nl> + } <nl> + <nl> + function tarraykey ( ) : arraykey { <nl> + return dyn ( ) ; / / ok <nl> + } <nl> + <nl> + function tnoreturn ( ) : noreturn { <nl> + return dyn ( ) ; / / cannot return <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 04a4615ae79 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / decl_types / tprim . php . exp <nl> <nl> + File " tprim . 
php " , line 11 , characters 10 - 14 : <nl> + Invalid return type ( Typing [ 4110 ] ) <nl> + File " tprim . php " , line 10 , characters 19 - 22 : <nl> + This is void <nl> + File " tprim . php " , line 4 , characters 17 - 23 : <nl> + It is incompatible with a dynamic value <nl> + File " tprim . php " , line 11 , characters 3 - 15 : <nl> + You cannot return a value ( Typing [ 4084 ] ) <nl> + File " tprim . php " , line 10 , characters 19 - 22 : <nl> + This is a void function <nl> + File " tprim . php " , line 39 , characters 10 - 14 : <nl> + Invalid return type ( Typing [ 4110 ] ) <nl> + File " tprim . php " , line 38 , characters 23 - 30 : <nl> + This is noreturn ( throws or exits ) <nl> + File " tprim . php " , line 4 , characters 17 - 23 : <nl> + It is incompatible with a dynamic value <nl> new file mode 100644 <nl> index 00000000000 . . 0bdedb4a4fa <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / decl_types / tshape . php <nl> <nl> + < ? hh / / strict <nl> + / / Copyright 2004 - present Facebook . All Rights Reserved . <nl> + <nl> + function dyn ( ) : dynamic { return darray [ ] ; } <nl> + <nl> + function f ( ) : shape ( ' a ' = > int ) { <nl> + return dyn ( ) ; / / error <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 5e5e8cb4956 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / decl_types / tshape . php . exp <nl> <nl> + File " tshape . php " , line 7 , characters 10 - 14 : <nl> + Invalid return type ( Typing [ 4110 ] ) <nl> + File " tshape . php " , line 6 , characters 15 - 31 : <nl> + This is a shape <nl> + File " tshape . php " , line 4 , characters 17 - 23 : <nl> + It is incompatible with a dynamic value <nl> new file mode 100644 <nl> index 00000000000 . . 59d0b9806aa <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / decl_types / tthis . php <nl> <nl> + < ? hh / / strict <nl> + / / Copyright 2004 - present Facebook . All Rights Reserved . <nl> + <nl> + function dyn ( ) : dynamic { return 4 ; } <nl> + <nl> + final class C { <nl> + public function f ( ) : this { <nl> + return dyn ( ) ; / / error <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 30a6b772007 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / decl_types / tthis . php . exp <nl> <nl> + File " tthis . php " , line 8 , characters 12 - 16 : <nl> + Invalid return type ( Typing [ 4110 ] ) <nl> + File " tthis . php " , line 6 , characters 13 - 13 : <nl> + This is an object of type C <nl> + File " tthis . php " , line 4 , characters 17 - 23 : <nl> + It is incompatible with a dynamic value <nl> new file mode 100644 <nl> index 00000000000 . . af85a4a583b <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / decl_types / ttuple . php <nl> <nl> + < ? hh / / strict <nl> + / / Copyright 2004 - present Facebook . All Rights Reserved . <nl> + <nl> + function dyn ( ) : dynamic { return varray [ ] ; } <nl> + <nl> + function f ( ) : ( int , int ) { <nl> + return dyn ( ) ; / / error <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . bcad5abdc4e <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / decl_types / ttuple . php . exp <nl> <nl> + File " ttuple . php " , line 7 , characters 10 - 14 : <nl> + Invalid return type ( Typing [ 4110 ] ) <nl> + File " ttuple . 
php " , line 6 , characters 15 - 24 : <nl> + This is a tuple of size 2 <nl> + File " ttuple . php " , line 4 , characters 17 - 23 : <nl> + It is incompatible with a dynamic value <nl> new file mode 100644 <nl> index 00000000000 . . 2f837e91d42 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / decl_types / tvarray . php <nl> <nl> + < ? hh / / partial <nl> + / / Copyright 2004 - present Facebook . All Rights Reserved . <nl> + <nl> + function dyn ( ) : dynamic { return array ( " 3 " = > 4 ) ; } <nl> + <nl> + / / varrays are only enforced as arrays , so it is not safe to allow dynamic to <nl> + / / coerce to varray <nl> + <nl> + function without_targs ( ) : varray { <nl> + return dyn ( ) ; / / error <nl> + } <nl> + <nl> + function with_targs ( ) : varray < int > { <nl> + return dyn ( ) ; / / error <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . e31ec046119 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / decl_types / tvarray . php . exp <nl> <nl> + File " tvarray . php " , line 10 , characters 10 - 14 : <nl> + Invalid return type ( Typing [ 4110 ] ) <nl> + File " tvarray . php " , line 9 , characters 27 - 32 : <nl> + This is a varray <nl> + File " tvarray . php " , line 4 , characters 17 - 23 : <nl> + It is incompatible with a dynamic value <nl> + File " tvarray . php " , line 14 , characters 10 - 14 : <nl> + Invalid return type ( Typing [ 4110 ] ) <nl> + File " tvarray . php " , line 13 , characters 24 - 34 : <nl> + This is a varray <nl> + File " tvarray . php " , line 4 , characters 17 - 23 : <nl> + It is incompatible with a dynamic value <nl> new file mode 100644 <nl> index 00000000000 . . af126ee5dde <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / decl_types / tvarray_or_darray . php <nl> <nl> + < ? hh / / partial <nl> + / / Copyright 2004 - present Facebook . All Rights Reserved . <nl> + <nl> + function dyn ( ) : dynamic { return array ( 4 , 5 , 6 ) ; } <nl> + <nl> + / / not all HHVM arrays are considered varray_or_darray , <nl> + / / so dynamic ~ > varray_or_darray is invalid <nl> + function without_targs ( ) : varray_or_darray { <nl> + return dyn ( ) ; / / error <nl> + } <nl> + <nl> + function with_targs ( ) : varray_or_darray < int > { <nl> + return dyn ( ) ; / / error <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 4269126fceb <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / decl_types / tvarray_or_darray . php . exp <nl> @ @ - 0 , 0 + 1 @ @ <nl> + No errors <nl> new file mode 100644 <nl> index 00000000000 . . 52515f8a775 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / expr_method . php <nl> <nl> + < ? hh / / strict <nl> + / / Copyright 2004 - present Facebook . All Rights Reserved . <nl> + <nl> + class C { <nl> + public function __construct ( public ( function ( int ) : void ) $ f ) { } <nl> + } <nl> + <nl> + function test ( C $ c , dynamic $ d ) : void { <nl> + $ f = $ c - > f ; <nl> + / / Can ' t enforce lambda parameter hints so coercion fails <nl> + $ f ( $ d ) ; <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 1d2cd78140e <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / expr_method . php . exp <nl> <nl> + File " expr_method . php " , line 11 , characters 6 - 7 : <nl> + Invalid argument ( Typing [ 4110 ] ) <nl> + File " expr_method . 
php " , line 5 , characters 48 - 50 : <nl> + This is an int <nl> + File " expr_method . php " , line 8 , characters 21 - 27 : <nl> + It is incompatible with a dynamic value <nl> new file mode 100644 <nl> index 00000000000 . . 71ecfd4af14 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / fun_id . php <nl> <nl> + < ? hh / / strict <nl> + / / Copyright 2004 - present Facebook . All Rights Reserved . <nl> + <nl> + function f ( int $ i ) : void { } <nl> + <nl> + function test ( dynamic $ d ) : void { <nl> + fun ( ' f ' ) ( $ d ) ; <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . f2e7513d014 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / fun_id . php . exp <nl> <nl> + File " fun_id . php " , line 7 , characters 12 - 13 : <nl> + Invalid argument ( Typing [ 4110 ] ) <nl> + File " fun_id . php " , line 4 , characters 12 - 14 : <nl> + This is an int <nl> + File " fun_id . php " , line 6 , characters 15 - 21 : <nl> + It is incompatible with a dynamic value <nl> new file mode 100644 <nl> index 00000000000 . . d98ad4dd64c <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / function . php <nl> <nl> + < ? hh / / strict <nl> + / / Copyright 2004 - present Facebook . All Rights Reserved . <nl> + <nl> + function dyn ( ) : dynamic { return 4 ; } <nl> + <nl> + function f ( int $ d ) : int { <nl> + f ( dyn ( ) ) ; <nl> + return dyn ( ) ; <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 284c73cfbb5 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / function . php . exp <nl> <nl> + File " function . php " , line 7 , characters 5 - 9 : <nl> + Invalid argument ( Typing [ 4110 ] ) <nl> + File " function . php " , line 6 , characters 12 - 14 : <nl> + This is an int <nl> + File " function . php " , line 4 , characters 17 - 23 : <nl> + It is incompatible with a dynamic value <nl> new file mode 100644 <nl> index 00000000000 . . 20adbd407c3 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / lambda_declared . php <nl> <nl> + < ? hh / / strict <nl> + / / Copyright 2004 - present Facebook . All Rights Reserved . <nl> + <nl> + function f ( dynamic $ d ) : void { <nl> + / / the runtime will enforce both the parameter and return types of a lambda <nl> + / / so long as they are declared ( and enforceable ) <nl> + $ f = ( int $ i ) : int = = > { <nl> + return $ d ; <nl> + } ; <nl> + $ f ( $ d ) ; <nl> + } <nl> + <nl> + function g ( dynamic $ d ) : void { <nl> + $ f = ( ( int , string ) $ i ) : ( int , string ) = = > { <nl> + return $ d ; <nl> + } ; <nl> + $ f ( $ d ) ; <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . fb181bd4ede <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / lambda_declared . php . exp <nl> <nl> + File " lambda_declared . php " , line 10 , characters 6 - 7 : <nl> + Invalid argument ( Typing [ 4110 ] ) <nl> + File " lambda_declared . php " , line 7 , characters 9 - 11 : <nl> + This is an int <nl> + File " lambda_declared . php " , line 4 , characters 12 - 18 : <nl> + It is incompatible with a dynamic value <nl> + File " lambda_declared . php " , line 15 , characters 12 - 13 : <nl> + Invalid return type ( Typing [ 4110 ] ) <nl> + File " lambda_declared . php " , line 14 , characters 28 - 40 : <nl> + This is a tuple of size 2 <nl> + File " lambda_declared . 
php " , line 13 , characters 12 - 18 : <nl> + It is incompatible with a dynamic value <nl> + File " lambda_declared . php " , line 17 , characters 6 - 7 : <nl> + Invalid argument ( Typing [ 4110 ] ) <nl> + File " lambda_declared . php " , line 14 , characters 9 - 21 : <nl> + This is a tuple of size 2 <nl> + File " lambda_declared . php " , line 13 , characters 12 - 18 : <nl> + It is incompatible with a dynamic value <nl> new file mode 100644 <nl> index 00000000000 . . c40146f8dba <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / lambda_hint . php <nl> <nl> + < ? hh / / strict <nl> + / / Copyright 2004 - present Facebook . All Rights Reserved . <nl> + <nl> + function f ( ( function ( int ) : void ) $ f , dynamic $ d ) : void { <nl> + $ f ( $ d ) ; <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 67530caa7c0 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / lambda_hint . php . exp <nl> <nl> + File " lambda_hint . php " , line 5 , characters 6 - 7 : <nl> + Invalid argument ( Typing [ 4110 ] ) <nl> + File " lambda_hint . php " , line 4 , characters 23 - 25 : <nl> + This is an int <nl> + File " lambda_hint . php " , line 4 , characters 39 - 45 : <nl> + It is incompatible with a dynamic value <nl> new file mode 100644 <nl> index 00000000000 . . 0f1f639f922 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / method . php <nl> <nl> + < ? hh / / strict <nl> + / / Copyright 2004 - present Facebook . All Rights Reserved . <nl> + <nl> + function dyn ( ) : dynamic { return 4 ; } <nl> + <nl> + class C { <nl> + public function f ( int $ d ) : int { <nl> + $ this - > f ( dyn ( ) ) ; <nl> + return dyn ( ) ; <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . a48d6c4fbb7 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / method . php . exp <nl> <nl> + File " method . php " , line 8 , characters 14 - 18 : <nl> + Invalid argument ( Typing [ 4110 ] ) <nl> + File " method . php " , line 7 , characters 21 - 23 : <nl> + This is an int <nl> + File " method . php " , line 4 , characters 17 - 23 : <nl> + It is incompatible with a dynamic value <nl> new file mode 100644 <nl> index 00000000000 . . 10c6c023019 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / method_id . php <nl> <nl> + < ? hh / / strict <nl> + / / Copyright 2004 - present Facebook . All Rights Reserved . <nl> + <nl> + class C { <nl> + public function f ( int $ i ) : void { } <nl> + } <nl> + <nl> + function test ( dynamic $ d ) : void { <nl> + $ c = new C ( ) ; <nl> + inst_meth ( $ c , ' f ' ) ( $ d ) ; <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 030dc277e8f <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / method_id . php . exp <nl> <nl> + File " method_id . php " , line 10 , characters 22 - 23 : <nl> + Invalid argument ( Typing [ 4110 ] ) <nl> + File " method_id . php " , line 5 , characters 21 - 23 : <nl> + This is an int <nl> + File " method_id . php " , line 8 , characters 15 - 21 : <nl> + It is incompatible with a dynamic value <nl> new file mode 100644 <nl> index 00000000000 . . 8d075cd093e <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / smethod_id . php <nl> <nl> + < ? hh / / strict <nl> + / / Copyright 2004 - present Facebook . All Rights Reserved . 
<nl> + <nl> + class C { <nl> + public static function f ( int $ i ) : void { } <nl> + } <nl> + <nl> + function test ( dynamic $ d ) : void { <nl> + class_meth ( C : : class , ' f ' ) ( $ d ) ; <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 52c437bf39d <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / smethod_id . php . exp <nl> <nl> + File " smethod_id . php " , line 9 , characters 29 - 30 : <nl> + Invalid argument ( Typing [ 4110 ] ) <nl> + File " smethod_id . php " , line 5 , characters 28 - 30 : <nl> + This is an int <nl> + File " smethod_id . php " , line 8 , characters 15 - 21 : <nl> + It is incompatible with a dynamic value <nl> new file mode 100644 <nl> index 00000000000 . . 50f26d8c6f6 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / static_method . php <nl> <nl> + < ? hh / / strict <nl> + / / Copyright 2004 - present Facebook . All Rights Reserved . <nl> + <nl> + function dyn ( ) : dynamic { return 4 ; } <nl> + <nl> + class C { <nl> + public static function f ( int $ d ) : int { <nl> + C : : f ( dyn ( ) ) ; <nl> + return dyn ( ) ; <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 325bd1e45e5 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / static_method . php . exp <nl> <nl> + File " static_method . php " , line 8 , characters 10 - 14 : <nl> + Invalid argument ( Typing [ 4110 ] ) <nl> + File " static_method . php " , line 7 , characters 28 - 30 : <nl> + This is an int <nl> + File " static_method . php " , line 4 , characters 17 - 23 : <nl> + It is incompatible with a dynamic value <nl> new file mode 100644 <nl> index 00000000000 . . 177d9c12990 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / varargs . php <nl> <nl> + < ? hh / / strict <nl> + / / Copyright 2004 - present Facebook . All Rights Reserved . <nl> + <nl> + / / The runtime doesn ' t enforce the type of varargs <nl> + function f ( int . . . $ i ) : void { } <nl> + <nl> + function d ( dynamic $ d ) : void { <nl> + f ( $ d ) ; <nl> + } <nl> + <nl> + < < __EntryPoint > > <nl> + function main ( ) : void { <nl> + f ( " str " ) ; <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 613d1d6c429 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / varargs . php . exp <nl> <nl> + File " varargs . php " , line 8 , characters 5 - 6 : <nl> + Invalid argument ( Typing [ 4110 ] ) <nl> + File " varargs . php " , line 5 , characters 19 - 20 : <nl> + This is an int ( variadic argument ) <nl> + File " varargs . php " , line 7 , characters 12 - 18 : <nl> + It is incompatible with a dynamic value <nl> + File " varargs . php " , line 13 , characters 5 - 9 : <nl> + Invalid argument ( Typing [ 4110 ] ) <nl> + File " varargs . php " , line 5 , characters 19 - 20 : <nl> + This is an int ( variadic argument ) <nl> + File " varargs . php " , line 13 , characters 5 - 9 : <nl> + It is incompatible with a string <nl> new file mode 100644 <nl> index 00000000000 . . 26338800fa1 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / yield . php <nl> <nl> + < ? hh / / strict <nl> + / / Copyright 2004 - present Facebook . All Rights Reserved . 
<nl> + <nl> + / / The runtime does not enforce yield <nl> + function y ( dynamic $ i ) : Generator < int , int , void > { <nl> + yield $ i ; <nl> + } <nl> + <nl> + function test ( ) : void { <nl> + foreach ( y ( " 3 " ) as $ i ) { } <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 6cc32b1504f <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / yield . php . exp <nl> <nl> + File " yield . php " , line 6 , characters 3 - 10 : <nl> + Invalid yield ( Typing [ 4110 ] ) <nl> + File " yield . php " , line 5 , characters 40 - 42 : <nl> + This is an int <nl> + File " yield . php " , line 5 , characters 12 - 18 : <nl> + It is incompatible with a dynamic value <nl> new file mode 100644 <nl> index 00000000000 . . d06a2946c42 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / yield_from . php <nl> <nl> + < ? hh / / strict <nl> + / / Copyright 2004 - present Facebook . All Rights Reserved . <nl> + <nl> + / / The runtime does not enforce the type returned in a yield from <nl> + function yf ( dynamic $ d ) : string { <nl> + yield from $ d ; <nl> + } <nl> + <nl> + function test ( ) : void { <nl> + foreach ( yf ( varray [ 3 , 4 ] ) as $ val ) { } <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . abd6e192622 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / typecheck / like_types / dynamic_coercion / yield_from . php . exp <nl> <nl> + File " yield_from . php " , line 6 , characters 3 - 15 : <nl> + Invalid yield from ( Typing [ 4110 ] ) <nl> + File " yield_from . php " , line 5 , characters 26 - 31 : <nl> + This is a string <nl> + File " yield_from . php " , line 6 , characters 3 - 15 : <nl> + It is incompatible with a dynamic value ( result of function with ' yield ' in the body ) <nl> + File " yield_from . php " , line 10 , characters 11 - 25 : <nl> + Invalid foreach ( Typing [ 4110 ] ) <nl> + File " yield_from . php " , line 10 , characters 11 - 25 : <nl> + This is an object of type Traversable < [ unresolved ] > because this is used in a foreach statement <nl> + File " yield_from . php " , line 5 , characters 26 - 31 : <nl> + It is incompatible with a string <nl>
|
Add coercion dynamic ~ > T when T is enforceable
|
facebook/hhvm
|
20fe31dc1cab01942355c9d229c547d0ec67c8f5
|
2019-06-11T19:41:27Z
|
mmm a / tensorflow / python / keras / utils / layer_utils . py <nl> ppp b / tensorflow / python / keras / utils / layer_utils . py <nl> def print_layer_summary_with_connections ( layer ) : <nl> continue <nl> <nl> for inbound_layer , node_index , tensor_index , _ in node . iterate_inbound ( ) : <nl> - connections . append ( ' { } [ { } ] [ { } ] ' . format ( inbound_layer , node_index , <nl> + connections . append ( ' { } [ { } ] [ { } ] ' . format ( inbound_layer . name , node_index , <nl> tensor_index ) ) <nl> <nl> name = layer . name <nl>
|
Merge pull request from yongtang : 24627 - model . summary
|
tensorflow/tensorflow
|
25eef323b6293d809e4d3626b0a70cd731f80155
|
2019-01-03T00:32:55Z
|
mmm a / dbms / src / Storages / StorageReplicatedMergeTree . cpp <nl> ppp b / dbms / src / Storages / StorageReplicatedMergeTree . cpp <nl> void StorageReplicatedMergeTree : : createReplica ( ) <nl> Coordination : : Stat replicas_stat ; <nl> String last_added_replica = zookeeper - > get ( zookeeper_path + " / replicas " , & replicas_stat ) ; <nl> <nl> - String is_lost_value = last_added_replica = = " " ? " 0 " : " 1 " ; <nl> + / / / If it is not the first replica , we will mark it as " lost " , to immediately repair ( clone ) from existing replica . <nl> + String is_lost_value = last_added_replica . empty ( ) ? " 0 " : " 1 " ; <nl> <nl> Coordination : : Requests ops ; <nl> Coordination : : Responses resps ; <nl> void StorageReplicatedMergeTree : : createReplica ( ) <nl> ops . emplace_back ( zkutil : : makeCreateRequest ( replica_path + " / flags " , " " , zkutil : : CreateMode : : Persistent ) ) ; <nl> ops . emplace_back ( zkutil : : makeCreateRequest ( replica_path + " / is_lost " , is_lost_value , zkutil : : CreateMode : : Persistent ) ) ; <nl> ops . emplace_back ( zkutil : : makeCreateRequest ( replica_path + " / columns " , getColumns ( ) . toString ( ) , zkutil : : CreateMode : : Persistent ) ) ; <nl> - / / / Check version of / replicas to see if there are any replicas . <nl> + / / / Check version of / replicas to see if there are any replicas created at the same moment of time . <nl> ops . emplace_back ( zkutil : : makeSetRequest ( zookeeper_path + " / replicas " , " last added replica : " + replica_name , replicas_stat . version ) ) ; <nl> <nl> code = zookeeper - > tryMulti ( ops , resps ) ; <nl> if ( code = = Coordination : : Error : : ZNODEEXISTS ) <nl> throw Exception ( " Replica " + replica_path + " already exists . " , ErrorCodes : : REPLICA_IS_ALREADY_EXIST ) ; <nl> else if ( code = = Coordination : : Error : : ZBADVERSION ) <nl> - LOG_ERROR ( log , " Retry createReplica ( ) , because some replicas were created " ) ; <nl> + LOG_ERROR ( log , " Retrying createReplica ( ) , because some other replicas were created at the same time " ) ; <nl> else <nl> zkutil : : KeeperMultiException : : check ( code , ops , resps ) ; <nl> } while ( code = = Coordination : : Error : : ZBADVERSION ) ; <nl> void StorageReplicatedMergeTree : : cloneReplicaIfNeeded ( zkutil : : ZooKeeperPtr zooke <nl> else <nl> { <nl> / / / Replica was created by old version of CH , so me must create " / is_lost " . <nl> + / / / Note that in old version of CH there was no " lost " replicas possible . <nl> zookeeper - > create ( replica_path + " / is_lost " , " 0 " , zkutil : : CreateMode : : Persistent ) ; <nl> return ; <nl> } <nl> void StorageReplicatedMergeTree : : cloneReplicaIfNeeded ( zkutil : : ZooKeeperPtr zooke <nl> { <nl> String source_replica_path = zookeeper_path + " / replicas / " + replica_name ; <nl> <nl> + / / / Do not clone from myself . <nl> if ( source_replica_path ! = replica_path ) <nl> { <nl> - String resp ; <nl> - if ( ! zookeeper - > tryGet ( source_replica_path + " / is_lost " , resp , & source_is_lost_stat ) | | resp = = " 0 " ) <nl> + / / / Do not clone from lost replicas . <nl> + String source_replica_is_lost_value ; <nl> + if ( ! 
zookeeper - > tryGet ( source_replica_path + " / is_lost " , source_replica_is_lost_value , & source_is_lost_stat ) <nl> + | | source_replica_is_lost_value = = " 0 " ) <nl> { <nl> source_replica = replica_name ; <nl> break ; <nl> void StorageReplicatedMergeTree : : cloneReplicaIfNeeded ( zkutil : : ZooKeeperPtr zooke <nl> } <nl> } <nl> <nl> - if ( source_replica = = " " ) <nl> + if ( source_replica . empty ( ) ) <nl> throw Exception ( " All replicas are lost " , ErrorCodes : : ALL_REPLICAS_LOST ) ; <nl> <nl> cloneReplica ( source_replica , source_is_lost_stat , zookeeper ) ; <nl>
|
Miscellaneous
|
ClickHouse/ClickHouse
|
bab6cd504d99e39239c9e84028861f6e19df598d
|
2018-08-27T19:16:38Z
|
mmm a / hphp / hack / src / hh_single_compile . ml <nl> ppp b / hphp / hack / src / hh_single_compile . ml <nl> let do_compile compiler_options opts files_info = begin <nl> let parsed_classes = List . filter_map classes parse_class in <nl> let parsed_typedefs = [ ] in ( * TODO typedefs * ) <nl> let parsed_consts = [ ] in ( * TODO consts * ) <nl> - ( parsed_functions , parsed_classes , parsed_typedefs , parsed_consts ) in <nl> + let parsed_statements = <nl> + Parser_heap . find_statements_in_file ~ full : true tcopt fn in <nl> + ( parsed_functions , parsed_classes , parsed_typedefs , parsed_consts , <nl> + parsed_statements ) in <nl> let f_fold fn fileinfo text = begin <nl> let ast = get_nast_from_fileinfo opts fn fileinfo in <nl> let options = Hhbc_options . get_options_from_config <nl> mmm a / hphp / hack / src / hhbc / closure_convert . mli <nl> ppp b / hphp / hack / src / hhbc / closure_convert . mli <nl> val get_closure_classes : env - > Ast . class_ list <nl> ( * Convert functions , classes , or an entire program * ) <nl> val convert_fun : env - > Ast . fun_ - > env * Ast . fun_ <nl> val convert_class : env - > Ast . class_ - > env * Ast . class_ <nl> + val convert_block : env - > Ast . block - > env * Ast . block <nl> val convert_prog : env - > Ast . program - > env * Ast . program <nl> new file mode 100644 <nl> index 00000000000 . . 98c00ce3626 <nl> mmm / dev / null <nl> ppp b / hphp / hack / src / hhbc / hhas_main . ml <nl> <nl> + ( * * <nl> + * Copyright ( c ) 2017 , Facebook , Inc . <nl> + * All rights reserved . <nl> + * <nl> + * This source code is licensed under the BSD - style license found in the <nl> + * LICENSE file in the " hack " directory of this source tree . An additional grant <nl> + * of patent rights can be found in the PATENTS file in the same directory . <nl> + * <nl> + * ) <nl> + <nl> + type t = { <nl> + main_body : Hhbc_ast . instruct list ; <nl> + main_decl_vars : string list ; ( * Actually local_id list * ) <nl> + } <nl> + <nl> + let make <nl> + main_body <nl> + main_decl_vars = <nl> + { <nl> + main_body ; <nl> + main_decl_vars ; <nl> + } <nl> + <nl> + let body m = m . main_body <nl> + let decl_vars m = m . main_decl_vars <nl> mmm a / hphp / hack / src / hhbc / hhas_program . ml <nl> ppp b / hphp / hack / src / hhbc / hhas_program . ml <nl> open Core <nl> type t = { <nl> hhas_fun : Hhas_function . t list ; <nl> hhas_classes : Hhas_class . t list ; <nl> + hhas_main : Hhas_main . t ; <nl> } <nl> <nl> - let make hhas_fun hhas_classes = <nl> - { hhas_fun ; hhas_classes } <nl> + let make hhas_fun hhas_classes hhas_main = <nl> + { hhas_fun ; hhas_classes ; hhas_main } <nl> <nl> let functions hhas_prog = <nl> hhas_prog . hhas_fun <nl> let functions hhas_prog = <nl> let classes hhas_prog = <nl> hhas_prog . hhas_classes <nl> <nl> + let main hhas_prog = <nl> + hhas_prog . hhas_main <nl> + <nl> + let emit_main block = <nl> + let body_instrs , decl_vars , _ , _ , _ , _ = <nl> + Emit_body . from_ast ~ self : None [ ] [ ] None block in <nl> + Hhas_main . make ( Instruction_sequence . instr_seq_to_list body_instrs ) decl_vars <nl> + <nl> let from_ast <nl> ( parsed_functions , <nl> parsed_classes , <nl> _parsed_typedefs , <nl> - _parsed_consts ) = <nl> + _parsed_consts , <nl> + parsed_statements ) = <nl> let env = Closure_convert . initial_env ( List . length parsed_classes ) in <nl> let env , parsed_functions = <nl> List . map_env env parsed_functions Closure_convert . convert_fun in <nl> let env , parsed_classes = <nl> List . map_env env parsed_classes Closure_convert . 
convert_class in <nl> + let env , parsed_statements = <nl> + Closure_convert . convert_block env parsed_statements in <nl> let closure_classes = Closure_convert . get_closure_classes env in <nl> let all_classes = parsed_classes @ closure_classes in <nl> let compiled_funs = Emit_function . from_asts parsed_functions in <nl> let from_ast <nl> let compiled_classes = Generate_memoized . memoize_classes compiled_classes in <nl> let _compiled_typedefs = [ ] in ( * TODO * ) <nl> let _compiled_consts = [ ] in ( * TODO * ) <nl> - make compiled_funs compiled_classes <nl> + let pos = Pos . none in <nl> + ( * Main method returns 1 by default ? * ) <nl> + let parsed_statements = <nl> + parsed_statements @ <nl> + [ Ast . Return ( pos , Some ( pos , Ast . Int ( pos , " 1 " ) ) ) ] in <nl> + let compiled_statements = emit_main parsed_statements in <nl> + make compiled_funs compiled_classes compiled_statements <nl> mmm a / hphp / hack / src / hhbc / hhbc_hhas . ml <nl> ppp b / hphp / hack / src / hhbc / hhbc_hhas . ml <nl> let add_top_level buf hhas_prog = <nl> let non_closure_classes = <nl> List . filter ( fun c - > not ( Hhas_class . is_closure_class c ) ) <nl> ( Hhas_program . classes hhas_prog ) in <nl> - let main_stmts = <nl> - [ ILitConst ( Int Int64 . one ) <nl> - ; IContFlow RetC <nl> - ] in <nl> + let main = Hhas_program . main hhas_prog in <nl> + let main_stmts = Hhas_main . body main in <nl> + let main_decl_vars = Hhas_main . decl_vars main in <nl> let fun_name = " . main { \ n " in <nl> B . add_string buf fun_name ; <nl> + add_decl_vars buf 2 main_decl_vars ; <nl> add_defcls buf non_closure_classes ; <nl> add_instruction_list buf 2 main_stmts ; <nl> B . add_string buf " } \ n " <nl> mmm a / hphp / hack / src / parsing / parser_heap . ml <nl> ppp b / hphp / hack / src / parsing / parser_heap . ml <nl> let get_const defs name = <nl> | _ - > acc <nl> end <nl> <nl> + ( * Get top - level statements from definitions * ) <nl> + let get_statements defs = <nl> + List . filter_map defs begin fun def - > <nl> + match def with <nl> + | Ast . Stmt st - > Some st <nl> + | _ - > None <nl> + end <nl> + <nl> ( * Get an AST directly from the parser heap . Will return empty AProgram <nl> if the file does not exist <nl> * ) <nl> let find_typedef_in_file ? ( full = false ) popt file_name name = <nl> <nl> let find_const_in_file ? ( full = false ) popt file_name name = <nl> get_const ( get_from_parser_heap ~ full popt file_name ) name <nl> + <nl> + let find_statements_in_file ? ( full = false ) popt file_name = <nl> + get_statements ( get_from_parser_heap ~ full popt file_name ) <nl>
|
Hack codegen : top - level statements
|
facebook/hhvm
|
e8ce185ee44c1ce49e9309bd91de1523c88faa75
|
2017-03-23T18:20:00Z
|
mmm a / src / core / file_sys / registered_cache . cpp <nl> ppp b / src / core / file_sys / registered_cache . cpp <nl> static ContentRecordType GetCRTypeFromNCAType ( NCAContentType type ) { <nl> VirtualFile RegisteredCache : : OpenFileOrDirectoryConcat ( const VirtualDir & dir , <nl> std : : string_view path ) const { <nl> const auto file = dir - > GetFileRelative ( path ) ; <nl> - if ( file ! = nullptr ) <nl> + if ( file ! = nullptr ) { <nl> return file ; <nl> + } <nl> <nl> const auto nca_dir = dir - > GetDirectoryRelative ( path ) ; <nl> - if ( nca_dir ! = nullptr ) { <nl> - const auto nca_dir = dir - > GetDirectoryRelative ( path ) ; <nl> - VirtualFile file = nullptr ; <nl> + if ( nca_dir = = nullptr ) { <nl> + return nullptr ; <nl> + } <nl> <nl> - const auto files = nca_dir - > GetFiles ( ) ; <nl> - if ( files . size ( ) = = 1 & & files [ 0 ] - > GetName ( ) = = " 00 " ) { <nl> - file = files [ 0 ] ; <nl> + const auto files = nca_dir - > GetFiles ( ) ; <nl> + if ( files . size ( ) = = 1 & & files [ 0 ] - > GetName ( ) = = " 00 " ) { <nl> + return files [ 0 ] ; <nl> + } <nl> + <nl> + std : : vector < VirtualFile > concat ; <nl> + / / Since the files are a two - digit hex number , max is FF . <nl> + for ( std : : size_t i = 0 ; i < 0x100 ; + + i ) { <nl> + auto next = nca_dir - > GetFile ( fmt : : format ( " { : 02X } " , i ) ) ; <nl> + if ( next ! = nullptr ) { <nl> + concat . push_back ( std : : move ( next ) ) ; <nl> } else { <nl> - std : : vector < VirtualFile > concat ; <nl> - / / Since the files are a two - digit hex number , max is FF . <nl> - for ( std : : size_t i = 0 ; i < 0x100 ; + + i ) { <nl> - auto next = nca_dir - > GetFile ( fmt : : format ( " { : 02X } " , i ) ) ; <nl> - if ( next ! = nullptr ) { <nl> - concat . push_back ( std : : move ( next ) ) ; <nl> - } else { <nl> - next = nca_dir - > GetFile ( fmt : : format ( " { : 02x } " , i ) ) ; <nl> - if ( next ! = nullptr ) <nl> - concat . push_back ( std : : move ( next ) ) ; <nl> - else <nl> - break ; <nl> - } <nl> + next = nca_dir - > GetFile ( fmt : : format ( " { : 02x } " , i ) ) ; <nl> + if ( next ! = nullptr ) { <nl> + concat . push_back ( std : : move ( next ) ) ; <nl> + } else { <nl> + break ; <nl> } <nl> - <nl> - if ( concat . empty ( ) ) <nl> - return nullptr ; <nl> - <nl> - file = ConcatenatedVfsFile : : MakeConcatenatedFile ( concat , concat . front ( ) - > GetName ( ) ) ; <nl> } <nl> + } <nl> <nl> - return file ; <nl> + if ( concat . empty ( ) ) { <nl> + return nullptr ; <nl> } <nl> - return nullptr ; <nl> + <nl> + return ConcatenatedVfsFile : : MakeConcatenatedFile ( concat , concat . front ( ) - > GetName ( ) ) ; <nl> } <nl> <nl> VirtualFile RegisteredCache : : GetFileAtID ( NcaID id ) const { <nl>
|
Merge pull request from lioncash / dedup
|
yuzu-emu/yuzu
|
e5dd4cb39266fe4e237b4024da881f855353b3eb
|
2018-12-04T23:34:49Z
|
mmm a / contrib / Python / doc / releasenotes . rst <nl> ppp b / contrib / Python / doc / releasenotes . rst <nl> <nl> Release Notes <nl> - = = = = = = = = = = = = = <nl> \ No newline at end of file <nl> + = = = = = = = = = = = = = <nl> + <nl> + Roadmap for Version 1 . 5 <nl> + mmmmmmmmmmmmmmmmmmmmm - - <nl> + <nl> + We are planning monthly releases for this API . The items on the agenda for <nl> + May release ( due end of May / early June ) are : <nl> + <nl> + * Python API : Greatly increased list of operators : Shape operations , <nl> + elementwise operations , reductions . <nl> + <nl> + * Python API : Support for image and speech readers <nl> + <nl> + * Python API : Support for sparse input tensors instead of NumPy arrays , where <nl> + applicable . <nl> + <nl> + * Python API : First version of a layer API <nl> + <nl> + * Readers : New speach reader <nl> + <nl> + * Readers : Combination of reader deserializers and transformers <nl> + <nl> + * Core : Profiling support <nl> + <nl> + * Core : More operators planned for core . <nl> + <nl> + <nl> + Version 1 . 4 ( April 2016 ) <nl> + mmmmmmmmmmmmmmmmmmmmmmmm <nl> + <nl> + New and improved features : <nl> + <nl> + * Python API : This is the first release containing a limited version of the <nl> + Python API . It exposes CNTKTextFormatReader , SGD , local and deferred <nl> + execution , and 22 operators . <nl> + <nl> + * CNTK Core : <nl> + <nl> + * This release contains a new generic text format reader called <nl> + CNTKTextFormatReader . UCIFastReader has been deprecated . The <nl> + new reader by definition supports all tensor formats ( sparse and dense , <nl> + sequences and non - sequences , multiple inputs ) that can be fed to CNTK . <nl> + <nl> + * The concept of named dynamic axes has been exposed in the configuration , <nl> + which enables modelling of inputs of varying length . <nl> + <nl> + Current restrictions of the Python API : <nl> + <nl> + * Although CNTK implements more than 100 operators through internal APIs , only <nl> + the most important operators have been exposed through the Python API at this <nl> + point . We are using this API as a production gate , requiring unit tests and <nl> + documentation before new functionality is exposed . More operators will be <nl> + added in the following weeks . <nl> + <nl> + * The Python API is a pure out - of - process API at this point . This means that <nl> + only methods on the context interact with CNTK directly through command line <nl> + calls . An in - process API with far greater extensibility options is planned <nl> + later in 2016 through the 2 . 0 release . <nl> + <nl> + * In particular , the training loop is monolithic at this point and cannot be <nl> + broken up into single forward / backward passes . This restriction will be <nl> + lifted with 2 . 0 release . <nl> + <nl> + * Although inputs can be sparse , sparse features cannot be fed through the <nl> + Python API at this point for immediate evaluation . They can only be fed <nl> + through files read through the CNTKTextFormatReader . <nl> + <nl> + * We are only exposing the CNTKTextFormatReader in Python at this point . More <nl> + data formats ( ImageReader , speech formats ) will be added in a later release . <nl> + <nl> + * We are not exposing a standard layer collection for LSTMs etc . at this point . <nl> + A first version of this will be added in the next release . <nl> + <nl> + * Tensor shapes are only available after a call to the context methods , which <nl> + run graph validation and tensor inference . 
<nl> + <nl> + * Only few examples have been translated from the CNTK - internal configuration <nl> + format ( NDL ) to the Python API . More will be added in the next releases . <nl> + <nl> + Current restrictions of CNTK Core : <nl> + <nl> + * A tensor can have only one dynamic axis ( the outermost one ) . <nl> + <nl> + * The support for sparse inputs on the operators is . . . sparse . <nl> + Operations might throw NotImplementedExceptions when a sparse tensor is fed . <nl> + The exact level of support will be described in the next release . <nl>
|
Edited release notes
|
microsoft/CNTK
|
0936d47c6e0ec6310686a6f3580b043942b4a763
|
2016-04-29T10:37:13Z
|
mmm a / scripts / build . py <nl> ppp b / scripts / build . py <nl> <nl> ( ' rinse ' , ' centos - 5 ' ) , <nl> ( ' shell ' , ' yum update - y ' ) , <nl> ( ' append_file : amd64 ' , ' etc / yum . conf ' , ' exclude = * . i ? 86 \ n ' ) , <nl> - ( ' shell ' , ' yum install - y gcc gcc - c + + make qt4 - devel openssl - devel diffutils perl xz ' ) , <nl> + ( ' shell ' , ' yum install - y gcc gcc - c + + make diffutils perl xz ' ) , <nl> + ( ' shell ' , ' yum install - y openssl - devel libX11 - devel libXrender - devel libXext - devel fontconfig - devel freetype - devel libjpeg - devel libpng - devel zlib - devel ' ) , <nl> ( ' write_file ' , ' update . sh ' , ' yum update - y \ n ' ) , <nl> ( ' schroot_conf ' , ' CentOS 5 ' ) <nl> ] , <nl> <nl> ( ' rinse ' , ' centos - 6 ' ) , <nl> ( ' shell ' , ' yum update - y ' ) , <nl> ( ' append_file : amd64 ' , ' etc / yum . conf ' , ' exclude = * . i ? 86 \ n ' ) , <nl> - ( ' shell ' , ' yum install - y gcc gcc - c + + make qt4 - devel openssl - devel diffutils perl tar xz ' ) , <nl> + ( ' shell ' , ' yum install - y gcc gcc - c + + make diffutils perl tar xz ' ) , <nl> + ( ' shell ' , ' yum install - y openssl - devel libX11 - devel libXrender - devel libXext - devel fontconfig - devel freetype - devel libjpeg - devel libpng - devel zlib - devel ' ) , <nl> ( ' write_file ' , ' update . sh ' , ' yum update - y \ n ' ) , <nl> ( ' schroot_conf ' , ' CentOS 6 ' ) <nl> ] <nl>
|
specify explicit dependencies when building the CentOS chroots
|
wkhtmltopdf/wkhtmltopdf
|
dd8e4d253ac8f9de3b9486386f937474aee39569
|
2014-06-05T11:42:36Z
|
mmm a / include / swift / CFG / CFGBuilder . h <nl> ppp b / include / swift / CFG / CFGBuilder . h <nl> class CFGBuilder { <nl> return insert ( new ConstantRefInst ( Expr ) ) ; <nl> } <nl> <nl> - DeclRefInst * createDeclRef ( DeclRefExpr * Expr ) { <nl> - return insert ( new DeclRefInst ( Expr ) ) ; <nl> - } <nl> - <nl> IntegerLiteralInst * createIntegerLiteral ( IntegerLiteralExpr * Expr ) { <nl> return insert ( new IntegerLiteralInst ( Expr ) ) ; <nl> } <nl> class CFGBuilder { <nl> return insert ( new StoreInst ( E , Src , DestLValue ) ) ; <nl> } <nl> <nl> - <nl> RequalifyInst * createRequalify ( RequalifyExpr * Expr , CFGValue Op ) { <nl> return insert ( new RequalifyInst ( Expr , Op ) ) ; <nl> } <nl> class CFGBuilder { <nl> unsigned FieldNo ) { <nl> return insert ( new TupleElementInst ( E , Operand , FieldNo ) ) ; <nl> } <nl> - <nl> <nl> TypeOfInst * createTypeOf ( TypeOfExpr * Expr ) { <nl> return insert ( new TypeOfInst ( Expr ) ) ; <nl> } <nl> <nl> + VarRefInst * createVarRef ( DeclRefExpr * Expr ) { <nl> + return insert ( new VarRefInst ( Expr ) ) ; <nl> + } <nl> + <nl> + <nl> / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - = = = / / <nl> / / Terminator Instruction Creation Methods <nl> / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - = = = / / <nl> mmm a / include / swift / CFG / CFGNodes . def <nl> ppp b / include / swift / CFG / CFGNodes . def <nl> ABSTRACT_INST ( Alloc , Instruction ) <nl> INST_RANGE ( Alloc , AllocVar , AllocTmp ) <nl> INST ( Apply , Instruction ) <nl> INST ( ConstantRef , Instruction ) <nl> - INST ( DeclRef , Instruction ) <nl> INST ( IntegerLiteral , Instruction ) <nl> INST ( Load , Instruction ) <nl> INST ( Store , Instruction ) <nl> INST ( Tuple , Instruction ) <nl> INST ( TypeOf , Instruction ) <nl> INST ( ScalarToTuple , Instruction ) <nl> INST ( TupleElement , Instruction ) <nl> + INST ( VarRef , Instruction ) <nl> ABSTRACT_INST ( Term , Instruction ) <nl> INST ( Unreachable , TermInst ) <nl> INST ( Return , TermInst ) <nl> mmm a / include / swift / CFG / Instruction . h <nl> ppp b / include / swift / CFG / Instruction . h <nl> class ConstantRefInst : public Instruction { <nl> } <nl> } ; <nl> <nl> - / / / DeclRefInst - Represents a reference to a non - constant declaration , <nl> - / / / evaluating to its lvalue ( i . e . , its address ) . <nl> - class DeclRefInst : public Instruction { <nl> - public : <nl> - <nl> - / / / Construct a DeclRefInst . <nl> - / / / <nl> - / / / \ param Expr A backpointer to the original DeclRefExpr . <nl> - / / / <nl> - DeclRefInst ( DeclRefExpr * E ) ; <nl> - <nl> - DeclRefExpr * getExpr ( ) const ; <nl> - <nl> - / / / getDecl - Return the underlying declaration . <nl> - ValueDecl * getDecl ( ) const ; <nl> - <nl> - static bool classof ( const Instruction * I ) { <nl> - return I - > getKind ( ) = = InstKind : : DeclRef ; <nl> - } <nl> - } ; <nl> - <nl> / / / Encapsulates an integer constant , as defined originally by an <nl> / / / an IntegerLiteralExpr . <nl> class IntegerLiteralInst : public Instruction { <nl> class TupleElementInst : public Instruction { <nl> return I - > getKind ( ) = = InstKind : : TupleElement ; <nl> } <nl> } ; <nl> + <nl> + / / / VarRefInst - Represents a reference to a non - constant declaration , <nl> + / / / evaluating to its lvalue ( i . e . , its address ) . <nl> + class VarRefInst : public Instruction { <nl> + public : <nl> + <nl> + / / / Construct a VarRefInst . 
<nl> + / / / <nl> + / / / \ param Expr A backpointer to the original DeclRefExpr . <nl> + / / / <nl> + VarRefInst ( DeclRefExpr * E ) ; <nl> + <nl> + DeclRefExpr * getExpr ( ) const ; <nl> + <nl> + / / / getDecl - Return the underlying declaration . <nl> + ValueDecl * getDecl ( ) const ; <nl> + <nl> + static bool classof ( const Instruction * I ) { <nl> + return I - > getKind ( ) = = InstKind : : VarRef ; <nl> + } <nl> + } ; <nl> + <nl> + <nl> <nl> / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> / / Instructions representing terminators . <nl> mmm a / lib / CFG / CFGGen / CFGGen . cpp <nl> ppp b / lib / CFG / CFGGen / CFGGen . cpp <nl> CFGValue CFGGen : : visitApplyExpr ( ApplyExpr * E ) { <nl> CFGValue CFGGen : : visitDeclRefExpr ( DeclRefExpr * E ) { <nl> / / If this is a reference to a mutable decl , produce an lvalue . <nl> if ( E - > getType ( ) - > is < LValueType > ( ) ) <nl> - return B . createDeclRef ( E ) ; <nl> + return B . createVarRef ( E ) ; <nl> <nl> / / Otherwise , we can only produce its value , use a ConstantRefInst . <nl> return B . createConstantRef ( E ) ; <nl> mmm a / lib / CFG / CFGPrinter . cpp <nl> ppp b / lib / CFG / CFGPrinter . cpp <nl> class CFGPrinter : public CFGVisitor < CFGPrinter > { <nl> < < " , type = " < < DRI - > getDecl ( ) - > getType ( ) . getString ( ) ; <nl> } <nl> <nl> - void visitDeclRefInst ( DeclRefInst * DRI ) { <nl> - OS < < " declref " < < DRI - > getDecl ( ) - > getName ( ) <nl> - < < " , type = " < < DRI - > getDecl ( ) - > getType ( ) . getString ( ) ; <nl> - } <nl> void visitIntegerLiteralInst ( IntegerLiteralInst * ILI ) { <nl> const auto & lit = ILI - > getValue ( ) ; <nl> OS < < " integerliteral " < < lit < < " , width = " < < lit . getBitWidth ( ) ; <nl> class CFGPrinter : public CFGVisitor < CFGPrinter > { <nl> void visitTypeOfInst ( TypeOfInst * TOI ) { <nl> OS < < " typeof " < < TOI - > getMetaType ( ) . getString ( ) ; <nl> } <nl> + <nl> + void visitVarRefInst ( VarRefInst * VRI ) { <nl> + OS < < " varref " < < VRI - > getDecl ( ) - > getName ( ) <nl> + < < " , type = " < < VRI - > getDecl ( ) - > getType ( ) . getString ( ) ; <nl> + } <nl> <nl> void visitUnreachableInst ( UnreachableInst * UI ) { <nl> OS < < " unreachable " ; <nl> mmm a / lib / CFG / Instruction . cpp <nl> ppp b / lib / CFG / Instruction . cpp <nl> ValueDecl * ConstantRefInst : : getDecl ( ) const { <nl> } <nl> <nl> <nl> - DeclRefInst : : DeclRefInst ( DeclRefExpr * E ) <nl> - : Instruction ( InstKind : : DeclRef , E , E - > getType ( ) ) { } <nl> - <nl> - DeclRefExpr * DeclRefInst : : getExpr ( ) const { <nl> - return getLocExpr < DeclRefExpr > ( ) ; <nl> - } <nl> - <nl> - / / / getDecl - Return the underlying declaration . <nl> - ValueDecl * DeclRefInst : : getDecl ( ) const { <nl> - return getExpr ( ) - > getDecl ( ) ; <nl> - } <nl> <nl> IntegerLiteralInst : : IntegerLiteralInst ( IntegerLiteralExpr * E ) <nl> : Instruction ( InstKind : : IntegerLiteral , E , E - > getType ( ) ) { <nl> TupleElementInst : : TupleElementInst ( TupleElementExpr * E , CFGValue Operand , <nl> } <nl> <nl> <nl> + VarRefInst : : VarRefInst ( DeclRefExpr * E ) <nl> + : Instruction ( InstKind : : VarRef , E , E - > getType ( ) ) { } <nl> + <nl> + DeclRefExpr * VarRefInst : : getExpr ( ) const { <nl> + return getLocExpr < DeclRefExpr > ( ) ; <nl> + } <nl> + <nl> + / / / getDecl - Return the underlying declaration . 
<nl> + ValueDecl * VarRefInst : : getDecl ( ) const { <nl> + return getExpr ( ) - > getDecl ( ) ; <nl> + } <nl> + <nl> TermInst : : SuccessorListTy TermInst : : getSuccessors ( ) { <nl> switch ( getKind ( ) ) { <nl> case InstKind : : AllocVar : <nl> case InstKind : : AllocTmp : <nl> case InstKind : : Apply : <nl> case InstKind : : ConstantRef : <nl> - case InstKind : : DeclRef : <nl> case InstKind : : IntegerLiteral : <nl> case InstKind : : Load : <nl> case InstKind : : Store : <nl> TermInst : : SuccessorListTy TermInst : : getSuccessors ( ) { <nl> case InstKind : : Tuple : <nl> case InstKind : : TupleElement : <nl> case InstKind : : TypeOf : <nl> + case InstKind : : VarRef : <nl> llvm_unreachable ( " Only TermInst ' s are allowed " ) ; <nl> case InstKind : : Unreachable : <nl> return cast < UnreachableInst > ( this ) - > getSuccessors ( ) ; <nl> mmm a / lib / CFG / Verifier . cpp <nl> ppp b / lib / CFG / Verifier . cpp <nl> class CFGVerifier : public CFGVisitor < CFGVerifier > { <nl> " ConstantRef should return not produce an lvalue " ) ; <nl> } <nl> <nl> - void visitDeclRefInst ( DeclRefInst * DRI ) { <nl> - assert ( DRI - > getType ( ) - > is < LValueType > ( ) & & " DeclRef should return lvalue " ) ; <nl> - } <nl> void visitIntegerLiteralInst ( IntegerLiteralInst * ILI ) { <nl> assert ( ILI - > getType ( ) - > is < BuiltinIntegerType > ( ) & & <nl> " invalid integer literal type " ) ; <nl> class CFGVerifier : public CFGVisitor < CFGVerifier > { <nl> void visitTypeOfInst ( TypeOfInst * TOI ) { <nl> } <nl> <nl> + void visitVarRefInst ( VarRefInst * DRI ) { <nl> + assert ( DRI - > getType ( ) - > is < LValueType > ( ) & & " VarRef should return lvalue " ) ; <nl> + } <nl> + <nl> + <nl> void visitReturnInst ( ReturnInst * RI ) { <nl> assert ( ! RI - > getReturnValue ( ) . isNull ( ) & & " Return of null value is invalid " ) ; <nl> } <nl>
|
now that constants are split out of DeclRefInst, it really isn't a reference
|
apple/swift
|
f0e3980394952657f1794c77c292407e0944ee4a
|
2012-10-10T05:01:42Z
|
mmm a / CHANGELOG <nl> ppp b / CHANGELOG <nl> <nl> devel <nl> mmm - - <nl> <nl> + * Drop a pair of braces { } in / _admin / metrics in case of empty labels , which <nl> + makes the API adhere better to the official Prometheus syntax . <nl> + <nl> * Add some more metrics to the ConnectionPool . <nl> <nl> * Remove HTTP " Connection " header when forwarding requests in the cluster <nl> mmm a / arangod / RestServer / Metrics . cpp <nl> ppp b / arangod / RestServer / Metrics . cpp <nl> void Counter : : toPrometheus ( std : : string & result ) const { <nl> _b . push ( ) ; <nl> result + = " \ n # TYPE " + name ( ) + " counter \ n " ; <nl> result + = " # HELP " + name ( ) + " " + help ( ) + " \ n " ; <nl> - result + = name ( ) + " { " + labels ( ) + " } " + std : : to_string ( load ( ) ) + " \ n " ; <nl> + result + = name ( ) ; <nl> + if ( ! labels ( ) . empty ( ) ) { <nl> + result + = " { " + labels ( ) + " } " ; <nl> + } <nl> + result + = " " + std : : to_string ( load ( ) ) + " \ n " ; <nl> } <nl> <nl> Counter : : Counter ( <nl> mmm a / arangod / RestServer / Metrics . h <nl> ppp b / arangod / RestServer / Metrics . h <nl> template < typename T > class Gauge : public Metric { <nl> T load ( ) const { return _g . load ( ) ; } <nl> virtual void toPrometheus ( std : : string & result ) const override { <nl> result + = " \ n # TYPE " + name ( ) + " gauge \ n " ; <nl> - result + = " # HELP " + name ( ) + " " + help ( ) + " \ n " ; <nl> - result + = name ( ) + " { " + labels ( ) + " } " + std : : to_string ( load ( ) ) + " \ n " ; <nl> + result + = " # HELP " + name ( ) + " " + help ( ) + " \ n " + name ( ) ; <nl> + if ( ! labels ( ) . empty ( ) ) { <nl> + result + = " { " + labels ( ) + " } " ; <nl> + } <nl> + result + = " " + std : : to_string ( load ( ) ) + " \ n " ; <nl> } ; <nl> private : <nl> std : : atomic < T > _g ; <nl> template < typename Scale > class Histogram : public Metric { <nl> } <nl> result + = " le = \ " " + _scale . delim ( i ) + " \ " } " + std : : to_string ( n ) + " \ n " ; <nl> } <nl> - result + = name ( ) + " _count { " + labels ( ) + " } " + std : : to_string ( sum ) + " \ n " ; <nl> + result + = name ( ) + " _count " ; <nl> + if ( ! labels ( ) . empty ( ) ) { <nl> + result + = " { " + labels ( ) + " } " ; <nl> + } <nl> + result + = " " + std : : to_string ( sum ) + " \ n " ; <nl> } <nl> <nl> std : : ostream & print ( std : : ostream & o ) const { <nl> mmm a / tests / js / client / communication / test - communication . js <nl> ppp b / tests / js / client / communication / test - communication . js <nl> function GenericAqlSetupPathSuite ( type ) { <nl> / / We do not sync shard - locks , so no deadlock possible <nl> return false ; <nl> } <nl> - / / If any of the writes is exclusive we enfoce sequential locking <nl> + / / If any of the writes is exclusive we enforce sequential locking <nl> return fExclusive = = = USE_EXCLUSIVE | | sExclusive = = = USE_EXCLUSIVE ; <nl> } ; <nl> <nl> mmm a / tests / js / client / shell / shell - transaction - intermediate - commit - cluster . js <nl> ppp b / tests / js / client / shell / shell - transaction - intermediate - commit - cluster . js <nl> function getMetric ( endpoint , name ) { <nl> let res = request . get ( { <nl> url : endpoint + ' / _admin / metrics ' , <nl> } ) ; <nl> - let re = new RegExp ( " ^ " + name + " \ \ { " ) ; <nl> + let re = new RegExp ( " ^ " + name ) ; <nl> let matches = res . body . split ( ' \ n ' ) . filter ( ( line ) = > ! line . match ( / ^ # / ) ) . filter ( ( line ) = > line . match ( re ) ) ; <nl> if ( ! 
matches . length ) { <nl> throw " Metric " + name + " not found " ; <nl> } <nl> - return Number ( matches [ 0 ] . replace ( / ^ . * ? \ } ( \ d + ) $ / , ' $ 1 ' ) ) ; <nl> + return Number ( matches [ 0 ] . replace ( / ^ . * ( \ d + ) $ / , ' $ 1 ' ) ) ; <nl> } <nl> <nl> function assertInSync ( leader , follower , shardId ) { <nl>
|
Minor syntactic fix in Prometheus metrics output ( )
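For illustration only — not ArangoDB's actual Metrics class — here is a minimal standalone C++ sketch of the pattern this commit applies: the "{...}" label block is emitted only when the label string is non-empty, so an unlabeled metric renders as "name value" rather than "name{} value", matching the Prometheus exposition format. All names below are hypothetical.

    #include <string>

    // Render one Prometheus sample line. The "{...}" block is emitted only
    // when labels are present, which is the point of the fix above.
    std::string renderSample(const std::string& name,
                             const std::string& labels,
                             const std::string& value) {
      std::string line = name;
      if (!labels.empty()) {
        line += "{" + labels + "}";
      }
      line += " " + value + "\n";
      return line;
    }

    // renderSample("some_counter_total", "", "42")
    //   -> "some_counter_total 42\n"
    // renderSample("some_counter_total", "role=\"coordinator\"", "42")
    //   -> "some_counter_total{role=\"coordinator\"} 42\n"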
|
arangodb/arangodb
|
51fcc3d9a2f1fb637a53d9c19a314e5024a2f61b
|
2020-12-30T09:28:39Z
|
mmm a / src / diagnostics / objects - printer . cc <nl> ppp b / src / diagnostics / objects - printer . cc <nl> void HeapObject : : HeapObjectPrint ( std : : ostream & os ) { / / NOLINT <nl> case JS_API_OBJECT_TYPE : <nl> case JS_SPECIAL_API_OBJECT_TYPE : <nl> case JS_CONTEXT_EXTENSION_OBJECT_TYPE : <nl> - case JS_ASYNC_FUNCTION_OBJECT_TYPE : <nl> - case JS_ASYNC_GENERATOR_OBJECT_TYPE : <nl> case JS_ARGUMENTS_TYPE : <nl> case JS_ERROR_TYPE : <nl> / / TODO ( titzer ) : debug printing for more wasm objects <nl> void HeapObject : : HeapObjectPrint ( std : : ostream & os ) { / / NOLINT <nl> case WASM_INSTANCE_TYPE : <nl> WasmInstanceObject : : cast ( * this ) . WasmInstanceObjectPrint ( os ) ; <nl> break ; <nl> + case JS_ASYNC_FUNCTION_OBJECT_TYPE : <nl> + case JS_ASYNC_GENERATOR_OBJECT_TYPE : <nl> case JS_GENERATOR_OBJECT_TYPE : <nl> JSGeneratorObject : : cast ( * this ) . JSGeneratorObjectPrint ( os ) ; <nl> break ; <nl>
|
[objects] Fix debug printing of JSAsyncFunctionObject and JSAsyncGeneratorObject.
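The mechanics of the fix are plain case-label grouping: the async object types are moved out of the generic JSObject bucket so they fall through to the generator printer. A hypothetical, self-contained C++ sketch of that dispatch shape (not V8 code):

    #include <iostream>

    enum class Kind { PlainObject, AsyncFunction, AsyncGenerator, Generator };

    // Grouping the async cases with Generator routes them to the specialized
    // printer instead of the generic object printer.
    void PrintHeapObject(Kind k) {
      switch (k) {
        case Kind::PlainObject:
          std::cout << "generic JS object print\n";
          break;
        case Kind::AsyncFunction:
        case Kind::AsyncGenerator:
        case Kind::Generator:
          std::cout << "generator-object print\n";
          break;
      }
    }

    int main() {
      PrintHeapObject(Kind::AsyncGenerator);  // prints "generator-object print"
    }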
|
v8/v8
|
62c34970b74b6e8c82166d4c0558f7a1c8737ef0
|
2019-07-15T10:33:02Z
|
mmm a / src / citra_qt / debugger / disassembler . cpp <nl> ppp b / src / citra_qt / debugger / disassembler . cpp <nl> <nl> # include " core / core . h " <nl> # include " common / break_points . h " <nl> # include " common / symbols . h " <nl> - # include " core / arm / interpreter / armdefs . h " <nl> + # include " core / arm / skyeye_common / armdefs . h " <nl> # include " core / arm / disassembler / arm_disasm . h " <nl> <nl> DisassemblerModel : : DisassemblerModel ( QObject * parent ) : QAbstractItemModel ( parent ) , base_address ( 0 ) , code_size ( 0 ) , program_counter ( 0 ) , selection ( QModelIndex ( ) ) { <nl> mmm a / src / core / CMakeLists . txt <nl> ppp b / src / core / CMakeLists . txt <nl> set ( SRCS <nl> arm / interpreter / mmu / tlb . cpp <nl> arm / interpreter / mmu / wb . cpp <nl> arm / interpreter / mmu / xscale_copro . cpp <nl> - arm / interpreter / vfp / vfp . cpp <nl> - arm / interpreter / vfp / vfpdouble . cpp <nl> - arm / interpreter / vfp / vfpinstr . cpp <nl> - arm / interpreter / vfp / vfpsingle . cpp <nl> arm / interpreter / arm_interpreter . cpp <nl> arm / interpreter / armcopro . cpp <nl> arm / interpreter / armemu . cpp <nl> set ( SRCS <nl> arm / interpreter / armsupp . cpp <nl> arm / interpreter / armvirt . cpp <nl> arm / interpreter / thumbemu . cpp <nl> + arm / skyeye_common / vfp / vfp . cpp <nl> + arm / skyeye_common / vfp / vfpdouble . cpp <nl> + arm / skyeye_common / vfp / vfpinstr . cpp <nl> + arm / skyeye_common / vfp / vfpsingle . cpp <nl> file_sys / archive_romfs . cpp <nl> file_sys / archive_sdmc . cpp <nl> file_sys / file_romfs . cpp <nl> set ( SRCS <nl> set ( HEADERS <nl> arm / disassembler / arm_disasm . h <nl> arm / disassembler / load_symbol_map . h <nl> + arm / interpreter / arm_interpreter . h <nl> arm / interpreter / mmu / arm1176jzf_s_mmu . h <nl> arm / interpreter / mmu / cache . h <nl> arm / interpreter / mmu / rb . h <nl> arm / interpreter / mmu / sa_mmu . h <nl> arm / interpreter / mmu / tlb . h <nl> arm / interpreter / mmu / wb . h <nl> - arm / interpreter / vfp / asm_vfp . h <nl> - arm / interpreter / vfp / vfp . h <nl> - arm / interpreter / vfp / vfp_helper . h <nl> - arm / interpreter / arm_interpreter . h <nl> - arm / interpreter / arm_regformat . h <nl> - arm / interpreter / armcpu . h <nl> - arm / interpreter / armdefs . h <nl> - arm / interpreter / armemu . h <nl> - arm / interpreter / armmmu . h <nl> - arm / interpreter / armos . h <nl> - arm / interpreter / skyeye_defs . h <nl> + arm / skyeye_common / vfp / asm_vfp . h <nl> + arm / skyeye_common / vfp / vfp . h <nl> + arm / skyeye_common / vfp / vfp_helper . h <nl> + arm / skyeye_common / arm_regformat . h <nl> + arm / skyeye_common / armcpu . h <nl> + arm / skyeye_common / armdefs . h <nl> + arm / skyeye_common / armemu . h <nl> + arm / skyeye_common / armmmu . h <nl> + arm / skyeye_common / armos . h <nl> + arm / skyeye_common / skyeye_defs . h <nl> arm / arm_interface . h <nl> file_sys / archive . h <nl> file_sys / archive_romfs . h <nl> mmm a / src / core / arm / interpreter / arm_interpreter . cpp <nl> ppp b / src / core / arm / interpreter / arm_interpreter . cpp <nl> <nl> <nl> # include " core / arm / interpreter / arm_interpreter . 
h " <nl> <nl> - const static cpu_config_t s_arm11_cpu_info = { <nl> + const static cpu_config_t arm11_cpu_info = { <nl> " armv6 " , " arm11 " , 0x0007b000 , 0x0007f000 , NONCACHE <nl> } ; <nl> <nl> ARM_Interpreter : : ARM_Interpreter ( ) { <nl> ARMul_NewState ( state ) ; <nl> <nl> state - > abort_model = 0 ; <nl> - state - > cpu = ( cpu_config_t * ) & s_arm11_cpu_info ; <nl> + state - > cpu = ( cpu_config_t * ) & arm11_cpu_info ; <nl> state - > bigendSig = LOW ; <nl> <nl> ARMul_SelectProcessor ( state , ARM_v6_Prop | ARM_v5_Prop | ARM_v5e_Prop ) ; <nl> mmm a / src / core / arm / interpreter / arm_interpreter . h <nl> ppp b / src / core / arm / interpreter / arm_interpreter . h <nl> <nl> # include " common / common . h " <nl> <nl> # include " core / arm / arm_interface . h " <nl> - # include " core / arm / interpreter / armdefs . h " <nl> - # include " core / arm / interpreter / armemu . h " <nl> + # include " core / arm / skyeye_common / armdefs . h " <nl> + # include " core / arm / skyeye_common / armemu . h " <nl> <nl> class ARM_Interpreter : virtual public ARM_Interface { <nl> public : <nl> mmm a / src / core / arm / interpreter / armcopro . cpp <nl> ppp b / src / core / arm / interpreter / armcopro . cpp <nl> <nl> Foundation , Inc . , 59 Temple Place - Suite 330 , Boston , MA 02111 - 1307 , USA . * / <nl> <nl> <nl> - # include " core / arm / interpreter / armdefs . h " <nl> - # include " core / arm / interpreter / armos . h " <nl> - # include " core / arm / interpreter / armemu . h " <nl> - # include " core / arm / interpreter / vfp / vfp . h " <nl> + # include " core / arm / skyeye_common / armdefs . h " <nl> + # include " core / arm / skyeye_common / armos . h " <nl> + # include " core / arm / skyeye_common / armemu . h " <nl> + # include " core / arm / skyeye_common / vfp / vfp . h " <nl> <nl> / / chy 2005 - 07 - 08 <nl> / / # include " ansidecl . h " <nl> mmm a / src / core / arm / interpreter / armemu . cpp <nl> ppp b / src / core / arm / interpreter / armemu . cpp <nl> <nl> <nl> / / # include < util . h > / / DEBUG ( ) <nl> <nl> - # include " arm_regformat . h " <nl> - # include " armdefs . h " <nl> - # include " armemu . h " <nl> + # include " core / arm / skyeye_common / arm_regformat . h " <nl> + # include " core / arm / skyeye_common / armdefs . h " <nl> + # include " core / arm / skyeye_common / armemu . h " <nl> # include " core / hle / hle . h " <nl> <nl> / / # include " svc . h " <nl> mmm a / src / core / arm / interpreter / arminit . cpp <nl> ppp b / src / core / arm / interpreter / arminit . cpp <nl> <nl> <nl> / / # include < unistd . h > <nl> <nl> - # include " core / arm / interpreter / armdefs . h " <nl> - # include " core / arm / interpreter / armemu . h " <nl> + # include " core / arm / skyeye_common / armdefs . h " <nl> + # include " core / arm / skyeye_common / armemu . h " <nl> <nl> / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * \ <nl> * Definitions for the emulator architecture * <nl> mmm a / src / core / arm / interpreter / armmmu . cpp <nl> ppp b / src / core / arm / interpreter / armmmu . cpp <nl> <nl> <nl> # include < assert . h > <nl> # include < string . h > <nl> - # include " armdefs . h " <nl> + # include " core / arm / skyeye_common / armdefs . h " <nl> / * two header for arm disassemble * / <nl> / / # include " skyeye_arch . h " <nl> - # include " armcpu . h " <nl> + # include " core / arm / skyeye_common / armcpu . 
h " <nl> <nl> <nl> extern mmu_ops_t xscale_mmu_ops ; <nl> mmm a / src / core / arm / interpreter / armos . cpp <nl> ppp b / src / core / arm / interpreter / armos . cpp <nl> fun , and definign VAILDATE will define SWI 1 to enter SVC mode , and SWI <nl> # include < time . h > <nl> # include < errno . h > <nl> # include < string . h > <nl> - # include " skyeye_defs . h " <nl> + # include " core / arm / skyeye_common / skyeye_defs . h " <nl> # ifndef __USE_LARGEFILE64 <nl> # define __USE_LARGEFILE64 / * When use 64 bit large file need define it ! for stat64 * / <nl> # endif <nl> extern int _fisatty ( FILE * ) ; <nl> # endif <nl> # endif <nl> <nl> - # include " armdefs . h " <nl> - # include " armos . h " <nl> - # include " armemu . h " <nl> + # include " core / arm / skyeye_common / armdefs . h " <nl> + # include " core / arm / skyeye_common / armos . h " <nl> + # include " core / arm / skyeye_common / armemu . h " <nl> <nl> # ifndef NOOS <nl> # ifndef VALIDATE <nl> mmm a / src / core / arm / interpreter / armvirt . cpp <nl> ppp b / src / core / arm / interpreter / armvirt . cpp <nl> table . The routines PutWord and GetWord implement this . Pages are never <nl> freed as they might be needed again . A single area of memory may be <nl> defined to generate aborts . * / <nl> <nl> - # include " armdefs . h " <nl> - # include " skyeye_defs . h " <nl> + # include " core / arm / skyeye_common / armdefs . h " <nl> + # include " core / arm / skyeye_common / skyeye_defs . h " <nl> / / # include " code_cov . h " <nl> <nl> # ifdef VALIDATE / * for running the validate suite * / <nl> mmm a / src / core / arm / interpreter / mmu / arm1176jzf_s_mmu . cpp <nl> ppp b / src / core / arm / interpreter / mmu / arm1176jzf_s_mmu . cpp <nl> <nl> <nl> # include " core / mem_map . h " <nl> <nl> - # include " core / arm / interpreter / skyeye_defs . h " <nl> + # include " core / arm / skyeye_common / skyeye_defs . h " <nl> <nl> - # include " core / arm / interpreter / armdefs . h " <nl> + # include " core / arm / skyeye_common / armdefs . h " <nl> / / # include " bank_defs . h " <nl> # if 0 <nl> # define TLB_SIZE 1024 * 1024 <nl> mmm a / src / core / arm / interpreter / mmu / cache . cpp <nl> ppp b / src / core / arm / interpreter / mmu / cache . cpp <nl> <nl> - # include " core / arm / interpreter / armdefs . h " <nl> + # include " core / arm / skyeye_common / armdefs . h " <nl> <nl> / * mmu cache init <nl> * <nl> mmm a / src / core / arm / interpreter / mmu / maverick . cpp <nl> ppp b / src / core / arm / interpreter / mmu / maverick . cpp <nl> <nl> <nl> # include < assert . h > <nl> <nl> - # include " core / arm / interpreter / armdefs . h " <nl> - # include " core / arm / interpreter / armemu . h " <nl> + # include " core / arm / skyeye_common / armdefs . h " <nl> + # include " core / arm / skyeye_common / armemu . h " <nl> <nl> <nl> / * # define CIRRUS_DEBUG 1 * / <nl> mmm a / src / core / arm / interpreter / mmu / rb . cpp <nl> ppp b / src / core / arm / interpreter / mmu / rb . cpp <nl> <nl> - # include " core / arm / interpreter / armdefs . h " <nl> + # include " core / arm / skyeye_common / armdefs . h " <nl> <nl> / * chy 2004 - 06 - 06 , fix bug found by wenye @ cs . ucsb . edu * / <nl> ARMword rb_masks [ ] = { <nl> mmm a / src / core / arm / interpreter / mmu / sa_mmu . cpp <nl> ppp b / src / core / arm / interpreter / mmu / sa_mmu . cpp <nl> <nl> # include < assert . h > <nl> # include < string . h > <nl> <nl> - # include " core / arm / interpreter / armdefs . 
h " <nl> + # include " core / arm / skyeye_common / armdefs . h " <nl> <nl> / * * <nl> * The interface of read data from bus <nl> mmm a / src / core / arm / interpreter / mmu / tlb . cpp <nl> ppp b / src / core / arm / interpreter / mmu / tlb . cpp <nl> <nl> # include < assert . h > <nl> <nl> - # include " core / arm / interpreter / armdefs . h " <nl> + # include " core / arm / skyeye_common / armdefs . h " <nl> <nl> ARMword tlb_masks [ ] = { <nl> 0x00000000 , / * TLB_INVALID * / <nl> mmm a / src / core / arm / interpreter / mmu / wb . cpp <nl> ppp b / src / core / arm / interpreter / mmu / wb . cpp <nl> <nl> - # include " core / arm / interpreter / armdefs . h " <nl> + # include " core / arm / skyeye_common / armdefs . h " <nl> <nl> / * wb_init <nl> * @ wb_t : wb_t to init <nl> mmm a / src / core / arm / interpreter / mmu / xscale_copro . cpp <nl> ppp b / src / core / arm / interpreter / mmu / xscale_copro . cpp <nl> <nl> # include < assert . h > <nl> # include < string . h > <nl> <nl> - # include " core / arm / interpreter / armdefs . h " <nl> - # include " core / arm / interpreter / armemu . h " <nl> + # include " core / arm / skyeye_common / armdefs . h " <nl> + # include " core / arm / skyeye_common / armemu . h " <nl> <nl> / * # include " pxa . h " * / <nl> <nl> mmm a / src / core / arm / interpreter / thumbemu . cpp <nl> ppp b / src / core / arm / interpreter / thumbemu . cpp <nl> <nl> instruction into its corresponding ARM instruction , and using the <nl> existing ARM simulator . * / <nl> <nl> - # include " skyeye_defs . h " <nl> + # include " core / arm / skyeye_common / skyeye_defs . h " <nl> <nl> # ifndef MODET / * required for the Thumb instruction support * / <nl> # if 1 <nl> existing ARM simulator . * / <nl> # endif <nl> # endif <nl> <nl> - # include " armdefs . h " <nl> - # include " armemu . h " <nl> - # include " armos . h " <nl> + # include " core / arm / skyeye_common / armdefs . h " <nl> + # include " core / arm / skyeye_common / armemu . h " <nl> + # include " core / arm / skyeye_common / armos . h " <nl> <nl> <nl> / * Decode a 16bit Thumb instruction . The instruction is in the low <nl> similarity index 100 % <nl> rename from src / core / arm / interpreter / arm_regformat . h <nl> rename to src / core / arm / skyeye_common / arm_regformat . h <nl> similarity index 100 % <nl> rename from src / core / arm / interpreter / armcpu . h <nl> rename to src / core / arm / skyeye_common / armcpu . h <nl> similarity index 99 % <nl> rename from src / core / arm / interpreter / armdefs . h <nl> rename to src / core / arm / skyeye_common / armdefs . h <nl> mmm a / src / core / arm / interpreter / armdefs . h <nl> ppp b / src / core / arm / skyeye_common / armdefs . h <nl> <nl> <nl> # include " arm_regformat . h " <nl> # include " common / platform . h " <nl> - # include " skyeye_defs . h " <nl> + # include " core / arm / skyeye_common / skyeye_defs . h " <nl> <nl> / / AJ2Dmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> typedef unsigned long long uint64_t ; <nl> # endif <nl> * / <nl> <nl> - # include " armmmu . h " <nl> + # include " core / arm / skyeye_common / armmmu . h " <nl> / / # include " lcd / skyeye_lcd . h " <nl> <nl> <nl> similarity index 99 % <nl> rename from src / core / arm / interpreter / armemu . h <nl> rename to src / core / arm / skyeye_common / armemu . h <nl> mmm a / src / core / arm / interpreter / armemu . h <nl> ppp b / src / core / arm / skyeye_common / armemu . 
h <nl> <nl> # define __ARMEMU_H__ <nl> <nl> <nl> - # include " armdefs . h " <nl> + # include " core / arm / skyeye_common / armdefs . h " <nl> / / # include " skyeye . h " <nl> <nl> / / extern ARMword isize ; <nl> similarity index 100 % <nl> rename from src / core / arm / interpreter / armmmu . h <nl> rename to src / core / arm / skyeye_common / armmmu . h <nl> similarity index 100 % <nl> rename from src / core / arm / interpreter / armos . h <nl> rename to src / core / arm / skyeye_common / armos . h <nl> similarity index 100 % <nl> rename from src / core / arm / interpreter / skyeye_defs . h <nl> rename to src / core / arm / skyeye_common / skyeye_defs . h <nl> similarity index 100 % <nl> rename from src / core / arm / interpreter / vfp / asm_vfp . h <nl> rename to src / core / arm / skyeye_common / vfp / asm_vfp . h <nl> similarity index 91 % <nl> rename from src / core / arm / interpreter / vfp / vfp . cpp <nl> rename to src / core / arm / skyeye_common / vfp / vfp . cpp <nl> mmm a / src / core / arm / interpreter / vfp / vfp . cpp <nl> ppp b / src / core / arm / skyeye_common / vfp / vfp . cpp <nl> <nl> <nl> # include " common / common . h " <nl> <nl> - # include " core / arm / interpreter / armdefs . h " <nl> - # include " core / arm / interpreter / vfp / vfp . h " <nl> + # include " core / arm / skyeye_common / armdefs . h " <nl> + # include " core / arm / skyeye_common / vfp / vfp . h " <nl> <nl> / / ARMul_State * persistent_state ; / * function calls from SoftFloat lib don ' t have an access to ARMul_state . * / <nl> <nl> VFPMRC ( ARMul_State * state , unsigned type , ARMword instr , ARMword * value ) <nl> if ( CoProc = = 10 | | CoProc = = 11 ) <nl> { <nl> # define VFP_MRC_TRANS <nl> - # include " core / arm / interpreter / vfp / vfpinstr . cpp " <nl> + # include " core / arm / skyeye_common / vfp / vfpinstr . cpp " <nl> # undef VFP_MRC_TRANS <nl> } <nl> DEBUG_LOG ( ARM11 , " Can ' t identify % x , CoProc % x , OPC_1 % x , Rt % x , CRn % x , CRm % x , OPC_2 % x \ n " , <nl> VFPMCR ( ARMul_State * state , unsigned type , ARMword instr , ARMword value ) <nl> if ( CoProc = = 10 | | CoProc = = 11 ) <nl> { <nl> # define VFP_MCR_TRANS <nl> - # include " core / arm / interpreter / vfp / vfpinstr . cpp " <nl> + # include " core / arm / skyeye_common / vfp / vfpinstr . cpp " <nl> # undef VFP_MCR_TRANS <nl> } <nl> DEBUG_LOG ( ARM11 , " Can ' t identify % x , CoProc % x , OPC_1 % x , Rt % x , CRn % x , CRm % x , OPC_2 % x \ n " , <nl> VFPMRRC ( ARMul_State * state , unsigned type , ARMword instr , ARMword * value1 , AR <nl> if ( CoProc = = 10 | | CoProc = = 11 ) <nl> { <nl> # define VFP_MRRC_TRANS <nl> - # include " core / arm / interpreter / vfp / vfpinstr . cpp " <nl> + # include " core / arm / skyeye_common / vfp / vfpinstr . cpp " <nl> # undef VFP_MRRC_TRANS <nl> } <nl> DEBUG_LOG ( ARM11 , " Can ' t identify % x , CoProc % x , OPC_1 % x , Rt % x , Rt2 % x , CRm % x \ n " , <nl> VFPMCRR ( ARMul_State * state , unsigned type , ARMword instr , ARMword value1 , ARMw <nl> if ( CoProc = = 11 | | CoProc = = 10 ) <nl> { <nl> # define VFP_MCRR_TRANS <nl> - # include " core / arm / interpreter / vfp / vfpinstr . cpp " <nl> + # include " core / arm / skyeye_common / vfp / vfpinstr . 
cpp " <nl> # undef VFP_MCRR_TRANS <nl> } <nl> DEBUG_LOG ( ARM11 , " Can ' t identify % x , CoProc % x , OPC_1 % x , Rt % x , Rt2 % x , CRm % x \ n " , <nl> VFPSTC ( ARMul_State * state , unsigned type , ARMword instr , ARMword * value ) <nl> # endif <nl> <nl> # define VFP_STC_TRANS <nl> - # include " core / arm / interpreter / vfp / vfpinstr . cpp " <nl> + # include " core / arm / skyeye_common / vfp / vfpinstr . cpp " <nl> # undef VFP_STC_TRANS <nl> } <nl> DEBUG_LOG ( ARM11 , " Can ' t identify % x , CoProc % x , CRd % x , Rn % x , imm8 % x , P % x , U % x , D % x , W % x \ n " , <nl> VFPLDC ( ARMul_State * state , unsigned type , ARMword instr , ARMword value ) <nl> if ( CoProc = = 10 | | CoProc = = 11 ) <nl> { <nl> # define VFP_LDC_TRANS <nl> - # include " core / arm / interpreter / vfp / vfpinstr . cpp " <nl> + # include " core / arm / skyeye_common / vfp / vfpinstr . cpp " <nl> # undef VFP_LDC_TRANS <nl> } <nl> DEBUG_LOG ( ARM11 , " Can ' t identify % x , CoProc % x , CRd % x , Rn % x , imm8 % x , P % x , U % x , D % x , W % x \ n " , <nl> VFPCDP ( ARMul_State * state , unsigned type , ARMword instr ) <nl> if ( CoProc = = 10 | | CoProc = = 11 ) <nl> { <nl> # define VFP_CDP_TRANS <nl> - # include " core / arm / interpreter / vfp / vfpinstr . cpp " <nl> + # include " core / arm / skyeye_common / vfp / vfpinstr . cpp " <nl> # undef VFP_CDP_TRANS <nl> <nl> int exceptions = 0 ; <nl> VFPCDP ( ARMul_State * state , unsigned type , ARMword instr ) <nl> <nl> / * mmmmmmmmm - - MRC mmmmmmmmmmmm * / <nl> # define VFP_MRC_IMPL <nl> - # include " core / arm / interpreter / vfp / vfpinstr . cpp " <nl> + # include " core / arm / skyeye_common / vfp / vfpinstr . cpp " <nl> # undef VFP_MRC_IMPL <nl> <nl> # define VFP_MRRC_IMPL <nl> - # include " core / arm / interpreter / vfp / vfpinstr . cpp " <nl> + # include " core / arm / skyeye_common / vfp / vfpinstr . cpp " <nl> # undef VFP_MRRC_IMPL <nl> <nl> <nl> / * mmmmmmmmm - - MCR mmmmmmmmmmmm * / <nl> # define VFP_MCR_IMPL <nl> - # include " core / arm / interpreter / vfp / vfpinstr . cpp " <nl> + # include " core / arm / skyeye_common / vfp / vfpinstr . cpp " <nl> # undef VFP_MCR_IMPL <nl> <nl> # define VFP_MCRR_IMPL <nl> - # include " core / arm / interpreter / vfp / vfpinstr . cpp " <nl> + # include " core / arm / skyeye_common / vfp / vfpinstr . cpp " <nl> # undef VFP_MCRR_IMPL <nl> <nl> / * Memory operation are not inlined , as old Interpreter and Fast interpreter <nl> VFPCDP ( ARMul_State * state , unsigned type , ARMword instr ) <nl> <nl> / * mmmmmmmmm - - STC mmmmmmmmmmmm * / <nl> # define VFP_STC_IMPL <nl> - # include " core / arm / interpreter / vfp / vfpinstr . cpp " <nl> + # include " core / arm / skyeye_common / vfp / vfpinstr . cpp " <nl> # undef VFP_STC_IMPL <nl> <nl> <nl> / * mmmmmmmmm - - LDC mmmmmmmmmmmm * / <nl> # define VFP_LDC_IMPL <nl> - # include " core / arm / interpreter / vfp / vfpinstr . cpp " <nl> + # include " core / arm / skyeye_common / vfp / vfpinstr . cpp " <nl> # undef VFP_LDC_IMPL <nl> <nl> <nl> / * mmmmmmmmm - - CDP mmmmmmmmmmmm * / <nl> # define VFP_CDP_IMPL <nl> - # include " core / arm / interpreter / vfp / vfpinstr . cpp " <nl> + # include " core / arm / skyeye_common / vfp / vfpinstr . cpp " <nl> # undef VFP_CDP_IMPL <nl> <nl> / * Miscellaneous functions * / <nl> similarity index 97 % <nl> rename from src / core / arm / interpreter / vfp / vfp . h <nl> rename to src / core / arm / skyeye_common / vfp / vfp . h <nl> mmm a / src / core / arm / interpreter / vfp / vfp . 
h <nl> ppp b / src / core / arm / skyeye_common / vfp / vfp . h <nl> <nl> <nl> # define vfpdebug / / printf <nl> <nl> - # include " core / arm / interpreter / vfp / vfp_helper . h " / * for references to cdp SoftFloat functions * / <nl> + # include " core / arm / skyeye_common / vfp / vfp_helper . h " / * for references to cdp SoftFloat functions * / <nl> <nl> unsigned VFPInit ( ARMul_State * state ) ; <nl> unsigned VFPMRC ( ARMul_State * state , unsigned type , ARMword instr , ARMword * value ) ; <nl> similarity index 99 % <nl> rename from src / core / arm / interpreter / vfp / vfp_helper . h <nl> rename to src / core / arm / skyeye_common / vfp / vfp_helper . h <nl> mmm a / src / core / arm / interpreter / vfp / vfp_helper . h <nl> ppp b / src / core / arm / skyeye_common / vfp / vfp_helper . h <nl> <nl> # include < stdint . h > <nl> # include < stdio . h > <nl> <nl> - # include " core / arm / interpreter / armdefs . h " <nl> + # include " core / arm / skyeye_common / armdefs . h " <nl> <nl> # define u16 uint16_t <nl> # define u32 uint32_t <nl> similarity index 99 % <nl> rename from src / core / arm / interpreter / vfp / vfpdouble . cpp <nl> rename to src / core / arm / skyeye_common / vfp / vfpdouble . cpp <nl> mmm a / src / core / arm / interpreter / vfp / vfpdouble . cpp <nl> ppp b / src / core / arm / skyeye_common / vfp / vfpdouble . cpp <nl> <nl> * = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> * / <nl> <nl> - # include " core / arm / interpreter / vfp / vfp . h " <nl> - # include " core / arm / interpreter / vfp / vfp_helper . h " <nl> - # include " core / arm / interpreter / vfp / asm_vfp . h " <nl> + # include " core / arm / skyeye_common / vfp / vfp . h " <nl> + # include " core / arm / skyeye_common / vfp / vfp_helper . h " <nl> + # include " core / arm / skyeye_common / vfp / asm_vfp . h " <nl> <nl> static struct vfp_double vfp_double_default_qnan = { <nl> / / . exponent = 2047 , <nl> similarity index 100 % <nl> rename from src / core / arm / interpreter / vfp / vfpinstr . cpp <nl> rename to src / core / arm / skyeye_common / vfp / vfpinstr . cpp <nl> similarity index 99 % <nl> rename from src / core / arm / interpreter / vfp / vfpsingle . cpp <nl> rename to src / core / arm / skyeye_common / vfp / vfpsingle . cpp <nl> mmm a / src / core / arm / interpreter / vfp / vfpsingle . cpp <nl> ppp b / src / core / arm / skyeye_common / vfp / vfpsingle . cpp <nl> <nl> * = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> * / <nl> <nl> - # include " core / arm / interpreter / vfp / vfp_helper . h " <nl> - # include " core / arm / interpreter / vfp / asm_vfp . h " <nl> - # include " core / arm / interpreter / vfp / vfp . h " <nl> + # include " core / arm / skyeye_common / vfp / vfp_helper . h " <nl> + # include " core / arm / skyeye_common / vfp / asm_vfp . h " <nl> + # include " core / arm / skyeye_common / vfp / vfp . h " <nl> <nl> static struct vfp_single vfp_single_default_qnan = { <nl> / / . exponent = 255 , <nl> mmm a / src / core / core . h <nl> ppp b / src / core / core . h <nl> <nl> # pragma once <nl> <nl> # include " core / arm / arm_interface . h " <nl> - # include " core / arm / interpreter / armdefs . h " <nl> + # include " core / arm / skyeye_common / armdefs . 
h " <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl>
|
ARM: Reorganized file structure to move shared SkyEye code to a more common area.
|
yuzu-emu/yuzu
|
b5e65245948647b94dfd60c1288f030a76c69a83
|
2014-10-25T18:11:39Z
|
mmm a / README . rst <nl> ppp b / README . rst <nl> Format string syntax and the documentation are based on Python ' s ` str . format <nl> Thanks ` Doug Turnbull < https : / / github . com / softwaredoug > ` __ for his valuable <nl> comments and contribution to the design of the type - safe API and <nl> ` Gregory Czajkowski < https : / / github . com / gcflymoto > ` __ for implementing binary <nl> - formatting . <nl> + formatting . Thanks ` Ruslan Baratov < https : / / github . com / ruslo > ` __ for comprehensive <nl> + ` comparison of integer formatting algorithms < https : / / github . com / ruslo / int - dec - format - tests > ` __ <nl> + and useful comments regarding performance . <nl>
|
Acknowledge Ruslan Baratov's contribution.
|
fmtlib/fmt
|
2f423d8b4619c01df390d3d278173710687e9200
|
2014-02-19T22:55:22Z
|
new file mode 100644 <nl> index 000000000000 . . 6685cb68d58f <nl> mmm / dev / null <nl> ppp b / test / api - digester / Outputs / empty - baseline . json <nl> <nl> + { <nl> + " kind " : " Root " , <nl> + " name " : " TopLevel " , <nl> + " printedName " : " TopLevel " , <nl> + " json_format_version " : 1 <nl> + } <nl> \ No newline at end of file <nl> new file mode 100644 <nl> index 000000000000 . . 62cde9696fed <nl> mmm / dev / null <nl> ppp b / test / api - digester / dump - empty - baseline . swift <nl> <nl> + / / REQUIRES : OS = macosx <nl> + / / RUN : % empty - directory ( % t . mod ) <nl> + / / RUN : % empty - directory ( % t . sdk ) <nl> + / / RUN : % empty - directory ( % t . module - cache ) <nl> + / / RUN : % api - digester - generate - empty - baseline - o % t . result - avoid - tool - args <nl> + / / RUN : diff - u % S / Outputs / empty - baseline . json % t . result <nl> + / / RUN : % api - digester - deserialize - sdk - input - paths % S / Outputs / empty - baseline . json - o % t . result <nl> + / / RUN : diff - u % S / Outputs / empty - baseline . json % t . result <nl> mmm a / tools / swift - api - digester / ModuleAnalyzerNodes . cpp <nl> ppp b / tools / swift - api - digester / ModuleAnalyzerNodes . cpp <nl> void SwiftDeclCollector : : serialize ( StringRef Filename ) { <nl> SwiftDeclCollector : : serialize ( Filename , RootNode ) ; <nl> } <nl> <nl> + SDKNodeRoot * <nl> + swift : : ide : : api : : getEmptySDKNodeRoot ( SDKContext & SDKCtx ) { <nl> + SwiftDeclCollector Collector ( SDKCtx ) ; <nl> + return Collector . getSDKRoot ( ) ; <nl> + } <nl> + <nl> SDKNodeRoot * <nl> swift : : ide : : api : : getSDKNodeRoot ( SDKContext & SDKCtx , <nl> const CompilerInvocation & InitInvok , <nl> - const llvm : : StringSet < > & ModuleNames , <nl> - CheckerOptions Opts ) { <nl> + const llvm : : StringSet < > & ModuleNames ) { <nl> + CheckerOptions Opts = SDKCtx . getOpts ( ) ; <nl> CompilerInvocation Invocation ( InitInvok ) ; <nl> <nl> CompilerInstance & CI = SDKCtx . newCompilerInstance ( ) ; <nl> / / Display diagnostics to stderr . <nl> - PrintingDiagnosticConsumer PrintDiags ; <nl> + PrintingDiagnosticConsumer PrintDiags ( llvm : : errs ( ) ) ; <nl> + if ( llvm : : errs ( ) . has_colors ( ) ) <nl> + PrintDiags . forceColors ( ) ; <nl> CI . addDiagnosticConsumer ( & PrintDiags ) ; <nl> if ( CI . setup ( Invocation ) ) { <nl> llvm : : errs ( ) < < " Failed to setup the compiler instance \ n " ; <nl> swift : : ide : : api : : getSDKNodeRoot ( SDKContext & SDKCtx , <nl> if ( Opts . Verbose ) <nl> llvm : : errs ( ) < < " Loading module : " < < Name < < " . . . \ n " ; <nl> auto * M = Ctx . getModuleByName ( Name ) ; <nl> - if ( ! M | | M - > failedToLoad ( ) ) { <nl> + if ( ! M | | M - > failedToLoad ( ) | | Ctx . Diags . hadAnyError ( ) ) { <nl> llvm : : errs ( ) < < " Failed to load module : " < < Name < < ' \ n ' ; <nl> if ( Opts . AbortOnModuleLoadFailure ) <nl> return nullptr ; <nl> swift : : ide : : api : : getSDKNodeRoot ( SDKContext & SDKCtx , <nl> return Collector . getSDKRoot ( ) ; <nl> } <nl> <nl> + void swift : : ide : : api : : dumpSDKRoot ( SDKNodeRoot * Root , StringRef OutputFile ) { <nl> + assert ( Root ) ; <nl> + auto Opts = Root - > getSDKContext ( ) . getOpts ( ) ; <nl> + if ( Opts . Verbose ) <nl> + llvm : : errs ( ) < < " Dumping SDK . . . \ n " ; <nl> + SwiftDeclCollector : : serialize ( OutputFile , Root ) ; <nl> + if ( Opts . 
Verbose ) <nl> + llvm : : errs ( ) < < " Dumped to " < < OutputFile < < " \ n " ; <nl> + } <nl> + <nl> int swift : : ide : : api : : dumpSDKContent ( const CompilerInvocation & InitInvok , <nl> const llvm : : StringSet < > & ModuleNames , <nl> StringRef OutputFile , CheckerOptions Opts ) { <nl> SDKContext SDKCtx ( Opts ) ; <nl> - SDKNode * Root = getSDKNodeRoot ( SDKCtx , InitInvok , ModuleNames , Opts ) ; <nl> + SDKNodeRoot * Root = getSDKNodeRoot ( SDKCtx , InitInvok , ModuleNames ) ; <nl> if ( ! Root ) <nl> return 1 ; <nl> - if ( Opts . Verbose ) <nl> - llvm : : errs ( ) < < " Dumping SDK . . . \ n " ; <nl> - SwiftDeclCollector : : serialize ( OutputFile , Root ) ; <nl> - if ( Opts . Verbose ) <nl> - llvm : : errs ( ) < < " Dumped to " < < OutputFile < < " \ n " ; <nl> + dumpSDKRoot ( Root , OutputFile ) ; <nl> return 0 ; <nl> } <nl> <nl> mmm a / tools / swift - api - digester / ModuleAnalyzerNodes . h <nl> ppp b / tools / swift - api - digester / ModuleAnalyzerNodes . h <nl> int dumpSwiftModules ( const CompilerInvocation & InitInvok , <nl> <nl> SDKNodeRoot * getSDKNodeRoot ( SDKContext & SDKCtx , <nl> const CompilerInvocation & InitInvok , <nl> - const llvm : : StringSet < > & ModuleNames , <nl> - CheckerOptions Opts ) ; <nl> + const llvm : : StringSet < > & ModuleNames ) ; <nl> + <nl> + SDKNodeRoot * getEmptySDKNodeRoot ( SDKContext & SDKCtx ) ; <nl> + <nl> + void dumpSDKRoot ( SDKNodeRoot * Root , StringRef OutputFile ) ; <nl> <nl> int dumpSDKContent ( const CompilerInvocation & InitInvok , <nl> const llvm : : StringSet < > & ModuleNames , <nl> mmm a / tools / swift - api - digester / swift - api - digester . cpp <nl> ppp b / tools / swift - api - digester / swift - api - digester . cpp <nl> namespace { <nl> DeserializeSDK , <nl> GenerateNameCorrectionTemplate , <nl> FindUsr , <nl> + GenerateEmptyBaseline , <nl> } ; <nl> } / / end anonymous namespace <nl> <nl> Action ( llvm : : cl : : desc ( " Mode : " ) , llvm : : cl : : init ( ActionType : : None ) , <nl> " Find USR for decls by given condition " ) , <nl> clEnumValN ( ActionType : : GenerateNameCorrectionTemplate , <nl> " generate - name - correction " , <nl> - " Generate name correction template " ) ) ) ; <nl> + " Generate name correction template " ) , <nl> + clEnumValN ( ActionType : : GenerateEmptyBaseline , <nl> + " generate - empty - baseline " , <nl> + " Generate an empty baseline " ) ) ) ; <nl> <nl> static llvm : : cl : : list < std : : string > <nl> SDKJsonPaths ( " input - paths " , <nl> static CheckerOptions getCheckOpts ( int argc , char * argv [ ] ) { <nl> return Opts ; <nl> } <nl> <nl> - static SDKNodeRoot * getSDKRoot ( const char * Main , SDKContext & Ctx , <nl> - CheckerOptions Opts , bool IsBaseline ) { <nl> + static SDKNodeRoot * getSDKRoot ( const char * Main , SDKContext & Ctx , bool IsBaseline ) { <nl> CompilerInvocation Invok ; <nl> llvm : : StringSet < > Modules ; <nl> if ( prepareForDump ( Main , Invok , Modules , IsBaseline ) ) <nl> return nullptr ; <nl> - return getSDKNodeRoot ( Ctx , Invok , Modules , Opts ) ; <nl> + return getSDKNodeRoot ( Ctx , Invok , Modules ) ; <nl> } <nl> <nl> static bool hasBaselineInput ( ) { <nl> int main ( int argc , char * argv [ ] ) { <nl> std : : move ( protocolWhitelist ) ) ; <nl> else { <nl> SDKContext Ctx ( Opts ) ; <nl> - return diagnoseModuleChange ( Ctx , getSDKRoot ( argv [ 0 ] , Ctx , Opts , true ) , <nl> - getSDKRoot ( argv [ 0 ] , Ctx , Opts , false ) , <nl> + return diagnoseModuleChange ( Ctx , getSDKRoot ( argv [ 0 ] , Ctx , true ) , <nl> + getSDKRoot ( argv [ 0 
] , Ctx , false ) , <nl> options : : OutputFile , <nl> std : : move ( protocolWhitelist ) ) ; <nl> } <nl> int main ( int argc , char * argv [ ] ) { <nl> Store . addStorePath ( Paths [ I ] ) ; <nl> return deserializeNameCorrection ( Store , options : : OutputFile ) ; <nl> } <nl> + case ActionType : : GenerateEmptyBaseline : { <nl> + SDKContext Ctx ( Opts ) ; <nl> + dumpSDKRoot ( getEmptySDKNodeRoot ( Ctx ) , options : : OutputFile ) ; <nl> + return 0 ; <nl> + } <nl> case ActionType : : FindUsr : { <nl> if ( options : : SDKJsonPaths . size ( ) ! = 1 ) { <nl> llvm : : cl : : PrintHelpMessage ( ) ; <nl> mmm a / utils / api_checker / swift - api - checker . py <nl> ppp b / utils / api_checker / swift - api - checker . py <nl> def escapeCmdArg ( arg ) : <nl> def check_call ( cmd , cwd = None , env = os . environ , verbose = False , output = None ) : <nl> if verbose : <nl> print ( ' ' . join ( [ escapeCmdArg ( arg ) for arg in cmd ] ) ) <nl> - return subprocess . check_call ( cmd , cwd = cwd , env = env , <nl> - stderr = None , stdout = output ) <nl> + try : <nl> + subprocess . check_call ( cmd , cwd = cwd , env = env , <nl> + stderr = None , stdout = output ) <nl> + return 0 <nl> + except Exception as error : <nl> + printerr ( error ) <nl> + return 1 <nl> <nl> <nl> def check_output ( cmd , verbose = False ) : <nl>
|
Merge pull request from nkcsgexi/abort-compiler-error
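The new -generate-empty-baseline action simply serializes a root-only SDK node, i.e. the JSON checked in above as test/api-digester/Outputs/empty-baseline.json. As a rough sketch of that output — plain C++ for illustration, not the digester's own serializer:

    #include <fstream>

    // Writes a root-only baseline equivalent to Outputs/empty-baseline.json
    // (no trailing newline, matching the checked-in file).
    void writeEmptyBaseline(const char* path) {
      std::ofstream out(path);
      out << "{\n"
             "  \"kind\": \"Root\",\n"
             "  \"name\": \"TopLevel\",\n"
             "  \"printedName\": \"TopLevel\",\n"
             "  \"json_format_version\": 1\n"
             "}";
    }

    int main() { writeEmptyBaseline("empty-baseline.json"); }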
|
apple/swift
|
1857a37e3bf3c630b574a18f384d7f74a382ccd9
|
2019-08-22T02:01:55Z
|
mmm a / atom / browser / api / lib / exports / electron . coffee <nl> ppp b / atom / browser / api / lib / exports / electron . coffee <nl> <nl> # Import common modules . <nl> module . exports = require ' . . / . . / . . / . . / common / api / lib / exports / electron ' <nl> <nl> + v8Util = process . atomBinding ' v8_util ' <nl> + v8Util . setHiddenValue module . exports , ' electronModule ' , true <nl> + <nl> Object . defineProperties module . exports , <nl> # Browser side modules , please sort with alphabet order . <nl> app : <nl> mmm a / atom / browser / lib / rpc - server . coffee <nl> ppp b / atom / browser / lib / rpc - server . coffee <nl> <nl> - { ipcMain } = require ' electron ' <nl> path = require ' path ' <nl> + <nl> + electron = require ' electron ' <nl> + { ipcMain } = electron <nl> objectsRegistry = require ' . / objects - registry ' <nl> <nl> v8Util = process . atomBinding ' v8_util ' <nl> valueToMeta = ( sender , value , optimizeSimpleObject = false ) - > <nl> meta . type = ' array ' if Array . isArray value <nl> meta . type = ' error ' if value instanceof Error <nl> meta . type = ' date ' if value instanceof Date <nl> - meta . type = ' promise ' if value ? and value . constructor . name is ' Promise ' <nl> + meta . type = ' promise ' if value ? . constructor . name is ' Promise ' <nl> + <nl> + # require ( ' electron ' ) . <nl> + if meta . type is ' object ' and v8Util . getHiddenValue value , ' electronModule ' <nl> + meta . type = ' electronModule ' <nl> <nl> # Treat simple objects as value . <nl> if optimizeSimpleObject and meta . type is ' object ' and v8Util . getHiddenValue value , ' simple ' <nl> valueToMeta = ( sender , value , optimizeSimpleObject = false ) - > <nl> meta . members = plainObjectToMeta value <nl> else if meta . type is ' date ' <nl> meta . value = value . getTime ( ) <nl> + else if meta . type is ' electronModule ' <nl> + meta . members = ( name for name of value ) <nl> else <nl> meta . type = ' value ' <nl> meta . value = value <nl> ipcMain . on ' ATOM_BROWSER_REQUIRE ' , ( event , module ) - > <nl> catch e <nl> event . returnValue = exceptionToMeta e <nl> <nl> + ipcMain . on ' ATOM_BROWSER_GET_BUILTIN ' , ( event , module ) - > <nl> + try <nl> + event . returnValue = valueToMeta event . sender , electron [ module ] <nl> + catch e <nl> + event . returnValue = exceptionToMeta e <nl> + <nl> ipcMain . on ' ATOM_BROWSER_GLOBAL ' , ( event , name ) - > <nl> try <nl> event . returnValue = valueToMeta event . sender , global [ name ] <nl> mmm a / atom / common / api / lib / exports / electron . coffee <nl> ppp b / atom / common / api / lib / exports / electron . coffee <nl> Object . defineProperties exports , <nl> # Must be enumerable , otherwise it woulde be invisible to remote module . <nl> enumerable : true <nl> get : - > require ' . . / clipboard ' <nl> - crashRepoter : <nl> + crashReporter : <nl> enumerable : true <nl> get : - > require ' . . / crash - reporter ' <nl> nativeImage : <nl> mmm a / atom / renderer / api / lib / remote . coffee <nl> ppp b / atom / renderer / api / lib / remote . coffee <nl> wrapArgs = ( args , visited = [ ] ) - > <nl> type : ' array ' , value : wrapArgs ( value , visited ) <nl> else if Buffer . isBuffer value <nl> type : ' buffer ' , value : Array : : slice . call ( value , 0 ) <nl> - else if value ? and value . constructor . name is ' Promise ' <nl> + else if value ? . constructor . name is ' Promise ' <nl> type : ' promise ' , then : valueToMeta ( value . then . bind ( value ) ) <nl> else if value ? 
and typeof value is ' object ' and v8Util . getHiddenValue value , ' atomId ' <nl> type : ' remote - object ' , id : v8Util . getHiddenValue value , ' atomId ' <nl> metaToValue = ( meta ) - > <nl> when ' date ' then new Date ( meta . value ) <nl> when ' exception ' <nl> throw new Error ( " # { meta . message } \ n # { meta . stack } " ) <nl> + when ' electronModule ' <nl> + # require ( ' electron ' ) . <nl> + ret = { } <nl> + for member in meta . members <nl> + do ( member ) - > <nl> + Object . defineProperty ret , member , <nl> + enumerable : true <nl> + get : - > exports . getBuiltin member <nl> + ret <nl> else <nl> if meta . type is ' function ' <nl> # A shadow class to represent the remote function object . <nl> exports . require = ( module ) - > <nl> meta = ipcRenderer . sendSync ' ATOM_BROWSER_REQUIRE ' , module <nl> moduleCache [ module ] = metaToValue meta <nl> <nl> + # Alias to remote . require ( ' electron ' ) . xxx . <nl> + builtinCache = { } <nl> + exports . getBuiltin = ( module ) - > <nl> + return builtinCache [ module ] if builtinCache [ module ] ? <nl> + <nl> + meta = ipcRenderer . sendSync ' ATOM_BROWSER_GET_BUILTIN ' , module <nl> + builtinCache [ module ] = metaToValue meta <nl> + <nl> # Get current BrowserWindow object . <nl> windowCache = null <nl> exports . getCurrentWindow = - > <nl>
|
Optimize remote.require('electron')
|
electron/electron
|
8b2942c2795d2371aea0dacfddd2b58a66752c58
|
2015-11-12T12:30:40Z
|
mmm a / src / google / protobuf / stubs / atomicops_internals_generic_gcc . h <nl> ppp b / src / google / protobuf / stubs / atomicops_internals_generic_gcc . h <nl> inline Atomic64 NoBarrier_CompareAndSwap ( volatile Atomic64 * ptr , <nl> return old_value ; <nl> } <nl> <nl> + inline Atomic64 NoBarrier_AtomicIncrement ( volatile Atomic64 * ptr , <nl> + Atomic64 increment ) { <nl> + return __atomic_add_fetch ( ptr , increment , __ATOMIC_RELAXED ) ; <nl> + } <nl> + <nl> + inline void NoBarrier_Store ( volatile Atomic64 * ptr , Atomic64 value ) { <nl> + __atomic_store_n ( ptr , value , __ATOMIC_RELAXED ) ; <nl> + } <nl> + <nl> + inline Atomic64 NoBarrier_AtomicExchange ( volatile Atomic64 * ptr , <nl> + Atomic64 new_value ) { <nl> + return __atomic_exchange_n ( ptr , new_value , __ATOMIC_RELAXED ) ; <nl> + } <nl> + <nl> + inline Atomic64 NoBarrier_Load ( volatile const Atomic64 * ptr ) { <nl> + return __atomic_load_n ( ptr , __ATOMIC_RELAXED ) ; <nl> + } <nl> + <nl> # endif / / defined ( __LP64__ ) <nl> <nl> } / / namespace internal <nl>
|
Adding missing generic gcc 64-bit atomicops.
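A hedged, standalone C++ sketch of what the added functions build on: GCC/Clang's __atomic builtins with __ATOMIC_RELAXED provide atomic but unordered 64-bit operations. This mirrors the shape of the new functions without being protobuf's header itself; the names are hypothetical.

    #include <cstdint>
    #include <cstdio>

    typedef int64_t Atomic64;

    // Relaxed ("no barrier") primitives on top of the __atomic builtins.
    inline Atomic64 RelaxedAtomicIncrement(volatile Atomic64* ptr, Atomic64 inc) {
      return __atomic_add_fetch(ptr, inc, __ATOMIC_RELAXED);
    }
    inline void RelaxedStore(volatile Atomic64* ptr, Atomic64 value) {
      __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
    }
    inline Atomic64 RelaxedLoad(const volatile Atomic64* ptr) {
      return __atomic_load_n(ptr, __ATOMIC_RELAXED);
    }
    inline Atomic64 RelaxedExchange(volatile Atomic64* ptr, Atomic64 new_value) {
      return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED);
    }

    int main() {
      Atomic64 counter = 0;
      RelaxedStore(&counter, 41);
      RelaxedAtomicIncrement(&counter, 1);
      std::printf("%lld\n", (long long)RelaxedLoad(&counter));  // prints 42
      return 0;
    }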
|
protocolbuffers/protobuf
|
fd1c289886c341af4426873a7214067c245e4bbe
|
2015-12-14T21:08:05Z
|
mmm a / extensions / CCArmature / external_tool / Json / lib_json / json_reader . cpp <nl> ppp b / extensions / CCArmature / external_tool / Json / lib_json / json_reader . cpp <nl> <nl> # include < cstring > <nl> # include < iostream > <nl> # include < stdexcept > <nl> + # include < string . h > <nl> + # include < stdio . h > <nl> <nl> # if _MSC_VER > = 1400 / / VC + + 8 . 0 <nl> # pragma warning ( disable : 4996 ) / / disable warning about strdup being deprecated . <nl> mmm a / extensions / CCArmature / external_tool / Json / lib_json / json_value . cpp <nl> ppp b / extensions / CCArmature / external_tool / Json / lib_json / json_value . cpp <nl> <nl> # endif <nl> # include < cstddef > / / size_t <nl> <nl> + # include < string . h > <nl> + <nl> # define JSON_ASSERT_UNREACHABLE assert ( false ) <nl> # define JSON_ASSERT ( condition ) assert ( condition ) ; / / @ todo < = change this into an exception throw <nl> # define JSON_FAIL_MESSAGE ( message ) throw std : : runtime_error ( message ) ; <nl>
|
[ BB ] Fixing compilation errors for CocoStudio runtime .
|
cocos2d/cocos2d-x
|
13dd26c85baf75abb723ddf10e85c93d9fc334c8
|
2013-06-09T07:29:51Z
|
mmm a / src / bindings / csharp / openalpr - net / openalpr - net . cpp <nl> ppp b / src / bindings / csharp / openalpr - net / openalpr - net . cpp <nl> using namespace msclr : : interop ; <nl> using namespace System : : Collections : : Generic ; <nl> using namespace System : : Runtime : : InteropServices ; <nl> using namespace System : : Drawing ; <nl> + using namespace System : : Drawing : : Imaging ; <nl> + using namespace System : : IO ; <nl> using namespace alpr ; <nl> <nl> namespace openalprnet { <nl> namespace openalprnet { <nl> private ref class AlprHelper sealed <nl> { <nl> public : <nl> + <nl> static std : : vector < char > ToVector ( array < char > ^ src ) <nl> { <nl> std : : vector < char > result ( src - > Length ) ; <nl> namespace openalprnet { <nl> return result ; <nl> } <nl> <nl> + static cv : : Mat BitmapToMat ( Bitmap ^ bitmap ) <nl> + { <nl> + int channels = 0 ; <nl> + <nl> + switch ( bitmap - > PixelFormat ) <nl> + { <nl> + case PixelFormat : : Format8bppIndexed : <nl> + case PixelFormat : : Format1bppIndexed : <nl> + channels = 1 ; <nl> + break ; <nl> + case PixelFormat : : Format24bppRgb : <nl> + channels = 3 ; <nl> + break ; <nl> + case PixelFormat : : Format32bppRgb : <nl> + case PixelFormat : : Format32bppArgb : <nl> + case PixelFormat : : Format32bppPArgb : <nl> + channels = 4 ; <nl> + break ; <nl> + default : <nl> + throw gcnew NotImplementedException ( ) ; <nl> + } <nl> + <nl> + BitmapData ^ bitmapData = bitmap - > LockBits ( <nl> + System : : Drawing : : Rectangle ( 0 , 0 , bitmap - > Width , bitmap - > Height ) , <nl> + ImageLockMode : : ReadOnly , <nl> + bitmap - > PixelFormat <nl> + ) ; <nl> + <nl> + cv : : Mat dstMat ( cv : : Size ( bitmap - > Width , bitmap - > Height ) , CV_8UC ( channels ) , reinterpret_cast < char * > ( bitmapData - > Scan0 . ToPointer ( ) ) ) ; <nl> + <nl> + bitmap - > UnlockBits ( bitmapData ) ; <nl> + <nl> + return dstMat ; <nl> + } <nl> + <nl> + static Bitmap ^ MatToBitmap ( cv : : Mat mat ) <nl> + { <nl> + const int width = mat . size ( ) . width ; <nl> + const int height = mat . size ( ) . height ; <nl> + const int channels = mat . channels ( ) ; <nl> + const int totalSize = mat . total ( ) ; <nl> + void * data = reinterpret_cast < void * > ( mat . data ) ; <nl> + Bitmap ^ bitmap ; <nl> + <nl> + if ( channels = = 1 ) <nl> + { <nl> + bitmap = gcnew Bitmap ( width , height , PixelFormat : : Format8bppIndexed ) ; <nl> + <nl> + ColorPalette ^ palette = bitmap - > Palette ; <nl> + for ( int i = 0 ; i < 256 ; i + + ) <nl> + { <nl> + palette - > Entries [ i ] = Color : : FromArgb ( i , i , i ) ; <nl> + } <nl> + <nl> + bitmap - > Palette = palette ; <nl> + } <nl> + else <nl> + { <nl> + bitmap = gcnew Bitmap ( width , height , PixelFormat : : Format24bppRgb ) ; <nl> + } <nl> + <nl> + System : : Drawing : : Imaging : : BitmapData ^ bitmapData = bitmap - > LockBits ( <nl> + System : : Drawing : : Rectangle ( 0 , 0 , bitmap - > Width , bitmap - > Height ) , <nl> + System : : Drawing : : Imaging : : ImageLockMode : : ReadWrite , <nl> + bitmap - > PixelFormat <nl> + ) ; <nl> + <nl> + : : memcpy ( bitmapData - > Scan0 . 
ToPointer ( ) , data , totalSize ) ; <nl> + <nl> + bitmap - > UnlockBits ( bitmapData ) ; <nl> + <nl> + return bitmap ; <nl> + } <nl> + <nl> + static MemoryStream ^ BitmapToMemoryStream ( Bitmap ^ bitmap , ImageFormat ^ imageFormat ) <nl> + { <nl> + MemoryStream ^ ms = gcnew System : : IO : : MemoryStream ( ) ; <nl> + bitmap - > Save ( ms , imageFormat ) ; <nl> + return ms ; <nl> + } <nl> + <nl> + static std : : vector < char > MemoryStreamToVector ( MemoryStream ^ ms ) <nl> + { <nl> + unsigned char * byteArray = ToCharPtr ( ms - > ToArray ( ) ) ; <nl> + std : : vector < char > result ( byteArray , byteArray + ms - > Length ) ; <nl> + return result ; <nl> + } <nl> + <nl> static std : : vector < AlprRegionOfInterest > ToVector ( List < System : : Drawing : : Rectangle > ^ src ) <nl> { <nl> std : : vector < AlprRegionOfInterest > result ; <nl> namespace openalprnet { <nl> } <nl> return std : : string ( ) ; <nl> } <nl> + <nl> + static System : : Drawing : : Rectangle ToRectangle ( cv : : Rect rect ) <nl> + { <nl> + return System : : Drawing : : Rectangle ( rect . x , rect . y , rect . width , rect . height ) ; <nl> + } <nl> + <nl> } ; <nl> <nl> public enum class AlprDetectorTypeNet : int { <nl>
|
Add marshalling support for Bitmap , Rectangle and MemoryStream .
|
openalpr/openalpr
|
0490bf9127ec7f3586481cfb11bada36acf77855
|
2015-07-08T11:06:46Z
|
mmm a / xbmc / video / dialogs / GUIDialogVideoInfo . cpp <nl> ppp b / xbmc / video / dialogs / GUIDialogVideoInfo . cpp <nl> void CGUIDialogVideoInfo : : OnInitWindow ( ) <nl> m_hasUpdatedThumb = false ; <nl> m_bViewReview = true ; <nl> <nl> - CVideoDatabase database ; <nl> - ADDON : : ScraperPtr scraper ; <nl> - <nl> - if ( database . Open ( ) ) <nl> - { <nl> - scraper = database . GetScraperForPath ( m_movieItem - > GetVideoInfoTag ( ) - > GetPath ( ) ) ; <nl> - database . Close ( ) ; <nl> - } <nl> - <nl> - CONTROL_ENABLE_ON_CONDITION ( CONTROL_BTN_REFRESH , ( CProfilesManager : : Get ( ) . GetCurrentProfile ( ) . canWriteDatabases ( ) | | g_passwordManager . bMasterUser ) & & ! StringUtils : : StartsWithNoCase ( m_movieItem - > GetVideoInfoTag ( ) - > m_strIMDBNumber , " xx " ) & & scraper ) ; <nl> + CONTROL_ENABLE_ON_CONDITION ( CONTROL_BTN_REFRESH , ( CProfilesManager : : Get ( ) . GetCurrentProfile ( ) . canWriteDatabases ( ) | | g_passwordManager . bMasterUser ) & & ! StringUtils : : StartsWithNoCase ( m_movieItem - > GetVideoInfoTag ( ) - > m_strIMDBNumber , " xx " ) ) ; <nl> CONTROL_ENABLE_ON_CONDITION ( CONTROL_BTN_GET_THUMB , ( CProfilesManager : : Get ( ) . GetCurrentProfile ( ) . canWriteDatabases ( ) | | g_passwordManager . bMasterUser ) & & ! StringUtils : : StartsWithNoCase ( m_movieItem - > GetVideoInfoTag ( ) - > m_strIMDBNumber . c_str ( ) + 2 , " plugin " ) ) ; <nl> <nl> VIDEODB_CONTENT_TYPE type = ( VIDEODB_CONTENT_TYPE ) m_movieItem - > GetVideoContentType ( ) ; <nl> mmm a / xbmc / video / windows / GUIWindowVideoNav . cpp <nl> ppp b / xbmc / video / windows / GUIWindowVideoNav . cpp <nl> void CGUIWindowVideoNav : : PlayItem ( int iItem ) <nl> <nl> void CGUIWindowVideoNav : : OnInfo ( CFileItem * pItem , ADDON : : ScraperPtr & scraper ) <nl> { <nl> - m_database . Open ( ) ; / / since we can be called from the music library without being inited <nl> - if ( pItem - > IsVideoDb ( ) ) <nl> - scraper = m_database . GetScraperForPath ( pItem - > GetVideoInfoTag ( ) - > m_strPath ) ; <nl> - else <nl> + if ( ! scraper | | scraper - > Content ( ) = = CONTENT_NONE ) <nl> { <nl> - std : : string strPath , strFile ; <nl> - URIUtils : : Split ( pItem - > GetPath ( ) , strPath , strFile ) ; <nl> - scraper = m_database . GetScraperForPath ( strPath ) ; <nl> + m_database . Open ( ) ; / / since we can be called from the music library without being inited <nl> + if ( pItem - > IsVideoDb ( ) ) <nl> + scraper = m_database . GetScraperForPath ( pItem - > GetVideoInfoTag ( ) - > m_strPath ) ; <nl> + else <nl> + { <nl> + std : : string strPath , strFile ; <nl> + URIUtils : : Split ( pItem - > GetPath ( ) , strPath , strFile ) ; <nl> + scraper = m_database . GetScraperForPath ( strPath ) ; <nl> + } <nl> + m_database . Close ( ) ; <nl> } <nl> - m_database . Close ( ) ; <nl> CGUIWindowVideoBase : : OnInfo ( pItem , scraper ) ; <nl> } <nl> <nl>
|
[ videos ] check whether or not we already know about the scraper and re - use it
|
xbmc/xbmc
|
2a4c6a5723d8177e83b832a6911c8248272c8f67
|
2015-02-23T13:34:22Z
|
mmm a / db / btree . h <nl> ppp b / db / btree . h <nl> namespace mongo { <nl> class BtreeCursor : public Cursor { <nl> protected : <nl> BtreeCursor ( NamespaceDetails * _d , int _idxNo , const IndexDetails & , const BSONObj & startKey , const BSONObj & endKey , bool endKeyInclusive , int direction ) ; <nl> - BtreeCursor ( NamespaceDetails * _d , int _idxNo , const IndexDetails & _id , const shared_ptr < FieldRangeVector > & _bounds , int _direction ) ; <nl> + BtreeCursor ( NamespaceDetails * _d , int _idxNo , const IndexDetails & _id , const shared_ptr < FieldRangeVector > & _bounds , int _direction , bool useFRVSpec = false ) ; <nl> public : <nl> virtual ~ BtreeCursor ( ) ; <nl> / * * makes an appropriate subclass depending on the index version * / <nl> + static BtreeCursor * make ( NamespaceDetails * _d , const IndexDetails & , const BSONObj & startKey , const BSONObj & endKey , bool endKeyInclusive , int direction ) ; <nl> + static BtreeCursor * make ( NamespaceDetails * _d , const IndexDetails & _id , const shared_ptr < FieldRangeVector > & _bounds , int _direction , bool useFRVSpec = false ) ; <nl> static BtreeCursor * make ( NamespaceDetails * _d , int _idxNo , const IndexDetails & , const BSONObj & startKey , const BSONObj & endKey , bool endKeyInclusive , int direction ) ; <nl> - static BtreeCursor * make ( NamespaceDetails * _d , int _idxNo , const IndexDetails & _id , const shared_ptr < FieldRangeVector > & _bounds , int _direction ) ; <nl> + static BtreeCursor * make ( NamespaceDetails * _d , int _idxNo , const IndexDetails & _id , const shared_ptr < FieldRangeVector > & _bounds , int _direction , bool useFRVSpec = false ) ; <nl> <nl> virtual bool ok ( ) { return ! bucket . isNull ( ) ; } <nl> virtual bool advance ( ) ; <nl> mmm a / db / btreecursor . cpp <nl> ppp b / db / btreecursor . cpp <nl> namespace mongo { <nl> <nl> BtreeCursorImpl ( NamespaceDetails * a , int b , const IndexDetails & c , const BSONObj & d , const BSONObj & e , bool f , int g ) : <nl> BtreeCursor ( a , b , c , d , e , f , g ) { } <nl> - BtreeCursorImpl ( NamespaceDetails * _d , int _idxNo , const IndexDetails & _id , const shared_ptr < FieldRangeVector > & _bounds , int _direction ) : <nl> - BtreeCursor ( _d , _idxNo , _id , _bounds , _direction ) <nl> + BtreeCursorImpl ( NamespaceDetails * _d , int _idxNo , const IndexDetails & _id , const shared_ptr < FieldRangeVector > & _bounds , int _direction , bool useFRVSpec = false ) : <nl> + BtreeCursor ( _d , _idxNo , _id , _bounds , _direction , useFRVSpec ) <nl> { <nl> pair < DiskLoc , int > noBestParent ; <nl> indexDetails . head . 
btree < V > ( ) - > customLocate ( bucket , keyOfs , startKey , 0 , false , _boundsIterator - > cmp ( ) , _boundsIterator - > inc ( ) , _ordering , _direction , noBestParent ) ; <nl> namespace mongo { <nl> } <nl> } ; * / <nl> <nl> + BtreeCursor * BtreeCursor : : make ( <nl> + NamespaceDetails * _d , const IndexDetails & _id , <nl> + const shared_ptr < FieldRangeVector > & _bounds , int _direction , bool useFRVSpec ) <nl> + { <nl> + return make ( _d , _d - > idxNo ( ( IndexDetails & ) _id ) , _id , _bounds , _direction , useFRVSpec ) ; <nl> + } <nl> + <nl> + BtreeCursor * BtreeCursor : : make ( <nl> + NamespaceDetails * _d , const IndexDetails & _id , <nl> + const BSONObj & startKey , const BSONObj & endKey , bool endKeyInclusive , int direction ) <nl> + { <nl> + return make ( _d , _d - > idxNo ( ( IndexDetails & ) _id ) , _id , startKey , endKey , endKeyInclusive , direction ) ; <nl> + } <nl> + <nl> + <nl> BtreeCursor * BtreeCursor : : make ( <nl> NamespaceDetails * _d , int _idxNo , const IndexDetails & _id , <nl> const BSONObj & startKey , const BSONObj & endKey , bool endKeyInclusive , int direction ) <nl> namespace mongo { <nl> <nl> BtreeCursor * BtreeCursor : : make ( <nl> NamespaceDetails * _d , int _idxNo , const IndexDetails & _id , <nl> - const shared_ptr < FieldRangeVector > & _bounds , int _direction ) <nl> + const shared_ptr < FieldRangeVector > & _bounds , int _direction , bool useFRVSpec ) <nl> { <nl> int v = _id . version ( ) ; <nl> if ( v = = 1 ) <nl> - return new BtreeCursorImpl < V1 > ( _d , _idxNo , _id , _bounds , _direction ) ; <nl> + return new BtreeCursorImpl < V1 > ( _d , _idxNo , _id , _bounds , _direction , useFRVSpec ) ; <nl> if ( v = = 0 ) <nl> - return new BtreeCursorImpl < V0 > ( _d , _idxNo , _id , _bounds , _direction ) ; <nl> + return new BtreeCursorImpl < V0 > ( _d , _idxNo , _id , _bounds , _direction , useFRVSpec ) ; <nl> uasserted ( 14801 , str : : stream ( ) < < " unsupported index version " < < v ) ; <nl> <nl> / / just check we are in sync with this method <nl> namespace mongo { <nl> audit ( ) ; <nl> } <nl> <nl> - BtreeCursor : : BtreeCursor ( NamespaceDetails * _d , int _idxNo , const IndexDetails & _id , const shared_ptr < FieldRangeVector > & _bounds , int _direction ) <nl> + BtreeCursor : : BtreeCursor ( NamespaceDetails * _d , int _idxNo , const IndexDetails & _id , const shared_ptr < FieldRangeVector > & _bounds , int _direction , bool useFRVSpec ) <nl> : <nl> d ( _d ) , idxNo ( _idxNo ) , <nl> _endKeyInclusive ( true ) , <nl> namespace mongo { <nl> _direction ( _direction ) , <nl> _bounds ( ( assert ( _bounds . get ( ) ) , _bounds ) ) , <nl> _boundsIterator ( new FieldRangeVectorIterator ( * _bounds ) ) , <nl> - _spec ( _id . getSpec ( ) ) , <nl> + _spec ( useFRVSpec ? _bounds - > getSpec ( ) : _id . getSpec ( ) ) , <nl> _independentFieldRanges ( true ) , <nl> _nscanned ( 0 ) { <nl> massert ( 13384 , " BtreeCursor FieldRangeVector constructor doesn ' t accept special indexes " , ! _spec . getType ( ) ) ; <nl> mmm a / db / queryutil . h <nl> ppp b / db / queryutil . h <nl> namespace mongo { <nl> / * * @ return a client readable representation of ' this ' * / <nl> BSONObj obj ( ) const ; <nl> <nl> + const IndexSpec & getSpec ( ) { return _indexSpec ; } <nl> + <nl> / * * <nl> * @ return true iff the provided document matches valid ranges on all <nl> * of this FieldRangeVector ' s fields , which is the case iff this document <nl>
|
Allow cursors to use the spec provided by FRV - prep for SERVER - 3688
|
mongodb/mongo
|
8c2a396be396e56da602aa4d49199db586a6ebcb
|
2011-10-04T15:27:42Z
|
mmm a / tools / imglab / src / flip_dataset . cpp <nl> ppp b / tools / imglab / src / flip_dataset . cpp <nl> <nl> # include < string > <nl> # include " common . h " <nl> # include < dlib / image_transforms . h > <nl> + # include < dlib / optimization . h > <nl> + # include < dlib / image_processing . h > <nl> <nl> using namespace dlib ; <nl> using namespace std ; <nl> <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> <nl> + std : : vector < long > align_points ( <nl> + const std : : vector < dpoint > & from , <nl> + const std : : vector < dpoint > & to , <nl> + double min_angle = - 90 * pi / 180 . 0 , <nl> + double max_angle = - 90 * pi / 180 . 0 , <nl> + long num_angles = 180 <nl> + ) <nl> + / * ! <nl> + ensures <nl> + - Figures out how to align the points in from with the points in to . Returns an <nl> + assignment array A that indicates that from [ i ] matches with to [ A [ i ] ] . <nl> + <nl> + We use the Hungarian algorithm with a search over reasonable angles . This method <nl> + works because we just need to account for a translation and a mild rotation and <nl> + nothing else . If there is any other more complex mapping then you probably don ' t <nl> + have landmarks that make sense to flip . <nl> + ! * / <nl> + { <nl> + DLIB_CASSERT ( from . size ( ) = = to . size ( ) ) ; <nl> + <nl> + std : : vector < long > best_assignment ; <nl> + double best_assignment_cost = std : : numeric_limits < double > : : infinity ( ) ; <nl> + <nl> + matrix < double > dists ( from . size ( ) , to . size ( ) ) ; <nl> + matrix < long long > idists ; <nl> + <nl> + for ( auto angle : linspace ( min_angle , max_angle , num_angles ) ) <nl> + { <nl> + for ( long r = 0 ; r < dists . nr ( ) ; + + r ) <nl> + { <nl> + for ( long c = 0 ; c < dists . nc ( ) ; + + c ) <nl> + { <nl> + dists ( r , c ) = length_squared ( from [ r ] - to [ c ] ) ; <nl> + } <nl> + } <nl> + <nl> + idists = matrix_cast < long long > ( - round ( std : : numeric_limits < long long > : : max ( ) * ( dists / max ( dists ) ) ) ) ; <nl> + <nl> + auto assignment = max_cost_assignment ( idists ) ; <nl> + auto cost = assignment_cost ( dists , assignment ) ; <nl> + if ( cost < best_assignment_cost ) <nl> + { <nl> + best_assignment_cost = cost ; <nl> + best_assignment = std : : move ( assignment ) ; <nl> + } <nl> + } <nl> + <nl> + <nl> + / / Now compute the alignment error in terms of average distance moved by each part . We <nl> + / / do this so we can give the user a warning if it ' s impossible to make a good <nl> + / / alignment . <nl> + running_stats < double > rs ; <nl> + std : : vector < dpoint > tmp ( to . size ( ) ) ; <nl> + for ( size_t i = 0 ; i < to . size ( ) ; + + i ) <nl> + tmp [ best_assignment [ i ] ] = to [ i ] ; <nl> + auto tform = find_similarity_transform ( from , tmp ) ; <nl> + for ( size_t i = 0 ; i < from . size ( ) ; + + i ) <nl> + rs . add ( length ( tform ( from [ i ] ) - tmp [ i ] ) ) ; <nl> + if ( rs . mean ( ) > 0 . 05 ) <nl> + { <nl> + cout < < " WARNING , your dataset has object part annotations and you asked imglab to " < < endl ; <nl> + cout < < " flip the data . Imglab tried to adjust the part labels so that the average " < < endl ; <nl> + cout < < " part layout in the flipped dataset is the same as the source dataset . " < < endl ; <nl> + cout < < " However , the part annotation scheme doesn ' t seem to be left - right symmetric . 
" < < endl ; <nl> + cout < < " You should manually review the output to make sure the part annotations are " < < endl ; <nl> + cout < < " labeled as you expect . " < < endl ; <nl> + } <nl> + <nl> + <nl> + return best_assignment ; <nl> + } <nl> + <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> + <nl> + std : : map < string , dpoint > normalized_parts ( <nl> + const image_dataset_metadata : : box & b <nl> + ) <nl> + { <nl> + auto tform = dlib : : impl : : normalizing_tform ( b . rect ) ; <nl> + std : : map < string , dpoint > temp ; <nl> + for ( auto & p : b . parts ) <nl> + temp [ p . first ] = tform ( p . second ) ; <nl> + return temp ; <nl> + } <nl> + <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> + <nl> + std : : map < string , dpoint > average_parts ( <nl> + const image_dataset_metadata : : dataset & data <nl> + ) <nl> + / * ! <nl> + ensures <nl> + - returns the average part layout over all objects in data . This is done by <nl> + centering the parts inside their rects and then averaging all the objects . <nl> + ! * / <nl> + { <nl> + std : : map < string , dpoint > psum ; <nl> + std : : map < string , double > pcnt ; <nl> + for ( auto & image : data . images ) <nl> + { <nl> + for ( auto & box : image . boxes ) <nl> + { <nl> + for ( auto & p : normalized_parts ( box ) ) <nl> + { <nl> + psum [ p . first ] + = p . second ; <nl> + pcnt [ p . first ] + = 1 ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + / / make into an average <nl> + for ( auto & p : psum ) <nl> + p . second / = pcnt [ p . first ] ; <nl> + <nl> + return psum ; <nl> + } <nl> + <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> + <nl> + void make_part_labeling_match_target_dataset ( <nl> + const image_dataset_metadata : : dataset & target , <nl> + image_dataset_metadata : : dataset & data <nl> + ) <nl> + / * ! <nl> + This function tries to adjust the part labels in data so that the average part layout <nl> + in data is the same as target , according to the string labels . Therefore , it doesn ' t <nl> + adjust part positions , instead it changes the string labels on the parts to achieve <nl> + this . This really only makes sense when you flipped a dataset that contains left - right <nl> + symmetric objects and you want to remap the part labels of the flipped data so that <nl> + they match the unflipped data ' s annotation scheme . <nl> + ! * / <nl> + { <nl> + auto target_parts = average_parts ( target ) ; <nl> + auto data_parts = average_parts ( data ) ; <nl> + <nl> + / / Convert to a form align_points ( ) understands . We also need to keep track of the <nl> + / / labels for later . <nl> + std : : vector < dpoint > from , to ; <nl> + std : : vector < string > from_labels , to_labels ; <nl> + for ( auto & p : target_parts ) <nl> + { <nl> + from_labels . emplace_back ( p . first ) ; <nl> + from . emplace_back ( p . second ) ; <nl> + } <nl> + for ( auto & p : data_parts ) <nl> + { <nl> + to_labels . emplace_back ( p . first ) ; <nl> + to . emplace_back ( p . second ) ; <nl> + } <nl> + <nl> + auto assignment = align_points ( from , to ) ; <nl> + / / so now we know that from_labels [ i ] should replace to_labels [ assignment [ i ] ] <nl> + std : : map < string , string > label_mapping ; <nl> + for ( size_t i = 0 ; i < assignment . 
size ( ) ; + + i ) <nl> + label_mapping [ to_labels [ assignment [ i ] ] ] = from_labels [ i ] ; <nl> + <nl> + / / now apply the label mapping to the dataset <nl> + for ( auto & image : data . images ) <nl> + { <nl> + for ( auto & box : image . boxes ) <nl> + { <nl> + std : : map < string , point > temp ; <nl> + for ( auto & p : box . parts ) <nl> + temp [ label_mapping [ p . first ] ] = p . second ; <nl> + box . parts = std : : move ( temp ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> + <nl> void flip_dataset ( const command_line_parser & parser ) <nl> { <nl> - image_dataset_metadata : : dataset metadata ; <nl> - const string datasource = parser . option ( " flip " ) . argument ( ) ; <nl> + image_dataset_metadata : : dataset metadata , orig_metadata ; <nl> + string datasource ; <nl> + if ( parser . option ( " flip " ) ) <nl> + datasource = parser . option ( " flip " ) . argument ( ) ; <nl> + else <nl> + datasource = parser . option ( " flip - basic " ) . argument ( ) ; <nl> load_image_dataset_metadata ( metadata , datasource ) ; <nl> + orig_metadata = metadata ; <nl> <nl> / / Set the current directory to be the one that contains the <nl> / / metadata file . We do this because the file might contain <nl> void flip_dataset ( const command_line_parser & parser ) <nl> metadata . images [ i ] . boxes [ j ] . rect = impl : : flip_rect_left_right ( metadata . images [ i ] . boxes [ j ] . rect , get_rect ( img ) ) ; <nl> <nl> / / flip all the object parts <nl> - std : : map < std : : string , point > : : iterator k ; <nl> - for ( k = metadata . images [ i ] . boxes [ j ] . parts . begin ( ) ; k ! = metadata . images [ i ] . boxes [ j ] . parts . end ( ) ; + + k ) <nl> + for ( auto & part : metadata . images [ i ] . boxes [ j ] . parts ) <nl> { <nl> - k - > second = impl : : flip_rect_left_right ( rectangle ( k - > second , k - > second ) , get_rect ( img ) ) . tl_corner ( ) ; <nl> + part . second = impl : : flip_rect_left_right ( rectangle ( part . second , part . second ) , get_rect ( img ) ) . tl_corner ( ) ; <nl> } <nl> } <nl> <nl> metadata . images [ i ] . filename = filename ; <nl> } <nl> <nl> + if ( ! parser . option ( " flip - basic " ) ) <nl> + make_part_labeling_match_target_dataset ( orig_metadata , metadata ) ; <nl> + <nl> save_image_dataset_metadata ( metadata , metadata_filename ) ; <nl> } <nl> <nl> mmm a / tools / imglab / src / main . cpp <nl> ppp b / tools / imglab / src / main . cpp <nl> <nl> # include < dlib / dir_nav . h > <nl> <nl> <nl> - const char * VERSION = " 1 . 12 " ; <nl> + const char * VERSION = " 1 . 13 " ; <nl> <nl> <nl> <nl> int main ( int argc , char * * argv ) <nl> " image tags from < arg1 > . The results are saved into merged . xml and neither < arg1 > or " <nl> " < arg2 > files are modified . " , 2 ) ; <nl> parser . add_option ( " flip " , " Read an XML image dataset from the < arg > XML file and output a left - right flipped " <nl> - " version of the dataset and an accompanying flipped XML file named flipped_ < arg > . " , 1 ) ; <nl> + " version of the dataset and an accompanying flipped XML file named flipped_ < arg > . " <nl> + " We also adjust object part labels after flipping so that the new flipped dataset " <nl> + " has the same average part layout as the source dataset . " , 1 ) ; <nl> + parser . add_option ( " flip - basic " , " This option is just like - - flip , except we don ' t adjust any object part labels after flipping . 
" <nl> + " The parts are instead simply mirrored to the flipped dataset . " , 1 ) ; <nl> parser . add_option ( " rotate " , " Read an XML image dataset and output a copy that is rotated counter clockwise by < arg > degrees . " <nl> " The output is saved to an XML file prefixed with rotated_ < arg > . " , 1 ) ; <nl> parser . add_option ( " cluster " , " Cluster all the objects in an XML file into < arg > different clusters and save " <nl> int main ( int argc , char * * argv ) <nl> parser . parse ( argc , argv ) ; <nl> <nl> const char * singles [ ] = { " h " , " c " , " r " , " l " , " files " , " convert " , " parts " , " rmdiff " , " rmtrunc " , " rmdupes " , " seed " , " shuffle " , " split " , " add " , <nl> - " flip " , " rotate " , " tile " , " size " , " cluster " , " resample " , " min - object - size " , " rmempty " , <nl> + " flip - basic " , " flip " , " rotate " , " tile " , " size " , " cluster " , " resample " , " min - object - size " , " rmempty " , <nl> " crop - size " , " cropped - object - size " , " rmlabel " , " rm - other - labels " , " rm - if - overlaps " , " sort - num - objects " , <nl> " one - object - per - image " , " jpg " , " rmignore " , " sort " } ; <nl> parser . check_one_time_options ( singles ) ; <nl> int main ( int argc , char * * argv ) <nl> parser . check_incompatible_options ( " c " , " rmtrunc " ) ; <nl> parser . check_incompatible_options ( " c " , " add " ) ; <nl> parser . check_incompatible_options ( " c " , " flip " ) ; <nl> + parser . check_incompatible_options ( " c " , " flip - basic " ) ; <nl> + parser . check_incompatible_options ( " flip " , " flip - basic " ) ; <nl> parser . check_incompatible_options ( " c " , " rotate " ) ; <nl> parser . check_incompatible_options ( " c " , " rename " ) ; <nl> parser . check_incompatible_options ( " c " , " ignore " ) ; <nl> int main ( int argc , char * * argv ) <nl> parser . check_incompatible_options ( " l " , " add " ) ; <nl> parser . check_incompatible_options ( " l " , " parts " ) ; <nl> parser . check_incompatible_options ( " l " , " flip " ) ; <nl> + parser . check_incompatible_options ( " l " , " flip - basic " ) ; <nl> parser . check_incompatible_options ( " l " , " rotate " ) ; <nl> parser . check_incompatible_options ( " files " , " rename " ) ; <nl> parser . check_incompatible_options ( " files " , " ignore " ) ; <nl> parser . check_incompatible_options ( " files " , " add " ) ; <nl> parser . check_incompatible_options ( " files " , " parts " ) ; <nl> parser . check_incompatible_options ( " files " , " flip " ) ; <nl> + parser . check_incompatible_options ( " files " , " flip - basic " ) ; <nl> parser . check_incompatible_options ( " files " , " rotate " ) ; <nl> parser . check_incompatible_options ( " add " , " flip " ) ; <nl> + parser . check_incompatible_options ( " add " , " flip - basic " ) ; <nl> parser . check_incompatible_options ( " add " , " rotate " ) ; <nl> parser . check_incompatible_options ( " add " , " tile " ) ; <nl> parser . check_incompatible_options ( " flip " , " tile " ) ; <nl> + parser . check_incompatible_options ( " flip - basic " , " tile " ) ; <nl> parser . check_incompatible_options ( " rotate " , " tile " ) ; <nl> parser . check_incompatible_options ( " cluster " , " tile " ) ; <nl> parser . check_incompatible_options ( " resample " , " tile " ) ; <nl> parser . check_incompatible_options ( " flip " , " cluster " ) ; <nl> + parser . check_incompatible_options ( " flip - basic " , " cluster " ) ; <nl> parser . 
check_incompatible_options ( " rotate " , " cluster " ) ; <nl> parser . check_incompatible_options ( " add " , " cluster " ) ; <nl> parser . check_incompatible_options ( " flip " , " resample " ) ; <nl> + parser . check_incompatible_options ( " flip - basic " , " resample " ) ; <nl> parser . check_incompatible_options ( " rotate " , " resample " ) ; <nl> parser . check_incompatible_options ( " add " , " resample " ) ; <nl> parser . check_incompatible_options ( " shuffle " , " tile " ) ; <nl> int main ( int argc , char * * argv ) <nl> return EXIT_SUCCESS ; <nl> } <nl> <nl> - if ( parser . option ( " flip " ) ) <nl> + if ( parser . option ( " flip " ) | | parser . option ( " flip - basic " ) ) <nl> { <nl> flip_dataset ( parser ) ; <nl> return EXIT_SUCCESS ; <nl>
|
Changed the behavior of imglab ' s - - flip option . It will now attempt to adjust
|
davisking/dlib
|
c68bb4e785ef919424aafbaf6103484a3a3bb540
|
2018-01-14T14:16:50Z
|
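The flip logic in the row above matches flipped part labels back to the originals by solving a minimum-cost assignment over pairwise squared distances (dlib's `max_cost_assignment`, i.e. the Hungarian algorithm, plus a small rotation search). The sketch below is a deliberately simplified stand-in: it brute-forces the assignment for a handful of points and skips the rotation search entirely, so it only illustrates the matching idea, not imglab's actual implementation.

```cpp
#include <algorithm>
#include <iostream>
#include <limits>
#include <numeric>
#include <vector>

struct Point { double x, y; };

double squaredDistance(const Point& a, const Point& b) {
    const double dx = a.x - b.x, dy = a.y - b.y;
    return dx * dx + dy * dy;
}

// Brute-force minimum-cost assignment: returns A with from[i] matched to to[A[i]].
// Assumes from.size() == to.size(); fine for a handful of landmarks, whereas the
// real tool uses the Hungarian algorithm for efficiency.
std::vector<int> alignPoints(const std::vector<Point>& from, const std::vector<Point>& to) {
    std::vector<int> perm(to.size());
    std::iota(perm.begin(), perm.end(), 0);
    std::vector<int> best = perm;
    double bestCost = std::numeric_limits<double>::infinity();
    do {
        double cost = 0;
        for (size_t i = 0; i < from.size(); ++i)
            cost += squaredDistance(from[i], to[perm[i]]);
        if (cost < bestCost) { bestCost = cost; best = perm; }
    } while (std::next_permutation(perm.begin(), perm.end()));
    return best;
}

int main() {
    std::vector<Point> from = {{0, 0}, {1, 0}, {0, 1}};
    std::vector<Point> to   = {{1.1, 0.0}, {0.0, 1.05}, {0.02, -0.01}};  // same layout, shuffled
    for (int idx : alignPoints(from, to)) std::cout << idx << " ";       // prints 2 0 1
    std::cout << "\n";
}
```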
mmm a / test / cctest / cctest . status <nl> ppp b / test / cctest / cctest . status <nl> <nl> # BUG ( 3742 ) . <nl> ' test - mark - compact / MarkCompactCollector ' : [ PASS , [ ' arch = = arm ' , NO_VARIANTS ] ] , <nl> <nl> - # TODO ( mstarzinger ) : The rewriter is not being called when top - level code is <nl> - # optimized and hence scripts don ' t " return " the correct value . Fix this . <nl> - ' test - compiler / CompileFunctionInContext * ' : [ PASS , NO_VARIANTS ] , <nl> - <nl> # TODO ( bmeurer ) : TurboFan embeds strong references to all kinds of objects <nl> # via deoptimization data ( Crankshaft also does this , but lack proper test <nl> # coverage ) . <nl> mmm a / test / mjsunit / mjsunit . status <nl> ppp b / test / mjsunit / mjsunit . status <nl> <nl> # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> # TurboFan compiler failures . <nl> <nl> - # TODO ( verwaest ) : Some tests are over - restrictive about object layout . <nl> - ' compare - known - objects - slow ' : [ PASS , NO_VARIANTS ] , <nl> - ' smi - representation ' : [ PASS , NO_VARIANTS ] , <nl> - <nl> # Some tests are just too slow to run for now . <nl> ' big - object - literal ' : [ PASS , NO_VARIANTS ] , <nl> ' json2 ' : [ PASS , NO_VARIANTS ] , <nl> <nl> ' bitops - info ' : [ PASS , NO_VARIANTS ] , # fails on ARM hardware . <nl> ' md5 ' : [ PASS , NO_VARIANTS ] , # fails on ARM hardware . <nl> ' debug - break - inline ' : [ PASS , NO_VARIANTS ] , # very flaky . <nl> - ' debug - compile - event - newfunction ' : [ PASS , NO_VARIANTS ] , <nl> - ' debug - conditional - breakpoints ' : [ PASS , NO_VARIANTS ] , <nl> ' debug - evaluate - locals - optimized ' : [ PASS , NO_VARIANTS ] , <nl> ' debug - evaluate - locals - optimized - double ' : [ PASS , NO_VARIANTS ] , <nl> ' debug - evaluate - recursive ' : [ PASS , NO_VARIANTS ] , # only in no - snap debug . <nl> <nl> ' debug - evaluate - closure ' : [ PASS , NO_VARIANTS ] , <nl> ' debug - evaluate - with ' : [ PASS , NO_VARIANTS ] , <nl> <nl> - # TODO ( mstarzinger ) : Optimizing top - level code flushed out some correctness <nl> - # issues on ARM and ARM64 . <nl> - ' es6 / math - log2 - log10 ' : [ PASS , NO_VARIANTS ] , # on ARM and ARM64 . <nl> - ' mirror - script ' : [ PASS , NO_VARIANTS ] , # on ARM64 only . <nl> - <nl> # TODO ( jarin / mstarzinger ) : Investigate debugger issues with TurboFan . <nl> ' debug - evaluate - locals ' : [ PASS , NO_VARIANTS ] , <nl> ' debug - evaluate - locals - capturing ' : [ PASS , NO_VARIANTS ] , <nl> <nl> <nl> # Assumptions about optimization need investigation in TurboFan . <nl> ' regress - sync - optimized - lists ' : [ PASS , NO_VARIANTS ] , <nl> - ' regress / regress - store - uncacheable ' : [ PASS , NO_VARIANTS ] , <nl> <nl> # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> # Too slow in debug mode with - - stress - opt mode . <nl> <nl> # nosse2 . Also for arm novfp3 . <nl> ' regress / regress - 2989 ' : [ FAIL , NO_VARIANTS , [ ' system = = linux and arch = = x87 or arch = = arm and simulator = = True ' , PASS ] ] , <nl> <nl> - # Skip endain dependent test for mips due to different typed views of the same <nl> - # array buffer . <nl> - ' nans ' : [ PASS , ] , <nl> - <nl> # This test variant makes only sense on arm . 
<nl> ' math - floor - of - div - nosudiv ' : [ PASS , SLOW , [ ' arch not in [ arm , arm64 , android_arm , android_arm64 ] ' , SKIP ] ] , <nl> <nl>
|
[ turbofan ] Re - enable tests that no longer fail .
|
v8/v8
|
67efc381b01abefb69562c988d1bc71c2a783b90
|
2016-07-07T13:40:00Z
|
mmm a / cocos / scripting / auto - generated <nl> ppp b / cocos / scripting / auto - generated <nl> @ @ - 1 + 1 @ @ <nl> - Subproject commit bab1d8a7c5d8b29f14c8dc19158a8bb994f0e970 <nl> + Subproject commit 74cb897b64f7325cf969341e9bc2d87fc7fb1bb7 <nl>
|
[ AUTO ] : updating submodule reference to latest autogenerated bindings
|
cocos2d/cocos2d-x
|
beb7015d7e0ff62f72273265f320c77d29f87594
|
2014-01-16T02:43:13Z
|
mmm a / modules / core / include / opencv2 / core / mat . inl . hpp <nl> ppp b / modules / core / include / opencv2 / core / mat . inl . hpp <nl> Mat : : Mat ( Mat & & m ) <nl> inline <nl> Mat & Mat : : operator = ( Mat & & m ) <nl> { <nl> + if ( this = = & m ) <nl> + return * this ; <nl> + <nl> release ( ) ; <nl> flags = m . flags ; dims = m . dims ; rows = m . rows ; cols = m . cols ; data = m . data ; <nl> datastart = m . datastart ; dataend = m . dataend ; datalimit = m . datalimit ; allocator = m . allocator ; <nl> UMat : : UMat ( UMat & & m ) <nl> inline <nl> UMat & UMat : : operator = ( UMat & & m ) <nl> { <nl> + if ( this = = & m ) <nl> + return * this ; <nl> release ( ) ; <nl> flags = m . flags ; dims = m . dims ; rows = m . rows ; cols = m . cols ; <nl> allocator = m . allocator ; usageFlags = m . usageFlags ; <nl> mmm a / modules / core / include / opencv2 / core / ptr . inl . hpp <nl> ppp b / modules / core / include / opencv2 / core / ptr . inl . hpp <nl> Ptr < T > : : Ptr ( Ptr & & o ) : owner ( o . owner ) , stored ( o . stored ) <nl> template < typename T > <nl> Ptr < T > & Ptr < T > : : operator = ( Ptr < T > & & o ) <nl> { <nl> + if ( this = = & o ) <nl> + return * this ; <nl> + <nl> release ( ) ; <nl> owner = o . owner ; <nl> stored = o . stored ; <nl>
|
fixed incorrect behaviour of move semantics for cv : : Ptr , cv : : Mat , cv : : UMat in the case when the rvalue reference refers to * this .
|
opencv/opencv
|
a90a576d76faff2ab8b14186737edd7fc12f076f
|
2016-03-14T13:10:23Z
|
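The guard added in the row above matters because calling `release()` at the top of a move-assignment operator would otherwise destroy the very data about to be stolen whenever an object is move-assigned to itself. A minimal sketch of the pattern with a toy buffer type (not OpenCV code):

```cpp
#include <cstddef>
#include <iostream>
#include <utility>

class Buffer {
public:
    explicit Buffer(std::size_t n) : size_(n), data_(new int[n]()) {}
    ~Buffer() { delete[] data_; }

    Buffer(const Buffer&) = delete;
    Buffer& operator=(const Buffer&) = delete;

    Buffer(Buffer&& other) noexcept : size_(other.size_), data_(other.data_) {
        other.size_ = 0;
        other.data_ = nullptr;
    }

    Buffer& operator=(Buffer&& other) noexcept {
        if (this == &other)   // without this guard, the delete[] below would free
            return *this;     // the buffer we are about to steal from ourselves
        delete[] data_;
        size_ = other.size_;
        data_ = other.data_;
        other.size_ = 0;
        other.data_ = nullptr;
        return *this;
    }

    std::size_t size() const { return size_; }

private:
    std::size_t size_;
    int* data_;
};

int main() {
    Buffer b(16);
    b = std::move(b);                  // self-move: harmless thanks to the guard
    std::cout << b.size() << "\n";     // still 16
}
```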
mmm a / code / dynamic_programming / src / longest_common_subsequence / longest_common_subsequence . php <nl> ppp b / code / dynamic_programming / src / longest_common_subsequence / longest_common_subsequence . php <nl> <nl> < ? php <nl> - function longestCommonSubsequence ( $ x , $ y ) <nl> - { <nl> - $ x_len = strlen ( $ x ) ; <nl> - $ y_len = strlen ( $ y ) ; <nl> + function longestCommonSubsequence ( $ x , $ y ) <nl> + { <nl> + $ x_len = strlen ( $ x ) ; <nl> + $ y_len = strlen ( $ y ) ; <nl> <nl> - for ( $ i = 0 ; $ i < = $ x_len ; $ i + + ) <nl> + for ( $ i = 0 ; $ i < = $ x_len ; $ i + + ) <nl> + { <nl> + for ( $ j = 0 ; $ j < = $ y_len ; $ j + + ) <nl> { <nl> - for ( $ j = 0 ; $ j < = $ y_len ; $ j + + ) <nl> - { <nl> - if ( $ i = = 0 | | $ j = = 0 ) <nl> - $ dp [ $ i ] [ $ j ] = 0 ; <nl> - else if ( $ x [ $ i - 1 ] = = $ y [ $ j - 1 ] ) <nl> - $ dp [ $ i ] [ $ j ] = $ dp [ $ i - 1 ] [ $ j - 1 ] + 1 ; <nl> - else <nl> - $ dp [ $ i ] [ $ j ] = max ( $ dp [ $ i - 1 ] [ $ j ] , $ dp [ $ i ] [ $ j - 1 ] ) ; <nl> - } <nl> + if ( $ i = = 0 | | $ j = = 0 ) <nl> + $ dp [ $ i ] [ $ j ] = 0 ; <nl> + else if ( $ x [ $ i - 1 ] = = $ y [ $ j - 1 ] ) <nl> + $ dp [ $ i ] [ $ j ] = $ dp [ $ i - 1 ] [ $ j - 1 ] + 1 ; <nl> + else <nl> + $ dp [ $ i ] [ $ j ] = max ( $ dp [ $ i - 1 ] [ $ j ] , $ dp [ $ i ] [ $ j - 1 ] ) ; <nl> } <nl> - <nl> - return $ dp [ $ x_len ] [ $ y_len ] ; <nl> } <nl> <nl> - $ a = " AGGTAB " ; <nl> - $ b = " GXTXAYB " ; <nl> + return $ dp [ $ x_len ] [ $ y_len ] ; <nl> + } <nl> + <nl> + $ a = " AGGTAB " ; <nl> + $ b = " GXTXAYB " ; <nl> <nl> - echo " Length of LCS : " ; <nl> - echo longestCommonSubsequence ( $ a , $ b ) ; <nl> + echo " Length of LCS : " , longestCommonSubsequence ( $ a , $ b ) ; <nl> ? > <nl>
|
fixing minor formatting issues
|
OpenGenus/cosmos
|
6a9f63a79556a835dce97cfa5bdedd991120e71e
|
2020-04-01T17:14:26Z
|
mmm a / folly / experimental / BitVectorCoding . h <nl> ppp b / folly / experimental / BitVectorCoding . h <nl> class BitVectorReader : detail : : ForwardPointers < Encoder : : forwardQuantum > , <nl> : detail : : ForwardPointers < Encoder : : forwardQuantum > ( list . forwardPointers ) , <nl> detail : : SkipPointers < Encoder : : skipQuantum > ( list . skipPointers ) , <nl> bits_ ( list . bits ) , <nl> - size_ ( list . size ) { <nl> + size_ ( list . size ) , <nl> + upperBound_ ( <nl> + ( kUnchecked | | UNLIKELY ( list . size = = 0 ) ) ? 0 : list . upperBound ) { <nl> reset ( ) ; <nl> - <nl> - if ( kUnchecked | | UNLIKELY ( list . size = = 0 ) ) { <nl> - upperBound_ = 0 ; <nl> - return ; <nl> - } <nl> - <nl> - upperBound_ = list . upperBound ; <nl> } <nl> <nl> void reset ( ) { <nl> class BitVectorReader : detail : : ForwardPointers < Encoder : : forwardQuantum > , <nl> SizeType position_ ; <nl> ValueType value_ ; <nl> <nl> - SizeType size_ ; <nl> - ValueType upperBound_ ; <nl> + const SizeType size_ ; <nl> + const ValueType upperBound_ ; <nl> } ; <nl> <nl> } / / namespace compression <nl>
|
const - qualify some data members
|
facebook/folly
|
01920134e96b14596e7c6532245b6b12b91a1087
|
2018-07-05T18:39:33Z
|
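Const-qualifying `size_` and `upperBound_` is what forces the initialization logic into the constructor's member-initializer list, which is exactly the restructuring the diff above performs. A small sketch of that pattern with hypothetical names:

```cpp
#include <cstddef>
#include <iostream>

struct ListInfo {
    std::size_t size;
    std::size_t upperBound;
};

class Reader {
public:
    // Const members must be set in the initializer list; any conditional logic
    // moves into the initializer expression instead of the constructor body.
    explicit Reader(const ListInfo& list)
        : size_(list.size),
          upperBound_(list.size == 0 ? 0 : list.upperBound) {}

    std::size_t size() const { return size_; }
    std::size_t upperBound() const { return upperBound_; }

private:
    const std::size_t size_;
    const std::size_t upperBound_;
};

int main() {
    Reader empty(ListInfo{0, 99});
    Reader filled(ListInfo{4, 99});
    std::cout << empty.upperBound() << " " << filled.upperBound() << "\n";  // 0 99
}
```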
mmm a / Telegram / SourceFiles / audio . cpp <nl> ppp b / Telegram / SourceFiles / audio . cpp <nl> Copyright ( c ) 2014 John Preston , https : / / desktop . telegram . org <nl> # include < AL / al . h > <nl> # include < AL / alc . h > <nl> <nl> + # define AL_ALEXT_PROTOTYPES <nl> + # include < AL / alext . h > <nl> + <nl> extern " C " { <nl> <nl> # include < libavcodec / avcodec . h > <nl> void audioInit ( ) { <nl> if ( ! _checkALError ( ) ) return audioFinish ( ) ; <nl> <nl> player = new AudioPlayer ( ) ; <nl> - alcSuspendContext ( audioContext ) ; <nl> + alcDevicePauseSOFT ( audioDevice ) ; <nl> <nl> av_register_all ( ) ; <nl> avcodec_register_all ( ) ; <nl> void audioInit ( ) { <nl> void audioPlayNotify ( ) { <nl> if ( ! audioPlayer ( ) ) return ; <nl> <nl> - audioPlayer ( ) - > processContext ( ) ; <nl> + audioPlayer ( ) - > resumeDevice ( ) ; <nl> alSourcePlay ( notifySource ) ; <nl> emit audioPlayer ( ) - > faderOnTimer ( ) ; <nl> } <nl> void AudioPlayer : : pauseresume ( ) { <nl> updateCurrentStarted ( ) ; <nl> } <nl> _data [ _current ] . state = AudioPlayerResuming ; <nl> - processContext ( ) ; <nl> + resumeDevice ( ) ; <nl> alSourcePlay ( _data [ _current ] . source ) ; <nl> break ; <nl> case AudioPlayerStarting : <nl> void AudioPlayer : : clearStoppedAtStart ( AudioData * audio ) { <nl> } <nl> } <nl> <nl> - void AudioPlayer : : processContext ( ) { <nl> - _fader - > processContext ( ) ; <nl> + void AudioPlayer : : resumeDevice ( ) { <nl> + _fader - > resumeDevice ( ) ; <nl> } <nl> <nl> AudioCapture : : AudioCapture ( ) : _capture ( new AudioCaptureInner ( & _captureThread ) ) { <nl> AudioCapture * audioCapture ( ) { <nl> return capture ; <nl> } <nl> <nl> - AudioPlayerFader : : AudioPlayerFader ( QThread * thread ) : _timer ( this ) , _suspendFlag ( false ) , _suspended ( true ) { <nl> + AudioPlayerFader : : AudioPlayerFader ( QThread * thread ) : _timer ( this ) , _pauseFlag ( false ) , _paused ( true ) { <nl> moveToThread ( thread ) ; <nl> _timer . moveToThread ( thread ) ; <nl> - _suspendTimer . moveToThread ( thread ) ; <nl> + _pauseTimer . moveToThread ( thread ) ; <nl> <nl> _timer . setSingleShot ( true ) ; <nl> connect ( & _timer , SIGNAL ( timeout ( ) ) , this , SLOT ( onTimer ( ) ) ) ; <nl> <nl> - _suspendTimer . setSingleShot ( true ) ; <nl> - connect ( & _suspendTimer , SIGNAL ( timeout ( ) ) , this , SLOT ( onSuspendTimer ( ) ) ) ; <nl> - connect ( this , SIGNAL ( stopSuspend ( ) ) , this , SLOT ( onSuspendTimerStop ( ) ) , Qt : : QueuedConnection ) ; <nl> + _pauseTimer . setSingleShot ( true ) ; <nl> + connect ( & _pauseTimer , SIGNAL ( timeout ( ) ) , this , SLOT ( onPauseTimer ( ) ) ) ; <nl> + connect ( this , SIGNAL ( stopPauseDevice ( ) ) , this , SLOT ( onPauseTimerStop ( ) ) , Qt : : QueuedConnection ) ; <nl> } <nl> <nl> void AudioPlayerFader : : onInit ( ) { <nl> void AudioPlayerFader : : onTimer ( ) { <nl> } <nl> if ( hasFading ) { <nl> _timer . start ( AudioFadeTimeout ) ; <nl> - processContext ( ) ; <nl> + resumeDevice ( ) ; <nl> } else if ( hasPlaying ) { <nl> _timer . start ( AudioCheckPositionTimeout ) ; <nl> - processContext ( ) ; <nl> + resumeDevice ( ) ; <nl> } else { <nl> - QMutexLocker lock ( & _suspendMutex ) ; <nl> - _suspendFlag = true ; <nl> - _suspendTimer . start ( AudioSuspendTimeout ) ; <nl> + QMutexLocker lock ( & _pauseMutex ) ; <nl> + _pauseFlag = true ; <nl> + _pauseTimer . 
start ( AudioPauseDeviceTimeout ) ; <nl> } <nl> } <nl> <nl> - void AudioPlayerFader : : onSuspendTimer ( ) { <nl> - QMutexLocker lock ( & _suspendMutex ) ; <nl> - if ( _suspendFlag ) { <nl> - _suspended = true ; <nl> - alcSuspendContext ( audioContext ) ; <nl> + void AudioPlayerFader : : onPauseTimer ( ) { <nl> + QMutexLocker lock ( & _pauseMutex ) ; <nl> + if ( _pauseFlag ) { <nl> + _paused = true ; <nl> + alcDevicePauseSOFT ( audioDevice ) ; <nl> } <nl> } <nl> <nl> - void AudioPlayerFader : : onSuspendTimerStop ( ) { <nl> - if ( _suspendTimer . isActive ( ) ) _suspendTimer . stop ( ) ; <nl> + void AudioPlayerFader : : onPauseTimerStop ( ) { <nl> + if ( _pauseTimer . isActive ( ) ) _pauseTimer . stop ( ) ; <nl> } <nl> <nl> - void AudioPlayerFader : : processContext ( ) { <nl> - QMutexLocker lock ( & _suspendMutex ) ; <nl> - _suspendFlag = false ; <nl> - emit stopSuspend ( ) ; <nl> - if ( _suspended ) { <nl> - _suspended = false ; <nl> - alcProcessContext ( audioContext ) ; <nl> + void AudioPlayerFader : : resumeDevice ( ) { <nl> + QMutexLocker lock ( & _pauseMutex ) ; <nl> + _pauseFlag = false ; <nl> + emit stopPauseDevice ( ) ; <nl> + if ( _paused ) { <nl> + _paused = false ; <nl> + alcDeviceResumeSOFT ( audioDevice ) ; <nl> } <nl> } <nl> <nl> void AudioPlayerLoaders : : onLoad ( AudioData * audio ) { <nl> alGetSourcei ( m . source , AL_SOURCE_STATE , & state ) ; <nl> if ( _checkALError ( ) ) { <nl> if ( state ! = AL_PLAYING ) { <nl> - voice - > processContext ( ) ; <nl> + voice - > resumeDevice ( ) ; <nl> alSourcePlay ( m . source ) ; <nl> emit needToCheck ( ) ; <nl> } <nl> mmm a / Telegram / SourceFiles / audio . h <nl> ppp b / Telegram / SourceFiles / audio . h <nl> class AudioPlayer : public QObject { <nl> <nl> void currentState ( AudioData * * audio , AudioPlayerState * state = 0 , int64 * position = 0 , int64 * duration = 0 , int32 * frequency = 0 ) ; <nl> void clearStoppedAtStart ( AudioData * audio ) ; <nl> - void processContext ( ) ; <nl> + void resumeDevice ( ) ; <nl> <nl> ~ AudioPlayer ( ) ; <nl> <nl> class AudioPlayerFader : public QObject { <nl> public : <nl> <nl> AudioPlayerFader ( QThread * thread ) ; <nl> - void processContext ( ) ; <nl> + void resumeDevice ( ) ; <nl> <nl> signals : <nl> <nl> class AudioPlayerFader : public QObject { <nl> void audioStopped ( AudioData * audio ) ; <nl> void needToPreload ( AudioData * audio ) ; <nl> <nl> - void stopSuspend ( ) ; <nl> + void stopPauseDevice ( ) ; <nl> <nl> public slots : <nl> <nl> void onInit ( ) ; <nl> void onTimer ( ) ; <nl> - void onSuspendTimer ( ) ; <nl> - void onSuspendTimerStop ( ) ; <nl> + void onPauseTimer ( ) ; <nl> + void onPauseTimerStop ( ) ; <nl> <nl> private : <nl> <nl> - QTimer _timer , _suspendTimer ; <nl> - QMutex _suspendMutex ; <nl> - bool _suspendFlag , _suspended ; <nl> + QTimer _timer , _pauseTimer ; <nl> + QMutex _pauseMutex ; <nl> + bool _pauseFlag , _paused ; <nl> <nl> } ; <nl> <nl> mmm a / Telegram / SourceFiles / config . h <nl> ppp b / Telegram / SourceFiles / config . 
h <nl> enum { <nl> AudioVoiceMsgChannels = 2 , / / stereo <nl> AudioVoiceMsgBufferSize = 1024 * 1024 , / / 1 Mb buffers <nl> AudioVoiceMsgInMemory = 1024 * 1024 , / / 1 Mb audio is hold in memory and auto loaded <nl> - AudioSuspendTimeout = 3000 , / / suspend in 3 secs after playing is over <nl> + AudioPauseDeviceTimeout = 3000 , / / pause in 3 secs after playing is over <nl> <nl> StickerInMemory = 256 * 1024 , / / 128 Kb stickers hold in memory , auto loaded and displayed inline <nl> StickerMaxSize = 2048 , / / 2048x2048 is a max image size for sticker <nl>
|
OpenAL updated on Windows
|
telegramdesktop/tdesktop
|
c40758f30db91809f9453a9effdf3d3d6ff6f990
|
2015-06-03T12:18:46Z
|
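The change in the row above replaces `alcSuspendContext` / `alcProcessContext` with the OpenAL Soft `ALC_SOFT_pause_device` extension, which actually stops device mixing rather than merely deferring context state updates. A minimal sketch of that API, assuming OpenAL Soft is installed and linked (for example with `-lopenal`):

```cpp
// Requires OpenAL Soft (the ALC_SOFT_pause_device extension).
#define AL_ALEXT_PROTOTYPES
#include <AL/al.h>
#include <AL/alc.h>
#include <AL/alext.h>

#include <iostream>

int main() {
    ALCdevice* device = alcOpenDevice(nullptr);            // default output device
    if (!device) { std::cerr << "no audio device\n"; return 1; }
    ALCcontext* context = alcCreateContext(device, nullptr);
    alcMakeContextCurrent(context);

    // Pausing the device stops all mixing work, unlike alcSuspendContext which
    // only defers context state updates; resume before playing sources again.
    alcDevicePauseSOFT(device);
    // ... nothing is audible and no mixing is done while paused ...
    alcDeviceResumeSOFT(device);

    alcMakeContextCurrent(nullptr);
    alcDestroyContext(context);
    alcCloseDevice(device);
}
```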
mmm a / src / serializer / log / log_serializer . cc <nl> ppp b / src / serializer / log / log_serializer . cc <nl> void log_serializer_t : : create ( serializer_file_opener_t * file_opener , <nl> <nl> co_static_header_write ( file . get ( ) , on_disk_config , sizeof ( * on_disk_config ) ) ; <nl> <nl> - log_serializer_metablock_t metablock ; <nl> - memset ( & metablock , 0 , sizeof ( metablock ) ) ; <nl> + scoped_device_block_aligned_ptr_t < crc_metablock_t > scoped_crc_mb ( METABLOCK_SIZE ) ; <nl> + crc_metablock_t * crc_mb = scoped_crc_mb . get ( ) ; <nl> + memset ( crc_mb , 0 , METABLOCK_SIZE ) ; <nl> <nl> - extent_manager_metablock_mixin_t extent_manager_part ; <nl> - extent_manager_t : : prepare_initial_metablock ( & extent_manager_part ) ; <nl> - metablock . extent_manager_part = extent_manager_part ; <nl> - <nl> - dbm_metablock_mixin_t data_block_manager_part ; <nl> - data_block_manager_t : : prepare_initial_metablock ( & data_block_manager_part ) ; <nl> - metablock . data_block_manager_part = data_block_manager_part ; <nl> - <nl> - lba_metablock_mixin_t lba_index_part ; <nl> - lba_list_t : : prepare_initial_metablock ( & lba_index_part ) ; <nl> - metablock . lba_index_part = lba_index_part ; <nl> + extent_manager_t : : prepare_initial_metablock ( & crc_mb - > metablock . extent_manager_part ) ; <nl> + data_block_manager_t : : prepare_initial_metablock ( & crc_mb - > metablock . data_block_manager_part ) ; <nl> + lba_list_t : : prepare_initial_metablock ( & crc_mb - > metablock . lba_index_part ) ; <nl> <nl> - metablock_manager_t : : create ( file . get ( ) , static_config . extent_size ( ) , & metablock ) ; <nl> + metablock_manager_t : : create ( file . get ( ) , static_config . extent_size ( ) , <nl> + std : : move ( scoped_crc_mb ) ) ; <nl> } <nl> <nl> / * The process of starting up the serializer is handled by the ls_start_ * _fsm_t . This is <nl> void log_serializer_t : : write_metablock ( new_mutex_in_line_t * mutex_acq , <nl> const signal_t * safe_to_write_cond , <nl> file_account_t * io_account ) { <nl> assert_thread ( ) ; <nl> - log_serializer_metablock_t mb_buffer ; <nl> + scoped_device_block_aligned_ptr_t < crc_metablock_t > crc_mb ( METABLOCK_SIZE ) ; <nl> + memset ( crc_mb . get ( ) , 0 , METABLOCK_SIZE ) ; <nl> <nl> / * Prepare metablock now instead of in when we write it so that we will have the <nl> correct metablock information for this write even if another write starts before we <nl> finish waiting on ` safe_to_write_cond ` . * / <nl> - prepare_metablock ( & mb_buffer ) ; <nl> + prepare_metablock ( & crc_mb - > metablock ) ; <nl> <nl> / * Get in line for the metablock manager * / <nl> bool waiting_for_prev_write = ! metablock_waiter_queue . empty ( ) ; <nl> void log_serializer_t : : write_metablock ( new_mutex_in_line_t * mutex_acq , <nl> struct : public cond_t , public metablock_manager_t : : metablock_write_callback_t { <nl> void on_metablock_write ( ) { pulse ( ) ; } <nl> } on_metablock_write ; <nl> - metablock_manager - > write_metablock ( & mb_buffer , io_account , & on_metablock_write ) ; <nl> + metablock_manager - > write_metablock ( crc_mb , io_account , & on_metablock_write ) ; <nl> <nl> / * Remove ourselves from the list of metablock waiters . * / <nl> metablock_waiter_queue . pop_front ( ) ; <nl> mmm a / src / serializer / log / metablock_manager . cc <nl> ppp b / src / serializer / log / metablock_manager . cc <nl> uint32_t compute_metablock_crc ( const crc_metablock_t * crc_mb ) { <nl> return crc . 
checksum ( ) ; <nl> } <nl> <nl> + / / crc_mb - > metablock is already initialized , the rest of the metablock is zero - filled . <nl> void prepare ( crc_metablock_t * crc_mb , uint32_t _disk_format_version , <nl> - log_serializer_metablock_t * mb , metablock_version_t vers ) { <nl> + metablock_version_t vers ) { <nl> crc_mb - > disk_format_version = _disk_format_version ; <nl> - crc_mb - > metablock = * mb ; <nl> memcpy ( crc_mb - > magic_marker , MB_MARKER_MAGIC , sizeof ( MB_MARKER_MAGIC ) ) ; <nl> crc_mb - > version = vers ; <nl> crc_mb - > _crc = compute_metablock_crc ( crc_mb ) ; <nl> void metablock_manager_t : : metablock_manager_t : : head_t : : pop ( ) { <nl> } <nl> <nl> metablock_manager_t : : metablock_manager_t ( extent_manager_t * em ) <nl> - : head ( this ) , mb_buffer ( METABLOCK_SIZE ) , <nl> + : head ( this ) , <nl> extent_manager ( em ) , <nl> metablock_offsets ( initial_metablock_offsets ( extent_manager - > extent_size ) ) , <nl> state ( state_unstarted ) , dbfile ( nullptr ) { <nl> rassert ( sizeof ( crc_metablock_t ) < = METABLOCK_SIZE ) ; <nl> - rassert ( mb_buffer . has ( ) ) ; <nl> - mb_buffer_in_use = false ; <nl> <nl> / * Build the list of metablock locations in the file * / <nl> <nl> metablock_manager_t : : metablock_manager_t ( extent_manager_t * em ) <nl> } <nl> <nl> metablock_manager_t : : ~ metablock_manager_t ( ) { <nl> - <nl> rassert ( state = = state_unstarted | | state = = state_shut_down ) ; <nl> - <nl> - rassert ( ! mb_buffer_in_use ) ; <nl> } <nl> <nl> - void metablock_manager_t : : create ( file_t * dbfile , int64_t extent_size , <nl> - log_serializer_metablock_t * initial ) { <nl> + void metablock_manager_t : : create ( <nl> + file_t * dbfile , int64_t extent_size , <nl> + scoped_device_block_aligned_ptr_t < crc_metablock_t > & & initial ) { <nl> <nl> std : : vector < int64_t > metablock_offsets = initial_metablock_offsets ( extent_size ) ; <nl> <nl> void metablock_manager_t : : create ( file_t * dbfile , int64_t extent_size , <nl> } <nl> callback . wait ( ) ; <nl> <nl> + buffer = std : : move ( initial ) ; <nl> + <nl> / * Write the first metablock * / <nl> / / We use cluster_version_t : : LATEST_DISK . Maybe we ' d want to decouple cluster <nl> / / versions from disk format versions ? We can do that later if we want . <nl> crc_metablock : : prepare ( buffer . get ( ) , <nl> static_cast < uint32_t > ( cluster_version_t : : LATEST_DISK ) , <nl> - initial , <nl> MB_START_VERSION ) ; <nl> co_write ( dbfile , metablock_offsets [ 0 ] , METABLOCK_SIZE , buffer . get ( ) , <nl> DEFAULT_DISK_ACCOUNT , file_t : : WRAP_IN_DATASYNCS ) ; <nl> void metablock_manager_t : : co_start_existing ( file_t * file , bool * mb_found , <nl> dbfile = file ; <nl> rassert ( dbfile ! = nullptr ) ; <nl> <nl> - rassert ( ! mb_buffer_in_use ) ; <nl> - mb_buffer_in_use = true ; <nl> - <nl> metablock_version_t latest_version = MB_BAD_VERSION ; <nl> <nl> dbfile - > set_file_size_at_least ( metablock_offsets [ metablock_offsets . size ( ) - 1 ] + METABLOCK_SIZE , <nl> void metablock_manager_t : : co_start_existing ( file_t * file , bool * mb_found , <nl> latest_version = MB_BAD_VERSION ; / * version is now useless * / <nl> head . pop ( ) ; <nl> * mb_found = true ; <nl> - memcpy ( mb_buffer . 
get ( ) , last_good_mb , METABLOCK_SIZE ) ; <nl> - memcpy ( mb_out , & ( mb_buffer - > metablock ) , sizeof ( log_serializer_metablock_t ) ) ; <nl> + memcpy ( mb_out , & last_good_mb - > metablock , sizeof ( log_serializer_metablock_t ) ) ; <nl> } <nl> - mb_buffer_in_use = false ; <nl> state = state_ready ; <nl> } <nl> <nl> bool metablock_manager_t : : start_existing ( <nl> this , file , mb_found , mb_out , cb ) ) ; <nl> return false ; <nl> } <nl> - void metablock_manager_t : : co_write_metablock ( log_serializer_metablock_t * mb , <nl> - file_account_t * io_account ) { <nl> + <nl> + / / crc_mb is zero - initialized . <nl> + void metablock_manager_t : : co_write_metablock ( <nl> + const scoped_device_block_aligned_ptr_t < crc_metablock_t > & crc_mb , <nl> + file_account_t * io_account ) { <nl> mutex_t : : acq_t hold ( & write_lock ) ; <nl> <nl> rassert ( state = = state_ready ) ; <nl> - rassert ( ! mb_buffer_in_use ) ; <nl> <nl> - crc_metablock : : prepare ( mb_buffer . get ( ) , <nl> + crc_metablock : : prepare ( crc_mb . get ( ) , <nl> static_cast < uint32_t > ( cluster_version_t : : LATEST_DISK ) , <nl> - mb , <nl> next_version_number + + ) ; <nl> - rassert ( crc_metablock : : check_crc ( mb_buffer . get ( ) ) ) ; <nl> - <nl> - mb_buffer_in_use = true ; <nl> + rassert ( crc_metablock : : check_crc ( crc_mb . get ( ) ) ) ; <nl> <nl> state = state_writing ; <nl> - co_write ( dbfile , head . offset ( ) , METABLOCK_SIZE , mb_buffer . get ( ) , io_account , <nl> + co_write ( dbfile , head . offset ( ) , METABLOCK_SIZE , crc_mb . get ( ) , io_account , <nl> file_t : : WRAP_IN_DATASYNCS ) ; <nl> <nl> + + head ; <nl> <nl> state = state_ready ; <nl> - mb_buffer_in_use = false ; <nl> extent_manager - > stats - > bytes_written ( METABLOCK_SIZE ) ; <nl> } <nl> <nl> void metablock_manager_t : : write_metablock_callback ( <nl> - log_serializer_metablock_t * mb , <nl> + const scoped_device_block_aligned_ptr_t < crc_metablock_t > * mb , <nl> file_account_t * io_account , <nl> metablock_write_callback_t * cb ) { <nl> - co_write_metablock ( mb , io_account ) ; <nl> + co_write_metablock ( * mb , io_account ) ; <nl> cb - > on_metablock_write ( ) ; <nl> } <nl> <nl> - void metablock_manager_t : : write_metablock ( log_serializer_metablock_t * mb , <nl> - file_account_t * io_account , <nl> - metablock_write_callback_t * cb ) { <nl> + void metablock_manager_t : : write_metablock ( <nl> + const scoped_device_block_aligned_ptr_t < crc_metablock_t > & crc_mb , <nl> + file_account_t * io_account , <nl> + metablock_write_callback_t * cb ) { <nl> coro_t : : spawn_later_ordered ( std : : bind ( & metablock_manager_t : : write_metablock_callback , <nl> - this , mb , io_account , cb ) ) ; <nl> + this , & crc_mb , io_account , cb ) ) ; <nl> } <nl> <nl> void metablock_manager_t : : shutdown ( ) { <nl> <nl> rassert ( state = = state_ready ) ; <nl> - rassert ( ! mb_buffer_in_use ) ; <nl> state = state_shut_down ; <nl> } <nl> mmm a / src / serializer / log / metablock_manager . hpp <nl> ppp b / src / serializer / log / metablock_manager . hpp <nl> class metablock_manager_t { <nl> explicit metablock_manager_t ( extent_manager_t * em ) ; <nl> ~ metablock_manager_t ( ) ; <nl> <nl> - / * Clear metablock slots and write an initial metablock to the database file * / <nl> + / * Clear metablock slots and write an initial metablock to the database file . <nl> + ' initial ' has its log_serializer_metablock_t part pre - filled , the rest <nl> + zero - wiped . 
* / <nl> static void create ( file_t * dbfile , int64_t extent_size , <nl> - log_serializer_metablock_t * initial ) ; <nl> + scoped_device_block_aligned_ptr_t < crc_metablock_t > & & initial ) ; <nl> <nl> / * Tries to load existing metablocks * / <nl> void co_start_existing ( file_t * dbfile , bool * mb_found , <nl> class metablock_manager_t { <nl> virtual void on_metablock_write ( ) = 0 ; <nl> virtual ~ metablock_write_callback_t ( ) { } <nl> } ; <nl> - void write_metablock ( log_serializer_metablock_t * mb , <nl> + / / crc_mb - > metablock must be initialized , the rest zeroed , DEVICE_BLOCK_SIZE - aligned . <nl> + void write_metablock ( const scoped_device_block_aligned_ptr_t < crc_metablock_t > & crc_mb , <nl> file_account_t * io_account , <nl> metablock_write_callback_t * cb ) ; <nl> - void co_write_metablock ( log_serializer_metablock_t * mb , file_account_t * io_account ) ; <nl> + void co_write_metablock ( const scoped_device_block_aligned_ptr_t < crc_metablock_t > & mb , <nl> + file_account_t * io_account ) ; <nl> <nl> void shutdown ( ) ; <nl> <nl> class metablock_manager_t { <nl> bool * mb_found , <nl> log_serializer_metablock_t * mb_out , <nl> metablock_read_callback_t * cb ) ; <nl> - void write_metablock_callback ( log_serializer_metablock_t * mb , <nl> - file_account_t * io_account , <nl> - metablock_write_callback_t * cb ) ; <nl> + void write_metablock_callback ( <nl> + const scoped_device_block_aligned_ptr_t < crc_metablock_t > * mb , <nl> + file_account_t * io_account , <nl> + metablock_write_callback_t * cb ) ; <nl> <nl> + / / Only one metablock write can happen at a time . This isn ' t necessarily a <nl> + / / desirable thing , but that ' s how this type works . <nl> mutex_t write_lock ; <nl> <nl> / / keeps track of where we are in the extents <nl> class metablock_manager_t { <nl> <nl> metablock_version_t next_version_number ; <nl> <nl> - const scoped_device_block_aligned_ptr_t < crc_metablock_t > mb_buffer ; <nl> - / / true : we ' re using the buffer , no one else can <nl> - bool mb_buffer_in_use ; <nl> - <nl> extent_manager_t * const extent_manager ; <nl> <nl> const std : : vector < int64_t > metablock_offsets ; <nl>
|
Clean up metablock writing to avoid copying and stack usage .
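To make the calling convention introduced above concrete, here is a standalone sketch of the same pattern: the caller allocates one zero-wiped, device-block-aligned heap buffer, fills the interesting fields in place, and hands it to the writer by reference, so nothing is copied through the stack. The names below (`fake_metablock_t`, `alloc_zeroed_block`, `write_block`, the `DEVICE_BLOCK_SIZE` value) are illustrative stand-ins, not RethinkDB's actual serializer types.

```cpp
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <memory>

constexpr size_t DEVICE_BLOCK_SIZE = 4096;   // illustrative block size

struct fake_metablock_t {                    // stand-in for crc_metablock_t
    uint32_t crc;
    uint64_t version;
    char payload[256];
};

struct aligned_free_t {
    void operator()(fake_metablock_t *p) const { std::free(p); }
};
using aligned_ptr_t = std::unique_ptr<fake_metablock_t, aligned_free_t>;

// One zero-wiped, block-aligned allocation; the metablock is built in place.
aligned_ptr_t alloc_zeroed_block() {
    void *p = std::aligned_alloc(DEVICE_BLOCK_SIZE, DEVICE_BLOCK_SIZE);
    if (p == nullptr) { std::abort(); }
    std::memset(p, 0, DEVICE_BLOCK_SIZE);
    return aligned_ptr_t(static_cast<fake_metablock_t *>(p));
}

// The writer borrows the buffer by const reference; no copy, no large stack object.
void write_block(const aligned_ptr_t &block) {
    (void)block;  // a real implementation would hand block.get() to the I/O layer
}

int main() {
    aligned_ptr_t mb = alloc_zeroed_block();  // the rest of the block stays zeroed
    mb->version = 1;                          // fill only the fields we care about
    write_block(mb);
    return 0;
}
```

The actual interface additionally stamps a version number and a CRC into the buffer before issuing the write, but the ownership story is the same: one pre-allocated aligned buffer, passed by reference.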
|
rethinkdb/rethinkdb
|
2406019aca9e7af6a68db48ca679ddde81eafdb2
|
2017-02-14T18:21:11Z
|
mmm a / Marlin / temperature . cpp <nl> ppp b / Marlin / temperature . cpp <nl> void tp_init ( ) <nl> TIMSK0 | = ( 1 < < OCIE0B ) ; <nl> } <nl> <nl> - static unsigned char temp_count = 0 ; <nl> - static unsigned long raw_temp_0_value = 0 ; <nl> - static unsigned long raw_temp_1_value = 0 ; <nl> - static unsigned long raw_temp_2_value = 0 ; <nl> - static unsigned char temp_state = 0 ; <nl> + <nl> <nl> / / Timer 0 is shared with millies <nl> ISR ( TIMER0_COMPB_vect ) <nl> { <nl> + / / these variables are only accessible from the ISR , but static , so they don ' t lose their value <nl> + static unsigned char temp_count = 0 ; <nl> + static unsigned long raw_temp_0_value = 0 ; <nl> + static unsigned long raw_temp_1_value = 0 ; <nl> + static unsigned long raw_temp_2_value = 0 ; <nl> + static unsigned char temp_state = 0 ; <nl> + <nl> switch ( temp_state ) { <nl> case 0 : / / Prepare TEMP_0 <nl> # if ( TEMP_0_PIN > - 1 ) <nl>
|
moved the variables into the ISR , that ' s the most normal use case of static .
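A minimal sketch of the idiom this commit relies on (not Marlin code; the handler and variable names are invented): a function-local static has the same lifetime as a file-scope static, so it is initialized once and keeps its value between calls, but its name is only visible inside the one function that owns it. Moving the accumulators into the ISR therefore loses nothing while shrinking their scope.

```cpp
#include <cstdio>

// Hypothetical stand-in for the temperature ISR; the real handler is an AVR ISR.
void fake_isr() {
    static unsigned long raw_temp_value = 0;  // initialized once, survives between calls
    static unsigned int temp_count = 0;
    raw_temp_value += 42;                     // pretend ADC sample
    ++temp_count;
    std::printf("call %u -> accumulated %lu\n", temp_count, raw_temp_value);
}

int main() {
    for (int i = 0; i < 3; ++i) {
        fake_isr();  // prints 42, 84, 126: the statics keep their values
    }
    // raw_temp_value is not visible here; the state is private to fake_isr().
    return 0;
}
```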
|
MarlinFirmware/Marlin
|
ea86a5e10f7b8c0b2e0e0bf97a1fc95dbac4ae89
|
2011-11-05T15:53:37Z
|
new file mode 100644 <nl> index 0000000000 . . 0d9159de73 <nl> mmm / dev / null <nl> ppp b / docs / csharp / proto2 . md <nl> <nl> + As part of the 3 . 10 release of Google . Protobuf , experimental proto2 support has been released . This document outlines the new changes brought about to include proto2 support . This does not break existing proto3 support and users may continue to use proto3 features without changing their current code . Again the generated code and public API associated with proto2 is experimental and subject to change in the future . APIs for proto2 may be added , removed , or adjusted as feedback is received . <nl> + Generated code for proto2 may also be modified by adding , removing , or adjusting APIs as feedback is received . <nl> + <nl> + # # # Enabling proto2 features <nl> + <nl> + For information about specific proto2 features , please read the [ proto2 language guide ] ( https : / / developers . google . com / protocol - buffers / docs / proto ) . <nl> + <nl> + Much like other languages , proto2 features are used with proto2 files with the syntax declaration ` syntax = " proto2 " ; ` . However , please note , proto3 is still the recommended version of protobuf and proto2 support is meant for legacy system interop and advanced uses . <nl> + <nl> + # Generated code <nl> + <nl> + # # # Messages <nl> + <nl> + Messages in proto2 files are very similar to their proto3 counterparts . They expose the usual property for getting and setting , but they also include properties and methods to handle field presence . <nl> + <nl> + For ` optional ` / ` required ` field XYZ , a ` HasXYZ ` property is included for checking presence and a ` ClearXYZ ` method is included for clearing the value . <nl> + <nl> + ` ` ` proto <nl> + message Foo { <nl> + optional Bar bar = 1 ; <nl> + required Baz baz = 2 ; <nl> + } <nl> + ` ` ` <nl> + ` ` ` cs <nl> + var foo = new Foo ( ) ; <nl> + Assert . IsNull ( foo . Bar ) ; <nl> + Assert . False ( foo . HasBar ) ; <nl> + foo . Bar = new Bar ( ) ; <nl> + Assert . True ( foo . HasBar ) ; <nl> + foo . ClearBar ( ) ; <nl> + ` ` ` <nl> + <nl> + # # # Messages with extension ranges <nl> + <nl> + Messages which define extension ranges implement the ` IExtendableMessage ` interface as shown below . <nl> + See inline comments for more info . <nl> + <nl> + ` ` ` cs <nl> + public interface IExtendableMessage < T > : IMessage < T > where T : IExtendableMessage < T > <nl> + { <nl> + / / Gets the value of a single value extension . If the extension isn ' t present , this returns the default value . <nl> + TValue GetExtension < TValue > ( Extension < T , TValue > extension ) ; <nl> + / / Gets the value of a repeated extension . If the extension hasn ' t been set , this returns null to prevent unnecessary allocations . <nl> + RepeatedField < TValue > GetExtension < TValue > ( RepeatedExtension < T , TValue > extension ) ; <nl> + / / Gets the value of a repeated extension . This will initialize the value of the repeated field and will never return null . 
<nl> + RepeatedField < TValue > GetOrInitializeExtension < TValue > ( RepeatedExtension < T , TValue > extension ) ; <nl> + / / Sets the value of the extension <nl> + void SetExtension < TValue > ( Extension < T , TValue > extension , TValue value ) ; <nl> + / / Returns whether the extension is present in the message <nl> + bool HasExtension < TValue > ( Extension < T , TValue > extension ) ; <nl> + / / Clears the value of the extension , removing it from the message <nl> + void ClearExtension < TValue > ( Extension < T , TValue > extension ) ; <nl> + / / Clears the value of the repeated extension , removing it from the message . Calling GetExtension after this will always return null . <nl> + void ClearExtension < TValue > ( RepeatedExtension < T , TValue > extension ) ; <nl> + } <nl> + ` ` ` <nl> + <nl> + # # # Extensions <nl> + <nl> + Extensions are generated in static containers like reflection classes and type classes . <nl> + For example for a file called ` foo . proto ` containing extensions in the file scope , a <nl> + ` FooExtensions ` class is created containing the extensions defined in the file scope . <nl> + For easy access , this class can be used with ` using static ` to bring all extensions into scope . <nl> + <nl> + ` ` ` proto <nl> + option csharp_namespace = " FooBar " ; <nl> + extend Foo { <nl> + optional Baz foo_ext = 124 ; <nl> + } <nl> + message Baz { <nl> + extend Foo { <nl> + repeated Baz repeated_foo_ext = 125 ; <nl> + } <nl> + } <nl> + ` ` ` <nl> + ` ` ` cs <nl> + public static partial class FooExtensions { <nl> + public static readonly Extension < Foo , Baz > FooExt = / * initialization * / ; <nl> + } <nl> + <nl> + public partial class Baz { <nl> + public partial static class Extensions { <nl> + public static readonly RepeatedExtension < Foo , Baz > RepeatedFooExt = / * initialization * / ; <nl> + } <nl> + } <nl> + ` ` ` <nl> + ` ` ` cs <nl> + using static FooBar . FooExtensions ; <nl> + using static FooBar . Baz . Extensions ; <nl> + <nl> + var foo = new Foo ( ) ; <nl> + foo . SetExtension ( FooExt , new Baz ( ) ) ; <nl> + foo . GetOrInitializeExtension ( RepeatedFooExt ) . Add ( new Baz ( ) ) ; <nl> + ` ` ` <nl> + <nl> + # APIs <nl> + <nl> + # # # Message initialization <nl> + <nl> + Initialization refers to checking the status of required fields in a proto2 message . If a message is uninitialized , not all required fields are set in either the message itself or any of its submessages . In other languages , missing required fields throw errors depending on the merge method used . This could cause unforseen errors at runtime if the incorrect method is used . <nl> + However , in this implementation , parsers and input streams don ' t check messages for initialization on their own and throw errors . Instead it ' s up to you to handle messages with missing required fields in whatever way you see fit . <nl> + Checking message initialization can be done manually via the ` IsInitialized ` extension method in ` MessageExtensions ` . <nl> + <nl> + # # # Extension registries <nl> + <nl> + Just like in Java , extension registries can be constructed to parse extensions when reading new messages <nl> + from input streams . The API is fairly similar to the Java API with some added bonuses with C # syntax sugars . <nl> + <nl> + ` ` ` proto <nl> + message Baz { <nl> + extend Foo { <nl> + optional Baz foo_ext = 124 ; <nl> + } <nl> + } <nl> + ` ` ` <nl> + ` ` ` cs <nl> + var registry = new ExtensionRegistry ( ) <nl> + { <nl> + Baz . Extensions . 
FooExt <nl> + } ; <nl> + var foo = Foo . Factory . WithExtensionRegistry ( registry ) . ParseFrom ( input ) ; <nl> + Assert . True ( foo . HasExtension ( Bas . Extensions . FooExt ) ) ; <nl> + var fooNoRegistry = Foo . Factory . ParseFrom ( input ) ; <nl> + Assert . False ( foo . HasExtension ( Bas . Extensions . FooExt ) ) ; <nl> + ` ` ` <nl> + <nl> + # # # Custom options <nl> + <nl> + Due to their limited use and lack of type safety , the original ` CustomOptions ` APIs are now deprecated . Using the new generated extension identifiers , you can access extensions safely through the GetOption APIs . Note that cloneable values such as <nl> + repeated fields and messages will be deep cloned . <nl> + <nl> + Example based on custom options usage example [ here ] ( https : / / github . com / protocolbuffers / protobuf / issues / 5007 # issuecomment - 411604515 ) . <nl> + ` ` ` cs <nl> + foreach ( var service in input . Services ) <nl> + { <nl> + Console . WriteLine ( $ " { service . Name } " ) ; <nl> + foreach ( var method in service . Methods ) <nl> + { <nl> + var rule = method . GetOption ( AnnotationsExtensions . Http ) ; <nl> + if ( rule ! = null ) <nl> + { <nl> + Console . WriteLine ( $ " { method . Name } : { rule } " ) ; <nl> + } <nl> + else <nl> + { <nl> + Console . WriteLine ( $ " { method . Name } : no HTTP binding " ) ; <nl> + } <nl> + } <nl> + } <nl> + ` ` ` <nl> + <nl> + # # # Reflection <nl> + <nl> + Reflection APIs have been extended to enable accessing the new proto2 portions of the library and generated code . <nl> + <nl> + * FieldDescriptor . Extension <nl> + * Gets the extension identifier behind an extension field , allowing it to be added to an ExtensionRegistry <nl> + * FieldDescriptor . IsExtension <nl> + * Returns whether a field is an extension of another type . <nl> + * FieldDescriptor . ExtendeeType <nl> + * Returns the extended type of an extension field <nl> + * IFieldAccessor . HasValue <nl> + * Returns whether a field ' s value is set . For proto3 fields , throws an InvalidOperationException . <nl> + * FileDescriptor . Syntax <nl> + * Gets the syntax of a file <nl> + * FileDescriptor . Extensions <nl> + * An immutable list of extensions defined in the file <nl> + * MessageDescriptor . Extensions <nl> + * An immutable list of extensions defined in the message <nl> + <nl> + ` ` ` cs <nl> + var extensions = Baz . Descriptor . Extensions . GetExtensionsInDeclarationOrder ( Foo . Descriptor ) ; <nl> + var registry = new ExtensionRegistry ( ) ; <nl> + registry . AddRange ( extensions . Select ( f = > f . Extension ) ) ; <nl> + <nl> + var baz = Foo . Descriptor . Parser . WithExtensionRegistry ( registry ) . ParseFrom ( input ) ; <nl> + foreach ( var field in extensions ) <nl> + { <nl> + if ( field . Accessor . HasValue ( baz ) ) <nl> + { <nl> + Console . WriteLine ( $ " { field . Name } : { field . Accessor . GetValue ( baz ) } " ) ; <nl> + } <nl> + } <nl> + ` ` ` <nl>
|
Merge pull request from ObsidianMinor / csharp / proto2 - feature / docs
|
protocolbuffers/protobuf
|
94cbf009e934d45ee6fc1441ff9b56df9b314b3b
|
2019-09-03T10:15:52Z
|
mmm a / README . md <nl> ppp b / README . md <nl> <nl> + ! [ GODOT ] / logo . png <nl> + <nl> + # # # The Engine <nl> + <nl> + Godot is a fully featured , open source , MIT licensed , game engine . It focuses on having great tools , and a visual oriented workflow that can export to PC , Mobile and Web platforms with no hassle . <nl> + The editor , language and APIs are feature rich , yet simple to learn , allowing you to become productive in a matter of hours . <nl> + <nl> + # # # About <nl> + <nl> + Godot has been developed by Juan Linietsky and Ariel Manzur for several years , and was born as an in - house engine , used to publish several work - for - hire titles . <nl> + Development is sponsored by OKAM Studio ( http : / / www . okamstudio . com ) . <nl> + <nl> + # # # Godot is BETA . Collaborate ! ! <nl> + <nl> + Having been developed as in - house means that the user experience may still not be ideal for everyone . The features needed to make a great game are there , but we really need your help to fix all the rough edges and improve usability ( via feedback and / or code contributions ) . <nl> + We know we are close to having an awesome , open source , game engine with nothing to envy from the best commercial offerings , but we can ' t do this alone . This is why Godot is now open source , so everyone can help us reach this goal . <nl> + <nl> + # # # Binary Downloads , Documentation , Community , etc . <nl> + <nl> + Binary downloads , documentation , community , etc . can be found in Godot homepage : <nl> + <nl> + http : / / www . godotengine . org <nl> + <nl> + <nl> + <nl> + <nl>
|
readme changed
|
godotengine/godot
|
ebc38f099d6faa73769512ab881d17302b2ef493
|
2014-02-10T02:12:50Z
|
mmm a / Marlin / src / pins / pins_EINSY_RAMBO . h <nl> ppp b / Marlin / src / pins / pins_EINSY_RAMBO . h <nl> <nl> / / <nl> / / Limit Switches <nl> / / <nl> - # define X_MIN_PIN 64 / / 12 <nl> + / / Only use Diag Pins when SENSORLESS_HOMING is enabled for the TMC2130 drivers . <nl> + / / Otherwise use a physical endstop based configuration . <nl> + / / <nl> + / / SERVO0_PIN and Z_MIN_PIN configuration for BLTOUCH sensor when combined with SENSORLESS_HOMING . <nl> + / / <nl> + <nl> # define X_MAX_PIN - 1 <nl> - # define Y_MIN_PIN 69 / / 11 <nl> # define Y_MAX_PIN - 1 <nl> - # define Z_MIN_PIN 68 / / 10 <nl> # define Z_MAX_PIN - 1 <nl> <nl> + # if DISABLED ( SENSORLESS_HOMING ) <nl> + <nl> + # define X_MIN_PIN 12 <nl> + # define Y_MIN_PIN 11 <nl> + # define Z_MIN_PIN 10 <nl> + <nl> + # else <nl> + <nl> + # define X_MIN_PIN X_DIAG_PIN <nl> + # define Y_MIN_PIN Y_DIAG_PIN <nl> + <nl> + # if ENABLED ( BLTOUCH ) <nl> + # define Z_MIN_PIN 11 / / Y - MIN <nl> + # define SERVO0_PIN 10 / / Z - MIN <nl> + # else <nl> + # define Z_MIN_PIN 10 <nl> + # endif <nl> + <nl> + # endif <nl> + <nl> / / <nl> / / Z Probe ( when not Z_MIN_PIN ) <nl> / / <nl>
|
Einsy : Regular endstops and BLTOUCH pins
|
MarlinFirmware/Marlin
|
9e9e29ddf8e0cbb6118e6f02ca33bde3ce073a27
|
2018-03-17T21:43:09Z
|
mmm a / tensorflow / core / kernels / training_ops . cc <nl> ppp b / tensorflow / core / kernels / training_ops . cc <nl> class SparseApplyRMSPropOp : public OpKernel { <nl> " var and mom do not have the same shape " , <nl> var . shape ( ) . DebugString ( ) , " " , mom . shape ( ) . DebugString ( ) ) ) ; <nl> <nl> - OP_REQUIRES ( <nl> - ctx , var . shape ( ) . IsSameSize ( grad . shape ( ) ) , <nl> - errors : : InvalidArgument ( " var and grad do not have the same shape " , <nl> - var . shape ( ) . DebugString ( ) , " " , <nl> - grad . shape ( ) . DebugString ( ) ) ) ; <nl> + OP_REQUIRES ( ctx , TensorShapeUtils : : IsVectorOrHigher ( var . shape ( ) ) , <nl> + errors : : InvalidArgument ( " var must be at least 1 dimensional " ) ) ; <nl> <nl> OP_REQUIRES ( ctx , TensorShapeUtils : : IsVector ( indices . shape ( ) ) , <nl> errors : : InvalidArgument ( " indices must be one - dimensional " ) ) ; <nl> <nl> + for ( int d = 1 ; d < var . dims ( ) ; d + + ) { <nl> + OP_REQUIRES ( ctx , var . dim_size ( d ) = = grad . dim_size ( d ) , <nl> + errors : : InvalidArgument ( <nl> + " var and grad must match in dimension " , d ) ) ; <nl> + } <nl> const Tindex N = indices . dim_size ( 0 ) ; <nl> OP_REQUIRES ( <nl> ctx , grad . dim_size ( 0 ) = = N , <nl> mmm a / tensorflow / core / ops / ops . pbtxt <nl> ppp b / tensorflow / core / ops / ops . pbtxt <nl> op { <nl> summary : " Decode a PNG - encoded image to a uint8 or uint16 tensor . " <nl> description : " The attr ` channels ` indicates the desired number of color channels for the \ ndecoded image . \ n \ nAccepted values are : \ n \ n * 0 : Use the number of channels in the PNG - encoded image . \ n * 1 : output a grayscale image . \ n * 3 : output an RGB image . \ n * 4 : output an RGBA image . \ n \ nIf needed , the PNG - encoded image is transformed to match the requested number \ nof color channels . " <nl> } <nl> + op { <nl> + name : " DecodeGif " <nl> + input_arg { <nl> + name : " contents " <nl> + description : " 0 - D . The GIF - encoded image . " <nl> + type : DT_STRING <nl> + } <nl> + output_arg { <nl> + name : " image " <nl> + description : " 3 - D with shape ` [ height , width , channels ] ` . " <nl> + type_attr : " dtype " <nl> + } <nl> + attr { <nl> + name : " channels " <nl> + type : " int " <nl> + default_value { <nl> + i : 0 <nl> + } <nl> + description : " Number of color channels for the decoded image . " <nl> + } <nl> + attr { <nl> + name : " dtype " <nl> + type : " type " <nl> + default_value { <nl> + type : DT_UINT8 <nl> + } <nl> + allowed_values { <nl> + list { <nl> + type : DT_UINT8 <nl> + type : DT_UINT16 <nl> + } <nl> + } <nl> + } <nl> + summary : " Decode a GIF - encoded image to a uint8 or uint16 tensor . " <nl> + description : " The attr ` channels ` indicates the desired number of color channels for the \ ndecoded image . \ n \ nAccepted values are : \ n \ n * 0 : Use the number of channels in the GIF - encoded image . \ n * 1 : output a grayscale image . \ n * 3 : output an RGB image . \ n * 4 : output an RGBA image . \ n \ nIf needed , the GIF - encoded image is transformed to match the requested number \ nof color channels . " <nl> + } <nl> op { <nl> name : " DecodeRaw " <nl> input_arg { <nl> mmm a / tensorflow / examples / tutorials / mnist / fully_connected_feed . py <nl> ppp b / tensorflow / examples / tutorials / mnist / fully_connected_feed . py <nl> def run_training ( ) : <nl> # Add the Op to compare the logits to the labels during evaluation . <nl> eval_correct = mnist . 
evaluation ( logits , labels_placeholder ) <nl> <nl> - # Build the summary operation based on the TF collection of Summaries . <nl> - summary_op = tf . merge_all_summaries ( ) <nl> + # Build the summary Tensor based on the TF collection of Summaries . <nl> + summary = tf . merge_all_summaries ( ) <nl> <nl> # Add the variable initializer Op . <nl> init = tf . initialize_all_variables ( ) <nl> def run_training ( ) : <nl> # Print status to stdout . <nl> print ( ' Step % d : loss = % . 2f ( % . 3f sec ) ' % ( step , loss_value , duration ) ) <nl> # Update the events file . <nl> - summary_str = sess . run ( summary_op , feed_dict = feed_dict ) <nl> + summary_str = sess . run ( summary , feed_dict = feed_dict ) <nl> summary_writer . add_summary ( summary_str , step ) <nl> summary_writer . flush ( ) <nl> <nl> mmm a / tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . train . input_producer . md <nl> ppp b / tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . train . input_producer . md <nl> Output the rows of ` input_tensor ` to a queue for an input pipeline . <nl> ` OutOfRange ` error . If not specified , ` input_producer ` can cycle through <nl> the rows of ` input_tensor ` an unlimited number of times . <nl> * < b > ` shuffle ` < / b > : ( Optional . ) A boolean . If true , the rows are randomly shuffled <nl> - within each eopch . <nl> + within each epoch . <nl> * < b > ` seed ` < / b > : ( Optional . ) An integer . The seed to use if ` shuffle ` is true . <nl> * < b > ` capacity ` < / b > : ( Optional . ) The capacity of the queue to be used for buffering <nl> the input . <nl> mmm a / tensorflow / g3doc / api_docs / python / io_ops . md <nl> ppp b / tensorflow / g3doc / api_docs / python / io_ops . md <nl> Output the rows of ` input_tensor ` to a queue for an input pipeline . <nl> ` OutOfRange ` error . If not specified , ` input_producer ` can cycle through <nl> the rows of ` input_tensor ` an unlimited number of times . <nl> * < b > ` shuffle ` < / b > : ( Optional . ) A boolean . If true , the rows are randomly shuffled <nl> - within each eopch . <nl> + within each epoch . <nl> * < b > ` seed ` < / b > : ( Optional . ) An integer . The seed to use if ` shuffle ` is true . <nl> * < b > ` capacity ` < / b > : ( Optional . ) The capacity of the queue to be used for buffering <nl> the input . <nl> mmm a / tensorflow / g3doc / how_tos / variable_scope / index . md <nl> ppp b / tensorflow / g3doc / how_tos / variable_scope / index . md <nl> with tf . variable_scope ( " foo " ) : <nl> v = tf . get_variable ( " v " , [ 1 ] ) <nl> with tf . variable_scope ( " foo " , reuse = True ) : <nl> v1 = tf . get_variable ( " v " , [ 1 ] ) <nl> - assert v1 = = v <nl> + assert v1 is v <nl> ` ` ` <nl> <nl> # # # Basics of ` tf . variable_scope ( ) ` <nl> with tf . variable_scope ( " foo " ) : <nl> v = tf . get_variable ( " v " , [ 1 ] ) <nl> tf . get_variable_scope ( ) . reuse_variables ( ) <nl> v1 = tf . get_variable ( " v " , [ 1 ] ) <nl> - assert v1 = = v <nl> + assert v1 is v <nl> ` ` ` <nl> <nl> Note that you * cannot * set the ` reuse ` flag to ` False ` . The reason behind <nl> with tf . variable_scope ( foo_scope ) <nl> with tf . variable_scope ( foo_scope , reuse = True ) <nl> v1 = tf . get_variable ( " v " , [ 1 ] ) <nl> w1 = tf . 
get_variable ( " w " , [ 1 ] ) <nl> - assert v1 = = v <nl> - assert w1 = = w <nl> + assert v1 is v <nl> + assert w1 is w <nl> ` ` ` <nl> <nl> When opening a variable scope using a previously existing scope <nl> mmm a / tensorflow / g3doc / tutorials / mnist / tf / index . md <nl> ppp b / tensorflow / g3doc / tutorials / mnist / tf / index . md <nl> optimizer = tf . train . GradientDescentOptimizer ( learning_rate ) <nl> We then generate a single variable to contain a counter for the global <nl> training step and the [ ` minimize ( ) ` ] ( . . / . . / . . / api_docs / python / train . md # Optimizer . minimize ) <nl> op is used to both update the trainable weights in the system and increment the <nl> - global step . This is , by convention , known as the ` train_op ` and is what must <nl> + global step . This op is , by convention , known as the ` train_op ` and is what must <nl> be run by a TensorFlow session in order to induce one full step of training <nl> ( see below ) . <nl> <nl> global_step = tf . Variable ( 0 , name = ' global_step ' , trainable = False ) <nl> train_op = optimizer . minimize ( loss , global_step = global_step ) <nl> ` ` ` <nl> <nl> - The tensor containing the outputs of the training op is returned . <nl> - <nl> # # Train the Model <nl> <nl> Once the graph is built , it can be iteratively trained and evaluated in a loop <nl> if step % 100 = = 0 : <nl> # # # # Visualize the Status <nl> <nl> In order to emit the events files used by [ TensorBoard ] ( . . / . . / . . / how_tos / summaries_and_tensorboard / index . md ) , <nl> - all of the summaries ( in this case , only one ) are collected into a single op <nl> + all of the summaries ( in this case , only one ) are collected into a single Tensor <nl> during the graph building phase . <nl> <nl> ` ` ` python <nl> - summary_op = tf . merge_all_summaries ( ) <nl> + summary = tf . merge_all_summaries ( ) <nl> ` ` ` <nl> <nl> And then after the session is created , a [ ` tf . train . SummaryWriter ` ] ( . . / . . / . . / api_docs / python / train . md # SummaryWriter ) <nl> summary_writer = tf . train . SummaryWriter ( FLAGS . train_dir , sess . graph ) <nl> ` ` ` <nl> <nl> Lastly , the events file will be updated with new summary values every time the <nl> - ` summary_op ` is run and the output passed to the writer ' s ` add_summary ( ) ` <nl> + ` summary ` is evaluated and the output passed to the writer ' s ` add_summary ( ) ` <nl> function . <nl> <nl> ` ` ` python <nl> - summary_str = sess . run ( summary_op , feed_dict = feed_dict ) <nl> + summary_str = sess . run ( summary , feed_dict = feed_dict ) <nl> summary_writer . add_summary ( summary_str , step ) <nl> ` ` ` <nl> <nl> mmm a / tensorflow / python / framework / function_test . py <nl> ppp b / tensorflow / python / framework / function_test . py <nl> def testBasic ( self ) : <nl> self . assertEqual ( len ( gdef . library . function ) , 1 ) # 1 function is defined . <nl> <nl> for _ in xrange ( 10 ) : <nl> - # Run the graph , which is basicly two function calls . <nl> + # Run the graph , which is basically two function calls . <nl> ans_u0 , ans_v0 , ans_w0 , ans_u1 , ans_v1 , ans_w1 = sess . run ( [ u0 , v0 , w0 , <nl> u1 , v1 , w1 ] ) <nl> self . assertAllEqual ( ans_u0 , self . _mat ( 10 . 0 ) ) # 2 * 3 + 4 = 10 <nl> mmm a / tensorflow / python / framework / ops . py <nl> ppp b / tensorflow / python / framework / ops . 
py <nl> def _override_helper ( clazz_object , operator , func ) : <nl> Args : <nl> clazz_object : the class to override for ; either Tensor or SparseTensor . <nl> operator : the string name of the operator to override . <nl> - func : the function that replaces the overriden operator . <nl> + func : the function that replaces the overridden operator . <nl> <nl> Raises : <nl> ValueError : If operator has already been overwritten , <nl> def container ( self , container_name ) : <nl> q3 = tf . FIFOQueue ( 30 , tf . float32 ) <nl> <nl> # Resets container " experiment0 " , after which the state of v1 , v2 , v4 , q1 <nl> - # will become undefined ( such as unitialized ) . <nl> + # will become undefined ( such as uninitialized ) . <nl> tf . Session . reset ( target , [ " experiment0 " ] ) <nl> ` ` ` <nl> <nl> def _control_dependencies_for_inputs ( self , input_tensors ) : <nl> for controller in self . _control_dependencies_stack : <nl> # If any of the input_ops already depends on the inputs from controller , <nl> # we say that the new op is dominated ( by that input ) , and we therefore <nl> - # do not need to add control dependences for this controller ' s inputs . <nl> + # do not need to add control dependencies for this controller ' s inputs . <nl> dominated = False <nl> for op in input_ops : <nl> if controller . op_in_group ( op ) : <nl> mmm a / tensorflow / python / lib / core / py_func . cc <nl> ppp b / tensorflow / python / lib / core / py_func . cc <nl> struct PyCall { <nl> / / with this " token " . <nl> string token ; <nl> <nl> - / / Inputs and outputs of this function invokation . <nl> + / / Inputs and outputs of this function invocation . <nl> std : : vector < Tensor > ins ; <nl> std : : vector < Tensor > out ; <nl> } ; <nl> mmm a / tensorflow / python / ops / array_ops . py <nl> ppp b / tensorflow / python / ops / array_ops . py <nl> def strided_slice ( input_ , <nl> <nl> For the ith spec , <nl> ` begin_mask ` , ` end_mask ` , ` ellipsis_mask ` , ` new_axis_mask ` , <nl> - and ` shrink_axis_mask ` will have the ith bit corrsponding to <nl> + and ` shrink_axis_mask ` will have the ith bit corresponding to <nl> the ith spec . <nl> <nl> If the ith bit of ` begin_mask ` is non - zero , ` begin [ i ] ` is ignored and <nl> mmm a / tensorflow / python / ops / control_flow_ops . py <nl> ppp b / tensorflow / python / ops / control_flow_ops . py <nl> def _BuildLoop ( self , pred , body , original_loop_vars , loop_vars ) : <nl> " " " Core : Add the loop termination condition and body to the graph . " " " <nl> flat_loop_vars = nest . flatten ( original_loop_vars ) <nl> <nl> - # Let the context know the loop variabes so the loop variables <nl> + # Let the context know the loop variables so the loop variables <nl> # would be added in the outer contexts properly . <nl> self . _InitializeValues ( loop_vars ) <nl> real_vars = loop_vars <nl> def while_loop ( cond , body , loop_vars , parallel_iterations = 10 , back_prop = True , <nl> <nl> Returns : <nl> The output tensors for the loop variables after the loop . When the length <nl> - of ` loop_vars ` is 1 this is a Tensor , TensorArry or IndexedSlice and when <nl> + of ` loop_vars ` is 1 this is a Tensor , TensorArray or IndexedSlice and when <nl> the length of ` loop_vars ` is greater than 1 it returns a list . <nl> <nl> Raises : <nl> mmm a / tensorflow / python / ops / data_flow_ops . py <nl> ppp b / tensorflow / python / ops / data_flow_ops . 
py <nl> def __init__ ( self , capacity , min_after_dequeue , dtypes , shapes = None , <nl> <nl> <nl> class FIFOQueue ( QueueBase ) : <nl> - " " " A queue implementation that dequeues elements in first - in - first out order . <nl> + " " " A queue implementation that dequeues elements in first - in first - out order . <nl> <nl> See [ ` tf . QueueBase ` ] ( # QueueBase ) for a description of the methods on <nl> this class . <nl> mmm a / tensorflow / python / ops / partitioned_variables . py <nl> ppp b / tensorflow / python / ops / partitioned_variables . py <nl> <nl> with the same name but different slicings , including possibly no partitions . <nl> <nl> TODO ( touts ) : If an initializer provides a seed , the seed must be changed <nl> - deterministicaly for each slice , maybe by adding one to it , otherwise each <nl> + deterministically for each slice , maybe by adding one to it , otherwise each <nl> slice will use the same values . Maybe this can be done by passing the <nl> slice offsets to the initializer functions . <nl> <nl> mmm a / tensorflow / python / ops / sparse_ops . py <nl> ppp b / tensorflow / python / ops / sparse_ops . py <nl> def sparse_reduce_sum ( sp_input , reduction_axes = None , keep_dims = False ) : <nl> ` ` ` python <nl> # ' x ' represents [ [ 1 , ? , 1 ] <nl> # [ ? , 1 , ? ] ] <nl> - # where ? is implictly - zero . <nl> + # where ? is implicitly - zero . <nl> tf . sparse_reduce_sum ( x ) = = > 3 <nl> tf . sparse_reduce_sum ( x , 0 ) = = > [ 1 , 1 , 1 ] <nl> tf . sparse_reduce_sum ( x , 1 ) = = > [ 2 , 1 ] # Can also use - 1 as the axis . <nl> def sparse_reset_shape ( sp_input , new_shape = None ) : <nl> run time . <nl> <nl> - Setting ` new_shape ` as [ 2 , 3 , 6 ] will be fine as this shape is larger or <nl> - eqaul in every dimension compared to the original shape [ 2 , 3 , 5 ] . <nl> + equal in every dimension compared to the original shape [ 2 , 3 , 5 ] . <nl> <nl> - On the other hand , setting new_shape as [ 2 , 3 , 4 ] is also an error : The <nl> third dimension is smaller than the original shape [ 2 , 3 , 5 ] ( and an <nl> def sparse_reset_shape ( sp_input , new_shape = None ) : <nl> Args : <nl> sp_input : The input ` SparseTensor ` . <nl> new_shape : None or a vector representing the new shape for the returned <nl> - ` SpraseTensor ` . <nl> + ` SparseTensor ` . <nl> <nl> Returns : <nl> A ` SparseTensor ` indices and values unchanged from ` input_sp ` . Its shape is <nl> mmm a / tensorflow / python / ops / tensor_array_ops . py <nl> ppp b / tensorflow / python / ops / tensor_array_ops . py <nl> def __init__ ( self , dtype , size = None , dynamic_size = None , <nl> <nl> The name of the ` TensorArray ` ( even if passed in ) is uniquified : each time <nl> a new ` TensorArray ` is created at runtime it is assigned its own name for <nl> - the duration of the run . This avoids name collissions if a ` TensorArray ` <nl> + the duration of the run . This avoids name collisions if a ` TensorArray ` <nl> is created within a ` while_loop ` . <nl> <nl> Args : <nl> mmm a / tensorflow / python / ops / variables . py <nl> ppp b / tensorflow / python / ops / variables . py <nl> class Variable ( object ) : <nl> ` all_variables ( ) ` returns the contents of that collection . 
<nl> <nl> When building a machine learning model it is often convenient to distinguish <nl> - betwen variables holding the trainable model parameters and other variables <nl> + between variables holding the trainable model parameters and other variables <nl> such as a ` global step ` variable used to count training steps . To make this <nl> easier , the variable constructor supports a ` trainable = < bool > ` parameter . If <nl> ` True ` , the new variable is also added to the graph collection <nl> def __init__ ( self , initial_value = None , trainable = True , collections = None , <nl> If ` None ` , either the datatype will be kept ( if ` initial_value ` is <nl> a Tensor ) , or ` convert_to_tensor ` will decide . <nl> <nl> - Returns : <nl> - A Variable . <nl> - <nl> Raises : <nl> ValueError : If both ` variable_def ` and initial_value are specified . <nl> ValueError : If the initial value is not specified , or does not have a <nl> def report_uninitialized_variables ( var_list = None , <nl> name : Optional name of the ` Operation ` . <nl> <nl> Returns : <nl> - A 1 - D tensor containing names of the unintialized variables , or an empty 1 - D <nl> + A 1 - D tensor containing names of the uninitialized variables , or an empty 1 - D <nl> tensor if there are no variables or no uninitialized variables . <nl> " " " <nl> if var_list is None : <nl> mmm a / tensorflow / python / summary / event_file_inspector_test . py <nl> ppp b / tensorflow / python / summary / event_file_inspector_test . py <nl> def _WriteScalarSummaries ( self , data , subdirs = ( ' ' , ) ) : <nl> sw . close ( ) <nl> <nl> def testEmptyLogdir ( self ) : <nl> - # Nothing was wrriten to logdir <nl> + # Nothing was written to logdir <nl> units = efi . get_inspection_units ( self . logdir ) <nl> self . assertEqual ( [ ] , units ) <nl> <nl> mmm a / tensorflow / python / summary / event_multiplexer . py <nl> ppp b / tensorflow / python / summary / event_multiplexer . py <nl> def __init__ ( self , <nl> None , then the EventMultiplexer initializes without any runs . <nl> size_guidance : A dictionary mapping from ` tagType ` to the number of items <nl> to store for each tag of that type . See <nl> - ` event_ccumulator . EventAccumulator ` for details . <nl> + ` event_accumulator . EventAccumulator ` for details . <nl> purge_orphaned_data : Whether to discard any events that were " orphaned " by <nl> a TensorFlow restart . <nl> " " " <nl> mmm a / tensorflow / python / training / coordinator_test . py <nl> ppp b / tensorflow / python / training / coordinator_test . py <nl> def testJoinSomeRegistered ( self ) : <nl> threading . Thread ( target = SleepABit , args = ( 0 . 01 , coord ) ) ] <nl> for t in threads : <nl> t . start ( ) <nl> - # threads [ 1 ] is not registred we must pass it in . <nl> + # threads [ 1 ] is not registered we must pass it in . <nl> coord . join ( threads [ 1 : 1 ] ) <nl> for t in threads : <nl> self . assertFalse ( t . is_alive ( ) ) <nl> mmm a / tensorflow / python / training / device_setter_test . py <nl> ppp b / tensorflow / python / training / device_setter_test . py <nl> class DeviceSetterTest ( tf . test . TestCase ) : <nl> " ps " : [ " ps0 : 2222 " , " ps1 : 2222 " ] , <nl> " worker " : [ " worker0 : 2222 " , " worker1 : 2222 " , " worker2 : 2222 " ] } ) <nl> <nl> - def testPS2TasksWithCluserSpecClass ( self ) : <nl> + def testPS2TasksWithClusterSpecClass ( self ) : <nl> with tf . device ( tf . train . replica_device_setter ( cluster = self . _cluster_spec ) ) : <nl> v = tf . Variable ( [ 1 , 2 ] ) <nl> w = tf . 
Variable ( [ 2 , 1 ] ) <nl> mmm a / tensorflow / python / training / ftrl_test . py <nl> ppp b / tensorflow / python / training / ftrl_test . py <nl> def testEquivSparseAdagradwithoutRegularization ( self ) : <nl> self . assertAllCloseAccordingToType ( val0 , val2 ) <nl> self . assertAllCloseAccordingToType ( val1 , val3 ) <nl> <nl> - def testEquivSparseGradientDescentwithoutRegularizaion ( self ) : <nl> + def testEquivSparseGradientDescentwithoutRegularization ( self ) : <nl> for dtype in [ tf . half , tf . float32 ] : <nl> with self . test_session ( ) : <nl> val0 , val1 = self . applyOptimizer ( <nl> def testEquivSparseGradientDescentwithoutRegularizaion ( self ) : <nl> self . assertAllCloseAccordingToType ( val0 , val2 ) <nl> self . assertAllCloseAccordingToType ( val1 , val3 ) <nl> <nl> - def testEquivGradientDescentwithoutRegularizaion ( self ) : <nl> + def testEquivGradientDescentwithoutRegularization ( self ) : <nl> for dtype in [ tf . half , tf . float32 ] : <nl> with self . test_session ( ) : <nl> val0 , val1 = self . applyOptimizer ( <nl> mmm a / tensorflow / python / training / input . py <nl> ppp b / tensorflow / python / training / input . py <nl> def input_producer ( input_tensor , element_shape = None , num_epochs = None , <nl> ` OutOfRange ` error . If not specified , ` input_producer ` can cycle through <nl> the rows of ` input_tensor ` an unlimited number of times . <nl> shuffle : ( Optional . ) A boolean . If true , the rows are randomly shuffled <nl> - within each eopch . <nl> + within each epoch . <nl> seed : ( Optional . ) An integer . The seed to use if ` shuffle ` is true . <nl> capacity : ( Optional . ) The capacity of the queue to be used for buffering <nl> the input . <nl> def batch_join ( tensors_list , batch_size , capacity = 32 , enqueue_many = False , <nl> " " " Runs a list of tensors to fill a queue to create batches of examples . <nl> <nl> The ` tensors_list ` argument is a list of tuples of tensors , or a list of <nl> - dictionaries of tensors . Each element in the list is treated similarily <nl> + dictionaries of tensors . Each element in the list is treated similarly <nl> to the ` tensors ` argument of ` tf . train . batch ( ) ` . <nl> <nl> Enqueues a different list of tensors in different threads . <nl> def shuffle_batch_join ( tensors_list , batch_size , capacity , <nl> " " " Create batches by randomly shuffling tensors . <nl> <nl> The ` tensors_list ` argument is a list of tuples of tensors , or a list of <nl> - dictionaries of tensors . Each element in the list is treated similarily <nl> + dictionaries of tensors . Each element in the list is treated similarly <nl> to the ` tensors ` argument of ` tf . train . shuffle_batch ( ) ` . <nl> <nl> This version enqueues a different list of tensors in different threads . <nl> mmm a / tensorflow / python / training / input_test . py <nl> ppp b / tensorflow / python / training / input_test . py <nl> def testCannotInferRankError ( self ) : <nl> with self . assertRaisesRegexp ( ValueError , " Cannot infer Tensor ' s rank " ) : <nl> tf . train . batch ( [ x ] , batch_size = 2 ) <nl> <nl> - def testBatchedSparseTensorInferedShape ( self ) : <nl> + def testBatchedSparseTensorInferredShape ( self ) : <nl> sparse = tf . SparseTensor ( indices = [ [ 0 ] ] , values = [ 1 . 0 ] , shape = [ 1 ] ) <nl> self . assertAllEqual ( sparse . shape . get_shape ( ) . as_list ( ) , [ 1 ] ) <nl> batched = tf . train . batch ( [ sparse ] , batch_size = 2 ) <nl> self . assertAllEqual ( batched . shape . get_shape ( ) . 
as_list ( ) , [ 2 ] ) <nl> <nl> - def testBatchedSparseTensorInferedShapeEnqueueMany ( self ) : <nl> + def testBatchedSparseTensorInferredShapeEnqueueMany ( self ) : <nl> sparse = tf . SparseTensor ( indices = [ [ 0 ] ] , values = [ 1 . 0 ] , shape = [ 1 ] ) <nl> self . assertAllEqual ( sparse . shape . get_shape ( ) . as_list ( ) , [ 1 ] ) <nl> batched = tf . train . batch ( [ sparse ] , batch_size = 2 , enqueue_many = True ) <nl> self . assertAllEqual ( batched . shape . get_shape ( ) . as_list ( ) , [ 1 ] ) <nl> <nl> - def testBatchedSparseTensorInferedShapeUnknownRank ( self ) : <nl> + def testBatchedSparseTensorInferredShapeUnknownRank ( self ) : <nl> sparse = tf . SparseTensor ( <nl> indices = tf . placeholder ( tf . int64 ) , <nl> values = tf . placeholder ( tf . float32 ) , <nl> def testBatchedSparseTensorInferedShapeUnknownRank ( self ) : <nl> batched = tf . train . batch ( [ sparse ] , batch_size = 2 ) <nl> self . assertIs ( batched . shape . get_shape ( ) . num_elements ( ) , None ) <nl> <nl> - def testBatchedSparseTensorInferedShapeUnknownRankEnqueueMany ( self ) : <nl> + def testBatchedSparseTensorInferredShapeUnknownRankEnqueueMany ( self ) : <nl> sparse = tf . SparseTensor ( <nl> indices = tf . placeholder ( tf . int64 ) , <nl> values = tf . placeholder ( tf . float32 ) , <nl> def testTwoThreads ( self ) : <nl> def testTwoThreadsDict ( self ) : <nl> self . _testTwoThreadsHelper ( use_dict = True ) <nl> <nl> - def testMistmatchedDictKeys ( self ) : <nl> + def testMismatchedDictKeys ( self ) : <nl> with self . assertRaisesRegexp ( ValueError , " must have the same keys " ) : <nl> tf . train . batch_join ( <nl> [ { " c " : 12 , " s " : 123 , " S " : " a " } , <nl> def testTwoThreadsSmallerBatch ( self ) : <nl> for thread in threads : <nl> thread . join ( ) <nl> <nl> - def testMistmatchedDictKeys ( self ) : <nl> + def testMismatchedDictKeys ( self ) : <nl> with self . assertRaisesRegexp ( ValueError , " must have the same keys " ) : <nl> tf . train . shuffle_batch_join ( <nl> [ { " c " : 12 , " s " : 123 , " S " : " a " } , <nl> mmm a / tensorflow / python / training / learning_rate_decay . py <nl> ppp b / tensorflow / python / training / learning_rate_decay . py <nl> def natural_exp_decay ( learning_rate , global_step , decay_steps , decay_rate , <nl> decayed_learning_rate = learning_rate * exp ( - decay_rate * global_step ) <nl> ` ` ` <nl> <nl> - Example : decay exponetially with a base of 0 . 96 : <nl> + Example : decay exponentially with a base of 0 . 96 : <nl> <nl> ` ` ` python <nl> . . . <nl> mmm a / tensorflow / python / training / rmsprop_test . py <nl> ppp b / tensorflow / python / training / rmsprop_test . py <nl> <nl> from __future__ import print_function <nl> <nl> import math <nl> + import copy <nl> <nl> import numpy as np <nl> import tensorflow as tf <nl> <nl> <nl> class RMSPropOptimizerTest ( tf . test . TestCase ) : <nl> <nl> - def _rmsprop_update_numpy ( self , var , g , rms , mom , lr , decay , momentum , <nl> + def _rmsprop_update_numpy ( self , var , g , rms , mom , lr , decay , momentum , <nl> epsilon ) : <nl> rms_t = rms * decay + ( 1 - decay ) * g * g <nl> mom_t = momentum * mom + lr * g / np . sqrt ( rms_t + epsilon ) <nl> var_t = var - mom_t <nl> return var_t , rms_t , mom_t <nl> <nl> + def _sparse_rmsprop_update_numpy ( self , var , gindexs , gvalues , rms , mom , <nl> + lr , decay , momentum , epsilon ) : <nl> + rms_t = copy . deepcopy ( rms ) <nl> + mom_t = copy . deepcopy ( mom ) <nl> + var_t = copy . 
deepcopy ( var ) <nl> + for i in range ( len ( gindexs ) ) : <nl> + gindex = gindexs [ i ] <nl> + gvalue = gvalues [ i ] <nl> + rms_t [ gindex ] = rms [ gindex ] * decay + ( 1 - decay ) * gvalue * gvalue <nl> + mom_t [ gindex ] = momentum * mom [ gindex ] + lr * gvalue / np . sqrt ( <nl> + rms_t [ gindex ] + epsilon ) <nl> + var_t [ gindex ] = var [ gindex ] - mom_t [ gindex ] <nl> + return var_t , rms_t , mom_t <nl> + <nl> def testSparseWithMomentum ( self ) : <nl> for dtype in [ tf . half , tf . float32 ] : <nl> with self . test_session ( ) : <nl> # Initialize variables for numpy implementation . <nl> var0_np = np . array ( [ 1 . 0 , 2 . 0 ] , dtype = dtype . as_numpy_dtype ) <nl> - grads0_np = np . array ( [ 0 . 1 , 0 . 1 ] , dtype = dtype . as_numpy_dtype ) <nl> + grads0_np = np . array ( [ 0 . 1 ] , dtype = dtype . as_numpy_dtype ) <nl> var1_np = np . array ( [ 3 . 0 , 4 . 0 ] , dtype = dtype . as_numpy_dtype ) <nl> - grads1_np = np . array ( [ 0 . 01 , 0 . 01 ] , dtype = dtype . as_numpy_dtype ) <nl> + grads1_np = np . array ( [ 0 . 01 ] , dtype = dtype . as_numpy_dtype ) <nl> <nl> var0 = tf . Variable ( var0_np ) <nl> var1 = tf . Variable ( var1_np ) <nl> - grads0_np_indices = np . array ( [ 0 , 1 ] , dtype = np . int32 ) <nl> + grads0_np_indices = np . array ( [ 0 ] , dtype = np . int32 ) <nl> grads0 = tf . IndexedSlices ( tf . constant ( grads0_np ) , <nl> tf . constant ( grads0_np_indices ) , <nl> - tf . constant ( [ 2 ] ) ) <nl> - grads1_np_indices = np . array ( [ 0 , 1 ] , dtype = np . int32 ) <nl> + tf . constant ( [ 1 ] ) ) <nl> + grads1_np_indices = np . array ( [ 1 ] , dtype = np . int32 ) <nl> grads1 = tf . IndexedSlices ( tf . constant ( grads1_np ) , <nl> tf . constant ( grads1_np_indices ) , <nl> - tf . constant ( [ 2 ] ) ) <nl> + tf . constant ( [ 1 ] ) ) <nl> opt = tf . train . RMSPropOptimizer ( learning_rate = 2 . 0 , decay = 0 . 9 , <nl> momentum = 0 . 5 , epsilon = 1e - 5 ) <nl> update = opt . apply_gradients ( zip ( [ grads0 , grads1 ] , [ var0 , var1 ] ) ) <nl> def testSparseWithMomentum ( self ) : <nl> for t in range ( 1 , 5 ) : <nl> update . run ( ) <nl> <nl> - var0_np , rms0_np , mom0_np = self . _rmsprop_update_numpy ( var0_np , <nl> - grads0_np , rms0_np , mom0_np , 2 . 0 , 0 . 9 , 0 . 5 , 1e - 5 ) <nl> - var1_np , rms1_np , mom1_np = self . _rmsprop_update_numpy ( var1_np , <nl> - grads1_np , rms1_np , mom1_np , 2 . 0 , 0 . 9 , 0 . 5 , 1e - 5 ) <nl> + var0_np , rms0_np , mom0_np = self . _sparse_rmsprop_update_numpy ( <nl> + var0_np , grads0_np_indices , grads0_np , rms0_np , mom0_np , <nl> + 2 . 0 , 0 . 9 , 0 . 5 , 1e - 5 ) <nl> + var1_np , rms1_np , mom1_np = self . _sparse_rmsprop_update_numpy ( <nl> + var1_np , grads1_np_indices , grads1_np , rms1_np , mom1_np , <nl> + 2 . 0 , 0 . 9 , 0 . 5 , 1e - 5 ) <nl> <nl> # Validate updated params <nl> self . assertAllCloseAccordingToType ( rms0_np , rms0 . eval ( ) ) <nl> mmm a / tensorflow / python / training / saver . py <nl> ppp b / tensorflow / python / training / saver . py <nl> def save ( self , sess , save_path , global_step = None , latest_filename = None , <nl> " ' latest_filename ' collides with ' save_path ' : ' % s ' and ' % s ' " % <nl> ( latest_filename , save_path ) ) <nl> <nl> + if not os . path . exists ( os . path . dirname ( save_path ) ) : <nl> + raise ValueError ( " Parent directory of { } doesn ' t exist , can ' t save . " . format ( save_path ) ) <nl> + <nl> save_path = os . path . dirname ( save_path ) <nl> if not isinstance ( sess , session . 
SessionInterface ) : <nl> raise TypeError ( " ' sess ' must be a Session ; % s " % sess ) <nl> <nl> + # Note a few lines above save_path was set to os . path . dirname ( save_path ) <nl> + if not os . path . exists ( save_path ) : <nl> + raise ValueError ( " Parent directory { } doesn ' t exist , can ' t save . " . format ( save_path ) ) <nl> + <nl> model_checkpoint_path = sess . run ( <nl> self . saver_def . save_tensor_name , <nl> { self . saver_def . filename_tensor_name : checkpoint_file } ) <nl> def export_meta_graph ( filename = None , meta_info_def = None , graph_def = None , <nl> " " " Returns ` MetaGraphDef ` proto . Optionally writes it to filename . <nl> <nl> This function exports the graph , saver , and collection objects into <nl> - ` MetaGraphDef ` protocol buffer with the intension of it being imported <nl> + ` MetaGraphDef ` protocol buffer with the intention of it being imported <nl> at a later time or location to restart training , run inference , or be <nl> a subgraph . <nl> <nl> mmm a / tensorflow / python / training / saver_test . py <nl> ppp b / tensorflow / python / training / saver_test . py <nl> def testVariables ( self ) : <nl> self . assertEqual ( b " k1 " , v2 . keys ( ) . eval ( ) ) <nl> self . assertEqual ( 3 . 0 , v2 . values ( ) . eval ( ) ) <nl> <nl> - def testVarListShouldBeEmptyInDefferedBuild ( self ) : <nl> + def testVarListShouldBeEmptyInDeferredBuild ( self ) : <nl> with tf . Graph ( ) . as_default ( ) : <nl> v = tf . Variable ( 1 . 0 ) <nl> with self . assertRaisesRegexp ( ValueError , " defer_build " ) : <nl> tf . train . Saver ( [ v ] , defer_build = True ) <nl> <nl> def testBuildShouldBeCalledBeforeSaveInCaseOfDeferBuild ( self ) : <nl> - save_path = os . path . join ( self . get_temp_dir ( ) , " error_deffered_build " ) <nl> + save_path = os . path . join ( self . get_temp_dir ( ) , " error_deferred_build " ) <nl> with tf . Graph ( ) . as_default ( ) , tf . Session ( ) as sess : <nl> tf . Variable ( 1 . 0 ) <nl> saver = tf . train . Saver ( defer_build = True ) <nl> with self . assertRaisesRegexp ( RuntimeError , " build " ) : <nl> saver . save ( sess , save_path ) <nl> <nl> - def testDefferedBuild ( self ) : <nl> - save_path = os . path . join ( self . get_temp_dir ( ) , " deffered_build " ) <nl> + def testDeferredBuild ( self ) : <nl> + save_path = os . path . join ( self . get_temp_dir ( ) , " deferred_build " ) <nl> with tf . Session ( " " , graph = tf . Graph ( ) ) as sess : <nl> one = tf . Variable ( 1 . 0 ) <nl> save = tf . train . Saver ( defer_build = True ) <nl> - # if build is not defered , saver cannot save the ` twos ` . <nl> + # if build is not deferred , saver cannot save the ` twos ` . <nl> twos = tf . Variable ( [ 2 . 0 , 2 . 0 , 2 . 0 ] ) <nl> init = tf . initialize_all_variables ( ) <nl> save . build ( ) <nl> def testSaveWithGlobalStep ( self ) : <nl> expected_save_path = " % s - % d " % ( save_path , global_step_int ) <nl> self . assertEqual ( expected_save_path , val ) <nl> <nl> + def testSaveToNonexistingPath ( self ) : <nl> + <nl> + save_path = os . path . join ( self . get_temp_dir ( ) , " nonexisting_dir / path " ) <nl> + <nl> + # Build a graph with 2 parameter nodes , and Save and <nl> + # Restore nodes for them . <nl> + v0 = tf . Variable ( 10 . 0 , name = " v0 " ) <nl> + v1 = tf . Variable ( 20 . 0 , name = " v1 " ) <nl> + save = tf . train . Saver ( { " v0 " : v0 , " v1 " : v1 } , restore_sequentially = True ) <nl> + init_all_op = tf . initialize_all_variables ( ) <nl> + <nl> + with self . 
test_session ( ) as sess : <nl> + # Initialize all variables <nl> + sess . run ( init_all_op ) <nl> + <nl> + # Check that the parameter nodes have been initialized . <nl> + self . assertEqual ( 10 . 0 , v0 . eval ( ) ) <nl> + self . assertEqual ( 20 . 0 , v1 . eval ( ) ) <nl> + <nl> + # Assert saving fails when parent dir of save path doesn ' t exist <nl> + with self . assertRaisesWithPredicateMatch ( <nl> + ValueError , lambda e : " Parent directory of { } doesn ' t exist , can ' t save . " . format ( save_path ) in str ( e ) ) : <nl> + save . save ( sess , save_path ) <nl> + <nl> <nl> class SaveRestoreShardedTest ( tf . test . TestCase ) : <nl> <nl> def _testGraphExtensionRestore ( self ) : <nl> new_saver . export_meta_graph ( ) <nl> # Restores from checkpoint . <nl> new_saver . restore ( sess , saver0_ckpt ) <nl> - # Addes loss and train . <nl> + # Adds loss and train . <nl> labels = tf . constant ( 0 , tf . int32 , shape = [ 100 ] , name = " labels " ) <nl> batch_size = tf . size ( labels ) <nl> labels = tf . expand_dims ( labels , 1 ) <nl> mmm a / tensorflow / python / training / server_lib . py <nl> ppp b / tensorflow / python / training / server_lib . py <nl> def server_def ( self ) : <nl> " " " Returns the ` tf . train . ServerDef ` for this server . <nl> <nl> Returns : <nl> - A ` tf . train . ServerDef ` prototocol buffer that describes the configuration <nl> + A ` tf . train . ServerDef ` protocol buffer that describes the configuration <nl> of this server . <nl> " " " <nl> return self . _server_def <nl> mmm a / tensorflow / python / training / server_lib_test . py <nl> ppp b / tensorflow / python / training / server_lib_test . py <nl> def testSameVariablesClear ( self ) : <nl> self . assertAllEqual ( [ [ 4 ] ] , sess_2 . run ( v2 ) ) <nl> <nl> # Connects to the same target . Device memory for the variables would have <nl> - # been released , so they will be unitialized . <nl> + # been released , so they will be uninitialized . <nl> sess_2 = tf . Session ( server . target ) <nl> with self . assertRaises ( tf . errors . FailedPreconditionError ) : <nl> sess_2 . run ( v2 ) <nl> - # Reinitialzes the variables . <nl> + # Reinitializes the variables . <nl> sess_2 . run ( tf . initialize_all_variables ( ) ) <nl> self . assertAllEqual ( [ [ 4 ] ] , sess_2 . run ( v2 ) ) <nl> sess_2 . close ( ) <nl> def testMultipleContainers ( self ) : <nl> sess . run ( v1 ) <nl> <nl> # Connects to the same target . Device memory for the v0 would have <nl> - # been released , so it will be unitialized . But v1 should still <nl> + # been released , so it will be uninitialized . But v1 should still <nl> # be valid . <nl> sess = tf . Session ( server . target ) <nl> with self . assertRaises ( tf . errors . FailedPreconditionError ) : <nl> mmm a / tensorflow / python / training / supervisor . py <nl> ppp b / tensorflow / python / training / supervisor . py <nl> class Supervisor ( object ) : <nl> <nl> In the * chief * task , the ` Supervisor ` works exactly as in the first example <nl> above . In the other tasks ` sv . managed_session ( ) ` waits for the Model to have <nl> - been intialized before returning a session to the training code . The <nl> - non - chief tasks depend on the chief taks for initializing the model . <nl> + been initialized before returning a session to the training code . The <nl> + non - chief tasks depend on the chief task for initializing the model . <nl> <nl> If one of the tasks crashes and restarts , ` managed_session ( ) ` <nl> checks if the Model is initialized . 
If yes , it just creates a session and <nl> def _init_saver ( self , saver = USE_DEFAULT ) : <nl> self . _saver = saver <nl> <nl> def _init_summary_op ( self , summary_op = USE_DEFAULT ) : <nl> - " " " Initilizes summary_op . <nl> + " " " Initializes summary_op . <nl> <nl> Args : <nl> summary_op : An Operation that returns a Summary for the event logs . <nl> def start_queue_runners ( self , sess , queue_runners = None ) : <nl> Note that the queue runners collected in the graph key ` QUEUE_RUNNERS ` <nl> are already started automatically when you create a session with the <nl> supervisor , so unless you have non - collected queue runners to start <nl> - you do not need to call this explicitely . <nl> + you do not need to call this explicitly . <nl> <nl> Args : <nl> sess : A ` Session ` . <nl> mmm a / tensorflow / python / training / sync_replicas_optimizer . py <nl> ppp b / tensorflow / python / training / sync_replicas_optimizer . py <nl> class SyncReplicasOptimizer ( optimizer . Optimizer ) : <nl> # total_num_replicas = 52 and make sure this number matches how many physical <nl> # replicas you started in your job . <nl> <nl> - # Some models have startup_delays to help stablize the model but when using <nl> + # Some models have startup_delays to help stabilize the model but when using <nl> # sync_replicas training , set it to 0 . <nl> <nl> # Now you can call ` minimize ( ) ` or ` compute_gradients ( ) ` and <nl>
|
Merge changes from github .
|
tensorflow/tensorflow
|
b1413783a47bbf94dfbdd6e31d0e4a765a966d41
|
2016-08-18T23:33:42Z
|
mmm a / README . md <nl> ppp b / README . md <nl> Effective January 25 , 2017 CNTK [ 1 - bit Stochastic Gradient Descent ( 1bit - SGD ) ] ( h <nl> Give us feedback through these [ channels ] ( https : / / github . com / Microsoft / CNTK / wiki / Feedback - Channels ) . <nl> <nl> # Latest news <nl> + * * * 2017 - 02 - 01 . * V 2 . 0 Beta 10 Release * * <nl> + Highlights of this Release : <nl> + * New and updated core and Python API features . <nl> + * New Tutorials and Examples : <nl> + * A Python version of the deconvolution layer and image auto encoder example was added ( [ Example * * 07_Deconvolution * * in * Image - Getting Started * ] ( https : / / github . com / Microsoft / CNTK / tree / v2 . 0 . beta10 . 0 / Examples / Image / GettingStarted ) ) . <nl> + * A Python distributed training example for image classification using AlexNet was added , cf . [ here ] ( https : / / github . com / Microsoft / CNTK / tree / v2 . 0 . beta10 . 0 / Examples / Image / Classification / AlexNet / Python ) <nl> + * [ Basic implementation of Generative Adversarial Networks ( GAN ) networks ] ( https : / / github . com / Microsoft / CNTK / blob / v2 . 0 . beta10 . 0 / Tutorials / CNTK_206_Basic_GAN . ipynb ) <nl> + * [ Training with Sampled Softmax ] ( https : / / github . com / Microsoft / CNTK / blob / v2 . 0 . beta10 . 0 / Tutorials / CNTK_207_Training_with_Sampled_Softmax . ipynb ) <nl> + * New [ CNTK NuGet Packages ] ( https : / / github . com / Microsoft / CNTK / wiki / NuGet - Package ) . <nl> + <nl> + See more in the [ Release Notes ] ( https : / / github . com / Microsoft / CNTK / wiki / CNTK_2_0_beta_10_Release_Notes ) . <nl> + Get the Release from the [ CNTK Releases page ] ( https : / / github . com / Microsoft / CNTK / releases ) . <nl> + <nl> * * * 2017 - 01 - 25 . * V 2 . 0 Beta 9 Release available at Docker Hub * * <nl> CNTK V 2 . 0 Beta 9 Runtime packages are now available as [ Public Images at Docker Hub ] ( https : / / hub . docker . com / r / microsoft / cntk / ) . <nl> See more on CNTK as Docker Images in this [ Wiki article ] ( https : / / github . com / Microsoft / CNTK / wiki / CNTK - Docker - Containers ) . <nl> Get the Release from the [ CNTK Releases page ] ( https : / / github . com / Microsoft / CNTK / <nl> CNTK V 2 . 0 Beta 8 Runtime packages are now available as [ Public Images at Docker Hub ] ( https : / / hub . docker . com / r / microsoft / cntk / ) . <nl> See more on CNTK as Docker Images in this [ Wiki article ] ( https : / / github . com / Microsoft / CNTK / wiki / CNTK - Docker - Containers ) . <nl> <nl> - * * * 2017 - 01 - 16 . * V 2 . 0 Beta 8 Release * * <nl> - Highlights of this Release : <nl> - * Support of Python v . 2 . 7 , 3 . 4 , and 3 . 5 . See [ binary and source setup ] ( https : / / github . com / Microsoft / CNTK / wiki / Setup - CNTK - on - your - machine ) instructions to find out about how to select Python version . <nl> - * New Python API features . <nl> - * New Python example [ Feature extraction using a trained model in Python API ] ( https : / / github . com / Microsoft / CNTK / tree / v2 . 0 . beta8 . 0 / Examples / Image / FeatureExtraction ) . <nl> - * Support of [ Visual Studio 2015 ] ( https : / / github . com / Microsoft / CNTK / wiki / Setup - Migrate - VS13 - to - VS15 ) for Windows version . <nl> - * Introduction of [ C # API in CNTK Evaluation Library ] ( https : / / github . com / Microsoft / CNTK / wiki / CNTK - Library - Managed - API ) and a new set of [ CNTK NuGet Packages ] ( https : / / github . 
com / Microsoft / CNTK / wiki / NuGet - Package ) . <nl> - * CNTK Runtime packages are now available as [ Public Images at Docker Hub ] ( https : / / github . com / Microsoft / CNTK / wiki / CNTK - Docker - Containers ) . ( * * Beta 7 * * is currently available ; Beta 8 Images availability will be announced separately in a few days ) <nl> - * Version 3 of [ CNTK Custom MKL Library ] ( https : / / cntk . ai / mkl / ) is available . <nl> - <nl> - See more in the [ Release Notes ] ( https : / / github . com / Microsoft / CNTK / wiki / CNTK_2_0_beta_8_Release_Notes ) . <nl> - Get the Release from the [ CNTK Releases page ] ( https : / / github . com / Microsoft / CNTK / releases ) . <nl> - <nl> See [ all news ] ( https : / / github . com / Microsoft / CNTK / wiki / News ) . <nl> <nl> # What is The Microsoft Cognitive Toolkit <nl>
|
README.md: add entry for 2.0.beta10.0 release
|
microsoft/CNTK
|
54836c17164213b2933bbbff82f44e3dbb4f0794
|
2017-02-01T17:15:24Z
|
mmm a / util / sock . h <nl> ppp b / util / sock . h <nl> namespace mongo { <nl> inline void closesocket ( int s ) { <nl> / / never wait for socket to close <nl> # ifdef _WIN32 <nl> - boost : : thread ( bind ( : : closesocket , s ) ) ; <nl> + boost : : thread ( boost : : bind ( : : closesocket , s ) ) ; <nl> # else <nl> - boost : : thread ( bind ( : : close , s ) ) ; <nl> + boost : : thread ( boost : : bind ( : : close , s ) ) ; <nl> # endif <nl> } <nl> <nl>
|
namespace issue (for bad C++ compilers)
|
mongodb/mongo
|
5a9013d3ccc3d034251055eccc5eb313eb671cd0
|
2010-01-20T03:22:32Z
|
mmm a / tests / cv / src / aaccum . cpp <nl> ppp b / tests / cv / src / aaccum . cpp <nl> void CV_RunningAvgTest : : prepare_to_validation ( int ) <nl> cvTsCopy ( temp , dst , mask ) ; <nl> } <nl> <nl> - CV_RunningAvgTest runavg_test ; <nl> + / / CV_RunningAvgTest runavg_test ; <nl> <nl> mmm a / tests / cv / src / acolor . cpp <nl> ppp b / tests / cv / src / acolor . cpp <nl> void CV_ColorHSVTest : : convert_row_abc2bgr_32f_c3 ( const float * src_row , float * d <nl> } <nl> <nl> <nl> - CV_ColorHSVTest color_hsv_test ; <nl> + / / CV_ColorHSVTest color_hsv_test ; <nl> <nl> <nl> <nl> mmm a / tests / cv / src / afilter . cpp <nl> ppp b / tests / cv / src / afilter . cpp <nl> void CV_FilterTest : : prepare_to_validation ( int test_case_idx ) <nl> cvTsConvolve2D ( & test_mat [ TEMP ] [ 0 ] , & test_mat [ REF_OUTPUT ] [ 0 ] , & test_mat [ INPUT ] [ 1 ] , anchor ) ; <nl> } <nl> <nl> - CV_FilterTest filter ; <nl> + / / CV_FilterTest filter ; <nl> <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / tests / cv / src / asubdivisions . cpp <nl> ppp b / tests / cv / src / asubdivisions . cpp <nl> int CV_SubdivTest : : validate_test_results ( int / * test_case_idx * / ) <nl> return code ; <nl> } <nl> <nl> - CV_SubdivTest subdiv_test ; <nl> + / / CV_SubdivTest subdiv_test ; <nl> <nl> / * End of file . * / <nl> <nl> mmm a / tests / cxcore / src / adxt . cpp <nl> ppp b / tests / cxcore / src / adxt . cpp <nl> void CxCore_DFTTest : : prepare_to_validation ( int / * test_case_idx * / ) <nl> } <nl> <nl> <nl> - CxCore_DFTTest dft_test ; <nl> + / / CxCore_DFTTest dft_test ; <nl> <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / DCT / / / / / / / / / / / / / / / / / / / / / / / / <nl> void CxCore_DCTTest : : prepare_to_validation ( int / * test_case_idx * / ) <nl> } <nl> <nl> <nl> - CxCore_DCTTest dct_test ; <nl> + / / CxCore_DCTTest dct_test ; <nl> <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / MulSpectrums / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / tests / cxcore / src / amath . cpp <nl> ppp b / tests / cxcore / src / amath . cpp <nl> void CxCore_DotProductTest : : prepare_to_validation ( int ) <nl> cvRealScalar ( cvTsCrossCorr ( & test_mat [ INPUT ] [ 0 ] , & test_mat [ INPUT ] [ 1 ] ) ) ; <nl> } <nl> <nl> - CxCore_DotProductTest dotproduct_test ; <nl> + / / CxCore_DotProductTest dotproduct_test ; <nl> <nl> <nl> / / / / / / / / / crossproduct / / / / / / / / / / <nl> mmm a / tests / cxcore / src / arand . cpp <nl> ppp b / tests / cxcore / src / arand . cpp <nl> void CV_RandTest : : run ( int ) <nl> } <nl> } <nl> <nl> - CV_RandTest rand_test ; <nl> + / / CV_RandTest rand_test ; <nl>
|
exclude failed tests
|
opencv/opencv
|
998cce4a2d0bec226bb0c65fd570792defc1a62f
|
2010-07-11T15:07:44Z
|
mmm a / src / interfaces / wallet . cpp <nl> ppp b / src / interfaces / wallet . cpp <nl> WalletTxStatus MakeWalletTxStatus ( const CWalletTx & wtx ) <nl> WalletTxStatus result ; <nl> auto mi = : : mapBlockIndex . find ( wtx . hashBlock ) ; <nl> CBlockIndex * block = mi ! = : : mapBlockIndex . end ( ) ? mi - > second : nullptr ; <nl> - result . block_height = ( block ? block - > nHeight : std : : numeric_limits < int > : : max ( ) ) , <nl> + result . block_height = ( block ? block - > nHeight : std : : numeric_limits < int > : : max ( ) ) ; <nl> result . blocks_to_maturity = wtx . GetBlocksToMaturity ( ) ; <nl> result . depth_in_main_chain = wtx . GetDepthInMainChain ( ) ; <nl> result . time_received = wtx . nTimeReceived ; <nl>
|
Merge: wallet: Fix accidental use of the comma operator
|
bitcoin/bitcoin
|
89a116dc0b446de0d18a981699a279eeaf6c9ea9
|
2018-07-26T22:26:25Z
|
mmm a / arangod / Aql / ExecutionNode . cpp <nl> ppp b / arangod / Aql / ExecutionNode . cpp <nl> IndexRangeNode : : IndexRangeNode ( ExecutionPlan * plan , <nl> _outVariable ( varFromJson ( plan - > getAst ( ) , json , " outVariable " ) ) , <nl> _ranges ( ) { <nl> <nl> - triagens : : basics : : Json rangesJson ( TRI_UNKNOWN_MEM_ZONE , JsonHelper : : checkAndGetListValue ( json . json ( ) , " ranges " ) ) ; <nl> - for ( size_t i = 0 ; i < rangesJson . size ( ) ; i + + ) { / / loop over the ranges . . . <nl> + triagens : : basics : : Json rangeListJson ( TRI_UNKNOWN_MEM_ZONE , JsonHelper : : checkAndGetListValue ( json . json ( ) , " ranges " ) ) ; <nl> + for ( size_t i = 0 ; i < rangeListJson . size ( ) ; i + + ) { / / loop over the ranges . . . <nl> _ranges . emplace_back ( ) ; <nl> - triagens : : basics : : Json rangeJson ( rangesJson . at ( static_cast < int > ( i ) ) ) ; <nl> + triagens : : basics : : Json rangeJson ( rangeListJson . at ( static_cast < int > ( i ) ) ) ; <nl> for ( size_t j = 0 ; j < rangeJson . size ( ) ; j + + ) { <nl> _ranges . at ( i ) . emplace_back ( rangeJson . at ( static_cast < int > ( j ) ) ) ; <nl> } <nl>
|
Use 'list' in plural variables
|
arangodb/arangodb
|
215e9bf1b4e115760061b2650b4b59a07386496a
|
2014-09-15T13:06:38Z
|
mmm a / src / flag - definitions . h <nl> ppp b / src / flag - definitions . h <nl> DEFINE_BOOL ( future , FUTURE_BOOL , <nl> " not - too - far future " ) <nl> DEFINE_IMPLICATION ( future , turbo ) <nl> <nl> - DEFINE_IMPLICATION ( turbo , ignition ) <nl> - <nl> / / Flags for experimental implementation features . <nl> DEFINE_BOOL ( allocation_site_pretenuring , true , <nl> " pretenure with allocation sites " ) <nl> DEFINE_BOOL ( unbox_double_arrays , true , " automatically unbox arrays of doubles " ) <nl> DEFINE_BOOL ( string_slices , true , " use string slices " ) <nl> <nl> / / Flags for Ignition . <nl> - DEFINE_BOOL ( ignition , false , " use ignition interpreter " ) <nl> + DEFINE_BOOL ( ignition , true , " use ignition interpreter " ) <nl> DEFINE_BOOL ( ignition_osr , true , " enable support for OSR from ignition code " ) <nl> DEFINE_BOOL ( ignition_elide_noneffectful_bytecodes , true , <nl> " elide bytecodes which won ' t have any external effect " ) <nl> mmm a / test / cctest / test - debug . cc <nl> ppp b / test / cctest / test - debug . cc <nl> TEST ( DebugGetPossibleBreakpointsReturnLocations ) { <nl> + + returns_count ; <nl> } <nl> } <nl> - if ( i : : FLAG_turbo ) { <nl> - / / With turbofan we generate one return location per return statement , <nl> + if ( i : : FLAG_ignition ) { <nl> + / / With Ignition we generate one return location per return statement , <nl> / / each has line = 5 , column = 0 as statement position . <nl> CHECK ( returns_count = = 4 ) ; <nl> } else { <nl> - / / Without turbofan we generate one return location . <nl> + / / Without Ignition we generate one return location . <nl> CHECK ( returns_count = = 1 ) ; <nl> } <nl> } <nl> mmm a / test / inspector / debugger / get - possible - breakpoints - master . js <nl> ppp b / test / inspector / debugger / get - possible - breakpoints - master . js <nl> <nl> / / Use of this source code is governed by a BSD - style license that can be <nl> / / found in the LICENSE file . <nl> <nl> - / / Flags : - - turbo <nl> + / / Flags : - - ignition <nl> <nl> let { session , contextGroup , Protocol } = InspectorTest . start ( ' Checks Debugger . getPossibleBreakpoints ' ) ; <nl> <nl> mmm a / test / inspector / debugger / step - into . js <nl> ppp b / test / inspector / debugger / step - into . js <nl> <nl> / / Use of this source code is governed by a BSD - style license that can be <nl> / / found in the LICENSE file . <nl> <nl> - / / Flags : - - turbo <nl> + / / Flags : - - ignition <nl> <nl> let { session , contextGroup , Protocol } = InspectorTest . start ( ' Checks possible break locations . ' ) ; <nl> <nl> mmm a / test / mjsunit / type - profile / collect - type - profile . js <nl> ppp b / test / mjsunit / type - profile / collect - type - profile . js <nl> <nl> / / Use of this source code is governed by a BSD - style license that can be <nl> / / found in the LICENSE file . 
<nl> <nl> - / / Flags : - - type - profile - - turbo - - allow - natives - syntax <nl> + / / Flags : - - type - profile - - ignition - - allow - natives - syntax <nl> <nl> function check_collect_types ( name , expected ) { <nl> const type_profile = % TypeProfile ( name ) ; <nl> testFunction ( 123 , true ) ; <nl> testFunction ( ' hello ' ) ; <nl> testFunction ( 123 ) ; <nl> <nl> - expected = ` { \ " 503 \ " : [ \ " Object \ " , \ " number \ " , \ " string \ " , \ " number \ " ] , \ " 510 \ " : [ \ " undefined \ " , \ " boolean \ " , \ " undefined \ " , \ " undefined \ " ] , \ " 699 \ " : [ \ " Object \ " , \ " number \ " , \ " string \ " , \ " number \ " ] } ` ; <nl> + expected = ` { \ " 506 \ " : [ \ " Object \ " , \ " number \ " , \ " string \ " , \ " number \ " ] , \ " 513 \ " : [ \ " undefined \ " , \ " boolean \ " , \ " undefined \ " , \ " undefined \ " ] , \ " 702 \ " : [ \ " Object \ " , \ " number \ " , \ " string \ " , \ " number \ " ] } ` ; <nl> check_collect_types ( testFunction , expected ) ; <nl> <nl> testFunction ( undefined ) ; <nl> testFunction ( { x : 12 } , true ) ; <nl> testFunction ( { x : 12 } ) ; <nl> testFunction ( new MyClass ( ) ) ; <nl> <nl> - expected = ` { \ " 503 \ " : [ \ " Object \ " , \ " number \ " , \ " string \ " , \ " number \ " , \ " undefined \ " , \ " string \ " , \ " Object \ " , \ " Object \ " , \ " MyClass \ " ] , \ " 510 \ " : [ \ " undefined \ " , \ " boolean \ " , \ " undefined \ " , \ " undefined \ " , \ " undefined \ " , \ " boolean \ " , \ " boolean \ " , \ " undefined \ " , \ " undefined \ " ] , \ " 699 \ " : [ \ " Object \ " , \ " number \ " , \ " string \ " , \ " number \ " , \ " undefined \ " , \ " string \ " , \ " Object \ " , \ " Object \ " , \ " MyClass \ " ] } ` ; <nl> + expected = ` { \ " 506 \ " : [ \ " Object \ " , \ " number \ " , \ " string \ " , \ " number \ " , \ " undefined \ " , \ " string \ " , \ " Object \ " , \ " Object \ " , \ " MyClass \ " ] , \ " 513 \ " : [ \ " undefined \ " , \ " boolean \ " , \ " undefined \ " , \ " undefined \ " , \ " undefined \ " , \ " boolean \ " , \ " boolean \ " , \ " undefined \ " , \ " undefined \ " ] , \ " 702 \ " : [ \ " Object \ " , \ " number \ " , \ " string \ " , \ " number \ " , \ " undefined \ " , \ " string \ " , \ " Object \ " , \ " Object \ " , \ " MyClass \ " ] } ` ; <nl> check_collect_types ( testFunction , expected ) ; <nl> <nl> <nl> function testReturnOfNonVariable ( ) { <nl> return 32 ; <nl> } <nl> testReturnOfNonVariable ( ) ; <nl> - expected = ` { \ " 1732 \ " : [ \ " number \ " ] } ` ; <nl> + expected = ` { \ " 1735 \ " : [ \ " number \ " ] } ` ; <nl> check_collect_types ( testReturnOfNonVariable , expected ) ; <nl> <nl> / / Return statement is reached but its expression is never really returned . <nl> function try_finally ( ) { <nl> } <nl> } <nl> try_finally ( ) ; <nl> - expected = ` { \ " 2034 \ " : [ \ " string \ " ] } ` ; <nl> + expected = ` { \ " 2037 \ " : [ \ " string \ " ] } ` ; <nl> check_collect_types ( try_finally , expected ) ; <nl> <nl> / / Fall - off return . <nl> function fall_off ( ) { <nl> / / nothing <nl> } <nl> fall_off ( ) ; <nl> - expected = ` { \ " 2188 \ " : [ \ " undefined \ " ] } ` ; <nl> + expected = ` { \ " 2191 \ " : [ \ " undefined \ " ] } ` ; <nl> check_collect_types ( fall_off , expected ) ; <nl> <nl> / / Do not collect types when the function is never run . 
<nl> function several_params ( a , b , c , d ) { <nl> / / nothing <nl> } <nl> several_params ( 2 , ' foo ' , { } , new MyClass ( ) ) ; <nl> - expected = ` { \ " 2456 \ " : [ \ " number \ " ] , \ " 2459 \ " : [ \ " string \ " ] , \ " 2462 \ " : [ \ " Object \ " ] , \ " 2465 \ " : [ \ " MyClass \ " ] , \ " 2482 \ " : [ \ " undefined \ " ] } ` ; <nl> + expected = ` { \ " 2459 \ " : [ \ " number \ " ] , \ " 2462 \ " : [ \ " string \ " ] , \ " 2465 \ " : [ \ " Object \ " ] , \ " 2468 \ " : [ \ " MyClass \ " ] , \ " 2485 \ " : [ \ " undefined \ " ] } ` ; <nl> check_collect_types ( several_params , expected ) ; <nl> mmm a / tools / testrunner / local / variants . py <nl> ppp b / tools / testrunner / local / variants . py <nl> <nl> ALL_VARIANT_FLAGS = { <nl> " default " : [ [ ] ] , <nl> " stress " : [ [ " - - stress - opt " , " - - always - opt " ] ] , <nl> - " fullcode " : [ [ " - - noopt " , " - - no - turbo " ] ] , <nl> + " fullcode " : [ [ " - - noopt " , " - - no - ignition " ] ] , <nl> # No optimization means disable all optimizations . OptimizeFunctionOnNextCall <nl> # would not force optimization too . It turns into a Nop . Please see <nl> # https : / / chromium - review . googlesource . com / c / 452620 / for more discussion . <nl> <nl> FAST_VARIANT_FLAGS = { <nl> " default " : [ [ ] ] , <nl> " stress " : [ [ " - - stress - opt " ] ] , <nl> - " fullcode " : [ [ " - - noopt " , " - - no - turbo " ] ] , <nl> + " fullcode " : [ [ " - - noopt " , " - - no - ignition " ] ] , <nl> # No optimization means disable all optimizations . OptimizeFunctionOnNextCall <nl> # would not force optimization too . It turns into a Nop . Please see <nl> # https : / / chromium - review . googlesource . com / c / 452620 / for more discussion . <nl>
|
Decouple the --ignition from the --turbo flag.
|
v8/v8
|
4e86ae8c2c6ac8ab7564055eee5b91f71b32f72c
|
2017-06-09T08:04:39Z
|
mmm a / src / base / tristatebool . h <nl> ppp b / src / base / tristatebool . h <nl> class TriStateBool <nl> bool operator ! = ( const TriStateBool & other ) const ; <nl> <nl> private : <nl> - int m_value = - 1 ; / / Undefined by default <nl> + signed char m_value = - 1 ; / / Undefined by default <nl> } ; <nl> <nl> # endif / / TRISTATEBOOL_H <nl>
|
Merge pull request from Chocobo1/tribool
|
qbittorrent/qBittorrent
|
fc5d49bf9a882906566226c9995a7bedd450d261
|
2017-09-21T12:17:34Z
|
mmm a / db / repl / rs_member . h <nl> ppp b / db / repl / rs_member . h <nl> namespace mongo { <nl> * / <nl> struct MemberState { <nl> enum MS { <nl> - RS_STARTUP , <nl> - RS_PRIMARY , <nl> - RS_SECONDARY , <nl> - RS_RECOVERING , <nl> - RS_FATAL , <nl> - RS_STARTUP2 , <nl> - RS_UNKNOWN , / * remote node not yet reached * / <nl> - RS_ARBITER , <nl> - RS_DOWN , / * node not reachable for a report * / <nl> - RS_ROLLBACK <nl> + RS_STARTUP = 0 , <nl> + RS_PRIMARY = 1 , <nl> + RS_SECONDARY = 2 , <nl> + RS_RECOVERING = 3 , <nl> + RS_FATAL = 4 , <nl> + RS_STARTUP2 = 5 , <nl> + RS_UNKNOWN = 6 , / * remote node not yet reached * / <nl> + RS_ARBITER = 7 , <nl> + RS_DOWN = 8 , / * node not reachable for a report * / <nl> + RS_ROLLBACK = 9 <nl> } s ; <nl> <nl> MemberState ( MS ms = RS_UNKNOWN ) : s ( ms ) { } <nl>
|
Put codes in .h for enum so it's more readable
|
mongodb/mongo
|
cdc33d7490c84e908b6b27296a81d599669d22e6
|
2010-10-31T00:58:41Z
|
mmm a / modules / gapi / include / opencv2 / gapi / gmat . hpp <nl> ppp b / modules / gapi / include / opencv2 / gapi / gmat . hpp <nl> class GAPI_EXPORTS GMat <nl> std : : shared_ptr < GOrigin > m_priv ; <nl> } ; <nl> <nl> + namespace gapi { namespace own { <nl> + class Mat ; <nl> + } } / / gapi : : own <nl> + <nl> / * * @ } * / <nl> <nl> / * * <nl> struct GAPI_EXPORTS GMatDesc <nl> int depth ; <nl> int chan ; <nl> cv : : gapi : : own : : Size size ; / / NB . : no multi - dimensional cases covered yet <nl> + bool planar ; <nl> + <nl> + GMatDesc ( int d , int c , cv : : gapi : : own : : Size s , bool p = false ) <nl> + : depth ( d ) , chan ( c ) , size ( s ) , planar ( p ) { } <nl> + <nl> + GMatDesc ( ) : GMatDesc ( - 1 , - 1 , { - 1 , - 1 } ) { } <nl> <nl> inline bool operator = = ( const GMatDesc & rhs ) const <nl> { <nl> - return depth = = rhs . depth & & chan = = rhs . chan & & size = = rhs . size ; <nl> + return depth = = rhs . depth & & chan = = rhs . chan & & size = = rhs . size & & planar = = rhs . planar ; <nl> } <nl> <nl> inline bool operator ! = ( const GMatDesc & rhs ) const <nl> struct GAPI_EXPORTS GMatDesc <nl> return ! ( * this = = rhs ) ; <nl> } <nl> <nl> + / / Checks if the passed mat can be described by this descriptor <nl> + / / ( it handles the case when <nl> + / / 1 - channel mat can be reinterpreted as is ( 1 - channel mat ) <nl> + / / and as a 3 - channel planar mat with height divided by 3 ) <nl> + bool canDescribe ( const cv : : gapi : : own : : Mat & mat ) const ; <nl> + <nl> / / Meta combinator : return a new GMatDesc which differs in size by delta <nl> / / ( all other fields are taken unchanged from this GMatDesc ) <nl> / / FIXME : a better name ? <nl> struct GAPI_EXPORTS GMatDesc <nl> { <nl> return withSize ( to_own ( sz ) ) ; <nl> } <nl> + <nl> + bool canDescribe ( const cv : : Mat & mat ) const ; <nl> # endif / / ! defined ( GAPI_STANDALONE ) <nl> / / Meta combinator : return a new GMatDesc which differs in size by delta <nl> / / ( all other fields are taken unchanged from this GMatDesc ) <nl> struct GAPI_EXPORTS GMatDesc <nl> desc . chan = dchan ; <nl> return desc ; <nl> } <nl> + <nl> + / / Meta combinator : return a new GMatDesc with planar flag set <nl> + / / ( no size changes are performed , only channel interpretation is changed <nl> + / / ( interleaved - > planar ) <nl> + GMatDesc asPlanar ( ) const <nl> + { <nl> + GAPI_Assert ( planar = = false ) ; <nl> + GMatDesc desc ( * this ) ; <nl> + desc . planar = true ; <nl> + return desc ; <nl> + } <nl> + <nl> + / / Meta combinator : return a new GMatDesc <nl> + / / reinterpreting 1 - channel input as planar image <nl> + / / ( size height is divided by plane number ) <nl> + GMatDesc asPlanar ( int planes ) const <nl> + { <nl> + GAPI_Assert ( planar = = false ) ; <nl> + GAPI_Assert ( chan = = 1 ) ; <nl> + GAPI_Assert ( planes > 1 ) ; <nl> + GAPI_Assert ( size . height % planes = = 0 ) ; <nl> + GMatDesc desc ( * this ) ; <nl> + desc . size . height / = planes ; <nl> + desc . chan = planes ; <nl> + return desc . asPlanar ( ) ; <nl> + } <nl> + <nl> + / / Meta combinator : return a new GMatDesc with planar flag set to false <nl> + / / ( no size changes are performed , only channel interpretation is changed <nl> + / / ( planar - > interleaved ) <nl> + GMatDesc asInterleaved ( ) const <nl> + { <nl> + GAPI_Assert ( planar = = true ) ; <nl> + GMatDesc desc ( * this ) ; <nl> + desc . 
planar = false ; <nl> + return desc ; <nl> + } <nl> } ; <nl> <nl> static inline GMatDesc empty_gmat_desc ( ) { return GMatDesc { - 1 , - 1 , { - 1 , - 1 } } ; } <nl> GAPI_EXPORTS GMatDesc descr_of ( const cv : : UMat & mat ) ; <nl> / * * @ } * / <nl> <nl> namespace gapi { namespace own { <nl> - class Mat ; <nl> GAPI_EXPORTS GMatDesc descr_of ( const Mat & mat ) ; <nl> } } / / gapi : : own <nl> <nl> mmm a / modules / gapi / include / opencv2 / gapi / gproto . hpp <nl> ppp b / modules / gapi / include / opencv2 / gapi / gproto . hpp <nl> GMetaArgs GAPI_EXPORTS descr_of ( const GRunArgs & args ) ; <nl> <nl> / / Transform run - time operation result argument into metadata extracted from that argument <nl> / / Used to compare the metadata , which generated at compile time with the metadata result operation in run time <nl> - GMetaArg GAPI_EXPORTS descr_of ( const GRunArgP & argp ) ; <nl> + GMetaArg GAPI_EXPORTS descr_of ( const GRunArgP & argp ) ; <nl> <nl> + / / Checks if run - time computation argument can be described by metadata <nl> + bool GAPI_EXPORTS can_describe ( const GMetaArg & meta , const GRunArg & arg ) ; <nl> + bool GAPI_EXPORTS can_describe ( const GMetaArgs & metas , const GRunArgs & args ) ; <nl> + <nl> + / / Checks if run - time computation result argument can be described by metadata . <nl> + / / Used to check if the metadata generated at compile time <nl> + / / coincides with output arguments passed to computation in cpu and ocl backends <nl> + bool GAPI_EXPORTS can_describe ( const GMetaArg & meta , const GRunArgP & argp ) ; <nl> <nl> } / / namespace cv <nl> <nl> mmm a / modules / gapi / include / opencv2 / gapi / gtype_traits . hpp <nl> ppp b / modules / gapi / include / opencv2 / gapi / gtype_traits . hpp <nl> namespace detail <nl> template < typename T > struct GTypeOf ; <nl> # if ! defined ( GAPI_STANDALONE ) <nl> template < > struct GTypeOf < cv : : Mat > { using type = cv : : GMat ; } ; <nl> + template < > struct GTypeOf < cv : : UMat > { using type = cv : : GMat ; } ; <nl> template < > struct GTypeOf < cv : : Scalar > { using type = cv : : GScalar ; } ; <nl> # endif / / ! defined ( GAPI_STANDALONE ) <nl> template < > struct GTypeOf < cv : : gapi : : own : : Mat > { using type = cv : : GMat ; } ; <nl> mmm a / modules / gapi / src / api / gbackend . cpp <nl> ppp b / modules / gapi / src / api / gbackend . cpp <nl> void writeBack ( const Mag & mag , const RcDesc & rc , GRunArgP & g_arg , bool is_umat ) <nl> } <nl> <nl> } / / namespace magazine <nl> + <nl> + void createMat ( const cv : : GMatDesc desc , cv : : gapi : : own : : Mat & mat ) <nl> + { <nl> + const auto type = desc . planar ? desc . depth : CV_MAKETYPE ( desc . depth , desc . chan ) ; <nl> + const auto size = desc . planar ? cv : : gapi : : own : : Size { desc . size . width , desc . size . height * desc . chan } <nl> + : desc . size ; <nl> + mat . create ( size , type ) ; <nl> + } <nl> + <nl> + # if ! defined ( GAPI_STANDALONE ) <nl> + void createMat ( const cv : : GMatDesc desc , cv : : Mat & mat ) <nl> + { <nl> + const auto type = desc . planar ? desc . depth : CV_MAKETYPE ( desc . depth , desc . chan ) ; <nl> + const auto size = desc . planar ? cv : : Size { desc . size . width , desc . size . height * desc . chan } <nl> + : cv : : gapi : : own : : to_ocv ( desc . size ) ; <nl> + mat . create ( size , type ) ; <nl> + } <nl> + # endif <nl> + <nl> } / / namespace gimpl <nl> } / / namespace cv <nl> mmm a / modules / gapi / src / api / gmat . cpp <nl> ppp b / modules / gapi / src / api / gmat . 
cpp <nl> std : : ostream & operator < < ( std : : ostream & os , const cv : : GMatDesc & desc ) <nl> break ; <nl> } <nl> <nl> - os < < " C " < < desc . chan < < " " ; <nl> + os < < " C " < < desc . chan ; <nl> + if ( desc . planar ) os < < " p " ; <nl> + os < < " " ; <nl> os < < desc . size . width < < " x " < < desc . size . height ; <nl> <nl> return os ; <nl> } <nl> + <nl> + namespace { <nl> + template < typename M > inline bool canDescribeHelper ( const GMatDesc & desc , const M & mat ) <nl> + { <nl> + const auto mat_desc = desc . planar ? descr_of ( mat ) . asPlanar ( desc . chan ) : descr_of ( mat ) ; <nl> + return desc = = mat_desc ; <nl> + } <nl> + } / / anonymous namespace <nl> + <nl> + bool GMatDesc : : canDescribe ( const cv : : gapi : : own : : Mat & mat ) const <nl> + { <nl> + return canDescribeHelper ( * this , mat ) ; <nl> } <nl> + <nl> + # if ! defined ( GAPI_STANDALONE ) <nl> + bool GMatDesc : : canDescribe ( const cv : : Mat & mat ) const <nl> + { <nl> + return canDescribeHelper ( * this , mat ) ; <nl> + } <nl> + # endif <nl> + <nl> + } / / namespace cv <nl> mmm a / modules / gapi / src / api / gproto . cpp <nl> ppp b / modules / gapi / src / api / gproto . cpp <nl> cv : : GMetaArg cv : : descr_of ( const cv : : GRunArgP & argp ) <nl> } <nl> } <nl> <nl> + bool cv : : can_describe ( const GMetaArg & meta , const GRunArgP & argp ) <nl> + { <nl> + switch ( argp . index ( ) ) <nl> + { <nl> + # if ! defined ( GAPI_STANDALONE ) <nl> + case GRunArgP : : index_of < cv : : Mat * > ( ) : return util : : holds_alternative < GMatDesc > ( meta ) & & <nl> + util : : get < GMatDesc > ( meta ) . canDescribe ( * util : : get < cv : : Mat * > ( argp ) ) ; <nl> + case GRunArgP : : index_of < cv : : UMat * > ( ) : return meta = = GMetaArg ( descr_of ( * util : : get < cv : : UMat * > ( argp ) ) ) ; <nl> + case GRunArgP : : index_of < cv : : Scalar * > ( ) : return meta = = GMetaArg ( descr_of ( * util : : get < cv : : Scalar * > ( argp ) ) ) ; <nl> + # endif / / ! defined ( GAPI_STANDALONE ) <nl> + case GRunArgP : : index_of < cv : : gapi : : own : : Mat * > ( ) : return util : : holds_alternative < GMatDesc > ( meta ) & & <nl> + util : : get < GMatDesc > ( meta ) . canDescribe ( * util : : get < cv : : gapi : : own : : Mat * > ( argp ) ) ; <nl> + case GRunArgP : : index_of < cv : : gapi : : own : : Scalar * > ( ) : return meta = = GMetaArg ( descr_of ( * util : : get < cv : : gapi : : own : : Scalar * > ( argp ) ) ) ; <nl> + case GRunArgP : : index_of < cv : : detail : : VectorRef > ( ) : return meta = = GMetaArg ( util : : get < cv : : detail : : VectorRef > ( argp ) . descr_of ( ) ) ; <nl> + default : util : : throw_error ( std : : logic_error ( " Unsupported GRunArgP type " ) ) ; <nl> + } <nl> + } <nl> + <nl> + bool cv : : can_describe ( const GMetaArg & meta , const GRunArg & arg ) <nl> + { <nl> + switch ( arg . index ( ) ) <nl> + { <nl> + # if ! defined ( GAPI_STANDALONE ) <nl> + case GRunArg : : index_of < cv : : Mat > ( ) : return util : : holds_alternative < GMatDesc > ( meta ) & & <nl> + util : : get < GMatDesc > ( meta ) . canDescribe ( util : : get < cv : : Mat > ( arg ) ) ; <nl> + case GRunArg : : index_of < cv : : UMat > ( ) : return meta = = cv : : GMetaArg ( descr_of ( util : : get < cv : : UMat > ( arg ) ) ) ; <nl> + case GRunArg : : index_of < cv : : Scalar > ( ) : return meta = = cv : : GMetaArg ( descr_of ( util : : get < cv : : Scalar > ( arg ) ) ) ; <nl> + # endif / / ! 
defined ( GAPI_STANDALONE ) <nl> + case GRunArg : : index_of < cv : : gapi : : own : : Mat > ( ) : return util : : holds_alternative < GMatDesc > ( meta ) & & <nl> + util : : get < GMatDesc > ( meta ) . canDescribe ( util : : get < cv : : gapi : : own : : Mat > ( arg ) ) ; <nl> + case GRunArg : : index_of < cv : : gapi : : own : : Scalar > ( ) : return meta = = cv : : GMetaArg ( descr_of ( util : : get < cv : : gapi : : own : : Scalar > ( arg ) ) ) ; <nl> + case GRunArg : : index_of < cv : : detail : : VectorRef > ( ) : return meta = = cv : : GMetaArg ( util : : get < cv : : detail : : VectorRef > ( arg ) . descr_of ( ) ) ; <nl> + default : util : : throw_error ( std : : logic_error ( " Unsupported GRunArg type " ) ) ; <nl> + } <nl> + } <nl> + <nl> + bool cv : : can_describe ( const GMetaArgs & metas , const GRunArgs & args ) <nl> + { <nl> + return metas . size ( ) = = args . size ( ) & & <nl> + std : : equal ( metas . begin ( ) , metas . end ( ) , args . begin ( ) , <nl> + [ ] ( const GMetaArg & meta , const GRunArg & arg ) { <nl> + return can_describe ( meta , arg ) ; <nl> + } ) ; <nl> + } <nl> + <nl> namespace cv { <nl> std : : ostream & operator < < ( std : : ostream & os , const cv : : GMetaArg & arg ) <nl> { <nl> mmm a / modules / gapi / src / backends / common / gbackend . hpp <nl> ppp b / modules / gapi / src / backends / common / gbackend . hpp <nl> inline cv : : util : : optional < T > getCompileArg ( const cv : : GCompileArgs & args ) <nl> return cv : : util : : optional < T > ( ) ; <nl> } <nl> <nl> - <nl> + void createMat ( const cv : : GMatDesc desc , cv : : gapi : : own : : Mat & mat ) ; <nl> + # if ! defined ( GAPI_STANDALONE ) <nl> + void createMat ( const cv : : GMatDesc desc , cv : : Mat & mat ) ; <nl> + # endif <nl> <nl> } } / / cv : : gimpl <nl> <nl> mmm a / modules / gapi / src / backends / cpu / gcpubackend . cpp <nl> ppp b / modules / gapi / src / backends / cpu / gcpubackend . cpp <nl> cv : : gimpl : : GCPUExecutable : : GCPUExecutable ( const ade : : Graph & g , <nl> if ( desc . storage = = Data : : Storage : : INTERNAL & & desc . shape = = GShape : : GMAT ) <nl> { <nl> const auto mat_desc = util : : get < cv : : GMatDesc > ( desc . meta ) ; <nl> - const auto type = CV_MAKETYPE ( mat_desc . depth , mat_desc . chan ) ; <nl> - m_res . slot < cv : : gapi : : own : : Mat > ( ) [ desc . rc ] . create ( mat_desc . size , type ) ; <nl> + auto & mat = m_res . slot < cv : : gapi : : own : : Mat > ( ) [ desc . rc ] ; <nl> + createMat ( mat_desc , mat ) ; <nl> } <nl> break ; <nl> } <nl> void cv : : gimpl : : GCPUExecutable : : run ( std : : vector < InObj > & & input_objs , <nl> / / As Kernels are forbidden to allocate memory for ( Mat ) outputs , <nl> / / this code seems redundant , at least for Mats <nl> / / FIXME : unify with cv : : detail : : ensure_out_mats_not_reallocated <nl> + / / FIXME : when it ' s done , remove can_describe ( const GMetaArg & , const GRunArgP & ) <nl> + / / and descr_of ( const cv : : GRunArgP & argp ) <nl> for ( const auto & out_it : ade : : util : : indexed ( op_info . expected_out_metas ) ) <nl> { <nl> const auto out_index = ade : : util : : index ( out_it ) ; <nl> const auto expected_meta = ade : : util : : value ( out_it ) ; <nl> - const auto out_meta = descr_of ( context . m_results [ out_index ] ) ; <nl> <nl> - if ( expected_meta ! = out_meta ) <nl> + if ( ! can_describe ( expected_meta , context . m_results [ out_index ] ) ) <nl> { <nl> + const auto out_meta = descr_of ( context . 
m_results [ out_index ] ) ; <nl> util : : throw_error <nl> ( std : : logic_error <nl> ( " Output meta doesn ' t " <nl> mmm a / modules / gapi / src / backends / ocl / goclbackend . cpp <nl> ppp b / modules / gapi / src / backends / ocl / goclbackend . cpp <nl> cv : : gimpl : : GOCLExecutable : : GOCLExecutable ( const ade : : Graph & g , <nl> if ( desc . storage = = Data : : Storage : : INTERNAL & & desc . shape = = GShape : : GMAT ) <nl> { <nl> const auto mat_desc = util : : get < cv : : GMatDesc > ( desc . meta ) ; <nl> - const auto type = CV_MAKETYPE ( mat_desc . depth , mat_desc . chan ) ; <nl> - m_res . slot < cv : : UMat > ( ) [ desc . rc ] . create ( mat_desc . size . height , mat_desc . size . width , type ) ; <nl> + auto & mat = m_res . slot < cv : : gapi : : own : : Mat > ( ) [ desc . rc ] ; <nl> + createMat ( mat_desc , mat ) ; <nl> } <nl> break ; <nl> } <nl> void cv : : gimpl : : GOCLExecutable : : run ( std : : vector < InObj > & & input_objs , <nl> { <nl> const auto out_index = ade : : util : : index ( out_it ) ; <nl> const auto expected_meta = ade : : util : : value ( out_it ) ; <nl> - const auto out_meta = descr_of ( context . m_results [ out_index ] ) ; <nl> <nl> - if ( expected_meta ! = out_meta ) <nl> + if ( ! can_describe ( expected_meta , context . m_results [ out_index ] ) ) <nl> { <nl> + const auto out_meta = descr_of ( context . m_results [ out_index ] ) ; <nl> util : : throw_error <nl> ( std : : logic_error <nl> ( " Output meta doesn ' t " <nl> mmm a / modules / gapi / src / compiler / gcompiled . cpp <nl> ppp b / modules / gapi / src / compiler / gcompiled . cpp <nl> <nl> <nl> # include < ade / graph . hpp > <nl> <nl> - # include " opencv2 / gapi / gproto . hpp " / / descr_of <nl> + # include " opencv2 / gapi / gproto . hpp " / / can_describe <nl> # include " opencv2 / gapi / gcompiled . hpp " <nl> <nl> # include " compiler / gcompiled_priv . hpp " <nl> const cv : : GMetaArgs & cv : : GCompiled : : Priv : : outMetas ( ) const <nl> <nl> void cv : : GCompiled : : Priv : : checkArgs ( const cv : : gimpl : : GRuntimeArgs & args ) const <nl> { <nl> - const auto runtime_metas = descr_of ( args . inObjs ) ; <nl> - if ( runtime_metas ! = m_metas ) <nl> + if ( ! can_describe ( m_metas , args . inObjs ) ) <nl> { <nl> - util : : throw_error ( std : : logic_error ( " This object was compiled " <nl> - " for different metadata ! " ) ) ; <nl> + util : : throw_error ( std : : logic_error ( " This object was compiled " <nl> + " for different metadata ! " ) ) ; <nl> / / FIXME : Add details on what is actually wrong <nl> } <nl> } <nl> mmm a / modules / gapi / src / executor / gexecutor . cpp <nl> ppp b / modules / gapi / src / executor / gexecutor . cpp <nl> void cv : : gimpl : : GExecutor : : initResource ( const ade : : NodeHandle & orig_nh ) <nl> case GShape : : GMAT : <nl> { <nl> const auto desc = util : : get < cv : : GMatDesc > ( d . meta ) ; <nl> - const auto type = CV_MAKETYPE ( desc . depth , desc . chan ) ; <nl> - m_res . slot < cv : : gapi : : own : : Mat > ( ) [ d . rc ] . create ( desc . size , type ) ; <nl> + auto & mat = m_res . slot < cv : : gapi : : own : : Mat > ( ) [ d . rc ] ; <nl> + createMat ( desc , mat ) ; <nl> } <nl> break ; <nl> <nl> void cv : : gimpl : : GExecutor : : run ( cv : : gimpl : : GRuntimeArgs & & args ) <nl> { <nl> using cv : : util : : get ; <nl> const auto desc = get < cv : : GMatDesc > ( d . meta ) ; <nl> - const auto type = CV_MAKETYPE ( desc . depth , desc . chan ) ; <nl> - <nl> # if ! 
defined ( GAPI_STANDALONE ) <nl> / / Building as part of OpenCV - follow OpenCV behavior <nl> / / if output buffer is not enough to hold the result , reallocate it <nl> auto & out_mat = * get < cv : : Mat * > ( args . outObjs . at ( index ) ) ; <nl> - out_mat . create ( cv : : gapi : : own : : to_ocv ( desc . size ) , type ) ; <nl> + createMat ( desc , out_mat ) ; <nl> # else <nl> / / Building standalone - output buffer should always exist , <nl> / / and _exact_ match our inferred metadata <nl> auto & out_mat = * get < cv : : gapi : : own : : Mat * > ( args . outObjs . at ( index ) ) ; <nl> - GAPI_Assert ( out_mat . type ( ) = = type <nl> - & & out_mat . data ! = nullptr <nl> - & & out_mat . rows = = desc . size . height <nl> - & & out_mat . cols = = desc . size . width ) <nl> + GAPI_Assert ( out_mat . data ! = nullptr & & <nl> + desc . canDescribe ( out_mat ) ) <nl> # endif / / ! defined ( GAPI_STANDALONE ) <nl> } <nl> } <nl> mmm a / modules / gapi / test / gapi_desc_tests . cpp <nl> ppp b / modules / gapi / test / gapi_desc_tests . cpp <nl> TEST ( GAPI_MetaDesc , Typed_Compile_MatchMetaType_Mixed ) <nl> EXPECT_NO_THROW ( cc . compile ( desc1 , desc2 ) ) ; <nl> } <nl> <nl> + TEST ( GAPI_MetaDesc , Compare_Planar ) <nl> + { <nl> + const auto desc0 = cv : : GMatDesc { CV_8U , 3 , { 32 , 32 } , false } ; <nl> + const auto desc1 = cv : : GMatDesc { CV_8U , 3 , { 32 , 32 } , false } ; <nl> + const auto desc2 = cv : : GMatDesc { CV_8U , 3 , { 32 , 32 } , true } ; <nl> + const auto desc3 = cv : : GMatDesc { CV_8U , 3 , { 64 , 64 } , true } ; <nl> + <nl> + EXPECT_TRUE ( desc0 = = desc1 ) ; <nl> + EXPECT_TRUE ( desc1 ! = desc2 ) ; <nl> + EXPECT_TRUE ( desc1 ! = desc3 ) ; <nl> + EXPECT_TRUE ( desc2 ! = desc3 ) ; <nl> + } <nl> + <nl> + TEST ( GAPI_MetaDesc , Sanity_asPlanar ) <nl> + { <nl> + constexpr int w = 32 ; <nl> + constexpr int h = 16 ; <nl> + const auto desc1 = cv : : GMatDesc { CV_8U , 3 , { w , h } , false } ; <nl> + const auto desc2 = cv : : GMatDesc { CV_8U , 3 , { w , h } , true } ; <nl> + <nl> + EXPECT_NO_THROW ( desc1 . asPlanar ( ) ) ; <nl> + EXPECT_NO_THROW ( desc2 . asInterleaved ( ) ) ; <nl> + EXPECT_ANY_THROW ( desc1 . asInterleaved ( ) ) ; <nl> + EXPECT_ANY_THROW ( desc2 . asPlanar ( ) ) ; <nl> + } <nl> + <nl> + TEST ( GAPI_MetaDesc , Compare_asPlanar ) <nl> + { <nl> + constexpr int w = 32 ; <nl> + constexpr int h = 64 ; <nl> + const auto desc0 = cv : : GMatDesc { CV_8U , 3 , { w , h } , false } ; <nl> + const auto desc1 = cv : : GMatDesc { CV_8U , 3 , { w , h } , true } ; <nl> + <nl> + EXPECT_TRUE ( desc0 . asPlanar ( ) = = desc1 ) ; <nl> + EXPECT_TRUE ( desc1 . asInterleaved ( ) = = desc0 ) ; <nl> + } <nl> + <nl> + TEST ( GAPI_MetaDesc , Compare_asPlanarTransform ) <nl> + { <nl> + constexpr int w = 64 ; <nl> + constexpr int h = 32 ; <nl> + const auto desc0 = cv : : GMatDesc { CV_8U , 3 , { w , h } , true } ; <nl> + const auto desc1 = cv : : GMatDesc { CV_8U , 1 , { w , h * 3 } , false } ; <nl> + <nl> + EXPECT_ANY_THROW ( desc0 . asPlanar ( 3 ) ) ; <nl> + EXPECT_NO_THROW ( desc1 . asPlanar ( 3 ) ) ; <nl> + EXPECT_TRUE ( desc1 . 
asPlanar ( 3 ) = = desc0 ) ; <nl> + } <nl> + <nl> + TEST ( GAPI_MetaDesc , CanDescribe ) <nl> + { <nl> + constexpr int w = 15 ; <nl> + constexpr int h = 7 ; <nl> + cv : : Mat m0 ( h , w , CV_8UC3 ) ; <nl> + cv : : GMatDesc md0 { CV_8U , 3 , { w , h } , false } ; <nl> + <nl> + cv : : Mat m1 ( h * 3 , w , CV_8UC1 ) ; <nl> + cv : : GMatDesc md10 { CV_8U , 3 , { w , h } , true } ; <nl> + cv : : GMatDesc md11 { CV_8U , 1 , { w , h * 3 } , false } ; <nl> + <nl> + EXPECT_TRUE ( md0 . canDescribe ( m0 ) ) ; <nl> + EXPECT_FALSE ( md0 . canDescribe ( m1 ) ) ; <nl> + EXPECT_TRUE ( md10 . canDescribe ( m1 ) ) ; <nl> + EXPECT_TRUE ( md11 . canDescribe ( m1 ) ) ; <nl> + } <nl> + <nl> } / / namespace opencv_test <nl> new file mode 100644 <nl> index 00000000000 . . f763fdcf041 <nl> mmm / dev / null <nl> ppp b / modules / gapi / test / gapi_planar_test . cpp <nl> <nl> + / / This file is part of OpenCV project . <nl> + / / It is subject to the license terms in the LICENSE file found in the top - level directory <nl> + / / of this distribution and at http : / / opencv . org / license . html . <nl> + / / <nl> + / / Copyright ( C ) 2019 Intel Corporation <nl> + <nl> + # include " test_precomp . hpp " <nl> + <nl> + # include < opencv2 / gapi / cpu / gcpukernel . hpp > <nl> + <nl> + namespace opencv_test <nl> + { <nl> + <nl> + G_TYPED_KERNEL ( GResize3c3p , < GMat ( GMat , Size , int ) > , " test . resize3c3p " ) { <nl> + static GMatDesc outMeta ( GMatDesc in , Size sz , int ) { <nl> + GAPI_Assert ( in . depth = = CV_8U ) ; <nl> + GAPI_Assert ( in . chan = = 3 ) ; <nl> + GAPI_Assert ( in . planar = = false ) ; <nl> + return in . withSize ( sz ) . asPlanar ( ) ; <nl> + } <nl> + } ; <nl> + <nl> + G_TYPED_KERNEL ( GResize3p3p , < GMat ( GMat , Size , int ) > , " test . resize3p3p " ) { <nl> + static GMatDesc outMeta ( GMatDesc in , Size sz , int ) { <nl> + GAPI_Assert ( in . depth = = CV_8U ) ; <nl> + GAPI_Assert ( in . chan = = 3 ) ; <nl> + GAPI_Assert ( in . planar ) ; <nl> + return in . withSize ( sz ) ; <nl> + } <nl> + } ; <nl> + <nl> + static GMatDesc NV12toRGBoutMeta ( GMatDesc inY , GMatDesc inUV ) <nl> + { <nl> + GAPI_Assert ( inY . depth = = CV_8U ) ; <nl> + GAPI_Assert ( inUV . depth = = CV_8U ) ; <nl> + GAPI_Assert ( inY . chan = = 1 ) ; <nl> + GAPI_Assert ( inY . planar = = false ) ; <nl> + GAPI_Assert ( inUV . chan = = 2 ) ; <nl> + GAPI_Assert ( inUV . planar = = false ) ; <nl> + GAPI_Assert ( inY . size . width = = 2 * inUV . size . width ) ; <nl> + GAPI_Assert ( inY . size . height = = 2 * inUV . size . height ) ; <nl> + return inY . withType ( CV_8U , 3 ) ; <nl> + } <nl> + <nl> + G_TYPED_KERNEL ( GNV12toRGB , < GMat ( GMat , GMat ) > , " test . nv12torgb " ) { <nl> + static GMatDesc outMeta ( GMatDesc inY , GMatDesc inUV ) { <nl> + return NV12toRGBoutMeta ( inY , inUV ) ; <nl> + } <nl> + } ; <nl> + <nl> + G_TYPED_KERNEL ( GNV12toRGBp , < GMat ( GMat , GMat ) > , " test . nv12torgbp " ) { <nl> + static GMatDesc outMeta ( GMatDesc inY , GMatDesc inUV ) { <nl> + return NV12toRGBoutMeta ( inY , inUV ) . asPlanar ( ) ; <nl> + } <nl> + } ; <nl> + <nl> + static void toPlanar ( const cv : : Mat & in , cv : : Mat & out ) <nl> + { <nl> + GAPI_Assert ( out . depth ( ) = = in . depth ( ) ) ; <nl> + GAPI_Assert ( out . channels ( ) = = 1 ) ; <nl> + GAPI_Assert ( in . channels ( ) = = 3 ) ; <nl> + GAPI_Assert ( out . cols = = in . cols ) ; <nl> + GAPI_Assert ( out . rows = = 3 * in . 
rows ) ; <nl> + <nl> + std : : vector < cv : : Mat > outs ( 3 ) ; <nl> + for ( int i = 0 ; i < 3 ; i + + ) { <nl> + outs [ i ] = out ( cv : : Rect ( 0 , i * in . rows , in . cols , in . rows ) ) ; <nl> + } <nl> + cv : : split ( in , outs ) ; <nl> + } <nl> + <nl> + GAPI_OCV_KERNEL ( OCVResize3c3p , GResize3c3p ) <nl> + { <nl> + static void run ( const cv : : Mat & in , cv : : Size out_sz , int interp , cv : : Mat & out ) <nl> + { <nl> + cv : : Mat resized_mat ; <nl> + cv : : resize ( in , resized_mat , out_sz , 0 , 0 , interp ) ; <nl> + <nl> + std : : vector < cv : : Mat > outs ( 3 ) ; <nl> + for ( int i = 0 ; i < 3 ; i + + ) { <nl> + outs [ i ] = out ( cv : : Rect ( 0 , i * out_sz . height , out_sz . width , out_sz . height ) ) ; <nl> + } <nl> + cv : : split ( resized_mat , outs ) ; <nl> + } <nl> + } ; <nl> + <nl> + GAPI_OCV_KERNEL ( OCVResize3p3p , GResize3p3p ) <nl> + { <nl> + static void run ( const cv : : Mat & in , cv : : Size out_sz , int interp , cv : : Mat & out ) <nl> + { <nl> + std : : vector < cv : : Mat > ins ( 3 ) ; <nl> + std : : vector < cv : : Mat > outs ( 3 ) ; <nl> + <nl> + int inH = in . rows / 3 ; <nl> + int inW = in . cols ; <nl> + int outH = out . rows / 3 ; <nl> + int outW = out . cols ; <nl> + for ( int i = 0 ; i < 3 ; i + + ) { <nl> + ins [ i ] = in ( cv : : Rect ( 0 , i * inH , inW , inH ) ) ; <nl> + outs [ i ] = out ( cv : : Rect ( 0 , i * outH , outW , outH ) ) ; <nl> + cv : : resize ( ins [ i ] , outs [ i ] , out_sz , 0 , 0 , interp ) ; <nl> + } <nl> + } <nl> + } ; <nl> + <nl> + GAPI_OCV_KERNEL ( OCVNV12toRGBp , GNV12toRGBp ) <nl> + { <nl> + static void run ( const cv : : Mat & inY , const cv : : Mat & inUV , cv : : Mat & out ) <nl> + { <nl> + cv : : Mat rgb ; <nl> + cv : : cvtColorTwoPlane ( inY , inUV , rgb , cv : : COLOR_YUV2RGB_NV12 ) ; <nl> + toPlanar ( rgb , out ) ; <nl> + } <nl> + } ; <nl> + <nl> + struct PlanarTest : public TestWithParam < std : : pair < cv : : Size , cv : : Size > > { } ; <nl> + TEST_P ( PlanarTest , Resize3c3p ) <nl> + { <nl> + cv : : Size in_sz , out_sz ; <nl> + std : : tie ( in_sz , out_sz ) = GetParam ( ) ; <nl> + int interp = cv : : INTER_NEAREST ; <nl> + <nl> + cv : : Mat in_mat = cv : : Mat ( in_sz , CV_8UC3 ) ; <nl> + cv : : randn ( in_mat , cv : : Scalar : : all ( 127 . 0f ) , cv : : Scalar : : all ( 40 . f ) ) ; <nl> + <nl> + cv : : Mat out_mat = cv : : Mat : : zeros ( out_sz . height * 3 , out_sz . width , CV_8UC1 ) ; <nl> + cv : : Mat out_mat_ocv = cv : : Mat : : zeros ( out_sz . height * 3 , out_sz . width , CV_8UC1 ) ; <nl> + <nl> + cv : : GMat in ; <nl> + auto out = GResize3c3p : : on ( in , out_sz , interp ) ; <nl> + cv : : GComputation c ( cv : : GIn ( in ) , cv : : GOut ( out ) ) ; <nl> + <nl> + auto pkg = cv : : gapi : : kernels < OCVResize3c3p > ( ) ; <nl> + c . apply ( cv : : gin ( in_mat ) , cv : : gout ( out_mat ) , cv : : compile_args ( pkg ) ) ; <nl> + <nl> + cv : : Mat resized_mat ; <nl> + cv : : resize ( in_mat , resized_mat , out_sz , 0 , 0 , interp ) ; <nl> + toPlanar ( resized_mat , out_mat_ocv ) ; <nl> + <nl> + EXPECT_EQ ( 0 , cv : : countNonZero ( out_mat ! = out_mat_ocv ) ) ; <nl> + } <nl> + <nl> + TEST_P ( PlanarTest , Resize3p3p ) <nl> + { <nl> + cv : : Size in_sz , out_sz ; <nl> + std : : tie ( in_sz , out_sz ) = GetParam ( ) ; <nl> + int interp = cv : : INTER_NEAREST ; <nl> + <nl> + cv : : Mat in_mat = cv : : Mat ( cv : : Size { in_sz . width , in_sz . height * 3 } , CV_8UC1 ) ; <nl> + cv : : randn ( in_mat , cv : : Scalar : : all ( 127 . 0f ) , cv : : Scalar : : all ( 40 . 
f ) ) ; <nl> + <nl> + cv : : Mat out_mat = cv : : Mat : : zeros ( out_sz . height * 3 , out_sz . width , CV_8UC1 ) ; <nl> + cv : : Mat out_mat_ocv = cv : : Mat : : zeros ( out_sz . height * 3 , out_sz . width , CV_8UC1 ) ; <nl> + <nl> + cv : : GMat in ; <nl> + auto out = GResize3p3p : : on ( in , out_sz , interp ) ; <nl> + cv : : GComputation c ( cv : : GIn ( in ) , cv : : GOut ( out ) ) ; <nl> + <nl> + auto pkg = cv : : gapi : : kernels < OCVResize3p3p > ( ) ; <nl> + <nl> + c . compile ( cv : : descr_of ( in_mat ) . asPlanar ( 3 ) , cv : : compile_args ( pkg ) ) <nl> + ( cv : : gin ( in_mat ) , cv : : gout ( out_mat ) ) ; <nl> + <nl> + for ( int i = 0 ; i < 3 ; i + + ) { <nl> + const cv : : Mat in_mat_roi = in_mat ( cv : : Rect ( 0 , i * in_sz . height , in_sz . width , in_sz . height ) ) ; <nl> + cv : : Mat out_mat_roi = out_mat_ocv ( cv : : Rect ( 0 , i * out_sz . height , out_sz . width , out_sz . height ) ) ; <nl> + cv : : resize ( in_mat_roi , out_mat_roi , out_sz , 0 , 0 , interp ) ; <nl> + } <nl> + <nl> + EXPECT_EQ ( 0 , cv : : countNonZero ( out_mat ! = out_mat_ocv ) ) ; <nl> + } <nl> + <nl> + TEST_P ( PlanarTest , Pipeline ) <nl> + { <nl> + cv : : Size in_sz , out_sz ; <nl> + std : : tie ( in_sz , out_sz ) = GetParam ( ) ; <nl> + int interp = cv : : INTER_NEAREST ; <nl> + <nl> + cv : : Mat in_mat = cv : : Mat ( cv : : Size { in_sz . width , in_sz . height * 3 / 2 } , CV_8UC1 ) ; <nl> + cv : : randn ( in_mat , cv : : Scalar : : all ( 127 . 0f ) , cv : : Scalar : : all ( 40 . f ) ) ; <nl> + <nl> + cv : : Size uv_sz ( in_sz . width / 2 , in_sz . height / 2 ) ; <nl> + <nl> + cv : : Mat y_mat = cv : : Mat ( in_sz , CV_8UC1 , in_mat . data ) ; <nl> + cv : : Mat uv_mat = cv : : Mat ( uv_sz , CV_8UC2 , in_mat . data + in_mat . step1 ( ) * in_sz . height ) ; <nl> + <nl> + cv : : Mat out_mat = cv : : Mat : : zeros ( out_sz . height * 3 , out_sz . width , CV_8UC1 ) ; <nl> + cv : : Mat out_mat_ocv = cv : : Mat : : zeros ( out_sz . height * 3 , out_sz . width , CV_8UC1 ) ; <nl> + <nl> + cv : : GMat inY , inUV ; <nl> + auto out = GResize3p3p : : on ( GNV12toRGBp : : on ( inY , inUV ) , out_sz , interp ) ; <nl> + cv : : GComputation c ( cv : : GIn ( inY , inUV ) , cv : : GOut ( out ) ) ; <nl> + <nl> + auto pkg = cv : : gapi : : kernels < OCVNV12toRGBp , OCVResize3p3p > ( ) ; <nl> + c . apply ( cv : : gin ( y_mat , uv_mat ) , cv : : gout ( out_mat ) , cv : : compile_args ( pkg ) ) ; <nl> + <nl> + cv : : Mat rgb , resized_mat ; <nl> + cv : : cvtColorTwoPlane ( y_mat , uv_mat , rgb , cv : : COLOR_YUV2RGB_NV12 ) ; <nl> + cv : : resize ( rgb , resized_mat , out_sz , 0 , 0 , interp ) ; <nl> + toPlanar ( resized_mat , out_mat_ocv ) ; <nl> + <nl> + EXPECT_EQ ( 0 , cv : : countNonZero ( out_mat ! = out_mat_ocv ) ) ; <nl> + } <nl> + <nl> + INSTANTIATE_TEST_CASE_P ( Sanity , PlanarTest , <nl> + Values ( std : : make_pair ( cv : : Size { 8 , 8 } , cv : : Size { 4 , 4 } ) <nl> + , std : : make_pair ( cv : : Size { 960 , 540 } , cv : : Size { 224 , 224 } ) <nl> + , std : : make_pair ( cv : : Size { 64 , 64 } , cv : : Size { 224 , 224 } ) <nl> + ) ) ; <nl> + <nl> + } / / namespace opencv_test <nl> mmm a / modules / gapi / test / internal / gapi_int_gmetaarg_test . cpp <nl> ppp b / modules / gapi / test / internal / gapi_int_gmetaarg_test . cpp <nl> TEST ( GMetaArg , Can_Get_Metas_From_Output_Run_Args ) <nl> EXPECT_EQ ( cv : : Size ( 3 , 3 ) , m_desc . 
size ) ; <nl> } <nl> <nl> + TEST ( GMetaArg , Can_Describe_RunArg ) <nl> + { <nl> + cv : : Mat m ( 3 , 3 , CV_8UC3 ) ; <nl> + cv : : UMat um ( 3 , 3 , CV_8UC3 ) ; <nl> + cv : : Scalar s ; <nl> + constexpr int w = 3 , h = 3 , c = 3 ; <nl> + uchar data [ w * h * c ] ; <nl> + cv : : gapi : : own : : Mat om ( h , w , CV_8UC3 , data ) ; <nl> + cv : : gapi : : own : : Scalar os ; <nl> + std : : vector < int > v ; <nl> + <nl> + GMetaArgs metas = { GMetaArg ( descr_of ( m ) ) , <nl> + GMetaArg ( descr_of ( um ) ) , <nl> + GMetaArg ( descr_of ( s ) ) , <nl> + GMetaArg ( descr_of ( om ) ) , <nl> + GMetaArg ( descr_of ( os ) ) , <nl> + GMetaArg ( descr_of ( v ) ) } ; <nl> + <nl> + auto in_run_args = cv : : gin ( m , um , s , om , os , v ) ; <nl> + <nl> + for ( int i = 0 ; i < 3 ; i + + ) { <nl> + EXPECT_TRUE ( can_describe ( metas [ i ] , in_run_args [ i ] ) ) ; <nl> + } <nl> + } <nl> + <nl> + TEST ( GMetaArg , Can_Describe_RunArgs ) <nl> + { <nl> + cv : : Mat m ( 3 , 3 , CV_8UC3 ) ; <nl> + cv : : Scalar s ; <nl> + std : : vector < int > v ; <nl> + <nl> + GMetaArgs metas0 = { GMetaArg ( descr_of ( m ) ) , GMetaArg ( descr_of ( s ) ) , GMetaArg ( descr_of ( v ) ) } ; <nl> + auto in_run_args0 = cv : : gin ( m , s , v ) ; <nl> + <nl> + EXPECT_TRUE ( can_describe ( metas0 , in_run_args0 ) ) ; <nl> + <nl> + auto in_run_args01 = cv : : gin ( m , s ) ; <nl> + EXPECT_FALSE ( can_describe ( metas0 , in_run_args01 ) ) ; <nl> + } <nl> + <nl> + TEST ( GMetaArg , Can_Describe_RunArgP ) <nl> + { <nl> + cv : : Mat m ( 3 , 3 , CV_8UC3 ) ; <nl> + cv : : UMat um ( 3 , 3 , CV_8UC3 ) ; <nl> + cv : : Scalar s ; <nl> + constexpr int w = 3 , h = 3 , c = 3 ; <nl> + uchar data [ w * h * c ] ; <nl> + cv : : gapi : : own : : Mat om ( h , w , CV_8UC3 , data ) ; <nl> + cv : : gapi : : own : : Scalar os ; <nl> + std : : vector < int > v ; <nl> + <nl> + GMetaArgs metas = { GMetaArg ( descr_of ( m ) ) , <nl> + GMetaArg ( descr_of ( um ) ) , <nl> + GMetaArg ( descr_of ( s ) ) , <nl> + GMetaArg ( descr_of ( om ) ) , <nl> + GMetaArg ( descr_of ( os ) ) , <nl> + GMetaArg ( descr_of ( v ) ) } ; <nl> + <nl> + auto out_run_args = cv : : gout ( m , um , s , om , os , v ) ; <nl> + <nl> + for ( int i = 0 ; i < 3 ; i + + ) { <nl> + EXPECT_TRUE ( can_describe ( metas [ i ] , out_run_args [ i ] ) ) ; <nl> + } <nl> + } <nl> + <nl> } / / namespace opencv_test <nl>
|
Added planar flag to GMatDesc, integrated it into the framework, added tests
|
opencv/opencv
|
f81886d17c50082254d49d0258a7a338b5786e70
|
2019-04-26T11:04:44Z
|
mmm a / src / rpc / connectivity / cluster . cc <nl> ppp b / src / rpc / connectivity / cluster . cc <nl> void connectivity_cluster_t : : kill_connection ( peer_id_t peer ) THROWS_NOTHING { <nl> <nl> if ( it ! = connection_map - > end ( ) ) { <nl> tcp_conn_stream_t * conn = it - > second . first - > conn ; <nl> + guarantee ( conn ! = NULL , " Attempted to kill connection to myself . " ) ; <nl> guarantee ( get_thread_id ( ) = = conn - > home_thread ( ) ) ; <nl> <nl> if ( conn - > is_read_open ( ) ) { <nl> mmm a / src / rpc / mailbox / mailbox . cc <nl> ppp b / src / rpc / mailbox / mailbox . cc <nl> void mailbox_manager_t : : on_message ( peer_id_t source_peer , read_stream_t * stream ) <nl> dest_thread = get_thread_id ( ) . threadnum ; <nl> } <nl> <nl> - / / Construct a new stream to use <nl> - scoped_ptr_t < vector_read_stream_t > new_stream ( <nl> - new vector_read_stream_t ( std : : move ( stream_data ) , stream_data_offset ) ) ; <nl> - <nl> - coro_t : : spawn_sometime ( boost : : bind ( & mailbox_manager_t : : mailbox_read_coroutine , <nl> - this , source_peer , threadnum_t ( dest_thread ) , <nl> - dest_mailbox_id , new_stream . release ( ) ) ) ; <nl> + / / We use ` spawn_now_dangerously ( ) ` to avoid having to heap - allocate ` stream_data ` . <nl> + / / Instead we pass in a pointer to our local automatically allocated object <nl> + / / and ` mailbox_read_coroutine ( ) ` moves the data out of it before it yields . <nl> + coro_t : : spawn_now_dangerously ( boost : : bind ( & mailbox_manager_t : : mailbox_read_coroutine , <nl> + this , source_peer , threadnum_t ( dest_thread ) , <nl> + dest_mailbox_id , & stream_data , <nl> + stream_data_offset ) ) ; <nl> } <nl> <nl> void mailbox_manager_t : : mailbox_read_coroutine ( peer_id_t source_peer , <nl> threadnum_t dest_thread , <nl> raw_mailbox_t : : id_t dest_mailbox_id , <nl> - vector_read_stream_t * stream_ptr ) { <nl> + std : : vector < char > * stream_data , <nl> + int64_t stream_data_offset ) { <nl> <nl> - scoped_ptr_t < vector_read_stream_t > stream ( stream_ptr ) ; <nl> + / / Construct a new stream to use <nl> + vector_read_stream_t stream ( std : : move ( * stream_data ) , stream_data_offset ) ; <nl> + stream_data = NULL ; / / < - It is not safe to use ` stream_data ` anymore once we <nl> + / / switch the thread <nl> <nl> - try { <nl> + bool archive_exception = false ; <nl> + { <nl> on_thread_t rethreader ( dest_thread ) ; <nl> <nl> - raw_mailbox_t * mbox = mailbox_tables . get ( ) - > find_mailbox ( dest_mailbox_id ) ; <nl> - if ( mbox ! = NULL ) { <nl> - mbox - > callback - > read ( stream . get ( ) ) ; <nl> + try { <nl> + raw_mailbox_t * mbox = mailbox_tables . get ( ) - > find_mailbox ( dest_mailbox_id ) ; <nl> + if ( mbox ! = NULL ) { <nl> + mbox - > callback - > read ( & stream ) ; <nl> + } <nl> + } catch ( const fake_archive_exc_t & e ) { <nl> + / / Set a flag and handle the exception later . <nl> + / / This is to avoid doing thread switches and other coroutine things <nl> + / / while being in the exception handler . Just a precaution . . . <nl> + archive_exception = true ; <nl> } <nl> - } catch ( const fake_archive_exc_t & e ) { <nl> + } <nl> + if ( archive_exception ) { <nl> logWRN ( " Received an invalid cluster message from a peer . Disconnecting . " ) ; <nl> message_service - > kill_connection ( source_peer ) ; <nl> } <nl> mmm a / src / rpc / mailbox / mailbox . hpp <nl> ppp b / src / rpc / mailbox / mailbox . 
hpp <nl> class mailbox_manager_t : public message_handler_t { <nl> <nl> void mailbox_read_coroutine ( peer_id_t source_peer , threadnum_t dest_thread , <nl> raw_mailbox_t : : id_t dest_mailbox_id , <nl> - vector_read_stream_t * stream_ptr ) ; <nl> + std : : vector < char > * stream_data , <nl> + int64_t stream_data_offset ) ; <nl> } ; <nl> <nl> # endif / * RPC_MAILBOX_MAILBOX_HPP_ * / <nl>
|
Made exception handling in mailbox_t safer, and avoided one alloc in the message handler.
|
rethinkdb/rethinkdb
|
1ab851ea72f67eed380ad4e6782dc076973b7554
|
2014-01-30T02:47:36Z
|
mmm a / xbmc / LangInfo . cpp <nl> ppp b / xbmc / LangInfo . cpp <nl> <nl> * / <nl> <nl> # include " LangInfo . h " <nl> + # include " LangCodeExpander . h " <nl> # include " AdvancedSettings . h " <nl> # include " GUISettings . h " <nl> # include " LocalizeStrings . h " <nl> void CLangInfo : : CRegion : : SetGlobalLocale ( ) <nl> { <nl> CStdString strLocale ; <nl> if ( m_strRegionLocaleName . length ( ) > 0 ) <nl> + { <nl> strLocale = m_strLangLocaleName + " _ " + m_strRegionLocaleName ; <nl> - else <nl> - strLocale = m_strLangLocaleName ; <nl> + # ifdef _LINUX <nl> + strLocale + = " . UTF - 8 " ; <nl> + # endif <nl> + } <nl> <nl> / / We need to set the locale to only change the collate . Otherwise , <nl> / / decimal separator is changed depending of the current language <nl> void CLangInfo : : CRegion : : SetGlobalLocale ( ) <nl> <nl> } catch ( . . . ) { <nl> current_locale = locale : : classic ( ) ; <nl> + strLocale = " C " ; <nl> } <nl> <nl> locale : : global ( current_locale ) ; <nl> bool CLangInfo : : Load ( const CStdString & strFileName ) <nl> if ( pRootElement - > Attribute ( " locale " ) ) <nl> m_defaultRegion . m_strLangLocaleName = pRootElement - > Attribute ( " locale " ) ; <nl> <nl> + # ifdef _WIN32 <nl> + / / Windows need 3 chars isolang code <nl> + if ( m_defaultRegion . m_strLangLocaleName . length ( ) = = 2 ) <nl> + { <nl> + if ( ! g_LangCodeExpander . ConvertTwoToThreeCharCode ( m_defaultRegion . m_strLangLocaleName , m_defaultRegion . m_strLangLocaleName , true ) ) <nl> + m_defaultRegion . m_strLangLocaleName = " " ; <nl> + } <nl> + # endif <nl> + <nl> const TiXmlNode * pCharSets = pRootElement - > FirstChild ( " charsets " ) ; <nl> if ( pCharSets & & ! pCharSets - > NoChildren ( ) ) <nl> { <nl> bool CLangInfo : : Load ( const CStdString & strFileName ) <nl> if ( pRegion - > Attribute ( " locale " ) ) <nl> region . m_strRegionLocaleName = pRegion - > Attribute ( " locale " ) ; <nl> <nl> + # ifdef _WIN32 <nl> + / / Windows need 3 chars regions code <nl> + if ( region . m_strRegionLocaleName . length ( ) = = 2 ) <nl> + { <nl> + if ( ! g_LangCodeExpander . ConvertLinuxToWindowsRegionCodes ( region . m_strRegionLocaleName , region . m_strRegionLocaleName ) ) <nl> + region . m_strRegionLocaleName = " " ; <nl> + } <nl> + # endif <nl> + <nl> const TiXmlNode * pDateLong = pRegion - > FirstChild ( " datelong " ) ; <nl> if ( pDateLong & & ! pDateLong - > NoChildren ( ) ) <nl> region . m_strDateFormatLong = pDateLong - > FirstChild ( ) - > Value ( ) ; <nl>
|
fixed: properly set locale on Linux and Windows (with proper conversion). Why not respect the standard?! (fix)
|
xbmc/xbmc
|
766ac9cb34f69cef3cf06a0707c0cd9a861ace56
|
2010-10-13T22:16:40Z
|
mmm a / caffe2 / predictor / emulator / data_filler . cc <nl> ppp b / caffe2 / predictor / emulator / data_filler . cc <nl> void DataNetFiller : : fill_input_internal ( TensorList_t * input_data ) const { <nl> } <nl> } <nl> <nl> - static void fill_with_type ( <nl> + void fill_with_type ( <nl> const TensorFiller & filler , <nl> const std : : string & type , <nl> TensorCPU * output ) { <nl> mmm a / caffe2 / predictor / emulator / data_filler . h <nl> ppp b / caffe2 / predictor / emulator / data_filler . h <nl> class DataNetFiller : public Filler { <nl> const NetDef data_net_ ; <nl> } ; <nl> <nl> + void fill_with_type ( <nl> + const TensorFiller & filler , <nl> + const std : : string & type , <nl> + TensorCPU * output ) ; <nl> + <nl> / * <nl> * @ run_net : the predict net with parameter and input names <nl> * @ input_dims : the input dimentions of all operator inputs of run_net <nl> class DataRandomFiller : public Filler { <nl> <nl> void fill_parameter ( Workspace * ws ) const override ; <nl> <nl> - protected : <nl> - DataRandomFiller ( ) { } <nl> - <nl> - TensorFiller get_tensor_filler ( <nl> + static TensorFiller get_tensor_filler ( <nl> const OperatorDef & op_def , <nl> int input_index , <nl> const std : : vector < std : : vector < int64_t > > & input_dims ) { <nl> class DataRandomFiller : public Filler { <nl> return filler ; <nl> } <nl> <nl> + protected : <nl> + DataRandomFiller ( ) { } <nl> + <nl> using filler_type_pair_t = std : : pair < TensorFiller , std : : string > ; <nl> std : : unordered_map < std : : string , filler_type_pair_t > parameters_ ; <nl> std : : unordered_map < std : : string , filler_type_pair_t > inputs_ ; <nl>
|
Automatic generation of unit tests for Glow integration
|
pytorch/pytorch
|
9a81d1e692e9df8da667d14e6d3611af77cd5b8d
|
2019-04-30T19:13:06Z
|
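
The functional point of the change above is linkage: fill_with_type loses its namespace-scope `static` (internal linkage) and gains a header declaration, and get_tensor_filler becomes a static member, so both can be reached from outside data_filler.cc. A small stand-alone sketch of the free-function side, with a placeholder body and types instead of the real Caffe2 TensorFiller/TensorCPU:

#include <iostream>
#include <string>

// Stand-in for fill_with_type: in the original it was `static` at namespace
// scope and therefore invisible to other translation units; dropping the
// `static` and declaring it in the header is what lets an external test
// generator call it. The body here is a placeholder, not real filling logic.
void fill_with_type(const std::string& type, int* output)
{
  *output = (type == "float") ? 32 : 64;
}

int main()
{
  int bits = 0;
  fill_with_type("float", &bits);  // now callable from any translation unit
  std::cout << "filled with a " << bits << "-bit type\n";
  return 0;
}
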
mmm a / lib / Parse / Parser . cpp <nl> ppp b / lib / Parse / Parser . cpp <nl> bool Parser : : ParseExprBrace ( NullablePtr < Expr > & Result ) { <nl> return true ; <nl> } <nl> <nl> + / / Diagnose cases where there was a ; missing after a ' var ' . <nl> + if ( MissingSemiAtEnd & & Entries . back ( ) . is < NamedDecl * > ( ) ) { <nl> + Error ( RBLoc , " expected ' ; ' after var declaration " ) ; <nl> + MissingSemiAtEnd = false ; <nl> + } <nl> <nl> Result = S . expr . ActOnBraceExpr ( LBLoc , Entries . data ( ) , Entries . size ( ) , <nl> MissingSemiAtEnd , RBLoc ) ; <nl> mmm a / lib / Sema / SemaExpr . cpp <nl> ppp b / lib / Sema / SemaExpr . cpp <nl> SemaExpr : : ActOnIdentifierExpr ( llvm : : StringRef Text , llvm : : SMLoc Loc ) { <nl> return 0 ; <nl> } <nl> <nl> - / / FIXME : If the decl had an " invalid " type , then return the error object to <nl> - / / improve error recovery . <nl> + / / TODO : QOI : If the decl had an " invalid " bit set , then return the error <nl> + / / object to improve error recovery . <nl> return new ( S . Context ) DeclRefExpr ( D , Loc , D - > Ty ) ; <nl> } <nl> <nl> SemaExpr : : ActOnBraceExpr ( llvm : : SMLoc LBLoc , <nl> const llvm : : PointerUnion < Expr * , NamedDecl * > * Elements , <nl> unsigned NumElements , bool HasMissingSemi , <nl> llvm : : SMLoc RBLoc ) { <nl> - / / Diagnose cases where there was a ; missing after a ' var ' . <nl> - if ( HasMissingSemi & & Elements [ NumElements - 1 ] . is < NamedDecl * > ( ) ) { <nl> - Error ( RBLoc , " expected ' ; ' after var declaration " ) ; <nl> - HasMissingSemi = false ; <nl> - } <nl> - <nl> / / If any of the elements of the braces has a function type ( which indicates <nl> / / that a function didn ' t get called ) , then produce an error . We don ' t do <nl> / / this for the last element in the ' missing semi ' case , because the brace <nl> / / expr as a whole has the function result . <nl> + / / TODO : What about tuples which contain functions by - value that are dead ? <nl> for ( unsigned i = 0 ; i ! = NumElements - ( HasMissingSemi ? 1 : 0 ) ; + + i ) <nl> if ( Elements [ i ] . is < Expr * > ( ) & & <nl> llvm : : isa < FunctionType > ( Elements [ i ] . get < Expr * > ( ) - > Ty ) ) <nl> - / / FIXME : Add source range . <nl> + / / TODO : QOI : Add source range . <nl> Error ( Elements [ i ] . get < Expr * > ( ) - > getLocStart ( ) , <nl> " expression resolves to an unevaluated function " ) ; <nl> <nl> SemaExpr : : ActOnBraceExpr ( llvm : : SMLoc LBLoc , <nl> S . Context . Allocate ( sizeof ( * Elements ) * NumElements , 8 ) ; <nl> memcpy ( NewElements , Elements , sizeof ( * Elements ) * NumElements ) ; <nl> <nl> - / / FIXME : Create the right node . <nl> return new ( S . Context ) BraceExpr ( LBLoc , NewElements , NumElements , <nl> HasMissingSemi , RBLoc , ResultTy ) ; <nl> } <nl> SemaExpr : : ActOnTupleExpr ( llvm : : SMLoc LPLoc , Expr * * SubExprs , <nl> ResultTy = SubExprs [ 0 ] - > Ty ; <nl> } else { <nl> / / Compute the result type . <nl> - llvm : : SmallVector < llvm : : PointerUnion < Type * , NamedDecl * > , 8 > ResultTyElts ; <nl> + llvm : : SmallVector < TupleType : : TypeOrDecl , 8 > ResultTyElts ; <nl> <nl> for ( unsigned i = 0 , e = NumSubExprs ; i ! = e ; + + i ) <nl> ResultTyElts . push_back ( SubExprs [ i ] - > Ty ) ; <nl> mmm a / lib / Sema / SemaType . cpp <nl> ppp b / lib / Sema / SemaType . cpp <nl> Type * SemaType : : ActOnTupleType ( llvm : : SMLoc LPLoc , <nl> <nl> / / If the tuple only has a single element type , then this is a grouping paren , <nl> / / not a tuple . 
<nl> - / / FIXME : How do we handle declarations here ? <nl> if ( NumElements = = 1 ) { <nl> if ( NamedDecl * D = Elements [ 0 ] . dyn_cast < NamedDecl * > ( ) ) { <nl> Error ( D - > getLocStart ( ) , <nl> " grouping parenthesis cannot contain a declaration " ) ; <nl> - / / FIXME : need an error type for better recovery . <nl> - return S . Context . VoidType ; <nl> + return D - > Ty ; <nl> } <nl> return Elements [ 0 ] . get < Type * > ( ) ; <nl> } <nl>
|
various cleanups to fixmes . Move a parsing check from sema to parser .
|
apple/swift
|
883efc284b6cf60c14ab0ac047e8090ec341f923
|
2010-07-25T22:48:47Z
|
mmm a / src / mongo / transport / transport_layer_asio . cpp <nl> ppp b / src / mongo / transport / transport_layer_asio . cpp <nl> Status TransportLayerASIO : : setup ( ) { <nl> } <nl> <nl> GenericAcceptor acceptor ( * _acceptorReactor ) ; <nl> - acceptor . open ( addr - > protocol ( ) ) ; <nl> + try { <nl> + acceptor . open ( addr - > protocol ( ) ) ; <nl> + } catch ( std : : exception & ) { <nl> + / / Allow the server to start when " ipv6 : true " and " bindIpAll : true " , but the platform <nl> + / / does not support ipv6 ( e . g . , ipv6 kernel module is not loaded in Linux ) . <nl> + const auto bindAllIPv6Addr = " : : : " _sd + std : : to_string ( _listenerPort ) ; <nl> + if ( errno = = EAFNOSUPPORT & & _listenerOptions . enableIPv6 & & addr . family ( ) = = AF_INET6 & & <nl> + addr . toString ( ) = = bindAllIPv6Addr ) { <nl> + LOGV2_WARNING ( 4206501 , <nl> + " Failed to bind to { bind_addr } as the platform does not support ipv6 " , <nl> + " bind_addr " _attr = addr . toString ( ) ) ; <nl> + continue ; <nl> + } <nl> + <nl> + throw ; <nl> + } <nl> acceptor . set_option ( GenericAcceptor : : reuse_address ( true ) ) ; <nl> <nl> std : : error_code ec ; <nl>
|
SERVER - 42065 Handle bindIpAll exception when ipv6 is not supported
|
mongodb/mongo
|
fe90d82d6ce709828c8ed22e83262190dda5570a
|
2020-03-10T02:49:40Z
|
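
The fix above tolerates EAFNOSUPPORT when opening the IPv6 acceptor for the bind-all address and lets the server keep starting with IPv4 only. A hedged, POSIX-only sketch of that probe-and-skip pattern, independent of the MongoDB transport layer:

#include <cerrno>
#include <cstring>
#include <iostream>
#include <sys/socket.h>
#include <unistd.h>

int main()
{
  int fd = ::socket(AF_INET6, SOCK_STREAM, 0);
  if (fd < 0) {
    if (errno == EAFNOSUPPORT) {
      // Same spirit as the commit: warn and continue with IPv4 only.
      std::cerr << "ipv6 not supported on this platform, skipping the ':::port' bind\n";
      return 0;
    }
    std::cerr << "socket() failed: " << std::strerror(errno) << "\n";
    return 1;
  }
  std::cout << "ipv6 sockets are available\n";
  ::close(fd);
  return 0;
}
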
new file mode 100644 <nl> index 0000000000 . . efaca73bfa <nl> mmm / dev / null <nl> ppp b / code / mathematical - algorithms / gcd_and_lcm / gcd_and_lcm . c <nl> <nl> + / * Part of Cosmos by OpenGenus Foundation * / <nl> + / * Created by Shubham Prasad Singh on 13 / 10 / 2017 * / <nl> + / * GCD and LCM * / <nl> + <nl> + # include < stdio . h > <nl> + int _gcd ( int a , int b ) / / / / / / / / / / Euclid Algorithm <nl> + { <nl> + if ( ! b ) <nl> + return a ; <nl> + return _gcd ( b , a % b ) ; <nl> + } <nl> + int main ( ) <nl> + { <nl> + int a , b ; <nl> + printf ( " Enter the numbers \ n " ) ; <nl> + scanf ( " % d % d " , & a , & b ) ; <nl> + int gcd = _gcd ( a , b ) , lcm = a * b / gcd ; <nl> + printf ( " GCD is % d \ nLCM is % d " , gcd , lcm ) ; <nl> + return 0 ; <nl> + } <nl>
|
Merge pull request from shubhamcr / master
|
OpenGenus/cosmos
|
4068a8beabd6dac537c05bd11f82c5c8beb3d127
|
2017-10-13T18:26:47Z
|
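
The added file computes the GCD with Euclid's algorithm and the LCM as a * b / gcd. For reference, an equivalent C++ sketch; dividing before multiplying (a / g * b) avoids the intermediate overflow that the a * b form can hit for large inputs:

#include <cstdint>
#include <iostream>

// Iterative Euclid: gcd(a, b) == gcd(b, a mod b) until the remainder is 0.
std::int64_t gcd(std::int64_t a, std::int64_t b)
{
  while (b != 0) {
    std::int64_t r = a % b;
    a = b;
    b = r;
  }
  return a;
}

// lcm(a, b) = a / gcd(a, b) * b; dividing first keeps the intermediate small.
std::int64_t lcm(std::int64_t a, std::int64_t b)
{
  if (a == 0 || b == 0)
    return 0;
  return a / gcd(a, b) * b;
}

int main()
{
  std::cout << "gcd(12, 18) = " << gcd(12, 18) << "\n";  // 6
  std::cout << "lcm(12, 18) = " << lcm(12, 18) << "\n";  // 36
  return 0;
}
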
mmm a / hphp / hack / src / hhbc / hhbc_hhas . ml <nl> ppp b / hphp / hack / src / hhbc / hhbc_hhas . ml <nl> and string_of_hint ~ ns h = <nl> ~ namespace : Namespace_env . empty_with_default_popt <nl> h <nl> in <nl> - if ns then h else SU . strip_ns h <nl> + let h = if ns then h else SU . strip_ns h in <nl> + Php_escaping . escape h <nl> <nl> and string_of_import_flavor = function <nl> | A . Include - > " include " <nl> let add_doc buf indent doc_comment = <nl> match doc_comment with <nl> | Some cmt - > <nl> add_indented_line buf indent @ @ <nl> - Printf . sprintf " . doc \ " \ " \ " % s \ " \ " \ " ; " ( Php_escaping . escape cmt ) <nl> + Printf . sprintf " . doc % s ; " ( SU . triple_quote_string cmt ) <nl> | None - > ( ) <nl> <nl> let add_body buf indent body = <nl> let property_type_info p = <nl> let property_doc_comment p = <nl> match Hhas_property . doc_comment p with <nl> | None - > " " <nl> - | Some s - > Printf . sprintf " \ " \ " \ " % s \ " \ " \ " " ( Php_escaping . escape s ) <nl> + | Some s - > Printf . sprintf " % s " ( SU . triple_quote_string s ) <nl> <nl> let add_property class_def buf property = <nl> B . add_string buf " \ n . property " ; <nl> mmm a / hphp / hack / src / hhbc / hhbc_string_utils . ml <nl> ppp b / hphp / hack / src / hhbc / hhbc_string_utils . ml <nl> module SN = Naming_special_names <nl> let quote_string s = " \ " " ^ Php_escaping . escape s ^ " \ " " <nl> let quote_string_with_escape s = " \ \ \ " " ^ Php_escaping . escape s ^ " \ \ \ " " <nl> let single_quote_string_with_escape s = " ' " ^ Php_escaping . escape s ^ " ' " <nl> + let triple_quote_string s = " \ " \ " \ " " ^ Php_escaping . escape s ^ " \ " \ " \ " " <nl> <nl> let prefix_namespace n s = n ^ " \ \ " ^ s <nl> let strip_global_ns s = <nl> mmm a / hphp / test / hackc_repo_failing_tests_slow <nl> ppp b / hphp / test / hackc_repo_failing_tests_slow <nl> slow / ext_process / fork_log . php <nl> slow / ext_process / lwp . php <nl> slow / ext_socket / accept_block . php <nl> slow / ext_socket / persistent_socket . php <nl> - slow / function / closure - default . php <nl> slow / hh_namespace_migration / hh_vector5 . php <nl> slow / hhbbc / iteration . php <nl> slow / inout / bad - call - 15 . php <nl>
|
properly triple quote in hhbc_hhas
|
facebook/hhvm
|
2b3919903095ded33c296ece1ba6c74a895d2194
|
2017-12-22T11:06:15Z
|
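
The commit above introduces triple_quote_string so doc comments are emitted as an escaped payload wrapped in """..."""; the real helper is OCaml in hhbc_string_utils.ml. The following C++ stand-in only illustrates the escape-then-wrap pattern, with a simplified escape function in place of Php_escaping.escape:

#include <iostream>
#include <string>

// Simplified stand-in for Php_escaping.escape: backslash-escape quotes and backslashes.
std::string escape(const std::string& s)
{
  std::string out;
  for (char c : s) {
    if (c == '"' || c == '\\')
      out.push_back('\\');
    out.push_back(c);
  }
  return out;
}

// The pattern the commit centralizes: escape the payload, then wrap it in triple quotes.
std::string triple_quote(const std::string& s)
{
  return "\"\"\"" + escape(s) + "\"\"\"";
}

int main()
{
  std::cout << ".doc " << triple_quote("Says \"hello\" to the reader.") << ";\n";
  return 0;
}
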
mmm a / Kodi . xcodeproj / project . pbxproj <nl> ppp b / Kodi . xcodeproj / project . pbxproj <nl> <nl> 395C2A191A9F074C00EBC7AD / * Locale . cpp in Sources * / = { isa = PBXBuildFile ; fileRef = 395C2A171A9F074C00EBC7AD / * Locale . cpp * / ; } ; <nl> 395C2A1A1A9F074C00EBC7AD / * Locale . cpp in Sources * / = { isa = PBXBuildFile ; fileRef = 395C2A171A9F074C00EBC7AD / * Locale . cpp * / ; } ; <nl> 395C2A1B1A9F074C00EBC7AD / * Locale . cpp in Sources * / = { isa = PBXBuildFile ; fileRef = 395C2A171A9F074C00EBC7AD / * Locale . cpp * / ; } ; <nl> - 395C2A1F1A9F96A700EBC7AD / * ContextItemAddon . cpp in Sources * / = { isa = PBXBuildFile ; fileRef = 395C2A1D1A9F96A700EBC7AD / * ContextItemAddon . cpp * / ; } ; <nl> - 395C2A201A9F96A700EBC7AD / * ContextItemAddon . cpp in Sources * / = { isa = PBXBuildFile ; fileRef = 395C2A1D1A9F96A700EBC7AD / * ContextItemAddon . cpp * / ; } ; <nl> - 395C2A211A9F96A700EBC7AD / * ContextItemAddon . cpp in Sources * / = { isa = PBXBuildFile ; fileRef = 395C2A1D1A9F96A700EBC7AD / * ContextItemAddon . cpp * / ; } ; <nl> 395C2A241AA4C32100EBC7AD / * AudioDecoder . cpp in Sources * / = { isa = PBXBuildFile ; fileRef = 395C2A221AA4C32100EBC7AD / * AudioDecoder . cpp * / ; } ; <nl> 395C2A251AA4C32100EBC7AD / * AudioDecoder . cpp in Sources * / = { isa = PBXBuildFile ; fileRef = 395C2A221AA4C32100EBC7AD / * AudioDecoder . cpp * / ; } ; <nl> 395C2A261AA4C32100EBC7AD / * AudioDecoder . cpp in Sources * / = { isa = PBXBuildFile ; fileRef = 395C2A221AA4C32100EBC7AD / * AudioDecoder . cpp * / ; } ; <nl> <nl> DF527736151BAF4C00B5B63B / * WebSocketV13 . cpp in Sources * / = { isa = PBXBuildFile ; fileRef = DF52772F151BAF4C00B5B63B / * WebSocketV13 . cpp * / ; } ; <nl> DF527737151BAF4C00B5B63B / * WebSocketV8 . cpp in Sources * / = { isa = PBXBuildFile ; fileRef = DF527731151BAF4C00B5B63B / * WebSocketV8 . cpp * / ; } ; <nl> DF529BAE1741697B00523FB4 / * Environment . cpp in Sources * / = { isa = PBXBuildFile ; fileRef = DF529BAC1741697B00523FB4 / * Environment . cpp * / ; } ; <nl> + DF54F7FE1B6580AD000FCBA4 / * ContextMenuItem . cpp in Sources * / = { isa = PBXBuildFile ; fileRef = DF54F7FC1B6580AC000FCBA4 / * ContextMenuItem . cpp * / ; } ; <nl> + DF54F7FF1B6580AD000FCBA4 / * ContextMenuItem . cpp in Sources * / = { isa = PBXBuildFile ; fileRef = DF54F7FC1B6580AC000FCBA4 / * ContextMenuItem . cpp * / ; } ; <nl> + DF54F8001B6580AD000FCBA4 / * ContextMenuItem . cpp in Sources * / = { isa = PBXBuildFile ; fileRef = DF54F7FC1B6580AC000FCBA4 / * ContextMenuItem . cpp * / ; } ; <nl> + DF54F8031B6580C8000FCBA4 / * ContextMenuAddon . cpp in Sources * / = { isa = PBXBuildFile ; fileRef = DF54F8011B6580C8000FCBA4 / * ContextMenuAddon . cpp * / ; } ; <nl> + DF54F8041B6580C8000FCBA4 / * ContextMenuAddon . cpp in Sources * / = { isa = PBXBuildFile ; fileRef = DF54F8011B6580C8000FCBA4 / * ContextMenuAddon . cpp * / ; } ; <nl> + DF54F8051B6580C8000FCBA4 / * ContextMenuAddon . cpp in Sources * / = { isa = PBXBuildFile ; fileRef = DF54F8011B6580C8000FCBA4 / * ContextMenuAddon . cpp * / ; } ; <nl> DF56EF1F1A798A3F00CAAEFB / * HTTPFileHandler . cpp in Sources * / = { isa = PBXBuildFile ; fileRef = DF56EF1D1A798A3F00CAAEFB / * HTTPFileHandler . cpp * / ; } ; <nl> DF56EF201A798A3F00CAAEFB / * HTTPFileHandler . cpp in Sources * / = { isa = PBXBuildFile ; fileRef = DF56EF1D1A798A3F00CAAEFB / * HTTPFileHandler . cpp * / ; } ; <nl> DF56EF211A798A3F00CAAEFB / * HTTPFileHandler . cpp in Sources * / = { isa = PBXBuildFile ; fileRef = DF56EF1D1A798A3F00CAAEFB / * HTTPFileHandler . 
cpp * / ; } ; <nl> <nl> 395C2A101A9F072400EBC7AD / * ResourceFile . h * / = { isa = PBXFileReference ; fileEncoding = 4 ; lastKnownFileType = sourcecode . c . h ; path = ResourceFile . h ; sourceTree = " < group > " ; } ; <nl> 395C2A171A9F074C00EBC7AD / * Locale . cpp * / = { isa = PBXFileReference ; fileEncoding = 4 ; lastKnownFileType = sourcecode . cpp . cpp ; path = Locale . cpp ; sourceTree = " < group > " ; } ; <nl> 395C2A181A9F074C00EBC7AD / * Locale . h * / = { isa = PBXFileReference ; fileEncoding = 4 ; lastKnownFileType = sourcecode . c . h ; path = Locale . h ; sourceTree = " < group > " ; } ; <nl> - 395C2A1D1A9F96A700EBC7AD / * ContextItemAddon . cpp * / = { isa = PBXFileReference ; fileEncoding = 4 ; lastKnownFileType = sourcecode . cpp . cpp ; path = ContextItemAddon . cpp ; sourceTree = " < group > " ; } ; <nl> - 395C2A1E1A9F96A700EBC7AD / * ContextItemAddon . h * / = { isa = PBXFileReference ; fileEncoding = 4 ; lastKnownFileType = sourcecode . c . h ; path = ContextItemAddon . h ; sourceTree = " < group > " ; } ; <nl> 395C2A221AA4C32100EBC7AD / * AudioDecoder . cpp * / = { isa = PBXFileReference ; fileEncoding = 4 ; lastKnownFileType = sourcecode . cpp . cpp ; path = AudioDecoder . cpp ; sourceTree = " < group > " ; } ; <nl> 395C2A231AA4C32100EBC7AD / * AudioDecoder . h * / = { isa = PBXFileReference ; fileEncoding = 4 ; lastKnownFileType = sourcecode . c . h ; path = AudioDecoder . h ; sourceTree = " < group > " ; } ; <nl> 395F6DDB1A8133360088CC74 / * GUIDialogSimpleMenu . cpp * / = { isa = PBXFileReference ; fileEncoding = 4 ; lastKnownFileType = sourcecode . cpp . cpp ; path = GUIDialogSimpleMenu . cpp ; sourceTree = " < group > " ; } ; <nl> <nl> DF527732151BAF4C00B5B63B / * WebSocketV8 . h * / = { isa = PBXFileReference ; fileEncoding = 4 ; lastKnownFileType = sourcecode . c . h ; path = WebSocketV8 . h ; sourceTree = " < group > " ; } ; <nl> DF529BAC1741697B00523FB4 / * Environment . cpp * / = { isa = PBXFileReference ; fileEncoding = 4 ; lastKnownFileType = sourcecode . cpp . cpp ; path = Environment . cpp ; sourceTree = " < group > " ; } ; <nl> DF529BAD1741697B00523FB4 / * Environment . h * / = { isa = PBXFileReference ; fileEncoding = 4 ; lastKnownFileType = sourcecode . c . h ; path = Environment . h ; sourceTree = " < group > " ; } ; <nl> + DF54F7FC1B6580AC000FCBA4 / * ContextMenuItem . cpp * / = { isa = PBXFileReference ; fileEncoding = 4 ; lastKnownFileType = sourcecode . cpp . cpp ; path = ContextMenuItem . cpp ; sourceTree = " < group > " ; } ; <nl> + DF54F7FD1B6580AC000FCBA4 / * ContextMenuItem . h * / = { isa = PBXFileReference ; fileEncoding = 4 ; lastKnownFileType = sourcecode . c . h ; path = ContextMenuItem . h ; sourceTree = " < group > " ; } ; <nl> + DF54F8011B6580C8000FCBA4 / * ContextMenuAddon . cpp * / = { isa = PBXFileReference ; fileEncoding = 4 ; lastKnownFileType = sourcecode . cpp . cpp ; path = ContextMenuAddon . cpp ; sourceTree = " < group > " ; } ; <nl> + DF54F8021B6580C8000FCBA4 / * ContextMenuAddon . h * / = { isa = PBXFileReference ; fileEncoding = 4 ; lastKnownFileType = sourcecode . c . h ; path = ContextMenuAddon . h ; sourceTree = " < group > " ; } ; <nl> DF56EF1D1A798A3F00CAAEFB / * HTTPFileHandler . cpp * / = { isa = PBXFileReference ; fileEncoding = 4 ; lastKnownFileType = sourcecode . cpp . cpp ; path = HTTPFileHandler . cpp ; sourceTree = " < group > " ; } ; <nl> DF56EF1E1A798A3F00CAAEFB / * HTTPFileHandler . h * / = { isa = PBXFileReference ; fileEncoding = 4 ; lastKnownFileType = sourcecode . c . h ; path = HTTPFileHandler . 
h ; sourceTree = " < group > " ; } ; <nl> DF56EF221A798A5E00CAAEFB / * HttpRangeUtils . cpp * / = { isa = PBXFileReference ; fileEncoding = 4 ; lastKnownFileType = sourcecode . cpp . cpp ; path = HttpRangeUtils . cpp ; sourceTree = " < group > " ; } ; <nl> <nl> 395C2A231AA4C32100EBC7AD / * AudioDecoder . h * / , <nl> 7CF34D9D1930264A00D543C5 / * AudioEncoder . cpp * / , <nl> 7CF34D9E1930264A00D543C5 / * AudioEncoder . h * / , <nl> - 395C2A1D1A9F96A700EBC7AD / * ContextItemAddon . cpp * / , <nl> - 395C2A1E1A9F96A700EBC7AD / * ContextItemAddon . h * / , <nl> + DF54F8011B6580C8000FCBA4 / * ContextMenuAddon . cpp * / , <nl> + DF54F8021B6580C8000FCBA4 / * ContextMenuAddon . h * / , <nl> 18B49FF61152BFA5001AF8A6 / * DllAddon . h * / , <nl> 18B7C38612942090009E7A26 / * GUIDialogAddonInfo . cpp * / , <nl> 18B7C38712942090009E7A26 / * GUIDialogAddonInfo . h * / , <nl> <nl> E38E14720D25F9F900618676 / * BackgroundInfoLoader . cpp * / , <nl> E38E14730D25F9F900618676 / * BackgroundInfoLoader . h * / , <nl> F5B413131065900C0035D105 / * config . h * / , <nl> + DF54F7FC1B6580AC000FCBA4 / * ContextMenuItem . cpp * / , <nl> + DF54F7FD1B6580AC000FCBA4 / * ContextMenuItem . h * / , <nl> 395C29F91A9CD20C00EBC7AD / * ContextMenuManager . cpp * / , <nl> 395C29FA1A9CD20C00EBC7AD / * ContextMenuManager . h * / , <nl> E38E167E0D25F9FA00618676 / * CueDocument . cpp * / , <nl> <nl> E38E20510D25F9FD00618676 / * PluginDirectory . cpp in Sources * / , <nl> E38E20520D25F9FD00618676 / * RarDirectory . cpp in Sources * / , <nl> E38E20530D25F9FD00618676 / * RarManager . cpp in Sources * / , <nl> + DF54F7FE1B6580AD000FCBA4 / * ContextMenuItem . cpp in Sources * / , <nl> 395C29C51A98A0E100EBC7AD / * ILanguageInvoker . cpp in Sources * / , <nl> E38E20580D25F9FD00618676 / * SmartPlaylistDirectory . cpp in Sources * / , <nl> E38E205B0D25F9FD00618676 / * StackDirectory . cpp in Sources * / , <nl> <nl> C84828C8156CFCD8005A996F / * GUIDialogPVRChannelManager . cpp in Sources * / , <nl> C84828C9156CFCD8005A996F / * GUIDialogPVRChannelsOSD . cpp in Sources * / , <nl> C84828CC156CFCD8005A996F / * GUIDialogPVRGroupManager . cpp in Sources * / , <nl> + DF54F8031B6580C8000FCBA4 / * ContextMenuAddon . cpp in Sources * / , <nl> C84828CD156CFCD8005A996F / * GUIDialogPVRGuideInfo . cpp in Sources * / , <nl> C84828CE156CFCD8005A996F / * GUIDialogPVRGuideOSD . cpp in Sources * / , <nl> C84828CF156CFCD8005A996F / * GUIDialogPVRGuideSearch . cpp in Sources * / , <nl> <nl> 7C1409A9184015C9009F9411 / * InfoExpression . cpp in Sources * / , <nl> AE32174218313ADF0003FAFC / * XSLTUtils . cpp in Sources * / , <nl> 7C15DCBC1892481400FCE564 / * InfoBool . cpp in Sources * / , <nl> - 395C2A1F1A9F96A700EBC7AD / * ContextItemAddon . cpp in Sources * / , <nl> F5CC228B1814F7E9006B5E91 / * AESinkDARWINOSX . cpp in Sources * / , <nl> F5CC22EB1814FF3B006B5E91 / * ActiveAE . cpp in Sources * / , <nl> F5CC22EC1814FF3B006B5E91 / * ActiveAEBuffer . cpp in Sources * / , <nl> <nl> DFF0F28217528350002DA3A4 / * GUIControlFactory . cpp in Sources * / , <nl> DFF0F28317528350002DA3A4 / * GUIControlGroup . cpp in Sources * / , <nl> DFF0F28417528350002DA3A4 / * GUIControlGroupList . cpp in Sources * / , <nl> + DF54F8001B6580AD000FCBA4 / * ContextMenuItem . cpp in Sources * / , <nl> DFF0F28517528350002DA3A4 / * GUIControlProfiler . cpp in Sources * / , <nl> DFF0F28617528350002DA3A4 / * GUIDialog . cpp in Sources * / , <nl> DFF0F28717528350002DA3A4 / * GUIEditControl . cpp in Sources * / , <nl> <nl> DFF0F32417528350002DA3A4 / * HTTPVfsHandler . 
cpp in Sources * / , <nl> DFF0F32517528350002DA3A4 / * HTTPWebinterfaceAddonsHandler . cpp in Sources * / , <nl> DFF0F32617528350002DA3A4 / * HTTPWebinterfaceHandler . cpp in Sources * / , <nl> - 395C2A211A9F96A700EBC7AD / * ContextItemAddon . cpp in Sources * / , <nl> DFF0F32717528350002DA3A4 / * IHTTPRequestHandler . cpp in Sources * / , <nl> DFF0F32817528350002DA3A4 / * NetworkLinux . cpp in Sources * / , <nl> DFF0F32917528350002DA3A4 / * ZeroconfBrowserOSX . cpp in Sources * / , <nl> <nl> DFF0F36117528350002DA3A4 / * PlayListM3U . cpp in Sources * / , <nl> DF4BF0191A4EF31F0053AC56 / * cc_decoder . c in Sources * / , <nl> DFF0F36217528350002DA3A4 / * PlayListPLS . cpp in Sources * / , <nl> + DF54F8051B6580C8000FCBA4 / * ContextMenuAddon . cpp in Sources * / , <nl> DFF0F36317528350002DA3A4 / * PlayListURL . cpp in Sources * / , <nl> DFF0F36417528350002DA3A4 / * PlayListWPL . cpp in Sources * / , <nl> DFEA4B5B1B52721300562321 / * GUIDialogAudioDSPManager . cpp in Sources * / , <nl> <nl> E49912C9174E5DA000741B6D / * DirectoryNodeRecentlyAddedMusicVideos . cpp in Sources * / , <nl> E49912CA174E5DA000741B6D / * DirectoryNodeRoot . cpp in Sources * / , <nl> E49912CB174E5DA000741B6D / * DirectoryNodeSeasons . cpp in Sources * / , <nl> - 395C2A201A9F96A700EBC7AD / * ContextItemAddon . cpp in Sources * / , <nl> E49912CC174E5DA000741B6D / * DirectoryNodeTitleMovies . cpp in Sources * / , <nl> E49912CD174E5DA000741B6D / * DirectoryNodeTitleMusicVideos . cpp in Sources * / , <nl> DFDE5D521AE5658200EE53AD / * PictureScalingAlgorithm . cpp in Sources * / , <nl> <nl> E499137C174E5F0E00741B6D / * GUIDialogKaraokeSongSelector . cpp in Sources * / , <nl> E499137D174E5F0E00741B6D / * GUIWindowKaraokeLyrics . cpp in Sources * / , <nl> E499137E174E5F0E00741B6D / * karaokelyrics . cpp in Sources * / , <nl> + DF54F8041B6580C8000FCBA4 / * ContextMenuAddon . cpp in Sources * / , <nl> E499137F174E5F0E00741B6D / * karaokelyricscdg . cpp in Sources * / , <nl> E4991380174E5F0E00741B6D / * karaokelyricsfactory . cpp in Sources * / , <nl> E4991381174E5F0E00741B6D / * karaokelyricsmanager . cpp in Sources * / , <nl> <nl> 7CCDA187192753E30074CF51 / * PltTaskManager . cpp in Sources * / , <nl> 7CCDA190192753E30074CF51 / * PltThreadTask . cpp in Sources * / , <nl> 7CCDA199192753E30074CF51 / * PltUPnP . cpp in Sources * / , <nl> + DF54F7FF1B6580AD000FCBA4 / * ContextMenuItem . cpp in Sources * / , <nl> 7CCDA1A2192753E30074CF51 / * PltMediaConnect . cpp in Sources * / , <nl> 7CCDA1AB192753E30074CF51 / * PltXbox360 . cpp in Sources * / , <nl> 7CCDA1B0192753E30074CF51 / * X_MS_MediaReceiverRegistrarSCPD . cpp in Sources * / , <nl>
|
[ osx / ios / atv2 ] - synced xcode project
|
xbmc/xbmc
|
b2dfa4ed125cdc8cd2ce602eadf99328ad99b302
|
2015-07-28T07:57:45Z
|
mmm a / test / cpp / api / optim . cpp <nl> ppp b / test / cpp / api / optim . cpp <nl> void check_exact_values ( <nl> } <nl> } <nl> <nl> + TEST ( OptimTest , OptimizerAccessors ) { <nl> + auto options = AdagradOptions ( 1 . 0 ) ; <nl> + std : : vector < torch : : Tensor > params ; <nl> + for ( size_t i = 0 ; i < 3 ; i + + ) { <nl> + params . push_back ( torch : : randn ( 10 ) ) ; <nl> + } <nl> + auto optimizer = Adagrad ( params , options ) ; <nl> + / / test for defaults ( ) method with non - const reference <nl> + auto & options_ = static_cast < AdagradOptions & > ( optimizer . defaults ( ) ) ; <nl> + ASSERT_TRUE ( options = = options_ ) ; <nl> + / / test for param_groups ( ) with non - const reference return <nl> + auto & params_groups = optimizer . param_groups ( ) ; <nl> + params_groups . push_back ( OptimizerParamGroup ( params ) ) ; <nl> + auto & params_1 = params_groups [ 1 ] . params ( ) ; <nl> + for ( size_t i = 0 ; i < params_1 . size ( ) ; i + + ) { <nl> + torch : : equal ( params [ i ] , params_1 [ i ] ) ; <nl> + } <nl> + <nl> + / / test for add_param_group ( ) when one or more params existing in another param_group <nl> + / / are passed in the new param group to be added <nl> + ASSERT_THROWS_WITH ( <nl> + optimizer . add_param_group ( OptimizerParamGroup ( params ) ) , " some parameters appear in more than one parameter group " ) ; <nl> + <nl> + / / test for state ( ) with non - const reference return <nl> + auto & state_ = static_cast < AdagradParamState & > ( * ( optimizer . state ( ) [ c10 : : guts : : to_string ( params_1 [ 0 ] . unsafeGetTensorImpl ( ) ) ] ) ) ; <nl> + state_ . step ( state_ . step ( ) + 1 ) ; <nl> + <nl> + const auto & optimizer_ = Adagrad ( params , options ) ; <nl> + optimizer_ . defaults ( ) ; <nl> + / / test for param_groups ( ) with const reference return <nl> + const auto & params_2 = optimizer_ . param_groups ( ) ; <nl> + / / test for state ( ) with const reference return <nl> + optimizer_ . state ( ) ; <nl> + } <nl> + <nl> TEST ( OptimTest , BasicInterface ) { <nl> struct MyOptimizer : Optimizer { <nl> using Optimizer : : Optimizer ; <nl> mmm a / test / cpp / api / serialize . cpp <nl> ppp b / test / cpp / api / serialize . cpp <nl> <nl> # include < gtest / gtest . h > <nl> <nl> # include < c10 / util / tempfile . h > <nl> + # include < c10 / util / flat_hash_map . h > <nl> <nl> # include < torch / torch . h > <nl> <nl> <nl> # include < string > <nl> # include < vector > <nl> <nl> + using namespace torch : : test ; <nl> using namespace torch : : nn ; <nl> + using namespace torch : : optim ; <nl> + using namespace torch : : serialize ; <nl> <nl> namespace { <nl> Sequential xor_model ( ) { <nl> torch : : Tensor save_and_load ( torch : : Tensor input ) { <nl> } <nl> } / / namespace <nl> <nl> + template < typename DerivedOptions > <nl> + void is_optimizer_param_group_equal ( const OptimizerParamGroup & lhs , const OptimizerParamGroup & rhs ) { <nl> + const auto & lhs_params = lhs . params ( ) ; <nl> + const auto & rhs_params = rhs . params ( ) ; <nl> + <nl> + ASSERT_TRUE ( lhs_params . size ( ) = = rhs_params . size ( ) ) ; <nl> + for ( size_t j = 0 ; j < lhs_params . size ( ) ; j + + ) { <nl> + ASSERT_TRUE ( torch : : equal ( lhs_params [ j ] , rhs_params [ j ] ) ) ; <nl> + } <nl> + ASSERT_TRUE ( static_cast < const DerivedOptions & > ( lhs . options ( ) ) = = static_cast < const DerivedOptions & > ( rhs . 
options ( ) ) ) ; <nl> + } <nl> + <nl> + template < typename DerivedOptimizerParamState > <nl> + void is_optimizer_state_equal ( <nl> + const ska : : flat_hash_map < std : : string , std : : unique_ptr < OptimizerParamState > > & lhs_state , <nl> + const ska : : flat_hash_map < std : : string , std : : unique_ptr < OptimizerParamState > > & rhs_state ) { <nl> + <nl> + ASSERT_TRUE ( lhs_state . size ( ) = = rhs_state . size ( ) ) ; <nl> + for ( const auto & value : lhs_state ) { <nl> + auto found = rhs_state . find ( value . first ) ; <nl> + ASSERT_TRUE ( found ! = rhs_state . end ( ) ) ; <nl> + const DerivedOptimizerParamState & lhs_curr_state = static_cast < const DerivedOptimizerParamState & > ( * ( value . second . get ( ) ) ) ; <nl> + const DerivedOptimizerParamState & rhs_curr_state = static_cast < const DerivedOptimizerParamState & > ( * ( found - > second . get ( ) ) ) ; <nl> + ASSERT_TRUE ( lhs_curr_state = = rhs_curr_state ) ; <nl> + } <nl> + } <nl> + <nl> + template < typename OptimizerClass , typename DerivedOptimizerOptions , typename DerivedOptimizerParamState > <nl> + void test_serialize_optimizer ( DerivedOptimizerOptions options ) { <nl> + auto model1 = Linear ( 5 , 2 ) ; <nl> + auto model2 = Linear ( 5 , 2 ) ; <nl> + auto model3 = Linear ( 5 , 2 ) ; <nl> + <nl> + / / Models 1 , 2 , 3 will have the same parameters . <nl> + auto model_tempfile = c10 : : make_tempfile ( ) ; <nl> + torch : : save ( model1 , model_tempfile . name ) ; <nl> + torch : : load ( model2 , model_tempfile . name ) ; <nl> + torch : : load ( model3 , model_tempfile . name ) ; <nl> + <nl> + auto param1 = model1 - > named_parameters ( ) ; <nl> + auto param2 = model2 - > named_parameters ( ) ; <nl> + auto param3 = model3 - > named_parameters ( ) ; <nl> + for ( const auto & p : param1 ) { <nl> + ASSERT_TRUE ( p - > allclose ( param2 [ p . key ( ) ] ) ) ; <nl> + ASSERT_TRUE ( param2 [ p . key ( ) ] . allclose ( param3 [ p . key ( ) ] ) ) ; <nl> + } <nl> + / / Make some optimizers <nl> + auto optim1 = OptimizerClass ( <nl> + { torch : : optim : : OptimizerParamGroup ( model1 - > parameters ( ) ) } , options ) ; <nl> + auto optim2 = OptimizerClass ( <nl> + model2 - > parameters ( ) , options ) ; <nl> + auto optim2_2 = OptimizerClass ( <nl> + model2 - > parameters ( ) , options ) ; <nl> + auto optim3 = OptimizerClass ( <nl> + model3 - > parameters ( ) , options ) ; <nl> + auto optim3_2 = OptimizerClass ( <nl> + model3 - > parameters ( ) , options ) ; <nl> + <nl> + auto x = torch : : ones ( { 10 , 5 } ) ; <nl> + <nl> + auto step = [ & x ] ( torch : : optim : : Optimizer & optimizer , Linear model ) { <nl> + optimizer . zero_grad ( ) ; <nl> + auto y = model - > forward ( x ) . sum ( ) ; <nl> + y . backward ( ) ; <nl> + optimizer . step ( ) ; <nl> + } ; <nl> + <nl> + / / Do 2 steps of model1 <nl> + step ( optim1 , model1 ) ; <nl> + step ( optim1 , model1 ) ; <nl> + <nl> + / / Do 2 steps of model 2 without saving the optimizer <nl> + step ( optim2 , model2 ) ; <nl> + step ( optim2_2 , model2 ) ; <nl> + <nl> + / / Do 2 steps of model 3 while saving the optimizer <nl> + step ( optim3 , model3 ) ; <nl> + <nl> + auto optim_tempfile = c10 : : make_tempfile ( ) ; <nl> + torch : : save ( optim3 , optim_tempfile . name ) ; <nl> + torch : : load ( optim3_2 , optim_tempfile . name ) ; <nl> + <nl> + auto & optim3_2_param_groups = optim3_2 . param_groups ( ) ; <nl> + auto & optim3_param_groups = optim3 . param_groups ( ) ; <nl> + auto & optim3_2_state = optim3_2 . state ( ) ; <nl> + auto & optim3_state = optim3 . 
state ( ) ; <nl> + <nl> + / / optim3_2 and optim1 should have param_groups and state of size 1 and 2 respectively <nl> + ASSERT_TRUE ( optim3_2_param_groups . size ( ) = = 1 ) ; <nl> + ASSERT_TRUE ( optim3_2_state . size ( ) = = 2 ) ; <nl> + <nl> + / / optim3_2 and optim1 should have param_groups and state of same size <nl> + ASSERT_TRUE ( optim3_2_param_groups . size ( ) = = optim3_param_groups . size ( ) ) ; <nl> + ASSERT_TRUE ( optim3_2_state . size ( ) = = optim3_state . size ( ) ) ; <nl> + <nl> + / / checking correctness of serialization logic for optimizer . param_groups_ and optimizer . state_ <nl> + for ( int i = 0 ; i < optim3_2_param_groups . size ( ) ; i + + ) { <nl> + is_optimizer_param_group_equal < DerivedOptimizerOptions > ( <nl> + optim3_2_param_groups [ i ] , optim3_param_groups [ i ] ) ; <nl> + is_optimizer_state_equal < DerivedOptimizerParamState > ( optim3_2_state , optim3_state ) ; <nl> + } <nl> + <nl> + step ( optim3_2 , model3 ) ; <nl> + param1 = model1 - > named_parameters ( ) ; <nl> + param2 = model2 - > named_parameters ( ) ; <nl> + param3 = model3 - > named_parameters ( ) ; <nl> + for ( const auto & p : param1 ) { <nl> + const auto & name = p . key ( ) ; <nl> + / / Model 1 and 3 should be the same <nl> + ASSERT_TRUE ( <nl> + param1 [ name ] . norm ( ) . item < float > ( ) = = param3 [ name ] . norm ( ) . item < float > ( ) ) ; <nl> + ASSERT_TRUE ( <nl> + param1 [ name ] . norm ( ) . item < float > ( ) ! = param2 [ name ] . norm ( ) . item < float > ( ) ) ; <nl> + } <nl> + } <nl> + <nl> + / / Utility function to save a vector of buffers . <nl> + template < typename BufferContainer > <nl> + void write_tensors_to_archive ( <nl> + torch : : serialize : : OutputArchive & archive , <nl> + const std : : string & key , <nl> + const BufferContainer & buffers ) { <nl> + archive . write ( <nl> + key + " / size " , torch : : tensor ( static_cast < int64_t > ( buffers . size ( ) ) ) ) ; <nl> + for ( size_t index = 0 ; index < buffers . size ( ) ; + + index ) { <nl> + archive . write ( <nl> + key + " / " + c10 : : to_string ( index ) , buffers [ index ] , / * is_buffer = * / true ) ; <nl> + } <nl> + } <nl> + <nl> + / / Utility function to save a vector of step buffers . <nl> + void write_step_buffers ( <nl> + torch : : serialize : : OutputArchive & archive , <nl> + const std : : string & key , <nl> + const std : : vector < int64_t > & steps ) { <nl> + std : : vector < torch : : Tensor > tensors ; <nl> + tensors . reserve ( steps . size ( ) ) ; <nl> + for ( const auto & step : steps ) { <nl> + tensors . push_back ( torch : : tensor ( static_cast < int64_t > ( step ) ) ) ; <nl> + } <nl> + write_tensors_to_archive ( archive , key , tensors ) ; <nl> + } <nl> + <nl> + # define OLD_SERIALIZATION_LOGIC_WARNING_CHECK ( funcname , optimizer , filename ) \ <nl> + { \ <nl> + std : : stringstream buffer ; \ <nl> + CerrRedirect cerr_redirect ( buffer . rdbuf ( ) ) ; \ <nl> + funcname ( optimizer , filename ) ; \ <nl> + ASSERT_EQ ( \ <nl> + count_substr_occurrences ( \ <nl> + buffer . str ( ) , \ <nl> + " old serialization " \ <nl> + ) , \ <nl> + 1 ) ; \ <nl> + } <nl> + <nl> + TEST ( SerializeTest , KeysFunc ) { <nl> + auto tempfile = c10 : : make_tempfile ( ) ; <nl> + torch : : serialize : : OutputArchive output_archive ; <nl> + for ( size_t i = 0 ; i < 3 ; i + + ) { <nl> + output_archive . write ( " element / " + c10 : : to_string ( i ) , c10 : : IValue ( static_cast < int64_t > ( i ) ) ) ; <nl> + } <nl> + output_archive . save_to ( tempfile . 
name ) ; <nl> + torch : : serialize : : InputArchive input_archive ; <nl> + input_archive . load_from ( tempfile . name ) ; <nl> + std : : vector < std : : string > keys = input_archive . keys ( ) ; <nl> + ASSERT_EQ ( keys . size ( ) , 3 ) ; <nl> + for ( size_t i = 0 ; i < keys . size ( ) ; i + + ) { <nl> + ASSERT_EQ ( keys [ i ] , " element / " + c10 : : to_string ( i ) ) ; <nl> + } <nl> + } <nl> + <nl> + TEST ( SerializeTest , TryReadFunc ) { <nl> + auto tempfile = c10 : : make_tempfile ( ) ; <nl> + torch : : serialize : : OutputArchive output_archive ; <nl> + for ( size_t i = 0 ; i < 3 ; i + + ) { <nl> + output_archive . write ( " element / " + c10 : : to_string ( i ) , c10 : : IValue ( static_cast < int64_t > ( i ) ) ) ; <nl> + } <nl> + output_archive . save_to ( tempfile . name ) ; <nl> + torch : : serialize : : InputArchive input_archive ; <nl> + input_archive . load_from ( tempfile . name ) ; <nl> + c10 : : IValue ivalue ; <nl> + ASSERT_FALSE ( input_archive . try_read ( " 1 " , ivalue ) ) ; <nl> + ASSERT_TRUE ( input_archive . try_read ( " element / 1 " , ivalue ) ) ; <nl> + ASSERT_EQ ( ivalue . toInt ( ) , 1 ) ; <nl> + } <nl> + <nl> TEST ( SerializeTest , Basic ) { <nl> torch : : manual_seed ( 0 ) ; <nl> <nl> TEST ( SerializeTest , Optim ) { <nl> } <nl> } <nl> <nl> + TEST ( SerializeTest , Optim_Adagrad ) { <nl> + test_serialize_optimizer < Adagrad , AdagradOptions , AdagradParamState > ( AdagradOptions ( 1e - 1 ) ) ; <nl> + <nl> + / / bc compatibility check <nl> + auto model1 = Linear ( 5 , 2 ) ; <nl> + auto optim1 = torch : : optim : : Adagrad ( <nl> + model1 - > parameters ( ) , torch : : optim : : AdagradOptions ( 1e - 1 ) ) ; <nl> + <nl> + auto x = torch : : ones ( { 10 , 5 } ) ; <nl> + auto step = [ & x ] ( torch : : optim : : Optimizer & optimizer , Linear model ) { <nl> + optimizer . zero_grad ( ) ; <nl> + auto y = model - > forward ( x ) . sum ( ) ; <nl> + y . backward ( ) ; <nl> + optimizer . step ( ) ; <nl> + } ; <nl> + step ( optim1 , model1 ) ; <nl> + auto optim1_2 = Adagrad ( model1 - > parameters ( ) , torch : : optim : : AdagradOptions ( 1e - 1 ) ) ; <nl> + <nl> + / / fill up with optim1 sum_buffers <nl> + std : : vector < torch : : Tensor > sum_buffers ; <nl> + / / fill up with optim1 state_buffers <nl> + std : : vector < int64_t > step_buffers ; <nl> + const auto & params_ = optim1 . param_groups ( ) [ 0 ] . params ( ) ; <nl> + const auto & optim1_state = optim1 . state ( ) ; <nl> + for ( size_t i = 0 ; i < params_ . size ( ) ; i + + ) { <nl> + auto key_ = c10 : : guts : : to_string ( params_ [ i ] . unsafeGetTensorImpl ( ) ) ; <nl> + const AdagradParamState & curr_state_ = static_cast < const AdagradParamState & > ( * ( optim1_state . at ( key_ ) . get ( ) ) ) ; <nl> + sum_buffers . emplace_back ( curr_state_ . sum ( ) ) ; <nl> + step_buffers . emplace_back ( curr_state_ . step ( ) ) ; <nl> + } <nl> + / / write sum_buffers and step_buffers to the file <nl> + auto optim_tempfile_old_format = c10 : : make_tempfile ( ) ; <nl> + torch : : serialize : : OutputArchive output_archive ; <nl> + write_tensors_to_archive ( output_archive , " sum_buffers " , sum_buffers ) ; <nl> + write_step_buffers ( output_archive , " step_buffers " , step_buffers ) ; <nl> + output_archive . save_to ( optim_tempfile_old_format . name ) ; <nl> + OLD_SERIALIZATION_LOGIC_WARNING_CHECK ( torch : : load , optim1_2 , optim_tempfile_old_format . name ) ; <nl> + is_optimizer_state_equal < AdagradParamState > ( optim1 . state ( ) , optim1_2 . 
state ( ) ) ; <nl> + } <nl> + <nl> TEST ( SerializeTest , SerializationShouldPreserveIteration_SGD ) { <nl> std : : vector < torch : : Tensor > parameters = { <nl> torch : : randn ( { 2 , 2 } ) , torch : : randn ( { 3 , 3 } ) } ; <nl> mmm a / torch / csrc / api / include / torch / optim / adagrad . h <nl> ppp b / torch / csrc / api / include / torch / optim / adagrad . h <nl> <nl> # include < torch / nn / pimpl . h > <nl> # include < torch / optim / optimizer . h > <nl> # include < torch / optim / serialize . h > <nl> + # include < torch / serialize / archive . h > <nl> # include < torch / types . h > <nl> <nl> # include < utility > <nl> class InputArchive ; <nl> namespace torch { <nl> namespace optim { <nl> <nl> - struct TORCH_API AdagradOptions { <nl> - AdagradOptions ( double learning_rate ) ; <nl> - TORCH_ARG ( double , learning_rate ) ; <nl> + struct TORCH_API AdagradOptions : public OptimizerCloneableOptions < AdagradOptions > { <nl> + AdagradOptions ( double lr = 1e - 2 ) ; <nl> + TORCH_ARG ( double , lr ) = 1e - 2 ; <nl> TORCH_ARG ( double , lr_decay ) = 0 ; <nl> TORCH_ARG ( double , weight_decay ) = 0 ; <nl> + TORCH_ARG ( double , initial_accumulator_value ) = 0 ; <nl> + TORCH_ARG ( double , eps ) = 1e - 10 ; <nl> + public : <nl> + void serialize ( torch : : serialize : : InputArchive & archive ) override ; <nl> + void serialize ( torch : : serialize : : OutputArchive & archive ) const override ; <nl> + TORCH_API friend bool operator = = ( const AdagradOptions & lhs , const AdagradOptions & rhs ) ; <nl> + ~ AdagradOptions ( ) = default ; <nl> + } ; <nl> + <nl> + struct TORCH_API AdagradParamState : public OptimizerCloneableParamState < AdagradParamState > { <nl> + TORCH_ARG ( torch : : Tensor , sum ) ; <nl> + TORCH_ARG ( int64_t , step ) ; <nl> + <nl> + public : <nl> + void serialize ( torch : : serialize : : InputArchive & archive ) override ; <nl> + void serialize ( torch : : serialize : : OutputArchive & archive ) const override ; <nl> + TORCH_API friend bool operator = = ( const AdagradParamState & lhs , const AdagradParamState & rhs ) ; <nl> + ~ AdagradParamState ( ) = default ; <nl> } ; <nl> <nl> class TORCH_API Adagrad : public Optimizer { <nl> public : <nl> - template < typename ParameterContainer > <nl> + explicit Adagrad ( std : : vector < OptimizerParamGroup > param_groups , <nl> + AdagradOptions defaults ) : Optimizer ( std : : move ( param_groups ) , std : : make_unique < AdagradOptions > ( defaults ) ) { <nl> + TORCH_CHECK ( defaults . lr ( ) > = 0 , " Invalid learning rate : " , defaults . lr ( ) ) ; <nl> + TORCH_CHECK ( defaults . lr_decay ( ) > = 0 , " Invalid lr_decay value : " , defaults . lr_decay ( ) ) ; <nl> + TORCH_CHECK ( defaults . weight_decay ( ) > = 0 , " Invalid weight_decay value : " , defaults . weight_decay ( ) ) ; <nl> + TORCH_CHECK ( defaults . initial_accumulator_value ( ) > = 0 , " Invalid initial_accumulator_value value : " , defaults . initial_accumulator_value ( ) ) ; <nl> + TORCH_CHECK ( defaults . eps ( ) > = 0 , " Invalid epsilon value : " , defaults . eps ( ) ) ; <nl> + <nl> + for ( const auto & group : param_groups_ ) { <nl> + for ( const auto & p : group . params ( ) ) { <nl> + auto state = std : : make_unique < AdagradParamState > ( ) ; <nl> + state - > step ( 0 ) ; <nl> + state - > sum ( torch : : full_like ( p . data ( ) , defaults . initial_accumulator_value ( ) , at : : MemoryFormat : : Preserve ) ) ; <nl> + state_ [ c10 : : guts : : to_string ( p . 
unsafeGetTensorImpl ( ) ) ] = std : : move ( state ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> explicit Adagrad ( <nl> - ParameterContainer & & parameters , <nl> - const AdagradOptions & options_ ) <nl> - : Optimizer ( std : : forward < ParameterContainer > ( parameters ) ) , <nl> - options ( options_ ) { } <nl> + std : : vector < Tensor > params , <nl> + AdagradOptions defaults ) : Adagrad ( { std : : move ( OptimizerParamGroup ( params ) ) } , defaults ) { } <nl> <nl> void step ( ) override ; <nl> <nl> - AdagradOptions options ; <nl> + / / / Adds the given vector of parameters to the optimizer ' s parameter list . <nl> + void add_parameters ( const std : : vector < Tensor > & parameters ) override ; <nl> + <nl> + / / / Provides a const reference to the parameters this optimizer holds . <nl> + const std : : vector < Tensor > & parameters ( ) const noexcept override ; <nl> + <nl> + / / / Provides a reference to the parameters this optimizer holds . <nl> + std : : vector < Tensor > & parameters ( ) noexcept override ; <nl> + <nl> + / / / Returns the number of parameters referenced by the optimizer . <nl> + size_t size ( ) const noexcept override ; <nl> <nl> void save ( serialize : : OutputArchive & archive ) const override ; <nl> void load ( serialize : : InputArchive & archive ) override ; <nl> <nl> - std : : vector < Tensor > sum_buffers ; <nl> - std : : vector < int64_t > step_buffers ; <nl> - <nl> private : <nl> - Adagrad ( ) : options ( 0 ) { } <nl> - <nl> template < typename Self , typename Archive > <nl> static void serialize ( Self & self , Archive & archive ) { <nl> - _TORCH_OPTIM_SERIALIZE ( sum_buffers ) ; <nl> - _TORCH_OPTIM_SERIALIZE ( step_buffers ) ; <nl> + _TORCH_OPTIM_SERIALIZE_WITH_TEMPLATE_ARG ( Adagrad ) ; <nl> } <nl> } ; <nl> } / / namespace optim <nl> mmm a / torch / csrc / api / include / torch / optim / optimizer . h <nl> ppp b / torch / csrc / api / include / torch / optim / optimizer . h <nl> <nl> # pragma once <nl> <nl> + # include < ATen / Tensor . h > <nl> + # include < c10 / util / flat_hash_map . h > <nl> + # include < c10 / util / Exception . h > <nl> + <nl> # include < torch / csrc / WindowsTorchApiMacro . 
h > <nl> <nl> # include < algorithm > <nl> class InputArchive ; <nl> <nl> namespace torch { <nl> namespace optim { <nl> + <nl> + class TORCH_API OptimizerParamState { <nl> + public : <nl> + virtual std : : unique_ptr < OptimizerParamState > clone ( ) const ; <nl> + virtual void serialize ( torch : : serialize : : InputArchive & archive ) ; <nl> + virtual void serialize ( torch : : serialize : : OutputArchive & archive ) const ; <nl> + virtual ~ OptimizerParamState ( ) = default ; <nl> + } ; <nl> + <nl> + template < typename Derived > <nl> + class TORCH_API OptimizerCloneableParamState : public OptimizerParamState { <nl> + std : : unique_ptr < OptimizerParamState > clone ( ) const override { <nl> + return std : : make_unique < Derived > ( static_cast < const Derived & > ( * this ) ) ; <nl> + } <nl> + } ; <nl> + <nl> + class TORCH_API OptimizerOptions { <nl> + public : <nl> + virtual std : : unique_ptr < OptimizerOptions > clone ( ) const ; <nl> + virtual void serialize ( torch : : serialize : : InputArchive & archive ) ; <nl> + virtual void serialize ( torch : : serialize : : OutputArchive & archive ) const ; <nl> + virtual ~ OptimizerOptions ( ) = default ; <nl> + } ; <nl> + <nl> + template < typename Derived > <nl> + class TORCH_API OptimizerCloneableOptions : public OptimizerOptions { <nl> + std : : unique_ptr < OptimizerOptions > clone ( ) const override { <nl> + return std : : make_unique < Derived > ( static_cast < const Derived & > ( * this ) ) ; <nl> + } <nl> + } ; <nl> + <nl> + / / / Stores parameters in the param_group and stores a pointer to the OptimizerOptions <nl> + class TORCH_API OptimizerParamGroup { <nl> + public : <nl> + / / NOTE : In order to store ` OptimizerParamGroup ` in a ` std : : vector ` , it has to be copy - constructible . <nl> + OptimizerParamGroup ( const OptimizerParamGroup & param_group ) : params_ ( param_group . params ( ) ) , options_ ( param_group . has_options ( ) ? param_group . options ( ) . clone ( ) : nullptr ) { } <nl> + OptimizerParamGroup ( std : : vector < Tensor > params ) : params_ ( std : : move ( params ) ) { } <nl> + OptimizerParamGroup ( std : : vector < Tensor > params , std : : unique_ptr < OptimizerOptions > options ) : params_ ( std : : move ( params ) ) , options_ ( std : : move ( options ) ) { } <nl> + <nl> + bool has_options ( ) const ; <nl> + OptimizerOptions & options ( ) ; <nl> + const OptimizerOptions & options ( ) const ; <nl> + void set_options ( std : : unique_ptr < OptimizerOptions > options ) ; <nl> + std : : vector < Tensor > & params ( ) ; <nl> + const std : : vector < Tensor > & params ( ) const ; <nl> + <nl> + protected : <nl> + std : : vector < Tensor > params_ ; <nl> + std : : unique_ptr < OptimizerOptions > options_ ; <nl> + } ; <nl> + <nl> namespace detail { <nl> + <nl> / / / Base class for all optimizers , that does not yet define a ` step ( ) ` <nl> / / / mechanism . All it specifies is that optimizers must be supplied with a <nl> / / / vector of parameters . It also defines certain methods that all optimizers <nl> / / / shall have , such as ` zero_grad ` . <nl> class TORCH_API OptimizerBase { <nl> public : <nl> + / / The copy constructor is deleted , because the user should use the <nl> + / / ` state_dict ` / ` load_state_dict ` API to copy an optimizer instead . <nl> + OptimizerBase ( const OptimizerBase & optimizer_base ) = delete ; <nl> + OptimizerBase ( OptimizerBase & & optimizer_base ) = default ; <nl> + <nl> / / / Constructs the ` Optimizer ` from a vector of parameters . 
<nl> explicit OptimizerBase ( std : : vector < Tensor > parameters ) ; <nl> <nl> + explicit OptimizerBase ( std : : vector < OptimizerParamGroup > param_groups , std : : unique_ptr < OptimizerOptions > defaults ) : defaults_ ( std : : move ( defaults ) ) { <nl> + for ( const auto & param_group : param_groups ) { <nl> + add_param_group ( param_group ) ; <nl> + } <nl> + } <nl> + <nl> + / / / Adds the given param_group to the optimizer ' s param_group list . <nl> + void add_param_group ( const OptimizerParamGroup & param_group ) ; <nl> + <nl> virtual ~ OptimizerBase ( ) = default ; <nl> <nl> + / / TODO : when all optimizers use the new design , we can devirtualize some of the following methods <nl> + / / such as add_parameters ( ) / parameters ( ) / size ( ) <nl> + <nl> / / / Adds the given vector of parameters to the optimizer ' s parameter list . <nl> - void add_parameters ( const std : : vector < Tensor > & parameters ) ; <nl> + virtual void add_parameters ( const std : : vector < Tensor > & parameters ) ; <nl> <nl> / / / Zeros out the gradients of all parameters . <nl> virtual void zero_grad ( ) ; <nl> <nl> / / / Provides a const reference to the parameters this optimizer holds . <nl> - const std : : vector < Tensor > & parameters ( ) const noexcept ; <nl> + virtual const std : : vector < Tensor > & parameters ( ) const noexcept ; <nl> <nl> / / / Provides a reference to the parameters this optimizer holds . <nl> - std : : vector < Tensor > & parameters ( ) noexcept ; <nl> + virtual std : : vector < Tensor > & parameters ( ) noexcept ; <nl> <nl> / / / Returns the number of parameters referenced by the optimizer . <nl> - size_t size ( ) const noexcept ; <nl> + virtual size_t size ( ) const noexcept ; <nl> + <nl> + OptimizerOptions & defaults ( ) noexcept ; <nl> + <nl> + const OptimizerOptions & defaults ( ) const noexcept ; <nl> + <nl> + / / / Provides a reference to the param_groups this optimizer holds . <nl> + std : : vector < OptimizerParamGroup > & param_groups ( ) noexcept ; <nl> + <nl> + / / / Provides a const reference to the param_groups this optimizer holds . <nl> + const std : : vector < OptimizerParamGroup > & param_groups ( ) const noexcept ; <nl> + <nl> + / / / Provides a reference to the state this optimizer holds <nl> + ska : : flat_hash_map < std : : string , std : : unique_ptr < OptimizerParamState > > & state ( ) noexcept ; <nl> + <nl> + / / / Provides a const reference to the state this optimizer holds <nl> + const ska : : flat_hash_map < std : : string , std : : unique_ptr < OptimizerParamState > > & state ( ) const noexcept ; <nl> <nl> / / / Serializes the optimizer state into the given ` archive ` . <nl> virtual void save ( serialize : : OutputArchive & archive ) const ; <nl> class TORCH_API OptimizerBase { <nl> virtual void load ( serialize : : InputArchive & archive ) ; <nl> <nl> protected : <nl> - OptimizerBase ( ) = default ; <nl> + std : : vector < OptimizerParamGroup > param_groups_ ; <nl> + ska : : flat_hash_map < std : : string , std : : unique_ptr < OptimizerParamState > > state_ ; <nl> + std : : unique_ptr < OptimizerOptions > defaults_ ; <nl> + OptimizerBase ( ) = default ; <nl> <nl> / / / Accesses a buffer at the given index . <nl> / / / Additionally , zeros out the buffers when this is called on the index <nl> mmm a / torch / csrc / api / include / torch / optim / serialize . h <nl> ppp b / torch / csrc / api / include / torch / optim / serialize . h <nl> <nl> <nl> # include < torch / serialize / archive . h > <nl> # include < torch / types . 
h > <nl> - <nl> + # include < torch / optim / optimizer . h > <nl> # include < cstddef > <nl> # include < cstdint > <nl> # include < deque > <nl> <nl> <nl> namespace torch { <nl> namespace optim { <nl> + namespace detail { <nl> + / / Utility function to save state <nl> + template < typename DerivedOptimizerParamState > <nl> + void serialize ( <nl> + serialize : : OutputArchive & archive , <nl> + const ska : : flat_hash_map < std : : string , std : : unique_ptr < OptimizerParamState > > & state ) { <nl> + for ( const auto & item : state ) { <nl> + serialize : : OutputArchive param_state_archive ( archive . compilation_unit ( ) ) ; <nl> + std : : string tensorimpl_key = item . first ; <nl> + const DerivedOptimizerParamState & curr_state = static_cast < const DerivedOptimizerParamState & > ( * ( item . second . get ( ) ) ) ; <nl> + curr_state . serialize ( param_state_archive ) ; <nl> + archive . write ( tensorimpl_key , param_state_archive ) ; <nl> + } <nl> + } <nl> + <nl> + / / Utility function to load state <nl> + template < typename DerivedOptimizerParamState > <nl> + void serialize ( <nl> + serialize : : InputArchive & archive , <nl> + ska : : flat_hash_map < std : : string , std : : unique_ptr < OptimizerParamState > > & state ) { <nl> + std : : vector < std : : string > tensorimpl_keys = archive . keys ( ) ; <nl> + for ( const std : : string & tensorimpl_key : tensorimpl_keys ) { <nl> + serialize : : InputArchive param_state_archive ; <nl> + archive . read ( tensorimpl_key , param_state_archive ) ; <nl> + DerivedOptimizerParamState param_state ; <nl> + param_state . serialize ( param_state_archive ) ; <nl> + state [ tensorimpl_key ] = std : : make_unique < DerivedOptimizerParamState > ( param_state ) ; <nl> + } <nl> + } <nl> + <nl> + / / Utility function to save param_groups <nl> + template < typename DerivedOptimizerParamOptions > <nl> + void serialize ( <nl> + serialize : : OutputArchive & archive , <nl> + const std : : vector < OptimizerParamGroup > & param_groups ) { <nl> + archive . write ( " param_groups / size " , torch : : tensor ( static_cast < int64_t > ( param_groups . size ( ) ) ) ) ; <nl> + for ( size_t i = 0 ; i < param_groups . size ( ) ; i + + ) { <nl> + serialize : : OutputArchive param_group_archive ( archive . compilation_unit ( ) ) ; <nl> + std : : vector < Tensor > params = param_groups [ i ] . params ( ) ; <nl> + param_group_archive . write ( <nl> + " params / size " , torch : : tensor ( static_cast < int64_t > ( params . size ( ) ) ) ) ; <nl> + for ( size_t index = 0 ; index < params . size ( ) ; index + + ) { <nl> + param_group_archive . write ( <nl> + " params / " + c10 : : guts : : to_string ( index ) , IValue ( c10 : : guts : : to_string ( params [ index ] . unsafeGetTensorImpl ( ) ) ) ) ; <nl> + } <nl> + const DerivedOptimizerParamOptions & param_group_options = static_cast < const DerivedOptimizerParamOptions & > ( param_groups [ i ] . options ( ) ) ; <nl> + serialize : : OutputArchive param_group_options_archive ( param_group_archive . compilation_unit ( ) ) ; <nl> + param_group_options . serialize ( param_group_options_archive ) ; <nl> + param_group_archive . write ( " options " , param_group_options_archive ) ; <nl> + archive . 
write ( " param_groups / " + c10 : : guts : : to_string ( i ) , param_group_archive ) ; <nl> + } <nl> + } <nl> + <nl> + / / Utility function to load param_groups <nl> + / / We take as input vector of pair of string and unique_ptr to optimizer options so that we can retain the state <nl> + / / for each param by using the old tensor impl keys ( saved during serialization ) and map the new tensor impl keys to <nl> + / / the correct state for each param <nl> + template < typename DerivedOptimizerParamOptions > <nl> + void serialize ( <nl> + serialize : : InputArchive & archive , <nl> + std : : vector < std : : pair < std : : vector < std : : string > , std : : unique_ptr < OptimizerOptions > > > & param_groups ) { <nl> + torch : : Tensor param_groups_size_tensor ; <nl> + archive . read ( " param_groups / size " , param_groups_size_tensor ) ; <nl> + const int64_t param_groups_size = param_groups_size_tensor . item < int64_t > ( ) ; <nl> + for ( int64_t i = 0 ; i < param_groups_size ; i + + ) { <nl> + serialize : : InputArchive param_group_archive ; <nl> + archive . read ( " param_groups / " + c10 : : guts : : to_string ( i ) , param_group_archive ) ; <nl> + torch : : Tensor size_tensor ; <nl> + param_group_archive . read ( " params / size " , size_tensor ) ; <nl> + const int64_t size = size_tensor . item < int64_t > ( ) ; <nl> + std : : vector < std : : string > params ; <nl> + for ( int64_t index = 0 ; index < size ; + + index ) { <nl> + IValue ivalue ; <nl> + param_group_archive . read ( <nl> + " params / " + c10 : : to_string ( index ) , ivalue ) ; <nl> + std : : string element = ivalue . toStringRef ( ) ; <nl> + params . emplace_back ( element ) ; <nl> + } <nl> + serialize : : InputArchive param_group_options_archive ; <nl> + param_group_archive . read ( " options " , param_group_options_archive ) ; <nl> + DerivedOptimizerParamOptions param_group_options ; <nl> + param_group_options . serialize ( param_group_options_archive ) ; <nl> + param_groups . emplace_back ( std : : make_pair ( params , std : : make_unique < DerivedOptimizerParamOptions > ( param_group_options ) ) ) ; <nl> + } <nl> + } <nl> + } / / namespace detail <nl> + <nl> <nl> / / Note : These functions are all called ` serialize ( ) ` so they can be called <nl> / / inside a template where the archive type is a template type and can thus be <nl> void serialize ( <nl> const std : : string & key , <nl> std : : vector < int64_t > & steps ) ; <nl> <nl> + / / Utility function to save state and param_groups <nl> + template < typename DerivedOptimizerParamState , typename DerivedOptimizerParamOptions > <nl> + void serialize ( <nl> + serialize : : OutputArchive & archive , <nl> + const detail : : OptimizerBase & optimizer ) { <nl> + archive . write ( " pytorch_version " , IValue ( " 1 . 5 . 0 " ) ) ; <nl> + serialize : : OutputArchive state_archive ( archive . compilation_unit ( ) ) ; <nl> + detail : : serialize < DerivedOptimizerParamState > ( state_archive , optimizer . state ( ) ) ; <nl> + archive . write ( " state " , state_archive ) ; <nl> + <nl> + serialize : : OutputArchive param_groups_archive ( archive . compilation_unit ( ) ) ; <nl> + detail : : serialize < DerivedOptimizerParamOptions > ( param_groups_archive , optimizer . param_groups ( ) ) ; <nl> + archive . 
write ( " param_groups " , param_groups_archive ) ; <nl> + } <nl> + <nl> + / / Utility function to load state and param_groups and update state <nl> + template < typename DerivedOptimizerParamState , typename DerivedOptimizerParamOptions > <nl> + void serialize ( <nl> + serialize : : InputArchive & archive , <nl> + detail : : OptimizerBase & optimizer ) { <nl> + <nl> + IValue pytorch_version ; <nl> + archive . read ( " pytorch_version " , pytorch_version ) ; <nl> + TORCH_INTERNAL_ASSERT ( pytorch_version . toStringRef ( ) = = " 1 . 5 . 0 " ) ; <nl> + serialize : : InputArchive state_archive ; <nl> + archive . read ( " state " , state_archive ) ; <nl> + ska : : flat_hash_map < std : : string , std : : unique_ptr < OptimizerParamState > > saved_state ; <nl> + detail : : serialize < DerivedOptimizerParamState > ( state_archive , saved_state ) ; <nl> + <nl> + serialize : : InputArchive param_groups_archive ; <nl> + archive . read ( " param_groups " , param_groups_archive ) ; <nl> + std : : vector < std : : pair < std : : vector < std : : string > , std : : unique_ptr < OptimizerOptions > > > saved_param_groups ; <nl> + detail : : serialize < DerivedOptimizerParamOptions > ( param_groups_archive , saved_param_groups ) ; <nl> + <nl> + / / update state <nl> + TORCH_CHECK ( saved_param_groups . size ( ) = = optimizer . param_groups ( ) . size ( ) , " loaded state dict has a different number of parameter groups " ) ; <nl> + for ( size_t i = 0 ; i < saved_param_groups . size ( ) ; i + + ) { <nl> + std : : vector < std : : string > saved_group_keys = saved_param_groups [ i ] . first ; <nl> + std : : vector < Tensor > params = optimizer . param_groups ( ) [ i ] . params ( ) ; <nl> + TORCH_CHECK ( saved_group_keys . size ( ) = = params . size ( ) , " loaded state dict contains a parameter group that has a different size than the optimizer ' s parameter group " ) ; <nl> + for ( size_t idx = 0 ; idx < params . size ( ) ; idx + + ) { <nl> + optimizer . state ( ) [ c10 : : guts : : to_string ( params [ idx ] . unsafeGetTensorImpl ( ) ) ] = std : : move ( saved_state [ saved_group_keys [ idx ] ] ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> / / / Utility function to save a vector of buffers . <nl> template < typename BufferContainer > <nl> void serialize ( <nl> void serialize ( <nl> # define _TORCH_OPTIM_SERIALIZE ( name ) \ <nl> torch : : optim : : serialize ( archive , # name , self . name ) <nl> <nl> + # define _TORCH_OPTIM_SERIALIZE_WITH_TEMPLATE_ARG ( OptimizerName ) \ <nl> + torch : : optim : : serialize < OptimizerName # # ParamState , OptimizerName # # Options > ( archive , self ) <nl> + <nl> + # define _TORCH_OPTIM_SERIALIZE_TORCH_ARG ( name ) \ <nl> + archive . write ( # name , IValue ( name ( ) ) ) <nl> + <nl> + # define _TORCH_OPTIM_DESERIALIZE_TORCH_ARG ( T , name ) \ <nl> + { \ <nl> + c10 : : IValue ivalue ; \ <nl> + archive . read ( # name , ivalue ) ; \ <nl> + name ( ivalue . to < T > ( ) ) ; \ <nl> + } <nl> } / / namespace optim <nl> } / / namespace torch <nl> mmm a / torch / csrc / api / include / torch / serialize / input - archive . h <nl> ppp b / torch / csrc / api / include / torch / serialize / input - archive . h <nl> class TORCH_API InputArchive final { <nl> / / / Reads an ` IValue ` associated with a given ` key ` . <nl> void read ( const std : : string & key , c10 : : IValue & ivalue ) ; <nl> <nl> + / / / Reads an ` IValue ` associated with a given ` key ` . If there is no ` IValue ` <nl> + / / / associated with the ` key ` , this returns false , otherwise it returns true . 
<nl> + bool try_read ( const std : : string & key , c10 : : IValue & ivalue ) ; <nl> + <nl> / / / Reads a ` tensor ` associated with a given ` key ` . If there is no ` tensor ` <nl> / / / associated with the ` key ` , this returns false , otherwise it returns true . <nl> / / / If the tensor is expected to be a buffer ( not differentiable ) , ` is_buffer ` <nl> class TORCH_API InputArchive final { <nl> const std : : function < size_t ( void ) > & size_func , <nl> c10 : : optional < torch : : Device > device = c10 : : nullopt ) ; <nl> <nl> + / / Returns the vector of keys in the input archive . <nl> + std : : vector < std : : string > keys ( ) ; <nl> + <nl> / / / Forwards all arguments to ` read ( ) ` . <nl> / / / Useful for generic code that can be re - used for both ` InputArchive ` and <nl> / / / ` OutputArchive ` ( where ` operator ( ) ` forwards to ` write ( ) ` ) . <nl> mmm a / torch / csrc / api / src / optim / adagrad . cpp <nl> ppp b / torch / csrc / api / src / optim / adagrad . cpp <nl> <nl> # include < torch / csrc / autograd / variable . h > <nl> # include < torch / serialize / archive . h > <nl> # include < torch / utils . h > <nl> + # include < torch / optim / serialize . h > <nl> <nl> # include < ATen / ATen . h > <nl> <nl> <nl> namespace torch { <nl> namespace optim { <nl> <nl> - AdagradOptions : : AdagradOptions ( double learning_rate ) <nl> - : learning_rate_ ( learning_rate ) { } <nl> + AdagradOptions : : AdagradOptions ( double lr ) : lr_ ( lr ) { } <nl> + <nl> + bool operator = = ( const AdagradOptions & lhs , const AdagradOptions & rhs ) { <nl> + return ( lhs . lr ( ) = = rhs . lr ( ) ) & & <nl> + ( lhs . lr_decay ( ) = = rhs . lr_decay ( ) ) & & <nl> + ( lhs . weight_decay ( ) = = rhs . weight_decay ( ) ) & & <nl> + ( lhs . initial_accumulator_value ( ) = = rhs . initial_accumulator_value ( ) ) & & <nl> + ( lhs . eps ( ) = = rhs . eps ( ) ) ; <nl> + } <nl> + <nl> + void AdagradOptions : : serialize ( torch : : serialize : : OutputArchive & archive ) const { <nl> + _TORCH_OPTIM_SERIALIZE_TORCH_ARG ( lr ) ; <nl> + _TORCH_OPTIM_SERIALIZE_TORCH_ARG ( lr_decay ) ; <nl> + _TORCH_OPTIM_SERIALIZE_TORCH_ARG ( weight_decay ) ; <nl> + _TORCH_OPTIM_SERIALIZE_TORCH_ARG ( initial_accumulator_value ) ; <nl> + _TORCH_OPTIM_SERIALIZE_TORCH_ARG ( eps ) ; <nl> + } <nl> + <nl> + void AdagradOptions : : serialize ( torch : : serialize : : InputArchive & archive ) { <nl> + _TORCH_OPTIM_DESERIALIZE_TORCH_ARG ( double , lr ) ; <nl> + _TORCH_OPTIM_DESERIALIZE_TORCH_ARG ( double , lr_decay ) ; <nl> + _TORCH_OPTIM_DESERIALIZE_TORCH_ARG ( double , weight_decay ) ; <nl> + _TORCH_OPTIM_DESERIALIZE_TORCH_ARG ( double , initial_accumulator_value ) ; <nl> + _TORCH_OPTIM_DESERIALIZE_TORCH_ARG ( double , eps ) ; <nl> + } <nl> + <nl> + bool operator = = ( const AdagradParamState & lhs , const AdagradParamState & rhs ) { <nl> + return ( lhs . step ( ) = = rhs . step ( ) ) & & <nl> + torch : : equal ( lhs . sum ( ) , rhs . sum ( ) ) ; <nl> + } <nl> + <nl> + void AdagradParamState : : serialize ( torch : : serialize : : OutputArchive & archive ) const { <nl> + _TORCH_OPTIM_SERIALIZE_TORCH_ARG ( step ) ; <nl> + _TORCH_OPTIM_SERIALIZE_TORCH_ARG ( sum ) ; <nl> + } <nl> + <nl> + void AdagradParamState : : serialize ( torch : : serialize : : InputArchive & archive ) { <nl> + _TORCH_OPTIM_DESERIALIZE_TORCH_ARG ( int64_t , step ) ; <nl> + _TORCH_OPTIM_DESERIALIZE_TORCH_ARG ( Tensor , sum ) ; <nl> + } <nl> <nl> / / / Adapted from <nl> / / / https : / / github . 
com / pytorch / pytorch / blob / master / torch / optim / adagrad . py <nl> void Adagrad : : step ( ) { <nl> - for ( size_t i = 0 ; i < parameters_ . size ( ) ; + + i ) { <nl> - Tensor p = parameters_ . at ( i ) ; <nl> - if ( ! p . grad ( ) . defined ( ) ) { <nl> - continue ; <nl> - } <nl> + for ( auto & group : param_groups_ ) { <nl> + for ( auto & p : group . params ( ) ) { <nl> + if ( ! p . grad ( ) . defined ( ) ) { <nl> + continue ; <nl> + } <nl> + auto grad = p . grad ( ) . data ( ) ; <nl> + TORCH_INTERNAL_ASSERT ( state_ [ c10 : : guts : : to_string ( p . unsafeGetTensorImpl ( ) ) ] ! = nullptr , " state found NULL for the Tensor " , p ) ; <nl> + auto & state = static_cast < AdagradParamState & > ( * state_ [ c10 : : guts : : to_string ( p . unsafeGetTensorImpl ( ) ) ] ) ; <nl> + auto & options = static_cast < AdagradOptions & > ( group . options ( ) ) ; <nl> + <nl> + state . step ( state . step ( ) + 1 ) ; <nl> <nl> - if ( options . weight_decay ( ) > 0 ) { <nl> - NoGradGuard guard ; <nl> - p . grad ( ) = p . grad ( ) + options . weight_decay ( ) * p ; <nl> + if ( options . weight_decay ( ) ! = 0 ) { <nl> + TORCH_CHECK ( ! p . grad ( ) . data ( ) . is_sparse ( ) , " weight_decay option is not compatible with sparse gradients " ) ; <nl> + grad = grad . add ( p . data ( ) , options . weight_decay ( ) ) ; <nl> + } <nl> + const auto clr = options . lr ( ) / <nl> + ( 1 + static_cast < double > ( state . step ( ) - 1 ) * options . lr_decay ( ) ) ; <nl> + <nl> + if ( grad . is_sparse ( ) ) { <nl> + grad = grad . coalesce ( ) ; <nl> + auto grad_indices = grad . _indices ( ) ; <nl> + auto grad_values = grad . _values ( ) ; <nl> + auto size = grad . sizes ( ) ; <nl> + <nl> + auto make_sparse = [ & ] ( const Tensor & values ) - > Tensor { <nl> + if ( grad_indices . dim ( ) = = 0 | | values . dim ( ) = = 0 ) { <nl> + return torch : : empty ( { 0 } , grad . options ( ) ) . resize_as_ ( grad ) ; <nl> + } <nl> + return torch : : sparse_coo_tensor ( grad_indices , values , size , grad . options ( ) ) ; <nl> + } ; <nl> + state . sum ( state . sum ( ) . add_ ( make_sparse ( grad_values . pow ( 2 ) ) ) ) ; <nl> + auto std = state . sum ( ) . sparse_mask ( grad ) ; <nl> + const auto std_values = std . _values ( ) . sqrt_ ( ) . add_ ( options . eps ( ) ) ; <nl> + <nl> + p . data ( ) . add_ ( make_sparse ( grad_values / std_values ) , - clr ) ; <nl> + } <nl> + else { <nl> + state . sum ( state . sum ( ) . addcmul_ ( grad , grad , 1 . 0 ) ) ; <nl> + const auto std = state . sum ( ) . sqrt ( ) . add_ ( options . eps ( ) ) ; <nl> + p . data ( ) . addcdiv_ ( grad , std , - clr ) ; <nl> + } <nl> } <nl> + } <nl> + } <nl> <nl> - buffer_at ( step_buffers , i ) + = 1 . 0 ; <nl> - const auto clr = options . learning_rate ( ) / <nl> - ( 1 . 0 + ( buffer_at ( step_buffers , i ) - 1 . 0 ) * options . lr_decay ( ) ) ; <nl> + void Adagrad : : add_parameters ( const std : : vector < Tensor > & parameters ) { <nl> + param_groups_ . emplace_back ( OptimizerParamGroup ( parameters , defaults_ - > clone ( ) ) ) ; <nl> + } <nl> <nl> - auto & sum = buffer_at ( sum_buffers , i ) ; <nl> - sum . addcmul_ ( p . grad ( ) , p . grad ( ) , 1 . 0 ) ; <nl> - const auto std = buffer_at ( sum_buffers , i ) . sqrt ( ) . add_ ( 1e - 10 ) ; <nl> + const std : : vector < Tensor > & Adagrad : : parameters ( ) const noexcept { <nl> + return param_groups_ . at ( 0 ) . params ( ) ; <nl> + } <nl> <nl> - NoGradGuard guard ; <nl> - p . addcdiv_ ( p . 
grad ( ) , std , - clr ) ; <nl> + std : : vector < Tensor > & Adagrad : : parameters ( ) noexcept { <nl> + return param_groups_ . at ( 0 ) . params ( ) ; <nl> + } <nl> + <nl> + size_t Adagrad : : size ( ) const noexcept { <nl> + size_t count = 0 ; <nl> + for ( const auto & group : param_groups_ ) { <nl> + count + = group . params ( ) . size ( ) ; <nl> } <nl> + return count ; <nl> } <nl> <nl> void Adagrad : : save ( serialize : : OutputArchive & archive ) const { <nl> void Adagrad : : save ( serialize : : OutputArchive & archive ) const { <nl> } <nl> <nl> void Adagrad : : load ( serialize : : InputArchive & archive ) { <nl> - serialize ( * this , archive ) ; <nl> + IValue pytorch_version ; <nl> + if ( archive . try_read ( " pytorch_version " , pytorch_version ) ) { <nl> + serialize ( * this , archive ) ; <nl> + } <nl> + else { / / deserializing archives saved in old format ( prior to version 1 . 5 . 0 ) <nl> + TORCH_WARN ( <nl> + " Your serialized Adagrad optimizer is still using the old serialization format . " <nl> + " You should re - save your Adagrad optimizer to use the new serialization format . " ) ; <nl> + std : : vector < Tensor > sum_buffers ; <nl> + std : : vector < int64_t > step_buffers ; <nl> + torch : : optim : : serialize ( archive , " sum_buffers " , sum_buffers ) ; <nl> + torch : : optim : : serialize ( archive , " step_buffers " , step_buffers ) ; <nl> + / / since there were no param_groups prior to version 1 . 5 . 0 , assuming all tensors are now in one param_group <nl> + std : : vector < Tensor > params = param_groups_ . at ( 0 ) . params ( ) ; <nl> + for ( size_t idx = 0 ; idx < params . size ( ) ; idx + + ) { <nl> + auto state = std : : make_unique < AdagradParamState > ( ) ; <nl> + state - > step ( step_buffers [ idx ] ) ; <nl> + state - > sum ( sum_buffers [ idx ] ) ; <nl> + state_ [ c10 : : guts : : to_string ( params [ idx ] . unsafeGetTensorImpl ( ) ) ] = std : : move ( state ) ; <nl> + } <nl> + } <nl> } <nl> } / / namespace optim <nl> } / / namespace torch <nl> mmm a / torch / csrc / api / src / optim / optimizer . cpp <nl> ppp b / torch / csrc / api / src / optim / optimizer . cpp <nl> <nl> <nl> # include < torch / csrc / autograd / generated / variable_factories . h > <nl> # include < torch / ordered_dict . h > <nl> - # include < torch / serialize / archive . h > <nl> # include < torch / types . h > <nl> <nl> # include < string > <nl> <nl> <nl> namespace torch { <nl> namespace optim { <nl> + <nl> + bool OptimizerParamGroup : : has_options ( ) const { <nl> + return options_ ! = nullptr ; <nl> + } <nl> + <nl> + OptimizerOptions & OptimizerParamGroup : : options ( ) { <nl> + TORCH_CHECK ( has_options ( ) ) ; <nl> + return * options_ . get ( ) ; <nl> + } <nl> + <nl> + const OptimizerOptions & OptimizerParamGroup : : options ( ) const { <nl> + TORCH_CHECK ( has_options ( ) ) ; <nl> + return * options_ . get ( ) ; <nl> + } <nl> + <nl> + void OptimizerParamGroup : : set_options ( std : : unique_ptr < OptimizerOptions > options ) { <nl> + options_ = std : : move ( options ) ; <nl> + } <nl> + <nl> + std : : vector < Tensor > & OptimizerParamGroup : : params ( ) { <nl> + return params_ ; <nl> + } <nl> + <nl> + const std : : vector < Tensor > & OptimizerParamGroup : : params ( ) const { <nl> + return params_ ; <nl> + } <nl> + <nl> + std : : unique_ptr < OptimizerParamState > OptimizerParamState : : clone ( ) const { <nl> + TORCH_CHECK ( false , <nl> + " clone ( ) has not been implemented for torch : : optim : : OptimizerParamState . 
" , <nl> + " Subclass torch : : optim : : OptimizerCloneableParamState < YourOptimizerParamState > " , <nl> + " instead of torch : : optim : : OptimizerParamState to inherit the ability to clone . " ) ; <nl> + } <nl> + <nl> + void OptimizerParamState : : serialize ( torch : : serialize : : InputArchive & archive ) { <nl> + TORCH_CHECK ( false , <nl> + " void serialize ( torch : : serialize : : InputArchive & archive ) has not been implemented for torch : : optim : : OptimizerParamState . " , <nl> + " You must override it in your subclass of torch : : optim : : OptimizerCloneableParamState < YourOptimizerParamState > . " ) ; <nl> + } <nl> + <nl> + void OptimizerParamState : : serialize ( torch : : serialize : : OutputArchive & archive ) const { <nl> + TORCH_CHECK ( false , <nl> + " void serialize ( torch : : serialize : : OutputArchive & archive ) has not been implemented for torch : : optim : : OptimizerParamState . " , <nl> + " You must override it in your subclass of torch : : optim : : OptimizerCloneableParamState < YourOptimizerParamState > . " ) ; <nl> + } <nl> + <nl> + std : : unique_ptr < OptimizerOptions > OptimizerOptions : : clone ( ) const { <nl> + TORCH_CHECK ( false , <nl> + " clone ( ) has not been implemented for torch : : optim : : OptimizerOptions . " , <nl> + " Subclass torch : : optim : : OptimizerCloneableOptions < YourOptimizerOptions > " , <nl> + " instead of torch : : optim : : OptimizerOptions to inherit the ability to clone . " ) ; <nl> + } <nl> + <nl> + void OptimizerOptions : : serialize ( torch : : serialize : : InputArchive & archive ) { <nl> + TORCH_CHECK ( false , <nl> + " void serialize ( torch : : serialize : : InputArchive & archive ) has not been implemented for torch : : optim : : OptimizerOptions . " , <nl> + " You must override it in your subclass of torch : : optim : : OptimizerCloneableOptions < YourOptimizerOptions > . " ) ; <nl> + } <nl> + <nl> + void OptimizerOptions : : serialize ( torch : : serialize : : OutputArchive & archive ) const { <nl> + TORCH_CHECK ( false , <nl> + " void serialize ( torch : : serialize : : OutputArchive & archive ) has not been implemented for torch : : optim : : OptimizerOptions . " , <nl> + " You must override it in your subclass of torch : : optim : : OptimizerCloneableOptions < YourOptimizerOptions > . " ) ; <nl> + } <nl> + <nl> namespace detail { <nl> OptimizerBase : : OptimizerBase ( std : : vector < Tensor > parameters ) <nl> : parameters_ ( std : : move ( parameters ) ) { } <nl> <nl> + void OptimizerBase : : add_param_group ( const OptimizerParamGroup & param_group ) { <nl> + for ( const auto & param : param_group . params ( ) ) { <nl> + TORCH_CHECK ( param . is_leaf ( ) , " can ' t optimize a non - leaf Tensor " ) ; <nl> + } <nl> + OptimizerParamGroup param_group_ ( param_group . params ( ) ) ; <nl> + if ( ! param_group . has_options ( ) ) { <nl> + param_group_ . set_options ( defaults_ - > clone ( ) ) ; <nl> + } else { <nl> + param_group_ . set_options ( param_group . options ( ) . clone ( ) ) ; <nl> + } <nl> + for ( const auto & p : param_group_ . params ( ) ) { <nl> + TORCH_CHECK ( state_ . count ( c10 : : guts : : to_string ( p . unsafeGetTensorImpl ( ) ) ) = = 0 , <nl> + " some parameters appear in more than one parameter group " ) ; <nl> + } <nl> + param_groups_ . emplace_back ( std : : move ( param_group_ ) ) ; <nl> + } <nl> + <nl> void OptimizerBase : : add_parameters ( const std : : vector < Tensor > & parameters ) { <nl> parameters_ . insert ( parameters_ . end ( ) , parameters . 
begin ( ) , parameters . end ( ) ) ; <nl> } <nl> void OptimizerBase : : zero_grad ( ) { <nl> parameter . grad ( ) . zero_ ( ) ; <nl> } <nl> } <nl> + for ( auto & group : param_groups_ ) { <nl> + for ( auto & p : group . params ( ) ) { <nl> + if ( p . grad ( ) . defined ( ) ) { <nl> + p . grad ( ) . detach_ ( ) ; <nl> + p . grad ( ) . zero_ ( ) ; <nl> + } <nl> + } <nl> + } <nl> } <nl> <nl> const std : : vector < Tensor > & OptimizerBase : : parameters ( ) const noexcept { <nl> size_t OptimizerBase : : size ( ) const noexcept { <nl> return parameters_ . size ( ) ; <nl> } <nl> <nl> + OptimizerOptions & OptimizerBase : : defaults ( ) noexcept { <nl> + return * defaults_ . get ( ) ; <nl> + } <nl> + <nl> + const OptimizerOptions & OptimizerBase : : defaults ( ) const noexcept { <nl> + return * defaults_ . get ( ) ; <nl> + } <nl> + <nl> + std : : vector < OptimizerParamGroup > & OptimizerBase : : param_groups ( ) noexcept { <nl> + return param_groups_ ; <nl> + } <nl> + <nl> + const std : : vector < OptimizerParamGroup > & OptimizerBase : : param_groups ( ) const noexcept { <nl> + return param_groups_ ; <nl> + } <nl> + <nl> + ska : : flat_hash_map < std : : string , std : : unique_ptr < OptimizerParamState > > & OptimizerBase : : state ( ) noexcept { <nl> + return state_ ; <nl> + } <nl> + <nl> + const ska : : flat_hash_map < std : : string , std : : unique_ptr < OptimizerParamState > > & OptimizerBase : : state ( ) const noexcept { <nl> + return state_ ; <nl> + } <nl> + <nl> Tensor & OptimizerBase : : buffer_at ( std : : vector < Tensor > & buffers , size_t index ) { <nl> if ( buffers . size ( ) < = index ) { <nl> buffers . reserve ( index ) ; <nl> for ( auto i = buffers . size ( ) ; i < = index ; + + i ) { <nl> - buffers . push_back ( torch : : zeros_like ( parameters_ . at ( i ) ) ) ; <nl> + buffers . emplace_back ( torch : : zeros_like ( parameters_ . at ( i ) ) ) ; <nl> } <nl> } <nl> / / Copy the buffer to the device and dtype of the parameter . <nl> mmm a / torch / csrc / api / src / serialize / input - archive . cpp <nl> ppp b / torch / csrc / api / src / serialize / input - archive . cpp <nl> void InputArchive : : read ( const std : : string & key , c10 : : IValue & ivalue ) { <nl> ivalue = module_ . attr ( key ) ; <nl> } <nl> <nl> + bool InputArchive : : try_read ( <nl> + const std : : string & key , <nl> + c10 : : IValue & ivalue ) { <nl> + if ( ! module_ . hasattr ( key ) ) { <nl> + return false ; <nl> + } <nl> + ivalue = module_ . attr ( key ) ; <nl> + return true ; <nl> + } <nl> + <nl> bool InputArchive : : try_read ( <nl> const std : : string & key , <nl> Tensor & tensor , <nl> void InputArchive : : load_from ( <nl> module_ = torch : : jit : : load ( std : : move ( adapter ) , std : : move ( device ) ) ; <nl> } <nl> <nl> + std : : vector < std : : string > InputArchive : : keys ( ) { <nl> + std : : vector < std : : string > all_keys ; <nl> + all_keys . reserve ( module_ . named_attributes ( / * recurse = * / false ) . size ( ) ) ; <nl> + <nl> + for ( const torch : : jit : : script : : NameValue & s : module_ . named_attributes ( / * recurse = * / false ) ) { <nl> + all_keys . push_back ( s . name ) ; <nl> + } <nl> + <nl> + return all_keys ; <nl> + } <nl> + <nl> } / / namespace serialize <nl> } / / namespace torch <nl>
|
Adagrad optimizer - updated step function , added param_groups , state to optimizers
|
pytorch/pytorch
|
be6ffac1b61853ed857147a8be1a87d731160386
|
2020-01-21T22:41:12Z
|
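For readers of the Adagrad commit recorded above, the following is a minimal, hypothetical C++ sketch of how the reworked optimizer API could be exercised end to end. It assumes the pieces visible in the diff (AdagradOptions(double lr), param_groups(), and the archive-based save()/load()), plus a constructor taking a vector of tensors and options; it is an illustration written for this document, not code from the pytorch/pytorch repository.

#include <torch/torch.h>
#include <iostream>
#include <vector>

int main() {
  // One leaf tensor to optimize (the commit rejects non-leaf tensors in param groups).
  auto w = torch::randn({3, 3}, torch::requires_grad());

  // AdagradOptions now takes the learning rate directly in its constructor.
  torch::optim::Adagrad optimizer(std::vector<torch::Tensor>{w},
                                  torch::optim::AdagradOptions(0.01));

  // One optimization step: the reworked step() walks param groups and per-parameter state.
  optimizer.zero_grad();
  auto loss = (w * w).sum();
  loss.backward();
  optimizer.step();

  // The commit exposes parameter groups (and per-parameter state) on the optimizer.
  std::cout << "param groups: " << optimizer.param_groups().size() << "\n";

  // Round trip through the archive-based serialization added by the commit.
  torch::serialize::OutputArchive output;
  optimizer.save(output);
  output.save_to("adagrad_state.pt");

  torch::serialize::InputArchive input;
  input.load_from("adagrad_state.pt");
  optimizer.load(input);  // pre-1.5.0 archives fall back to the legacy buffer path
  return 0;
}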
mmm a / src / Server / MySQLHandler . cpp <nl> ppp b / src / Server / MySQLHandler . cpp <nl> void MySQLHandler : : run ( ) <nl> { <nl> if ( ! handshake_response . database . empty ( ) ) <nl> connection_context . setCurrentDatabase ( handshake_response . database ) ; <nl> - connection_context . setCurrentQueryId ( " " ) ; <nl> + connection_context . setCurrentQueryId ( Poco : : format ( " mysql : % lu " , connection_id ) ) ; <nl> + <nl> } <nl> catch ( const Exception & exc ) <nl> { <nl> void MySQLHandler : : comQuery ( ReadBuffer & payload ) <nl> replacement_query = boost : : replace_all_copy ( query , " SHOW TABLE STATUS LIKE " , show_table_status_replacement_query ) ; <nl> } <nl> <nl> + if ( 0 = = strncasecmp ( " KILL QUERY " , query . c_str ( ) , 10 ) ) <nl> + { <nl> + should_replace = true ; <nl> + replacement_query = kill_connection_id_replacement_query ( query ) ; <nl> + } <nl> + <nl> if ( 0 = = strncasecmp ( " SHOW VARIABLES " , query . c_str ( ) , 13 ) ) <nl> { <nl> should_replace = true ; <nl> const String MySQLHandler : : show_table_status_replacement_query ( " SELECT " <nl> " ' Dynamic ' AS Row_format , " <nl> " 0 AS Rows , " <nl> " 0 AS Avg_row_length , " <nl> - " 0 AS Data_length , " <nl> + " 0 AS Data_length , " <nl> " 0 AS Max_data_length , " <nl> " 0 AS Index_length , " <nl> " 0 AS Data_free , " <nl> const String MySQLHandler : : show_table_status_replacement_query ( " SELECT " <nl> " FROM system . tables " <nl> " WHERE name LIKE " ) ; <nl> <nl> + String MySQLHandler : : kill_connection_id_replacement_query ( const String & query ) <nl> + { <nl> + const String s = " KILL QUERY " ; <nl> + <nl> + if ( query . size ( ) > s . size ( ) ) <nl> + { <nl> + String process_id = query . data ( ) + s . length ( ) ; <nl> + <nl> + static const std : : regex expr { " ^ [ 0 - 9 ] " } ; <nl> + if ( std : : regex_match ( process_id , expr ) ) <nl> + { <nl> + String replacement = Poco : : format ( " KILL QUERY WHERE query_id = ' mysql : % s ' " , process_id ) ; <nl> + return replacement ; <nl> + } <nl> + } <nl> + return query ; <nl> } <nl> + <nl> + } <nl> + <nl> mmm a / src / Server / MySQLHandler . h <nl> ppp b / src / Server / MySQLHandler . h <nl> class MySQLHandler : public Poco : : Net : : TCPServerConnection <nl> <nl> private : <nl> static const String show_table_status_replacement_query ; <nl> + String kill_connection_id_replacement_query ( const String & query ) ; <nl> } ; <nl> <nl> # if USE_SSL <nl>
|
Support KILL QUERY [ connection_id ] for MySQL
|
ClickHouse/ClickHouse
|
37ac45643947e2ecf085e0c840341e526a3b44f1
|
2020-07-06T01:02:02Z
|
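To make the MySQL compatibility commit above easier to follow, here is a standalone, hypothetical C++ sketch of the rewrite it introduces: the handler assigns each MySQL connection the query id mysql:<connection_id>, so an incoming KILL QUERY <connection_id> can be translated into ClickHouse's KILL QUERY WHERE query_id = 'mysql:<connection_id>'. The function below mirrors that translation with simplified, case-sensitive matching (the server itself uses strncasecmp, a regex check, and Poco::format); it is illustrative only, not ClickHouse source.

#include <algorithm>
#include <cctype>
#include <iostream>
#include <string>

// Rewrites "KILL QUERY <n>" (MySQL syntax, <n> = connection id) into the
// ClickHouse statement that kills the query tagged "mysql:<n>" at connect time.
std::string rewriteKillQuery(const std::string & query)
{
    const std::string prefix = "KILL QUERY ";
    if (query.size() <= prefix.size() || query.compare(0, prefix.size(), prefix) != 0)
        return query;  // not a KILL QUERY statement: pass through unchanged

    const std::string id = query.substr(prefix.size());
    const bool numeric = !id.empty() &&
        std::all_of(id.begin(), id.end(), [](unsigned char c) { return std::isdigit(c) != 0; });
    if (!numeric)
        return query;  // only purely numeric connection ids are rewritten here

    return "KILL QUERY WHERE query_id = 'mysql:" + id + "'";
}

int main()
{
    std::cout << rewriteKillQuery("KILL QUERY 42") << "\n";  // KILL QUERY WHERE query_id = 'mysql:42'
    std::cout << rewriteKillQuery("SELECT 1") << "\n";       // printed unchanged
    return 0;
}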
mmm a / tensorflow / go / op / wrappers . go <nl> ppp b / tensorflow / go / op / wrappers . go <nl> func BoostedTreesExampleDebugOutputs ( scope * Scope , tree_ensemble_handle tf . Outpu <nl> return op . Output ( 0 ) <nl> } <nl> <nl> - / / Computes the sum along sparse segments of a tensor . <nl> - / / <nl> - / / Like ` SparseSegmentSum ` , but allows missing ids in ` segment_ids ` . If an id is <nl> - / / misisng , the ` output ` tensor at that position will be zeroed . <nl> - / / <nl> - / / Read <nl> - / / [ the section on segmentation ] ( https : / / tensorflow . org / api_guides / python / math_ops # Segmentation ) <nl> - / / for an explanation of segments . <nl> - / / <nl> - / / For example : <nl> - / / <nl> - / / ` ` ` python <nl> - / / c = tf . constant ( [ [ 1 , 2 , 3 , 4 ] , [ - 1 , - 2 , - 3 , - 4 ] , [ 5 , 6 , 7 , 8 ] ] ) <nl> - / / <nl> - / / tf . sparse_segment_sum_with_num_segments ( <nl> - / / c , tf . constant ( [ 0 , 1 ] ) , tf . constant ( [ 0 , 0 ] ) , num_segments = 3 ) <nl> - / / # = > [ [ 0 0 0 0 ] <nl> - / / # [ 0 0 0 0 ] <nl> - / / # [ 0 0 0 0 ] ] <nl> - / / <nl> - / / tf . sparse_segment_sum_with_num_segments ( c , <nl> - / / tf . constant ( [ 0 , 1 ] ) , <nl> - / / tf . constant ( [ 0 , 2 ] , <nl> - / / num_segments = 4 ) ) <nl> - / / # = > [ [ 1 2 3 4 ] <nl> - / / # [ 0 0 0 0 ] <nl> - / / # [ - 1 - 2 - 3 - 4 ] <nl> - / / # [ 0 0 0 0 ] ] <nl> - / / ` ` ` <nl> - / / <nl> - / / Arguments : <nl> - / / <nl> - / / indices : A 1 - D tensor . Has same rank as ` segment_ids ` . <nl> - / / segment_ids : A 1 - D tensor . Values should be sorted and can be repeated . <nl> - / / num_segments : Should equal the number of distinct segment IDs . <nl> - / / <nl> - / / Returns Has same shape as data , except for dimension 0 which <nl> - / / has size ` num_segments ` . <nl> - func SparseSegmentSumWithNumSegments ( scope * Scope , data tf . Output , indices tf . Output , segment_ids tf . Output , num_segments tf . Output ) ( output tf . Output ) { <nl> - if scope . Err ( ) ! = nil { <nl> - return <nl> - } <nl> - opspec : = tf . OpSpec { <nl> - Type : " SparseSegmentSumWithNumSegments " , <nl> - Input : [ ] tf . Input { <nl> - data , indices , segment_ids , num_segments , <nl> - } , <nl> - } <nl> - op : = scope . AddOperation ( opspec ) <nl> - return op . Output ( 0 ) <nl> - } <nl> - <nl> - / / PreventGradientAttr is an optional argument to PreventGradient . <nl> - type PreventGradientAttr func ( optionalAttr ) <nl> - <nl> - / / PreventGradientMessage sets the optional message attribute to value . <nl> - / / <nl> - / / value : Will be printed in the error when anyone tries to differentiate <nl> - / / this operation . <nl> - / / If not specified , defaults to " " <nl> - func PreventGradientMessage ( value string ) PreventGradientAttr { <nl> - return func ( m optionalAttr ) { <nl> - m [ " message " ] = value <nl> - } <nl> - } <nl> - <nl> - / / An identity op that triggers an error if a gradient is requested . <nl> - / / <nl> - / / When executed in a graph , this op outputs its input tensor as - is . <nl> + / / Makes the summary of accumulated stats for the batch . <nl> / / <nl> - / / When building ops to compute gradients , the TensorFlow gradient system <nl> - / / will return an error when trying to lookup the gradient of this op , <nl> - / / because no gradient must ever be registered for this function . This <nl> - / / op exists to prevent subtle bugs from silently returning unimplemented <nl> - / / gradients in some corner cases . 
<nl> + / / The summary stats contains gradients and hessians accumulated into the corresponding node and bucket for each example . <nl> / / <nl> / / Arguments : <nl> - / / input : any tensor . <nl> + / / node_ids : int32 Rank 1 Tensor containing node ids , which each example falls into for the requested layer . <nl> + / / gradients : float32 ; Rank 2 Tensor ( shape = [ # examples , 1 ] ) for gradients . <nl> + / / hessians : float32 ; Rank 2 Tensor ( shape = [ # examples , 1 ] ) for hessians . <nl> + / / bucketized_features_list : int32 list of Rank 1 Tensors , each containing the bucketized feature ( for each feature column ) . <nl> + / / max_splits : int ; the maximum number of splits possible in the whole tree . <nl> + / / num_buckets : int ; equals to the maximum possible value of bucketized feature . <nl> / / <nl> - / / Returns the same input tensor . <nl> - func PreventGradient ( scope * Scope , input tf . Output , optional . . . PreventGradientAttr ) ( output tf . Output ) { <nl> + / / Returns output Rank 4 Tensor ( shape = [ # features , # splits , # buckets , 2 ] ) containing accumulated stats put into the corresponding node and bucket . The first index of 4th dimension refers to gradients , and the second to hessians . <nl> + func BoostedTreesMakeStatsSummary ( scope * Scope , node_ids tf . Output , gradients tf . Output , hessians tf . Output , bucketized_features_list [ ] tf . Output , max_splits int64 , num_buckets int64 ) ( stats_summary tf . Output ) { <nl> if scope . Err ( ) ! = nil { <nl> return <nl> } <nl> - attrs : = map [ string ] interface { } { } <nl> - for _ , a : = range optional { <nl> - a ( attrs ) <nl> - } <nl> + attrs : = map [ string ] interface { } { " max_splits " : max_splits , " num_buckets " : num_buckets } <nl> opspec : = tf . OpSpec { <nl> - Type : " PreventGradient " , <nl> + Type : " BoostedTreesMakeStatsSummary " , <nl> Input : [ ] tf . Input { <nl> - input , <nl> + node_ids , gradients , hessians , tf . OutputList ( bucketized_features_list ) , <nl> } , <nl> Attrs : attrs , <nl> } <nl> func PreventGradient ( scope * Scope , input tf . Output , optional . . . PreventGradientA <nl> return op . Output ( 0 ) <nl> } <nl> <nl> - / / Computes asin of x element - wise . <nl> - func Asin ( scope * Scope , x tf . Output ) ( y tf . Output ) { <nl> - if scope . Err ( ) ! = nil { <nl> - return <nl> - } <nl> - opspec : = tf . OpSpec { <nl> - Type : " Asin " , <nl> - Input : [ ] tf . Input { <nl> - x , <nl> - } , <nl> - } <nl> - op : = scope . AddOperation ( opspec ) <nl> - return op . Output ( 0 ) <nl> - } <nl> - <nl> / / Computes the sum along sparse segments of a tensor . <nl> / / <nl> / / Read <nl> func AddV2 ( scope * Scope , x tf . Output , y tf . Output ) ( z tf . Output ) { <nl> return op . Output ( 0 ) <nl> } <nl> <nl> - / / NthElementAttr is an optional argument to NthElement . <nl> - type NthElementAttr func ( optionalAttr ) <nl> + / / Computes exponential of x element - wise . \ \ ( y = e ^ x \ \ ) . <nl> + func Exp ( scope * Scope , x tf . Output ) ( y tf . Output ) { <nl> + if scope . Err ( ) ! = nil { <nl> + return <nl> + } <nl> + opspec : = tf . OpSpec { <nl> + Type : " Exp " , <nl> + Input : [ ] tf . Input { <nl> + x , <nl> + } , <nl> + } <nl> + op : = scope . AddOperation ( opspec ) <nl> + return op . Output ( 0 ) <nl> + } <nl> <nl> - / / NthElementReverse sets the optional reverse attribute to value . <nl> + / / Returns an element - wise indication of the sign of a number . 
<nl> / / <nl> - / / value : When set to True , find the nth - largest value in the vector and vice <nl> - / / versa . <nl> - / / If not specified , defaults to false <nl> - func NthElementReverse ( value bool ) NthElementAttr { <nl> + / / ` y = sign ( x ) = - 1 ` if ` x < 0 ` ; 0 if ` x = = 0 ` ; 1 if ` x > 0 ` . <nl> + / / <nl> + / / For complex numbers , ` y = sign ( x ) = x / | x | ` if ` x ! = 0 ` , otherwise ` y = 0 ` . <nl> + func Sign ( scope * Scope , x tf . Output ) ( y tf . Output ) { <nl> + if scope . Err ( ) ! = nil { <nl> + return <nl> + } <nl> + opspec : = tf . OpSpec { <nl> + Type : " Sign " , <nl> + Input : [ ] tf . Input { <nl> + x , <nl> + } , <nl> + } <nl> + op : = scope . AddOperation ( opspec ) <nl> + return op . Output ( 0 ) <nl> + } <nl> + <nl> + / / ArgMinAttr is an optional argument to ArgMin . <nl> + type ArgMinAttr func ( optionalAttr ) <nl> + <nl> + / / ArgMinOutputType sets the optional output_type attribute to value . <nl> + / / If not specified , defaults to DT_INT64 <nl> + func ArgMinOutputType ( value tf . DataType ) ArgMinAttr { <nl> return func ( m optionalAttr ) { <nl> - m [ " reverse " ] = value <nl> + m [ " output_type " ] = value <nl> } <nl> } <nl> <nl> - / / Finds values of the ` n ` - th order statistic for the last dimension . <nl> + / / Returns the index with the smallest value across dimensions of a tensor . <nl> / / <nl> - / / If the input is a vector ( rank - 1 ) , finds the entries which is the nth - smallest <nl> - / / value in the vector and outputs their values as scalar tensor . <nl> + / / Note that in case of ties the identity of the return value is not guaranteed . <nl> / / <nl> - / / For matrices ( resp . higher rank input ) , computes the entries which is the <nl> - / / nth - smallest value in each row ( resp . vector along the last dimension ) . Thus , <nl> + / / Arguments : <nl> / / <nl> - / / values . shape = input . shape [ : - 1 ] <nl> + / / dimension : int32 or int64 , must be in the range ` [ - rank ( input ) , rank ( input ) ) ` . <nl> + / / Describes which dimension of the input Tensor to reduce across . For vectors , <nl> + / / use dimension = 0 . <nl> + func ArgMin ( scope * Scope , input tf . Output , dimension tf . Output , optional . . . ArgMinAttr ) ( output tf . Output ) { <nl> + if scope . Err ( ) ! = nil { <nl> + return <nl> + } <nl> + attrs : = map [ string ] interface { } { } <nl> + for _ , a : = range optional { <nl> + a ( attrs ) <nl> + } <nl> + opspec : = tf . OpSpec { <nl> + Type : " ArgMin " , <nl> + Input : [ ] tf . Input { <nl> + input , dimension , <nl> + } , <nl> + Attrs : attrs , <nl> + } <nl> + op : = scope . AddOperation ( opspec ) <nl> + return op . Output ( 0 ) <nl> + } <nl> + <nl> + / / Convert the quantized ' input ' tensor into a lower - precision ' output ' , using the <nl> + / / <nl> + / / output range specified with ' requested_output_min ' and ' requested_output_max ' . <nl> + / / <nl> + / / [ input_min , input_max ] are scalar floats that specify the range for the float <nl> + / / interpretation of the ' input ' data . For example , if input_min is - 1 . 0f and <nl> + / / input_max is 1 . 0f , and we are dealing with quint16 quantized data , then a 0 <nl> + / / value in the 16 - bit data should be interpreted as - 1 . 0f , and a 65535 means 1 . 0f . <nl> / / <nl> / / Arguments : <nl> - / / input : 1 - D or higher with last dimension at least ` n + 1 ` . <nl> - / / n : 0 - D . Position of sorted vector to select along the last dimension ( along <nl> - / / each row for matrices ) . 
Valid range of n is ` [ 0 , input . shape [ : - 1 ] ) ` <nl> / / <nl> - / / Returns The ` n ` - th order statistic along each last dimensional slice . <nl> - func NthElement ( scope * Scope , input tf . Output , n tf . Output , optional . . . NthElementAttr ) ( values tf . Output ) { <nl> + / / input_min : The float value that the minimum quantized input value represents . <nl> + / / input_max : The float value that the maximum quantized input value represents . <nl> + / / requested_output_min : The float value that the minimum quantized output value represents . <nl> + / / requested_output_max : The float value that the maximum quantized output value represents . <nl> + / / out_type : The type of the output . Should be a lower bit depth than Tinput . <nl> + / / <nl> + / / Returns The requested_output_min value is copied into this output . The requested_output_max value is copied into this output . <nl> + func Requantize ( scope * Scope , input tf . Output , input_min tf . Output , input_max tf . Output , requested_output_min tf . Output , requested_output_max tf . Output , out_type tf . DataType ) ( output tf . Output , output_min tf . Output , output_max tf . Output ) { <nl> + if scope . Err ( ) ! = nil { <nl> + return <nl> + } <nl> + attrs : = map [ string ] interface { } { " out_type " : out_type } <nl> + opspec : = tf . OpSpec { <nl> + Type : " Requantize " , <nl> + Input : [ ] tf . Input { <nl> + input , input_min , input_max , requested_output_min , requested_output_max , <nl> + } , <nl> + Attrs : attrs , <nl> + } <nl> + op : = scope . AddOperation ( opspec ) <nl> + return op . Output ( 0 ) , op . Output ( 1 ) , op . Output ( 2 ) <nl> + } <nl> + <nl> + / / PreventGradientAttr is an optional argument to PreventGradient . <nl> + type PreventGradientAttr func ( optionalAttr ) <nl> + <nl> + / / PreventGradientMessage sets the optional message attribute to value . <nl> + / / <nl> + / / value : Will be printed in the error when anyone tries to differentiate <nl> + / / this operation . <nl> + / / If not specified , defaults to " " <nl> + func PreventGradientMessage ( value string ) PreventGradientAttr { <nl> + return func ( m optionalAttr ) { <nl> + m [ " message " ] = value <nl> + } <nl> + } <nl> + <nl> + / / An identity op that triggers an error if a gradient is requested . <nl> + / / <nl> + / / When executed in a graph , this op outputs its input tensor as - is . <nl> + / / <nl> + / / When building ops to compute gradients , the TensorFlow gradient system <nl> + / / will return an error when trying to lookup the gradient of this op , <nl> + / / because no gradient must ever be registered for this function . This <nl> + / / op exists to prevent subtle bugs from silently returning unimplemented <nl> + / / gradients in some corner cases . <nl> + / / <nl> + / / Arguments : <nl> + / / input : any tensor . <nl> + / / <nl> + / / Returns the same input tensor . <nl> + func PreventGradient ( scope * Scope , input tf . Output , optional . . . PreventGradientAttr ) ( output tf . Output ) { <nl> if scope . Err ( ) ! = nil { <nl> return <nl> } <nl> func NthElement ( scope * Scope , input tf . Output , n tf . Output , optional . . . NthEleme <nl> a ( attrs ) <nl> } <nl> opspec : = tf . OpSpec { <nl> - Type : " NthElement " , <nl> + Type : " PreventGradient " , <nl> Input : [ ] tf . Input { <nl> - input , n , <nl> + input , <nl> } , <nl> Attrs : attrs , <nl> } <nl> func NthElement ( scope * Scope , input tf . Output , n tf . Output , optional . . . NthEleme <nl> return op . 
Output ( 0 ) <nl> } <nl> <nl> + / / Computes asin of x element - wise . <nl> + func Asin ( scope * Scope , x tf . Output ) ( y tf . Output ) { <nl> + if scope . Err ( ) ! = nil { <nl> + return <nl> + } <nl> + opspec : = tf . OpSpec { <nl> + Type : " Asin " , <nl> + Input : [ ] tf . Input { <nl> + x , <nl> + } , <nl> + } <nl> + op : = scope . AddOperation ( opspec ) <nl> + return op . Output ( 0 ) <nl> + } <nl> + <nl> / / Computes the maximum along segments of a tensor . <nl> / / <nl> / / Read <nl> func UnsortedSegmentMax ( scope * Scope , data tf . Output , segment_ids tf . Output , num <nl> return op . Output ( 0 ) <nl> } <nl> <nl> - / / Computes exponential of x element - wise . \ \ ( y = e ^ x \ \ ) . <nl> - func Exp ( scope * Scope , x tf . Output ) ( y tf . Output ) { <nl> - if scope . Err ( ) ! = nil { <nl> - return <nl> - } <nl> - opspec : = tf . OpSpec { <nl> - Type : " Exp " , <nl> - Input : [ ] tf . Input { <nl> - x , <nl> - } , <nl> - } <nl> - op : = scope . AddOperation ( opspec ) <nl> - return op . Output ( 0 ) <nl> - } <nl> + / / NthElementAttr is an optional argument to NthElement . <nl> + type NthElementAttr func ( optionalAttr ) <nl> <nl> - / / Returns an element - wise indication of the sign of a number . <nl> - / / <nl> - / / ` y = sign ( x ) = - 1 ` if ` x < 0 ` ; 0 if ` x = = 0 ` ; 1 if ` x > 0 ` . <nl> + / / NthElementReverse sets the optional reverse attribute to value . <nl> / / <nl> - / / For complex numbers , ` y = sign ( x ) = x / | x | ` if ` x ! = 0 ` , otherwise ` y = 0 ` . <nl> - func Sign ( scope * Scope , x tf . Output ) ( y tf . Output ) { <nl> - if scope . Err ( ) ! = nil { <nl> - return <nl> - } <nl> - opspec : = tf . OpSpec { <nl> - Type : " Sign " , <nl> - Input : [ ] tf . Input { <nl> - x , <nl> - } , <nl> - } <nl> - op : = scope . AddOperation ( opspec ) <nl> - return op . Output ( 0 ) <nl> - } <nl> - <nl> - / / ArgMinAttr is an optional argument to ArgMin . <nl> - type ArgMinAttr func ( optionalAttr ) <nl> - <nl> - / / ArgMinOutputType sets the optional output_type attribute to value . <nl> - / / If not specified , defaults to DT_INT64 <nl> - func ArgMinOutputType ( value tf . DataType ) ArgMinAttr { <nl> + / / value : When set to True , find the nth - largest value in the vector and vice <nl> + / / versa . <nl> + / / If not specified , defaults to false <nl> + func NthElementReverse ( value bool ) NthElementAttr { <nl> return func ( m optionalAttr ) { <nl> - m [ " output_type " ] = value <nl> + m [ " reverse " ] = value <nl> } <nl> } <nl> <nl> - / / Returns the index with the smallest value across dimensions of a tensor . <nl> + / / Finds values of the ` n ` - th order statistic for the last dimension . <nl> / / <nl> - / / Note that in case of ties the identity of the return value is not guaranteed . <nl> + / / If the input is a vector ( rank - 1 ) , finds the entries which is the nth - smallest <nl> + / / value in the vector and outputs their values as scalar tensor . <nl> + / / <nl> + / / For matrices ( resp . higher rank input ) , computes the entries which is the <nl> + / / nth - smallest value in each row ( resp . vector along the last dimension ) . Thus , <nl> + / / <nl> + / / values . shape = input . shape [ : - 1 ] <nl> / / <nl> / / Arguments : <nl> + / / input : 1 - D or higher with last dimension at least ` n + 1 ` . <nl> + / / n : 0 - D . Position of sorted vector to select along the last dimension ( along <nl> + / / each row for matrices ) . Valid range of n is ` [ 0 , input . 
shape [ : - 1 ] ) ` <nl> / / <nl> - / / dimension : int32 or int64 , must be in the range ` [ - rank ( input ) , rank ( input ) ) ` . <nl> - / / Describes which dimension of the input Tensor to reduce across . For vectors , <nl> - / / use dimension = 0 . <nl> - func ArgMin ( scope * Scope , input tf . Output , dimension tf . Output , optional . . . ArgMinAttr ) ( output tf . Output ) { <nl> + / / Returns The ` n ` - th order statistic along each last dimensional slice . <nl> + func NthElement ( scope * Scope , input tf . Output , n tf . Output , optional . . . NthElementAttr ) ( values tf . Output ) { <nl> if scope . Err ( ) ! = nil { <nl> return <nl> } <nl> func ArgMin ( scope * Scope , input tf . Output , dimension tf . Output , optional . . . ArgM <nl> a ( attrs ) <nl> } <nl> opspec : = tf . OpSpec { <nl> - Type : " ArgMin " , <nl> + Type : " NthElement " , <nl> Input : [ ] tf . Input { <nl> - input , dimension , <nl> + input , n , <nl> } , <nl> Attrs : attrs , <nl> } <nl> func ArgMin ( scope * Scope , input tf . Output , dimension tf . Output , optional . . . ArgM <nl> return op . Output ( 0 ) <nl> } <nl> <nl> - / / Convert the quantized ' input ' tensor into a lower - precision ' output ' , using the <nl> + / / Computes the sum along sparse segments of a tensor . <nl> / / <nl> - / / output range specified with ' requested_output_min ' and ' requested_output_max ' . <nl> + / / Like ` SparseSegmentSum ` , but allows missing ids in ` segment_ids ` . If an id is <nl> + / / misisng , the ` output ` tensor at that position will be zeroed . <nl> / / <nl> - / / [ input_min , input_max ] are scalar floats that specify the range for the float <nl> - / / interpretation of the ' input ' data . For example , if input_min is - 1 . 0f and <nl> - / / input_max is 1 . 0f , and we are dealing with quint16 quantized data , then a 0 <nl> - / / value in the 16 - bit data should be interpreted as - 1 . 0f , and a 65535 means 1 . 0f . <nl> + / / Read <nl> + / / [ the section on segmentation ] ( https : / / tensorflow . org / api_guides / python / math_ops # Segmentation ) <nl> + / / for an explanation of segments . <nl> + / / <nl> + / / For example : <nl> + / / <nl> + / / ` ` ` python <nl> + / / c = tf . constant ( [ [ 1 , 2 , 3 , 4 ] , [ - 1 , - 2 , - 3 , - 4 ] , [ 5 , 6 , 7 , 8 ] ] ) <nl> + / / <nl> + / / tf . sparse_segment_sum_with_num_segments ( <nl> + / / c , tf . constant ( [ 0 , 1 ] ) , tf . constant ( [ 0 , 0 ] ) , num_segments = 3 ) <nl> + / / # = > [ [ 0 0 0 0 ] <nl> + / / # [ 0 0 0 0 ] <nl> + / / # [ 0 0 0 0 ] ] <nl> + / / <nl> + / / tf . sparse_segment_sum_with_num_segments ( c , <nl> + / / tf . constant ( [ 0 , 1 ] ) , <nl> + / / tf . constant ( [ 0 , 2 ] , <nl> + / / num_segments = 4 ) ) <nl> + / / # = > [ [ 1 2 3 4 ] <nl> + / / # [ 0 0 0 0 ] <nl> + / / # [ - 1 - 2 - 3 - 4 ] <nl> + / / # [ 0 0 0 0 ] ] <nl> + / / ` ` ` <nl> / / <nl> / / Arguments : <nl> / / <nl> - / / input_min : The float value that the minimum quantized input value represents . <nl> - / / input_max : The float value that the maximum quantized input value represents . <nl> - / / requested_output_min : The float value that the minimum quantized output value represents . <nl> - / / requested_output_max : The float value that the maximum quantized output value represents . <nl> - / / out_type : The type of the output . Should be a lower bit depth than Tinput . <nl> + / / indices : A 1 - D tensor . Has same rank as ` segment_ids ` . <nl> + / / segment_ids : A 1 - D tensor . Values should be sorted and can be repeated . 
<nl> + / / num_segments : Should equal the number of distinct segment IDs . <nl> / / <nl> - / / Returns The requested_output_min value is copied into this output . The requested_output_max value is copied into this output . <nl> - func Requantize ( scope * Scope , input tf . Output , input_min tf . Output , input_max tf . Output , requested_output_min tf . Output , requested_output_max tf . Output , out_type tf . DataType ) ( output tf . Output , output_min tf . Output , output_max tf . Output ) { <nl> + / / Returns Has same shape as data , except for dimension 0 which <nl> + / / has size ` num_segments ` . <nl> + func SparseSegmentSumWithNumSegments ( scope * Scope , data tf . Output , indices tf . Output , segment_ids tf . Output , num_segments tf . Output ) ( output tf . Output ) { <nl> if scope . Err ( ) ! = nil { <nl> return <nl> } <nl> - attrs : = map [ string ] interface { } { " out_type " : out_type } <nl> opspec : = tf . OpSpec { <nl> - Type : " Requantize " , <nl> + Type : " SparseSegmentSumWithNumSegments " , <nl> Input : [ ] tf . Input { <nl> - input , input_min , input_max , requested_output_min , requested_output_max , <nl> + data , indices , segment_ids , num_segments , <nl> } , <nl> - Attrs : attrs , <nl> } <nl> op : = scope . AddOperation ( opspec ) <nl> - return op . Output ( 0 ) , op . Output ( 1 ) , op . Output ( 2 ) <nl> + return op . Output ( 0 ) <nl> } <nl> <nl> / / Computes the determinant of one or more square matrices . <nl> func FusedBatchNorm ( scope * Scope , x tf . Output , scale tf . Output , offset tf . Output <nl> opspec : = tf . OpSpec { <nl> Type : " FusedBatchNorm " , <nl> Input : [ ] tf . Input { <nl> - x , scale , offset , mean , variance , <nl> + x , scale , offset , mean , variance , <nl> + } , <nl> + Attrs : attrs , <nl> + } <nl> + op : = scope . AddOperation ( opspec ) <nl> + return op . Output ( 0 ) , op . Output ( 1 ) , op . Output ( 2 ) , op . Output ( 3 ) , op . Output ( 4 ) <nl> + } <nl> + <nl> + / / RandomStandardNormalAttr is an optional argument to RandomStandardNormal . <nl> + type RandomStandardNormalAttr func ( optionalAttr ) <nl> + <nl> + / / RandomStandardNormalSeed sets the optional seed attribute to value . <nl> + / / <nl> + / / value : If either ` seed ` or ` seed2 ` are set to be non - zero , the random number <nl> + / / generator is seeded by the given seed . Otherwise , it is seeded by a <nl> + / / random seed . <nl> + / / If not specified , defaults to 0 <nl> + func RandomStandardNormalSeed ( value int64 ) RandomStandardNormalAttr { <nl> + return func ( m optionalAttr ) { <nl> + m [ " seed " ] = value <nl> + } <nl> + } <nl> + <nl> + / / RandomStandardNormalSeed2 sets the optional seed2 attribute to value . <nl> + / / <nl> + / / value : A second seed to avoid seed collision . <nl> + / / If not specified , defaults to 0 <nl> + func RandomStandardNormalSeed2 ( value int64 ) RandomStandardNormalAttr { <nl> + return func ( m optionalAttr ) { <nl> + m [ " seed2 " ] = value <nl> + } <nl> + } <nl> + <nl> + / / Outputs random values from a normal distribution . <nl> + / / <nl> + / / The generated values will have mean 0 and standard deviation 1 . <nl> + / / <nl> + / / Arguments : <nl> + / / shape : The shape of the output tensor . <nl> + / / dtype : The type of the output . <nl> + / / <nl> + / / Returns A tensor of the specified shape filled with random normal values . <nl> + func RandomStandardNormal ( scope * Scope , shape tf . Output , dtype tf . DataType , optional . . . RandomStandardNormalAttr ) ( output tf . 
Output ) { <nl> + if scope . Err ( ) ! = nil { <nl> + return <nl> + } <nl> + attrs : = map [ string ] interface { } { " dtype " : dtype } <nl> + for _ , a : = range optional { <nl> + a ( attrs ) <nl> + } <nl> + opspec : = tf . OpSpec { <nl> + Type : " RandomStandardNormal " , <nl> + Input : [ ] tf . Input { <nl> + shape , <nl> } , <nl> Attrs : attrs , <nl> } <nl> op : = scope . AddOperation ( opspec ) <nl> - return op . Output ( 0 ) , op . Output ( 1 ) , op . Output ( 2 ) , op . Output ( 3 ) , op . Output ( 4 ) <nl> + return op . Output ( 0 ) <nl> } <nl> <nl> - / / RandomStandardNormalAttr is an optional argument to RandomStandardNormal . <nl> - type RandomStandardNormalAttr func ( optionalAttr ) <nl> + / / RandomUniformIntAttr is an optional argument to RandomUniformInt . <nl> + type RandomUniformIntAttr func ( optionalAttr ) <nl> <nl> - / / RandomStandardNormalSeed sets the optional seed attribute to value . <nl> + / / RandomUniformIntSeed sets the optional seed attribute to value . <nl> / / <nl> / / value : If either ` seed ` or ` seed2 ` are set to be non - zero , the random number <nl> / / generator is seeded by the given seed . Otherwise , it is seeded by a <nl> / / random seed . <nl> / / If not specified , defaults to 0 <nl> - func RandomStandardNormalSeed ( value int64 ) RandomStandardNormalAttr { <nl> + func RandomUniformIntSeed ( value int64 ) RandomUniformIntAttr { <nl> return func ( m optionalAttr ) { <nl> m [ " seed " ] = value <nl> } <nl> } <nl> <nl> - / / RandomStandardNormalSeed2 sets the optional seed2 attribute to value . <nl> + / / RandomUniformIntSeed2 sets the optional seed2 attribute to value . <nl> / / <nl> / / value : A second seed to avoid seed collision . <nl> / / If not specified , defaults to 0 <nl> - func RandomStandardNormalSeed2 ( value int64 ) RandomStandardNormalAttr { <nl> + func RandomUniformIntSeed2 ( value int64 ) RandomUniformIntAttr { <nl> return func ( m optionalAttr ) { <nl> m [ " seed2 " ] = value <nl> } <nl> } <nl> <nl> - / / Outputs random values from a normal distribution . <nl> + / / Outputs random integers from a uniform distribution . <nl> / / <nl> - / / The generated values will have mean 0 and standard deviation 1 . <nl> + / / The generated values are uniform integers in the range ` [ minval , maxval ) ` . <nl> + / / The lower bound ` minval ` is included in the range , while the upper bound <nl> + / / ` maxval ` is excluded . <nl> + / / <nl> + / / The random integers are slightly biased unless ` maxval - minval ` is an exact <nl> + / / power of two . The bias is small for values of ` maxval - minval ` significantly <nl> + / / smaller than the range of the output ( either ` 2 ^ 32 ` or ` 2 ^ 64 ` ) . <nl> / / <nl> / / Arguments : <nl> / / shape : The shape of the output tensor . <nl> - / / dtype : The type of the output . <nl> + / / minval : 0 - D . Inclusive lower bound on the generated integers . <nl> + / / maxval : 0 - D . Exclusive upper bound on the generated integers . <nl> / / <nl> - / / Returns A tensor of the specified shape filled with random normal values . <nl> - func RandomStandardNormal ( scope * Scope , shape tf . Output , dtype tf . DataType , optional . . . RandomStandardNormalAttr ) ( output tf . Output ) { <nl> + / / Returns A tensor of the specified shape filled with uniform random integers . <nl> + func RandomUniformInt ( scope * Scope , shape tf . Output , minval tf . Output , maxval tf . Output , optional . . . RandomUniformIntAttr ) ( output tf . Output ) { <nl> if scope . Err ( ) ! 
= nil { <nl> return <nl> } <nl> - attrs : = map [ string ] interface { } { " dtype " : dtype } <nl> + attrs : = map [ string ] interface { } { } <nl> for _ , a : = range optional { <nl> a ( attrs ) <nl> } <nl> opspec : = tf . OpSpec { <nl> - Type : " RandomStandardNormal " , <nl> + Type : " RandomUniformInt " , <nl> Input : [ ] tf . Input { <nl> - shape , <nl> + shape , minval , maxval , <nl> } , <nl> Attrs : attrs , <nl> } <nl> func FixedLengthRecordReaderV2 ( scope * Scope , record_bytes int64 , optional . . . Fix <nl> return op . Output ( 0 ) <nl> } <nl> <nl> - / / The gradient operator for the SparseAdd op . <nl> - / / <nl> - / / The SparseAdd op calculates A + B , where A , B , and the sum are all represented <nl> - / / as ` SparseTensor ` objects . This op takes in the upstream gradient w . r . t . <nl> - / / non - empty values of the sum , and outputs the gradients w . r . t . the non - empty <nl> - / / values of A and B . <nl> - / / <nl> - / / Arguments : <nl> - / / backprop_val_grad : 1 - D with shape ` [ nnz ( sum ) ] ` . The gradient with respect to <nl> - / / the non - empty values of the sum . <nl> - / / a_indices : 2 - D . The ` indices ` of the ` SparseTensor ` A , size ` [ nnz ( A ) , ndims ] ` . <nl> - / / b_indices : 2 - D . The ` indices ` of the ` SparseTensor ` B , size ` [ nnz ( B ) , ndims ] ` . <nl> - / / sum_indices : 2 - D . The ` indices ` of the sum ` SparseTensor ` , size <nl> - / / ` [ nnz ( sum ) , ndims ] ` . <nl> - / / <nl> - / / Returns 1 - D with shape ` [ nnz ( A ) ] ` . The gradient with respect to the <nl> - / / non - empty values of A . 1 - D with shape ` [ nnz ( B ) ] ` . The gradient with respect to the <nl> - / / non - empty values of B . <nl> - func SparseAddGrad ( scope * Scope , backprop_val_grad tf . Output , a_indices tf . Output , b_indices tf . Output , sum_indices tf . Output ) ( a_val_grad tf . Output , b_val_grad tf . Output ) { <nl> - if scope . Err ( ) ! = nil { <nl> - return <nl> - } <nl> - opspec : = tf . OpSpec { <nl> - Type : " SparseAddGrad " , <nl> - Input : [ ] tf . Input { <nl> - backprop_val_grad , a_indices , b_indices , sum_indices , <nl> - } , <nl> - } <nl> - op : = scope . AddOperation ( opspec ) <nl> - return op . Output ( 0 ) , op . Output ( 1 ) <nl> - } <nl> - <nl> / / String lengths of ` input ` . <nl> / / <nl> / / Computes the length of each string given in the input tensor . <nl> func MutexLock ( scope * Scope , mutex tf . Output ) ( mutex_lock tf . Output ) { <nl> return op . Output ( 0 ) <nl> } <nl> <nl> + / / ShapeAttr is an optional argument to Shape . <nl> + type ShapeAttr func ( optionalAttr ) <nl> + <nl> + / / ShapeOutType sets the optional out_type attribute to value . <nl> + / / If not specified , defaults to DT_INT32 <nl> + func ShapeOutType ( value tf . DataType ) ShapeAttr { <nl> + return func ( m optionalAttr ) { <nl> + m [ " out_type " ] = value <nl> + } <nl> + } <nl> + <nl> + / / Returns the shape of a tensor . <nl> + / / <nl> + / / This operation returns a 1 - D integer tensor representing the shape of ` input ` . <nl> + / / <nl> + / / For example : <nl> + / / <nl> + / / ` ` ` <nl> + / / # ' t ' is [ [ [ 1 , 1 , 1 ] , [ 2 , 2 , 2 ] ] , [ [ 3 , 3 , 3 ] , [ 4 , 4 , 4 ] ] ] <nl> + / / shape ( t ) = = > [ 2 , 2 , 3 ] <nl> + / / ` ` ` <nl> + func Shape ( scope * Scope , input tf . Output , optional . . . ShapeAttr ) ( output tf . Output ) { <nl> + if scope . Err ( ) ! 
= nil { <nl> + return <nl> + } <nl> + attrs : = map [ string ] interface { } { } <nl> + for _ , a : = range optional { <nl> + a ( attrs ) <nl> + } <nl> + opspec : = tf . OpSpec { <nl> + Type : " Shape " , <nl> + Input : [ ] tf . Input { <nl> + input , <nl> + } , <nl> + Attrs : attrs , <nl> + } <nl> + op : = scope . AddOperation ( opspec ) <nl> + return op . Output ( 0 ) <nl> + } <nl> + <nl> + / / Computes the power of one value to another . <nl> + / / <nl> + / / Given a tensor ` x ` and a tensor ` y ` , this operation computes \ \ ( x ^ y \ \ ) for <nl> + / / corresponding elements in ` x ` and ` y ` . For example : <nl> + / / <nl> + / / ` ` ` <nl> + / / # tensor ' x ' is [ [ 2 , 2 ] ] , [ 3 , 3 ] ] <nl> + / / # tensor ' y ' is [ [ 8 , 16 ] , [ 2 , 3 ] ] <nl> + / / tf . pow ( x , y ) = = > [ [ 256 , 65536 ] , [ 9 , 27 ] ] <nl> + / / ` ` ` <nl> + func Pow ( scope * Scope , x tf . Output , y tf . Output ) ( z tf . Output ) { <nl> + if scope . Err ( ) ! = nil { <nl> + return <nl> + } <nl> + opspec : = tf . OpSpec { <nl> + Type : " Pow " , <nl> + Input : [ ] tf . Input { <nl> + x , y , <nl> + } , <nl> + } <nl> + op : = scope . AddOperation ( opspec ) <nl> + return op . Output ( 0 ) <nl> + } <nl> + <nl> + / / Computes fingerprints of the input strings . <nl> + / / <nl> + / / Arguments : <nl> + / / input : vector of strings to compute fingerprints on . <nl> + / / <nl> + / / Returns a ( N , 2 ) shaped matrix where N is the number of elements in the input <nl> + / / vector . Each row contains the low and high parts of the fingerprint . <nl> + func SdcaFprint ( scope * Scope , input tf . Output ) ( output tf . Output ) { <nl> + if scope . Err ( ) ! = nil { <nl> + return <nl> + } <nl> + opspec : = tf . OpSpec { <nl> + Type : " SdcaFprint " , <nl> + Input : [ ] tf . Input { <nl> + input , <nl> + } , <nl> + } <nl> + op : = scope . AddOperation ( opspec ) <nl> + return op . Output ( 0 ) <nl> + } <nl> + <nl> + / / The gradient operator for the SparseAdd op . <nl> + / / <nl> + / / The SparseAdd op calculates A + B , where A , B , and the sum are all represented <nl> + / / as ` SparseTensor ` objects . This op takes in the upstream gradient w . r . t . <nl> + / / non - empty values of the sum , and outputs the gradients w . r . t . the non - empty <nl> + / / values of A and B . <nl> + / / <nl> + / / Arguments : <nl> + / / backprop_val_grad : 1 - D with shape ` [ nnz ( sum ) ] ` . The gradient with respect to <nl> + / / the non - empty values of the sum . <nl> + / / a_indices : 2 - D . The ` indices ` of the ` SparseTensor ` A , size ` [ nnz ( A ) , ndims ] ` . <nl> + / / b_indices : 2 - D . The ` indices ` of the ` SparseTensor ` B , size ` [ nnz ( B ) , ndims ] ` . <nl> + / / sum_indices : 2 - D . The ` indices ` of the sum ` SparseTensor ` , size <nl> + / / ` [ nnz ( sum ) , ndims ] ` . <nl> + / / <nl> + / / Returns 1 - D with shape ` [ nnz ( A ) ] ` . The gradient with respect to the <nl> + / / non - empty values of A . 1 - D with shape ` [ nnz ( B ) ] ` . The gradient with respect to the <nl> + / / non - empty values of B . <nl> + func SparseAddGrad ( scope * Scope , backprop_val_grad tf . Output , a_indices tf . Output , b_indices tf . Output , sum_indices tf . Output ) ( a_val_grad tf . Output , b_val_grad tf . Output ) { <nl> + if scope . Err ( ) ! = nil { <nl> + return <nl> + } <nl> + opspec : = tf . OpSpec { <nl> + Type : " SparseAddGrad " , <nl> + Input : [ ] tf . Input { <nl> + backprop_val_grad , a_indices , b_indices , sum_indices , <nl> + } , <nl> + } <nl> + op : = scope . 
AddOperation ( opspec ) <nl> + return op . Output ( 0 ) , op . Output ( 1 ) <nl> + } <nl> + <nl> / / Computes the mean along segments of a tensor . <nl> / / <nl> / / Read <nl> func InTopKV2 ( scope * Scope , predictions tf . Output , targets tf . Output , k tf . Outpu <nl> return <nl> } <nl> opspec : = tf . OpSpec { <nl> - Type : " InTopKV2 " , <nl> + Type : " InTopKV2 " , <nl> + Input : [ ] tf . Input { <nl> + predictions , targets , k , <nl> + } , <nl> + } <nl> + op : = scope . AddOperation ( opspec ) <nl> + return op . Output ( 0 ) <nl> + } <nl> + <nl> + / / RandomPoissonV2Attr is an optional argument to RandomPoissonV2 . <nl> + type RandomPoissonV2Attr func ( optionalAttr ) <nl> + <nl> + / / RandomPoissonV2Seed sets the optional seed attribute to value . <nl> + / / <nl> + / / value : If either ` seed ` or ` seed2 ` are set to be non - zero , the random number <nl> + / / generator is seeded by the given seed . Otherwise , it is seeded by a <nl> + / / random seed . <nl> + / / If not specified , defaults to 0 <nl> + func RandomPoissonV2Seed ( value int64 ) RandomPoissonV2Attr { <nl> + return func ( m optionalAttr ) { <nl> + m [ " seed " ] = value <nl> + } <nl> + } <nl> + <nl> + / / RandomPoissonV2Seed2 sets the optional seed2 attribute to value . <nl> + / / <nl> + / / value : A second seed to avoid seed collision . <nl> + / / If not specified , defaults to 0 <nl> + func RandomPoissonV2Seed2 ( value int64 ) RandomPoissonV2Attr { <nl> + return func ( m optionalAttr ) { <nl> + m [ " seed2 " ] = value <nl> + } <nl> + } <nl> + <nl> + / / RandomPoissonV2Dtype sets the optional dtype attribute to value . <nl> + / / If not specified , defaults to DT_INT64 <nl> + func RandomPoissonV2Dtype ( value tf . DataType ) RandomPoissonV2Attr { <nl> + return func ( m optionalAttr ) { <nl> + m [ " dtype " ] = value <nl> + } <nl> + } <nl> + <nl> + / / Outputs random values from the Poisson distribution ( s ) described by rate . <nl> + / / <nl> + / / This op uses two algorithms , depending on rate . If rate > = 10 , then <nl> + / / the algorithm by Hormann is used to acquire samples via <nl> + / / transformation - rejection . <nl> + / / See http : / / www . sciencedirect . com / science / article / pii / 0167668793909974 . <nl> + / / <nl> + / / Otherwise , Knuth ' s algorithm is used to acquire samples via multiplying uniform <nl> + / / random variables . <nl> + / / See Donald E . Knuth ( 1969 ) . Seminumerical Algorithms . The Art of Computer <nl> + / / Programming , Volume 2 . Addison Wesley <nl> + / / <nl> + / / Arguments : <nl> + / / shape : 1 - D integer tensor . Shape of independent samples to draw from each <nl> + / / distribution described by the shape parameters given in rate . <nl> + / / rate : A tensor in which each scalar is a " rate " parameter describing the <nl> + / / associated poisson distribution . <nl> + / / <nl> + / / Returns A tensor with shape ` shape + shape ( rate ) ` . Each slice <nl> + / / ` [ : , . . . , : , i0 , i1 , . . . iN ] ` contains the samples drawn for <nl> + / / ` rate [ i0 , i1 , . . . iN ] ` . <nl> + func RandomPoissonV2 ( scope * Scope , shape tf . Output , rate tf . Output , optional . . . RandomPoissonV2Attr ) ( output tf . Output ) { <nl> + if scope . Err ( ) ! = nil { <nl> + return <nl> + } <nl> + attrs : = map [ string ] interface { } { } <nl> + for _ , a : = range optional { <nl> + a ( attrs ) <nl> + } <nl> + opspec : = tf . OpSpec { <nl> + Type : " RandomPoissonV2 " , <nl> Input : [ ] tf . 
Input { <nl> - predictions , targets , k , <nl> + shape , rate , <nl> } , <nl> + Attrs : attrs , <nl> } <nl> op : = scope . AddOperation ( opspec ) <nl> return op . Output ( 0 ) <nl> func SdcaOptimizer ( scope * Scope , sparse_example_indices [ ] tf . Output , sparse_feat <nl> return out_example_state_data , out_delta_sparse_weights , out_delta_dense_weights <nl> } <nl> <nl> - / / ShapeAttr is an optional argument to Shape . <nl> - type ShapeAttr func ( optionalAttr ) <nl> - <nl> - / / ShapeOutType sets the optional out_type attribute to value . <nl> - / / If not specified , defaults to DT_INT32 <nl> - func ShapeOutType ( value tf . DataType ) ShapeAttr { <nl> - return func ( m optionalAttr ) { <nl> - m [ " out_type " ] = value <nl> - } <nl> - } <nl> - <nl> - / / Returns the shape of a tensor . <nl> - / / <nl> - / / This operation returns a 1 - D integer tensor representing the shape of ` input ` . <nl> - / / <nl> - / / For example : <nl> - / / <nl> - / / ` ` ` <nl> - / / # ' t ' is [ [ [ 1 , 1 , 1 ] , [ 2 , 2 , 2 ] ] , [ [ 3 , 3 , 3 ] , [ 4 , 4 , 4 ] ] ] <nl> - / / shape ( t ) = = > [ 2 , 2 , 3 ] <nl> - / / ` ` ` <nl> - func Shape ( scope * Scope , input tf . Output , optional . . . ShapeAttr ) ( output tf . Output ) { <nl> - if scope . Err ( ) ! = nil { <nl> - return <nl> - } <nl> - attrs : = map [ string ] interface { } { } <nl> - for _ , a : = range optional { <nl> - a ( attrs ) <nl> - } <nl> - opspec : = tf . OpSpec { <nl> - Type : " Shape " , <nl> - Input : [ ] tf . Input { <nl> - input , <nl> - } , <nl> - Attrs : attrs , <nl> - } <nl> - op : = scope . AddOperation ( opspec ) <nl> - return op . Output ( 0 ) <nl> - } <nl> - <nl> - / / Computes the power of one value to another . <nl> - / / <nl> - / / Given a tensor ` x ` and a tensor ` y ` , this operation computes \ \ ( x ^ y \ \ ) for <nl> - / / corresponding elements in ` x ` and ` y ` . For example : <nl> - / / <nl> - / / ` ` ` <nl> - / / # tensor ' x ' is [ [ 2 , 2 ] ] , [ 3 , 3 ] ] <nl> - / / # tensor ' y ' is [ [ 8 , 16 ] , [ 2 , 3 ] ] <nl> - / / tf . pow ( x , y ) = = > [ [ 256 , 65536 ] , [ 9 , 27 ] ] <nl> - / / ` ` ` <nl> - func Pow ( scope * Scope , x tf . Output , y tf . Output ) ( z tf . Output ) { <nl> - if scope . Err ( ) ! = nil { <nl> - return <nl> - } <nl> - opspec : = tf . OpSpec { <nl> - Type : " Pow " , <nl> - Input : [ ] tf . Input { <nl> - x , y , <nl> - } , <nl> - } <nl> - op : = scope . AddOperation ( opspec ) <nl> - return op . Output ( 0 ) <nl> - } <nl> - <nl> - / / Computes fingerprints of the input strings . <nl> - / / <nl> - / / Arguments : <nl> - / / input : vector of strings to compute fingerprints on . <nl> - / / <nl> - / / Returns a ( N , 2 ) shaped matrix where N is the number of elements in the input <nl> - / / vector . Each row contains the low and high parts of the fingerprint . <nl> - func SdcaFprint ( scope * Scope , input tf . Output ) ( output tf . Output ) { <nl> - if scope . Err ( ) ! = nil { <nl> - return <nl> - } <nl> - opspec : = tf . OpSpec { <nl> - Type : " SdcaFprint " , <nl> - Input : [ ] tf . Input { <nl> - input , <nl> - } , <nl> - } <nl> - op : = scope . AddOperation ( opspec ) <nl> - return op . Output ( 0 ) <nl> - } <nl> - <nl> - / / RandomPoissonV2Attr is an optional argument to RandomPoissonV2 . <nl> - type RandomPoissonV2Attr func ( optionalAttr ) <nl> - <nl> - / / RandomPoissonV2Seed sets the optional seed attribute to value . 
<nl> - / / <nl> - / / value : If either ` seed ` or ` seed2 ` are set to be non - zero , the random number <nl> - / / generator is seeded by the given seed . Otherwise , it is seeded by a <nl> - / / random seed . <nl> - / / If not specified , defaults to 0 <nl> - func RandomPoissonV2Seed ( value int64 ) RandomPoissonV2Attr { <nl> - return func ( m optionalAttr ) { <nl> - m [ " seed " ] = value <nl> - } <nl> - } <nl> - <nl> - / / RandomPoissonV2Seed2 sets the optional seed2 attribute to value . <nl> - / / <nl> - / / value : A second seed to avoid seed collision . <nl> - / / If not specified , defaults to 0 <nl> - func RandomPoissonV2Seed2 ( value int64 ) RandomPoissonV2Attr { <nl> - return func ( m optionalAttr ) { <nl> - m [ " seed2 " ] = value <nl> - } <nl> - } <nl> - <nl> - / / RandomPoissonV2Dtype sets the optional dtype attribute to value . <nl> - / / If not specified , defaults to DT_INT64 <nl> - func RandomPoissonV2Dtype ( value tf . DataType ) RandomPoissonV2Attr { <nl> - return func ( m optionalAttr ) { <nl> - m [ " dtype " ] = value <nl> - } <nl> - } <nl> - <nl> - / / Outputs random values from the Poisson distribution ( s ) described by rate . <nl> - / / <nl> - / / This op uses two algorithms , depending on rate . If rate > = 10 , then <nl> - / / the algorithm by Hormann is used to acquire samples via <nl> - / / transformation - rejection . <nl> - / / See http : / / www . sciencedirect . com / science / article / pii / 0167668793909974 . <nl> - / / <nl> - / / Otherwise , Knuth ' s algorithm is used to acquire samples via multiplying uniform <nl> - / / random variables . <nl> - / / See Donald E . Knuth ( 1969 ) . Seminumerical Algorithms . The Art of Computer <nl> - / / Programming , Volume 2 . Addison Wesley <nl> - / / <nl> - / / Arguments : <nl> - / / shape : 1 - D integer tensor . Shape of independent samples to draw from each <nl> - / / distribution described by the shape parameters given in rate . <nl> - / / rate : A tensor in which each scalar is a " rate " parameter describing the <nl> - / / associated poisson distribution . <nl> - / / <nl> - / / Returns A tensor with shape ` shape + shape ( rate ) ` . Each slice <nl> - / / ` [ : , . . . , : , i0 , i1 , . . . iN ] ` contains the samples drawn for <nl> - / / ` rate [ i0 , i1 , . . . iN ] ` . <nl> - func RandomPoissonV2 ( scope * Scope , shape tf . Output , rate tf . Output , optional . . . RandomPoissonV2Attr ) ( output tf . Output ) { <nl> - if scope . Err ( ) ! = nil { <nl> - return <nl> - } <nl> - attrs : = map [ string ] interface { } { } <nl> - for _ , a : = range optional { <nl> - a ( attrs ) <nl> - } <nl> - opspec : = tf . OpSpec { <nl> - Type : " RandomPoissonV2 " , <nl> - Input : [ ] tf . Input { <nl> - shape , rate , <nl> - } , <nl> - Attrs : attrs , <nl> - } <nl> - op : = scope . AddOperation ( opspec ) <nl> - return op . Output ( 0 ) <nl> - } <nl> - <nl> / / MatrixTriangularSolveAttr is an optional argument to MatrixTriangularSolve . <nl> type MatrixTriangularSolveAttr func ( optionalAttr ) <nl> <nl> func UnsortedSegmentProd ( scope * Scope , data tf . Output , segment_ids tf . Output , nu <nl> return op . Output ( 0 ) <nl> } <nl> <nl> - / / RandomUniformIntAttr is an optional argument to RandomUniformInt . <nl> - type RandomUniformIntAttr func ( optionalAttr ) <nl> - <nl> - / / RandomUniformIntSeed sets the optional seed attribute to value . <nl> - / / <nl> - / / value : If either ` seed ` or ` seed2 ` are set to be non - zero , the random number <nl> - / / generator is seeded by the given seed . 
Otherwise , it is seeded by a <nl> - / / random seed . <nl> - / / If not specified , defaults to 0 <nl> - func RandomUniformIntSeed ( value int64 ) RandomUniformIntAttr { <nl> - return func ( m optionalAttr ) { <nl> - m [ " seed " ] = value <nl> - } <nl> - } <nl> - <nl> - / / RandomUniformIntSeed2 sets the optional seed2 attribute to value . <nl> - / / <nl> - / / value : A second seed to avoid seed collision . <nl> - / / If not specified , defaults to 0 <nl> - func RandomUniformIntSeed2 ( value int64 ) RandomUniformIntAttr { <nl> - return func ( m optionalAttr ) { <nl> - m [ " seed2 " ] = value <nl> - } <nl> - } <nl> - <nl> - / / Outputs random integers from a uniform distribution . <nl> - / / <nl> - / / The generated values are uniform integers in the range ` [ minval , maxval ) ` . <nl> - / / The lower bound ` minval ` is included in the range , while the upper bound <nl> - / / ` maxval ` is excluded . <nl> - / / <nl> - / / The random integers are slightly biased unless ` maxval - minval ` is an exact <nl> - / / power of two . The bias is small for values of ` maxval - minval ` significantly <nl> - / / smaller than the range of the output ( either ` 2 ^ 32 ` or ` 2 ^ 64 ` ) . <nl> - / / <nl> - / / Arguments : <nl> - / / shape : The shape of the output tensor . <nl> - / / minval : 0 - D . Inclusive lower bound on the generated integers . <nl> - / / maxval : 0 - D . Exclusive upper bound on the generated integers . <nl> - / / <nl> - / / Returns A tensor of the specified shape filled with uniform random integers . <nl> - func RandomUniformInt ( scope * Scope , shape tf . Output , minval tf . Output , maxval tf . Output , optional . . . RandomUniformIntAttr ) ( output tf . Output ) { <nl> - if scope . Err ( ) ! = nil { <nl> - return <nl> - } <nl> - attrs : = map [ string ] interface { } { } <nl> - for _ , a : = range optional { <nl> - a ( attrs ) <nl> - } <nl> - opspec : = tf . OpSpec { <nl> - Type : " RandomUniformInt " , <nl> - Input : [ ] tf . Input { <nl> - shape , minval , maxval , <nl> - } , <nl> - Attrs : attrs , <nl> - } <nl> - op : = scope . AddOperation ( opspec ) <nl> - return op . Output ( 0 ) <nl> - } <nl> - <nl> / / Computes the mean along sparse segments of a tensor . <nl> / / <nl> / / Read <nl> func MakeIterator ( scope * Scope , dataset tf . Output , iterator tf . Output ) ( o * tf . Op <nl> return scope . AddOperation ( opspec ) <nl> } <nl> <nl> - / / Makes the summary of accumulated stats for the batch . <nl> - / / <nl> - / / The summary stats contains gradients and hessians accumulated into the corresponding node and bucket for each example . <nl> - / / <nl> - / / Arguments : <nl> - / / node_ids : int32 Rank 1 Tensor containing node ids , which each example falls into for the requested layer . <nl> - / / gradients : float32 ; Rank 2 Tensor ( shape = [ # examples , 1 ] ) for gradients . <nl> - / / hessians : float32 ; Rank 2 Tensor ( shape = [ # examples , 1 ] ) for hessians . <nl> - / / bucketized_features_list : int32 list of Rank 1 Tensors , each containing the bucketized feature ( for each feature column ) . <nl> - / / max_splits : int ; the maximum number of splits possible in the whole tree . <nl> - / / num_buckets : int ; equals to the maximum possible value of bucketized feature . <nl> - / / <nl> - / / Returns output Rank 4 Tensor ( shape = [ # features , # splits , # buckets , 2 ] ) containing accumulated stats put into the corresponding node and bucket . The first index of 4th dimension refers to gradients , and the second to hessians . 
<nl> - func BoostedTreesMakeStatsSummary ( scope * Scope , node_ids tf . Output , gradients tf . Output , hessians tf . Output , bucketized_features_list [ ] tf . Output , max_splits int64 , num_buckets int64 ) ( stats_summary tf . Output ) { <nl> - if scope . Err ( ) ! = nil { <nl> - return <nl> - } <nl> - attrs : = map [ string ] interface { } { " max_splits " : max_splits , " num_buckets " : num_buckets } <nl> - opspec : = tf . OpSpec { <nl> - Type : " BoostedTreesMakeStatsSummary " , <nl> - Input : [ ] tf . Input { <nl> - node_ids , gradients , hessians , tf . OutputList ( bucketized_features_list ) , <nl> - } , <nl> - Attrs : attrs , <nl> - } <nl> - op : = scope . AddOperation ( opspec ) <nl> - return op . Output ( 0 ) <nl> - } <nl> - <nl> / / Adjust the contrast of one or more images . <nl> / / <nl> / / ` images ` is a tensor of at least 3 dimensions . The last 3 dimensions are <nl>
|
Go : Update generated wrapper functions for TensorFlow ops .
|
tensorflow/tensorflow
|
484a5c673a4e31748c99c104acc55ed2d7f4f98a
|
2018-09-19T23:16:40Z
|
mmm a / xbmc / guilib / D3DResource . cpp <nl> ppp b / xbmc / guilib / D3DResource . cpp <nl> unsigned int CD3DTexture : : GetMemoryUsage ( unsigned int pitch ) const <nl> void CD3DTexture : : GenerateMipmaps ( ) <nl> { <nl> if ( m_mipLevels = = 0 ) <nl> - g_Windowing . Get3D11Context ( ) - > GenerateMips ( GetShaderResource ( ) ) ; <nl> + { <nl> + ID3D11ShaderResourceView * pSRView = GetShaderResource ( ) ; <nl> + if ( pSRView ! = nullptr ) <nl> + g_Windowing . Get3D11Context ( ) - > GenerateMips ( pSRView ) ; <nl> + } <nl> } <nl> <nl> / / static methods <nl>
|
win32 : CD3DTexture : added safety check to not generate mipmaps for non existing resource .
|
xbmc/xbmc
|
2e09226e5878ab2990afd71051d7c0d6dd31ccb8
|
2016-10-20T14:55:55Z
|
mmm a / db / db . cpp <nl> ppp b / db / db . cpp <nl> namespace mongo { <nl> break ; <nl> } <nl> <nl> + if ( inShutdown ( ) ) { <nl> + log ( ) < < " got request after shutdown ( ) " < < endl ; <nl> + break ; <nl> + } <nl> + <nl> lastError . startRequest ( m , le ) ; <nl> <nl> DbResponse dbresponse ; <nl>
|
checking inShutdown ( ) when receiving a message
|
mongodb/mongo
|
024506e7770c69c72374e6ded1a0aa78b05a8c6e
|
2010-01-20T22:01:58Z
|
mmm a / lib / Driver / ToolChains . cpp <nl> ppp b / lib / Driver / ToolChains . cpp <nl> static void <nl> addLinkSanitizerLibArgsForLinux ( const ArgList & Args , <nl> ArgStringList & Arguments , <nl> StringRef Sanitizer , const ToolChain & TC ) { <nl> - <nl> - addLinkRuntimeLibForLinux ( Args , Arguments , <nl> - getSanitizerRuntimeLibNameForLinux ( Sanitizer , TC . getTriple ( ) ) , TC ) ; <nl> - <nl> - / / Code here from https : / / github . com / apple / swift - clang / blob / ab3cbe7 / lib / Driver / Tools . cpp # L3264 - L3276 <nl> - / / There ' s no libpthread or librt on RTEMS . <nl> - if ( TC . getTriple ( ) . getOS ( ) ! = llvm : : Triple : : RTEMS ) { <nl> - Arguments . push_back ( " - lpthread " ) ; <nl> - Arguments . push_back ( " - lrt " ) ; <nl> - } <nl> - Arguments . push_back ( " - lm " ) ; <nl> - / / There ' s no libdl on FreeBSD or RTEMS . <nl> - if ( TC . getTriple ( ) . getOS ( ) ! = llvm : : Triple : : FreeBSD & & <nl> - TC . getTriple ( ) . getOS ( ) ! = llvm : : Triple : : RTEMS ) <nl> - Arguments . push_back ( " - ldl " ) ; <nl> - <nl> + addLinkRuntimeLibForLinux ( Args , Arguments , <nl> + getSanitizerRuntimeLibNameForLinux ( Sanitizer , TC . getTriple ( ) ) , TC ) ; <nl> + <nl> + / / Code taken from <nl> + / / https : / / github . com / apple / swift - clang / blob / ab3cbe7 / lib / Driver / Tools . cpp # L3264 - L3276 <nl> + / / There ' s no libpthread or librt on RTEMS . <nl> + if ( TC . getTriple ( ) . getOS ( ) ! = llvm : : Triple : : RTEMS ) { <nl> + Arguments . push_back ( " - lpthread " ) ; <nl> + Arguments . push_back ( " - lrt " ) ; <nl> + } <nl> + Arguments . push_back ( " - lm " ) ; <nl> + <nl> + / / There ' s no libdl on FreeBSD or RTEMS . <nl> + if ( TC . getTriple ( ) . getOS ( ) ! = llvm : : Triple : : FreeBSD & & <nl> + TC . getTriple ( ) . getOS ( ) ! = llvm : : Triple : : RTEMS ) <nl> + Arguments . push_back ( " - ldl " ) ; <nl> } <nl> <nl> static void <nl>
|
Merge remote - tracking branch ' origin / master ' into master - next
|
apple/swift
|
7349ec9a1a9727e8965e9d3a0f2e28355cfa9d8c
|
2017-08-08T02:08:52Z
|
mmm a / xbmc / TextureDatabase . cpp <nl> ppp b / xbmc / TextureDatabase . cpp <nl> void CTextureDatabase : : CreateAnalytics ( ) <nl> m_pDS - > exec ( " CREATE TRIGGER textureDelete AFTER delete ON texture FOR EACH ROW BEGIN delete from sizes where sizes . idtexture = old . id ; END " ) ; <nl> } <nl> <nl> - bool CTextureDatabase : : UpdateOldVersion ( int version ) <nl> + void CTextureDatabase : : UpdateTables ( int version ) <nl> { <nl> if ( version < 7 ) <nl> { / / update all old thumb : / / foo urls to image : / / foo ? size = thumb <nl> bool CTextureDatabase : : UpdateOldVersion ( int version ) <nl> m_pDS - > exec ( " CREATE TABLE texture ( id integer primary key , url text , cachedurl text , imagehash text , lasthashcheck text ) " ) ; <nl> m_pDS - > exec ( " CREATE TABLE sizes ( idtexture integer , size integer , width integer , height integer , usecount integer , lastusetime text ) " ) ; <nl> } <nl> - return true ; <nl> } <nl> <nl> bool CTextureDatabase : : IncrementUseCount ( const CTextureDetails & details ) <nl> mmm a / xbmc / TextureDatabase . h <nl> ppp b / xbmc / TextureDatabase . h <nl> class CTextureDatabase : public CDatabase , public IDatabaseQueryRuleFactory <nl> <nl> virtual void CreateTables ( ) ; <nl> virtual void CreateAnalytics ( ) ; <nl> - virtual bool UpdateOldVersion ( int version ) ; <nl> + virtual void UpdateTables ( int version ) ; <nl> virtual int GetMinVersion ( ) const { return 13 ; } ; <nl> const char * GetBaseDBName ( ) const { return " Textures " ; } ; <nl> } ; <nl> mmm a / xbmc / addons / AddonDatabase . cpp <nl> ppp b / xbmc / addons / AddonDatabase . cpp <nl> void CAddonDatabase : : CreateAnalytics ( ) <nl> m_pDS - > exec ( " CREATE UNIQUE INDEX idxPackage ON package ( filename ) " ) ; <nl> } <nl> <nl> - bool CAddonDatabase : : UpdateOldVersion ( int version ) <nl> + void CAddonDatabase : : UpdateTables ( int version ) <nl> { <nl> if ( version < 13 ) <nl> { <nl> bool CAddonDatabase : : UpdateOldVersion ( int version ) <nl> { <nl> m_pDS - > exec ( " CREATE TABLE package ( id integer primary key , addonID text , filename text , hash text ) \ n " ) ; <nl> } <nl> - return true ; <nl> } <nl> <nl> int CAddonDatabase : : AddAddon ( const AddonPtr & addon , <nl> mmm a / xbmc / addons / AddonDatabase . h <nl> ppp b / xbmc / addons / AddonDatabase . h <nl> class CAddonDatabase : public CDatabase <nl> protected : <nl> virtual void CreateTables ( ) ; <nl> virtual void CreateAnalytics ( ) ; <nl> - virtual bool UpdateOldVersion ( int version ) ; <nl> + virtual void UpdateTables ( int version ) ; <nl> virtual int GetMinVersion ( ) const { return 16 ; } <nl> const char * GetBaseDBName ( ) const { return " Addons " ; } <nl> <nl> mmm a / xbmc / dbwrappers / Database . cpp <nl> ppp b / xbmc / dbwrappers / Database . cpp <nl> bool CDatabase : : UpdateVersion ( const CStdString & dbName ) <nl> else if ( version < GetMinVersion ( ) ) <nl> { <nl> CLog : : Log ( LOGNOTICE , " Attempting to update the database % s from version % i to % i " , dbName . 
c_str ( ) , version , GetMinVersion ( ) ) ; <nl> - bool success = false ; <nl> + bool success = true ; <nl> BeginTransaction ( ) ; <nl> try <nl> { <nl> / / drop old analytics , update table ( s ) , recreate analytics , update version <nl> m_pDB - > drop_analytics ( ) ; <nl> - success = UpdateOldVersion ( version ) ; <nl> - if ( success ) <nl> - { <nl> - CreateAnalytics ( ) ; <nl> - success = UpdateVersionNumber ( ) ; <nl> - } <nl> + UpdateTables ( version ) ; <nl> + CreateAnalytics ( ) ; <nl> + UpdateVersionNumber ( ) ; <nl> } <nl> catch ( . . . ) <nl> { <nl> bool CDatabase : : CreateDatabase ( ) <nl> return true ; <nl> } <nl> <nl> - bool CDatabase : : UpdateVersionNumber ( ) <nl> + void CDatabase : : UpdateVersionNumber ( ) <nl> { <nl> CStdString strSQL = PrepareSQL ( " UPDATE version SET idVersion = % i \ n " , GetMinVersion ( ) ) ; <nl> m_pDS - > exec ( strSQL . c_str ( ) ) ; <nl> - return true ; <nl> } <nl> <nl> bool CDatabase : : BuildSQL ( const CStdString & strQuery , const Filter & filter , CStdString & strSQL ) <nl> mmm a / xbmc / dbwrappers / Database . h <nl> ppp b / xbmc / dbwrappers / Database . h <nl> class CDatabase <nl> * / <nl> virtual void CreateAnalytics ( ) = 0 ; <nl> <nl> - virtual bool UpdateOldVersion ( int version ) { return true ; } ; <nl> + / * \ brief Update database tables to the current version . <nl> + Note that analytics ( views , indices , triggers ) are not present during this <nl> + function , so don ' t rely on them . <nl> + * / <nl> + virtual void UpdateTables ( int version ) { } ; <nl> <nl> / * \ brief The minimum schema version that we support updating from . <nl> * / <nl> class CDatabase <nl> private : <nl> void InitSettings ( DatabaseSettings & dbSettings ) ; <nl> bool Connect ( const CStdString & dbName , const DatabaseSettings & db , bool create ) ; <nl> - bool UpdateVersionNumber ( ) ; <nl> + void UpdateVersionNumber ( ) ; <nl> <nl> bool m_bMultiWrite ; / * ! < True if there are any queries in the queue , false otherwise * / <nl> unsigned int m_openCount ; <nl> mmm a / xbmc / epg / EpgDatabase . cpp <nl> ppp b / xbmc / epg / EpgDatabase . cpp <nl> void CEpgDatabase : : CreateAnalytics ( ) <nl> m_pDS - > exec ( " CREATE INDEX idx_epg_iEndTime on epgtags ( iEndTime ) ; " ) ; <nl> } <nl> <nl> - bool CEpgDatabase : : UpdateOldVersion ( int iVersion ) <nl> + void CEpgDatabase : : UpdateTables ( int iVersion ) <nl> { <nl> - bool bReturn = true ; <nl> - <nl> - BeginTransaction ( ) ; <nl> - <nl> - try <nl> - { <nl> - if ( iVersion < 5 ) <nl> - m_pDS - > exec ( " ALTER TABLE epgtags ADD sGenre varchar ( 128 ) ; " ) ; <nl> - } <nl> - catch ( . . . ) <nl> - { <nl> - CLog : : Log ( LOGERROR , " Error attempting to update the database version ! " ) ; <nl> - bReturn = false ; <nl> - } <nl> - <nl> - if ( bReturn ) <nl> - CommitTransaction ( ) ; <nl> - else <nl> - RollbackTransaction ( ) ; <nl> - <nl> - return bReturn ; <nl> + if ( iVersion < 5 ) <nl> + m_pDS - > exec ( " ALTER TABLE epgtags ADD sGenre varchar ( 128 ) ; " ) ; <nl> } <nl> <nl> bool CEpgDatabase : : DeleteEpg ( void ) <nl> mmm a / xbmc / epg / EpgDatabase . h <nl> ppp b / xbmc / epg / EpgDatabase . h <nl> namespace EPG <nl> / * ! <nl> * @ brief Update an old version of the database . <nl> * @ param version The version to update the database from . <nl> - * @ return True if it was updated successfully , false otherwise . 
<nl> * / <nl> - virtual bool UpdateOldVersion ( int version ) ; <nl> + virtual void UpdateTables ( int version ) ; <nl> virtual int GetMinSchemaVersion ( ) const { return 4 ; } <nl> } ; <nl> } <nl> mmm a / xbmc / music / MusicDatabase . cpp <nl> ppp b / xbmc / music / MusicDatabase . cpp <nl> bool CMusicDatabase : : GetSongsNav ( const CStdString & strBaseDir , CFileItemList & it <nl> return GetSongsByWhere ( musicUrl . ToString ( ) , filter , items , sortDescription ) ; <nl> } <nl> <nl> - bool CMusicDatabase : : UpdateOldVersion ( int version ) <nl> + void CMusicDatabase : : UpdateTables ( int version ) <nl> { <nl> if ( version < 16 ) <nl> { <nl> bool CMusicDatabase : : UpdateOldVersion ( int version ) <nl> strSQL = PrepareSQL ( " SELECT album . idAlbum AS idAlbum , strExtraArtists , " <nl> " album . idArtist AS idArtist , strArtist FROM album " <nl> " LEFT OUTER JOIN artist ON album . idArtist = artist . idArtist " ) ; <nl> - if ( ! m_pDS - > query ( strSQL . c_str ( ) ) ) <nl> - { <nl> - CLog : : Log ( LOGDEBUG , " % s could not upgrade albums table " , __FUNCTION__ ) ; <nl> - return false ; <nl> - } <nl> + m_pDS - > query ( strSQL . c_str ( ) ) ; <nl> <nl> VECALBUMS albums ; <nl> while ( ! m_pDS - > eof ( ) ) <nl> bool CMusicDatabase : : UpdateOldVersion ( int version ) <nl> strSQL = PrepareSQL ( " SELECT song . idSong AS idSong , strExtraArtists , " <nl> " song . idArtist AS idArtist , strArtist FROM song " <nl> " LEFT OUTER JOIN artist ON song . idArtist = artist . idArtist " ) ; <nl> - if ( ! m_pDS - > query ( strSQL . c_str ( ) ) ) <nl> - { <nl> - CLog : : Log ( LOGDEBUG , " % s could not upgrade songs table " , __FUNCTION__ ) ; <nl> - return false ; <nl> - } <nl> + m_pDS - > query ( strSQL . c_str ( ) ) ; <nl> <nl> VECSONGS songs ; <nl> while ( ! m_pDS - > eof ( ) ) <nl> bool CMusicDatabase : : UpdateOldVersion ( int version ) <nl> strSQL = PrepareSQL ( " SELECT album . idAlbum AS idAlbum , strExtraGenres , " <nl> " album . idGenre AS idGenre , strGenre FROM album " <nl> " JOIN genre ON album . idGenre = genre . idGenre " ) ; <nl> - if ( ! m_pDS - > query ( strSQL . c_str ( ) ) ) <nl> - { <nl> - CLog : : Log ( LOGDEBUG , " % s could not upgrade albums table " , __FUNCTION__ ) ; <nl> - return false ; <nl> - } <nl> + m_pDS - > query ( strSQL . c_str ( ) ) ; <nl> <nl> VECALBUMS albums ; <nl> while ( ! m_pDS - > eof ( ) ) <nl> bool CMusicDatabase : : UpdateOldVersion ( int version ) <nl> strSQL = PrepareSQL ( " SELECT song . idSong AS idSong , strExtraGenres , " <nl> " song . idGenre AS idGenre , strGenre FROM song " <nl> " JOIN genre ON song . idGenre = genre . idGenre " ) ; <nl> - if ( ! m_pDS - > query ( strSQL . c_str ( ) ) ) <nl> - { <nl> - CLog : : Log ( LOGDEBUG , " % s could not upgrade songs table " , __FUNCTION__ ) ; <nl> - return false ; <nl> - } <nl> + m_pDS - > query ( strSQL . c_str ( ) ) ; <nl> <nl> VECSONGS songs ; <nl> while ( ! m_pDS - > eof ( ) ) <nl> bool CMusicDatabase : : UpdateOldVersion ( int version ) <nl> m_pDS - > exec ( " UPDATE song_artist SET strJoinPhrase = ' ' WHERE 100 * idSong + iOrder IN ( SELECT id FROM ( SELECT 100 * idSong + max ( iOrder ) AS id FROM song_artist GROUP BY idSong ) AS sub ) " ) ; <nl> m_pDS - > exec ( " UPDATE album_artist SET strJoinPhrase = ' ' WHERE 100 * idAlbum + iOrder IN ( SELECT id FROM ( SELECT 100 * idAlbum + max ( iOrder ) AS id FROM album_artist GROUP BY idAlbum ) AS sub ) " ) ; <nl> } <nl> - return true ; <nl> } <nl> <nl> int CMusicDatabase : : GetMinVersion ( ) const <nl> mmm a / xbmc / music / MusicDatabase . 
h <nl> ppp b / xbmc / music / MusicDatabase . h <nl> class CMusicDatabase : public CDatabase <nl> bool CleanupAlbums ( ) ; <nl> bool CleanupArtists ( ) ; <nl> bool CleanupGenres ( ) ; <nl> - virtual bool UpdateOldVersion ( int version ) ; <nl> + virtual void UpdateTables ( int version ) ; <nl> bool SearchArtists ( const CStdString & search , CFileItemList & artists ) ; <nl> bool SearchAlbums ( const CStdString & search , CFileItemList & albums ) ; <nl> bool SearchSongs ( const CStdString & strSearch , CFileItemList & songs ) ; <nl> mmm a / xbmc / pvr / PVRDatabase . cpp <nl> ppp b / xbmc / pvr / PVRDatabase . cpp <nl> void CPVRDatabase : : CreateAnalytics ( ) <nl> m_pDS - > exec ( " CREATE UNIQUE INDEX idx_idGroup_idChannel on map_channelgroups_channels ( idGroup , idChannel ) ; " ) ; <nl> } <nl> <nl> - bool CPVRDatabase : : UpdateOldVersion ( int iVersion ) <nl> + void CPVRDatabase : : UpdateTables ( int iVersion ) <nl> { <nl> - bool bReturn = true ; <nl> + if ( iVersion < 13 ) <nl> + m_pDS - > exec ( " ALTER TABLE channels ADD idEpg integer ; " ) ; <nl> <nl> - BeginTransaction ( ) ; <nl> + if ( iVersion < 14 ) <nl> + m_pDS - > exec ( " ALTER TABLE channelsettings ADD fCustomVerticalShift float ; " ) ; <nl> <nl> - try <nl> + if ( iVersion < 15 ) <nl> + { <nl> + m_pDS - > exec ( " ALTER TABLE channelsettings ADD bCustomNonLinStretch bool ; " ) ; <nl> + m_pDS - > exec ( " ALTER TABLE channelsettings ADD bPostProcess bool ; " ) ; <nl> + m_pDS - > exec ( " ALTER TABLE channelsettings ADD iScalingMethod integer ; " ) ; <nl> + } <nl> + if ( iVersion < 16 ) <nl> + { <nl> + / * sqlite apparently can ' t delete columns from an existing table , so just leave the extra column alone * / <nl> + } <nl> + if ( iVersion < 17 ) <nl> + { <nl> + m_pDS - > exec ( " ALTER TABLE channelsettings ADD iDeinterlaceMode integer " ) ; <nl> + m_pDS - > exec ( " UPDATE channelsettings SET iDeinterlaceMode = 2 WHERE iInterlaceMethod NOT IN ( 0 , 1 ) " ) ; / / anything other than none : method auto = > mode force <nl> + m_pDS - > exec ( " UPDATE channelsettings SET iDeinterlaceMode = 1 WHERE iInterlaceMethod = 1 " ) ; / / method auto = > mode auto <nl> + m_pDS - > exec ( " UPDATE channelsettings SET iDeinterlaceMode = 0 , iInterlaceMethod = 1 WHERE iInterlaceMethod = 0 " ) ; / / method none = > mode off , method auto <nl> + } <nl> + if ( iVersion < 19 ) <nl> { <nl> + / / bit of a hack , but we need to keep the version / contents of the non - pvr databases the same to allow clean upgrades <nl> + ADDON : : VECADDONS addons ; <nl> + if ( ! CAddonMgr : : Get ( ) . GetAddons ( ADDON_PVRDLL , addons , true ) ) <nl> + CLog : : Log ( LOGERROR , " PVR - % s - failed to get add - ons from the add - on manager " , __FUNCTION__ ) ; <nl> + else <nl> { <nl> - if ( iVersion < 13 ) <nl> - m_pDS - > exec ( " ALTER TABLE channels ADD idEpg integer ; " ) ; <nl> - <nl> - if ( iVersion < 14 ) <nl> - m_pDS - > exec ( " ALTER TABLE channelsettings ADD fCustomVerticalShift float ; " ) ; <nl> - <nl> - if ( iVersion < 15 ) <nl> - { <nl> - m_pDS - > exec ( " ALTER TABLE channelsettings ADD bCustomNonLinStretch bool ; " ) ; <nl> - m_pDS - > exec ( " ALTER TABLE channelsettings ADD bPostProcess bool ; " ) ; <nl> - m_pDS - > exec ( " ALTER TABLE channelsettings ADD iScalingMethod integer ; " ) ; <nl> - } <nl> - if ( iVersion < 16 ) <nl> - { <nl> - / * sqlite apparently can ' t delete columns from an existing table , so just leave the extra column alone * / <nl> - } <nl> - if ( iVersion < 17 ) <nl> + CAddonDatabase database ; <nl> + database . 
Open ( ) ; <nl> + for ( IVECADDONS it = addons . begin ( ) ; it ! = addons . end ( ) ; it + + ) <nl> { <nl> - m_pDS - > exec ( " ALTER TABLE channelsettings ADD iDeinterlaceMode integer " ) ; <nl> - m_pDS - > exec ( " UPDATE channelsettings SET iDeinterlaceMode = 2 WHERE iInterlaceMethod NOT IN ( 0 , 1 ) " ) ; / / anything other than none : method auto = > mode force <nl> - m_pDS - > exec ( " UPDATE channelsettings SET iDeinterlaceMode = 1 WHERE iInterlaceMethod = 1 " ) ; / / method auto = > mode auto <nl> - m_pDS - > exec ( " UPDATE channelsettings SET iDeinterlaceMode = 0 , iInterlaceMethod = 1 WHERE iInterlaceMethod = 0 " ) ; / / method none = > mode off , method auto <nl> + if ( ! database . IsSystemPVRAddonEnabled ( it - > get ( ) - > ID ( ) ) ) <nl> + CAddonMgr : : Get ( ) . DisableAddon ( it - > get ( ) - > ID ( ) ) ; <nl> } <nl> - if ( iVersion < 19 ) <nl> - { <nl> - / / bit of a hack , but we need to keep the version / contents of the non - pvr databases the same to allow clean upgrades <nl> - ADDON : : VECADDONS addons ; <nl> - if ( ( bReturn = CAddonMgr : : Get ( ) . GetAddons ( ADDON_PVRDLL , addons , true ) ) = = false ) <nl> - CLog : : Log ( LOGERROR , " PVR - % s - failed to get add - ons from the add - on manager " , __FUNCTION__ ) ; <nl> - else <nl> - { <nl> - CAddonDatabase database ; <nl> - database . Open ( ) ; <nl> - for ( IVECADDONS it = addons . begin ( ) ; it ! = addons . end ( ) ; it + + ) <nl> - { <nl> - if ( ! database . IsSystemPVRAddonEnabled ( it - > get ( ) - > ID ( ) ) ) <nl> - CAddonMgr : : Get ( ) . DisableAddon ( it - > get ( ) - > ID ( ) ) ; <nl> - } <nl> - database . Close ( ) ; <nl> - } <nl> - } <nl> - if ( iVersion < 20 ) <nl> - m_pDS - > exec ( " ALTER TABLE channels ADD bIsUserSetIcon bool " ) ; <nl> - <nl> - if ( iVersion < 21 ) <nl> - m_pDS - > exec ( " ALTER TABLE channelgroups ADD iGroupType integer " ) ; <nl> - <nl> - if ( iVersion < 22 ) <nl> - m_pDS - > exec ( " ALTER TABLE channels ADD bIsLocked bool " ) ; <nl> + database . Close ( ) ; <nl> } <nl> } <nl> - catch ( . . . ) <nl> - { <nl> - CLog : : Log ( LOGERROR , " PVR - % s - error attempting to update the database version ! " , __FUNCTION__ ) ; <nl> - bReturn = false ; <nl> - } <nl> + if ( iVersion < 20 ) <nl> + m_pDS - > exec ( " ALTER TABLE channels ADD bIsUserSetIcon bool " ) ; <nl> <nl> - if ( bReturn ) <nl> - CommitTransaction ( ) ; <nl> - else <nl> - RollbackTransaction ( ) ; <nl> + if ( iVersion < 21 ) <nl> + m_pDS - > exec ( " ALTER TABLE channelgroups ADD iGroupType integer " ) ; <nl> <nl> - return bReturn ; <nl> + if ( iVersion < 22 ) <nl> + m_pDS - > exec ( " ALTER TABLE channels ADD bIsLocked bool " ) ; <nl> } <nl> <nl> int CPVRDatabase : : GetLastChannelId ( void ) <nl> mmm a / xbmc / pvr / PVRDatabase . h <nl> ppp b / xbmc / pvr / PVRDatabase . h <nl> namespace PVR <nl> / * ! <nl> * @ brief Update an old version of the database . <nl> * @ param version The version to update the database from . <nl> - * @ return True if it was updated successfully , false otherwise . <nl> * / <nl> - bool UpdateOldVersion ( int version ) ; <nl> + void UpdateTables ( int version ) ; <nl> int GetMinSchemaVersion ( ) { return 11 ; } <nl> <nl> bool PersistGroupMembers ( CPVRChannelGroup & group ) ; <nl> mmm a / xbmc / video / VideoDatabase . cpp <nl> ppp b / xbmc / video / VideoDatabase . 
cpp <nl> class CArtItem <nl> string media_type ; <nl> } ; <nl> <nl> - bool CVideoDatabase : : UpdateOldVersion ( int iVersion ) <nl> + void CVideoDatabase : : UpdateTables ( int iVersion ) <nl> { <nl> if ( iVersion < 43 ) <nl> { <nl> bool CVideoDatabase : : UpdateOldVersion ( int iVersion ) <nl> } <nl> if ( iVersion < 77 ) <nl> m_pDS - > exec ( " ALTER TABLE streamdetails ADD strStereoMode text " ) ; <nl> - <nl> - return true ; <nl> } <nl> <nl> int CVideoDatabase : : GetMinVersion ( ) const <nl> mmm a / xbmc / video / VideoDatabase . h <nl> ppp b / xbmc / video / VideoDatabase . h <nl> class CVideoDatabase : public CDatabase <nl> private : <nl> virtual void CreateTables ( ) ; <nl> virtual void CreateAnalytics ( ) ; <nl> - virtual bool UpdateOldVersion ( int version ) ; <nl> + virtual void UpdateTables ( int version ) ; <nl> <nl> / * ! \ brief ( Re ) Create the generic database views for movies , tvshows , <nl> episodes and music videos <nl> mmm a / xbmc / view / ViewDatabase . cpp <nl> ppp b / xbmc / view / ViewDatabase . cpp <nl> void CViewDatabase : : CreateAnalytics ( ) <nl> m_pDS - > exec ( " CREATE INDEX idxViewsWindow ON view ( window ) " ) ; <nl> } <nl> <nl> - bool CViewDatabase : : UpdateOldVersion ( int version ) <nl> + void CViewDatabase : : UpdateTables ( int version ) <nl> { <nl> if ( version < 4 ) <nl> m_pDS - > exec ( " alter table view add skin text " ) ; <nl> bool CViewDatabase : : UpdateOldVersion ( int version ) <nl> } <nl> m_pDS - > exec ( " DROP TABLE tmp_view " ) ; <nl> } <nl> - <nl> - return true ; <nl> } <nl> <nl> bool CViewDatabase : : GetViewState ( const CStdString & path , int window , CViewState & state , const CStdString & skin ) <nl> mmm a / xbmc / view / ViewDatabase . h <nl> ppp b / xbmc / view / ViewDatabase . h <nl> class CViewDatabase : public CDatabase <nl> protected : <nl> virtual void CreateTables ( ) ; <nl> virtual void CreateAnalytics ( ) ; <nl> - virtual bool UpdateOldVersion ( int version ) ; <nl> + virtual void UpdateTables ( int version ) ; <nl> virtual int GetMinVersion ( ) const { return 6 ; } ; <nl> const char * GetBaseDBName ( ) const { return " ViewModes " ; } ; <nl> } ; <nl>
|
[ cosmetics ] rename UpdateOldVersion to UpdateTables for consistency . Drop unneeded try / catch and transaction blocks , make this and UpdateVersionNumber return
|
xbmc/xbmc
|
f576c8da2420103de115cc09ea7bf7bc15c79205
|
2014-02-05T20:27:33Z
|
mmm a / hphp / hhbbc / interp - minstr . cpp <nl> ppp b / hphp / hhbbc / interp - minstr . cpp <nl> void miFinalSetElem ( MIS & env ) { <nl> <nl> / * <nl> * In some unusual cases with illegal keys , SetM pushes null <nl> - * instead of the right hand side . If the base is a string , it <nl> - * pushes a new string with the value of the first character of <nl> - * the right hand side converted to a string ( or something like <nl> - * that ) , so for now we ' re leaving out string bases too . <nl> + * instead of the right hand side . <nl> + * <nl> + * There are also some special cases for SetM for different base types : <nl> + * 1 . If the base is a string , SetM pushes a new string with the <nl> + * value of the first character of the right hand side converted <nl> + * to a string ( or something like that ) . <nl> + * 2 . If the base is a primitive type , SetM pushes null . <nl> + * 3 . If the base is an object , and it does not implement ArrayAccess , <nl> + * it is still ok to push the right hand side , because it is a <nl> + * fatal . <nl> + * <nl> + * We push the right hand side on the stack only if the base is an <nl> + * array , object or emptyish . <nl> * / <nl> - auto const isWeird = env . base . type . couldBe ( TStr ) | | <nl> - key . couldBe ( TObj ) | | <nl> - key . couldBe ( TArr ) ; <nl> + auto const isWeird = key . couldBe ( TObj ) | | <nl> + key . couldBe ( TArr ) | | <nl> + ( ! env . base . type . subtypeOf ( TArr ) & & <nl> + ! env . base . type . subtypeOf ( TObj ) & & <nl> + ! mustBeEmptyish ( env . base . type ) ) ; <nl> <nl> if ( mustBeInFrame ( env . base ) & & env . base . type . subtypeOf ( TArr ) ) { <nl> env . base . type = array_set ( env . base . type , key , t1 ) ; <nl>
|
Fix hhbbc for SetElem with non - array bases .
|
facebook/hhvm
|
5d60c6389d28664d47db16ce551f9798e3fd5bcb
|
2014-06-24T21:45:07Z
|
mmm a / lib / Serialization / Serialization . cpp <nl> ppp b / lib / Serialization / Serialization . cpp <nl> static void writeGroupNames ( const comment_block : : GroupNamesLayout & GroupNames , <nl> Writer . write < uint32_t > ( N . size ( ) ) ; <nl> BlobStream < < N ; <nl> } <nl> - BlobStream . str ( ) ; <nl> SmallVector < uint64_t , 8 > Scratch ; <nl> - GroupNames . emit ( Scratch , Blob ) ; <nl> + GroupNames . emit ( Scratch , BlobStream . str ( ) ) ; <nl> } <nl> <nl> static void writeDeclCommentTable ( <nl>
|
Serialization : avoid flushing via a stringification
|
apple/swift
|
1b48e5bce34939e3c904ab573a3fd2743cb04664
|
2017-02-06T05:27:16Z
|
mmm a / doc / classes / Animation . xml <nl> ppp b / doc / classes / Animation . xml <nl> <nl> < argument index = " 1 " name = " to_animation " type = " Animation " > <nl> < / argument > <nl> < description > <nl> + Adds a new track that is a copy of the given track from [ code ] to_animation [ / code ] . <nl> < / description > <nl> < / method > <nl> < method name = " find_track " qualifiers = " const " > <nl> <nl> < argument index = " 0 " name = " idx " type = " int " > <nl> < / argument > <nl> < description > <nl> + Returns [ code ] true [ / code ] if the track at index [ code ] idx [ / code ] is enabled . <nl> < / description > <nl> < / method > <nl> < method name = " track_is_imported " qualifiers = " const " > <nl> <nl> < argument index = " 1 " name = " enabled " type = " bool " > <nl> < / argument > <nl> < description > <nl> + Enables / disables the given track . Tracks are enabled by default . <nl> < / description > <nl> < / method > <nl> < method name = " track_set_imported " > <nl> mmm a / doc / classes / CanvasItem . xml <nl> ppp b / doc / classes / CanvasItem . xml <nl> <nl> < argument index = " 4 " name = " modulate " type = " Color " default = " Color ( 1 , 1 , 1 , 1 ) " > <nl> < / argument > <nl> < description > <nl> - Draw a string character using a custom font . Returns the advance , depending on the char width and kerning with an optional next char . <nl> + Draws a string character using a custom font . Returns the advance , depending on the char width and kerning with an optional next char . <nl> < / description > <nl> < / method > <nl> < method name = " draw_circle " > <nl> <nl> < argument index = " 2 " name = " color " type = " Color " > <nl> < / argument > <nl> < description > <nl> - Draw a colored circle . <nl> + Draws a colored circle . <nl> < / description > <nl> < / method > <nl> < method name = " draw_colored_polygon " > <nl> <nl> < argument index = " 5 " name = " antialiased " type = " bool " default = " false " > <nl> < / argument > <nl> < description > <nl> - Draw a colored polygon of any amount of points , convex or concave . <nl> + Draws a colored polygon of any amount of points , convex or concave . <nl> < / description > <nl> < / method > <nl> < method name = " draw_line " > <nl> <nl> < argument index = " 4 " name = " antialiased " type = " bool " default = " false " > <nl> < / argument > <nl> < description > <nl> - Draw a line from a 2D point to another , with a given color and width . It can be optionally antialiased . <nl> + Draws a line from a 2D point to another , with a given color and width . It can be optionally antialiased . <nl> < / description > <nl> < / method > <nl> < method name = " draw_multiline " > <nl> <nl> < argument index = " 3 " name = " antialiased " type = " bool " default = " false " > <nl> < / argument > <nl> < description > <nl> + Draws multiple , parallel lines with a uniform [ code ] color [ / code ] and [ code ] width [ / code ] and optional antialiasing . <nl> < / description > <nl> < / method > <nl> < method name = " draw_multiline_colors " > <nl> <nl> < argument index = " 3 " name = " antialiased " type = " bool " default = " false " > <nl> < / argument > <nl> < description > <nl> + Draws multiple , parallel lines with a uniform [ code ] width [ / code ] , segment - by - segment coloring , and optional antialiasing . Colors assigned to line segments match by index between [ code ] points [ / code ] and [ code ] colors [ / code ] . 
<nl> < / description > <nl> < / method > <nl> < method name = " draw_polygon " > <nl> <nl> < argument index = " 5 " name = " antialiased " type = " bool " default = " false " > <nl> < / argument > <nl> < description > <nl> - Draw a polygon of any amount of points , convex or concave . <nl> + Draws a polygon of any amount of points , convex or concave . <nl> < / description > <nl> < / method > <nl> < method name = " draw_polyline " > <nl> <nl> < argument index = " 3 " name = " antialiased " type = " bool " default = " false " > <nl> < / argument > <nl> < description > <nl> - Draw a polyline with a uniform [ code ] color [ / code ] and [ code ] width [ / code ] and optional antialiasing . <nl> + Draws interconnected line segments with a uniform [ code ] color [ / code ] and [ code ] width [ / code ] and optional antialiasing . <nl> < / description > <nl> < / method > <nl> < method name = " draw_polyline_colors " > <nl> <nl> < argument index = " 3 " name = " antialiased " type = " bool " default = " false " > <nl> < / argument > <nl> < description > <nl> - Draw a polyline with a uniform [ code ] width [ / code ] , segment - by - segment coloring , and optional antialiasing . Colors assigned to line segments match by index between [ code ] points [ / code ] and [ code ] colors [ / code ] . <nl> + Draws interconnected line segments with a uniform [ code ] width [ / code ] , segment - by - segment coloring , and optional antialiasing . Colors assigned to line segments match by index between [ code ] points [ / code ] and [ code ] colors [ / code ] . <nl> < / description > <nl> < / method > <nl> < method name = " draw_primitive " > <nl> <nl> < argument index = " 5 " name = " normal_map " type = " Texture " default = " null " > <nl> < / argument > <nl> < description > <nl> - Draw a custom primitive , 1 point for a point , 2 points for a line , 3 points for a triangle and 4 points for a quad . <nl> + Draws a custom primitive , 1 point for a point , 2 points for a line , 3 points for a triangle and 4 points for a quad . <nl> < / description > <nl> < / method > <nl> < method name = " draw_rect " > <nl> <nl> < argument index = " 2 " name = " filled " type = " bool " default = " true " > <nl> < / argument > <nl> < description > <nl> - Draw a colored rectangle . <nl> + Draws a colored rectangle . <nl> < / description > <nl> < / method > <nl> < method name = " draw_set_transform " > <nl> <nl> < argument index = " 4 " name = " clip_w " type = " int " default = " - 1 " > <nl> < / argument > <nl> < description > <nl> - Draw a string using a custom font . <nl> + Draws a string using a custom font . <nl> < / description > <nl> < / method > <nl> < method name = " draw_style_box " > <nl> <nl> < argument index = " 1 " name = " rect " type = " Rect2 " > <nl> < / argument > <nl> < description > <nl> - Draw a styled rectangle . <nl> + Draws a styled rectangle . <nl> < / description > <nl> < / method > <nl> < method name = " draw_texture " > <nl> <nl> < argument index = " 3 " name = " normal_map " type = " Texture " default = " null " > <nl> < / argument > <nl> < description > <nl> - Draw a texture at a given position . <nl> + Draws a texture at a given position . <nl> < / description > <nl> < / method > <nl> < method name = " draw_texture_rect " > <nl> <nl> < argument index = " 5 " name = " normal_map " type = " Texture " default = " null " > <nl> < / argument > <nl> < description > <nl> - Draw a textured rectangle at a given position , optionally modulated by a color . 
Transpose swaps the x and y coordinates when reading the texture . <nl> + Draws a textured rectangle at a given position , optionally modulated by a color . Transpose swaps the x and y coordinates when reading the texture . <nl> < / description > <nl> < / method > <nl> < method name = " draw_texture_rect_region " > <nl> <nl> < argument index = " 6 " name = " clip_uv " type = " bool " default = " true " > <nl> < / argument > <nl> < description > <nl> - Draw a textured rectangle region at a given position , optionally modulated by a color . Transpose swaps the x and y coordinates when reading the texture . <nl> + Draws a textured rectangle region at a given position , optionally modulated by a color . Transpose swaps the x and y coordinates when reading the texture . <nl> < / description > <nl> < / method > <nl> < method name = " get_canvas " qualifiers = " const " > <nl> mmm a / doc / classes / Curve . xml <nl> ppp b / doc / classes / Curve . xml <nl> <nl> < ? xml version = " 1 . 0 " encoding = " UTF - 8 " ? > <nl> < class name = " Curve " inherits = " Resource " category = " Core " version = " 3 . 0 - beta " > <nl> < brief_description > <nl> + A mathematic curve . <nl> < / brief_description > <nl> < description > <nl> + A curve that can be saved and re - used for other objects . By default it ranges between [ code ] 0 [ / code ] and [ code ] 1 [ / code ] on the y - axis and positions points relative to the [ code ] 0 . 5 [ / code ] y - position . <nl> < / description > <nl> < tutorials > <nl> < / tutorials > <nl> <nl> < argument index = " 4 " name = " right_mode " type = " int " enum = " Curve . TangentMode " default = " 0 " > <nl> < / argument > <nl> < description > <nl> + Adds a point to the curve . For each side , if the [ code ] * _mode [ / code ] is [ code ] TANGENT_LINEAR [ / code ] , the [ code ] * _tangent [ / code ] angle ( in degrees ) uses the slope of the curve halfway to the adjacent point . Allows custom assignments to the [ code ] * _tangent [ / code ] angle if [ code ] * _mode [ / code ] is set to [ code ] TANGENT_FREE [ / code ] . <nl> < / description > <nl> < / method > <nl> < method name = " bake " > <nl> < return type = " void " > <nl> < / return > <nl> < description > <nl> + Recomputes the baked cache of points for the curve . <nl> < / description > <nl> < / method > <nl> < method name = " clean_dupes " > <nl> < return type = " void " > <nl> < / return > <nl> < description > <nl> + Removes points that are closer than [ code ] CMP_EPSILON [ / code ] ( 0 . 00001 ) units to their neighbor on the curve . <nl> < / description > <nl> < / method > <nl> < method name = " clear_points " > <nl> < return type = " void " > <nl> < / return > <nl> < description > <nl> + Removes all points from the curve . <nl> < / description > <nl> < / method > <nl> < method name = " get_point_left_mode " qualifiers = " const " > <nl> <nl> < argument index = " 0 " name = " index " type = " int " > <nl> < / argument > <nl> < description > <nl> + Returns the left [ code ] TangentMode [ / code ] for the point at [ code ] index [ / code ] . <nl> < / description > <nl> < / method > <nl> < method name = " get_point_left_tangent " qualifiers = " const " > <nl> <nl> < argument index = " 0 " name = " index " type = " int " > <nl> < / argument > <nl> < description > <nl> + Returns the left tangent angle ( in degrees ) for the point at [ code ] index [ / code ] . 
<nl> < / description > <nl> < / method > <nl> < method name = " get_point_position " qualifiers = " const " > <nl> <nl> < argument index = " 0 " name = " index " type = " int " > <nl> < / argument > <nl> < description > <nl> + Returns the curve coordinates for the point at [ code ] index [ / code ] . <nl> < / description > <nl> < / method > <nl> < method name = " get_point_right_mode " qualifiers = " const " > <nl> <nl> < argument index = " 0 " name = " index " type = " int " > <nl> < / argument > <nl> < description > <nl> + Returns the right [ code ] TangentMode [ / code ] for the point at [ code ] index [ / code ] . <nl> < / description > <nl> < / method > <nl> < method name = " get_point_right_tangent " qualifiers = " const " > <nl> <nl> < argument index = " 0 " name = " index " type = " int " > <nl> < / argument > <nl> < description > <nl> + Returns the right tangent angle ( in degrees ) for the point at [ code ] index [ / code ] . <nl> < / description > <nl> < / method > <nl> < method name = " interpolate " qualifiers = " const " > <nl> <nl> < argument index = " 0 " name = " offset " type = " float " > <nl> < / argument > <nl> < description > <nl> + Returns the y value for the point that would exist at x - position [ code ] offset [ / code ] along the curve . <nl> < / description > <nl> < / method > <nl> < method name = " interpolate_baked " > <nl> < return type = " float " > <nl> < / return > <nl> < argument index = " 0 " name = " offset " type = " float " > <nl> + Returns the y value for the point that would exist at x - position [ code ] offset [ / code ] along the curve using the baked cache . Bakes the curve ' s points if not already baked . <nl> < / argument > <nl> < description > <nl> < / description > <nl> <nl> < argument index = " 0 " name = " index " type = " int " > <nl> < / argument > <nl> < description > <nl> + Removes the point at [ code ] index [ / code ] from the curve . <nl> < / description > <nl> < / method > <nl> < method name = " set_point_left_mode " > <nl> <nl> < argument index = " 1 " name = " mode " type = " int " enum = " Curve . TangentMode " > <nl> < / argument > <nl> < description > <nl> + Sets the left [ code ] TangentMode [ / code ] for the point at [ code ] index [ / code ] to [ code ] mode [ / code ] . <nl> < / description > <nl> < / method > <nl> < method name = " set_point_left_tangent " > <nl> <nl> < argument index = " 1 " name = " tangent " type = " float " > <nl> < / argument > <nl> < description > <nl> + Sets the left tangent angle for the point at [ code ] index [ / code ] to [ code ] tangent [ / code ] . <nl> < / description > <nl> < / method > <nl> < method name = " set_point_offset " > <nl> <nl> < argument index = " 1 " name = " offset " type = " float " > <nl> < / argument > <nl> < description > <nl> + Sets the offset from [ code ] 0 . 5 [ / code ] <nl> < / description > <nl> < / method > <nl> < method name = " set_point_right_mode " > <nl> <nl> < argument index = " 1 " name = " mode " type = " int " enum = " Curve . TangentMode " > <nl> < / argument > <nl> < description > <nl> + Sets the right [ code ] TangentMode [ / code ] for the point at [ code ] index [ / code ] to [ code ] mode [ / code ] . <nl> < / description > <nl> < / method > <nl> < method name = " set_point_right_tangent " > <nl> <nl> < argument index = " 1 " name = " tangent " type = " float " > <nl> < / argument > <nl> < description > <nl> + Sets the right tangent angle for the point at [ code ] index [ / code ] to [ code ] tangent [ / code ] . 
<nl> < / description > <nl> < / method > <nl> < method name = " set_point_value " > <nl> <nl> < argument index = " 1 " name = " y " type = " float " > <nl> < / argument > <nl> < description > <nl> + Assigns the vertical position [ code ] y [ / code ] to the point at [ code ] index [ / code ] . <nl> < / description > <nl> < / method > <nl> < / methods > <nl> < members > <nl> < member name = " bake_resolution " type = " int " setter = " set_bake_resolution " getter = " get_bake_resolution " > <nl> + The number of points to include in the baked ( i . e . cached ) curve data . <nl> < / member > <nl> < member name = " max_value " type = " float " setter = " set_max_value " getter = " get_max_value " > <nl> + The maximum value the curve can reach . Default value : [ code ] 1 [ / code ] . <nl> < / member > <nl> < member name = " min_value " type = " float " setter = " set_min_value " getter = " get_min_value " > <nl> + The minimum value the curve can reach . Default value : [ code ] 0 [ / code ] . <nl> < / member > <nl> < / members > <nl> < signals > <nl> < signal name = " range_changed " > <nl> < description > <nl> + Emitted when [ member max_value ] or [ member min_value ] is changed . <nl> < / description > <nl> < / signal > <nl> < / signals > <nl> < constants > <nl> < constant name = " TANGENT_FREE " value = " 0 " enum = " TangentMode " > <nl> + The tangent on this side of the point is user - defined . <nl> < / constant > <nl> < constant name = " TANGENT_LINEAR " value = " 1 " enum = " TangentMode " > <nl> + The curve calculates the tangent on this side of the point as the slope halfway towards the adjacent point . <nl> < / constant > <nl> < constant name = " TANGENT_MODE_COUNT " value = " 2 " enum = " TangentMode " > <nl> + The total number of available tangent modes . <nl> < / constant > <nl> < / constants > <nl> < / class > <nl> mmm a / doc / classes / Curve3D . xml <nl> ppp b / doc / classes / Curve3D . xml <nl> <nl> < return type = " void " > <nl> < / return > <nl> < description > <nl> + Removes all points from the curve . <nl> < / description > <nl> < / method > <nl> < method name = " get_baked_length " qualifiers = " const " > <nl> <nl> < / methods > <nl> < members > <nl> < member name = " bake_interval " type = " float " setter = " set_bake_interval " getter = " get_bake_interval " > <nl> + The distance in meters between two adjacent cached points . Changing it forces the cache to be recomputed the next time the [ method get_baked_points ] or [ method get_baked_length ] function is called . The smaller the distance , the more points in the cache and the more memory it will consume , so use with care . <nl> < / member > <nl> < / members > <nl> < constants > <nl>
|
Merge pull request from willnationsdev / docs4
|
godotengine/godot
|
01c04d611fec2a875332f23a5cf99440712b2f6d
|
2017-12-15T10:44:13Z
|
mmm a / src / runtime / base / builtin_functions . cpp <nl> ppp b / src / runtime / base / builtin_functions . cpp <nl> Variant unserialize_ex ( CStrRef str , VariableUnserializer : : Type type ) { <nl> return false ; <nl> } <nl> <nl> - istringstream in ( std : : string ( str . data ( ) , str . size ( ) ) ) ; <nl> - VariableUnserializer vu ( in , type ) ; <nl> + VariableUnserializer vu ( str . data ( ) , str . size ( ) , type ) ; <nl> Variant v ; <nl> try { <nl> v = vu . unserialize ( ) ; <nl> mmm a / src / runtime / base / class_info . cpp <nl> ppp b / src / runtime / base / class_info . cpp <nl> ClassInfoUnique : : ClassInfoUnique ( const char * * & p ) { <nl> staticVariable - > name = * p + + ; <nl> staticVariable - > valueLen = ( int64 ) ( * p + + ) ; <nl> staticVariable - > valueText = * p + + ; <nl> - istringstream in ( std : : string ( staticVariable - > valueText , <nl> - staticVariable - > valueLen ) ) ; <nl> - VariableUnserializer vu ( in , VariableUnserializer : : Serialize ) ; <nl> + VariableUnserializer vu ( staticVariable - > valueText , <nl> + staticVariable - > valueLen , <nl> + VariableUnserializer : : Serialize ) ; <nl> try { <nl> Variant v = vu . unserialize ( ) ; <nl> v . setStatic ( ) ; <nl> ClassInfoUnique : : ClassInfoUnique ( const char * * & p ) { <nl> constant - > valueText = * p + + ; <nl> <nl> if ( constant - > valueText ) { <nl> - istringstream in ( std : : string ( constant - > valueText , constant - > valueLen ) ) ; <nl> - VariableUnserializer vu ( in , VariableUnserializer : : Serialize ) ; <nl> + VariableUnserializer vu ( constant - > valueText , <nl> + constant - > valueLen , <nl> + VariableUnserializer : : Serialize ) ; <nl> try { <nl> Variant v = vu . unserialize ( ) ; <nl> v . setStatic ( ) ; <nl> mmm a / src / runtime / base / type_array . cpp <nl> ppp b / src / runtime / base / type_array . cpp <nl> void Array : : serialize ( VariableSerializer * serializer ) const { <nl> } <nl> } <nl> <nl> - void Array : : unserialize ( VariableUnserializer * unserializer ) { <nl> - std : : istream & in = unserializer - > in ( ) ; <nl> - int64 size ; <nl> - char sep ; <nl> - in > > size > > sep ; <nl> + void Array : : unserialize ( VariableUnserializer * uns ) { <nl> + int64 size = uns - > readInt ( ) ; <nl> + char sep = uns - > readChar ( ) ; <nl> if ( sep ! = ' : ' ) { <nl> throw Exception ( " Expected ' : ' but got ' % c ' " , sep ) ; <nl> } <nl> - in > > sep ; <nl> + sep = uns - > readChar ( ) ; <nl> if ( sep ! = ' { ' ) { <nl> throw Exception ( " Expected ' { ' but got ' % c ' " , sep ) ; <nl> } <nl> void Array : : unserialize ( VariableUnserializer * unserializer ) { <nl> / / the middle , which breaks references . <nl> operator = ( ArrayInit ( size ) . create ( ) ) ; <nl> for ( int64 i = 0 ; i < size ; i + + ) { <nl> - Variant key ( unserializer - > unserializeKey ( ) ) ; <nl> + Variant key ( uns - > unserializeKey ( ) ) ; <nl> Variant & value = <nl> key . isString ( ) ? addLval ( key . toString ( ) , true ) <nl> : addLval ( key ) ; <nl> - value . unserialize ( unserializer ) ; <nl> + value . unserialize ( uns ) ; <nl> } <nl> } <nl> <nl> - in > > sep ; <nl> + sep = uns - > readChar ( ) ; <nl> if ( sep ! = ' } ' ) { <nl> throw Exception ( " Expected ' } ' but got ' % c ' " , sep ) ; <nl> } <nl> mmm a / src / runtime / base / type_array . h <nl> ppp b / src / runtime / base / type_array . 
h <nl> class Array : public SmartPtr < ArrayData > { <nl> * Input / Output <nl> * / <nl> void serialize ( VariableSerializer * serializer ) const ; <nl> - void unserialize ( VariableUnserializer * in ) ; <nl> + void unserialize ( VariableUnserializer * uns ) ; <nl> <nl> / * * <nl> * Marshaling / Unmarshaling between request thread and fiber thread . <nl> mmm a / src / runtime / base / type_string . cpp <nl> ppp b / src / runtime / base / type_string . cpp <nl> void String : : serialize ( VariableSerializer * serializer ) const { <nl> } <nl> } <nl> <nl> - void String : : unserialize ( std : : istream & in , <nl> + void String : : unserialize ( VariableUnserializer * uns , <nl> char delimiter0 / * = ' " ' * / , <nl> char delimiter1 / * = ' " ' * / ) { <nl> - int size ; <nl> - in > > size ; <nl> + int size = uns - > readInt ( ) ; <nl> if ( size > = SERIALIZE_MAX_SIZE ) { <nl> throw Exception ( " Size of serialized string ( % d ) exceeds max " , size ) ; <nl> } <nl> <nl> - char ch ; <nl> - in > > ch ; <nl> + char ch = uns - > readChar ( ) ; <nl> if ( ch ! = ' : ' ) { <nl> throw Exception ( " Expected ' : ' but got ' % c ' " , ch ) ; <nl> } <nl> - in > > ch ; <nl> + ch = uns - > readChar ( ) ; <nl> if ( ch ! = delimiter0 ) { <nl> throw Exception ( " Expected ' % c ' but got ' % c ' " , delimiter0 , ch ) ; <nl> } <nl> <nl> char * buf = ( char * ) malloc ( size + 1 ) ; <nl> - in . read ( buf , size ) ; <nl> + uns - > read ( buf , size ) ; <nl> buf [ size ] = ' \ 0 ' ; <nl> SmartPtr < StringData > : : operator = ( NEW ( StringData ) ( buf , size , AttachString ) ) ; <nl> <nl> - in > > ch ; <nl> + ch = uns - > readChar ( ) ; <nl> if ( ch ! = delimiter1 ) { <nl> throw Exception ( " Expected ' % c ' but got ' % c ' " , delimiter1 , ch ) ; <nl> } <nl> mmm a / src / runtime / base / type_string . h <nl> ppp b / src / runtime / base / type_string . h <nl> class String : public SmartPtr < StringData > { <nl> * Input / Output <nl> * / <nl> void serialize ( VariableSerializer * serializer ) const ; <nl> - void unserialize ( std : : istream & in , char delimiter0 = ' " ' , <nl> + void unserialize ( VariableUnserializer * uns , char delimiter0 = ' " ' , <nl> char delimiter1 = ' " ' ) ; <nl> <nl> / * * <nl> mmm a / src / runtime / base / type_variant . cpp <nl> ppp b / src / runtime / base / type_variant . cpp <nl> void Variant : : serialize ( VariableSerializer * serializer , <nl> } <nl> } <nl> <nl> - void Variant : : unserialize ( VariableUnserializer * unserializer ) { <nl> - std : : istream & in = unserializer - > in ( ) ; <nl> + void Variant : : unserialize ( VariableUnserializer * uns ) { <nl> char type , sep ; <nl> - in > > type > > sep ; <nl> + type = uns - > readChar ( ) ; <nl> + sep = uns - > readChar ( ) ; <nl> <nl> if ( type ! 
= ' R ' ) { <nl> - unserializer - > add ( this ) ; <nl> + uns - > add ( this ) ; <nl> } <nl> <nl> if ( type = = ' N ' ) { <nl> void Variant : : unserialize ( VariableUnserializer * unserializer ) { <nl> switch ( type ) { <nl> case ' r ' : <nl> { <nl> - int64 id ; <nl> - in > > id ; <nl> - Variant * v = unserializer - > get ( id ) ; <nl> + int64 id = uns - > readInt ( ) ; <nl> + Variant * v = uns - > get ( id ) ; <nl> if ( v = = NULL ) { <nl> throw Exception ( " Id % ld out of range " , id ) ; <nl> } <nl> void Variant : : unserialize ( VariableUnserializer * unserializer ) { <nl> break ; <nl> case ' R ' : <nl> { <nl> - int64 id ; <nl> - in > > id ; <nl> - Variant * v = unserializer - > get ( id ) ; <nl> + int64 id = uns - > readInt ( ) ; <nl> + Variant * v = uns - > get ( id ) ; <nl> if ( v = = NULL ) { <nl> throw Exception ( " Id % ld out of range " , id ) ; <nl> } <nl> operator = ( ref ( * v ) ) ; <nl> } <nl> break ; <nl> - case ' b ' : { int64 v ; in > > v ; operator = ( ( bool ) v ) ; } break ; <nl> - case ' i ' : { int64 v ; in > > v ; operator = ( v ) ; } break ; <nl> + case ' b ' : { int64 v = uns - > readInt ( ) ; operator = ( ( bool ) v ) ; } break ; <nl> + case ' i ' : { int64 v = uns - > readInt ( ) ; operator = ( v ) ; } break ; <nl> case ' d ' : <nl> { <nl> double v ; <nl> - char ch = in . peek ( ) ; <nl> + char ch = uns - > peek ( ) ; <nl> bool negative = false ; <nl> char buf [ 4 ] ; <nl> if ( ch = = ' - ' ) { <nl> negative = true ; <nl> - in > > ch ; <nl> - ch = in . peek ( ) ; <nl> + ch = uns - > readChar ( ) ; <nl> + ch = uns - > peek ( ) ; <nl> } <nl> if ( ch = = ' I ' ) { <nl> - in . read ( buf , 3 ) ; buf [ 3 ] = ' \ 0 ' ; <nl> + uns - > read ( buf , 3 ) ; buf [ 3 ] = ' \ 0 ' ; <nl> if ( strcmp ( buf , " INF " ) ) { <nl> throw Exception ( " Expected ' INF ' but got ' % s ' " , buf ) ; <nl> } <nl> v = atof ( " inf " ) ; <nl> } else if ( ch = = ' N ' ) { <nl> - in . read ( buf , 3 ) ; buf [ 3 ] = ' \ 0 ' ; <nl> + uns - > read ( buf , 3 ) ; buf [ 3 ] = ' \ 0 ' ; <nl> if ( strcmp ( buf , " NAN " ) ) { <nl> throw Exception ( " Expected ' NAN ' but got ' % s ' " , buf ) ; <nl> } <nl> v = atof ( " nan " ) ; <nl> } else { <nl> - in > > v ; <nl> + v = uns - > readDouble ( ) ; <nl> } <nl> operator = ( negative ? - v : v ) ; <nl> } <nl> void Variant : : unserialize ( VariableUnserializer * unserializer ) { <nl> case ' s ' : <nl> { <nl> String v ; <nl> - v . unserialize ( in ) ; <nl> + v . unserialize ( uns ) ; <nl> operator = ( v ) ; <nl> } <nl> break ; <nl> case ' S ' : <nl> - if ( unserializer - > getType ( ) = = VariableUnserializer : : APCSerialize ) { <nl> + if ( uns - > getType ( ) = = VariableUnserializer : : APCSerialize ) { <nl> union { <nl> char buf [ 8 ] ; <nl> StringData * sd ; <nl> } u ; <nl> - in . read ( u . buf , 8 ) ; <nl> + uns - > read ( u . buf , 8 ) ; <nl> operator = ( u . sd ) ; <nl> } else { <nl> throw Exception ( " Unknown type ' % c ' " , type ) ; <nl> void Variant : : unserialize ( VariableUnserializer * unserializer ) { <nl> case ' a ' : <nl> { <nl> Array v = Array : : Create ( ) ; <nl> - v . unserialize ( unserializer ) ; <nl> + v . unserialize ( uns ) ; <nl> operator = ( v ) ; <nl> return ; / / array has ' } ' terminating <nl> } <nl> break ; <nl> case ' A ' : <nl> - if ( unserializer - > getType ( ) = = VariableUnserializer : : APCSerialize ) { <nl> + if ( uns - > getType ( ) = = VariableUnserializer : : APCSerialize ) { <nl> union { <nl> char buf [ 8 ] ; <nl> ArrayData * ad ; <nl> } u ; <nl> - in . read ( u . buf , 8 ) ; <nl> + uns - > read ( u . 
buf , 8 ) ; <nl> operator = ( u . ad ) ; <nl> } else { <nl> throw Exception ( " Unknown type ' % c ' " , type ) ; <nl> void Variant : : unserialize ( VariableUnserializer * unserializer ) { <nl> case ' o ' : <nl> { <nl> String clsName ; <nl> - clsName . unserialize ( in ) ; <nl> + clsName . unserialize ( uns ) ; <nl> <nl> - in > > sep ; <nl> + sep = uns - > readChar ( ) ; <nl> if ( sep ! = ' : ' ) { <nl> throw Exception ( " Expected ' : ' but got ' % c ' " , sep ) ; <nl> } <nl> void Variant : : unserialize ( VariableUnserializer * unserializer ) { <nl> operator = ( obj ) ; <nl> <nl> Array v = Array : : Create ( ) ; <nl> - v . unserialize ( unserializer ) ; <nl> + v . unserialize ( uns ) ; <nl> obj - > o_setArray ( v ) ; <nl> <nl> obj - > t___wakeup ( ) ; <nl> void Variant : : unserialize ( VariableUnserializer * unserializer ) { <nl> case ' O ' : <nl> { <nl> String clsName ; <nl> - clsName . unserialize ( in ) ; <nl> + clsName . unserialize ( uns ) ; <nl> <nl> - in > > sep ; <nl> + sep = uns - > readChar ( ) ; <nl> if ( sep ! = ' : ' ) { <nl> throw Exception ( " Expected ' : ' but got ' % c ' " , sep ) ; <nl> } <nl> void Variant : : unserialize ( VariableUnserializer * unserializer ) { <nl> obj - > o_set ( " __PHP_Incomplete_Class_Name " , clsName ) ; <nl> } <nl> operator = ( obj ) ; <nl> - int64 size ; <nl> - char sep ; <nl> - in > > size > > sep ; <nl> + int64 size = uns - > readInt ( ) ; <nl> + char sep = uns - > readChar ( ) ; <nl> if ( sep ! = ' : ' ) { <nl> throw Exception ( " Expected ' : ' but got ' % c ' " , sep ) ; <nl> } <nl> - in > > sep ; <nl> + sep = uns - > readChar ( ) ; <nl> if ( sep ! = ' { ' ) { <nl> throw Exception ( " Expected ' { ' but got ' % c ' " , sep ) ; <nl> } <nl> if ( size > 0 ) { <nl> for ( int64 i = 0 ; i < size ; i + + ) { <nl> - String key = unserializer - > unserializeKey ( ) . toString ( ) ; <nl> + String key = uns - > unserializeKey ( ) . toString ( ) ; <nl> int subLen = 0 ; <nl> if ( key . charAt ( 0 ) = = ' \ 00 ' ) { <nl> if ( key . charAt ( 1 ) = = ' * ' ) { <nl> void Variant : : unserialize ( VariableUnserializer * unserializer ) { <nl> obj - > o_lval ( key . substr ( subLen ) , tmp , <nl> String ( key . data ( ) + 1 , subLen - 2 , AttachLiteral ) ) ) <nl> : obj - > o_lval ( key , tmp ) ; <nl> - value . unserialize ( unserializer ) ; <nl> + value . unserialize ( uns ) ; <nl> } <nl> } <nl> - in > > sep ; <nl> + sep = uns - > readChar ( ) ; <nl> if ( sep ! = ' } ' ) { <nl> throw Exception ( " Expected ' } ' but got ' % c ' " , sep ) ; <nl> } <nl> void Variant : : unserialize ( VariableUnserializer * unserializer ) { <nl> case ' C ' : <nl> { <nl> String clsName ; <nl> - clsName . unserialize ( in ) ; <nl> + clsName . unserialize ( uns ) ; <nl> <nl> - in > > sep ; <nl> + sep = uns - > readChar ( ) ; <nl> if ( sep ! = ' : ' ) { <nl> throw Exception ( " Expected ' : ' but got ' % c ' " , sep ) ; <nl> } <nl> String serialized ; <nl> - serialized . unserialize ( in , ' { ' , ' } ' ) ; <nl> + serialized . unserialize ( uns , ' { ' , ' } ' ) ; <nl> <nl> Object obj ; <nl> try { <nl> void Variant : : unserialize ( VariableUnserializer * unserializer ) { <nl> } <nl> obj - > o_invoke ( " unserialize " , CREATE_VECTOR1 ( serialized ) , - 1 ) ; <nl> } catch ( ClassNotFoundException & e ) { <nl> - if ( ! unserializer - > allowUnknownSerializableClass ( ) ) { <nl> + if ( ! 
uns - > allowUnknownSerializableClass ( ) ) { <nl> throw ; <nl> } <nl> obj = create_object ( " __PHP_Incomplete_Class " , Array : : Create ( ) , false ) ; <nl> void Variant : : unserialize ( VariableUnserializer * unserializer ) { <nl> default : <nl> throw Exception ( " Unknown type ' % c ' " , type ) ; <nl> } <nl> - in > > sep ; <nl> + sep = uns - > readChar ( ) ; <nl> if ( sep ! = ' ; ' ) { <nl> throw Exception ( " Expected ' ; ' but got ' % c ' " , sep ) ; <nl> } <nl> mmm a / src / runtime / base / util / thrift_buffer . cpp <nl> ppp b / src / runtime / base / util / thrift_buffer . cpp <nl> void ThriftBuffer : : throwInvalidStringSize ( int size ) { <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> static Variant unserialize_with_no_notice ( CStrRef str ) { <nl> - istringstream in ( std : : string ( str . data ( ) , str . size ( ) ) ) ; <nl> - VariableUnserializer vu ( in , VariableUnserializer : : Serialize , true ) ; <nl> + VariableUnserializer vu ( str . data ( ) , str . data ( ) + str . size ( ) , <nl> + VariableUnserializer : : Serialize , true ) ; <nl> Variant v ; <nl> try { <nl> v = vu . unserialize ( ) ; <nl> new file mode 100644 <nl> index 00000000000 . . 241cb7cff9c <nl> mmm / dev / null <nl> ppp b / src / runtime / base / variable_unserializer . cpp <nl> <nl> + / * <nl> + + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - + <nl> + | HipHop for PHP | <nl> + + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - + <nl> + | Copyright ( c ) 2010 Facebook , Inc . ( http : / / www . facebook . com ) | <nl> + + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - + <nl> + | This source file is subject to version 3 . 01 of the PHP license , | <nl> + | that is bundled with this package in the file LICENSE , and is | <nl> + | available through the world - wide - web at the following url : | <nl> + | http : / / www . php . net / license / 3_01 . txt | <nl> + | If you did not receive a copy of the PHP license and are unable to | <nl> + | obtain it through the world - wide - web , please send a note to | <nl> + | license @ php . net so we can mail you a copy immediately . | <nl> + + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - + <nl> + * / <nl> + <nl> + # include < runtime / base / variable_unserializer . h > <nl> + # include < runtime / base / complex_types . h > <nl> + <nl> + <nl> + using namespace std ; <nl> + <nl> + namespace HPHP { <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + Variant VariableUnserializer : : unserialize ( ) { <nl> + Variant v ; <nl> + v . unserialize ( this ) ; <nl> + return v ; <nl> + } <nl> + <nl> + Variant VariableUnserializer : : unserializeKey ( ) { <nl> + m_key = true ; <nl> + Variant v ; <nl> + v . 
unserialize ( this ) ; <nl> + m_key = false ; <nl> + return v ; <nl> + } <nl> + <nl> + int64 VariableUnserializer : : readInt ( ) { <nl> + check ( ) ; <nl> + char * newBuf ; <nl> + int64 r = strtoll ( m_buf , & newBuf , 10 ) ; <nl> + m_buf = newBuf ; <nl> + return r ; <nl> + } <nl> + <nl> + double VariableUnserializer : : readDouble ( ) { <nl> + check ( ) ; <nl> + char * newBuf ; <nl> + double r = strtod ( m_buf , & newBuf ) ; <nl> + m_buf = newBuf ; <nl> + return r ; <nl> + } <nl> + <nl> + void VariableUnserializer : : read ( char * buf , uint n ) { <nl> + check ( ) ; <nl> + uint i = 0 ; <nl> + for ( ; i < n & & m_buf ! = m_end ; + + i ) { <nl> + buf [ i ] = * ( m_buf + + ) ; <nl> + } <nl> + } <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + } <nl> mmm a / src / runtime / base / variable_unserializer . h <nl> ppp b / src / runtime / base / variable_unserializer . h <nl> class VariableUnserializer { <nl> } ; <nl> <nl> public : <nl> - VariableUnserializer ( std : : istream & in , Type type , <nl> + VariableUnserializer ( const char * str , size_t len , Type type , <nl> bool allowUnknownSerializableClass = false ) <nl> - : m_type ( type ) , m_in ( in ) , m_key ( false ) , <nl> + : m_type ( type ) , m_buf ( str ) , m_end ( str + len ) , m_key ( false ) , <nl> + m_unknownSerializable ( allowUnknownSerializableClass ) { } <nl> + VariableUnserializer ( const char * str , const char * end , Type type , <nl> + bool allowUnknownSerializableClass = false ) <nl> + : m_type ( type ) , m_buf ( str ) , m_end ( end ) , m_key ( false ) , <nl> m_unknownSerializable ( allowUnknownSerializableClass ) { } <nl> <nl> Type getType ( ) const { return m_type ; } <nl> bool allowUnknownSerializableClass ( ) const { return m_unknownSerializable ; } <nl> <nl> - Variant unserialize ( ) { <nl> - Variant v ; <nl> - v . unserialize ( this ) ; <nl> - return v ; <nl> - } <nl> - <nl> - Variant unserializeKey ( ) { <nl> - m_key = true ; <nl> - Variant v ; <nl> - v . unserialize ( this ) ; <nl> - m_key = false ; <nl> - return v ; <nl> - } <nl> - <nl> - std : : istream & in ( ) const { <nl> - return m_in ; <nl> - } <nl> + Variant unserialize ( ) ; <nl> + Variant unserializeKey ( ) ; <nl> void add ( Variant * v ) { <nl> if ( ! m_key ) { <nl> m_refs . push_back ( v ) ; <nl> class VariableUnserializer { <nl> if ( id < = 0 | | id > ( int ) m_refs . size ( ) ) return NULL ; <nl> return m_refs [ id - 1 ] ; <nl> } <nl> + int64 readInt ( ) ; <nl> + double readDouble ( ) ; <nl> + char readChar ( ) { <nl> + check ( ) ; <nl> + return * ( m_buf + + ) ; <nl> + } <nl> + void read ( char * buf , uint n ) ; <nl> + char peek ( ) { <nl> + check ( ) ; <nl> + return * m_buf ; <nl> + } <nl> + const char * head ( ) { return m_buf ; } <nl> <nl> private : <nl> Type m_type ; <nl> - std : : istream & m_in ; <nl> + const char * m_buf ; <nl> + const char * m_end ; <nl> std : : vector < Variant * > m_refs ; <nl> bool m_key ; <nl> bool m_unknownSerializable ; <nl> + <nl> + void check ( ) { <nl> + if ( m_buf > = m_end ) { <nl> + throw Exception ( " Unexpected end of buffer during unserialization " ) ; <nl> + } <nl> + } <nl> } ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / src / runtime / ext / ext_apc . cpp <nl> ppp b / src / runtime / ext / ext_apc . 
cpp <nl> Variant apc_unserialize ( CStrRef str ) { <nl> return unserialize_ex ( str , VariableUnserializer : : APCSerialize ) ; <nl> } <nl> <nl> - void reserialize ( istream & in , StringBuffer & buf ) { <nl> - char type , sep ; <nl> - in > > type > > sep ; <nl> + void reserialize ( VariableUnserializer * uns , StringBuffer & buf ) { <nl> + char type = uns - > readChar ( ) ; <nl> + char sep = uns - > readChar ( ) ; <nl> <nl> if ( type = = ' N ' ) { <nl> buf . append ( type ) ; <nl> void reserialize ( istream & in , StringBuffer & buf ) { <nl> { <nl> buf . append ( type ) ; <nl> buf . append ( sep ) ; <nl> - while ( in . peek ( ) ! = ' ; ' ) { <nl> + while ( uns - > peek ( ) ! = ' ; ' ) { <nl> char ch ; <nl> - in > > ch ; <nl> + ch = uns - > readChar ( ) ; <nl> buf . append ( ch ) ; <nl> } <nl> } <nl> void reserialize ( istream & in , StringBuffer & buf ) { <nl> buf . append ( type ) ; <nl> buf . append ( sep ) ; <nl> char pointer [ 8 ] ; <nl> - in . read ( pointer , 8 ) ; <nl> + uns - > read ( pointer , 8 ) ; <nl> buf . append ( pointer , 8 ) ; <nl> } <nl> break ; <nl> case ' s ' : <nl> { <nl> String v ; <nl> - v . unserialize ( in ) ; <nl> + v . unserialize ( uns ) ; <nl> ASSERT ( ! v . isNull ( ) ) ; <nl> if ( v - > isStatic ( ) ) { <nl> union { <nl> void reserialize ( istream & in , StringBuffer & buf ) { <nl> buf . append ( v . data ( ) , v . size ( ) ) ; <nl> buf . append ( " \ " ; " ) ; <nl> } <nl> - in > > sep ; / / ' ; ' <nl> + sep = uns - > readChar ( ) ; <nl> return ; <nl> } <nl> break ; <nl> case ' a ' : <nl> { <nl> buf . append ( " a : " ) ; <nl> - int64 size ; <nl> - char sep2 ; <nl> - in > > size > > sep2 ; <nl> + int64 size = uns - > readInt ( ) ; <nl> + char sep2 = uns - > readChar ( ) ; <nl> buf . append ( size ) ; <nl> buf . append ( sep2 ) ; <nl> - in > > sep2 ; / / ' { ' <nl> + sep2 = uns - > readChar ( ) ; <nl> buf . append ( sep2 ) ; <nl> for ( int64 i = 0 ; i < size ; i + + ) { <nl> - reserialize ( in , buf ) ; / / key <nl> - reserialize ( in , buf ) ; / / value <nl> + reserialize ( uns , buf ) ; / / key <nl> + reserialize ( uns , buf ) ; / / value <nl> } <nl> - in > > sep2 ; / / ' } ' <nl> + sep2 = uns - > readChar ( ) ; / / ' } ' <nl> buf . append ( sep2 ) ; <nl> return ; <nl> } <nl> void reserialize ( istream & in , StringBuffer & buf ) { <nl> buf . append ( sep ) ; <nl> <nl> String clsName ; <nl> - clsName . unserialize ( in ) ; <nl> + clsName . unserialize ( uns ) ; <nl> buf . append ( clsName . size ( ) ) ; <nl> buf . append ( " : \ " " ) ; <nl> buf . append ( clsName . data ( ) , clsName . size ( ) ) ; <nl> buf . append ( " \ " : " ) ; <nl> <nl> - int64 size ; <nl> - char sep2 ; <nl> - in > > sep2 > > size > > sep2 ; <nl> + uns - > readChar ( ) ; <nl> + int64 size = uns - > readInt ( ) ; <nl> + char sep2 = uns - > readChar ( ) ; <nl> + <nl> buf . append ( size ) ; <nl> buf . append ( sep2 ) ; <nl> - in > > sep2 ; / / ' { ' <nl> + sep2 = uns - > readChar ( ) ; / / ' { ' <nl> buf . append ( sep2 ) ; <nl> for ( int64 i = 0 ; i < size ; i + + ) { <nl> - reserialize ( in , buf ) ; / / property name <nl> - reserialize ( in , buf ) ; / / property value <nl> + reserialize ( uns , buf ) ; / / property name <nl> + reserialize ( uns , buf ) ; / / property value <nl> } <nl> - in > > sep2 ; / / ' } ' <nl> + sep2 = uns - > readChar ( ) ; / / ' } ' <nl> buf . append ( sep2 ) ; <nl> return ; <nl> } <nl> void reserialize ( istream & in , StringBuffer & buf ) { <nl> buf . append ( sep ) ; <nl> <nl> String clsName ; <nl> - clsName . unserialize ( in ) ; <nl> + clsName . 
unserialize ( uns ) ; <nl> buf . append ( clsName . size ( ) ) ; <nl> buf . append ( " : \ " " ) ; <nl> buf . append ( clsName . data ( ) , clsName . size ( ) ) ; <nl> buf . append ( " \ " : " ) ; <nl> <nl> - in > > sep ; / / ' : ' <nl> + sep = uns - > readChar ( ) ; / / ' : ' <nl> String serialized ; <nl> - serialized . unserialize ( in , ' { ' , ' } ' ) ; <nl> + serialized . unserialize ( uns , ' { ' , ' } ' ) ; <nl> buf . append ( serialized . size ( ) ) ; <nl> buf . append ( " : { " ) ; <nl> buf . append ( serialized . data ( ) , serialized . size ( ) ) ; <nl> void reserialize ( istream & in , StringBuffer & buf ) { <nl> throw Exception ( " Unknown type ' % c ' " , type ) ; <nl> } <nl> <nl> - in > > sep ; / / the last ' ; ' <nl> + sep = uns - > readChar ( ) ; / / the last ' ; ' <nl> buf . append ( sep ) ; <nl> } <nl> <nl> String apc_reserialize ( CStrRef str ) { <nl> if ( str . empty ( ) ) return str ; <nl> <nl> - istringstream in ( std : : string ( str . data ( ) , str . size ( ) ) ) ; <nl> + VariableUnserializer uns ( str . data ( ) , str . size ( ) , <nl> + VariableUnserializer : : APCSerialize ) ; <nl> StringBuffer buf ; <nl> - reserialize ( in , buf ) ; <nl> + reserialize ( & uns , buf ) ; <nl> <nl> return buf . detach ( ) ; <nl> } <nl> mmm a / src / runtime / ext / ext_ipc . cpp <nl> ppp b / src / runtime / ext / ext_ipc . cpp <nl> bool f_msg_receive ( CObjRef queue , int64 desiredmsgtype , Variant msgtype , <nl> <nl> msgtype = ( int ) MSGBUF_MTYPE ( buffer ) ; <nl> if ( unserialize ) { <nl> - istringstream in ( ( const char * ) MSGBUF_MTEXT ( buffer ) ) ; <nl> - VariableUnserializer vu ( in , VariableUnserializer : : Serialize ) ; <nl> + const char * bufText = ( const char * ) MSGBUF_MTEXT ( buffer ) ; <nl> + uint bufLen = strlen ( bufText ) ; ; <nl> + VariableUnserializer vu ( bufText , bufLen , <nl> + VariableUnserializer : : Serialize ) ; <nl> try { <nl> message = vu . unserialize ( ) ; <nl> } catch ( Exception & e ) { <nl> mmm a / src / runtime / ext / ext_session . cpp <nl> ppp b / src / runtime / ext / ext_session . cpp <nl> class BinarySessionSerializer : public SessionSerializer { <nl> String key ( p + 1 , namelen , CopyString ) ; <nl> p + = namelen + 1 ; <nl> if ( has_value ) { <nl> - istringstream in ( std : : string ( p , endptr - p ) ) ; <nl> - VariableUnserializer vu ( in , VariableUnserializer : : Serialize ) ; <nl> + VariableUnserializer vu ( p , endptr , VariableUnserializer : : Serialize ) ; <nl> try { <nl> g - > gv__SESSION . set ( key , vu . unserialize ( ) ) ; <nl> - if ( in . tellg ( ) > 0 & & in . tellg ( ) < endptr - p ) { <nl> - p + = in . tellg ( ) ; <nl> - } <nl> + p = vu . head ( ) ; <nl> } catch ( Exception & e ) { <nl> } <nl> } <nl> class PhpSessionSerializer : public SessionSerializer { <nl> String key ( p , q - p , CopyString ) ; <nl> q + + ; <nl> if ( has_value ) { <nl> - istringstream in ( std : : string ( q , endptr - q ) ) ; <nl> - VariableUnserializer vu ( in , VariableUnserializer : : Serialize ) ; <nl> + VariableUnserializer vu ( q , endptr , VariableUnserializer : : Serialize ) ; <nl> try { <nl> g - > gv__SESSION . set ( key , vu . unserialize ( ) ) ; <nl> - if ( in . tellg ( ) > 0 & & in . tellg ( ) < endptr - q ) { <nl> - q + = in . tellg ( ) ; <nl> - } <nl> + q = vu . head ( ) ; <nl> } catch ( Exception & e ) { <nl> } <nl> } <nl>
|
Optimize unserialize
|
facebook/hhvm
|
83a5cea137330678459596bd0df32dd32f9348ad
|
2010-10-18T19:22:46Z
|
mmm a / cores / esp8266 / core_esp8266_si2c . c <nl> ppp b / cores / esp8266 / core_esp8266_si2c . c <nl> <nl> You should have received a copy of the GNU Lesser General Public <nl> License along with this library ; if not , write to the Free Software <nl> Foundation , Inc . , 51 Franklin St , Fifth Floor , Boston , MA 02110 - 1301 USA <nl> + Modified January 2017 by Bjorn Hammarberg ( bjoham @ esp8266 . com ) - i2c slave support <nl> * / <nl> # include " twi . h " <nl> # include " pins_arduino . h " <nl> # include " wiring_private . h " <nl> <nl> unsigned int preferred_si2c_clock = 100000 ; <nl> + # include " twi_util . h " <nl> + <nl> + # include " ets_sys . h " <nl> + <nl> unsigned char twi_dcount = 18 ; <nl> static unsigned char twi_sda , twi_scl ; <nl> static uint32_t twi_clockStretchLimit ; <nl> + static unsigned char twi_addr = 0 ; <nl> + <nl> + / / modes ( private ) <nl> + # define TWIPM_UNKNOWN 0 <nl> + # define TWIPM_IDLE 1 <nl> + # define TWIPM_ADDRESSED 2 <nl> + # define TWIPM_WAIT 3 <nl> + <nl> + / / states ( private ) <nl> + # define TWIP_UNKNOWN 0 <nl> + # define TWIP_IDLE 1 <nl> + # define TWIP_START 2 <nl> + # define TWIP_SEND_ACK 3 <nl> + # define TWIP_WAIT_ACK 4 <nl> + # define TWIP_WAIT_STOP 5 <nl> + # define TWIP_SLA_W 6 <nl> + # define TWIP_SLA_R 7 <nl> + # define TWIP_REP_START 8 <nl> + # define TWIP_READ 9 <nl> + # define TWIP_STOP 10 <nl> + # define TWIP_REC_ACK 11 <nl> + # define TWIP_READ_ACK 12 <nl> + # define TWIP_RWAIT_ACK 13 <nl> + # define TWIP_WRITE 14 <nl> + # define TWIP_BUS_ERR 15 <nl> + <nl> + static volatile uint8_t twip_mode = TWIPM_IDLE ; <nl> + static volatile uint8_t twip_state = TWIP_IDLE ; <nl> + static volatile uint8_t twip_status = TW_NO_INFO ; <nl> + static volatile uint8_t bitCount = 0 ; <nl> + <nl> + # define TWDR twi_data <nl> + static volatile uint8_t twi_data = 0x00 ; <nl> + static volatile uint8_t twi_ack = 0 ; <nl> + static volatile uint8_t twi_ack_rec = 0 ; <nl> + static volatile int twi_timeout_ms = 10 ; <nl> + <nl> + # define TWI_READY 0 <nl> + # define TWI_MRX 1 <nl> + # define TWI_MTX 2 <nl> + # define TWI_SRX 3 <nl> + # define TWI_STX 4 <nl> + static volatile uint8_t twi_state = TWI_READY ; <nl> + static volatile uint8_t twi_error = 0xFF ; <nl> + <nl> + static uint8_t twi_txBuffer [ TWI_BUFFER_LENGTH ] ; <nl> + static volatile uint8_t twi_txBufferIndex ; <nl> + static volatile uint8_t twi_txBufferLength ; <nl> + <nl> + static uint8_t twi_rxBuffer [ TWI_BUFFER_LENGTH ] ; <nl> + static volatile uint8_t twi_rxBufferIndex ; <nl> + <nl> + static void ( * twi_onSlaveTransmit ) ( void ) ; <nl> + static void ( * twi_onSlaveReceive ) ( uint8_t * , size_t ) ; <nl> + <nl> + static void onSclChange ( void ) ; <nl> + static void onSdaChange ( void ) ; <nl> + <nl> + # define EVENTTASK_QUEUE_SIZE 1 <nl> + # define EVENTTASK_QUEUE_PRIO 2 <nl> + <nl> + # define TWI_SIG_RANGE 0x00000100 <nl> + # define TWI_SIG_RX ( TWI_SIG_RANGE + 0x01 ) <nl> + # define TWI_SIG_TX ( TWI_SIG_RANGE + 0x02 ) <nl> + <nl> + static ETSEvent eventTaskQueue [ EVENTTASK_QUEUE_SIZE ] ; <nl> + static void eventTask ( ETSEvent * e ) ; <nl> + static ETSTimer timer ; <nl> + static void onTimer ( ) ; <nl> <nl> # define SDA_LOW ( ) ( GPES = ( 1 < < twi_sda ) ) / / Enable SDA ( becomes output and since GPO is 0 for the pin , it will pull the line low ) <nl> # define SDA_HIGH ( ) ( GPEC = ( 1 < < twi_sda ) ) / / Disable SDA ( becomes input and since it has pullup it will go high ) <nl> void twi_setClockStretchLimit ( uint32_t limit ) { <nl> twi_clockStretchLimit = limit * 
TWI_CLOCK_STRETCH_MULTIPLIER ; <nl> } <nl> <nl> - void twi_init ( unsigned char sda , unsigned char scl ) { <nl> + void twi_init ( unsigned char sda , unsigned char scl ) <nl> + { <nl> + / / set timer function <nl> + ets_timer_setfn ( & timer , onTimer , NULL ) ; <nl> + <nl> + / / create event task <nl> + ets_task ( eventTask , EVENTTASK_QUEUE_PRIO , eventTaskQueue , EVENTTASK_QUEUE_SIZE ) ; <nl> + <nl> twi_sda = sda ; <nl> twi_scl = scl ; <nl> pinMode ( twi_sda , INPUT_PULLUP ) ; <nl> pinMode ( twi_scl , INPUT_PULLUP ) ; <nl> twi_setClock ( preferred_si2c_clock ) ; <nl> twi_setClockStretchLimit ( 230 ) ; / / default value is 230 uS <nl> - } <nl> <nl> + if ( twi_addr ! = 0 ) <nl> + { <nl> + attachInterrupt ( scl , onSclChange , CHANGE ) ; <nl> + attachInterrupt ( sda , onSdaChange , CHANGE ) ; <nl> + } <nl> + } <nl> <nl> - void twi_stop ( void ) { <nl> - pinMode ( twi_sda , INPUT ) ; <nl> - pinMode ( twi_scl , INPUT ) ; <nl> + void twi_setAddress ( uint8_t address ) <nl> + { <nl> + / / set twi slave address ( skip over R / W bit ) <nl> + twi_addr = address < < 1 ; <nl> } <nl> <nl> - static void twi_delay ( unsigned char v ) { <nl> + static void ICACHE_RAM_ATTR twi_delay ( unsigned char v ) { <nl> unsigned int i ; <nl> # pragma GCC diagnostic push <nl> # pragma GCC diagnostic ignored " - Wunused - but - set - variable " <nl> unsigned int reg ; <nl> - for ( i = 0 ; i < v ; i + + ) <nl> + for ( i = 0 ; i < v ; i + + ) { <nl> reg = GPI ; <nl> + } <nl> # pragma GCC diagnostic pop <nl> } <nl> <nl> static bool twi_write_start ( void ) { <nl> SCL_HIGH ( ) ; <nl> SDA_HIGH ( ) ; <nl> - if ( SDA_READ ( ) = = 0 ) <nl> + if ( SDA_READ ( ) = = 0 ) { <nl> return false ; <nl> + } <nl> twi_delay ( twi_dcount ) ; <nl> SDA_LOW ( ) ; <nl> twi_delay ( twi_dcount ) ; <nl> static bool twi_write_stop ( void ) { <nl> twi_delay ( twi_dcount ) ; <nl> SDA_HIGH ( ) ; <nl> twi_delay ( twi_dcount ) ; <nl> - <nl> return true ; <nl> } <nl> <nl> static bool twi_write_bit ( bool bit ) { <nl> uint32_t i = 0 ; <nl> SCL_LOW ( ) ; <nl> - if ( bit ) <nl> - SDA_HIGH ( ) ; <nl> - else <nl> - SDA_LOW ( ) ; <nl> + if ( bit ) SDA_HIGH ( ) ; <nl> + else SDA_LOW ( ) ; <nl> twi_delay ( twi_dcount + 1 ) ; <nl> SCL_HIGH ( ) ; <nl> while ( SCL_READ ( ) = = 0 & & ( i + + ) < twi_clockStretchLimit ) ; / / Clock stretching <nl> static bool twi_write_byte ( unsigned char byte ) { <nl> static unsigned char twi_read_byte ( bool nack ) { <nl> unsigned char byte = 0 ; <nl> unsigned char bit ; <nl> - for ( bit = 0 ; bit < 8 ; bit + + ) <nl> - byte = ( byte < < 1 ) | twi_read_bit ( ) ; <nl> + for ( bit = 0 ; bit < 8 ; bit + + ) byte = ( byte < < 1 ) | twi_read_bit ( ) ; <nl> twi_write_bit ( nack ) ; <nl> return byte ; <nl> } <nl> <nl> unsigned char twi_writeTo ( unsigned char address , unsigned char * buf , unsigned int len , unsigned char sendStop ) { <nl> unsigned int i ; <nl> - if ( ! twi_write_start ( ) ) <nl> - return 4 ; / / line busy <nl> + if ( ! twi_write_start ( ) ) return 4 ; / / line busy <nl> if ( ! twi_write_byte ( ( ( address < < 1 ) | 0 ) & 0xFF ) ) { <nl> - if ( sendStop ) <nl> - twi_write_stop ( ) ; <nl> + if ( sendStop ) twi_write_stop ( ) ; <nl> return 2 ; / / received NACK on transmit of address <nl> } <nl> for ( i = 0 ; i < len ; i + + ) { <nl> if ( ! 
twi_write_byte ( buf [ i ] ) ) { <nl> - if ( sendStop ) <nl> - twi_write_stop ( ) ; <nl> + if ( sendStop ) twi_write_stop ( ) ; <nl> return 3 ; / / received NACK on transmit of data <nl> } <nl> } <nl> - if ( sendStop ) <nl> - twi_write_stop ( ) ; <nl> + if ( sendStop ) twi_write_stop ( ) ; <nl> i = 0 ; <nl> while ( SDA_READ ( ) = = 0 & & ( i + + ) < 10 ) { <nl> SCL_LOW ( ) ; <nl> unsigned char twi_writeTo ( unsigned char address , unsigned char * buf , unsigned i <nl> <nl> unsigned char twi_readFrom ( unsigned char address , unsigned char * buf , unsigned int len , unsigned char sendStop ) { <nl> unsigned int i ; <nl> - if ( ! twi_write_start ( ) ) <nl> - return 4 ; / / line busy <nl> + if ( ! twi_write_start ( ) ) return 4 ; / / line busy <nl> if ( ! twi_write_byte ( ( ( address < < 1 ) | 1 ) & 0xFF ) ) { <nl> - if ( sendStop ) <nl> - twi_write_stop ( ) ; <nl> + if ( sendStop ) twi_write_stop ( ) ; <nl> return 2 ; / / received NACK on transmit of address <nl> } <nl> - for ( i = 0 ; i < ( len - 1 ) ; i + + ) <nl> - buf [ i ] = twi_read_byte ( false ) ; <nl> + for ( i = 0 ; i < ( len - 1 ) ; i + + ) buf [ i ] = twi_read_byte ( false ) ; <nl> buf [ len - 1 ] = twi_read_byte ( true ) ; <nl> - if ( sendStop ) <nl> - twi_write_stop ( ) ; <nl> + if ( sendStop ) twi_write_stop ( ) ; <nl> i = 0 ; <nl> while ( SDA_READ ( ) = = 0 & & ( i + + ) < 10 ) { <nl> SCL_LOW ( ) ; <nl> unsigned char twi_readFrom ( unsigned char address , unsigned char * buf , unsigned i <nl> return 0 ; <nl> } <nl> <nl> - uint8_t twi_status ( ) { <nl> - if ( SCL_READ ( ) = = 0 ) <nl> - return I2C_SCL_HELD_LOW ; / / SCL held low by another device , no procedure available to recover <nl> - int clockCount = 20 ; <nl> - <nl> - while ( SDA_READ ( ) = = 0 & & clockCount > 0 ) { / / if SDA low , read the bits slaves have to sent to a max <nl> - - - clockCount ; <nl> - twi_read_bit ( ) ; <nl> - if ( SCL_READ ( ) = = 0 ) <nl> - return I2C_SCL_HELD_LOW_AFTER_READ ; / / I2C bus error . SCL held low beyond slave clock stretch time <nl> + uint8_t twi_status ( ) { <nl> + if ( SCL_READ ( ) = = 0 ) { <nl> + return I2C_SCL_HELD_LOW ; / / SCL held low by another device , no procedure available to recover <nl> + } <nl> + int clockCount = 20 ; <nl> + while ( SDA_READ ( ) = = 0 & & clockCount > 0 ) { / / if SDA low , read the bits slaves have to sent to a max <nl> + twi_read_bit ( ) ; <nl> + if ( SCL_READ ( ) = = 0 ) { <nl> + return I2C_SCL_HELD_LOW_AFTER_READ ; / / I2C bus error . SCL held low beyond slave clock stretch time <nl> + } <nl> + } <nl> + if ( SDA_READ ( ) = = 0 ) { <nl> + return I2C_SDA_HELD_LOW ; / / I2C bus error . SDA line held low by slave / another_master after n bits . <nl> } <nl> + if ( ! twi_write_start ( ) ) { <nl> + return I2C_SDA_HELD_LOW_AFTER_INIT ; / / line busy . SDA again held low by another device . 2nd master ? <nl> + } else { <nl> + return I2C_OK ; <nl> + } <nl> + } <nl> + <nl> + uint8_t twi_transmit ( const uint8_t * data , uint8_t length ) <nl> + { <nl> + uint8_t i ; <nl> + <nl> + / / ensure data will fit into buffer <nl> + if ( length > TWI_BUFFER_LENGTH ) { <nl> + return 1 ; <nl> + } <nl> + <nl> + / / ensure we are currently a slave transmitter <nl> + if ( twi_state ! 
= TWI_STX ) { <nl> + return 2 ; <nl> + } <nl> + <nl> + / / set length and copy data into tx buffer <nl> + twi_txBufferLength = length ; <nl> + for ( i = 0 ; i < length ; + + i ) { <nl> + twi_txBuffer [ i ] = data [ i ] ; <nl> + } <nl> + <nl> + return 0 ; <nl> + } <nl> + <nl> + void twi_attachSlaveRxEvent ( void ( * function ) ( uint8_t * , size_t ) ) <nl> + { <nl> + twi_onSlaveReceive = function ; <nl> + } <nl> + <nl> + void twi_attachSlaveTxEvent ( void ( * function ) ( void ) ) <nl> + { <nl> + twi_onSlaveTransmit = function ; <nl> + } <nl> + <nl> + void ICACHE_RAM_ATTR twi_reply ( uint8_t ack ) <nl> + { <nl> + / / transmit master read ready signal , with or without ack <nl> + if ( ack ) { <nl> + / / TWCR = _BV ( TWEN ) | _BV ( TWIE ) | _BV ( TWINT ) | _BV ( TWEA ) ; <nl> + SCL_HIGH ( ) ; / / _BV ( TWINT ) <nl> + twi_ack = 1 ; / / _BV ( TWEA ) <nl> + } else { <nl> + / / TWCR = _BV ( TWEN ) | _BV ( TWIE ) | _BV ( TWINT ) ; <nl> + SCL_HIGH ( ) ; / / _BV ( TWINT ) <nl> + twi_ack = 0 ; / / ~ _BV ( TWEA ) <nl> + } <nl> + } <nl> + <nl> + void ICACHE_RAM_ATTR twi_stop ( void ) <nl> + { <nl> + / / send stop condition <nl> + / / TWCR = _BV ( TWEN ) | _BV ( TWIE ) | _BV ( TWEA ) | _BV ( TWINT ) | _BV ( TWSTO ) ; <nl> + SCL_HIGH ( ) ; / / _BV ( TWINT ) <nl> + twi_ack = 1 ; / / _BV ( TWEA ) <nl> + twi_delay ( 5 ) ; / / Maybe this should be here <nl> + SDA_HIGH ( ) ; / / _BV ( TWSTO ) <nl> + / / update twi state <nl> + twi_state = TWI_READY ; <nl> + } <nl> + <nl> + void ICACHE_RAM_ATTR twi_releaseBus ( void ) <nl> + { <nl> + / / release bus <nl> + / / TWCR = _BV ( TWEN ) | _BV ( TWIE ) | _BV ( TWEA ) | _BV ( TWINT ) ; <nl> + SCL_HIGH ( ) ; / / _BV ( TWINT ) <nl> + twi_ack = 1 ; / / _BV ( TWEA ) <nl> + SDA_HIGH ( ) ; <nl> + <nl> + / / update twi state <nl> + twi_state = TWI_READY ; <nl> + } <nl> + <nl> + <nl> + void ICACHE_RAM_ATTR twi_onTwipEvent ( uint8_t status ) <nl> + { <nl> + switch ( status ) { <nl> + / / Slave Receiver <nl> + case TW_SR_SLA_ACK : / / addressed , returned ack <nl> + case TW_SR_GCALL_ACK : / / addressed generally , returned ack <nl> + case TW_SR_ARB_LOST_SLA_ACK : / / lost arbitration , returned ack <nl> + case TW_SR_ARB_LOST_GCALL_ACK : / / lost arbitration , returned ack <nl> + / / enter slave receiver mode <nl> + twi_state = TWI_SRX ; <nl> + / / indicate that rx buffer can be overwritten and ack <nl> + twi_rxBufferIndex = 0 ; <nl> + twi_reply ( 1 ) ; <nl> + break ; <nl> + case TW_SR_DATA_ACK : / / data received , returned ack <nl> + case TW_SR_GCALL_DATA_ACK : / / data received generally , returned ack <nl> + / / if there is still room in the rx buffer <nl> + if ( twi_rxBufferIndex < TWI_BUFFER_LENGTH ) { <nl> + / / put byte in buffer and ack <nl> + twi_rxBuffer [ twi_rxBufferIndex + + ] = TWDR ; <nl> + twi_reply ( 1 ) ; <nl> + } else { <nl> + / / otherwise nack <nl> + twi_reply ( 0 ) ; <nl> + } <nl> + break ; <nl> + case TW_SR_STOP : / / stop or repeated start condition received <nl> + / / put a null char after data if there ' s room <nl> + if ( twi_rxBufferIndex < TWI_BUFFER_LENGTH ) { <nl> + twi_rxBuffer [ twi_rxBufferIndex ] = ' \ 0 ' ; <nl> + } <nl> + / / callback to user - defined callback over event task to allow for non - RAM - residing code <nl> + / / twi_rxBufferLock = true ; / / This may be necessary <nl> + ets_post ( EVENTTASK_QUEUE_PRIO , TWI_SIG_RX , twi_rxBufferIndex ) ; <nl> + <nl> + / / since we submit rx buffer to " wire " library , we can reset it <nl> + twi_rxBufferIndex = 0 ; <nl> + break ; <nl> + <nl> + case TW_SR_DATA_NACK : / / data received , returned 
nack <nl> + case TW_SR_GCALL_DATA_NACK : / / data received generally , returned nack <nl> + / / nack back at master <nl> + twi_reply ( 0 ) ; <nl> + break ; <nl> + <nl> + / / Slave Transmitter <nl> + case TW_ST_SLA_ACK : / / addressed , returned ack <nl> + case TW_ST_ARB_LOST_SLA_ACK : / / arbitration lost , returned ack <nl> + / / enter slave transmitter mode <nl> + twi_state = TWI_STX ; <nl> + / / ready the tx buffer index for iteration <nl> + twi_txBufferIndex = 0 ; <nl> + / / set tx buffer length to be zero , to verify if user changes it <nl> + twi_txBufferLength = 0 ; <nl> + / / callback to user - defined callback over event task to allow for non - RAM - residing code <nl> + / / request for txBuffer to be filled and length to be set <nl> + / / note : user must call twi_transmit ( bytes , length ) to do this <nl> + ets_post ( EVENTTASK_QUEUE_PRIO , TWI_SIG_TX , 0 ) ; <nl> + break ; <nl> + <nl> + case TW_ST_DATA_ACK : / / byte sent , ack returned <nl> + / / copy data to output register <nl> + TWDR = twi_txBuffer [ twi_txBufferIndex + + ] ; <nl> + <nl> + bitCount = 8 ; <nl> + bitCount - - ; <nl> + ( twi_data & 0x80 ) ? SDA_HIGH ( ) : SDA_LOW ( ) ; <nl> + twi_data < < = 1 ; <nl> + <nl> + / / if there is more to send , ack , otherwise nack <nl> + if ( twi_txBufferIndex < twi_txBufferLength ) { <nl> + twi_reply ( 1 ) ; <nl> + } else { <nl> + twi_reply ( 0 ) ; <nl> + } <nl> + break ; <nl> + case TW_ST_DATA_NACK : / / received nack , we are done <nl> + case TW_ST_LAST_DATA : / / received ack , but we are done already ! <nl> + / / leave slave receiver state <nl> + twi_releaseBus ( ) ; <nl> + break ; <nl> + <nl> + / / All <nl> + case TW_NO_INFO : / / no state information <nl> + break ; <nl> + case TW_BUS_ERROR : / / bus error , illegal stop / start <nl> + twi_error = TW_BUS_ERROR ; <nl> + twi_stop ( ) ; <nl> + break ; <nl> + } <nl> + } <nl> + <nl> + void ICACHE_RAM_ATTR onTimer ( ) <nl> + { <nl> + twi_releaseBus ( ) ; <nl> + twip_status = TW_BUS_ERROR ; <nl> + twi_onTwipEvent ( twip_status ) ; <nl> + twip_mode = TWIPM_WAIT ; <nl> + twip_state = TWIP_BUS_ERR ; <nl> + } <nl> + <nl> + static void eventTask ( ETSEvent * e ) <nl> + { <nl> + <nl> + if ( e = = NULL ) { <nl> + return ; <nl> + } <nl> + <nl> + switch ( e - > sig ) <nl> + { <nl> + case TWI_SIG_TX : <nl> + twi_onSlaveTransmit ( ) ; <nl> + <nl> + / / if they didn ' t change buffer & length , initialize it <nl> + if ( twi_txBufferLength = = 0 ) { <nl> + twi_txBufferLength = 1 ; <nl> + twi_txBuffer [ 0 ] = 0x00 ; <nl> + } <nl> + <nl> + / / Initiate transmission <nl> + twi_onTwipEvent ( TW_ST_DATA_ACK ) ; <nl> + <nl> + break ; <nl> + <nl> + case TWI_SIG_RX : <nl> + / / ack future responses and leave slave receiver state <nl> + twi_releaseBus ( ) ; <nl> + twi_onSlaveReceive ( twi_rxBuffer , e - > par ) ; <nl> + break ; <nl> + } <nl> + } <nl> + <nl> + void ICACHE_RAM_ATTR onSclChange ( void ) <nl> + { <nl> + static uint8_t sda ; <nl> + static uint8_t scl ; <nl> + <nl> + sda = SDA_READ ( ) ; <nl> + scl = SCL_READ ( ) ; <nl> + <nl> + twip_status = 0xF8 ; / / reset TWI status <nl> + <nl> + switch ( twip_state ) <nl> + { <nl> + case TWIP_IDLE : <nl> + case TWIP_WAIT_STOP : <nl> + case TWIP_BUS_ERR : <nl> + / / ignore <nl> + break ; <nl> + <nl> + case TWIP_START : <nl> + case TWIP_REP_START : <nl> + case TWIP_SLA_W : <nl> + case TWIP_READ : <nl> + if ( ! scl ) { <nl> + / / ignore <nl> + } else { <nl> + bitCount - - ; <nl> + twi_data < < = 1 ; <nl> + twi_data | = sda ; <nl> + <nl> + if ( bitCount ! 
= 0 ) { <nl> + / / continue <nl> + } else { <nl> + twip_state = TWIP_SEND_ACK ; <nl> + } <nl> + } <nl> + break ; <nl> + <nl> + case TWIP_SEND_ACK : <nl> + if ( scl ) { <nl> + / / ignore <nl> + } else { <nl> + if ( twip_mode = = TWIPM_IDLE ) { <nl> + if ( ( twi_data & 0xFE ) ! = twi_addr ) { <nl> + / / ignore <nl> + } else { <nl> + SDA_LOW ( ) ; <nl> + } <nl> + } else { <nl> + if ( ! twi_ack ) { <nl> + / / ignore <nl> + } else { <nl> + SDA_LOW ( ) ; <nl> + } <nl> + } <nl> + twip_state = TWIP_WAIT_ACK ; <nl> + } <nl> + break ; <nl> + <nl> + case TWIP_WAIT_ACK : <nl> + if ( scl ) { <nl> + / / ignore <nl> + } else { <nl> + if ( twip_mode = = TWIPM_IDLE ) { <nl> + if ( ( twi_data & 0xFE ) ! = twi_addr ) { <nl> + SDA_HIGH ( ) ; <nl> + twip_state = TWIP_WAIT_STOP ; <nl> + } else { <nl> + SCL_LOW ( ) ; / / clock stretching <nl> + SDA_HIGH ( ) ; <nl> + twip_mode = TWIPM_ADDRESSED ; <nl> + if ( ! ( twi_data & 0x01 ) ) { <nl> + twip_status = TW_SR_SLA_ACK ; <nl> + twi_onTwipEvent ( twip_status ) ; <nl> + bitCount = 8 ; <nl> + twip_state = TWIP_SLA_W ; <nl> + } else { <nl> + twip_status = TW_ST_SLA_ACK ; <nl> + twi_onTwipEvent ( twip_status ) ; <nl> + twip_state = TWIP_SLA_R ; <nl> + } <nl> + } <nl> + } else { <nl> + SCL_LOW ( ) ; / / clock stretching <nl> + SDA_HIGH ( ) ; <nl> + if ( ! twi_ack ) { <nl> + twip_status = TW_SR_DATA_NACK ; <nl> + twi_onTwipEvent ( twip_status ) ; <nl> + twip_mode = TWIPM_WAIT ; <nl> + twip_state = TWIP_WAIT_STOP ; <nl> + } else { <nl> + twip_status = TW_SR_DATA_ACK ; <nl> + twi_onTwipEvent ( twip_status ) ; <nl> + bitCount = 8 ; <nl> + twip_state = TWIP_READ ; <nl> + } <nl> + } <nl> + } <nl> + break ; <nl> + <nl> + case TWIP_SLA_R : <nl> + case TWIP_WRITE : <nl> + if ( scl ) { <nl> + / / ignore <nl> + } else { <nl> + bitCount - - ; <nl> + ( twi_data & 0x80 ) ? SDA_HIGH ( ) : SDA_LOW ( ) ; <nl> + twi_data < < = 1 ; <nl> + <nl> + if ( bitCount ! = 0 ) { <nl> + / / continue <nl> + } else { <nl> + twip_state = TWIP_REC_ACK ; <nl> + } <nl> + } <nl> + break ; <nl> + <nl> + case TWIP_REC_ACK : <nl> + if ( scl ) { <nl> + / / ignore <nl> + } else { <nl> + SDA_HIGH ( ) ; <nl> + twip_state = TWIP_READ_ACK ; <nl> + } <nl> + break ; <nl> + <nl> + case TWIP_READ_ACK : <nl> + if ( ! scl ) { <nl> + / / ignore <nl> + } else { <nl> + twi_ack_rec = ! sda ; <nl> + twip_state = TWIP_RWAIT_ACK ; <nl> + } <nl> + break ; <nl> + <nl> + case TWIP_RWAIT_ACK : <nl> + if ( scl ) { <nl> + / / ignore <nl> + } else { <nl> + SCL_LOW ( ) ; / / clock stretching <nl> + if ( twi_ack & & twi_ack_rec ) { <nl> + twip_status = TW_ST_DATA_ACK ; <nl> + twi_onTwipEvent ( twip_status ) ; <nl> + twip_state = TWIP_WRITE ; <nl> + } else { <nl> + / / we have no more data to send and / or the master doesn ' t want anymore <nl> + twip_status = twi_ack_rec ? TW_ST_LAST_DATA : TW_ST_DATA_NACK ; <nl> + twi_onTwipEvent ( twip_status ) ; <nl> + twip_mode = TWIPM_WAIT ; <nl> + twip_state = TWIP_WAIT_STOP ; <nl> + } <nl> + } <nl> + break ; <nl> + <nl> + default : <nl> + break ; <nl> + } <nl> + } <nl> + <nl> + void ICACHE_RAM_ATTR onSdaChange ( void ) <nl> + { <nl> + static uint8_t sda ; <nl> + static uint8_t scl ; <nl> + sda = SDA_READ ( ) ; <nl> + scl = SCL_READ ( ) ; <nl> + <nl> + switch ( twip_state ) <nl> + { <nl> + case TWIP_IDLE : <nl> + if ( ! 
scl ) { <nl> + / / DATA - ignore <nl> + } else if ( sda ) { <nl> + / / STOP - ignore <nl> + } else { <nl> + / / START <nl> + bitCount = 8 ; <nl> + twip_state = TWIP_START ; <nl> + ets_timer_arm_new ( & timer , twi_timeout_ms , false , true ) ; / / Once , ms <nl> + } <nl> + break ; <nl> <nl> - if ( SDA_READ ( ) = = 0 ) <nl> - return I2C_SDA_HELD_LOW ; / / I2C bus error . SDA line held low by slave / another_master after n bits . <nl> + case TWIP_START : <nl> + case TWIP_REP_START : <nl> + case TWIP_SEND_ACK : <nl> + case TWIP_WAIT_ACK : <nl> + case TWIP_SLA_R : <nl> + case TWIP_REC_ACK : <nl> + case TWIP_READ_ACK : <nl> + case TWIP_RWAIT_ACK : <nl> + case TWIP_WRITE : <nl> + if ( ! scl ) { <nl> + / / DATA - ignore <nl> + } else { <nl> + / / START or STOP <nl> + SDA_HIGH ( ) ; / / Should not be necessary <nl> + twip_status = TW_BUS_ERROR ; <nl> + twi_onTwipEvent ( twip_status ) ; <nl> + twip_mode = TWIPM_WAIT ; <nl> + twip_state = TWIP_BUS_ERR ; <nl> + } <nl> + break ; <nl> <nl> - if ( ! twi_write_start ( ) ) <nl> - return I2C_SDA_HELD_LOW_AFTER_INIT ; / / line busy . SDA again held low by another device . 2nd master ? <nl> + case TWIP_WAIT_STOP : <nl> + case TWIP_BUS_ERR : <nl> + if ( ! scl ) { <nl> + / / DATA - ignore <nl> + } else { <nl> + if ( sda ) { <nl> + / / STOP <nl> + SCL_LOW ( ) ; / / clock stretching <nl> + ets_timer_disarm ( & timer ) ; <nl> + twip_state = TWIP_IDLE ; <nl> + twip_mode = TWIPM_IDLE ; <nl> + SCL_HIGH ( ) ; <nl> + } else { <nl> + / / START <nl> + if ( twip_state = = TWIP_BUS_ERR ) { <nl> + / / ignore <nl> + } else { <nl> + bitCount = 8 ; <nl> + twip_state = TWIP_REP_START ; <nl> + ets_timer_arm_new ( & timer , twi_timeout_ms , false , true ) ; / / Once , ms <nl> + } <nl> + } <nl> + } <nl> + break ; <nl> <nl> - return I2C_OK ; / / all ok <nl> + case TWIP_SLA_W : <nl> + case TWIP_READ : <nl> + if ( ! scl ) { <nl> + / / DATA - ignore <nl> + } else { <nl> + / / START or STOP <nl> + if ( bitCount ! = 7 ) { <nl> + / / inside byte transfer - error <nl> + twip_status = TW_BUS_ERROR ; <nl> + twi_onTwipEvent ( twip_status ) ; <nl> + twip_mode = TWIPM_WAIT ; <nl> + twip_state = TWIP_BUS_ERR ; <nl> + } else { <nl> + / / during first bit in byte transfer - ok <nl> + SCL_LOW ( ) ; / / clock stretching <nl> + twip_status = TW_SR_STOP ; <nl> + twi_onTwipEvent ( twip_status ) ; <nl> + if ( sda ) { <nl> + / / STOP <nl> + ets_timer_disarm ( & timer ) ; <nl> + twip_state = TWIP_IDLE ; <nl> + twip_mode = TWIPM_IDLE ; <nl> + } else { <nl> + / / START <nl> + bitCount = 8 ; <nl> + ets_timer_arm_new ( & timer , twi_timeout_ms , false , true ) ; / / Once , ms <nl> + twip_state = TWIP_REP_START ; <nl> + twip_mode = TWIPM_IDLE ; <nl> + } <nl> + } <nl> + } <nl> + break ; <nl> <nl> + default : <nl> + break ; <nl> + } <nl> } <nl> mmm a / cores / esp8266 / twi . h <nl> ppp b / cores / esp8266 / twi . h <nl> <nl> - / * <nl> + / * <nl> twi . h - Software I2C library for esp8266 <nl> <nl> Copyright ( c ) 2015 Hristo Gochkov . All rights reserved . <nl> This file is part of the esp8266 core for Arduino environment . <nl> - <nl> + <nl> This library is free software ; you can redistribute it and / or <nl> modify it under the terms of the GNU Lesser General Public <nl> License as published by the Free Software Foundation ; either <nl> <nl> You should have received a copy of the GNU Lesser General Public <nl> License along with this library ; if not , write to the Free Software <nl> Foundation , Inc . 
, 51 Franklin St , Fifth Floor , Boston , MA 02110 - 1301 USA <nl> + Modified January 2017 by Bjorn Hammarberg ( bjoham @ esp8266 . com ) - i2c slave support <nl> * / <nl> # ifndef SI2C_h <nl> # define SI2C_h <nl> extern " C " { <nl> # define I2C_SDA_HELD_LOW 3 <nl> # define I2C_SDA_HELD_LOW_AFTER_INIT 4 <nl> <nl> + # ifndef TWI_BUFFER_LENGTH <nl> + # define TWI_BUFFER_LENGTH 32 <nl> + # endif <nl> + <nl> void twi_init ( unsigned char sda , unsigned char scl ) ; <nl> + void twi_setAddress ( uint8_t ) ; <nl> void twi_stop ( void ) ; <nl> void twi_setClock ( unsigned int freq ) ; <nl> void twi_setClockStretchLimit ( uint32_t limit ) ; <nl> uint8_t twi_writeTo ( unsigned char address , unsigned char * buf , unsigned int len <nl> uint8_t twi_readFrom ( unsigned char address , unsigned char * buf , unsigned int len , unsigned char sendStop ) ; <nl> uint8_t twi_status ( ) ; <nl> <nl> + uint8_t twi_transmit ( const uint8_t * , uint8_t ) ; <nl> + <nl> + void twi_attachSlaveRxEvent ( void ( * ) ( uint8_t * , size_t ) ) ; <nl> + void twi_attachSlaveTxEvent ( void ( * ) ( void ) ) ; <nl> + void twi_reply ( uint8_t ) ; <nl> + / / void twi_stop ( void ) ; <nl> + void twi_releaseBus ( void ) ; <nl> + <nl> + <nl> # ifdef __cplusplus <nl> } <nl> # endif <nl> <nl> - # endif <nl> \ No newline at end of file <nl> + # endif <nl> new file mode 100644 <nl> index 0000000000 . . 60a92fc965 <nl> mmm / dev / null <nl> ppp b / cores / esp8266 / twi_util . h <nl> <nl> + / * Copyright ( c ) 2002 , Marek Michalkiewicz <nl> + Copyright ( c ) 2005 , 2007 Joerg Wunsch <nl> + All rights reserved . <nl> + <nl> + Redistribution and use in source and binary forms , with or without <nl> + modification , are permitted provided that the following conditions are met : <nl> + <nl> + * Redistributions of source code must retain the above copyright <nl> + notice , this list of conditions and the following disclaimer . <nl> + <nl> + * Redistributions in binary form must reproduce the above copyright <nl> + notice , this list of conditions and the following disclaimer in <nl> + the documentation and / or other materials provided with the <nl> + distribution . <nl> + <nl> + * Neither the name of the copyright holders nor the names of <nl> + contributors may be used to endorse or promote products derived <nl> + from this software without specific prior written permission . <nl> + <nl> + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS " AS IS " <nl> + AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT LIMITED TO , THE <nl> + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE <nl> + ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE <nl> + LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL , SPECIAL , EXEMPLARY , OR <nl> + CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT LIMITED TO , PROCUREMENT OF <nl> + SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , DATA , OR PROFITS ; OR BUSINESS <nl> + INTERRUPTION ) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY , WHETHER IN <nl> + CONTRACT , STRICT LIABILITY , OR TORT ( INCLUDING NEGLIGENCE OR OTHERWISE ) <nl> + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE , EVEN IF ADVISED OF THE <nl> + POSSIBILITY OF SUCH DAMAGE . <nl> + <nl> + Modified January 2017 by Bjorn Hammarberg ( bjoham @ esp8266 . com ) - i2c slave support <nl> + * / <nl> + <nl> + / * $ Id $ * / <nl> + / * copied from : Id : avr / twi . h , v 1 . 
4 2004 / 11 / 01 21 : 19 : 54 arcanum Exp * / <nl> + <nl> + # ifndef _UTIL_TWI_H_ <nl> + # define _UTIL_TWI_H_ 1 <nl> + <nl> + / / # include < avr / io . h > <nl> + <nl> + / * * \ file * / <nl> + / * * \ defgroup util_twi < util / twi . h > : TWI bit mask definitions <nl> + \ code # include < util / twi . h > \ endcode <nl> + <nl> + This header file contains bit mask definitions for use with <nl> + the AVR TWI interface . <nl> + * / <nl> + / * * \ name TWSR values <nl> + <nl> + Mnemonics : <nl> + < br > TW_MT_xxx - master transmitter <nl> + < br > TW_MR_xxx - master receiver <nl> + < br > TW_ST_xxx - slave transmitter <nl> + < br > TW_SR_xxx - slave receiver <nl> + * / <nl> + <nl> + / * @ { * / <nl> + / * Master * / <nl> + / * * \ ingroup util_twi <nl> + \ def TW_START <nl> + start condition transmitted * / <nl> + # define TW_START 0x08 <nl> + <nl> + / * * \ ingroup util_twi <nl> + \ def TW_REP_START <nl> + repeated start condition transmitted * / <nl> + # define TW_REP_START 0x10 <nl> + <nl> + / * Master Transmitter * / <nl> + / * * \ ingroup util_twi <nl> + \ def TW_MT_SLA_ACK <nl> + SLA + W transmitted , ACK received * / <nl> + # define TW_MT_SLA_ACK 0x18 <nl> + <nl> + / * * \ ingroup util_twi <nl> + \ def TW_MT_SLA_NACK <nl> + SLA + W transmitted , NACK received * / <nl> + # define TW_MT_SLA_NACK 0x20 <nl> + <nl> + / * * \ ingroup util_twi <nl> + \ def TW_MT_DATA_ACK <nl> + data transmitted , ACK received * / <nl> + # define TW_MT_DATA_ACK 0x28 <nl> + <nl> + / * * \ ingroup util_twi <nl> + \ def TW_MT_DATA_NACK <nl> + data transmitted , NACK received * / <nl> + # define TW_MT_DATA_NACK 0x30 <nl> + <nl> + / * * \ ingroup util_twi <nl> + \ def TW_MT_ARB_LOST <nl> + arbitration lost in SLA + W or data * / <nl> + # define TW_MT_ARB_LOST 0x38 <nl> + <nl> + / * Master Receiver * / <nl> + / * * \ ingroup util_twi <nl> + \ def TW_MR_ARB_LOST <nl> + arbitration lost in SLA + R or NACK * / <nl> + # define TW_MR_ARB_LOST 0x38 <nl> + <nl> + / * * \ ingroup util_twi <nl> + \ def TW_MR_SLA_ACK <nl> + SLA + R transmitted , ACK received * / <nl> + # define TW_MR_SLA_ACK 0x40 <nl> + <nl> + / * * \ ingroup util_twi <nl> + \ def TW_MR_SLA_NACK <nl> + SLA + R transmitted , NACK received * / <nl> + # define TW_MR_SLA_NACK 0x48 <nl> + <nl> + / * * \ ingroup util_twi <nl> + \ def TW_MR_DATA_ACK <nl> + data received , ACK returned * / <nl> + # define TW_MR_DATA_ACK 0x50 <nl> + <nl> + / * * \ ingroup util_twi <nl> + \ def TW_MR_DATA_NACK <nl> + data received , NACK returned * / <nl> + # define TW_MR_DATA_NACK 0x58 <nl> + <nl> + / * Slave Transmitter * / <nl> + / * * \ ingroup util_twi <nl> + \ def TW_ST_SLA_ACK <nl> + SLA + R received , ACK returned * / <nl> + # define TW_ST_SLA_ACK 0xA8 <nl> + <nl> + / * * \ ingroup util_twi <nl> + \ def TW_ST_ARB_LOST_SLA_ACK <nl> + arbitration lost in SLA + RW , SLA + R received , ACK returned * / <nl> + # define TW_ST_ARB_LOST_SLA_ACK 0xB0 <nl> + <nl> + / * * \ ingroup util_twi <nl> + \ def TW_ST_DATA_ACK <nl> + data transmitted , ACK received * / <nl> + # define TW_ST_DATA_ACK 0xB8 <nl> + <nl> + / * * \ ingroup util_twi <nl> + \ def TW_ST_DATA_NACK <nl> + data transmitted , NACK received * / <nl> + # define TW_ST_DATA_NACK 0xC0 <nl> + <nl> + / * * \ ingroup util_twi <nl> + \ def TW_ST_LAST_DATA <nl> + last data byte transmitted , ACK received * / <nl> + # define TW_ST_LAST_DATA 0xC8 <nl> + <nl> + / * Slave Receiver * / <nl> + / * * \ ingroup util_twi <nl> + \ def TW_SR_SLA_ACK <nl> + SLA + W received , ACK returned * / <nl> + # define TW_SR_SLA_ACK 0x60 <nl> + <nl> + / * 
* \ ingroup util_twi <nl> + \ def TW_SR_ARB_LOST_SLA_ACK <nl> + arbitration lost in SLA + RW , SLA + W received , ACK returned * / <nl> + # define TW_SR_ARB_LOST_SLA_ACK 0x68 <nl> + <nl> + / * * \ ingroup util_twi <nl> + \ def TW_SR_GCALL_ACK <nl> + general call received , ACK returned * / <nl> + # define TW_SR_GCALL_ACK 0x70 <nl> + <nl> + / * * \ ingroup util_twi <nl> + \ def TW_SR_ARB_LOST_GCALL_ACK <nl> + arbitration lost in SLA + RW , general call received , ACK returned * / <nl> + # define TW_SR_ARB_LOST_GCALL_ACK 0x78 <nl> + <nl> + / * * \ ingroup util_twi <nl> + \ def TW_SR_DATA_ACK <nl> + data received , ACK returned * / <nl> + # define TW_SR_DATA_ACK 0x80 <nl> + <nl> + / * * \ ingroup util_twi <nl> + \ def TW_SR_DATA_NACK <nl> + data received , NACK returned * / <nl> + # define TW_SR_DATA_NACK 0x88 <nl> + <nl> + / * * \ ingroup util_twi <nl> + \ def TW_SR_GCALL_DATA_ACK <nl> + general call data received , ACK returned * / <nl> + # define TW_SR_GCALL_DATA_ACK 0x90 <nl> + <nl> + / * * \ ingroup util_twi <nl> + \ def TW_SR_GCALL_DATA_NACK <nl> + general call data received , NACK returned * / <nl> + # define TW_SR_GCALL_DATA_NACK 0x98 <nl> + <nl> + / * * \ ingroup util_twi <nl> + \ def TW_SR_STOP <nl> + stop or repeated start condition received while selected * / <nl> + # define TW_SR_STOP 0xA0 <nl> + <nl> + / * Misc * / <nl> + / * * \ ingroup util_twi <nl> + \ def TW_NO_INFO <nl> + no state information available * / <nl> + # define TW_NO_INFO 0xF8 <nl> + <nl> + / * * \ ingroup util_twi <nl> + \ def TW_BUS_ERROR <nl> + illegal start or stop condition * / <nl> + # define TW_BUS_ERROR 0x00 <nl> + <nl> + # if 0 <nl> + <nl> + / * * <nl> + * \ ingroup util_twi <nl> + * \ def TW_STATUS_MASK <nl> + * The lower 3 bits of TWSR are reserved on the ATmega163 . <nl> + * The 2 LSB carry the prescaler bits on the newer ATmegas . <nl> + * / <nl> + # define TW_STATUS_MASK ( _BV ( TWS7 ) | _BV ( TWS6 ) | _BV ( TWS5 ) | _BV ( TWS4 ) | \ <nl> + _BV ( TWS3 ) ) <nl> + / * * <nl> + * \ ingroup util_twi <nl> + * \ def TW_STATUS <nl> + * <nl> + * TWSR , masked by TW_STATUS_MASK <nl> + * / <nl> + # define TW_STATUS ( TWSR & TW_STATUS_MASK ) <nl> + / * @ } * / <nl> + # endif <nl> + <nl> + <nl> + / * * <nl> + * \ name R / ~ W bit in SLA + R / W address field . <nl> + * / <nl> + <nl> + / * @ { * / <nl> + / * * \ ingroup util_twi <nl> + \ def TW_READ <nl> + SLA + R address * / <nl> + # define TW_READ 1 <nl> + <nl> + / * * \ ingroup util_twi <nl> + \ def TW_WRITE <nl> + SLA + W address * / <nl> + # define TW_WRITE 0 <nl> + / * @ } * / <nl> + <nl> + # endif / * _UTIL_TWI_H_ * / <nl> mmm a / libraries / Wire / Wire . cpp <nl> ppp b / libraries / Wire / Wire . cpp <nl> <nl> Modified 2012 by Todd Krein ( todd @ krein . org ) to implement repeated starts <nl> Modified December 2014 by Ivan Grokhotkov ( ivan @ esp8266 . com ) - esp8266 support <nl> Modified April 2015 by Hrsto Gochkov ( ficeto @ ficeto . com ) - alternative esp8266 support <nl> + Modified January 2017 by Bjorn Hammarberg ( bjoham @ esp8266 . 
com ) - i2c slave support <nl> * / <nl> <nl> extern " C " { <nl> void TwoWire : : begin ( void ) { <nl> } <nl> <nl> void TwoWire : : begin ( uint8_t address ) { <nl> - ( void ) address ; <nl> - / / twi_setAddress ( address ) ; <nl> - / / twi_attachSlaveTxEvent ( onRequestService ) ; <nl> - / / twi_attachSlaveRxEvent ( onReceiveService ) ; <nl> + twi_setAddress ( address ) ; <nl> + twi_attachSlaveTxEvent ( onRequestService ) ; <nl> + twi_attachSlaveRxEvent ( onReceiveService ) ; <nl> begin ( ) ; <nl> } <nl> <nl> size_t TwoWire : : write ( uint8_t data ) { <nl> + + txBufferIndex ; <nl> txBufferLength = txBufferIndex ; <nl> } else { <nl> - / / i2c_slave_transmit ( & data , 1 ) ; <nl> + twi_transmit ( & data , 1 ) ; <nl> } <nl> return 1 ; <nl> } <nl> size_t TwoWire : : write ( const uint8_t * data , size_t quantity ) { <nl> if ( ! write ( data [ i ] ) ) return i ; <nl> } <nl> } else { <nl> - / / i2c_slave_transmit ( data , quantity ) ; <nl> + twi_transmit ( data , quantity ) ; <nl> } <nl> return quantity ; <nl> } <nl> void TwoWire : : flush ( void ) { <nl> <nl> void TwoWire : : onReceiveService ( uint8_t * inBytes , int numBytes ) <nl> { <nl> - ( void ) inBytes ; <nl> - ( void ) numBytes ; <nl> / / don ' t bother if user hasn ' t registered a callback <nl> - / / if ( ! user_onReceive ) { <nl> - / / return ; <nl> - / / } <nl> + if ( ! user_onReceive ) { <nl> + return ; <nl> + } <nl> / / / / don ' t bother if rx buffer is in use by a master requestFrom ( ) op <nl> / / / / i know this drops data , but it allows for slight stupidity <nl> / / / / meaning , they may not have read all the master requestFrom ( ) data yet <nl> / / if ( rxBufferIndex < rxBufferLength ) { <nl> / / return ; <nl> / / } <nl> - / / / / copy twi rx buffer into local read buffer <nl> - / / / / this enables new reads to happen in parallel <nl> - / / for ( uint8_t i = 0 ; i < numBytes ; + + i ) { <nl> - / / rxBuffer [ i ] = inBytes [ i ] ; <nl> - / / } <nl> - / / / / set rx iterator vars <nl> - / / rxBufferIndex = 0 ; <nl> - / / rxBufferLength = numBytes ; <nl> - / / / / alert user program <nl> - / / user_onReceive ( numBytes ) ; <nl> + <nl> + / / copy twi rx buffer into local read buffer <nl> + / / this enables new reads to happen in parallel <nl> + for ( uint8_t i = 0 ; i < numBytes ; + + i ) { <nl> + rxBuffer [ i ] = inBytes [ i ] ; <nl> + } <nl> + <nl> + / / set rx iterator vars <nl> + rxBufferIndex = 0 ; <nl> + rxBufferLength = numBytes ; <nl> + <nl> + / / alert user program <nl> + user_onReceive ( numBytes ) ; <nl> } <nl> <nl> - void TwoWire : : onRequestService ( void ) { <nl> - / / / / don ' t bother if user hasn ' t registered a callback <nl> - / / if ( ! user_onRequest ) { <nl> - / / return ; <nl> - / / } <nl> - / / / / reset tx buffer iterator vars <nl> - / / / / ! ! ! this will kill any pending pre - master sendTo ( ) activity <nl> - / / txBufferIndex = 0 ; <nl> - / / txBufferLength = 0 ; <nl> - / / / / alert user program <nl> - / / user_onRequest ( ) ; <nl> + void TwoWire : : onRequestService ( void ) <nl> + { <nl> + / / don ' t bother if user hasn ' t registered a callback <nl> + if ( ! user_onRequest ) { <nl> + return ; <nl> + } <nl> + <nl> + / / reset tx buffer iterator vars <nl> + / / ! ! ! 
this will kill any pending pre - master sendTo ( ) activity <nl> + txBufferIndex = 0 ; <nl> + txBufferLength = 0 ; <nl> + <nl> + / / alert user program <nl> + user_onRequest ( ) ; <nl> } <nl> <nl> - void TwoWire : : onReceive ( void ( * function ) ( int ) ) { <nl> - ( void ) function ; <nl> - / / user_onReceive = function ; <nl> + void TwoWire : : onReceive ( void ( * function ) ( int ) ) { <nl> + user_onReceive = function ; <nl> } <nl> <nl> void TwoWire : : onRequest ( void ( * function ) ( void ) ) { <nl> - ( void ) function ; <nl> - / / user_onRequest = function ; <nl> + user_onRequest = function ; <nl> } <nl> <nl> / / Preinstantiate Objects / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl>
|
I2C slave support ( originally by bjoham ) ( )
|
esp8266/Arduino
|
cb05b86d49717ce155db618b9cb9e9cc18ced92f
|
2018-10-26T15:04:16Z
|
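Annotation: the commit above wires the previously stubbed-out slave path of the ESP8266 Wire library to the twi_* slave primitives (twi_setAddress, twi_attachSlaveTxEvent, twi_attachSlaveRxEvent, twi_transmit), so callbacks registered with onReceive()/onRequest() actually fire. A minimal slave sketch using that API is shown below; the 0x08 address and the 115200 baud rate are arbitrary illustration choices, not anything mandated by the change.

#include <Wire.h>

constexpr uint8_t kSlaveAddress = 0x08;   // arbitrary 7-bit slave address

// Called by the Wire library when a master writes to this slave.
void receiveEvent(int numBytes) {
  while (Wire.available()) {
    Serial.print((char)Wire.read());      // consume bytes sent by the master
  }
  Serial.println();
}

// Called when a master issues requestFrom() against this address.
void requestEvent() {
  Wire.write("pong");                     // bytes returned to the master
}

void setup() {
  Serial.begin(115200);
  Wire.begin(kSlaveAddress);              // join the bus in slave mode
  Wire.onReceive(receiveEvent);
  Wire.onRequest(requestEvent);
}

void loop() {}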
new file mode 100644 <nl> index 000000000000 . . f50d3c829656 <nl> mmm / dev / null <nl> ppp b / torch / _C / _distributed_c10d . pyi <nl> <nl> + from torch import Tensor <nl> + from enum import Enum <nl> + from typing import Optional , List , Any , overload <nl> + from datetime import timedelta <nl> + <nl> + # This module is defined in torch / csrc / distributed / c10d / init . cpp <nl> + <nl> + class BuiltinCommHookType ( Enum ) : <nl> + ALLREDUCE = . . . <nl> + FP16_COMPRESS = . . . <nl> + <nl> + def _register_comm_hook ( reducer : Reducer , state : Any , comm_hook : Any ) : . . . <nl> + def _register_builtin_comm_hook ( reducer : Reducer , comm_hook_type : BuiltinCommHookType ) : . . . <nl> + <nl> + class _GradBucket : <nl> + def __init__ ( self , tensors : List [ Tensor ] ) : . . . <nl> + def get_tensors ( self ) - > List [ Tensor ] : . . . <nl> + <nl> + class Reducer : <nl> + def __init__ ( <nl> + self , <nl> + replicas : List [ List [ Tensor ] ] , <nl> + bucket_indices : List [ List [ int ] ] , <nl> + process_group : ProcessGroup , <nl> + expect_sparse_gradients : List [ List [ bool ] ] , <nl> + bucket_bytes_cap : int , <nl> + find_unused_parameters : bool , <nl> + gradient_as_bucket_view : bool , <nl> + ) : . . . <nl> + def initialize_buckets ( self , bucket_indices : List [ List [ int ] ] ) : . . . <nl> + . . . <nl> + <nl> + class ReduceOp ( Enum ) : <nl> + SUM = 0 <nl> + PRODUCT = 1 <nl> + MIN = 2 <nl> + MAX = 3 <nl> + BAND = 4 <nl> + BOR = 5 <nl> + BXOR = 6 <nl> + UNUSED = 7 <nl> + <nl> + class BroadcastOptions : <nl> + rootRank : int <nl> + rootTensor : int <nl> + timeout : timedelta <nl> + <nl> + class AllreduceOptions : <nl> + reduceOp : ReduceOp <nl> + timeout : timedelta <nl> + <nl> + class AllreduceCoalescedOptions ( AllreduceOptions ) : <nl> + . . . <nl> + <nl> + class ReduceOptions : <nl> + reduceOp : ReduceOp <nl> + rootRank : int <nl> + rootTensor : int <nl> + timeout : timedelta <nl> + <nl> + class AllGatherOptions : <nl> + timeout : timedelta <nl> + <nl> + class GatherOptions : <nl> + rootRank : int <nl> + timeout : timedelta <nl> + <nl> + class ScatterOptions : <nl> + rootRank : int <nl> + timeout : timedelta <nl> + <nl> + class ReduceScatterOptions : <nl> + reduceOp : ReduceOp <nl> + timeout : timedelta <nl> + <nl> + class BarrierOptions : <nl> + timeout : timedelta <nl> + <nl> + class AllToAllOptions : <nl> + timeout : timedelta <nl> + <nl> + class Store : <nl> + def set ( self , key : str , value : str ) : . . . <nl> + def get ( self , key : str ) - > bytes : . . . <nl> + def add ( self , key : str , value : int ) - > int : . . . <nl> + def delete_key ( self , key : str ) - > bool : . . . <nl> + def num_keys ( self ) - > int : . . . <nl> + def set_timeout ( self , timeout : timedelta ) : . . . <nl> + @ overload <nl> + def wait ( self , keys : List [ str ] ) : . . . <nl> + @ overload <nl> + def wait ( self , keys : List [ str ] , timeout : timedelta ) : . . . <nl> + <nl> + class FileStore ( Store ) : <nl> + def __init__ ( <nl> + self , <nl> + path : str , <nl> + numWorkers : int <nl> + ) : . . . <nl> + <nl> + class HashStore ( Store ) : <nl> + def __init__ ( self ) : . . . <nl> + <nl> + class TCPStore ( Store ) : <nl> + def __init__ ( <nl> + self , <nl> + host_name : str , <nl> + port : int , <nl> + world_size : int , <nl> + is_master : bool , <nl> + timeout : timedelta , <nl> + ) : . . . <nl> + <nl> + class PrefixStore ( Store ) : <nl> + def __init__ ( <nl> + self , <nl> + prefix : str , <nl> + store : Store <nl> + ) : . . . 
<nl> + <nl> + class Work : <nl> + def is_completed ( self ) - > bool : . . . <nl> + def is_success ( self ) - > bool : . . . <nl> + def exception ( self ) - > Any : . . . <nl> + def wait ( self , timeout : timedelta ) - > bool : . . . <nl> + def source_rank ( self ) - > int : . . . <nl> + def _source_rank ( self ) - > int : . . . <nl> + def result ( self ) - > List [ Tensor ] : . . . <nl> + def synchronize ( self ) : . . . <nl> + . . . <nl> + <nl> + class ProcessGroup : <nl> + def __init__ ( self ) : . . . <nl> + def rank ( self ) - > int : . . . <nl> + def size ( self ) - > int : . . . <nl> + @ overload <nl> + def broadcast ( <nl> + self , <nl> + tensors : List [ Tensor ] , <nl> + opts = BroadcastOptions ( ) , <nl> + ) - > Work : . . . <nl> + @ overload <nl> + def broadcast ( <nl> + self , <nl> + tensor : Tensor , <nl> + root : int , <nl> + ) - > Work : . . . <nl> + @ overload <nl> + def allreduce ( <nl> + self , <nl> + tensors : List [ Tensor ] , <nl> + opts : AllreduceOptions = AllreduceOptions ( ) , <nl> + ) - > Work : . . . <nl> + @ overload <nl> + def allreduce ( <nl> + self , <nl> + tensors : List [ Tensor ] , <nl> + op = ReduceOp . SUM , <nl> + ) - > Work : . . . <nl> + @ overload <nl> + def allreduce ( <nl> + self , <nl> + tensor : Tensor , <nl> + op = ReduceOp . SUM , <nl> + ) - > Work : . . . <nl> + def allreduce_coalesced ( <nl> + self , <nl> + tensors : List [ Tensor ] , <nl> + opts = AllreduceCoalescedOptions ( ) , <nl> + ) - > Work : . . . <nl> + @ overload <nl> + def reduce ( <nl> + self , <nl> + tensors : List [ Tensor ] , <nl> + opts = ReduceOptions ( ) , <nl> + ) - > Work : . . . <nl> + @ overload <nl> + def reduce ( <nl> + self , <nl> + tensor : Tensor , <nl> + root : int , <nl> + op = ReduceOp . SUM , <nl> + ) - > Work : . . . <nl> + @ overload <nl> + def allgather ( <nl> + self , <nl> + output_tensors : List [ List [ Tensor ] ] , <nl> + input_tensors : List [ Tensor ] , <nl> + opts = AllGatherOptions ( ) , <nl> + ) - > Work : . . . <nl> + @ overload <nl> + def allgather ( <nl> + self , <nl> + output_tensors : List [ Tensor ] , <nl> + input_tensor : Tensor , <nl> + ) - > Work : . . . <nl> + def allgather_coalesced ( <nl> + self , <nl> + output_lists : List [ List [ Tensor ] ] , <nl> + input_list : List [ Tensor ] , <nl> + opts = AllGatherOptions ( ) , <nl> + ) - > Work : . . . <nl> + @ overload <nl> + def gather ( <nl> + self , <nl> + output_tensors : List [ List [ Tensor ] ] , <nl> + input_tensors : List [ Tensor ] , <nl> + opts = GatherOptions ( ) , <nl> + ) - > Work : . . . <nl> + @ overload <nl> + def gather ( <nl> + self , <nl> + output_tensors : List [ Tensor ] , <nl> + input_tensor : Tensor , <nl> + root : int , <nl> + ) - > Work : . . . <nl> + @ overload <nl> + def scatter ( <nl> + self , <nl> + output_tensors : List [ Tensor ] , <nl> + input_tensors : List [ List [ Tensor ] ] , <nl> + opts = ScatterOptions ( ) , <nl> + ) - > Work : . . . <nl> + @ overload <nl> + def scatter ( <nl> + self , <nl> + output_tensor : Tensor , <nl> + input_tensors : List [ Tensor ] , <nl> + root : int , <nl> + ) - > Work : . . . <nl> + @ overload <nl> + def reduce_scatter ( <nl> + self , <nl> + output_tensors : List [ Tensor ] , <nl> + input_tensors : List [ List [ Tensor ] ] , <nl> + opts = ReduceScatterOptions ( ) , <nl> + ) - > Work : . . . <nl> + @ overload <nl> + def reduce_scatter ( <nl> + self , <nl> + output_tensors : Tensor , <nl> + input_tensor : List [ Tensor ] , <nl> + ) - > Work : . . . 
<nl> + @ overload <nl> + def alltoall_base ( <nl> + self , <nl> + output_tensor : Tensor , <nl> + input_tensor : Tensor , <nl> + output_split_sizes : List [ int ] , <nl> + input_split_sizes : List [ int ] , <nl> + opts = AllToAllOptions ( ) , <nl> + ) - > Work : . . . <nl> + @ overload <nl> + def alltoall_base ( <nl> + self , <nl> + output : Tensor , <nl> + input : Tensor , <nl> + output_split_sizes : List [ int ] , <nl> + input_split_sizes : List [ int ] , <nl> + ) - > Work : . . . <nl> + @ overload <nl> + def alltoall ( <nl> + self , <nl> + output_tensor : List [ Tensor ] , <nl> + input_tensor : List [ Tensor ] , <nl> + opts = AllToAllOptions ( ) , <nl> + ) - > Work : . . . <nl> + @ overload <nl> + def alltoall ( <nl> + self , <nl> + output : List [ Tensor ] , <nl> + input : List [ Tensor ] , <nl> + ) - > Work : . . . <nl> + def send ( <nl> + self , <nl> + tensors : List [ Tensor ] , <nl> + dstRank : int , <nl> + tag : int , <nl> + ) - > Work : . . . <nl> + def recv ( <nl> + self , <nl> + tensors : List [ Tensor ] , <nl> + srcRank : int , <nl> + tag : int , <nl> + ) - > Work : . . . <nl> + def recv_anysource ( <nl> + self , <nl> + tensors : List [ Tensor ] , <nl> + tag : int <nl> + ) - > Work : . . . <nl> + def barrier ( <nl> + self , <nl> + opts = BarrierOptions ( ) <nl> + ) - > Work : . . . <nl> + <nl> + class ProcessGroupRoundRobin ( ProcessGroup ) : . . . <nl> + def _round_robin_process_groups ( <nl> + process_groups : List [ ProcessGroup ] , <nl> + ) - > ProcessGroupRoundRobin : . . . <nl> + <nl> + <nl> + class ProcessGroupGloo ( ProcessGroup ) : <nl> + class Device : . . . <nl> + def __init__ ( <nl> + self , <nl> + store : Store , <nl> + rank : int , <nl> + size : int , <nl> + timeout : timedelta , <nl> + ) : . . . <nl> + @ staticmethod <nl> + def create_device ( hostname = str ( ) , interface = str ( ) ) - > Device : . . . <nl> + . . . <nl> + <nl> + class ProcessGroupNCCL ( ProcessGroup ) : <nl> + def __init__ ( <nl> + self , <nl> + store : Store , <nl> + rank : int , <nl> + size : int , <nl> + timeout : timedelta , <nl> + ) : . . . <nl> + @ staticmethod <nl> + def _group_start ( ) - > None : . . . <nl> + @ staticmethod <nl> + def _group_end ( ) - > None : . . . <nl> + . . . <nl> + <nl> + class ProcessGroupMPI ( ProcessGroup ) : <nl> + def __init__ ( <nl> + self , <nl> + rank : int , <nl> + size : int , <nl> + pgComm : int , <nl> + ) : . . . <nl> + @ staticmethod <nl> + def create ( ranks : List [ int ] ) - > ProcessGroupMPI : . . . <nl> + <nl> + def _compute_bucket_assignment_by_size ( <nl> + tensors : List [ Tensor ] , <nl> + bucket_size : int , <nl> + expect_sparse_gradient : List [ bool ] , <nl> + tensor_indices : List [ int ] ) - > List [ List [ int ] ] : . . . <nl> + def _broadcast_coalesced ( <nl> + process_group : ProcessGroup , <nl> + tensors : List [ Tensor ] , <nl> + buffer_size : int , <nl> + src : int , <nl> + ) : . . . <nl> + def _test_python_store ( store : Store ) : . . . <nl> + <nl> + _DEFAULT_FIRST_BUCKET_BYTES : int <nl> mmm a / torch / csrc / distributed / c10d / init . cpp <nl> ppp b / torch / csrc / distributed / c10d / init . cpp <nl> PyObject * c10d_init ( PyObject * _unused , PyObject * noargs ) { <nl> throw python_error ( ) ; <nl> } <nl> <nl> - auto module = py : : handle ( c10d_module ) . cast < py : : module > ( ) ; <nl> + auto torch_C_module = THPObjectPtr ( PyImport_ImportModule ( " torch . _C " ) ) ; <nl> + if ( ! torch_C_module ) { <nl> + throw python_error ( ) ; <nl> + } <nl> + <nl> + auto torch_C_m = py : : handle ( torch_C_module ) . 
cast < py : : module > ( ) ; <nl> + auto m = torch_C_m . def_submodule ( " _distributed_c10d " , " distributed c10d bindings " ) ; <nl> + <nl> + auto module = py : : handle ( m ) . cast < py : : module > ( ) ; <nl> <nl> module <nl> . def ( <nl> mmm a / torch / distributed / __init__ . py <nl> ppp b / torch / distributed / __init__ . py <nl> <nl> <nl> import torch <nl> + import sys <nl> <nl> <nl> def is_available ( ) : <nl> def is_available ( ) : <nl> <nl> <nl> if is_available ( ) : <nl> + from torch . _C . _distributed_c10d import ( <nl> + Store , <nl> + FileStore , <nl> + ProcessGroup , <nl> + Reducer , <nl> + BuiltinCommHookType , <nl> + _DEFAULT_FIRST_BUCKET_BYTES , <nl> + _GradBucket , <nl> + _register_comm_hook , <nl> + _register_builtin_comm_hook , <nl> + _broadcast_coalesced , <nl> + _compute_bucket_assignment_by_size , <nl> + _test_python_store , <nl> + ) <nl> + if sys . platform ! = ' win32 ' : <nl> + from torch . _C . _distributed_c10d import ( <nl> + TCPStore , <nl> + HashStore , <nl> + _round_robin_process_groups , <nl> + ) <nl> + <nl> from . distributed_c10d import * <nl> # Variables prefixed with underscore are not auto imported <nl> # See the comment in ` distributed_c10d . py ` above ` _backend ` on why we expose <nl> mmm a / torch / distributed / distributed_c10d . py <nl> ppp b / torch / distributed / distributed_c10d . py <nl> <nl> <nl> from . constants import default_pg_timeout <nl> from . rendezvous import rendezvous , register_rendezvous_handler # noqa : F401 <nl> - from . import ( <nl> + from torch . _C . _distributed_c10d import ( <nl> AllreduceOptions , <nl> AllreduceCoalescedOptions , <nl> AllToAllOptions , <nl> <nl> ReduceOptions , <nl> ReduceScatterOptions , <nl> ScatterOptions , <nl> + ReduceOp , <nl> + PrefixStore , <nl> ) <nl> - from . import ReduceOp <nl> - from . import PrefixStore <nl> <nl> <nl> _MPI_AVAILABLE = True <nl> <nl> <nl> <nl> try : <nl> - from . import ProcessGroupMPI <nl> + from torch . _C . _distributed_c10d import ProcessGroupMPI <nl> except ImportError : <nl> _MPI_AVAILABLE = False <nl> <nl> try : <nl> - from . import ProcessGroupNCCL <nl> + from torch . _C . _distributed_c10d import ProcessGroupNCCL <nl> except ImportError : <nl> _NCCL_AVAILABLE = False <nl> <nl> try : <nl> - from . import ProcessGroupGloo <nl> + from torch . _C . _distributed_c10d import ProcessGroupGloo <nl> except ImportError : <nl> _GLOO_AVAILABLE = False <nl> <nl> mmm a / torch / distributed / rendezvous . py <nl> ppp b / torch / distributed / rendezvous . py <nl> <nl> import numbers <nl> import os <nl> import sys <nl> - from . import FileStore <nl> + from torch . _C . _distributed_c10d import FileStore <nl> from . constants import default_pg_timeout <nl> <nl> if sys . platform ! = ' win32 ' : <nl> - from . import TCPStore <nl> + from torch . _C . _distributed_c10d import TCPStore <nl> <nl> _rendezvous_handlers = { } <nl> <nl>
|
Add type annotations for torch . _C . _distributed_c10d module . ( )
|
pytorch/pytorch
|
73a3e70b2444635c64f9b588d90a2e99cb567485
|
2020-11-06T09:28:48Z
|
mmm a / generic / VolumetricReplicationPadding . c <nl> ppp b / generic / VolumetricReplicationPadding . c <nl> static inline void THNN_ ( VolumetricReplicationPadding_shapeCheck ) ( <nl> int dimw = 3 ; <nl> int dimh = 2 ; <nl> int dimd = 1 ; <nl> + int dimslices = 0 ; <nl> + long nslices ; <nl> long idepth ; <nl> long iheight ; <nl> long iwidth ; <nl> static inline void THNN_ ( VolumetricReplicationPadding_shapeCheck ) ( <nl> dimw + + ; <nl> dimh + + ; <nl> dimd + + ; <nl> + dimslices + + ; <nl> } <nl> <nl> / * sizes * / <nl> + nslices = input - > size [ dimslices ] ; <nl> idepth = input - > size [ dimd ] ; <nl> iheight = input - > size [ dimh ] ; <nl> iwidth = input - > size [ dimw ] ; <nl> static inline void THNN_ ( VolumetricReplicationPadding_shapeCheck ) ( <nl> idepth , iheight , iwidth , odepth , oheight , owidth ) ; <nl> <nl> if ( gradOutput ! = NULL ) { <nl> + THArgCheck ( nslices = = THTensor_ ( size ) ( gradOutput , dimslices ) , 3 , <nl> + " gradOutput width unexpected . Expected : % d , Got : % d " , <nl> + nslices , THTensor_ ( size ) ( gradOutput , dimslices ) ) ; <nl> THArgCheck ( owidth = = THTensor_ ( size ) ( gradOutput , dimw ) , 3 , <nl> " gradOutput width unexpected . Expected : % d , Got : % d " , <nl> owidth , THTensor_ ( size ) ( gradOutput , dimw ) ) ; <nl>
|
Improve gradOutput checks for VolumetricReplicationPadding .
|
pytorch/pytorch
|
220183ed783101f19d88cb8fb3052fd4abc7234f
|
2016-12-06T17:09:38Z
|
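Annotation: the fix above extends the backward-pass shape check so the slice (channel) dimension of gradOutput is validated against the input, not only the spatial dimensions. A generic sketch of that defensive check is below; the helper name and exception-based error reporting are illustrative only, whereas the real code uses THArgCheck.

#include <cstddef>
#include <stdexcept>
#include <string>
#include <vector>

// Validate every dimension of gradOutput against the expected output shape.
void checkGradOutputShape(const std::vector<long>& gradOutputSizes,
                          const std::vector<long>& expectedSizes) {
  if (gradOutputSizes.size() != expectedSizes.size())
    throw std::invalid_argument("gradOutput has unexpected rank");
  for (std::size_t d = 0; d < expectedSizes.size(); ++d) {
    if (gradOutputSizes[d] != expectedSizes[d])
      throw std::invalid_argument(
          "gradOutput size unexpected at dim " + std::to_string(d) +
          ": expected " + std::to_string(expectedSizes[d]) +
          ", got " + std::to_string(gradOutputSizes[d]));
  }
}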
mmm a / Telegram / SourceFiles / history / history_widget . cpp <nl> ppp b / Telegram / SourceFiles / history / history_widget . cpp <nl> void HistoryWidget : : handleSupportSwitch ( not_null < History * > updated ) { <nl> return ; <nl> } <nl> <nl> - crl : : on_main ( <nl> - this , <nl> - Support : : GetSwitchMethod ( Auth ( ) . settings ( ) . supportSwitch ( ) ) ) ; <nl> + const auto setting = Auth ( ) . settings ( ) . supportSwitch ( ) ; <nl> + if ( auto method = Support : : GetSwitchMethod ( setting ) ) { <nl> + crl : : on_main ( this , std : : move ( method ) ) ; <nl> + } <nl> } <nl> <nl> void HistoryWidget : : inlineBotResolveDone ( <nl>
|
Fix crash in support switches .
|
telegramdesktop/tdesktop
|
c522e047c6433f27767a039acdb9dd437e9b2e51
|
2018-11-20T15:38:40Z
|
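Annotation: the crash fixed above came from scheduling whatever Support::GetSwitchMethod() returned onto the main thread even when it was an empty callable; the patch only posts it when the returned std::function converts to true. A stripped-down illustration of that guard follows, where getSwitchMethod() and postToMain() are hypothetical stand-ins for the Telegram-specific pieces (the latter playing the role of crl::on_main).

#include <functional>
#include <iostream>
#include <utility>

// May return an empty std::function when no switch behaviour is configured.
std::function<void()> getSwitchMethod(bool configured) {
  if (!configured) return {};
  return [] { std::cout << "switching to next support chat\n"; };
}

// Stand-in for posting a task to the UI thread; invoking an empty
// std::function later would throw std::bad_function_call.
void postToMain(std::function<void()> task) { task(); }

int main() {
  const bool configured = false;            // simulate "no switch method set"
  if (auto method = getSwitchMethod(configured)) {
    postToMain(std::move(method));          // only schedule non-empty callables
  }
}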
mmm a / src / core / arm / dyncom / arm_dyncom_interpreter . cpp <nl> ppp b / src / core / arm / dyncom / arm_dyncom_interpreter . cpp <nl> unsigned InterpreterMainLoop ( ARMul_State * cpu ) { <nl> # endif <nl> arm_inst * inst_base ; <nl> unsigned int addr ; <nl> - unsigned int phys_addr ; <nl> unsigned int num_instrs = 0 ; <nl> <nl> int ptr ; <nl> unsigned InterpreterMainLoop ( ARMul_State * cpu ) { <nl> else <nl> cpu - > Reg [ 15 ] & = 0xfffffffc ; <nl> <nl> - phys_addr = cpu - > Reg [ 15 ] ; <nl> - <nl> / / Find the cached instruction cream , otherwise translate it . . . <nl> auto itr = cpu - > instruction_cache . find ( cpu - > Reg [ 15 ] ) ; <nl> if ( itr ! = cpu - > instruction_cache . end ( ) ) { <nl>
|
dyncom : Remove an unused variable
|
yuzu-emu/yuzu
|
46663d657fd7cde82e3600d08b5e3bfd17b366d5
|
2015-07-29T16:21:16Z
|
mmm a / BUILD . gn <nl> ppp b / BUILD . gn <nl> declare_args ( ) { <nl> <nl> # Enable embedded builtins . <nl> # TODO ( jgruber , v8 : 6666 ) : Support ia32 and maybe MSVC . <nl> + # TODO ( jgruber , v8 : 6666 ) : Enable for remaining architectures once performance <nl> + # regressions are addressed . <nl> v8_enable_embedded_builtins = <nl> - v8_use_snapshot & & v8_current_cpu ! = " x86 " & & ( ! is_win | | is_clang ) <nl> + v8_use_snapshot & & v8_current_cpu = = " x64 " & & ( ! is_win | | is_clang ) <nl> <nl> # Enable code - generation - time checking of types in the CodeStubAssembler . <nl> v8_enable_verify_csa = false <nl>
|
Revert " [ embedded - builtins ] Enable on all arches except x86 for benchmarks "
|
v8/v8
|
46a78fbedfcd458f5d35097bdf1a9947644f4b0e
|
2018-07-17T15:33:20Z
|
mmm a / tensorflow / compiler / tf2tensorrt / kernels / get_serialized_resource_op_test . cc <nl> ppp b / tensorflow / compiler / tf2tensorrt / kernels / get_serialized_resource_op_test . cc <nl> limitations under the License . <nl> <nl> # include < dirent . h > <nl> # include < string . h > <nl> + <nl> # include < fstream > <nl> # include < vector > <nl> <nl> TEST_F ( GetSerializedResourceOpTest , Basic ) { <nl> TF_ASSERT_OK ( RunOpKernel ( ) ) ; <nl> <nl> / / Verify the result . <nl> - / / TODO ( laigd ) : OpsTestBase : : GetOutput ( ) doesn ' t work . <nl> - Tensor * output = context_ - > mutable_output ( 0 ) ; <nl> - EXPECT_EQ ( " my_serialized_str " , output - > scalar < string > ( ) ( ) ) ; <nl> + / / string type output will remain on CPU , so we ' re not using GetOutput ( ) here . <nl> + EXPECT_EQ ( " my_serialized_str " , <nl> + context_ - > mutable_output ( 0 ) - > scalar < string > ( ) ( ) ) ; <nl> } <nl> <nl> } / / namespace tensorrt <nl> mmm a / tensorflow / compiler / tf2tensorrt / kernels / trt_engine_op_test . cc <nl> ppp b / tensorflow / compiler / tf2tensorrt / kernels / trt_engine_op_test . cc <nl> TYPED_TEST ( TRTEngineOpTest , Basic ) { <nl> TF_ASSERT_OK ( OpsTestBase : : RunOpKernel ( ) ) ; <nl> <nl> / / Verify the result . <nl> - / / TODO ( laigd ) : OpsTestBase : : GetOutput ( ) doesn ' t work . <nl> - Tensor * output = OpsTestBase : : context_ - > mutable_output ( 0 ) ; <nl> - const auto & tensor_map = output - > flat < TypeParam > ( ) ; <nl> - std : : vector < TypeParam > output_data ( tensor_map . size ( ) ) ; <nl> - ASSERT_EQ ( 0 , cudaDeviceSynchronize ( ) ) ; <nl> - ASSERT_EQ ( 0 , cudaMemcpy ( output_data . data ( ) , tensor_map . data ( ) , <nl> - sizeof ( TypeParam ) * tensor_map . size ( ) , <nl> - cudaMemcpyDeviceToHost ) ) ; <nl> - EXPECT_THAT ( absl : : Span < const TypeParam > ( output_data ) , <nl> - ElementsAre ( TypeParam ( 0 . 0f ) , TypeParam ( 2 . 0f ) ) ) ; <nl> + Tensor * output = OpsTestBase : : GetOutput ( 0 ) ; <nl> + EXPECT_THAT ( <nl> + absl : : Span < const TypeParam > ( output - > template flat < TypeParam > ( ) . data ( ) , <nl> + output - > NumElements ( ) ) , <nl> + ElementsAre ( TypeParam ( 0 . 0f ) , TypeParam ( 2 . 0f ) ) ) ; <nl> } <nl> <nl> } / / namespace tensorrt <nl>
|
Update the tests to use OpsTestBase : : GetOutput ( ) .
|
tensorflow/tensorflow
|
564cc016d1ae4c209b9ed2de0bd7ee48f30e40b9
|
2019-05-18T04:57:07Z
|
mmm a / src / mongo / bson / util / bson_extract . cpp <nl> ppp b / src / mongo / bson / util / bson_extract . cpp <nl> Status bsonExtractIntegerField ( const BSONObj & object , StringData fieldName , long <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> + Status bsonExtractDoubleField ( const BSONObj & object , StringData fieldName , double * out ) { <nl> + BSONElement value ; <nl> + Status status = bsonExtractField ( object , fieldName , & value ) ; <nl> + if ( ! status . isOK ( ) ) <nl> + return status ; <nl> + if ( ! value . isNumber ( ) ) { <nl> + return Status ( ErrorCodes : : TypeMismatch , <nl> + mongoutils : : str : : stream ( ) < < " Expected field \ " " < < fieldName <nl> + < < " \ " to have numeric type , but found " <nl> + < < typeName ( value . type ( ) ) ) ; <nl> + } <nl> + * out = value . numberDouble ( ) ; <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> Status bsonExtractIntegerFieldWithDefault ( const BSONObj & object , <nl> StringData fieldName , <nl> long long defaultValue , <nl> mmm a / src / mongo / bson / util / bson_extract . h <nl> ppp b / src / mongo / bson / util / bson_extract . h <nl> Status bsonExtractBooleanField ( const BSONObj & object , StringData fieldName , bool <nl> * / <nl> Status bsonExtractIntegerField ( const BSONObj & object , StringData fieldName , long long * out ) ; <nl> <nl> + / * * <nl> + * Finds an element named " fieldName " in " object " that represents a double - precision floating point <nl> + * value . <nl> + * <nl> + * Returns Status : : OK ( ) and sets * out to the element ' s double floating point value representation on <nl> + * success . Returns ErrorCodes : : NoSuchKey if there are no matches for " fieldName " . Returns <nl> + * ErrorCodes : : TypeMismatch if the value of the matching element is not of a numeric type . Returns <nl> + * ErrorCodes : : BadValue if the value does not have an exact floating point number representation . <nl> + * For return values other than Status : : OK ( ) , the resulting value of " * out " is undefined . <nl> + * / <nl> + Status bsonExtractDoubleField ( const BSONObj & object , StringData fieldName , double * out ) ; <nl> + <nl> / * * <nl> * Finds a string - typed element named " fieldName " in " object " and stores its value in " out " . <nl> * <nl> mmm a / src / mongo / client / read_preference . cpp <nl> ppp b / src / mongo / client / read_preference . 
cpp <nl> namespace { <nl> <nl> const char kModeFieldName [ ] = " mode " ; <nl> const char kTagsFieldName [ ] = " tags " ; <nl> - const char kMaxStalenessMSFieldName [ ] = " maxStalenessMS " ; <nl> + const char kMaxStalenessSecondsFieldName [ ] = " maxStalenessSeconds " ; <nl> <nl> const char kPrimaryOnly [ ] = " primary " ; <nl> const char kPrimaryPreferred [ ] = " primaryPreferred " ; <nl> const char kSecondaryOnly [ ] = " secondary " ; <nl> const char kSecondaryPreferred [ ] = " secondaryPreferred " ; <nl> const char kNearest [ ] = " nearest " ; <nl> <nl> + / / Avoid overflow errors when converting from seconds to milliseconds <nl> + const auto kMaximalMaxStalenessSecondsValue ( durationCount < Seconds > ( Milliseconds : : max ( ) ) ) ; <nl> + <nl> StringData readPreferenceName ( ReadPreference pref ) { <nl> switch ( pref ) { <nl> case ReadPreference : : PrimaryOnly : <nl> StatusWith < ReadPreferenceSetting > ReadPreferenceSetting : : fromBSON ( const BSONObj & <nl> return tagExtractStatus ; <nl> } <nl> <nl> - long long maxStalenessMSValue ; <nl> - auto maxStalenessMSExtractStatus = bsonExtractIntegerFieldWithDefault ( <nl> - readPrefObj , kMaxStalenessMSFieldName , 0 , & maxStalenessMSValue ) ; <nl> + double maxStalenessSecondsValue ; <nl> + <nl> + Status maxStalenessSecondsExtractStatus = bsonExtractDoubleField ( <nl> + readPrefObj , kMaxStalenessSecondsFieldName , & maxStalenessSecondsValue ) ; <nl> + <nl> + if ( maxStalenessSecondsExtractStatus = = ErrorCodes : : NoSuchKey ) { <nl> + return ReadPreferenceSetting ( mode , tags ) ; <nl> + } else if ( ! maxStalenessSecondsExtractStatus . isOK ( ) ) { <nl> + return maxStalenessSecondsExtractStatus ; <nl> + } <nl> <nl> - if ( ! maxStalenessMSExtractStatus . isOK ( ) ) { <nl> - return maxStalenessMSExtractStatus ; <nl> + if ( maxStalenessSecondsValue < 0 . 0 ) { <nl> + return { ErrorCodes : : BadValue , <nl> + str : : stream ( ) < < kMaxStalenessSecondsFieldName < < " value can not be negative " } ; <nl> } <nl> <nl> - if ( maxStalenessMSValue & & maxStalenessMSValue < 0 ) { <nl> - return Status ( ErrorCodes : : BadValue , <nl> - str : : stream ( ) < < kMaxStalenessMSFieldName <nl> - < < " must be a non negative integer " ) ; <nl> + if ( maxStalenessSecondsValue > kMaximalMaxStalenessSecondsValue ) { <nl> + return { ErrorCodes : : MaxStalenessOutOfRange , <nl> + str : : stream ( ) < < kMaxStalenessSecondsFieldName < < " value can not exceed " <nl> + < < kMaximalMaxStalenessSecondsValue } ; <nl> } <nl> <nl> - if ( maxStalenessMSValue & & maxStalenessMSValue > = Milliseconds : : max ( ) . count ( ) ) { <nl> - return Status ( ErrorCodes : : BadValue , <nl> - str : : stream ( ) < < kMaxStalenessMSFieldName < < " value can not exceed " <nl> - < < Milliseconds : : max ( ) . count ( ) ) ; <nl> + if ( maxStalenessSecondsValue = = 0 . 0 ) { <nl> + return ReadPreferenceSetting ( mode , tags ) ; <nl> } <nl> <nl> - if ( maxStalenessMSValue & & maxStalenessMSValue < kMinimalMaxStalenessValue . count ( ) ) { <nl> - return Status ( ErrorCodes : : MaxStalenessOutOfRange , <nl> - str : : stream ( ) < < kMaxStalenessMSFieldName < < " value can not be less than " <nl> - < < kMinimalMaxStalenessValue . 
count ( ) ) ; <nl> + / / Use a lambda to do the double seconds to integer milliseconds conversion in order to <nl> + / / encapsulate the usage of helper variables <nl> + const Milliseconds requestedMaxStalenessMS = [ maxStalenessSecondsValue ] { <nl> + double integerPart ; <nl> + const double fractionalPart = std : : modf ( maxStalenessSecondsValue , & integerPart ) ; <nl> + <nl> + return Seconds ( static_cast < long long > ( integerPart ) ) + <nl> + Milliseconds ( static_cast < long long > ( fractionalPart * <nl> + durationCount < Milliseconds > ( Seconds ( 1 ) ) ) ) ; <nl> + } ( ) ; <nl> + <nl> + if ( requestedMaxStalenessMS < kMinimalMaxStalenessValue ) { <nl> + return { ErrorCodes : : MaxStalenessOutOfRange , <nl> + str : : stream ( ) < < kMaxStalenessSecondsFieldName < < " value can not be less than " <nl> + < < kMinimalMaxStalenessValue } ; <nl> } <nl> <nl> - if ( ( mode = = ReadPreference : : PrimaryOnly ) & & maxStalenessMSValue ) { <nl> - return Status ( ErrorCodes : : BadValue , <nl> - str : : stream ( ) < < kMaxStalenessMSFieldName <nl> - < < " can not be set for the primary mode " ) ; <nl> + if ( mode = = ReadPreference : : PrimaryOnly ) { <nl> + return { ErrorCodes : : BadValue , <nl> + str : : stream ( ) < < kMaxStalenessSecondsFieldName <nl> + < < " can not be set for the primary mode " } ; <nl> } <nl> <nl> - return ReadPreferenceSetting ( mode , tags , Milliseconds ( maxStalenessMSValue ) ) ; <nl> + return ReadPreferenceSetting ( mode , tags , requestedMaxStalenessMS ) ; <nl> } <nl> <nl> BSONObj ReadPreferenceSetting : : toBSON ( ) const { <nl> BSONObj ReadPreferenceSetting : : toBSON ( ) const { <nl> bob . append ( kTagsFieldName , tags . getTagBSON ( ) ) ; <nl> } <nl> if ( maxStalenessMS . count ( ) > 0 ) { <nl> - bob . append ( kMaxStalenessMSFieldName , maxStalenessMS . count ( ) ) ; <nl> + bob . append ( kMaxStalenessSecondsFieldName , <nl> + static_cast < double > ( maxStalenessMS . count ( ) ) / <nl> + durationCount < Milliseconds > ( Seconds ( 1 ) ) ) ; <nl> } <nl> return bob . obj ( ) ; <nl> } <nl> mmm a / src / mongo / client / read_preference_test . cpp <nl> ppp b / src / mongo / client / read_preference_test . cpp <nl> <nl> # include " mongo / unittest / unittest . h " <nl> # include " mongo / util / duration . h " <nl> <nl> + namespace mongo { <nl> namespace { <nl> <nl> - using namespace mongo ; <nl> + using unittest : : assertGet ; <nl> <nl> - static const Milliseconds minMaxStaleness = ReadPreferenceSetting : : kMinimalMaxStalenessValue ; <nl> + const Seconds minMaxStalenessSeconds ( <nl> + durationCount < Seconds > ( ReadPreferenceSetting : : kMinimalMaxStalenessValue ) ) ; <nl> <nl> void checkParse ( const BSONObj & rpsObj , const ReadPreferenceSetting & expected ) { <nl> - const auto swRps = ReadPreferenceSetting : : fromBSON ( rpsObj ) ; <nl> - ASSERT_OK ( swRps . getStatus ( ) ) ; <nl> - const auto rps = swRps . getValue ( ) ; <nl> - ASSERT_TRUE ( rps . equals ( expected ) ) ; <nl> + const auto rps = assertGet ( ReadPreferenceSetting : : fromBSON ( rpsObj ) ) ; <nl> + if ( ! rps . equals ( expected ) ) { <nl> + FAIL ( str : : stream ( ) < < " Expected " < < expected . toString ( ) < < " does not match actual " <nl> + < < rps . toString ( ) ) ; <nl> + } <nl> } <nl> <nl> TEST ( ReadPreferenceSetting , ParseValid ) { <nl> TEST ( ReadPreferenceSetting , ParseValid ) { <nl> < < " ny " ) ) ) ) ) ; <nl> checkParse ( BSON ( " mode " <nl> < < " secondary " <nl> - < < " maxStalenessMS " <nl> - < < minMaxStaleness . 
count ( ) ) , <nl> - ReadPreferenceSetting ( ReadPreference : : SecondaryOnly , minMaxStaleness ) ) ; <nl> + < < " maxStalenessSeconds " <nl> + < < minMaxStalenessSeconds . count ( ) ) , <nl> + ReadPreferenceSetting ( ReadPreference : : SecondaryOnly , minMaxStalenessSeconds ) ) ; <nl> <nl> checkParse ( BSON ( " mode " <nl> < < " secondary " <nl> - < < " maxStalenessMS " <nl> + < < " maxStalenessSeconds " <nl> < < 0 ) , <nl> ReadPreferenceSetting ( ReadPreference : : SecondaryOnly , Milliseconds ( 0 ) ) ) ; <nl> <nl> + checkParse ( BSON ( " mode " <nl> + < < " secondary " <nl> + < < " maxStalenessSeconds " <nl> + < < 61LL ) , <nl> + ReadPreferenceSetting ( ReadPreference : : SecondaryOnly , Milliseconds ( 61000 ) ) ) ; <nl> + <nl> + checkParse ( BSON ( " mode " <nl> + < < " secondary " <nl> + < < " maxStalenessSeconds " <nl> + < < 63 . 46 ) , <nl> + ReadPreferenceSetting ( ReadPreference : : SecondaryOnly , Milliseconds ( 63460 ) ) ) ; <nl> + <nl> checkParse ( BSON ( " mode " <nl> < < " secondary " <nl> < < " tags " <nl> < < BSON_ARRAY ( BSON ( " dc " <nl> < < " ny " ) ) <nl> - < < " maxStalenessMS " <nl> - < < minMaxStaleness . count ( ) ) , <nl> + < < " maxStalenessSeconds " <nl> + < < minMaxStalenessSeconds . count ( ) ) , <nl> ReadPreferenceSetting ( ReadPreference : : SecondaryOnly , <nl> TagSet ( BSON_ARRAY ( BSON ( " dc " <nl> < < " ny " ) ) ) , <nl> - minMaxStaleness ) ) ; <nl> + minMaxStalenessSeconds ) ) ; <nl> } <nl> <nl> void checkParseFails ( const BSONObj & rpsObj ) { <nl> TEST ( ReadPreferenceSetting , NonEquality ) { <nl> < < " ca " ) <nl> < < BSON ( " foo " <nl> < < " bar " ) ) ) ; <nl> - auto rps = ReadPreferenceSetting ( ReadPreference : : Nearest , tagSet , minMaxStaleness ) ; <nl> + auto rps = ReadPreferenceSetting ( ReadPreference : : Nearest , tagSet , minMaxStalenessSeconds ) ; <nl> <nl> - auto unexpected1 = <nl> - ReadPreferenceSetting ( ReadPreference : : Nearest , TagSet : : primaryOnly ( ) , minMaxStaleness ) ; <nl> + auto unexpected1 = ReadPreferenceSetting ( <nl> + ReadPreference : : Nearest , TagSet : : primaryOnly ( ) , minMaxStalenessSeconds ) ; <nl> ASSERT_FALSE ( rps . equals ( unexpected1 ) ) ; <nl> <nl> auto unexpected2 = ReadPreferenceSetting ( <nl> - ReadPreference : : Nearest , tagSet , Milliseconds ( minMaxStaleness . count ( ) + 1 ) ) ; <nl> + ReadPreference : : Nearest , tagSet , Seconds ( minMaxStalenessSeconds . count ( ) + 1 ) ) ; <nl> ASSERT_FALSE ( rps . equals ( unexpected2 ) ) ; <nl> } <nl> <nl> TEST ( ReadPreferenceSetting , ParseInvalid ) { <nl> < < " tags " <nl> < < " bad " ) ) ; <nl> <nl> - / / maxStalenessMS is negative <nl> - checkParseFails ( BSON ( " mode " <nl> - < < " secondary " <nl> - < < " maxStalenessMS " <nl> - < < - 1 ) ) ; <nl> + / / maxStalenessSeconds is negative <nl> + checkParseFailsWithError ( BSON ( " mode " <nl> + < < " secondary " <nl> + < < " maxStalenessSeconds " <nl> + < < - 1 ) , <nl> + ErrorCodes : : BadValue ) ; <nl> <nl> - / / maxStalenessMS is NaN <nl> - checkParseFails ( BSON ( " mode " <nl> - < < " secondary " <nl> - < < " maxStalenessMS " <nl> - < < " ONE " ) ) ; <nl> + / / maxStalenessSeconds is NaN <nl> + checkParseFailsWithError ( BSON ( " mode " <nl> + < < " secondary " <nl> + < < " maxStalenessSeconds " <nl> + < < " ONE " ) , <nl> + ErrorCodes : : TypeMismatch ) ; <nl> <nl> - / / maxStalenessMS and primary <nl> + / / maxStalenessSeconds and primary <nl> checkParseFails ( BSON ( " mode " <nl> < < " primary " <nl> - < < " maxStalenessMS " <nl> - < < minMaxStaleness . 
count ( ) ) ) ; <nl> + < < " maxStalenessSeconds " <nl> + < < minMaxStalenessSeconds . count ( ) ) ) ; <nl> <nl> - / / maxStalenessMS is less than min <nl> + / / maxStalenessSeconds is less than min <nl> checkParseFailsWithError ( BSON ( " mode " <nl> < < " primary " <nl> - < < " maxStalenessMS " <nl> - < < Milliseconds ( minMaxStaleness . count ( ) - 1 ) . count ( ) ) , <nl> + < < " maxStalenessSeconds " <nl> + < < minMaxStalenessSeconds . count ( ) - 1 ) , <nl> ErrorCodes : : MaxStalenessOutOfRange ) ; <nl> <nl> - / / maxStalenessMS is greater than max <nl> - checkParseFails ( BSON ( " mode " <nl> - < < " secondary " <nl> - < < " maxStalenessMS " <nl> - < < Milliseconds : : max ( ) . count ( ) ) ) ; <nl> + / / maxStalenessSeconds is greater than max type value for milliseconds <nl> + checkParseFailsWithError ( BSON ( " mode " <nl> + < < " secondary " <nl> + < < " maxStalenessSeconds " <nl> + < < Milliseconds : : max ( ) . count ( ) ) , <nl> + ErrorCodes : : MaxStalenessOutOfRange ) ; <nl> } <nl> <nl> void checkRoundtrip ( const ReadPreferenceSetting & rps ) { <nl> - auto parsed = ReadPreferenceSetting : : fromBSON ( rps . toBSON ( ) ) ; <nl> - ASSERT_OK ( parsed . getStatus ( ) ) ; <nl> - ASSERT_TRUE ( parsed . getValue ( ) . equals ( rps ) ) ; <nl> + auto parsed = assertGet ( ReadPreferenceSetting : : fromBSON ( rps . toBSON ( ) ) ) ; <nl> + ASSERT_TRUE ( parsed . equals ( rps ) ) ; <nl> } <nl> <nl> TEST ( ReadPreferenceSetting , Roundtrip ) { <nl> TEST ( ReadPreferenceSetting , Roundtrip ) { <nl> < < " ca " ) <nl> < < BSON ( " foo " <nl> < < " bar " ) ) ) , <nl> - minMaxStaleness ) ) ; <nl> + minMaxStalenessSeconds ) ) ; <nl> + <nl> + checkRoundtrip ( ReadPreferenceSetting ( ReadPreference : : Nearest , <nl> + TagSet ( BSON_ARRAY ( BSON ( " dc " <nl> + < < " ca " ) <nl> + < < BSON ( " foo " <nl> + < < " bar " ) ) ) , <nl> + Milliseconds ( 63246 ) ) ) ; <nl> } <nl> <nl> } / / namespace <nl> + } / / namespace mongo <nl>
|
SERVER - 26927 Rename maxStalenessMS to maxStalenessSeconds and support doubles
|
mongodb/mongo
|
21821549cdeef84173293c46725695fc3a7834b0
|
2016-11-15T15:08:45Z
|
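Annotation: besides the rename, the commit above changes maxStaleness parsing to accept doubles: bsonExtractDoubleField pulls the numeric value and a small lambda splits it with std::modf into whole seconds plus fractional milliseconds before the range checks. A standalone sketch of that conversion using std::chrono is below; the bounds and error handling are simplified for illustration and are not MongoDB's exact rules.

#include <chrono>
#include <cmath>
#include <stdexcept>

// Convert a user-supplied floating-point seconds value into an integral
// millisecond duration, mirroring the read-preference parser's lambda.
std::chrono::milliseconds maxStalenessToMillis(double maxStalenessSeconds) {
  if (maxStalenessSeconds < 0.0)
    throw std::invalid_argument("maxStalenessSeconds can not be negative");
  double integerPart = 0.0;
  const double fractionalPart = std::modf(maxStalenessSeconds, &integerPart);
  return std::chrono::seconds(static_cast<long long>(integerPart)) +
         std::chrono::milliseconds(static_cast<long long>(fractionalPart * 1000.0));
}

// Example: maxStalenessToMillis(63.46) follows the same arithmetic as the
// patched parser, which the new unit test above asserts equals 63460 ms.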
mmm a / src / api - natives . cc <nl> ppp b / src / api - natives . cc <nl> void CacheTemplateInstantiation ( Isolate * isolate , int serial_number , <nl> Handle < FixedArray > fast_cache = <nl> isolate - > fast_template_instantiations_cache ( ) ; <nl> Handle < FixedArray > new_cache = <nl> - FixedArray : : SetAndGrow ( fast_cache , serial_number - 1 , object ) ; <nl> + FixedArray : : SetAndGrow ( isolate , fast_cache , serial_number - 1 , object ) ; <nl> if ( * new_cache ! = * fast_cache ) { <nl> isolate - > native_context ( ) - > set_fast_template_instantiations_cache ( <nl> * new_cache ) ; <nl> mmm a / src / api . cc <nl> ppp b / src / api . cc <nl> i : : Object * * GetSerializedDataFromFixedArray ( i : : Isolate * isolate , <nl> / / empty FixedArray ) . <nl> int last = list - > length ( ) - 1 ; <nl> while ( last > = 0 & & list - > is_the_hole ( isolate , last ) ) last - - ; <nl> - if ( last ! = - 1 ) list - > Shrink ( last + 1 ) ; <nl> + if ( last ! = - 1 ) list - > Shrink ( isolate , last + 1 ) ; <nl> return i : : Handle < i : : Object > ( object , isolate ) . location ( ) ; <nl> } <nl> } <nl> mmm a / src / debug / debug . cc <nl> ppp b / src / debug / debug . cc <nl> MaybeHandle < FixedArray > Debug : : GetHitBreakPoints ( Handle < DebugInfo > debug_info , <nl> } <nl> } <nl> if ( break_points_hit_count = = 0 ) return { } ; <nl> - break_points_hit - > Shrink ( break_points_hit_count ) ; <nl> + break_points_hit - > Shrink ( isolate_ , break_points_hit_count ) ; <nl> return break_points_hit ; <nl> } <nl> <nl> Handle < FixedArray > Debug : : GetLoadedScripts ( ) { <nl> if ( script - > HasValidSource ( ) ) results - > set ( length + + , script ) ; <nl> } <nl> } <nl> - return FixedArray : : ShrinkOrEmpty ( results , length ) ; <nl> + return FixedArray : : ShrinkOrEmpty ( isolate_ , results , length ) ; <nl> } <nl> <nl> void Debug : : OnThrow ( Handle < Object > exception ) { <nl> mmm a / src / elements . cc <nl> ppp b / src / elements . cc <nl> class ElementsAccessorBase : public InternalElementsAccessor { <nl> / / Shrink combined_keys to the final size . <nl> int final_size = nof_indices + nof_property_keys ; <nl> DCHECK_LE ( final_size , combined_keys - > length ( ) ) ; <nl> - return FixedArray : : ShrinkOrEmpty ( combined_keys , final_size ) ; <nl> + return FixedArray : : ShrinkOrEmpty ( isolate , combined_keys , final_size ) ; <nl> } <nl> <nl> return combined_keys ; <nl> mmm a / src / heap / heap . cc <nl> ppp b / src / heap / heap . 
cc <nl> void Heap : : RemoveGCEpilogueCallback ( v8 : : Isolate : : GCCallbackWithData callback , <nl> } <nl> <nl> namespace { <nl> - void CompactFixedArrayOfWeakCells ( Object * object ) { <nl> + void CompactFixedArrayOfWeakCells ( Isolate * isolate , Object * object ) { <nl> if ( object - > IsFixedArrayOfWeakCells ( ) ) { <nl> FixedArrayOfWeakCells * array = FixedArrayOfWeakCells : : cast ( object ) ; <nl> - array - > Compact < FixedArrayOfWeakCells : : NullCallback > ( ) ; <nl> + array - > Compact < FixedArrayOfWeakCells : : NullCallback > ( isolate ) ; <nl> } <nl> } <nl> } / / anonymous namespace <nl> void Heap : : CompactFixedArraysOfWeakCells ( ) { <nl> if ( prototype_users - > IsFixedArrayOfWeakCells ( ) ) { <nl> FixedArrayOfWeakCells * array = <nl> FixedArrayOfWeakCells : : cast ( prototype_users ) ; <nl> - array - > Compact < JSObject : : PrototypeRegistryCompactionCallback > ( ) ; <nl> + array - > Compact < JSObject : : PrototypeRegistryCompactionCallback > ( <nl> + isolate ( ) ) ; <nl> } <nl> } <nl> } <nl> - CompactFixedArrayOfWeakCells ( noscript_shared_function_infos ( ) ) ; <nl> - CompactFixedArrayOfWeakCells ( script_list ( ) ) ; <nl> - CompactFixedArrayOfWeakCells ( weak_stack_trace_list ( ) ) ; <nl> + CompactFixedArrayOfWeakCells ( isolate ( ) , noscript_shared_function_infos ( ) ) ; <nl> + CompactFixedArrayOfWeakCells ( isolate ( ) , script_list ( ) ) ; <nl> + CompactFixedArrayOfWeakCells ( isolate ( ) , weak_stack_trace_list ( ) ) ; <nl> } <nl> <nl> void Heap : : AddRetainedMap ( Handle < Map > map ) { <nl> mmm a / src / isolate . cc <nl> ppp b / src / isolate . cc <nl> class FrameArrayBuilder { <nl> bool full ( ) { return elements_ - > FrameCount ( ) > = limit_ ; } <nl> <nl> Handle < FrameArray > GetElements ( ) { <nl> - elements_ - > ShrinkToFit ( ) ; <nl> + elements_ - > ShrinkToFit ( isolate_ ) ; <nl> return elements_ ; <nl> } <nl> <nl> Handle < FixedArray > Isolate : : CaptureCurrentStackTrace ( <nl> frames_seen + + ; <nl> } <nl> } <nl> - return FixedArray : : ShrinkOrEmpty ( stack_trace_elems , frames_seen ) ; <nl> + return FixedArray : : ShrinkOrEmpty ( this , stack_trace_elems , frames_seen ) ; <nl> } <nl> <nl> <nl> mmm a / src / keys . cc <nl> ppp b / src / keys . cc <nl> MaybeHandle < FixedArray > FilterProxyKeys ( KeyAccumulator * accumulator , <nl> } <nl> store_position + + ; <nl> } <nl> - return FixedArray : : ShrinkOrEmpty ( keys , store_position ) ; <nl> + return FixedArray : : ShrinkOrEmpty ( isolate , keys , store_position ) ; <nl> } <nl> <nl> / / Returns " nothing " in case of exception , " true " on success . <nl> mmm a / src / objects . cc <nl> ppp b / src / objects . 
cc <nl> Handle < TemplateList > TemplateList : : Add ( Isolate * isolate , <nl> STATIC_ASSERT ( kFirstElementIndex = = 1 ) ; <nl> int index = list - > length ( ) + 1 ; <nl> Handle < i : : FixedArray > fixed_array = Handle < FixedArray > : : cast ( list ) ; <nl> - fixed_array = FixedArray : : SetAndGrow ( fixed_array , index , value ) ; <nl> + fixed_array = FixedArray : : SetAndGrow ( isolate , fixed_array , index , value ) ; <nl> fixed_array - > set ( kLengthIndex , Smi : : FromInt ( index ) ) ; <nl> return Handle < TemplateList > : : cast ( fixed_array ) ; <nl> } <nl> V8_WARN_UNUSED_RESULT Maybe < bool > FastGetOwnValuesOrEntries ( <nl> } <nl> <nl> DCHECK_LE ( count , values_or_entries - > length ( ) ) ; <nl> - * result = FixedArray : : ShrinkOrEmpty ( values_or_entries , count ) ; <nl> + * result = FixedArray : : ShrinkOrEmpty ( isolate , values_or_entries , count ) ; <nl> return Just ( true ) ; <nl> } <nl> <nl> MaybeHandle < FixedArray > GetOwnValuesOrEntries ( Isolate * isolate , <nl> length + + ; <nl> } <nl> DCHECK_LE ( length , values_or_entries - > length ( ) ) ; <nl> - return FixedArray : : ShrinkOrEmpty ( values_or_entries , length ) ; <nl> + return FixedArray : : ShrinkOrEmpty ( isolate , values_or_entries , length ) ; <nl> } <nl> <nl> MaybeHandle < FixedArray > JSReceiver : : GetOwnValues ( Handle < JSReceiver > object , <nl> Handle < Map > Map : : CopyReplaceDescriptor ( Isolate * isolate , Handle < Map > map , <nl> " CopyReplaceDescriptor " , simple_flag ) ; <nl> } <nl> <nl> - Handle < FixedArray > FixedArray : : SetAndGrow ( Handle < FixedArray > array , int index , <nl> + Handle < FixedArray > FixedArray : : SetAndGrow ( Isolate * isolate , <nl> + Handle < FixedArray > array , int index , <nl> Handle < Object > value , <nl> PretenureFlag pretenure ) { <nl> if ( index < array - > length ( ) ) { <nl> Handle < FixedArray > FixedArray : : SetAndGrow ( Handle < FixedArray > array , int index , <nl> capacity = JSObject : : NewElementsCapacity ( capacity ) ; <nl> } while ( capacity < = index ) ; <nl> Handle < FixedArray > new_array = <nl> - array - > GetIsolate ( ) - > factory ( ) - > NewUninitializedFixedArray ( capacity , <nl> - pretenure ) ; <nl> + isolate - > factory ( ) - > NewUninitializedFixedArray ( capacity , pretenure ) ; <nl> array - > CopyTo ( 0 , * new_array , 0 , array - > length ( ) ) ; <nl> new_array - > FillWithHoles ( array - > length ( ) , new_array - > length ( ) ) ; <nl> new_array - > set ( index , * value ) ; <nl> bool FixedArray : : ContainsSortedNumbers ( ) { <nl> return true ; <nl> } <nl> <nl> - Handle < FixedArray > FixedArray : : ShrinkOrEmpty ( Handle < FixedArray > array , <nl> + Handle < FixedArray > FixedArray : : ShrinkOrEmpty ( Isolate * isolate , <nl> + Handle < FixedArray > array , <nl> int new_length ) { <nl> if ( new_length = = 0 ) { <nl> return array - > GetReadOnlyRoots ( ) . 
empty_fixed_array_handle ( ) ; <nl> } else { <nl> - array - > Shrink ( new_length ) ; <nl> + array - > Shrink ( isolate , new_length ) ; <nl> return array ; <nl> } <nl> } <nl> <nl> - void FixedArray : : Shrink ( int new_length ) { <nl> + void FixedArray : : Shrink ( Isolate * isolate , int new_length ) { <nl> DCHECK ( 0 < new_length & & new_length < = length ( ) ) ; <nl> if ( new_length < length ( ) ) { <nl> - GetHeap ( ) - > RightTrimFixedArray ( this , length ( ) - new_length ) ; <nl> + isolate - > heap ( ) - > RightTrimFixedArray ( this , length ( ) - new_length ) ; <nl> } <nl> } <nl> <nl> Handle < FixedArrayOfWeakCells > FixedArrayOfWeakCells : : Add ( <nl> } <nl> <nl> template < class CompactionCallback > <nl> - void FixedArrayOfWeakCells : : Compact ( ) { <nl> + void FixedArrayOfWeakCells : : Compact ( Isolate * isolate ) { <nl> FixedArray * array = FixedArray : : cast ( this ) ; <nl> int new_length = kFirstIndex ; <nl> for ( int i = kFirstIndex ; i < array - > length ( ) ; i + + ) { <nl> void FixedArrayOfWeakCells : : Compact ( ) { <nl> new_length - kFirstIndex ) ; <nl> array - > set ( new_length + + , element ) ; <nl> } <nl> - array - > Shrink ( new_length ) ; <nl> + array - > Shrink ( isolate , new_length ) ; <nl> set_last_used_index ( 0 ) ; <nl> } <nl> <nl> void JSObject : : PrototypeRegistryCompactionCallback : : Callback ( Object * value , <nl> proto_info - > set_registry_slot ( new_index ) ; <nl> } <nl> <nl> - template void <nl> - FixedArrayOfWeakCells : : Compact < FixedArrayOfWeakCells : : NullCallback > ( ) ; <nl> - template void <nl> - FixedArrayOfWeakCells : : Compact < JSObject : : PrototypeRegistryCompactionCallback > ( ) ; <nl> + template void FixedArrayOfWeakCells : : Compact < <nl> + FixedArrayOfWeakCells : : NullCallback > ( Isolate * isolate ) ; <nl> + template void FixedArrayOfWeakCells : : Compact < <nl> + JSObject : : PrototypeRegistryCompactionCallback > ( Isolate * isolate ) ; <nl> <nl> bool FixedArrayOfWeakCells : : Remove ( Handle < HeapObject > value ) { <nl> if ( Length ( ) = = 0 ) return false ; <nl> Handle < FrameArray > FrameArray : : AppendWasmFrame ( <nl> return array ; <nl> } <nl> <nl> - void FrameArray : : ShrinkToFit ( ) { Shrink ( LengthFor ( FrameCount ( ) ) ) ; } <nl> + void FrameArray : : ShrinkToFit ( Isolate * isolate ) { <nl> + Shrink ( isolate , LengthFor ( FrameCount ( ) ) ) ; <nl> + } <nl> <nl> / / static <nl> Handle < FrameArray > FrameArray : : EnsureSpace ( Handle < FrameArray > array , <nl> Handle < FixedArray > BaseNameDictionary < Derived , Shape > : : IterationIndices ( <nl> array - > GetFirstElementAddress ( ) ) ; <nl> std : : sort ( start , start + array_size , cmp ) ; <nl> } <nl> - return FixedArray : : ShrinkOrEmpty ( array , array_size ) ; <nl> + return FixedArray : : ShrinkOrEmpty ( isolate , array , array_size ) ; <nl> } <nl> <nl> template < typename Derived , typename Shape > <nl> mmm a / src / objects / fixed - array . h <nl> ppp b / src / objects / fixed - array . h <nl> class FixedArray : public FixedArrayBase { <nl> Handle < T > GetValueChecked ( Isolate * isolate , int index ) const ; <nl> <nl> / / Return a grown copy if the index is bigger than the array ' s length . 
<nl> - static Handle < FixedArray > SetAndGrow ( Handle < FixedArray > array , int index , <nl> + static Handle < FixedArray > SetAndGrow ( Isolate * isolate , <nl> + Handle < FixedArray > array , int index , <nl> Handle < Object > value , <nl> PretenureFlag pretenure = NOT_TENURED ) ; <nl> <nl> class FixedArray : public FixedArrayBase { <nl> inline void FillWithHoles ( int from , int to ) ; <nl> <nl> / / Shrink the array and insert filler objects . { new_length } must be > 0 . <nl> - void Shrink ( int new_length ) ; <nl> + void Shrink ( Isolate * isolate , int new_length ) ; <nl> / / If { new_length } is 0 , return the canonical empty FixedArray . Otherwise <nl> / / like above . <nl> - static Handle < FixedArray > ShrinkOrEmpty ( Handle < FixedArray > array , <nl> + static Handle < FixedArray > ShrinkOrEmpty ( Isolate * isolate , <nl> + Handle < FixedArray > array , <nl> int new_length ) ; <nl> <nl> / / Copy a sub array from the receiver to dest . <nl> class FixedArrayOfWeakCells : public FixedArray { <nl> } ; <nl> <nl> template < class CompactionCallback > <nl> - void Compact ( ) ; <nl> + void Compact ( Isolate * isolate ) ; <nl> <nl> inline Object * Get ( int index ) const ; <nl> inline void Clear ( int index ) ; <nl> mmm a / src / objects / frame - array . h <nl> ppp b / src / objects / frame - array . h <nl> class FrameArray : public FixedArray { <nl> inline bool IsAsmJsWasmFrame ( int frame_ix ) const ; <nl> inline int FrameCount ( ) const ; <nl> <nl> - void ShrinkToFit ( ) ; <nl> + void ShrinkToFit ( Isolate * isolate ) ; <nl> <nl> / / Flags . <nl> enum Flag { <nl> mmm a / src / objects / literal - objects . cc <nl> ppp b / src / objects / literal - objects . cc <nl> class ObjectDescriptor { <nl> if ( HasDictionaryProperties ( ) ) { <nl> properties_dictionary_template_ - > SetNextEnumerationIndex ( <nl> next_enumeration_index_ ) ; <nl> - computed_properties_ = FixedArray : : ShrinkOrEmpty ( computed_properties_ , <nl> - current_computed_index_ ) ; <nl> + computed_properties_ = FixedArray : : ShrinkOrEmpty ( <nl> + isolate , computed_properties_ , current_computed_index_ ) ; <nl> } else { <nl> DCHECK ( descriptor_array_template_ - > IsSortedNoDuplicates ( isolate ) ) ; <nl> } <nl> mmm a / src / objects / ordered - hash - table . cc <nl> ppp b / src / objects / ordered - hash - table . cc <nl> Handle < FixedArray > OrderedHashSet : : ConvertToKeysArray ( <nl> } <nl> result - > set ( i , key ) ; <nl> } <nl> - return FixedArray : : ShrinkOrEmpty ( result , length ) ; <nl> + return FixedArray : : ShrinkOrEmpty ( isolate , result , length ) ; <nl> } <nl> <nl> HeapObject * OrderedHashSet : : GetEmpty ( ReadOnlyRoots ro_roots ) { <nl> mmm a / src / runtime / runtime - array . cc <nl> ppp b / src / runtime / runtime - array . cc <nl> RUNTIME_FUNCTION ( Runtime_GetArrayKeys ) { <nl> j + + ; <nl> } <nl> <nl> - keys = FixedArray : : ShrinkOrEmpty ( keys , j ) ; <nl> + keys = FixedArray : : ShrinkOrEmpty ( isolate , keys , j ) ; <nl> return * isolate - > factory ( ) - > NewJSArrayWithElements ( keys ) ; <nl> } <nl> <nl> mmm a / src / runtime / runtime - regexp . cc <nl> ppp b / src / runtime / runtime - regexp . cc <nl> static Object * SearchRegExpMultiple ( Isolate * isolate , Handle < String > subject , <nl> last_match_cache - > set ( i , Smi : : FromInt ( last_match [ i ] ) ) ; <nl> } <nl> Handle < FixedArray > result_fixed_array = <nl> - FixedArray : : ShrinkOrEmpty ( builder . array ( ) , builder . length ( ) ) ; <nl> + FixedArray : : ShrinkOrEmpty ( isolate , builder . 
array ( ) , builder . length ( ) ) ; <nl> / / Cache the result and copy the FixedArray into a COW array . <nl> Handle < FixedArray > copied_fixed_array = <nl> isolate - > factory ( ) - > CopyFixedArrayWithMap ( <nl> Handle < JSArray > NewJSArrayWithElements ( Isolate * isolate , <nl> Handle < FixedArray > elems , <nl> int num_elems ) { <nl> return isolate - > factory ( ) - > NewJSArrayWithElements ( <nl> - FixedArray : : ShrinkOrEmpty ( elems , num_elems ) ) ; <nl> + FixedArray : : ShrinkOrEmpty ( isolate , elems , num_elems ) ) ; <nl> } <nl> <nl> } / / namespace <nl> RUNTIME_FUNCTION ( Runtime_RegExpSplit ) { <nl> { <nl> Handle < String > substr = <nl> factory - > NewSubString ( string , prev_string_index , string_index ) ; <nl> - elems = FixedArray : : SetAndGrow ( elems , num_elems + + , substr ) ; <nl> + elems = FixedArray : : SetAndGrow ( isolate , elems , num_elems + + , substr ) ; <nl> if ( num_elems = = limit ) { <nl> return * NewJSArrayWithElements ( isolate , elems , num_elems ) ; <nl> } <nl> RUNTIME_FUNCTION ( Runtime_RegExpSplit ) { <nl> Handle < Object > capture ; <nl> ASSIGN_RETURN_FAILURE_ON_EXCEPTION ( <nl> isolate , capture , Object : : GetElement ( isolate , result , i ) ) ; <nl> - elems = FixedArray : : SetAndGrow ( elems , num_elems + + , capture ) ; <nl> + elems = FixedArray : : SetAndGrow ( isolate , elems , num_elems + + , capture ) ; <nl> if ( num_elems = = limit ) { <nl> return * NewJSArrayWithElements ( isolate , elems , num_elems ) ; <nl> } <nl> RUNTIME_FUNCTION ( Runtime_RegExpSplit ) { <nl> { <nl> Handle < String > substr = <nl> factory - > NewSubString ( string , prev_string_index , length ) ; <nl> - elems = FixedArray : : SetAndGrow ( elems , num_elems + + , substr ) ; <nl> + elems = FixedArray : : SetAndGrow ( isolate , elems , num_elems + + , substr ) ; <nl> } <nl> <nl> return * NewJSArrayWithElements ( isolate , elems , num_elems ) ; <nl> mmm a / src / value - serializer . cc <nl> ppp b / src / value - serializer . cc <nl> MaybeHandle < JSReceiver > ValueDeserializer : : GetObjectWithID ( uint32_t id ) { <nl> void ValueDeserializer : : AddObjectWithID ( uint32_t id , <nl> Handle < JSReceiver > object ) { <nl> DCHECK ( ! HasObjectWithID ( id ) ) ; <nl> - Handle < FixedArray > new_array = FixedArray : : SetAndGrow ( id_map_ , id , object ) ; <nl> + Handle < FixedArray > new_array = <nl> + FixedArray : : SetAndGrow ( isolate_ , id_map_ , id , object ) ; <nl> <nl> / / If the dictionary was reallocated , update the global handle . <nl> if ( ! new_array . is_identical_to ( id_map_ ) ) { <nl> mmm a / test / cctest / heap / test - heap . cc <nl> ppp b / test / cctest / heap / test - heap . cc <nl> TEST ( Regress507979 ) { <nl> <nl> / / Replace parts of an object placed before a live object with a filler . This <nl> / / way the filler object shares the mark bits with the following live object . <nl> - o1 - > Shrink ( kFixedArrayLen - 1 ) ; <nl> + o1 - > Shrink ( isolate , kFixedArrayLen - 1 ) ; <nl> <nl> for ( HeapObject * obj = it . next ( ) ; obj ! = nullptr ; obj = it . next ( ) ) { <nl> / / Let ' s not optimize the loop away . 
<nl> TEST ( FixedArrayOfWeakCells ) { <nl> Handle < FixedArrayOfWeakCells > array = <nl> FixedArrayOfWeakCells : : Add ( isolate , Handle < Object > ( ) , number ) ; <nl> array - > Remove ( number ) ; <nl> - array - > Compact < FixedArrayOfWeakCells : : NullCallback > ( ) ; <nl> + array - > Compact < FixedArrayOfWeakCells : : NullCallback > ( isolate ) ; <nl> FixedArrayOfWeakCells : : Add ( isolate , array , number ) ; <nl> } <nl> <nl> Handle < FixedArray > ShrinkArrayAndCheckSize ( Heap * heap , int length ) { <nl> heap - > isolate ( ) - > factory ( ) - > NewFixedArray ( length , TENURED ) ; <nl> size_t size_after_allocation = heap - > SizeOfObjects ( ) ; <nl> CHECK_EQ ( size_after_allocation , size_before_allocation + array - > Size ( ) ) ; <nl> - array - > Shrink ( 1 ) ; <nl> + array - > Shrink ( heap - > isolate ( ) , 1 ) ; <nl> size_t size_after_shrinking = heap - > SizeOfObjects ( ) ; <nl> / / Shrinking does not change the space size immediately . <nl> CHECK_EQ ( size_after_allocation , size_after_shrinking ) ; <nl> TEST ( UncommitUnusedLargeObjectMemory ) { <nl> intptr_t size_before = array - > Size ( ) ; <nl> size_t committed_memory_before = chunk - > CommittedPhysicalMemory ( ) ; <nl> <nl> - array - > Shrink ( 1 ) ; <nl> + array - > Shrink ( isolate , 1 ) ; <nl> CHECK ( array - > Size ( ) < size_before ) ; <nl> <nl> CcTest : : CollectAllGarbage ( ) ; <nl> HEAP_TEST ( Regress5831 ) { <nl> bool overflowed_into_lospace = false ; <nl> for ( int i = 0 ; i < kMaxIterations ; i + + ) { <nl> Handle < Code > code = GenerateDummyImmovableCode ( isolate ) ; <nl> - array = FixedArray : : SetAndGrow ( array , i , code ) ; <nl> + array = FixedArray : : SetAndGrow ( isolate , array , i , code ) ; <nl> CHECK ( heap - > code_space ( ) - > Contains ( code - > address ( ) ) | | <nl> heap - > lo_space ( ) - > Contains ( * code ) ) ; <nl> if ( heap - > lo_space ( ) - > Contains ( * code ) ) { <nl>
|
[ explicit isolates ] Eliminate GetIsolate from FixedArray
|
v8/v8
|
c8354ae5e0c9a94efc9d69b89dcac8062272eaa3
|
2018-07-11T07:50:57Z
|
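Annotation: the refactor above threads an explicit Isolate* through FixedArray::Shrink, SetAndGrow, ShrinkOrEmpty and related callers instead of recovering it via GetIsolate()/GetHeap() inside the callee. The sketch below shows the general shape of that change with made-up types; Isolate, Heap and Shrink here are stand-ins, not V8's real classes.

#include <cstddef>
#include <vector>

struct Heap {
  void rightTrim(std::vector<int>& array, std::size_t by) { array.resize(array.size() - by); }
};
struct Isolate { Heap heap; };

// Before: Shrink(array, n) reached back to a global/owning isolate internally.
// After: the caller passes the isolate explicitly, making the dependency visible.
void Shrink(Isolate* isolate, std::vector<int>& array, std::size_t newLength) {
  if (newLength < array.size())
    isolate->heap.rightTrim(array, array.size() - newLength);
}

int main() {
  Isolate isolate;
  std::vector<int> array(8, 0);
  Shrink(&isolate, array, 3);   // array now holds 3 elements
}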