diff (string, 41 – 2.03M chars) | msg (string, 1 – 1.5k chars, may be null ⌀) | repo (string, 5 – 40 chars) | sha (string, 40 chars) | time (string, 20 chars) |
---|---|---|---|---|
--- a/tensorflow/compiler/mlir/tensorflow/tests/compile_mlir_util/graph-resource.pbtxt
+++ b/tensorflow/compiler/mlir/tensorflow/tests/compile_mlir_util/graph-resource.pbtxt

-# RUN: tf-mlir-translate -graphdef-to-mlir -tf-enable-shape-inference-on-import=false %s -tf-graph-as-function | tf-mlir-translate -mlir-tf-graph-to-hlo-text -tf-input-shapes=2:2 -tf-input-data-types=DT_FLOAT,DT_FLOAT -tf-xla-input-types=parameter,resource -emit-return-tuple | FileCheck %s
+# RUN: tf-mlir-translate -graphdef-to-mlir -tf-enable-shape-inference-on-import=false %s -tf-graph-as-function -tf-control-output-arrays=assign_variable | tf-mlir-translate -mlir-tf-graph-to-hlo-text -tf-input-shapes=2:2 -tf-input-data-types=DT_FLOAT,DT_FLOAT -tf-xla-input-types=parameter,resource -emit-return-tuple | FileCheck %s

 node {
   name: "arg0"
--- a/tensorflow/compiler/mlir/tensorflow/tests/graph_pruning.mlir
+++ b/tensorflow/compiler/mlir/tensorflow/tests/graph_pruning.mlir

-// RUN: tf-opt %s -tf-executor-graph-pruning | FileCheck %s
+// RUN: tf-opt %s -split-input-file -tf-executor-graph-pruning | FileCheck %s

 // Two islands chained by data-flow contributing to the graph return are
 // preserved.
 func @chained_islands(%arg0: i32) -> i32 {
   return %0 : i32
 }

-// Check that a function that does not have arguments/results is ignored by
-// thep pruning pass: this could be a V1 graph imported without feeds/fetches.
-// CHECK-LABEL: func @empty_islands(
-func @empty_islands() {
-  // CHECK: tf_executor.island
-  tf_executor.graph {
-    %0 = tf_executor.island {
-      tf_executor.yield
-    }
-    tf_executor.fetch
-  }
-  return
-}
-
 // Check that an unused island that doesn't contribute to the fetch is removed.
 // CHECK-LABEL: func @dead_island(
 func @dead_island(%arg0: i32) -> i32 {
 func @control_fetch(%arg0: i32) {
   }
   return
 }
+
+// -----
+
+// Check that a function that is named "main" and does not have the
+// "tf.entry_function" attribute defined is ignored by the pruning pass: this
+// could be a V1 graph imported without feed/fetch/target nodes.
+// CHECK-LABEL: func @main(
+func @main() {
+  // CHECK: tf_executor.island
+  tf_executor.graph {
+    %0 = tf_executor.island {
+      tf_executor.yield
+    }
+    tf_executor.fetch
+  }
+  return
+}
+
+// -----
+
+// Check that a function that is named "main" and does have the
+// "tf.entry_function" attribute defined with no feed/fetch/target nodes is
+// pruned.
+// CHECK-LABEL: func @main(
+func @main() attributes {tf.entry_function = {control_outputs = "", inputs = "", outputs = ""}} {
+  // CHECK-NOT: tf_executor.island
+  tf_executor.graph {
+    %0 = tf_executor.island {
+      tf_executor.yield
+    }
+    tf_executor.fetch
+  }
+  return
+}
--- a/tensorflow/compiler/mlir/tensorflow/transforms/graph_pruning.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/graph_pruning.cc
 limitations under the License.
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/iterator_range.h"
 #include "llvm/Support/Casting.h"
+#include "mlir/IR/Attributes.h"  // from @llvm-project
 #include "mlir/IR/Operation.h"  // from @llvm-project
 #include "mlir/IR/UseDefLists.h"  // from @llvm-project
 #include "mlir/IR/Value.h"  // from @llvm-project
 limitations under the License.
 namespace mlir {
 namespace tf_executor {

+namespace {
+
+// Checks if a tf_executor.Graph can be pruned.
+// For TensorFlow V1.0 compatibility: when importing a graph without providing
+// feeds/fetches/targets we should not attempt to prune. The best approximation
+// here is to check if the graph is of the "main" function and does not have the
+// "tf.entry_function" attribute defined.
+bool CanPruneGraph(FuncOp func) {
+  return func.getName() != "main" ||
+         func.getAttrOfType<DictionaryAttr>("tf.entry_function") != nullptr;
+}
+
 // Visits an op's operand if it is an output of an Operation in the same
 // tf_executor.graph.
 void VisitOpOperand(GraphOp graph, Value operand,
 void VisitOp(GraphOp graph, Operation* op,
   }
 }

+}  // namespace
+
 // Prunes unreachable operations of a tf_executor.graph operation.
 void PruneGraph(GraphOp graph) {
   // A graph has a single block which forms a DAG: operations that aren't
 namespace {
 // This transformation pass prunes a TF graph eliminating dead-nodes.
 struct GraphPruning : public PassWrapper<GraphPruning, FunctionPass> {
   void runOnFunction() override {
-    getFunction().walk([](tf_executor::GraphOp graph) {
-      // For TensorFlow V1.0 compatibility: when importing a graph without
-      // providing feeds/fetches we should not attempt to prune. The best
-      // approximation here is to check if the graph does not have any fetched
-      // values.
-      if (!graph.GetFetch().getNumOperands()) return;
-
-      PruneGraph(graph);
-    });
+    if (!CanPruneGraph(getFunction())) return;
+    getFunction().walk([](tf_executor::GraphOp graph) { PruneGraph(graph); });
   }
 };

--- a/tensorflow/compiler/mlir/tensorflow/translate/import_model.cc
+++ b/tensorflow/compiler/mlir/tensorflow/translate/import_model.cc
 StatusOr<mlir::OwningModuleRef> GraphDefImporter::Convert(
   TF_RETURN_IF_ERROR(importer.GetControlRetsFromGraph(specs.control_outputs,
                                                       &control_ret_nodes));

-    if (!arg_nodes.empty() || !ret_nodes.empty() ||
-        !control_ret_nodes.empty()) {
-      mlir::Builder b(context);
-      std::string s;
-      llvm::raw_string_ostream ss(s);
-      auto node_name = [&](const OutputTensor& tensor) {
-        ss << tensor.node->name();
-      };
-      llvm::interleave(arg_nodes, ss, node_name, ",");
-      auto inputs = b.getNamedAttr("inputs", b.getStringAttr(ss.str()));
-      s.clear();
-      llvm::interleave(ret_nodes, ss, node_name, ",");
-      auto outputs = b.getNamedAttr("outputs", b.getStringAttr(ss.str()));
-      s.clear();
-      llvm::interleave(specs.control_outputs, ss, ",");
-      auto control_outputs =
-          b.getNamedAttr("control_outputs", b.getStringAttr(ss.str()));
-
-      attrs.push_back(b.getNamedAttr(
-          "tf.entry_function",
-          b.getDictionaryAttr({inputs, outputs, control_outputs})));
-    }
+    mlir::Builder b(context);
+    std::string s;
+    llvm::raw_string_ostream ss(s);
+    auto node_name = [&](const OutputTensor& tensor) {
+      ss << tensor.node->name();
+    };
+    llvm::interleave(arg_nodes, ss, node_name, ",");
+    auto inputs = b.getNamedAttr("inputs", b.getStringAttr(ss.str()));
+    s.clear();
+    llvm::interleave(ret_nodes, ss, node_name, ",");
+    auto outputs = b.getNamedAttr("outputs", b.getStringAttr(ss.str()));
+    s.clear();
+    llvm::interleave(specs.control_outputs, ss, ",");
+    auto control_outputs =
+        b.getNamedAttr("control_outputs", b.getStringAttr(ss.str()));
+
+    // Under `graph_as_function` mode, `tf.entry_function` is always set as it
+    // is assumed feed, fetch, and target nodes are set correctly.
+    attrs.push_back(b.getNamedAttr(
+        "tf.entry_function",
+        b.getDictionaryAttr({inputs, outputs, control_outputs})));
   } else {
     // Collects the argument and return nodes by looking up the node names
     // specified by the user.
|
Update importer to always populate the "tf.entry_function" attribute when graph_as_function is set, and update the graph pruning pass so that it skips pruning only for functions named "main" that do not have the "tf.entry_function" attribute set.
|
tensorflow/tensorflow
|
ab60326f11460ca2d691cec9e407f251e6e49024
|
2020-10-07T22:00:46Z
|
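The pruning gate introduced in the commit above is compact enough to restate. A minimal sketch of the same predicate in plain Python (illustrative only, not the TensorFlow API; the dict lookup stands in for MLIR's getAttrOfType):

```python
def can_prune_graph(func_name: str, attrs: dict) -> bool:
    # Mirrors CanPruneGraph in graph_pruning.cc: pruning is skipped exactly
    # when the function is "main" and carries no "tf.entry_function"
    # attribute, i.e. a V1 graph imported without feed/fetch/target nodes.
    return func_name != "main" or "tf.entry_function" in attrs

assert not can_prune_graph("main", {})                     # V1 import: keep all nodes
assert can_prune_graph("main", {"tf.entry_function": {}})  # entry metadata set: prune
assert can_prune_graph("helper", {})                       # non-main functions: prune
```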
--- a/src/mongo/client/SConscript
+++ b/src/mongo/client/SConscript
 env.Library(
     LIBDEPS=[
         '$BUILD_DIR/mongo/bson/bson',
         '$BUILD_DIR/mongo/db/common',
+        '$BUILD_DIR/mongo/rpc/metadata',
         '$BUILD_DIR/mongo/util/foundation',
         '$BUILD_DIR/mongo/util/net/hostandport',
     ]
 env.Library(
         'remote_command_runner',
         '$BUILD_DIR/mongo/db/query/command_request_response',
         '$BUILD_DIR/mongo/db/query/lite_parsed_query',
+        '$BUILD_DIR/mongo/rpc/metadata',
     ]
 )

--- a/src/mongo/client/remote_command_runner.h
+++ b/src/mongo/client/remote_command_runner.h

 #include "mongo/base/disallow_copying.h"
 #include "mongo/db/jsobj.h"
+#include "mongo/rpc/metadata.h"
 #include "mongo/util/net/hostandport.h"
 #include "mongo/util/time_support.h"

 struct RemoteCommandRequest {

     RemoteCommandRequest() : timeout(kNoTimeout), expirationDate(kNoExpirationDate) {}

+    RemoteCommandRequest(const HostAndPort& theTarget,
+                         const std::string& theDbName,
+                         const BSONObj& theCmdObj,
+                         const BSONObj& metadataObj,
+                         const Milliseconds timeoutMillis = kNoTimeout)
+        : target(theTarget),
+          dbname(theDbName),
+          metadata(metadataObj),
+          cmdObj(theCmdObj),
+          timeout(timeoutMillis) {
+        if (timeoutMillis == kNoTimeout) {
+            expirationDate = kNoExpirationDate;
+        }
+    }
+
     RemoteCommandRequest(const HostAndPort& theTarget,
                          const std::string& theDbName,
                          const BSONObj& theCmdObj,
 struct RemoteCommandRequest {

     HostAndPort target;
     std::string dbname;
+    BSONObj metadata{rpc::makeEmptyMetadata()};
     BSONObj cmdObj;
     Milliseconds timeout;

 struct RemoteCommandResponse {

     RemoteCommandResponse(BSONObj obj, Milliseconds millis) : data(obj), elapsedMillis(millis) {}

+    RemoteCommandResponse(BSONObj dataObj, BSONObj metadataObj, Milliseconds millis)
+        : data(std::move(dataObj)), metadata(std::move(metadataObj)), elapsedMillis(millis) {}
+
     std::string toString() const;

     BSONObj data;
+    BSONObj metadata;
     Milliseconds elapsedMillis;
 };

--- a/src/mongo/client/remote_command_runner_impl.cpp
+++ b/src/mongo/client/remote_command_runner_impl.cpp
 void RemoteCommandRunnerImpl::shutdown() {
 StatusWith<RemoteCommandResponse> RemoteCommandRunnerImpl::runCommand(
     const RemoteCommandRequest& request) {
     try {
-        BSONObj output;
-
         const Date_t requestStartDate = Date_t::now();
         const auto timeoutMillis = getTimeoutMillis(request.expirationDate, requestStartDate);
         if (!timeoutMillis.isOK()) {
 StatusWith<RemoteCommandResponse> RemoteCommandRunnerImpl::runCommand(
         ConnectionPool::ConnectionPtr conn(
             &_connPool, request.target, requestStartDate, timeoutMillis.getValue());

-        bool ok = conn.get()->runCommand(request.dbname, request.cmdObj, output);
+        rpc::UniqueReply commandResponse =
+            conn.get()->runCommandWithMetadata(request.dbname,
+                                               request.cmdObj.firstElementFieldName(),
+                                               request.metadata,
+                                               request.cmdObj);
+
+        BSONObj output = commandResponse->getCommandReply().getOwned();

         // If remote server does not support either find or getMore commands, down convert
         // to using DBClientInterface::query()/getMore().
         // TODO: Perform down conversion based on wire protocol version.
         // Refer to the down conversion implementation in the shell.
-        if (!ok && getStatusFromCommandResult(output).code() == ErrorCodes::CommandNotFound) {
+        if (getStatusFromCommandResult(output).code() == ErrorCodes::CommandNotFound) {
             // 'commandName' will be an empty string if the command object is an empty BSON
             // document.
             StringData commandName = request.cmdObj.firstElement().fieldNameStringData();
 StatusWith<RemoteCommandResponse> RemoteCommandRunnerImpl::runCommand(
         conn.done(requestFinishDate);

         return StatusWith<RemoteCommandResponse>(
-            RemoteCommandResponse(output, Milliseconds(requestFinishDate - requestStartDate)));
+            RemoteCommandResponse(std::move(output),
+                                  commandResponse->getCommandReply().getOwned(),
+                                  Milliseconds(requestFinishDate - requestStartDate)));
     } catch (const DBException& ex) {
         return StatusWith<RemoteCommandResponse>(ex.toStatus());
     } catch (const std::exception& ex) {
--- a/src/mongo/db/repl/data_replicator.cpp
+++ b/src/mongo/db/repl/data_replicator.cpp
 Status DatabasesCloner::start() {

     log() << "starting cloning of all databases";
     // Schedule listDatabase command which will kick off the database cloner per result db.
-    Request listDBsReq(_source, "admin", BSON("listDatabases" << true));
+    Request listDBsReq(_source, "admin", BSON("listDatabases" << true), BSON("$secondaryOk" << 1));
     CBHStatus s = _exec->scheduleRemoteCommand(
         listDBsReq,
         stdx::bind(&DatabasesCloner::_onListDatabaseFinish, this, stdx::placeholders::_1));
|
SERVER-19010 support sending and receiving metadata in RemoteCommandRunner
|
mongodb/mongo
|
f6c46a59105647f4db0c01236c1ea38ca2db70a5
|
2015-06-26T09:59:45Z
|
--- a/cmake/scripts/osx/ArchSetup.cmake
+++ b/cmake/scripts/osx/ArchSetup.cmake
 list(APPEND DEPLIBS "-framework DiskArbitration" "-framework IOKit"
                     "-framework CoreGraphics" "-framework CoreMedia"
                     "-framework VideoToolbox")

-set(CMAKE_OSX_DEPLOYMENT_TARGET 10.8)
+set(CMAKE_OSX_DEPLOYMENT_TARGET 10.9)
 set(CMAKE_XCODE_ATTRIBUTE_CLANG_LINK_OBJC_RUNTIME OFF)
--- a/docs/README.osx.md
+++ b/docs/README.osx.md
 There are 3 ways to build Kodi for Mac:
 Kodi for Mac is composed of a main binary with numerous dynamic libraries and
 codecs that support a multitude of music and video formats.

-The minimum version of OSX you need to run(!) Kodi is 10.8 atm.
+The minimum version of OSX you need to run(!) Kodi is 10.9 atm.

 - On El Capitan (OSX 10.11.x) we recommend using Xcode 8.x.
 - On Sierra (macOS 10.12.x) we recommend using Xcode 8.x.
--- a/tools/depends/configure.ac
+++ b/tools/depends/configure.ac
 case $host in

   # now that we know which sdk, error check sdk_name
   case $use_sdk in
-    10.8);;
     10.9);;
     10.10);;
     10.11);;
 case $host in
       AC_MSG_ERROR(error in configure of --with-sdk=$use_sdk)
   esac
   sdk_name=macosx$use_sdk
-  platform_min_version="macosx-version-min=10.8"
+  platform_min_version="macosx-version-min=10.9"

   use_sdk_path=[`$use_xcodebuild -version -sdk $sdk_name Path`]
   platform_os="osx"
|
[osx] bump minimum version to 10.9
|
xbmc/xbmc
|
6ea8315cfcc9420d4fcfe3ba716a57a57cb768d7
|
2018-05-30T08:00:00Z
|
--- a/bin/import_cv.py
+++ b/bin/import_cv.py
 def _maybe_convert_set(extracted_dir, source_csv, target_csv):
     pool.join()

     print('Writing "%s"...' % target_csv)
-    with open(target_csv, "w") as target_csv_file:
+    with open(target_csv, "w", encoding="utf-8", newline="") as target_csv_file:
         writer = csv.DictWriter(target_csv_file, fieldnames=FIELDNAMES)
         writer.writeheader()
         bar = progressbar.ProgressBar(max_value=len(rows), widgets=SIMPLE_BAR)
--- a/bin/import_lingua_libre.py
+++ b/bin/import_lingua_libre.py
 def _maybe_convert_sets(target_dir, extracted_data):
     pool.close()
     pool.join()

-    with open(target_csv_template.format("train"), "w") as train_csv_file:  # 80%
-        with open(target_csv_template.format("dev"), "w") as dev_csv_file:  # 10%
-            with open(target_csv_template.format("test"), "w") as test_csv_file:  # 10%
+    with open(target_csv_template.format("train"), "w", encoding="utf-8", newline="") as train_csv_file:  # 80%
+        with open(target_csv_template.format("dev"), "w", encoding="utf-8", newline="") as dev_csv_file:  # 10%
+            with open(target_csv_template.format("test"), "w", encoding="utf-8", newline="") as test_csv_file:  # 10%
                 train_writer = csv.DictWriter(train_csv_file, fieldnames=FIELDNAMES)
                 train_writer.writeheader()
                 dev_writer = csv.DictWriter(dev_csv_file, fieldnames=FIELDNAMES)
--- a/bin/import_m-ailabs.py
+++ b/bin/import_m-ailabs.py
 def _maybe_convert_sets(target_dir, extracted_data):
     pool.close()
     pool.join()

-    with open(target_csv_template.format("train"), "w") as train_csv_file:  # 80%
-        with open(target_csv_template.format("dev"), "w") as dev_csv_file:  # 10%
-            with open(target_csv_template.format("test"), "w") as test_csv_file:  # 10%
+    with open(target_csv_template.format("train"), "w", encoding="utf-8", newline="") as train_csv_file:  # 80%
+        with open(target_csv_template.format("dev"), "w", encoding="utf-8", newline="") as dev_csv_file:  # 10%
+            with open(target_csv_template.format("test"), "w", encoding="utf-8", newline="") as test_csv_file:  # 10%
                 train_writer = csv.DictWriter(train_csv_file, fieldnames=FIELDNAMES)
                 train_writer.writeheader()
                 dev_writer = csv.DictWriter(dev_csv_file, fieldnames=FIELDNAMES)
--- a/bin/import_slr57.py
+++ b/bin/import_slr57.py
 def _maybe_convert_sets(target_dir, extracted_data):
     pool.close()
     pool.join()

-    with open(target_csv_template.format("train"), "w") as train_csv_file:  # 80%
-        with open(target_csv_template.format("dev"), "w") as dev_csv_file:  # 10%
-            with open(target_csv_template.format("test"), "w") as test_csv_file:  # 10%
+    with open(target_csv_template.format("train"), "w", encoding="utf-8", newline="") as train_csv_file:  # 80%
+        with open(target_csv_template.format("dev"), "w", encoding="utf-8", newline="") as dev_csv_file:  # 10%
+            with open(target_csv_template.format("test"), "w", encoding="utf-8", newline="") as test_csv_file:  # 10%
                 train_writer = csv.DictWriter(train_csv_file, fieldnames=FIELDNAMES)
                 train_writer.writeheader()
                 dev_writer = csv.DictWriter(dev_csv_file, fieldnames=FIELDNAMES)
--- a/bin/import_swc.py
+++ b/bin/import_swc.py
 def write_csvs(samples, language):
     base_dir = os.path.abspath(CLI_ARGS.base_dir)
     csv_path = os.path.join(base_dir, language + "-" + sub_set + ".csv")
     print('Writing "{}"...'.format(csv_path))
-    with open(csv_path, "w") as csv_file:
+    with open(csv_path, "w", encoding="utf-8", newline="") as csv_file:
         writer = csv.DictWriter(
             csv_file, fieldnames=FIELDNAMES_EXT if CLI_ARGS.add_meta else FIELDNAMES
         )
--- a/bin/import_ts.py
+++ b/bin/import_ts.py
 def _maybe_convert_sets(target_dir, extracted_data, english_compatible=False):
     pool.close()
     pool.join()

-    with open(target_csv_template.format("train"), "w") as train_csv_file:  # 80%
-        with open(target_csv_template.format("dev"), "w") as dev_csv_file:  # 10%
-            with open(target_csv_template.format("test"), "w") as test_csv_file:  # 10%
+    with open(target_csv_template.format("train"), "w", encoding="utf-8", newline="") as train_csv_file:  # 80%
+        with open(target_csv_template.format("dev"), "w", encoding="utf-8", newline="") as dev_csv_file:  # 10%
+            with open(target_csv_template.format("test"), "w", encoding="utf-8", newline="") as test_csv_file:  # 10%
                 train_writer = csv.DictWriter(train_csv_file, fieldnames=FIELDNAMES)
                 train_writer.writeheader()
                 dev_writer = csv.DictWriter(dev_csv_file, fieldnames=FIELDNAMES)
--- a/bin/import_tuda.py
+++ b/bin/import_tuda.py
 def write_csvs(extracted):
         CLI_ARGS.base_dir, "tuda-{}-{}.csv".format(TUDA_VERSION, sub_set)
     )
     print('Writing "{}"...'.format(csv_path))
-    with open(csv_path, "w") as csv_file:
+    with open(csv_path, "w", encoding="utf-8", newline="") as csv_file:
         writer = csv.DictWriter(csv_file, fieldnames=FIELDNAMES)
         writer.writeheader()
         set_dir = os.path.join(extracted, sub_set)
|
Fix csv DictWriter parameter
|
mozilla/DeepSpeech
|
a252ae01a051792565d8b39b74ad3c03215e8830
|
2020-06-05T13:27:19Z
|
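The one-line change repeated across the importers above matters on two axes: `newline=""` hands row-terminator control to the csv module (without it, Windows writes \r\r\n and every record appears followed by a blank row), and an explicit `encoding="utf-8"` keeps non-ASCII transcripts from depending on the platform's locale default. A minimal standalone sketch of the corrected pattern (file name and column names here are illustrative, not taken verbatim from the importers):

```python
import csv

FIELDNAMES = ["wav_filename", "wav_filesize", "transcript"]  # illustrative columns
rows = [{"wav_filename": "sample.wav", "wav_filesize": 1234, "transcript": "héllo"}]

# newline="" lets csv manage line endings; encoding="utf-8" pins the codec.
with open("train.csv", "w", encoding="utf-8", newline="") as f:
    writer = csv.DictWriter(f, fieldnames=FIELDNAMES)
    writer.writeheader()
    writer.writerows(rows)
```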
--- a/hphp/tools/gdb/gdbutils.py
+++ b/hphp/tools/gdb/gdbutils.py
 def hash_string(s):
     if tail_sz == 0:
         return crc >> 1

-    shift = -((tail_sz - 8) << 3) & 0b11111
+    shift = -((tail_sz - 8) << 3) & 0b111111
     tail = _unpack(s[size:].ljust(8, '\0'))

     crc = _crc32q(crc, tail << shift)
|
Fix hash_string for short string tails
|
facebook/hhvm
|
9ebe2b8b58a026ef702dc051a1711a4b533e847a
|
2015-02-18T02:00:32Z
|
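The masked shift above is worth unpacking. For a tail of `tail_sz` bytes (1–7), the code needs to shift the zero-padded 8-byte tail left by `(8 - tail_sz) * 8` bits, a value that reaches 56; a five-bit mask (`0b11111`) truncates anything at or above 32, which is exactly why hashes of short string tails came out wrong. A quick self-contained check (plain Python, independent of the gdb plugin):

```python
for tail_sz in range(1, 8):
    shift = -((tail_sz - 8) << 3)       # (8 - tail_sz) * 8 = 56, 48, ..., 8
    assert shift == (8 - tail_sz) * 8
    if shift != shift & 0b11111:        # the old 5-bit mask
        # tail_sz 1..4 give shifts 56, 48, 40, 32 -> mangled to 24, 16, 8, 0,
        # so the tail was XORed into the wrong bit positions of the hash.
        print(tail_sz, shift, shift & 0b11111)
    assert shift == shift & 0b111111    # the fixed 6-bit mask is lossless here
```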
--- a/Marlin/Configuration.h
+++ b/Marlin/Configuration.h
   #define U8GLIB_SH1106
 #endif

+//
+// Original Ulticontroller from Ultimaker 2 printer with SSD1309 I2C display and encoder
+// https://github.com/Ultimaker/Ultimaker2/tree/master/1249_Ulticontroller_Board_(x1)
+//
+//#define ULTI_CONTROLLER
+
 //
 // CONTROLLER TYPE: Shift register panels
 //
--- a/Marlin/src/inc/Conditionals_LCD.h
+++ b/Marlin/src/inc/Conditionals_LCD.h
   #define DEFAULT_LCD_CONTRAST 17
 #endif

-// Generic support for SSD1306 / SH1106 OLED based LCDs.
-#if ENABLED(U8GLIB_SSD1306) || ENABLED(U8GLIB_SH1106)
+#if ENABLED(ULTI_CONTROLLER)
+  #define U8GLIB_SSD1309
+  #define REVERSE_ENCODER_DIRECTION
+  #define LCD_RESET_PIN LCD_PINS_D6  // This controller need a reset pin
+  #define LCD_CONTRAST_MIN 0
+  #define LCD_CONTRAST_MAX 254
+  #define DEFAULT_LCD_CONTRAST 127
+  #define ENCODER_PULSES_PER_STEP 2
+  #define ENCODER_STEPS_PER_MENU_ITEM 2
+#endif
+
+// Generic support for SSD1306 / SSD1309 / SH1106 OLED based LCDs.
+#if ENABLED(U8GLIB_SSD1306) || ENABLED(U8GLIB_SSD1309) || ENABLED(U8GLIB_SH1106)
   #define ULTRA_LCD  // general LCD support, also 16x2
-  #define DOGLCD  // Support for I2C LCD 128x64 (Controller SSD1306 / SH1106 graphic Display Family)
+  #define DOGLCD  // Support for I2C LCD 128x64 (Controller SSD1306 / SSD1309 / SH1106 graphic Display Family)
 #endif

 #if ENABLED(PANEL_ONE) || ENABLED(U8GLIB_SH1106)

 #if ENABLED(ULTIMAKERCONTROLLER) \
  || ENABLED(REPRAP_DISCOUNT_SMART_CONTROLLER) \
  || ENABLED(G3D_PANEL) \
- || ENABLED(RIGIDBOT_PANEL)
+ || ENABLED(RIGIDBOT_PANEL) \
+ || ENABLED(ULTI_CONTROLLER)
   #define ULTIPANEL
 #endif

--- a/Marlin/src/inc/SanityCheck.h
+++ b/Marlin/src/inc/SanityCheck.h
 static_assert(1 >= 0
   #if ENABLED(ZONESTAR_LCD)
     + 1
   #endif
+  #if ENABLED(ULTI_CONTROLLER)
+    + 1
+  #endif
   , "Please select no more than one LCD controller option."
 );

--- a/Marlin/src/lcd/ultralcd_impl_DOGM.h
+++ b/Marlin/src/lcd/ultralcd_impl_DOGM.h
   // Generic support for SH1106 OLED I2C LCDs
   //U8GLIB_SH1106_128X64_2X_I2C_2_WIRE u8g(U8G_I2C_OPT_NONE | U8G_I2C_OPT_FAST);  // 4 stripes
   U8GLIB_SH1106_128X64_2X u8g(U8G_I2C_OPT_NONE | U8G_I2C_OPT_FAST);  // 4 stripes
+#elif ENABLED(U8GLIB_SSD1309)
+  // Generic support for SSD1309 OLED I2C LCDs
+  U8GLIB_SSD1309_128X64 u8g(U8G_I2C_OPT_NONE | U8G_I2C_OPT_FAST);
 #elif ENABLED(MINIPANEL)
   // The MINIPanel display
   //U8GLIB_MINI12864 u8g(DOGLCD_CS, DOGLCD_A0);  // 8 stripes
|
Add UltiController v2.1 support
|
MarlinFirmware/Marlin
|
16739264d3a3799b44396985c3a200ffde73823f
|
2018-02-06T04:30:03Z
|
--- a/src/idl/base.php
+++ b/src/idl/base.php
 function generateFuncProfileHeader($func, $f) {
   if (isset($func['taint_observer'])) {
     fprintf(
       $f,
-      "TAINT_OBSERVER(%d, %d);\n",
+      "TAINT_OBSERVER(%s, %s);\n",
       $func['taint_observer']['set_mask'],
       $func['taint_observer']['clear_mask']);
   } else {
-    fprintf($f, "TAINT_OBSERVER(TAINT_BIT_NONE, TAINT_BIT_NONE);\n");
+    fprintf($f, "TAINT_OBSERVER(TAINT_BIT_MUTATED, TAINT_BIT_NONE);\n");
   }

   fprintf($f, "");
--- a/src/idl/fb.idl.php
+++ b/src/idl/fb.idl.php
       'desc' => "The bit to flag.",
     ),
   ),
+  'taint_observer' => array(
+    'set_mask' => "TAINT_BIT_NONE",
+    'clear_mask' => "TAINT_BIT_NONE",
+  ),
 ));

 DefineFunction(
       'desc' => "The bit to clear.",
     ),
   ),
+  'taint_observer' => array(
+    'set_mask' => "TAINT_BIT_NONE",
+    'clear_mask' => "TAINT_BIT_NONE",
+  ),
 ));

 DefineFunction(
     'desc' => "Checks to see if a bit is set.",
     'flags' => HasDocComment | HipHopSpecific,
     'return' => array(
-      'type' => Int32,
-      'desc' => "All bits that were tainted.",
+      'type' => Boolean,
+      'desc' => "Whether the taint was set.",
     ),
     'args' => array(
       array(
         'type' => String,
         'desc' => "The string to check.",
       ),
+      array(
+        'name' => "taint",
+        'type' => Int32,
+        'desc' => "The bit to check against.",
+      ),
+    ),
+    'taint_observer' => array(
+      'set_mask' => "TAINT_BIT_NONE",
+      'clear_mask' => "TAINT_BIT_NONE",
     ),
 ));

--- a/src/runtime/base/execution_context.cpp
+++ b/src/runtime/base/execution_context.cpp
 void ExecutionContext::write(CStrRef s) {
     taint_warn_if_tainted(s, TAINT_BIT_HTML);
   }
 #endif
-
   write(s.data(), s.size());
 }

--- a/src/runtime/base/taint/taint_data.h
+++ b/src/runtime/base/taint/taint_data.h

 #ifdef TAINTED

-#define TAINT_BIT_HTML (0x01)
-#define TAINT_BIT_SQL  (0x02)
-#define TAINT_BIT_ALL  (0x03)
-#define TAINT_BIT_NONE (0x00)
+// Taint bits have the semantic of being propagated by OR; untainted then
+// implies a semantic of propagation by AND.
+#define TAINT_BIT_HTML    (0x01)
+#define TAINT_BIT_SQL     (0x02)
+#define TAINT_BIT_MUTATED (0x04)
+#define TAINT_BIT_ALL     (0x07)  // Does not include TRACED bit
+#define TAINT_BIT_TRACED  (0x08)
+#define TAINT_BIT_NONE    (0x00)

 #include <runtime/base/taint/taint_metadata.h>

 class TaintData {
   void setTaint(bitstring bits, const char *original_str);
   void unsetTaint(bitstring bits);
   const char *getOriginalStr() const;
-
   void clearMetadata();
-
   void dump() const;
 private:
   bitstring m_taint_bits;
--- a/src/runtime/base/taint/taint_helper.cpp
+++ b/src/runtime/base/taint/taint_helper.cpp

 #ifdef TAINTED

+#include <map>
+#include <boost/assign.hpp>
+
 #include <runtime/base/types.h>
 #include <runtime/base/array/array_iterator.h>
 #include <runtime/base/complex_types.h>

 namespace HPHP {

+std::map<int, std::string> taint_names = boost::assign::map_list_of
+  (TAINT_BIT_HTML, "HTML-unsafe")
+  (TAINT_BIT_SQL, "SQL-unsafe")
+  (TAINT_BIT_MUTATED, "non-static");
+
 void taint_warn_if_tainted(CStrRef s, const bitstring bit) {
   if (s.get()->getTaintDataRef().getTaint() & bit) {
-    std::string buf = "using a tainted string!!!\n";
+    std::string buf = "Using a " + taint_names[bit] + " (tainted) string!\n";
     if (s.get()->getTaintDataRef().getOriginalStr()) {
       buf += "original string: ";
       buf += s.get()->getTaintDataRef().getOriginalStr();
--- a/src/runtime/base/taint/taint_observer.cpp
+++ b/src/runtime/base/taint/taint_observer.cpp
 void TaintObserver::RegisterMutated(StringBuffer *string_buffer) {
     return;
   }

-  // Prevent recursive calls into the TaintObserver
+  // Prevent recursive calls into the TaintObserver.
   TaintObserver *tc = *instance;
   *instance = NULL;

--- a/src/runtime/base/type_string.cpp
+++ b/src/runtime/base/type_string.cpp
 #include <runtime/base/zend/zend_functions.h>
 #include <runtime/base/zend/zend_string.h>
 #include <runtime/base/zend/zend_printf.h>
+#include <runtime/base/taint/taint_observer.h>

 namespace HPHP {

 String::String(int n) {
   int len;
   char *buf;

+  TAINT_OBSERVER(TAINT_BIT_MUTATED, TAINT_BIT_NONE);
+
   tmpbuf[11] = '\0';
   p = conv_10(n, &is_negative, &tmpbuf[11], &len);

 String::String(int64 n) {
   int len;
   char *buf;

+  TAINT_OBSERVER(TAINT_BIT_MUTATED, TAINT_BIT_NONE);
+
   tmpbuf[20] = '\0';
   p = conv_10(n, &is_negative, &tmpbuf[20], &len);

 String::String(int64 n) {

 String::String(double n) {
   char *buf;
+
+  TAINT_OBSERVER(TAINT_BIT_MUTATED, TAINT_BIT_NONE);
+
   if (n == 0.0) n = 0.0; // so to avoid "-0" output
   vspprintf(&buf, 0, "%.*G", 14, n);
   m_px = NEW(StringData)(buf, AttachString);
--- a/src/runtime/base/util/string_buffer.cpp
+++ b/src/runtime/base/util/string_buffer.cpp
 char *StringBuffer::detach(int &size) {
 String StringBuffer::detach() {
   TAINT_OBSERVER_REGISTER_ACCESSED(this);
 #ifdef TAINTED
-  getTaintData()->unsetTaint(TAINT_BIT_ALL);
+  getTaintData()->unsetTaint(TAINT_BIT_ALL);
 #endif

   if (m_buffer && m_pos) {
--- a/src/runtime/ext/ext_fb.cpp
+++ b/src/runtime/ext/ext_fb.cpp
 void f_fb_unset_taint(VRefParam str, int taint) {
 #endif
 }

-int f_fb_get_taint(CStrRef str) {
+bool f_fb_get_taint(CStrRef str, int taint) {
 #ifdef TAINTED
   StringData *string_data = str.get();
   ASSERT(string_data);
-  return string_data->getTaintData()->getTaint();
+  return string_data->getTaintData()->getTaint() & taint;
 #else
-  return 0;
+  return false;
 #endif
 }

--- a/src/runtime/ext/ext_fb.h
+++ b/src/runtime/ext/ext_fb.h
 Array f_fb_parallel_query(CArrRef sql_map, int max_thread = 50, bool combine_res
 Array f_fb_crossall_query(CStrRef sql, int max_thread = 50, bool retry_query_on_fail = true, int connect_timeout = -1, int read_timeout = -1, bool timeout_in_ms = false);
 void f_fb_set_taint(VRefParam str, int taint);
 void f_fb_unset_taint(VRefParam str, int taint);
-int f_fb_get_taint(CStrRef str);
+bool f_fb_get_taint(CStrRef str, int taint);
 Variant f_fb_const_fetch(CVarRef key);
 bool f_fb_output_compression(bool new_value);
 void f_fb_set_exit_callback(CVarRef function);
--- a/src/runtime/ext/profile/extprofile_fb.h
+++ b/src/runtime/ext/profile/extprofile_fb.h
 namespace HPHP {

 inline Variant x_fb_thrift_serialize(CVarRef thing) {
   FUNCTION_INJECTION_BUILTIN(fb_thrift_serialize);
-  TAINT_OBSERVER(TAINT_BIT_NONE, TAINT_BIT_NONE);
+  TAINT_OBSERVER(TAINT_BIT_MUTATED, TAINT_BIT_NONE);
   return f_fb_thrift_serialize(thing);
 }

 inline Variant x_fb_thrift_unserialize(CVarRef thing, VRefParam success, VRefParam errcode = null_variant) {
   FUNCTION_INJECTION_BUILTIN(fb_thrift_unserialize);
-  TAINT_OBSERVER(TAINT_BIT_NONE, TAINT_BIT_NONE);
+  TAINT_OBSERVER(TAINT_BIT_MUTATED, TAINT_BIT_NONE);
   return f_fb_thrift_unserialize(thing, success, errcode);
 }

 inline Variant x_fb_serialize(CVarRef thing) {
   FUNCTION_INJECTION_BUILTIN(fb_serialize);
-  TAINT_OBSERVER(TAINT_BIT_NONE, TAINT_BIT_NONE);
+  TAINT_OBSERVER(TAINT_BIT_MUTATED, TAINT_BIT_NONE);
   return f_fb_serialize(thing);
 }

 inline Variant x_fb_unserialize(CVarRef thing, VRefParam success, VRefParam errcode = null_variant) {
   FUNCTION_INJECTION_BUILTIN(fb_unserialize);
-  TAINT_OBSERVER(TAINT_BIT_NONE, TAINT_BIT_NONE);
+  TAINT_OBSERVER(TAINT_BIT_MUTATED, TAINT_BIT_NONE);
   return f_fb_unserialize(thing, success, errcode);
 }

 inline bool x_fb_intercept(CStrRef name, CVarRef handler, CVarRef data = null_variant) {
   FUNCTION_INJECTION_BUILTIN(fb_intercept);
-  TAINT_OBSERVER(TAINT_BIT_NONE, TAINT_BIT_NONE);
+  TAINT_OBSERVER(TAINT_BIT_MUTATED, TAINT_BIT_NONE);
   return f_fb_intercept(name, handler, data);
 }

 inline Variant x_fb_stubout_intercept_handler(CStrRef name, CVarRef obj, CArrRef params, CVarRef data, VRefParam done) {
   FUNCTION_INJECTION_BUILTIN(fb_stubout_intercept_handler);
-  TAINT_OBSERVER(TAINT_BIT_NONE, TAINT_BIT_NONE);
+  TAINT_OBSERVER(TAINT_BIT_MUTATED, TAINT_BIT_NONE);
   return f_fb_stubout_intercept_handler(name, obj, params, data, done);
 }

 inline Variant x_fb_rpc_intercept_handler(CStrRef name, CVarRef obj, CArrRef params, CVarRef data, VRefParam done) {
   FUNCTION_INJECTION_BUILTIN(fb_rpc_intercept_handler);
-  TAINT_OBSERVER(TAINT_BIT_NONE, TAINT_BIT_NONE);
+  TAINT_OBSERVER(TAINT_BIT_MUTATED, TAINT_BIT_NONE);
   return f_fb_rpc_intercept_handler(name, obj, params, data, done);
 }

 inline void x_fb_renamed_functions(CArrRef names) {
   FUNCTION_INJECTION_BUILTIN(fb_renamed_functions);
-  TAINT_OBSERVER(TAINT_BIT_NONE, TAINT_BIT_NONE);
+  TAINT_OBSERVER(TAINT_BIT_MUTATED, TAINT_BIT_NONE);
   f_fb_renamed_functions(names);
 }

 inline bool x_fb_rename_function(CStrRef orig_func_name, CStrRef new_func_name) {
   FUNCTION_INJECTION_BUILTIN(fb_rename_function);
-  TAINT_OBSERVER(TAINT_BIT_NONE, TAINT_BIT_NONE);
+  TAINT_OBSERVER(TAINT_BIT_MUTATED, TAINT_BIT_NONE);
   return f_fb_rename_function(orig_func_name, new_func_name);
 }

 inline bool x_fb_utf8ize(VRefParam input) {
   FUNCTION_INJECTION_BUILTIN(fb_utf8ize);
-  TAINT_OBSERVER(TAINT_BIT_NONE, TAINT_BIT_NONE);
+  TAINT_OBSERVER(TAINT_BIT_MUTATED, TAINT_BIT_NONE);
   return f_fb_utf8ize(input);
 }

 inline Array x_fb_call_user_func_safe(int _argc, CVarRef function, CArrRef _argv = null_array) {
   FUNCTION_NOPROFILE_BUILTIN(fb_call_user_func_safe);
-  TAINT_OBSERVER(TAINT_BIT_NONE, TAINT_BIT_NONE);
+  TAINT_OBSERVER(TAINT_BIT_MUTATED, TAINT_BIT_NONE);
   return f_fb_call_user_func_safe(_argc, function, _argv);
 }

 inline Variant x_fb_call_user_func_safe_return(int _argc, CVarRef function, CVarRef def, CArrRef _argv = null_array) {
   FUNCTION_NOPROFILE_BUILTIN(fb_call_user_func_safe_return);
-  TAINT_OBSERVER(TAINT_BIT_NONE, TAINT_BIT_NONE);
+  TAINT_OBSERVER(TAINT_BIT_MUTATED, TAINT_BIT_NONE);
   return f_fb_call_user_func_safe_return(_argc, function, def, _argv);
 }

 inline Array x_fb_call_user_func_array_safe(CVarRef function, CArrRef params) {
   FUNCTION_NOPROFILE_BUILTIN(fb_call_user_func_array_safe);
-  TAINT_OBSERVER(TAINT_BIT_NONE, TAINT_BIT_NONE);
+  TAINT_OBSERVER(TAINT_BIT_MUTATED, TAINT_BIT_NONE);
   return f_fb_call_user_func_array_safe(function, params);
 }

 inline Variant x_fb_get_code_coverage(bool flush) {
   FUNCTION_INJECTION_BUILTIN(fb_get_code_coverage);
-  TAINT_OBSERVER(TAINT_BIT_NONE, TAINT_BIT_NONE);
+  TAINT_OBSERVER(TAINT_BIT_MUTATED, TAINT_BIT_NONE);
   return f_fb_get_code_coverage(flush);
 }

 inline void x_xhprof_enable(int flags = 0, CArrRef args = null_array) {
   FUNCTION_INJECTION_BUILTIN(xhprof_enable);
-  TAINT_OBSERVER(TAINT_BIT_NONE, TAINT_BIT_NONE);
+  TAINT_OBSERVER(TAINT_BIT_MUTATED, TAINT_BIT_NONE);
   f_xhprof_enable(flags, args);
 }

 inline Variant x_xhprof_disable() {
   FUNCTION_INJECTION_BUILTIN(xhprof_disable);
-  TAINT_OBSERVER(TAINT_BIT_NONE, TAINT_BIT_NONE);
+  TAINT_OBSERVER(TAINT_BIT_MUTATED, TAINT_BIT_NONE);
   return f_xhprof_disable();
 }

 inline void x_xhprof_network_enable() {
   FUNCTION_INJECTION_BUILTIN(xhprof_network_enable);
-  TAINT_OBSERVER(TAINT_BIT_NONE, TAINT_BIT_NONE);
+  TAINT_OBSERVER(TAINT_BIT_MUTATED, TAINT_BIT_NONE);
   f_xhprof_network_enable();
 }

 inline Variant x_xhprof_network_disable() {
   FUNCTION_INJECTION_BUILTIN(xhprof_network_disable);
-  TAINT_OBSERVER(TAINT_BIT_NONE, TAINT_BIT_NONE);
+  TAINT_OBSERVER(TAINT_BIT_MUTATED, TAINT_BIT_NONE);
   return f_xhprof_network_disable();
 }

 inline void x_xhprof_frame_begin(CStrRef name) {
-  TAINT_OBSERVER(TAINT_BIT_NONE, TAINT_BIT_NONE);
+  TAINT_OBSERVER(TAINT_BIT_MUTATED, TAINT_BIT_NONE);
   f_xhprof_frame_begin(name);
 }

 inline void x_xhprof_frame_end() {
-  TAINT_OBSERVER(TAINT_BIT_NONE, TAINT_BIT_NONE);
+  TAINT_OBSERVER(TAINT_BIT_MUTATED, TAINT_BIT_NONE);
   f_xhprof_frame_end();
 }

 inline Variant x_xhprof_run_trace(CStrRef packedTrace, int flags) {
   FUNCTION_INJECTION_BUILTIN(xhprof_run_trace);
-  TAINT_OBSERVER(TAINT_BIT_NONE, TAINT_BIT_NONE);
+  TAINT_OBSERVER(TAINT_BIT_MUTATED, TAINT_BIT_NONE);
   return f_xhprof_run_trace(packedTrace, flags);
 }

 inline void x_xhprof_sample_enable() {
   FUNCTION_INJECTION_BUILTIN(xhprof_sample_enable);
-  TAINT_OBSERVER(TAINT_BIT_NONE, TAINT_BIT_NONE);
+  TAINT_OBSERVER(TAINT_BIT_MUTATED, TAINT_BIT_NONE);
   f_xhprof_sample_enable();
 }

 inline Variant x_xhprof_sample_disable() {
   FUNCTION_INJECTION_BUILTIN(xhprof_sample_disable);
-  TAINT_OBSERVER(TAINT_BIT_NONE, TAINT_BIT_NONE);
+  TAINT_OBSERVER(TAINT_BIT_MUTATED, TAINT_BIT_NONE);
   return f_xhprof_sample_disable();
 }

 inline void x_fb_load_local_databases(CArrRef servers) {
   FUNCTION_INJECTION_BUILTIN(fb_load_local_databases);
-  TAINT_OBSERVER(TAINT_BIT_NONE, TAINT_BIT_NONE);
+  TAINT_OBSERVER(TAINT_BIT_MUTATED, TAINT_BIT_NONE);
   f_fb_load_local_databases(servers);
 }

 inline Array x_fb_parallel_query(CArrRef sql_map, int max_thread = 50, bool combine_result = true, bool retry_query_on_fail = true, int connect_timeout = -1, int read_timeout = -1, bool timeout_in_ms = false) {
   FUNCTION_INJECTION_BUILTIN(fb_parallel_query);
-  TAINT_OBSERVER(TAINT_BIT_NONE, TAINT_BIT_NONE);
+  TAINT_OBSERVER(TAINT_BIT_MUTATED, TAINT_BIT_NONE);
   return f_fb_parallel_query(sql_map, max_thread, combine_result, retry_query_on_fail, connect_timeout, read_timeout, timeout_in_ms);
 }

 inline Array x_fb_crossall_query(CStrRef sql, int max_thread = 50, bool retry_query_on_fail = true, int connect_timeout = -1, int read_timeout = -1, bool timeout_in_ms = false) {
   FUNCTION_INJECTION_BUILTIN(fb_crossall_query);
-  TAINT_OBSERVER(TAINT_BIT_NONE, TAINT_BIT_NONE);
+  TAINT_OBSERVER(TAINT_BIT_MUTATED, TAINT_BIT_NONE);
   return f_fb_crossall_query(sql, max_thread, retry_query_on_fail, connect_timeout, read_timeout, timeout_in_ms);
 }

 inline void x_fb_unset_taint(VRefParam str, int taint) {
   f_fb_unset_taint(str, taint);
 }

-inline int x_fb_get_taint(CStrRef str) {
+inline bool x_fb_get_taint(CStrRef str, int taint) {
   FUNCTION_INJECTION_BUILTIN(fb_get_taint);
   TAINT_OBSERVER(TAINT_BIT_NONE, TAINT_BIT_NONE);
-  return f_fb_get_taint(str);
+  return f_fb_get_taint(str, taint);
 }

 inline Variant x_fb_const_fetch(CVarRef key) {
   FUNCTION_INJECTION_BUILTIN(fb_const_fetch);
-  TAINT_OBSERVER(TAINT_BIT_NONE, TAINT_BIT_NONE);
+  TAINT_OBSERVER(TAINT_BIT_MUTATED, TAINT_BIT_NONE);
   return f_fb_const_fetch(key);
 }

 inline bool x_fb_output_compression(bool new_value) {
   FUNCTION_INJECTION_BUILTIN(fb_output_compression);
-  TAINT_OBSERVER(TAINT_BIT_NONE, TAINT_BIT_NONE);
+  TAINT_OBSERVER(TAINT_BIT_MUTATED, TAINT_BIT_NONE);
   return f_fb_output_compression(new_value);
 }

 inline void x_fb_set_exit_callback(CVarRef function) {
   FUNCTION_INJECTION_BUILTIN(fb_set_exit_callback);
-  TAINT_OBSERVER(TAINT_BIT_NONE, TAINT_BIT_NONE);
+  TAINT_OBSERVER(TAINT_BIT_MUTATED, TAINT_BIT_NONE);
   f_fb_set_exit_callback(function);
 }

 inline Array x_fb_get_flush_stat() {
   FUNCTION_INJECTION_BUILTIN(fb_get_flush_stat);
-  TAINT_OBSERVER(TAINT_BIT_NONE, TAINT_BIT_NONE);
+  TAINT_OBSERVER(TAINT_BIT_MUTATED, TAINT_BIT_NONE);
   return f_fb_get_flush_stat();
 }

--- a/src/system/fb.inc
+++ b/src/system/fb.inc
 "fb_crossall_query", T(Array), S(0), "sql", T(String), NULL, NULL, S(0), "max_thread", T(Int32), "i:50;", "50", S(0), "retry_query_on_fail", T(Boolean), "b:1;", "true", S(0), "connect_timeout", T(Int32), "i:-1;", "-1", S(0), "read_timeout", T(Int32), "i:-1;", "-1", S(0), "timeout_in_ms", T(Boolean), "b:0;", "false", S(0), NULL, S(81920), "/**\n * (HipHop specific)\n *\n * Runs a MySQL query against all databases in the map loaded by\n * fb_load_local_databases(). Internally it will spawn threads, each of\n * which executes the same query against one or more databases\n * sequentially.\n *\n * @sql string The SQL query to execute. For safety reasons, this\n * has to be a SELECT statement with WHERE clause.\n * @max_thread int Maximum number of threads to run.\n * @retry_query_on_fail\n * bool Whether or not retry query once when it fails. This\n * may be useful if database connection is temporarily\n * lost during queries and re-executing a SELECT or\n * other idempotent queries are acceptable.\n * @connect_timeout\n * int Connection timeout. If timeout_in_ms is TRUE, it is\n * in seconds. Otherwise, it is in milli-seconds.\n * @read_timeout\n * int Query timeout. If timeout_in_ms is TRUE, it is in\n * seconds. Otherwise, it is in milli-seconds.\n * @timeout_in_ms\n * bool Whether connect_timeout or read_timeout are in\n * seconds or milli-seconds.\n *\n * @return map Query result in a format of array('affected' =>\n * {number of affected rows}, 'result' => array({dbId1}\n * => {dataset}, {dbId2} => ...), 'error' =>\n * array({dbId1} => {error message}, {dbId2} => ....),\n * 'errno' => array({dbId1} => {error code}, {dbId2} =>\n * ....)).\n */",
 "fb_set_taint", T(Void), S(0), "str", T(Variant), NULL, NULL, S(1), "taint", T(Int32), NULL, NULL, S(0), NULL, S(81920), "/**\n * (HipHop specific)\n *\n * Taints a string with a flag or a bit. This bit is contagious in string\n * operations, being carried over to new strings that are copied or\n * composed from this string. This may be used for checking dirty or clean\n * status of end user's input for different purposes.\n *\n * @str mixed The string to taint.\n * @taint int The bit to flag.\n */",
 "fb_unset_taint", T(Void), S(0), "str", T(Variant), NULL, NULL, S(1), "taint", T(Int32), NULL, NULL, S(0), NULL, S(81920), "/**\n * (HipHop specific)\n *\n * Untaints a string by clearing off the bit that was set or carried over.\n *\n * @str mixed The string to untaint.\n * @taint int The bit to clear.\n */",
-"fb_get_taint", T(Int32), S(0), "str", T(String), NULL, NULL, S(0), NULL, S(81920), "/**\n * (HipHop specific)\n *\n * Checks to see if a bit is set.\n *\n * @str string The string to check.\n *\n * @return int All bits that were tainted.\n */",
+"fb_get_taint", T(Boolean), S(0), "str", T(String), NULL, NULL, S(0), "taint", T(Int32), NULL, NULL, S(0), NULL, S(81920), "/**\n * (HipHop specific)\n *\n * Checks to see if a bit is set.\n *\n * @str string The string to check.\n * @taint int The bit to check against.\n *\n * @return bool Whether the taint was set.\n */",
 "fb_const_fetch", T(Variant), S(0), "key", T(Variant), NULL, NULL, S(0), NULL, S(81920), "/**\n * (HipHop specific)\n *\n * Fetches a constant string from a special store that's compiled into the\n * executable. This is faster than apc_fetch(), which needs locking between\n * different threads. This store is immutable and loaded once with strings\n * that are never changed. Therefore, no locking is needed when accessed by\n * different threads.\n *\n * @key mixed The key for locating the value.\n *\n * @return mixed The value that was stored.\n */",
 "fb_output_compression", T(Boolean), S(0), "new_value", T(Boolean), NULL, NULL, S(0), NULL, S(81920), "/**\n * (HipHop specific)\n *\n * Toggles the compression status of HipHop output, if headers have\n * already been sent this may be ignored.\n *\n * @new_value bool The new value for the compression state.\n *\n * @return bool The old value.\n */",
 "fb_set_exit_callback", T(Void), S(0), "function", T(Variant), NULL, NULL, S(0), NULL, S(81920), "/**\n * (HipHop specific)\n *\n * Set a callback function that is called when php tries to exit.\n *\n * @function mixed The callback to invoke. An exception object will be\n * passed to the function\n */",
--- a/src/system/gen/sys/dynamic_table_func.no.cpp
+++ b/src/system/gen/sys/dynamic_table_func.no.cpp
 Variant ifa_mysql_select_db(void *extra, int count, INVOKE_FEW_ARGS_IMPL_ARGS) {
 }
 Variant i_fb_get_taint(void *extra, CArrRef params) {
   int count ATTRIBUTE_UNUSED = params.size();
-  if (UNLIKELY(count != 1)) return throw_wrong_arguments("fb_get_taint", count, 1, 1, 1);
+  if (UNLIKELY(count != 2)) return throw_wrong_arguments("fb_get_taint", count, 2, 2, 1);
   {
     ArrayData *ad(params.get());
     ssize_t pos = ad ? ad->iter_begin() : ArrayData::invalid_index;
     CVarRef arg0((ad->getValue(pos)));
-    return (x_fb_get_taint(arg0));
+    CVarRef arg1((ad->getValue(pos = ad->iter_advance(pos))));
+    return (x_fb_get_taint(arg0, arg1));
   }
 }
 Variant ifa_fb_get_taint(void *extra, int count, INVOKE_FEW_ARGS_IMPL_ARGS) {
-  if (UNLIKELY(count != 1)) return throw_wrong_arguments("fb_get_taint", count, 1, 1, 1);
+  if (UNLIKELY(count != 2)) return throw_wrong_arguments("fb_get_taint", count, 2, 2, 1);
   CVarRef arg0(a0);
-  return (x_fb_get_taint(arg0));
+  CVarRef arg1(a1);
+  return (x_fb_get_taint(arg0, arg1));
 }
 Variant i_use_soap_error_handler(void *extra, CArrRef params) {
   int count ATTRIBUTE_UNUSED = params.size();
 CallInfo ci_substr_replace((void*)&i_substr_replace, (void*)&ifa_substr_replace,
 CallInfo ci_xbox_get_thread_time((void*)&i_xbox_get_thread_time, (void*)&ifa_xbox_get_thread_time, 0, 0, 0x0000000000000000LL);
 CallInfo ci_xmlwriter_write_dtd((void*)&i_xmlwriter_write_dtd, (void*)&ifa_xmlwriter_write_dtd, 5, 0, 0x0000000000000000LL);
 CallInfo ci_mysql_select_db((void*)&i_mysql_select_db, (void*)&ifa_mysql_select_db, 2, 0, 0x0000000000000000LL);
-CallInfo ci_fb_get_taint((void*)&i_fb_get_taint, (void*)&ifa_fb_get_taint, 1, 0, 0x0000000000000000LL);
+CallInfo ci_fb_get_taint((void*)&i_fb_get_taint, (void*)&ifa_fb_get_taint, 2, 0, 0x0000000000000000LL);
 CallInfo ci_use_soap_error_handler((void*)&i_use_soap_error_handler, (void*)&ifa_use_soap_error_handler, 1, 0, 0x0000000000000000LL);
 CallInfo ci_debug_backtrace((void*)&i_debug_backtrace, (void*)&ifa_debug_backtrace, 1, 0, 0x0000000000000000LL);
 CallInfo ci_drawpathcurvetosmoothrelative((void*)&i_drawpathcurvetosmoothrelative, (void*)&ifa_drawpathcurvetosmoothrelative, 5, 0, 0x0000000000000000LL);
|
[HPHP Tainting] Static string detection
|
facebook/hhvm
|
3b5cbb710027c1410edb72e31259fca7ed41a6d3
|
2011-06-24T22:38:23Z
|
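The comment added to taint_data.h in the row above ("taint bits have the semantic of being propagated by OR; untainted then implies a semantic of propagation by AND") is the invariant behind the whole commit. A small sketch of that semantic outside HPHP (plain Python; the constants are copied from the diff, the helper function is hypothetical):

```python
TAINT_BIT_HTML    = 0x01
TAINT_BIT_SQL     = 0x02
TAINT_BIT_MUTATED = 0x04  # new in this commit: marks non-static strings

def taint_of_concat(*input_taints: int) -> int:
    # A derived string is tainted if ANY input was: bits propagate by OR.
    result = 0
    for t in input_taints:
        result |= t
    return result

combined = taint_of_concat(TAINT_BIT_HTML, TAINT_BIT_MUTATED)
assert combined & TAINT_BIT_HTML and combined & TAINT_BIT_MUTATED
# Equivalently, "clean" propagates by AND: a bit is clear in the result
# only if it was clear in every input.
assert taint_of_concat(0x00, 0x00) == 0x00
```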
--- a/src/objective-c/BoringSSL-GRPC.podspec
+++ b/src/objective-c/BoringSSL-GRPC.podspec
 Pod::Spec.new do |s|
   # for public headers and the other for implementation. Each gets its own `header_mappings_dir`,
   # making the linter happy.
   s.subspec 'Interface' do |ss|
-    ss.header_mappings_dir = 'include/openssl'
-    ss.source_files = 'include/openssl/*.h'
+    ss.header_mappings_dir = 'src/include/openssl'
+    ss.source_files = 'src/include/openssl/*.h'
   end
   s.subspec 'Implementation' do |ss|
-    ss.header_mappings_dir = '.'
-    ss.source_files = 'ssl/*.{h,c,cc}',
-                      'ssl/**/*.{h,c,cc}',
-                      'crypto/*.{h,c,cc}',
-                      'crypto/**/*.{h,c,cc}',
+    ss.header_mappings_dir = 'src'
+    ss.source_files = 'src/ssl/*.{h,c,cc}',
+                      'src/ssl/**/*.{h,c,cc}',
+                      'src/crypto/*.{h,c,cc}',
+                      'src/crypto/**/*.{h,c,cc}',
                       # We have to include fiat because spake25519 depends on it
-                      'third_party/fiat/*.{h,c,cc}',
+                      'src/third_party/fiat/*.{h,c,cc}',
                       # Include the err_data.c generated in prepare_command below
-                      'err_data.c'
+                      'src/err_data.c'

-    ss.private_header_files = 'ssl/*.h',
-                              'ssl/**/*.h',
-                              'crypto/*.h',
-                              'crypto/**/*.h',
-                              'third_party/fiat/*.h'
+    ss.private_header_files = 'src/ssl/*.h',
+                              'src/ssl/**/*.h',
+                              'src/crypto/*.h',
+                              'src/crypto/**/*.h',
+                              'src/third_party/fiat/*.h'
     # bcm.c includes other source files, creating duplicated symbols. Since it is not used, we
     # explicitly exclude it from the pod.
     # TODO(mxyan): Work with BoringSSL team to remove this hack.
-    ss.exclude_files = 'crypto/fipsmodule/bcm.c',
-                       '**/*_test.*',
-                       '**/test_*.*',
-                       '**/test/*.*'
+    ss.exclude_files = 'src/crypto/fipsmodule/bcm.c',
+                       'src/**/*_test.*',
+                       'src/**/test_*.*',
+                       'src/**/test/*.*'

     ss.dependency "#{s.name}/Interface", version
   end

   s.prepare_command = <<-END_OF_COMMAND
     # Add a module map and an umbrella header
-    cat > include/openssl/umbrella.h <<EOF
+    cat > src/include/openssl/umbrella.h <<EOF
       #include "ssl.h"
       #include "crypto.h"
       #include "aes.h"
 Pod::Spec.new do |s|
       #include "x509.h"
       #include "x509v3.h"
     EOF
-    cat > include/openssl/BoringSSL.modulemap <<EOF
+    cat > src/include/openssl/BoringSSL.modulemap <<EOF
       framework module openssl {
         umbrella header "umbrella.h"
         textual header "arm_arch.h"
 Pod::Spec.new do |s|
     # TODO(jtattermusch): avoid needing to run tools/buildgen/generate_projects.sh twice on update
     # TODO(jtattermusch): another pre-generated copy of err_data.c is under third_party/boringssl-with-bazel
     # investigate if we could use it.
-    cat > err_data.c <<EOF
+    cat > src/err_data.c <<EOF
       /* Copyright (c) 2015, Google Inc.
        *
        * Permission to use, copy, modify, and/or distribute this software for any
 Pod::Spec.new do |s|
     # SOME_BORINGSSL_SYMBOL" Such type of redefinition will cause "SOME_BORINGSSL_SYMBOL redefined"
     # error when using together with our prefix header. So the workaround in the below lines removes
     # all such type of #define directives.
-    sed -i '.back' '/^#define \\([A-Za-z0-9_]*\\) \\1/d' include/openssl/*.h
+    sed -i '.back' '/^#define \\([A-Za-z0-9_]*\\) \\1/d' src/include/openssl/*.h
     # Remove lines of the format below for the same reason above
     #   #define SOME_BORINGSSL_SYMBOL \
     #     SOME_BORINGSSL_SYMBOL
-    sed -i '.back' '/^#define.*\\\\$/{N;/^#define \\([A-Za-z0-9_]*\\) *\\\\\\n *\\1/d;}' include/openssl/*.h
+    sed -i '.back' '/^#define.*\\\\$/{N;/^#define \\([A-Za-z0-9_]*\\) *\\\\\\n *\\1/d;}' src/include/openssl/*.h

     # We are renaming openssl to openssl_grpc so that there is no conflict with openssl if it exists
     find . -type f \\( -path '*.h' -or -path '*.cc' -or -path '*.c' \\) -print0 | xargs -0 -L1 sed -E -i '.grpc_back' 's;#include <openssl/;#include <openssl_grpc/;g'
--- a/templates/src/objective-c/BoringSSL-GRPC.podspec.template
+++ b/templates/src/objective-c/BoringSSL-GRPC.podspec.template
   # for public headers and the other for implementation. Each gets its own `header_mappings_dir`,
   # making the linter happy.
   s.subspec 'Interface' do |ss|
-    ss.header_mappings_dir = 'include/openssl'
-    ss.source_files = 'include/openssl/*.h'
+    ss.header_mappings_dir = 'src/include/openssl'
+    ss.source_files = 'src/include/openssl/*.h'
   end
   s.subspec 'Implementation' do |ss|
-    ss.header_mappings_dir = '.'
-    ss.source_files = 'ssl/*.{h,c,cc}',
-                      'ssl/**/*.{h,c,cc}',
-                      'crypto/*.{h,c,cc}',
-                      'crypto/**/*.{h,c,cc}',
+    ss.header_mappings_dir = 'src'
+    ss.source_files = 'src/ssl/*.{h,c,cc}',
+                      'src/ssl/**/*.{h,c,cc}',
+                      'src/crypto/*.{h,c,cc}',
+                      'src/crypto/**/*.{h,c,cc}',
                       # We have to include fiat because spake25519 depends on it
-                      'third_party/fiat/*.{h,c,cc}',
+                      'src/third_party/fiat/*.{h,c,cc}',
                       # Include the err_data.c generated in prepare_command below
-                      'err_data.c'
+                      'src/err_data.c'

-    ss.private_header_files = 'ssl/*.h',
-                              'ssl/**/*.h',
-                              'crypto/*.h',
-                              'crypto/**/*.h',
-                              'third_party/fiat/*.h'
+    ss.private_header_files = 'src/ssl/*.h',
+                              'src/ssl/**/*.h',
+                              'src/crypto/*.h',
+                              'src/crypto/**/*.h',
+                              'src/third_party/fiat/*.h'
     # bcm.c includes other source files, creating duplicated symbols. Since it is not used, we
     # explicitly exclude it from the pod.
     # TODO(mxyan): Work with BoringSSL team to remove this hack.
-    ss.exclude_files = 'crypto/fipsmodule/bcm.c',
-                       '**/*_test.*',
-                       '**/test_*.*',
-                       '**/test/*.*'
+    ss.exclude_files = 'src/crypto/fipsmodule/bcm.c',
+                       'src/**/*_test.*',
+                       'src/**/test_*.*',
+                       'src/**/test/*.*'

     ss.dependency "#{s.name}/Interface", version
   end

   s.prepare_command = <<-END_OF_COMMAND
     # Add a module map and an umbrella header
-    cat > include/openssl/umbrella.h <<EOF
+    cat > src/include/openssl/umbrella.h <<EOF
       #include "ssl.h"
       #include "crypto.h"
       #include "aes.h"
       #include "x509.h"
       #include "x509v3.h"
     EOF
-    cat > include/openssl/BoringSSL.modulemap <<EOF
+    cat > src/include/openssl/BoringSSL.modulemap <<EOF
       framework module openssl {
         umbrella header "umbrella.h"
         textual header "arm_arch.h"
     # TODO(jtattermusch): avoid needing to run tools/buildgen/generate_projects.sh twice on update
     # TODO(jtattermusch): another pre-generated copy of err_data.c is under third_party/boringssl-with-bazel
     # investigate if we could use it.
-    cat > err_data.c <<EOF
+    cat > src/err_data.c <<EOF
     % for err_data in open("src/boringssl/err_data.c", "r").readlines():
     ${err_data.replace('\\0', '\\\\0')}\
     % endfor
     # SOME_BORINGSSL_SYMBOL" Such type of redefinition will cause "SOME_BORINGSSL_SYMBOL redefined"
     # error when using together with our prefix header. So the workaround in the below lines removes
     # all such type of #define directives.
-    sed -i '.back' '/^#define \\([A-Za-z0-9_]*\\) \\1/d' include/openssl/*.h
+    sed -i '.back' '/^#define \\([A-Za-z0-9_]*\\) \\1/d' src/include/openssl/*.h
     # Remove lines of the format below for the same reason above
     #   #define SOME_BORINGSSL_SYMBOL ${"\\"}
     #     SOME_BORINGSSL_SYMBOL
-    sed -i '.back' '/^#define.*\\\\$/{N;/^#define \\([A-Za-z0-9_]*\\) *\\\\\\n *\\1/d;}' include/openssl/*.h
+    sed -i '.back' '/^#define.*\\\\$/{N;/^#define \\([A-Za-z0-9_]*\\) *\\\\\\n *\\1/d;}' src/include/openssl/*.h

     # We are renaming openssl to openssl_grpc so that there is no conflict with openssl if it exists
     find . -type f \\( -path '*.h' -or -path '*.cc' -or -path '*.c' \\) -print0 | xargs -0 -L1 sed -E -i '.grpc_back' 's;#include <openssl/;#include <openssl_grpc/;g'
|
Fix boringssl podspec with new paths
|
grpc/grpc
|
e6865e01a96d149c0e1b4d72122b6807df75dd00
|
2020-01-18T08:08:11Z
|
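Editor's note: the TensorFlow record that follows ports the CMake build to Windows. It rewrites the CMake README into step-by-step Windows instructions, guards POSIX-only errno cases and platform headers behind `_WIN32`/`PLATFORM_WINDOWS`, excludes kernels that do not yet compile with MSVC, and adds `tensorflow/core/platform/windows/` implementations of `Env`, networking, and the file system. One small but representative hunk is the `setup.py` change, which selects the native extension filename per platform (`.pyd` on Windows, `.so` elsewhere). The sketch below is a minimal, self-contained restatement of that pattern; the `package_data` usage at the end is illustrative only, abstracted from the surrounding `setup()` call.

```python
import os

# On Windows (os.name == 'nt') CPython loads native extensions from .pyd
# files; on POSIX systems it loads .so files. The diff below applies
# exactly this switch when listing the pywrap extension in package_data.
if os.name == 'nt':
    EXTENSION_NAME = 'python/_pywrap_tensorflow.pyd'
else:
    EXTENSION_NAME = 'python/_pywrap_tensorflow.so'

# Illustrative usage mirroring the setup.py hunk:
package_data = {'tensorflow': [EXTENSION_NAME]}
print(package_data)
```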
mmm a / tensorflow / contrib / cmake / CMakeLists . txt <nl> ppp b / tensorflow / contrib / cmake / CMakeLists . txt <nl> mark_as_advanced ( DOWNLOAD_LOCATION ) <nl> set ( CMAKE_POSITION_INDEPENDENT_CODE ON ) <nl> add_definitions ( - DEIGEN_AVOID_STL_ARRAY ) <nl> if ( WIN32 ) <nl> - add_definitions ( - DNOMINMAX - D_WIN32_WINNT = 0x0A00 - DLANG_CXX11 - DCOMPILER_MSVC ) <nl> + add_definitions ( - DNOMINMAX - D_WIN32_WINNT = 0x0A00 - DLANG_CXX11 - DCOMPILER_MSVC - D__VERSION__ = \ " MSVC \ " ) <nl> + set ( CMAKE_CXX_FLAGS $ { CMAKE_CXX_FLAGS } / MP ) <nl> endif ( ) <nl> <nl> if ( " $ { CMAKE_CXX_COMPILER_ID } " STREQUAL " GNU " ) <nl> mmm a / tensorflow / contrib / cmake / README . md <nl> ppp b / tensorflow / contrib / cmake / README . md <nl> <nl> - This directory contains * CMake * files that can be used to build TensorFlow <nl> - core library . <nl> + TensorFlow CMake build <nl> + = = = = = = = = = = = = = = = = = = = = = = <nl> <nl> + This directory contains CMake files for building TensorFlow on Microsoft <nl> + Windows . [ CMake ] ( https : / / cmake . org ) is a cross - platform tool that can <nl> + generate build scripts for multiple build systems , including Microsoft <nl> + Visual Studio . <nl> + <nl> + * * N . B . * * We provide Linux build instructions primarily for the purpose of <nl> + testing the build . We recommend using the standard Bazel - based build on <nl> + Linux . <nl> <nl> Current Status <nl> mmmmmmmmmmmm - - <nl> <nl> - CMake build is not yet ready for general usage ! <nl> - <nl> - We are actively working on CMake support . Please help us improve it . <nl> - Pull requests are welcomed ! <nl> - <nl> - <nl> - Linux CMake + Docker ( very simple ) <nl> mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - <nl> - ` ` ` bash <nl> - git clone - - recursive https : / / github . com / tensorflow / tensorflow . git <nl> - cd tensorflow <nl> - tensorflow / tools / ci_build / ci_build . sh CPU tensorflow / tools / ci_build / builds / cmake . sh <nl> - ` ` ` <nl> - <nl> - That ' s it . Dependencies included . Otherwise read the rest of this readme . . . <nl> - <nl> - <nl> - Prerequisites <nl> - = = = = = = = = = = = = = <nl> - <nl> - You need to have [ CMake ] ( http : / / www . cmake . org ) and [ Git ] ( http : / / git - scm . com ) <nl> - installed on your computer before proceeding . <nl> - <nl> - Most of the instructions will be given to the * Сommand Prompt * , but the same <nl> - actions can be performed using appropriate GUI tools . <nl> - <nl> - <nl> - Environment Setup <nl> - = = = = = = = = = = = = = = = = = <nl> - <nl> - Open the appropriate * Command Prompt * from the * Start * menu . <nl> - <nl> - For example * VS2013 x64 Native Tools Command Prompt * : <nl> - <nl> - C : \ Program Files ( x86 ) \ Microsoft Visual Studio 12 . 0 \ VC \ bin \ amd64 > <nl> - <nl> - Change to your working directory : <nl> - <nl> - C : \ Program Files ( x86 ) \ Microsoft Visual Studio 12 . 0 \ VC \ bin \ amd64 > cd C : \ Path \ to <nl> - C : \ Path \ to > <nl> - <nl> - Where * C : \ Path \ to * is the path to your real working directory . 
<nl> - <nl> - Create a folder where TensorFlow headers / libraries / binaries will be installed <nl> - after they are built : <nl> - <nl> - C : \ Path \ to > mkdir install <nl> - <nl> - If * cmake * command is not available from * Command Prompt * , add it to system <nl> - * PATH * variable : <nl> + The CMake files in this directory can build the core TensorFlow runtime , an <nl> + example C + + binary , and a PIP package containing the runtime and Python <nl> + bindings . Currently , only CPU builds are supported , but we are working on <nl> + providing a GPU build as well . <nl> <nl> - C : \ Path \ to > set PATH = % PATH % ; C : \ Program Files ( x86 ) \ CMake \ bin <nl> + Note : Windows support is in an * * alpha * * state , and we welcome your feedback . <nl> <nl> - If * git * command is not available from * Command Prompt * , add it to system <nl> - * PATH * variable : <nl> + # # # Pre - requisites <nl> <nl> - C : \ Path \ to > set PATH = % PATH % ; C : \ Program Files \ Git \ cmd <nl> + * CMake version 3 . 1 or later <nl> <nl> - Good . Now you are ready to continue . <nl> + * [ Git ] ( http : / / git - scm . com ) <nl> <nl> - Getting Sources <nl> - = = = = = = = = = = = = = = = <nl> + * [ SWIG ] ( http : / / www . swig . org / download . html ) <nl> <nl> - You can get the latest stable source packages from the <nl> - [ releases ] ( https : / / github . com / tensorflow / tensorflow / releases ) page . <nl> - Or you can type : <nl> + * Additional pre - requisites for Microsoft Windows : <nl> + - Visual Studio 2015 <nl> + - Python 3 . 5 <nl> + - NumPy 1 . 11 . 0 or later <nl> <nl> - C : \ Path \ to > git clone - - recursive - b [ release_tag ] https : / / github . com / tensorflow / tensorflow . git <nl> + * Additional pre - requisites for Linux : <nl> + - Python 2 . 7 or later <nl> + - [ Docker ] ( https : / / www . docker . com / ) ( for automated testing ) <nl> + - NumPy 1 . 11 . 0 or later <nl> <nl> - Where * [ release_tag ] * is a git tag like * v0 . 6 . 0 * or a branch name like * master * <nl> - if you want to get the latest code . <nl> + # # # Known - good configurations <nl> <nl> - Go to the project folder : <nl> + * Microsoft Windows 10 <nl> + - Microsoft Visual Studio Enterprise 2015 with Visual C + + 2015 <nl> + - [ Anaconda 4 . 1 . 1 ( Python 3 . 5 64 - bit ) ] ( https : / / www . continuum . io / downloads ) <nl> + - [ Git for Windows version 2 . 9 . 2 . windows . 1 ] ( https : / / git - scm . com / download / win ) <nl> + - [ swigwin - 3 . 0 . 10 ] ( http : / / www . swig . org / download . html ) <nl> <nl> - C : \ Path \ to > cd tensorflow <nl> - C : \ Path \ to \ tensorflow > <nl> + * Ubuntu 14 . 04 <nl> + - Makefile generator <nl> + - Docker 1 . 9 . 1 ( for automated testing ) <nl> <nl> - Now go to * tensorflow \ contrib \ cmake * folder in TensorFlow ' s contrib sources : <nl> + # # # Current known limitations <nl> <nl> - C : \ Path \ to \ tensorflow > cd tensorflow \ contrib \ cmake <nl> - C : \ Path \ to \ tensorflow \ tensorflow \ contrib \ cmake > <nl> + * CPU support only <nl> <nl> - Good . Now you are ready to configure * CMake * . <nl> + - We are in the process of porting the GPU code in <nl> + ` tensorflow / stream_executor ` to build with CMake and work on non - POSIX <nl> + platforms . <nl> <nl> - CMake Configuration <nl> - = = = = = = = = = = = = = = = = = = = <nl> + * Additional limitations for the Windows build : <nl> <nl> - * CMake * supports a lot of different <nl> - [ generators ] ( http : / / www . cmake . 
org / cmake / help / latest / manual / cmake - generators . 7 . html ) <nl> - for various native build systems . We are only interested in <nl> - [ Makefile ] ( http : / / www . cmake . org / cmake / help / latest / manual / cmake - generators . 7 . html # makefile - generators ) <nl> - and <nl> - [ Visual Studio ] ( http : / / www . cmake . org / cmake / help / latest / manual / cmake - generators . 7 . html # visual - studio - generators ) <nl> - generators . <nl> + - The Python package supports * * Python 3 . 5 only * * , because that is the only <nl> + version for which standard Python binaries exist and those binaries are <nl> + compatible with the TensorFlow runtime . ( On Windows , the standard Python <nl> + binaries for versions earlier than 3 . 5 were compiled with older compilers <nl> + that do not have all of the features ( e . g . C + + 11 support ) needed to compile <nl> + TensorFlow . We welcome patches for making TensorFlow work with Python 2 . 7 <nl> + on Windows , but have not yet committed to supporting that configuration . ) <nl> <nl> - We will use shadow building to separate the temporary files from the TensorFlow <nl> - source code . <nl> + - The following Python APIs are not currently implemented : <nl> + * Loading custom op libraries via ` tf . load_op_library ( ) ` . <nl> + * Path manipulation functions ( such as ` tf . gfile . ListDirectory ( ) ` ) are not <nl> + functional . <nl> <nl> - Create a temporary * build * folder and change your working directory to it : <nl> + - The ` tf . contrib ` libraries are not currently included in the PIP package . <nl> <nl> - C : \ Path \ to \ tensorflow \ tensorflow \ contrib \ cmake > mkdir build & cd build <nl> - C : \ Path \ to \ tensorflow \ tensorflow \ contrib \ cmake \ build > <nl> + - The following operations are not currently implemented : <nl> + * ` DepthwiseConv2dNative ` <nl> + * ` Digamma ` <nl> + * ` Erf ` <nl> + * ` Erfc ` <nl> + * ` Igamma ` <nl> + * ` Igammac ` <nl> + * ` ImmutableConst ` <nl> + * ` Lgamma ` <nl> + * ` Polygamma ` <nl> + * ` SparseMatmul ` <nl> + * ` Zeta ` <nl> <nl> - The * Makefile * generator can build the project in only one configuration , so <nl> - you need to build a separate folder for each configuration . <nl> + - Google Cloud Storage support is not currently implemented . The GCS library <nl> + currently depends on ` libcurl ` and ` boringssl ` , and the Windows version <nl> + could use standard Windows APIs for making HTTP requests and cryptography <nl> + ( for OAuth ) . Contributions are welcome for this feature . <nl> <nl> - To start using a * Release * configuration : <nl> + We are actively working on improving CMake and Windows support , and addressing <nl> + these limitations . We would appreciate pull requests that implement missing <nl> + ops or APIs . <nl> <nl> - [ . . . ] \ contrib \ cmake \ build > mkdir release & cd release <nl> - [ . . . ] \ contrib \ cmake \ build \ release > cmake - G " NMake Makefiles " ^ <nl> - - DCMAKE_BUILD_TYPE = Release ^ <nl> - - DCMAKE_INSTALL_PREFIX = . . / . . / . . / . . / . . / . . / install ^ <nl> - . . / . . <nl> <nl> - It will generate * nmake * * Makefile * in current directory . <nl> - <nl> - To use * Debug * configuration : <nl> - <nl> - [ . . . ] \ contrib \ cmake \ build > mkdir debug & cd debug <nl> - [ . . . ] \ contrib \ cmake \ build \ debug > cmake - G " NMake Makefiles " ^ <nl> - - DCMAKE_BUILD_TYPE = Debug ^ <nl> - - DCMAKE_INSTALL_PREFIX = . . / . . / . . / . . / . . / . . / install ^ <nl> - . . / . . 
<nl> - <nl> - It will generate * nmake * * Makefile * in current directory . <nl> - <nl> - To create * Visual Studio * solution file : <nl> - <nl> - [ . . . ] \ contrib \ cmake \ build > mkdir solution & cd solution <nl> - [ . . . ] \ contrib \ cmake \ build \ solution > cmake - G " Visual Studio 12 2013 Win64 " ^ <nl> - - DCMAKE_INSTALL_PREFIX = . . / . . / . . / . . / . . / . . / install ^ <nl> - . . / . . <nl> - <nl> - It will generate * Visual Studio * solution file * tensorflow . sln * in current <nl> - directory . <nl> - <nl> - If the * gmock * directory does not exist , and / or you do not want to build <nl> - TensorFlow unit tests , you need to add * cmake * command argument <nl> - ` - Dtensorflow_BUILD_TESTS = OFF ` to disable testing . <nl> - <nl> - Compiling <nl> - = = = = = = = = = <nl> - <nl> - To compile tensorflow : <nl> - <nl> - [ . . . ] \ contrib \ cmake \ build \ release > nmake <nl> - <nl> - or <nl> - <nl> - [ . . . ] \ contrib \ cmake \ build \ debug > nmake <nl> - <nl> - And wait for the compilation to finish . <nl> - <nl> - If you prefer to use the IDE : <nl> - <nl> - * Open the generated tensorflow . sln file in Microsoft Visual Studio . <nl> - * Choose " Debug " or " Release " configuration as desired . <nl> - * From the Build menu , choose " Build Solution " . <nl> - <nl> - And wait for the compilation to finish . <nl> - <nl> - Testing <nl> - = = = = = = = <nl> - <nl> - To run unit - tests : <nl> + Step - by - step Windows build <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> <nl> - [ . . . ] \ contrib \ cmake \ build \ release > nmake check <nl> + 1 . Install the pre - requisites detailed above , and set up your environment . <nl> <nl> - or <nl> + * The following commands assume that you are using the Windows Command <nl> + Prompt ( ` cmd . exe ` ) . You will need to set up your environment to use the <nl> + appropriate toolchain , i . e . the 64 - bit tools . ( Some of the binary targets <nl> + we will build are too large for the 32 - bit tools , and they will fail with <nl> + out - of - memory errors . ) The typical command to do set up your <nl> + environment is : <nl> <nl> - [ . . . ] \ contrib \ cmake \ build \ debug > nmake check <nl> + ` ` ` <nl> + D : \ temp > " C : \ Program Files ( x86 ) \ Microsoft Visual Studio 14 . 0 \ VC \ bin \ amd64 \ vcvarsall . bat " <nl> + ` ` ` <nl> <nl> - You can also build project * check * from Visual Studio solution . <nl> - Yes , it may sound strange , but it works . <nl> + * We assume that ` cmake ` and ` git ` are installed and in your ` % PATH % ` . If <nl> + for example ` cmake ` is not in your path and it is installed in <nl> + ` C : \ Program Files ( x86 ) \ CMake \ bin \ cmake . exe ` , you can add this directory <nl> + to your ` % PATH % ` as follows : <nl> <nl> - You should see an output similar to : <nl> + ` ` ` <nl> + D : \ temp > set PATH = " % PATH % ; C : \ Program Files ( x86 ) \ CMake \ bin \ cmake . exe " <nl> + ` ` ` <nl> <nl> - Running main ( ) from gmock_main . cc <nl> - [ = = = = = = = = = = ] Running 1546 tests from 165 test cases . <nl> - <nl> - . . . <nl> - <nl> - [ = = = = = = = = = = ] 1546 tests from 165 test cases ran . ( 2529 ms total ) <nl> - [ PASSED ] 1546 tests . <nl> + 2 . Clone the TensorFlow repository and create a working directory for your <nl> + build : <nl> <nl> - To run specific tests : <nl> + ` ` ` <nl> + D : \ temp > git clone https : / / github . com / tensorflow / tensorflow . 
git <nl> + D : \ temp > cd tensorflow \ tensorflow \ contrib \ cmake <nl> + D : \ temp \ tensorflow \ tensorflow \ contrib \ cmake > mkdir build <nl> + D : \ temp \ tensorflow \ tensorflow \ contrib \ cmake > cd build <nl> + D : \ temp \ tensorflow \ tensorflow \ contrib \ cmake \ build > <nl> + ` ` ` <nl> <nl> - C : \ Path \ to \ tensorflow > tensorflow \ contrib \ cmake \ build \ release \ tests . exe ^ <nl> - - - gtest_filter = AnyTest * <nl> - Running main ( ) from gmock_main . cc <nl> - Note : Google Test filter = AnyTest * <nl> - [ = = = = = = = = = = ] Running 3 tests from 1 test case . <nl> - [ mmmmmmmmm - ] Global test environment set - up . <nl> - [ mmmmmmmmm - ] 3 tests from AnyTest <nl> - [ RUN ] AnyTest . TestPackAndUnpack <nl> - [ OK ] AnyTest . TestPackAndUnpack ( 0 ms ) <nl> - [ RUN ] AnyTest . TestPackAndUnpackAny <nl> - [ OK ] AnyTest . TestPackAndUnpackAny ( 0 ms ) <nl> - [ RUN ] AnyTest . TestIs <nl> - [ OK ] AnyTest . TestIs ( 0 ms ) <nl> - [ mmmmmmmmm - ] 3 tests from AnyTest ( 1 ms total ) <nl> - <nl> - [ mmmmmmmmm - ] Global test environment tear - down <nl> - [ = = = = = = = = = = ] 3 tests from 1 test case ran . ( 2 ms total ) <nl> - [ PASSED ] 3 tests . <nl> + 3 . Invoke CMake to create Visual Studio solution and project files . <nl> <nl> - Note that the tests must be run from the source folder . <nl> + * * N . B . * * This assumes that ` cmake . exe ` is in your ` % PATH % ` environment <nl> + variable . The other paths are for illustrative purposes only , and may <nl> + be different on your platform . The ` ^ ` character is a line continuation <nl> + and must be the last character on each line . <nl> <nl> - If all tests are passed , safely continue . <nl> + ` ` ` <nl> + D : \ . . . \ build > cmake . . - A x64 - DCMAKE_BUILD_TYPE = Release ^ <nl> + More ? - DSWIG_EXECUTABLE = C : / tools / swigwin - 3 . 0 . 10 / swig . exe ^ <nl> + More ? - DPYTHON_EXECUTABLE = C : / Users / % USERNAME % / AppData / Local / Continuum / Anaconda3 / python . exe ^ <nl> + More ? - DPYTHON_LIBRARIES = C : / Users / % USERNAME % / AppData / Local / Continuum / Anaconda3 / libs / python35 . lib <nl> + ` ` ` <nl> <nl> - Installing <nl> - = = = = = = = = = = <nl> + Note that the ` - DCMAKE_BUILD_TYPE = Release ` flag must match the build <nl> + configuration that you choose when invoking ` msbuild ` . The known - good <nl> + values are ` Release ` and ` RelWithDebInfo ` . The ` Debug ` build type is <nl> + not currently supported , because it relies on a ` Debug ` library for <nl> + Python ( ` python35d . lib ` ) that is not distributed by default . <nl> <nl> - To install TensorFlow to the specified * install * folder : <nl> + There are various options that can be specified when generating the <nl> + solution and project files : <nl> <nl> - [ . . . ] \ contrib \ cmake \ build \ release > nmake install <nl> + * ` - DCMAKE_BUILD_TYPE = ( Release | RelWithDebInfo ) ` : Note that the <nl> + ` CMAKE_BUILD_TYPE ` option must match the build configuration that you <nl> + choose when invoking MSBuild in step 4 . The known - good values are <nl> + ` Release ` and ` RelWithDebInfo ` . The ` Debug ` build type is not currently <nl> + supported , because it relies on a ` Debug ` library for Python <nl> + ( ` python35d . lib ` ) that is not distributed by default . <nl> <nl> - or <nl> + * ` - Dtensorflow_BUILD_ALL_KERNELS = ( ON | OFF ) ` . Defaults to ` ON ` . You can <nl> + build a small subset of the kernels for a faster build by setting this <nl> + option to ` OFF ` . <nl> <nl> - [ . . . 
] \ contrib \ cmake \ build \ debug > nmake install <nl> + * ` - Dtensorflow_BUILD_CC_EXAMPLE = ( ON | OFF ) ` . Defaults to ` ON ` . Generate <nl> + project files for a simple C + + <nl> + [ example training program ] ( https : / / github . com / tensorflow / tensorflow / blob / master / tensorflow / cc / tutorials / example_trainer . cc ) . <nl> <nl> - You can also build project * INSTALL * from Visual Studio solution . <nl> - It sounds not so strange and it works . <nl> + * ` - Dtensorflow_BUILD_PYTHON_BINDINGS = ( ON | OFF ) ` . Defaults to ` ON ` . Generate <nl> + project files for building a PIP package containing the TensorFlow runtime <nl> + and its Python bindings . <nl> <nl> - This will create the following folders under the * install * location : <nl> - * bin - that contains tensorflow binaries ; <nl> - * include - that contains C + + headers and TensorFlow * . proto files ; <nl> - * lib - that contains linking libraries and * CMake * configuration files for <nl> - * tensorflow * package . <nl> + * ` - Dtensorflow_ENABLE_GRPC_SUPPORT = ( ON | OFF ) ` . Defaults to ` ON ` . Include <nl> + gRPC support and the distributed client and server code in the TensorFlow <nl> + runtime . <nl> <nl> - Now you can if needed : <nl> - * Copy the contents of the include directory to wherever you want to put <nl> - headers . <nl> - * Copy binaries wherever you put build tools ( probably somewhere in your <nl> - PATH ) . <nl> - * Copy linking libraries libtensorflow [ d ] . lib wherever you put libraries . <nl> + * ` - Dtensorflow_ENABLE_SSL_SUPPORT = ( ON | OFF ) ` . Defaults to ` OFF ` . Include <nl> + SSL support ( for making secure HTTP requests ) in the TensorFlow runtime . <nl> + This support is incomplete , and will be used for Google Cloud Storage <nl> + support . <nl> <nl> - To avoid conflicts between the MSVC debug and release runtime libraries , when <nl> - compiling a debug build of your application , you may need to link against a <nl> - debug build of libtensorflowd . lib with " d " postfix . Similarly , release builds <nl> - should link against release libtensorflow . lib library . <nl> + 4 . Invoke MSBuild to build TensorFlow . <nl> <nl> - DLLs vs . static linking <nl> - = = = = = = = = = = = = = = = = = = = = = = = <nl> + To build the C + + example program , which will be created as a ` . exe ` <nl> + executable in the subdirectory ` . \ Release ` : <nl> <nl> - Static linking is now the default for the TensorFlow Buffer libraries . Due to <nl> - issues with Win32 ' s use of a separate heap for each DLL , as well as binary <nl> - compatibility issues between different versions of MSVC ' s STL library , it is <nl> - recommended that you use static linkage only . However , it is possible to <nl> - build libtensorflow as DLLs if you really want . To do this , do the following : <nl> + ` ` ` <nl> + D : \ . . . \ build > MSBuild / p : Configuration = Release tf_tutorials_example_trainer . vcxproj <nl> + D : \ . . . \ build > Release \ tf_tutorials_example_trainer . exe <nl> + ` ` ` <nl> <nl> - * Add an additional flag ` - Dtensorflow_BUILD_SHARED_LIBS = ON ` when invoking <nl> - cmake <nl> - * Follow the same steps as described in the above section . <nl> - * When compiling your project , make sure to ` # define TENSORFLOW_USE_DLLS ` . <nl> + To build the PIP package , which will be created as a ` . whl ` file in the <nl> + subdirectory ` . \ tf_python \ dist ` : <nl> + <nl> + ` ` ` <nl> + D : \ . . . \ build > MSBuild / p : Configuration = Release tf_python_build_pip_package . 
vcxproj <nl> + ` ` ` <nl> <nl> - When distributing your software to end users , we strongly recommend that you <nl> - do NOT install libtensorflow . dll to any shared location . <nl> - Instead , keep these libraries next to your binaries , in your application ' s <nl> - own install directory . C + + makes it very difficult to maintain binary <nl> - compatibility between releases , so it is likely that future versions of these <nl> - libraries will * not * be usable as drop - in replacements . <nl> <nl> - If your project is itself a DLL intended for use by third - party software , we <nl> - recommend that you do NOT expose TensorFlow objects in your library ' s <nl> - public interface , and that you statically link them into your library . <nl> - <nl> - Notes on Compiler Warnings <nl> - = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + Linux Continuous Integration build <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + <nl> + This build requires [ Docker ] ( https : / / www . docker . com / ) to be installed on the <nl> + local machine . <nl> <nl> - The following warnings have been disabled while building the tensorflow <nl> - libraries and binaries . You may have to disable some of them in your own <nl> - project as well , or live with them . <nl> + ` ` ` bash <nl> + $ git clone - - recursive https : / / github . com / tensorflow / tensorflow . git <nl> + $ cd tensorflow <nl> + $ tensorflow / tools / ci_build / ci_build . sh CMAKE tensorflow / tools / ci_build / builds / cmake . sh <nl> + ` ` ` <nl> <nl> - * [ TODO ] <nl> + That ' s it . Dependencies included . <nl> mmm a / tensorflow / contrib / cmake / external / protobuf . cmake <nl> ppp b / tensorflow / contrib / cmake / external / protobuf . cmake <nl> <nl> include ( ExternalProject ) <nl> <nl> set ( PROTOBUF_INCLUDE_DIRS $ { CMAKE_CURRENT_BINARY_DIR } / protobuf / src / protobuf / src ) <nl> - set ( PROTOBUF_URL https : / / github . com / google / protobuf / releases / download / v3 . 1 . 0 / protobuf - cpp - 3 . 1 . 0 . zip ) <nl> - set ( PROTOBUF_HASH SHA256 = 0c18ccc99e921c407f359047f9b56cca196c3ab36eed79e5979df6c1f9e623b7 ) <nl> + set ( PROTOBUF_URL https : / / github . com / mrry / protobuf . git ) # Includes MSVC fix . <nl> + set ( PROTOBUF_TAG 1d2c7b6c7376f396c8c7dd9b6afd2d4f83f3cb05 ) <nl> <nl> if ( WIN32 ) <nl> set ( protobuf_STATIC_LIBRARIES $ { CMAKE_CURRENT_BINARY_DIR } / protobuf / src / protobuf / $ { CMAKE_BUILD_TYPE } / libprotobuf . lib ) <nl> set ( PROTOBUF_PROTOC_EXECUTABLE $ { CMAKE_CURRENT_BINARY_DIR } / protobuf / src / protobuf / $ { CMAKE_BUILD_TYPE } / protoc . exe ) <nl> + set ( PROTOBUF_ADDITIONAL_CMAKE_OPTIONS - Dprotobuf_MSVC_STATIC_RUNTIME : BOOL = OFF - A x64 ) <nl> else ( ) <nl> set ( protobuf_STATIC_LIBRARIES $ { CMAKE_CURRENT_BINARY_DIR } / protobuf / src / protobuf / libprotobuf . 
a ) <nl> set ( PROTOBUF_PROTOC_EXECUTABLE $ { CMAKE_CURRENT_BINARY_DIR } / protobuf / src / protobuf / protoc ) <nl> endif ( ) <nl> <nl> ExternalProject_Add ( protobuf <nl> PREFIX protobuf <nl> - URL $ { PROTOBUF_URL } <nl> - URL_HASH $ { PROTOBUF_HASH } <nl> + GIT_REPOSITORY $ { PROTOBUF_URL } <nl> + GIT_TAG $ { PROTOBUF_TAG } <nl> DOWNLOAD_DIR " $ { DOWNLOAD_LOCATION } " <nl> BUILD_IN_SOURCE 1 <nl> SOURCE_DIR $ { CMAKE_BINARY_DIR } / protobuf / src / protobuf <nl> - CONFIGURE_COMMAND $ { CMAKE_COMMAND } cmake / - Dprotobuf_BUILD_TESTS = OFF - DCMAKE_POSITION_INDEPENDENT_CODE = ON - Dprotobuf_MSVC_STATIC_RUNTIME : BOOL = OFF <nl> + CONFIGURE_COMMAND $ { CMAKE_COMMAND } cmake / <nl> + - Dprotobuf_BUILD_TESTS = OFF <nl> + - DCMAKE_POSITION_INDEPENDENT_CODE = ON <nl> + $ { PROTOBUF_ADDITIONAL_CMAKE_OPTIONS } <nl> INSTALL_COMMAND " " <nl> CMAKE_CACHE_ARGS <nl> - DCMAKE_BUILD_TYPE : STRING = Release <nl> - DCMAKE_VERBOSE_MAKEFILE : BOOL = OFF <nl> - - Dprotobuf_MSVC_STATIC_RUNTIME : BOOL = OFF <nl> - DCMAKE_POSITION_INDEPENDENT_CODE : BOOL = ON <nl> ) <nl> mmm a / tensorflow / contrib / cmake / setup . py <nl> ppp b / tensorflow / contrib / cmake / setup . py <nl> def find_files ( pattern , root ) : <nl> <nl> <nl> matches = [ ' . . / ' + x for x in find_files ( ' * ' , ' external ' ) if ' . py ' not in x ] <nl> + if os . name = = ' nt ' : <nl> + EXTENSION_NAME = ' python / _pywrap_tensorflow . pyd ' <nl> + else : <nl> + EXTENSION_NAME = ' python / _pywrap_tensorflow . so ' <nl> <nl> <nl> # TODO ( mrry ) : Add support for development headers . <nl> def find_files ( pattern , root ) : <nl> # Add in any packaged data . <nl> include_package_data = True , <nl> package_data = { <nl> - ' tensorflow ' : [ ' python / _pywrap_tensorflow . so ' , <nl> - ] + matches , <nl> + ' tensorflow ' : [ EXTENSION_NAME ] + matches , <nl> } , <nl> zip_safe = False , <nl> distclass = BinaryDistribution , <nl> mmm a / tensorflow / contrib / cmake / tf_core_framework . cmake <nl> ppp b / tensorflow / contrib / cmake / tf_core_framework . cmake <nl> RELATIVE_PROTOBUF_TEXT_GENERATE_CPP ( PROTO_TEXT_SRCS PROTO_TEXT_HDRS <nl> ) <nl> <nl> add_library ( tf_protos_cc $ { PROTO_SRCS } $ { PROTO_HDRS } ) <nl> - target_link_libraries ( tf_protos_cc PUBLIC <nl> - $ { PROTOBUF_LIBRARIES } <nl> - ) <nl> <nl> # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> # tf_core_lib library <nl> if ( UNIX ) <nl> list ( APPEND tf_core_lib_srcs $ { tf_core_platform_posix_srcs } ) <nl> endif ( UNIX ) <nl> <nl> + if ( WIN32 ) <nl> + file ( GLOB tf_core_platform_windows_srcs <nl> + " $ { tensorflow_source_dir } / tensorflow / core / platform / windows / * . h " <nl> + " $ { tensorflow_source_dir } / tensorflow / core / platform / windows / * . cc " <nl> + " $ { tensorflow_source_dir } / tensorflow / core / platform / posix / error . h " <nl> + " $ { tensorflow_source_dir } / tensorflow / core / platform / posix / error . cc " <nl> + ) <nl> + list ( APPEND tf_core_lib_srcs $ { tf_core_platform_windows_srcs } ) <nl> + endif ( WIN32 ) <nl> + <nl> if ( tensorflow_ENABLE_SSL_SUPPORT ) <nl> # Cloud libraries require boringssl . <nl> file ( GLOB tf_core_platform_cloud_srcs <nl> file ( GLOB_RECURSE tf_core_lib_test_srcs <nl> ) <nl> list ( REMOVE_ITEM tf_core_lib_srcs $ { tf_core_lib_test_srcs } ) <nl> <nl> - if ( NOT tensorflow_ENABLE_SSL_SUPPORT ) <nl> - file ( GLOB_RECURSE tf_core_lib_cloud_srcs <nl> - " $ { tensorflow_source_dir } / tensorflow / core / platform / cloud / * . 
h " <nl> - " $ { tensorflow_source_dir } / tensorflow / core / platform / cloud / * . cc " <nl> - ) <nl> - list ( REMOVE_ITEM tf_core_lib_srcs $ { tf_core_lib_cloud_srcs } ) <nl> - endif ( ) <nl> - <nl> add_library ( tf_core_lib OBJECT $ { tf_core_lib_srcs } ) <nl> add_dependencies ( tf_core_lib $ { tensorflow_EXTERNAL_DEPENDENCIES } tf_protos_cc ) <nl> <nl> file ( GLOB_RECURSE tf_core_framework_test_srcs <nl> " $ { tensorflow_source_dir } / tensorflow / core / util / * main . cc " <nl> ) <nl> <nl> - list ( REMOVE_ITEM tf_core_framework_srcs $ { tf_core_framework_test_srcs } ) <nl> + list ( REMOVE_ITEM tf_core_framework_srcs $ { tf_core_framework_test_srcs } <nl> + " $ { tensorflow_source_dir } / tensorflow / core / util / memmapped_file_system . cc " <nl> + " $ { tensorflow_source_dir } / tensorflow / core / util / memmapped_file_system . h " <nl> + " $ { tensorflow_source_dir } / tensorflow / core / util / memmapped_file_system_writer . cc " <nl> + ) <nl> <nl> add_library ( tf_core_framework OBJECT <nl> $ { tf_core_framework_srcs } <nl> mmm a / tensorflow / contrib / cmake / tf_core_kernels . cmake <nl> ppp b / tensorflow / contrib / cmake / tf_core_kernels . cmake <nl> file ( GLOB_RECURSE tf_core_kernels_exclude_srcs <nl> ) <nl> list ( REMOVE_ITEM tf_core_kernels_srcs $ { tf_core_kernels_exclude_srcs } ) <nl> <nl> + if ( WIN32 ) <nl> + file ( GLOB_RECURSE tf_core_kernels_windows_exclude_srcs <nl> + # Not currently working on Windows : <nl> + " $ { tensorflow_source_dir } / tensorflow / core / kernels / depthwise_conv_op . cc " # Cannot find symbol : tensorflow : : LaunchConv2DOp < struct Eigen : : ThreadPoolDevice , double > : : launch ( . . . ) . <nl> + " $ { tensorflow_source_dir } / tensorflow / core / kernels / fact_op . cc " <nl> + " $ { tensorflow_source_dir } / tensorflow / core / kernels / immutable_constant_op . cc " <nl> + " $ { tensorflow_source_dir } / tensorflow / core / kernels / immutable_constant_op . h " <nl> + " $ { tensorflow_source_dir } / tensorflow / core / kernels / sparse_matmul_op . cc " <nl> + " $ { tensorflow_source_dir } / tensorflow / core / kernels / sparse_matmul_op . h " <nl> + ) <nl> + list ( REMOVE_ITEM tf_core_kernels_srcs $ { tf_core_kernels_windows_exclude_srcs } ) <nl> + endif ( WIN32 ) <nl> + <nl> add_library ( tf_core_kernels OBJECT $ { tf_core_kernels_srcs } ) <nl> <nl> + if ( WIN32 ) <nl> + target_compile_options ( tf_core_kernels PRIVATE / MP ) <nl> + endif ( ) <nl> + <nl> add_dependencies ( tf_core_kernels tf_core_cpu ) <nl> mmm a / tensorflow / contrib / cmake / tf_python . cmake <nl> ppp b / tensorflow / contrib / cmake / tf_python . cmake <nl> add_library ( pywrap_tensorflow SHARED <nl> " $ { tensorflow_source_dir } / tensorflow / python / lib / io / py_record_reader . cc " <nl> " $ { tensorflow_source_dir } / tensorflow / python / lib / io / py_record_writer . h " <nl> " $ { tensorflow_source_dir } / tensorflow / python / lib / io / py_record_writer . cc " <nl> + " $ { tensorflow_source_dir } / tensorflow / python / util / kernel_registry . h " <nl> + " $ { tensorflow_source_dir } / tensorflow / python / util / kernel_registry . cc " <nl> " $ { tensorflow_source_dir } / tensorflow / c / c_api . cc " <nl> " $ { tensorflow_source_dir } / tensorflow / c / c_api . h " <nl> " $ { tensorflow_source_dir } / tensorflow / c / checkpoint_reader . 
cc " <nl> mmm a / tensorflow / contrib / makefile / Makefile <nl> ppp b / tensorflow / contrib / makefile / Makefile <nl> $ ( wildcard tensorflow / core / platform / google / * / * ) \ <nl> $ ( wildcard tensorflow / core / platform / jpeg . * ) \ <nl> $ ( wildcard tensorflow / core / platform / png . * ) \ <nl> $ ( wildcard tensorflow / core / platform / stream_executor . * ) \ <nl> + $ ( wildcard tensorflow / core / platform / windows / * ) \ <nl> $ ( wildcard tensorflow / core / user_ops / * . cu . cc ) \ <nl> $ ( wildcard tensorflow / core / common_runtime / gpu / * ) \ <nl> $ ( wildcard tensorflow / core / common_runtime / gpu_device_factory . * ) <nl> mmm a / tensorflow / core / BUILD <nl> ppp b / tensorflow / core / BUILD <nl> filegroup ( <nl> " platform / png . * " , <nl> " platform / gif . * " , <nl> " platform / stream_executor . * " , <nl> + " platform / windows / * * / * " , <nl> " user_ops / * * / * . cu . cc " , <nl> " common_runtime / gpu / * * / * " , <nl> " common_runtime / gpu_device_factory . * " , <nl> mmm a / tensorflow / core / framework / numeric_types . h <nl> ppp b / tensorflow / core / framework / numeric_types . h <nl> EIGEN_STRONG_INLINE bool operator = = ( const tensorflow : : bfloat16 a , <nl> <nl> } / / namespace Eigen <nl> <nl> + # ifdef COMPILER_MSVC <nl> + namespace std { <nl> + template < > <nl> + struct hash < Eigen : : half > { <nl> + std : : size_t operator ( ) ( const Eigen : : half & a ) const { <nl> + return static_cast < std : : size_t > ( a . x ) ; <nl> + } <nl> + } ; <nl> + } / / namespace std <nl> + # endif / / COMPILER_MSVC <nl> + <nl> # endif / / TENSORFLOW_FRAMEWORK_NUMERIC_TYPES_H_ <nl> mmm a / tensorflow / core / lib / random / random_distributions . h <nl> ppp b / tensorflow / core / lib / random / random_distributions . h <nl> limitations under the License . <nl> <nl> # define _USE_MATH_DEFINES <nl> # include < cmath > <nl> + # include < math . h > <nl> # undef _USE_MATH_DEFINES <nl> <nl> - # include < math . h > <nl> # include < string . h > <nl> # include < algorithm > <nl> <nl> mmm a / tensorflow / core / platform / default / logging . h <nl> ppp b / tensorflow / core / platform / default / logging . h <nl> limitations under the License . <nl> # include " tensorflow / core / platform / macros . h " <nl> # include " tensorflow / core / platform / types . h " <nl> <nl> + / / TODO ( mrry ) : Prevent this Windows . h # define from leaking out of our headers . <nl> + # undef ERROR <nl> + <nl> namespace tensorflow { <nl> const int INFO = 0 ; / / base_logging : : INFO ; <nl> const int WARNING = 1 ; / / base_logging : : WARNING ; <nl> mmm a / tensorflow / core / platform / dynamic_annotations . h <nl> ppp b / tensorflow / core / platform / dynamic_annotations . h <nl> limitations under the License . <nl> # if defined ( PLATFORM_GOOGLE ) <nl> # include " tensorflow / core / platform / google / build_config / dynamic_annotations . h " <nl> # elif defined ( PLATFORM_POSIX ) | | defined ( PLATFORM_POSIX_ANDROID ) | | \ <nl> - defined ( PLATFORM_GOOGLE_ANDROID ) <nl> + defined ( PLATFORM_GOOGLE_ANDROID ) | | defined ( PLATFORM_WINDOWS ) <nl> # include " tensorflow / core / platform / default / dynamic_annotations . h " <nl> # else <nl> # error Define the appropriate PLATFORM_ < foo > macro for this platform <nl> mmm a / tensorflow / core / platform / file_system . h <nl> ppp b / tensorflow / core / platform / file_system . h <nl> limitations under the License . <nl> # include " tensorflow / core / lib / core / stringpiece . 
h " <nl> # include " tensorflow / core / platform / file_statistics . h " <nl> # include " tensorflow / core / platform / macros . h " <nl> + # include " tensorflow / core / platform / platform . h " <nl> # include " tensorflow / core / platform / protobuf . h " <nl> # include " tensorflow / core / platform / types . h " <nl> <nl> + # ifdef PLATFORM_WINDOWS <nl> + # undef DeleteFile <nl> + # endif <nl> + <nl> namespace tensorflow { <nl> <nl> class RandomAccessFile ; <nl> mmm a / tensorflow / core / platform / gif . h <nl> ppp b / tensorflow / core / platform / gif . h <nl> limitations under the License . <nl> <nl> # if defined ( PLATFORM_GOOGLE ) <nl> # include " tensorflow / core / platform / google / build_config / gif . h " <nl> - # elif defined ( PLATFORM_POSIX ) & & ! defined ( IS_MOBILE_PLATFORM ) <nl> + # elif ( defined ( PLATFORM_POSIX ) & & ! defined ( IS_MOBILE_PLATFORM ) ) | | defined ( PLATFORM_WINDOWS ) <nl> # include < gif_lib . h > <nl> # else <nl> # error Define the appropriate PLATFORM_ < foo > macro for this platform <nl> mmm a / tensorflow / core / platform / jpeg . h <nl> ppp b / tensorflow / core / platform / jpeg . h <nl> limitations under the License . <nl> <nl> # if defined ( PLATFORM_GOOGLE ) <nl> # include " tensorflow / core / platform / google / build_config / jpeg . h " <nl> - # elif defined ( PLATFORM_POSIX ) & & ! defined ( IS_MOBILE_PLATFORM ) <nl> + # elif ( defined ( PLATFORM_POSIX ) & & ! defined ( IS_MOBILE_PLATFORM ) ) | | defined ( PLATFORM_WINDOWS ) <nl> # include < stddef . h > <nl> # include < stdio . h > <nl> # include < stdlib . h > <nl> mmm a / tensorflow / core / platform / macros . h <nl> ppp b / tensorflow / core / platform / macros . h <nl> limitations under the License . <nl> __attribute__ ( ( __format__ ( __printf__ , string_index , first_to_check ) ) ) <nl> # define TF_SCANF_ATTRIBUTE ( string_index , first_to_check ) \ <nl> __attribute__ ( ( __format__ ( __scanf__ , string_index , first_to_check ) ) ) <nl> - <nl> + # elif defined ( COMPILER_MSVC ) <nl> + / / Non - GCC equivalents <nl> + # define TF_ATTRIBUTE_NORETURN __declspec ( noreturn ) <nl> + # define TF_ATTRIBUTE_NOINLINE <nl> + # define TF_ATTRIBUTE_UNUSED <nl> + # define TF_ATTRIBUTE_COLD <nl> + # define TF_MUST_USE_RESULT <nl> + # define TF_PACKED <nl> + # define TF_PRINTF_ATTRIBUTE ( string_index , first_to_check ) <nl> + # define TF_SCANF_ATTRIBUTE ( string_index , first_to_check ) <nl> # else <nl> / / Non - GCC equivalents <nl> # define TF_ATTRIBUTE_NORETURN <nl> mmm a / tensorflow / core / platform / mutex . h <nl> ppp b / tensorflow / core / platform / mutex . h <nl> enum ConditionResult { kCond_Timeout , kCond_MaybeNotified } ; <nl> # if defined ( PLATFORM_GOOGLE ) <nl> # include " tensorflow / core / platform / google / mutex . h " <nl> # elif defined ( PLATFORM_POSIX ) | | defined ( PLATFORM_POSIX_ANDROID ) | | \ <nl> - defined ( PLATFORM_GOOGLE_ANDROID ) <nl> + defined ( PLATFORM_GOOGLE_ANDROID ) | | defined ( PLATFORM_WINDOWS ) <nl> # include " tensorflow / core / platform / default / mutex . h " <nl> # else <nl> # error Define the appropriate PLATFORM_ < foo > macro for this platform <nl> mmm a / tensorflow / core / platform / notification . h <nl> ppp b / tensorflow / core / platform / notification . h <nl> limitations under the License . <nl> # if defined ( PLATFORM_GOOGLE ) <nl> # include " tensorflow / core / platform / google / notification . 
h " <nl> # elif defined ( PLATFORM_POSIX ) | | defined ( PLATFORM_POSIX_ANDROID ) | | \ <nl> - defined ( PLATFORM_GOOGLE_ANDROID ) <nl> + defined ( PLATFORM_GOOGLE_ANDROID ) | | defined ( PLATFORM_WINDOWS ) <nl> # include " tensorflow / core / platform / default / notification . h " <nl> # else <nl> # error Define the appropriate PLATFORM_ < foo > macro for this platform <nl> mmm a / tensorflow / core / platform / platform . h <nl> ppp b / tensorflow / core / platform / platform . h <nl> limitations under the License . <nl> <nl> # elif defined ( __APPLE__ ) <nl> # define PLATFORM_POSIX <nl> - <nl> # include " TargetConditionals . h " <nl> # if TARGET_IPHONE_SIMULATOR <nl> # define IS_MOBILE_PLATFORM <nl> limitations under the License . <nl> # define IS_MOBILE_PLATFORM <nl> # endif <nl> <nl> + # elif defined ( _WIN32 ) <nl> + # define PLATFORM_WINDOWS <nl> + <nl> # elif defined ( __arm__ ) <nl> # define PLATFORM_POSIX <nl> <nl> mmm a / tensorflow / core / platform / png . h <nl> ppp b / tensorflow / core / platform / png . h <nl> limitations under the License . <nl> <nl> # if defined ( PLATFORM_GOOGLE ) <nl> # include " tensorflow / core / platform / google / build_config / png . h " <nl> - # elif defined ( PLATFORM_POSIX ) & & ! defined ( IS_MOBILE_PLATFORM ) <nl> + # elif ( defined ( PLATFORM_POSIX ) & & ! defined ( IS_MOBILE_PLATFORM ) ) | | defined ( PLATFORM_WINDOWS ) <nl> # include < png . h > <nl> # else <nl> # error Define the appropriate PLATFORM_ < foo > macro for this platform <nl> mmm a / tensorflow / core / platform / posix / error . cc <nl> ppp b / tensorflow / core / platform / posix / error . cc <nl> error : : Code ErrnoToCode ( int err_number ) { <nl> case EBUSY : / / Device or resource busy <nl> case ECHILD : / / No child processes <nl> case EISCONN : / / Socket is connected <nl> + # if ! defined ( _WIN32 ) <nl> case ENOTBLK : / / Block device required <nl> + # endif <nl> case ENOTCONN : / / The socket is not connected <nl> case EPIPE : / / Broken pipe <nl> + # if ! defined ( _WIN32 ) <nl> case ESHUTDOWN : / / Cannot send after transport endpoint shutdown <nl> + # endif <nl> case ETXTBSY : / / Text file busy <nl> code = error : : FAILED_PRECONDITION ; <nl> break ; <nl> case ENOSPC : / / No space left on device <nl> + # if ! defined ( _WIN32 ) <nl> case EDQUOT : / / Disk quota exceeded <nl> + # endif <nl> case EMFILE : / / Too many open files <nl> case EMLINK : / / Too many links <nl> case ENFILE : / / Too many open files in system <nl> error : : Code ErrnoToCode ( int err_number ) { <nl> case ENODATA : / / No message is available on the STREAM read queue <nl> case ENOMEM : / / Not enough space <nl> case ENOSR : / / No STREAM resources <nl> + # if ! defined ( _WIN32 ) <nl> case EUSERS : / / Too many users <nl> + # endif <nl> code = error : : RESOURCE_EXHAUSTED ; <nl> break ; <nl> case EFBIG : / / File too large <nl> error : : Code ErrnoToCode ( int err_number ) { <nl> case ENOSYS : / / Function not implemented <nl> case ENOTSUP : / / Operation not supported <nl> case EAFNOSUPPORT : / / Address family not supported <nl> + # if ! defined ( _WIN32 ) <nl> case EPFNOSUPPORT : / / Protocol family not supported <nl> + # endif <nl> case EPROTONOSUPPORT : / / Protocol not supported <nl> + # if ! 
defined ( _WIN32 ) <nl> case ESOCKTNOSUPPORT : / / Socket type not supported <nl> + # endif <nl> case EXDEV : / / Improper link <nl> code = error : : UNIMPLEMENTED ; <nl> break ; <nl> error : : Code ErrnoToCode ( int err_number ) { <nl> case ECONNABORTED : / / Connection aborted <nl> case ECONNRESET : / / Connection reset <nl> case EINTR : / / Interrupted function call <nl> + # if ! defined ( _WIN32 ) <nl> case EHOSTDOWN : / / Host is down <nl> + # endif <nl> case EHOSTUNREACH : / / Host is unreachable <nl> case ENETDOWN : / / Network is down <nl> case ENETRESET : / / Connection aborted by network <nl> case ENETUNREACH : / / Network unreachable <nl> case ENOLCK : / / No locks available <nl> case ENOLINK : / / Link has been severed <nl> - # if ! defined ( __APPLE__ ) <nl> + # if ! ( defined ( __APPLE__ ) | | defined ( _WIN32 ) ) <nl> case ENONET : / / Machine is not on the network <nl> # endif <nl> code = error : : UNAVAILABLE ; <nl> break ; <nl> case EDEADLK : / / Resource deadlock avoided <nl> + # if ! defined ( _WIN32 ) <nl> case ESTALE : / / Stale file handle <nl> + # endif <nl> code = error : : ABORTED ; <nl> break ; <nl> case ECANCELED : / / Operation cancelled <nl> error : : Code ErrnoToCode ( int err_number ) { <nl> case ENOEXEC : / / Exec format error <nl> case ENOMSG : / / No message of the desired type <nl> case EPROTO : / / Protocol error <nl> + # if ! defined ( _WIN32 ) <nl> case EREMOTE : / / Object is remote <nl> + # endif <nl> code = error : : UNKNOWN ; <nl> break ; <nl> default : { <nl> mmm a / tensorflow / core / platform / thread_annotations . h <nl> ppp b / tensorflow / core / platform / thread_annotations . h <nl> limitations under the License . <nl> # if defined ( PLATFORM_GOOGLE ) <nl> # include " tensorflow / core / platform / google / build_config / thread_annotations . h " <nl> # elif defined ( PLATFORM_POSIX ) | | defined ( PLATFORM_POSIX_ANDROID ) | | \ <nl> - defined ( PLATFORM_GOOGLE_ANDROID ) <nl> + defined ( PLATFORM_GOOGLE_ANDROID ) | | defined ( PLATFORM_WINDOWS ) <nl> # include " tensorflow / core / platform / default / thread_annotations . h " <nl> # else <nl> # error Define the appropriate PLATFORM_ < foo > macro for this platform <nl> mmm a / tensorflow / core / platform / types . h <nl> ppp b / tensorflow / core / platform / types . h <nl> limitations under the License . <nl> # if defined ( PLATFORM_GOOGLE ) | | defined ( GOOGLE_INTEGRAL_TYPES ) <nl> # include " tensorflow / core / platform / google / integral_types . h " <nl> # elif defined ( PLATFORM_POSIX ) | | defined ( PLATFORM_POSIX_ANDROID ) | | \ <nl> - defined ( PLATFORM_GOOGLE_ANDROID ) <nl> + defined ( PLATFORM_GOOGLE_ANDROID ) | | defined ( PLATFORM_WINDOWS ) <nl> # include " tensorflow / core / platform / default / integral_types . h " <nl> # else <nl> # error Define the appropriate PLATFORM_ < foo > macro for this platform <nl> new file mode 100644 <nl> index 0000000000000 . . a2182a831cb99 <nl> mmm / dev / null <nl> ppp b / tensorflow / core / platform / windows / env . cc <nl> <nl> + / * Copyright 2015 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 
0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + # include " tensorflow / core / platform / env . h " <nl> + <nl> + # include < Shlwapi . h > <nl> + # include < Windows . h > <nl> + # include < errno . h > <nl> + # include < fcntl . h > <nl> + # include < stdio . h > <nl> + # include < time . h > <nl> + # undef LoadLibrary <nl> + # undef ERROR <nl> + <nl> + # include < thread > <nl> + # include < vector > <nl> + <nl> + # include " tensorflow / core / lib / core / error_codes . pb . h " <nl> + # include " tensorflow / core / platform / load_library . h " <nl> + # include " tensorflow / core / platform / logging . h " <nl> + # include " tensorflow / core / platform / windows / windows_file_system . h " <nl> + <nl> + namespace tensorflow { <nl> + <nl> + namespace { <nl> + <nl> + class StdThread : public Thread { <nl> + public : <nl> + / / name and thread_options are both ignored . <nl> + StdThread ( const ThreadOptions & thread_options , const string & name , <nl> + std : : function < void ( ) > fn ) <nl> + : thread_ ( fn ) { } <nl> + ~ StdThread ( ) { thread_ . join ( ) ; } <nl> + <nl> + private : <nl> + std : : thread thread_ ; <nl> + } ; <nl> + <nl> + class WindowsEnv : public Env { <nl> + public : <nl> + WindowsEnv ( ) { } <nl> + ~ WindowsEnv ( ) override { <nl> + LOG ( FATAL ) < < " Env : : Default ( ) must not be destroyed " ; <nl> + } <nl> + <nl> + bool MatchPath ( const string & path , const string & pattern ) override { <nl> + return PathMatchSpec ( path . c_str ( ) , pattern . c_str ( ) ) = = S_OK ; <nl> + } <nl> + <nl> + uint64 NowMicros ( ) override { <nl> + FILETIME temp ; <nl> + GetSystemTimeAsFileTime ( & temp ) ; <nl> + uint64 now_ticks = <nl> + ( uint64 ) temp . dwLowDateTime + ( ( uint64 ) ( temp . dwHighDateTime ) < < 32LL ) ; <nl> + return now_ticks / 10LL ; <nl> + } <nl> + <nl> + void SleepForMicroseconds ( int64 micros ) override { Sleep ( micros / 1000 ) ; } <nl> + <nl> + Thread * StartThread ( const ThreadOptions & thread_options , const string & name , <nl> + std : : function < void ( ) > fn ) override { <nl> + return new StdThread ( thread_options , name , fn ) ; <nl> + } <nl> + <nl> + void SchedClosure ( std : : function < void ( ) > closure ) override { <nl> + / / TODO ( b / 27290852 ) : Spawning a new thread here is wasteful , but <nl> + / / needed to deal with the fact that many ` closure ` functions are <nl> + / / blocking in the current codebase . <nl> + std : : thread closure_thread ( closure ) ; <nl> + closure_thread . detach ( ) ; <nl> + } <nl> + <nl> + void SchedClosureAfter ( int64 micros , std : : function < void ( ) > closure ) override { <nl> + / / TODO ( b / 27290852 ) : Consuming a thread here is wasteful , but this <nl> + / / code is ( currently ) only used in the case where a step fails <nl> + / / ( AbortStep ) . 
This could be replaced by a timer thread <nl> + SchedClosure ( [ this , micros , closure ] ( ) { <nl> + SleepForMicroseconds ( micros ) ; <nl> + closure ( ) ; <nl> + } ) ; <nl> + } <nl> + <nl> + Status LoadLibrary ( const char * library_filename , void * * handle ) override { <nl> + return errors : : Unimplemented ( " WindowsEnv : : LoadLibrary " ) ; <nl> + } <nl> + <nl> + Status GetSymbolFromLibrary ( void * handle , const char * symbol_name , <nl> + void * * symbol ) override { <nl> + return errors : : Unimplemented ( " WindowsEnv : : GetSymbolFromLibrary " ) ; <nl> + } <nl> + } ; <nl> + <nl> + } / / namespace <nl> + <nl> + REGISTER_FILE_SYSTEM ( " " , WindowsFileSystem ) ; <nl> + Env * Env : : Default ( ) { <nl> + static Env * default_env = new WindowsEnv ; <nl> + return default_env ; <nl> + } <nl> + <nl> + } / / namespace tensorflow <nl> new file mode 100644 <nl> index 0000000000000 . . fbc0c39c9cb60 <nl> mmm / dev / null <nl> ppp b / tensorflow / core / platform / windows / net . cc <nl> <nl> + / * Copyright 2016 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + # include " tensorflow / core / platform / net . h " <nl> + <nl> + # include < cerrno > <nl> + # include < cstdlib > <nl> + # include < unordered_set > <nl> + <nl> + # include < sys / types . h > <nl> + # include < winsock . h > <nl> + <nl> + # include " tensorflow / core / lib / strings / strcat . h " <nl> + # include " tensorflow / core / platform / logging . h " <nl> + <nl> + # undef ERROR <nl> + <nl> + namespace tensorflow { <nl> + namespace internal { <nl> + <nl> + namespace { <nl> + bool IsPortAvailable ( int * port , bool is_tcp ) { <nl> + const int protocol = is_tcp ? IPPROTO_TCP : 0 ; <nl> + const int fd = socket ( AF_INET , is_tcp ? SOCK_STREAM : SOCK_DGRAM , protocol ) ; <nl> + <nl> + struct sockaddr_in addr ; <nl> + int addr_len = static_cast < int > ( sizeof ( addr ) ) ; <nl> + int actual_port ; <nl> + <nl> + CHECK_GE ( * port , 0 ) ; <nl> + CHECK_LE ( * port , 65535 ) ; <nl> + if ( fd < 0 ) { <nl> + LOG ( ERROR ) < < " socket ( ) failed : " < < strerror ( errno ) ; <nl> + return false ; <nl> + } <nl> + <nl> + / / SO_REUSEADDR lets us start up a server immediately after it exists . <nl> + int one = 1 ; <nl> + if ( setsockopt ( fd , SOL_SOCKET , SO_REUSEADDR , ( const char * ) & one , sizeof ( one ) ) < <nl> + 0 ) { <nl> + LOG ( ERROR ) < < " setsockopt ( ) failed : " < < strerror ( errno ) ; <nl> + closesocket ( fd ) ; <nl> + return false ; <nl> + } <nl> + <nl> + / / Try binding to port . <nl> + addr . sin_family = AF_INET ; <nl> + addr . sin_addr . s_addr = INADDR_ANY ; <nl> + addr . 
sin_port = htons ( ( uint16_t ) * port ) ; <nl> + if ( bind ( fd , ( struct sockaddr * ) & addr , sizeof ( addr ) ) < 0 ) { <nl> + LOG ( WARNING ) < < " bind ( port = " < < * port < < " ) failed : " < < strerror ( errno ) ; <nl> + closesocket ( fd ) ; <nl> + return false ; <nl> + } <nl> + <nl> + / / Get the bound port number . <nl> + if ( getsockname ( fd , ( struct sockaddr * ) & addr , & addr_len ) < 0 ) { <nl> + LOG ( WARNING ) < < " getsockname ( ) failed : " < < strerror ( errno ) ; <nl> + closesocket ( fd ) ; <nl> + return false ; <nl> + } <nl> + CHECK_LE ( addr_len , sizeof ( addr ) ) ; <nl> + actual_port = ntohs ( addr . sin_port ) ; <nl> + CHECK_GT ( actual_port , 0 ) ; <nl> + if ( * port = = 0 ) { <nl> + * port = actual_port ; <nl> + } else { <nl> + CHECK_EQ ( * port , actual_port ) ; <nl> + } <nl> + closesocket ( fd ) ; <nl> + return true ; <nl> + } <nl> + <nl> + const int kNumRandomPortsToPick = 100 ; <nl> + const int kMaximumTrials = 1000 ; <nl> + <nl> + } / / namespace <nl> + <nl> + int PickUnusedPortOrDie ( ) { <nl> + static std : : unordered_set < int > chosen_ports ; <nl> + <nl> + / / Type of port to first pick in the next iteration . <nl> + bool is_tcp = true ; <nl> + int trial = 0 ; <nl> + while ( true ) { <nl> + int port ; <nl> + trial + + ; <nl> + CHECK_LE ( trial , kMaximumTrials ) <nl> + < < " Failed to pick an unused port for testing . " ; <nl> + if ( trial = = 1 ) { <nl> + port = GetCurrentProcessId ( ) % ( 65536 - 30000 ) + 30000 ; <nl> + } else if ( trial < = kNumRandomPortsToPick ) { <nl> + port = rand ( ) % ( 65536 - 30000 ) + 30000 ; <nl> + } else { <nl> + port = 0 ; <nl> + } <nl> + <nl> + if ( chosen_ports . find ( port ) ! = chosen_ports . end ( ) ) { <nl> + continue ; <nl> + } <nl> + if ( ! IsPortAvailable ( & port , is_tcp ) ) { <nl> + continue ; <nl> + } <nl> + <nl> + CHECK_GT ( port , 0 ) ; <nl> + if ( ! IsPortAvailable ( & port , ! is_tcp ) ) { <nl> + is_tcp = ! is_tcp ; <nl> + continue ; <nl> + } <nl> + <nl> + chosen_ports . insert ( port ) ; <nl> + return port ; <nl> + } <nl> + <nl> + return 0 ; <nl> + } <nl> + <nl> + } / / namespace internal <nl> + } / / namespace tensorflow <nl> new file mode 100644 <nl> index 0000000000000 . . b08c1cf9f4ebd <nl> mmm / dev / null <nl> ppp b / tensorflow / core / platform / windows / port . cc <nl> <nl> + / * Copyright 2016 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + # include < stdio . h > <nl> + # include < stdlib . h > <nl> + # include < string . h > <nl> + # ifdef SNAPPY <nl> + # include < snappy . h > <nl> + # endif <nl> + # include < WinSock2 . h > <nl> + <nl> + # include " tensorflow / core / platform / cpu_info . h " <nl> + # include " tensorflow / core / platform / demangle . 
h " <nl> + # include " tensorflow / core / platform / host_info . h " <nl> + # include " tensorflow / core / platform / init_main . h " <nl> + # include " tensorflow / core / platform / logging . h " <nl> + # include " tensorflow / core / platform / mem . h " <nl> + # include " tensorflow / core / platform / snappy . h " <nl> + # include " tensorflow / core / platform / types . h " <nl> + <nl> + namespace tensorflow { <nl> + namespace port { <nl> + <nl> + void InitMain ( const char * usage , int * argc , char * * * argv ) { } <nl> + <nl> + string Hostname ( ) { <nl> + char hostname [ 1024 ] ; <nl> + gethostname ( hostname , sizeof hostname ) ; <nl> + hostname [ sizeof hostname - 1 ] = 0 ; <nl> + return string ( hostname ) ; <nl> + } <nl> + <nl> + int NumSchedulableCPUs ( ) { <nl> + SYSTEM_INFO system_info ; <nl> + GetSystemInfo ( & system_info ) ; <nl> + return system_info . dwNumberOfProcessors ; <nl> + } <nl> + <nl> + void * aligned_malloc ( size_t size , int minimum_alignment ) { <nl> + return _aligned_malloc ( size , minimum_alignment ) ; <nl> + } <nl> + <nl> + void aligned_free ( void * aligned_memory ) { _aligned_free ( aligned_memory ) ; } <nl> + <nl> + void MallocExtension_ReleaseToSystem ( std : : size_t num_bytes ) { <nl> + / / No - op . <nl> + } <nl> + <nl> + std : : size_t MallocExtension_GetAllocatedSize ( const void * p ) { return 0 ; } <nl> + <nl> + void AdjustFilenameForLogging ( string * filename ) { <nl> + / / Nothing to do <nl> + } <nl> + <nl> + bool Snappy_Compress ( const char * input , size_t length , string * output ) { <nl> + # ifdef SNAPPY <nl> + output - > resize ( snappy : : MaxCompressedLength ( length ) ) ; <nl> + size_t outlen ; <nl> + snappy : : RawCompress ( input , length , & ( * output ) [ 0 ] , & outlen ) ; <nl> + output - > resize ( outlen ) ; <nl> + return true ; <nl> + # else <nl> + return false ; <nl> + # endif <nl> + } <nl> + <nl> + bool Snappy_GetUncompressedLength ( const char * input , size_t length , <nl> + size_t * result ) { <nl> + # ifdef SNAPPY <nl> + return snappy : : GetUncompressedLength ( input , length , result ) ; <nl> + # else <nl> + return false ; <nl> + # endif <nl> + } <nl> + <nl> + bool Snappy_Uncompress ( const char * input , size_t length , char * output ) { <nl> + # ifdef SNAPPY <nl> + return snappy : : RawUncompress ( input , length , output ) ; <nl> + # else <nl> + return false ; <nl> + # endif <nl> + } <nl> + <nl> + string Demangle ( const char * mangled ) { return mangled ; } <nl> + <nl> + } / / namespace port <nl> + } / / namespace tensorflow <nl> new file mode 100644 <nl> index 0000000000000 . . 44b26d94e86a0 <nl> mmm / dev / null <nl> ppp b / tensorflow / core / platform / windows / windows_file_system . cc <nl> <nl> + / * Copyright 2015 Google Inc . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . 
<nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + # include < Windows . h > <nl> + # include < direct . h > <nl> + # include < errno . h > <nl> + # include < fcntl . h > <nl> + # include < io . h > <nl> + # include < Shlwapi . h > <nl> + # undef StrCat <nl> + # include < stdio . h > <nl> + # include < sys / stat . h > <nl> + # include < sys / types . h > <nl> + # include < time . h > <nl> + <nl> + # include " tensorflow / core / lib / core / error_codes . pb . h " <nl> + # include " tensorflow / core / lib / strings / strcat . h " <nl> + # include " tensorflow / core / platform / env . h " <nl> + # include " tensorflow / core / platform / logging . h " <nl> + # include " tensorflow / core / platform / posix / error . h " <nl> + # include " tensorflow / core / platform / windows / windows_file_system . h " <nl> + <nl> + / / TODO ( mrry ) : Prevent this Windows . h # define from leaking out of our headers . <nl> + # undef DeleteFile <nl> + <nl> + namespace tensorflow { <nl> + <nl> + namespace { <nl> + <nl> + / / read ( ) based random - access <nl> + class WindowsRandomAccessFile : public RandomAccessFile { <nl> + private : <nl> + string filename_ ; <nl> + FILE * file_ ; <nl> + <nl> + public : <nl> + WindowsRandomAccessFile ( const string & fname , FILE * f ) <nl> + : filename_ ( fname ) , file_ ( f ) { } <nl> + ~ WindowsRandomAccessFile ( ) override { <nl> + if ( file_ ! = NULL ) { <nl> + / / Ignoring any potential errors <nl> + fclose ( file_ ) ; <nl> + } <nl> + } <nl> + <nl> + Status Read ( uint64 offset , size_t n , StringPiece * result , <nl> + char * scratch ) const override { <nl> + Status s ; <nl> + char * dst = scratch ; <nl> + int seek_result = fseek ( file_ , offset , SEEK_SET ) ; <nl> + if ( seek_result ) { <nl> + return IOError ( filename_ , errno ) ; <nl> + } <nl> + while ( n > 0 & & s . ok ( ) ) { <nl> + size_t r = fread ( dst , 1 , n , file_ ) ; <nl> + if ( r > 0 ) { <nl> + dst + = r ; <nl> + n - = r ; <nl> + } else if ( r = = 0 ) { <nl> + s = Status ( error : : OUT_OF_RANGE , " Read fewer bytes than requested " ) ; <nl> + } else if ( errno = = EINTR | | errno = = EAGAIN ) { <nl> + / / Retry <nl> + } else { <nl> + s = IOError ( filename_ , errno ) ; <nl> + } <nl> + } <nl> + * result = StringPiece ( scratch , dst - scratch ) ; <nl> + return s ; <nl> + } <nl> + } ; <nl> + <nl> + class WindowsWritableFile : public WritableFile { <nl> + private : <nl> + string filename_ ; <nl> + FILE * file_ ; <nl> + <nl> + public : <nl> + WindowsWritableFile ( const string & fname , FILE * f ) <nl> + : filename_ ( fname ) , file_ ( f ) { } <nl> + <nl> + ~ WindowsWritableFile ( ) override { <nl> + if ( file_ ! = NULL ) { <nl> + / / Ignoring any potential errors <nl> + fclose ( file_ ) ; <nl> + } <nl> + } <nl> + <nl> + Status Append ( const StringPiece & data ) override { <nl> + size_t r = fwrite ( data . data ( ) , 1 , data . size ( ) , file_ ) ; <nl> + if ( r ! = data . size ( ) ) { <nl> + return IOError ( filename_ , errno ) ; <nl> + } <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> + Status Close ( ) override { <nl> + Status result ; <nl> + if ( fclose ( file_ ) ! = 0 ) { <nl> + result = IOError ( filename_ , errno ) ; <nl> + } <nl> + file_ = NULL ; <nl> + return result ; <nl> + } <nl> + <nl> + Status Flush ( ) override { <nl> + if ( fflush ( file_ ) ! 
= 0 ) { <nl> + return IOError ( filename_ , errno ) ; <nl> + } <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> + Status Sync ( ) override { <nl> + Status s ; <nl> + if ( fflush ( file_ ) ! = 0 ) { <nl> + s = IOError ( filename_ , errno ) ; <nl> + } <nl> + return s ; <nl> + } <nl> + } ; <nl> + <nl> + } / / namespace <nl> + <nl> + Status WindowsFileSystem : : NewRandomAccessFile ( <nl> + const string & fname , std : : unique_ptr < RandomAccessFile > * result ) { <nl> + string translated_fname = TranslateName ( fname ) ; <nl> + result - > reset ( ) ; <nl> + Status s ; <nl> + FILE * f = fopen ( translated_fname . c_str ( ) , " r " ) ; <nl> + if ( f = = NULL ) { <nl> + s = IOError ( fname , errno ) ; <nl> + } else { <nl> + result - > reset ( new WindowsRandomAccessFile ( translated_fname , f ) ) ; <nl> + } <nl> + return s ; <nl> + } <nl> + <nl> + Status WindowsFileSystem : : NewWritableFile ( <nl> + const string & fname , std : : unique_ptr < WritableFile > * result ) { <nl> + string translated_fname = TranslateName ( fname ) ; <nl> + Status s ; <nl> + FILE * f = fopen ( translated_fname . c_str ( ) , " w " ) ; <nl> + if ( f = = NULL ) { <nl> + result - > reset ( ) ; <nl> + s = IOError ( fname , errno ) ; <nl> + } else { <nl> + result - > reset ( new WindowsWritableFile ( translated_fname , f ) ) ; <nl> + } <nl> + return s ; <nl> + } <nl> + <nl> + Status WindowsFileSystem : : NewAppendableFile ( <nl> + const string & fname , std : : unique_ptr < WritableFile > * result ) { <nl> + string translated_fname = TranslateName ( fname ) ; <nl> + Status s ; <nl> + FILE * f = fopen ( translated_fname . c_str ( ) , " a " ) ; <nl> + if ( f = = NULL ) { <nl> + result - > reset ( ) ; <nl> + s = IOError ( fname , errno ) ; <nl> + } else { <nl> + result - > reset ( new WindowsWritableFile ( translated_fname , f ) ) ; <nl> + } <nl> + return s ; <nl> + } <nl> + <nl> + Status WindowsFileSystem : : NewReadOnlyMemoryRegionFromFile ( <nl> + const string & fname , std : : unique_ptr < ReadOnlyMemoryRegion > * result ) { <nl> + return errors : : Unimplemented ( <nl> + " WindowsFileSystem : : NewReadOnlyMemoryRegionFromFile " ) ; <nl> + } <nl> + <nl> + bool WindowsFileSystem : : FileExists ( const string & fname ) { <nl> + return _access ( TranslateName ( fname ) . c_str ( ) , 0 ) = = 0 ; <nl> + } <nl> + <nl> + Status WindowsFileSystem : : GetChildren ( const string & dir , <nl> + std : : vector < string > * result ) { <nl> + string translated_dir = TranslateName ( dir ) ; <nl> + result - > clear ( ) ; <nl> + <nl> + WIN32_FIND_DATA find_data ; <nl> + HANDLE find_handle = FindFirstFile ( translated_dir . c_str ( ) , & find_data ) ; <nl> + if ( find_handle = = INVALID_HANDLE_VALUE ) { <nl> + / / TODO ( mrry ) : Convert to a more specific error . <nl> + return errors : : Unknown ( " Error code : " , GetLastError ( ) ) ; <nl> + } <nl> + result - > push_back ( find_data . cFileName ) ; <nl> + while ( FindNextFile ( find_handle , & find_data ) ) { <nl> + result - > push_back ( find_data . cFileName ) ; <nl> + } <nl> + if ( ! FindClose ( find_handle ) ) { <nl> + / / TODO ( mrry ) : Convert to a more specific error . <nl> + return errors : : Unknown ( " Error closing find handle : " , GetLastError ( ) ) ; <nl> + } <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> + Status WindowsFileSystem : : DeleteFile ( const string & fname ) { <nl> + Status result ; <nl> + if ( unlink ( TranslateName ( fname ) . c_str ( ) ) ! 
= 0 ) { <nl> + result = IOError ( fname , errno ) ; <nl> + } <nl> + return result ; <nl> + } <nl> + <nl> + Status WindowsFileSystem : : CreateDir ( const string & name ) { <nl> + Status result ; <nl> + if ( _mkdir ( TranslateName ( name ) . c_str ( ) ) ! = 0 ) { <nl> + result = IOError ( name , errno ) ; <nl> + } <nl> + return result ; <nl> + } <nl> + <nl> + Status WindowsFileSystem : : DeleteDir ( const string & name ) { <nl> + Status result ; <nl> + if ( _rmdir ( TranslateName ( name ) . c_str ( ) ) ! = 0 ) { <nl> + result = IOError ( name , errno ) ; <nl> + } <nl> + return result ; <nl> + } <nl> + <nl> + Status WindowsFileSystem : : GetFileSize ( const string & fname , uint64 * size ) { <nl> + Status s ; <nl> + struct _stat sbuf ; <nl> + if ( _stat ( TranslateName ( fname ) . c_str ( ) , & sbuf ) ! = 0 ) { <nl> + * size = 0 ; <nl> + s = IOError ( fname , errno ) ; <nl> + } else { <nl> + * size = sbuf . st_size ; <nl> + } <nl> + return s ; <nl> + } <nl> + <nl> + Status WindowsFileSystem : : RenameFile ( const string & src , const string & target ) { <nl> + Status result ; <nl> + if ( rename ( TranslateName ( src ) . c_str ( ) , TranslateName ( target ) . c_str ( ) ) ! = 0 ) { <nl> + result = IOError ( src , errno ) ; <nl> + } <nl> + return result ; <nl> + } <nl> + <nl> + Status WindowsFileSystem : : Stat ( const string & fname , FileStatistics * stat ) { <nl> + Status s ; <nl> + struct _stat sbuf ; <nl> + if ( _stat ( TranslateName ( fname ) . c_str ( ) , & sbuf ) ! = 0 ) { <nl> + s = IOError ( fname , errno ) ; <nl> + } else { <nl> + stat - > mtime_nsec = sbuf . st_mtime * 1e9 ; <nl> + stat - > length = sbuf . st_size ; <nl> + stat - > is_directory = PathIsDirectory ( TranslateName ( fname ) . c_str ( ) ) ; <nl> + } <nl> + return s ; <nl> + } <nl> + <nl> + } / / namespace tensorflow <nl> \ No newline at end of file <nl> new file mode 100644 <nl> index 0000000000000 . . 68b391fb108a7 <nl> mmm / dev / null <nl> ppp b / tensorflow / core / platform / windows / windows_file_system . h <nl> <nl> + / * Copyright 2015 Google Inc . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + # ifndef TENSORFLOW_CORE_PLATFORM_WINDOWS_WINDOWS_FILE_SYSTEM_H_ <nl> + # define TENSORFLOW_CORE_PLATFORM_WINDOWS_WINDOWS_FILE_SYSTEM_H_ <nl> + <nl> + # include " tensorflow / core / platform / file_system . 
h " <nl> + <nl> + # ifdef PLATFORM_WINDOWS <nl> + # undef DeleteFile <nl> + # endif <nl> + <nl> + namespace tensorflow { <nl> + <nl> + class WindowsFileSystem : public FileSystem { <nl> + public : <nl> + WindowsFileSystem ( ) { } <nl> + <nl> + ~ WindowsFileSystem ( ) { } <nl> + <nl> + Status NewRandomAccessFile ( <nl> + const string & fname , std : : unique_ptr < RandomAccessFile > * result ) override ; <nl> + <nl> + Status NewWritableFile ( const string & fname , <nl> + std : : unique_ptr < WritableFile > * result ) override ; <nl> + <nl> + Status NewAppendableFile ( const string & fname , <nl> + std : : unique_ptr < WritableFile > * result ) override ; <nl> + <nl> + Status NewReadOnlyMemoryRegionFromFile ( <nl> + const string & fname , <nl> + std : : unique_ptr < ReadOnlyMemoryRegion > * result ) override ; <nl> + <nl> + bool FileExists ( const string & fname ) override ; <nl> + <nl> + Status GetChildren ( const string & dir , std : : vector < string > * result ) override ; <nl> + <nl> + Status Stat ( const string & fname , FileStatistics * stat ) override ; <nl> + <nl> + Status DeleteFile ( const string & fname ) override ; <nl> + <nl> + Status CreateDir ( const string & name ) override ; <nl> + <nl> + Status DeleteDir ( const string & name ) override ; <nl> + <nl> + Status GetFileSize ( const string & fname , uint64 * size ) override ; <nl> + <nl> + Status RenameFile ( const string & src , const string & target ) override ; <nl> + <nl> + string TranslateName ( const string & name ) const override { <nl> + return name ; <nl> + } <nl> + } ; <nl> + <nl> + Status IOError ( const string & context , int err_number ) ; <nl> + <nl> + } / / namespace tensorflow <nl> + <nl> + # endif / / TENSORFLOW_CORE_PLATFORM_WINDOWS_WINDOWS_FILE_SYSTEM_H_ <nl> mmm a / tensorflow / tools / git / gen_git_source . py <nl> ppp b / tensorflow / tools / git / gen_git_source . py <nl> def get_git_version ( git_base_path ) : <nl> Args : <nl> git_base_path : where the . git directory is located <nl> Returns : <nl> - A string representing the git version <nl> + A bytestring representing the git version <nl> " " " <nl> - <nl> - unknown_label = " unknown " <nl> + unknown_label = b " unknown " <nl> try : <nl> val = subprocess . check_output ( [ " git " , " - C " , git_base_path , " describe " , <nl> " - - long " , " - - dirty " , " - - tags " ] ) . strip ( ) <nl> mmm a / third_party / eigen3 / unsupported / Eigen / CXX11 / Tensor <nl> ppp b / third_party / eigen3 / unsupported / Eigen / CXX11 / Tensor <nl> @ @ - 1 + 1 , 10 @ @ <nl> # include " unsupported / Eigen / CXX11 / Tensor " <nl> + <nl> + # ifdef _WIN32 <nl> + / / On Windows , Eigen will include Windows . h , which defines various <nl> + / / macros that conflict with TensorFlow symbols . Undefine them here to <nl> + / / prevent clashes . <nl> + # undef DeleteFile <nl> + # undef ERROR <nl> + # undef LoadLibrary <nl> + # endif / / _WIN32 <nl>
|
Initial support for building TensorFlow on Windows.
|
tensorflow/tensorflow
|
d0d975f8c3330b5402263b2356b038bc8af919a2
|
2016-10-05T17:02:01Z
|
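The port-probing logic in IsPortAvailable above leans on a classic sockets trick: binding to port 0 asks the kernel for any free port, and getsockname reports which one was assigned. Below is a minimal standalone sketch of that trick, assuming a POSIX socket environment; the helper name is hypothetical and error handling is trimmed to the essentials.

#include <arpa/inet.h>   // htons, ntohs
#include <netinet/in.h>  // sockaddr_in, INADDR_ANY, IPPROTO_TCP
#include <sys/socket.h>  // socket, bind, getsockname
#include <unistd.h>      // close

int PickFreeTcpPort() {
  int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
  if (fd < 0) return -1;
  sockaddr_in addr = {};
  addr.sin_family = AF_INET;
  addr.sin_addr.s_addr = INADDR_ANY;
  addr.sin_port = htons(0);  // port 0: let the kernel choose a free port
  socklen_t len = sizeof(addr);
  if (bind(fd, reinterpret_cast<sockaddr*>(&addr), sizeof(addr)) < 0 ||
      getsockname(fd, reinterpret_cast<sockaddr*>(&addr), &len) < 0) {
    close(fd);
    return -1;
  }
  int port = ntohs(addr.sin_port);  // the port the kernel actually assigned
  close(fd);  // note: another process may grab the port before it is reused
  return port;
}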
mmm a / src / app / ui / timeline / timeline . cpp <nl> ppp b / src / app / ui / timeline / timeline . cpp <nl> bool Timeline : : onProcessMessage ( Message * msg ) <nl> <nl> case PART_FRAME_TAG_SWITCH_BAND_BUTTON : <nl> if ( m_clk . band > = 0 ) { <nl> - if ( m_tagFocusBand < 0 ) { <nl> - m_tagFocusBand = m_clk . band ; <nl> - } <nl> - else { <nl> - m_tagFocusBand = - 1 ; <nl> - } <nl> + focusTagBand ( m_clk . band ) ; <nl> regenRows = true ; <nl> relayout = true ; <nl> } <nl> bool Timeline : : onProcessMessage ( Message * msg ) <nl> return true ; <nl> } <nl> <nl> + case PART_FRAME_TAG_BAND : <nl> + if ( m_hot . band > = 0 ) { <nl> + focusTagBand ( m_hot . band ) ; <nl> + regenerateRows ( ) ; <nl> + invalidate ( ) ; <nl> + layout ( ) ; <nl> + } <nl> + break ; <nl> + <nl> } <nl> break ; <nl> <nl> void Timeline : : showCurrentCel ( ) <nl> showCel ( layer , m_frame ) ; <nl> } <nl> <nl> + void Timeline : : focusTagBand ( int band ) <nl> + { <nl> + if ( m_tagFocusBand < 0 ) { <nl> + m_tagFocusBand = band ; <nl> + } <nl> + else { <nl> + m_tagFocusBand = - 1 ; <nl> + } <nl> + } <nl> + <nl> void Timeline : : cleanClk ( ) <nl> { <nl> invalidateHit ( m_clk ) ; <nl> void Timeline : : updateDropRange ( const gfx : : Point & pt ) <nl> / / Special drop target for expanded groups <nl> else if ( m_range . type ( ) = = Range : : kLayers & & <nl> m_hot . layer > = 0 & & <nl> - m_hot . layer < m_rows . size ( ) & & <nl> + m_hot . layer < int ( m_rows . size ( ) ) & & <nl> m_rows [ m_hot . layer ] . layer ( ) - > isGroup ( ) & & <nl> static_cast < LayerGroup * > ( m_rows [ m_hot . layer ] . layer ( ) ) - > isExpanded ( ) ) { <nl> m_dropTarget . vhit = DropTarget : : FirstChild ; <nl> mmm a / src / app / ui / timeline / timeline . h <nl> ppp b / src / app / ui / timeline / timeline . h <nl> namespace app { <nl> void setHot ( const Hit & hit ) ; <nl> void showCel ( layer_t layer , frame_t frame ) ; <nl> void showCurrentCel ( ) ; <nl> + void focusTagBand ( int band ) ; <nl> void cleanClk ( ) ; <nl> gfx : : Size getScrollableSize ( ) const ; <nl> gfx : : Point getMaxScrollablePos ( ) const ; <nl>
|
Double-clicking a band on the Timeline will expand/collapse it.
|
aseprite/aseprite
|
04475a825d55d97c60f67a5c0d29fc3e46a38dfb
|
2017-04-14T16:18:01Z
|
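The Timeline change above extracts a duplicated if/else into focusTagBand, whose whole job is a sentinel-based toggle: a negative band index means "no band focused", and clicking while something is focused collapses back. A standalone sketch of that helper's shape, with hypothetical names:

struct TagBandFocus {
  int focused_band = -1;  // sentinel: no band is currently focused

  // Mirrors the extracted helper: focus the clicked band if nothing is
  // focused, otherwise clear the focus (collapse back to all bands).
  void toggle(int band) {
    if (focused_band < 0)
      focused_band = band;
    else
      focused_band = -1;
  }
};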
mmm a / src / compiler / bytecode - graph - builder . cc <nl> ppp b / src / compiler / bytecode - graph - builder . cc <nl> void BytecodeGraphBuilder : : Environment : : PrepareForLoop ( ) { <nl> <nl> bool BytecodeGraphBuilder : : Environment : : StateValuesRequireUpdate ( <nl> Node * * state_values , int offset , int count ) { <nl> - if ( ! builder ( ) - > deoptimization_enabled_ ) { <nl> - return false ; <nl> - } <nl> if ( * state_values = = nullptr ) { <nl> return true ; <nl> } <nl> void BytecodeGraphBuilder : : Environment : : UpdateStateValues ( Node * * state_values , <nl> <nl> Node * BytecodeGraphBuilder : : Environment : : Checkpoint ( <nl> BailoutId bailout_id , OutputFrameStateCombine combine ) { <nl> - if ( ! builder ( ) - > deoptimization_enabled_ ) { <nl> - return builder ( ) - > jsgraph ( ) - > EmptyFrameState ( ) ; <nl> - } <nl> - <nl> / / TODO ( rmcilroy ) : Consider using StateValuesCache for some state values . <nl> UpdateStateValues ( & parameters_state_values_ , 0 , parameter_count ( ) ) ; <nl> UpdateStateValues ( & registers_state_values_ , register_base ( ) , <nl> bool BytecodeGraphBuilder : : Environment : : StateValuesAreUpToDate ( <nl> <nl> bool BytecodeGraphBuilder : : Environment : : StateValuesAreUpToDate ( <nl> int output_poke_offset , int output_poke_count ) { <nl> - if ( ! builder ( ) - > deoptimization_enabled_ ) return true ; <nl> / / Poke offset is relative to the top of the stack ( i . e . , the accumulator ) . <nl> int output_poke_start = accumulator_base ( ) - output_poke_offset ; <nl> int output_poke_end = output_poke_start + output_poke_count ; <nl> BytecodeGraphBuilder : : BytecodeGraphBuilder ( Zone * local_zone , <nl> FrameStateType : : kInterpretedFunction , <nl> bytecode_array ( ) - > parameter_count ( ) , <nl> bytecode_array ( ) - > register_count ( ) , info - > shared_info ( ) ) ) , <nl> - deoptimization_enabled_ ( info - > is_deoptimization_enabled ( ) ) , <nl> merge_environments_ ( local_zone ) , <nl> exception_handlers_ ( local_zone ) , <nl> current_exception_handler_ ( 0 ) , <nl> mmm a / src / compiler / bytecode - graph - builder . h <nl> ppp b / src / compiler / bytecode - graph - builder . h <nl> class BytecodeGraphBuilder { <nl> const BytecodeBranchAnalysis * branch_analysis_ ; <nl> Environment * environment_ ; <nl> <nl> - / / Indicates whether deoptimization support is enabled for this compilation <nl> - / / and whether valid frame states need to be attached to deoptimizing nodes . <nl> - bool deoptimization_enabled_ ; <nl> - <nl> / / Merge environments are snapshots of the environment at points where the <nl> / / control flow merges . This models a forward data flow propagation of all <nl> / / values from all predecessors of the merge in question . <nl>
|
[turbofan] The BytecodeGraphBuilder should never insert the empty frame state.
|
v8/v8
|
a329afdaecceca133b1bcaefc566bcfe0636b2d1
|
2016-05-25T09:44:00Z
|
mmm a / modules / common / adapters / adapter . h <nl> ppp b / modules / common / adapters / adapter . h <nl> class Adapter { <nl> header - > set_sequence_num ( + + seq_num_ ) ; <nl> } <nl> <nl> - / * * <nl> - * @ brief fills the fields module_name , user provided timestamp and <nl> - * sequence_num in the header . <nl> - * / <nl> - void FillHeader ( const std : : string & module_name , const double timestamp , <nl> - D * data ) { <nl> - static_assert ( std : : is_base_of < google : : protobuf : : Message , D > : : value , <nl> - " Can only fill header to proto messages ! " ) ; <nl> - auto * header = data - > mutable_header ( ) ; <nl> - header - > set_module_name ( module_name ) ; <nl> - header - > set_timestamp_sec ( timestamp ) ; <nl> - header - > set_sequence_num ( + + seq_num_ ) ; <nl> - } <nl> - <nl> uint32_t GetSeqNum ( ) const { return seq_num_ ; } <nl> <nl> void SetLatestPublished ( const D & data ) { <nl> mmm a / modules / common / adapters / adapter_manager . h <nl> ppp b / modules / common / adapters / adapter_manager . h <nl> namespace adapter { <nl> " Data type must be the same with adapter ' s type ! " ) ; \ <nl> instance ( ) - > name # # _ - > FillHeader ( module_name , data ) ; \ <nl> } \ <nl> - template < typename T > \ <nl> - static void Fill # # name # # Header ( const std : : string & module_name , \ <nl> - const double timestamp , T * data ) { \ <nl> - static_assert ( std : : is_same < name # # Adapter : : DataType , T > : : value , \ <nl> - " Data type must be the same with adapter ' s type ! " ) ; \ <nl> - instance ( ) - > name # # _ - > FillHeader ( module_name , timestamp , data ) ; \ <nl> - } \ <nl> static void Add # # name # # Callback ( name # # Adapter : : Callback callback ) { \ <nl> CHECK ( instance ( ) - > name # # _ ) \ <nl> < < " Initialize adapter before setting callback " ; \ <nl>
|
Common: remove unused function
|
ApolloAuto/apollo
|
480905da844cb3ae8aa177db0ec45c972a70aaba
|
2017-11-22T19:27:42Z
|
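The deleted FillHeader overload above duplicated the surviving one except for taking a caller-supplied timestamp; with no callers left, it was dead weight. The surviving pattern, stamping a message header with module name, timestamp, and a per-adapter sequence number, looks roughly like this sketch, where a plain struct stands in for the protobuf header (names are illustrative, not Apollo's actual types):

#include <cstdint>
#include <string>

struct Header {
  std::string module_name;
  double timestamp_sec = 0.0;
  uint32_t sequence_num = 0;
};

class Adapter {
 public:
  void FillHeader(const std::string& module_name, double timestamp_sec,
                  Header* header) {
    header->module_name = module_name;
    header->timestamp_sec = timestamp_sec;
    header->sequence_num = ++seq_num_;  // monotonically increasing per adapter
  }

 private:
  uint32_t seq_num_ = 0;  // shared by every message this adapter publishes
};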
mmm a / src / arch / timer . cc <nl> ppp b / src / arch / timer . cc <nl> <nl> # include " arch / runtime / thread_pool . hpp " <nl> # include " utils . hpp " <nl> <nl> + class timer_token_t : public intrusive_priority_queue_node_t < timer_token_t > { <nl> + friend class timer_handler_t ; <nl> + <nl> + bool is_higher_priority_than ( timer_token_t * competitor ) { <nl> + return next_time_in_nanos < competitor - > next_time_in_nanos ; <nl> + } <nl> + <nl> + private : <nl> + timer_token_t ( ) : interval_nanos ( - 1 ) , next_time_in_nanos ( - 1 ) , callback ( NULL ) { } <nl> + <nl> + / / The time between rings , if a repeating timer , otherwise zero . <nl> + int64_t interval_nanos ; <nl> + <nl> + / / The time of the next ' ring ' . <nl> + int64_t next_time_in_nanos ; <nl> + <nl> + / / The callback we call upon each ' ring ' . <nl> + timer_callback_t * callback ; <nl> + <nl> + DISABLE_COPYING ( timer_token_t ) ; <nl> + } ; <nl> + <nl> timer_handler_t : : timer_handler_t ( linux_event_queue_t * queue ) <nl> : timer_provider ( queue ) , <nl> expected_oneshot_time_in_nanos ( 0 ) { <nl> mmm a / src / arch / timer . hpp <nl> ppp b / src / arch / timer . hpp <nl> struct timer_callback_t { <nl> virtual ~ timer_callback_t ( ) { } <nl> } ; <nl> <nl> - class timer_token_t : public intrusive_priority_queue_node_t < timer_token_t > { <nl> - friend class timer_handler_t ; <nl> - <nl> - bool is_higher_priority_than ( timer_token_t * competitor ) { <nl> - return next_time_in_nanos < competitor - > next_time_in_nanos ; <nl> - } <nl> - <nl> - private : <nl> - timer_token_t ( ) : interval_nanos ( - 1 ) , next_time_in_nanos ( - 1 ) , callback ( NULL ) { } <nl> - <nl> - / / The time between rings , if a repeating timer , otherwise zero . <nl> - int64_t interval_nanos ; <nl> - <nl> - / / The time of the next ' ring ' . <nl> - int64_t next_time_in_nanos ; <nl> - <nl> - / / The callback we call upon each ' ring ' . <nl> - timer_callback_t * callback ; <nl> - <nl> - DISABLE_COPYING ( timer_token_t ) ; <nl> - } ; <nl> - <nl> / * This timer class uses the underlying OS timer provider to get one - shot timing events . It then <nl> * manages a list of application timers based on that lower level interface . Everyone who needs a <nl> * timer should use this class ( through the thread pool ) . * / <nl>
|
Moved timer_token_t definition to timer.cc.
|
rethinkdb/rethinkdb
|
bd771af1d4bf36117674ac07856e816e61df3ec6
|
2013-01-23T01:10:49Z
|
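Moving timer_token_t out of timer.hpp is the standard trick for shrinking a header's public surface: callers only ever hold a timer_token_t pointer, so a forward declaration suffices, and the full definition becomes a private detail of the .cc file. A minimal sketch of the split, with hypothetical names:

// token.hpp: the public header only forward-declares the opaque token.
class token_t;  // definition intentionally not visible to callers
token_t *add_timer(long interval_ms);
void cancel_timer(token_t *token);

// token.cc: the full definition lives with the implementation.
class token_t {
 public:
  explicit token_t(long interval_ms) : interval_ms_(interval_ms) {}
  long interval_ms() const { return interval_ms_; }

 private:
  long interval_ms_;
};

token_t *add_timer(long interval_ms) { return new token_t(interval_ms); }
void cancel_timer(token_t *token) { delete token; }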
diff - - git a / doc / Text Formatting . html b / doc / Text Formatting . html <nl> mmm a / doc / Text Formatting . html <nl> ppp b / doc / Text Formatting . html <nl> < h3 > < a name = " SyntaxRef " > Format string syntax < / a > < / h3 > <nl> <nl> < p > <nl> A < code > format - spec < / code > field can also include nested replacement fields <nl> - within it . These nested replacement fields can contain only an argument index ; <nl> - format specifications are not allowed . Formatting is performed as if the <nl> - replacement fields within the < code > format - spec < / code > are substituted before <nl> - the format - spec string is interpreted . This allows the formatting of a value <nl> - to be dynamically specified . <nl> + in certain positions within it . These nested replacement fields can contain only <nl> + an argument index ; format specifications are not allowed . This allows the <nl> + formatting of a value to be dynamically specified . <nl> < / p > <nl> <nl> < h4 > < a name = " FormatSpec " > Format specification mini - language < / a > < / h4 > <nl> < h4 > < a name = " FormatSpec " > Format specification mini - language < / a > < / h4 > <nl> < / tbody > <nl> < / table > <nl> <nl> + < p > <nl> + Note that unless a minimum field width is defined , the field width will always <nl> + be the same size as the data to fill it , so that the alignment option has no <nl> + meaning in this case . <nl> + < / p > <nl> + <nl> + < p > <nl> + The < code > sign < / code > option is only valid for number types , and can be one of <nl> + the following : <nl> + < / p > <nl> + <nl> + < table > <nl> + < thead > <nl> + < tr > < th > Option < / th > < th > Meaning < / th > < / tr > <nl> + < / thead > <nl> + < tbody > <nl> + < tr > <nl> + < td > ' + ' < / td > <nl> + < td > Indicates that a sign should be used for both positive as well as negative <nl> + numbers . < / td > <nl> + < / tr > <nl> + < tr > <nl> + < td > ' - ' < / td > <nl> + < td > Indicates that a sign should be used only for negative numbers ( this is <nl> + the default behavior ) . < / td > <nl> + < / tr > <nl> + < tr > <nl> + < td > space < / td > <nl> + < td > Indicates that a leading space should be used on positive numbers , and a <nl> + minus sign on negative numbers . < / td > <nl> + < / tr > <nl> + < / tbody > <nl> + < / table > <nl> + <nl> < p > TODO < / p > <nl> <nl> < h3 > Class < code > format_error < / code > < / h3 > <nl>
|
Update paper
|
fmtlib/fmt
|
3d5125cd87564cdbaaa663bd9e5ab9eb86c2f193
|
2016-08-25T13:50:09Z
|
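The sign table added above maps one-to-one onto format strings. Assuming fmt's public fmt::format API, a quick illustration of all three sign options, plus a nested replacement field supplying a dynamic width (expected output in the comments):

#include <fmt/format.h>
#include <cstdio>

int main() {
  std::puts(fmt::format("{:+}", 42).c_str());   // "+42"  '+' : always show a sign
  std::puts(fmt::format("{:-}", 42).c_str());   // "42"   '-' : negatives only (default)
  std::puts(fmt::format("{: }", 42).c_str());   // " 42"  space: blank for positives
  std::puts(fmt::format("{: }", -42).c_str());  // "-42"
  // Nested replacement field: the minimum width is taken from the next argument.
  std::puts(fmt::format("{:{}}", 42, 6).c_str());  // "    42"
}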
mmm a / documentation / sphinx / source / command - line - interface . rst <nl> ppp b / documentation / sphinx / source / command - line - interface . rst <nl> status json <nl> <nl> ` ` status json ` ` will provide the cluster status in its JSON format . For a detailed description of this format , see : doc : ` mr - status ` . <nl> <nl> + triggerddteaminfolog <nl> + mmmmmmmmmmmmmmmmmm - - <nl> + <nl> + The ` ` triggerddteaminfolog ` ` command would trigger the data distributor to log very detailed teams information into trace event logs . <nl> + <nl> unlock <nl> mmmmmm <nl> <nl> mmm a / documentation / sphinx / source / release - notes / release - notes - 620 . rst <nl> ppp b / documentation / sphinx / source / release - notes / release - notes - 620 . rst <nl> Release Notes <nl> * Fix invalid memory access on data distributor when snapshotting large clusters . ` ( PR # 4076 ) < https : / / github . com / apple / foundationdb / pull / 4076 > ` _ <nl> * Add human - readable DateTime to trace events ` ( PR # 4087 ) < https : / / github . com / apple / foundationdb / pull / 4087 > ` _ <nl> * Proxy rejects transaction batch that exceeds MVCC window ` ( PR # 4113 ) < https : / / github . com / apple / foundationdb / pull / 4113 > ` _ <nl> + * Add a command in fdbcli to manually trigger the detailed teams information loggings in data distribution . ` ( PR # 4060 ) < https : / / github . com / apple / foundationdb / pull / 4060 > ` _ <nl> <nl> 6 . 2 . 28 <nl> = = = = = = <nl> mmm a / fdbcli / fdbcli . actor . cpp <nl> ppp b / fdbcli / fdbcli . actor . cpp <nl> void initHelp ( ) { <nl> CommandHelp ( " unlock < UID > " , " unlock the database with the provided lockUID " , <nl> " Unlocks the database with the provided lockUID . This is a potentially dangerous operation , so the " <nl> " user will be asked to enter a passphrase to confirm their intent . " ) ; <nl> + helpMap [ " triggerddteaminfolog " ] = <nl> + CommandHelp ( " triggerddteaminfolog " , " trigger the data distributor teams logging " , <nl> + " Trigger the data distributor to log detailed information about its teams . " ) ; <nl> <nl> hiddenCommands . insert ( " expensive_data_check " ) ; <nl> hiddenCommands . insert ( " datadistribution " ) ; <nl> int printStatusFromJSON ( std : : string const & jsonFileName ) { <nl> } <nl> } <nl> <nl> + ACTOR Future < Void > triggerDDTeamInfoLog ( Database db ) { <nl> + state ReadYourWritesTransaction tr ( db ) ; <nl> + loop { <nl> + try { <nl> + tr . setOption ( FDBTransactionOptions : : ACCESS_SYSTEM_KEYS ) ; <nl> + tr . setOption ( FDBTransactionOptions : : PRIORITY_SYSTEM_IMMEDIATE ) ; <nl> + std : : string v = deterministicRandom ( ) - > randomUniqueID ( ) . toString ( ) ; <nl> + tr . set ( triggerDDTeamInfoPrintKey , v ) ; <nl> + wait ( tr . commit ( ) ) ; <nl> + printf ( " Triggered team info logging in data distribution . \ n " ) ; <nl> + return Void ( ) ; <nl> + } catch ( Error & e ) { <nl> + wait ( tr . 
onError ( e ) ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> ACTOR Future < Void > timeWarning ( double when , const char * msg ) { <nl> wait ( delay ( when ) ) ; <nl> fputs ( msg , stderr ) ; <nl> ACTOR Future < int > cli ( CLIOptions opt , LineNoise * plinenoise ) { <nl> continue ; <nl> } <nl> <nl> + if ( tokencmp ( tokens [ 0 ] , " triggerddteaminfolog " ) ) { <nl> + wait ( triggerDDTeamInfoLog ( db ) ) ; <nl> + continue ; <nl> + } <nl> + <nl> if ( tokencmp ( tokens [ 0 ] , " configure " ) ) { <nl> bool err = wait ( configure ( db , tokens , db - > getConnectionFile ( ) , & linenoise , warn ) ) ; <nl> if ( err ) is_error = true ; <nl> mmm a / fdbclient / SystemData . cpp <nl> ppp b / fdbclient / SystemData . cpp <nl> ProcessClass decodeProcessClassValue ( ValueRef const & value ) { <nl> const KeyRangeRef configKeys ( LiteralStringRef ( " \ xff / conf / " ) , LiteralStringRef ( " \ xff / conf0 " ) ) ; <nl> const KeyRef configKeysPrefix = configKeys . begin ; <nl> <nl> + const KeyRef triggerDDTeamInfoPrintKey ( LiteralStringRef ( " \ xff / triggerDDTeamInfoPrint " ) ) ; <nl> + <nl> const KeyRangeRef excludedServersKeys ( LiteralStringRef ( " \ xff / conf / excluded / " ) , LiteralStringRef ( " \ xff / conf / excluded0 " ) ) ; <nl> const KeyRef excludedServersPrefix = excludedServersKeys . begin ; <nl> const KeyRef excludedServersVersionKey = LiteralStringRef ( " \ xff / conf / excluded " ) ; <nl> mmm a / fdbclient / SystemData . h <nl> ppp b / fdbclient / SystemData . h <nl> UID decodeProcessClassKeyOld ( KeyRef const & key ) ; <nl> extern const KeyRangeRef configKeys ; <nl> extern const KeyRef configKeysPrefix ; <nl> <nl> + / / Change the value of this key to anything and that will trigger detailed data distribution team info log . <nl> + extern const KeyRef triggerDDTeamInfoPrintKey ; <nl> + <nl> / / " \ xff / conf / excluded / 1 . 2 . 3 . 4 " : = " " <nl> / / " \ xff / conf / excluded / 1 . 2 . 3 . 4 : 4000 " : = " " <nl> / / These are inside configKeysPrefix since they represent a form of configuration and they are convenient <nl> mmm a / fdbserver / DataDistribution . actor . cpp <nl> ppp b / fdbserver / DataDistribution . actor . cpp <nl> ACTOR Future < Void > printSnapshotTeamsInfo ( Reference < DDTeamCollection > self ) { <nl> state int traceEventsPrinted = 0 ; <nl> state std : : vector < const UID * > serverIDs ; <nl> state double lastPrintTime = 0 ; <nl> + state ReadYourWritesTransaction tr ( self - > cx ) ; <nl> loop { <nl> - wait ( self - > printDetailedTeamsInfo . onTrigger ( ) ) ; <nl> - if ( now ( ) - lastPrintTime < SERVER_KNOBS - > DD_TEAMS_INFO_PRINT_INTERVAL ) { <nl> - continue ; <nl> - } <nl> - lastPrintTime = now ( ) ; <nl> - <nl> - traceEventsPrinted = 0 ; <nl> + try { <nl> + tr . setOption ( FDBTransactionOptions : : ACCESS_SYSTEM_KEYS ) ; <nl> + state Future < Void > watchFuture = tr . watch ( triggerDDTeamInfoPrintKey ) ; <nl> + wait ( tr . commit ( ) ) ; <nl> + wait ( self - > printDetailedTeamsInfo . onTrigger ( ) | | watchFuture ) ; <nl> + tr . 
reset ( ) ; <nl> + if ( now ( ) - lastPrintTime < SERVER_KNOBS - > DD_TEAMS_INFO_PRINT_INTERVAL ) { <nl> + continue ; <nl> + } <nl> + lastPrintTime = now ( ) ; <nl> <nl> - double snapshotStart = now ( ) ; <nl> + traceEventsPrinted = 0 ; <nl> <nl> - configuration = self - > configuration ; <nl> - server_info = self - > server_info ; <nl> - teams = self - > teams ; <nl> - machine_info = self - > machine_info ; <nl> - machineTeams = self - > machineTeams ; <nl> - / / internedLocalityRecordKeyNameStrings = self - > machineLocalityMap . _keymap - > _lookuparray ; <nl> - / / machineLocalityMapEntryArraySize = self - > machineLocalityMap . size ( ) ; <nl> - / / machineLocalityMapRecordArray = self - > machineLocalityMap . getRecordArray ( ) ; <nl> - std : : vector < const UID * > _uids = self - > machineLocalityMap . getObjects ( ) ; <nl> - serverIDs = _uids ; <nl> + double snapshotStart = now ( ) ; <nl> <nl> - auto const & keys = self - > server_status . getKeys ( ) ; <nl> - for ( auto const & key : keys ) { <nl> - server_status . emplace ( key , self - > server_status . get ( key ) ) ; <nl> - } <nl> + configuration = self - > configuration ; <nl> + server_info = self - > server_info ; <nl> + teams = self - > teams ; <nl> + machine_info = self - > machine_info ; <nl> + machineTeams = self - > machineTeams ; <nl> + / / internedLocalityRecordKeyNameStrings = self - > machineLocalityMap . _keymap - > _lookuparray ; <nl> + / / machineLocalityMapEntryArraySize = self - > machineLocalityMap . size ( ) ; <nl> + / / machineLocalityMapRecordArray = self - > machineLocalityMap . getRecordArray ( ) ; <nl> + std : : vector < const UID * > _uids = self - > machineLocalityMap . getObjects ( ) ; <nl> + serverIDs = _uids ; <nl> <nl> - TraceEvent ( " DDPrintSnapshotTeasmInfo " , self - > distributorId ) <nl> - . detail ( " SnapshotSpeed " , now ( ) - snapshotStart ) <nl> - . detail ( " Primary " , self - > primary ) ; <nl> + auto const & keys = self - > server_status . getKeys ( ) ; <nl> + for ( auto const & key : keys ) { <nl> + server_status . emplace ( key , self - > server_status . get ( key ) ) ; <nl> + } <nl> <nl> - / / Print to TraceEvents <nl> - TraceEvent ( " DDConfig " , self - > distributorId ) <nl> - . detail ( " StorageTeamSize " , configuration . storageTeamSize ) <nl> - . detail ( " DesiredTeamsPerServer " , SERVER_KNOBS - > DESIRED_TEAMS_PER_SERVER ) <nl> - . detail ( " MaxTeamsPerServer " , SERVER_KNOBS - > MAX_TEAMS_PER_SERVER ) <nl> - . detail ( " Primary " , self - > primary ) ; <nl> - <nl> - TraceEvent ( " ServerInfo " , self - > distributorId ) <nl> - . detail ( " Size " , server_info . size ( ) ) <nl> - . detail ( " Primary " , self - > primary ) ; <nl> - state int i ; <nl> - state std : : map < UID , Reference < TCServerInfo > > : : iterator server = server_info . begin ( ) ; <nl> - for ( i = 0 ; i < server_info . size ( ) ; i + + ) { <nl> - TraceEvent ( " ServerInfo " , self - > distributorId ) <nl> - . detail ( " ServerInfoIndex " , i ) <nl> - . detail ( " ServerID " , server - > first . toString ( ) ) <nl> - . detail ( " ServerTeamOwned " , server - > second - > teams . size ( ) ) <nl> - . detail ( " MachineID " , server - > second - > machine - > machineID . contents ( ) . toString ( ) ) <nl> + TraceEvent ( " DDPrintSnapshotTeasmInfo " , self - > distributorId ) <nl> + . detail ( " SnapshotSpeed " , now ( ) - snapshotStart ) <nl> . 
detail ( " Primary " , self - > primary ) ; <nl> - server + + ; <nl> - if ( + + traceEventsPrinted % SERVER_KNOBS - > DD_TEAMS_INFO_PRINT_YIELD_COUNT = = 0 ) { <nl> - wait ( yield ( ) ) ; <nl> - } <nl> - } <nl> <nl> - server = server_info . begin ( ) ; <nl> - for ( i = 0 ; i < server_info . size ( ) ; i + + ) { <nl> - const UID & uid = server - > first ; <nl> - TraceEvent ( " ServerStatus " , self - > distributorId ) <nl> - . detail ( " ServerUID " , uid ) <nl> - . detail ( " Healthy " , ! server_status . at ( uid ) . isUnhealthy ( ) ) <nl> - . detail ( " MachineIsValid " , server_info [ uid ] - > machine . isValid ( ) ) <nl> - . detail ( " MachineTeamSize " , <nl> - server_info [ uid ] - > machine . isValid ( ) ? server_info [ uid ] - > machine - > machineTeams . size ( ) : - 1 ) <nl> + / / Print to TraceEvents <nl> + TraceEvent ( " DDConfig " , self - > distributorId ) <nl> + . detail ( " StorageTeamSize " , configuration . storageTeamSize ) <nl> + . detail ( " DesiredTeamsPerServer " , SERVER_KNOBS - > DESIRED_TEAMS_PER_SERVER ) <nl> + . detail ( " MaxTeamsPerServer " , SERVER_KNOBS - > MAX_TEAMS_PER_SERVER ) <nl> . detail ( " Primary " , self - > primary ) ; <nl> - server + + ; <nl> - if ( + + traceEventsPrinted % SERVER_KNOBS - > DD_TEAMS_INFO_PRINT_YIELD_COUNT = = 0 ) { <nl> - wait ( yield ( ) ) ; <nl> - } <nl> - } <nl> <nl> - TraceEvent ( " ServerTeamInfo " , self - > distributorId ) . detail ( " Size " , teams . size ( ) ) . detail ( " Primary " , self - > primary ) ; <nl> - for ( i = 0 ; i < teams . size ( ) ; i + + ) { <nl> - const auto & team = teams [ i ] ; <nl> - TraceEvent ( " ServerTeamInfo " , self - > distributorId ) <nl> - . detail ( " TeamIndex " , i ) <nl> - . detail ( " Healthy " , team - > isHealthy ( ) ) <nl> - . detail ( " TeamSize " , team - > size ( ) ) <nl> - . detail ( " MemberIDs " , team - > getServerIDsStr ( ) ) <nl> + TraceEvent ( " ServerInfo " , self - > distributorId ) <nl> + . detail ( " Size " , server_info . size ( ) ) <nl> . detail ( " Primary " , self - > primary ) ; <nl> - if ( + + traceEventsPrinted % SERVER_KNOBS - > DD_TEAMS_INFO_PRINT_YIELD_COUNT = = 0 ) { <nl> - wait ( yield ( ) ) ; <nl> + state int i ; <nl> + state std : : map < UID , Reference < TCServerInfo > > : : iterator server = server_info . begin ( ) ; <nl> + for ( i = 0 ; i < server_info . size ( ) ; i + + ) { <nl> + TraceEvent ( " ServerInfo " , self - > distributorId ) <nl> + . detail ( " ServerInfoIndex " , i ) <nl> + . detail ( " ServerID " , server - > first . toString ( ) ) <nl> + . detail ( " ServerTeamOwned " , server - > second - > teams . size ( ) ) <nl> + . detail ( " MachineID " , server - > second - > machine - > machineID . contents ( ) . toString ( ) ) <nl> + . detail ( " Primary " , self - > primary ) ; <nl> + server + + ; <nl> + if ( + + traceEventsPrinted % SERVER_KNOBS - > DD_TEAMS_INFO_PRINT_YIELD_COUNT = = 0 ) { <nl> + wait ( yield ( ) ) ; <nl> + } <nl> } <nl> - } <nl> <nl> - TraceEvent ( " MachineInfo " , self - > distributorId ) <nl> - . detail ( " Size " , machine_info . size ( ) ) <nl> - . detail ( " Primary " , self - > primary ) ; <nl> - state std : : map < Standalone < StringRef > , Reference < TCMachineInfo > > : : iterator machine = machine_info . begin ( ) ; <nl> - state bool isMachineHealthy = false ; <nl> - for ( i = 0 ; i < machine_info . size ( ) ; i + + ) { <nl> - Reference < TCMachineInfo > _machine = machine - > second ; <nl> - if ( ! _machine . isValid ( ) | | machine_info . find ( _machine - > machineID ) = = machine_info . 
end ( ) | | <nl> - _machine - > serversOnMachine . empty ( ) ) { <nl> - isMachineHealthy = false ; <nl> + server = server_info . begin ( ) ; <nl> + for ( i = 0 ; i < server_info . size ( ) ; i + + ) { <nl> + const UID & uid = server - > first ; <nl> + TraceEvent ( " ServerStatus " , self - > distributorId ) <nl> + . detail ( " ServerUID " , uid ) <nl> + . detail ( " Healthy " , ! server_status . at ( uid ) . isUnhealthy ( ) ) <nl> + . detail ( " MachineIsValid " , server_info [ uid ] - > machine . isValid ( ) ) <nl> + . detail ( " MachineTeamSize " , <nl> + server_info [ uid ] - > machine . isValid ( ) ? server_info [ uid ] - > machine - > machineTeams . size ( ) : - 1 ) <nl> + . detail ( " Primary " , self - > primary ) ; <nl> + server + + ; <nl> + if ( + + traceEventsPrinted % SERVER_KNOBS - > DD_TEAMS_INFO_PRINT_YIELD_COUNT = = 0 ) { <nl> + wait ( yield ( ) ) ; <nl> + } <nl> } <nl> <nl> - / / Healthy machine has at least one healthy server <nl> - for ( auto & server : _machine - > serversOnMachine ) { <nl> - if ( ! server_status . at ( server - > id ) . isUnhealthy ( ) ) { <nl> - isMachineHealthy = true ; <nl> + TraceEvent ( " ServerTeamInfo " , self - > distributorId ) <nl> + . detail ( " Size " , teams . size ( ) ) <nl> + . detail ( " Primary " , self - > primary ) ; <nl> + for ( i = 0 ; i < teams . size ( ) ; i + + ) { <nl> + const auto & team = teams [ i ] ; <nl> + TraceEvent ( " ServerTeamInfo " , self - > distributorId ) <nl> + . detail ( " TeamIndex " , i ) <nl> + . detail ( " Healthy " , team - > isHealthy ( ) ) <nl> + . detail ( " TeamSize " , team - > size ( ) ) <nl> + . detail ( " MemberIDs " , team - > getServerIDsStr ( ) ) <nl> + . detail ( " Primary " , self - > primary ) ; <nl> + if ( + + traceEventsPrinted % SERVER_KNOBS - > DD_TEAMS_INFO_PRINT_YIELD_COUNT = = 0 ) { <nl> + wait ( yield ( ) ) ; <nl> } <nl> } <nl> <nl> - isMachineHealthy = false ; <nl> TraceEvent ( " MachineInfo " , self - > distributorId ) <nl> - . detail ( " MachineInfoIndex " , i ) <nl> - . detail ( " Healthy " , isMachineHealthy ) <nl> - . detail ( " MachineID " , machine - > first . contents ( ) . toString ( ) ) <nl> - . detail ( " MachineTeamOwned " , machine - > second - > machineTeams . size ( ) ) <nl> - . detail ( " ServerNumOnMachine " , machine - > second - > serversOnMachine . size ( ) ) <nl> - . detail ( " ServersID " , machine - > second - > getServersIDStr ( ) ) <nl> + . detail ( " Size " , machine_info . size ( ) ) <nl> . detail ( " Primary " , self - > primary ) ; <nl> - machine + + ; <nl> - if ( + + traceEventsPrinted % SERVER_KNOBS - > DD_TEAMS_INFO_PRINT_YIELD_COUNT = = 0 ) { <nl> - wait ( yield ( ) ) ; <nl> + state std : : map < Standalone < StringRef > , Reference < TCMachineInfo > > : : iterator machine = machine_info . begin ( ) ; <nl> + state bool isMachineHealthy = false ; <nl> + for ( i = 0 ; i < machine_info . size ( ) ; i + + ) { <nl> + Reference < TCMachineInfo > _machine = machine - > second ; <nl> + if ( ! _machine . isValid ( ) | | machine_info . find ( _machine - > machineID ) = = machine_info . end ( ) | | <nl> + _machine - > serversOnMachine . empty ( ) ) { <nl> + isMachineHealthy = false ; <nl> + } <nl> + <nl> + / / Healthy machine has at least one healthy server <nl> + for ( auto & server : _machine - > serversOnMachine ) { <nl> + if ( ! server_status . at ( server - > id ) . isUnhealthy ( ) ) { <nl> + isMachineHealthy = true ; <nl> + } <nl> + } <nl> + <nl> + isMachineHealthy = false ; <nl> + TraceEvent ( " MachineInfo " , self - > distributorId ) <nl> + . 
detail ( " MachineInfoIndex " , i ) <nl> + . detail ( " Healthy " , isMachineHealthy ) <nl> + . detail ( " MachineID " , machine - > first . contents ( ) . toString ( ) ) <nl> + . detail ( " MachineTeamOwned " , machine - > second - > machineTeams . size ( ) ) <nl> + . detail ( " ServerNumOnMachine " , machine - > second - > serversOnMachine . size ( ) ) <nl> + . detail ( " ServersID " , machine - > second - > getServersIDStr ( ) ) <nl> + . detail ( " Primary " , self - > primary ) ; <nl> + machine + + ; <nl> + if ( + + traceEventsPrinted % SERVER_KNOBS - > DD_TEAMS_INFO_PRINT_YIELD_COUNT = = 0 ) { <nl> + wait ( yield ( ) ) ; <nl> + } <nl> } <nl> - } <nl> <nl> - TraceEvent ( " MachineTeamInfo " , self - > distributorId ) <nl> - . detail ( " Size " , machineTeams . size ( ) ) <nl> - . detail ( " Primary " , self - > primary ) ; <nl> - for ( i = 0 ; i < machineTeams . size ( ) ; i + + ) { <nl> - const auto & team = machineTeams [ i ] ; <nl> TraceEvent ( " MachineTeamInfo " , self - > distributorId ) <nl> - . detail ( " TeamIndex " , i ) <nl> - . detail ( " MachineIDs " , team - > getMachineIDsStr ( ) ) <nl> - . detail ( " ServerTeams " , team - > serverTeams . size ( ) ) <nl> + . detail ( " Size " , machineTeams . size ( ) ) <nl> . detail ( " Primary " , self - > primary ) ; <nl> - if ( + + traceEventsPrinted % SERVER_KNOBS - > DD_TEAMS_INFO_PRINT_YIELD_COUNT = = 0 ) { <nl> - wait ( yield ( ) ) ; <nl> - } <nl> - } <nl> - <nl> - / / TODO : re - enable the following logging or remove them . <nl> - / / TraceEvent ( " LocalityRecordKeyName " , self - > distributorId ) <nl> - / / . detail ( " Size " , internedLocalityRecordKeyNameStrings . size ( ) ) <nl> - / / . detail ( " Primary " , self - > primary ) ; <nl> - / / for ( i = 0 ; i < internedLocalityRecordKeyNameStrings . size ( ) ; i + + ) { <nl> - / / TraceEvent ( " LocalityRecordKeyIndexName " , self - > distributorId ) <nl> - / / . detail ( " KeyIndex " , i ) <nl> - / / . detail ( " KeyName " , internedLocalityRecordKeyNameStrings [ i ] ) <nl> - / / . detail ( " Primary " , self - > primary ) ; <nl> - / / if ( + + traceEventsPrinted % SERVER_KNOBS - > DD_TEAMS_INFO_PRINT_YIELD_COUNT = = 0 ) { <nl> - / / wait ( yield ( ) ) ; <nl> - / / } <nl> - / / } <nl> + for ( i = 0 ; i < machineTeams . size ( ) ; i + + ) { <nl> + const auto & team = machineTeams [ i ] ; <nl> + TraceEvent ( " MachineTeamInfo " , self - > distributorId ) <nl> + . detail ( " TeamIndex " , i ) <nl> + . detail ( " MachineIDs " , team - > getMachineIDsStr ( ) ) <nl> + . detail ( " ServerTeams " , team - > serverTeams . size ( ) ) <nl> + . detail ( " Primary " , self - > primary ) ; <nl> + if ( + + traceEventsPrinted % SERVER_KNOBS - > DD_TEAMS_INFO_PRINT_YIELD_COUNT = = 0 ) { <nl> + wait ( yield ( ) ) ; <nl> + } <nl> + } <nl> <nl> - / / TraceEvent ( " MachineLocalityMap " , self - > distributorId ) <nl> - / / . detail ( " Size " , machineLocalityMapEntryArraySize ) <nl> - / / . detail ( " Primary " , self - > primary ) ; <nl> - / / for ( i = 0 ; i < serverIDs . size ( ) ; i + + ) { <nl> - / / const auto & serverID = serverIDs [ i ] ; <nl> - / / Reference < LocalityRecord > record = machineLocalityMapRecordArray [ i ] ; <nl> - / / if ( record . isValid ( ) ) { <nl> - / / TraceEvent ( " MachineLocalityMap " , self - > distributorId ) <nl> - / / . detail ( " LocalityIndex " , i ) <nl> - / / . detail ( " UID " , serverID - > toString ( ) ) <nl> - / / . detail ( " LocalityRecord " , record - > toString ( ) ) <nl> - / / . 
detail ( " Primary " , self - > primary ) ; <nl> - / / } else { <nl> - / / TraceEvent ( " MachineLocalityMap " , self - > distributorId ) <nl> - / / . detail ( " LocalityIndex " , i ) <nl> - / / . detail ( " UID " , serverID - > toString ( ) ) <nl> - / / . detail ( " LocalityRecord " , " [ NotFound ] " ) <nl> - / / . detail ( " Primary " , self - > primary ) ; <nl> - / / } <nl> - / / if ( + + traceEventsPrinted % SERVER_KNOBS - > DD_TEAMS_INFO_PRINT_YIELD_COUNT = = 0 ) { <nl> - / / wait ( yield ( ) ) ; <nl> - / / } <nl> - / / } <nl> + / / TODO : re - enable the following logging or remove them . <nl> + / / TraceEvent ( " LocalityRecordKeyName " , self - > distributorId ) <nl> + / / . detail ( " Size " , internedLocalityRecordKeyNameStrings . size ( ) ) <nl> + / / . detail ( " Primary " , self - > primary ) ; <nl> + / / for ( i = 0 ; i < internedLocalityRecordKeyNameStrings . size ( ) ; i + + ) { <nl> + / / TraceEvent ( " LocalityRecordKeyIndexName " , self - > distributorId ) <nl> + / / . detail ( " KeyIndex " , i ) <nl> + / / . detail ( " KeyName " , internedLocalityRecordKeyNameStrings [ i ] ) <nl> + / / . detail ( " Primary " , self - > primary ) ; <nl> + / / if ( + + traceEventsPrinted % SERVER_KNOBS - > DD_TEAMS_INFO_PRINT_YIELD_COUNT = = 0 ) { <nl> + / / wait ( yield ( ) ) ; <nl> + / / } <nl> + / / } <nl> + <nl> + / / TraceEvent ( " MachineLocalityMap " , self - > distributorId ) <nl> + / / . detail ( " Size " , machineLocalityMapEntryArraySize ) <nl> + / / . detail ( " Primary " , self - > primary ) ; <nl> + / / for ( i = 0 ; i < serverIDs . size ( ) ; i + + ) { <nl> + / / const auto & serverID = serverIDs [ i ] ; <nl> + / / Reference < LocalityRecord > record = machineLocalityMapRecordArray [ i ] ; <nl> + / / if ( record . isValid ( ) ) { <nl> + / / TraceEvent ( " MachineLocalityMap " , self - > distributorId ) <nl> + / / . detail ( " LocalityIndex " , i ) <nl> + / / . detail ( " UID " , serverID - > toString ( ) ) <nl> + / / . detail ( " LocalityRecord " , record - > toString ( ) ) <nl> + / / . detail ( " Primary " , self - > primary ) ; <nl> + / / } else { <nl> + / / TraceEvent ( " MachineLocalityMap " , self - > distributorId ) <nl> + / / . detail ( " LocalityIndex " , i ) <nl> + / / . detail ( " UID " , serverID - > toString ( ) ) <nl> + / / . detail ( " LocalityRecord " , " [ NotFound ] " ) <nl> + / / . detail ( " Primary " , self - > primary ) ; <nl> + / / } <nl> + / / if ( + + traceEventsPrinted % SERVER_KNOBS - > DD_TEAMS_INFO_PRINT_YIELD_COUNT = = 0 ) { <nl> + / / wait ( yield ( ) ) ; <nl> + / / } <nl> + / / } <nl> + } catch ( Error & e ) { <nl> + wait ( tr . onError ( e ) ) ; <nl> + } <nl> } <nl> } <nl> <nl>
|
Merge pull request from dongxinEric/feature/allow-manually-trigger-dd-teams-info-logging
|
apple/foundationdb
|
03de3ac899cfa7954755ce4d6d345b4d9224d769
|
2020-12-22T19:04:49Z
|
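The mechanism above is a trigger-by-write: fdbcli writes a fresh random value to triggerDDTeamInfoPrintKey, and the data distributor's watch on that key fires and wakes the logging loop. Reduced to a standalone sketch, with a condition variable standing in for FoundationDB's key watches and hypothetical names throughout:

#include <chrono>
#include <condition_variable>
#include <cstdint>
#include <iostream>
#include <mutex>
#include <thread>

std::mutex mu;
std::condition_variable cv;
uint64_t generation = 0;  // stands in for the value stored at the watched key

// "fdbcli" side: writing any new value is enough to fire the watch.
void TriggerTeamInfoLog() {
  { std::lock_guard<std::mutex> lock(mu); ++generation; }
  cv.notify_all();
}

// "data distributor" side: block until the watched value changes, then log.
void WatcherLoop(int rounds) {
  uint64_t seen = 0;
  for (int i = 0; i < rounds; ++i) {
    std::unique_lock<std::mutex> lock(mu);
    cv.wait(lock, [&] { return generation != seen; });
    seen = generation;
    std::cout << "logging detailed team info\n";
  }
}

int main() {
  std::thread watcher(WatcherLoop, 2);
  TriggerTeamInfoLog();
  std::this_thread::sleep_for(std::chrono::milliseconds(50));
  TriggerTeamInfoLog();
  watcher.join();
}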
mmm a / tensorflow / core / BUILD <nl> ppp b / tensorflow / core / BUILD <nl> LIB_INTERNAL_PUBLIC_HEADERS = tf_additional_lib_hdrs ( ) + [ <nl> " platform / tracing . h " , <nl> ] <nl> <nl> + # Replicated for lib_internal and lib_internal_impl . <nl> + LIB_INTERNAL_DEFINES = ( tf_additional_lib_defines ( ) + [ <nl> + " TF_USE_SNAPPY " , <nl> + ] + tf_additional_verbs_lib_defines ( ) + <nl> + tf_additional_mpi_lib_defines ( ) + <nl> + tf_additional_gdr_lib_defines ( ) ) <nl> + <nl> cc_library ( <nl> name = " lib_internal " , <nl> srcs = LIB_INTERNAL_PRIVATE_HEADERS , <nl> hdrs = LIB_INTERNAL_PUBLIC_HEADERS , <nl> copts = tf_copts ( ) , <nl> - defines = tf_additional_lib_defines ( ) + [ <nl> - " TF_USE_SNAPPY " , <nl> - ] + tf_additional_verbs_lib_defines ( ) + <nl> - tf_additional_mpi_lib_defines ( ) + <nl> - tf_additional_gdr_lib_defines ( ) , <nl> + defines = LIB_INTERNAL_DEFINES , <nl> linkopts = select ( { <nl> " / / tensorflow : freebsd " : [ ] , <nl> " / / tensorflow : windows " : [ ] , <nl> cc_library ( <nl> ) , <nl> hdrs = LIB_INTERNAL_PUBLIC_HEADERS , <nl> copts = tf_copts ( ) , <nl> + defines = LIB_INTERNAL_DEFINES , <nl> deps = tf_additional_lib_deps ( ) + [ <nl> " : lib_hash_crc32c_accelerate_internal " , <nl> " : lib_proto_parsing " , <nl>
|
Apply lib_internal defines to both lib_internal and lib_internal_impl
|
tensorflow/tensorflow
|
8fdcecc8018682eef1386beb3ae1bdcf137c5c1c
|
2017-11-04T00:55:52Z
|
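The Bazel change above hoists the shared list into LIB_INTERNAL_DEFINES so that lib_internal and lib_internal_impl are guaranteed to compile with identical preprocessor flags. The hazard it removes: if a define such as TF_USE_SNAPPY is present for one target but missing from the other, the two targets compile different code for nominally the same feature. A toy C++ illustration of that failure mode (hypothetical function, not TensorFlow's actual code):

// Suppose this function is compiled into both an interface library and an
// implementation library. If only one of them is built with -DTF_USE_SNAPPY,
// the two silently disagree about whether compression is available.
bool SnappyCompressionEnabled() {
#ifdef TF_USE_SNAPPY
  return true;   // the "real" path, gated on the build define
#else
  return false;  // the fallback path
#endif
}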
mmm a / dbms / src / Functions / CMakeLists . txt <nl> ppp b / dbms / src / Functions / CMakeLists . txt <nl> generate_function_register ( Array <nl> FunctionCountEqual <nl> FunctionArrayEnumerate <nl> FunctionArrayEnumerateUniq <nl> + FunctionArrayEnumerateDense <nl> FunctionArrayUniq <nl> FunctionArrayDistinct <nl> FunctionEmptyArrayUInt8 <nl> mmm a / dbms / src / Functions / FunctionsArray . cpp <nl> ppp b / dbms / src / Functions / FunctionsArray . cpp <nl> void FunctionArrayDistinct : : executeHashed ( <nl> } <nl> } <nl> <nl> - / / / Implementation of FunctionArrayEnumerateUniq . <nl> + / / / Implementation of FunctionArrayEnumerateExtended . <nl> <nl> - FunctionPtr FunctionArrayEnumerateUniq : : create ( const Context & ) <nl> + template < typename Derived > <nl> + FunctionPtr FunctionArrayEnumerateExtended < Derived > : : create ( const Context & ) <nl> { <nl> - return std : : make_shared < FunctionArrayEnumerateUniq > ( ) ; <nl> + return std : : make_shared < Derived > ( ) ; <nl> } <nl> <nl> - String FunctionArrayEnumerateUniq : : getName ( ) const <nl> + template < typename Derived > <nl> + String FunctionArrayEnumerateExtended < Derived > : : getName ( ) const <nl> { <nl> - return name ; <nl> + return Derived : : name ; <nl> } <nl> <nl> - DataTypePtr FunctionArrayEnumerateUniq : : getReturnTypeImpl ( const DataTypes & arguments ) const <nl> + template < typename Derived > <nl> + DataTypePtr FunctionArrayEnumerateExtended < Derived > : : getReturnTypeImpl ( const DataTypes & arguments ) const <nl> { <nl> if ( arguments . size ( ) = = 0 ) <nl> throw Exception ( " Number of arguments for function " + getName ( ) + " doesn ' t match : passed " <nl> DataTypePtr FunctionArrayEnumerateUniq : : getReturnTypeImpl ( const DataTypes & argu <nl> return std : : make_shared < DataTypeArray > ( std : : make_shared < DataTypeUInt32 > ( ) ) ; <nl> } <nl> <nl> - void FunctionArrayEnumerateUniq : : executeImpl ( Block & block , const ColumnNumbers & arguments , size_t result , size_t / * input_rows_count * / ) <nl> + template < typename Derived > <nl> + void FunctionArrayEnumerateExtended < Derived > : : executeImpl ( Block & block , const ColumnNumbers & arguments , size_t result , size_t / * input_rows_count * / ) <nl> { <nl> const ColumnArray : : Offsets * offsets = nullptr ; <nl> ColumnRawPtrs data_columns ; <nl> void FunctionArrayEnumerateUniq : : executeImpl ( Block & block , const ColumnNumbers <nl> } <nl> <nl> <nl> + template < typename Derived > <nl> template < typename T > <nl> - bool FunctionArrayEnumerateUniq : : executeNumber ( const ColumnArray * array , const IColumn * null_map , ColumnUInt32 : : Container & res_values ) <nl> + bool FunctionArrayEnumerateExtended < Derived > : : executeNumber ( const ColumnArray * array , const IColumn * null_map , ColumnUInt32 : : Container & res_values ) <nl> { <nl> const IColumn * inner_col ; <nl> <nl> bool FunctionArrayEnumerateUniq : : executeNumber ( const ColumnArray * array , const <nl> <nl> ValuesToIndices indices ; <nl> size_t prev_off = 0 ; <nl> - for ( size_t i = 0 ; i < offsets . size ( ) ; + + i ) <nl> + if constexpr ( std : : is_same_v < Derived , FunctionArrayEnumerateUniq > ) <nl> { <nl> - indices . clear ( ) ; <nl> - UInt32 null_count = 0 ; <nl> - size_t off = offsets [ i ] ; <nl> - for ( size_t j = prev_off ; j < off ; + + j ) <nl> + / / Unique <nl> + for ( size_t i = 0 ; i < offsets . 
size ( ) ; + + i ) <nl> { <nl> - if ( null_map_data & & ( ( * null_map_data ) [ j ] = = 1 ) ) <nl> - res_values [ j ] = + + null_count ; <nl> - else <nl> - res_values [ j ] = + + indices [ values [ j ] ] ; <nl> + indices . clear ( ) ; <nl> + UInt32 null_count = 0 ; <nl> + size_t off = offsets [ i ] ; <nl> + for ( size_t j = prev_off ; j < off ; + + j ) <nl> + { <nl> + if ( null_map_data & & ( ( * null_map_data ) [ j ] = = 1 ) ) <nl> + res_values [ j ] = + + null_count ; <nl> + else <nl> + res_values [ j ] = + + indices [ values [ j ] ] ; <nl> + } <nl> + prev_off = off ; <nl> + } <nl> + } else { <nl> + / / Dense <nl> + for ( size_t i = 0 ; i < offsets . size ( ) ; + + i ) <nl> + { <nl> + indices . clear ( ) ; <nl> + size_t rank = 0 ; <nl> + UInt32 null_index = 0 ; <nl> + size_t off = offsets [ i ] ; <nl> + for ( size_t j = prev_off ; j < off ; + + j ) <nl> + { <nl> + if ( null_map_data & & ( ( * null_map_data ) [ j ] = = 1 ) ) <nl> + { <nl> + if ( ! null_index ) <nl> + null_index = + + rank ; <nl> + res_values [ j ] = null_index ; <nl> + } <nl> + else <nl> + { <nl> + auto & idx = indices [ values [ j ] ] ; <nl> + if ( ! idx ) <nl> + idx = + + rank ; <nl> + res_values [ j ] = idx ; <nl> + } <nl> + } <nl> + prev_off = off ; <nl> } <nl> - prev_off = off ; <nl> } <nl> return true ; <nl> } <nl> <nl> - bool FunctionArrayEnumerateUniq : : executeString ( const ColumnArray * array , const IColumn * null_map , ColumnUInt32 : : Container & res_values ) <nl> + template < typename Derived > <nl> + bool FunctionArrayEnumerateExtended < Derived > : : executeString ( const ColumnArray * array , const IColumn * null_map , ColumnUInt32 : : Container & res_values ) <nl> { <nl> const IColumn * inner_col ; <nl> <nl> bool FunctionArrayEnumerateUniq : : executeString ( const ColumnArray * array , const <nl> null_map_data = & static_cast < const ColumnUInt8 * > ( null_map ) - > getData ( ) ; <nl> <nl> ValuesToIndices indices ; <nl> - for ( size_t i = 0 ; i < offsets . size ( ) ; + + i ) <nl> + if constexpr ( std : : is_same_v < Derived , FunctionArrayEnumerateUniq > ) <nl> { <nl> - indices . clear ( ) ; <nl> - UInt32 null_count = 0 ; <nl> - size_t off = offsets [ i ] ; <nl> - for ( size_t j = prev_off ; j < off ; + + j ) <nl> + / / Unique <nl> + for ( size_t i = 0 ; i < offsets . size ( ) ; + + i ) <nl> { <nl> - if ( null_map_data & & ( ( * null_map_data ) [ j ] = = 1 ) ) <nl> - res_values [ j ] = + + null_count ; <nl> - else <nl> - res_values [ j ] = + + indices [ nested - > getDataAt ( j ) ] ; <nl> + indices . clear ( ) ; <nl> + UInt32 null_count = 0 ; <nl> + size_t off = offsets [ i ] ; <nl> + for ( size_t j = prev_off ; j < off ; + + j ) <nl> + { <nl> + if ( null_map_data & & ( ( * null_map_data ) [ j ] = = 1 ) ) <nl> + res_values [ j ] = + + null_count ; <nl> + else <nl> + res_values [ j ] = + + indices [ nested - > getDataAt ( j ) ] ; <nl> + } <nl> + prev_off = off ; <nl> + } <nl> + } else { <nl> + / / Dense <nl> + for ( size_t i = 0 ; i < offsets . size ( ) ; + + i ) <nl> + { <nl> + indices . clear ( ) ; <nl> + size_t rank = 0 ; <nl> + UInt32 null_index = 0 ; <nl> + size_t off = offsets [ i ] ; <nl> + for ( size_t j = prev_off ; j < off ; + + j ) <nl> + { <nl> + if ( null_map_data & & ( ( * null_map_data ) [ j ] = = 1 ) ) <nl> + { <nl> + if ( ! null_index ) <nl> + null_index = + + rank ; <nl> + res_values [ j ] = null_index ; <nl> + } <nl> + else <nl> + { <nl> + auto & idx = indices [ nested - > getDataAt ( j ) ] ; <nl> + if ( ! 
idx ) <nl> + idx = + + rank ; <nl> + res_values [ j ] = idx ; <nl> + } <nl> + } <nl> + prev_off = off ; <nl> } <nl> - prev_off = off ; <nl> } <nl> return true ; <nl> } <nl> <nl> - bool FunctionArrayEnumerateUniq : : execute128bit ( <nl> + template < typename Derived > <nl> + bool FunctionArrayEnumerateExtended < Derived > : : execute128bit ( <nl> const ColumnArray : : Offsets & offsets , <nl> const ColumnRawPtrs & columns , <nl> const ColumnRawPtrs & null_maps , <nl> bool FunctionArrayEnumerateUniq : : execute128bit ( <nl> <nl> ValuesToIndices indices ; <nl> size_t prev_off = 0 ; <nl> - for ( size_t i = 0 ; i < offsets . size ( ) ; + + i ) <nl> + if constexpr ( std : : is_same_v < Derived , FunctionArrayEnumerateUniq > ) <nl> { <nl> - indices . clear ( ) ; <nl> - size_t off = offsets [ i ] ; <nl> - for ( size_t j = prev_off ; j < off ; + + j ) <nl> + / / Unique <nl> + for ( size_t i = 0 ; i < offsets . size ( ) ; + + i ) <nl> { <nl> - if ( has_nullable_columns ) <nl> + indices . clear ( ) ; <nl> + size_t off = offsets [ i ] ; <nl> + for ( size_t j = prev_off ; j < off ; + + j ) <nl> { <nl> - KeysNullMap < UInt128 > bitmap { } ; <nl> + if ( has_nullable_columns ) <nl> + { <nl> + KeysNullMap < UInt128 > bitmap { } ; <nl> <nl> - for ( size_t i = 0 ; i < columns . size ( ) ; + + i ) <nl> + for ( size_t i = 0 ; i < columns . size ( ) ; + + i ) <nl> + { <nl> + if ( null_maps [ i ] ) <nl> + { <nl> + const auto & null_map = static_cast < const ColumnUInt8 & > ( * null_maps [ i ] ) . getData ( ) ; <nl> + if ( null_map [ j ] = = 1 ) <nl> + { <nl> + size_t bucket = i / 8 ; <nl> + size_t offset = i % 8 ; <nl> + bitmap [ bucket ] | = UInt8 ( 1 ) < < offset ; <nl> + } <nl> + } <nl> + } <nl> + res_values [ j ] = + + indices [ packFixed < UInt128 > ( j , count , columns , key_sizes , bitmap ) ] ; <nl> + } <nl> + else <nl> + res_values [ j ] = + + indices [ packFixed < UInt128 > ( j , count , columns , key_sizes ) ] ; <nl> + } <nl> + prev_off = off ; <nl> + } <nl> + } else { <nl> + / / Dense <nl> + for ( size_t i = 0 ; i < offsets . size ( ) ; + + i ) <nl> + { <nl> + indices . clear ( ) ; <nl> + size_t off = offsets [ i ] ; <nl> + size_t rank = 0 ; <nl> + for ( size_t j = prev_off ; j < off ; + + j ) <nl> + { <nl> + if ( has_nullable_columns ) <nl> { <nl> - if ( null_maps [ i ] ) <nl> + KeysNullMap < UInt128 > bitmap { } ; <nl> + <nl> + for ( size_t i = 0 ; i < columns . size ( ) ; + + i ) <nl> { <nl> - const auto & null_map = static_cast < const ColumnUInt8 & > ( * null_maps [ i ] ) . getData ( ) ; <nl> - if ( null_map [ j ] = = 1 ) <nl> + if ( null_maps [ i ] ) <nl> { <nl> - size_t bucket = i / 8 ; <nl> - size_t offset = i % 8 ; <nl> - bitmap [ bucket ] | = UInt8 ( 1 ) < < offset ; <nl> + const auto & null_map = static_cast < const ColumnUInt8 & > ( * null_maps [ i ] ) . getData ( ) ; <nl> + if ( null_map [ j ] = = 1 ) <nl> + { <nl> + size_t bucket = i / 8 ; <nl> + size_t offset = i % 8 ; <nl> + bitmap [ bucket ] | = UInt8 ( 1 ) < < offset ; <nl> + } <nl> } <nl> } <nl> + auto & idx = indices [ packFixed < UInt128 > ( j , count , columns , key_sizes , bitmap ) ] ; <nl> + if ( ! idx ) <nl> + idx = + + rank ; <nl> + res_values [ j ] = idx ; <nl> + } <nl> + else <nl> + { <nl> + auto & idx = indices [ packFixed < UInt128 > ( j , count , columns , key_sizes ) ] ; ; <nl> + if ( ! 
idx ) <nl> + idx = + + rank ; <nl> + res_values [ j ] = idx ; <nl> } <nl> - res_values [ j ] = + + indices [ packFixed < UInt128 > ( j , count , columns , key_sizes , bitmap ) ] ; <nl> } <nl> - else <nl> - res_values [ j ] = + + indices [ packFixed < UInt128 > ( j , count , columns , key_sizes ) ] ; <nl> + prev_off = off ; <nl> } <nl> - prev_off = off ; <nl> } <nl> <nl> return true ; <nl> } <nl> <nl> - void FunctionArrayEnumerateUniq : : executeHashed ( <nl> + template < typename Derived > <nl> + void FunctionArrayEnumerateExtended < Derived > : : executeHashed ( <nl> const ColumnArray : : Offsets & offsets , <nl> const ColumnRawPtrs & columns , <nl> ColumnUInt32 : : Container & res_values ) <nl> void FunctionArrayEnumerateUniq : : executeHashed ( <nl> <nl> ValuesToIndices indices ; <nl> size_t prev_off = 0 ; <nl> - for ( size_t i = 0 ; i < offsets . size ( ) ; + + i ) <nl> + if constexpr ( std : : is_same_v < Derived , FunctionArrayEnumerateUniq > ) <nl> { <nl> - indices . clear ( ) ; <nl> - size_t off = offsets [ i ] ; <nl> - for ( size_t j = prev_off ; j < off ; + + j ) <nl> + / / Unique <nl> + for ( size_t i = 0 ; i < offsets . size ( ) ; + + i ) <nl> { <nl> - res_values [ j ] = + + indices [ hash128 ( j , count , columns ) ] ; <nl> + indices . clear ( ) ; <nl> + size_t off = offsets [ i ] ; <nl> + for ( size_t j = prev_off ; j < off ; + + j ) <nl> + { <nl> + res_values [ j ] = + + indices [ hash128 ( j , count , columns ) ] ; <nl> + } <nl> + prev_off = off ; <nl> + } <nl> + } <nl> + else <nl> + { <nl> + / / Dense <nl> + for ( size_t i = 0 ; i < offsets . size ( ) ; + + i ) <nl> + { <nl> + indices . clear ( ) ; <nl> + size_t off = offsets [ i ] ; <nl> + size_t rank = 0 ; <nl> + for ( size_t j = prev_off ; j < off ; + + j ) <nl> + { <nl> + auto & idx = indices [ hash128 ( j , count , columns ) ] ; <nl> + if ( ! idx ) <nl> + idx = + + rank ; <nl> + res_values [ j ] = idx ; <nl> + } <nl> + prev_off = off ; <nl> } <nl> - prev_off = off ; <nl> } <nl> } <nl> <nl> + template class FunctionArrayEnumerateExtended < FunctionArrayEnumerateUniq > ; <nl> + template class FunctionArrayEnumerateExtended < FunctionArrayEnumerateDense > ; <nl> + <nl> / / / Implementation of FunctionEmptyArrayToSingle . <nl> <nl> FunctionPtr FunctionEmptyArrayToSingle : : create ( const Context & ) { return std : : make_shared < FunctionEmptyArrayToSingle > ( ) ; } <nl> mmm a / dbms / src / Functions / FunctionsArray . h <nl> ppp b / dbms / src / Functions / FunctionsArray . 
h <nl> class FunctionArrayDistinct : public IFunction <nl> } ; <nl> <nl> <nl> - class FunctionArrayEnumerateUniq : public IFunction <nl> + template < typename Derived > <nl> + class FunctionArrayEnumerateExtended : public IFunction <nl> { <nl> public : <nl> - static constexpr auto name = " arrayEnumerateUniq " ; <nl> static FunctionPtr create ( const Context & context ) ; <nl> <nl> String getName ( ) const override ; <nl> class FunctionArrayEnumerateUniq : public IFunction <nl> ColumnUInt32 : : Container & res_values ) ; <nl> } ; <nl> <nl> + class FunctionArrayEnumerateUniq : public FunctionArrayEnumerateExtended < FunctionArrayEnumerateUniq > <nl> + { <nl> + using Base = FunctionArrayEnumerateExtended < FunctionArrayEnumerateUniq > ; <nl> + public : <nl> + static constexpr auto name = " arrayEnumerateUniq " ; <nl> + using Base : : create ; <nl> + } ; <nl> + <nl> + class FunctionArrayEnumerateDense : public FunctionArrayEnumerateExtended < FunctionArrayEnumerateDense > <nl> + { <nl> + using Base = FunctionArrayEnumerateExtended < FunctionArrayEnumerateDense > ; <nl> + public : <nl> + static constexpr auto name = " arrayEnumerateDense " ; <nl> + using Base : : create ; <nl> + } ; <nl> <nl> template < typename Type > struct TypeToColumnType { using ColumnType = ColumnVector < Type > ; } ; <nl> template < > struct TypeToColumnType < String > { using ColumnType = ColumnString ; } ; <nl> new file mode 100644 <nl> index 00000000000 . . f48ebd99f0f <nl> mmm / dev / null <nl> ppp b / dbms / tests / queries / 0_stateless / 00710_array_enumerate_dense . reference <nl> <nl> + [ 1 , 2 , 3 , 1 ] <nl> + [ 1 , 2 , 1 ] <nl> new file mode 100644 <nl> index 00000000000 . . 194c941b72b <nl> mmm / dev / null <nl> ppp b / dbms / tests / queries / 0_stateless / 00710_array_enumerate_dense . sql <nl> <nl> + - - empty result set <nl> + SELECT a FROM ( SELECT groupArray ( intDiv ( number , 54321 ) ) AS a , arrayUniq ( a ) AS u , arrayEnumerateDense ( a ) AS arr FROM ( SELECT number FROM system . numbers LIMIT 1000000 ) GROUP BY intHash32 ( number ) % 100000 ) where u < > arrayReverseSort ( arr ) [ 1 ] ; <nl> + <nl> + SELECT arrayEnumerateDense ( [ [ 1 ] , [ 2 ] , [ 34 ] , [ 1 ] ] ) ; <nl> + SELECT arrayEnumerateDense ( [ ( 1 , 2 ) , ( 3 , 4 ) , ( 1 , 2 ) ] ) ; <nl>
|
Extending arrayEnumerateUniq: factor it into a shared FunctionArrayEnumerateExtended base and add arrayEnumerateDense.
|
ClickHouse/ClickHouse
|
a4dfe0ae38161a4a93f54906b9614bfe5782fd9a
|
2018-08-28T07:31:25Z
|
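The ClickHouse diff above folds arrayEnumerateUniq into a templated FunctionArrayEnumerateExtended and adds arrayEnumerateDense: the "Unique" branch numbers repeat occurrences of each value, while the "Dense" branch gives every distinct value the rank of its first occurrence. A minimal standalone C++ sketch of the two numbering schemes (not ClickHouse code; a plain std::unordered_map stands in for ClickHouse's hash tables, and per-array offset handling is omitted):

#include <cstdint>
#include <iostream>
#include <unordered_map>
#include <vector>

// arrayEnumerateUniq-style: res[j] = how many times values[j] has occurred so far.
std::vector<uint32_t> enumerateUniq(const std::vector<int>& values) {
    std::unordered_map<int, uint32_t> indices;
    std::vector<uint32_t> res(values.size());
    for (size_t j = 0; j < values.size(); ++j)
        res[j] = ++indices[values[j]];
    return res;
}

// arrayEnumerateDense-style: res[j] = rank of the first occurrence of values[j].
std::vector<uint32_t> enumerateDense(const std::vector<int>& values) {
    std::unordered_map<int, uint32_t> indices;
    std::vector<uint32_t> res(values.size());
    uint32_t rank = 0;
    for (size_t j = 0; j < values.size(); ++j) {
        auto& idx = indices[values[j]];
        if (!idx) idx = ++rank;  // value seen for the first time
        res[j] = idx;
    }
    return res;
}

int main() {
    // Mirrors the test expectation: arrayEnumerateDense([[1],[2],[34],[1]]) -> [1,2,3,1].
    for (uint32_t v : enumerateDense({1, 2, 34, 1})) std::cout << v << ' ';  // 1 2 3 1
    std::cout << '\n';
    for (uint32_t v : enumerateUniq({1, 2, 34, 1})) std::cout << v << ' ';   // 1 1 1 2
    std::cout << '\n';
}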
mmm a / tensorflow / compiler / xla / service / hlo_domain_map . cc <nl> ppp b / tensorflow / compiler / xla / service / hlo_domain_map . cc <nl> Status HloDomainMap : : TryProcessEmptyDomain ( HloInstruction * instruction ) { <nl> } <nl> <nl> Status HloDomainMap : : Populate ( HloComputation * computation ) { <nl> + InstructionOrderMap instructions_post_order ; <nl> + int64 count = 0 ; <nl> + for ( HloInstruction * instruction : computation - > MakeInstructionPostOrder ( ) ) { <nl> + instructions_post_order . insert ( std : : make_pair ( instruction , count + + ) ) ; <nl> + } <nl> for ( HloInstruction * instruction : computation - > instructions ( ) ) { <nl> if ( IsDomainInstruction ( instruction ) ) { <nl> / / If this is a kDomain of the kind we are currently processing , check <nl> Status HloDomainMap : : Populate ( HloComputation * computation ) { <nl> continue ; <nl> } <nl> TF_ASSIGN_OR_RETURN ( std : : unique_ptr < DomainMetadata : : Domain > domain , <nl> - CreateDomain ( instruction ) ) ; <nl> + CreateDomain ( instruction , instructions_post_order ) ) ; <nl> TF_RETURN_IF_ERROR ( InsertDomain ( std : : move ( domain ) ) ) ; <nl> } <nl> return Status : : OK ( ) ; <nl> Status HloDomainMap : : ExpandDomain ( HloInstruction * instruction , <nl> } <nl> <nl> StatusOr < std : : unique_ptr < DomainMetadata : : Domain > > HloDomainMap : : CreateDomain ( <nl> - HloInstruction * instruction ) const { <nl> + HloInstruction * instruction , <nl> + const InstructionOrderMap & instructions_order ) const { <nl> auto domain = absl : : make_unique < DomainMetadata : : Domain > ( ) ; <nl> TF_RETURN_IF_ERROR ( ExpandDomain ( instruction , domain . get ( ) ) ) ; <nl> - domain - > instructions = MakeNonDomainInstructions ( domain - > reach_set ) ; <nl> + domain - > instructions = <nl> + MakeNonDomainInstructions ( domain - > reach_set , instructions_order ) ; <nl> return std : : move ( domain ) ; <nl> } <nl> <nl> bool HloDomainMap : : IsDomainInstruction ( HloInstruction * instruction ) const { <nl> <nl> / * static * / std : : vector < HloInstruction * > <nl> HloDomainMap : : MakeNonDomainInstructions ( <nl> - const tensorflow : : gtl : : FlatSet < HloInstruction * > & instruction_set ) { <nl> + const tensorflow : : gtl : : FlatSet < HloInstruction * > & instruction_set , <nl> + const InstructionOrderMap & instructions_order ) { <nl> std : : vector < HloInstruction * > instructions ; <nl> instructions . reserve ( instruction_set . size ( ) ) ; <nl> for ( HloInstruction * instruction : instruction_set ) { <nl> HloDomainMap : : MakeNonDomainInstructions ( <nl> instructions . push_back ( instruction ) ; <nl> } <nl> } <nl> + / / sort instructions according to instructions_order <nl> std : : sort ( instructions . begin ( ) , instructions . end ( ) , <nl> - [ ] ( HloInstruction * a , HloInstruction * b ) { <nl> - return a - > unique_id ( ) < b - > unique_id ( ) ; <nl> + [ & instructions_order ] ( HloInstruction * a , HloInstruction * b ) { <nl> + return instructions_order . at ( a ) < instructions_order . at ( b ) ; <nl> } ) ; <nl> return instructions ; <nl> } <nl> mmm a / tensorflow / compiler / xla / service / hlo_domain_map . h <nl> ppp b / tensorflow / compiler / xla / service / hlo_domain_map . h <nl> class HloDomainMap { <nl> int64 GetDomainId ( HloInstruction * instruction ) const ; <nl> <nl> private : <nl> + / / Map used for representing instruction ordering , i . e . <nl> + / / order_map [ a ] < order_map [ b ] means a must be ordered before b . 
<nl> + using InstructionOrderMap = <nl> + tensorflow : : gtl : : FlatMap < const HloInstruction * , int64 > ; <nl> + <nl> HloDomainMap ( string domain_kind ) : domain_kind_ ( std : : move ( domain_kind ) ) { } <nl> <nl> / / Check if the kDomain instruction is facing ( via its operand link ) another <nl> class HloDomainMap { <nl> <nl> / / Creates a domain data structure using the ExpandDomain ( ) API . <nl> StatusOr < std : : unique_ptr < DomainMetadata : : Domain > > CreateDomain ( <nl> - HloInstruction * instruction ) const ; <nl> + HloInstruction * instruction , <nl> + const InstructionOrderMap & instructions_order ) const ; <nl> <nl> / / Out of an instruction set , returns a vector of all the ones which are not <nl> / / a kDomain kind . <nl> static std : : vector < HloInstruction * > MakeNonDomainInstructions ( <nl> - const tensorflow : : gtl : : FlatSet < HloInstruction * > & instruction_set ) ; <nl> + const tensorflow : : gtl : : FlatSet < HloInstruction * > & instruction_set , <nl> + const InstructionOrderMap & instructions_order ) ; <nl> <nl> string domain_kind_ ; <nl> std : : vector < std : : unique_ptr < DomainMetadata : : Domain > > instruction_domains_ ; <nl> mmm a / tensorflow / compiler / xla / service / hlo_domain_metadata . h <nl> ppp b / tensorflow / compiler / xla / service / hlo_domain_metadata . h <nl> class DomainMetadata { <nl> / / two domains of different kind intersect each other . <nl> tensorflow : : gtl : : FlatSet < HloInstruction * > reach_set ; <nl> <nl> - / / The same instructions in reach_set , but purged from kDomain instructions . <nl> + / / The same instructions in reach_set , but purged from kDomain instructions <nl> + / / and ordered according to their computation graph post - order , i . e . <nl> + / / if instructions [ pos_a ] depends on instructions [ pos_b ] , then pos_a > <nl> + / / pos_b . <nl> std : : vector < HloInstruction * > instructions ; <nl> <nl> / / If we consider a graph edge as an arrow oriented from the operand to the <nl> mmm a / tensorflow / compiler / xla / service / hlo_domain_test . cc <nl> ppp b / tensorflow / compiler / xla / service / hlo_domain_test . cc <nl> ENTRY entry { <nl> token = token [ ] after - all ( ) <nl> infeed = ( ( f32 [ 4 ] , f32 [ 4 ] ) , token [ ] ) infeed ( token ) , <nl> sharding = { { maximal device = 1 } , { maximal device = 0 } , { maximal device = 0 } } <nl> - infeed . data = ( f32 [ 4 ] , f32 [ 4 ] ) get - tuple - element ( infeed ) , index = 0 <nl> + infeed . data = ( f32 [ 4 ] , f32 [ 4 ] ) get - tuple - element ( infeed ) , index = 0 , <nl> + sharding = { { maximal device = 1 } , { maximal device = 0 } } <nl> gte0 = f32 [ 4 ] get - tuple - element ( infeed . data ) , index = 0 <nl> gte1 = f32 [ 4 ] get - tuple - element ( infeed . data ) , index = 1 <nl> copy0 = f32 [ 4 ] copy ( gte0 ) <nl> ENTRY entry { <nl> / / \ / <nl> / / TUPLE <nl> / / | <nl> - HloInstruction * infeed = FindInstruction ( module , " infeed " ) ; <nl> - ASSERT_NE ( infeed , nullptr ) ; <nl> - HloInstruction * infeed_data = <nl> - infeed - > parent ( ) - > AddInstruction ( HloInstruction : : CreateGetTupleElement ( <nl> - ShapeUtil : : GetTupleElementShape ( infeed - > shape ( ) , 0 ) , infeed , 0 ) ) ; <nl> + HloInstruction * infeed_data = FindInstruction ( module , " infeed . 
data " ) ; <nl> + ASSERT_NE ( infeed_data , nullptr ) ; <nl> <nl> auto infeed_data_users = infeed_data - > users ( ) ; <nl> HloInstruction * new_gte0 = infeed_data - > parent ( ) - > AddInstruction ( <nl> TEST_F ( HloDomainTest , DumpParseNullSharding ) { <nl> ASSERT_TRUE ( ParseModule ( hlo_string ) . status ( ) . ok ( ) ) ; <nl> } <nl> <nl> + / / Tuple inputs are domain instructions . <nl> TEST_F ( HloDomainTest , DomainTuple ) { <nl> const char * const hlo_string = R " ( <nl> HloModule Module <nl> HloModule Module <nl> ENTRY entry { <nl> p0 = f32 [ 4 ] parameter ( 0 ) , sharding = { maximal device = 0 } <nl> cst = u32 [ ] constant ( 0 ) , sharding = { maximal device = 1 } <nl> - tpl = ( u32 [ ] , f32 [ 4 ] ) tuple ( cst , p0 ) , sharding = { { maximal device = 1 } , { maximal device = 0 } } <nl> + tpl = ( u32 [ ] , f32 [ 4 ] ) tuple ( cst , p0 ) , <nl> + sharding = { { maximal device = 1 } , { maximal device = 0 } } <nl> ROOT gte = f32 [ 4 ] get - tuple - element ( tpl ) , index = 1 , sharding = { maximal device = 0 } <nl> } <nl> ) " ; <nl> ENTRY % entry ( p0 : ( f32 [ 4 ] , f32 [ 4 ] ) ) - > ( f32 [ 4 ] , f32 [ 4 ] , f32 [ 4 ] ) { <nl> EXPECT_FALSE ( HasDomainEdge ( module , " d " , " c " ) ) ; <nl> } <nl> <nl> + / / Emulate instructions inserted at top and bottom within nested tuple domain . <nl> + TEST_F ( HloDomainTest , DomainTupleTopBottomInsert ) { <nl> + const char * const hlo_string = R " ( <nl> + HloModule Module <nl> + <nl> + ENTRY entry { <nl> + p0 = f32 [ 4 ] parameter ( 0 ) , sharding = { maximal device = 1 } <nl> + p1 = ( f32 [ 5 ] , f32 [ 6 ] ) parameter ( 1 ) , <nl> + sharding = { { maximal device = 1 } , { maximal device = 0 } } <nl> + tuple . 0 = ( f32 [ 4 ] , ( f32 [ 5 ] , f32 [ 6 ] ) ) tuple ( p0 , p1 ) , <nl> + sharding = { { maximal device = 1 } , { maximal device = 1 } , { maximal device = 0 } } <nl> + ROOT res = ( f32 [ 5 ] , f32 [ 6 ] ) get - tuple - element ( tuple . 0 ) , index = 1 , <nl> + sharding = { { maximal device = 1 } , { maximal device = 0 } } <nl> + } <nl> + ) " ; <nl> + <nl> + TF_ASSERT_OK_AND_ASSIGN ( HloModule * module , ParseModule ( hlo_string ) ) ; <nl> + <nl> + HloDomainIsolator isolator ( ShardingDomainCreator { } ) ; <nl> + TF_ASSERT_OK_AND_ASSIGN ( bool isolator_changed , isolator . Run ( module ) ) ; <nl> + EXPECT_TRUE ( isolator_changed ) ; <nl> + <nl> + / / Clear sharding of tuple . 0 instruction , in order to test domain sharding <nl> + / / application . <nl> + auto tuple0 = FindInstruction ( module , " tuple . 0 " ) ; <nl> + tuple0 - > clear_sharding ( ) ; <nl> + <nl> + / / Insert the following instructons above and below tuple . 0 , to emulate other <nl> + / / passes effects : <nl> + / / COPY . 0 <nl> + / / \ / <nl> + / / TUPLE . 0 <nl> + / / / \ <nl> + / / COPY . 1 \ <nl> + / / / \ <nl> + / / GTE . 0 GTE . 1 <nl> + / / | | <nl> + / / | COPY . 2 <nl> + / / \ / <nl> + / / \ / <nl> + / / TUPLE . 
1 <nl> + / / | <nl> + auto tuple0_users = tuple0 - > users ( ) ; <nl> + auto computation = tuple0 - > parent ( ) ; <nl> + HloInstruction * copy0 = computation - > AddInstruction ( <nl> + HloInstruction : : CreateUnary ( tuple0 - > operand ( 1 ) - > shape ( ) , HloOpcode : : kCopy , <nl> + tuple0 - > mutable_operand ( 1 ) ) ) ; <nl> + TF_EXPECT_OK ( tuple0 - > ReplaceOperandWith ( 1 , copy0 ) ) ; <nl> + <nl> + HloInstruction * copy1 = computation - > AddInstruction ( <nl> + HloInstruction : : CreateUnary ( tuple0 - > shape ( ) , HloOpcode : : kCopy , tuple0 ) ) ; <nl> + HloInstruction * gte0 = <nl> + computation - > AddInstruction ( HloInstruction : : CreateGetTupleElement ( <nl> + ShapeUtil : : GetTupleElementShape ( copy1 - > shape ( ) , 0 ) , copy1 , 0 ) ) ; <nl> + HloInstruction * gte1 = <nl> + computation - > AddInstruction ( HloInstruction : : CreateGetTupleElement ( <nl> + ShapeUtil : : GetTupleElementShape ( tuple0 - > shape ( ) , 1 ) , tuple0 , 1 ) ) ; <nl> + HloInstruction * copy2 = computation - > AddInstruction ( <nl> + HloInstruction : : CreateUnary ( gte1 - > shape ( ) , HloOpcode : : kCopy , gte1 ) ) ; <nl> + HloInstruction * tuple1 = <nl> + computation - > AddInstruction ( HloInstruction : : CreateTuple ( { gte0 , copy2 } ) ) ; <nl> + <nl> + for ( HloInstruction * user : tuple0_users ) { <nl> + TF_EXPECT_OK ( tuple0 - > ReplaceUseWith ( user , tuple1 ) ) ; <nl> + } <nl> + <nl> + HloDomainRemover remover ( ShardingMetadata : : KindName ( ) , <nl> + ShardingMetadata : : NormalizeShardingDomain ) ; <nl> + TF_ASSERT_OK_AND_ASSIGN ( bool remover_changed , remover . Run ( module ) ) ; <nl> + EXPECT_TRUE ( remover_changed ) ; <nl> + <nl> + EXPECT_TRUE ( tuple0 - > has_sharding ( ) ) ; <nl> + EXPECT_EQ ( HloSharding : : Tuple ( tuple0 - > shape ( ) , { HloSharding : : AssignDevice ( 1 ) , <nl> + HloSharding : : AssignDevice ( 1 ) , <nl> + HloSharding : : AssignDevice ( 0 ) } ) , <nl> + tuple0 - > sharding ( ) ) ; <nl> + <nl> + EXPECT_TRUE ( copy0 - > has_sharding ( ) ) ; <nl> + EXPECT_EQ ( HloSharding : : Tuple ( copy0 - > shape ( ) , { HloSharding : : AssignDevice ( 1 ) , <nl> + HloSharding : : AssignDevice ( 0 ) } ) , <nl> + copy0 - > sharding ( ) ) ; <nl> + <nl> + / / copy1 has partial information only from gte . 0 , so in the end it gets no <nl> + / / sharding at all . During propagation it does propagate the information from <nl> + / / gte . 0 though , enabling Tuple . 0 to be fully sharded . <nl> + EXPECT_FALSE ( copy1 - > has_sharding ( ) ) ; <nl> + <nl> + EXPECT_TRUE ( gte0 - > has_sharding ( ) ) ; <nl> + EXPECT_EQ ( HloSharding : : AssignDevice ( 1 ) , gte0 - > sharding ( ) ) ; <nl> + <nl> + EXPECT_TRUE ( gte1 - > has_sharding ( ) ) ; <nl> + EXPECT_EQ ( HloSharding : : Tuple ( gte1 - > shape ( ) , { HloSharding : : AssignDevice ( 1 ) , <nl> + HloSharding : : AssignDevice ( 0 ) } ) , <nl> + gte1 - > sharding ( ) ) ; <nl> + <nl> + EXPECT_TRUE ( copy2 - > has_sharding ( ) ) ; <nl> + EXPECT_EQ ( HloSharding : : Tuple ( copy2 - > shape ( ) , { HloSharding : : AssignDevice ( 1 ) , <nl> + HloSharding : : AssignDevice ( 0 ) } ) , <nl> + copy2 - > sharding ( ) ) ; <nl> + <nl> + EXPECT_TRUE ( tuple1 - > has_sharding ( ) ) ; <nl> + EXPECT_EQ ( tuple0 - > sharding ( ) , tuple1 - > sharding ( ) ) ; <nl> + } <nl> + <nl> } / / namespace <nl> } / / namespace xla <nl> mmm a / tensorflow / compiler / xla / service / hlo_sharding_metadata . cc <nl> ppp b / tensorflow / compiler / xla / service / hlo_sharding_metadata . 
cc <nl> namespace xla { <nl> <nl> namespace { <nl> <nl> + / / AssignmentKind and kUnassignedDevice are used during tuple domain sharding <nl> + / / propagation in order to distinguish among three cases : <nl> + / / kUnassigned : no assignment has occurred <nl> + / / kAssigned : at least an assignment has occurred <nl> + / / kConflict : no assignment has occurred because of conflicting propagations , <nl> + / / which occurs when multiple users of an instruction have different <nl> + / / shardings . <nl> + enum class AssignmentKind { kUnassigned , kAssigned , kConflict } ; <nl> + <nl> + / / kUnassignedDevice can only be assigned to tuple leaf shardings to indicate <nl> + / / absence of sharding information for that particular sub - sharding during <nl> + / / sharding propagation . It is used to be able to express tuple shardings with <nl> + / / partial information . At the end of the propagation the sharding of <nl> + / / tuple - shaped instructions using kUnassignedDevice ' s is cleared . <nl> + / / TODO ( b / 112883246 ) : Centralized enum of reserved devices . <nl> + constexpr int64 kUnassignedDevice = - 2 ; <nl> + <nl> struct PassThrough { <nl> PassThrough ( HloInstruction * user , HloInstruction * operand ) <nl> : user ( user ) , operand ( operand ) { } <nl> Status ApplyDomainSingleSharding ( const DomainMetadata : : Domain & domain , <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> - / / Retrieves the sharding of a tuple shaped instruction in form of a ShapeTree . <nl> - / / If the instruction has no sharding , a ShapeTree with HloSharding : : Replicate ( ) <nl> - / / sharding will be returned . <nl> - ShapeTree < HloSharding > GetTupleSharding ( HloInstruction * tuple ) { <nl> - if ( tuple - > has_sharding ( ) ) { <nl> - return tuple - > sharding ( ) . GetAsShapeTree ( tuple - > shape ( ) ) ; <nl> + / / Return the ShapeTree < HloSharding > of the user argument . The user argument <nl> + / / is assumed to be a user of the instruction argument . <nl> + / / If user is a tuple instruction , return the tuple subsharding corresponding to <nl> + / / the operand matching the instruction argument , because that is the <nl> + / / subsharding corresponding to instruction . <nl> + ShapeTree < HloSharding > GetShardingTreeFromUser ( <nl> + const HloInstruction & instruction , const HloInstruction & user ) { <nl> + if ( user . opcode ( ) = = HloOpcode : : kTuple ) { <nl> + return user . sharding ( ) <nl> + . GetSubSharding ( user . shape ( ) , { user . operand_index ( & instruction ) } ) <nl> + . GetAsShapeTree ( instruction . shape ( ) ) ; <nl> + } <nl> + return user . sharding ( ) . GetAsShapeTree ( user . shape ( ) ) ; <nl> + } <nl> + <nl> + / / Assign rhs to lhs . If rhs is unassigned ( assigned to kUnassignedDevice ) <nl> + / / then no assignment is made . Therefore kUnassignedDevice is never propagated . <nl> + / / kConflict is returned if lhs is already assigned and rhs is assigned to a <nl> + / / different device . <nl> + StatusOr < AssignmentKind > AssignLeafSharding ( HloSharding * lhs , <nl> + const HloSharding & rhs ) { <nl> + TF_RET_CHECK ( ! lhs - > IsTuple ( ) & & ! rhs . IsTuple ( ) ) ; <nl> + if ( rhs . UsesDevice ( kUnassignedDevice ) ) { <nl> + return AssignmentKind : : kUnassigned ; <nl> + } <nl> + if ( lhs - > UsesDevice ( kUnassignedDevice ) ) { <nl> + * lhs = rhs ; <nl> + return AssignmentKind : : kAssigned ; <nl> } <nl> - return ShapeTree < HloSharding > ( tuple - > shape ( ) , HloSharding : : Replicate ( ) ) ; <nl> + return lhs - > UniqueDevice ( ) ! = rhs . 
UniqueDevice ( ) <nl> + ? AssignmentKind : : kConflict <nl> + : AssignmentKind : : kUnassigned ; <nl> } <nl> <nl> - / / Retrieves the sharding of operand , asked from a user instruction which is <nl> - / / within domain . If operand is a kDomain , it means that sharding argument is <nl> - / / the operand sharding , otherwise the operand ' s own sharding will be returned . <nl> - const HloSharding * GetOperandSharding ( const HloInstruction * operand , <nl> + / / Assigns the whole rhs tree to lhs_tree , starting at lhs_it . <nl> + / / In case of conflicting assignment AssignmentKind : : kConflict is returned . In <nl> + / / this case lhs_tree is partially assigned , up to the conflicting leaf . It is <nl> + / / up to the caller to discard the partial assignment in case of conflict . <nl> + StatusOr < AssignmentKind > AssignTreeSharding ( <nl> + ShapeTree < HloSharding > * lhs_tree , ShapeTree < HloSharding > : : iterator lhs_it , <nl> + const ShapeTree < HloSharding > & rhs_tree ) { <nl> + AssignmentKind assigned = AssignmentKind : : kUnassigned ; <nl> + auto rhs_it = rhs_tree . begin ( ) ; <nl> + for ( ; lhs_it ! = lhs_tree - > end ( ) & & rhs_it ! = rhs_tree . end ( ) ; <nl> + + + lhs_it , + + rhs_it ) { <nl> + / / TODO ( b / 112885211 ) : Add ShapeTree : : IsLeaf ( const ShapeTreeIterator & it ) <nl> + if ( rhs_tree . IsLeaf ( rhs_it - > first ) ) { <nl> + TF_RET_CHECK ( lhs_tree - > IsLeaf ( lhs_it - > first ) ) ; <nl> + TF_ASSIGN_OR_RETURN ( AssignmentKind sub_assigned , <nl> + AssignLeafSharding ( & lhs_it - > second , rhs_it - > second ) ) ; <nl> + if ( sub_assigned = = AssignmentKind : : kConflict ) { <nl> + / / In case of conflict we return conflict to the caller . At this point <nl> + / / partial assignments to lhs_tree may have been made already . It is up <nl> + / / to the caller to discard the partial assignment in case of conflict . <nl> + return AssignmentKind : : kConflict ; <nl> + } else if ( sub_assigned = = AssignmentKind : : kAssigned ) { <nl> + assigned = sub_assigned ; <nl> + } <nl> + } <nl> + } <nl> + TF_RET_CHECK ( rhs_it = = rhs_tree . end ( ) ) ; <nl> + return assigned ; <nl> + } <nl> + <nl> + StatusOr < bool > ApplyShardingFromUsers ( HloInstruction * instruction , <nl> const DomainMetadata : : Domain & domain , <nl> - const HloSharding & sharding ) { <nl> - / / Here the user of operand is within the domain instruction set , and since it <nl> - / / is user of operand , we need to look into the enter_domains set . If this is <nl> - / / not a kDomain within the user domains set , then return the operand <nl> - / / sharding , if any . <nl> - if ( operand - > opcode ( ) ! = HloOpcode : : kDomain | | <nl> - domain . enter_domains . count ( const_cast < HloInstruction * > ( operand ) ) = = 0 ) { <nl> - return operand - > has_sharding ( ) ? & operand - > sharding ( ) : nullptr ; <nl> + const HloSharding & domain_sharding ) { <nl> + if ( instruction - > users ( ) . empty ( ) ) { <nl> + / / No sharding from users , use domain_sharding , after checking <nl> + / / compatibility . <nl> + TF_RET_CHECK ( ShapeUtil : : IsTuple ( instruction - > shape ( ) ) & & <nl> + ShapeUtil : : GetLeafCount ( instruction - > shape ( ) ) = = <nl> + domain_sharding . tuple_elements ( ) . size ( ) ) ; <nl> + instruction - > set_sharding ( domain_sharding ) ; <nl> + return true ; <nl> + } <nl> + AssignmentKind assigned = AssignmentKind : : kUnassigned ; <nl> + / / The sharding_tree leaves are initialized to kUnassignedDevice . 
Only Tuple <nl> + / / subshardings can result in a final sharding assignment containing <nl> + / / kUnassignedDevice leaves , in case some tuple indexes are not used , or are <nl> + / / used by users that don ' t have a sharding . <nl> + / / Non - tuple shardings are either assigned to a real sharding , or are not <nl> + / / assigned at all . As such they will never get assigned to kUnassignedDevice . <nl> + / / In any case , kUnassignedDevice is never propagated , from the implementation <nl> + / / of AssignLeafSharding . <nl> + ShapeTree < HloSharding > sharding_tree ( <nl> + instruction - > shape ( ) , HloSharding : : AssignDevice ( kUnassignedDevice ) ) ; <nl> + for ( HloInstruction * user : instruction - > users ( ) ) { <nl> + if ( user - > opcode ( ) = = HloOpcode : : kDomain & & <nl> + domain . exit_domains . count ( const_cast < HloInstruction * > ( user ) ) > 0 ) { <nl> + / / If a user is a domain and it is registered in the domain exits , then <nl> + / / the instruction sharding is taken directly from the domain , and no <nl> + / / further users need to be visited . <nl> + instruction - > set_sharding ( domain_sharding ) ; <nl> + return true ; <nl> + } <nl> + if ( ! user - > has_sharding ( ) ) { <nl> + continue ; <nl> + } <nl> + AssignmentKind sub_assigned = AssignmentKind : : kUnassigned ; <nl> + ShapeTree < HloSharding > user_sharding_tree = <nl> + GetShardingTreeFromUser ( * instruction , * user ) ; <nl> + if ( ShapeUtil : : IsTuple ( instruction - > shape ( ) ) ) { <nl> + / / For tuple - shaped instructions collect individual tuple subshardings <nl> + / / from the uses , and then combine them into the tuple sharding . <nl> + / / If the user is a GTE its sharding concerns only the subtree of <nl> + / / sharding_tree at index user - > tuple_index , otherwise the whole <nl> + / / sharding_tree is affected . <nl> + ShapeTree < HloSharding > : : iterator sharding_tree_begin = <nl> + user - > opcode ( ) = = HloOpcode : : kGetTupleElement <nl> + ? sharding_tree . find ( { user - > tuple_index ( ) } ) <nl> + : sharding_tree . begin ( ) ; <nl> + TF_ASSIGN_OR_RETURN ( <nl> + sub_assigned , AssignTreeSharding ( & sharding_tree , sharding_tree_begin , <nl> + user_sharding_tree ) ) ; <nl> + } else { <nl> + / / Non - tuple shape : assign common users sharding . <nl> + TF_RET_CHECK ( user_sharding_tree . leaf_count ( ) = = 1 ) <nl> + < < " Expected non - tuple user sharding " ; <nl> + TF_ASSIGN_OR_RETURN ( <nl> + sub_assigned , <nl> + AssignTreeSharding ( & sharding_tree , sharding_tree . begin ( ) , <nl> + user_sharding_tree ) ) ; <nl> + } <nl> + <nl> + if ( sub_assigned = = AssignmentKind : : kConflict ) { <nl> + / / In case of conflict we don ' t assign any sharding . <nl> + return false ; <nl> + } else if ( sub_assigned = = AssignmentKind : : kAssigned ) { <nl> + assigned = sub_assigned ; <nl> + } <nl> + } <nl> + <nl> + if ( assigned = = AssignmentKind : : kAssigned ) { <nl> + if ( ShapeUtil : : IsTuple ( instruction - > shape ( ) ) ) { <nl> + instruction - > set_sharding ( HloSharding : : Tuple ( sharding_tree ) ) ; <nl> + } else { <nl> + TF_RET_CHECK ( sharding_tree . leaf_count ( ) = = 1 ) ; <nl> + instruction - > set_sharding ( sharding_tree . leaf_begin ( ) - > second ) ; <nl> + } <nl> + return true ; <nl> } <nl> - / / At this point operand is a kDomain of the currently processed domain , so we <nl> - / / can refer to sharding as the domain sharding . 
<nl> - return & sharding ; <nl> + return false ; <nl> } <nl> <nl> / / Tries to propagate the sharding information into the instructions that are <nl> - / / part of the domain , in a post order manner ( operand propagate to user ) . <nl> + / / part of the domain , in a reverse post order manner ( users propoagate to <nl> + / / instruction ) . <nl> StatusOr < int64 > ApplyDomainShardingPass ( const DomainMetadata : : Domain & domain , <nl> - const HloSharding & sharding ) { <nl> + const HloSharding & domain_sharding ) { <nl> int64 assigned = 0 ; <nl> - for ( HloInstruction * instruction : domain . instructions ) { <nl> + / / domain . instructions are ordered in a post - order manner . As we do <nl> + / / user - > operand propagation we process instructions in reverse order . In so <nl> + / / doing we are guaranteed to process all users before their operands . <nl> + for ( auto it = domain . instructions . rbegin ( ) ; it ! = domain . instructions . rend ( ) ; <nl> + + + it ) { <nl> + HloInstruction * instruction = * it ; <nl> if ( instruction - > has_sharding ( ) ) { <nl> continue ; <nl> } <nl> - if ( instruction - > opcode ( ) = = HloOpcode : : kGetTupleElement ) { <nl> - HloInstruction * tuple = instruction - > mutable_operand ( 0 ) ; <nl> - const HloSharding * tuple_sharding = <nl> - GetOperandSharding ( tuple , domain , sharding ) ; <nl> - if ( tuple_sharding ! = nullptr ) { <nl> - if ( tuple_sharding - > IsTuple ( ) ) { <nl> - HloSharding sub_sharding = tuple_sharding - > GetSubSharding ( <nl> - tuple - > shape ( ) , { instruction - > tuple_index ( ) } ) ; <nl> - VLOG ( 4 ) < < " " < < instruction - > name ( ) < < " to sharding " <nl> - < < sub_sharding ; <nl> - instruction - > set_sharding ( sub_sharding ) ; <nl> - } else { <nl> - SetSingleSharding ( instruction , * tuple_sharding ) ; <nl> - } <nl> - + + assigned ; <nl> - } <nl> - } else if ( instruction - > opcode ( ) = = HloOpcode : : kTuple ) { <nl> - int64 tuple_assigned = 0 ; <nl> - ShapeTree < HloSharding > shape_tree = GetTupleSharding ( instruction ) ; <nl> - for ( int64 i = 0 ; i < instruction - > operand_count ( ) ; + + i ) { <nl> - const HloSharding * operand_sharding = <nl> - GetOperandSharding ( instruction - > operand ( i ) , domain , sharding ) ; <nl> - if ( operand_sharding ! = nullptr ) { <nl> - HloSharding operand_subsharding = HloSharding : : Replicate ( ) ; <nl> - if ( operand_sharding = = & sharding ) { <nl> - operand_subsharding = <nl> - sharding . GetSubSharding ( instruction - > shape ( ) , { i } ) ; <nl> - operand_sharding = & operand_subsharding ; <nl> - } <nl> - if ( shape_tree . element ( { i } ) ! = * operand_sharding ) { <nl> - * shape_tree . mutable_element ( { i } ) = * operand_sharding ; <nl> - + + tuple_assigned ; <nl> - } <nl> - } <nl> - } <nl> - if ( tuple_assigned > 0 ) { <nl> - HloSharding tuple_sharding = HloSharding : : Tuple ( shape_tree ) ; <nl> - VLOG ( 4 ) < < " " < < instruction - > name ( ) < < " to sharding " <nl> - < < tuple_sharding ; <nl> - instruction - > set_sharding ( tuple_sharding ) ; <nl> - + + assigned ; <nl> - } <nl> - } else { <nl> - / / If all the operand of the given instruction has the same single device <nl> - / / assignment , assign that device to this instruction as well . <nl> - const HloSharding * common_sharding = nullptr ; <nl> - for ( const HloInstruction * operand : instruction - > operands ( ) ) { <nl> - const HloSharding * operand_sharding = <nl> - GetOperandSharding ( operand , domain , sharding ) ; <nl> - if ( operand_sharding ! 
= nullptr ) { <nl> - if ( common_sharding ! = nullptr & & <nl> - * common_sharding ! = * operand_sharding ) { <nl> - common_sharding = nullptr ; <nl> - break ; <nl> - } <nl> - common_sharding = operand_sharding ; <nl> - } <nl> - } <nl> - if ( common_sharding ! = nullptr ) { <nl> - VLOG ( 4 ) < < " " < < instruction - > name ( ) < < " to sharding " <nl> - < < * common_sharding ; <nl> - instruction - > set_sharding ( * common_sharding ) ; <nl> - + + assigned ; <nl> - } <nl> + / / Take the sharding from the users . <nl> + TF_ASSIGN_OR_RETURN ( <nl> + bool instruction_assigned , <nl> + ApplyShardingFromUsers ( instruction , domain , domain_sharding ) ) ; <nl> + if ( instruction_assigned ) { <nl> + + + assigned ; <nl> + VLOG ( 4 ) < < " " < < instruction - > name ( ) < < " to sharding " <nl> + < < instruction - > sharding ( ) ; <nl> } <nl> } <nl> return assigned ; <nl> Status ApplyDomainSharding ( const DomainMetadata : : Domain & domain , <nl> return ApplyDomainSingleSharding ( domain , * single_sharding ) ; <nl> } <nl> VLOG ( 1 ) < < " Assigning non - trivial sharding " < < sharding ; <nl> - for ( ; ; ) { <nl> - TF_ASSIGN_OR_RETURN ( int64 assigned , <nl> - ApplyDomainShardingPass ( domain , sharding ) ) ; <nl> - if ( assigned = = 0 ) { <nl> - break ; <nl> - } <nl> - } <nl> + TF_RETURN_IF_ERROR ( ApplyDomainShardingPass ( domain , sharding ) . status ( ) ) ; <nl> + <nl> int64 unassigned = 0 ; <nl> for ( HloInstruction * instruction : domain . instructions ) { <nl> if ( ! instruction - > has_sharding ( ) ) { <nl> LOG ( WARNING ) < < " Unassigned instruction : " < < instruction - > ToString ( ) ; <nl> + + unassigned ; <nl> + } else { <nl> + / / Un - set sharding of tuples whose sub - sgardings are assigned to <nl> + / / kUnassignedDevice . Indeed in case of doubt it is better to leave the <nl> + / / entire tuple unassigned , and let the device placer decide for it . <nl> + if ( instruction - > sharding ( ) . UsesDevice ( kUnassignedDevice ) ) { <nl> + TF_RET_CHECK ( ShapeUtil : : IsTuple ( instruction - > shape ( ) ) ) <nl> + < < " Only tuples can have kUnassignedDevice sub shardings " ; <nl> + instruction - > clear_sharding ( ) ; <nl> + } <nl> } <nl> } <nl> / / Should we error out if unassigned > 0 ? <nl>
|
Domain tuple sharding propagation from users instead of from operands.
|
tensorflow/tensorflow
|
57919740bf151cb6395aa60e30404ee9caa066d6
|
2018-08-28T12:26:51Z
|
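The XLA change above replaces sorting domain instructions by unique_id with sorting by a precomputed post-order position, so that domain.instructions can be walked in reverse to visit every user before its operands. A small self-contained C++ sketch of the same idiom (a hypothetical Node type stands in for HloInstruction, and std::unordered_map for the FlatMap):

#include <algorithm>
#include <cstdint>
#include <unordered_map>
#include <vector>

struct Node { int unique_id; };

// Build the order map once per computation: each node's position in post order.
std::unordered_map<const Node*, int64_t> MakeOrderMap(
    const std::vector<Node*>& post_order) {
  std::unordered_map<const Node*, int64_t> order;
  int64_t count = 0;
  for (Node* n : post_order) order.emplace(n, count++);
  return order;
}

// Sort an arbitrary subset so operands precede users; iterating the result
// in reverse then guarantees users are processed before their operands,
// which is what the sharding-from-users propagation pass relies on.
void SortByPostOrder(std::vector<Node*>* nodes,
                     const std::unordered_map<const Node*, int64_t>& order) {
  std::sort(nodes->begin(), nodes->end(),
            [&order](const Node* a, const Node* b) {
              return order.at(a) < order.at(b);
            });
}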
deleted file mode 100644 <nl> index 7ccadced7ce . . 00000000000 <nl> mmm a / lib / BasicsC / timer . h <nl> ppp / dev / null <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief timing helper macros <nl> - / / / <nl> - / / / @ file <nl> - / / / <nl> - / / / DISCLAIMER <nl> - / / / <nl> - / / / Copyright 2004 - 2012 triagens GmbH , Cologne , Germany <nl> - / / / <nl> - / / / Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - / / / you may not use this file except in compliance with the License . <nl> - / / / You may obtain a copy of the License at <nl> - / / / <nl> - / / / http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> - / / / <nl> - / / / Unless required by applicable law or agreed to in writing , software <nl> - / / / distributed under the License is distributed on an " AS IS " BASIS , <nl> - / / / WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> - / / / See the License for the specific language governing permissions and <nl> - / / / limitations under the License . <nl> - / / / <nl> - / / / Copyright holder is triAGENS GmbH , Cologne , Germany <nl> - / / / <nl> - / / / @ author Jan Steemann <nl> - / / / @ author Copyright 2009 - 2012 , triAGENS GmbH , Cologne , Germany <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - # ifndef TRIAGENS_BASICS_C_TIMER_H <nl> - # define TRIAGENS_BASICS_C_TIMER_H 1 <nl> - <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / - - SECTION - - public macros <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ addtogroup timing <nl> - / / / @ { <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief stringify the timer name <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - # define TRI_TIMER_NAME ( name ) timer # # name <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief declare a timer <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - # define TRI_TIMER_DECLARE ( name ) double TRI_TIMER_NAME ( name ) = 0 . 
0 <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief initialise a timer <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - # define TRI_TIMER_INIT ( name ) TRI_TIMER_NAME ( name ) = TRI_microtime ( ) <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief get the current value of a timer <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - # define TRI_TIMER_CURRENT ( name ) ( TRI_microtime ( ) - TRI_TIMER_NAME ( name ) ) <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief stop a timer <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - # define TRI_TIMER_STOP ( name ) TRI_TIMER_NAME ( name ) = TRI_TIMER_CURRENT ( name ) <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief dump a timer to stdout <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - # define TRI_TIMER_DUMP ( name ) fprintf ( stdout , " timer % s : % f \ n " , # name , TRI_TIMER_NAME ( name ) ) ; <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief log a timer value to the log in debug mode <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - # define TRI_TIMER_LOG ( name ) LOG_DEBUG ( " timer % s : % f " , # name , TRI_TIMER_NAME ( name ) ) ; <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ } <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - # endif <nl> - <nl> - / / Local Variables : <nl> - / / mode : outline - minor <nl> - / / outline - regexp : " ^ \ \ ( / / / @ brief \ \ | / / / { @ inheritDoc } \ \ | / / / @ addtogroup \ \ | / / - - SECTION - - \ \ | / / / @ \ \ } \ \ ) " <nl> - / / End : <nl>
|
Removed an unnecessary file (lib/BasicsC/timer.h).
|
arangodb/arangodb
|
2d7f0a7979229be8cf0b8168658330605e367d2a
|
2013-01-31T09:33:16Z
|
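The deleted timer.h wrapped TRI_microtime() in declare/init/stop/dump macros. For reference, the same measurement pattern in plain C++ with std::chrono (a generic sketch, not ArangoDB code; the macro names in the comments map to the removed helpers):

#include <chrono>
#include <cstdio>

int main() {
  auto start = std::chrono::steady_clock::now();       // TRI_TIMER_DECLARE + TRI_TIMER_INIT
  // ... code under measurement ...
  std::chrono::duration<double> elapsed =
      std::chrono::steady_clock::now() - start;        // TRI_TIMER_CURRENT / TRI_TIMER_STOP
  std::printf("timer example: %f\n", elapsed.count()); // TRI_TIMER_DUMP
}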
mmm a / modules / common / adapters / adapter_gflags . cc <nl> ppp b / modules / common / adapters / adapter_gflags . cc <nl> DEFINE_string ( pad_topic , " / apollo / control / pad " , <nl> " control pad message topic name " ) ; <nl> DEFINE_string ( control_command_topic , " / apollo / control " , <nl> " control command topic name " ) ; <nl> - DEFINE_string ( pointcloud_topic , " / apollo / compensator / pointcloud " , <nl> + DEFINE_string ( pointcloud_topic , <nl> + " / apollo / sensor / velodyne64 / compensator / PointCloud2 " , <nl> " pointcloud topic name " ) ; <nl> DEFINE_string ( prediction_topic , " / apollo / prediction " , " prediction topic name " ) ; <nl> DEFINE_string ( perception_obstacle_topic , " / apollo / perception / obstacles " , <nl> mmm a / modules / perception / conf / perception . conf <nl> ppp b / modules / perception / conf / perception . conf <nl> <nl> - # The pointcloud topic name . <nl> - # type : string <nl> - # default : / apollo / sensor / velodyne64 / compensator / PointCloud2 <nl> mmmpointcloud_topic = / apollo / sensor / velodyne64 / compensator / PointCloud2 <nl> <nl> # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> # Flags from lib / config_manager / config_manager . cc <nl>
|
common: unify point cloud topic name.
|
ApolloAuto/apollo
|
d6e6dcc71083f1b447709c63260bf5c7259b5885
|
2017-12-02T02:06:22Z
|
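The Apollo change moves the full default pointcloud topic into the gflags definition so the perception config no longer needs to override it. A minimal sketch of the gflags pattern involved (standard gflags API; the flag name and default match the diff, the main function is illustrative):

#include <gflags/gflags.h>
#include <iostream>

DEFINE_string(pointcloud_topic,
              "/apollo/sensor/velodyne64/compensator/PointCloud2",
              "pointcloud topic name");

int main(int argc, char* argv[]) {
  gflags::ParseCommandLineFlags(&argc, &argv, true);
  // Every module reads the same flag, so the topic stays unified; it can
  // still be overridden with --pointcloud_topic=... on the command line.
  std::cout << FLAGS_pointcloud_topic << std::endl;
  return 0;
}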
mmm a / tests / cpp - tests / Classes / Sprite3DTest / Sprite3DTest . cpp <nl> ppp b / tests / cpp - tests / Classes / Sprite3DTest / Sprite3DTest . cpp <nl> void Sprite3DCubeMapTest : : addNewSpriteWithCoords ( Vec2 p ) <nl> _textureCube - > retain ( ) ; <nl> / / set texture parameters <nl> Texture2D : : TexParams tRepeatParams ; <nl> - tRepeatParams . magFilter = backend : : SamplerFilter : : LINEAR ; <nl> - tRepeatParams . minFilter = backend : : SamplerFilter : : LINEAR ; <nl> - tRepeatParams . wrapS = backend : : SamplerAddressMode : : CLAMP_TO_EDGE ; <nl> - tRepeatParams . wrapT = backend : : SamplerAddressMode : : CLAMP_TO_EDGE ; <nl> + tRepeatParams . magFilter = backend : : SamplerFilter : : LINEAR ; <nl> + tRepeatParams . minFilter = backend : : SamplerFilter : : LINEAR ; <nl> + tRepeatParams . sAddressMode = backend : : SamplerAddressMode : : CLAMP_TO_EDGE ; <nl> + tRepeatParams . tAddressMode = backend : : SamplerAddressMode : : CLAMP_TO_EDGE ; <nl> _textureCube - > setTexParameters ( tRepeatParams ) ; <nl> <nl> auto mat = Sprite3DMaterial : : createWithFilename ( " Sprite3DTest / CubeMap . material " ) ; <nl>
|
Fix Android compile.
|
cocos2d/cocos2d-x
|
73ad5c62d33a9c61732b3051a56925f2c4d2b1b6
|
2019-03-13T03:16:03Z
|
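For reference, the corrected sampler setup from the cocos2d-x patch above, reformatted with comments (this is the patched test code itself, not new API; _textureCube is the test's cube-map texture):

Texture2D::TexParams tRepeatParams;
tRepeatParams.magFilter = backend::SamplerFilter::LINEAR;
tRepeatParams.minFilter = backend::SamplerFilter::LINEAR;
// Renamed fields: the backend-based TexParams uses sAddressMode/tAddressMode
// where the old struct had wrapS/wrapT, which is what broke the Android build.
tRepeatParams.sAddressMode = backend::SamplerAddressMode::CLAMP_TO_EDGE;
tRepeatParams.tAddressMode = backend::SamplerAddressMode::CLAMP_TO_EDGE;
_textureCube->setTexParameters(tRepeatParams);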
mmm a / src / core / lib / surface / call . c <nl> ppp b / src / core / lib / surface / call . c <nl> static grpc_error * consolidate_batch_errors ( batch_control * bctl ) { <nl> if ( n = = 0 ) { <nl> return GRPC_ERROR_NONE ; <nl> } else if ( n = = 1 ) { <nl> + / * Skip creating a composite error in the case that only one error was <nl> + logged * / <nl> grpc_error * e = bctl - > errors [ 0 ] ; <nl> bctl - > errors [ 0 ] = NULL ; <nl> return e ; <nl>
|
Add comment explaining the single-error fast path in consolidate_batch_errors.
|
grpc/grpc
|
a78da60a8ac8f8777aedc02e463f35c4e0cac906
|
2017-01-27T16:16:23Z
|
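The gRPC comment documents a small optimization: when exactly one error was recorded, hand it back directly instead of wrapping it in a composite error. A generic C++ sketch of the same ownership-transfer pattern (hypothetical Error and MakeComposite types, not the grpc_error C API):

#include <memory>
#include <utility>
#include <vector>

struct Error { /* details omitted */ };

// Hypothetical combiner; stands in for building a composite grpc_error
// that references all child errors.
std::unique_ptr<Error> MakeComposite(std::vector<std::unique_ptr<Error>>& errs) {
  // Placeholder: a real implementation would reference every child error.
  return std::make_unique<Error>();
}

std::unique_ptr<Error> Consolidate(std::vector<std::unique_ptr<Error>>& errs) {
  if (errs.empty()) return nullptr;  // corresponds to GRPC_ERROR_NONE
  if (errs.size() == 1) {
    // Skip creating a composite error when only one error was logged:
    // transfer ownership of the lone error to the caller directly.
    return std::move(errs[0]);
  }
  return MakeComposite(errs);
}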
mmm a / include / LightGBM / boosting . h <nl> ppp b / include / LightGBM / boosting . h <nl> class Boosting { <nl> * \ param result used to store prediction result , should allocate memory before call this function <nl> * \ param out_len lenght of returned score <nl> * / <nl> - virtual void GetPredictAt ( int data_idx , score_t * result , data_size_t * out_len ) const = 0 ; <nl> + virtual void GetPredictAt ( int data_idx , score_t * result , data_size_t * out_len ) = 0 ; <nl> <nl> / * ! <nl> * \ brief Prediction for one record , not sigmoid transform <nl> class Boosting { <nl> * \ brief Get number of weak sub - models <nl> * \ return Number of weak sub - models <nl> * / <nl> - virtual int NumberOfSubModels ( ) const = 0 ; <nl> + virtual int NumberOfTotalModel ( ) const = 0 ; <nl> <nl> / * ! <nl> * \ brief Get number of classes <nl> class Boosting { <nl> / * ! <nl> * \ brief Set number of used model for prediction <nl> * / <nl> - virtual void SetNumUsedModel ( int num_used_model ) = 0 ; <nl> + virtual void SetNumIterationForPred ( int num_iteration ) = 0 ; <nl> <nl> / * ! <nl> * \ brief Get Type name of this boosting object <nl> mmm a / include / LightGBM / c_api . h <nl> ppp b / include / LightGBM / c_api . h <nl> DllExport int LGBM_BoosterCreate ( const DatesetHandle train_data , <nl> / * ! <nl> * \ brief load an existing boosting from model file <nl> * \ param filename filename of model <nl> + * \ param out_num_total_model number of total models <nl> * \ param out handle of created Booster <nl> * \ return 0 when success , - 1 when failure happens <nl> * / <nl> DllExport int LGBM_BoosterCreateFromModelfile ( <nl> const char * filename , <nl> + int64_t * out_num_total_model , <nl> BoosterHandle * out ) ; <nl> <nl> / * ! <nl> DllExport int LGBM_BoosterCreateFromModelfile ( <nl> * / <nl> DllExport int LGBM_BoosterFree ( BoosterHandle handle ) ; <nl> <nl> + / * ! <nl> + * \ brief Get number of class <nl> + * \ return number of class <nl> + * / <nl> + DllExport int LGBM_BoosterGetNumClasses ( BoosterHandle handle , int64_t * out_len ) ; <nl> + <nl> / * ! <nl> * \ brief update the model in one round <nl> * \ param handle handle <nl> DllExport int LGBM_BoosterGetEvalCounts ( BoosterHandle handle , int64_t * out_len ) ; <nl> * \ brief Get number of eval <nl> * \ return total number of eval result <nl> * / <nl> - DllExport int LGBM_BoosterGetEvalNames ( BoosterHandle handle , int64_t * out_len , const char * * * out_strs ) ; <nl> + DllExport int LGBM_BoosterGetEvalNames ( BoosterHandle handle , int64_t * out_len , char * * out_strs ) ; <nl> <nl> / * ! <nl> * \ brief get evaluation for training data and validation data <nl> DllExport int LGBM_BoosterGetEval ( BoosterHandle handle , <nl> int64_t * out_len , <nl> float * out_results ) ; <nl> <nl> - / * ! <nl> - * \ brief get raw score for training data , used to calculate gradients outside <nl> - * \ param handle handle <nl> - * \ param out_len len of output result <nl> - * \ param out_result used to set a pointer to array <nl> - * \ return 0 when success , - 1 when failure happens <nl> - * / <nl> - DllExport int LGBM_BoosterGetTrainingScore ( BoosterHandle handle , <nl> - int64_t * out_len , <nl> - const float * * out_result ) ; <nl> - <nl> / * ! <nl> * \ brief Get prediction for training data and validation data <nl> this can be used to support customized eval function <nl> DllExport int LGBM_BoosterGetPredict ( BoosterHandle handle , <nl> / * ! 
<nl> * \ brief make prediction for file <nl> * \ param handle handle <nl> + * \ param data_filename filename of data file <nl> + * \ param data_has_header data file has header or not <nl> * \ param predict_type <nl> * 0 : raw score <nl> * 1 : with transform ( if needed ) <nl> * 2 : leaf index <nl> - * \ param n_used_trees number of used tree <nl> - * \ param data_has_header data file has header or not <nl> - * \ param data_filename filename of data file <nl> + * \ param num_iteration number of iteration for prediction <nl> * \ param result_filename filename of result file <nl> * \ return 0 when success , - 1 when failure happens <nl> * / <nl> DllExport int LGBM_BoosterPredictForFile ( BoosterHandle handle , <nl> - int predict_type , <nl> - int64_t n_used_trees , <nl> - int data_has_header , <nl> const char * data_filename , <nl> + int data_has_header , <nl> + int predict_type , <nl> + int64_t num_iteration , <nl> const char * result_filename ) ; <nl> <nl> / * ! <nl> DllExport int LGBM_BoosterPredictForFile ( BoosterHandle handle , <nl> * 0 : raw score <nl> * 1 : with transform ( if needed ) <nl> * 2 : leaf index <nl> - * \ param n_used_trees number of used tree <nl> + * \ param num_iteration number of iteration for prediction <nl> + * \ param out_len len of output result <nl> * \ param out_result used to set a pointer to array , should allocate memory before call this function <nl> * \ return 0 when success , - 1 when failure happens <nl> * / <nl> DllExport int LGBM_BoosterPredictForCSR ( BoosterHandle handle , <nl> int64_t nelem , <nl> int64_t num_col , <nl> int predict_type , <nl> - int64_t n_used_trees , <nl> - double * out_result ) ; <nl> + int64_t num_iteration , <nl> + int64_t * out_len , <nl> + float * out_result ) ; <nl> <nl> / * ! <nl> * \ brief make prediction for an new data set <nl> DllExport int LGBM_BoosterPredictForCSR ( BoosterHandle handle , <nl> * 0 : raw score <nl> * 1 : with transform ( if needed ) <nl> * 2 : leaf index <nl> - * \ param n_used_trees number of used tree <nl> + * \ param num_iteration number of iteration for prediction <nl> + * \ param out_len len of output result <nl> * \ param out_result used to set a pointer to array , should allocate memory before call this function <nl> * \ return 0 when success , - 1 when failure happens <nl> * / <nl> DllExport int LGBM_BoosterPredictForMat ( BoosterHandle handle , <nl> int32_t ncol , <nl> int is_row_major , <nl> int predict_type , <nl> - int64_t n_used_trees , <nl> - double * out_result ) ; <nl> + int64_t num_iteration , <nl> + int64_t * out_len , <nl> + float * out_result ) ; <nl> <nl> / * ! <nl> * \ brief save model into file <nl> * \ param handle handle <nl> - * \ param num_used_model <nl> + * \ param num_iteration <nl> * \ param filename file name <nl> * \ return 0 when success , - 1 when failure happens <nl> * / <nl> DllExport int LGBM_BoosterSaveModel ( BoosterHandle handle , <nl> - int num_used_model , <nl> + int num_iteration , <nl> const char * filename ) ; <nl> <nl> <nl> mmm a / include / LightGBM / config . h <nl> ppp b / include / LightGBM / config . h <nl> struct IOConfig : public ConfigBase { <nl> std : : string output_result = " LightGBM_predict_result . txt " ; <nl> std : : string input_model = " " ; <nl> int verbosity = 1 ; <nl> - int num_model_predict = NO_LIMIT ; <nl> + int num_iteration_predict = NO_LIMIT ; <nl> bool is_pre_partition = false ; <nl> bool is_enable_sparse = true ; <nl> bool use_two_round_loading = false ; <nl> mmm a / python - package / lightgbm / basic . 
py <nl> ppp b / python - package / lightgbm / basic . py <nl> <nl> import ctypes <nl> import collections <nl> import re <nl> + import tempfile <nl> <nl> import numpy as np <nl> import scipy . sparse <nl> def c_array ( ctype , values ) : <nl> return ( ctype * len ( values ) ) ( * values ) <nl> <nl> def dict_to_str ( data ) : <nl> - if len ( data ) = = 0 : <nl> + if data is None or len ( data ) = = 0 : <nl> return " " <nl> pairs = [ ] <nl> for key in data : <nl> def c_float_array ( data ) : <nl> data = np . array ( data , copy = False ) <nl> if is_numpy_1d_array ( data ) : <nl> if data . dtype = = np . float32 : <nl> - ptr_data = c_array ( ctypes . c_float , data ) <nl> + ptr_data = data . ctypes . data_as ( ctypes . c_float ) <nl> type_data = C_API_DTYPE_FLOAT32 <nl> elif data . dtype = = np . float64 : <nl> - ptr_data = c_array ( ctypes . c_double , data ) <nl> + ptr_data = data . ctypes . data_as ( ctypes . c_double ) <nl> type_data = C_API_DTYPE_FLOAT64 <nl> else : <nl> raise TypeError ( " expected np . float32 or np . float64 , met type ( { } ) " . format ( data . dtype ) ) <nl> def c_int_array ( data ) : <nl> data = np . array ( data , copy = False ) <nl> if is_numpy_1d_array ( data ) : <nl> if data . dtype = = np . int32 : <nl> - ptr_data = c_array ( ctypes . c_int32 , data ) <nl> + ptr_data = data . ctypes . data_as ( ctypes . c_int32 ) <nl> type_data = C_API_DTYPE_INT32 <nl> elif data . dtype = = np . int64 : <nl> - ptr_data = c_array ( ctypes . c_int64 , data ) <nl> + ptr_data = data . ctypes . data_as ( ctypes . c_int64 ) <nl> type_data = C_API_DTYPE_INT64 <nl> else : <nl> raise TypeError ( " expected np . int32 or np . int64 , met type ( { } ) " . format ( data . dtype ) ) <nl> def __init__ ( self , data , max_bin = 255 , reference = None , <nl> self . raw_data = data <nl> else : <nl> self . raw_data = None <nl> + self . data_has_header = False <nl> " " " process for args " " " <nl> params = { } <nl> params [ " max_bin " ] = max_bin <nl> def __init__ ( self , data , max_bin = 255 , reference = None , <nl> raise TypeError ( ' Reference dataset should be None or dataset instance ' ) <nl> " " " start construct data " " " <nl> if is_str ( data ) : <nl> + " " " check data has header or not " " " <nl> + if " has_header " in params or " header " in params : <nl> + if params [ " has_header " ] . lower ( ) = = " true " or params [ " header " ] . lower ( ) = = " true " : <nl> + data_has_header = True <nl> self . handle = ctypes . c_void_p ( ) <nl> _safe_call ( _LIB . LGBM_CreateDatasetFromFile ( <nl> c_str ( data ) , <nl> def __init__ ( self , data , max_bin = 255 , reference = None , <nl> ref_dataset , <nl> ctypes . byref ( self . handle ) ) ) <nl> elif isinstance ( data , scipy . sparse . csr_matrix ) : <nl> - self . _init_from_csr ( data , params_str , ref_dataset ) <nl> - elif isinstance ( data , scipy . sparse . csc_matrix ) : <nl> - self . _init_from_csc ( data , params_str , ref_dataset ) <nl> + self . __init_from_csr ( data , params_str , ref_dataset ) <nl> elif isinstance ( data , np . ndarray ) : <nl> - self . _init_from_npy2d ( data , params_str , ref_dataset ) <nl> + self . __init_from_np2d ( data , params_str , ref_dataset ) <nl> else : <nl> try : <nl> csr = scipy . sparse . csr_matrix ( data ) <nl> - self . _init_from_csr ( csr ) <nl> + if self . raw_data is not None : <nl> + self . raw_data = csr <nl> + self . __init_from_csr ( csr ) <nl> except : <nl> raise TypeError ( ' can not initialize Dataset from { } ' . format ( type ( data ) . __name__ ) ) <nl> + self . 
__label = None <nl> + self . __weight = None <nl> + self . __init_score = None <nl> + self . __group = None <nl> if label is not None : <nl> self . set_label ( label ) <nl> if weight is not None : <nl> def __init__ ( self , data , max_bin = 255 , reference = None , <nl> def free_raw_data ( self ) : <nl> self . raw_data = None <nl> <nl> - def _init_from_csr ( self , csr , params_str , ref_dataset ) : <nl> - " " " <nl> - Initialize data from a CSR matrix . <nl> - " " " <nl> - if len ( csr . indices ) ! = len ( csr . data ) : <nl> - raise ValueError ( ' length mismatch : { } vs { } ' . format ( len ( csr . indices ) , len ( csr . data ) ) ) <nl> - self . handle = ctypes . c_void_p ( ) <nl> - <nl> - ptr_indptr , type_ptr_indptr = c_int_array ( csr . indptr ) <nl> - ptr_data , type_ptr_data = c_float_array ( csr . data ) <nl> - <nl> - _safe_call ( _LIB . LGBM_CreateDatasetFromCSR ( <nl> - ptr_indptr , <nl> - type_ptr_indptr , <nl> - c_array ( ctypes . c_int32 , csr . indices ) , <nl> - ptr_data , <nl> - type_ptr_data , <nl> - len ( csr . indptr ) , <nl> - len ( csr . data ) , <nl> - csr . shape [ 1 ] , <nl> - c_str ( params_str ) , <nl> - ref_dataset , <nl> - ctypes . byref ( self . handle ) ) ) <nl> - <nl> - def _init_from_csc ( self , csr , params_str , ref_dataset ) : <nl> - " " " <nl> - Initialize data from a CSC matrix . <nl> - " " " <nl> - if len ( csc . indices ) ! = len ( csc . data ) : <nl> - raise ValueError ( ' length mismatch : { } vs { } ' . format ( len ( csc . indices ) , len ( csc . data ) ) ) <nl> - self . handle = ctypes . c_void_p ( ) <nl> - <nl> - ptr_indptr , type_ptr_indptr = c_int_array ( csc . indptr ) <nl> - ptr_data , type_ptr_data = c_float_array ( csc . data ) <nl> - <nl> - _safe_call ( _LIB . LGBM_CreateDatasetFromCSC ( <nl> - ptr_indptr , <nl> - type_ptr_indptr , <nl> - c_array ( ctypes . c_int32 , csc . indices ) , <nl> - ptr_data , <nl> - type_ptr_data , <nl> - len ( csc . indptr ) , <nl> - len ( csc . data ) , <nl> - csc . shape [ 0 ] , <nl> - c_str ( params_str ) , <nl> - ref_dataset , <nl> - ctypes . byref ( self . handle ) ) ) <nl> - <nl> - def _init_from_npy2d ( self , mat , params_str , ref_dataset ) : <nl> + def __init_from_np2d ( self , mat , params_str , ref_dataset ) : <nl> " " " <nl> Initialize data from a 2 - D numpy matrix . <nl> " " " <nl> def _init_from_npy2d ( self , mat , params_str , ref_dataset ) : <nl> ref_dataset , <nl> ctypes . byref ( self . handle ) ) ) <nl> <nl> + def __init_from_csr ( self , csr , params_str , ref_dataset ) : <nl> + " " " <nl> + Initialize data from a CSR matrix . <nl> + " " " <nl> + if len ( csr . indices ) ! = len ( csr . data ) : <nl> + raise ValueError ( ' length mismatch : { } vs { } ' . format ( len ( csr . indices ) , len ( csr . data ) ) ) <nl> + self . handle = ctypes . c_void_p ( ) <nl> + <nl> + ptr_indptr , type_ptr_indptr = c_int_array ( csr . indptr ) <nl> + ptr_data , type_ptr_data = c_float_array ( csr . data ) <nl> + <nl> + _safe_call ( _LIB . LGBM_CreateDatasetFromCSR ( <nl> + ptr_indptr , <nl> + type_ptr_indptr , <nl> + csr . indices . ctypes . data_as ( ctypes . c_int32 ) , <nl> + ptr_data , <nl> + type_ptr_data , <nl> + len ( csr . indptr ) , <nl> + len ( csr . data ) , <nl> + csr . shape [ 1 ] , <nl> + c_str ( params_str ) , <nl> + ref_dataset , <nl> + ctypes . byref ( self . handle ) ) ) <nl> + <nl> def __del__ ( self ) : <nl> _safe_call ( _LIB . LGBM_DatasetFree ( self . 
handle ) ) <nl> <nl> def set_field ( self , field_name , data ) : <nl> if not is_numpy_1d_array ( data ) : <nl> raise TypeError ( " Unknow type ( { } ) " . format ( type ( data ) . __name__ ) ) <nl> if data . dtype = = np . float32 : <nl> - ptr_data = c_array ( ctypes . c_float , data ) <nl> + ptr_data = data . ctypes . data_as ( ctypes . c_float ) <nl> type_data = C_API_DTYPE_FLOAT32 <nl> elif data . dtype = = np . int32 : <nl> - ptr_data = c_array ( ctypes . c_int32 , data ) <nl> + ptr_data = data . ctypes . data_as ( ctypes . c_int32 ) <nl> type_data = C_API_DTYPE_INT32 <nl> else : <nl> raise TypeError ( " excepted np . float32 or np . int32 , met type ( { } ) " . format ( data . dtype ) ) <nl> def set_label ( self , label ) : <nl> label = list_to_1d_numpy ( label , np . float32 ) <nl> if label . dtype ! = np . float32 : <nl> label = label . astype ( np . float32 , copy = False ) <nl> + self . __label = label <nl> self . set_field ( ' label ' , label ) <nl> <nl> def set_weight ( self , weight ) : <nl> def set_weight ( self , weight ) : <nl> weight = list_to_1d_numpy ( weight , np . float32 ) <nl> if weight . dtype ! = np . float32 : <nl> weight = weight . astype ( np . float32 , copy = False ) <nl> + self . __weight = weight <nl> self . set_field ( ' weight ' , weight ) <nl> <nl> def set_init_score ( self , score ) : <nl> def set_init_score ( self , score ) : <nl> score = list_to_1d_numpy ( score , np . float32 ) <nl> if score . dtype ! = np . float32 : <nl> score = score . astype ( np . float32 , copy = False ) <nl> + self . __init_score = init_score <nl> self . set_field ( ' init_score ' , score ) <nl> <nl> def set_group ( self , group ) : <nl> def set_group ( self , group ) : <nl> group = list_to_1d_numpy ( group , np . int32 ) <nl> if group . dtype ! = np . int32 : <nl> group = group . astype ( np . int32 , copy = False ) <nl> + self . __group = group <nl> self . set_field ( ' group ' , group ) <nl> <nl> def set_group_id ( self , group_id ) : <nl> def get_label ( self ) : <nl> mmmmmm - <nl> label : array <nl> " " " <nl> - return self . get_field ( ' label ' ) <nl> + if self . __label is None : <nl> + self . __label = self . get_field ( ' label ' ) <nl> + return self . __label <nl> <nl> def get_weight ( self ) : <nl> " " " Get the weight of the Dataset . <nl> def get_weight ( self ) : <nl> mmmmmm - <nl> weight : array <nl> " " " <nl> - return self . get_field ( ' weight ' ) <nl> + if self . __weight is None : <nl> + self . __weight = self . get_field ( ' weight ' ) <nl> + return self . __weight <nl> <nl> def get_init_score ( self ) : <nl> " " " Get the initial score of the Dataset . <nl> def get_init_score ( self ) : <nl> mmmmmm - <nl> init_score : array <nl> " " " <nl> - return self . get_field ( ' init_score ' ) <nl> + if self . __init_score is None : <nl> + self . __init_score = self . get_field ( ' init_score ' ) <nl> + return self . __init_score <nl> + <nl> + def get_group ( self ) : <nl> + " " " Get the initial score of the Dataset . <nl> + <nl> + Returns <nl> + mmmmmm - <nl> + init_score : array <nl> + " " " <nl> + if self . __group is None : <nl> + self . __group = self . get_field ( ' group ' ) <nl> + return self . __group <nl> <nl> def num_data ( self ) : <nl> " " " Get the number of rows in the Dataset . <nl> def feature_names ( self , feature_names ) : <nl> else : <nl> self . 
_feature_names = None <nl> <nl> + C_API_PREDICT_NORMAL = 0 <nl> + C_API_PREDICT_RAW_SCORE = 1 <nl> + C_API_PREDICT_LEAF_INDEX = 2 <nl> <nl> class Booster ( object ) : <nl> " " " " A Booster of of LightGBM . <nl> class Booster ( object ) : <nl> <nl> feature_names = None <nl> <nl> - def __init__ ( self , params = None , <nl> - train_set = None , <nl> - valid_sets = None , <nl> - name_valid_sets = None , <nl> - model_file = None , <nl> - fobj = None ) : <nl> + def __init__ ( self , params = None , <nl> + train_set = None , valid_sets = None , <nl> + name_valid_sets = None , model_file = None ) : <nl> # pylint : disable = invalid - name <nl> " " " Initialize the Booster . <nl> <nl> def __init__ ( self , params = None , <nl> name_valid_sets : List of string <nl> name of validation datasets <nl> model_file : string <nl> - Path to the model file . <nl> + Path to the model file . <nl> + If tarin_set is not None , used for continued train . <nl> + else used for loading model prediction task <nl> " " " <nl> self . handle = ctypes . c_void_p ( ) <nl> if train_set is not None : <nl> + " " " Training task " " " <nl> if not isinstance ( train_set , Dataset ) : <nl> raise TypeError ( ' training data should be Dataset instance , met { } ' . format ( type ( train_set ) . __name__ ) ) <nl> <nl> valid_handles = None <nl> - valid_cnames = None <nl> n_valid = 0 <nl> if valid_sets is not None : <nl> for valid in valid_sets : <nl> def __init__ ( self , params = None , <nl> raise TypeError ( ' valid data should be Dataset instance , met { } ' . format ( type ( valid ) . __name__ ) ) <nl> valid_handles = c_array ( ctypes . c_void_p , [ valid . handle for valid in valid_sets ] ) <nl> if name_valid_sets is None : <nl> - name_valid_sets = [ " valid_ { } " . format ( x ) for x in range ( len ( valid_sets ) ) ] <nl> + name_valid_sets = [ " valid_ { } " . format ( x + 1 ) for x in range ( len ( valid_sets ) ) ] <nl> if len ( valid_sets ) ! = len ( name_valid_sets ) : <nl> raise Exception ( ' len of valid_sets should be equal with len of name_valid_sets ' ) <nl> - valid_cnames = c_array ( ctypes . c_char_p , [ c_str ( x ) for x in name_valid_sets ] ) <nl> n_valid = len ( valid_sets ) <nl> ref_input_model = None <nl> params_str = dict_to_str ( params ) <nl> if model_file is not None : <nl> ref_input_model = c_str ( model_file ) <nl> " " " construct booster object " " " <nl> - _safe_call ( LIB . LGBM_BoosterCreate ( <nl> + _safe_call ( _LIB . LGBM_BoosterCreate ( <nl> train_set . handle , <nl> valid_handles , <nl> - valid_cnames , <nl> n_valid , <nl> - params_str , <nl> + c_str ( params_str ) , <nl> ref_input_model , <nl> ctypes . byref ( self . handle ) ) ) <nl> " " " if need to continue train " " " <nl> if model_file is not None : <nl> - self . init_continue_train ( train_set ) <nl> + self . __init_continue_train ( train_set ) <nl> if valid_sets is not None : <nl> for valid in valid_sets : <nl> - self . init_continue_train ( valid ) <nl> + self . __init_continue_train ( valid ) <nl> + " " " save reference to data " " " <nl> + self . train_set = train_set <nl> + self . valid_sets = valid_sets <nl> + self . name_valid_sets = name_valid_sets <nl> + self . __num_dataset = 1 + n_valid <nl> + self . __training_score = None <nl> + out_len = ctypes . c_int64 ( 0 ) <nl> + _safe_call ( _LIB . LGBM_BoosterGetNumClasses ( <nl> + self . handle , <nl> + ctypes . byref ( out_len ) ) ) <nl> + self . __num_class = out_len . value <nl> + " " " buffer for inner predict " " " <nl> + self . 
__inner_predict_buffer = [ None for _ in range ( self . __num_dataset ) ] <nl> + " " " Get num of inner evals " " " <nl> + _safe_call ( _LIB . LGBM_BoosterGetEvalCounts ( <nl> + self . handle , <nl> + ctypes . byref ( out_len ) ) ) <nl> + self . __num_inner_eval = out_len . value <nl> + if self . __num_inner_eval > 0 : <nl> + " " " Get name of evals " " " <nl> + string_buffers = [ ctypes . create_string_buffer ( 255 ) for i in range ( self . __num_inner_eval ) ] <nl> + ptr_string_buffers = ( ctypes . c_char_p * self . __num_inner_eval ) ( * map ( ctypes . addressof , string_buffers ) ) <nl> + _safe_call ( _LIB . LGBM_BoosterGetEvalNames ( <nl> + self . handle , <nl> + ctypes . byref ( out_len ) , <nl> + ptr_string_buffers ) ) <nl> + if self . __num_inner_eval ! = out_len . value : <nl> + raise ValueError ( " size of eval names doesn ' t equal with num_evals " ) <nl> + self . __name_inner_eval = [ ] <nl> + for i in range ( self . __num_inner_eval ) : <nl> + self . __name_inner_eval . append ( string_buffers [ i ] . value . decode ( ) ) <nl> <nl> elif model_file is not None : <nl> - _safe_call ( _LIB . LGBM_BoosterCreateFromModelfile ( c_str ( model_file ) , ctypes . byref ( self . handle ) ) ) <nl> + " " " Prediction task " " " <nl> + out_num_total_model = ctypes . c_int64 ( 0 ) <nl> + _safe_call ( _LIB . LGBM_BoosterCreateFromModelfile ( <nl> + c_str ( model_file ) , <nl> + ctypes . byref ( out_num_total_model ) , <nl> + ctypes . byref ( self . handle ) ) ) <nl> + self . __num_total_model = out_num_total_model . value <nl> + out_len = ctypes . c_int64 ( 0 ) <nl> + _safe_call ( _LIB . LGBM_BoosterGetNumClasses ( <nl> + self . handle , <nl> + ctypes . byref ( out_len ) ) ) <nl> + self . __num_class = out_len . value <nl> else : <nl> raise TypeError ( ' At least need training dataset or model file to create booster instance ' ) <nl> <nl> def __del__ ( self ) : <nl> - _LIB . LGBM_BoosterFree ( self . handle ) <nl> + _safe_call ( _LIB . LGBM_BoosterFree ( self . handle ) ) <nl> + <nl> + def update ( self , fobj = None ) : <nl> + " " " <nl> + Update for one iteration <nl> + Note : for multi - class task , the score is group by class_id first , then group by row_id <nl> + if you want to get i - th row score in j - th class , the access way is score [ j * num_data + i ] <nl> + and you should group grad and hess in this way as well <nl> + Parameters <nl> + mmmmmmmmm - <nl> + fobj : function <nl> + Customized objective function . <nl> <nl> + Returns <nl> + mmmmmm - <nl> + is_finished , bool <nl> + " " " <nl> + is_finished = ctypes . c_int ( 0 ) <nl> + if fobj is None : <nl> + _safe_call ( _LIB . LGBM_BoosterUpdateOneIter ( <nl> + self . handle , <nl> + ctypes . byref ( is_finished ) ) ) <nl> + return is_finished . value = = 1 <nl> + else : <nl> + grad , hess = fobj ( self . __inner_predict ( 0 ) , self . train_set ) <nl> + return self . boost ( grad , hess ) <nl> + <nl> + def boost ( self , grad , hess ) : <nl> + " " " <nl> + Boost the booster for one iteration , with customized gradient statistics . <nl> + Note : for multi - class task , the score is group by class_id first , then group by row_id <nl> + if you want to get i - th row score in j - th class , the access way is score [ j * num_data + i ] <nl> + and you should group grad and hess in this way as well <nl> + Parameters <nl> + mmmmmmmmm - <nl> + grad : 1d numpy with dtype = float32 <nl> + The first order of gradient . <nl> + hess : 1d numpy with dtype = float32 <nl> + The second order of gradient . 
<nl> + <nl> + Returns <nl> + mmmmmm - <nl> + is_finished , bool <nl> + " " " <nl> + if not is_numpy_1d_array ( grad ) and not is_numpy_1d_array ( hess ) : <nl> + raise TypeError ( ' type of grad / hess should be 1d numpy object ' ) <nl> + if not grad . dtype = = np . float32 and not hess . dtype = = np . float32 : <nl> + raise TypeError ( ' type of grad / hess should be np . float32 ' ) <nl> + if len ( grad ) ! = len ( hess ) : <nl> + raise ValueError ( ' grad / hess length mismatch : { } / { } ' . format ( len ( grad ) , len ( hess ) ) ) <nl> + is_finished = ctypes . c_int ( 0 ) <nl> + _safe_call ( _LIB . LGBM_BoosterUpdateOneIterCustom ( <nl> + self . handle , <nl> + grad . ctypes . data_as ( ctypes . c_float ) , <nl> + hess . ctypes . data_as ( ctypes . c_float ) , <nl> + ctypes . byref ( is_finished ) ) ) <nl> + return is_finished . value = = 1 <nl> + <nl> + def eval_train ( self , feval = None ) : <nl> + " " " Evaluate for training data <nl> + <nl> + Parameters <nl> + mmmmmmmmm - <nl> + feval : function <nl> + Custom evaluation function . <nl> + <nl> + Returns <nl> + mmmmmm - <nl> + result : str <nl> + Evaluation result string . <nl> + " " " <nl> + return self . __inner_eval ( " training " , 0 , feval ) <nl> + <nl> + def eval_valid ( self , feval = None ) : <nl> + " " " Evaluate for validation data <nl> + <nl> + Parameters <nl> + mmmmmmmmm - <nl> + feval : function <nl> + Custom evaluation function . <nl> + <nl> + Returns <nl> + mmmmmm - <nl> + result : str <nl> + Evaluation result string . <nl> + " " " <nl> + ret = [ ] <nl> + for i in range ( 1 , self . __num_dataset ) : <nl> + ret . append ( self . __inner_eval ( self . name_valid_sets [ i - 1 ] , i , feval ) ) <nl> + return ' \ n ' . join ( ret ) <nl> + <nl> + def save_model ( self , filename , num_iteration = - 1 ) : <nl> + _safe_call ( _LIB . LGBM_BoosterSaveModel ( <nl> + self . handle , <nl> + num_iteration , <nl> + c_str ( filename ) ) ) <nl> + <nl> + def predict ( self , data , num_iteration = - 1 , raw_score = False , pred_leaf = False , data_has_header = False , is_reshape = True ) : <nl> + if isinstance ( data , Dataset ) : <nl> + raise TypeError ( " cannot use Dataset instance for prediction , please use raw data instead " ) <nl> + predict_type = C_API_PREDICT_NORMAL <nl> + if raw_score : <nl> + predict_type = cC_API_PREDICT_RAW_SCORE <nl> + if pred_leaf : <nl> + predict_type = C_API_PREDICT_LEAF_INDEX <nl> + int_data_has_header = 0 <nl> + if data_has_header : <nl> + int_data_has_header = 1 <nl> + if is_str ( data ) : <nl> + tmp_pred_fname = tempfile . NamedTemporaryFile ( prefix = " lightgbm_tmp_pred_ " ) . name <nl> + _safe_call ( _LIB . LGBM_BoosterPredictForFile ( <nl> + self . handle , <nl> + c_str ( data ) , <nl> + int_data_has_header , <nl> + predict_type , <nl> + num_iteration , <nl> + c_str ( tmp_pred_fname ) ) ) <nl> + lines = open ( tmp_pred_fname , " r " ) . readlines ( ) <nl> + nrow = len ( lines ) <nl> + preds = [ ] <nl> + for line in lines : <nl> + for token in line . split ( ' \ t ' ) : <nl> + preds . append ( float ( token ) ) <nl> + preds = np . array ( preds , copy = False ) <nl> + os . remove ( tmp_pred_fname ) <nl> + elif isinstance ( data , scipy . sparse . csr_matrix ) : <nl> + preds , nrow = self . __pred_for_csr ( data , num_iteration , predict_type ) <nl> + elif isinstance ( data , np . ndarray ) : <nl> + preds , nrow = self . __pred_for_np2d ( data , num_iteration , predict_type ) <nl> + else : <nl> + try : <nl> + csr = scipy . sparse . csr_matrix ( data ) <nl> + res = self . 
__pred_for_csr ( csr , num_iteration , predict_type ) <nl> + except : <nl> + raise TypeError ( ' can not predict data for type { } ' . format ( type ( data ) . __name__ ) ) <nl> + if pred_leaf : <nl> + preds = preds . astype ( np . int32 ) <nl> + if preds . size ! = nrow and is_reshape : <nl> + if preds . size % nrow = = 0 : <nl> + ncol = int ( preds . size / nrow ) <nl> + preds = preds . reshape ( nrow , ncol ) <nl> + else : <nl> + raise ValueError ( ' len of predict result ( % d ) cannot be divide nrow ( % d ) ' % ( preds . size , nrow ) ) <nl> + return preds <nl> + <nl> + def __pred_for_np2d ( self , mat , num_iteration , predict_type ) : <nl> + " " " <nl> + Predict for a 2 - D numpy matrix . <nl> + " " " <nl> + if len ( mat . shape ) ! = 2 : <nl> + raise ValueError ( ' Input numpy . ndarray must be 2 dimensional ' ) <nl> + <nl> + if mat . dtype = = np . float32 or mat . dtype = = np . float64 : <nl> + data = np . array ( mat . reshape ( mat . size ) , dtype = mat . dtype , copy = False ) <nl> + else : <nl> + " " " change non - float data to float data , need to copy " " " <nl> + data = np . array ( mat . reshape ( mat . size ) , dtype = np . float32 ) <nl> + ptr_data , type_ptr_data = c_float_array ( data ) <nl> + n_preds = self . __num_class * mat . shape [ 0 ] <nl> + if predict_type = = C_API_PREDICT_LEAF_INDEX : <nl> + if num_iteration > 0 : <nl> + n_preds * = num_iteration <nl> + else : <nl> + used_iteration = self . __num_total_model / self . __num_class <nl> + n_preds * = used_iteration <nl> + preds = np . zeros ( n_preds , dtype = np . float32 ) <nl> + out_num_preds = ctypes . c_int64 ( 0 ) <nl> + _safe_call ( LIB . LGBM_BoosterPredictForMat ( <nl> + self . handle , <nl> + ptr_data , <nl> + type_ptr_data , <nl> + mat . shape [ 0 ] , <nl> + mat . shape [ 1 ] , <nl> + C_API_IS_ROW_MAJOR , <nl> + predict_type , <nl> + num_iteration , <nl> + ctypes . byref ( out_num_preds ) , <nl> + preds . ctypes . data_as ( ctypes . POINTER ( ctypes . c_float ) ) <nl> + ) ) <nl> + if n_preds ! = out_num_preds . value : <nl> + raise ValueError ( " incorrect number for predict result " ) <nl> + return preds , mat . shape [ 0 ] <nl> + <nl> + def __pred_for_csr ( self , csr , num_iteration , predict_type ) : <nl> + " " " <nl> + Predict for a csr data <nl> + " " " <nl> + nrow = len ( csr . indptr ) - 1 <nl> + n_preds = self . __num_class * nrow <nl> + if predict_type = = C_API_PREDICT_LEAF_INDEX : <nl> + if num_iteration > 0 : <nl> + n_preds * = num_iteration <nl> + else : <nl> + used_iteration = self . __num_total_model / self . __num_class <nl> + n_preds * = used_iteration <nl> + preds = np . zeros ( n_preds , dtype = np . float32 ) <nl> + out_num_preds = ctypes . c_int64 ( 0 ) <nl> + <nl> + ptr_indptr , type_ptr_indptr = c_int_array ( csr . indptr ) <nl> + ptr_data , type_ptr_data = c_float_array ( csr . data ) <nl> + <nl> + _safe_call ( LIB . LGBM_BoosterPredictForCSR ( <nl> + self . handle , <nl> + ptr_indptr , <nl> + type_ptr_indptr , <nl> + csr . indices . ctypes . data_as ( ctypes . c_int32 ) , <nl> + ptr_data , <nl> + type_ptr_data , <nl> + len ( csr . indptr ) , <nl> + len ( csr . data ) , <nl> + csr . shape [ 1 ] , <nl> + predict_type , <nl> + num_iteration , <nl> + ctypes . byref ( out_num_preds ) , <nl> + preds . ctypes . data_as ( ctypes . POINTER ( ctypes . c_float ) ) <nl> + ) ) <nl> + if n_preds ! = out_num_preds . 
value : <nl> + raise ValueError ( " incorrect number for predict result " ) <nl> + return preds , nrow <nl> + <nl> + def __inner_eval ( self , data_name , data_idx , feval = None ) : <nl> + if data_idx > = self . __num_dataset : <nl> + raise ValueError ( " data_idx should be smaller than number of dataset " ) <nl> + ret = [ ] <nl> + if self . __num_inner_eval > 0 : <nl> + result = np . array ( [ 0 . 0 for _ in range ( self . __num_inner_eval ) ] , dtype = np . float32 ) <nl> + out_len = ctypes . c_int64 ( 0 ) <nl> + _safe_call ( _LIB . LGBM_BoosterGetEval ( <nl> + self . handle , <nl> + data_idx , <nl> + ctypes . byref ( out_len ) , <nl> + result . ctypes . data_as ( ctypes . POINTER ( ctypes . c_float ) ) ) ) <nl> + if out_len . value ! = self . __num_inner_eval : <nl> + raise ValueError ( " incorrect number of eval results " ) <nl> + for i in range ( self . __num_inner_eval ) : <nl> + ret . append ( ' % s % s : % f ' % ( data_name , self . __name_inner_eval [ i ] , result [ i ] ) ) <nl> + if feval is not None : <nl> + if data_idx = = 0 : <nl> + cur_data = self . train_set <nl> + else : <nl> + cur_data = self . valid_sets [ data_idx - 1 ] <nl> + feval_ret = feval ( self . __inner_predict ( data_idx ) , cur_data ) <nl> + if isinstance ( feval_ret , list ) : <nl> + for name , val in feval_ret : <nl> + ret . append ( ' % s % s : % f ' % ( data_name , name , val ) ) <nl> + else : <nl> + name , val = feval_ret <nl> + ret . append ( ' % s % s : % f ' % ( data_name , name , val ) ) <nl> + return ' \ t ' . join ( ret ) <nl> + <nl> + def __inner_predict ( self , data_idx ) : <nl> + if data_idx > = self . __num_dataset : <nl> + raise ValueError ( " data_idx should be smaller than number of dataset " ) <nl> + if self . __inner_predict_buffer [ data_idx ] is None : <nl> + if data_idx = = 0 : <nl> + num_data = self . train_set . num_data ( ) * self . __num_class <nl> + else : <nl> + num_data = self . valid_sets [ data_idx - 1 ] . num_data ( ) * self . __num_class <nl> + self . __inner_predict_buffer [ data_idx ] = \ <nl> + np . array ( [ 0 . 0 for _ in range ( num_data ) ] , dtype = np . float32 , copy = False ) <nl> + out_len = ctypes . c_int64 ( 0 ) <nl> + data_ptr = self . __inner_predict_buffer [ data_idx ] . ctypes . data_as ( ctypes . POINTER ( ctypes . c_float ) ) <nl> + _safe_call ( _LIB . LGBM_BoosterGetPredict ( <nl> + self . handle , <nl> + data_idx , <nl> + ctypes . byref ( out_len ) , <nl> + data_ptr ) ) <nl> + if out_len . value ! = len ( self . __inner_predict_buffer [ data_idx ] ) : <nl> + raise ValueError ( " incorrect number of predict results for data % d " % ( data_idx ) ) <nl> + return self . __inner_predict_buffer [ data_idx ] <nl> + <nl> + <nl> + def __init_continue_train ( self , dataset ) : <nl> + if dataset . raw_data is None : <nl> + raise ValueError ( " should set is_continue_train = True in dataset while need to continue train " ) <nl> + init_score = self . predict ( dataset . raw_data , raw_score = True , data_has_header = dataset . data_has_header , is_reshape = False ) <nl> + dataset . set_init_score ( init_score ) <nl> + dataset . free_raw_data ( ) <nl> + <nl> + <nl> + # tmp test <nl> + train_data = Dataset ( ' . . / . . / examples / binary_classification / binary . train ' ) <nl> + test_data = Dataset ( ' . . / . . / examples / binary_classification / binary . 
test ' , reference = train_data ) <nl> + param = { " metric " : " l2 , l1 " } <nl> + lgb = Booster ( train_set = train_data , valid_sets = [ test_data ] , params = param ) <nl> + for i in range ( 100 ) : <nl> + lgb . update ( ) <nl> + print ( lgb . eval_valid ( ) ) <nl> + print ( lgb . eval_train ( ) ) <nl> + print ( lgb . predict ( ' . . / . . / examples / binary_classification / binary . train ' ) ) <nl> \ No newline at end of file <nl> mmm a / src / application / application . cpp <nl> ppp b / src / application / application . cpp <nl> void Application : : LoadData ( ) { <nl> / / prediction is needed if using input initial model ( continued train ) <nl> PredictFunction predict_fun = nullptr ; <nl> / / need to continue training <nl> - if ( boosting_ - > NumberOfSubModels ( ) > 0 ) { <nl> + if ( boosting_ - > NumberOfTotalModel ( ) > 0 ) { <nl> Predictor predictor ( boosting_ . get ( ) , true , false ) ; <nl> predict_fun = predictor . GetPredictFunction ( ) ; <nl> } <nl> void Application : : Train ( ) { <nl> <nl> <nl> void Application : : Predict ( ) { <nl> - boosting_ - > SetNumUsedModel ( config_ . io_config . num_model_predict ) ; <nl> + boosting_ - > SetNumIterationForPred ( config_ . io_config . num_iteration_predict ) ; <nl> / / create predictor <nl> Predictor predictor ( boosting_ . get ( ) , config_ . io_config . is_predict_raw_score , <nl> config_ . io_config . is_predict_leaf_index ) ; <nl> mmm a / src / boosting / dart . hpp <nl> ppp b / src / boosting / dart . hpp <nl> class DART : public GBDT { <nl> * \ brief one training iteration <nl> * / <nl> bool TrainOneIter ( const score_t * gradient , const score_t * hessian , bool is_eval ) override { <nl> + is_update_score_cur_iter_ = false ; <nl> GBDT : : TrainOneIter ( gradient , hessian , false ) ; <nl> / / normalize <nl> Normalize ( ) ; <nl> class DART : public GBDT { <nl> * \ return training score <nl> * / <nl> const score_t * GetTrainingScore ( data_size_t * out_len ) override { <nl> - DroppingTrees ( ) ; <nl> + if ( ! is_update_score_cur_iter_ ) { <nl> + / / only drop one time in one iteration <nl> + DroppingTrees ( ) ; <nl> + is_update_score_cur_iter_ = true ; <nl> + } <nl> * out_len = train_score_updater_ - > num_data ( ) * num_class_ ; <nl> return train_score_updater_ - > score ( ) ; <nl> } <nl> / * ! <nl> * \ brief save model to file <nl> - * \ param num_used_model number of model that want to save , - 1 means save all <nl> + * \ param num_iteration - 1 means save all <nl> * \ param is_finish is training finished or not <nl> * \ param filename filename that want to save to <nl> * / <nl> - void SaveModelToFile ( int num_used_model , bool is_finish , const char * filename ) override { <nl> + void SaveModelToFile ( int num_iteration , bool is_finish , const char * filename ) override { <nl> / / only save model once when is_finish = true <nl> if ( is_finish & & saved_model_size_ < 0 ) { <nl> - GBDT : : SaveModelToFile ( num_used_model , is_finish , filename ) ; <nl> + GBDT : : SaveModelToFile ( num_iteration , is_finish , filename ) ; <nl> } <nl> } <nl> / * ! <nl> class DART : public GBDT { <nl> double drop_rate_ ; <nl> / * ! \ brief Random generator , used to select dropping trees * / <nl> Random random_for_drop_ ; <nl> + / * ! \ brief Flag that the score is update on current iter or not * / <nl> + bool is_update_score_cur_iter_ ; <nl> } ; <nl> <nl> } / / namespace LightGBM <nl> mmm a / src / boosting / gbdt . cpp <nl> ppp b / src / boosting / gbdt . 
cpp <nl> <nl> <nl> namespace LightGBM { <nl> <nl> - GBDT : : GBDT ( ) : saved_model_size_ ( - 1 ) , num_used_model_ ( 0 ) { <nl> + GBDT : : GBDT ( ) : saved_model_size_ ( - 1 ) , num_iteration_for_pred_ ( 0 ) { <nl> <nl> } <nl> <nl> void GBDT : : Init ( const BoostingConfig * config , const Dataset * train_data , const O <nl> gbdt_config_ = config ; <nl> iter_ = 0 ; <nl> saved_model_size_ = - 1 ; <nl> - num_used_model_ = 0 ; <nl> + num_iteration_for_pred_ = 0 ; <nl> max_feature_idx_ = 0 ; <nl> early_stopping_round_ = gbdt_config_ - > early_stopping_round ; <nl> shrinkage_rate_ = gbdt_config_ - > learning_rate ; <nl> const score_t * GBDT : : GetTrainingScore ( data_size_t * out_len ) { <nl> return train_score_updater_ - > score ( ) ; <nl> } <nl> <nl> - void GBDT : : GetPredictAt ( int data_idx , score_t * out_result , data_size_t * out_len ) const { <nl> + void GBDT : : GetPredictAt ( int data_idx , score_t * out_result , data_size_t * out_len ) { <nl> CHECK ( data_idx > = 0 & & data_idx < = static_cast < int > ( valid_metrics_ . size ( ) ) ) ; <nl> std : : vector < double > ret ; <nl> <nl> const score_t * raw_scores = nullptr ; <nl> data_size_t num_data = 0 ; <nl> if ( data_idx = = 0 ) { <nl> - raw_scores = train_score_updater_ - > score ( ) ; <nl> + raw_scores = GetTrainingScore ( out_len ) ; <nl> num_data = train_score_updater_ - > num_data ( ) ; <nl> } else { <nl> auto used_idx = data_idx - 1 ; <nl> raw_scores = valid_score_updater_ [ used_idx ] - > score ( ) ; <nl> num_data = valid_score_updater_ [ used_idx ] - > num_data ( ) ; <nl> + * out_len = num_data * num_class_ ; <nl> } <nl> - * out_len = num_data * num_class_ ; <nl> - <nl> if ( num_class_ > 1 ) { <nl> - # pragma omp parallel for schedule ( guided ) <nl> + # pragma omp parallel for schedule ( static ) <nl> for ( data_size_t i = 0 ; i < num_data ; + + i ) { <nl> std : : vector < double > tmp_result ; <nl> for ( int j = 0 ; j < num_class_ ; + + j ) { <nl> void GBDT : : GetPredictAt ( int data_idx , score_t * out_result , data_size_t * out_len ) <nl> } <nl> } <nl> } else if ( sigmoid_ > 0 . 0f ) { <nl> - # pragma omp parallel for schedule ( guided ) <nl> + # pragma omp parallel for schedule ( static ) <nl> for ( data_size_t i = 0 ; i < num_data ; + + i ) { <nl> out_result [ i ] = static_cast < score_t > ( 1 . 0f / ( 1 . 0f + std : : exp ( - 2 . 0f * sigmoid_ * raw_scores [ i ] ) ) ) ; <nl> } <nl> } else { <nl> - # pragma omp parallel for schedule ( guided ) <nl> + # pragma omp parallel for schedule ( static ) <nl> for ( data_size_t i = 0 ; i < num_data ; + + i ) { <nl> out_result [ i ] = raw_scores [ i ] ; <nl> } <nl> void GBDT : : Boosting ( ) { <nl> GetGradients ( GetTrainingScore ( & num_score ) , gradients_ . data ( ) , hessians_ . data ( ) ) ; <nl> } <nl> <nl> - void GBDT : : SaveModelToFile ( int num_used_model , bool is_finish , const char * filename ) { <nl> + void GBDT : : SaveModelToFile ( int num_iteration , bool is_finish , const char * filename ) { <nl> / / first time to this function , open file <nl> if ( saved_model_size_ < 0 ) { <nl> model_output_file_ . open ( filename ) ; <nl> void GBDT : : SaveModelToFile ( int num_used_model , bool is_finish , const char * filen <nl> if ( ! model_output_file_ . is_open ( ) ) { <nl> return ; <nl> } <nl> - if ( num_used_model = = NO_LIMIT ) { <nl> + int num_used_model = 0 ; <nl> + if ( num_iteration = = NO_LIMIT ) { <nl> num_used_model = static_cast < int > ( models_ . 
size ( ) ) ; <nl> } else { <nl> - num_used_model = num_used_model * num_class_ ; <nl> + num_used_model = num_iteration * num_class_ ; <nl> } <nl> int rest = num_used_model - early_stopping_round_ * num_class_ ; <nl> / / output tree models <nl> void GBDT : : LoadModelFromString ( const std : : string & model_str ) { <nl> } <nl> } <nl> Log : : Info ( " Finished loading % d models " , models_ . size ( ) ) ; <nl> - num_used_model_ = static_cast < int > ( models_ . size ( ) ) / num_class_ ; <nl> + num_iteration_for_pred_ = static_cast < int > ( models_ . size ( ) ) / num_class_ ; <nl> } <nl> <nl> std : : string GBDT : : FeatureImportance ( ) const { <nl> std : : string GBDT : : FeatureImportance ( ) const { <nl> <nl> std : : vector < double > GBDT : : PredictRaw ( const double * value ) const { <nl> std : : vector < double > ret ( num_class_ , 0 . 0f ) ; <nl> - for ( int i = 0 ; i < num_used_model_ ; + + i ) { <nl> + for ( int i = 0 ; i < num_iteration_for_pred_ ; + + i ) { <nl> for ( int j = 0 ; j < num_class_ ; + + j ) { <nl> ret [ j ] + = models_ [ i * num_class_ + j ] - > Predict ( value ) ; <nl> } <nl> std : : vector < double > GBDT : : PredictRaw ( const double * value ) const { <nl> <nl> std : : vector < double > GBDT : : Predict ( const double * value ) const { <nl> std : : vector < double > ret ( num_class_ , 0 . 0f ) ; <nl> - for ( int i = 0 ; i < num_used_model_ ; + + i ) { <nl> + for ( int i = 0 ; i < num_iteration_for_pred_ ; + + i ) { <nl> for ( int j = 0 ; j < num_class_ ; + + j ) { <nl> ret [ j ] + = models_ [ i * num_class_ + j ] - > Predict ( value ) ; <nl> } <nl> std : : vector < double > GBDT : : Predict ( const double * value ) const { <nl> <nl> std : : vector < int > GBDT : : PredictLeafIndex ( const double * value ) const { <nl> std : : vector < int > ret ; <nl> - for ( int i = 0 ; i < num_used_model_ ; + + i ) { <nl> + for ( int i = 0 ; i < num_iteration_for_pred_ ; + + i ) { <nl> for ( int j = 0 ; j < num_class_ ; + + j ) { <nl> ret . push_back ( models_ [ i * num_class_ + j ] - > PredictLeafIndex ( value ) ) ; <nl> } <nl> mmm a / src / boosting / gbdt . h <nl> ppp b / src / boosting / gbdt . h <nl> class GBDT : public Boosting { <nl> * \ param result used to store prediction result , should allocate memory before call this function <nl> * \ param out_len lenght of returned score <nl> * / <nl> - void GetPredictAt ( int data_idx , score_t * out_result , data_size_t * out_len ) const override ; <nl> + void GetPredictAt ( int data_idx , score_t * out_result , data_size_t * out_len ) override ; <nl> <nl> / * ! <nl> * \ brief Predtion for one record without sigmoid transformation <nl> class GBDT : public Boosting { <nl> <nl> / * ! <nl> * \ brief save model to file <nl> - * \ param num_used_model number of model that want to save , - 1 means save all <nl> + * \ param num_iteration - 1 means save all <nl> * \ param is_finish is training finished or not <nl> * \ param filename filename that want to save to <nl> * / <nl> - virtual void SaveModelToFile ( int num_used_model , bool is_finish , const char * filename ) override ; <nl> + virtual void SaveModelToFile ( int num_iteration , bool is_finish , const char * filename ) override ; <nl> / * ! <nl> * \ brief Restore from a serialized string <nl> * / <nl> class GBDT : public Boosting { <nl> * / <nl> inline int LabelIdx ( ) const override { return label_idx_ ; } <nl> <nl> + <nl> / * ! 
<nl> * \ brief Get number of weak sub - models <nl> * \ return Number of weak sub - models <nl> * / <nl> - inline int NumberOfSubModels ( ) const override { return static_cast < int > ( models_ . size ( ) ) ; } <nl> + inline int NumberOfTotalModel ( ) const override { return static_cast < int > ( models_ . size ( ) ) ; } <nl> <nl> / * ! <nl> * \ brief Get number of classes <nl> class GBDT : public Boosting { <nl> inline int NumberOfClasses ( ) const override { return num_class_ ; } <nl> <nl> / * ! <nl> - * \ brief Set number of used model for prediction <nl> + * \ brief Set number of iterations for prediction <nl> * / <nl> - inline void SetNumUsedModel ( int num_used_model ) { <nl> - if ( num_used_model > = 0 ) { <nl> - num_used_model_ = static_cast < int > ( num_used_model / num_class_ ) ; <nl> + inline void SetNumIterationForPred ( int num_iteration ) override { <nl> + if ( num_iteration > 0 ) { <nl> + num_iteration_for_pred_ = num_iteration ; <nl> + } else { <nl> + num_iteration_for_pred_ = static_cast < int > ( models_ . size ( ) ) / num_class_ ; <nl> } <nl> } <nl> <nl> class GBDT : public Boosting { <nl> / * ! \ brief File to write models * / <nl> std : : ofstream model_output_file_ ; <nl> / * ! \ brief number of used model * / <nl> - int num_used_model_ ; <nl> + int num_iteration_for_pred_ ; <nl> / * ! \ brief Shrinkage rate for one iteration * / <nl> double shrinkage_rate_ ; <nl> } ; <nl> mmm a / src / c_api . cpp <nl> ppp b / src / c_api . cpp <nl> class Booster { <nl> return boosting_ - > TrainOneIter ( gradients , hessians , false ) ; <nl> } <nl> <nl> - void PrepareForPrediction ( int num_used_model , int predict_type ) { <nl> - boosting_ - > SetNumUsedModel ( num_used_model ) ; <nl> + void PrepareForPrediction ( int num_iteration , int predict_type ) { <nl> + boosting_ - > SetNumIterationForPred ( num_iteration ) ; <nl> bool is_predict_leaf = false ; <nl> bool is_raw_score = false ; <nl> if ( predict_type = = C_API_PREDICT_LEAF_INDEX ) { <nl> class Booster { <nl> predictor_ . reset ( new Predictor ( boosting_ . get ( ) , is_raw_score , is_predict_leaf ) ) ; <nl> } <nl> <nl> + void GetPredictAt ( int data_idx , score_t * out_result , data_size_t * out_len ) { <nl> + boosting_ - > GetPredictAt ( data_idx , out_result , out_len ) ; <nl> + } <nl> + <nl> std : : vector < double > Predict ( const std : : vector < std : : pair < int , double > > & features ) { <nl> return predictor_ - > GetPredictFunction ( ) ( features ) ; <nl> } <nl> class Booster { <nl> predictor_ - > Predict ( data_filename , result_filename , data_has_header ) ; <nl> } <nl> <nl> - void SaveModelToFile ( int num_used_model , const char * filename ) { <nl> - boosting_ - > SaveModelToFile ( num_used_model , true , filename ) ; <nl> + void SaveModelToFile ( int num_iteration , const char * filename ) { <nl> + boosting_ - > SaveModelToFile ( num_iteration , true , filename ) ; <nl> } <nl> <nl> int GetEvalCounts ( ) const { <nl> class Booster { <nl> return ret ; <nl> } <nl> <nl> - int GetEvalNames ( const char * * * out_strs ) const { <nl> + int GetEvalNames ( char * * out_strs ) const { <nl> int idx = 0 ; <nl> for ( const auto & metric : train_metric_ ) { <nl> for ( const auto & name : metric - > GetName ( ) ) { <nl> - * ( out_strs [ idx + + ] ) = name . c_str ( ) ; <nl> + int j = 0 ; <nl> + auto name_cstr = name . c_str ( ) ; <nl> + while ( name_cstr [ j ] ! 
= ' \ 0 ' ) { <nl> + out_strs [ idx ] [ j ] = name_cstr [ j ] ; <nl> + + + j ; <nl> + } <nl> + out_strs [ idx ] [ j ] = ' \ 0 ' ; <nl> + + + idx ; <nl> } <nl> } <nl> return idx ; <nl> } <nl> <nl> const Boosting * GetBoosting ( ) const { return boosting_ . get ( ) ; } <nl> - <nl> - const float * GetTrainingScore ( int * out_len ) const { return boosting_ - > GetTrainingScore ( out_len ) ; } <nl> - <nl> - const inline int NumberOfClasses ( ) const { return boosting_ - > NumberOfClasses ( ) ; } <nl> - <nl> + <nl> private : <nl> <nl> std : : unique_ptr < Boosting > boosting_ ; <nl> DllExport int LGBM_BoosterCreate ( const DatesetHandle train_data , <nl> <nl> DllExport int LGBM_BoosterCreateFromModelfile ( <nl> const char * filename , <nl> + int64_t * num_total_model , <nl> BoosterHandle * out ) { <nl> API_BEGIN ( ) ; <nl> - * out = new Booster ( filename ) ; <nl> + auto ret = std : : unique_ptr < Booster > ( new Booster ( filename ) ) ; <nl> + * num_total_model = static_cast < int64_t > ( ret - > GetBoosting ( ) - > NumberOfTotalModel ( ) ) ; <nl> + * out = ret . release ( ) ; <nl> API_END ( ) ; <nl> } <nl> <nl> DllExport int LGBM_BoosterFree ( BoosterHandle handle ) { <nl> API_END ( ) ; <nl> } <nl> <nl> + DllExport int LGBM_BoosterGetNumClasses ( BoosterHandle handle , int64_t * out_len ) { <nl> + API_BEGIN ( ) ; <nl> + Booster * ref_booster = reinterpret_cast < Booster * > ( handle ) ; <nl> + * out_len = ref_booster - > GetBoosting ( ) - > NumberOfClasses ( ) ; <nl> + API_END ( ) ; <nl> + } <nl> + <nl> DllExport int LGBM_BoosterUpdateOneIter ( BoosterHandle handle , int * is_finished ) { <nl> API_BEGIN ( ) ; <nl> Booster * ref_booster = reinterpret_cast < Booster * > ( handle ) ; <nl> DllExport int LGBM_BoosterGetEvalCounts ( BoosterHandle handle , int64_t * out_len ) <nl> * \ brief Get number of eval <nl> * \ return total number of eval result <nl> * / <nl> - DllExport int LGBM_BoosterGetEvalNames ( BoosterHandle handle , int64_t * out_len , const char * * * out_strs ) { <nl> + DllExport int LGBM_BoosterGetEvalNames ( BoosterHandle handle , int64_t * out_len , char * * out_strs ) { <nl> API_BEGIN ( ) ; <nl> Booster * ref_booster = reinterpret_cast < Booster * > ( handle ) ; <nl> * out_len = ref_booster - > GetEvalNames ( out_strs ) ; <nl> DllExport int LGBM_BoosterGetEval ( BoosterHandle handle , <nl> API_END ( ) ; <nl> } <nl> <nl> - DllExport int LGBM_BoosterGetTrainingScore ( BoosterHandle handle , <nl> - int64_t * out_len , <nl> - const float * * out_result ) { <nl> - API_BEGIN ( ) ; <nl> - Booster * ref_booster = reinterpret_cast < Booster * > ( handle ) ; <nl> - int len = 0 ; <nl> - * out_result = ref_booster - > GetTrainingScore ( & len ) ; <nl> - * out_len = static_cast < int64_t > ( len ) ; <nl> - API_END ( ) ; <nl> - } <nl> - <nl> DllExport int LGBM_BoosterGetPredict ( BoosterHandle handle , <nl> int data , <nl> int64_t * out_len , <nl> float * out_result ) { <nl> API_BEGIN ( ) ; <nl> Booster * ref_booster = reinterpret_cast < Booster * > ( handle ) ; <nl> - auto boosting = ref_booster - > GetBoosting ( ) ; <nl> int len = 0 ; <nl> - boosting - > GetPredictAt ( data , out_result , & len ) ; <nl> + ref_booster - > GetPredictAt ( data , out_result , & len ) ; <nl> * out_len = static_cast < int64_t > ( len ) ; <nl> API_END ( ) ; <nl> } <nl> <nl> DllExport int LGBM_BoosterPredictForFile ( BoosterHandle handle , <nl> - int predict_type , <nl> - int64_t n_used_trees , <nl> - int data_has_header , <nl> const char * data_filename , <nl> + int data_has_header , <nl> + int predict_type , <nl> 
+ int64_t num_iteration , <nl> const char * result_filename ) { <nl> API_BEGIN ( ) ; <nl> Booster * ref_booster = reinterpret_cast < Booster * > ( handle ) ; <nl> - ref_booster - > PrepareForPrediction ( static_cast < int > ( n_used_trees ) , predict_type ) ; <nl> + ref_booster - > PrepareForPrediction ( static_cast < int > ( num_iteration ) , predict_type ) ; <nl> bool bool_data_has_header = data_has_header > 0 ? true : false ; <nl> ref_booster - > PredictForFile ( data_filename , result_filename , bool_data_has_header ) ; <nl> API_END ( ) ; <nl> DllExport int LGBM_BoosterPredictForCSR ( BoosterHandle handle , <nl> int64_t nelem , <nl> int64_t , <nl> int predict_type , <nl> - int64_t n_used_trees , <nl> - double * out_result ) { <nl> + int64_t num_iteration , <nl> + int64_t * out_len , <nl> + float * out_result ) { <nl> API_BEGIN ( ) ; <nl> Booster * ref_booster = reinterpret_cast < Booster * > ( handle ) ; <nl> - ref_booster - > PrepareForPrediction ( static_cast < int > ( n_used_trees ) , predict_type ) ; <nl> + ref_booster - > PrepareForPrediction ( static_cast < int > ( num_iteration ) , predict_type ) ; <nl> <nl> auto get_row_fun = RowFunctionFromCSR ( indptr , indptr_type , indices , data , data_type , nindptr , nelem ) ; <nl> - int num_class = ref_booster - > NumberOfClasses ( ) ; <nl> + int num_preb_in_one_row = ref_booster - > GetBoosting ( ) - > NumberOfClasses ( ) ; <nl> + if ( predict_type = = C_API_PREDICT_LEAF_INDEX ) { <nl> + if ( num_iteration > 0 ) { <nl> + num_preb_in_one_row * = static_cast < int > ( num_iteration ) ; <nl> + } else { <nl> + num_preb_in_one_row * = ref_booster - > GetBoosting ( ) - > NumberOfTotalModel ( ) / num_preb_in_one_row ; <nl> + } <nl> + } <nl> int nrow = static_cast < int > ( nindptr - 1 ) ; <nl> # pragma omp parallel for schedule ( guided ) <nl> for ( int i = 0 ; i < nrow ; + + i ) { <nl> auto one_row = get_row_fun ( i ) ; <nl> auto predicton_result = ref_booster - > Predict ( one_row ) ; <nl> - for ( int j = 0 ; j < num_class ; + + j ) { <nl> - out_result [ i * num_class + j ] = predicton_result [ j ] ; <nl> + for ( int j = 0 ; j < static_cast < int > ( predicton_result . 
size ( ) ) ; + + j ) { <nl> + out_result [ i * num_preb_in_one_row + j ] = static_cast < float > ( predicton_result [ j ] ) ; <nl> } <nl> } <nl> + * out_len = nrow * num_preb_in_one_row ; <nl> API_END ( ) ; <nl> } <nl> <nl> DllExport int LGBM_BoosterPredictForMat ( BoosterHandle handle , <nl> int32_t ncol , <nl> int is_row_major , <nl> int predict_type , <nl> - int64_t n_used_trees , <nl> - double * out_result ) { <nl> + int64_t num_iteration , <nl> + int64_t * out_len , <nl> + float * out_result ) { <nl> API_BEGIN ( ) ; <nl> Booster * ref_booster = reinterpret_cast < Booster * > ( handle ) ; <nl> - ref_booster - > PrepareForPrediction ( static_cast < int > ( n_used_trees ) , predict_type ) ; <nl> + ref_booster - > PrepareForPrediction ( static_cast < int > ( num_iteration ) , predict_type ) ; <nl> <nl> auto get_row_fun = RowPairFunctionFromDenseMatric ( data , nrow , ncol , data_type , is_row_major ) ; <nl> - int num_class = ref_booster - > NumberOfClasses ( ) ; <nl> + int num_preb_in_one_row = ref_booster - > GetBoosting ( ) - > NumberOfClasses ( ) ; <nl> + if ( predict_type = = C_API_PREDICT_LEAF_INDEX ) { <nl> + if ( num_iteration > 0 ) { <nl> + num_preb_in_one_row * = static_cast < int > ( num_iteration ) ; <nl> + } else { <nl> + num_preb_in_one_row * = ref_booster - > GetBoosting ( ) - > NumberOfTotalModel ( ) / num_preb_in_one_row ; <nl> + } <nl> + } <nl> # pragma omp parallel for schedule ( guided ) <nl> for ( int i = 0 ; i < nrow ; + + i ) { <nl> auto one_row = get_row_fun ( i ) ; <nl> auto predicton_result = ref_booster - > Predict ( one_row ) ; <nl> - for ( int j = 0 ; j < num_class ; + + j ) { <nl> - out_result [ i * num_class + j ] = predicton_result [ j ] ; <nl> + for ( int j = 0 ; j < static_cast < int > ( predicton_result . size ( ) ) ; + + j ) { <nl> + out_result [ i * num_preb_in_one_row + j ] = static_cast < float > ( predicton_result [ j ] ) ; <nl> } <nl> } <nl> + * out_len = nrow * num_preb_in_one_row ; <nl> API_END ( ) ; <nl> } <nl> <nl> DllExport int LGBM_BoosterSaveModel ( BoosterHandle handle , <nl> - int num_used_model , <nl> + int num_iteration , <nl> const char * filename ) { <nl> API_BEGIN ( ) ; <nl> Booster * ref_booster = reinterpret_cast < Booster * > ( handle ) ; <nl> - ref_booster - > SaveModelToFile ( num_used_model , filename ) ; <nl> + ref_booster - > SaveModelToFile ( num_iteration , filename ) ; <nl> API_END ( ) ; <nl> } <nl> <nl> mmm a / src / io / config . cpp <nl> ppp b / src / io / config . cpp <nl> void IOConfig : : Set ( const std : : unordered_map < std : : string , std : : string > & params ) { <nl> GetInt ( params , " data_random_seed " , & data_random_seed ) ; <nl> GetString ( params , " data " , & data_filename ) ; <nl> GetInt ( params , " verbose " , & verbosity ) ; <nl> - GetInt ( params , " num_model_predict " , & num_model_predict ) ; <nl> + GetInt ( params , " num_iteration_predict " , & num_iteration_predict ) ; <nl> GetInt ( params , " bin_construct_sample_cnt " , & bin_construct_sample_cnt ) ; <nl> GetBool ( params , " is_pre_partition " , & is_pre_partition ) ; <nl> GetBool ( params , " is_enable_sparse " , & is_enable_sparse ) ; <nl> mmm a / tests / c_api_test / test . py <nl> ppp b / tests / c_api_test / test . py <nl> def test_booster ( ) : <nl> test_free_dataset ( train ) <nl> test_free_dataset ( test [ 0 ] ) <nl> booster2 = ctypes . c_void_p ( ) <nl> - LIB . LGBM_BoosterCreateFromModelfile ( c_str ( ' model . txt ' ) , ctypes . byref ( booster2 ) ) <nl> + num_total_model = ctypes . c_long ( ) <nl> + LIB . 
LGBM_BoosterCreateFromModelfile ( c_str ( ' model . txt ' ) , ctypes . byref ( num_total_model ) , ctypes . byref ( booster2 ) ) <nl> data = [ ] <nl> inp = open ( ' . . / . . / examples / binary_classification / binary . test ' , ' r ' ) <nl> for line in inp . readlines ( ) : <nl> data . append ( [ float ( x ) for x in line . split ( ' \ t ' ) [ 1 : ] ] ) <nl> inp . close ( ) <nl> mat = np . array ( data ) <nl> - preb = np . zeros ( ( mat . shape [ 0 ] , 1 ) , dtype = np . float64 ) <nl> + preb = np . zeros ( mat . shape [ 0 ] , dtype = np . float32 ) <nl> + num_preb = ctypes . c_long ( ) <nl> data = np . array ( mat . reshape ( mat . size ) , copy = False ) <nl> LIB . LGBM_BoosterPredictForMat ( booster2 , <nl> data . ctypes . data_as ( ctypes . POINTER ( ctypes . c_void_p ) ) , <nl> def test_booster ( ) : <nl> 1 , <nl> 1 , <nl> 50 , <nl> + ctypes . byref ( num_preb ) , <nl> preb . ctypes . data_as ( ctypes . POINTER ( ctypes . c_double ) ) ) <nl> - LIB . LGBM_BoosterPredictForFile ( booster2 , 1 , 50 , 0 , c_str ( ' . . / . . / examples / binary_classification / binary . test ' ) , c_str ( ' preb . txt ' ) ) <nl> + LIB . LGBM_BoosterPredictForFile ( booster2 , c_str ( ' . . / . . / examples / binary_classification / binary . test ' ) , 0 , 0 , 50 , c_str ( ' preb . txt ' ) ) <nl> LIB . LGBM_BoosterFree ( booster2 ) <nl> <nl> test_dataset ( ) <nl>
|
almost finish , need some tests
|
microsoft/LightGBM
|
422c0ef728e1748d96c017f8de41105cc4a1df67
|
2016-11-23T14:04:46Z
|
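The Booster docstrings in the basic.py diff above state that for multi-class tasks the flat score buffer is grouped by class_id first and then by row_id, so the score of row i under class j sits at score[j * num_data + i], and that custom fobj/feval callbacks must lay out grad/hess in the same class-major order. A minimal sketch of that indexing, assuming NumPy; the helper names here are illustrative, not LightGBM API:

import numpy as np

def score_at(scores, num_data, row_id, class_id):
    # Flat buffer is class-major: all rows of class 0, then class 1, ...
    return scores[class_id * num_data + row_id]

def to_row_major(scores, num_data, num_class):
    # Convenience view of shape (num_data, num_class); a custom fobj would
    # have to flatten grad/hess back to class-major order before boost().
    return np.asarray(scores, dtype=np.float32).reshape(num_class, num_data).T

num_data, num_class = 4, 3
flat = np.arange(num_data * num_class, dtype=np.float32)
assert score_at(flat, num_data, row_id=2, class_id=1) == 6.0
print(to_row_major(flat, num_data, num_class).shape)  # (4, 3)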
mmm a / libraries / chain / wasm_interface . cpp <nl> ppp b / libraries / chain / wasm_interface . cpp <nl> DEFINE_INTRINSIC_FUNCTION4 ( env , store , store , none , i32 , keyptr , i32 , keylen , i32 , valuep <nl> char * value = & memoryRef < char > ( mem , valueptr ) ; <nl> string keystr ( key , key + keylen ) ; <nl> <nl> - / / idump ( ( keystr ) ) ; <nl> / / if ( valuelen = = 8 ) idump ( ( * ( ( int64_t * ) value ) ) ) ; <nl> <nl> <nl> DEFINE_INTRINSIC_FUNCTION4 ( env , store , store , none , i32 , keyptr , i32 , keylen , i32 , valuep <nl> if ( obj ) { <nl> db . modify ( * obj , [ & ] ( auto & o ) { <nl> o . value . resize ( valuelen ) ; <nl> - o . value . replace ( 0 , valuelen , value ) ; <nl> + memcpy ( o . value . data ( ) , value , valuelen ) ; <nl> } ) ; <nl> } else { <nl> db . create < key_value_object > ( [ & ] ( auto & o ) { <nl> DEFINE_INTRINSIC_FUNCTION4 ( env , load , load , i32 , i32 , keyptr , i32 , keylen , i32 , valueptr , <nl> char * key = & memoryRef < char > ( mem , keyptr ) ; <nl> char * value = & memoryRef < char > ( mem , valueptr ) ; <nl> string keystr ( key , key + keylen ) ; <nl> - / / idump ( ( keystr ) ) ; <nl> + / / idump ( ( keystr ) ) ; <nl> <nl> const auto * obj = db . find < key_value_object , by_scope_key > ( boost : : make_tuple ( scope , keystr ) ) ; <nl> if ( obj = = nullptr ) return - 1 ; <nl> auto copylen = std : : min < size_t > ( obj - > value . size ( ) , valuelen ) ; <nl> + / / idump ( ( copylen ) ( valuelen ) ( obj - > value . size ( ) ) ) ; <nl> if ( copylen ) { <nl> memcpy ( value , obj - > value . data ( ) , copylen ) ; <nl> - / / if ( copylen = = 8 ) idump ( ( * ( ( int64_t * ) value ) ) ) ; <nl> + / / if ( copylen = = 8 ) idump ( ( * ( ( int64_t * ) value ) ) ) ; <nl> } <nl> return copylen ; <nl> } <nl> DEFINE_INTRINSIC_FUNCTION1 ( env , toUpper , toUpper , none , i32 , charptr ) { <nl> void wasm_interface : : vm_onInit ( ) <nl> { try { <nl> try { <nl> - wlog ( " onInit " ) ; <nl> + / / wlog ( " onInit " ) ; <nl> FunctionInstance * apply = asFunctionNullable ( getInstanceExport ( current_module , " onInit " ) ) ; <nl> if ( ! apply ) { <nl> wlog ( " no onInit method found " ) ; <nl> mmm a / tests / tests / block_tests . cpp <nl> ppp b / tests / tests / block_tests . cpp <nl> R " ( <nl> ( type $ FUNCSIG $ viiii ( func ( param i32 i32 i32 i32 ) ) ) <nl> ( type $ FUNCSIG $ iii ( func ( param i32 i32 ) ( result i32 ) ) ) <nl> ( type $ FUNCSIG $ iiiii ( func ( param i32 i32 i32 i32 ) ( result i32 ) ) ) <nl> - ( type $ FUNCSIG $ i ( func ( result i32 ) ) ) <nl> ( type $ FUNCSIG $ iiii ( func ( param i32 i32 i32 ) ( result i32 ) ) ) <nl> ( import " env " " AccountName_unpack " ( func $ AccountName_unpack ( param i32 i32 ) ) ) <nl> ( import " env " " Varint_unpack " ( func $ Varint_unpack ( param i32 i32 ) ) ) <nl> R " ( <nl> ) <nl> ) <nl> <nl> - <nl> ) " ; <nl> <nl> types : : SetCode handler ; <nl>
|
Fix
|
EOSIO/eos
|
41f8f2d35b941fbc2e92fe9356e3a66ccbf94cf1
|
2017-06-22T00:25:39Z
|
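The load intrinsic in the wasm_interface.cpp diff above copies at most valuelen bytes of the stored value into the caller's buffer and returns the number of bytes copied, or -1 when the key is absent. A rough model of that contract; the dict-backed db and the function shape are assumptions for illustration, not the EOSIO API:

def load(db, scope, key, buf, valuelen):
    stored = db.get((scope, key))
    if stored is None:
        return -1  # key not found
    copylen = min(len(stored), valuelen)  # never overrun the caller's buffer
    buf[:copylen] = stored[:copylen]
    return copylen

db = {("alice", b"balance"): (100).to_bytes(8, "little")}
buf = bytearray(8)
assert load(db, "alice", b"balance", buf, len(buf)) == 8
assert int.from_bytes(buf, "little") == 100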
mmm a / include / grpc + + / dynamic_thread_pool . h <nl> ppp b / include / grpc + + / dynamic_thread_pool . h <nl> <nl> # include < grpc + + / thread_pool_interface . h > <nl> <nl> # include < list > <nl> + # include < memory > <nl> # include < queue > <nl> <nl> namespace grpc { <nl>
|
Fixing Windows build
|
grpc/grpc
|
b3d69c3c0fbd6a833214898fd172b8e1d2102b56
|
2015-07-29T04:12:51Z
|
mmm a / . jenkins / pytorch / win - test . sh <nl> ppp b / . jenkins / pytorch / win - test . sh <nl> export TMP_DIR_WIN = $ ( cygpath - w " $ { TMP_DIR } " ) <nl> mkdir - p $ TMP_DIR / ci_scripts / <nl> mkdir - p $ TMP_DIR / build / torch <nl> <nl> + if [ ! - z " $ ( ls $ TMP_DIR / ci_scripts / * ) " ] ; then <nl> + rm $ TMP_DIR / ci_scripts / * <nl> + fi <nl> + <nl> cat > $ TMP_DIR / ci_scripts / download_image . py < < EOL <nl> <nl> import os <nl> EOL <nl> <nl> cat > $ TMP_DIR / ci_scripts / setup_pytorch_env . bat < < EOL <nl> <nl> + if exist " % TMP_DIR % / ci_scripts / pytorch_env_restore . bat " ( <nl> + call % TMP_DIR % / ci_scripts / pytorch_env_restore . bat <nl> + exit / b 0 <nl> + ) <nl> + <nl> set PATH = C : \ \ Program Files \ \ CMake \ \ bin ; C : \ \ Program Files \ \ 7 - Zip ; C : \ \ ProgramData \ \ chocolatey \ \ bin ; C : \ \ Program Files \ \ Git \ \ cmd ; C : \ \ Program Files \ \ Amazon \ \ AWSCLI ; % PATH % <nl> <nl> : : Install Miniconda3 <nl> if NOT " % BUILD_ENVIRONMENT % " = = " " ( <nl> xcopy / s % CONDA_PARENT_DIR % \ \ Miniconda3 \ \ Lib \ \ site - packages \ \ torch % TMP_DIR_WIN % \ \ build \ \ torch \ \ <nl> ) <nl> <nl> + for / f " usebackq tokens = * " % % i in ( \ ` set \ ` ) do echo set " % % i " > > % TMP_DIR % / ci_scripts / pytorch_env_restore . bat <nl> + <nl> EOL <nl> <nl> cat > $ TMP_DIR / ci_scripts / test_python_nn . bat < < EOL <nl>
|
Simplify env creation when running Windows tests ( )
|
pytorch/pytorch
|
bba906c2cb166c4c303989383ba814c6392543b9
|
2019-03-14T17:10:31Z
|
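
The batch-script change above memoizes an expensive environment setup: the first run dumps every variable into pytorch_env_restore.bat, later runs replay that file and exit early, and the rm guard at the top keeps a stale cache from a previous job out of the way. The same cache-and-replay idea sketched in C++; the file name, format, and helper are invented for illustration, while the real script caches cmd.exe `set` output:

    #include <cstdlib>
    #include <fstream>
    #include <string>

    // Replay a previously cached KEY=VALUE file; returns false if no cache exists.
    bool restore_cached_env(const std::string& cache_path) {
        std::ifstream in(cache_path);
        if (!in) return false;                  // first run: caller does the slow setup
        for (std::string line; std::getline(in, line); ) {
            auto eq = line.find('=');
            if (eq == std::string::npos) continue;
            setenv(line.substr(0, eq).c_str(),  // POSIX; _putenv_s on Windows
                   line.substr(eq + 1).c_str(), /*overwrite=*/1);
        }
        return true;                            // mirrors `exit /b 0` in the batch file
    }

    int main() {
        if (!restore_cached_env("env_cache.txt")) {
            // ... slow one-time setup would run here, then write the cache ...
        }
    }
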
mmm a / emcc <nl> ppp b / emcc <nl> stderr = PIPE if not DEBUG else None # unless we are in DEBUG mode <nl> EMCC_CXX = ' - - emscripten - cxx ' in sys . argv <nl> sys . argv = filter ( lambda x : x ! = ' - - emscripten - cxx ' , sys . argv ) <nl> <nl> - shared . check_sanity ( force = DEBUG ) <nl> + if len ( sys . argv ) ! = 1 and ' - - help ' not in sys . argv and sys . argv [ 1 ] ! = ' - - version ' : <nl> + shared . check_sanity ( force = DEBUG ) <nl> <nl> # Handle some global flags <nl> <nl> try : <nl> fastcomp_opts + = [ ' - emscripten - asyncify - whitelist = ' + ' , ' . join ( shared . Settings . ASYNCIFY_WHITELIST ) ] <nl> <nl> else : # non - fastcomp <nl> - logging . warning ( ' Using old ( non - fastcomp ) compiler due to EMCC_FAST_COMPILER = 0 in the env . This is dangerous , as there are known bugs , and this code path is no longer maintained . Please use emscripten in the default configuration ( i . e . , do not disable fastcomp ) ' ) <nl> + logging . critical ( ' Non - fastcomp compiler is no longer available , please use fastcomp or an older version of emscripten ' ) <nl> + sys . exit ( 1 ) <nl> <nl> if shared . Settings . ASM_JS : <nl> assert opt_level > = 1 or fastcomp , ' asm . js requires - O1 or above ' <nl> mmm a / emscripten . py <nl> ppp b / emscripten . py <nl> def fix ( m ) : <nl> <nl> if os . environ . get ( ' EMCC_FAST_COMPILER ' ) ! = ' 0 ' : <nl> emscript = emscript_fast <nl> + else : <nl> + logging . critical ( ' Non - fastcomp compiler is no longer available , please use fastcomp or an older version of emscripten ' ) <nl> + sys . exit ( 1 ) <nl> <nl> def main ( args , compiler_engine , cache , jcache , relooper , temp_files , DEBUG , DEBUG_CACHE ) : <nl> # Prepare settings for serialization to JSON . <nl> mmm a / tests / test_other . py <nl> ppp b / tests / test_other . py <nl> def test_emcc ( self ) : <nl> # TODO : test normal project linking , static and dynamic : get_library should not need to be told what to link ! <nl> # TODO : deprecate llvm optimizations , dlmalloc , etc . in emscripten . py . <nl> <nl> + def test_emcc_nonfastcomp_fails ( self ) : <nl> + open ( os . path . join ( self . get_dir ( ) , ' test . c ' ) , ' w ' ) . write ( r ' ' ' <nl> + int main ( ) { <nl> + return 0 ; <nl> + } <nl> + ' ' ' ) <nl> + def check_errors ( command ) : <nl> + process = Popen ( command , stdout = PIPE , stderr = PIPE ) <nl> + stdout , stderr = process . communicate ( ) <nl> + self . assertEqual ( stdout , ' ' ) <nl> + self . assertIn ( ' Non - fastcomp compiler is no longer available ' , stderr ) <nl> + self . assertEqual ( process . returncode , 1 ) <nl> + def check_success ( command ) : <nl> + process = Popen ( command , stdout = PIPE , stderr = PIPE ) <nl> + stdout , stderr = process . communicate ( ) <nl> + self . assertEqual ( stderr , ' ' ) <nl> + self . assertEqual ( process . returncode , 0 ) <nl> + nonfastcomp ( lambda : check_success ( [ PYTHON , EMCC , ' - - version ' ] ) ) <nl> + nonfastcomp ( lambda : check_success ( [ PYTHON , EMCC , ' - - help ' ] ) ) <nl> + nonfastcomp ( lambda : check_errors ( [ PYTHON , EMCC , ' - v ' ] ) ) <nl> + nonfastcomp ( lambda : check_errors ( [ PYTHON , EMCC , os . path . join ( self . get_dir ( ) , ' test . c ' ) ] ) ) <nl> + self . assertFalse ( os . path . exists ( ' a . out . js ' ) ) <nl> + <nl> def test_emcc_nonfastcomp ( self ) : <nl> return self . skip ( ' non - fastcomp is deprecated and fails in 3 . 5 ' ) <nl> nonfastcomp ( self . test_emcc ) <nl> mmm a / tools / js_optimizer . 
py <nl> ppp b / tools / js_optimizer . py <nl> <nl> <nl> - import os , sys , subprocess , multiprocessing , re , string , json , shutil <nl> + import os , sys , subprocess , multiprocessing , re , string , json , shutil , logging <nl> import shared <nl> <nl> configuration = shared . configuration <nl> def find_msbuild ( sln_file , make_env ) : <nl> return [ None , make_env ] <nl> <nl> def get_native_optimizer ( ) : <nl> - if os . environ . get ( ' EMCC_FAST_COMPILER ' ) = = ' 0 ' : return None # need fastcomp for native optimizer <nl> + if os . environ . get ( ' EMCC_FAST_COMPILER ' ) = = ' 0 ' : <nl> + logging . critical ( ' Non - fastcomp compiler is no longer available , please use fastcomp or an older version of emscripten ' ) <nl> + sys . exit ( 1 ) <nl> <nl> # Allow users to override the location of the optimizer executable by setting an environment variable EMSCRIPTEN_NATIVE_OPTIMIZER = / path / to / optimizer ( . exe ) <nl> if os . environ . get ( ' EMSCRIPTEN_NATIVE_OPTIMIZER ' ) and len ( os . environ . get ( ' EMSCRIPTEN_NATIVE_OPTIMIZER ' ) ) > 0 : return os . environ . get ( ' EMSCRIPTEN_NATIVE_OPTIMIZER ' ) <nl> mmm a / tools / shared . py <nl> ppp b / tools / shared . py <nl> def check_sanity ( force = False ) : <nl> # some warning , mostly not fatal checks - do them even if EM_IGNORE_SANITY is on <nl> check_llvm_version ( ) <nl> check_node_version ( ) <nl> - if os . environ . get ( ' EMCC_FAST_COMPILER ' ) ! = ' 0 ' : <nl> - fastcomp_ok = check_fastcomp ( ) <nl> + <nl> + if os . environ . get ( ' EMCC_FAST_COMPILER ' ) = = ' 0 ' : <nl> + logging . critical ( ' Non - fastcomp compiler is no longer available , please use fastcomp or an older version of emscripten ' ) <nl> + sys . exit ( 1 ) <nl> + <nl> + fastcomp_ok = check_fastcomp ( ) <nl> <nl> if os . environ . get ( ' EM_IGNORE_SANITY ' ) : <nl> logging . info ( ' EM_IGNORE_SANITY set , ignoring sanity checks ' ) <nl> def set_logging ( ) : <nl> # Target choice . Must be synced with src / settings . js ( TARGET_ * ) <nl> def get_llvm_target ( ) : <nl> if os . environ . get ( ' EMCC_FAST_COMPILER ' ) = = ' 0 ' : <nl> - if not os . environ . get ( ' EMCC_LLVM_TARGET ' ) : <nl> - os . environ [ ' EMCC_LLVM_TARGET ' ] = ' le32 - unknown - nacl ' <nl> - return os . environ . get ( ' EMCC_LLVM_TARGET ' ) <nl> + return ' unavailable - non - fastcomp ' <nl> return os . environ . get ( ' EMCC_LLVM_TARGET ' ) or ' asmjs - unknown - emscripten ' <nl> LLVM_TARGET = get_llvm_target ( ) <nl> <nl>
|
Error on fastcomp = 0 in key places where EMCC_FAST_COMPILER is checked
|
emscripten-core/emscripten
|
44fb04635892e62bbc17a7b2d9c0f13217a64b41
|
2015-03-16T02:19:45Z
|
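
The emscripten change above turns a long-deprecated configuration into a hard error, with one subtlety that the new test pins down: --help and --version must keep working even with EMCC_FAST_COMPILER=0, so the sanity check is skipped for exactly those invocations. A compilable sketch of that gating; the flag and variable names come from the patch, the rest is illustrative:

    #include <cstdlib>
    #include <cstring>
    #include <iostream>

    int main(int argc, char** argv) {
        bool help_or_version = false;
        for (int i = 1; i < argc; ++i)
            if (!std::strcmp(argv[i], "--help") || !std::strcmp(argv[i], "--version"))
                help_or_version = true;

        // check_sanity() equivalent: fatal on the removed code path.
        const char* fast = std::getenv("EMCC_FAST_COMPILER");
        if (!help_or_version && fast && !std::strcmp(fast, "0")) {
            std::cerr << "Non-fastcomp compiler is no longer available\n";
            return EXIT_FAILURE;  // logging.critical(...) + sys.exit(1) in the patch
        }
        // ... normal compilation path ...
        return EXIT_SUCCESS;
    }
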
mmm a / bazel / protobuf . patch <nl> ppp b / bazel / protobuf . patch <nl> <nl> pppmmm a / src / google / protobuf / stubs / strutil . cc <nl> ppp + b / src / google / protobuf / stubs / strutil . cc <nl> + char * FastUInt32ToBufferLeft ( uint32 u , char * buffer ) { <nl> + } <nl> + <nl> + char * FastInt32ToBufferLeft ( int32 i , char * buffer ) { <nl> + - uint32 u = i ; <nl> + + uint32 u = 0 ; <nl> + if ( i < 0 ) { <nl> + * buffer + + = ' - ' ; <nl> + - u = - i ; <nl> + + u - = i ; <nl> + + } else { <nl> + + u = i ; <nl> + } <nl> + return FastUInt32ToBufferLeft ( u , buffer ) ; <nl> + } <nl> + <nl> mmm a / BUILD <nl> mmm a / bazel / repositories . bzl <nl> ppp b / bazel / repositories . bzl <nl> def _com_google_protobuf ( ) : <nl> # The patch includes <nl> # https : / / github . com / protocolbuffers / protobuf / pull / 6333 and also uses <nl> # foreign_cc build for zlib as its dependency . <nl> - # TODO ( asraa ) : remove this when > protobuf 3 . 8 . 0 is released . <nl> + # TODO ( asraa ) : remove this when protobuf 3 . 10 is released . <nl> patch_args = [ " - p1 " ] , <nl> patches = [ " @ envoy / / bazel : protobuf . patch " ] , <nl> ) <nl> def _com_google_protobuf ( ) : <nl> # The patch includes <nl> # https : / / github . com / protocolbuffers / protobuf / pull / 6333 and also uses <nl> # foreign_cc build for zlib as its dependency . <nl> - # TODO ( asraa ) : remove this when > protobuf 3 . 8 . 0 is released . <nl> + # TODO ( asraa ) : remove this when protobuf 3 . 10 is released . <nl> patch_args = [ " - p1 " ] , <nl> patches = [ " @ envoy / / bazel : protobuf . patch " ] , <nl> ) <nl> new file mode 100644 <nl> index 00000000000 . . dbae8654ccb <nl> mmm / dev / null <nl> ppp b / test / common / router / route_corpus / clusterfuzz - testcase - minimized - route_fuzz_test - 5650952886943744 <nl> <nl> + config { <nl> + vhds { <nl> + config_source { <nl> + api_config_source { <nl> + request_timeout { <nl> + nanos : - 2147483648 <nl> + } <nl> + } <nl> + } <nl> + } <nl> + } <nl>
|
protobuf : add back ubsan patch ( )
|
envoyproxy/envoy
|
ef9139a991fb9f7df676430f626b32db93a06932
|
2019-09-27T18:40:33Z
|
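
The strutil.cc hunk inside the patch above is a textbook UBSan fix: `u = -i` negates the signed value first, and negating INT32_MIN overflows a 32-bit int, which is undefined behavior, whereas `u -= i` converts i to unsigned and subtracts, which is defined modulo 2^32 and yields the correct magnitude for every input, INT32_MIN included. A small self-checking sketch:

    #include <cassert>
    #include <cstdint>
    #include <limits>

    // Magnitude of a signed 32-bit value without signed overflow.
    uint32_t magnitude(int32_t i) {
        uint32_t u = 0;
        if (i < 0)
            u -= i;   // defined: i converts to uint32_t, subtraction wraps mod 2^32;
                      // the pre-patch `u = -i` is UB when i == INT32_MIN
        else
            u = i;
        return u;
    }

    int main() {
        assert(magnitude(-5) == 5u);
        assert(magnitude(std::numeric_limits<int32_t>::min()) == 2147483648u);
    }
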
mmm a / include / swift / Serialization / SerializedModuleLoader . h <nl> ppp b / include / swift / Serialization / SerializedModuleLoader . h <nl> class SerializedASTFile final : public LoadedFile { <nl> SmallVectorImpl < GenericSignature > & genericSignatures ) <nl> override ; <nl> <nl> + StringRef getTargetTriple ( ) const ; <nl> + <nl> static bool classof ( const FileUnit * file ) { <nl> return file - > getKind ( ) = = FileUnitKind : : SerializedAST ; <nl> } <nl> mmm a / include / swift / SymbolGraphGen / SymbolGraphGen . h <nl> ppp b / include / swift / SymbolGraphGen / SymbolGraphGen . h <nl> class ModuleDecl ; <nl> namespace symbolgraphgen { <nl> <nl> struct SymbolGraphOptions { <nl> - / / / The path to output the symbol graph JSON . <nl> - StringRef OutputPath ; <nl> + / / / The directory to output the symbol graph JSON files . <nl> + StringRef OutputDir ; <nl> <nl> / / / The target of the module . <nl> llvm : : Triple Target ; <nl> mmm a / lib / Serialization / ModuleFile . h <nl> ppp b / lib / Serialization / ModuleFile . h <nl> class ModuleFile <nl> return ModuleInputBuffer - > getBufferIdentifier ( ) ; <nl> } <nl> <nl> + StringRef getTargetTriple ( ) const { <nl> + return TargetTriple ; <nl> + } <nl> + <nl> / / / AST - verify imported decls . <nl> / / / <nl> / / / Has no effect in NDEBUG builds . <nl> mmm a / lib / Serialization / SerializedModuleLoader . cpp <nl> ppp b / lib / Serialization / SerializedModuleLoader . cpp <nl> StringRef SerializedASTFile : : getFilename ( ) const { <nl> return File . getModuleFilename ( ) ; <nl> } <nl> <nl> + StringRef SerializedASTFile : : getTargetTriple ( ) const { <nl> + return File . getTargetTriple ( ) ; <nl> + } <nl> + <nl> const clang : : Module * SerializedASTFile : : getUnderlyingClangModule ( ) const { <nl> if ( auto * UnderlyingModule = File . getUnderlyingModule ( ) ) <nl> return UnderlyingModule - > findUnderlyingClangModule ( ) ; <nl> mmm a / lib / SymbolGraphGen / DeclarationFragmentPrinter . cpp <nl> ppp b / lib / SymbolGraphGen / DeclarationFragmentPrinter . cpp <nl> void DeclarationFragmentPrinter : : printTypeRef ( Type T , const TypeDecl * RefTo , <nl> PrintNameContext NameContext ) { <nl> openFragment ( FragmentKind : : TypeIdentifier ) ; <nl> printText ( Name . str ( ) ) ; <nl> - USR = Walker . getUSR ( RefTo ) ; <nl> + USR = Graph . getUSR ( RefTo ) ; <nl> closeFragment ( ) ; <nl> } <nl> <nl> mmm a / lib / SymbolGraphGen / DeclarationFragmentPrinter . h <nl> ppp b / lib / SymbolGraphGen / DeclarationFragmentPrinter . h <nl> class TypeDecl ; <nl> <nl> namespace symbolgraphgen { <nl> <nl> - struct SymbolGraphASTWalker ; <nl> + struct SymbolGraph ; <nl> <nl> / / / Prints AST nodes as a stream of tagged fragments for syntax highlighting . <nl> / / / <nl> class DeclarationFragmentPrinter : public ASTPrinter { <nl> Text , <nl> } ; <nl> <nl> - SymbolGraphASTWalker & Walker ; <nl> + SymbolGraph & Graph ; <nl> <nl> / / / The output stream to print fragment objects to . <nl> llvm : : json : : OStream & OS ; <nl> class DeclarationFragmentPrinter : public ASTPrinter { <nl> void closeFragment ( ) ; <nl> <nl> public : <nl> - DeclarationFragmentPrinter ( SymbolGraphASTWalker & Walker , <nl> + DeclarationFragmentPrinter ( SymbolGraph & Graph , <nl> llvm : : json : : OStream & OS , <nl> Optional < StringRef > Key = None ) <nl> - : Walker ( Walker ) , <nl> + : Graph ( Graph ) , <nl> OS ( OS ) , <nl> Kind ( FragmentKind : : None ) { <nl> if ( Key ) { <nl> mmm a / lib / SymbolGraphGen / Edge . cpp <nl> ppp b / lib / SymbolGraphGen / Edge . 
cpp <nl> using namespace symbolgraphgen ; <nl> void Edge : : serialize ( llvm : : json : : OStream & OS ) const { <nl> OS . object ( [ & ] ( ) { <nl> OS . attribute ( " kind " , Kind . Name ) ; <nl> - OS . attribute ( " source " , Walker - > getUSR ( Source ) ) ; <nl> - OS . attribute ( " target " , Walker - > getUSR ( Target ) ) ; <nl> + OS . attribute ( " source " , Graph - > getUSR ( Source ) ) ; <nl> + OS . attribute ( " target " , Graph - > getUSR ( Target ) ) ; <nl> <nl> / / In case a dependent module isn ' t available , serialize a fallback name . <nl> auto TargetModuleName = Target - > getModuleContext ( ) - > getName ( ) . str ( ) ; <nl> <nl> - if ( TargetModuleName ! = Walker - > M . getName ( ) . str ( ) ) { <nl> + if ( TargetModuleName ! = Graph - > M . getName ( ) . str ( ) ) { <nl> SmallVector < SmallString < 32 > , 8 > TargetPathComponents ; <nl> - Walker - > getPathComponents ( Target , TargetPathComponents ) ; <nl> + Graph - > getPathComponents ( Target , TargetPathComponents ) ; <nl> <nl> SmallString < 128 > Scratch ( TargetModuleName ) ; <nl> for ( auto it = TargetPathComponents . begin ( ) ; <nl> mmm a / lib / SymbolGraphGen / Edge . h <nl> ppp b / lib / SymbolGraphGen / Edge . h <nl> <nl> # include " swift / Basic / LLVM . h " <nl> <nl> # include " JSON . h " <nl> - # include " Symbol . h " <nl> <nl> namespace swift { <nl> namespace symbolgraphgen { <nl> + <nl> + struct SymbolGraph ; <nl> <nl> / / / The kind of relationship , tagging an edge in the graph . <nl> struct RelationshipKind { <nl> struct RelationshipKind { <nl> <nl> / / / A relationship between two symbols : an edge in a directed graph . <nl> struct Edge { <nl> - SymbolGraphASTWalker * Walker ; <nl> + SymbolGraph * Graph ; <nl> <nl> / / / The kind of relationship this edge represents . <nl> RelationshipKind Kind ; <nl> mmm a / lib / SymbolGraphGen / FormatVersion . h <nl> ppp b / lib / SymbolGraphGen / FormatVersion . h <nl> <nl> # define SWIFT_SYMBOLGRAPHGEN_FORMATVERSION_H <nl> <nl> # define SWIFT_SYMBOLGRAPH_FORMAT_MAJOR 0 <nl> - # define SWIFT_SYMBOLGRAPH_FORMAT_MINOR 2 <nl> + # define SWIFT_SYMBOLGRAPH_FORMAT_MINOR 3 <nl> # define SWIFT_SYMBOLGRAPH_FORMAT_PATCH 0 <nl> <nl> # endif / / SWIFT_SYMBOLGRAPHGEN_FORMATVERSION_H <nl> mmm a / lib / SymbolGraphGen / Symbol . cpp <nl> ppp b / lib / SymbolGraphGen / Symbol . cpp <nl> void Symbol : : serializeKind ( llvm : : json : : OStream & OS ) const { <nl> } <nl> } <nl> <nl> - void Symbol : : serializeIdentifier ( SymbolGraphASTWalker & Walker , <nl> - llvm : : json : : OStream & OS ) const { <nl> + void Symbol : : serializeIdentifier ( llvm : : json : : OStream & OS ) const { <nl> OS . attributeObject ( " identifier " , [ & ] ( ) { <nl> - OS . attribute ( " precise " , Walker . getUSR ( VD ) ) ; <nl> + OS . attribute ( " precise " , Graph . getUSR ( VD ) ) ; <nl> OS . attribute ( " interfaceLanguage " , " swift " ) ; <nl> } ) ; <nl> } <nl> <nl> - void Symbol : : serializePathComponents ( SymbolGraphASTWalker & Walker , <nl> - llvm : : json : : OStream & OS ) const { <nl> + void Symbol : : serializePathComponents ( llvm : : json : : OStream & OS ) const { <nl> OS . attributeArray ( " pathComponents " , [ & ] ( ) { <nl> SmallVector < SmallString < 32 > , 8 > PathComponents ; <nl> - Walker . getPathComponents ( VD , PathComponents ) ; <nl> + Graph . getPathComponents ( VD , PathComponents ) ; <nl> for ( auto Component : PathComponents ) { <nl> OS . 
value ( Component ) ; <nl> } <nl> } ) ; <nl> } <nl> <nl> - void Symbol : : serializeNames ( SymbolGraphASTWalker & Walker , <nl> - llvm : : json : : OStream & OS ) const { <nl> + void Symbol : : serializeNames ( llvm : : json : : OStream & OS ) const { <nl> OS . attributeObject ( " names " , [ & ] ( ) { <nl> SmallVector < SmallString < 32 > , 8 > PathComponents ; <nl> - Walker . getPathComponents ( VD , PathComponents ) ; <nl> + Graph . getPathComponents ( VD , PathComponents ) ; <nl> <nl> OS . attribute ( " title " , PathComponents . back ( ) ) ; <nl> / / " navigator " : null <nl> - Walker . serializeSubheadingDeclarationFragments ( " subheading " , VD , OS ) ; <nl> + Graph . serializeSubheadingDeclarationFragments ( " subheading " , VD , OS ) ; <nl> / / " prose " : null <nl> } ) ; <nl> } <nl> void Symbol : : serializeRange ( size_t InitialIndentation , <nl> } ) ; <nl> } <nl> <nl> - void Symbol : : serializeDocComment ( SymbolGraphASTWalker & Walker , <nl> - llvm : : json : : OStream & OS ) const { <nl> + void Symbol : : serializeDocComment ( llvm : : json : : OStream & OS ) const { <nl> OS . attributeObject ( " docComment " , [ & ] ( ) { <nl> - auto LL = Walker . Ctx . getLineList ( VD - > getRawComment ( ) ) ; <nl> + auto LL = Graph . Ctx . getLineList ( VD - > getRawComment ( ) ) ; <nl> size_t InitialIndentation = LL . getLines ( ) . empty ( ) <nl> ? 0 <nl> : markup : : measureIndentation ( LL . getLines ( ) . front ( ) . Text ) ; <nl> void Symbol : : serializeDocComment ( SymbolGraphASTWalker & Walker , <nl> / / text and start of its source range , if it has one . <nl> if ( Line . Range . isValid ( ) ) { <nl> serializeRange ( InitialIndentation , <nl> - Line . Range , Walker . M . getASTContext ( ) . SourceMgr , OS ) ; <nl> + Line . Range , Graph . M . getASTContext ( ) . SourceMgr , OS ) ; <nl> } <nl> auto TrimmedLine = Line . Text . drop_front ( std : : min ( InitialIndentation , <nl> Line . FirstNonspaceOffset ) ) ; <nl> void Symbol : : serializeDocComment ( SymbolGraphASTWalker & Walker , <nl> } ) ; / / end docComment : <nl> } <nl> <nl> - void Symbol : : serializeFunctionSignature ( SymbolGraphASTWalker & Walker , <nl> - llvm : : json : : OStream & OS ) const { <nl> + void Symbol : : serializeFunctionSignature ( llvm : : json : : OStream & OS ) const { <nl> if ( const auto * FD = dyn_cast_or_null < FuncDecl > ( VD ) ) { <nl> OS . attributeObject ( " functionSignature " , [ & ] ( ) { <nl> <nl> void Symbol : : serializeFunctionSignature ( SymbolGraphASTWalker & Walker , <nl> OS . attribute ( " internalName " , InternalName ) ; <nl> } <nl> } <nl> - Walker . serializeDeclarationFragments ( " declarationFragments " , <nl> + Graph . serializeDeclarationFragments ( " declarationFragments " , <nl> Param , OS ) ; <nl> } ) ; / / end parameter object <nl> } <nl> void Symbol : : serializeFunctionSignature ( SymbolGraphASTWalker & Walker , <nl> <nl> / / Returns <nl> if ( const auto ReturnType = FD - > getResultInterfaceType ( ) ) { <nl> - Walker . serializeDeclarationFragments ( " returns " , ReturnType , OS ) ; <nl> + Graph . 
serializeDeclarationFragments ( " returns " , ReturnType , OS ) ; <nl> } <nl> } ) ; <nl> } <nl> void Symbol : : serializeSwiftGenericMixin ( llvm : : json : : OStream & OS ) const { <nl> } <nl> } <nl> <nl> - void Symbol : : serializeSwiftExtensionMixin ( SymbolGraphASTWalker & Walker , <nl> - llvm : : json : : OStream & OS ) const { <nl> + void Symbol : : serializeSwiftExtensionMixin ( llvm : : json : : OStream & OS ) const { <nl> if ( const auto * Extension <nl> = dyn_cast_or_null < ExtensionDecl > ( VD - > getInnermostDeclContext ( ) ) ) { <nl> OS . attributeObject ( " swiftExtension " , [ & ] ( ) { <nl> - OS . attribute ( " definedInModule " , Walker . M . getNameStr ( ) ) ; <nl> + if ( const auto * ExtendedNominal = Extension - > getExtendedNominal ( ) ) { <nl> + if ( const auto * ExtendedModule = ExtendedNominal - > getModuleContext ( ) ) { <nl> + OS . attribute ( " extendedModule " , ExtendedModule - > getNameStr ( ) ) ; <nl> + } <nl> + } <nl> auto Generics = Extension - > getGenericSignature ( ) ; <nl> if ( Generics & & ! Generics - > getRequirements ( ) . empty ( ) ) { <nl> OS . attributeArray ( " constraints " , [ & ] ( ) { <nl> void Symbol : : serializeSwiftExtensionMixin ( SymbolGraphASTWalker & Walker , <nl> } <nl> } <nl> <nl> - void Symbol : : serializeDeclarationFragmentMixin ( SymbolGraphASTWalker & Walker , <nl> - llvm : : json : : OStream & OS ) const { <nl> - Walker . serializeDeclarationFragments ( " declarationFragments " , VD , OS ) ; <nl> + void Symbol : : serializeDeclarationFragmentMixin ( llvm : : json : : OStream & OS ) const { <nl> + Graph . serializeDeclarationFragments ( " declarationFragments " , VD , OS ) ; <nl> } <nl> <nl> void Symbol : : serializeAccessLevelMixin ( llvm : : json : : OStream & OS ) const { <nl> void Symbol : : serializeAvailabilityMixin ( llvm : : json : : OStream & OS ) const { <nl> } ) ; / / end availability : [ ] <nl> } <nl> <nl> - void Symbol : : serialize ( SymbolGraphASTWalker & Walker , <nl> - llvm : : json : : OStream & OS ) const { <nl> + void Symbol : : serialize ( llvm : : json : : OStream & OS ) const { <nl> OS . object ( [ & ] ( ) { <nl> serializeKind ( OS ) ; <nl> - serializeIdentifier ( Walker , OS ) ; <nl> - serializePathComponents ( Walker , OS ) ; <nl> - serializeNames ( Walker , OS ) ; <nl> - serializeDocComment ( Walker , OS ) ; <nl> + serializeIdentifier ( OS ) ; <nl> + serializePathComponents ( OS ) ; <nl> + serializeNames ( OS ) ; <nl> + serializeDocComment ( OS ) ; <nl> <nl> / / " Mixins " <nl> - serializeFunctionSignature ( Walker , OS ) ; <nl> + serializeFunctionSignature ( OS ) ; <nl> serializeSwiftGenericMixin ( OS ) ; <nl> - serializeSwiftExtensionMixin ( Walker , OS ) ; <nl> - serializeDeclarationFragmentMixin ( Walker , OS ) ; <nl> + serializeSwiftExtensionMixin ( OS ) ; <nl> + serializeDeclarationFragmentMixin ( OS ) ; <nl> serializeAccessLevelMixin ( OS ) ; <nl> serializeAvailabilityMixin ( OS ) ; <nl> } ) ; <nl> mmm a / lib / SymbolGraphGen / Symbol . h <nl> ppp b / lib / SymbolGraphGen / Symbol . h <nl> <nl> # include " swift / AST / Attr . h " <nl> # include " swift / Basic / LLVM . h " <nl> # include " swift / Markup / Markup . h " <nl> + # include " SymbolGraph . h " <nl> <nl> namespace swift { <nl> namespace symbolgraphgen { <nl> struct SymbolGraphASTWalker ; <nl> <nl> / / / A symbol from a module : a node in a graph . <nl> struct Symbol { <nl> + / / / The symbol graph in which this symbol resides . 
<nl> + SymbolGraph & Graph ; <nl> + <nl> const ValueDecl * VD ; <nl> <nl> void serializeKind ( StringRef Identifier , StringRef DisplayName , <nl> struct Symbol { <nl> <nl> void serializeKind ( llvm : : json : : OStream & OS ) const ; <nl> <nl> - void serializeIdentifier ( SymbolGraphASTWalker & Walker , <nl> - llvm : : json : : OStream & OS ) const ; <nl> + void serializeIdentifier ( llvm : : json : : OStream & OS ) const ; <nl> <nl> - void serializePathComponents ( SymbolGraphASTWalker & Walker , <nl> - llvm : : json : : OStream & OS ) const ; <nl> + void serializePathComponents ( llvm : : json : : OStream & OS ) const ; <nl> <nl> - void serializeNames ( SymbolGraphASTWalker & Walker , <nl> - llvm : : json : : OStream & OS ) const ; <nl> + void serializeNames ( llvm : : json : : OStream & OS ) const ; <nl> <nl> void serializePosition ( StringRef Key , unsigned Line , unsigned ByteOffset , <nl> llvm : : json : : OStream & OS ) const ; <nl> struct Symbol { <nl> SourceRange Range , SourceManager & SourceMgr , <nl> llvm : : json : : OStream & OS ) const ; <nl> <nl> - void serializeDocComment ( SymbolGraphASTWalker & Walker , <nl> - llvm : : json : : OStream & OS ) const ; <nl> + void serializeDocComment ( llvm : : json : : OStream & OS ) const ; <nl> <nl> - void serializeFunctionSignature ( SymbolGraphASTWalker & Walker , <nl> - llvm : : json : : OStream & OS ) const ; <nl> + void serializeFunctionSignature ( llvm : : json : : OStream & OS ) const ; <nl> <nl> void serializeGenericParam ( const swift : : GenericTypeParamType & Param , <nl> llvm : : json : : OStream & OS ) const ; <nl> struct Symbol { <nl> <nl> void serializeSwiftGenericMixin ( llvm : : json : : OStream & OS ) const ; <nl> <nl> - void serializeSwiftExtensionMixin ( SymbolGraphASTWalker & Walker , <nl> - llvm : : json : : OStream & OS ) const ; <nl> + void serializeSwiftExtensionMixin ( llvm : : json : : OStream & OS ) const ; <nl> <nl> - void serializeDeclarationFragmentMixin ( SymbolGraphASTWalker & Walker , <nl> - llvm : : json : : OStream & OS ) const ; <nl> + void serializeDeclarationFragmentMixin ( llvm : : json : : OStream & OS ) const ; <nl> <nl> void serializeAccessLevelMixin ( llvm : : json : : OStream & OS ) const ; <nl> <nl> struct Symbol { <nl> <nl> void serializeAvailabilityMixin ( llvm : : json : : OStream & OS ) const ; <nl> <nl> - void serialize ( SymbolGraphASTWalker & Walker , <nl> - llvm : : json : : OStream & OS ) const ; <nl> + void serialize ( llvm : : json : : OStream & OS ) const ; <nl> <nl> bool operator = = ( const Symbol & Other ) const { <nl> return VD = = Other . VD ; <nl> mmm a / lib / SymbolGraphGen / SymbolGraph . cpp <nl> ppp b / lib / SymbolGraphGen / SymbolGraph . cpp <nl> <nl> / / <nl> / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> <nl> + # include " clang / AST / DeclObjC . h " <nl> + # include " swift / AST / Decl . h " <nl> # include " swift / AST / Module . h " <nl> + # include " swift / AST / ProtocolConformance . h " <nl> + # include " swift / AST / USRGeneration . h " <nl> # include " swift / Basic / Version . h " <nl> + # include " swift / ClangImporter / ClangModule . h " <nl> + # include " swift / Serialization / SerializedModuleLoader . h " <nl> <nl> + # include " DeclarationFragmentPrinter . h " <nl> # include " FormatVersion . h " <nl> + # include " Symbol . h " <nl> # include " SymbolGraph . 
h " <nl> <nl> using namespace swift ; <nl> using namespace symbolgraphgen ; <nl> <nl> - SymbolGraph : : SymbolGraph ( ModuleDecl & M , llvm : : Triple Target , <nl> + SymbolGraph : : SymbolGraph ( ModuleDecl & M , <nl> + Optional < ModuleDecl * > ExtendedModule , <nl> + llvm : : Triple Target , <nl> + markup : : MarkupContext & Ctx , <nl> Optional < llvm : : VersionTuple > ModuleVersion ) <nl> - : M ( M ) , Target ( Target ) , ModuleVersion ( ModuleVersion ) { } <nl> + : M ( M ) , <nl> + ExtendedModule ( ExtendedModule ) , <nl> + Target ( Target ) , <nl> + Ctx ( Ctx ) , <nl> + ModuleVersion ( ModuleVersion ) { } <nl> <nl> - void SymbolGraph : : serialize ( SymbolGraphASTWalker & Walker , <nl> - llvm : : json : : OStream & OS ) const { <nl> + / / MARK : - Utilities <nl> + <nl> + StringRef SymbolGraph : : getUSR ( const ValueDecl * VD ) { <nl> + auto Found = USRCache . find ( VD ) ; <nl> + if ( Found ! = USRCache . end ( ) ) { <nl> + return Found - > second ; <nl> + } <nl> + llvm : : SmallString < 32 > Scratch ; <nl> + llvm : : raw_svector_ostream OS ( Scratch ) ; <nl> + ide : : printDeclUSR ( VD , OS ) ; <nl> + auto USR = Ctx . allocateCopy ( Scratch . str ( ) ) ; <nl> + USRCache . insert ( { VD , USR } ) ; <nl> + return USR ; <nl> + } <nl> + <nl> + void <nl> + SymbolGraph : : getPathComponents ( const ValueDecl * VD , <nl> + SmallVectorImpl < SmallString < 32 > > & Components ) { <nl> + / / Collect the spellings of the fully qualified identifier components . <nl> + auto Decl = VD ; <nl> + while ( Decl & & ! isa < ModuleDecl > ( Decl ) ) { <nl> + SmallString < 32 > Scratch ; <nl> + Decl - > getFullName ( ) . getString ( Scratch ) ; <nl> + Components . push_back ( Scratch ) ; <nl> + if ( const auto * DC = Decl - > getDeclContext ( ) ) { <nl> + if ( const auto * Proto = DC - > getExtendedProtocolDecl ( ) ) { <nl> + Decl = Proto ; <nl> + } else if ( const auto * Ext = dyn_cast_or_null < ExtensionDecl > ( DC - > getAsDecl ( ) ) ) { <nl> + Decl = Ext - > getExtendedNominal ( ) ; <nl> + } else { <nl> + Decl = dyn_cast_or_null < ValueDecl > ( DC - > getAsDecl ( ) ) ; <nl> + } <nl> + } else { <nl> + Decl = nullptr ; <nl> + } <nl> + } <nl> + <nl> + / / The list is leaf - to - root , but our list is root - to - leaf , so reverse it . <nl> + std : : reverse ( Components . begin ( ) , Components . end ( ) ) ; <nl> + } <nl> + <nl> + PrintOptions SymbolGraph : : getDeclarationFragmentsPrintOptions ( ) const { <nl> + PrintOptions Opts ; <nl> + Opts . FunctionDefinitions = false ; <nl> + Opts . ArgAndParamPrinting = <nl> + PrintOptions : : ArgAndParamPrintingMode : : ArgumentOnly ; <nl> + Opts . PrintGetSetOnRWProperties = false ; <nl> + Opts . PrintPropertyAccessors = false ; <nl> + Opts . PrintSubscriptAccessors = false ; <nl> + Opts . SkipUnderscoredKeywords = true ; <nl> + Opts . SkipAttributes = true ; <nl> + Opts . PrintOverrideKeyword = true ; <nl> + Opts . PrintImplicitAttrs = false ; <nl> + Opts . PrintFunctionRepresentationAttrs = <nl> + PrintOptions : : FunctionRepresentationMode : : None ; <nl> + Opts . PrintUserInaccessibleAttrs = false ; <nl> + Opts . SkipPrivateStdlibDecls = true ; <nl> + Opts . SkipUnderscoredStdlibProtocols = true ; <nl> + <nl> + Opts . ExclusiveAttrList . clear ( ) ; <nl> + <nl> + # define DECL_ATTR ( SPELLING , CLASS , OPTIONS , CODE ) Opts . ExcludeAttrList . push_back ( DAK_ # # CLASS ) ; <nl> + # define TYPE_ATTR ( X ) Opts . ExcludeAttrList . push_back ( TAK_ # # X ) ; <nl> + # include " swift / AST / Attr . 
def " <nl> + <nl> + return Opts ; <nl> + } <nl> + <nl> + / / MARK : - Symbols ( Nodes ) <nl> + <nl> + void SymbolGraph : : recordNode ( const ValueDecl * VD ) { <nl> + Nodes . insert ( VD ) ; <nl> + <nl> + / / Record all of the possible relationships ( edges ) originating <nl> + / / with this declaration . <nl> + recordMemberRelationship ( VD ) ; <nl> + recordConformanceRelationships ( VD ) ; <nl> + recordInheritanceRelationships ( VD ) ; <nl> + recordDefaultImplementationRelationships ( VD ) ; <nl> + recordOverrideRelationship ( VD ) ; <nl> + recordRequirementRelationships ( VD ) ; <nl> + recordOptionalRequirementRelationships ( VD ) ; <nl> + } <nl> + <nl> + / / MARK : - Relationships ( Edges ) <nl> + <nl> + void SymbolGraph : : recordEdge ( const ValueDecl * Source , <nl> + const ValueDecl * Target , <nl> + RelationshipKind Kind ) { <nl> + if ( Target - > isPrivateStdlibDecl ( <nl> + / * treatNonBuiltinProtocolsAsPublic = * / false ) ) { <nl> + return ; <nl> + } <nl> + <nl> + / / There might be relationships on implicit declarations , <nl> + / / such as overriding implicit @ objc init ( ) . <nl> + if ( Target - > isImplicit ( ) ) { <nl> + return ; <nl> + } <nl> + <nl> + Nodes . insert ( Source ) ; <nl> + if ( Target - > getModuleContext ( ) ! = & M ) { <nl> + / / Don ' t claim a symbol just because we have a relationship to it . <nl> + / / For example , if we conform to ` Sequence ` , that symbol ' s node should be <nl> + / / under Swift , not this module . <nl> + Nodes . insert ( Target ) ; <nl> + } <nl> + <nl> + Edges . insert ( { this , Kind , Source , Target } ) ; <nl> + } <nl> + <nl> + void SymbolGraph : : recordMemberRelationship ( const ValueDecl * VD ) { <nl> + auto * DC = VD - > getDeclContext ( ) ; <nl> + switch ( DC - > getContextKind ( ) ) { <nl> + case DeclContextKind : : GenericTypeDecl : <nl> + case DeclContextKind : : ExtensionDecl : <nl> + case swift : : DeclContextKind : : EnumElementDecl : <nl> + return recordEdge ( VD , VD - > getDeclContext ( ) - > getSelfNominalTypeDecl ( ) , <nl> + RelationshipKind : : MemberOf ( ) ) ; <nl> + case swift : : DeclContextKind : : AbstractClosureExpr : <nl> + case swift : : DeclContextKind : : Initializer : <nl> + case swift : : DeclContextKind : : TopLevelCodeDecl : <nl> + case swift : : DeclContextKind : : SubscriptDecl : <nl> + case swift : : DeclContextKind : : AbstractFunctionDecl : <nl> + case swift : : DeclContextKind : : SerializedLocal : <nl> + case swift : : DeclContextKind : : Module : <nl> + case swift : : DeclContextKind : : FileUnit : <nl> + break ; <nl> + } <nl> + } <nl> + <nl> + void <nl> + SymbolGraph : : recordInheritanceRelationships ( const ValueDecl * VD ) { <nl> + if ( const auto * NTD = dyn_cast < NominalTypeDecl > ( VD ) ) { <nl> + for ( const auto & InheritanceLoc : NTD - > getInherited ( ) ) { <nl> + auto Ty = InheritanceLoc . getType ( ) ; <nl> + if ( ! Ty ) { <nl> + continue ; <nl> + } <nl> + auto * InheritedTypeDecl = <nl> + dyn_cast_or_null < ClassDecl > ( Ty - > getAnyNominal ( ) ) ; <nl> + if ( ! 
InheritedTypeDecl ) { <nl> + continue ; <nl> + } <nl> + <nl> + recordEdge ( VD , InheritedTypeDecl , RelationshipKind : : InheritsFrom ( ) ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + void SymbolGraph : : recordDefaultImplementationRelationships ( <nl> + const ValueDecl * VD ) { <nl> + if ( const auto * Extension = dyn_cast < ExtensionDecl > ( VD - > getDeclContext ( ) ) ) { <nl> + if ( const auto * Protocol = Extension - > getExtendedProtocolDecl ( ) ) { <nl> + for ( const auto * Member : Protocol - > getMembers ( ) ) { <nl> + if ( const auto * MemberVD = dyn_cast < ValueDecl > ( Member ) ) { <nl> + if ( MemberVD - > getFullName ( ) . compare ( VD - > getFullName ( ) ) = = 0 ) { <nl> + recordEdge ( VD , MemberVD , <nl> + RelationshipKind : : DefaultImplementationOf ( ) ) ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + } <nl> + } <nl> + <nl> + void <nl> + SymbolGraph : : recordRequirementRelationships ( const ValueDecl * VD ) { <nl> + if ( const auto * Protocol = dyn_cast < ProtocolDecl > ( VD - > getDeclContext ( ) ) ) { <nl> + if ( VD - > isProtocolRequirement ( ) ) { <nl> + recordEdge ( VD , Protocol , RelationshipKind : : RequirementOf ( ) ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + void SymbolGraph : : recordOptionalRequirementRelationships ( <nl> + const ValueDecl * VD ) { <nl> + if ( const auto * Protocol = dyn_cast < ProtocolDecl > ( VD - > getDeclContext ( ) ) ) { <nl> + if ( VD - > isProtocolRequirement ( ) ) { <nl> + if ( const auto * ClangDecl = VD - > getClangDecl ( ) ) { <nl> + if ( const auto * Method = dyn_cast < clang : : ObjCMethodDecl > ( ClangDecl ) ) { <nl> + if ( Method - > isOptional ( ) ) { <nl> + recordEdge ( VD , Protocol , <nl> + RelationshipKind : : OptionalRequirementOf ( ) ) ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + } <nl> + } <nl> + <nl> + void <nl> + SymbolGraph : : recordConformanceRelationships ( const ValueDecl * VD ) { <nl> + if ( const auto * NTD = dyn_cast < NominalTypeDecl > ( VD ) ) { <nl> + for ( const auto * Conformance : NTD - > getAllConformances ( ) ) { <nl> + recordEdge ( VD , Conformance - > getProtocol ( ) , <nl> + RelationshipKind : : ConformsTo ( ) ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + void SymbolGraph : : recordOverrideRelationship ( const ValueDecl * VD ) { <nl> + if ( const auto * Override = VD - > getOverriddenDecl ( ) ) { <nl> + recordEdge ( VD , Override , RelationshipKind : : Overrides ( ) ) ; <nl> + } <nl> + } <nl> + <nl> + / / MARK : - Serialization <nl> + <nl> + void SymbolGraph : : serialize ( llvm : : json : : OStream & OS ) { <nl> OS . object ( [ & ] ( ) { <nl> OS . attributeObject ( " metadata " , [ & ] ( ) { <nl> { <nl> void SymbolGraph : : serialize ( SymbolGraphASTWalker & Walker , <nl> OS . attributeObject ( " module " , [ & ] ( ) { <nl> OS . attribute ( " name " , M . getNameStr ( ) ) ; <nl> AttributeRAII Platform ( " platform " , OS ) ; <nl> - symbolgraphgen : : serialize ( Target , OS ) ; <nl> + <nl> + auto * MainFile = M . getFiles ( ) . 
front ( ) ; <nl> + switch ( MainFile - > getKind ( ) ) { <nl> + case FileUnitKind : : Builtin : <nl> + llvm_unreachable ( " Unexpected module kind : Builtin " ) ; <nl> + case FileUnitKind : : DWARFModule : <nl> + llvm_unreachable ( " Unexpected module kind : DWARFModule " ) ; <nl> + case FileUnitKind : : Source : <nl> + llvm_unreachable ( " Unexpected module kind : Source " ) ; <nl> + break ; <nl> + case FileUnitKind : : SerializedAST : { <nl> + auto SerializedAST = cast < SerializedASTFile > ( MainFile ) ; <nl> + auto Target = llvm : : Triple ( SerializedAST - > getTargetTriple ( ) ) ; <nl> + symbolgraphgen : : serialize ( Target , OS ) ; <nl> + break ; <nl> + } <nl> + case FileUnitKind : : ClangModule : { <nl> + auto ClangModule = cast < ClangModuleUnit > ( MainFile ) ; <nl> + if ( const auto * Overlay = ClangModule - > getOverlayModule ( ) ) { <nl> + auto & OverlayMainFile = <nl> + Overlay - > getMainFile ( FileUnitKind : : SerializedAST ) ; <nl> + auto SerializedAST = cast < SerializedASTFile > ( OverlayMainFile ) ; <nl> + auto Target = llvm : : Triple ( SerializedAST . getTargetTriple ( ) ) ; <nl> + symbolgraphgen : : serialize ( Target , OS ) ; <nl> + } else { <nl> + symbolgraphgen : : serialize ( Target , OS ) ; <nl> + } <nl> + break ; <nl> + } <nl> + } <nl> } ) ; <nl> <nl> if ( ModuleVersion ) { <nl> void SymbolGraph : : serialize ( SymbolGraphASTWalker & Walker , <nl> <nl> OS . attributeArray ( " symbols " , [ & ] ( ) { <nl> for ( const auto * VD : Nodes ) { <nl> - Symbol S { VD } ; <nl> - S . serialize ( Walker , OS ) ; <nl> + Symbol S { * this , VD } ; <nl> + S . serialize ( OS ) ; <nl> } <nl> } ) ; <nl> <nl> void SymbolGraph : : serialize ( SymbolGraphASTWalker & Walker , <nl> <nl> } ) ; <nl> } <nl> + <nl> + void <nl> + SymbolGraph : : serializeDeclarationFragments ( StringRef Key , <nl> + const ValueDecl * VD , <nl> + llvm : : json : : OStream & OS ) { <nl> + DeclarationFragmentPrinter Printer ( * this , OS , Key ) ; <nl> + VD - > print ( Printer , getDeclarationFragmentsPrintOptions ( ) ) ; <nl> + } <nl> + <nl> + void <nl> + SymbolGraph : : serializeSubheadingDeclarationFragments ( StringRef Key , <nl> + const ValueDecl * VD , <nl> + llvm : : json : : OStream & OS ) { <nl> + DeclarationFragmentPrinter Printer ( * this , OS , Key ) ; <nl> + auto Options = getDeclarationFragmentsPrintOptions ( ) ; <nl> + Options . VarInitializers = false ; <nl> + Options . PrintDefaultArgumentValue = false ; <nl> + Options . PrintEmptyArgumentNames = false ; <nl> + Options . PrintOverrideKeyword = false ; <nl> + VD - > print ( Printer , Options ) ; <nl> + } <nl> + <nl> + void <nl> + SymbolGraph : : serializeDeclarationFragments ( StringRef Key , Type T , <nl> + llvm : : json : : OStream & OS ) { <nl> + DeclarationFragmentPrinter Printer ( * this , OS , Key ) ; <nl> + T - > print ( Printer , getDeclarationFragmentsPrintOptions ( ) ) ; <nl> + } <nl> mmm a / lib / SymbolGraphGen / SymbolGraph . h <nl> ppp b / lib / SymbolGraphGen / SymbolGraph . h <nl> <nl> # include " llvm / Support / JSON . h " <nl> # include " llvm / Support / VersionTuple . h " <nl> # include " swift / Basic / LLVM . h " <nl> + # include " swift / Markup / Markup . h " <nl> # include " Edge . h " <nl> # include " JSON . h " <nl> <nl> struct SymbolGraph { <nl> * / <nl> ModuleDecl & M ; <nl> <nl> + / * * <nl> + The module whose types were extended in ` M ` . <nl> + * / <nl> + Optional < ModuleDecl * > ExtendedModule ; <nl> + <nl> / * * <nl> The module ' s target triple . 
<nl> * / <nl> llvm : : Triple Target ; <nl> <nl> + / * * <nl> + A context for allocations . <nl> + * / <nl> + markup : : MarkupContext & Ctx ; <nl> + <nl> / * * <nl> The semantic version of the module that this symbol graph describes , <nl> if known . <nl> struct SymbolGraph { <nl> * / <nl> llvm : : DenseSet < Edge > Edges ; <nl> <nl> - SymbolGraph ( ModuleDecl & M , llvm : : Triple Target , <nl> + / / / A cache of USRs for declarations . <nl> + llvm : : DenseMap < const ValueDecl * , StringRef > USRCache ; <nl> + <nl> + SymbolGraph ( ModuleDecl & M , <nl> + Optional < ModuleDecl * > ExtendedModule , <nl> + llvm : : Triple Target , <nl> + markup : : MarkupContext & Ctx , <nl> Optional < llvm : : VersionTuple > ModuleVersion = None ) ; <nl> <nl> - void serialize ( SymbolGraphASTWalker & Walker , <nl> - llvm : : json : : OStream & OS ) const ; <nl> + / / MARK : - Utilities <nl> + <nl> + / / / Get the USR of a declaration and add it to the local allocator . <nl> + StringRef getUSR ( const ValueDecl * VD ) ; <nl> + <nl> + / / / Returns an array of path components for a declaration . <nl> + void getPathComponents ( const ValueDecl * VD , <nl> + SmallVectorImpl < SmallString < 32 > > & Components ) ; <nl> + <nl> + / / / Get the base print options for declaration fragments . <nl> + PrintOptions getDeclarationFragmentsPrintOptions ( ) const ; <nl> + <nl> + / / MARK : - Symbols ( Nodes ) <nl> + <nl> + / * * <nl> + Record a symbol as a node in the graph . <nl> + * / <nl> + void recordNode ( const ValueDecl * VD ) ; <nl> + <nl> + / / MARK : - Relationships ( Edges ) <nl> + <nl> + / * * <nl> + Record a relationship between two declarations as an edge in the graph . <nl> + <nl> + \ param Source The declaration serving as the source of the edge in the <nl> + directed graph . <nl> + \ param Target The declaration serving as the target of the edge in the <nl> + directed graph . <nl> + \ param Kind The kind of relationship the edge represents . <nl> + * / <nl> + void recordEdge ( const ValueDecl * Source , const ValueDecl * Target , <nl> + RelationshipKind Kind ) ; <nl> + <nl> + / * * <nl> + Record a MemberOf relationship , if the given declaration is nested <nl> + in another . <nl> + * / <nl> + void recordMemberRelationship ( const ValueDecl * VD ) ; <nl> + <nl> + / * * <nl> + Record InheritsFrom relationships for every class from which the <nl> + declaration inherits . <nl> + * / <nl> + void recordInheritanceRelationships ( const ValueDecl * VD ) ; <nl> + <nl> + / * * <nl> + If the declaration is a default implementation in a protocol extension , <nl> + record a DefaultImplementationOf relationship between the declaration and <nl> + the requirement . <nl> + * / <nl> + void recordDefaultImplementationRelationships ( const ValueDecl * VD ) ; <nl> + <nl> + / * * <nl> + Record a RequirementOf relationship if the declaration is a requirement <nl> + of a protocol . <nl> + * / <nl> + void recordRequirementRelationships ( const ValueDecl * VD ) ; <nl> + <nl> + / * * <nl> + If the declaration is an Objective - C - based optional protocol requirement , <nl> + record an OptionalRequirementOf relationship between the declaration <nl> + and its containing protocol . <nl> + * / <nl> + void recordOptionalRequirementRelationships ( const ValueDecl * VD ) ; <nl> + <nl> + / * * <nl> + Record ConformsTo relationships for each protocol conformance of <nl> + the declaration . 
<nl> + * / <nl> + void recordConformanceRelationships ( const ValueDecl * VD ) ; <nl> + <nl> + / * * <nl> + Records an Overrides relationship if the given declaration <nl> + overrides another . <nl> + * / <nl> + void recordOverrideRelationship ( const ValueDecl * VD ) ; <nl> + <nl> + / / MARK : - Serialization <nl> + <nl> + / / / Serialize this symbol graph ' s JSON to an output stream . <nl> + void serialize ( llvm : : json : : OStream & OS ) ; <nl> + <nl> + / / / Serialize the overall declaration fragments for a ` ValueDecl ` . <nl> + void <nl> + serializeDeclarationFragments ( StringRef Key , const ValueDecl * VD , <nl> + llvm : : json : : OStream & OS ) ; <nl> + <nl> + / / / Get the overall declaration fragments for a ` ValueDecl ` when it is viewed <nl> + / / / as a subheading and / or part of a larger group of symbol listings . <nl> + void <nl> + serializeSubheadingDeclarationFragments ( StringRef Key , const ValueDecl * VD , <nl> + llvm : : json : : OStream & OS ) ; <nl> + <nl> + / / / Get the overall declaration for a type declaration . <nl> + void <nl> + serializeDeclarationFragments ( StringRef Key , Type T , <nl> + llvm : : json : : OStream & OS ) ; <nl> } ; <nl> <nl> } / / end namespace symbolgraphgen <nl> mmm a / lib / SymbolGraphGen / SymbolGraphASTWalker . cpp <nl> ppp b / lib / SymbolGraphGen / SymbolGraphASTWalker . cpp <nl> <nl> / / <nl> / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> <nl> - # include " clang / AST / DeclObjC . h " <nl> # include " llvm / ADT / StringSwitch . h " <nl> - # include " swift / AST / ASTContext . h " <nl> - # include " swift / AST / ASTPrinter . h " <nl> # include " swift / AST / Decl . h " <nl> - # include " swift / AST / GenericSignature . h " <nl> # include " swift / AST / Module . h " <nl> - # include " swift / AST / ParameterList . h " <nl> - # include " swift / AST / ProtocolConformance . h " <nl> - # include " swift / AST / USRGeneration . h " <nl> - # include " swift / Basic / PrimitiveParsing . h " <nl> - # include " swift / Markup / Markup . h " <nl> + # include " swift / Serialization / SerializedModuleLoader . h " <nl> # include " swift / SymbolGraphGen / SymbolGraphGen . h " <nl> <nl> - # include " DeclarationFragmentPrinter . h " <nl> # include " SymbolGraphASTWalker . h " <nl> <nl> using namespace swift ; <nl> SymbolGraphASTWalker : : SymbolGraphASTWalker ( ModuleDecl & M , <nl> const SymbolGraphOptions & Options ) <nl> : Options ( Options ) , <nl> M ( M ) , <nl> - Graph ( M , Options . Target ) { } <nl> + Graph ( M , None , Options . Target , Ctx ) { } <nl> <nl> / / / Returns ` true ` if the symbol should be included as a node in the graph . <nl> bool SymbolGraphASTWalker : : shouldIncludeNode ( const Decl * D ) const { <nl> bool SymbolGraphASTWalker : : shouldIncludeNode ( const Decl * D ) const { <nl> return ShouldInclude ; <nl> } <nl> <nl> + / / / Get a " sub " symbol graph for the parent module of a type that the main module ` M ` is extending . <nl> + SymbolGraph & SymbolGraphASTWalker : : getExtendedModuleSymbolGraph ( ModuleDecl * M ) { <nl> + auto Found = ExtendedModuleGraphs . find ( M ) ; <nl> + if ( Found ! = ExtendedModuleGraphs . end ( ) ) { <nl> + return * Found - > getSecond ( ) ; <nl> + } <nl> + auto * Memory = Ctx . allocate ( sizeof ( SymbolGraph ) , alignof ( SymbolGraph ) ) ; <nl> + auto * SG = new ( Memory ) SymbolGraph ( Graph . M , <nl> + Optional < ModuleDecl * > ( M ) , <nl> + Options . Target , <nl> + Ctx ) ; <nl> + ExtendedModuleGraphs . 
insert ( { M , SG } ) ; <nl> + return * SG ; <nl> + } <nl> + <nl> bool SymbolGraphASTWalker : : walkToDeclPre ( Decl * D , CharSourceRange Range ) { <nl> <nl> switch ( D - > getKind ( ) ) { <nl> bool SymbolGraphASTWalker : : walkToDeclPre ( Decl * D , CharSourceRange Range ) { <nl> } <nl> <nl> auto * VD = cast < ValueDecl > ( D ) ; <nl> - Graph . Nodes . insert ( VD ) ; <nl> - <nl> - / / Record all of the possible relationships ( edges ) originating <nl> - / / with this declaration . <nl> - recordMemberRelationship ( VD ) ; <nl> - recordConformanceRelationships ( VD ) ; <nl> - recordInheritanceRelationships ( VD ) ; <nl> - recordDefaultImplementationRelationships ( VD ) ; <nl> - recordOverrideRelationship ( VD ) ; <nl> - recordRequirementRelationships ( VD ) ; <nl> - recordOptionalRequirementRelationships ( VD ) ; <nl> - <nl> - return true ; <nl> - } <nl> - <nl> - StringRef SymbolGraphASTWalker : : getUSR ( const ValueDecl * VD ) { <nl> - auto Found = USRCache . find ( VD ) ; <nl> - if ( Found ! = USRCache . end ( ) ) { <nl> - return Found - > second ; <nl> - } <nl> - llvm : : SmallString < 32 > Scratch ; <nl> - llvm : : raw_svector_ostream OS ( Scratch ) ; <nl> - ide : : printDeclUSR ( VD , OS ) ; <nl> - auto USR = Ctx . allocateCopy ( Scratch . str ( ) ) ; <nl> - USRCache . insert ( { VD , USR } ) ; <nl> - return USR ; <nl> - } <nl> - <nl> - void <nl> - SymbolGraphASTWalker : : getPathComponents ( const ValueDecl * VD , <nl> - SmallVectorImpl < SmallString < 32 > > & Components ) { <nl> - / / Collect the spellings of the fully qualified identifier components . <nl> - auto Decl = VD ; <nl> - while ( Decl & & ! isa < ModuleDecl > ( Decl ) ) { <nl> - SmallString < 32 > Scratch ; <nl> - Decl - > getFullName ( ) . getString ( Scratch ) ; <nl> - Components . push_back ( Scratch ) ; <nl> - if ( const auto * DC = Decl - > getDeclContext ( ) ) { <nl> - if ( const auto * Proto = DC - > getExtendedProtocolDecl ( ) ) { <nl> - Decl = Proto ; <nl> - } else if ( const auto * Ext = dyn_cast_or_null < ExtensionDecl > ( DC - > getAsDecl ( ) ) ) { <nl> - Decl = Ext - > getExtendedNominal ( ) ; <nl> - } else { <nl> - Decl = dyn_cast_or_null < ValueDecl > ( DC - > getAsDecl ( ) ) ; <nl> - } <nl> - } else { <nl> - Decl = nullptr ; <nl> - } <nl> - } <nl> - <nl> - / / The list is leaf - to - root , but our list is root - to - leaf , so reverse it . <nl> - std : : reverse ( Components . begin ( ) , Components . end ( ) ) ; <nl> - } <nl> - <nl> - PrintOptions SymbolGraphASTWalker : : getDeclarationFragmentsPrintOptions ( ) const { <nl> - PrintOptions Opts ; <nl> - Opts . FunctionDefinitions = false ; <nl> - Opts . ArgAndParamPrinting = <nl> - PrintOptions : : ArgAndParamPrintingMode : : ArgumentOnly ; <nl> - Opts . PrintGetSetOnRWProperties = false ; <nl> - Opts . PrintPropertyAccessors = false ; <nl> - Opts . PrintSubscriptAccessors = false ; <nl> - Opts . SkipUnderscoredKeywords = true ; <nl> - Opts . SkipAttributes = true ; <nl> - Opts . PrintOverrideKeyword = true ; <nl> - Opts . PrintImplicitAttrs = false ; <nl> - Opts . PrintFunctionRepresentationAttrs = <nl> - PrintOptions : : FunctionRepresentationMode : : None ; <nl> - Opts . PrintUserInaccessibleAttrs = false ; <nl> - Opts . SkipPrivateStdlibDecls = true ; <nl> - Opts . SkipUnderscoredStdlibProtocols = true ; <nl> - <nl> - Opts . ExclusiveAttrList . clear ( ) ; <nl> - <nl> - # define DECL_ATTR ( SPELLING , CLASS , OPTIONS , CODE ) Opts . ExcludeAttrList . push_back ( DAK_ # # CLASS ) ; <nl> - # define TYPE_ATTR ( X ) Opts . ExcludeAttrList . 
push_back ( TAK_ # # X ) ; <nl> - # include " swift / AST / Attr . def " <nl> - <nl> - return Opts ; <nl> - } <nl> - <nl> - void <nl> - SymbolGraphASTWalker : : serializeDeclarationFragments ( StringRef Key , <nl> - const ValueDecl * VD , <nl> - llvm : : json : : OStream & OS ) { <nl> - DeclarationFragmentPrinter Printer ( * this , OS , Key ) ; <nl> - VD - > print ( Printer , getDeclarationFragmentsPrintOptions ( ) ) ; <nl> - } <nl> - <nl> - void <nl> - SymbolGraphASTWalker : : serializeSubheadingDeclarationFragments ( StringRef Key , <nl> - const ValueDecl * VD , <nl> - llvm : : json : : OStream & OS ) { <nl> - DeclarationFragmentPrinter Printer ( * this , OS , Key ) ; <nl> - auto Options = getDeclarationFragmentsPrintOptions ( ) ; <nl> - Options . VarInitializers = false ; <nl> - Options . PrintDefaultArgumentValue = false ; <nl> - Options . PrintEmptyArgumentNames = false ; <nl> - Options . PrintOverrideKeyword = false ; <nl> - VD - > print ( Printer , Options ) ; <nl> - } <nl> - <nl> - void <nl> - SymbolGraphASTWalker : : serializeDeclarationFragments ( StringRef Key , Type T , <nl> - llvm : : json : : OStream & OS ) { <nl> - DeclarationFragmentPrinter Printer ( * this , OS , Key ) ; <nl> - T - > print ( Printer , getDeclarationFragmentsPrintOptions ( ) ) ; <nl> - } <nl> - <nl> - void SymbolGraphASTWalker : : recordEdge ( const ValueDecl * Source , <nl> - const ValueDecl * Target , <nl> - RelationshipKind Kind ) { <nl> - if ( Target - > isPrivateStdlibDecl ( <nl> - / * treatNonBuiltinProtocolsAsPublic = * / false ) ) { <nl> - return ; <nl> - } <nl> - <nl> - / / There might be relationships on implicit declarations , <nl> - / / such as overriding implicit @ objc init ( ) . <nl> - if ( Target - > isImplicit ( ) ) { <nl> - return ; <nl> - } <nl> - <nl> - Graph . Nodes . insert ( Source ) ; <nl> - Graph . Nodes . insert ( Target ) ; <nl> - <nl> - Graph . Edges . insert ( { this , Kind , Source , Target } ) ; <nl> - } <nl> - <nl> - void SymbolGraphASTWalker : : recordMemberRelationship ( const ValueDecl * VD ) { <nl> - auto * DC = VD - > getDeclContext ( ) ; <nl> - switch ( DC - > getContextKind ( ) ) { <nl> - case DeclContextKind : : GenericTypeDecl : <nl> - case DeclContextKind : : ExtensionDecl : <nl> - case swift : : DeclContextKind : : EnumElementDecl : <nl> - return recordEdge ( VD , VD - > getDeclContext ( ) - > getSelfNominalTypeDecl ( ) , <nl> - RelationshipKind : : MemberOf ( ) ) ; <nl> - case swift : : DeclContextKind : : AbstractClosureExpr : <nl> - case swift : : DeclContextKind : : Initializer : <nl> - case swift : : DeclContextKind : : TopLevelCodeDecl : <nl> - case swift : : DeclContextKind : : SubscriptDecl : <nl> - case swift : : DeclContextKind : : AbstractFunctionDecl : <nl> - case swift : : DeclContextKind : : SerializedLocal : <nl> - case swift : : DeclContextKind : : Module : <nl> - case swift : : DeclContextKind : : FileUnit : <nl> - break ; <nl> - } <nl> - } <nl> - <nl> - void <nl> - SymbolGraphASTWalker : : recordInheritanceRelationships ( const ValueDecl * VD ) { <nl> - if ( const auto * NTD = dyn_cast < NominalTypeDecl > ( VD ) ) { <nl> - for ( const auto & InheritanceLoc : NTD - > getInherited ( ) ) { <nl> - auto Ty = InheritanceLoc . getType ( ) ; <nl> - if ( ! Ty ) { <nl> - continue ; <nl> - } <nl> - auto * InheritedTypeDecl = <nl> - dyn_cast_or_null < ClassDecl > ( Ty - > getAnyNominal ( ) ) ; <nl> - if ( ! 
InheritedTypeDecl ) { <nl> - continue ; <nl> - } <nl> - <nl> - recordEdge ( VD , InheritedTypeDecl , RelationshipKind : : InheritsFrom ( ) ) ; <nl> - } <nl> - } <nl> - } <nl> <nl> - void SymbolGraphASTWalker : : recordDefaultImplementationRelationships ( <nl> - const ValueDecl * VD ) { <nl> - if ( const auto * Extension = dyn_cast < ExtensionDecl > ( VD - > getDeclContext ( ) ) ) { <nl> - if ( const auto * Protocol = Extension - > getExtendedProtocolDecl ( ) ) { <nl> - for ( const auto * Member : Protocol - > getMembers ( ) ) { <nl> - if ( const auto * MemberVD = dyn_cast < ValueDecl > ( Member ) ) { <nl> - if ( MemberVD - > getFullName ( ) . compare ( VD - > getFullName ( ) ) = = 0 ) { <nl> - recordEdge ( VD , MemberVD , <nl> - RelationshipKind : : DefaultImplementationOf ( ) ) ; <nl> - } <nl> - } <nl> + / / If this symbol extends a type from another module , record it in that <nl> + / / module ' s symbol graph , which will be emitted separately . <nl> + if ( const auto * Extension <nl> + = dyn_cast_or_null < ExtensionDecl > ( VD - > getInnermostDeclContext ( ) ) ) { <nl> + if ( const auto * ExtendedNominal = Extension - > getExtendedNominal ( ) ) { <nl> + auto ExtendedModule = ExtendedNominal - > getModuleContext ( ) ; <nl> + if ( ExtendedModule ! = & M ) { <nl> + auto & SG = getExtendedModuleSymbolGraph ( ExtendedModule ) ; <nl> + SG . recordNode ( VD ) ; <nl> + return true ; <nl> } <nl> } <nl> } <nl> - } <nl> <nl> - void <nl> - SymbolGraphASTWalker : : recordRequirementRelationships ( const ValueDecl * VD ) { <nl> - if ( const auto * Protocol = dyn_cast < ProtocolDecl > ( VD - > getDeclContext ( ) ) ) { <nl> - if ( VD - > isProtocolRequirement ( ) ) { <nl> - recordEdge ( VD , Protocol , RelationshipKind : : RequirementOf ( ) ) ; <nl> - } <nl> - } <nl> - } <nl> + / / Otherwise , record this in the main module ` M ` ' s symbol graph . <nl> + Graph . recordNode ( VD ) ; <nl> <nl> - void SymbolGraphASTWalker : : recordOptionalRequirementRelationships ( <nl> - const ValueDecl * VD ) { <nl> - if ( const auto * Protocol = dyn_cast < ProtocolDecl > ( VD - > getDeclContext ( ) ) ) { <nl> - if ( VD - > isProtocolRequirement ( ) ) { <nl> - if ( const auto * ClangDecl = VD - > getClangDecl ( ) ) { <nl> - if ( const auto * Method = dyn_cast < clang : : ObjCMethodDecl > ( ClangDecl ) ) { <nl> - if ( Method - > isOptional ( ) ) { <nl> - recordEdge ( VD , Protocol , <nl> - RelationshipKind : : OptionalRequirementOf ( ) ) ; <nl> - } <nl> - } <nl> - } <nl> - } <nl> - } <nl> - } <nl> - <nl> - void <nl> - SymbolGraphASTWalker : : recordConformanceRelationships ( const ValueDecl * VD ) { <nl> - if ( const auto * NTD = dyn_cast < NominalTypeDecl > ( VD ) ) { <nl> - for ( const auto * Conformance : NTD - > getAllConformances ( ) ) { <nl> - recordEdge ( VD , Conformance - > getProtocol ( ) , <nl> - RelationshipKind : : ConformsTo ( ) ) ; <nl> - } <nl> - } <nl> - } <nl> - <nl> - void SymbolGraphASTWalker : : recordOverrideRelationship ( const ValueDecl * VD ) { <nl> - if ( const auto * Override = VD - > getOverriddenDecl ( ) ) { <nl> - recordEdge ( VD , Override , RelationshipKind : : Overrides ( ) ) ; <nl> - } <nl> + return true ; <nl> } <nl> mmm a / lib / SymbolGraphGen / SymbolGraphASTWalker . h <nl> ppp b / lib / SymbolGraphGen / SymbolGraphASTWalker . h <nl> <nl> # ifndef SWIFT_SYMBOLGRAPHGEN_SYMBOLGRAPHASTWALKER_H <nl> # define SWIFT_SYMBOLGRAPHGEN_SYMBOLGRAPHASTWALKER_H <nl> <nl> + # include " llvm / ADT / DenseMap . h " <nl> + # include " swift / AST / Module . 
h " <nl> # include " swift / Basic / LLVM . h " <nl> # include " swift / IDE / SourceEntityWalker . h " <nl> # include " swift / Markup / Markup . h " <nl> struct SymbolGraphASTWalker : public SourceEntityWalker { <nl> / / / Options for collecting and serialization . <nl> const SymbolGraphOptions & Options ; <nl> <nl> + / / / A context for allocations . <nl> + markup : : MarkupContext Ctx ; <nl> + <nl> / / / The module that this symbol graph will represent . <nl> const ModuleDecl & M ; <nl> <nl> - / / / The symbol graph . <nl> + / / / The symbol graph for a module . <nl> SymbolGraph Graph ; <nl> <nl> - / / / A context for allocations . <nl> - markup : : MarkupContext Ctx ; <nl> - <nl> - / / / A cache of USRs for declarations . <nl> - llvm : : DenseMap < const ValueDecl * , StringRef > USRCache ; <nl> + / / / A map of modules whose types were extended by the main module of interest ` M ` . <nl> + llvm : : DenseMap < ModuleDecl * , SymbolGraph * > ExtendedModuleGraphs ; <nl> <nl> - / / MARK : - <nl> + / / MARK : - Initialization <nl> <nl> SymbolGraphASTWalker ( ModuleDecl & M , const SymbolGraphOptions & Options ) ; <nl> virtual ~ SymbolGraphASTWalker ( ) { } <nl> <nl> - / / MARK : - <nl> + / / MARK : - Utilities <nl> <nl> / / / Returns ` true ` if the symbol should be included as a node in the graph . <nl> bool shouldIncludeNode ( const Decl * D ) const ; <nl> <nl> - virtual bool walkToDeclPre ( Decl * D , CharSourceRange Range ) ; <nl> - <nl> - / / MARK : - Utilities and Conversions <nl> - <nl> - / / / Get the USR of a declaration and add it to the local allocator . <nl> - StringRef getUSR ( const ValueDecl * VD ) ; <nl> - <nl> - / / / Returns an array of path components for a declaration . <nl> - void getPathComponents ( const ValueDecl * VD , SmallVectorImpl < SmallString < 32 > > & Components ) ; <nl> - <nl> - / / MARK : - Declaration Fragments <nl> + / / / Get a " sub " symbol graph for the parent module of a type that the main module ` M ` is extending . <nl> + SymbolGraph & getExtendedModuleSymbolGraph ( ModuleDecl * M ) ; <nl> <nl> - / / / Get the base print options for declaration fragments . <nl> - PrintOptions getDeclarationFragmentsPrintOptions ( ) const ; <nl> + / / MARK : - SourceEntityWalker <nl> <nl> - / / / Serialize the overall declaration fragments for a ` ValueDecl ` . <nl> - void <nl> - serializeDeclarationFragments ( StringRef Key , const ValueDecl * VD , <nl> - llvm : : json : : OStream & OS ) ; <nl> - <nl> - / / / Get the overall declaration fragments for a ` ValueDecl ` when it is viewed <nl> - / / / as a subheading and / or part of a larger group of symbol listings . <nl> - void <nl> - serializeSubheadingDeclarationFragments ( StringRef Key , const ValueDecl * VD , <nl> - llvm : : json : : OStream & OS ) ; <nl> - <nl> - / / / Get the overall declaration for a type declaration . <nl> - void <nl> - serializeDeclarationFragments ( StringRef Key , Type T , <nl> - llvm : : json : : OStream & OS ) ; <nl> - <nl> - / / MARK : - Relationships ( Edges ) <nl> - <nl> - / * * <nl> - Record a relationship between two declarations as an edge in the graph . <nl> - <nl> - \ param Source The declaration serving as the source of the edge in the <nl> - directed graph . <nl> - \ param Target The declaration serving as the target of the edge in the <nl> - directed graph . <nl> - \ param Kind The kind of relationship the edge represents . 
<nl> - * / <nl> - void recordEdge ( const ValueDecl * Source , const ValueDecl * Target , <nl> - RelationshipKind Kind ) ; <nl> - <nl> - / * * <nl> - Record a MemberOf relationship , if the given declaration is nested <nl> - in another . <nl> - * / <nl> - void recordMemberRelationship ( const ValueDecl * VD ) ; <nl> - <nl> - / * * <nl> - Record InheritsFrom relationships for every class from which the <nl> - declaration inherits . <nl> - * / <nl> - void recordInheritanceRelationships ( const ValueDecl * VD ) ; <nl> - <nl> - / * * <nl> - If the declaration is a default implementation in a protocol extension , <nl> - record a DefaultImplementationOf relationship between the declaration and <nl> - the requirement . <nl> - * / <nl> - void recordDefaultImplementationRelationships ( const ValueDecl * VD ) ; <nl> - <nl> - / * * <nl> - Record a RequirementOf relationship if the declaration is a requirement <nl> - of a protocol . <nl> - * / <nl> - void recordRequirementRelationships ( const ValueDecl * VD ) ; <nl> - <nl> - / * * <nl> - If the declaration is an Objective - C - based optional protocol requirement , <nl> - record an OptionalRequirementOf relationship between the declaration <nl> - and its containing protocol . <nl> - * / <nl> - void recordOptionalRequirementRelationships ( const ValueDecl * VD ) ; <nl> - <nl> - / * * <nl> - Record ConformsTo relationships for each protocol conformance of <nl> - the declaration . <nl> - * / <nl> - void recordConformanceRelationships ( const ValueDecl * VD ) ; <nl> - <nl> - / * * <nl> - Records an Overrides relationship if the given declaration <nl> - overrides another . <nl> - * / <nl> - void recordOverrideRelationship ( const ValueDecl * VD ) ; <nl> + virtual bool walkToDeclPre ( Decl * D , CharSourceRange Range ) ; <nl> } ; <nl> <nl> } / / end namespace symbolgraphgen <nl> mmm a / lib / SymbolGraphGen / SymbolGraphGen . cpp <nl> ppp b / lib / SymbolGraphGen / SymbolGraphGen . cpp <nl> <nl> using namespace swift ; <nl> using namespace symbolgraphgen ; <nl> <nl> + namespace { <nl> + int serializeSymbolGraph ( SymbolGraph & SG , <nl> + const SymbolGraphOptions & Options ) { <nl> + SmallString < 256 > FileName ( SG . M . getNameStr ( ) ) ; <nl> + if ( SG . ExtendedModule . hasValue ( ) ) { <nl> + FileName . push_back ( ' @ ' ) ; <nl> + FileName . append ( SG . ExtendedModule . getValue ( ) - > getNameStr ( ) ) ; <nl> + } <nl> + FileName . append ( " . symbols . json " ) ; <nl> + <nl> + SmallString < 1024 > OutputPath ( Options . OutputDir ) ; <nl> + llvm : : sys : : path : : append ( OutputPath , FileName ) ; <nl> + <nl> + std : : error_code Error ; <nl> + llvm : : raw_fd_ostream OS ( OutputPath , Error , llvm : : sys : : fs : : FA_Write ) ; <nl> + if ( Error ) { <nl> + llvm : : errs ( ) < < " Couldn ' t open output file ' " < < OutputPath <nl> + < < " for writing : " <nl> + < < Error . message ( ) < < " \ n " ; <nl> + return EXIT_FAILURE ; <nl> + } <nl> + <nl> + llvm : : json : : OStream J ( OS , Options . PrettyPrint ? 2 : 0 ) ; <nl> + SG . serialize ( J ) ; <nl> + return EXIT_SUCCESS ; <nl> + } <nl> + <nl> + } / / end anonymous namespace <nl> + <nl> / / MARK : - Main Entry Point <nl> <nl> / / / Emit a symbol graph JSON file for a ` ModuleDecl ` . <nl> symbolgraphgen : : emitSymbolGraphForModule ( ModuleDecl * M , <nl> < < " Found " < < Walker . Graph . Nodes . size ( ) < < " symbols and " <nl> < < Walker . Graph . Edges . size ( ) < < " relationships . \ n " ; <nl> <nl> - std : : error_code Error ; <nl> - llvm : : raw_fd_ostream OS ( Options . 
OutputPath , Error , llvm : : sys : : fs : : FA_Write ) ; <nl> - if ( Error ) { <nl> - llvm : : errs ( ) < < " Couldn ' t open output file for writing : " <nl> - < < Error . message ( ) < < " \ n " ; <nl> - return EXIT_FAILURE ; <nl> - } <nl> + int Success = EXIT_SUCCESS ; <nl> <nl> - llvm : : json : : OStream J ( OS , Options . PrettyPrint ? 2 : 0 ) ; <nl> - Walker . Graph . serialize ( Walker , J ) ; <nl> + Success | = serializeSymbolGraph ( Walker . Graph , Options ) ; <nl> <nl> - return EXIT_SUCCESS ; <nl> + for ( auto Pair : Walker . ExtendedModuleGraphs ) { <nl> + Success | = serializeSymbolGraph ( * Pair . getSecond ( ) , Options ) ; <nl> + } <nl> + <nl> + return Success ; <nl> } <nl> new file mode 100644 <nl> index 000000000000 . . eec6a779e91e <nl> mmm / dev / null <nl> ppp b / test / SymbolGraph / Module / Extension . swift <nl> <nl> + / / RUN : % empty - directory ( % t ) <nl> + / / RUN : % target - build - swift % s - module - name Extension - emit - module - emit - module - path % t / <nl> + / / RUN : % target - swift - symbolgraph - extract - module - name Extension - I % t - pretty - print - output - dir % t <nl> + / / RUN : % FileCheck % s - - input - file % t / Extension @ Swift . symbols . json <nl> + <nl> + public extension String { <nl> + / / / Return something . <nl> + var something : String { <nl> + return " something " <nl> + } <nl> + } <nl> + <nl> + / / CHECK : module <nl> + / / CHECK - NEXT : " name " : " Extension " <nl> + <nl> + / / CHECK : " precise " : " s : SS9ExtensionE9somethingSSvp " <nl> + <nl> + / / CHECK : " kind " : " memberOf " <nl> + / / CHECK - NEXT : " source " : " s : SS9ExtensionE9somethingSSvp " <nl> + / / CHECK - NEXT : " target " : " s : SS " <nl> + <nl> + / / Extending ` String ` creates a memberOf relationship above . <nl> + / / However , it should not be included as a node because ` String ` <nl> + / / is owned by the Swift module . <nl> + / / rdar : / / 58876107 <nl> + / / CHECK - NOT : " precise " : " s : SS " <nl> similarity index 90 % <nl> rename from test / SymbolGraph / Module . swift <nl> rename to test / SymbolGraph / Module / Module . swift <nl> mmm a / test / SymbolGraph / Module . swift <nl> ppp b / test / SymbolGraph / Module / Module . swift <nl> <nl> / / RUN : % empty - directory ( % t ) <nl> / / RUN : % target - build - swift % s - module - name SymbolGraphModule - emit - module - emit - module - path % t / <nl> - / / RUN : % target - swift - symbolgraph - extract - module - name SymbolGraphModule - I % t - pretty - print - o % t / SymbolGraphModule . symbols . json <nl> + / / RUN : % target - swift - symbolgraph - extract - module - name SymbolGraphModule - I % t - pretty - print - output - dir % t <nl> / / RUN : % FileCheck % s - - input - file % t / SymbolGraphModule . symbols . json <nl> <nl> public struct S { <nl> mmm a / test / SymbolGraph / Relationships / ConformsTo . swift <nl> ppp b / test / SymbolGraph / Relationships / ConformsTo . swift <nl> <nl> / / RUN : % empty - directory ( % t ) <nl> / / RUN : % target - build - swift % s - module - name ConformsTo - emit - module - emit - module - path % t / <nl> - / / RUN : % target - swift - symbolgraph - extract - module - name ConformsTo - I % t - pretty - print - o % t / ConformsTo . symbols . json <nl> + / / RUN : % target - swift - symbolgraph - extract - module - name ConformsTo - I % t - pretty - print - output - dir % t <nl> / / RUN : % FileCheck % s - - input - file % t / ConformsTo . symbols . 
json <nl> <nl> public protocol P { <nl> mmm a / test / SymbolGraph / Relationships / DefaultImplementationOf . swift <nl> ppp b / test / SymbolGraph / Relationships / DefaultImplementationOf . swift <nl> <nl> / / RUN : % empty - directory ( % t ) <nl> / / RUN : % target - build - swift % s - module - name DefaultImplementationOf - emit - module - emit - module - path % t / <nl> - / / RUN : % target - swift - symbolgraph - extract - module - name DefaultImplementationOf - I % t - pretty - print - o % t / DefaultImplementationOf . symbols . json <nl> + / / RUN : % target - swift - symbolgraph - extract - module - name DefaultImplementationOf - I % t - pretty - print - output - dir % t <nl> / / RUN : % FileCheck % s - - input - file % t / DefaultImplementationOf . symbols . json <nl> <nl> public protocol P { <nl> mmm a / test / SymbolGraph / Relationships / InheritsFrom . swift <nl> ppp b / test / SymbolGraph / Relationships / InheritsFrom . swift <nl> <nl> / / RUN : % empty - directory ( % t ) <nl> / / RUN : % target - build - swift % s - module - name InheritsFrom - emit - module - emit - module - path % t / <nl> - / / RUN : % target - swift - symbolgraph - extract - module - name InheritsFrom - I % t - pretty - print - o % t / InheritsFrom . symbols . json <nl> + / / RUN : % target - swift - symbolgraph - extract - module - name InheritsFrom - I % t - pretty - print - output - dir % t <nl> / / RUN : % FileCheck % s - - input - file % t / InheritsFrom . symbols . json <nl> <nl> public class Base { } <nl> mmm a / test / SymbolGraph / Relationships / MemberOf . swift <nl> ppp b / test / SymbolGraph / Relationships / MemberOf . swift <nl> <nl> / / RUN : % empty - directory ( % t ) <nl> / / RUN : % target - build - swift % s - module - name MemberOf - emit - module - emit - module - path % t / <nl> - / / RUN : % target - swift - symbolgraph - extract - module - name MemberOf - I % t - pretty - print - o % t / MemberOf . symbols . json <nl> + / / RUN : % target - swift - symbolgraph - extract - module - name MemberOf - I % t - pretty - print - output - dir % t <nl> / / RUN : % FileCheck % s - - input - file % t / MemberOf . symbols . json <nl> <nl> public struct S { <nl> mmm a / test / SymbolGraph / Relationships / Overrides . swift <nl> ppp b / test / SymbolGraph / Relationships / Overrides . swift <nl> <nl> / / RUN : % empty - directory ( % t ) <nl> / / RUN : % target - build - swift % s - module - name Overrides - emit - module - emit - module - path % t / <nl> - / / RUN : % target - swift - symbolgraph - extract - module - name Overrides - I % t - pretty - print - o % t / Overrides . symbols . json <nl> + / / RUN : % target - swift - symbolgraph - extract - module - name Overrides - I % t - pretty - print - output - dir % t <nl> / / RUN : % FileCheck % s - - input - file % t / Overrides . symbols . json <nl> <nl> public class Base { <nl> mmm a / test / SymbolGraph / Relationships / RequirementOf . swift <nl> ppp b / test / SymbolGraph / Relationships / RequirementOf . swift <nl> <nl> / / RUN : % empty - directory ( % t ) <nl> / / RUN : % target - build - swift % s - module - name ConformsTo - emit - module - emit - module - path % t / <nl> - / / RUN : % target - swift - symbolgraph - extract - module - name ConformsTo - I % t - pretty - print - o % t / ConformsTo . symbols . json <nl> + / / RUN : % target - swift - symbolgraph - extract - module - name ConformsTo - I % t - pretty - print - output - dir % t <nl> / / RUN : % FileCheck % s - - input - file % t / ConformsTo . symbols . 
json <nl> <nl> public protocol P { <nl> mmm a / test / SymbolGraph / Relationships / TargetFallback . swift <nl> ppp b / test / SymbolGraph / Relationships / TargetFallback . swift <nl> <nl> / / RUN : % empty - directory ( % t ) <nl> / / RUN : % target - build - swift % s - module - name TargetFallback - emit - module - emit - module - path % t / <nl> - / / RUN : % target - swift - symbolgraph - extract - module - name TargetFallback - I % t - pretty - print - o % t / TargetFallback . symbols . json <nl> + / / RUN : % target - swift - symbolgraph - extract - module - name TargetFallback - I % t - pretty - print - output - dir % t <nl> / / RUN : % FileCheck % s - - input - file % t / TargetFallback . symbols . json <nl> <nl> public struct S : CustomStringConvertible { <nl> mmm a / test / SymbolGraph / Symbols / AccessLevelFilter / IncludeInternal . swift <nl> ppp b / test / SymbolGraph / Symbols / AccessLevelFilter / IncludeInternal . swift <nl> <nl> / / RUN : % empty - directory ( % t ) <nl> / / RUN : % target - build - swift % s - module - name IncludeInternal - emit - module - emit - module - path % t / <nl> - / / RUN : % target - swift - symbolgraph - extract - module - name IncludeInternal - I % t - pretty - print - o % t / IncludeInternal . symbols . json - minimum - access - level internal <nl> + / / RUN : % target - swift - symbolgraph - extract - module - name IncludeInternal - I % t - pretty - print - output - dir % t - minimum - access - level internal <nl> / / RUN : % FileCheck % s - - input - file % t / IncludeInternal . symbols . json <nl> <nl> public struct ShouldAppear { <nl> mmm a / test / SymbolGraph / Symbols / AccessLevelFilter / PublicDefault . swift <nl> ppp b / test / SymbolGraph / Symbols / AccessLevelFilter / PublicDefault . swift <nl> <nl> / / RUN : % empty - directory ( % t ) <nl> / / RUN : % target - build - swift % s - module - name PublicDefault - emit - module - emit - module - path % t / <nl> - / / RUN : % target - swift - symbolgraph - extract - module - name PublicDefault - I % t - pretty - print - o % t / PublicDefault . symbols . json <nl> + / / RUN : % target - swift - symbolgraph - extract - module - name PublicDefault - I % t - pretty - print - output - dir % t <nl> / / RUN : % FileCheck % s - - input - file % t / PublicDefault . symbols . json <nl> <nl> public struct ShouldAppear { <nl> mmm a / test / SymbolGraph / Symbols / AccessLevels . swift <nl> ppp b / test / SymbolGraph / Symbols / AccessLevels . swift <nl> <nl> / / RUN : % empty - directory ( % t ) <nl> / / RUN : % target - build - swift % s - module - name AccessLevels - emit - module - emit - module - path % t / <nl> - / / RUN : % target - swift - symbolgraph - extract - module - name AccessLevels - I % t - pretty - print - o % t / AccessLevels . symbols . json <nl> + / / RUN : % target - swift - symbolgraph - extract - module - name AccessLevels - I % t - pretty - print - output - dir % t <nl> / / RUN : % FileCheck % s - - input - file % t / AccessLevels . symbols . json <nl> <nl> / / CHECK : " accessLevel " : " public " <nl> mmm a / test / SymbolGraph / Symbols / DocComment . swift <nl> ppp b / test / SymbolGraph / Symbols / DocComment . swift <nl> <nl> / / RUN : % empty - directory ( % t ) <nl> / / RUN : % target - build - swift % s - module - name DocComment - emit - module - path % t / DocComment . swiftmodule <nl> - / / RUN : % target - swift - symbolgraph - extract - module - name DocComment - I % t - pretty - print - o % t / DocComment . symbols . 
json <nl> + / / RUN : % target - swift - symbolgraph - extract - module - name DocComment - I % t - pretty - print - output - dir % t <nl> / / RUN : % FileCheck % s - - input - file % t / DocComment . symbols . json <nl> <nl> / / CHECK : " text " : " Single line . " <nl> new file mode 100644 <nl> index 000000000000 . . b0fbf5a28e64 <nl> mmm / dev / null <nl> ppp b / test / SymbolGraph / Symbols / Identifier . swift <nl> <nl> + / / RUN : % empty - directory ( % t ) <nl> + / / RUN : % target - build - swift % s - module - name Identifier - emit - module - emit - module - path % t / <nl> + / / RUN : % target - swift - symbolgraph - extract - module - name Identifier - I % t - pretty - print - output - dir % t <nl> + / / RUN : % FileCheck % s - - input - file % t / Identifier . symbols . json <nl> + <nl> + public struct MyStruct { } <nl> + <nl> + / / CHECK : " precise " : " s : 10Identifier8MyStructV " , <nl> + / / CHECK - NEXT : " interfaceLanguage " : " swift " <nl> mmm a / test / SymbolGraph / Symbols / Kinds . swift <nl> ppp b / test / SymbolGraph / Symbols / Kinds . swift <nl> <nl> / / RUN : % empty - directory ( % t ) <nl> / / RUN : % target - build - swift % s - module - name Kinds - emit - module - emit - module - path % t / <nl> - / / RUN : % target - swift - symbolgraph - extract - module - name Kinds - I % t - pretty - print - o % t / Kinds . symbols . json <nl> + / / RUN : % target - swift - symbolgraph - extract - module - name Kinds - I % t - pretty - print - output - dir % t <nl> / / RUN : % FileCheck % s - - input - file % t / Kinds . symbols . json <nl> <nl> / / CHECK : " identifier " : " swift . class " <nl> mmm a / test / SymbolGraph / Symbols / Mixins / Availability / Availability . swift <nl> ppp b / test / SymbolGraph / Symbols / Mixins / Availability / Availability . swift <nl> <nl> / / RUN : % empty - directory ( % t ) <nl> / / RUN : % target - build - swift % s - module - name Availability - emit - module - emit - module - path % t / <nl> - / / RUN : % target - swift - symbolgraph - extract - module - name Availability - I % t - pretty - print - o % t / Availability . symbols . json <nl> + / / RUN : % target - swift - symbolgraph - extract - module - name Availability - I % t - pretty - print - output - dir % t <nl> / / RUN : % FileCheck % s - - input - file % t / Availability . symbols . json <nl> <nl> @ available ( macOS , introduced : 10 . 9 , deprecated : 10 . 10 , obsoleted : 10 . 11 , message : " Everyone makes mistakes " , renamed : " S2 " ) <nl> mmm a / test / SymbolGraph / Symbols / Mixins / Availability / UnconditionallyDeprecated . swift <nl> ppp b / test / SymbolGraph / Symbols / Mixins / Availability / UnconditionallyDeprecated . swift <nl> <nl> / / RUN : % empty - directory ( % t ) <nl> / / RUN : % target - build - swift % s - module - name UnconditionallyDeprecated - emit - module - emit - module - path % t / <nl> - / / RUN : % target - swift - symbolgraph - extract - module - name UnconditionallyDeprecated - I % t - pretty - print - o % t / UnconditionallyDeprecated . symbols . json <nl> + / / RUN : % target - swift - symbolgraph - extract - module - name UnconditionallyDeprecated - I % t - pretty - print - output - dir % t <nl> / / RUN : % FileCheck % s - - input - file % t / UnconditionallyDeprecated . symbols . json <nl> <nl> @ available ( * , deprecated ) <nl> mmm a / test / SymbolGraph / Symbols / Mixins / Availability / UnconditionallyUnavailable . 
swift <nl> ppp b / test / SymbolGraph / Symbols / Mixins / Availability / UnconditionallyUnavailable . swift <nl> <nl> / / RUN : % empty - directory ( % t ) <nl> / / RUN : % target - build - swift % s - module - name UnconditionallyUnavailable - emit - module - emit - module - path % t / <nl> - / / RUN : % target - swift - symbolgraph - extract - module - name UnconditionallyUnavailable - I % t - pretty - print - o % t / UnconditionallyUnavailable . symbols . json <nl> + / / RUN : % target - swift - symbolgraph - extract - module - name UnconditionallyUnavailable - I % t - pretty - print - output - dir % t <nl> / / RUN : % FileCheck % s - - input - file % t / UnconditionallyUnavailable . symbols . json <nl> <nl> @ available ( * , unavailable ) <nl> mmm a / test / SymbolGraph / Symbols / Mixins / DeclarationFragments . swift <nl> ppp b / test / SymbolGraph / Symbols / Mixins / DeclarationFragments . swift <nl> <nl> / / RUN : % empty - directory ( % t ) <nl> / / RUN : % target - build - swift % s - module - name DeclarationFragments - emit - module - emit - module - path % t / <nl> - / / RUN : % target - swift - symbolgraph - extract - module - name DeclarationFragments - I % t - pretty - print - o % t / DeclarationFragments . symbols . json <nl> + / / RUN : % target - swift - symbolgraph - extract - module - name DeclarationFragments - I % t - pretty - print - output - dir % t <nl> / / RUN : % FileCheck % s - - input - file % t / DeclarationFragments . symbols . json <nl> <nl> public func foo < S > ( f : @ escaping ( ) - > ( ) , x : Int = 2 , s : S ) { } <nl> mmm a / test / SymbolGraph / Symbols / Mixins / FunctionSignature . swift <nl> ppp b / test / SymbolGraph / Symbols / Mixins / FunctionSignature . swift <nl> <nl> / / RUN : % empty - directory ( % t ) <nl> / / RUN : % target - build - swift % s - module - name FunctionSignature - emit - module - emit - module - path % t / <nl> - / / RUN : % target - swift - symbolgraph - extract - module - name FunctionSignature - I % t - pretty - print - o % t / FunctionSignature . symbols . json <nl> + / / RUN : % target - swift - symbolgraph - extract - module - name FunctionSignature - I % t - pretty - print - output - dir % t <nl> / / RUN : % FileCheck % s - - input - file % t / FunctionSignature . symbols . json <nl> <nl> public func foo ( _ noext : Int , ext int : Int ) - > String { <nl> mmm a / test / SymbolGraph / Symbols / Names . swift <nl> ppp b / test / SymbolGraph / Symbols / Names . swift <nl> <nl> / / RUN : % empty - directory ( % t ) <nl> / / RUN : % target - build - swift % s - module - name Names - emit - module - emit - module - path % t / <nl> - / / RUN : % target - swift - symbolgraph - extract - module - name Names - I % t - pretty - print - o % t / Names . symbols . json <nl> + / / RUN : % target - swift - symbolgraph - extract - module - name Names - I % t - pretty - print - output - dir % t <nl> / / RUN : % FileCheck % s - - input - file % t / Names . symbols . json <nl> <nl> public struct MyStruct { } <nl> new file mode 100644 <nl> index 000000000000 . . 14a794bd16c6 <nl> mmm / dev / null <nl> ppp b / test / SymbolGraph / Symbols / PathComponents . swift <nl> <nl> + / / RUN : % empty - directory ( % t ) <nl> + / / RUN : % target - build - swift % s - module - name PathComponents - emit - module - emit - module - path % t / <nl> + / / RUN : % target - swift - symbolgraph - extract - module - name PathComponents - I % t - pretty - print - output - dir % t <nl> + / / RUN : % FileCheck % s - - input - file % t / PathComponents . 
symbols . json <nl> + <nl> + public struct Outer { <nl> + public struct Inner { <nl> + public var x = 1 <nl> + } <nl> + } <nl> + <nl> + / / CHECK : " precise " : " s : 14PathComponents5OuterV5InnerV1xSivp " <nl> + / / CHECK - NEXT : " interfaceLanguage " : " swift " <nl> + / / CHECK - NEXT : } , <nl> + / / CHECK - NEXT : " pathComponents " : [ <nl> + / / CHECK - NEXT : " Outer " <nl> + / / CHECK - NEXT : " Inner " <nl> + / / CHECK - NEXT : " x " <nl> + / / CHECK - NEXT : ] <nl> mmm a / test / SymbolGraph / Symbols / SkipsPublicUnderscore . swift <nl> ppp b / test / SymbolGraph / Symbols / SkipsPublicUnderscore . swift <nl> <nl> / / RUN : % empty - directory ( % t ) <nl> / / RUN : % target - build - swift % s - module - name SkipsPublicUnderscore - emit - module - emit - module - path % t / <nl> - / / RUN : % target - swift - symbolgraph - extract - module - name SkipsPublicUnderscore - I % t - pretty - print - o % t / SymbolGraphModule . symbols . json <nl> - / / RUN : % FileCheck % s - - input - file % t / SymbolGraphModule . symbols . json <nl> + / / RUN : % target - swift - symbolgraph - extract - module - name SkipsPublicUnderscore - I % t - pretty - print - output - dir % t <nl> + / / RUN : % FileCheck % s - - input - file % t / SkipsPublicUnderscore . symbols . json <nl> <nl> public struct _ShouldntAppear { <nl> public var shouldntAppear : Int <nl> mmm a / tools / driver / swift_symbolgraph_extract_main . cpp <nl> ppp b / tools / driver / swift_symbolgraph_extract_main . cpp <nl> Xcc ( " Xcc " , llvm : : cl : : desc ( " Pass the following command - line flag to Clang " ) , <nl> llvm : : cl : : cat ( Category ) ) ; <nl> <nl> static llvm : : cl : : opt < std : : string > <nl> - OutputPath ( " o " , llvm : : cl : : desc ( " Symbol Graph JSON Output Path ( Required ) " ) , llvm : : cl : : cat ( Category ) ) ; <nl> + OutputDir ( " output - dir " , llvm : : cl : : desc ( " Symbol Graph JSON Output Directory ( Required ) " ) , llvm : : cl : : cat ( Category ) ) ; <nl> } / / end namespace options <nl> <nl> static bool argumentsAreValid ( ) { <nl> static bool argumentsAreValid ( ) { <nl> Valid = false ; <nl> } <nl> <nl> - if ( options : : OutputPath . empty ( ) ) { <nl> - llvm : : errs ( ) < < " Required - o argument is missing \ n " ; <nl> + if ( options : : OutputDir . empty ( ) ) { <nl> + llvm : : errs ( ) < < " Required - output - dir argument is missing \ n " ; <nl> Valid = false ; <nl> } <nl> <nl> int swift_symbolgraph_extract_main ( ArrayRef < const char * > Args , const char * Argv <nl> return EXIT_FAILURE ; <nl> } <nl> <nl> + if ( ! llvm : : sys : : fs : : is_directory ( options : : OutputDir ) ) { <nl> + llvm : : errs ( ) < < " - output - dir argument ' " < < options : : OutputDir <nl> + < < " does not exist or is not a directory \ n " ; <nl> + return EXIT_FAILURE ; <nl> + } <nl> + <nl> CompilerInvocation Invocation ; <nl> <nl> Invocation . setMainExecutablePath ( <nl> int swift_symbolgraph_extract_main ( ArrayRef < const char * > Args , const char * Argv <nl> } <nl> <nl> symbolgraphgen : : SymbolGraphOptions Options { <nl> - options : : OutputPath , <nl> + options : : OutputDir , <nl> llvm : : Triple ( options : : Target ) , <nl> options : : PrettyPrint , <nl> AccessLevel : : Public , <nl>
|
Merge pull request from bitjammer / acgarland / rdar - 58941718 - separate - sgf - extended - modules
|
apple/swift
|
53f494cba2807696feaf3c79813292a25b86bc6a
|
2020-02-12T00:38:03Z
|
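The apple/swift row above splits symbol-graph emission so that a symbol extending a type from another module is routed into a separate graph, serialized as `Module@ExtendedModule.symbols.json`. A minimal C++ sketch of that routing pattern; `SymbolGraph`, `Walker`, and `fileNameFor` here are simplified stand-ins I introduced for illustration, not the compiler's actual types or API:

```cpp
#include <iostream>
#include <map>
#include <memory>
#include <string>
#include <vector>

// Simplified stand-ins; the real Swift compiler types are far richer.
struct SymbolGraph {
    std::string moduleName;          // module that owns this graph
    std::string extendedModule;      // empty for the main module's own graph
    std::vector<std::string> nodes;  // symbol identifiers (USRs) as strings
};

struct Walker {
    std::string mainModule;
    SymbolGraph graph{mainModule, "", {}};
    std::map<std::string, std::unique_ptr<SymbolGraph>> extendedGraphs;

    SymbolGraph& graphFor(const std::string& extendedModule) {
        auto& slot = extendedGraphs[extendedModule];
        if (!slot)
            slot = std::make_unique<SymbolGraph>(
                SymbolGraph{mainModule, extendedModule, {}});
        return *slot;
    }

    // Mirrors the walkToDeclPre logic in the diff: extensions of foreign
    // types go to that module's "sub" graph, everything else to the main one.
    void record(const std::string& usr, const std::string& extendedModule) {
        if (!extendedModule.empty() && extendedModule != mainModule) {
            graphFor(extendedModule).nodes.push_back(usr);
            return;
        }
        graph.nodes.push_back(usr);
    }
};

// File name scheme from serializeSymbolGraph:
// "Main.symbols.json" or "Main@Extended.symbols.json".
std::string fileNameFor(const SymbolGraph& sg) {
    std::string name = sg.moduleName;
    if (!sg.extendedModule.empty())
        name += "@" + sg.extendedModule;
    return name + ".symbols.json";
}

int main() {
    Walker w{"Extension"};
    w.record("s:SS9ExtensionE9somethingSSvp", "Swift");  // String belongs to Swift
    for (const auto& pair : w.extendedGraphs)
        std::cout << fileNameFor(*pair.second) << "\n";  // Extension@Swift.symbols.json
}
```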
mmm a / . github / workflows / linux . yml <nl> ppp b / . github / workflows / linux . yml <nl> jobs : <nl> - uses : actions / checkout @ v2 <nl> <nl> - name : Create Build Environment <nl> - env : <nl> - CXX : $ { { matrix . cxx } } <nl> run : cmake - E make_directory $ { { runner . workspace } } / build <nl> <nl> - name : Configure CMake <nl> working - directory : $ { { runner . workspace } } / build <nl> + env : <nl> + CXX : $ { { matrix . cxx } } <nl> run : cmake $ GITHUB_WORKSPACE - DCMAKE_BUILD_TYPE = $ BUILD_TYPE <nl> <nl> - name : Build <nl>
|
Update CI config
|
fmtlib/fmt
|
fe61b8c6305ac9fb7bc8be9c6f73d2a67dc1d806
|
2020-11-03T19:59:59Z
|
mmm a / depends / hosts / darwin . mk <nl> ppp b / depends / hosts / darwin . mk <nl> XCODE_BUILD_ID = 11C505 <nl> LD64_VERSION = 530 <nl> <nl> OSX_SDK = $ ( SDK_PATH ) / Xcode - $ ( XCODE_VERSION ) - $ ( XCODE_BUILD_ID ) - extracted - SDK - with - libcxx - headers <nl> + <nl> + # When cross - compiling for Darwin using Clang , - mlinker - version must be passed to <nl> + # ensure that modern linker features are enabled . <nl> darwin_CC = clang - target $ ( host ) - mmacosx - version - min = $ ( OSX_MIN_VERSION ) - - sysroot $ ( OSX_SDK ) - mlinker - version = $ ( LD64_VERSION ) <nl> darwin_CXX = clang + + - target $ ( host ) - mmacosx - version - min = $ ( OSX_MIN_VERSION ) - - sysroot $ ( OSX_SDK ) - stdlib = libc + + - mlinker - version = $ ( LD64_VERSION ) <nl> <nl>
|
Merge : doc : explain why passing - mlinker - version is required when cross - compiling
|
bitcoin/bitcoin
|
f61019f5a2c63d0a293e2ba2e57352d834f2c8d6
|
2020-07-03T09:38:27Z
|
mmm a / bindings / python / cntk / ops / __init__ . py <nl> ppp b / bindings / python / cntk / ops / __init__ . py <nl> def alias ( x , name = ' ' ) : <nl> <nl> @ typemap <nl> def cross_entropy_with_softmax ( output_vector , target_vector , axis = - 1 , name = ' ' ) : <nl> - ' ' ' <nl> - This operation computes the cross entropy over the softmax of the ` ` output_vector ` ` . <nl> - It expects the ` ` output_vector ` ` as unscaled , and it computes softmax over <nl> - the ` ` output_vector ` ` internally . Any ` ` output_vector ` ` input over which softmax is <nl> - already computed before passing to this operator will be incorrect . <nl> + r ' ' ' <nl> + This operation computes the cross entropy between the ` ` target_vector ` ` and <nl> + the softmax of the ` ` output_vector ` ` . The elements of ` ` target_vector ` ` <nl> + have to be non - negative and should sum to 1 . The ` ` output_vector ` ` can <nl> + contain any values . The function will internally compute the softmax of <nl> + the ` ` output_vector ` ` . Concretely , <nl> + <nl> + : math : ` \ mathrm { softmax } ( x ) = \ left [ \ frac { \ exp ( x_1 ) } { \ sum_i \ exp ( x_i ) } \ quad \ frac { \ exp ( x_1 ) } { \ sum_i \ exp ( x_i ) } \ quad \ ldots \ quad \ frac { \ exp ( x_1 ) } { \ sum_i \ exp ( x_i ) } \ right ] ` <nl> + <nl> + : math : ` \ mathrm { cross \ _entropy \ _with \ _softmax } ( o , t ) = - \ sum_ { i } t_i \ log ( \ mathrm { softmax } ( o ) _i ) ` <nl> <nl> - : math : ` cross \ _entropy \ _with \ _softmax ( o , t ) = { - { \ sum_ { i \ in \ { 1 , len ( t ) \ } } t_i \ log ( softmax ( o_i ) ) } } ` <nl> + with the understanding that the implementation can use equivalent formulas <nl> + for efficiency and numerical stability . <nl> <nl> Example : <nl> > > > C . cross_entropy_with_softmax ( [ [ 1 . , 1 . , 1 . , 50 . ] ] , [ [ 0 . , 0 . , 0 . , 1 . ] ] ) . eval ( ) <nl> def tanh ( x , name = ' ' ) : <nl> <nl> @ typemap <nl> def softmax ( x , name = ' ' ) : <nl> - ' ' ' <nl> - Squashes the input values ` ` x ` ` such that they add up to 1 : <nl> + r ' ' ' <nl> + Computes the gradient of : math : ` f ( z ) = \ log \ sum_i \ exp ( z_i ) ` at z = ` ` x ` ` . Concretely , <nl> + <nl> + : math : ` \ mathrm { softmax } ( x ) = \ left [ \ frac { \ exp ( x_1 ) } { \ sum_i \ exp ( x_i ) } \ quad \ frac { \ exp ( x_1 ) } { \ sum_i \ exp ( x_i ) } \ quad \ ldots \ quad \ frac { \ exp ( x_1 ) } { \ sum_i \ exp ( x_i ) } \ right ] ` <nl> <nl> - : math : ` softmax ( x ) = { \ exp ( x_i ) - \ max_ { x_i \ in x } ( \ exp ( x_i ) ) \ over { \ sum_ { x_i \ in x } \ exp ( x_i ) - \ max_ { x_i \ in x } ( \ exp ( x_i ) ) } } ` <nl> + with the understanding that the implementation can use equivalent formulas <nl> + for efficiency and numerical stability . <nl> <nl> - The term : math : ` \ max_ { x_i \ in x } ( \ exp ( x_i ) ) ` is subtracted for numerical <nl> - stability . <nl> + The output is a vector of non - negative numbers that sum to 1 and can <nl> + therefore be interpreted as probabilities for mutually exclusive outcomes <nl> + as in the case of multiclass classification . <nl> <nl> Example : <nl> > > > C . softmax ( [ [ 1 , 1 , 2 , 3 ] ] ) . eval ( ) <nl> def dropout ( x , dropout_rate = 0 . 
0 , name = ' ' ) : <nl> <nl> Args : <nl> x : input tensor <nl> - dropout_rate ( float , [ 0 , 1 ) ) : fraction of nodes to be set to zero <nl> - name ( ` str ` , optional ) : the name of the Function instance in the network <nl> + dropout_rate ( ` float ` , [ 0 , 1 ) ) : fraction of nodes to be set to zero <nl> + name ( : class : ` str ` , optional ) : the name of the Function instance in the network <nl> <nl> Returns : <nl> : class : ` cntk . ops . functions . Function ` <nl> mmm a / bindings / python / doc / conf . py <nl> ppp b / bindings / python / doc / conf . py <nl> <nl> extensions = [ <nl> ' sphinx . ext . autodoc ' , <nl> ' sphinx . ext . todo ' , <nl> + ' sphinx . ext . mathbase ' , <nl> ' sphinx . ext . mathjax ' , <nl> ' sphinx . ext . napoleon ' , <nl> ' sphinx . ext . viewcode ' , <nl>
|
better softmax docs
|
microsoft/CNTK
|
8f969b90d12f016f0367a079e911a201720ff326
|
2016-10-19T20:46:10Z
|
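In the docstring math added in the CNTK row above, every numerator of the softmax vector reads exp(x_1); presumably the i-th entry was meant to use x_i, which looks like a copy-paste slip in the docstring itself. For reference, the standard forms the docstring appears to be aiming at, in clean LaTeX (the stability note matches the diff's "equivalent formulas" caveat):

```latex
% i-th component, and the full vector with per-entry numerators:
\[
  \mathrm{softmax}(x)_i = \frac{\exp(x_i)}{\sum_j \exp(x_j)}, \qquad
  \mathrm{softmax}(x) = \left[ \frac{\exp(x_1)}{\sum_j \exp(x_j)} \quad
                               \frac{\exp(x_2)}{\sum_j \exp(x_j)} \quad
                               \cdots \quad
                               \frac{\exp(x_n)}{\sum_j \exp(x_j)} \right]
\]
% Cross entropy of targets t against the softmax of the raw outputs o:
\[
  \mathrm{cross\_entropy\_with\_softmax}(o, t)
    = -\sum_i t_i \log\bigl(\mathrm{softmax}(o)_i\bigr)
\]
% Numerically stable evaluation subtracts the max first; the value is
% unchanged because the shift cancels between numerator and denominator:
\[
  \mathrm{softmax}(x)_i
    = \frac{\exp(x_i - \max_k x_k)}{\sum_j \exp(x_j - \max_k x_k)}
\]
```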
mmm a / src / userspace / apps / PreferencesWindow / PreferencesWindow / Base . lproj / MainMenu . xib <nl> ppp b / src / userspace / apps / PreferencesWindow / PreferencesWindow / Base . lproj / MainMenu . xib <nl> Please modify karabiner . json by a text editor at the moment . < / string > <nl> < color key = " backgroundColor " name = " controlColor " catalog = " System " colorSpace = " catalog " / > <nl> < / textFieldCell > <nl> < / textField > <nl> + < button verticalHuggingPriority = " 750 " translatesAutoresizingMaskIntoConstraints = " NO " id = " 9Ub - kd - yPy " > <nl> + < rect key = " frame " x = " 11 " y = " 10 " width = " 209 " height = " 32 " / > <nl> + < buttonCell key = " cell " type = " push " title = " Quit Karabiner - Elements " bezelStyle = " rounded " image = " NSStopProgressFreestandingTemplate " imagePosition = " left " alignment = " center " borderStyle = " border " imageScaling = " proportionallyDown " inset = " 2 " id = " DMD - gs - Fc2 " > <nl> + < behavior key = " behavior " pushIn = " YES " lightByBackground = " YES " lightByGray = " YES " / > <nl> + < font key = " font " metaFont = " system " / > <nl> + < / buttonCell > <nl> + < connections > <nl> + < action selector = " quitWithConfirmation : " target = " 5ke - qB - Wr5 " id = " Z2I - rE - DWi " / > <nl> + < / connections > <nl> + < / button > <nl> < / subviews > <nl> < constraints > <nl> + < constraint firstAttribute = " bottom " secondItem = " 9Ub - kd - yPy " secondAttribute = " bottom " constant = " 17 " id = " 1YG - lr - JOU " / > <nl> < constraint firstItem = " 0WE - kI - jqf " firstAttribute = " centerX " secondItem = " aYZ - Ew - w2F " secondAttribute = " centerX " id = " B4f - Mh - jz2 " / > <nl> + < constraint firstItem = " 9Ub - kd - yPy " firstAttribute = " leading " secondItem = " aYZ - Ew - w2F " secondAttribute = " leading " constant = " 17 " id = " T3K - 25 - 0xX " / > <nl> < constraint firstItem = " 0WE - kI - jqf " firstAttribute = " centerY " secondItem = " aYZ - Ew - w2F " secondAttribute = " centerY " id = " sp3 - ft - eJq " / > <nl> < / constraints > <nl> < / view > <nl> This function is not implemented yet . < / string > <nl> < / view > <nl> < / window > <nl> < / objects > <nl> + < resources > <nl> + < image name = " NSStopProgressFreestandingTemplate " width = " 14 " height = " 14 " / > <nl> + < / resources > <nl> < / document > <nl> mmm a / src / userspace / apps / PreferencesWindow / PreferencesWindow / PreferencesWindowController . m <nl> ppp b / src / userspace / apps / PreferencesWindow / PreferencesWindow / PreferencesWindowController . m <nl> <nl> <nl> @ interface PreferencesWindowController ( ) <nl> <nl> - @ property ( weak ) IBOutlet LogFileTextViewController * grabberLogFileTextViewController ; <nl> - @ property ( weak ) IBOutlet LogFileTextViewController * consoleUserServerLogFileTextViewController ; <nl> + @ property ( weak ) IBOutlet LogFileTextViewController * grabberLogFileTextViewController ; <nl> + @ property ( weak ) IBOutlet LogFileTextViewController * consoleUserServerLogFileTextViewController ; <nl> <nl> @ end <nl> <nl> @ implementation PreferencesWindowController <nl> - ( void ) setup { <nl> [ self . grabberLogFileTextViewController monitor : @ " / var / log / karabiner / grabber_log . txt " ] ; <nl> [ self . consoleUserServerLogFileTextViewController monitor : [ NSString stringWithFormat : @ " % @ / . karabiner . d / log / console_user_server_log . 
txt " , NSHomeDirectory ( ) ] ] ; <nl> + <nl> + [ self launchctlConsoleUserServer : YES ] ; <nl> } <nl> <nl> - ( void ) show { <nl> - ( IBAction ) openURL : ( id ) sender { <nl> [ [ NSWorkspace sharedWorkspace ] openURL : [ NSURL URLWithString : [ sender title ] ] ] ; <nl> } <nl> <nl> + - ( IBAction ) quitWithConfirmation : ( id ) sender { <nl> + NSAlert * alert = [ NSAlert new ] ; <nl> + alert . messageText = @ " Are you sure you want to quit Karabiner - Elements ? " ; <nl> + alert . informativeText = @ " The changed key will be restored after Karabiner - Elements is quit . " ; <nl> + [ alert addButtonWithTitle : @ " Quit " ] ; <nl> + [ alert addButtonWithTitle : @ " Cancel " ] ; <nl> + if ( [ alert runModal ] = = NSAlertFirstButtonReturn ) { <nl> + [ self launchctlConsoleUserServer : NO ] ; <nl> + [ NSApp terminate : nil ] ; <nl> + } <nl> + } <nl> + <nl> + - ( void ) launchctlConsoleUserServer : ( BOOL ) load { <nl> + NSTask * task = [ NSTask launchedTaskWithLaunchPath : @ " / bin / launchctl " <nl> + arguments : @ [ <nl> + load ? @ " load " : @ " unload " , <nl> + @ " - w " , <nl> + @ " / Library / LaunchAgents / org . pqrs . karabiner . karabiner_console_user_server . plist " , <nl> + ] ] ; <nl> + [ task waitUntilExit ] ; <nl> + } <nl> + <nl> @ end <nl> mmm a / src / userspace / core / console_user_server / Makefile <nl> ppp b / src / userspace / core / console_user_server / Makefile <nl> clean : <nl> xcode : <nl> open * . xcodeproj <nl> <nl> - run : <nl> - . / build / Release / karabiner_console_user_server <nl> + install : all <nl> + sudo install - m 755 . / build / Release / karabiner_console_user_server ' / Library / Application Support / org . pqrs / Karabiner - Elements / bin / karabiner_console_user_server ' <nl> + sudo killall karabiner_console_user_server <nl>
|
call launchctl in PreferencesWindowController
|
pqrs-org/Karabiner-Elements
|
13477939c8aee69ea30f1f96854b8e2e39cff17c
|
2016-09-08T08:17:13Z
|
mmm a / math / large_number . h <nl> ppp b / math / large_number . h <nl> class large_number { <nl> size_t i ; <nl> uint64_t carry = 0 , temp ; <nl> for ( i = 0 ; i < this - > num_digits ( ) ; i + + ) { <nl> - temp = ( * this ) [ i ] * n ; <nl> + temp = static_cast < uint64_t > ( ( * this ) [ i ] ) * n ; <nl> temp + = carry ; <nl> if ( temp < 10 ) { <nl> carry = 0 ; <nl>
|
make multiplication 64 - bit
|
TheAlgorithms/C-Plus-Plus
|
8736dce71a47f44c5fc62d32287245c4f0e1d790
|
2020-06-22T20:21:57Z
|
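The one-line fix above widens one operand to 64 bits before multiplying. Without the cast, two 32-bit operands are multiplied in 32 bits and the product wraps modulo 2^32 before it is ever stored in the 64-bit temporary. A self-contained illustration; the values are made up for the demo:

```cpp
#include <cstdint>
#include <iostream>

int main() {
    uint32_t digit = 9;      // stand-in for (*this)[i]
    uint32_t n = 600000000;  // stand-in for the multiplier

    // 32-bit multiply, then widen: the true product 5.4e9 has already
    // wrapped modulo 2^32 by the time it reaches the uint64_t.
    uint64_t wrong = digit * n;

    // Widen first, then multiply: the whole operation runs in 64 bits.
    uint64_t right = static_cast<uint64_t>(digit) * n;

    std::cout << wrong << "\n";  // 1105032704 (wrapped)
    std::cout << right << "\n";  // 5400000000
}
```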
mmm a / utils / swift_build_sdk_interfaces . py <nl> ppp b / utils / swift_build_sdk_interfaces . py <nl> def process_module_files ( pool , module_files ) : <nl> return overall_exit_status <nl> <nl> <nl> + def getSDKVersion ( sdkroot ) : <nl> + settingPath = os . path . join ( sdkroot , ' SDKSettings . json ' ) <nl> + with open ( settingPath ) as json_file : <nl> + data = json . load ( json_file ) <nl> + return data [ ' Version ' ] <nl> + fatal ( " Failed to get SDK version from : " + settingPath ) <nl> + <nl> + <nl> def main ( ) : <nl> global args , shared_output_lock <nl> parser = create_parser ( ) <nl> def main ( ) : <nl> if not os . path . isdir ( args . sdk ) : <nl> fatal ( " invalid SDK : " + args . sdk ) <nl> <nl> + # if the given output dir ends with ' prebuilt - modules ' , we should <nl> + # append the SDK version number so all modules will built into <nl> + # the SDK - versioned sub - directory . <nl> + if os . path . basename ( args . output_dir ) = = ' prebuilt - modules ' : <nl> + args . output_dir = os . path . join ( args . output_dir , getSDKVersion ( args . sdk ) ) <nl> + <nl> xfails = ( ) <nl> if args . ignore_non_stdlib_failures : <nl> if args . xfails : <nl>
|
swift_build_sdk_interfaces . py : adjust output directory path to an SDK - versioned directory
|
apple/swift
|
dbd16d2e05108bc922ab87fc40f2a0b7891ea5e9
|
2020-08-29T04:58:37Z
|
mmm a / PowerEditor / src / NppNotification . cpp <nl> ppp b / PowerEditor / src / NppNotification . cpp <nl> BOOL Notepad_plus : : notify ( SCNotification * notification ) <nl> if ( pBuf ! = currentBufMain & & pBuf ! = currentBufSub ) / / if hover on other tab <nl> { <nl> _documentPeeker . doDialog ( p , pBuf , * ( const_cast < ScintillaEditView * > ( pTabDocView - > getScintillaEditView ( ) ) ) ) ; <nl> + _pEditView - > getFocus ( ) ; <nl> } <nl> else / / if hover on current active tab <nl> { <nl> mmm a / PowerEditor / src / WinControls / DocumentMap / documentSnapshot . cpp <nl> ppp b / PowerEditor / src / WinControls / DocumentMap / documentSnapshot . cpp <nl> void DocumentPeeker : : syncDisplay ( Buffer * buf , ScintillaEditView & scintSource ) <nl> _pPeekerView - > showMargin ( 2 , false ) ; <nl> _pPeekerView - > showMargin ( 3 , false ) ; <nl> <nl> - _pPeekerView - > execute ( SCI_SETREADONLY , true ) ; <nl> _pPeekerView - > execute ( SCI_SETCARETSTYLE , CARETSTYLE_INVISIBLE ) ; <nl> Window : : display ( ) ; <nl> } <nl>
|
Fix replace in files regression .
|
notepad-plus-plus/notepad-plus-plus
|
dbe2b698b14b015a002d69187b3c3e2f1bcac952
|
2017-06-11T11:07:48Z
|
mmm a / googletest / include / gtest / gtest . h <nl> ppp b / googletest / include / gtest / gtest . h <nl> AssertionResult CmpHelperEQ ( const char * lhs_expression , <nl> const char * rhs_expression , <nl> const T1 & lhs , <nl> const T2 & rhs ) { <nl> - GTEST_DISABLE_MSC_WARNINGS_PUSH_ ( 4389 / * signed / unsigned mismatch * / ) <nl> if ( lhs = = rhs ) { <nl> return AssertionSuccess ( ) ; <nl> } <nl> - GTEST_DISABLE_MSC_WARNINGS_POP_ ( ) <nl> <nl> return CmpHelperEQFailure ( lhs_expression , rhs_expression , lhs , rhs ) ; <nl> } <nl>
|
Re - enable MSVC + + C4389 warning in CmpHelperEQ ( )
|
google/googletest
|
7990f56637d68573358c22369ec5f1f540cc9532
|
2017-12-21T15:35:38Z
|
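MSVC's C4389 is the signed/unsigned-mismatch warning that the removed pragma had been suppressing inside `CmpHelperEQ`; after the commit above, an `EXPECT_EQ` on mixed-signedness operands surfaces the warning at the call site instead of being silently swallowed. A minimal example of the comparison class it flags, with illustrative values:

```cpp
#include <iostream>

int main() {
    int signedValue = -1;
    unsigned unsignedValue = 0xFFFFFFFFu;

    // The signed operand is implicitly converted to unsigned, so -1
    // becomes 4294967295 and the two compare equal, which is usually
    // not what the author intended.
    if (signedValue == unsignedValue)  // C4389 (MSVC /W4) or -Wsign-compare
        std::cout << "equal after implicit conversion\n";
}
```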
mmm a / tensorflow / g3doc / api_docs / python / framework . md <nl> ppp b / tensorflow / g3doc / api_docs / python / framework . md <nl> and computations occur . Using ` DeviceSpec ` allows you to parse device spec <nl> strings to verify their validity , merge them or compose them programmatically . <nl> <nl> Example : <nl> + <nl> ` ` ` python <nl> # Place the operations on device " GPU : 0 " in the " ps " job . <nl> device_spec = DeviceSpec ( job = " ps " , device_type = " GPU " , device_index = 0 ) <nl> mmm a / tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . DeviceSpec . md <nl> ppp b / tensorflow / g3doc / api_docs / python / functions_and_classes / shard9 / tf . DeviceSpec . md <nl> and computations occur . Using ` DeviceSpec ` allows you to parse device spec <nl> strings to verify their validity , merge them or compose them programmatically . <nl> <nl> Example : <nl> + <nl> ` ` ` python <nl> # Place the operations on device " GPU : 0 " in the " ps " job . <nl> device_spec = DeviceSpec ( job = " ps " , device_type = " GPU " , device_index = 0 ) <nl>
|
Update generated Python Op docs .
|
tensorflow/tensorflow
|
2e9a081e22557bad30c7ac46ec4208f65d089758
|
2016-06-08T08:17:38Z
|
mmm a / src / compiler / js - native - context - specialization . cc <nl> ppp b / src / compiler / js - native - context - specialization . cc <nl> Reduction JSNativeContextSpecialization : : ReduceKeyedAccess ( <nl> / / that is non - configurable , non - writable ( i . e . the { receiver } was <nl> / / frozen using Object . freeze ) . <nl> NumberMatcher mindex ( index ) ; <nl> - if ( mindex . IsInteger ( ) & & mindex . IsInRange ( 0 . 0 , kMaxUInt32 ) ) { <nl> + if ( mindex . IsInteger ( ) & & mindex . IsInRange ( 0 . 0 , kMaxUInt32 - 1 . 0 ) ) { <nl> LookupIterator it ( isolate ( ) , mreceiver . Value ( ) , <nl> static_cast < uint32_t > ( mindex . Value ( ) ) , <nl> LookupIterator : : OWN ) ; <nl> new file mode 100644 <nl> index 00000000000 . . d1041f32ce1 <nl> mmm / dev / null <nl> ppp b / test / mjsunit / regress / regress - crbug - 768367 . js <nl> <nl> + / / Copyright 2017 the V8 project authors . All rights reserved . <nl> + / / Use of this source code is governed by a BSD - style license that can be <nl> + / / found in the LICENSE file . <nl> + <nl> + / / Flags : - - allow - natives - syntax <nl> + <nl> + const o = { } ; <nl> + <nl> + function foo ( ) { return o [ 4294967295 ] ; } <nl> + <nl> + assertEquals ( undefined , foo ( ) ) ; <nl> + assertEquals ( undefined , foo ( ) ) ; <nl> + % OptimizeFunctionOnNextCall ( foo ) ; <nl> + assertEquals ( undefined , foo ( ) ) ; <nl>
|
[ turbofan ] Fix off - by - one in constant - folding of frozen elements .
|
v8/v8
|
adfaf74d33b88dfd69a7c7f7a210dc62cf78d525
|
2017-09-27T05:43:25Z
|
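Context for the off-by-one above: an ECMAScript array index is an integer in [0, 2^32 - 2], so 2^32 - 1 (`kMaxUInt32`, the 4294967295 exercised by the regression test) is an ordinary property key rather than an element index, and an inclusive range check must stop at `kMaxUInt32 - 1`. A standalone sketch of the corrected bound, not V8's actual code:

```cpp
#include <cstdint>
#include <iostream>

constexpr double kMaxUInt32 = 4294967295.0;

bool isValidArrayIndex(double value) {
    // Range first: this rejects 2^32 - 1 and anything beyond, and also
    // keeps the cast below within uint32_t's representable range.
    if (value < 0.0 || value > kMaxUInt32 - 1.0)
        return false;
    // Then require an integer-valued double.
    return value == static_cast<uint32_t>(value);
}

int main() {
    std::cout << isValidArrayIndex(4294967294.0) << "\n";  // 1: largest index
    std::cout << isValidArrayIndex(4294967295.0) << "\n";  // 0: property key only
}
```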
mmm a / ci / docker / Dockerfile . build . arm64 <nl> ppp b / ci / docker / Dockerfile . build . arm64 <nl> <nl> # <nl> # Dockerfile to build MXNet for ARM64 / ARMv8 <nl> <nl> - FROM dockcross / linux - arm64 <nl> + # Temporary fix due to https : / / github . com / apache / incubator - mxnet / issues / 10837 <nl> + # FROM dockcross / linux - arm64 <nl> + FROM mxnetci / dockcross - linux - arm64 : 05082018 <nl> <nl> ENV ARCH aarch64 <nl> - ENV CC / usr / bin / aarch64 - linux - gnu - gcc <nl> - ENV CXX / usr / bin / aarch64 - linux - gnu - g + + <nl> - ENV FC / usr / bin / aarch64 - linux - gnu - gfortran - 4 . 9 <nl> + ENV FC / usr / bin / $ { CROSS_TRIPLE } - gfortran <nl> ENV HOSTCC gcc <nl> + ENV TARGET ARMV8 <nl> <nl> WORKDIR / work <nl> <nl> - COPY install / arm64_openblas . sh / work / <nl> - RUN / work / arm64_openblas . sh <nl> + # Build OpenBLAS <nl> + RUN git clone - - recursive - b v0 . 2 . 20 https : / / github . com / xianyi / OpenBLAS . git & & \ <nl> + cd OpenBLAS & & \ <nl> + make - j $ ( nproc ) & & \ <nl> + PREFIX = $ { CROSS_ROOT } make install <nl> <nl> - ENV LD_LIBRARY_PATH / opt / OpenBLAS / lib <nl> - ENV CPLUS_INCLUDE_PATH / opt / OpenBLAS / include <nl> + COPY runtime_functions . sh / work / <nl> WORKDIR / work / mxnet <nl> - <nl> - COPY runtime_functions . sh / work / <nl> \ No newline at end of file <nl> mmm a / ci / docker / Dockerfile . build . jetson <nl> ppp b / ci / docker / Dockerfile . build . jetson <nl> <nl> <nl> FROM nvidia / cuda : 9 . 0 - cudnn7 - devel as cudabuilder <nl> <nl> - FROM dockcross / linux - arm64 <nl> + # Temporary fix due to https : / / github . com / apache / incubator - mxnet / issues / 10837 <nl> + # FROM dockcross / linux - arm64 <nl> + FROM mxnetci / dockcross - linux - arm64 : 05082018 <nl> <nl> ENV ARCH aarch64 <nl> ENV FC / usr / bin / $ { CROSS_TRIPLE } - gfortran <nl> ENV TARGET ARMV8 <nl> WORKDIR / work <nl> <nl> # Build OpenBLAS <nl> - ADD https : / / api . github . com / repos / xianyi / OpenBLAS / git / refs / tags / v0 . 2 . 20 openblas_version . json <nl> RUN git clone - - recursive - b v0 . 2 . 20 https : / / github . com / xianyi / OpenBLAS . git & & \ <nl> cd OpenBLAS & & \ <nl> make - j $ ( nproc ) & & \ <nl>
|
[ MXNET - 409 ] Temporary fix for ARM64 builds - switch to own dockerhub ( )
|
apache/incubator-mxnet
|
4aff7defae1d88eccd49d1a4d01c799dd4477b8d
|
2018-05-08T23:16:36Z
|
mmm a / appendix / control_led / main . cpp <nl> ppp b / appendix / control_led / main . cpp <nl> <nl> # include " hid_manager . hpp " <nl> <nl> namespace { <nl> - class control_led final { <nl> + class control_led final : public pqrs : : dispatcher : : extra : : dispatcher_client { <nl> public : <nl> control_led ( const control_led & ) = delete ; <nl> <nl> - control_led ( bool led_state ) { <nl> + control_led ( std : : weak_ptr < pqrs : : dispatcher : : dispatcher > weak_dispatcher , <nl> + bool led_state ) : dispatcher_client ( weak_dispatcher ) { <nl> std : : vector < std : : pair < krbn : : hid_usage_page , krbn : : hid_usage > > targets ( { <nl> std : : make_pair ( krbn : : hid_usage_page : : generic_desktop , krbn : : hid_usage : : gd_keyboard ) , <nl> } ) ; <nl> <nl> - hid_manager_ = std : : make_unique < krbn : : hid_manager > ( targets ) ; <nl> + hid_manager_ = std : : make_unique < krbn : : hid_manager > ( weak_dispatcher , <nl> + targets ) ; <nl> <nl> hid_manager_ - > device_detected . connect ( [ led_state ] ( auto & & weak_hid ) { <nl> if ( auto hid = weak_hid . lock ( ) ) { <nl> int main ( int argc , const char * argv [ ] ) { <nl> return 1 ; <nl> } <nl> <nl> - auto p = std : : make_unique < control_led > ( std : : string ( argv [ 1 ] ) = = " on " ) ; <nl> + auto time_source = std : : make_shared < pqrs : : dispatcher : : hardware_time_source > ( ) ; <nl> + auto dispatcher = std : : make_shared < pqrs : : dispatcher : : dispatcher > ( time_source ) ; <nl> + <nl> + auto p = std : : make_unique < control_led > ( dispatcher , <nl> + std : : string ( argv [ 1 ] ) = = " on " ) ; <nl> <nl> CFRunLoopRun ( ) ; <nl> <nl> mmm a / appendix / dump_hid_report / main . cpp <nl> ppp b / appendix / dump_hid_report / main . cpp <nl> <nl> # include " hid_manager . hpp " <nl> <nl> namespace { <nl> - class dump_hid_report final { <nl> + class dump_hid_report final : public pqrs : : dispatcher : : extra : : dispatcher_client { <nl> public : <nl> dump_hid_report ( const dump_hid_report & ) = delete ; <nl> <nl> - dump_hid_report ( void ) { <nl> + dump_hid_report ( std : : weak_ptr < pqrs : : dispatcher : : dispatcher > weak_dispatcher ) : dispatcher_client ( weak_dispatcher ) { <nl> std : : vector < std : : pair < krbn : : hid_usage_page , krbn : : hid_usage > > targets ( { <nl> std : : make_pair ( krbn : : hid_usage_page : : generic_desktop , krbn : : hid_usage : : gd_keyboard ) , <nl> std : : make_pair ( krbn : : hid_usage_page : : generic_desktop , krbn : : hid_usage : : gd_mouse ) , <nl> std : : make_pair ( krbn : : hid_usage_page : : generic_desktop , krbn : : hid_usage : : gd_pointer ) , <nl> } ) ; <nl> <nl> - hid_manager_ = std : : make_unique < krbn : : hid_manager > ( targets ) ; <nl> + hid_manager_ = std : : make_unique < krbn : : hid_manager > ( weak_dispatcher , <nl> + targets ) ; <nl> <nl> hid_manager_ - > device_detected . connect ( [ this ] ( auto & & weak_hid ) { <nl> if ( auto hid = weak_hid . lock ( ) ) { <nl> int main ( int argc , const char * argv [ ] ) { <nl> CFRunLoopStop ( CFRunLoopGetMain ( ) ) ; <nl> } ) ; <nl> <nl> - auto d = std : : make_unique < dump_hid_report > ( ) ; <nl> + auto time_source = std : : make_shared < pqrs : : dispatcher : : hardware_time_source > ( ) ; <nl> + auto dispatcher = std : : make_shared < pqrs : : dispatcher : : dispatcher > ( time_source ) ; <nl> + <nl> + auto d = std : : make_unique < dump_hid_report > ( dispatcher ) ; <nl> <nl> CFRunLoopRun ( ) ; <nl> <nl> mmm a / appendix / dump_hid_value / main . 
cpp <nl> ppp b / appendix / dump_hid_value / main . cpp <nl> class dump_hid_value final : public pqrs : : dispatcher : : extra : : dispatcher_client { <nl> std : : make_pair ( krbn : : hid_usage_page : : generic_desktop , krbn : : hid_usage : : gd_pointer ) , <nl> } ) ; <nl> <nl> - hid_manager_ = std : : make_unique < krbn : : hid_manager > ( targets ) ; <nl> + hid_manager_ = std : : make_unique < krbn : : hid_manager > ( weak_dispatcher , <nl> + targets ) ; <nl> <nl> hid_manager_ - > device_detected . connect ( [ this ] ( auto & & weak_hid ) { <nl> enqueue_to_dispatcher ( [ this , weak_hid ] { <nl> mmm a / appendix / grabbable_state_manager / main . cpp <nl> ppp b / appendix / grabbable_state_manager / main . cpp <nl> class grabbable_state_manager_demo final : public pqrs : : dispatcher : : extra : : dispa <nl> std : : make_pair ( krbn : : hid_usage_page : : generic_desktop , krbn : : hid_usage : : gd_pointer ) , <nl> } ) ; <nl> <nl> - hid_manager_ = std : : make_unique < krbn : : hid_manager > ( targets ) ; <nl> + hid_manager_ = std : : make_unique < krbn : : hid_manager > ( weak_dispatcher , <nl> + targets ) ; <nl> <nl> hid_manager_ - > device_detected . connect ( [ this ] ( auto & & weak_hid ) { <nl> enqueue_to_dispatcher ( [ this , weak_hid ] { <nl> mmm a / appendix / hid_set_report / main . cpp <nl> ppp b / appendix / hid_set_report / main . cpp <nl> <nl> # include " hid_manager . hpp " <nl> <nl> namespace { <nl> - class hid_set_report final { <nl> + class hid_set_report final : public pqrs : : dispatcher : : extra : : dispatcher_client { <nl> public : <nl> hid_set_report ( const hid_set_report & ) = delete ; <nl> <nl> - hid_set_report ( void ) { <nl> + hid_set_report ( std : : weak_ptr < pqrs : : dispatcher : : dispatcher > weak_dispatcher ) : dispatcher_client ( weak_dispatcher ) { <nl> std : : vector < std : : pair < krbn : : hid_usage_page , krbn : : hid_usage > > targets ( { <nl> std : : make_pair ( krbn : : hid_usage_page : : generic_desktop , krbn : : hid_usage : : gd_keyboard ) , <nl> } ) ; <nl> <nl> - hid_manager_ = std : : make_unique < krbn : : hid_manager > ( targets ) ; <nl> + hid_manager_ = std : : make_unique < krbn : : hid_manager > ( weak_dispatcher , <nl> + targets ) ; <nl> <nl> hid_manager_ - > device_detected . connect ( [ ] ( auto & & weak_hid ) { <nl> if ( auto hid = weak_hid . lock ( ) ) { <nl> int main ( int argc , const char * argv [ ] ) { <nl> CFRunLoopStop ( CFRunLoopGetMain ( ) ) ; <nl> } ) ; <nl> <nl> - hid_set_report hid_set_report ; <nl> + auto time_source = std : : make_shared < pqrs : : dispatcher : : hardware_time_source > ( ) ; <nl> + auto dispatcher = std : : make_shared < pqrs : : dispatcher : : dispatcher > ( time_source ) ; <nl> + <nl> + hid_set_report hid_set_report ( dispatcher ) ; <nl> <nl> CFRunLoopRun ( ) ; <nl> <nl> mmm a / src / core / grabber / include / device_grabber . hpp <nl> ppp b / src / core / grabber / include / device_grabber . hpp <nl> class device_grabber final : public pqrs : : dispatcher : : extra : : dispatcher_client { <nl> std : : make_pair ( hid_usage_page : : generic_desktop , hid_usage : : gd_pointer ) , <nl> } ) ; <nl> <nl> - hid_manager_ = std : : make_unique < hid_manager > ( targets ) ; <nl> + hid_manager_ = std : : make_unique < hid_manager > ( weak_dispatcher_ , <nl> + targets ) ; <nl> <nl> hid_manager_ - > device_detecting . connect ( [ ] ( auto & & device ) { <nl> if ( iokit_utility : : is_karabiner_virtual_hid_device ( device ) ) { <nl> mmm a / src / core / observer / include / device_observer . 
hpp <nl> ppp b / src / core / observer / include / device_observer . hpp <nl> class device_observer final : public pqrs : : dispatcher : : extra : : dispatcher_client <nl> std : : make_pair ( hid_usage_page : : generic_desktop , hid_usage : : gd_pointer ) , <nl> } ) ; <nl> <nl> - hid_manager_ = std : : make_unique < hid_manager > ( targets ) ; <nl> + hid_manager_ = std : : make_unique < hid_manager > ( weak_dispatcher_ , <nl> + targets ) ; <nl> <nl> hid_manager_ - > device_detecting . connect ( [ ] ( auto & & device ) { <nl> if ( iokit_utility : : is_karabiner_virtual_hid_device ( device ) ) { <nl> mmm a / src / lib / libkrbn / src / libkrbn_hid_value_observer . cpp <nl> ppp b / src / lib / libkrbn / src / libkrbn_hid_value_observer . cpp <nl> class libkrbn_hid_value_observer_class final { <nl> std : : make_pair ( krbn : : hid_usage_page : : generic_desktop , krbn : : hid_usage : : gd_keyboard ) , <nl> } ) ; <nl> <nl> - hid_manager_ = std : : make_unique < krbn : : hid_manager > ( targets ) ; <nl> + hid_manager_ = std : : make_unique < krbn : : hid_manager > ( libkrbn_cpp : : get_weak_dispatcher ( ) , <nl> + targets ) ; <nl> <nl> hid_manager_ - > device_detected . connect ( [ this ] ( auto & & weak_hid ) { <nl> if ( auto hid = weak_hid . lock ( ) ) { <nl> mmm a / src / share / hid_manager . hpp <nl> ppp b / src / share / hid_manager . hpp <nl> <nl> # include " boost_utility . hpp " <nl> # include " cf_utility . hpp " <nl> # include " device_detail . hpp " <nl> + # include " dispatcher . hpp " <nl> # include " human_interface_device . hpp " <nl> # include " logger . hpp " <nl> # include " thread_utility . hpp " <nl> <nl> # include < unordered_map > <nl> <nl> namespace krbn { <nl> - class hid_manager final { <nl> + class hid_manager final : public pqrs : : dispatcher : : extra : : dispatcher_client { <nl> public : <nl> / / Signals <nl> <nl> class hid_manager final { <nl> <nl> hid_manager ( const hid_manager & ) = delete ; <nl> <nl> - hid_manager ( const std : : vector < std : : pair < hid_usage_page , hid_usage > > & usage_pairs ) : usage_pairs_ ( usage_pairs ) , <nl> - manager_ ( nullptr ) { <nl> + hid_manager ( std : : weak_ptr < pqrs : : dispatcher : : dispatcher > weak_dispatcher , <nl> + const std : : vector < std : : pair < hid_usage_page , hid_usage > > & usage_pairs ) : dispatcher_client ( weak_dispatcher ) , <nl> + usage_pairs_ ( usage_pairs ) , <nl> + manager_ ( nullptr ) , <nl> + refresh_timer_ ( * this ) { <nl> run_loop_thread_ = std : : make_shared < cf_utility : : run_loop_thread > ( ) ; <nl> } <nl> <nl> class hid_manager final { <nl> run_loop_thread_ - > get_run_loop ( ) , <nl> kCFRunLoopDefaultMode ) ; <nl> <nl> - refresh_timer_ = std : : make_unique < thread_utility : : timer > ( <nl> - std : : chrono : : milliseconds ( 5000 ) , <nl> - thread_utility : : timer : : mode : : repeat , <nl> + refresh_timer_ . start ( <nl> [ this ] { <nl> run_loop_thread_ - > enqueue ( ^ { <nl> refresh_if_needed ( ) ; <nl> } ) ; <nl> - } ) ; <nl> + } , <nl> + std : : chrono : : milliseconds ( 5000 ) ) ; <nl> <nl> logger : : get_logger ( ) . info ( " hid_manager is started . " ) ; <nl> } ) ; <nl> class hid_manager final { <nl> <nl> / / refresh_timer_ <nl> <nl> - refresh_timer_ - > cancel ( ) ; <nl> - refresh_timer_ = nullptr ; <nl> + refresh_timer_ . 
stop ( ) ; <nl> <nl> / / manager_ <nl> <nl> class hid_manager final { <nl> std : : vector < std : : pair < hid_usage_page , hid_usage > > usage_pairs_ ; <nl> <nl> IOHIDManagerRef _Nullable manager_ ; <nl> - std : : unique_ptr < thread_utility : : timer > refresh_timer_ ; <nl> + pqrs : : dispatcher : : extra : : timer refresh_timer_ ; <nl> <nl> std : : unordered_map < IOHIDDeviceRef , registry_entry_id > registry_entry_ids_ ; <nl> / / We do not need to use registry_entry_ids_mutex_ since it is modified only in run_loop_thread_ . <nl>
|
use dispatcher : : extra : : timer @ hid_manager
|
pqrs-org/Karabiner-Elements
|
938beac434e0cefeed56f30322865dd9d3211b5b
|
2018-10-01T02:00:22Z
|
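The Karabiner-Elements row above migrates a hand-rolled repeating timer to the dispatcher library's timer while keeping the same start/stop shape. As a rough idea of the pattern being replaced, here is a generic standard-library repeat timer; it is deliberately not the `pqrs::dispatcher` API, whose exact signatures are not shown in the diff, and the class name is mine:

```cpp
#include <chrono>
#include <condition_variable>
#include <functional>
#include <iostream>
#include <mutex>
#include <thread>

class repeat_timer {
public:
    ~repeat_timer() { stop(); }

    void start(std::function<void()> fn, std::chrono::milliseconds interval) {
        stop();  // at most one worker at a time
        {
            std::lock_guard<std::mutex> lock(mutex_);
            running_ = true;
        }
        worker_ = std::thread([this, fn = std::move(fn), interval] {
            std::unique_lock<std::mutex> lock(mutex_);
            while (running_) {
                // wait_for returns early if stop() flips running_
                if (cv_.wait_for(lock, interval, [this] { return !running_; }))
                    break;
                fn();  // fires once per interval until stopped
            }
        });
    }

    void stop() {
        {
            std::lock_guard<std::mutex> lock(mutex_);
            running_ = false;
        }
        cv_.notify_all();
        if (worker_.joinable())
            worker_.join();
    }

private:
    std::thread worker_;
    std::mutex mutex_;
    std::condition_variable cv_;
    bool running_ = false;
};

int main() {
    repeat_timer t;
    t.start([] { std::cout << "tick\n"; }, std::chrono::milliseconds(100));
    std::this_thread::sleep_for(std::chrono::milliseconds(350));
    t.stop();  // same start/stop shape as refresh_timer_ in the diff
}
```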
deleted file mode 100644 <nl> index 27f24d83a7 . . 0000000000 <nl> mmm a / code / advanced_data_structures / FenwickTree . cpp <nl> ppp / dev / null <nl> <nl> - / / Binary indexed tree or fenwick tree <nl> - / / Space Complexity : O ( N ) for declaring another array of N = size num_of_elements <nl> - / / Time Complexity : O ( logN ) for each operation ( update and query as well ) <nl> - / / original array for storing values for later lookup <nl> - # include < bits / stdc + + . h > <nl> - using namespace std ; <nl> - int num_elements ; <nl> - void update ( int x , int delta , vector < int > & bit ) <nl> - { <nl> - / / x & ( - x ) gives the last set bit in a number x <nl> - for ( ; x < = num_elements ; x + = x & - x ) <nl> - bit [ x ] + = delta ; <nl> - } <nl> - long long query ( int x , vector < int > & bit ) <nl> - { <nl> - long long sum = 0 ; <nl> - / / x & ( - x ) gives the last set bit in a number x <nl> - for ( ; x > 0 ; x - = x & - x ) <nl> - sum + = bit [ x ] ; <nl> - return sum ; <nl> - } <nl> - int main ( ) <nl> - { <nl> - int num_queries , left_index , right_index ; <nl> - cout < < " Enter number of elements \ n " ; <nl> - cin > > num_elements ; <nl> - vector < int > container , bit ( num_elements + 1 , 0 ) ; <nl> - for ( int i = 0 ; i < num_elements ; i + + ) <nl> - { <nl> - cout < < " Enter element \ n " ; <nl> - int temp ; <nl> - cin > > temp ; <nl> - container . push_back ( temp ) ; <nl> - update ( i + 1 , temp , bit ) ; <nl> - } <nl> - cout < < " Enter number of queries \ n " ; <nl> - cin > > num_queries ; <nl> - for ( int i = 0 ; i < num_queries ; i + + ) <nl> - { <nl> - cout < < " Enter start index ( 1 indexed ) \ n " ; <nl> - cin > > left_index ; <nl> - cout < < " Enter end index ( 1 indexed ) \ n " ; <nl> - cin > > right_index ; <nl> - if ( left_index > right_index ) <nl> - { <nl> - cout < < " Invalid range \ n " ; <nl> - continue ; <nl> - } <nl> - cout < < " Sum in given range is \ n " ; <nl> - cout < < query ( right_index , bit ) - query ( left_index - 1 , bit ) < < endl ; <nl> - } <nl> - return 0 ; <nl> - } <nl> \ No newline at end of file <nl> new file mode 100644 <nl> index 0000000000 . . e214123799 <nl> mmm / dev / null <nl> ppp b / code / data_structures / FenwickTree . cpp <nl> <nl> + # include < iostream > <nl> + # include < time . h > <nl> + # define N 9 <nl> + # define range 5 <nl> + # define infinity 100100100 <nl> + <nl> + using namespace std ; <nl> + <nl> + void add ( int fenwick [ ] , int a , int delta ) ; <nl> + int query ( int fenwick [ ] , int a , int b ) ; <nl> + <nl> + int main ( ) <nl> + { <nl> + / / int input [ N ] ; <nl> + int fenwick [ N + 1 ] ; / / This will contain cumulative frequency ( At index i we have frequency [ 0 . . 
i ] ) <nl> + fenwick [ 0 ] = 0 ; <nl> + fill ( fenwick , fenwick + 1 + N , 0 ) ; <nl> + int input [ ] = { 1 , 3 , 4 , 8 , 6 , 1 , 4 , 2 } ; <nl> + <nl> + / / for ( int i = 0 ; i < N ; i + + ) <nl> + / / input [ i ] = ( rand ( ) % range ) + 1 ; <nl> + <nl> + for ( int i = 0 ; i < N ; i + + ) <nl> + cout < < input [ i ] < < " " ; <nl> + cout < < endl ; <nl> + <nl> + for ( int i = 1 ; i < N ; i + + ) <nl> + add ( fenwick , i , input [ i - 1 ] ) ; <nl> + <nl> + for ( int i = 1 ; i < N ; i + + ) <nl> + cout < < fenwick [ i ] < < " " ; <nl> + cout < < endl ; <nl> + <nl> + cout < < query ( fenwick , 0 , 5 ) ; / / Sum [ 0 , 5 ] <nl> + <nl> + <nl> + } <nl> + <nl> + void add ( int fenwick [ ] , int a , int delta ) <nl> + { <nl> + for ( ; a < N ; a + = a & ( - a ) ) <nl> + fenwick [ a ] + = delta ; <nl> + } <nl> + <nl> + int query ( int fenwick [ ] , int a , int b ) <nl> + { <nl> + if ( a > 0 ) <nl> + return query ( fenwick , 0 , b ) - query ( fenwick , 0 , a - 1 ) ; <nl> + <nl> + int sum = 0 ; <nl> + for ( ; b > 0 ; b - = b & ( - b ) ) <nl> + sum + = fenwick [ b ] ; <nl> + <nl> + return sum ; <nl> + } <nl> \ No newline at end of file <nl> similarity index 100 % <nl> rename from code / advanced_data_structures / FenwickTree . py <nl> rename to code / data_structures / FenwickTree . py <nl>
|
advanced_data_structures_added
|
OpenGenus/cosmos
|
2901826034bd6b2fad3d281ed23f7b359bed11fd
|
2017-10-02T14:53:11Z
|
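Two problems are worth noting in the `code/data_structures/FenwickTree.cpp` added above: `N` is 9 while `input` has only eight initializers, so the echo loop `for (int i = 0; i < N; i++)` reads one element past the end of `input`, and the final `query(fenwick, 0, 5)` returns the sum of 1-indexed positions 1..5, i.e. the first five inputs, not an inclusive 0..5 range as the `Sum [0, 5]` comment suggests. A corrected, self-contained sketch of the same structure:

```cpp
#include <iostream>
#include <vector>

// 1-indexed Fenwick (binary indexed) tree: O(log n) point update and
// prefix-sum query; x & (-x) isolates the lowest set bit of x.
class FenwickTree {
public:
  explicit FenwickTree(int n) : tree_(n + 1, 0) {}

  void add(int i, int delta) {  // i is 1-indexed
    for (; i < static_cast<int>(tree_.size()); i += i & (-i))
      tree_[i] += delta;
  }

  long long prefix_sum(int i) const {  // sum of positions 1..i
    long long sum = 0;
    for (; i > 0; i -= i & (-i))
      sum += tree_[i];
    return sum;
  }

  long long range_sum(int a, int b) const {  // sum of positions a..b
    return prefix_sum(b) - prefix_sum(a - 1);
  }

private:
  std::vector<long long> tree_;
};

int main() {
  const std::vector<int> input = {1, 3, 4, 8, 6, 1, 4, 2};
  FenwickTree ft(static_cast<int>(input.size()));
  for (std::size_t i = 0; i < input.size(); ++i)
    ft.add(static_cast<int>(i) + 1, input[i]);
  std::cout << ft.range_sum(1, 5) << "\n";  // 1+3+4+8+6 = 22
}
```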
mmm a / src / rpc / connectivity / connectivity . cc <nl> ppp b / src / rpc / connectivity / connectivity . cc <nl> void connectivity_cluster_t : : handle ( <nl> / / iterate over it , so I ( Sam ) don ' t see why it wouldn ' t have <nl> / / been superclose to the watchers array . <nl> <nl> - / / TODO THREAD : A big WTF : How are we acquiring the <nl> - / / watchers mutex when we ' re on the wrong thread ? <nl> + / / TODO THREAD : We need to switch threads to access the <nl> + / / watchers array and watchers_mutex . Do we really want <nl> + / / to do that here ? Or should we let the above <nl> + / / on_thread_t ( to the connection thread ) go out of scope <nl> + / / and do it then ? <nl> + <nl> + on_thread_t rpc_threader ( home_thread ( ) ) ; <nl> + <nl> mutex_acquisition_t acq ( & watchers_mutex ) ; <nl> <nl> / * ` event_watcher_t ` s shouldn ' t block . * / <nl> mmm a / src / unittest / rpc_mailbox . cc <nl> ppp b / src / unittest / rpc_mailbox . cc <nl> struct recording_mailbox_cluster_t : public mailbox_cluster_t { <nl> int i ; <nl> stream > > i ; <nl> on_done ( ) ; <nl> + <nl> + on_thread_t rethreader ( home_thread ( ) ) ; <nl> inbox [ i ] = peer ; <nl> } <nl> public : <nl> struct recording_mailbox_cluster_t : public mailbox_cluster_t { <nl> send_utility_message ( peer , boost : : bind ( & write_integer , message , _1 ) ) ; <nl> } <nl> void expect ( int message , peer_id_t peer ) { <nl> + assert_thread ( ) ; <nl> EXPECT_TRUE ( inbox . find ( message ) ! = inbox . end ( ) ) ; <nl> EXPECT_TRUE ( inbox [ message ] = = peer ) ; <nl> } <nl>
|
Rethreaded for disconnect watcher messages in connectivity.cc, made rpc_mailbox.cc unit test update inbox on the right thread.
|
rethinkdb/rethinkdb
|
f4ade8ae13b9e72f2306ff3fb0c2f6b340e4f977
|
2011-11-16T00:33:20Z
|
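The unit-test half of this commit guards `expect()` with `assert_thread()` so the inbox map is only touched on the object's home thread. Below is a minimal sketch of that home-thread idiom built on `std::thread::id`; the class is a hypothetical stand-in, since the real codebase tracks its own coroutine thread numbering rather than OS thread ids.

```cpp
#include <cassert>
#include <thread>

// Records the thread that constructed the object and lets methods assert
// they are running on it, mirroring the assert_thread() idiom in the commit.
class home_thread_mixin {
public:
  home_thread_mixin() : home_thread_(std::this_thread::get_id()) {}

  void assert_thread() const {
    assert(std::this_thread::get_id() == home_thread_);
  }

private:
  std::thread::id home_thread_;
};

struct inbox_owner : home_thread_mixin {
  void expect() {
    assert_thread();  // fail fast if called from the wrong thread
  }
};

int main() {
  inbox_owner o;
  o.expect();  // OK: same thread that constructed o
}
```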
mmm a / test / mjsunit / mjsunit . status <nl> ppp b / test / mjsunit / mjsunit . status <nl> regress / regress - 3247124 : SKIP <nl> # should be platform - independent . <nl> regress / regress - 1132 : SKIP <nl> <nl> - # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> - [ $ arch = = arm & & $ crankshaft ] <nl> - <nl> - # BUG ( 1094 ) <nl> - regress / regress - deopt - gc : SKIP <nl> - <nl> - # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> - [ $ arch = = x64 & & $ crankshaft ] <nl> - <nl> - # BUG ( 1094 ) <nl> - regress / regress - deopt - gc : SKIP <nl> - <nl> # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> [ $ arch = = mips ] <nl> <nl>
|
Remove exception for mjsunit/regress/regress-deopt-gc on arm and x64.
|
v8/v8
|
78a21647b91b72877065dc369b76b0b04bd9a958
|
2011-03-01T11:36:45Z
|
mmm a / tensorflow / compiler / mlir / lite / transforms / prepare_tf . cc <nl> ppp b / tensorflow / compiler / mlir / lite / transforms / prepare_tf . cc <nl> struct ConvertTFStridedSlice : public RewritePattern { <nl> <nl> / / Insert a new reshape op . <nl> Value original_input = strided_slice_op . input ( ) ; <nl> - / / TODO ( b / 174267775 ) : Make sure that the input type has ranked tensor type . <nl> RankedTensorType original_input_type = <nl> original_input . getType ( ) . dyn_cast < RankedTensorType > ( ) ; <nl> if ( ! original_input_type ) { <nl>
|
Remove dangling TODO comment.
|
tensorflow/tensorflow
|
c85dd75cb3a003f945d01e4bff0b28e81889217d
|
2020-12-01T01:03:40Z
|
mmm a / templates / cocos2dx_files . json <nl> ppp b / templates / cocos2dx_files . json <nl> <nl> " cocos / scripting / lua - bindings / auto / api / ActionTimeline . lua " , <nl> " cocos / scripting / lua - bindings / auto / api / ActionTimelineCache . lua " , <nl> " cocos / scripting / lua - bindings / auto / api / ActionTimelineData . lua " , <nl> + " cocos / scripting / lua - bindings / auto / api / ActionTimelineNode . lua " , <nl> " cocos / scripting / lua - bindings / auto / api / ActionTintFrame . lua " , <nl> " cocos / scripting / lua - bindings / auto / api / ActionTween . lua " , <nl> " cocos / scripting / lua - bindings / auto / api / AmbientLight . lua " , <nl>
|
Merge pull request from CocosRobot/update_cocosfiles_1419494730
|
cocos2d/cocos2d-x
|
4291202a7c6349dc32aa36229de4cc9477bb64c9
|
2014-12-26T01:28:45Z
|
mmm a / tensorflow / compiler / jit / graphcycles / ordered_set . h <nl> ppp b / tensorflow / compiler / jit / graphcycles / ordered_set . h <nl> limitations under the License . <nl> # include " tensorflow / core / platform / logging . h " <nl> <nl> namespace tensorflow { <nl> - / / This is a set data structure that provides a stable iteration order . <nl> + / / This is a set data structure that provides a deterministic iteration order . <nl> + / / The iteration order of elements only depends on the sequence of <nl> + / / inserts / deletes , so as long as the inserts / deletes happen in the same <nl> + / / sequence , the set will have the same iteration order . <nl> / / <nl> / / Assumes that T can be cheaply copied for simplicity . <nl> template < typename T > <nl>
|
Fix a comment to be clearer.
|
tensorflow/tensorflow
|
6ec051ec5a00da06e2096a749513489a20048178
|
2019-05-09T15:47:04Z
|
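The clarified comment describes a set whose iteration order depends only on the sequence of inserts and deletes. A common way to get that property is to pair a hash set for membership with a vector recording insertion order; the sketch below illustrates the idea and is not TensorFlow's implementation, which avoids the O(n) erase shown here.

```cpp
#include <algorithm>
#include <cassert>
#include <unordered_set>
#include <vector>

// A set with deterministic iteration order: elements iterate in insertion
// order, so two runs with the same insert/erase sequence iterate identically.
template <typename T>
class InsertionOrderedSet {
public:
  bool Insert(const T& value) {
    if (!members_.insert(value).second) return false;  // already present
    order_.push_back(value);
    return true;
  }

  void Erase(const T& value) {
    if (members_.erase(value) == 0) return;
    order_.erase(std::find(order_.begin(), order_.end(), value));
  }

  const std::vector<T>& GetSequence() const { return order_; }

private:
  std::unordered_set<T> members_;
  std::vector<T> order_;  // iteration order == insertion order
};

int main() {
  InsertionOrderedSet<int> s;
  s.Insert(3); s.Insert(1); s.Insert(2); s.Erase(1);
  assert((s.GetSequence() == std::vector<int>{3, 2}));
}
```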
mmm a / xbmc / network / GUIDialogNetworkSetup . h <nl> ppp b / xbmc / network / GUIDialogNetworkSetup . h <nl> class CGUIDialogNetworkSetup : public CGUIDialogSettingsManualBase <nl> virtual void OnSettingAction ( const CSetting * setting ) ; <nl> <nl> / / specialization of CGUIDialogSettingsBase <nl> + bool AllowResettingSettings ( ) const override { return false ; } <nl> virtual void Save ( ) { } <nl> virtual void SetupView ( ) ; <nl> <nl>
|
hide separator image
|
xbmc/xbmc
|
fbbe5b5c5c604ad3b8d098a34133cb05c38b81e4
|
2016-03-07T18:15:08Z
|
mmm a / CMakeLists . txt <nl> ppp b / CMakeLists . txt <nl> message ( STATUS " " ) <nl> message ( STATUS " Interfaces : " ) <nl> message ( STATUS " Python : $ { BUILD_NEW_PYTHON_SUPPORT } " ) <nl> message ( STATUS " Python interpreter : $ { PYTHON_EXECUTABLE } " ) <nl> - message ( STATUS " Python numpy : $ { PYTHON_USE_NUMPY } " ) <nl> + if ( PYTHON_USE_NUMPY ) <nl> + message ( STATUS " Python numpy : YES " ) <nl> + else ( ) <nl> + message ( STATUS " Python numpy : NO ( Python interface will not cover OpenCV 2 . x API ) " ) <nl> + endif ( ) <nl> <nl> if ( IPP_FOUND AND USE_IPP ) <nl> message ( STATUS " Use IPP : $ { IPP_PATH } " ) <nl> mmm a / modules / features2d / include / opencv2 / features2d / features2d . hpp <nl> ppp b / modules / features2d / include / opencv2 / features2d / features2d . hpp <nl> class CV_EXPORTS GridAdaptedFeatureDetector : public FeatureDetector <nl> virtual void detect ( const Mat & image , vector < KeyPoint > & keypoints , const Mat & mask = Mat ( ) ) const ; <nl> <nl> / / todo read / write <nl> - virtual void read ( const FileNode & fn ) { } <nl> - virtual void write ( FileStorage & fs ) const { } <nl> + virtual void read ( const FileNode & ) { } <nl> + virtual void write ( FileStorage & ) const { } <nl> <nl> protected : <nl> Ptr < FeatureDetector > detector ; <nl> class PyramidAdaptedFeatureDetector : public FeatureDetector <nl> virtual void detect ( const Mat & image , vector < KeyPoint > & keypoints , const Mat & mask = Mat ( ) ) const ; <nl> <nl> / / todo read / write <nl> - virtual void read ( const FileNode & fn ) { } <nl> - virtual void write ( FileStorage & fs ) const { } <nl> + virtual void read ( const FileNode & ) { } <nl> + virtual void write ( FileStorage & ) const { } <nl> <nl> protected : <nl> Ptr < FeatureDetector > detector ; <nl> mmm a / modules / python / cv . cpp <nl> ppp b / modules / python / cv . cpp <nl> static int convert_to_floats ( PyObject * o , floats * dst , const char * name = " no_na <nl> } else if ( PyNumber_Check ( o ) ) { <nl> dst - > count = 1 ; <nl> dst - > f = new float [ 1 ] ; <nl> - dst - > f [ 0 ] = PyFloat_AsDouble ( o ) ; <nl> + dst - > f [ 0 ] = ( float ) PyFloat_AsDouble ( o ) ; <nl> } else { <nl> return failmsg ( " Expected list of floats , or float for argument ' % s ' " , name ) ; <nl> } <nl> static int zero = 0 ; <nl> <nl> # include " generated0 . i " <nl> <nl> + # if PYTHON_USE_NUMPY <nl> # include " opencv2x . h " <nl> # include " pyopencv_generated_types . h " <nl> # include " pyopencv_generated_funcs . h " <nl> + # endif <nl> <nl> static PyMethodDef methods [ ] = { <nl> <nl> static PyMethodDef methods [ ] = { <nl> { " temp_test " , temp_test , METH_VARARGS } , <nl> <nl> # include " generated1 . i " <nl> + <nl> + # if PYTHON_USE_NUMPY <nl> # include " pyopencv_generated_func_tab . h " <nl> + # endif <nl> <nl> { NULL , NULL } , <nl> } ; <nl> void initcv ( ) <nl> MKTYPE ( memtrack ) ; <nl> <nl> # include " generated4 . i " <nl> + <nl> + # if PYTHON_USE_NUMPY <nl> # include " pyopencv_generated_type_reg . h " <nl> + # endif <nl> <nl> m = Py_InitModule ( MODULESTR " " , methods ) ; <nl> d = PyModule_GetDict ( m ) ; <nl> void initcv ( ) <nl> PUBLISH ( GC_EVAL ) ; <nl> <nl> # include " generated2 . i " <nl> + <nl> + # if PYTHON_USE_NUMPY <nl> # include " pyopencv_generated_const_reg . h " <nl> + # endif <nl> <nl> # if 0 <nl> { <nl> mmm a / modules / python / gen2 . py <nl> ppp b / modules / python / gen2 . 
py <nl> <nl> <nl> static bool pyopencv_to ( PyObject * src , $ { cname } & dst , const char * name = " < unknown > " ) <nl> { <nl> - if ( src = = NULL or src = = Py_None ) <nl> + if ( src = = NULL | | src = = Py_None ) <nl> return true ; <nl> if ( ! PyObject_TypeCheck ( src , & pyopencv_ $ { name } _Type ) ) <nl> { <nl> mmm a / modules / python / opencv2x . h <nl> ppp b / modules / python / opencv2x . h <nl> static inline PyObject * pyopencv_from ( const Moments & m ) <nl> " nu30 " , m . nu30 , " nu21 " , m . nu21 , " nu12 " , m . nu12 , " mu03 " , m . nu03 ) ; <nl> } <nl> <nl> + static inline PyObject * pyopencv_from ( const CvDTreeNode * node ) <nl> + { <nl> + double value = node - > value ; <nl> + int ivalue = cvRound ( value ) ; <nl> + return value = = ivalue ? PyInt_FromLong ( ivalue ) : PyFloat_FromDouble ( value ) ; <nl> + } <nl> + <nl> # endif <nl>
|
fixed building Python wrappers when Numpy is not available
|
opencv/opencv
|
8a8ba57b20d5764c52acd615a33410092a6f9732
|
2010-11-03T17:57:51Z
|
mmm a / folly / wangle / ManualExecutor . h <nl> ppp b / folly / wangle / ManualExecutor . h <nl> namespace folly { namespace wangle { <nl> run ( ) ; <nl> } <nl> <nl> + template < class F > void waitFor ( F const & f ) { <nl> + while ( ! f . isReady ( ) ) <nl> + makeProgress ( ) ; <nl> + } <nl> + <nl> private : <nl> std : : mutex lock_ ; <nl> std : : queue < std : : function < void ( ) > > runnables_ ; <nl>
|
(wangle) ManualExecutor::waitFor(F&&)
|
facebook/folly
|
618a6ec3134b15b76a76a8c68437df66c525cd82
|
2014-05-20T19:53:58Z
|
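The added `waitFor` simply drains queued work until the future reports ready: `while (!f.isReady()) makeProgress();`. A self-contained analogue of that loop with stand-in types follows; the `manual_executor` and `flag_future` names are hypothetical, not folly's API.

```cpp
#include <functional>
#include <queue>

// Minimal analogue of the added waitFor(): run queued tasks one at a time
// until some condition reports ready.
class manual_executor {
public:
  void add(std::function<void()> fn) { tasks_.push(std::move(fn)); }

  void make_progress() {
    if (tasks_.empty()) return;
    auto fn = std::move(tasks_.front());
    tasks_.pop();
    fn();
  }

  template <class F>
  void wait_for(const F& f) {  // mirrors: while (!f.isReady()) makeProgress();
    while (!f.is_ready())
      make_progress();
  }

private:
  std::queue<std::function<void()>> tasks_;
};

struct flag_future {
  bool ready = false;
  bool is_ready() const { return ready; }
};

int main() {
  manual_executor ex;
  flag_future fut;
  ex.add([&] { fut.ready = true; });  // work that completes the "future"
  ex.wait_for(fut);                   // returns once the task has run
}
```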
mmm a / currentrelease <nl> ppp b / currentrelease <nl> <nl> - 3 . 6 . 0 <nl> + 3 . 5 . 1 <nl> https : / / github . com / sqlitebrowser / sqlitebrowser / releases <nl> <nl>
|
Reverting currentrelease to 3.5.1 for now
|
sqlitebrowser/sqlitebrowser
|
848f4bf0340f2024f462d4c0d00f06f0f3267a30
|
2015-04-28T00:39:42Z
|
mmm a / docs / api / browser - window . md <nl> ppp b / docs / api / browser - window . md <nl> method : <nl> let url = require ( ' url ' ) . format ( { <nl> protocol : ' file ' , <nl> slashes : true , <nl> - pathname : path . join ( __dirname , ' index . html ' ) <nl> + pathname : require ( ' path ' ) . join ( __dirname , ' index . html ' ) <nl> } ) <nl> <nl> win . loadURL ( url ) <nl>
|
Merge pull request from electron/add-missing-require
|
electron/electron
|
0955c2fd4d058adf9e681ab075df0170831e690c
|
2016-08-22T22:49:48Z
|
mmm a / tensorflow / lite / kernels / while . cc <nl> ppp b / tensorflow / lite / kernels / while . cc <nl> TfLiteStatus Eval ( TfLiteContext * context , TfLiteNode * node ) { <nl> / / subgraph . This is always true before step 2 . <nl> / / <nl> / / This is the best we can do without sharing tensor buffer across subgraph <nl> - / / boundry . Currently we copy the input / output between the subgraphs . This <nl> - / / isn ' t optimized yet and a lot of redundent copies are made . <nl> + / / boundary . Currently we copy the input / output between the subgraphs . This <nl> + / / isn ' t optimized yet and a lot of redundant copies are made . <nl> / / TODO ( b / 120234921 ) : Optimize and avoid copying tensors between subgraphs . <nl> TF_LITE_ENSURE_OK ( <nl> context , <nl>
|
Merge pull request from joyalbin:fix_while
|
tensorflow/tensorflow
|
6d9b0e4c6812767a7eb78afe5be5648dcbb3b8da
|
2019-02-28T23:17:26Z
|
mmm a / modules / core / src / mathfuncs . cpp <nl> ppp b / modules / core / src / mathfuncs . cpp <nl> <nl> <nl> # include " precomp . hpp " <nl> # include " opencl_kernels_core . hpp " <nl> + # include < limits > <nl> <nl> namespace cv <nl> { <nl> mmm a / modules / core / test / test_math . cpp <nl> ppp b / modules / core / test / test_math . cpp <nl> TEST ( Core_Pow , special ) <nl> r0 = std : : pow ( val , power ) ; <nl> if ( cvIsInf ( r0 ) ) <nl> { <nl> - ASSERT_TRUE ( cvIsInf ( r ) ) ; <nl> + ASSERT_TRUE ( cvIsInf ( r ) ! = 0 ) ; <nl> } <nl> else if ( cvIsNaN ( r0 ) ) <nl> { <nl> - ASSERT_TRUE ( cvIsNaN ( r ) ) ; <nl> + ASSERT_TRUE ( cvIsNaN ( r ) ! = 0 ) ; <nl> } <nl> else <nl> { <nl> + ASSERT_TRUE ( cvIsInf ( r ) = = 0 & & cvIsNaN ( r ) = = 0 ) ; <nl> ASSERT_LT ( fabs ( r - r0 ) , eps ) ; <nl> } <nl> } <nl>
|
some more compile warnings fixed
|
opencv/opencv
|
73f760fdf09115009097725a213461c8016c845d
|
2015-05-05T15:03:40Z
|
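The test change rewrites `ASSERT_TRUE(cvIsInf(r))` as `ASSERT_TRUE(cvIsInf(r) != 0)`, most likely because `cvIsInf`/`cvIsNaN` return `int` and the implicit int-to-bool conversion inside the assertion macro is what the compiler warned about. A minimal illustration of the same pattern with a toy int-returning classifier; the assumption here is that your compiler flags the implicit conversion under similar warning settings.

```cpp
#include <cassert>
#include <cmath>
#include <limits>

// Classifiers in C-style APIs often return "zero or nonzero" as int.
// Writing `!= 0` makes the int->bool conversion explicit, which is what the
// OpenCV test change does for cvIsInf/cvIsNaN.
int my_is_inf(double x) { return std::isinf(x) ? 1 : 0; }  // toy example

int main() {
  double r = std::numeric_limits<double>::infinity();
  assert(my_is_inf(r) != 0);  // explicit comparison, no implicit narrowing
  assert(my_is_inf(2.0) == 0 && !std::isnan(2.0));
}
```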
mmm a / src / net . cpp <nl> ppp b / src / net . cpp <nl> void V1TransportSerializer : : prepareForTransport ( CSerializedNetMsg & msg , std : : vec <nl> uint256 hash = Hash ( msg . data . begin ( ) , msg . data . end ( ) ) ; <nl> <nl> / / create header <nl> - CMessageHeader hdr ( Params ( ) . MessageStart ( ) , msg . command . c_str ( ) , msg . data . size ( ) ) ; <nl> + CMessageHeader hdr ( Params ( ) . MessageStart ( ) , msg . m_type . c_str ( ) , msg . data . size ( ) ) ; <nl> memcpy ( hdr . pchChecksum , hash . begin ( ) , CMessageHeader : : CHECKSUM_SIZE ) ; <nl> <nl> / / serialize header <nl> bool CConnman : : NodeFullyConnected ( const CNode * pnode ) <nl> void CConnman : : PushMessage ( CNode * pnode , CSerializedNetMsg & & msg ) <nl> { <nl> size_t nMessageSize = msg . data . size ( ) ; <nl> - LogPrint ( BCLog : : NET , " sending % s ( % d bytes ) peer = % d \ n " , SanitizeString ( msg . command ) , nMessageSize , pnode - > GetId ( ) ) ; <nl> + LogPrint ( BCLog : : NET , " sending % s ( % d bytes ) peer = % d \ n " , SanitizeString ( msg . m_type ) , nMessageSize , pnode - > GetId ( ) ) ; <nl> <nl> / / make sure we use the appropriate network transport format <nl> std : : vector < unsigned char > serializedHeader ; <nl> void CConnman : : PushMessage ( CNode * pnode , CSerializedNetMsg & & msg ) <nl> LOCK ( pnode - > cs_vSend ) ; <nl> bool optimisticSend ( pnode - > vSendMsg . empty ( ) ) ; <nl> <nl> - / / log total amount of bytes per command <nl> - pnode - > mapSendBytesPerMsgCmd [ msg . command ] + = nTotalSize ; <nl> + / / log total amount of bytes per message type <nl> + pnode - > mapSendBytesPerMsgCmd [ msg . m_type ] + = nTotalSize ; <nl> pnode - > nSendSize + = nTotalSize ; <nl> <nl> if ( pnode - > nSendSize > nSendBufferMaxSize ) <nl> mmm a / src / net . h <nl> ppp b / src / net . h <nl> struct CSerializedNetMsg <nl> CSerializedNetMsg & operator = ( const CSerializedNetMsg & ) = delete ; <nl> <nl> std : : vector < unsigned char > data ; <nl> - std : : string command ; <nl> + std : : string m_type ; <nl> } ; <nl> <nl> <nl> mmm a / src / netmessagemaker . h <nl> ppp b / src / netmessagemaker . h <nl> class CNetMsgMaker <nl> explicit CNetMsgMaker ( int nVersionIn ) : nVersion ( nVersionIn ) { } <nl> <nl> template < typename . . . Args > <nl> - CSerializedNetMsg Make ( int nFlags , std : : string sCommand , Args & & . . . args ) const <nl> + CSerializedNetMsg Make ( int nFlags , std : : string msg_type , Args & & . . . args ) const <nl> { <nl> CSerializedNetMsg msg ; <nl> - msg . command = std : : move ( sCommand ) ; <nl> + msg . m_type = std : : move ( msg_type ) ; <nl> CVectorWriter { SER_NETWORK , nFlags | nVersion , msg . data , 0 , std : : forward < Args > ( args ) . . . } ; <nl> return msg ; <nl> } <nl> <nl> template < typename . . . Args > <nl> - CSerializedNetMsg Make ( std : : string sCommand , Args & & . . . args ) const <nl> + CSerializedNetMsg Make ( std : : string msg_type , Args & & . . . args ) const <nl> { <nl> - return Make ( 0 , std : : move ( sCommand ) , std : : forward < Args > ( args ) . . . ) ; <nl> + return Make ( 0 , std : : move ( msg_type ) , std : : forward < Args > ( args ) . . . ) ; <nl> } <nl> <nl> private : <nl> mmm a / src / test / fuzz / process_messages . cpp <nl> ppp b / src / test / fuzz / process_messages . cpp <nl> void test_one_input ( const std : : vector < uint8_t > & buffer ) <nl> const std : : string random_message_type { fuzzed_data_provider . ConsumeBytesAsString ( CMessageHeader : : COMMAND_SIZE ) . 
c_str ( ) } ; <nl> <nl> CSerializedNetMsg net_msg ; <nl> - net_msg . command = random_message_type ; <nl> + net_msg . m_type = random_message_type ; <nl> net_msg . data = ConsumeRandomLengthByteVector ( fuzzed_data_provider ) ; <nl> <nl> CNode & random_node = * peers . at ( fuzzed_data_provider . ConsumeIntegralInRange < int > ( 0 , peers . size ( ) - 1 ) ) ; <nl>
|
Merge: refactor: s/command/msg_type/ in CNetMsgMaker and CSerializedNetMsg
|
bitcoin/bitcoin
|
62948caf4446246ec2b525e95705bb07b6a8f2bd
|
2020-06-19T10:54:24Z
|
mmm a / doc / base / classes . xml <nl> ppp b / doc / base / classes . xml <nl> This approximation makes straight segments between each point , then subdivides t <nl> < description > <nl> < / description > <nl> < / method > <nl> + < method name = " add_fallback " > <nl> + < argument index = " 0 " name = " data " type = " DynamicFontData " > <nl> + < / argument > <nl> + < description > <nl> + < / description > <nl> + < / method > <nl> + < method name = " set_fallback " > <nl> + < argument index = " 0 " name = " idx " type = " int " > <nl> + < / argument > <nl> + < argument index = " 1 " name = " data " type = " DynamicFontData " > <nl> + < / argument > <nl> + < description > <nl> + < / description > <nl> + < / method > <nl> + < method name = " get_fallback " qualifiers = " const " > <nl> + < return type = " DynamicFontData " > <nl> + < / return > <nl> + < argument index = " 0 " name = " idx " type = " int " > <nl> + < / argument > <nl> + < description > <nl> + < / description > <nl> + < / method > <nl> + < method name = " remove_fallback " > <nl> + < argument index = " 0 " name = " idx " type = " int " > <nl> + < / argument > <nl> + < description > <nl> + < / description > <nl> + < / method > <nl> + < method name = " get_fallback_count " qualifiers = " const " > <nl> + < return type = " int " > <nl> + < / return > <nl> + < description > <nl> + < / description > <nl> + < / method > <nl> < / methods > <nl> < constants > <nl> < / constants > <nl> This approximation makes straight segments between each point , then subdivides t <nl> < / argument > <nl> < argument index = " 2 " name = " headers " type = " StringArray " > <nl> < / argument > <nl> - < argument index = " 3 " name = " body " type = " RawArray " default = " & quot ; & quot ; " > <nl> + < argument index = " 3 " name = " body " type = " RawArray " > <nl> < / argument > <nl> < description > <nl> < / description > <nl> This method controls whether the position between two cached points is interpola <nl> < return type = " int " > <nl> < / return > <nl> < description > <nl> - Return the current axis lock of the body . One of AXIS_LOCK_ * enum . <nl> + Return the current axis lock of the body . One of AXIS_LOCK_ * enum . <nl> < / description > <nl> < / method > <nl> < method name = " get_colliding_bodies " qualifiers = " const " > <nl> This method controls whether the position between two cached points is interpola <nl> < / signals > <nl> < constants > <nl> < constant name = " MODE_STATIC " value = " 1 " > <nl> - Static mode . The body behaves like a [ StaticBody ] , and can only move by user code . <nl> + Static mode . The body behaves like a [ StaticBody ] , and can only move by user code . <nl> < / constant > <nl> < constant name = " MODE_KINEMATIC " value = " 3 " > <nl> - Kinematic body . The body behaves like a [ KinematicBody ] , and can only move by user code . <nl> + Kinematic body . The body behaves like a [ KinematicBody ] , and can only move by user code . <nl> < / constant > <nl> < constant name = " MODE_RIGID " value = " 0 " > <nl> - Rigid body . This is the " natural " state of a rigid body . It is affected by forces , and can move , rotate , and be affected by user code . <nl> + Rigid body . This is the " natural " state of a rigid body . It is affected by forces , and can move , rotate , and be affected by user code . 
<nl> < / constant > <nl> < constant name = " MODE_CHARACTER " value = " 2 " > <nl> < / constant > <nl> This method controls whether the position between two cached points is interpola <nl> < method name = " reload " > <nl> < return type = " int " > <nl> < / return > <nl> + < argument index = " 0 " name = " keep_state " type = " bool " default = " false " > <nl> + < / argument > <nl> < description > <nl> - Reload the script . This will fail if there are existing instances . <nl> < / description > <nl> < / method > <nl> < / methods > <nl> This method controls whether the position between two cached points is interpola <nl> < / methods > <nl> < constants > <nl> < constant name = " ENABLER_FREEZE_BODIES " value = " 1 " > <nl> - This enabler will freeze [ RigidBody ] nodes . <nl> + This enabler will freeze [ RigidBody ] nodes . <nl> < / constant > <nl> < constant name = " ENABLER_PAUSE_ANIMATIONS " value = " 0 " > <nl> - This enabler will pause [ AnimationPlayer ] nodes . <nl> + This enabler will pause [ AnimationPlayer ] nodes . <nl> < / constant > <nl> < constant name = " ENABLER_MAX " value = " 2 " > <nl> < / constant > <nl> This method controls whether the position between two cached points is interpola <nl> < / methods > <nl> < constants > <nl> < constant name = " ENABLER_FREEZE_BODIES " value = " 1 " > <nl> - This enabler will freeze [ RigidBody2D ] nodes . <nl> + This enabler will freeze [ RigidBody2D ] nodes . <nl> < / constant > <nl> < constant name = " ENABLER_PAUSE_ANIMATIONS " value = " 0 " > <nl> - This enabler will pause [ AnimationPlayer ] nodes . <nl> + This enabler will pause [ AnimationPlayer ] nodes . <nl> < / constant > <nl> < constant name = " ENABLER_PAUSE_PARTICLES " value = " 2 " > <nl> - This enabler will stop [ Particles2D ] nodes . <nl> + This enabler will stop [ Particles2D ] nodes . <nl> < / constant > <nl> < constant name = " ENABLER_PAUSE_ANIMATED_SPRITES " value = " 5 " > <nl> < / constant > <nl> < constant name = " ENABLER_PARENT_PROCESS " value = " 3 " > <nl> - This enabler will stop the parent ' s _process function . <nl> + This enabler will stop the parent ' s _process function . <nl> < / constant > <nl> < constant name = " ENABLER_PARENT_FIXED_PROCESS " value = " 4 " > <nl> - This enabler will stop the parent ' s _fixed_process function . <nl> + This enabler will stop the parent ' s _fixed_process function . <nl> < / constant > <nl> < constant name = " ENABLER_MAX " value = " 6 " > <nl> < / constant > <nl>
|
Sync classref
|
godotengine/godot
|
45163b10bebb07769fdb103a76468915e0066f76
|
2016-06-03T16:03:12Z
|
mmm a / xbmc / video / windows / GUIWindowFullScreen . cpp <nl> ppp b / xbmc / video / windows / GUIWindowFullScreen . cpp <nl> EVENT_RESULT CGUIWindowFullScreen : : OnMouseEvent ( const CPoint & point , const CMous <nl> } <nl> if ( event . m_id ! = ACTION_MOUSE_MOVE | | event . m_offsetX | | event . m_offsetY ) <nl> { / / some other mouse action has occurred - bring up the OSD <nl> + / / if it is not already running <nl> CGUIDialogVideoOSD * pOSD = ( CGUIDialogVideoOSD * ) g_windowManager . GetWindow ( WINDOW_DIALOG_VIDEO_OSD ) ; <nl> - if ( pOSD ) <nl> + if ( pOSD & & ! pOSD - > IsDialogRunning ( ) ) <nl> { <nl> pOSD - > SetAutoClose ( 3000 ) ; <nl> pOSD - > DoModal ( ) ; <nl>
|
[fix] - don't open the osd modal dialog if it is already opened
|
xbmc/xbmc
|
190f37a4846bb2e74dbf04edabcbff061e05bd45
|
2011-08-03T20:03:11Z
|
mmm a / src / gui / properties / propertieswidget . cpp <nl> ppp b / src / gui / properties / propertieswidget . cpp <nl> void PropertiesWidget : : renameSelectedFile ( ) <nl> path_items . removeLast ( ) ; <nl> path_items < < new_name_last ; <nl> QString new_name = path_items . join ( " / " ) ; <nl> - if ( Utils : : Fs : : sameFileNames ( old_name , new_name ) ) { <nl> + if ( old_name = = new_name ) { <nl> qDebug ( " Name did not change " ) ; <nl> return ; <nl> } <nl>
|
Fix renaming files not being case sensitive on Windows platform. Closes .
|
qbittorrent/qBittorrent
|
0939875ca8f1bc36133dce72b3ac33ebf43c4c8e
|
2017-05-13T08:16:59Z
|
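The bug behind this fix: `Utils::Fs::sameFileNames` compares case-insensitively on Windows, so a case-only rename such as `file.txt` to `File.txt` looked like "name did not change"; the fix compares the strings exactly. A small demonstration of the two comparisons, where the helper below is a stand-in and not qBittorrent's implementation:

```cpp
#include <algorithm>
#include <cassert>
#include <cctype>
#include <string>

// Case-insensitive comparison, like a Windows-style sameFileNames check.
bool same_file_names_insensitive(std::string a, std::string b) {
  auto lower = [](std::string s) {
    std::transform(s.begin(), s.end(), s.begin(),
                   [](unsigned char c) { return std::tolower(c); });
    return s;
  };
  return lower(std::move(a)) == lower(std::move(b));
}

int main() {
  // A case-only rename: the insensitive comparison wrongly reports "no
  // change", while the exact comparison used in the fix detects the rename.
  std::string old_name = "folder/file.txt";
  std::string new_name = "folder/File.txt";
  assert(same_file_names_insensitive(old_name, new_name));  // looks unchanged
  assert(old_name != new_name);                             // actually changed
}
```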
new file mode 100644 <nl> index 00000000000 . . 7b9c8e96c1f <nl> mmm / dev / null <nl> ppp b / thirdparty / oidn / 0001 - window . h - case - sensitive . patch <nl> <nl> pppmmm a / thirdparty / oidn / common / platform . h <nl> ppp + b / thirdparty / oidn / common / platform . h <nl> + <nl> + # if defined ( _WIN32 ) <nl> + # define WIN32_LEAN_AND_MEAN <nl> + # define NOMINMAX <nl> + - # include < Windows . h > <nl> + + # include < windows . h > <nl> + # elif defined ( __APPLE__ ) <nl> + # include < sys / sysctl . h > <nl> + # endif <nl> mmm a / thirdparty / oidn / common / platform . h <nl> ppp b / thirdparty / oidn / common / platform . h <nl> <nl> # if defined ( _WIN32 ) <nl> # define WIN32_LEAN_AND_MEAN <nl> # define NOMINMAX <nl> - # include < Windows . h > <nl> + # include < windows . h > <nl> # elif defined ( __APPLE__ ) <nl> # include < sys / sysctl . h > <nl> # endif <nl>
|
Fix #include <windows.h> for case-sensitive cross-compilation.
|
godotengine/godot
|
0aac6a2853452964fdda24c37b6f6a419f1cdbdc
|
2020-05-15T14:36:44Z
|
new file mode 100755 <nl> index 000000000000 . . cd741f55b473 <nl> mmm / dev / null <nl> ppp b / db / commands / aggregate . js <nl> <nl> + / * sample aggregate command queries * / <nl> + / / make sure we ' re using the right db ; this is the same as " use mydb ; " in shell <nl> + db = db . getSisterDB ( " mydb " ) ; <nl> + <nl> + / / renaming a field and keeping an array intact <nl> + var p1 = db . runCommand ( <nl> + { aggregate : " article " , pipeline : [ <nl> + { $ project : { <nl> + tags : 1 , <nl> + pageViews : 1 <nl> + } } <nl> + ] } ) ; <nl> + <nl> + / / unwinding an array <nl> + var p2 = db . runCommand ( <nl> + { aggregate : " article " , pipeline : [ <nl> + { $ project : { <nl> + author : 1 , <nl> + tag : { $ unwind : " tags " } , <nl> + pageViews : 1 <nl> + } } <nl> + ] } ) ; <nl> + <nl> + / / pulling values out of subdocuments <nl> + var p3 = db . runCommand ( <nl> + { aggregate : " article " , pipeline : [ <nl> + { $ project : { <nl> + otherfoo : " other . foo " , <nl> + otherbar : " other . bar " <nl> + } } <nl> + ] } ) ; <nl> + <nl> + / / projection includes a computed value <nl> + var p4 = db . runCommand ( <nl> + { aggregate : " article " , pipeline : [ <nl> + { $ project : { <nl> + author : 1 , <nl> + daveWroteIt : { $ eq : [ " $ author " , " dave " ] } <nl> + } } <nl> + ] } ) ; <nl> + <nl> + / / projection includes a virtual ( fabricated ) document <nl> + var p5 = db . runCommand ( <nl> + { aggregate : " article " , pipeline : [ <nl> + { $ project : { <nl> + author : 1 , <nl> + pageViews : 1 , <nl> + tag : { $ unwind : " tags " } <nl> + } } , <nl> + { $ project : { <nl> + author : 1 , <nl> + subDocument : { foo : " pageViews " , bar : " tag " } <nl> + } } <nl> + ] } ) ; <nl> + <nl> + / / multi - step aggregate <nl> + / / nested expressions in computed fields <nl> + var p6 = db . runCommand ( <nl> + { aggregate : " article " , pipeline : [ <nl> + { $ project : { <nl> + author : 1 , <nl> + tag : { $ unwind : " tags " } , <nl> + pageViews : 1 <nl> + } } , <nl> + { $ project : { <nl> + author : 1 , <nl> + tag : 1 , <nl> + pageViews : 1 , <nl> + daveWroteIt : { $ eq : [ " $ author " , " dave " ] } , <nl> + weLikeIt : { $ or : [ { $ eq : [ " $ author " , " dave " ] } , <nl> + { $ eq : [ " $ tag " , " good " ] } ] } <nl> + } } <nl> + ] } ) ; <nl> + <nl> + var p7 = db . runCommand ( <nl> + { aggregate : " article " , pipeline : [ <nl> + { $ project : { <nl> + theSum : { $ add : [ " $ pageViews " , <nl> + { $ ifnull : [ " $ other . foo " , <nl> + " $ other . bar " ] } ] } <nl> + } } <nl> + ] } ) ; <nl> + <nl> + var f1 = db . runCommand ( <nl> + { aggregate : " article " , pipeline : [ <nl> + { $ query : { $ eq : [ " $ author " , " dave " ] } } <nl> + ] } ) ; <nl> + <nl> + var f2 = db . runCommand ( <nl> + { aggregate : " article " , pipeline : [ <nl> + { $ project : { <nl> + title : 1 , <nl> + author : 1 , <nl> + pageViews : 1 , <nl> + tag : { $ unwind : " tags " } , <nl> + comments : 1 <nl> + } } , <nl> + { $ query : { $ eq : [ " $ tag " , " nasty " ] } } <nl> + ] } ) ; <nl> + <nl> + var g1 = db . runCommand ( <nl> + { aggregate : " article " , pipeline : [ <nl> + { $ project : { <nl> + author : 1 , <nl> + tag : { $ unwind : " tags " } , <nl> + pageViews : 1 <nl> + } } , <nl> + { $ group : { <nl> + _id : { tag : 1 } , <nl> + docsByTag : { $ sum : 1 } , <nl> + viewsByTag : { $ sum : " $ pageViews " } <nl> + } } <nl> + ] } ) ; <nl> + <nl> + / / $ max , and averaging in a final projection <nl> + var g2 = db . 
runCommand ( <nl> + { aggregate : " article " , pipeline : [ <nl> + { $ project : { <nl> + author : 1 , <nl> + tag : { $ unwind : " tags " } , <nl> + pageViews : 1 <nl> + } } , <nl> + { $ group : { <nl> + _id : { tag : 1 } , <nl> + docsByTag : { $ sum : 1 } , <nl> + viewsByTag : { $ sum : " $ pageViews " } , <nl> + mostViewsByTag : { $ max : " $ pageViews " } , <nl> + } } , <nl> + { $ project : { <nl> + tag : " _id . tag " , <nl> + mostViewsByTag : 1 , <nl> + docsByTag : 1 , <nl> + viewsByTag : 1 , <nl> + avgByTag : { $ divide : [ " $ viewsByTag " , " $ docsByTag " ] } <nl> + } } <nl> + ] } ) ; <nl> + <nl> + / / $ push as an accumulator ; can pivot data <nl> + var g3 = db . runCommand ( <nl> + { aggregate : " article " , pipeline : [ <nl> + { $ project : { <nl> + author : 1 , <nl> + tag : { $ unwind : " tags " } <nl> + } } , <nl> + { $ group : { <nl> + _id : { tag : 1 } , <nl> + authors : { $ push : " $ author " } <nl> + } } <nl> + ] } ) ; <nl> new file mode 100755 <nl> index 000000000000 . . f46d0ec6154f <nl> mmm / dev / null <nl> ppp b / db / commands / articles . js <nl> <nl> + / / make sure we ' re using the right db ; this is the same as " use mydb ; " in shell <nl> + db = db . getSisterDB ( " mydb " ) ; <nl> + db . article . drop ( ) ; <nl> + <nl> + db . article . save ( { <nl> + title : " this is my title " , <nl> + author : " bob " , <nl> + posted : new Date ( ) , <nl> + pageViews : 5 , <nl> + tags : [ " fun " , " good " ] , <nl> + comments : [ <nl> + { author : " joe " , text : " this is cool " } , <nl> + { author : " sam " , text : " this is bad " } <nl> + ] , <nl> + other : { foo : 5 } <nl> + } ) ; <nl> + <nl> + db . article . save ( { <nl> + title : " this is your title " , <nl> + author : " dave " , <nl> + posted : new Date ( ) , <nl> + pageViews : 7 , <nl> + tags : [ " fun " , " nasty " ] , <nl> + comments : [ <nl> + { author : " barbarella " , text : " this is hot " } , <nl> + { author : " leia " , text : " i prefer the brass bikini " , votes : 10 } <nl> + ] , <nl> + other : { bar : 14 } <nl> + } ) ; <nl> + <nl> + db . article . save ( { <nl> + title : " this is some other title " , <nl> + author : " jane " , <nl> + posted : new Date ( ) , <nl> + pageViews : 6 , <nl> + tags : [ " nasty " , " filthy " ] , <nl> + comments : [ <nl> + { author : " r2 " , text : " beep boop " } , <nl> + { author : " leia " , text : " this is too smutty " } <nl> + ] , <nl> + other : { bar : 14 } <nl> + } ) ; <nl> mmm a / db / commands / pipeline_command . cpp <nl> ppp b / db / commands / pipeline_command . cpp <nl> namespace mongo { <nl> <nl> / * now hook up the pipeline * / <nl> / * connect up a cursor to the specified collection * / <nl> + string fullName ( db + " . " + pPipeline - > getCollectionName ( ) ) ; <nl> boost : : shared_ptr < Cursor > pCursor ( <nl> - findTableScan ( pPipeline - > getCollectionName ( ) . c_str ( ) , BSONObj ( ) ) ) ; <nl> + findTableScan ( fullName . c_str ( ) , BSONObj ( ) ) ) ; <nl> boost : : shared_ptr < DocumentSource > pSource ( <nl> DocumentSourceCursor : : create ( pCursor ) ) ; <nl> <nl>
|
remove db name from in front of collection for aggregation
|
mongodb/mongo
|
d1b615a4a3b3427865ffe458675a993c7a44b7f0
|
2011-04-18T22:15:28Z
|
mmm a / tensorflow / core / grappler / op_types . cc <nl> ppp b / tensorflow / core / grappler / op_types . cc <nl> bool IsInvolution ( const NodeDef & node ) { <nl> return involution_ops - > count ( node . op ( ) ) > 0 ; <nl> } <nl> <nl> + bool IsValueAndOrderAndShapePreserving ( const NodeDef & node ) { <nl> + if ( NumNonControlInputs ( node ) = = 1 & & IsAggregate ( node ) ) { <nl> + return true ; <nl> + } <nl> + static const std : : unordered_set < string > * <nl> + value_and_order_and_shape_preserving_ops = <nl> + CHECK_NOTNULL ( ( new const std : : unordered_set < string > { <nl> + " CheckNumerics " , <nl> + " DebugGradientIdentity " , <nl> + " DeepCopy " <nl> + " Enter " , <nl> + " Exit " , <nl> + " Identity " , <nl> + " IdentityN " , <nl> + " PreventGradient " , <nl> + " Print " , <nl> + " Snapshot " , <nl> + " StopGradient " , <nl> + } ) ) ; <nl> + return value_and_order_and_shape_preserving_ops - > count ( node . op ( ) ) > 0 ; <nl> + } <nl> + <nl> bool IsValueAndOrderPreserving ( const NodeDef & node ) { <nl> if ( NumNonControlInputs ( node ) = = 1 & & IsAggregate ( node ) ) { <nl> return true ; <nl> } <nl> static const std : : unordered_set < string > * value_and_order_preserving_ops = <nl> CHECK_NOTNULL ( ( new const std : : unordered_set < string > { <nl> - " CheckNumerics " , <nl> - " DebugGradientIdentity " , <nl> - " DeepCopy " <nl> - " Enter " , <nl> - " Exit " , <nl> " ExpandDims " , <nl> - " Identity " , <nl> - " IdentityN " , <nl> - " PreventGradient " , <nl> - " Print " , <nl> - " Reshape " , <nl> " Snapshot " , <nl> " Squeeze " , <nl> - " StopGradient " , <nl> } ) ) ; <nl> - return value_and_order_preserving_ops - > count ( node . op ( ) ) > 0 ; <nl> + return value_and_order_preserving_ops - > count ( node . op ( ) ) > 0 | | <nl> + IsValueAndOrderAndShapePreserving ( node ) ; <nl> } <nl> <nl> bool IsValuePreserving ( const NodeDef & node ) { <nl> bool IsUnaryElementWise ( const NodeDef & node ) { <nl> " Tanh " , <nl> } ) ) ; <nl> return element_wise_ops - > count ( node . op ( ) ) > 0 | | <nl> - ( ! IsIdentityN ( node ) & & IsValueAndOrderPreserving ( node ) ) ; <nl> + ( ! IsIdentityN ( node ) & & IsValueAndOrderAndShapePreserving ( node ) ) ; <nl> } <nl> <nl> bool HasOpDef ( const NodeDef & node ) { <nl> mmm a / tensorflow / core / grappler / op_types . h <nl> ppp b / tensorflow / core / grappler / op_types . h <nl> bool ModifiesInputsInPlace ( const NodeDef & node ) ; <nl> / / own inverse such that f ( f ( x ) ) = = x . <nl> bool IsInvolution ( const NodeDef & node ) ; <nl> <nl> + / / Returns true if the op preserves the order and value of elements <nl> + / / and shape of its first input tensor . <nl> + bool IsValueAndOrderAndShapePreserving ( const NodeDef & node ) ; <nl> + <nl> / / Returns true if the op preserves the order and value of elements in its <nl> / / first input tensor and possible changes its shape . <nl> bool IsValueAndOrderPreserving ( const NodeDef & node ) ; <nl> mmm a / tensorflow / core / grappler / optimizers / arithmetic_optimizer . cc <nl> ppp b / tensorflow / core / grappler / optimizers / arithmetic_optimizer . cc <nl> class HoistCWiseUnaryChainsStage : public ArithmeticOptimizerStage { <nl> return n > 1 ; <nl> } else if ( IsSplit ( * node ) | | IsSplitV ( * node ) ) { <nl> const int num_split = node - > attr ( ) . at ( " num_split " ) . i ( ) ; <nl> + if ( NumNonControlOutputs ( * node , * ctx ( ) . 
node_map ) > num_split ) { <nl> + / / TODO ( rmlarsen ) : Remove this constraint when we have optimizations <nl> + / / in place for merging slices into splits . <nl> + return false ; <nl> + } <nl> return num_split > 1 & & ! IsAlreadyOptimized ( * node ) ; <nl> } <nl> return false ; <nl> class HoistCWiseUnaryChainsStage : public ArithmeticOptimizerStage { <nl> if ( tails . empty ( ) ) { <nl> return Status : : OK ( ) ; <nl> } <nl> - AddControlInputs ( ctrl_inputs , root_node ) ; <nl> AddToOptimizationQueue ( root_node ) ; <nl> optimized_nodes_ . insert ( root_node - > name ( ) ) ; <nl> if ( node_is_concat_ ) { <nl> + AddControlInputs ( ctrl_inputs , root_node ) ; <nl> return HoistChainForConcat ( prefix_length , tails , root_node ) ; <nl> } else { <nl> - return HoistChainForSplit ( prefix_length , tails , root_node ) ; <nl> + return HoistChainForSplit ( prefix_length , tails , ctrl_inputs , root_node ) ; <nl> } <nl> } <nl> <nl> class HoistCWiseUnaryChainsStage : public ArithmeticOptimizerStage { <nl> IsInPreserveSet ( * op ) ) { <nl> return false ; <nl> } <nl> - if ( node_is_concat_ & & <nl> - ctx ( ) . node_map - > GetOutputs ( op - > name ( ) ) . size ( ) > 1 ) { <nl> - / / TODO ( rmlarsen ) : Allow and hoist outgoing control edges . <nl> + if ( ctx ( ) . node_map - > GetOutputs ( op - > name ( ) ) . size ( ) > 1 ) { <nl> + / / TODO ( rmlarsen ) : Allow outgoing control edges . <nl> return false ; <nl> } <nl> } <nl> class HoistCWiseUnaryChainsStage : public ArithmeticOptimizerStage { <nl> } <nl> <nl> Status HoistChainForSplit ( const int prefix_length , const ChainLinkSet & tails , <nl> + std : : set < string > * ctrl_inputs , <nl> NodeDef * split_node ) { <nl> / / Create a new chain before the split node to process the input tensor . <nl> const string & split_name = split_node - > name ( ) ; <nl> class HoistCWiseUnaryChainsStage : public ArithmeticOptimizerStage { <nl> cur_copy - > add_input ( orig_input ) ; <nl> ctx ( ) . node_map - > UpdateOutput ( NodeName ( orig_input ) , split_name , <nl> cur_copy - > name ( ) ) ; <nl> + / / Make sure all the control inputs are satisfied before running the first <nl> + / / node in the new chain . <nl> + AddControlInputs ( ctrl_inputs , cur_copy ) ; <nl> <nl> / / Connect all consumers of the tail nodes directly to the <nl> / / output port of Split from which the chain started . <nl> mmm a / tensorflow / core / grappler / optimizers / arithmetic_optimizer . h <nl> ppp b / tensorflow / core / grappler / optimizers / arithmetic_optimizer . h <nl> class ArithmeticOptimizer : public GraphOptimizer { <nl> bool remove_redundant_bitcast = true ; <nl> bool remove_redundant_cast = true ; <nl> bool remove_negation = true ; <nl> - bool hoist_cwise_unary_chains = false ; <nl> + bool hoist_cwise_unary_chains = true ; <nl> bool convert_sqrt_div_to_rsqrt_mul = false ; <nl> <nl> / / Choose which arithmetic optimizer stages will be enabled for a given <nl> mmm a / tensorflow / core / grappler / optimizers / arithmetic_optimizer_test . cc <nl> ppp b / tensorflow / core / grappler / optimizers / arithmetic_optimizer_test . cc <nl> TEST_F ( ArithmeticOptimizerTest , HoistCWiseUnaryIntoSplit ) { <nl> EXPECT_NE ( node . name ( ) , " cos_exp_b2 " ) ; <nl> <nl> if ( node . name ( ) = = " split1 " ) { <nl> - EXPECT_EQ ( 3 , node . input_size ( ) ) ; <nl> + EXPECT_EQ ( 2 , node . input_size ( ) ) ; <nl> EXPECT_EQ ( " axis " , node . input ( 0 ) ) ; <nl> EXPECT_EQ ( " ArithmeticOptimizer / _sin_a_split1 " , node . input ( 1 ) ) ; <nl> - EXPECT_EQ ( " ^ ctrl1 " , node . 
input ( 2 ) ) ; <nl> found + + ; <nl> } <nl> if ( node . name ( ) = = " ArithmeticOptimizer / _sin_a_split1 " ) { <nl> EXPECT_EQ ( " Sin " , node . op ( ) ) ; <nl> - EXPECT_EQ ( 1 , node . input_size ( ) ) ; <nl> + EXPECT_EQ ( 2 , node . input_size ( ) ) ; <nl> EXPECT_EQ ( " x " , node . input ( 0 ) ) ; <nl> + EXPECT_EQ ( " ^ ctrl1 " , node . input ( 1 ) ) ; <nl> found + + ; <nl> } <nl> if ( node . name ( ) = = " id_a " ) { <nl> TEST_F ( ArithmeticOptimizerTest , HoistCWiseUnaryIntoSplit ) { <nl> } <nl> if ( node . name ( ) = = " ArithmeticOptimizer / _exp_a2_split2 " ) { <nl> EXPECT_EQ ( " Exp " , node . op ( ) ) ; <nl> - EXPECT_EQ ( 1 , node . input_size ( ) ) ; <nl> + EXPECT_EQ ( 4 , node . input_size ( ) ) ; <nl> EXPECT_EQ ( " x " , node . input ( 0 ) ) ; <nl> + EXPECT_EQ ( " ^ ctrl1 " , node . input ( 1 ) ) ; <nl> + EXPECT_EQ ( " ^ ctrl2 " , node . input ( 2 ) ) ; <nl> + EXPECT_EQ ( " ^ ctrl3 " , node . input ( 3 ) ) ; <nl> found + + ; <nl> } <nl> if ( node . name ( ) = = " ArithmeticOptimizer / _cos_exp_a2_split2 " ) { <nl> TEST_F ( ArithmeticOptimizerTest , HoistCWiseUnaryIntoSplit ) { <nl> found + + ; <nl> } <nl> if ( node . name ( ) = = " split2 " ) { <nl> - EXPECT_EQ ( 6 , node . input_size ( ) ) ; <nl> + EXPECT_EQ ( 3 , node . input_size ( ) ) ; <nl> EXPECT_EQ ( " ArithmeticOptimizer / _cos_exp_a2_split2 " , node . input ( 0 ) ) ; <nl> EXPECT_EQ ( " size_splits2 " , node . input ( 1 ) ) ; <nl> EXPECT_EQ ( " axis " , node . input ( 2 ) ) ; <nl> - EXPECT_EQ ( " ^ ctrl1 " , node . input ( 3 ) ) ; <nl> - EXPECT_EQ ( " ^ ctrl2 " , node . input ( 4 ) ) ; <nl> - EXPECT_EQ ( " ^ ctrl3 " , node . input ( 5 ) ) ; <nl> found + + ; <nl> } <nl> if ( node . name ( ) = = " id_a2 " ) { <nl>
|
Enable unary chain hoisting optimization for concat/split/splitv by default.
|
tensorflow/tensorflow
|
ceda30408f66a7eea86dc359164deb662d5a32d0
|
2018-05-03T20:40:29Z
|
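The hoisting stage relies on elementwise unary ops commuting with `Split`/`Concat`: applying the op once before a split yields the same values as applying it to every split output, so the rewrite trades several op instances for one. A numeric sketch of that equivalence on plain vectors, illustrative only and far removed from grappler's actual graph rewrite:

```cpp
#include <cassert>
#include <cmath>
#include <vector>

// Split a vector into two halves, standing in for a 2-way Split node.
std::vector<std::vector<double>> split2(const std::vector<double>& x) {
  const std::size_t h = x.size() / 2;
  std::vector<double> lo(x.begin(), x.begin() + h);
  std::vector<double> hi(x.begin() + h, x.end());
  return {lo, hi};
}

// Elementwise Exp, standing in for a unary elementwise node.
std::vector<double> exp_all(const std::vector<double>& x) {
  std::vector<double> y;
  for (double v : x) y.push_back(std::exp(v));
  return y;
}

int main() {
  std::vector<double> x = {0.0, 1.0, 2.0, 3.0};

  // Original graph: Exp applied to each output of Split.
  auto pieces = split2(x);
  std::vector<std::vector<double>> original = {exp_all(pieces[0]),
                                               exp_all(pieces[1])};

  // Hoisted graph: one Exp before Split.
  auto hoisted = split2(exp_all(x));

  assert(original == hoisted);  // same values, one Exp instead of two
}
```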
mmm a / src / tests / fem . cpp <nl> ppp b / src / tests / fem . cpp <nl> auto fem = [ ] ( ) { <nl> GUI gui ( " FEM " , Vector2i ( gui_res + 200 , gui_res ) , false ) ; <nl> int gt = 0 ; <nl> int k = 0 ; <nl> - gui . slider ( " z " , k , 0 , n ) . slider ( " Ground truth " , gt , 0 , 2 ) ; <nl> + gui . slider ( " z " , k , 0 , n - 1 ) . slider ( " Ground truth " , gt , 0 , 1 ) ; <nl> <nl> int scale = gui_res / n ; <nl> auto & canvas = gui . get_canvas ( ) ; <nl>
|
gui fix
|
taichi-dev/taichi
|
50ccf1a1614a400711397d4e621329957d075a95
|
2019-04-30T01:39:59Z
|
mmm a / docs / wiki / deployment / process - auditing . md <nl> ppp b / docs / wiki / deployment / process - auditing . md <nl> <nl> # Process and socket auditing with osquery <nl> <nl> - Enabling these auditing features requires additional configuration of osquery . osquery can leverage the audit subsystems to record process executions and network connections in near real - time on Linux and macOS systems . Although these auditing features are extremely powerful for recording the activity from a host , they may introduce additional computational overhead and greatly increase the number of log events generated by osquery . <nl> + Enabling these auditing features requires additional configuration of osquery . osquery can leverage either BPF or the audit subsystems to record process executions and network connections in near real - time on Linux and macOS systems . Although these auditing features are extremely powerful for recording the activity from a host , they may introduce additional computational overhead and greatly increase the number of log events generated by osquery . <nl> <nl> - The ` process_events ` and ` socket_events ` tables use the same <nl> - event - based architecture as the [ File Integrity Monitoring <nl> - ( FIM ) ] ( . . / deployment / file - integrity - monitoring . md ) . To read more about <nl> + To read more about <nl> how event - based tables are created and designed , check out the osquery <nl> [ Table Pubsub Framework ] ( . . / development / pubsub - framework . md ) . On all <nl> supported platforms , process events are abstracted into the <nl> To collect process events add a query like : <nl> SELECT * FROM process_events ; <nl> ` ` ` <nl> <nl> - to your query schedule , or to a query pack . <nl> + to your query schedule , or to a query pack . If BPF is being used , change the table name to ` bpf_process_events ` . <nl> <nl> Enabling these auditing features requires additional configuration to <nl> osquery , and may have performance impact . See the OS specific sections <nl> Though some testing of underlying operating system configuration can <nl> be performed via ` osqueryi ` ; ` osqueryi ` and ` osqueryd ` operate <nl> independently and do not communicate . <nl> <nl> - On macOS , you should be able to see events using : <nl> - <nl> - ` ` ` bash <nl> - sudo osqueryi - - disable_audit = false - - disable_events = false <nl> - ` ` ` <nl> + The ` - - verbose ` flag can be really useful when trying to debug a problem . <nl> <nl> # # # Examine configuration flags <nl> <nl> osquery > select * from osquery_events ; <nl> + mmmmmmmmmmmmmmmmmmmmmmmm - + mmmmmmmmmmmmmmm - - + mmmmmmmmmmmm + mmmmmmmmmmmmmmm + mmmmmm - - + mmmmmmmmm - - + mmmmmm - - + <nl> ` ` ` <nl> <nl> - # # Linux process auditing <nl> + # # Linux process auditing using Audit <nl> <nl> - On Linux , osquery uses the Audit system to collect and process audit events from the kernel . It accomplishes this by monitoring the ` execve ( ) ` syscall . ` auditd ` should not be running when using osquery ' s process auditing , as it will conflict with ` osqueryd ` over access to the audit netlink socket . You should also ensure ` auditd ` is not configured to start at boot . <nl> + On Linux , osquery can the Audit system to collect and process events . It accomplishes this by monitoring syscalls such as ` execve ( ) ` and ` execveat ( ) ` . ` auditd ` should not be running when using osquery ' s process auditing , as it will conflict with ` osqueryd ` over access to the audit netlink socket . 
You should also ensure ` auditd ` is not configured to start at boot . <nl> <nl> The only prerequisite for using osquery ' s auditing functionality on Linux is that you must use a kernel version that contains the Audit functionality . Most kernels over version 2 . 6 have this capability . <nl> <nl> To better understand how this works , let ' s walk through 4 configuration options . <nl> 3 . ` - - audit_persist = true ` but default this is ` true ` and instructs osquery to ' regain ' the audit netlink socket if another process also accesses it . However , you should do your best to ensure there will be no other program running which is attempting to access the audit netlink socket . <nl> 4 . ` - - audit_allow_process_events = true ` this flag indicates that you would like to record process events <nl> <nl> - # # Linux socket auditing <nl> + # # Linux socket auditing using Audit <nl> <nl> Osquery can also be used to record network connections by enabling ` socket_events ` . This table uses the syscalls ` bind ( ) ` and ` connect ( ) ` to gather information about network connections . This table is not automatically enabled when process_events are enabled because it can introduce considerable load on the system . <nl> <nl> A sample socket_event log entry looks like this : <nl> <nl> If you would like to log UNIX domain sockets use the hidden flag : ` - - audit_allow_unix ` . This will put considerable strain on the system as many default actions use domain sockets . You will also need to explicitly select the ` socket ` column from the ` socket_events ` table . <nl> <nl> - # # Troubleshooting Auditing on Linux <nl> + # # Troubleshooting Audit - based process and socket auditing on Linux <nl> <nl> There are a few different methods to ensure you have configured auditing correctly . <nl> <nl> osqueryi - - audit_allow_config = true - - audit_allow_sockets = true - - audit_persist = tr <nl> <nl> If you would like to debug the raw audit events as ` osqueryd ` sees them , use the hidden flag ` - - audit_debug ` . This will print all of the RAW audit lines to osquery ' s ` stdout ` . <nl> <nl> - # # User events <nl> + # # User event auditing with Audit <nl> <nl> On Linux , a companion table called ` user_events ` is included that provides several authentication - based events . If you are enabling process auditing it should be trivial to also include this table . <nl> <nl> + # # Linux process and socket auditing using BPF <nl> + <nl> + When osquery is running on a recent kernel ( > = 4 . 18 ) , the BPF eventing framework can be used . This event publisher needs to monitor for more system calls to reach feature parity with the Audit - based tables . For this reason , enabling BPF will also enable both the ` bpf_process_events ` and ` bpf_socket_events ` tables . <nl> + <nl> + In order to start the publisher and enable the subscribers , the following flags must be passed : ` - - disable_events = false - - enable_bpf_events = true ` . The ` - - verbose ` flag can also be extremely useful when setting up the configuration for the first time , since it emit more debug information when something fails . <nl> + <nl> + The BPF framework will make use of a perf event array and several per - cpu maps in order to receive events and correctly capture strings and buffers . 
These structures can be configured using the following command line flags : <nl> + - * * bpf_perf_event_array_exp * * : size of the perf event array , as a power of two <nl> + - * * bpf_buffer_storage_size * * : how many slots of 4096 bytes should be available in each memory pool <nl> + <nl> + Memory usage depends on both <nl> + 1 . How many processors are currently online <nl> + 2 . How many processors can be added by hotswapping <nl> + <nl> + The BPF event publisher uses 6 memory pools , grouping system calls in order to evenly distribute memory usage . Not counting the internal maps used to merge sys_enter / sys_exit events ( the size for these maps is rather small ) , memory usage can be easily estimated with the following formula : <nl> + <nl> + ` ` ` <nl> + buffer_storage_bytes = memory_pool_count * ( bpf_buffer_storage_size * 4096 ) * possible_cpu_count <nl> + ` ` ` <nl> + <nl> + ` ` ` <nl> + perf_bytes = ( 2 ^ bpf_perf_event_array_exp ) * online_cpu_count <nl> + ` ` ` <nl> + <nl> + The cpu count numbers can be read from the ` / sys ` folder : <nl> + <nl> + ` ` ` <nl> + possible_cpu_count : / sys / devices / system / cpu / possible <nl> + online_cpu_count : / sys / devices / system / cpu / online <nl> + ` ` ` <nl> + <nl> + VMware Fusion ( and possibly other systems as well ) supports CPU hotswapping , raising the ` possible_cpu_count ` to 128 . This causes a huge increase in memory usage , and it is for this reason that the default settings are rather low . <nl> + <nl> + This problem can be easily fixed by disabling hotswapping . This setting is unfortunately not available through the user interface , so it needs to be changed directly in the . vmx file ( ` vcpu . hotadd = FALSE ` ) . <nl> + <nl> + * * Known issues * * <nl> + <nl> + Containers are not well supported yet ; the internal system state tracker relies on being able to correctly correlate processed together by tracing the fork system call and its variants . When a PID namespace is used , the exit code for the function does not match a valid identifier on the host namespace , causing a cache miss in the process context map used track process information . <nl> + <nl> # # macOS process & socket auditing <nl> <nl> osquery supports OpenBSM audit on macOS platforms . To enable it in osquery , you need to set ` - - disable_audit = false ` <nl>
|
Docs: Add BPF to the process auditing wiki page
|
osquery/osquery
|
e0027adf8acaeaf1429e9302d130bab79bcef7d1
|
2020-10-19T12:10:06Z
|
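Plugging illustrative flag values into the two formulas from the wiki page above gives a feel for the footprint; the constants below are example settings, not osquery's defaults, apart from the six memory pools the page states.

```cpp
#include <cstdint>
#include <iostream>

// Worked example of the BPF memory formulas from the wiki page:
//   buffer_storage_bytes = memory_pool_count * (bpf_buffer_storage_size * 4096) * possible_cpu_count
//   perf_bytes           = (2 ^ bpf_perf_event_array_exp) * online_cpu_count
int main() {
  const std::uint64_t memory_pool_count = 6;          // stated in the doc
  const std::uint64_t bpf_buffer_storage_size = 2;    // 4096-byte slots (example value)
  const std::uint64_t bpf_perf_event_array_exp = 10;  // example exponent
  const std::uint64_t possible_cpu_count = 8;         // /sys/devices/system/cpu/possible
  const std::uint64_t online_cpu_count = 8;           // /sys/devices/system/cpu/online

  const std::uint64_t buffer_storage_bytes =
      memory_pool_count * (bpf_buffer_storage_size * 4096) * possible_cpu_count;
  const std::uint64_t perf_bytes =
      (1ULL << bpf_perf_event_array_exp) * online_cpu_count;

  std::cout << "buffer storage: " << buffer_storage_bytes << " bytes\n";  // 393216 (384 KiB)
  std::cout << "perf arrays:    " << perf_bytes << " bytes\n";            // 8192 (8 KiB)
}
```

With hotswapping raising `possible_cpu_count` to 128, the same buffer-storage figure grows to 6 * 8192 * 128 = 6,291,456 bytes (6 MiB), which is why the page recommends disabling hotadd in the .vmx file.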
mmm a / test / cpp / end2end / client_callback_end2end_test . cc <nl> ppp b / test / cpp / end2end / client_callback_end2end_test . cc <nl> class TestScenario { <nl> static std : : ostream & operator < < ( std : : ostream & out , <nl> const TestScenario & scenario ) { <nl> return out < < " TestScenario { callback_server = " <nl> - < < ( scenario . callback_server ? " true " : " false " ) < < " } " ; <nl> + < < ( scenario . callback_server ? " true " : " false " ) < < " , protocol = " <nl> + < < ( scenario . protocol = = Protocol : : INPROC ? " INPROC " : " TCP " ) <nl> + < < " , intercept = " < < ( scenario . use_interceptors ? " true " : " false " ) <nl> + < < " , creds = " < < scenario . credentials_type < < " } " ; <nl> } <nl> <nl> void TestScenario : : Log ( ) const { <nl>
|
Merge pull request from yang-g/callback
|
grpc/grpc
|
8bbfbec4e48e27428431553858e2297057a15239
|
2019-03-26T19:24:41Z
|
mmm a / drivers / python / rethinkdb / _export . py <nl> ppp b / drivers / python / rethinkdb / _export . py <nl> def parse_options ( ) : <nl> # connection errors occur . Don ' t bother setting progress , because this is a <nl> # fairly small operation . <nl> def get_tables ( progress , conn , tables ) : <nl> - dbs = r . db_list ( ) . run ( conn ) <nl> + dbs = r . db_list ( ) . filter ( r . row . ne ( ' rethinkdb ' ) ) . run ( conn ) <nl> res = [ ] <nl> <nl> if len ( tables ) = = 0 : <nl> tables = [ ( db , None ) for db in dbs ] <nl> <nl> for db_table in tables : <nl> + if db_table [ 0 ] = = ' rethinkdb ' : <nl> + raise RuntimeError ( " Error : Cannot export tables from the system database : ' rethinkdb ' " ) <nl> if db_table [ 0 ] not in dbs : <nl> raise RuntimeError ( " Error : Database ' % s ' not found " % db_table [ 0 ] ) <nl> <nl> mmm a / drivers / python / rethinkdb / _import . py <nl> ppp b / drivers / python / rethinkdb / _import . py <nl> def tables_check ( progress , conn , files_info , force ) : <nl> # Ensure that all needed databases exist and tables don ' t <nl> db_list = r . db_list ( ) . run ( conn ) <nl> for db in set ( [ file_info [ " db " ] for file_info in files_info ] ) : <nl> + if db = = " rethinkdb " : <nl> + raise RuntimeError ( " Error : Cannot import tables into the system database : ' rethinkdb ' " ) <nl> if db not in db_list : <nl> r . db_create ( db ) . run ( conn ) <nl> <nl> def import_directory ( options ) : <nl> spawn_import_clients ( options , files_info ) <nl> <nl> def table_check ( progress , conn , db , table , pkey , force ) : <nl> + if db = = " rethinkdb " : <nl> + raise RuntimeError ( " Error : Cannot import a table into the system database : ' rethinkdb ' " ) <nl> + <nl> if db not in r . db_list ( ) . run ( conn ) : <nl> r . db_create ( db ) . run ( conn ) <nl> <nl>
|
changing backup scripts to ignore the 'rethinkdb' database
|
rethinkdb/rethinkdb
|
9cafaf144fdc2bf2c859fc913670241d08cdc64c
|
2014-11-27T03:06:27Z
|
mmm a / osquery / tables / networking / BUCK <nl> ppp b / osquery / tables / networking / BUCK <nl> osquery_cxx_library ( <nl> WINDOWS , <nl> [ <nl> " windows / arp_cache . cpp " , <nl> + " windows / connectivity . cpp " , <nl> " windows / interfaces . cpp " , <nl> " windows / process_open_sockets . cpp " , <nl> " windows / routes . cpp " , <nl> mmm a / osquery / tables / networking / CMakeLists . txt <nl> ppp b / osquery / tables / networking / CMakeLists . txt <nl> function ( generateOsqueryTablesNetworking ) <nl> etc_services . cpp <nl> listening_ports . cpp <nl> ) <nl> - <nl> + <nl> if ( DEFINED PLATFORM_POSIX ) <nl> list ( APPEND source_files <nl> posix / dns_resolvers . cpp <nl> function ( generateOsqueryTablesNetworking ) <nl> elseif ( DEFINED PLATFORM_WINDOWS ) <nl> list ( APPEND source_files <nl> windows / arp_cache . cpp <nl> + windows / connectivity . cpp <nl> windows / interfaces . cpp <nl> windows / process_open_sockets . cpp <nl> windows / routes . cpp <nl> new file mode 100644 <nl> index 0000000000 . . 417620d36c <nl> mmm / dev / null <nl> ppp b / osquery / tables / networking / windows / connectivity . cpp <nl> <nl> + / * * <nl> + * Copyright ( c ) 2014 - present , Facebook , Inc . <nl> + * All rights reserved . <nl> + * <nl> + * This source code is licensed in accordance with the terms specified in <nl> + * the LICENSE file found in the root directory of this source tree . <nl> + * / <nl> + <nl> + # include < string > <nl> + <nl> + # include < netlistmgr . h > <nl> + # include < windows . h > <nl> + <nl> + # include < osquery / core . h > <nl> + # include < osquery / logger . h > <nl> + # include < osquery / tables . h > <nl> + <nl> + namespace osquery { <nl> + namespace tables { <nl> + <nl> + QueryData genConnectivity ( QueryContext & context ) { <nl> + QueryData results ; <nl> + <nl> + INetworkListManager * mgr = nullptr ; <nl> + HRESULT res = CoCreateInstance ( CLSID_NetworkListManager , <nl> + NULL , <nl> + CLSCTX_ALL , <nl> + IID_INetworkListManager , <nl> + reinterpret_cast < void * * > ( & mgr ) ) ; <nl> + <nl> + if ( res ! = S_OK ) { <nl> + TLOG < < " Failed to instantiate INetworkListManager " ; <nl> + return results ; <nl> + } <nl> + <nl> + NLM_CONNECTIVITY connectivity ; <nl> + res = mgr - > GetConnectivity ( & connectivity ) ; <nl> + <nl> + if ( res ! = S_OK ) { <nl> + TLOG < < " GetConnectivity ( ) failed " ; <nl> + mgr - > Release ( ) ; <nl> + return results ; <nl> + } <nl> + <nl> + Row r ; <nl> + r [ " disconnected " ] = <nl> + INTEGER ( bool ( connectivity & NLM_CONNECTIVITY_DISCONNECTED ) ) ; <nl> + r [ " ipv4_no_traffic " ] = <nl> + INTEGER ( bool ( connectivity & NLM_CONNECTIVITY_IPV4_NOTRAFFIC ) ) ; <nl> + r [ " ipv6_no_traffic " ] = <nl> + INTEGER ( bool ( connectivity & NLM_CONNECTIVITY_IPV6_NOTRAFFIC ) ) ; <nl> + r [ " ipv4_subnet " ] = INTEGER ( bool ( connectivity & NLM_CONNECTIVITY_IPV4_SUBNET ) ) ; <nl> + r [ " ipv4_local_network " ] = <nl> + INTEGER ( bool ( connectivity & NLM_CONNECTIVITY_IPV4_LOCALNETWORK ) ) ; <nl> + r [ " ipv4_internet " ] = <nl> + INTEGER ( bool ( connectivity & NLM_CONNECTIVITY_IPV4_INTERNET ) ) ; <nl> + r [ " ipv6_subnet " ] = INTEGER ( bool ( connectivity & NLM_CONNECTIVITY_IPV6_SUBNET ) ) ; <nl> + r [ " ipv6_local_network " ] = <nl> + INTEGER ( bool ( connectivity & NLM_CONNECTIVITY_IPV6_LOCALNETWORK ) ) ; <nl> + r [ " ipv6_internet " ] = <nl> + INTEGER ( bool ( connectivity & NLM_CONNECTIVITY_IPV6_INTERNET ) ) ; <nl> + <nl> + mgr - > Release ( ) ; <nl> + results . 
push_back ( std : : move ( r ) ) ; <nl> + return results ; <nl> + } <nl> + <nl> + } / / namespace tables <nl> + } / / namespace osquery <nl> mmm a / specs / BUCK <nl> ppp b / specs / BUCK <nl> osquery_gentable_cxx_library ( <nl> " windows / chocolatey_packages . table " , <nl> " windows " , <nl> ) , <nl> + ( <nl> + " windows / connectivity . table " , <nl> + " windows " , <nl> + ) , <nl> ( <nl> " windows / logical_drives . table " , <nl> " windows " , <nl> mmm a / specs / CMakeLists . txt <nl> ppp b / specs / CMakeLists . txt <nl> function ( generateNativeTables ) <nl> " windows / physical_disk_performance . table : windows " <nl> " windows / autoexec . table : windows " <nl> " windows / windows_security_products . table : windows " <nl> + " windows / connectivity . table : windows " <nl> " yara / yara_events . table : linux , macos " <nl> " yara / yara . table : linux , macos , freebsd " <nl> ) <nl> new file mode 100644 <nl> index 0000000000 . . d34e3b0c51 <nl> mmm / dev / null <nl> ppp b / specs / windows / connectivity . table <nl> <nl> + table_name ( " connectivity " ) <nl> + description ( " Provides the overall system ' s network state . " ) <nl> + schema ( [ <nl> + Column ( " disconnected " , INTEGER , " True if the all interfaces are not connected to any network " ) , <nl> + Column ( " ipv4_no_traffic " , INTEGER , " True if any interface is connected via IPv4 , but has seen no traffic " ) , <nl> + Column ( " ipv6_no_traffic " , INTEGER , " True if any interface is connected via IPv6 , but has seen no traffic " ) , <nl> + Column ( " ipv4_subnet " , INTEGER , " True if any interface is connected to the local subnet via IPv4 " ) , <nl> + Column ( " ipv4_local_network " , INTEGER , " True if any interface is connected to a routed network via IPv4 " ) , <nl> + Column ( " ipv4_internet " , INTEGER , " True if any interface is connected to the Internet via IPv4 " ) , <nl> + Column ( " ipv6_subnet " , INTEGER , " True if any interface is connected to the local subnet via IPv6 " ) , <nl> + Column ( " ipv6_local_network " , INTEGER , " True if any interface is connected to a routed network via IPv6 " ) , <nl> + Column ( " ipv6_internet " , INTEGER , " True if any interface is connected to the Internet via IPv6 " ) , <nl> + ] ) <nl> + implementation ( " connectivity @ genConnectivity " ) <nl> + examples ( [ <nl> + " select * from connectivity " , <nl> + " select ipv4_internet from connectivity " , <nl> + ] ) <nl> mmm a / tests / integration / tables / BUCK <nl> ppp b / tests / integration / tables / BUCK <nl> osquery_cxx_test ( <nl> " autoexec . cpp " , <nl> " certificates . cpp " , <nl> " chocolatey_packages . cpp " , <nl> + " connectivity . cpp " , <nl> " cpu_info . cpp " , <nl> " disk_info . cpp " , <nl> " drivers . cpp " , <nl> mmm a / tests / integration / tables / CMakeLists . txt <nl> ppp b / tests / integration / tables / CMakeLists . txt <nl> function ( generateTestsIntegrationTablesTestsTest ) <nl> authenticode . cpp <nl> autoexec . cpp <nl> certificates . cpp <nl> + connectivity . cpp <nl> chocolatey_packages . cpp <nl> cpu_info . cpp <nl> disk_info . cpp <nl> new file mode 100644 <nl> index 0000000000 . . 74a9f48ab9 <nl> mmm / dev / null <nl> ppp b / tests / integration / tables / connectivity . cpp <nl> <nl> + / * * <nl> + * Copyright ( c ) 2014 - present , Facebook , Inc . <nl> + * All rights reserved . <nl> + * <nl> + * This source code is licensed as defined on the LICENSE file found in the <nl> + * root directory of this source tree . 
<nl> + * / <nl> + <nl> + / / Sanity check integration test for connectivity <nl> + / / Spec file : specs / windows / connectivity . table <nl> + <nl> + # include < osquery / tests / integration / tables / helper . h > <nl> + <nl> + namespace osquery { <nl> + namespace table_tests { <nl> + <nl> + class connectivity : public testing : : Test { <nl> + protected : <nl> + void SetUp ( ) override { <nl> + setUpEnvironment ( ) ; <nl> + } <nl> + } ; <nl> + <nl> + TEST_F ( connectivity , test_sanity ) { <nl> + auto const data = execute_query ( " select * from connectivity " ) ; <nl> + <nl> + ASSERT_EQ ( data . size ( ) , 1ul ) ; <nl> + <nl> + ValidationMap row_map = { <nl> + { " disconnected " , IntType } , <nl> + { " ipv4_no_traffic " , IntType } , <nl> + { " ipv6_no_traffic " , IntType } , <nl> + { " ipv4_subnet " , IntType } , <nl> + { " ipv4_local_network " , IntType } , <nl> + { " ipv4_internet " , IntType } , <nl> + { " ipv6_subnet " , IntType } , <nl> + { " ipv6_local_network " , IntType } , <nl> + { " ipv6_internet " , IntType } , <nl> + } ; <nl> + <nl> + validate_rows ( data , row_map ) ; <nl> + } <nl> + <nl> + } / / namespace table_tests <nl> + } / / namespace osquery <nl>
|
(Windows) New table: connectivity()
|
osquery/osquery
|
06dd05cd33538fa04797fe500cf5c29b090b9283
|
2019-10-04T19:18:15Z
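The connectivity.cpp in this diff acquires an INetworkListManager through CoCreateInstance and must call Release() on every exit path, which the commit does by hand before each return. A minimal sketch of the same acquisition with a scoped releaser, so early returns cannot leak the interface (illustration only, not part of the commit; it assumes COM has already been initialized by the host process):

#include <netlistmgr.h>
#include <windows.h>
#include <memory>

// Scoped COM release: calls Release() when the owning pointer goes out of
// scope, covering every return path without repeating mgr->Release().
struct ComReleaser {
  void operator()(IUnknown* p) const { if (p) p->Release(); }
};

// Assumption: CoInitializeEx has already been called by the caller.
bool getConnectivity(NLM_CONNECTIVITY& out) {
  INetworkListManager* raw = nullptr;
  HRESULT res = CoCreateInstance(CLSID_NetworkListManager,
                                 NULL,
                                 CLSCTX_ALL,
                                 IID_INetworkListManager,
                                 reinterpret_cast<void**>(&raw));
  if (res != S_OK) {
    return false;
  }
  std::unique_ptr<INetworkListManager, ComReleaser> mgr(raw);
  return mgr->GetConnectivity(&out) == S_OK;  // mgr released automatically
}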
|
mmm a / src / clustering / administration / main / serve . cc <nl> ppp b / src / clustering / administration / main / serve . cc <nl> <nl> # include " clustering / administration / persist . hpp " <nl> # include " clustering / administration / proc_stats . hpp " <nl> # include " clustering / administration / reactor_driver . hpp " <nl> + # include " clustering / administration / sys_stats . hpp " <nl> # include " extproc / pool . hpp " <nl> # include " memcached / tcp_conn . hpp " <nl> # include " mock / dummy_protocol . hpp " <nl> try { <nl> <nl> proc_stats_collector_t proc_stats_collector ( & proc_stats_collection ) ; <nl> <nl> + perfmon_collection_t sys_stats_collection ; <nl> + perfmon_membership_t sys_stats_membership ( & get_global_perfmon_collection ( ) , & sys_stats_collection , " sys " ) ; <nl> + <nl> + sys_stats_collector_t sys_stats_collector ( filepath , & sys_stats_collection ) ; <nl> + <nl> scoped_ptr_t < initial_joiner_t > initial_joiner ; <nl> if ( ! joins . empty ( ) ) { <nl> initial_joiner . init ( new initial_joiner_t ( & connectivity_cluster , & connectivity_cluster_run , joins ) ) ; <nl> new file mode 100644 <nl> index 00000000000 . . b4c3ccf1a24 <nl> mmm / dev / null <nl> ppp b / src / clustering / administration / sys_stats . cc <nl> <nl> + # include " clustering / administration / sys_stats . hpp " <nl> + # include " errors . hpp " <nl> + <nl> + # include < sys / statvfs . h > <nl> + <nl> + struct disk_stat_t { <nl> + uint64_t disk_space_free ; <nl> + uint64_t disk_space_used ; <nl> + uint64_t disk_space_total ; <nl> + <nl> + disk_stat_t ( const std : : string & filepath ) { <nl> + int res ; <nl> + / / get disk space data using statvfs <nl> + struct statvfs fsdata ; <nl> + <nl> + if ( filepath = = " " ) { <nl> + res = statvfs ( " . " , & fsdata ) ; <nl> + } else { <nl> + res = statvfs ( filepath . c_str ( ) , & fsdata ) ; <nl> + } <nl> + if ( res < 0 ) { <nl> + throw std : : runtime_error ( strprintf ( " Failed to statvfs with filepath ' % s ' : % s " <nl> + " ( errno = % d ) " , filepath . c_str ( ) , strerror ( errno ) , errno ) ) ; <nl> + } <nl> + <nl> + disk_space_total = fsdata . f_bsize * fsdata . f_blocks / KILOBYTE ; <nl> + disk_space_free = fsdata . f_bsize * fsdata . f_bfree / KILOBYTE ; <nl> + disk_space_used = disk_space_total - disk_space_free ; <nl> + } <nl> + } ; <nl> + <nl> + sys_stats_collector_t : : sys_stats_collector_t ( const std : : string & path , perfmon_collection_t * stats ) : <nl> + instantaneous_stats_collector ( path ) , <nl> + stats_membership ( stats , & instantaneous_stats_collector , NULL ) <nl> + { <nl> + } <nl> + <nl> + sys_stats_collector_t : : instantaneous_stats_collector_t : : instantaneous_stats_collector_t ( const std : : string & path ) : <nl> + filepath ( path ) <nl> + { <nl> + } <nl> + <nl> + void * sys_stats_collector_t : : instantaneous_stats_collector_t : : begin_stats ( ) { <nl> + return NULL ; <nl> + } <nl> + <nl> + void sys_stats_collector_t : : instantaneous_stats_collector_t : : visit_stats ( void * ) { <nl> + } <nl> + <nl> + perfmon_result_t * sys_stats_collector_t : : instantaneous_stats_collector_t : : end_stats ( void * ) { <nl> + perfmon_result_t * result ; <nl> + perfmon_result_t : : alloc_map_result ( & result ) ; <nl> + <nl> + disk_stat_t disk_stat = disk_stat_t ( filepath ) ; <nl> + result - > insert ( " global_disk_space_free " , new perfmon_result_t ( strprintf ( " % lu " , disk_stat . 
disk_space_free ) ) ) ; <nl> + result - > insert ( " global_disk_space_used " , new perfmon_result_t ( strprintf ( " % lu " , disk_stat . disk_space_used ) ) ) ; <nl> + result - > insert ( " global_disk_space_total " , new perfmon_result_t ( strprintf ( " % lu " , disk_stat . disk_space_total ) ) ) ; <nl> + <nl> + return result ; <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 86bb334cd32 <nl> mmm / dev / null <nl> ppp b / src / clustering / administration / sys_stats . hpp <nl> <nl> + # ifndef CLUSTERING_ADMINISTRATION_SYS_STATS_HPP_ <nl> + # define CLUSTERING_ADMINISTRATION_SYS_STATS_HPP_ <nl> + <nl> + # include " perfmon / perfmon . hpp " <nl> + <nl> + # include < string > <nl> + <nl> + / * Class to get system statistics , such as disk space usage . <nl> + Similar to proc_stats_collector_t , but not based on / proc . * / <nl> + <nl> + class sys_stats_collector_t : public home_thread_mixin_t { <nl> + public : <nl> + explicit sys_stats_collector_t ( const std : : string & path , perfmon_collection_t * stats ) ; <nl> + <nl> + private : <nl> + / / similar to proc_stats_collector_t : : instantaneous_stats_collector_t <nl> + class instantaneous_stats_collector_t : public perfmon_t { <nl> + public : <nl> + instantaneous_stats_collector_t ( const std : : string & path ) ; <nl> + void * begin_stats ( ) ; <nl> + void visit_stats ( void * ) ; <nl> + perfmon_result_t * end_stats ( void * ) ; <nl> + private : <nl> + std : : string filepath ; <nl> + } ; <nl> + <nl> + instantaneous_stats_collector_t instantaneous_stats_collector ; <nl> + perfmon_membership_t stats_membership ; <nl> + } ; <nl> + <nl> + # endif / * CLUSTERING_ADMINISTRATION_SYS_STATS_HPP_ * / <nl>
|
Merge commit '8971c82' into joe_rdbp_parallelism
|
rethinkdb/rethinkdb
|
7dd1269131bb080deee83f059dd6554c5a2ffdac
|
2012-08-21T18:08:04Z
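The disk_stat_t in this diff derives total, free, and used space from statvfs(), falling back to "." when no path is given. A standalone sketch of the same computation (assumptions: KILOBYTE is 1024 as in the RethinkDB source, and the commit's pairing of f_bsize with f_blocks is kept, although POSIX pairs f_blocks with f_frsize):

#include <sys/statvfs.h>
#include <cerrno>
#include <cstdint>
#include <cstring>
#include <stdexcept>
#include <string>

struct DiskStat {
  uint64_t total_kb;
  uint64_t free_kb;
  uint64_t used_kb;
};

// Disk-space figures, in KB, for the filesystem containing `path`;
// an empty path falls back to the current directory, as in the commit.
DiskStat diskStat(const std::string& path) {
  struct statvfs fs;
  if (statvfs(path.empty() ? "." : path.c_str(), &fs) < 0) {
    throw std::runtime_error(std::string("statvfs failed: ") + strerror(errno));
  }
  DiskStat s;
  // Mirrors the commit's f_bsize * f_blocks; strictly POSIX code would use
  // f_frsize here (assumption carried over from the original).
  s.total_kb = uint64_t(fs.f_bsize) * fs.f_blocks / 1024;
  s.free_kb  = uint64_t(fs.f_bsize) * fs.f_bfree  / 1024;
  s.used_kb  = s.total_kb - s.free_kb;
  return s;
}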
|
mmm a / 3rdParty / V8 / v8 <nl> ppp b / 3rdParty / V8 / v8 <nl> @ @ - 1 + 1 @ @ <nl> - Subproject commit d12a9da7103c17bf14fe2fb42749a2ab1e5dd33f <nl> + Subproject commit f2d3d92e0055c21dd3f8e8473d7921a5cce5a36f <nl>
|
Reverted mistaken V8 change.
|
arangodb/arangodb
|
290f6c182db5e8928ffccecbe093adc4e5747495
|
2017-02-24T21:07:42Z
|
mmm a / doc / manual - src / en / Makefile . am <nl> ppp b / doc / manual - src / en / Makefile . am <nl> <nl> SPHINXOPTS = <nl> SPHINXBUILD = sphinx - build <nl> PAPER = <nl> - BUILDDIR = . . / . . / manual / en <nl> + BUILDDIR = $ ( top_srcdir ) / doc / manual / en <nl> <nl> # Internal variables . <nl> PAPEROPT_a4 = - D latex_paper_size = a4 <nl> HTMLINSTALLDIR = $ ( DESTDIR ) $ ( docdir ) / manual / en <nl> install - data - local : $ ( HTML ) <nl> mkdir - p $ ( HTMLINSTALLDIR ) & & \ <nl> cp - r $ ( BUILDDIR ) / html $ ( HTMLINSTALLDIR ) & & \ <nl> + chmod - R u + w $ ( HTMLINSTALLDIR ) & & \ <nl> rm - f $ ( HTMLINSTALLDIR ) / html / . buildinfo <nl> <nl> uninstall - local : <nl> mmm a / doc / manual - src / ru / Makefile . am <nl> ppp b / doc / manual - src / ru / Makefile . am <nl> <nl> SPHINXOPTS = <nl> SPHINXBUILD = sphinx - build <nl> PAPER = <nl> - BUILDDIR = . . / . . / manual / ru <nl> + BUILDDIR = $ ( top_srcdir ) / doc / manual / ru <nl> <nl> # Internal variables . <nl> PAPEROPT_a4 = - D latex_paper_size = a4 <nl> HTMLINSTALLDIR = $ ( DESTDIR ) $ ( docdir ) / manual / ru <nl> install - data - local : $ ( HTML ) <nl> mkdir - p $ ( HTMLINSTALLDIR ) & & \ <nl> cp - r $ ( BUILDDIR ) / html $ ( HTMLINSTALLDIR ) & & \ <nl> + chmod - R u + w $ ( HTMLINSTALLDIR ) & & \ <nl> rm - f $ ( HTMLINSTALLDIR ) / html / . buildinfo <nl> <nl> uninstall - local : <nl>
|
Fixed `make distcheck` error
|
aria2/aria2
|
b4f3f414644312e6578437675d4483414174b22f
|
2012-05-12T10:19:38Z
|
mmm a / html / admin / js / views / collectionInfoView . js <nl> ppp b / html / admin / js / views / collectionInfoView . js <nl> var collectionInfoView = Backbone . View . extend ( { <nl> values : collValues <nl> } ] ; <nl> } , <nl> + roundNumber : function ( number , n ) { <nl> + var faktor ; <nl> + faktor = Math . pow ( 10 , n ) ; <nl> + var returnVal = ( Math . round ( number * faktor ) / faktor ) ; <nl> + return returnVal ; <nl> + } , <nl> appendFigures : function ( ) { <nl> var cssClass = ' modal - text ' ; <nl> <nl> var collectionInfoView = Backbone . View . extend ( { <nl> ' < tr > ' + <nl> ' < th class = " ' + cssClass + ' " > Datafiles < / th > ' + <nl> ' < th class = " ' + cssClass + ' " > ' + this . data . figures . datafiles . count + ' < / th > ' + <nl> - ' < th class = " ' + cssClass + ' " > ' + this . data . figures . datafiles . fileSize / 1024 / 1024 + ' < / th > ' + <nl> + ' < th class = " ' + cssClass + ' " > ' + <nl> + this . roundNumber ( this . data . figures . datafiles . fileSize / 1024 / 1024 , 2 ) + <nl> + ' < / th > ' + <nl> ' < / tr > ' + <nl> ' < tr > ' + <nl> ' < th class = " ' + cssClass + ' " > Journals < / th > ' + <nl> ' < th class = " ' + cssClass + ' " > ' + this . data . figures . journals . count + ' < / th > ' + <nl> - ' < th class = " ' + cssClass + ' " > ' + this . data . figures . journals . fileSize / 1024 / 1024 + ' < / th > ' + <nl> + ' < th class = " ' + cssClass + ' " > ' + <nl> + this . roundNumber ( this . data . figures . journals . fileSize / 1024 / 1024 , 2 ) + <nl> + ' < / th > ' + <nl> ' < / tr > ' + <nl> ' < / table > ' + <nl> <nl> var collectionInfoView = Backbone . View . extend ( { <nl> ' < tr class = " figuresHeader " > ' + <nl> ' < th > Type < / th > ' + <nl> ' < th > Count < / th > ' + <nl> - ' < th > Size < / th > ' + <nl> + ' < th > Size ( MB ) < / th > ' + <nl> ' < th > Deletion < / th > ' + <nl> ' < / tr > ' + <nl> ' < tr > ' + <nl> ' < th class = " ' + cssClass + ' " > Alive < / th > ' + <nl> ' < th class = " ' + cssClass + ' " > ' + this . data . figures . alive . count + ' < / th > ' + <nl> - ' < th class = " ' + cssClass + ' " > ' + this . data . figures . alive . size + ' < / th > ' + <nl> + ' < th class = " ' + cssClass + ' " > ' + <nl> + this . roundNumber ( this . data . figures . alive . size / 1024 / 1024 , 2 ) + <nl> + ' < / th > ' + <nl> ' < th class = " ' + cssClass + ' " > - < / th > ' + <nl> ' < / tr > ' + <nl> ' < tr > ' + <nl> ' < th class = " ' + cssClass + ' " > Dead < / th > ' + <nl> ' < th class = " ' + cssClass + ' " > ' + this . data . figures . dead . count + ' < / th > ' + <nl> - ' < th class = " ' + cssClass + ' " > ' + this . data . figures . dead . size + ' < / th > ' + <nl> + ' < th class = " ' + cssClass + ' " > ' + <nl> + this . roundNumber ( this . data . figures . dead . size / 1024 / 1024 , 2 ) + <nl> + ' < / th > ' + <nl> ' < th class = " ' + cssClass + ' " > ' + this . data . figures . dead . deletion + ' < / th > ' + <nl> ' < / tr > ' + <nl> ' < / table > ' <nl> var collectionInfoView = Backbone . View . extend ( { <nl> $ ( ' # show - collection - sync ' ) . text ( ' true ' ) ; <nl> } <nl> var calculatedSize = data . journalSize / 1024 / 1024 ; <nl> - $ ( ' # show - collection - size ' ) . text ( calculatedSize ) ; <nl> + $ ( ' # show - collection - size ' ) . text ( this . roundNumber ( calculatedSize , 2 ) ) ; <nl> $ ( ' # show - collection - rev ' ) . text ( this . revision . revision ) ; <nl> <nl> this . appendIndex ( ) ; <nl> mmm a / html / admin / js / views / queryView . 
js <nl> ppp b / html / admin / js / views / queryView . js <nl> var queryView = Backbone . View . extend ( { <nl> } , <nl> <nl> events : { <nl> - ' click # submitQueryIcon ' : ' submitQuery ' , <nl> - ' click # submitQueryButton ' : ' submitQuery ' , <nl> - ' click # commentText ' : ' commentText ' , <nl> - ' click # undoText ' : ' undoText ' , <nl> - ' click # redoText ' : ' redoText ' , <nl> - ' click # smallOutput ' : ' smallOutput ' , <nl> - ' click # bigOutput ' : ' bigOutput ' , <nl> - ' click # clearOutput ' : ' clearOutput ' , <nl> - ' click # addAQL ' : ' addAQL ' , <nl> - ' click # editAQL ' : ' editAQL ' , <nl> - ' click # save - new - query ' : ' saveAQL ' , <nl> - ' click # save - edit - query ' : ' saveAQL ' , <nl> - ' click # delete - edit - query ' : ' showDeleteField ' , <nl> - ' click # confirmDeleteQuery ' : ' deleteAQL ' , <nl> - ' click # abortDeleteQuery ' : ' hideDeleteField ' , <nl> - " keydown # new - query - name " : " listenKey " , <nl> + ' click # submitQueryIcon ' : ' submitQuery ' , <nl> + ' click # submitQueryButton ' : ' submitQuery ' , <nl> + ' click # commentText ' : ' commentText ' , <nl> + ' click # undoText ' : ' undoText ' , <nl> + ' click # redoText ' : ' redoText ' , <nl> + ' click # smallOutput ' : ' smallOutput ' , <nl> + ' click # bigOutput ' : ' bigOutput ' , <nl> + ' click # clearOutput ' : ' clearOutput ' , <nl> + ' click # addAQL ' : ' addAQL ' , <nl> + ' click # editAQL ' : ' editAQL ' , <nl> + ' click # save - new - query ' : ' saveAQL ' , <nl> + ' click # save - edit - query ' : ' saveAQL ' , <nl> + ' click # delete - edit - query ' : ' showDeleteField ' , <nl> + ' click # confirmDeleteQuery ' : ' deleteAQL ' , <nl> + ' click # abortDeleteQuery ' : ' hideDeleteField ' , <nl> + " keydown # new - query - name " : " listenKey " , <nl> ' click # queryModalSelect option ' : " updateEditSelect " , <nl> - ' click # querySelect option ' : ' importSelected ' <nl> + ' click # querySelect option ' : ' importSelected ' <nl> } , <nl> listenKey : function ( e ) { <nl> if ( e . keyCode = = = 13 ) { <nl> var queryView = Backbone . View . extend ( { <nl> updateEditSelect : function ( ) { <nl> var value = this . getCustomQueryValueByName ( $ ( ' # queryModalSelect ' ) . val ( ) ) ; <nl> $ ( ' # edit - aql - textarea ' ) . val ( value ) ; <nl> + $ ( ' # edit - aql - textarea ' ) . focus ( ) ; <nl> } , <nl> getCustomQueryValueByName : function ( qName ) { <nl> var returnVal ; <nl> var queryView = Backbone . View . extend ( { <nl> } ) ; <nl> } , <nl> renderSelectboxes : function ( modal ) { <nl> + this . sortQueries ( ) ; <nl> var selector = ' ' ; <nl> if ( modal = = = true ) { <nl> selector = ' # queryModalSelect ' ; <nl> var queryView = Backbone . View . extend ( { <nl> cursorRange . end . column = cursorRange . end . column + 2 ; <nl> editor . getSelection ( ) . setSelectionRange ( cursorRange , false ) ; <nl> } , <nl> + sortQueries : function ( ) { <nl> + this . queries = _ . sortBy ( this . queries , ' name ' ) ; <nl> + this . customQueries = _ . sortBy ( this . customQueries , ' name ' ) ; <nl> + } , <nl> submitQuery : function ( ) { <nl> var self = this ; <nl> var editor = ace . edit ( " aqlEditor " ) ; <nl>
|
added queryfocus + round option
|
arangodb/arangodb
|
309e66d2f436e0e8b6a01f1e9f7784b5f5f256be
|
2013-07-18T11:24:52Z
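The roundNumber helper added in this diff rounds to n decimal places by scaling with a power of ten, rounding, and unscaling. The same technique as a standalone C++ sketch (std::round standing in for JavaScript's Math.round; not part of the commit):

#include <cmath>
#include <iostream>

// Round `number` to `n` decimal places by scale, round, unscale, mirroring
// the JS helper above. Subject to the usual binary floating-point limits.
double roundNumber(double number, int n) {
  const double factor = std::pow(10.0, n);
  return std::round(number * factor) / factor;
}

int main() {
  std::cout << roundNumber(1048576.0 / 1024 / 1024, 2) << "\n";  // 1
  std::cout << roundNumber(3.14159, 2) << "\n";                  // 3.14
  return 0;
}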
|
mmm a / tests / cubegeom . c <nl> ppp b / tests / cubegeom . c <nl> int main ( int argc , char * argv [ ] ) <nl> " void main ( void ) \ n " <nl> " { \ n " <nl> " gl_Position = ftransform ( ) ; \ n " <nl> - " gl_TexCoord [ 0 ] . xy = gl_MultiTexCoord0 . xy / 100 + texgenscroll . xy ; \ n " / / added / 100 here <nl> - " gl_TexCoord [ 1 ] . xy = gl_MultiTexCoord1 . xy / 100 * 3 . 051851e - 05 ; \ n " <nl> + " gl_TexCoord [ 0 ] . xy = gl_MultiTexCoord0 . xy / 100 . 0 + texgenscroll . xy ; \ n " / / added / 100 here <nl> + " gl_TexCoord [ 1 ] . xy = gl_MultiTexCoord1 . xy / 100 . 0 * 3 . 051851e - 05 ; \ n " <nl> " } \ n " ; <nl> const char * fragmentShader = " uniform vec4 colorparams ; \ n " <nl> " uniform sampler2D diffusemap , lightmap ; \ n " <nl>
|
fix cubegeom shader compilation error
|
emscripten-core/emscripten
|
25893baacf19a15a944ad57721a07b04380d733b
|
2012-04-24T18:44:48Z
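The cubegeom fix above edits GLSL embedded in C strings: GLSL, unlike C and C++, has no implicit int-to-float conversion, so dividing a vec2 by the integer literal 100 fails to compile, and 100.0 is required. The nearest C++ pitfall is related but different: the code compiles, yet integer division silently truncates. A sketch of that trap (illustration only, not from the commit):

#include <iostream>

int main() {
  double scaled_wrong = 3 / 100;    // integer division yields 0, then widens to 0.0
  double scaled_right = 3 / 100.0;  // the float literal forces floating-point division
  std::cout << scaled_wrong << " vs " << scaled_right << "\n";  // prints: 0 vs 0.03
  return 0;
}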
|
mmm a / include / swift / Parse / Parser . h <nl> ppp b / include / swift / Parse / Parser . h <nl> tokenizeWithTrivia ( const LangOptions & LangOpts , <nl> const SourceManager & SM , <nl> unsigned BufferID , <nl> unsigned Offset = 0 , <nl> - unsigned EndOffset = 0 ) ; <nl> + unsigned EndOffset = 0 , <nl> + DiagnosticEngine * Diags = nullptr ) ; <nl> } / / end namespace swift <nl> <nl> # endif <nl> mmm a / include / swift / Subsystems . h <nl> ppp b / include / swift / Subsystems . h <nl> namespace swift { <nl> std : : vector < Token > tokenize ( const LangOptions & LangOpts , <nl> const SourceManager & SM , unsigned BufferID , <nl> unsigned Offset = 0 , unsigned EndOffset = 0 , <nl> + DiagnosticEngine * Diags = nullptr , <nl> bool KeepComments = true , <nl> bool TokenizeInterpolatedString = true , <nl> ArrayRef < Token > SplitTokens = ArrayRef < Token > ( ) ) ; <nl> mmm a / lib / Parse / Parser . cpp <nl> ppp b / lib / Parse / Parser . cpp <nl> namespace swift { <nl> template < typename DF > <nl> void tokenize ( const LangOptions & LangOpts , const SourceManager & SM , <nl> unsigned BufferID , unsigned Offset , unsigned EndOffset , <nl> + DiagnosticEngine * Diags , <nl> CommentRetentionMode RetainComments , <nl> TriviaRetentionMode TriviaRetention , <nl> bool TokenizeInterpolatedString , ArrayRef < Token > SplitTokens , <nl> void tokenize ( const LangOptions & LangOpts , const SourceManager & SM , <nl> if ( Offset = = 0 & & EndOffset = = 0 ) <nl> EndOffset = SM . getRangeForBuffer ( BufferID ) . getByteLength ( ) ; <nl> <nl> - Lexer L ( LangOpts , SM , BufferID , / * Diags = * / nullptr , / * InSILMode = * / false , <nl> + Lexer L ( LangOpts , SM , BufferID , Diags , / * InSILMode = * / false , <nl> RetainComments , TriviaRetention , Offset , EndOffset ) ; <nl> <nl> auto TokComp = [ & ] ( const Token & A , const Token & B ) { <nl> static void getStringPartTokens ( const Token & Tok , const LangOptions & LangOpts , <nl> <nl> std : : vector < Token > NewTokens = swift : : tokenize ( LangOpts , SM , BufID , <nl> Offset , EndOffset , <nl> + / * Diags = * / nullptr , <nl> / * KeepComments = * / true ) ; <nl> Toks . insert ( Toks . end ( ) , NewTokens . begin ( ) , NewTokens . end ( ) ) ; <nl> <nl> static void getStringPartTokens ( const Token & Tok , const LangOptions & LangOpts , <nl> std : : vector < Token > swift : : tokenize ( const LangOptions & LangOpts , <nl> const SourceManager & SM , unsigned BufferID , <nl> unsigned Offset , unsigned EndOffset , <nl> + DiagnosticEngine * Diags , <nl> bool KeepComments , <nl> bool TokenizeInterpolatedString , <nl> ArrayRef < Token > SplitTokens ) { <nl> std : : vector < Token > Tokens ; <nl> <nl> tokenize ( LangOpts , SM , BufferID , Offset , EndOffset , <nl> + Diags , <nl> KeepComments ? 
CommentRetentionMode : : ReturnAsTokens <nl> : CommentRetentionMode : : AttachToNextToken , <nl> TriviaRetentionMode : : WithoutTrivia , TokenizeInterpolatedString , <nl> std : : vector < Token > swift : : tokenize ( const LangOptions & LangOpts , <nl> std : : vector < std : : pair < RC < syntax : : RawSyntax > , syntax : : AbsolutePosition > > <nl> swift : : tokenizeWithTrivia ( const LangOptions & LangOpts , const SourceManager & SM , <nl> unsigned BufferID , unsigned Offset , <nl> - unsigned EndOffset ) { <nl> + unsigned EndOffset , <nl> + DiagnosticEngine * Diags ) { <nl> std : : vector < std : : pair < RC < syntax : : RawSyntax > , syntax : : AbsolutePosition > > <nl> Tokens ; <nl> syntax : : AbsolutePosition RunningPos ; <nl> <nl> tokenize ( <nl> LangOpts , SM , BufferID , Offset , EndOffset , <nl> + Diags , <nl> CommentRetentionMode : : AttachToNextToken , TriviaRetentionMode : : WithTrivia , <nl> / * TokenizeInterpolatedString = * / false , <nl> / * SplitTokens = * / ArrayRef < Token > ( ) , <nl> new file mode 100644 <nl> index 000000000000 . . dfd1577300bb <nl> mmm / dev / null <nl> ppp b / test / Syntax / lexer_invalid_nul . swift <nl> <nl> + / / RUN : cat % s | tr ' \ 132 ' ' \ 0 ' > % t <nl> + / / RUN : % swift - syntax - test - input - source - filename % t - dump - full - tokens 2 > & 1 > / dev / null | % FileCheck % t <nl> + <nl> + / / CHECK : 5 : 18 : warning : nul character embedded in middle of file <nl> + let a = 3 / / nul ( Z ) <nl> \ No newline at end of file <nl> mmm a / tools / swift - syntax - test / swift - syntax - test . cpp <nl> ppp b / tools / swift - syntax - test / swift - syntax - test . cpp <nl> namespace { <nl> int getTokensFromFile ( unsigned BufferID , <nl> LangOptions & LangOpts , <nl> SourceManager & SourceMgr , <nl> - DiagnosticEngine & Diags , <nl> + swift : : DiagnosticEngine & Diags , <nl> std : : vector < std : : pair < RC < syntax : : RawSyntax > , <nl> syntax : : AbsolutePosition > > & Tokens ) { <nl> - Tokens = tokenizeWithTrivia ( LangOpts , SourceMgr , BufferID ) ; <nl> - return Diags . hadAnyError ( ) ? EXIT_FAILURE : EXIT_SUCCESS ; <nl> + Tokens = tokenizeWithTrivia ( LangOpts , SourceMgr , BufferID , <nl> + / * Offset = * / 0 , / * EndOffset = * / 0 , <nl> + & Diags ) ; <nl> + return EXIT_SUCCESS ; <nl> } <nl> <nl> <nl> int doFullLexRoundTrip ( const StringRef InputFilename ) { <nl> TokAndPos . first - > print ( llvm : : outs ( ) , { } ) ; <nl> } <nl> <nl> - return Diags . hadAnyError ( ) ? EXIT_FAILURE : EXIT_SUCCESS ; <nl> + return EXIT_SUCCESS ; <nl> } <nl> <nl> int doDumpRawTokenSyntax ( const StringRef InputFilename ) { <nl> int doDumpRawTokenSyntax ( const StringRef InputFilename ) { <nl> llvm : : outs ( ) < < " \ n " ; <nl> } <nl> <nl> - return Diags . hadAnyError ( ) ? EXIT_FAILURE : EXIT_SUCCESS ; <nl> + return EXIT_SUCCESS ; <nl> } <nl> <nl> int doFullParseRoundTrip ( const char * MainExecutablePath , <nl> mmm a / unittests / Parse / LexerTests . cpp <nl> ppp b / unittests / Parse / LexerTests . cpp <nl> class LexerTest : public : : testing : : Test { <nl> if ( KeepEOF ) <nl> Toks = tokenizeAndKeepEOF ( BufID ) ; <nl> else <nl> - Toks = tokenize ( LangOpts , SourceMgr , BufID , 0 , 0 , KeepComments ) ; <nl> + Toks = tokenize ( LangOpts , SourceMgr , BufID , 0 , 0 , / * Diags = * / nullptr , KeepComments ) ; <nl> EXPECT_EQ ( ExpectedTokens . size ( ) , Toks . size ( ) ) ; <nl> for ( unsigned i = 0 , e = ExpectedTokens . size ( ) ; i ! = e ; + + i ) { <nl> EXPECT_EQ ( ExpectedTokens [ i ] , Toks [ i ] . 
getKind ( ) ) < < " i = " < < i ; <nl> TEST_F ( LexerTest , TokenizePlaceholder ) { <nl> TEST_F ( LexerTest , NoPlaceholder ) { <nl> auto checkTok = [ & ] ( StringRef Source ) { <nl> unsigned BufID = SourceMgr . addMemBufferCopy ( Source ) ; <nl> - std : : vector < Token > Toks = tokenize ( LangOpts , SourceMgr , BufID , 0 , 0 , false ) ; <nl> + std : : vector < Token > Toks = tokenize ( LangOpts , SourceMgr , BufID , 0 , 0 , / * Diags = * / nullptr , false ) ; <nl> ASSERT_FALSE ( Toks . empty ( ) ) ; <nl> EXPECT_NE ( tok : : identifier , Toks [ 0 ] . getKind ( ) ) ; <nl> } ; <nl> mmm a / unittests / Parse / TokenizerTests . cpp <nl> ppp b / unittests / Parse / TokenizerTests . cpp <nl> class TokenizerTest : public : : testing : : Test { <nl> BufID , <nl> / * Offset = * / 0 , <nl> / * EndOffset = * / 0 , <nl> + / * Diags = * / nullptr , <nl> / * KeepComments = * / true , <nl> / * TokenizeInterpolatedString = * / true , <nl> SplitTokens ) ; <nl>
|
Merge remote-tracking branch 'origin/master' into master-llvm-swift5-transition
|
apple/swift
|
149a29cb4d54434ab71409461585d2b59ee1ab28
|
2018-03-03T23:57:51Z
|
mmm a / test / IRGen / autorelease_optimized_aarch64 . sil <nl> ppp b / test / IRGen / autorelease_optimized_aarch64 . sil <nl> <nl> / / RUN : % swift - gnone - O - disable - legacy - type - info - target arm64 - apple - ios7 - emit - assembly % s - o - | % FileCheck % s <nl> <nl> / / REQUIRES : CODEGENERATOR = AArch64 <nl> + / / REQUIRES : rdar49791522 <nl> <nl> / / rdar : / / 17999904 <nl> <nl> mmm a / test / IRGen / autorelease_optimized_armv7 . sil <nl> ppp b / test / IRGen / autorelease_optimized_armv7 . sil <nl> <nl> / / RUN : % swift - gnone - O - target armv7 - apple - ios7 - disable - legacy - type - info - emit - assembly % s - o - | % FileCheck % s <nl> <nl> / / REQUIRES : CODEGENERATOR = ARM <nl> + / / REQUIRES : rdar49791522 <nl> <nl> / / rdar : / / 17999904 <nl> <nl>
|
[test] Disable failing ARM autorelease optimizations tests
|
apple/swift
|
b851b61d173222456e5ea030928eb75960df106d
|
2019-04-12T17:34:04Z
|
mmm a / xbmc / cores / AudioEngine / Engines / SoftAE / SoftAE . cpp <nl> ppp b / xbmc / cores / AudioEngine / Engines / SoftAE / SoftAE . cpp <nl> bool CSoftAE : : Suspend ( ) <nl> / / This is the only place m_realSuspend gets set true . <nl> / / If you find another one - please call for help . <nl> / / First thing when rewriting : kill this flag and make it generic again . <nl> + m_saveSuspend . Reset ( ) ; <nl> m_realSuspend = true ; <nl> + / / wait until we are looping in ProcessSuspend ( ) <nl> + m_saveSuspend . Wait ( ) ; <nl> m_sink - > Drain ( ) ; <nl> m_sink - > Deinitialize ( ) ; <nl> delete m_sink ; <nl> m_sink = NULL ; <nl> + / / signal anybody , that the sink is closed now <nl> + / / this should help us not to run into deadlocks <nl> + if ( m_closeSink ) <nl> + m_closeEvent . Set ( ) ; <nl> } <nl> / / The device list is now empty and must be reenumerated afterwards . <nl> m_sinkInfoList . clear ( ) ; <nl> void CSoftAE : : Run ( ) <nl> if ( m_reOpen | | restart | | ! m_sink ) <nl> { <nl> CLog : : Log ( LOGDEBUG , " CSoftAE : : Run - Sink restart flagged " ) ; <nl> + / / ProcessSuspending ( ) cannot guarantee that we get our sink back softresumed <nl> + if ( m_sink & & m_softSuspend ) <nl> + { <nl> + m_sink - > SoftResume ( ) ; <nl> + m_softSuspend = false ; <nl> + CLog : : Log ( LOGDEBUG , " CSoftAE : : Run - Soft resumed the sink outside " ) ; <nl> + } <nl> InternalOpenSink ( ) ; <nl> m_isSuspended = false ; / / exit Suspend state <nl> } <nl> inline void CSoftAE : : ProcessSuspend ( ) <nl> } <nl> sinkLock . Leave ( ) ; <nl> } <nl> - <nl> - / / make sure that a outer thread does not have to wait forever <nl> - if ( m_closeSink ) <nl> - { <nl> - InternalCloseSink ( ) ; <nl> - } <nl> + / / Signal that the realSuspend can go on now . <nl> + / / Idea : Outer thread calls Suspend ( ) - but <nl> + / / because of AddPackets does not care about locks , we must make <nl> + / / sure , that our school bus ( AE : : Run ) is currently driving through <nl> + / / some gas station , before we move away the sink . <nl> + if ( m_realSuspend ) <nl> + m_saveSuspend . Set ( ) ; <nl> <nl> / * idle for platform - defined time * / <nl> m_wake . WaitMSec ( SOFTAE_IDLE_WAIT_MSEC ) ; <nl> <nl> - / * check if we need to resume for stream or sound * / <nl> + / * check if we need to resume for stream or sound or somebody wants to open us <nl> + * the suspend checks are only there to : <nl> + * a ) not run out of softSuspend directly when we are sleeping <nl> + * b ) nail ( ! ) the thread during real Suspend into this method <nl> + * Note : It is not enough to check the streams buffer , cause it might not be filled yet <nl> + * We have to check after ProcessSuspending ( ) if the sink is still in softsleep and resume it <nl> + * / <nl> if ( ! m_realSuspend & & ! m_isSuspended & & ( ! m_playingStreams . empty ( ) | | ! m_playing_sounds . empty ( ) ) ) <nl> { <nl> - m_reOpen = ! m_sink - > SoftResume ( ) ; / / sink returns false if it requires reinit <nl> + m_reOpen = m_reOpen | | ! m_sink - > SoftResume ( ) ; / / sink returns false if it requires reinit <nl> sinkIsSuspended = false ; / / sink processing data <nl> - m_softSuspend = false ; / / break suspend loop <nl> + m_softSuspend = false ; / / break suspend loop ( under some conditions ) <nl> CLog : : Log ( LOGDEBUG , " Resumed the Sink " ) ; <nl> break ; <nl> } <nl> mmm a / xbmc / cores / AudioEngine / Engines / SoftAE / SoftAE . h <nl> ppp b / xbmc / cores / AudioEngine / Engines / SoftAE / SoftAE . 
h <nl> class CSoftAE : public IThreadedAE <nl> CEvent m_reOpenEvent ; <nl> CEvent m_wake ; <nl> CEvent m_closeEvent ; <nl> + CEvent m_saveSuspend ; <nl> <nl> CCriticalSection m_runningLock ; / * released when the thread exits * / <nl> CCriticalSection m_streamLock ; / * m_streams lock * / <nl>
|
AE: Wait until we are in a safe state before further deinitializing the sink
|
xbmc/xbmc
|
fa9b89e73da173d4621c7b49e0b9464a268731ca
|
2013-02-09T16:17:15Z
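The SoftAE fix above is a two-event handshake: Suspend() sets m_realSuspend, waits on m_saveSuspend until the engine thread is parked inside ProcessSuspend(), and only then drains and deletes the sink. A minimal sketch of that handshake with standard primitives (a condition variable approximating CEvent; the names are illustrative, not from the commit):

#include <condition_variable>
#include <mutex>

std::mutex m;
std::condition_variable cv;
bool suspendRequested = false;  // set by the caller, read by the worker
bool parked = false;            // set by the worker once it reaches the safe loop

// Caller side (Suspend): request the suspend, then wait until the worker
// confirms it is parked before tearing down the shared resource (the sink).
void suspend() {
  std::unique_lock<std::mutex> lk(m);
  suspendRequested = true;
  cv.wait(lk, [] { return parked; });
  // Safe to drain and deinitialize the sink here: the worker cannot be
  // touching it while it is parked in its suspend loop.
}

// Worker side (one iteration of the ProcessSuspend loop): signal that the
// thread has reached the safe state.
void workerIteration() {
  std::unique_lock<std::mutex> lk(m);
  if (suspendRequested && !parked) {
    parked = true;
    cv.notify_all();
  }
}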
|
mmm a / appveyor . yml <nl> ppp b / appveyor . yml <nl> build_script : <nl> # - cmd : ' echo use_shared_libs : true > > cppan . yml ' <nl> # - cppan - - build . . <nl> - cmake . . - G " % generator % " - DBUILD_TRAINING_TOOLS = Off - DAPPVEYOR = 1 <nl> - - cmake - - build . - - config Release # > bin \ Release \ log . txt 2 > & 1 <nl> + - cmake - - build . - - config Release > bin \ Release \ log . txt 2 > & 1 <nl> <nl> artifacts : <nl> - path : build \ bin \ Release <nl>
|
Update appveyor.yml
|
tesseract-ocr/tesseract
|
7c27088b6498efbfe570c02485472bf60b6bed1d
|
2017-03-14T15:25:32Z
|
new file mode 100644 <nl> index 000000000000 . . 335b1d400a4d <nl> mmm / dev / null <nl> ppp b / jstests / replsets / server_status_metrics . js <nl> <nl> + / * * <nl> + * Test replication metrics <nl> + * / <nl> + function testSecondaryMetrics ( secondary , opCount ) { <nl> + var ss = secondary . getDB ( " test " ) . serverStatus ( ) <nl> + printjson ( ss . metrics ) <nl> + <nl> + assert ( ss . metrics . repl . network . readersCreated > 0 , " no ( oplog ) readers created " ) <nl> + assert ( ss . metrics . repl . network . getmores . num > 0 , " no getmores " ) <nl> + assert ( ss . metrics . repl . network . getmores . totalMillis > 0 , " no getmores time " ) <nl> + assert ( ss . metrics . repl . network . ops = = opCount , " wrong number of ops retrieved " ) <nl> + assert ( ss . metrics . repl . network . bytes > 0 , " zero or missing network bytes " ) <nl> + <nl> + assert ( ss . metrics . repl . buffer . count > = 0 , " buffer count missing " ) <nl> + assert ( ss . metrics . repl . buffer . sizeBytes > = 0 , " size ( bytes ) ] missing " ) <nl> + assert ( ss . metrics . repl . buffer . maxSizeBytes > = 0 , " maxSize ( bytes ) missing " ) <nl> + <nl> + assert ( ss . metrics . repl . preload . docs . num > = 0 , " preload . docs num missing " ) <nl> + assert ( ss . metrics . repl . preload . docs . totalMillis > = 0 , " preload . docs time missing " ) <nl> + assert ( ss . metrics . repl . preload . docs . num > = 0 , " preload . indexes num missing " ) <nl> + assert ( ss . metrics . repl . preload . indexes . totalMillis > = 0 , " preload . indexes time missing " ) <nl> + <nl> + assert ( ss . metrics . repl . apply . batches . num > 0 , " no batches " ) <nl> + assert ( ss . metrics . repl . apply . batches . totalMillis > 0 , " no batch time " ) <nl> + assert ( ss . metrics . repl . apply . ops = = opCount , " wrong number of applied ops " ) <nl> + } <nl> + <nl> + function testPrimaryMetrics ( primary , opCount ) { <nl> + var ss = primary . getDB ( " test " ) . serverStatus ( ) <nl> + printjson ( ss . metrics ) <nl> + <nl> + assert ( ss . metrics . repl . oplog . insert . num = = = opCount + 1 , " wrong oplog insert count " ) <nl> + assert ( ss . metrics . repl . oplog . insert . totalMillis > 0 , " no oplog inserts time " ) <nl> + assert ( ss . metrics . repl . oplog . insertBytes > 0 , " no oplog inserted bytes " ) <nl> + } <nl> + <nl> + var rt = new ReplSetTest ( { name : " server_status_metrics " , nodes : 2 , oplogSize : 100 } ) ; <nl> + rt . startSet ( ) <nl> + rt . initiate ( ) <nl> + <nl> + rt . awaitSecondaryNodes ( ) ; <nl> + <nl> + var secondary = rt . getSecondary ( ) ; <nl> + var primary = rt . getPrimary ( ) ; <nl> + var testDB = primary . getDB ( " test " ) ; <nl> + <nl> + / / add test docs <nl> + for ( x = 0 ; x < 10000 ; x + + ) { testDB . a . insert ( { } ) } <nl> + <nl> + testPrimaryMetrics ( primary , 10000 ) ; <nl> + testDB . getLastError ( 2 ) ; <nl> + <nl> + testSecondaryMetrics ( secondary , 10000 ) ; <nl> + <nl> + testDB . a . update ( { } , { $ set : { d : new Date ( ) } } , true , true ) <nl> + testDB . getLastError ( 2 ) ; <nl> + <nl> + testSecondaryMetrics ( secondary , 20000 ) ; <nl> + <nl> + rt . stopSet ( ) ; <nl> mmm a / src / mongo / base / counter . h <nl> ppp b / src / mongo / base / counter . h <nl> <nl> # include " mongo / platform / cstdint . h " <nl> <nl> namespace mongo { <nl> - <nl> + / * * <nl> + * A 64bit ( atomic ) counter . <nl> + * <nl> + * The constructor allows setting the start value , and increment ( [ int ] ) is used to change it . 
<nl> + * <nl> + * The value can be returned using get ( ) or the ( long long ) function operator . <nl> + * / <nl> class Counter64 { <nl> public : <nl> - <nl> + <nl> + / * * Atomically increment ( or decrement via negative value ) . * / <nl> void increment ( uint64_t n = 1 ) { _counter . addAndFetch ( n ) ; } <nl> + <nl> + / * * Atomically set value . * / <nl> + void set ( uint64_t n ) { _counter . store ( n ) ; } <nl> <nl> + / * * Return the current value * / <nl> long long get ( ) const { return _counter . load ( ) ; } <nl> <nl> operator long long ( ) const { return get ( ) ; } <nl> mmm a / src / mongo / db / namespacestring . h <nl> ppp b / src / mongo / db / namespacestring . h <nl> namespace mongo { <nl> string toString ( ) const { return ns ( ) ; } <nl> <nl> / * * <nl> - * @ return true if ns is ' normal ' . $ used for collections holding index data , which do not contain BSON objects in their records . <nl> - * special case for the local . oplog . $ main ns - - naming it as such was a mistake . <nl> + * @ return true if ns is ' normal ' . A " $ " is used for namespaces holding index data , <nl> + * which do not contain BSON objects in their records . ( " oplog . $ main " is the exception ) <nl> * / <nl> static bool normal ( const char * ns ) { <nl> const char * p = strchr ( ns , ' $ ' ) ; <nl> if ( p = = 0 ) <nl> return true ; <nl> - return strcmp ( ns , " local . oplog . $ main " ) = = 0 ; <nl> + return oplog ( ns ) ; <nl> + } <nl> + <nl> + / * * <nl> + * @ return true if the ns is an oplog one , otherwise false . <nl> + * / <nl> + static bool oplog ( const char * ns ) { <nl> + return StringData ( ns ) = = StringData ( " local . oplog . rs " ) | | StringData ( ns ) = = StringData ( " local . oplog . $ main " ) ; <nl> } <nl> <nl> static bool special ( const char * ns ) { <nl> mmm a / src / mongo / db / oplogreader . h <nl> ppp b / src / mongo / db / oplogreader . h <nl> namespace mongo { <nl> return cursor - > moreInCurrentBatch ( ) ; <nl> } <nl> <nl> + int currentBatchMessageSize ( ) { <nl> + if ( NULL = = cursor - > getMessage ( ) ) <nl> + return 0 ; <nl> + return cursor - > getMessage ( ) - > size ( ) ; <nl> + } <nl> + <nl> / * old mongod ' s can ' t do the await flag . . . * / <nl> bool awaitCapable ( ) { <nl> return cursor - > hasResultFlag ( ResultFlag_AwaitCapable ) ; <nl> mmm a / src / mongo / db / pdfile . cpp <nl> ppp b / src / mongo / db / pdfile . cpp <nl> _ disallow system * manipulations from the database . <nl> # include " mongo / util / hashtab . h " <nl> # include " mongo / util / mmap . h " <nl> # include " mongo / util / processinfo . h " <nl> + # include " mongo / db / stats / timer_stats . h " <nl> + # include " mongo / db / stats / counters . h " <nl> <nl> namespace mongo { <nl> <nl> BOOST_STATIC_ASSERT ( sizeof ( Extent ) - 4 = = 48 + 128 ) ; <nl> BOOST_STATIC_ASSERT ( sizeof ( DataFileHeader ) - 4 = = 8192 ) ; <nl> <nl> + / / The oplog entries inserted <nl> + static TimerStats oplogInsertStats ; <nl> + static ServerStatusMetricField < TimerStats > displayInsertedOplogEntries ( <nl> + " repl . oplog . insert " , <nl> + & oplogInsertStats ) ; <nl> + static Counter64 oplogInsertBytesStats ; <nl> + static ServerStatusMetricField < Counter64 > displayInsertedOplogEntryBytes ( <nl> + " repl . oplog . 
insertBytes " , <nl> + & oplogInsertBytesStats ) ; <nl> + <nl> bool isValidNS ( const StringData & ns ) { <nl> / / TODO : should check for invalid characters <nl> <nl> namespace mongo { <nl> < < " but " < < ns < < " is not capped " , <nl> d - > isCapped ( ) ) ; <nl> <nl> + / / record timing on oplog inserts <nl> + boost : : optional < TimerHolder > insertTimer ; <nl> + / / skip non - oplog collections <nl> + if ( NamespaceString : : oplog ( ns ) ) { <nl> + insertTimer = boost : : in_place ( & oplogInsertStats ) ; <nl> + oplogInsertBytesStats . increment ( len ) ; / / record len of inserted records for oplog <nl> + } <nl> + <nl> int lenWHdr = len + Record : : HeaderSize ; <nl> DiskLoc loc = d - > alloc ( ns , lenWHdr ) ; <nl> verify ( ! loc . isNull ( ) ) ; <nl> mmm a / src / mongo / db / prefetch . cpp <nl> ppp b / src / mongo / db / prefetch . cpp <nl> <nl> # include " mongo / db / jsobj . h " <nl> # include " mongo / db / namespace_details . h " <nl> # include " mongo / db / repl / rs . h " <nl> + # include " mongo / db / stats / timer_stats . h " <nl> + # include " mongo / db / commands / server_status . h " <nl> <nl> namespace mongo { <nl> <nl> / / todo / idea : the prefetcher , when it fetches _id , on an upsert , will see if the record exists . if it does not , <nl> / / at write time , we can just do an insert , which will be faster . <nl> <nl> + / / The count ( of batches ) and time spent fetching pages before application <nl> + / / - - meaning depends on the prefetch behavior : all , _id index , none , etc . ) <nl> + static TimerStats prefetchIndexStats ; <nl> + static ServerStatusMetricField < TimerStats > displayPrefetchIndexPages ( <nl> + " repl . preload . indexes " , <nl> + & prefetchIndexStats ) ; <nl> + static TimerStats prefetchDocStats ; <nl> + static ServerStatusMetricField < TimerStats > displayPrefetchDocPages ( <nl> + " repl . preload . docs " , <nl> + & prefetchDocStats ) ; <nl> + <nl> / / prefetch for an oplog operation <nl> void prefetchPagesForReplicatedOp ( const BSONObj & op ) { <nl> const char * opField ; <nl> namespace mongo { <nl> return ; <nl> case ReplSetImpl : : PREFETCH_ID_ONLY : <nl> { <nl> + TimerHolder timer ( & prefetchIndexStats ) ; <nl> / / on the update op case , the call to prefetchRecordPages will touch the _id index . <nl> / / thus perhaps this option isn ' t very useful ? <nl> int indexNo = nsd - > findIdIndex ( ) ; <nl> namespace mongo { <nl> / / in the process of being built <nl> int indexCount = nsd - > getTotalIndexCount ( ) ; <nl> for ( int indexNo = 0 ; indexNo < indexCount ; indexNo + + ) { <nl> + TimerHolder timer ( & prefetchIndexStats ) ; <nl> / / This will page in all index pages for the given object . <nl> try { <nl> fetchIndexInserters ( / * out * / unusedKeys , <nl> namespace mongo { <nl> void prefetchRecordPages ( const char * ns , const BSONObj & obj ) { <nl> BSONElement _id ; <nl> if ( obj . getObjectID ( _id ) ) { <nl> + TimerHolder timer ( & prefetchDocStats ) ; <nl> BSONObjBuilder builder ; <nl> builder . append ( _id ) ; <nl> BSONObj result ; <nl> mmm a / src / mongo / db / repl . cpp <nl> ppp b / src / mongo / db / repl . cpp <nl> <nl> # include " mongo / db / instance . h " <nl> # include " mongo / db / server_parameters . h " <nl> # include " mongo / db / queryutil . h " <nl> + # include " mongo / base / counter . 
h " <nl> <nl> namespace mongo { <nl> <nl> namespace mongo { <nl> } <nl> } replicationInfoServerStatus ; <nl> <nl> - <nl> class CmdIsMaster : public Command { <nl> public : <nl> virtual bool requiresAuth ( ) { return false ; } <nl> namespace mongo { <nl> return true ; <nl> } <nl> <nl> + / / number of readers created ; <nl> + / / this happens when the source source changes , a reconfig / network - error or the cursor dies <nl> + static Counter64 readersCreatedStats ; <nl> + static ServerStatusMetricField < Counter64 > displayReadersCreated ( <nl> + " repl . network . readersCreated " , <nl> + & readersCreatedStats ) ; <nl> + <nl> OplogReader : : OplogReader ( bool doHandshake ) : <nl> _doHandshake ( doHandshake ) { <nl> <nl> namespace mongo { <nl> <nl> / * TODO : slaveOk maybe shouldn ' t use ? * / <nl> _tailingQueryOptions | = QueryOption_AwaitData ; <nl> + <nl> + readersCreatedStats . increment ( ) ; <nl> } <nl> <nl> bool OplogReader : : commonConnect ( const string & hostName ) { <nl> mmm a / src / mongo / db / repl / bgsync . cpp <nl> ppp b / src / mongo / db / repl / bgsync . cpp <nl> <nl> # include " mongo / db / repl / bgsync . h " <nl> # include " mongo / db / repl / rs_sync . h " <nl> # include " mongo / util / fail_point_service . h " <nl> + # include " mongo / base / counter . h " <nl> + # include " mongo / db / stats / timer_stats . h " <nl> <nl> namespace mongo { <nl> namespace replset { <nl> namespace replset { <nl> BackgroundSync * BackgroundSync : : s_instance = 0 ; <nl> boost : : mutex BackgroundSync : : s_mutex ; <nl> <nl> + / / The number and time spent reading batches off the network <nl> + static TimerStats getmoreReplStats ; <nl> + static ServerStatusMetricField < TimerStats > displayBatchesRecieved ( <nl> + " repl . network . getmores " , <nl> + & getmoreReplStats ) ; <nl> + / / The oplog entries read via the oplog reader <nl> + static Counter64 opsReadStats ; <nl> + static ServerStatusMetricField < Counter64 > displayOpsRead ( " repl . network . ops " , <nl> + & opsReadStats ) ; <nl> + / / The bytes read via the oplog reader <nl> + static Counter64 networkByteStats ; <nl> + static ServerStatusMetricField < Counter64 > displayBytesRead ( " repl . network . bytes " , <nl> + & networkByteStats ) ; <nl> + <nl> + / / The count of items in the buffer <nl> + static Counter64 bufferCountGauge ; <nl> + static ServerStatusMetricField < Counter64 > displayBufferCount ( " repl . buffer . count " , <nl> + & bufferCountGauge ) ; <nl> + / / The size ( bytes ) of items in the buffer <nl> + static Counter64 bufferSizeGauge ; <nl> + static ServerStatusMetricField < Counter64 > displayBufferSize ( " repl . buffer . sizeBytes " , <nl> + & bufferSizeGauge ) ; <nl> + / / The max size ( bytes ) of the buffer <nl> + static int bufferMaxSizeGauge = 256 * 1024 * 1024 ; <nl> + static ServerStatusMetricField < int > displayBufferMaxSize ( " repl . buffer . maxSizeBytes " , <nl> + & bufferMaxSizeGauge ) ; <nl> + <nl> + <nl> BackgroundSyncInterface : : ~ BackgroundSyncInterface ( ) { } <nl> <nl> size_t getSize ( const BSONObj & o ) { <nl> return o . 
objsize ( ) ; <nl> } <nl> <nl> - BackgroundSync : : BackgroundSync ( ) : _buffer ( 256 * 1024 * 1024 , & getSize ) , <nl> + BackgroundSync : : BackgroundSync ( ) : _buffer ( bufferMaxSizeGauge , & getSize ) , <nl> _lastOpTimeFetched ( 0 , 0 ) , <nl> _lastH ( 0 ) , <nl> _pause ( true ) , <nl> namespace replset { <nl> _consumedOpTime ( 0 , 0 ) { <nl> } <nl> <nl> - BackgroundSync : : QueueCounter : : QueueCounter ( ) : waitTime ( 0 ) , numElems ( 0 ) { <nl> - } <nl> - <nl> BackgroundSync * BackgroundSync : : get ( ) { <nl> boost : : unique_lock < boost : : mutex > lock ( s_mutex ) ; <nl> if ( s_instance = = NULL & & ! inShutdown ( ) ) { <nl> namespace replset { <nl> return s_instance ; <nl> } <nl> <nl> - BSONObj BackgroundSync : : getCounters ( ) { <nl> - BSONObjBuilder counters ; <nl> - { <nl> - boost : : unique_lock < boost : : mutex > lock ( _mutex ) ; <nl> - counters . appendIntOrLL ( " waitTimeMs " , _queueCounter . waitTime ) ; <nl> - counters . append ( " numElems " , _queueCounter . numElems ) ; <nl> - } <nl> - / / _buffer is protected by its own mutex <nl> - counters . appendNumber ( " numBytes " , _buffer . size ( ) ) ; <nl> - return counters . obj ( ) ; <nl> - } <nl> - <nl> void BackgroundSync : : shutdown ( ) { <nl> notify ( ) ; <nl> } <nl> namespace replset { <nl> <nl> while ( ! inShutdown ( ) ) { <nl> while ( ! inShutdown ( ) ) { <nl> + <nl> if ( ! r . moreInCurrentBatch ( ) ) { <nl> if ( theReplSet - > gotForceSync ( ) ) { <nl> return ; <nl> namespace replset { <nl> if ( shouldChangeSyncTarget ( ) ) { <nl> return ; <nl> } <nl> + / / record time for each getmore <nl> + { <nl> + TimerHolder batchTimer ( & getmoreReplStats ) ; <nl> + r . more ( ) ; <nl> + } <nl> + / / increment <nl> + networkByteStats . increment ( r . currentBatchMessageSize ( ) ) ; <nl> <nl> - r . more ( ) ; <nl> } <nl> <nl> if ( ! r . more ( ) ) <nl> break ; <nl> <nl> BSONObj o = r . nextSafe ( ) . getOwned ( ) ; <nl> + opsReadStats . increment ( ) ; <nl> <nl> { <nl> boost : : unique_lock < boost : : mutex > lock ( _mutex ) ; <nl> _appliedBuffer = false ; <nl> } <nl> <nl> - Timer timer ; <nl> - / / the blocking queue will wait ( forever ) until there ' s room for us to push <nl> OCCASIONALLY { <nl> LOG ( 2 ) < < " bgsync buffer has " < < _buffer . size ( ) < < " bytes " < < rsLog ; <nl> } <nl> + / / the blocking queue will wait ( forever ) until there ' s room for us to push <nl> _buffer . push ( o ) ; <nl> + bufferCountGauge . increment ( ) ; <nl> + bufferSizeGauge . increment ( getSize ( o ) ) ; <nl> <nl> { <nl> boost : : unique_lock < boost : : mutex > lock ( _mutex ) ; <nl> - <nl> - / / update counters <nl> - _queueCounter . waitTime + = timer . millis ( ) ; <nl> - _queueCounter . numElems + + ; <nl> _lastH = o [ " h " ] . numberLong ( ) ; <nl> _lastOpTimeFetched = o [ " ts " ] . _opTime ( ) ; <nl> } <nl> namespace replset { <nl> } <nl> <nl> void BackgroundSync : : consume ( ) { <nl> - / / this is just to get the op off the queue , it ' s been peeked at <nl> + / / this is just to get the op off the queue , it ' s been peeked at <nl> / / and queued for application already <nl> - _buffer . blockingPop ( ) ; <nl> - <nl> - { <nl> - boost : : unique_lock < boost : : mutex > lock ( _mutex ) ; <nl> - _queueCounter . numElems - - ; <nl> - } <nl> + BSONObj op = _buffer . blockingPop ( ) ; <nl> + bufferCountGauge . increment ( - 1 ) ; <nl> + bufferSizeGauge . 
increment ( - getSize ( op ) ) ; <nl> } <nl> <nl> bool BackgroundSync : : isStale ( OplogReader & r , BSONObj & remoteOldestOp ) { <nl> namespace replset { <nl> _currentSyncTarget = NULL ; <nl> _lastOpTimeFetched = OpTime ( 0 , 0 ) ; <nl> _lastH = 0 ; <nl> - _queueCounter . numElems = 0 ; <nl> _condvar . notify_all ( ) ; <nl> } <nl> <nl> namespace replset { <nl> _assumingPrimary = false ; <nl> } <nl> <nl> - class ReplNetworkQueueSSS : public ServerStatusSection { <nl> - public : <nl> - ReplNetworkQueueSSS ( ) : ServerStatusSection ( " replNetworkQueue " ) { } <nl> - virtual bool includeByDefault ( ) const { return true ; } <nl> - <nl> - BSONObj generateSection ( const BSONElement & configElement ) const { <nl> - if ( ! theReplSet ) <nl> - return BSONObj ( ) ; <nl> - <nl> - return replset : : BackgroundSync : : get ( ) - > getCounters ( ) ; <nl> - } <nl> - <nl> - } replNetworkQueueSSS ; <nl> - <nl> } / / namespace replset <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / repl / bgsync . h <nl> ppp b / src / mongo / db / repl / bgsync . h <nl> namespace replset { <nl> OplogReader _oplogMarker ; / / not locked , only used by notifier thread <nl> OpTime _consumedOpTime ; / / not locked , only used by notifier thread <nl> <nl> - struct QueueCounter { <nl> - QueueCounter ( ) ; <nl> - unsigned long long waitTime ; <nl> - unsigned int numElems ; <nl> - } _queueCounter ; <nl> - <nl> BackgroundSync ( ) ; <nl> BackgroundSync ( const BackgroundSync & s ) ; <nl> BackgroundSync operator = ( const BackgroundSync & s ) ; <nl> <nl> - <nl> / / Production thread <nl> void _producerThread ( ) ; <nl> / / Adds elements to the list , up to maxSize . <nl> mmm a / src / mongo / db / repl / rs_sync . cpp <nl> ppp b / src / mongo / db / repl / rs_sync . cpp <nl> <nl> # include " mongo / db / repl / rs . h " <nl> # include " mongo / db / repl / rs_sync . h " <nl> # include " mongo / util / fail_point_service . h " <nl> + # include " mongo / db / commands / server_status . h " <nl> + # include " mongo / db / stats / timer_stats . h " <nl> + # include " mongo / base / counter . h " <nl> + <nl> + <nl> <nl> namespace mongo { <nl> <nl> namespace replset { <nl> <nl> MONGO_FP_DECLARE ( rsSyncApplyStop ) ; <nl> <nl> + / / Number and time of each ApplyOps worker pool round <nl> + static TimerStats applyBatchStats ; <nl> + static ServerStatusMetricField < TimerStats > displayOpBatchesApplied ( <nl> + " repl . apply . batches " , <nl> + & applyBatchStats ) ; <nl> + / / The oplog entries applied <nl> + static Counter64 opsAppliedStats ; <nl> + static ServerStatusMetricField < Counter64 > displayOpsApplied ( " repl . apply . ops " , <nl> + & opsAppliedStats ) ; <nl> + <nl> + <nl> SyncTail : : SyncTail ( BackgroundSyncInterface * q ) : <nl> Sync ( " " ) , oplogVersion ( 0 ) , _networkQueue ( q ) <nl> { } <nl> namespace replset { <nl> / / For non - initial - sync , we convert updates to upserts <nl> / / to suppress errors when replaying oplog entries . <nl> bool ok = ! applyOperation_inlock ( op , true , convertUpdateToUpsert ) ; <nl> + opsAppliedStats . increment ( ) ; <nl> getDur ( ) . commitIfNeeded ( ) ; <nl> <nl> return ok ; <nl> namespace replset { <nl> void SyncTail : : applyOps ( const std : : vector < std : : vector < BSONObj > > & writerVectors , <nl> MultiSyncApplyFunc applyFunc ) { <nl> ThreadPool & writerPool = theReplSet - > getWriterPool ( ) ; <nl> + TimerHolder timer ( & applyBatchStats ) ; <nl> for ( std : : vector < std : : vector < BSONObj > > : : const_iterator it = writerVectors . begin ( ) ; <nl> it ! 
= writerVectors . end ( ) ; <nl> + + it ) { <nl> mmm a / src / mongo / db / stats / timer_stats . h <nl> ppp b / src / mongo / db / stats / timer_stats . h <nl> namespace mongo { <nl> * / <nl> class TimerHolder { <nl> public : <nl> + / * * Destructor will record to TimerStats * / <nl> TimerHolder ( TimerStats * stats ) ; <nl> + / * * Will record stats if recordMillis hasn ' t ( based on _recorded ) * / <nl> ~ TimerHolder ( ) ; <nl> <nl> / * * <nl> namespace mongo { <nl> * / <nl> int millis ( ) const { return _t . millis ( ) ; } <nl> <nl> - <nl> / * * <nl> * records the time in the TimerStats and marks that we ' ve <nl> * already recorded so the destructor doesn ' t <nl> mmm a / src / mongo / util / queue . h <nl> ppp b / src / mongo / util / queue . h <nl> namespace mongo { <nl> } <nl> <nl> / * * <nl> - * Simple blocking queue with optional max size . <nl> - * A custom sizing function can optionally be given . By default , size is calculated as <nl> - * _queue . size ( ) . <nl> + * Simple blocking queue with optional max size ( by count or custom sizing function ) . <nl> + * A custom sizing function can optionally be given . By default the getSize function <nl> + * returns 1 for each item , resulting in size equaling the number of items queued . <nl> * / <nl> template < typename T > <nl> class BlockingQueue : boost : : noncopyable { <nl> namespace mongo { <nl> return _queue . empty ( ) ; <nl> } <nl> <nl> + / * * <nl> + * The size as measured by the size function . Default to counting each item <nl> + * / <nl> size_t size ( ) const { <nl> scoped_lock l ( _lock ) ; <nl> return _currentSize ; <nl> } <nl> <nl> + / * * <nl> + * The max size for this queue <nl> + * / <nl> + size_t maxSize ( ) const { <nl> + return _maxSize ; <nl> + } <nl> + <nl> + / * * <nl> + * The number / count of items in the queue ( _queue . size ( ) ) <nl> + * / <nl> + int count ( ) const { <nl> + scoped_lock l ( _lock ) ; <nl> + return _queue . size ( ) ; <nl> + } <nl> + <nl> void clear ( ) { <nl> scoped_lock l ( _lock ) ; <nl> _queue = std : : queue < T > ( ) ; <nl>
|
SERVER-8203: replication metrics
|
mongodb/mongo
|
84df92e10e2458cd4ff3751c620535aa9f9a92b6
|
2013-01-29T19:36:24Z
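The metrics in this commit are built from two pieces: Counter64, a 64-bit atomic counter, and TimerHolder, an RAII guard that records one interval into a TimerStats when it leaves scope (as in `TimerHolder timer(&applyBatchStats)` around a batch). A simplified sketch of both using only the standard library (the real classes in mongo/base/counter.h and mongo/db/stats/timer_stats.h differ in detail):

#include <atomic>
#include <chrono>
#include <cstdint>

// 64-bit atomic counter: increment (or decrement via a negative value),
// set, and read, as in mongo/base/counter.h.
class Counter64 {
 public:
  void increment(int64_t n = 1) { _c.fetch_add(n); }
  void set(int64_t n) { _c.store(n); }
  long long get() const { return _c.load(); }
 private:
  std::atomic<int64_t> _c{0};
};

// Count of intervals plus cumulative elapsed milliseconds, the shape the
// serverStatus output reports as { num, totalMillis }.
struct TimerStats {
  Counter64 num;
  Counter64 totalMillis;
};

// RAII guard: records one interval into the given TimerStats on destruction,
// so a whole batch is timed by constructing one at the top of the block.
class TimerHolder {
 public:
  explicit TimerHolder(TimerStats* stats)
      : _stats(stats), _start(std::chrono::steady_clock::now()) {}
  ~TimerHolder() {
    auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(
                  std::chrono::steady_clock::now() - _start).count();
    _stats->num.increment();
    _stats->totalMillis.increment(ms);
  }
 private:
  TimerStats* _stats;
  std::chrono::steady_clock::time_point _start;
};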
|
mmm a / src / compiler / osr . cc <nl> ppp b / src / compiler / osr . cc <nl> void SetTypeForOsrValue ( Node * osr_value , Node * loop , <nl> } <nl> } <nl> <nl> - OsrGuardType guard_type = OsrGuardType : : kAny ; <nl> - / / Find the phi that uses the OsrGuard node and get the type from <nl> - / / there . Skip the search if the OsrGuard does not have value use <nl> - / / ( i . e . , if there is other use beyond the effect use ) . <nl> - if ( OsrGuardTypeOf ( osr_guard - > op ( ) ) = = OsrGuardType : : kUninitialized & & <nl> - osr_guard - > UseCount ( ) > 1 ) { <nl> - Type * type = nullptr ; <nl> - for ( Node * use : osr_guard - > uses ( ) ) { <nl> - if ( use - > opcode ( ) = = IrOpcode : : kPhi ) { <nl> - if ( NodeProperties : : GetControlInput ( use ) ! = loop ) continue ; <nl> - CHECK_NULL ( type ) ; <nl> - type = NodeProperties : : GetType ( use ) ; <nl> - } <nl> - } <nl> - CHECK_NOT_NULL ( type ) ; <nl> - <nl> - if ( type - > Is ( Type : : SignedSmall ( ) ) ) { <nl> - guard_type = OsrGuardType : : kSignedSmall ; <nl> - } <nl> - } <nl> - <nl> - NodeProperties : : ChangeOp ( osr_guard , common - > OsrGuard ( guard_type ) ) ; <nl> + NodeProperties : : ChangeOp ( osr_guard , common - > OsrGuard ( OsrGuardType : : kAny ) ) ; <nl> } <nl> <nl> } / / namespace <nl> mmm a / src / compiler / pipeline . cc <nl> ppp b / src / compiler / pipeline . cc <nl> struct TyperPhase { <nl> } <nl> } ; <nl> <nl> - struct OsrTyperPhase { <nl> - static const char * phase_name ( ) { return " osr typer " ; } <nl> - <nl> - void Run ( PipelineData * data , Zone * temp_zone ) { <nl> - NodeVector roots ( temp_zone ) ; <nl> - data - > jsgraph ( ) - > GetCachedNodes ( & roots ) ; <nl> - / / Dummy induction variable optimizer : at the moment , we do not try <nl> - / / to compute loop variable bounds on OSR . <nl> - LoopVariableOptimizer induction_vars ( data - > jsgraph ( ) - > graph ( ) , <nl> - data - > common ( ) , temp_zone ) ; <nl> - Typer typer ( data - > isolate ( ) , Typer : : kNoFlags , data - > graph ( ) ) ; <nl> - typer . Run ( roots , & induction_vars ) ; <nl> - } <nl> - } ; <nl> - <nl> struct UntyperPhase { <nl> static const char * phase_name ( ) { return " untyper " ; } <nl> <nl> bool PipelineImpl : : CreateGraph ( ) { <nl> <nl> / / Perform OSR deconstruction . <nl> if ( info ( ) - > is_osr ( ) ) { <nl> - Run < OsrTyperPhase > ( ) ; <nl> - <nl> Run < OsrDeconstructionPhase > ( ) ; <nl> <nl> Run < UntyperPhase > ( ) ; <nl>
|
[turbofan] removed osr typer
|
v8/v8
|
251c7313a08b3119e38ce7f83ff2c1878be10fb4
|
2016-12-05T13:04:34Z
|
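For context on what deleting OsrTyperPhase involves mechanically: TurboFan phases are plain structs with a static phase_name() and a Run(PipelineData*, Zone*) method, invoked through a templated helper, so removing a phase removes one Run<...>() call site. A hedged sketch of that shape; PipelineData, Zone, and RunPhase here are simplified stand-ins, not the real v8 internals.

```cpp
#include <cstdio>

// Simplified stand-ins for TurboFan internals.
struct PipelineData { /* graph, jsgraph, isolate, ... */ };
struct Zone { /* temporary allocations for one phase */ };

// A phase is a plain struct: a name for tracing plus a Run method.
struct UntyperPhase {
    static const char* phase_name() { return "untyper"; }
    void Run(PipelineData*, Zone*) { /* strip types from graph nodes */ }
};

// The pipeline runs phases generically; deleting a phase (as this commit
// does with OsrTyperPhase) just removes one RunPhase<...>() call site.
template <typename Phase>
void RunPhase(PipelineData* data, Zone* temp_zone) {
    std::printf("phase: %s\n", Phase::phase_name());
    Phase().Run(data, temp_zone);
}

int main() {
    PipelineData data;
    Zone zone;
    RunPhase<UntyperPhase>(&data, &zone);
}
```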
mmm a / include / rapidjson / schema . h <nl> ppp b / include / rapidjson / schema . h <nl> class Schema { <nl> template < typename ValueType > <nl> RegexType * CreatePattern ( const ValueType & value ) { <nl> if ( value . IsString ( ) ) <nl> + RegexType * r = static_cast < RegexType * > ( allocator_ - > Malloc ( sizeof ( RegexType ) ) ) ; <nl> try { <nl> - return new ( allocator_ - > Malloc ( sizeof ( RegexType ) ) ) RegexType ( value . GetString ( ) , std : : size_t ( value . GetStringLength ( ) ) , std : : regex_constants : : ECMAScript ) ; <nl> + return new ( r ) RegexType ( value . GetString ( ) , std : : size_t ( value . GetStringLength ( ) ) , std : : regex_constants : : ECMAScript ) ; <nl> } <nl> catch ( const std : : regex_error & ) { <nl> + AllocatorType : : Free ( r ) ; <nl> } <nl> return 0 ; <nl> } <nl>
|
Merge pull request from ylavic/schema_regex_leak
|
Tencent/rapidjson
|
30d92a6399b6077006d976b1dc05ee13305bf1c4
|
2018-12-03T01:53:39Z
|
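The fix follows a general exception-safety pattern worth spelling out: allocate raw memory first, construct with placement new inside the try, and free the raw block if the constructor throws, because no completed object ever owned that memory. A minimal sketch, with std::malloc and std::free standing in for rapidjson's allocator_->Malloc and AllocatorType::Free.

```cpp
#include <cstdlib>
#include <new>
#include <regex>

// Exception-safe placement new: if the std::regex constructor throws
// (e.g. on an invalid pattern), the raw allocation must be released by
// hand, since no object was ever constructed in it.
std::regex* CreatePattern(const char* pattern, std::size_t length) {
    void* raw = std::malloc(sizeof(std::regex));  // stand-in for allocator_->Malloc
    if (raw == nullptr)
        return nullptr;
    try {
        return new (raw) std::regex(pattern, length,
                                    std::regex_constants::ECMAScript);
    } catch (const std::regex_error&) {
        std::free(raw);                           // stand-in for AllocatorType::Free
        return nullptr;
    }
}
```

Note that, as excerpted here, the if (value.IsString()) guard would need braces to cover both the new declaration and the try block.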
mmm a / trunk / auto / depends . sh <nl> ppp b / trunk / auto / depends . sh <nl> function Centos_prepare ( ) <nl> return 0 <nl> } <nl> Centos_prepare ; ret = $ ? ; if [ [ 0 - ne $ ret ] ] ; then echo " CentOS prepare failed , ret = $ ret " ; exit $ ret ; fi <nl> + # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> + # for OSX , auto install tools by brew <nl> + # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> + OS_IS_OSX = NO <nl> + function OSX_prepare ( ) <nl> + { <nl> + SYS_NAME = ` uname - s ` <nl> + if [ $ SYS_NAME ! = Darwin ] ; then <nl> + echo " This is not Darwin OSX " <nl> + return 0 ; <nl> + fi <nl> + <nl> + OS_IS_OSX = YES <nl> + echo " OSX detected , install tools if needed " <nl> + <nl> + gcc - - help > / dev / null 2 > & 1 ; ret = $ ? ; if [ [ 0 - ne $ ret ] ] ; then <nl> + echo " install gcc " <nl> + require_sudoer " sudo brew install gcc " <nl> + sudo brew install gcc ; ret = $ ? ; if [ [ 0 - ne $ ret ] ] ; then return $ ret ; fi <nl> + echo " install gcc success " <nl> + fi <nl> + <nl> + g + + - - help > / dev / null 2 > & 1 ; ret = $ ? ; if [ [ 0 - ne $ ret ] ] ; then <nl> + echo " install gcc - c + + " <nl> + require_sudoer " sudo brew install gcc - c + + " <nl> + sudo brew install gcc - c + + ; ret = $ ? ; if [ [ 0 - ne $ ret ] ] ; then return $ ret ; fi <nl> + echo " install gcc - c + + success " <nl> + fi <nl> + <nl> + make - - help > / dev / null 2 > & 1 ; ret = $ ? ; if [ [ 0 - ne $ ret ] ] ; then <nl> + echo " install make " <nl> + require_sudoer " sudo brew install make " <nl> + sudo brew install make ; ret = $ ? ; if [ [ 0 - ne $ ret ] ] ; then return $ ret ; fi <nl> + echo " install make success " <nl> + fi <nl> + <nl> + patch - - help > / dev / null 2 > & 1 ; ret = $ ? ; if [ [ 0 - ne $ ret ] ] ; then <nl> + echo " install patch " <nl> + require_sudoer " sudo brew install patch " <nl> + sudo brew install patch ; ret = $ ? ; if [ [ 0 - ne $ ret ] ] ; then return $ ret ; fi <nl> + echo " install patch success " <nl> + fi <nl> + <nl> + if [ $ SRS_FFMPEG_TOOL = YES ] ; then <nl> + automake - - help > / dev / null 2 > & 1 ; ret = $ ? ; if [ [ 0 - ne $ ret ] ] ; then <nl> + echo " install automake " <nl> + require_sudoer " sudo brew install automake " <nl> + sudo brew install automake ; ret = $ ? ; if [ [ 0 - ne $ ret ] ] ; then return $ ret ; fi <nl> + echo " install automake success " <nl> + fi <nl> + <nl> + autoconf - - help > / dev / null 2 > & 1 ; ret = $ ? ; if [ [ 0 - ne $ ret ] ] ; then <nl> + echo " install autoconf " <nl> + require_sudoer " sudo brew install autoconf " <nl> + sudo brew install autoconf ; ret = $ ? ; if [ [ 0 - ne $ ret ] ] ; then return $ ret ; fi <nl> + echo " install autoconf success " <nl> + fi <nl> + <nl> + libtool - - help > / dev / null 2 > & 1 ; ret = $ ? ; if [ [ 0 - ne $ ret ] ] ; then <nl> + echo " install libtool " <nl> + require_sudoer " sudo brew install libtool " <nl> + sudo brew install libtool ; ret = $ ? ; if [ [ 0 - ne $ ret ] ] ; then return $ ret ; fi <nl> + echo " install libtool success " <nl> + fi <nl> + <nl> + if [ [ ! - f / usr / include / pcre . h ] ] ; then <nl> + echo " install pcre - devel " <nl> + require_sudoer " sudo brew install pcre - devel " <nl> + sudo brew install pcre - devel ; ret = $ ? 
; if [ [ 0 - ne $ ret ] ] ; then return $ ret ; fi <nl> + echo " install pcre - devel success " <nl> + fi <nl> + <nl> + if [ [ ! - f / usr / include / zlib . h ] ] ; then <nl> + echo " install zlib - devel " <nl> + require_sudoer " sudo brew install zlib - devel " <nl> + sudo brew install zlib - devel ; ret = $ ? ; if [ [ 0 - ne $ ret ] ] ; then return $ ret ; fi <nl> + echo " install zlib - devel success " <nl> + fi <nl> + fi <nl> + <nl> + echo " OSX install tools success " <nl> + return 0 <nl> + } <nl> + OSX_prepare ; ret = $ ? ; if [ [ 0 - ne $ ret ] ] ; then echo " OSX prepare failed , ret = $ ret " ; exit $ ret ; fi <nl> + <nl> <nl> # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> # st - 1 . 9 <nl> if [ $ SRS_EMBEDED_CPU = YES ] ; then <nl> ) <nl> fi <nl> else <nl> - # arm not specified , if exists flag , need to rebuild for no - arm platform . <nl> - if [ [ ! - f $ { SRS_OBJS } / _flag . st . arm . tmp & & - f $ { SRS_OBJS } / st / libst . a & & - f $ { SRS_OBJS } / st / libst . so ] ] ; then <nl> - echo " st - 1 . 9t is ok . " ; <nl> + if [ $ OS_IS_OSX = YES ] ; then <nl> + if [ [ ! - f $ { SRS_OBJS } / _flag . st . arm . tmp & & - f $ { SRS_OBJS } / st / libst . a & & - f $ { SRS_OBJS } / st / libst . so ] ] ; then <nl> + echo " st - 1 . 9t is ok . " ; <nl> + else <nl> + echo " build st - 1 . 9t " ; <nl> + ( <nl> + rm - rf $ { SRS_OBJS } / st - 1 . 9 & & cd $ { SRS_OBJS } & & <nl> + unzip - q . . / 3rdparty / st - 1 . 9 . zip & & cd st - 1 . 9 & & <nl> + echo " we alaways patch the st , for we may build srs under arm directly " & & <nl> + echo " the 1 . st . arm . patch is ok for x86 because it ' s only modify code under macro linux arm " & & <nl> + patch - p0 < . . / . . / 3rdparty / patches / 1 . st . arm . patch & & <nl> + make darwin - debug & & <nl> + cd . . & & rm - rf st & & ln - sf st - 1 . 9 / obj st & & <nl> + cd . . & & rm - f $ { SRS_OBJS } / _flag . st . arm . tmp <nl> + ) <nl> + fi <nl> else <nl> - echo " build st - 1 . 9t " ; <nl> - ( <nl> - rm - rf $ { SRS_OBJS } / st - 1 . 9 & & cd $ { SRS_OBJS } & & <nl> - unzip - q . . / 3rdparty / st - 1 . 9 . zip & & cd st - 1 . 9 & & <nl> - echo " we alaways patch the st , for we may build srs under arm directly " & & <nl> - echo " the 1 . st . arm . patch is ok for x86 because it ' s only modify code under macro linux arm " & & <nl> - patch - p0 < . . / . . / 3rdparty / patches / 1 . st . arm . patch & & <nl> - make EXTRA_CFLAGS = " - DMD_HAVE_EPOLL " linux - debug & & <nl> - cd . . & & rm - rf st & & ln - sf st - 1 . 9 / obj st & & <nl> - cd . . & & rm - f $ { SRS_OBJS } / _flag . st . arm . tmp <nl> - ) <nl> + # arm not specified , if exists flag , need to rebuild for no - arm platform . <nl> + if [ [ ! - f $ { SRS_OBJS } / _flag . st . arm . tmp & & - f $ { SRS_OBJS } / st / libst . a & & - f $ { SRS_OBJS } / st / libst . so ] ] ; then <nl> + echo " st - 1 . 9t is ok . " ; <nl> + else <nl> + echo " build st - 1 . 9t " ; <nl> + ( <nl> + rm - rf $ { SRS_OBJS } / st - 1 . 9 & & cd $ { SRS_OBJS } & & <nl> + unzip - q . . / 3rdparty / st - 1 . 9 . zip & & cd st - 1 . 9 & & <nl> + echo " we alaways patch the st , for we may build srs under arm directly " & & <nl> + echo " the 1 . st . arm . patch is ok for x86 because it ' s only modify code under macro linux arm " & & <nl> + patch - p0 < . . / . . / 3rdparty / patches / 1 . st . arm . 
patch & & <nl> + make EXTRA_CFLAGS = " - DMD_HAVE_EPOLL " linux - debug & & <nl> + cd . . & & rm - rf st & & ln - sf st - 1 . 9 / obj st & & <nl> + cd . . & & rm - f $ { SRS_OBJS } / _flag . st . arm . tmp <nl> + ) <nl> + fi <nl> fi <nl> fi <nl> # check status <nl> ret = $ ? ; if [ [ $ ret - ne 0 ] ] ; then echo " build st - 1 . 9 failed , ret = $ ret " ; exit $ ret ; fi <nl> - if [ ! - f $ { SRS_OBJS } / st / libst . a ] ; then echo " build st - 1 . 9 failed . " ; exit - 1 ; fi <nl> - if [ ! - f $ { SRS_OBJS } / st / libst . so ] ; then echo " build st - 1 . 9 failed . " ; exit - 1 ; fi <nl> + if [ ! - f $ { SRS_OBJS } / st / libst . a ] ; then echo " build st - 1 . 9 static lib failed . " ; exit - 1 ; fi <nl> + if [ OS_IS_OSX = Darwin ] then <nl> + if [ ! - f $ { SRS_OBJS } / st / libst . dylib ] ; then echo " build st - 1 . 9 shared lib failed . " ; exit - 1 ; fi <nl> + else <nl> + if [ ! - f $ { SRS_OBJS } / st / libst . so ] ; then echo " build st - 1 . 9 shared lib failed . " ; exit - 1 ; fi <nl> + fi <nl> <nl> # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> # http - parser - 2 . 1 <nl> mmm a / trunk / auto / options . sh <nl> ppp b / trunk / auto / options . sh <nl> function parse_user_option ( ) { <nl> - - cubie ) SRS_CUBIE = YES ; ; <nl> - - dev ) SRS_DEV = YES ; ; <nl> - - fast - dev ) SRS_FAST_DEV = YES ; ; <nl> + - - osx - dev ) SRS_OSX_DEV = YES ; ; <nl> - - demo ) SRS_DEMO = YES ; ; <nl> - - fast ) SRS_FAST = YES ; ; <nl> - - disable - all ) SRS_DISABLE_ALL = YES ; ; <nl> function apply_user_presets ( ) { <nl> SRS_STATIC = NO <nl> fi <nl> <nl> + # if osx dev specified , open main server features . <nl> + if [ $ SRS_OSX_DEV = YES ] ; then <nl> + SRS_HLS = YES <nl> + SRS_DVR = YES <nl> + SRS_NGINX = NO <nl> + SRS_SSL = NO <nl> + SRS_FFMPEG_TOOL = NO <nl> + SRS_TRANSCODE = YES <nl> + SRS_INGEST = NO <nl> + SRS_HTTP_PARSER = NO <nl> + SRS_HTTP_CALLBACK = NO <nl> + SRS_HTTP_SERVER = NO <nl> + SRS_HTTP_API = NO <nl> + SRS_LIBRTMP = NO <nl> + SRS_RESEARCH = NO <nl> + SRS_UTEST = NO <nl> + SRS_GPERF = NO <nl> + SRS_GPERF_MC = NO <nl> + SRS_GPERF_MP = NO <nl> + SRS_GPERF_CP = NO <nl> + SRS_GPROF = NO <nl> + SRS_STATIC = NO <nl> + fi <nl> + <nl> + <nl> + <nl> # for srs demo <nl> if [ $ SRS_DEMO = YES ] ; then <nl> SRS_HLS = YES <nl> mmm a / trunk / src / app / srs_app_json . cpp <nl> ppp b / trunk / src / app / srs_app_json . cpp <nl> extern " C " { <nl> # include < stdlib . h > <nl> # include < stdio . h > <nl> # include < string . h > <nl> - # include < malloc . h > <nl> # include < assert . h > <nl> <nl> / / # include " nxjson . h " <nl> mmm a / trunk / src / app / srs_app_server . cpp <nl> ppp b / trunk / src / app / srs_app_server . cpp <nl> int SrsServer : : acquire_pid_file ( ) <nl> } <nl> <nl> / / require write lock <nl> - flock lock ; <nl> + struct flock lock ; <nl> <nl> lock . l_type = F_WRLCK ; / / F_RDLCK , F_WRLCK , F_UNLCK <nl> lock . l_start = 0 ; / / type offset , relative to l_whence <nl>
|
Support porting SRS to the Mac OS X platform
|
ossrs/srs
|
b50ecd6dbeef62913d38fa9e758cdb29fcdf8c6f
|
2014-07-27T11:32:12Z
|
mmm a / tensorflow / python / util / deprecation . py <nl> ppp b / tensorflow / python / util / deprecation . py <nl> <nl> from tensorflow . python . util import tf_contextlib <nl> from tensorflow . python . util import tf_decorator <nl> from tensorflow . python . util import tf_inspect <nl> + from tensorflow . python . util import tf_stack <nl> <nl> <nl> # Allow deprecation warnings to be silenced temporarily with a context manager . <nl> def _validate_deprecation_args ( date , instructions ) : <nl> <nl> def _call_location ( outer = False ) : <nl> " " " Returns call location given level up from current call . " " " <nl> - frame = tf_inspect . currentframe ( ) <nl> - if frame : <nl> - # CPython internals are available , use them for performance . <nl> - # walk back two frames to get to deprecated function caller . <nl> - frame = frame . f_back <nl> - if frame . f_back : <nl> - frame = frame . f_back <nl> - if outer and frame . f_back : <nl> - frame = frame . f_back <nl> - return ' % s : % d ' % ( frame . f_code . co_filename , frame . f_lineno ) <nl> - else : <nl> - # Slow fallback path <nl> - stack = tf_inspect . stack ( 0 ) # 0 avoids generating unused context <nl> - entry = stack [ 3 if outer else 2 ] <nl> - return ' % s : % d ' % ( entry [ 1 ] , entry [ 2 ] ) <nl> + stack = tf_stack . extract_stack ( ) <nl> + frame = stack [ - 4 if outer else - 3 ] <nl> + return ' { filename } : { lineno } ' . format ( filename = frame [ 0 ] , lineno = frame [ 1 ] ) <nl> <nl> <nl> def _wrap_decorator ( wrapped_function ) : <nl>
|
Use tf_stack instead of tf_inspect to get file and lineno in deprecation.py
|
tensorflow/tensorflow
|
b97727bc3c7a9216670f361b639a60ed516917e0
|
2018-11-29T01:40:31Z
|
mmm a / src / google / protobuf / util / internal / datapiece . cc <nl> ppp b / src / google / protobuf / util / internal / datapiece . cc <nl> StatusOr < string > DataPiece : : ToBytes ( ) const { <nl> } <nl> <nl> StatusOr < int > DataPiece : : ToEnum ( const google : : protobuf : : Enum * enum_type , <nl> - bool use_lower_camel_for_enums ) const { <nl> + bool use_lower_camel_for_enums , <nl> + bool ignore_unknown_enum_values ) const { <nl> if ( type_ = = TYPE_NULL ) return google : : protobuf : : NULL_VALUE ; <nl> <nl> if ( type_ = = TYPE_STRING ) { <nl> StatusOr < int > DataPiece : : ToEnum ( const google : : protobuf : : Enum * enum_type , <nl> value = FindEnumValueByNameWithoutUnderscoreOrNull ( enum_type , enum_name ) ; <nl> if ( value ! = NULL ) return value - > number ( ) ; <nl> } <nl> + <nl> + / / If ignore_unknown_enum_values is true an unknown enum value is treated <nl> + / / as the default <nl> + if ( ignore_unknown_enum_values ) return enum_type - > enumvalue ( 0 ) . number ( ) ; <nl> } else { <nl> / / We don ' t need to check whether the value is actually declared in the <nl> / / enum because we preserve unknown enum values as well . <nl> mmm a / src / google / protobuf / util / internal / datapiece . h <nl> ppp b / src / google / protobuf / util / internal / datapiece . h <nl> class LIBPROTOBUF_EXPORT DataPiece { <nl> / / If the value is not a string , attempts to convert to a 32 - bit integer . <nl> / / If none of these succeeds , returns a conversion error status . <nl> util : : StatusOr < int > ToEnum ( const google : : protobuf : : Enum * enum_type , <nl> - bool use_lower_camel_for_enums ) const ; <nl> + bool use_lower_camel_for_enums , <nl> + bool ignore_unknown_enum_values ) const ; <nl> <nl> private : <nl> / / Disallow implicit constructor . <nl> mmm a / src / google / protobuf / util / internal / proto_writer . cc <nl> ppp b / src / google / protobuf / util / internal / proto_writer . cc <nl> inline Status WriteString ( int field_number , const DataPiece & data , <nl> inline Status WriteEnum ( int field_number , const DataPiece & data , <nl> const google : : protobuf : : Enum * enum_type , <nl> CodedOutputStream * stream , <nl> - bool use_lower_camel_for_enums ) { <nl> - StatusOr < int > e = data . ToEnum ( enum_type , use_lower_camel_for_enums ) ; <nl> + bool use_lower_camel_for_enums , <nl> + bool ignore_unknown_values ) { <nl> + StatusOr < int > e = data . ToEnum ( enum_type , use_lower_camel_for_enums , ignore_unknown_values ) ; <nl> if ( e . ok ( ) ) { <nl> WireFormatLite : : WriteEnum ( field_number , e . ValueOrDie ( ) , stream ) ; <nl> } <nl> ProtoWriter * ProtoWriter : : RenderPrimitiveField ( <nl> case google : : protobuf : : Field_Kind_TYPE_ENUM : { <nl> status = WriteEnum ( field . number ( ) , data , <nl> typeinfo_ - > GetEnumByTypeUrl ( field . type_url ( ) ) , <nl> - stream_ . get ( ) , use_lower_camel_for_enums_ ) ; <nl> + stream_ . get ( ) , use_lower_camel_for_enums_ , <nl> + ignore_unknown_fields_ ) ; <nl> break ; <nl> } <nl> default : / / TYPE_GROUP or TYPE_MESSAGE <nl> mmm a / src / google / protobuf / util / internal / proto_writer . h <nl> ppp b / src / google / protobuf / util / internal / proto_writer . h <nl> class LIBPROTOBUF_EXPORT ProtoWriter : public StructuredObjectWriter { <nl> / / Indicates whether we finished writing root message completely . <nl> bool done_ ; <nl> <nl> - / / If true , don ' t report unknown field names to the listener . <nl> + / / If true , don ' t report unknown field names and enum values to the listener . 
<nl> bool ignore_unknown_fields_ ; <nl> <nl> / / If true , check if enum name in camel case or without underscore matches the <nl> mmm a / src / google / protobuf / util / json_util_test . cc <nl> ppp b / src / google / protobuf / util / json_util_test . cc <nl> TEST_F ( JsonUtilTest , TestDynamicMessage ) { <nl> EXPECT_EQ ( ToJson ( generated , options ) , ToJson ( * message , options ) ) ; <nl> } <nl> <nl> + TEST_F ( JsonUtilTest , TestParsingUnknownEnumsAs0 ) { <nl> + TestMessage m ; <nl> + { <nl> + JsonParseOptions options ; <nl> + ASSERT_FALSE ( options . ignore_unknown_fields ) ; <nl> + string input = <nl> + " { \ n " <nl> + " \ " enum_value \ " : \ " UNKNOWN_VALUE \ " \ n " <nl> + " } " ; <nl> + m . set_enum_value ( proto3 : : BAR ) ; <nl> + EXPECT_FALSE ( FromJson ( input , & m , options ) ) ; <nl> + ASSERT_EQ ( proto3 : : BAR , m . enum_value ( ) ) ; / / Keep previous value <nl> + <nl> + options . ignore_unknown_fields = true ; <nl> + EXPECT_TRUE ( FromJson ( input , & m , options ) ) ; <nl> + EXPECT_EQ ( 0 , m . enum_value ( ) ) ; / / Unknown enum value must be decoded as 0 <nl> + } <nl> + / / Integer values are read as usual <nl> + { <nl> + JsonParseOptions options ; <nl> + string input = <nl> + " { \ n " <nl> + " \ " enum_value \ " : 12345 \ n " <nl> + " } " ; <nl> + m . set_enum_value ( proto3 : : BAR ) ; <nl> + EXPECT_TRUE ( FromJson ( input , & m , options ) ) ; <nl> + ASSERT_EQ ( 12345 , m . enum_value ( ) ) ; <nl> + <nl> + options . ignore_unknown_fields = true ; <nl> + EXPECT_TRUE ( FromJson ( input , & m , options ) ) ; <nl> + EXPECT_EQ ( 12345 , m . enum_value ( ) ) ; <nl> + } <nl> + <nl> + / / Trying to pass an object as an enum field value is always treated as an error <nl> + { <nl> + JsonParseOptions options ; <nl> + string input = <nl> + " { \ n " <nl> + " \ " enum_value \ " : { } \ n " <nl> + " } " ; <nl> + options . ignore_unknown_fields = true ; <nl> + EXPECT_FALSE ( FromJson ( input , & m , options ) ) ; <nl> + options . ignore_unknown_fields = false ; <nl> + EXPECT_FALSE ( FromJson ( input , & m , options ) ) ; <nl> + } <nl> + / / Trying to pass an array as an enum field value is always treated as an error <nl> + { <nl> + JsonParseOptions options ; <nl> + string input = <nl> + " { \ n " <nl> + " \ " enum_value \ " : [ ] \ n " <nl> + " } " ; <nl> + EXPECT_FALSE ( FromJson ( input , & m , options ) ) ; <nl> + options . ignore_unknown_fields = true ; <nl> + EXPECT_FALSE ( FromJson ( input , & m , options ) ) ; <nl> + } <nl> + } <nl> + <nl> typedef std : : pair < char * , int > Segment ; <nl> / / A ZeroCopyOutputStream that writes to multiple buffers . <nl> class SegmentedZeroCopyOutputStream : public io : : ZeroCopyOutputStream { <nl>
|
Merge pull request from alexey-malov/IgnoreUnknownEnumsInJson
|
protocolbuffers/protobuf
|
3aaed9698de4d482aeec6b854e8c750eb555d8fb
|
2018-02-26T18:42:31Z
|
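A compact sketch of the fallback semantics this change introduces, with hypothetical EnumValue/EnumType stand-ins for the google::protobuf descriptors: an unknown symbolic name maps to the first declared value (which proto3 requires to be zero) when the ignore flag is set, and stays a conversion error otherwise.

```cpp
#include <optional>
#include <string>
#include <vector>

struct EnumValue { std::string name; int number; };
struct EnumType  { std::vector<EnumValue> values; };  // first value is 0 in proto3

std::optional<int> ToEnum(const EnumType& type, const std::string& name,
                          bool ignore_unknown_enum_values) {
    for (const EnumValue& v : type.values)
        if (v.name == name)
            return v.number;
    // Unknown name: treat as the default value only when explicitly allowed.
    if (ignore_unknown_enum_values && !type.values.empty())
        return type.values.front().number;
    return std::nullopt;  // conversion error
}
```

The accompanying tests pin down the edges: integer values still pass through unchanged, and objects or arrays in an enum position remain hard errors regardless of the flag.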
mmm a / js / server / modules / org / arangodb / foxx / manager . js <nl> ppp b / js / server / modules / org / arangodb / foxx / manager . js <nl> exports . unmount = function ( mount ) { <nl> <nl> var doc = mountFromId ( mount ) ; <nl> <nl> - if ( doc . isSystem & & ( mount . charAt ( 1 ) = = = ' _ ' | | mount . indexOf ( ' system ' ) = = = 1 ) ) { <nl> + if ( doc . isSystem & & mount . charAt ( 1 ) = = = ' _ ' ) { <nl> throw new Error ( " Cannot unmount system application " ) ; <nl> } <nl> <nl>
|
Simplified is-mounted-as-system-app check.
|
arangodb/arangodb
|
3be0e11110b65d6678ee50d619d42df501b0c0ba
|
2014-08-20T08:57:47Z
|
mmm a / editor / script_create_dialog . cpp <nl> ppp b / editor / script_create_dialog . cpp <nl> void ScriptCreateDialog : : _browse_path ( bool browse_parent ) { <nl> file_browse - > clear_filters ( ) ; <nl> List < String > extensions ; <nl> <nl> - / / get all possible extensions for script <nl> - for ( int l = 0 ; l < language_menu - > get_item_count ( ) ; l + + ) { <nl> - ScriptServer : : get_language ( l ) - > get_recognized_extensions ( & extensions ) ; <nl> - } <nl> + int lang = language_menu - > get_selected ( ) ; <nl> + ScriptServer : : get_language ( lang ) - > get_recognized_extensions ( & extensions ) ; <nl> <nl> for ( List < String > : : Element * E = extensions . front ( ) ; E ; E = E - > next ( ) ) { <nl> file_browse - > add_filter ( " * . " + E - > get ( ) ) ; <nl>
|
Script create dialog: Show extensions for the currently selected language only.
|
godotengine/godot
|
8e8e4b30e51cfc063daa57e031b7f427703e0eb8
|
2017-07-01T13:19:34Z
|
mmm a / libraries / chain / include / eosio / chain / wasm_eosio_injection . hpp <nl> ppp b / libraries / chain / include / eosio / chain / wasm_eosio_injection . hpp <nl> namespace eosio { namespace chain { namespace wasm_injections { <nl> if ( module . exports [ i ] . kind = = IR : : ObjectKind : : function ) <nl> module . exports [ i ] . index + + ; <nl> } <nl> + / / shift the start index by 1 <nl> + module . startFunctionIndex + + ; <nl> + <nl> / / shift all table entries for call indirect <nl> for ( TableSegment & ts : module . tableSegments ) { <nl> for ( auto & idx : ts . indices ) <nl> namespace eosio { namespace chain { namespace wasm_injections { <nl> } <nl> } <nl> } ; <nl> - <nl> + <nl> struct noop_injection_visitor { <nl> static void inject ( IR : : Module & m ) ; <nl> static void initializer ( ) ; <nl> namespace eosio { namespace chain { namespace wasm_injections { <nl> static void accept ( wasm_ops : : instr * inst , wasm_ops : : visitor_arg & arg ) { <nl> wasm_ops : : op_types < > : : call_t * call_inst = reinterpret_cast < wasm_ops : : op_types < > : : call_t * > ( inst ) ; <nl> auto mapped_index = injector_utils : : injected_index_mapping . find ( call_inst - > field ) ; <nl> + <nl> if ( mapped_index ! = injector_utils : : injected_index_mapping . end ( ) ) { <nl> call_inst - > field = mapped_index - > second ; <nl> } <nl> mmm a / libraries / chain / wasm_eosio_injection . cpp <nl> ppp b / libraries / chain / wasm_eosio_injection . cpp <nl> void max_memory_injection_visitor : : initializer ( ) { } <nl> <nl> uint32_t instruction_counter : : icnt = 0 ; <nl> int32_t checktime_injector : : checktime_idx = - 1 ; <nl> - <nl> } } } / / namespace eosio , chain , injectors <nl> mmm a / tests / wasm_tests / test_wasts . hpp <nl> ppp b / tests / wasm_tests / test_wasts . hpp <nl> static const char f32_add_wast [ ] = R " = = = = = ( <nl> ) <nl> ) = = = = = " ; <nl> * / <nl> + <nl> + static const char start_index_wast [ ] = R " = = = = = ( <nl> + ( module <nl> + ( import " env " " require_auth " ( func $ require_auth ( param i64 ) ) ) <nl> + ( import " env " " eosio_assert " ( func $ eosio_assert ( param i32 i32 ) ) ) <nl> + ( import " env " " now " ( func $ now ( result i32 ) ) ) <nl> + ( table 0 anyfunc ) <nl> + ( memory $ 0 1 ) <nl> + ( export " memory " ( memory $ 0 ) ) <nl> + ( export " entry " ( func $ entry ) ) <nl> + ( export " apply " ( func $ apply ) ) <nl> + ( func $ entry <nl> + ( block <nl> + ( call $ eosio_assert <nl> + ( i32 . eq <nl> + ( i32 . load offset = 4 <nl> + ( i32 . const 0 ) <nl> + ) <nl> + ( call $ now ) <nl> + ) <nl> + ( i32 . const 0 ) <nl> + ) <nl> + ) <nl> + ) <nl> + ( func $ apply ( param $ 0 i64 ) ( param $ 1 i64 ) ( param $ 2 i64 ) <nl> + ( call $ require_auth ( i64 . const 6121376101093867520 ) ) <nl> + ( block <nl> + ( call $ eosio_assert <nl> + ( i32 . eq <nl> + ( i32 . load offset = 4 <nl> + ( i32 . const 0 ) <nl> + ) <nl> + ( call $ now ) <nl> + ) <nl> + ( i32 . const 0 ) <nl> + ) <nl> + ) <nl> + ) <nl> + ( start $ entry ) <nl> + ) <nl> + ) = = = = = " ; <nl> + <nl> static const char entry_wast [ ] = R " = = = = = ( <nl> ( module <nl> ( import " env " " require_auth " ( func $ require_auth ( param i64 ) ) ) <nl> mmm a / tests / wasm_tests / wasm_tests . cpp <nl> ppp b / tests / wasm_tests / wasm_tests . cpp <nl> BOOST_FIXTURE_TEST_CASE ( check_entry_behavior , TESTER ) try { <nl> BOOST_CHECK_EQUAL ( transaction_receipt : : executed , receipt . 
status ) ; <nl> } FC_LOG_AND_RETHROW ( ) <nl> <nl> + BOOST_FIXTURE_TEST_CASE ( check_start_index_behavior , TESTER ) try { <nl> + produce_blocks ( 2 ) ; <nl> + create_accounts ( { N ( check ) } ) ; <nl> + produce_block ( ) ; <nl> + <nl> + set_code ( N ( check ) , start_index_wast ) ; <nl> + produce_blocks ( 10 ) ; <nl> + <nl> + signed_transaction trx ; <nl> + action act ; <nl> + act . account = N ( check ) ; <nl> + act . name = N ( ) ; <nl> + act . authorization = vector < permission_level > { { N ( check ) , config : : active_name } } ; <nl> + trx . actions . push_back ( act ) ; <nl> + <nl> + set_transaction_headers ( trx ) ; <nl> + trx . sign ( get_private_key ( N ( check ) , " active " ) , chain_id_type ( ) ) ; <nl> + push_transaction ( trx ) ; <nl> + produce_blocks ( 1 ) ; <nl> + BOOST_REQUIRE_EQUAL ( true , chain_has_transaction ( trx . id ( ) ) ) ; <nl> + const auto & receipt = get_transaction_receipt ( trx . id ( ) ) ; <nl> + BOOST_CHECK_EQUAL ( transaction_receipt : : executed , receipt . status ) ; <nl> + } FC_LOG_AND_RETHROW ( ) <nl> + <nl> + <nl> / * * <nl> * Ensure we can load a wasm w / o memory <nl> * / <nl>
|
Fix start function index handling when injecting imported functions
|
EOSIO/eos
|
88c893504cd08df0bac8c912ef3ddae169115c45
|
2018-04-20T06:38:36Z
|
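The underlying bug class is easy to state: prepending an imported function renumbers every function index in the module, and the start function index is just one more place that stores such an index. A hedged sketch with a hypothetical Module struct; the real IR::Module also renumbers call instructions and filters exports by kind, elided here.

```cpp
#include <vector>

// Hypothetical, simplified module layout for illustration only.
struct Module {
    std::vector<int> exportFunctionIndices;
    std::vector<int> tableIndices;        // call_indirect targets
    int startFunctionIndex = -1;          // -1 means "no start function"
};

// After injecting one imported function at index 0, every stored function
// index shifts by one -- including the start index this commit fixes.
void shiftAfterImportInjection(Module& m) {
    for (int& idx : m.exportFunctionIndices) ++idx;
    for (int& idx : m.tableIndices) ++idx;
    if (m.startFunctionIndex >= 0)
        ++m.startFunctionIndex;
}
```

The new check_start_index_behavior test exercises exactly the case the sketch guards: a module whose start function ran against stale indices before the fix.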
mmm a / src / AggregateFunctions / AggregateFunctionDistinct . h <nl> ppp b / src / AggregateFunctions / AggregateFunctionDistinct . h <nl> struct AggregateFunctionDistinctMultipleGenericData : public AggregateFunctionDi <nl> { <nl> const char * begin = nullptr ; <nl> StringRef value ( begin , 0 ) ; <nl> - SipHash hash ; <nl> for ( size_t i = 0 ; i < columns_num ; + + i ) <nl> { <nl> - columns [ i ] - > updateHashWithValue ( row_num , hash ) ; <nl> auto cur_ref = columns [ i ] - > serializeValueIntoArena ( row_num , * arena , begin ) ; <nl> value . data = cur_ref . data - value . size ; <nl> value . size + = cur_ref . size ; <nl> new file mode 100644 <nl> index 00000000000 . . 096d5703292 <nl> mmm / dev / null <nl> ppp b / tests / queries / 0_stateless / 01259_combinator_distinct_distributed . reference <nl> <nl> + 78 <nl> + [ 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11 , 12 ] <nl> + 20 <nl> + 0 . 49237 <nl> new file mode 100644 <nl> index 00000000000 . . f851e64dbcb <nl> mmm / dev / null <nl> ppp b / tests / queries / 0_stateless / 01259_combinator_distinct_distributed . sql <nl> <nl> + SELECT sum ( DISTINCT number % 13 ) FROM remote ( ' 127 . 0 . 0 . { 1 , 2 } ' , numbers_mt ( 100000 ) ) ; <nl> + SELECT arraySort ( groupArray ( DISTINCT number % 13 ) ) FROM remote ( ' 127 . 0 . 0 . { 1 , 2 } ' , numbers_mt ( 100000 ) ) ; <nl> + SELECT finalizeAggregation ( countState ( DISTINCT toString ( number % 20 ) ) ) FROM remote ( ' 127 . 0 . 0 . { 1 , 2 } ' , numbers_mt ( 100000 ) ) ; <nl> + SELECT round ( corrStable ( DISTINCT x , y ) , 5 ) FROM ( SELECT number % 10 AS x , number % 5 AS y FROM remote ( ' 127 . 0 . 0 . { 1 , 2 } ' , numbers ( 1000 ) ) ) ; <nl>
|
Add tests for the Distinct combinator over distributed queries
|
ClickHouse/ClickHouse
|
685d14863b398946b58793a71633c539e88b87fd
|
2020-06-25T22:39:54Z
|
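The C++ half of this record changes the deduplication key for the Distinct combinator: instead of mixing every column into a SipHash, the row is serialized column by column into one contiguous arena span and the resulting byte string itself is the set key. A rough sketch of the idea, using std::string in place of the arena; note the real per-column serialization is self-delimiting, which plain concatenation of arbitrary strings is not.

```cpp
#include <string>
#include <unordered_set>
#include <vector>

// One row, already serialized per column; concatenating the parts yields a
// byte string that two rows share iff their serialized values all match
// (assuming a self-delimiting serialization, as in the arena version).
std::string makeDistinctKey(const std::vector<std::string>& serializedColumns) {
    std::string key;
    for (const std::string& part : serializedColumns)
        key += part;
    return key;
}

bool insertIfDistinct(std::unordered_set<std::string>& seen,
                      const std::vector<std::string>& row) {
    return seen.insert(makeDistinctKey(row)).second;  // true on first sighting
}
```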
mmm a / core / image . cpp <nl> ppp b / core / image . cpp <nl> int Image : : get_image_required_mipmaps ( int p_width , int p_height , Format p_format <nl> return mm ; <nl> } <nl> <nl> + int Image : : get_image_mipmap_offset ( int p_width , int p_height , Format p_format , int p_mipmap ) { <nl> + <nl> + if ( p_mipmap < = 0 ) { <nl> + return 0 ; <nl> + } <nl> + int mm ; <nl> + return _get_dst_image_size ( p_width , p_height , p_format , mm , p_mipmap - 1 ) ; <nl> + } <nl> + <nl> bool Image : : is_compressed ( ) const { <nl> return format > FORMAT_RGBE9995 ; <nl> } <nl> mmm a / core / image . h <nl> ppp b / core / image . h <nl> class Image : public Resource { <nl> <nl> static int get_image_data_size ( int p_width , int p_height , Format p_format , bool p_mipmaps = false ) ; <nl> static int get_image_required_mipmaps ( int p_width , int p_height , Format p_format ) ; <nl> + static int get_image_mipmap_offset ( int p_width , int p_height , Format p_format , int p_mipmap ) ; <nl> <nl> enum CompressMode { <nl> COMPRESS_S3TC , <nl> mmm a / modules / squish / image_compress_squish . cpp <nl> ppp b / modules / squish / image_compress_squish . cpp <nl> void image_decompress_squish ( Image * p_image ) { <nl> return ; <nl> } <nl> <nl> - int dst_ofs = 0 ; <nl> - <nl> for ( int i = 0 ; i < = mm_count ; i + + ) { <nl> int src_ofs = 0 , mipmap_size = 0 , mipmap_w = 0 , mipmap_h = 0 ; <nl> p_image - > get_mipmap_offset_size_and_dimensions ( i , src_ofs , mipmap_size , mipmap_w , mipmap_h ) ; <nl> - squish : : DecompressImage ( & wb [ dst_ofs ] , mipmap_w , mipmap_h , & rb [ src_ofs ] , squish_flags ) ; <nl> + int dst_ofs = Image : : get_image_mipmap_offset ( p_image - > get_width ( ) , p_image - > get_height ( ) , target_format , i ) ; <nl> + squish : : DecompressImage ( & wb [ dst_ofs ] , w , h , & rb [ src_ofs ] , squish_flags ) ; <nl> + w > > = 1 ; <nl> + h > > = 1 ; <nl> } <nl> <nl> p_image - > create ( p_image - > get_width ( ) , p_image - > get_height ( ) , p_image - > has_mipmaps ( ) , target_format , data ) ; <nl>
|
Fix Squish decompression, closes
|
godotengine/godot
|
f141f747de7045dcc4cbb6643ce5c38393ab3625
|
2018-11-16T22:12:00Z
|
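The fix hinges on computing, per mip level, where that level starts in the destination buffer and what its dimensions are. A simplified sketch assuming a plain uncompressed format with a fixed bytes-per-pixel size; Godot's real Image::get_image_mipmap_offset also accounts for block-compressed formats and minimum block sizes, so this is an illustration of the arithmetic, not the engine's implementation.

```cpp
#include <algorithm>
#include <cstddef>

// Offset of mip level `mip` in a tightly packed chain: sum the sizes of all
// previous levels, halving (and clamping to 1) the dimensions each step.
std::size_t mipmapOffset(int width, int height, std::size_t bytesPerPixel,
                         int mip) {
    std::size_t offset = 0;
    for (int i = 0; i < mip; ++i) {
        offset += static_cast<std::size_t>(width) * height * bytesPerPixel;
        width  = std::max(width >> 1, 1);
        height = std::max(height >> 1, 1);
    }
    return offset;
}

// Shape of the fixed loop: each level writes at its own offset, computed
// from the base dimensions, then the working dimensions are halved.
void decompressAllMips(int baseW, int baseH, int mipCount, std::size_t bpp) {
    int w = baseW, h = baseH;
    for (int i = 0; i <= mipCount; ++i) {
        std::size_t dstOfs = mipmapOffset(baseW, baseH, bpp, i);
        (void)dstOfs;  // decompress the w x h level into buffer + dstOfs
        w = std::max(w >> 1, 1);
        h = std::max(h >> 1, 1);
    }
}
```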
mmm a / generic / THTensorMath . c <nl> ppp b / generic / THTensorMath . c <nl> void THTensor_ ( indexSelect ) ( THTensor * tensor , THTensor * src , int dim , THLongTens <nl> if ( index_data [ i ] < 1 | | index_data [ i ] > max ) <nl> THError ( " index out of range " ) ; <nl> <nl> - # pragma omp parallel for if ( numel * stride > TH_OMP_OVERHEAD_THRESHOLD ) private ( i ) <nl> - for ( i = 0 ; i < numel ; i + + ) <nl> - memcpy ( tensor_data + i * stride , src_data + ( index_data [ i ] - 1 ) * stride , stride * sizeof ( real ) ) ; <nl> + if ( src - > nDimension = = 1 ) { <nl> + # pragma omp parallel for if ( numel > TH_OMP_OVERHEAD_THRESHOLD ) private ( i ) <nl> + for ( i = 0 ; i < numel ; i + + ) <nl> + tensor_data [ i ] = src_data [ index_data [ i ] - 1 ] ; <nl> + } else { <nl> + # pragma omp parallel for if ( numel * stride > TH_OMP_OVERHEAD_THRESHOLD ) private ( i ) <nl> + for ( i = 0 ; i < numel ; i + + ) <nl> + memcpy ( tensor_data + i * stride , src_data + ( index_data [ i ] - 1 ) * stride , stride * sizeof ( real ) ) ; <nl> + } <nl> } <nl> else if ( src - > nDimension = = 1 ) <nl> { <nl>
|
Use assignment in indexSelect for 1-dimensional contiguous input.
|
pytorch/pytorch
|
49fe800281fefeff225244fc21e2ce5e289d71cf
|
2015-05-22T16:24:22Z
|
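A self-contained sketch of the specialization this commit adds, written against plain arrays rather than THTensor: when the copied block is a single element, direct assignment avoids the per-element memcpy call overhead. Indices here are 0-based, whereas the TH code shown uses 1-based index_data.

```cpp
#include <cstddef>
#include <cstring>

// dst has numel * stride elements; src rows are `stride` elements wide and
// idx holds `numel` 0-based row indices into src.
void indexSelect(float* dst, const float* src, const long* idx,
                 std::ptrdiff_t numel, std::ptrdiff_t stride) {
    if (stride == 1) {
        // 1-D contiguous fast path: plain assignment per selected element.
        #pragma omp parallel for
        for (std::ptrdiff_t i = 0; i < numel; ++i)
            dst[i] = src[idx[i]];
    } else {
        // General path: copy one stride-sized block per selected index.
        #pragma omp parallel for
        for (std::ptrdiff_t i = 0; i < numel; ++i)
            std::memcpy(dst + i * stride, src + idx[i] * stride,
                        static_cast<std::size_t>(stride) * sizeof(float));
    }
}
```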
mmm a / drivers / unix / dir_access_unix . cpp <nl> ppp b / drivers / unix / dir_access_unix . cpp <nl> Error DirAccessUnix : : rename ( String p_path , String p_new_path ) { <nl> <nl> return : : rename ( p_path . utf8 ( ) . get_data ( ) , p_new_path . utf8 ( ) . get_data ( ) ) = = 0 ? OK : FAILED ; <nl> } <nl> + <nl> Error DirAccessUnix : : remove ( String p_path ) { <nl> <nl> if ( p_path . is_rel_path ( ) ) <nl> mmm a / platform / x11 / os_x11 . cpp <nl> ppp b / platform / x11 / os_x11 . cpp <nl> <nl> # include " drivers / gles3 / rasterizer_gles3 . h " <nl> # include " errno . h " <nl> # include " key_mapping_x11 . h " <nl> + # include " os / dir_access . h " <nl> # include " print_string . h " <nl> # include " servers / visual / visual_server_raster . h " <nl> # include " servers / visual / visual_server_wrap_mt . h " <nl> static String get_mountpoint ( const String & p_path ) { <nl> } <nl> <nl> Error OS_X11 : : move_to_trash ( const String & p_path ) { <nl> - String trashcan = " " ; <nl> + String trash_can = " " ; <nl> String mnt = get_mountpoint ( p_path ) ; <nl> <nl> + / / If there is a directory " [ Mountpoint ] / . Trash - [ UID ] / files " , use it as the trash can . <nl> if ( mnt ! = " " ) { <nl> String path ( mnt + " / . Trash - " + itos ( getuid ( ) ) + " / files " ) ; <nl> struct stat s ; <nl> if ( ! stat ( path . utf8 ( ) . get_data ( ) , & s ) ) { <nl> - trashcan = path ; <nl> + trash_can = path ; <nl> } <nl> } <nl> <nl> - if ( trashcan = = " " ) { <nl> + / / Otherwise , if $ { XDG_DATA_HOME } is defined , use " $ { XDG_DATA_HOME } / Trash / files " as the trash can . <nl> + if ( trash_can = = " " ) { <nl> char * dhome = getenv ( " XDG_DATA_HOME " ) ; <nl> if ( dhome ) { <nl> - trashcan = String ( dhome ) + " / Trash / files " ; <nl> + trash_can = String ( dhome ) + " / Trash / files " ; <nl> } <nl> } <nl> <nl> - if ( trashcan = = " " ) { <nl> + / / Otherwise , if $ { HOME } is defined , use " $ { HOME } / . local / share / Trash / files " as the trash can . <nl> + if ( trash_can = = " " ) { <nl> char * home = getenv ( " HOME " ) ; <nl> if ( home ) { <nl> - trashcan = String ( home ) + " / . local / share / Trash / files " ; <nl> + trash_can = String ( home ) + " / . local / share / Trash / files " ; <nl> } <nl> } <nl> <nl> - if ( trashcan = = " " ) { <nl> - ERR_PRINTS ( " move_to_trash : Could not determine trashcan location " ) ; <nl> + / / Issue an error if none of the previous locations is appropriate for the trash can . <nl> + if ( trash_can = = " " ) { <nl> + ERR_PRINTS ( " move_to_trash : Could not determine the trash can location " ) ; <nl> return FAILED ; <nl> } <nl> <nl> - List < String > args ; <nl> - args . push_back ( " - p " ) ; <nl> - args . push_back ( trashcan ) ; <nl> - Error err = execute ( " mkdir " , args , true ) ; <nl> - if ( err = = OK ) { <nl> - List < String > args2 ; <nl> - args2 . push_back ( p_path ) ; <nl> - args2 . push_back ( trashcan ) ; <nl> - err = execute ( " mv " , args2 , true ) ; <nl> + / / Create needed directories for decided trash can location . <nl> + DirAccess * dir_access = DirAccess : : create ( DirAccess : : ACCESS_FILESYSTEM ) ; <nl> + Error err = dir_access - > make_dir_recursive ( trash_can ) ; <nl> + memdelete ( dir_access ) ; <nl> + <nl> + / / Issue an error if trash can is not created proprely . <nl> + if ( err ! 
= OK ) { <nl> + ERR_PRINTS ( " move_to_trash : Could not create the trash can \ " " + trash_can + " \ " " ) ; <nl> + return err ; <nl> } <nl> <nl> - return err ; <nl> + / / The trash can is successfully created , now move the given resource to it . <nl> + / / Do not use DirAccess : rename ( ) because it can ' t move files across multiple mountpoints . <nl> + List < String > mv_args ; <nl> + mv_args . push_back ( p_path ) ; <nl> + mv_args . push_back ( trash_can ) ; <nl> + int retval ; <nl> + err = execute ( " mv " , mv_args , true , NULL , NULL , & retval ) ; <nl> + <nl> + / / Issue an error if " mv " failed to move the given resource to the trash can . <nl> + if ( err ! = OK | | retval ! = 0 ) { <nl> + ERR_PRINTS ( " move_to_trash : Could not move the resource \ " " + p_path + " \ " to the trash can \ " " + trash_can + " \ " " ) ; <nl> + return FAILED ; <nl> + } <nl> + <nl> + return OK ; <nl> } <nl> <nl> OS : : LatinKeyboardVariant OS_X11 : : get_latin_keyboard_variant ( ) const { <nl>
|
[X11] Improve error detection in move_to_trash
|
godotengine/godot
|
268d7c7c5b4249c0281cbb7f7fc8a66b2ebdc969
|
2018-02-18T16:02:19Z
|
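The lookup order the commit documents in comments is a small, testable policy of its own. A hedged sketch with the mount-point probe reduced to a parameter; the function and parameter names are illustrative, not Godot's API.

```cpp
#include <cstdlib>
#include <string>

// Resolve the freedesktop-style trash directory:
//   1. a mount-local [mountpoint]/.Trash-$UID/files, if the caller found one;
//   2. ${XDG_DATA_HOME}/Trash/files;
//   3. ${HOME}/.local/share/Trash/files;
// otherwise report failure with an empty string, as the commit does.
std::string resolveTrashDir(const std::string& mountLocalTrash) {
    if (!mountLocalTrash.empty())
        return mountLocalTrash;
    if (const char* dataHome = std::getenv("XDG_DATA_HOME"))
        return std::string(dataHome) + "/Trash/files";
    if (const char* home = std::getenv("HOME"))
        return std::string(home) + "/.local/share/Trash/files";
    return "";  // caller prints "could not determine the trash can location"
}
```

Creating the directory recursively and checking the exit status of the external mv, as the commit does, then turns each remaining failure mode into a distinct, reportable error.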
mmm a / tools / swift - demangle / CMakeLists . txt <nl> ppp b / tools / swift - demangle / CMakeLists . txt <nl> add_swift_executable ( swift - demangle <nl> DEPENDS swiftBasic <nl> COMPONENT_DEPENDS support $ { LLVM_TARGETS_TO_BUILD } ) <nl> <nl> - if ( HAVE_DARWIN_MODULES_SDK ) <nl> - add_definitions ( - DSWIFT_MODULES_SDK = " $ { MODULES_SDK } " ) <nl> - endif ( ) <nl> - <nl> install ( TARGETS swift - demangle <nl> RUNTIME DESTINATION bin ) <nl> mmm a / tools / swift - demangle / Makefile <nl> ppp b / tools / swift - demangle / Makefile <nl> SWIFT_LEVEL : = . . / . . <nl> TOOLNAME = swift - demangle <nl> <nl> LINK_COMPONENTS : = support <nl> - USEDLIBS = swiftSIL . a swiftBasic . a <nl> - <nl> - # if $ ( SWIFT_SDK ) is installed , include that <nl> - SWIFT_SDK : = / Applications / Xcode . app / Contents / Developer / Platforms / MacOSX . platform / Developer / SDKs / MacOSX10 . 9 . sdk <nl> - ifeq ( $ ( shell test - d $ ( SWIFT_SDK ) & & echo OK ) , OK ) <nl> - CXXFLAGS + = - DSWIFT_MODULES_SDK = " \ " $ ( SWIFT_SDK ) \ " " <nl> - endif <nl> + USEDLIBS = swiftBasic . a <nl> <nl> include $ ( SWIFT_LEVEL ) / Makefile <nl> <nl>
|
fix the makefiles to only include swiftBasic in this.
|
apple/swift
|
b7a6cbc6388e55693f6a6f32134304d44f5d19b6
|
2013-07-29T22:49:29Z
|
mmm a / src / mongo / db / SConscript <nl> ppp b / src / mongo / db / SConscript <nl> env . Library ( <nl> ' background ' , <nl> ' bson / dotted_path_support ' , <nl> ' catalog / collection ' , <nl> + ' catalog / collection_query_info ' , <nl> ' catalog / document_validation ' , <nl> ' catalog / index_catalog_entry ' , <nl> ' catalog / index_catalog ' , <nl> mmm a / src / mongo / db / catalog / SConscript <nl> ppp b / src / mongo / db / catalog / SConscript <nl> env . Library ( <nl> ] , <nl> LIBDEPS = [ <nl> ' $ BUILD_DIR / mongo / base ' , <nl> + ' $ BUILD_DIR / mongo / db / catalog / collection_query_info ' , <nl> ' $ BUILD_DIR / mongo / db / index / index_descriptor ' , <nl> ' $ BUILD_DIR / mongo / db / index_names ' , <nl> ' $ BUILD_DIR / mongo / db / ttl_collection_cache ' , <nl> env . Library ( <nl> target = ' catalog_impl ' , <nl> source = [ <nl> " collection_impl . cpp " , <nl> - " collection_info_cache_impl . cpp " , <nl> " database_holder_impl . cpp " , <nl> " database_impl . cpp " , <nl> " index_catalog_entry_impl . cpp " , <nl> env . Library ( <nl> ] , <nl> ) <nl> <nl> + env . Library ( <nl> + target = " collection_query_info " , <nl> + source = [ <nl> + " $ BUILD_DIR / mongo / db / query / collection_query_info . cpp " , <nl> + ] , <nl> + LIBDEPS = [ <nl> + ' $ BUILD_DIR / mongo / base ' , <nl> + ' $ BUILD_DIR / mongo / db / collection_index_usage_tracker ' , <nl> + ' $ BUILD_DIR / mongo / db / concurrency / lock_manager ' , <nl> + ' $ BUILD_DIR / mongo / db / curop ' , <nl> + ' $ BUILD_DIR / mongo / db / query / query_planner ' , <nl> + ' $ BUILD_DIR / mongo / db / update_index_data ' , <nl> + ' $ BUILD_DIR / mongo / db / service_context ' , <nl> + ] , <nl> + ) <nl> + <nl> env . CppUnitTest ( <nl> target = ' db_catalog_test ' , <nl> source = [ <nl> mmm a / src / mongo / db / catalog / collection . h <nl> ppp b / src / mongo / db / catalog / collection . h <nl> <nl> # include " mongo / base / string_data . h " <nl> # include " mongo / bson / mutable / damage_vector . h " <nl> # include " mongo / bson / timestamp . h " <nl> - # include " mongo / db / catalog / collection_info_cache . h " <nl> # include " mongo / db / catalog / collection_options . h " <nl> # include " mongo / db / concurrency / d_concurrency . h " <nl> # include " mongo / db / logical_session_id . h " <nl> <nl> # include " mongo / db / storage / snapshot . h " <nl> # include " mongo / stdx / condition_variable . h " <nl> # include " mongo / stdx / mutex . h " <nl> + # include " mongo / util / decorable . h " <nl> <nl> namespace mongo { <nl> class CappedCallback ; <nl> class CappedInsertNotifier { <nl> bool _dead = false ; <nl> } ; <nl> <nl> - class Collection { <nl> + class Collection : public Decorable < Collection > { <nl> public : <nl> enum class StoreDeletedDoc { Off , On } ; <nl> <nl> class Collection { <nl> <nl> virtual bool ok ( ) const = 0 ; <nl> <nl> - virtual CollectionInfoCache * infoCache ( ) = 0 ; <nl> - virtual const CollectionInfoCache * infoCache ( ) const = 0 ; <nl> - <nl> virtual const NamespaceString & ns ( ) const = 0 ; <nl> <nl> / * * <nl> mmm a / src / mongo / db / catalog / collection_impl . cpp <nl> ppp b / src / mongo / db / catalog / collection_impl . cpp <nl> <nl> # include " mongo / bson / simple_bsonobj_comparator . h " <nl> # include " mongo / db / background . h " <nl> # include " mongo / db / catalog / collection_catalog . h " <nl> - # include " mongo / db / catalog / collection_info_cache_impl . h " <nl> # include " mongo / db / catalog / collection_options . 
h " <nl> # include " mongo / db / catalog / document_validation . h " <nl> # include " mongo / db / catalog / index_catalog_impl . h " <nl> <nl> # include " mongo / db / ops / update_request . h " <nl> # include " mongo / db / query / collation / collator_factory_interface . h " <nl> # include " mongo / db / query / collation / collator_interface . h " <nl> + # include " mongo / db / query / collection_query_info . h " <nl> # include " mongo / db / query / internal_plans . h " <nl> # include " mongo / db / repl / oplog . h " <nl> # include " mongo / db / repl / replication_coordinator . h " <nl> CollectionImpl : : CollectionImpl ( OperationContext * opCtx , <nl> _recordStore ( std : : move ( recordStore ) ) , <nl> _needCappedLock ( supportsDocLocking ( ) & & _recordStore & & _recordStore - > isCapped ( ) & & <nl> _ns . db ( ) ! = " local " ) , <nl> - _infoCache ( std : : make_unique < CollectionInfoCacheImpl > ( this , _ns ) ) , <nl> _indexCatalog ( std : : make_unique < IndexCatalogImpl > ( this ) ) , <nl> _cappedNotifier ( _recordStore & & _recordStore - > isCapped ( ) <nl> ? std : : make_unique < CappedInsertNotifier > ( ) <nl> void CollectionImpl : : init ( OperationContext * opCtx ) { <nl> _validationLevel = uassertStatusOK ( _parseValidationLevel ( collectionOptions . validationLevel ) ) ; <nl> <nl> getIndexCatalog ( ) - > init ( opCtx ) . transitional_ignore ( ) ; <nl> - infoCache ( ) - > init ( opCtx ) ; <nl> _initialized = true ; <nl> } <nl> <nl> std : : unique_ptr < PlanExecutor , PlanExecutor : : Deleter > CollectionImpl : : makePlanExe <nl> <nl> void CollectionImpl : : setNs ( NamespaceString nss ) { <nl> _ns = std : : move ( nss ) ; <nl> - _infoCache - > setNs ( _ns ) ; <nl> _recordStore . get ( ) - > setNs ( _ns ) ; <nl> } <nl> <nl> mmm a / src / mongo / db / catalog / collection_impl . h <nl> ppp b / src / mongo / db / catalog / collection_impl . h <nl> class CollectionImpl final : public Collection , public CappedCallback { <nl> return _magic = = kMagicNumber ; <nl> } <nl> <nl> - CollectionInfoCache * infoCache ( ) final { <nl> - return _infoCache . get ( ) ; <nl> - } <nl> - <nl> - const CollectionInfoCache * infoCache ( ) const final { <nl> - return _infoCache . get ( ) ; <nl> - } <nl> - <nl> const NamespaceString & ns ( ) const final { <nl> return _ns ; <nl> } <nl> class CollectionImpl final : public Collection , public CappedCallback { <nl> / / The RecordStore may be null during a repair operation . <nl> std : : unique_ptr < RecordStore > _recordStore ; / / owned <nl> const bool _needCappedLock ; <nl> - std : : unique_ptr < CollectionInfoCache > _infoCache ; <nl> std : : unique_ptr < IndexCatalog > _indexCatalog ; <nl> <nl> <nl> deleted file mode 100644 <nl> index 8ae84fb3eec9 . . 000000000000 <nl> mmm a / src / mongo / db / catalog / collection_info_cache . h <nl> ppp / dev / null <nl> <nl> - / * * <nl> - * Copyright ( C ) 2018 - present MongoDB , Inc . <nl> - * <nl> - * This program is free software : you can redistribute it and / or modify <nl> - * it under the terms of the Server Side Public License , version 1 , <nl> - * as published by MongoDB , Inc . <nl> - * <nl> - * This program is distributed in the hope that it will be useful , <nl> - * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> - * Server Side Public License for more details . <nl> - * <nl> - * You should have received a copy of the Server Side Public License <nl> - * along with this program . 
If not , see <nl> - * < http : / / www . mongodb . com / licensing / server - side - public - license > . <nl> - * <nl> - * As a special exception , the copyright holders give permission to link the <nl> - * code of portions of this program with the OpenSSL library under certain <nl> - * conditions as described in each individual source file and distribute <nl> - * linked combinations including the program with the OpenSSL library . You <nl> - * must comply with the Server Side Public License in all respects for <nl> - * all of the code used other than as permitted herein . If you modify file ( s ) <nl> - * with this exception , you may extend this exception to your version of the <nl> - * file ( s ) , but you are not obligated to do so . If you do not wish to do so , <nl> - * delete this exception statement from your version . If you delete this <nl> - * exception statement from all source files in the program , then also delete <nl> - * it in the license file . <nl> - * / <nl> - <nl> - # pragma once <nl> - <nl> - # include " mongo / db / collection_index_usage_tracker . h " <nl> - # include " mongo / db / query / plan_cache . h " <nl> - # include " mongo / db / query / plan_summary_stats . h " <nl> - # include " mongo / db / query / query_settings . h " <nl> - # include " mongo / db / update_index_data . h " <nl> - <nl> - namespace mongo { <nl> - class Collection ; <nl> - class IndexDescriptor ; <nl> - class OperationContext ; <nl> - <nl> - / * * <nl> - * this is for storing things that you want to cache about a single collection <nl> - * life cycle is managed for you from inside Collection . <nl> - * / <nl> - class CollectionInfoCache { <nl> - public : <nl> - virtual ~ CollectionInfoCache ( ) = default ; <nl> - <nl> - / * * <nl> - * Builds internal cache state based on the current state of the Collection ' s IndexCatalog . <nl> - * / <nl> - virtual void init ( OperationContext * const opCtx ) = 0 ; <nl> - <nl> - / * * <nl> - * Get the PlanCache for this collection . <nl> - * / <nl> - virtual PlanCache * getPlanCache ( ) const = 0 ; <nl> - <nl> - / * * <nl> - * Get the QuerySettings for this collection . <nl> - * / <nl> - virtual QuerySettings * getQuerySettings ( ) const = 0 ; <nl> - <nl> - / * get set of index keys for this namespace . handy to quickly check if a given <nl> - field is indexed ( Note it might be a secondary component of a compound index . ) <nl> - * / <nl> - virtual const UpdateIndexData & getIndexKeys ( OperationContext * const opCtx ) const = 0 ; <nl> - <nl> - / * * <nl> - * Returns cached index usage statistics for this collection . The map returned will contain <nl> - * entry for each index in the collection along with both a usage counter and a timestamp <nl> - * representing the date / time the counter is valid from . <nl> - * <nl> - * Note for performance that this method returns a copy of a StringMap . <nl> - * / <nl> - virtual CollectionIndexUsageMap getIndexUsageStats ( ) const = 0 ; <nl> - <nl> - / * * <nl> - * Returns a struct containing information on the number of collection scans that have been <nl> - * performed . <nl> - * / <nl> - virtual CollectionIndexUsageTracker : : CollectionScanStats getCollectionScanStats ( ) const = 0 ; <nl> - <nl> - / * * <nl> - * Register a newly - created index with the cache . Must be called whenever an index is <nl> - * built on the associated collection . <nl> - * <nl> - * Must be called under exclusive collection lock . 
<nl> - * / <nl> - virtual void addedIndex ( OperationContext * const opCtx , const IndexDescriptor * const desc ) = 0 ; <nl> - <nl> - / * * <nl> - * Deregister a newly - dropped index with the cache . Must be called whenever an index is <nl> - * dropped on the associated collection . <nl> - * <nl> - * Must be called under exclusive collection lock . <nl> - * / <nl> - virtual void droppedIndex ( OperationContext * const opCtx , const StringData indexName ) = 0 ; <nl> - <nl> - / * * <nl> - * Removes all cached query plans . <nl> - * / <nl> - virtual void clearQueryCache ( ) = 0 ; <nl> - <nl> - / * * <nl> - * Signal to the cache that a query operation has completed . ' indexesUsed ' should list the <nl> - * set of indexes used by the winning plan , if any . ' summaryStats . collectionScans ' and <nl> - * ' summaryStats . collectionScansNonTailable ' should be the number of collections scans and <nl> - * non - tailable collection scans that occured while executing the winning plan . <nl> - * / <nl> - virtual void notifyOfQuery ( OperationContext * const opCtx , <nl> - const PlanSummaryStats & summaryStats ) = 0 ; <nl> - <nl> - virtual void setNs ( NamespaceString ns ) = 0 ; <nl> - } ; <nl> - } / / namespace mongo <nl> mmm a / src / mongo / db / catalog / collection_mock . h <nl> ppp b / src / mongo / db / catalog / collection_mock . h <nl> class CollectionMock : public Collection { <nl> std : : abort ( ) ; <nl> } <nl> <nl> - CollectionInfoCache * infoCache ( ) { <nl> - std : : abort ( ) ; <nl> - } <nl> - const CollectionInfoCache * infoCache ( ) const { <nl> - std : : abort ( ) ; <nl> - } <nl> const IndexCatalog * getIndexCatalog ( ) const { <nl> return _indexCatalog . get ( ) ; <nl> } <nl> mmm a / src / mongo / db / catalog / index_build_block . cpp <nl> ppp b / src / mongo / db / catalog / index_build_block . cpp <nl> <nl> # include " mongo / db / index / index_descriptor . h " <nl> # include " mongo / db / logical_clock . h " <nl> # include " mongo / db / operation_context . h " <nl> + # include " mongo / db / query / collection_query_info . h " <nl> # include " mongo / db / storage / durable_catalog . h " <nl> # include " mongo / db / ttl_collection_cache . h " <nl> # include " mongo / util / assert_util . h " <nl> Status IndexBuildBlock : : init ( OperationContext * opCtx , Collection * collection ) { <nl> } ) ; <nl> } <nl> <nl> - / / Register this index with the CollectionInfoCache to regenerate the cache . This way , updates <nl> + / / Register this index with the CollectionQueryInfo to regenerate the cache . This way , updates <nl> / / occurring while an index is being build in the background will be aware of whether or not <nl> / / they need to modify any indexes . <nl> - collection - > infoCache ( ) - > addedIndex ( opCtx , _indexCatalogEntry - > descriptor ( ) ) ; <nl> + CollectionQueryInfo : : get ( collection ) . addedIndex ( opCtx , _indexCatalogEntry - > descriptor ( ) ) ; <nl> <nl> return Status : : OK ( ) ; <nl> } <nl> mmm a / src / mongo / db / catalog / index_catalog_entry . h <nl> ppp b / src / mongo / db / catalog / index_catalog_entry . h <nl> <nl> namespace mongo { <nl> class CollatorInterface ; <nl> class CollectionCatalogEntry ; <nl> - class CollectionInfoCache ; <nl> class IndexAccessMethod ; <nl> class IndexBuildInterceptor ; <nl> class IndexDescriptor ; <nl> mmm a / src / mongo / db / catalog / index_catalog_entry_impl . cpp <nl> ppp b / src / mongo / db / catalog / index_catalog_entry_impl . cpp <nl> <nl> <nl> # include " mongo / base / init . 
h " <nl> # include " mongo / db / catalog / collection . h " <nl> - # include " mongo / db / catalog / collection_info_cache_impl . h " <nl> # include " mongo / db / concurrency / d_concurrency . h " <nl> # include " mongo / db / concurrency / write_conflict_exception . h " <nl> # include " mongo / db / index / index_access_method . h " <nl> <nl> # include " mongo / db / multi_key_path_tracker . h " <nl> # include " mongo / db / operation_context . h " <nl> # include " mongo / db / query / collation / collator_factory_interface . h " <nl> + # include " mongo / db / query / collection_query_info . h " <nl> # include " mongo / db / service_context . h " <nl> # include " mongo / db / storage / durable_catalog . h " <nl> # include " mongo / db / transaction_participant . h " <nl> using std : : string ; <nl> <nl> IndexCatalogEntryImpl : : IndexCatalogEntryImpl ( OperationContext * const opCtx , <nl> std : : unique_ptr < IndexDescriptor > descriptor , <nl> - CollectionInfoCache * const infoCache ) <nl> + CollectionQueryInfo * const queryInfo ) <nl> : _descriptor ( std : : move ( descriptor ) ) , <nl> - _infoCache ( infoCache ) , <nl> + _queryInfo ( queryInfo ) , <nl> _ordering ( Ordering : : make ( _descriptor - > keyPattern ( ) ) ) , <nl> _isReady ( false ) , <nl> _prefix ( DurableCatalog : : get ( opCtx ) - > getIndexPrefix ( <nl> void IndexCatalogEntryImpl : : setMultikey ( OperationContext * opCtx , <nl> } <nl> } <nl> <nl> - if ( indexMetadataHasChanged & & _infoCache ) { <nl> + if ( indexMetadataHasChanged & & _queryInfo ) { <nl> LOG ( 1 ) < < ns ( ) < < " : clearing plan cache - index " < < _descriptor - > keyPattern ( ) <nl> < < " set to multi key . " ; <nl> - _infoCache - > clearQueryCache ( ) ; <nl> + _queryInfo - > clearQueryCache ( ) ; <nl> } <nl> } ; <nl> <nl> mmm a / src / mongo / db / catalog / index_catalog_entry_impl . h <nl> ppp b / src / mongo / db / catalog / index_catalog_entry_impl . h <nl> <nl> namespace mongo { <nl> <nl> class CollatorInterface ; <nl> - class CollectionInfoCache ; <nl> + class CollectionQueryInfo ; <nl> class IndexAccessMethod ; <nl> class IndexDescriptor ; <nl> class MatchExpression ; <nl> class IndexCatalogEntryImpl : public IndexCatalogEntry { <nl> IndexCatalogEntryImpl & operator = ( const IndexCatalogEntryImpl & ) = delete ; <nl> <nl> public : <nl> - explicit IndexCatalogEntryImpl ( <nl> - OperationContext * opCtx , <nl> - std : : unique_ptr < IndexDescriptor > descriptor , / / ownership passes to me <nl> - CollectionInfoCache * infoCache ) ; / / not owned , optional <nl> + IndexCatalogEntryImpl ( OperationContext * opCtx , <nl> + std : : unique_ptr < IndexDescriptor > descriptor , / / ownership passes to me <nl> + CollectionQueryInfo * queryInfo ) ; / / not owned , optional <nl> <nl> ~ IndexCatalogEntryImpl ( ) final ; <nl> <nl> class IndexCatalogEntryImpl : public IndexCatalogEntry { <nl> <nl> std : : unique_ptr < IndexDescriptor > _descriptor ; / / owned here <nl> <nl> - CollectionInfoCache * _infoCache ; / / not owned here <nl> + CollectionQueryInfo * _queryInfo ; / / not owned here <nl> <nl> std : : unique_ptr < IndexAccessMethod > _accessMethod ; <nl> <nl> mmm a / src / mongo / db / catalog / index_catalog_impl . cpp <nl> ppp b / src / mongo / db / catalog / index_catalog_impl . cpp <nl> <nl> # include " mongo / db / ops / delete . h " <nl> # include " mongo / db / query / collation / collation_spec . h " <nl> # include " mongo / db / query / collation / collator_factory_interface . 
h " <nl> + # include " mongo / db / query / collection_query_info . h " <nl> # include " mongo / db / query / internal_plans . h " <nl> # include " mongo / db / repl / replication_coordinator . h " <nl> # include " mongo / db / server_options . h " <nl> Status IndexCatalogImpl : : init ( OperationContext * opCtx ) { <nl> fassert ( 17340 , entry - > isReady ( opCtx ) ) ; <nl> } <nl> <nl> + CollectionQueryInfo : : get ( _collection ) . init ( opCtx ) ; <nl> + <nl> _magic = INDEX_CATALOG_INIT ; <nl> return Status : : OK ( ) ; <nl> } <nl> IndexCatalogEntry * IndexCatalogImpl : : createIndexEntry ( OperationContext * opCtx , <nl> <nl> auto * const descriptorPtr = descriptor . get ( ) ; <nl> auto entry = std : : make_shared < IndexCatalogEntryImpl > ( <nl> - opCtx , std : : move ( descriptor ) , _collection - > infoCache ( ) ) ; <nl> + opCtx , std : : move ( descriptor ) , & CollectionQueryInfo : : get ( _collection ) ) ; <nl> <nl> IndexDescriptor * desc = entry - > descriptor ( ) ; <nl> <nl> IndexCatalogEntry * IndexCatalogImpl : : createIndexEntry ( OperationContext * opCtx , <nl> } else { <nl> _buildingIndexes . remove ( descriptor ) ; <nl> } <nl> - _collection - > infoCache ( ) - > droppedIndex ( opCtx , indexName ) ; <nl> + CollectionQueryInfo : : get ( _collection ) . droppedIndex ( opCtx , indexName ) ; <nl> } ) ; <nl> } <nl> <nl> class IndexRemoveChange final : public RecoveryUnit : : Change { <nl> auto indexDescriptor = _entry - > descriptor ( ) ; <nl> _entries - > add ( std : : move ( _entry ) ) ; <nl> <nl> - / / Refresh the CollectionInfoCache ' s knowledge of what indices are present . This must be <nl> + / / Refresh the CollectionQueryInfo ' s knowledge of what indices are present . This must be <nl> / / done after re - adding our IndexCatalogEntry to the ' _entries ' list , since ' addedIndex ( ) ' <nl> / / refreshes its knowledge by iterating the list of indices currently in the catalog . <nl> - _collection - > infoCache ( ) - > addedIndex ( _opCtx , indexDescriptor ) ; <nl> + CollectionQueryInfo : : get ( _collection ) . addedIndex ( _opCtx , indexDescriptor ) ; <nl> } <nl> <nl> private : <nl> Status IndexCatalogImpl : : dropIndexEntry ( OperationContext * opCtx , IndexCatalogEnt <nl> new IndexRemoveChange ( opCtx , _collection , & _buildingIndexes , std : : move ( released ) ) ) ; <nl> } <nl> <nl> - _collection - > infoCache ( ) - > droppedIndex ( opCtx , indexName ) ; <nl> + CollectionQueryInfo : : get ( _collection ) . droppedIndex ( opCtx , indexName ) ; <nl> entry = nullptr ; <nl> deleteIndexFromDisk ( opCtx , indexName ) ; <nl> <nl> const IndexDescriptor * IndexCatalogImpl : : refreshEntry ( OperationContext * opCtx , <nl> <nl> / / Delete the IndexCatalogEntry that owns this descriptor . After deletion , ' oldDesc ' is <nl> / / invalid and should not be dereferenced . Also , invalidate the index from the <nl> - / / CollectionInfoCache . <nl> + / / CollectionQueryInfo . <nl> auto oldEntry = _readyIndexes . release ( oldDesc ) ; <nl> invariant ( oldEntry ) ; <nl> opCtx - > recoveryUnit ( ) - > registerChange ( <nl> new IndexRemoveChange ( opCtx , _collection , & _readyIndexes , std : : move ( oldEntry ) ) ) ; <nl> - _collection - > infoCache ( ) - > droppedIndex ( opCtx , indexName ) ; <nl> + CollectionQueryInfo : : get ( _collection ) . droppedIndex ( opCtx , indexName ) ; <nl> <nl> / / Ask the CollectionCatalogEntry for the new index spec . <nl> BSONObj spec = durableCatalog - > getIndexSpec ( opCtx , _collection - > ns ( ) , indexName ) . 
getOwned ( ) ; <nl> BSONObj keyPattern = spec . getObjectField ( " key " ) ; <nl> <nl> / / Re - register this index in the index catalog with the new spec . Also , add the new index <nl> - / / to the CollectionInfoCache . <nl> + / / to the CollectionQueryInfo . <nl> auto newDesc = <nl> std : : make_unique < IndexDescriptor > ( _collection , _getAccessMethodName ( keyPattern ) , spec ) ; <nl> const bool initFromDisk = false ; <nl> const IndexDescriptor * IndexCatalogImpl : : refreshEntry ( OperationContext * opCtx , <nl> const IndexCatalogEntry * newEntry = <nl> createIndexEntry ( opCtx , std : : move ( newDesc ) , initFromDisk , isReadyIndex ) ; <nl> invariant ( newEntry - > isReady ( opCtx ) ) ; <nl> - _collection - > infoCache ( ) - > addedIndex ( opCtx , newEntry - > descriptor ( ) ) ; <nl> + CollectionQueryInfo : : get ( _collection ) . addedIndex ( opCtx , newEntry - > descriptor ( ) ) ; <nl> <nl> / / Return the new descriptor . <nl> return newEntry - > descriptor ( ) ; <nl> mmm a / src / mongo / db / catalog / multi_index_block . cpp <nl> ppp b / src / mongo / db / catalog / multi_index_block . cpp <nl> <nl> # include " mongo / db / multi_key_path_tracker . h " <nl> # include " mongo / db / op_observer . h " <nl> # include " mongo / db / operation_context . h " <nl> + # include " mongo / db / query / collection_query_info . h " <nl> # include " mongo / db / repl / repl_set_config . h " <nl> # include " mongo / db / repl / replication_coordinator . h " <nl> # include " mongo / db / storage / storage_options . h " <nl> void MultiIndexBlock : : cleanUpAfterBuild ( OperationContext * opCtx , Collection * col <nl> } <nl> <nl> if ( ! _needToCleanup & & ! _indexes . empty ( ) ) { <nl> - collection - > infoCache ( ) - > clearQueryCache ( ) ; <nl> + CollectionQueryInfo : : get ( collection ) . clearQueryCache ( ) ; <nl> } <nl> <nl> / / Make lock acquisition uninterruptible . <nl> mmm a / src / mongo / db / commands / SConscript <nl> ppp b / src / mongo / db / commands / SConscript <nl> env . Library ( <nl> ' $ BUILD_DIR / mongo / base ' , <nl> ' $ BUILD_DIR / mongo / db / catalog / catalog_helpers ' , <nl> ' $ BUILD_DIR / mongo / db / catalog / collection_catalog_helper ' , <nl> + ' $ BUILD_DIR / mongo / db / catalog / collection_query_info ' , <nl> ' $ BUILD_DIR / mongo / db / catalog / collection_validation ' , <nl> ' $ BUILD_DIR / mongo / db / catalog / database_holder ' , <nl> ' $ BUILD_DIR / mongo / db / catalog / index_key_validate ' , <nl> mmm a / src / mongo / db / commands / count_cmd . cpp <nl> ppp b / src / mongo / db / commands / count_cmd . cpp <nl> <nl> # include " mongo / db / curop_failpoint_helpers . h " <nl> # include " mongo / db / db_raii . h " <nl> # include " mongo / db / exec / count . h " <nl> + # include " mongo / db / query / collection_query_info . h " <nl> # include " mongo / db / query / count_command_as_aggregation_command . h " <nl> # include " mongo / db / query / explain . h " <nl> # include " mongo / db / query / get_executor . h " <nl> class CmdCount : public BasicCommand { <nl> PlanSummaryStats summaryStats ; <nl> Explain : : getSummaryStats ( * exec , & summaryStats ) ; <nl> if ( collection ) { <nl> - collection - > infoCache ( ) - > notifyOfQuery ( opCtx , summaryStats ) ; <nl> + CollectionQueryInfo : : get ( collection ) . notifyOfQuery ( opCtx , summaryStats ) ; <nl> } <nl> curOp - > debug ( ) . setPlanSummaryMetrics ( summaryStats ) ; <nl> <nl> mmm a / src / mongo / db / commands / distinct . cpp <nl> ppp b / src / mongo / db / commands / distinct . 
cpp <nl> <nl> # include " mongo / db / jsobj . h " <nl> # include " mongo / db / matcher / extensions_callback_real . h " <nl> # include " mongo / db / namespace_string . h " <nl> + # include " mongo / db / query / collection_query_info . h " <nl> # include " mongo / db / query / cursor_response . h " <nl> # include " mongo / db / query / explain . h " <nl> # include " mongo / db / query / find_common . h " <nl> class DistinctCommand : public BasicCommand { <nl> PlanSummaryStats stats ; <nl> Explain : : getSummaryStats ( * executor . getValue ( ) , & stats ) ; <nl> if ( collection ) { <nl> - collection - > infoCache ( ) - > notifyOfQuery ( opCtx , stats ) ; <nl> + CollectionQueryInfo : : get ( collection ) . notifyOfQuery ( opCtx , stats ) ; <nl> } <nl> curOp - > debug ( ) . setPlanSummaryMetrics ( stats ) ; <nl> <nl> mmm a / src / mongo / db / commands / find_and_modify . cpp <nl> ppp b / src / mongo / db / commands / find_and_modify . cpp <nl> <nl> # include " mongo / db / ops / parsed_update . h " <nl> # include " mongo / db / ops / update_request . h " <nl> # include " mongo / db / ops / write_ops_retryability . h " <nl> + # include " mongo / db / query / collection_query_info . h " <nl> # include " mongo / db / query / explain . h " <nl> # include " mongo / db / query / find_and_modify_request . h " <nl> # include " mongo / db / query / get_executor . h " <nl> class CmdFindAndModify : public BasicCommand { <nl> PlanSummaryStats summaryStats ; <nl> Explain : : getSummaryStats ( * exec , & summaryStats ) ; <nl> if ( collection ) { <nl> - collection - > infoCache ( ) - > notifyOfQuery ( opCtx , summaryStats ) ; <nl> + CollectionQueryInfo : : get ( collection ) . notifyOfQuery ( opCtx , summaryStats ) ; <nl> } <nl> opDebug - > setPlanSummaryMetrics ( summaryStats ) ; <nl> <nl> class CmdFindAndModify : public BasicCommand { <nl> PlanSummaryStats summaryStats ; <nl> Explain : : getSummaryStats ( * exec , & summaryStats ) ; <nl> if ( collection ) { <nl> - collection - > infoCache ( ) - > notifyOfQuery ( opCtx , summaryStats ) ; <nl> + CollectionQueryInfo : : get ( collection ) . notifyOfQuery ( opCtx , summaryStats ) ; <nl> } <nl> UpdateStage : : recordUpdateStatsInOpDebug ( UpdateStage : : getUpdateStats ( exec . get ( ) ) , <nl> opDebug ) ; <nl> mmm a / src / mongo / db / commands / index_filter_commands . cpp <nl> ppp b / src / mongo / db / commands / index_filter_commands . cpp <nl> <nl> # include " mongo / db / matcher / expression_parser . h " <nl> # include " mongo / db / matcher / extensions_callback_real . h " <nl> # include " mongo / db / namespace_string . h " <nl> + # include " mongo / db / query / collection_query_info . h " <nl> # include " mongo / stdx / unordered_set . h " <nl> # include " mongo / util / log . h " <nl> <nl> static Status getQuerySettingsAndPlanCache ( OperationContext * opCtx , <nl> return Status ( ErrorCodes : : BadValue , " no such collection " ) ; <nl> } <nl> <nl> - CollectionInfoCache * infoCache = collection - > infoCache ( ) ; <nl> - invariant ( infoCache ) ; <nl> - <nl> - QuerySettings * querySettings = infoCache - > getQuerySettings ( ) ; <nl> + QuerySettings * querySettings = CollectionQueryInfo : : get ( collection ) . getQuerySettings ( ) ; <nl> invariant ( querySettings ) ; <nl> <nl> * querySettingsOut = querySettings ; <nl> <nl> - PlanCache * planCache = infoCache - > getPlanCache ( ) ; <nl> + PlanCache * planCache = CollectionQueryInfo : : get ( collection ) . 
getPlanCache ( ) ; <nl> invariant ( planCache ) ; <nl> <nl> * planCacheOut = planCache ; <nl> mmm a / src / mongo / db / commands / mr . cpp <nl> ppp b / src / mongo / db / commands / mr . cpp <nl> <nl> # include " mongo / db / matcher / extensions_callback_real . h " <nl> # include " mongo / db / op_observer . h " <nl> # include " mongo / db / ops / insert . h " <nl> + # include " mongo / db / query / collection_query_info . h " <nl> # include " mongo / db / query / get_executor . h " <nl> # include " mongo / db / query / plan_summary_stats . h " <nl> # include " mongo / db / query / query_planner . h " <nl> bool runMapReduce ( OperationContext * opCtx , <nl> / / TODO SERVER - 23261 : Confirm whether this is the correct place to gather all <nl> / / metrics . There is no harm adding here for the time being . <nl> curOp - > debug ( ) . setPlanSummaryMetrics ( stats ) ; <nl> - scopedAutoColl - > getCollection ( ) - > infoCache ( ) - > notifyOfQuery ( opCtx , stats ) ; <nl> + CollectionQueryInfo : : get ( scopedAutoColl - > getCollection ( ) ) . notifyOfQuery ( opCtx , stats ) ; <nl> <nl> if ( curOp - > shouldDBProfile ( ) ) { <nl> BSONObjBuilder execStatsBob ; <nl> mmm a / src / mongo / db / commands / plan_cache_commands . cpp <nl> ppp b / src / mongo / db / commands / plan_cache_commands . cpp <nl> <nl> # include " mongo / db / jsobj . h " <nl> # include " mongo / db / matcher / extensions_callback_real . h " <nl> # include " mongo / db / namespace_string . h " <nl> + # include " mongo / db / query / collection_query_info . h " <nl> # include " mongo / db / query / explain . h " <nl> # include " mongo / db / query / plan_ranker . h " <nl> # include " mongo / util / hex . h " <nl> static Status getPlanCache ( OperationContext * opCtx , <nl> return Status ( ErrorCodes : : BadValue , " no such collection " ) ; <nl> } <nl> <nl> - CollectionInfoCache * infoCache = collection - > infoCache ( ) ; <nl> - invariant ( infoCache ) ; <nl> - <nl> - PlanCache * planCache = infoCache - > getPlanCache ( ) ; <nl> + PlanCache * planCache = CollectionQueryInfo : : get ( collection ) . getPlanCache ( ) ; <nl> invariant ( planCache ) ; <nl> <nl> * planCacheOut = planCache ; <nl> mmm a / src / mongo / db / commands / run_aggregate . cpp <nl> ppp b / src / mongo / db / commands / run_aggregate . cpp <nl> <nl> # include " mongo / db / pipeline / pipeline . h " <nl> # include " mongo / db / pipeline / pipeline_d . h " <nl> # include " mongo / db / query / collation / collator_factory_interface . h " <nl> + # include " mongo / db / query / collection_query_info . h " <nl> # include " mongo / db / query / cursor_response . h " <nl> # include " mongo / db / query / find_common . h " <nl> # include " mongo / db / query / get_executor . h " <nl> Status runAggregate ( OperationContext * opCtx , <nl> / / For an optimized away pipeline , signal the cache that a query operation has completed . <nl> / / For normal pipelines this is done in DocumentSourceCursor . <nl> if ( ctx & & ctx - > getCollection ( ) ) { <nl> - ctx - > getCollection ( ) - > infoCache ( ) - > notifyOfQuery ( opCtx , stats ) ; <nl> + CollectionQueryInfo : : get ( ctx - > getCollection ( ) ) . notifyOfQuery ( opCtx , stats ) ; <nl> } <nl> } <nl> <nl> mmm a / src / mongo / db / exec / cached_plan . cpp <nl> ppp b / src / mongo / db / exec / cached_plan . cpp <nl> <nl> # include " mongo / db / exec / multi_plan . h " <nl> # include " mongo / db / exec / scoped_timer . h " <nl> # include " mongo / db / exec / working_set_common . 
h " <nl> + # include " mongo / db / query / collection_query_info . h " <nl> # include " mongo / db / query / explain . h " <nl> # include " mongo / db / query / plan_cache . h " <nl> # include " mongo / db / query / plan_ranker . h " <nl> Status CachedPlanStage : : replan ( PlanYieldPolicy * yieldPolicy , bool shouldCache ) { <nl> <nl> if ( shouldCache ) { <nl> / / Deactivate the current cache entry . <nl> - PlanCache * cache = collection ( ) - > infoCache ( ) - > getPlanCache ( ) ; <nl> + PlanCache * cache = CollectionQueryInfo : : get ( collection ( ) ) . getPlanCache ( ) ; <nl> cache - > deactivate ( * _canonicalQuery ) ; <nl> } <nl> <nl> const SpecificStats * CachedPlanStage : : getSpecificStats ( ) const { <nl> void CachedPlanStage : : updatePlanCache ( ) { <nl> const double score = PlanRanker : : scoreTree ( getStats ( ) - > children [ 0 ] . get ( ) ) ; <nl> <nl> - PlanCache * cache = collection ( ) - > infoCache ( ) - > getPlanCache ( ) ; <nl> + PlanCache * cache = CollectionQueryInfo : : get ( collection ( ) ) . getPlanCache ( ) ; <nl> Status fbs = cache - > feedback ( * _canonicalQuery , score ) ; <nl> if ( ! fbs . isOK ( ) ) { <nl> LOG ( 5 ) < < _canonicalQuery - > ns ( ) < < " : Failed to update cache with feedback : " < < redact ( fbs ) <nl> mmm a / src / mongo / db / exec / multi_plan . cpp <nl> ppp b / src / mongo / db / exec / multi_plan . cpp <nl> <nl> # include " mongo / db / concurrency / write_conflict_exception . h " <nl> # include " mongo / db / exec / scoped_timer . h " <nl> # include " mongo / db / exec / working_set_common . h " <nl> + # include " mongo / db / query / collection_query_info . h " <nl> # include " mongo / db / query / explain . h " <nl> # include " mongo / db / query / plan_cache . h " <nl> # include " mongo / db / query / plan_ranker . h " <nl> PlanStage : : StageState MultiPlanStage : : doWork ( WorkingSetID * out ) { <nl> / / if the best solution fails . Alternatively we could try to <nl> / / defer cache insertion to be after the first produced result . <nl> <nl> - collection ( ) - > infoCache ( ) - > getPlanCache ( ) - > remove ( * _query ) . transitional_ignore ( ) ; <nl> + CollectionQueryInfo : : get ( collection ( ) ) <nl> + . getPlanCache ( ) <nl> + - > remove ( * _query ) <nl> + . transitional_ignore ( ) ; <nl> <nl> _bestPlanIdx = _backupPlanIdx ; <nl> _backupPlanIdx = kNoSuchPlan ; <nl> Status MultiPlanStage : : pickBestPlan ( PlanYieldPolicy * yieldPolicy ) { <nl> } <nl> <nl> if ( validSolutions ) { <nl> - collection ( ) <nl> - - > infoCache ( ) <nl> - - > getPlanCache ( ) <nl> + CollectionQueryInfo : : get ( collection ( ) ) <nl> + . getPlanCache ( ) <nl> - > set ( * _query , <nl> solutions , <nl> std : : move ( ranking ) , <nl> mmm a / src / mongo / db / exec / subplan . cpp <nl> ppp b / src / mongo / db / exec / subplan . cpp <nl> <nl> # include " mongo / db / exec / multi_plan . h " <nl> # include " mongo / db / exec / scoped_timer . h " <nl> # include " mongo / db / matcher / extensions_callback_real . h " <nl> + # include " mongo / db / query / collection_query_info . h " <nl> # include " mongo / db / query / get_executor . h " <nl> # include " mongo / db / query / plan_executor . h " <nl> # include " mongo / db / query / planner_access . h " <nl> Status SubplanStage : : planSubqueries ( ) { <nl> <nl> / / Plan the i - th child . We might be able to find a plan for the i - th child in the plan <nl> / / cache . If there ' s no cached plan , then we generate and rank plans using the MPS . 
<nl> - const auto * planCache = collection ( ) - > infoCache ( ) - > getPlanCache ( ) ; <nl> + const auto * planCache = CollectionQueryInfo : : get ( collection ( ) ) . getPlanCache ( ) ; <nl> <nl> / / Populate branchResult - > cachedSolution if an active cachedSolution entry exists . <nl> if ( planCache - > shouldCacheQuery ( * branchResult - > canonicalQuery ) ) { <nl> mmm a / src / mongo / db / exec / update_stage . cpp <nl> ppp b / src / mongo / db / exec / update_stage . cpp <nl> <nl> # include " mongo / db / exec / working_set_common . h " <nl> # include " mongo / db / exec / write_stage_common . h " <nl> # include " mongo / db / op_observer . h " <nl> + # include " mongo / db / query / collection_query_info . h " <nl> # include " mongo / db / query / explain . h " <nl> # include " mongo / db / repl / replication_coordinator . h " <nl> # include " mongo / db / s / operation_sharding_state . h " <nl> void UpdateStage : : doRestoreStateRequiresCollection ( ) { <nl> <nl> / / The set of indices may have changed during yield . Make sure that the update driver has up to <nl> / / date index information . <nl> - const auto & updateIndexData = collection ( ) - > infoCache ( ) - > getIndexKeys ( getOpCtx ( ) ) ; <nl> + const auto & updateIndexData = CollectionQueryInfo : : get ( collection ( ) ) . getIndexKeys ( getOpCtx ( ) ) ; <nl> _params . driver - > refreshIndexKeys ( & updateIndexData ) ; <nl> } <nl> <nl> mmm a / src / mongo / db / ops / write_ops_exec . cpp <nl> ppp b / src / mongo / db / ops / write_ops_exec . cpp <nl> <nl> # include " mongo / db / ops / write_ops_exec . h " <nl> # include " mongo / db / ops / write_ops_gen . h " <nl> # include " mongo / db / ops / write_ops_retryability . h " <nl> + # include " mongo / db / query / collection_query_info . h " <nl> # include " mongo / db / query / get_executor . h " <nl> # include " mongo / db / query / plan_summary_stats . h " <nl> # include " mongo / db / repl / repl_client_info . h " <nl> static SingleWriteResult performSingleUpdateOp ( OperationContext * opCtx , <nl> PlanSummaryStats summary ; <nl> Explain : : getSummaryStats ( * exec , & summary ) ; <nl> if ( collection - > getCollection ( ) ) { <nl> - collection - > getCollection ( ) - > infoCache ( ) - > notifyOfQuery ( opCtx , summary ) ; <nl> + CollectionQueryInfo : : get ( collection - > getCollection ( ) ) . notifyOfQuery ( opCtx , summary ) ; <nl> } <nl> <nl> if ( curOp . shouldDBProfile ( ) ) { <nl> static SingleWriteResult performSingleDeleteOp ( OperationContext * opCtx , <nl> PlanSummaryStats summary ; <nl> Explain : : getSummaryStats ( * exec , & summary ) ; <nl> if ( collection . getCollection ( ) ) { <nl> - collection . getCollection ( ) - > infoCache ( ) - > notifyOfQuery ( opCtx , summary ) ; <nl> + CollectionQueryInfo : : get ( collection . getCollection ( ) ) . notifyOfQuery ( opCtx , summary ) ; <nl> } <nl> curOp . debug ( ) . setPlanSummaryMetrics ( summary ) ; <nl> <nl> mmm a / src / mongo / db / pipeline / document_source_cursor . cpp <nl> ppp b / src / mongo / db / pipeline / document_source_cursor . cpp <nl> <nl> # include " mongo / db / catalog / collection . h " <nl> # include " mongo / db / exec / working_set_common . h " <nl> # include " mongo / db / pipeline / document . h " <nl> + # include " mongo / db / query / collection_query_info . h " <nl> # include " mongo / db / query / explain . h " <nl> # include " mongo / db / query / find_common . h " <nl> # include " mongo / db / storage / storage_options . 
h " <nl> DocumentSourceCursor : : DocumentSourceCursor ( <nl> } <nl> <nl> if ( collection ) { <nl> - collection - > infoCache ( ) - > notifyOfQuery ( pExpCtx - > opCtx , _planSummaryStats ) ; <nl> + CollectionQueryInfo : : get ( collection ) . notifyOfQuery ( pExpCtx - > opCtx , _planSummaryStats ) ; <nl> } <nl> } <nl> <nl> mmm a / src / mongo / db / pipeline / process_interface_standalone . cpp <nl> ppp b / src / mongo / db / pipeline / process_interface_standalone . cpp <nl> <nl> # include " mongo / db / pipeline / document_source_cursor . h " <nl> # include " mongo / db / pipeline / lite_parsed_pipeline . h " <nl> # include " mongo / db / pipeline / pipeline_d . h " <nl> + # include " mongo / db / query / collection_query_info . h " <nl> # include " mongo / db / repl / speculative_majority_read_info . h " <nl> # include " mongo / db / s / collection_sharding_state . h " <nl> # include " mongo / db / s / sharding_state . h " <nl> CollectionIndexUsageMap MongoInterfaceStandalone : : getIndexStats ( OperationContext <nl> return CollectionIndexUsageMap ( ) ; <nl> } <nl> <nl> - return collection - > infoCache ( ) - > getIndexUsageStats ( ) ; <nl> + return CollectionQueryInfo : : get ( collection ) . getIndexUsageStats ( ) ; <nl> } <nl> <nl> void MongoInterfaceStandalone : : appendLatencyStats ( OperationContext * opCtx , <nl> Status MongoInterfaceStandalone : : appendQueryExecStats ( OperationContext * opCtx , <nl> str : : stream ( ) < < " Collection [ " < < nss . toString ( ) < < " ] not found . " } ; <nl> } <nl> <nl> - auto collectionScanStats = collection - > infoCache ( ) - > getCollectionScanStats ( ) ; <nl> + auto collectionScanStats = CollectionQueryInfo : : get ( collection ) . getCollectionScanStats ( ) ; <nl> <nl> dassert ( collectionScanStats . collectionScans < = <nl> static_cast < unsigned long long > ( std : : numeric_limits < long long > : : max ( ) ) ) ; <nl> std : : vector < BSONObj > MongoInterfaceStandalone : : getMatchingPlanCacheEntryStats ( <nl> uassert ( <nl> 50933 , str : : stream ( ) < < " collection ' " < < nss . toString ( ) < < " ' does not exist " , collection ) ; <nl> <nl> - const auto infoCache = collection - > infoCache ( ) ; <nl> - invariant ( infoCache ) ; <nl> - const auto planCache = infoCache - > getPlanCache ( ) ; <nl> + const auto planCache = CollectionQueryInfo : : get ( collection ) . getPlanCache ( ) ; <nl> invariant ( planCache ) ; <nl> <nl> return planCache - > getMatchingStats ( serializer , predicate ) ; <nl> similarity index 72 % <nl> rename from src / mongo / db / catalog / collection_info_cache_impl . cpp <nl> rename to src / mongo / db / query / collection_query_info . cpp <nl> mmm a / src / mongo / db / catalog / collection_info_cache_impl . cpp <nl> ppp b / src / mongo / db / query / collection_query_info . cpp <nl> <nl> <nl> # include " mongo / platform / basic . h " <nl> <nl> - # include " mongo / db / catalog / collection_info_cache_impl . h " <nl> + # include " mongo / db / query / collection_query_info . h " <nl> <nl> # include < memory > <nl> <nl> <nl> <nl> namespace mongo { <nl> <nl> - CollectionInfoCacheImpl : : CollectionInfoCacheImpl ( Collection * collection , const NamespaceString & ns ) <nl> - : _collection ( collection ) , <nl> - _ns ( ns ) , <nl> - _keysComputed ( false ) , <nl> - _planCache ( std : : make_unique < PlanCache > ( ns . ns ( ) ) ) , <nl> + namespace { <nl> + CoreIndexInfo indexInfoFromIndexCatalogEntry ( const IndexCatalogEntry & ice ) { <nl> + auto desc = ice . 
descriptor ( ) ; <nl> + invariant ( desc ) ; <nl> + <nl> + auto accessMethod = ice . accessMethod ( ) ; <nl> + invariant ( accessMethod ) ; <nl> + <nl> + const ProjectionExecAgg * projExec = nullptr ; <nl> + if ( desc - > getIndexType ( ) = = IndexType : : INDEX_WILDCARD ) <nl> + projExec = static_cast < const WildcardAccessMethod * > ( accessMethod ) - > getProjectionExec ( ) ; <nl> + <nl> + return { desc - > keyPattern ( ) , <nl> + desc - > getIndexType ( ) , <nl> + desc - > isSparse ( ) , <nl> + IndexEntry : : Identifier { desc - > indexName ( ) } , <nl> + ice . getFilterExpression ( ) , <nl> + ice . getCollator ( ) , <nl> + projExec } ; <nl> + } <nl> + } / / namespace <nl> + <nl> + CollectionQueryInfo : : CollectionQueryInfo ( ) <nl> + : _keysComputed ( false ) , <nl> + _planCache ( std : : make_unique < PlanCache > ( ) ) , <nl> _querySettings ( std : : make_unique < QuerySettings > ( ) ) , <nl> _indexUsageTracker ( getGlobalServiceContext ( ) - > getPreciseClockSource ( ) ) { } <nl> <nl> - const UpdateIndexData & CollectionInfoCacheImpl : : getIndexKeys ( OperationContext * opCtx ) const { <nl> + const UpdateIndexData & CollectionQueryInfo : : getIndexKeys ( OperationContext * opCtx ) const { <nl> + const Collection * coll = get . owner ( this ) ; <nl> / / This requires " some " lock , and MODE_IS is an expression for that , for now . <nl> - dassert ( opCtx - > lockState ( ) - > isCollectionLockedForMode ( _collection - > ns ( ) , MODE_IS ) ) ; <nl> + dassert ( opCtx - > lockState ( ) - > isCollectionLockedForMode ( coll - > ns ( ) , MODE_IS ) ) ; <nl> invariant ( _keysComputed ) ; <nl> return _indexedPaths ; <nl> } <nl> <nl> - void CollectionInfoCacheImpl : : computeIndexKeys ( OperationContext * opCtx ) { <nl> + void CollectionQueryInfo : : computeIndexKeys ( OperationContext * opCtx ) { <nl> _indexedPaths . clear ( ) ; <nl> <nl> + const Collection * coll = get . owner ( this ) ; <nl> std : : unique_ptr < IndexCatalog : : IndexIterator > it = <nl> - _collection - > getIndexCatalog ( ) - > getIndexIterator ( opCtx , true ) ; <nl> + coll - > getIndexCatalog ( ) - > getIndexIterator ( opCtx , true ) ; <nl> while ( it - > more ( ) ) { <nl> const IndexCatalogEntry * entry = it - > next ( ) ; <nl> const IndexDescriptor * descriptor = entry - > descriptor ( ) ; <nl> void CollectionInfoCacheImpl : : computeIndexKeys ( OperationContext * opCtx ) { <nl> _keysComputed = true ; <nl> } <nl> <nl> - void CollectionInfoCacheImpl : : notifyOfQuery ( OperationContext * opCtx , <nl> - const PlanSummaryStats & summaryStats ) { <nl> + void CollectionQueryInfo : : notifyOfQuery ( OperationContext * opCtx , <nl> + const PlanSummaryStats & summaryStats ) { <nl> _indexUsageTracker . recordCollectionScans ( summaryStats . collectionScans ) ; <nl> _indexUsageTracker . recordCollectionScansNonTailable ( summaryStats . collectionScansNonTailable ) ; <nl> <nl> const auto & indexesUsed = summaryStats . indexesUsed ; <nl> + const Collection * coll = get . owner ( this ) ; <nl> / / Record indexes used to fulfill query . <nl> for ( auto it = indexesUsed . begin ( ) ; it ! = indexesUsed . end ( ) ; + + it ) { <nl> / / This index should still exist , since the PlanExecutor would have been killed if the <nl> / / index was dropped ( and we would not get here ) . <nl> - dassert ( nullptr ! = _collection - > getIndexCatalog ( ) - > findIndexByName ( opCtx , * it ) ) ; <nl> + dassert ( nullptr ! = coll - > getIndexCatalog ( ) - > findIndexByName ( opCtx , * it ) ) ; <nl> <nl> _indexUsageTracker . 
recordIndexAccess ( * it ) ; <nl> } <nl> } <nl> <nl> - void CollectionInfoCacheImpl : : clearQueryCache ( ) { <nl> - LOG ( 1 ) < < _collection - > ns ( ) < < " : clearing plan cache - collection info cache reset " ; <nl> + void CollectionQueryInfo : : clearQueryCache ( ) { <nl> + const Collection * coll = get . owner ( this ) ; <nl> + LOG ( 1 ) < < coll - > ns ( ) < < " : clearing plan cache - collection info cache reset " ; <nl> if ( nullptr ! = _planCache . get ( ) ) { <nl> _planCache - > clear ( ) ; <nl> } <nl> } <nl> <nl> - PlanCache * CollectionInfoCacheImpl : : getPlanCache ( ) const { <nl> + PlanCache * CollectionQueryInfo : : getPlanCache ( ) const { <nl> return _planCache . get ( ) ; <nl> } <nl> <nl> - QuerySettings * CollectionInfoCacheImpl : : getQuerySettings ( ) const { <nl> + QuerySettings * CollectionQueryInfo : : getQuerySettings ( ) const { <nl> return _querySettings . get ( ) ; <nl> } <nl> <nl> - void CollectionInfoCacheImpl : : updatePlanCacheIndexEntries ( OperationContext * opCtx ) { <nl> + void CollectionQueryInfo : : updatePlanCacheIndexEntries ( OperationContext * opCtx ) { <nl> std : : vector < CoreIndexInfo > indexCores ; <nl> <nl> / / TODO We shouldn ' t need to include unfinished indexes , but we must here because the index <nl> / / catalog may be in an inconsistent state . SERVER - 18346 . <nl> const bool includeUnfinishedIndexes = true ; <nl> + const Collection * coll = get . owner ( this ) ; <nl> std : : unique_ptr < IndexCatalog : : IndexIterator > ii = <nl> - _collection - > getIndexCatalog ( ) - > getIndexIterator ( opCtx , includeUnfinishedIndexes ) ; <nl> + coll - > getIndexCatalog ( ) - > getIndexIterator ( opCtx , includeUnfinishedIndexes ) ; <nl> while ( ii - > more ( ) ) { <nl> const IndexCatalogEntry * ice = ii - > next ( ) ; <nl> indexCores . emplace_back ( indexInfoFromIndexCatalogEntry ( * ice ) ) ; <nl> void CollectionInfoCacheImpl : : updatePlanCacheIndexEntries ( OperationContext * opCt <nl> _planCache - > notifyOfIndexUpdates ( indexCores ) ; <nl> } <nl> <nl> - void CollectionInfoCacheImpl : : init ( OperationContext * opCtx ) { <nl> + void CollectionQueryInfo : : init ( OperationContext * opCtx ) { <nl> + const Collection * coll = get . owner ( this ) ; <nl> / / Requires exclusive collection lock . <nl> - invariant ( opCtx - > lockState ( ) - > isCollectionLockedForMode ( _collection - > ns ( ) , MODE_X ) ) ; <nl> + invariant ( opCtx - > lockState ( ) - > isCollectionLockedForMode ( coll - > ns ( ) , MODE_X ) ) ; <nl> <nl> const bool includeUnfinishedIndexes = false ; <nl> std : : unique_ptr < IndexCatalog : : IndexIterator > ii = <nl> - _collection - > getIndexCatalog ( ) - > getIndexIterator ( opCtx , includeUnfinishedIndexes ) ; <nl> + coll - > getIndexCatalog ( ) - > getIndexIterator ( opCtx , includeUnfinishedIndexes ) ; <nl> while ( ii - > more ( ) ) { <nl> const IndexDescriptor * desc = ii - > next ( ) - > descriptor ( ) ; <nl> _indexUsageTracker . registerIndex ( desc - > indexName ( ) , desc - > keyPattern ( ) ) ; <nl> void CollectionInfoCacheImpl : : init ( OperationContext * opCtx ) { <nl> rebuildIndexData ( opCtx ) ; <nl> } <nl> <nl> - void CollectionInfoCacheImpl : : addedIndex ( OperationContext * opCtx , const IndexDescriptor * desc ) { <nl> + void CollectionQueryInfo : : addedIndex ( OperationContext * opCtx , const IndexDescriptor * desc ) { <nl> + const Collection * coll = get . owner ( this ) ; <nl> / / Requires exclusive collection lock . 
<nl> - invariant ( opCtx - > lockState ( ) - > isCollectionLockedForMode ( _collection - > ns ( ) , MODE_X ) ) ; <nl> + invariant ( opCtx - > lockState ( ) - > isCollectionLockedForMode ( coll - > ns ( ) , MODE_X ) ) ; <nl> invariant ( desc ) ; <nl> <nl> rebuildIndexData ( opCtx ) ; <nl> void CollectionInfoCacheImpl : : addedIndex ( OperationContext * opCtx , const IndexDes <nl> _indexUsageTracker . registerIndex ( desc - > indexName ( ) , desc - > keyPattern ( ) ) ; <nl> } <nl> <nl> - void CollectionInfoCacheImpl : : droppedIndex ( OperationContext * opCtx , StringData indexName ) { <nl> + void CollectionQueryInfo : : droppedIndex ( OperationContext * opCtx , StringData indexName ) { <nl> + const Collection * coll = get . owner ( this ) ; <nl> / / Requires exclusive collection lock . <nl> - invariant ( opCtx - > lockState ( ) - > isCollectionLockedForMode ( _collection - > ns ( ) , MODE_X ) ) ; <nl> + invariant ( opCtx - > lockState ( ) - > isCollectionLockedForMode ( coll - > ns ( ) , MODE_X ) ) ; <nl> <nl> rebuildIndexData ( opCtx ) ; <nl> _indexUsageTracker . unregisterIndex ( indexName ) ; <nl> } <nl> <nl> - void CollectionInfoCacheImpl : : rebuildIndexData ( OperationContext * opCtx ) { <nl> + void CollectionQueryInfo : : rebuildIndexData ( OperationContext * opCtx ) { <nl> clearQueryCache ( ) ; <nl> <nl> _keysComputed = false ; <nl> void CollectionInfoCacheImpl : : rebuildIndexData ( OperationContext * opCtx ) { <nl> updatePlanCacheIndexEntries ( opCtx ) ; <nl> } <nl> <nl> - CollectionIndexUsageMap CollectionInfoCacheImpl : : getIndexUsageStats ( ) const { <nl> + CollectionIndexUsageMap CollectionQueryInfo : : getIndexUsageStats ( ) const { <nl> return _indexUsageTracker . getUsageStats ( ) ; <nl> } <nl> <nl> - void CollectionInfoCacheImpl : : setNs ( NamespaceString ns ) { <nl> - auto oldNs = _ns ; <nl> - _ns = std : : move ( ns ) ; <nl> - <nl> - _planCache - > setNs ( _ns ) ; <nl> - } <nl> - <nl> - CollectionIndexUsageTracker : : CollectionScanStats CollectionInfoCacheImpl : : getCollectionScanStats ( ) <nl> + CollectionIndexUsageTracker : : CollectionScanStats CollectionQueryInfo : : getCollectionScanStats ( ) <nl> const { <nl> return _indexUsageTracker . getCollectionScanStats ( ) ; <nl> } <nl> similarity index 90 % <nl> rename from src / mongo / db / catalog / collection_info_cache_impl . h <nl> rename to src / mongo / db / query / collection_query_info . h <nl> mmm a / src / mongo / db / catalog / collection_info_cache_impl . h <nl> ppp b / src / mongo / db / query / collection_query_info . h <nl> <nl> / * * <nl> - * Copyright ( C ) 2018 - present MongoDB , Inc . <nl> + * Copyright ( C ) 2019 - present MongoDB , Inc . <nl> * <nl> * This program is free software : you can redistribute it and / or modify <nl> * it under the terms of the Server Side Public License , version 1 , <nl> <nl> <nl> # pragma once <nl> <nl> - # include " mongo / db / catalog / collection_info_cache . h " <nl> + # include " mongo / db / query / collection_query_info . h " <nl> <nl> + # include " mongo / db / catalog / collection . h " <nl> # include " mongo / db / collection_index_usage_tracker . h " <nl> # include " mongo / db / query / plan_cache . h " <nl> + # include " mongo / db / query / plan_summary_stats . h " <nl> # include " mongo / db / query / query_settings . h " <nl> # include " mongo / db / update_index_data . 
h " <nl> <nl> namespace mongo { <nl> <nl> - class Collection ; <nl> class IndexDescriptor ; <nl> class OperationContext ; <nl> <nl> class OperationContext ; <nl> * this is for storing things that you want to cache about a single collection <nl> * life cycle is managed for you from inside Collection <nl> * / <nl> - class CollectionInfoCacheImpl : public CollectionInfoCache { <nl> + class CollectionQueryInfo { <nl> public : <nl> - explicit CollectionInfoCacheImpl ( Collection * collection , const NamespaceString & ns ) ; <nl> + CollectionQueryInfo ( ) ; <nl> + <nl> + inline static const auto get = Collection : : declareDecoration < CollectionQueryInfo > ( ) ; <nl> <nl> / * * <nl> * Get the PlanCache for this collection . <nl> class CollectionInfoCacheImpl : public CollectionInfoCache { <nl> * / <nl> CollectionIndexUsageMap getIndexUsageStats ( ) const ; <nl> <nl> - CollectionIndexUsageTracker : : CollectionScanStats getCollectionScanStats ( ) const override ; <nl> + CollectionIndexUsageTracker : : CollectionScanStats getCollectionScanStats ( ) const ; <nl> <nl> / * * <nl> - * Builds internal cache state based on the current state of the Collection ' s IndexCatalog <nl> + * Builds internal cache state based on the current state of the Collection ' s IndexCatalog . <nl> * / <nl> void init ( OperationContext * opCtx ) ; <nl> <nl> class CollectionInfoCacheImpl : public CollectionInfoCache { <nl> <nl> void notifyOfQuery ( OperationContext * opCtx , const PlanSummaryStats & summaryStats ) ; <nl> <nl> - void setNs ( NamespaceString ns ) override ; <nl> - <nl> private : <nl> void computeIndexKeys ( OperationContext * opCtx ) ; <nl> void updatePlanCacheIndexEntries ( OperationContext * opCtx ) ; <nl> class CollectionInfoCacheImpl : public CollectionInfoCache { <nl> * / <nl> void rebuildIndexData ( OperationContext * opCtx ) ; <nl> <nl> - Collection * _collection ; / / not owned <nl> - <nl> - NamespaceString _ns ; <nl> - <nl> / / mmm index keys cache <nl> bool _keysComputed ; <nl> UpdateIndexData _indexedPaths ; <nl> mmm a / src / mongo / db / query / explain . cpp <nl> ppp b / src / mongo / db / query / explain . cpp <nl> <nl> # include " mongo / db / exec / working_set_common . h " <nl> # include " mongo / db / keypattern . h " <nl> # include " mongo / db / query / canonical_query_encoder . h " <nl> + # include " mongo / db / query / collection_query_info . h " <nl> # include " mongo / db / query / get_executor . h " <nl> # include " mongo / db / query / plan_executor . h " <nl> # include " mongo / db / query / plan_summary_stats . h " <nl> void Explain : : generatePlannerInfo ( PlanExecutor * exec , <nl> boost : : optional < uint32_t > queryHash ; <nl> boost : : optional < uint32_t > planCacheKeyHash ; <nl> if ( collection & & exec - > getCanonicalQuery ( ) ) { <nl> - const CollectionInfoCache * infoCache = collection - > infoCache ( ) ; <nl> - const QuerySettings * querySettings = infoCache - > getQuerySettings ( ) ; <nl> - PlanCacheKey planCacheKey = <nl> - infoCache - > getPlanCache ( ) - > computeKey ( * exec - > getCanonicalQuery ( ) ) ; <nl> + const QuerySettings * querySettings = <nl> + CollectionQueryInfo : : get ( collection ) . getQuerySettings ( ) ; <nl> + PlanCacheKey planCacheKey = CollectionQueryInfo : : get ( collection ) <nl> + . getPlanCache ( ) <nl> + - > computeKey ( * exec - > getCanonicalQuery ( ) ) ; <nl> planCacheKeyHash = canonical_query_encoder : : computeHash ( planCacheKey . toString ( ) ) ; <nl> queryHash = canonical_query_encoder : : computeHash ( planCacheKey . 
getStableKeyStringData ( ) ) ; <nl> <nl> mmm a / src / mongo / db / query / find . cpp <nl> ppp b / src / mongo / db / query / find . cpp <nl> <nl> # include " mongo / db / exec / working_set_common . h " <nl> # include " mongo / db / keypattern . h " <nl> # include " mongo / db / matcher / extensions_callback_real . h " <nl> + # include " mongo / db / query / collection_query_info . h " <nl> # include " mongo / db / query / explain . h " <nl> # include " mongo / db / query / find_common . h " <nl> # include " mongo / db / query / get_executor . h " <nl> void endQueryOp ( OperationContext * opCtx , <nl> curOp - > debug ( ) . setPlanSummaryMetrics ( summaryStats ) ; <nl> <nl> if ( collection ) { <nl> - collection - > infoCache ( ) - > notifyOfQuery ( opCtx , summaryStats ) ; <nl> + CollectionQueryInfo : : get ( collection ) . notifyOfQuery ( opCtx , summaryStats ) ; <nl> } <nl> <nl> if ( curOp - > shouldDBProfile ( ) ) { <nl> mmm a / src / mongo / db / query / find . h <nl> ppp b / src / mongo / db / query / find . h <nl> void beginQueryOp ( OperationContext * opCtx , <nl> <nl> / * * <nl> * 1 ) Fills out CurOp for " opCtx " with information regarding this query ' s execution . <nl> - * 2 ) Reports index usage to the CollectionInfoCache . <nl> + * 2 ) Reports index usage to the CollectionQueryInfo . <nl> * <nl> * Uses explain functionality to extract stats from ' exec ' . <nl> * / <nl> mmm a / src / mongo / db / query / get_executor . cpp <nl> ppp b / src / mongo / db / query / get_executor . cpp <nl> <nl> # include " mongo / db / query / canonical_query . h " <nl> # include " mongo / db / query / canonical_query_encoder . h " <nl> # include " mongo / db / query / collation / collator_factory_interface . h " <nl> + # include " mongo / db / query / collection_query_info . h " <nl> # include " mongo / db / query / explain . h " <nl> # include " mongo / db / query / index_bounds_builder . h " <nl> # include " mongo / db / query / internal_plans . h " <nl> IndexEntry indexEntryFromIndexCatalogEntry ( OperationContext * opCtx , <nl> projExec } ; <nl> } <nl> <nl> - CoreIndexInfo indexInfoFromIndexCatalogEntry ( const IndexCatalogEntry & ice ) { <nl> - auto desc = ice . descriptor ( ) ; <nl> - invariant ( desc ) ; <nl> - <nl> - auto accessMethod = ice . accessMethod ( ) ; <nl> - invariant ( accessMethod ) ; <nl> - <nl> - const ProjectionExecAgg * projExec = nullptr ; <nl> - if ( desc - > getIndexType ( ) = = IndexType : : INDEX_WILDCARD ) <nl> - projExec = static_cast < const WildcardAccessMethod * > ( accessMethod ) - > getProjectionExec ( ) ; <nl> - <nl> - return { desc - > keyPattern ( ) , <nl> - desc - > getIndexType ( ) , <nl> - desc - > isSparse ( ) , <nl> - IndexEntry : : Identifier { desc - > indexName ( ) } , <nl> - ice . getFilterExpression ( ) , <nl> - ice . getCollator ( ) , <nl> - projExec } ; <nl> - } <nl> - <nl> / * * <nl> * If query supports index filters , filter params . indices according to any index filters that have <nl> * been configured . In addition , sets that there were indeed index filters applied . <nl> void applyIndexFilters ( Collection * collection , <nl> const CanonicalQuery & canonicalQuery , <nl> QueryPlannerParams * plannerParams ) { <nl> if ( ! IDHackStage : : supportsQuery ( collection , canonicalQuery ) ) { <nl> - QuerySettings * querySettings = collection - > infoCache ( ) - > getQuerySettings ( ) ; <nl> + QuerySettings * querySettings = CollectionQueryInfo : : get ( collection ) . getQuerySettings ( ) ; <nl> const auto key = canonicalQuery . 
encodeKey ( ) ; <nl> <nl> / / Filter index catalog if index filters are specified for query . <nl> StatusWith < PrepareExecutionResult > prepareExecution ( OperationContext * opCtx , <nl> } <nl> <nl> / / Check that the query should be cached . <nl> - if ( collection - > infoCache ( ) - > getPlanCache ( ) - > shouldCacheQuery ( * canonicalQuery ) ) { <nl> + if ( CollectionQueryInfo : : get ( collection ) . getPlanCache ( ) - > shouldCacheQuery ( * canonicalQuery ) ) { <nl> / / Fill in opDebug information . <nl> const auto planCacheKey = <nl> - collection - > infoCache ( ) - > getPlanCache ( ) - > computeKey ( * canonicalQuery ) ; <nl> + CollectionQueryInfo : : get ( collection ) . getPlanCache ( ) - > computeKey ( * canonicalQuery ) ; <nl> CurOp : : get ( opCtx ) - > debug ( ) . queryHash = <nl> canonical_query_encoder : : computeHash ( planCacheKey . getStableKeyStringData ( ) ) ; <nl> CurOp : : get ( opCtx ) - > debug ( ) . planCacheKey = <nl> canonical_query_encoder : : computeHash ( planCacheKey . toString ( ) ) ; <nl> <nl> / / Try to look up a cached solution for the query . <nl> - if ( auto cs = <nl> - collection - > infoCache ( ) - > getPlanCache ( ) - > getCacheEntryIfActive ( planCacheKey ) ) { <nl> + if ( auto cs = CollectionQueryInfo : : get ( collection ) <nl> + . getPlanCache ( ) <nl> + - > getCacheEntryIfActive ( planCacheKey ) ) { <nl> / / We have a CachedSolution . Have the planner turn it into a QuerySolution . <nl> auto statusWithQs = QueryPlanner : : planFromCache ( * canonicalQuery , plannerParams , * cs ) ; <nl> <nl> StatusWith < unique_ptr < PlanExecutor , PlanExecutor : : Deleter > > getExecutorUpdate ( <nl> <nl> / / Pass index information to the update driver , so that it can determine for us whether the <nl> / / update affects indices . <nl> - const auto & updateIndexData = collection - > infoCache ( ) - > getIndexKeys ( opCtx ) ; <nl> + const auto & updateIndexData = CollectionQueryInfo : : get ( collection ) . getIndexKeys ( opCtx ) ; <nl> driver - > refreshIndexKeys ( & updateIndexData ) ; <nl> <nl> if ( ! parsedUpdate - > hasParsedQuery ( ) ) { <nl> mmm a / src / mongo / db / query / get_executor . h <nl> ppp b / src / mongo / db / query / get_executor . h <nl> IndexEntry indexEntryFromIndexCatalogEntry ( OperationContext * opCtx , <nl> const IndexCatalogEntry & ice , <nl> const CanonicalQuery * canonicalQuery = nullptr ) ; <nl> <nl> - / * * <nl> - * Converts the catalog metadata for an index into a CoreIndexInfo , which is a format that is meant <nl> - * to be used to update the plan cache . This function has no side effects and is safe to call in <nl> - * all contexts . <nl> - * / <nl> - CoreIndexInfo indexInfoFromIndexCatalogEntry ( const IndexCatalogEntry & ice ) ; <nl> - <nl> / * * <nl> * Determines whether or not to wait for oplog visibility for a query . This is only used for <nl> * collection scans on the oplog . <nl> mmm a / src / mongo / db / query / plan_cache . cpp <nl> ppp b / src / mongo / db / query / plan_cache . cpp <nl> PlanCache : : PlanCache ( ) : PlanCache ( internalQueryCacheSize . load ( ) ) { } <nl> <nl> PlanCache : : PlanCache ( size_t size ) : _cache ( size ) { } <nl> <nl> - PlanCache : : PlanCache ( const std : : string & ns ) : _cache ( internalQueryCacheSize . 
load ( ) ) , _ns ( ns ) { } <nl> - <nl> PlanCache : : ~ PlanCache ( ) { } <nl> <nl> std : : unique_ptr < CachedSolution > PlanCache : : getCacheEntryIfActive ( const PlanCacheKey & key ) const { <nl> Status PlanCache : : set ( const CanonicalQuery & query , <nl> std : : unique_ptr < PlanCacheEntry > evictedEntry = _cache . add ( key , newEntry . release ( ) ) ; <nl> <nl> if ( nullptr ! = evictedEntry . get ( ) ) { <nl> - LOG ( 1 ) < < _ns < < " : plan cache maximum size exceeded - " <nl> + LOG ( 1 ) < < query . nss ( ) < < " : plan cache maximum size exceeded - " <nl> < < " removed least recently used entry " < < redact ( evictedEntry - > toString ( ) ) ; <nl> } <nl> <nl> mmm a / src / mongo / db / query / plan_cache . h <nl> ppp b / src / mongo / db / query / plan_cache . h <nl> class PlanCache { <nl> <nl> PlanCache ( size_t size ) ; <nl> <nl> - PlanCache ( const std : : string & ns ) ; <nl> - <nl> ~ PlanCache ( ) ; <nl> <nl> / * * <nl> class PlanCache { <nl> const std : : function < BSONObj ( const PlanCacheEntry & ) > & serializationFunc , <nl> const std : : function < bool ( const BSONObj & ) > & filterFunc ) const ; <nl> <nl> - void setNs ( NamespaceString ns ) { <nl> - _ns = ns . toString ( ) ; <nl> - } <nl> - <nl> private : <nl> struct NewEntryState { <nl> bool shouldBeCreated = false ; <nl> class PlanCache { <nl> / / Protects _cache . <nl> mutable stdx : : mutex _cacheMutex ; <nl> <nl> - / / Full namespace of collection . <nl> - std : : string _ns ; <nl> - <nl> / / Holds computed information about the collection ' s indexes . Used for generating plan <nl> / / cache keys . <nl> / / <nl> mmm a / src / mongo / dbtests / plan_ranking . cpp <nl> ppp b / src / mongo / dbtests / plan_ranking . cpp <nl> <nl> # include " mongo / db / index / index_descriptor . h " <nl> # include " mongo / db / json . h " <nl> # include " mongo / db / namespace_string . h " <nl> + # include " mongo / db / query / collection_query_info . h " <nl> # include " mongo / db / query / get_executor . h " <nl> # include " mongo / db / query / query_knobs_gen . h " <nl> # include " mongo / db / query / query_planner . h " <nl> class PlanRankingPreferNonFailed : public PlanRankingTestBase { <nl> Collection * collection = ctx . getCollection ( ) ; <nl> <nl> StatusWith < std : : unique_ptr < PlanCacheEntry > > planCacheEntryWithStatus = <nl> - collection - > infoCache ( ) - > getPlanCache ( ) - > getEntry ( * ( cq . get ( ) ) ) ; <nl> + CollectionQueryInfo : : get ( collection ) . getPlanCache ( ) - > getEntry ( * ( cq . get ( ) ) ) ; <nl> ASSERT_OK ( planCacheEntryWithStatus . getStatus ( ) ) ; <nl> <nl> / / We assert that there was only one plan scored , implying that there was only one <nl> mmm a / src / mongo / dbtests / query_stage_cached_plan . cpp <nl> ppp b / src / mongo / dbtests / query_stage_cached_plan . cpp <nl> <nl> # include " mongo / db / json . h " <nl> # include " mongo / db / namespace_string . h " <nl> # include " mongo / db / query / canonical_query . h " <nl> + # include " mongo / db / query / collection_query_info . h " <nl> # include " mongo / db / query / get_executor . h " <nl> # include " mongo / db / query / plan_cache . h " <nl> # include " mongo / db / query / plan_yield_policy . h " <nl> TEST_F ( QueryStageCachedPlan , QueryStageCachedPlanFailure ) { <nl> const std : : unique_ptr < CanonicalQuery > cq = std : : move ( statusWithCQ . getValue ( ) ) ; <nl> <nl> / / We shouldn ' t have anything in the plan cache for this shape yet . 
<nl> - PlanCache * cache = collection - > infoCache ( ) - > getPlanCache ( ) ; <nl> + PlanCache * cache = CollectionQueryInfo : : get ( collection ) . getPlanCache ( ) ; <nl> ASSERT ( cache ) ; <nl> ASSERT_EQ ( cache - > get ( * cq ) . state , PlanCache : : CacheEntryState : : kNotPresent ) ; <nl> <nl> TEST_F ( QueryStageCachedPlan , QueryStageCachedPlanHitMaxWorks ) { <nl> const std : : unique_ptr < CanonicalQuery > cq = std : : move ( statusWithCQ . getValue ( ) ) ; <nl> <nl> / / We shouldn ' t have anything in the plan cache for this shape yet . <nl> - PlanCache * cache = collection - > infoCache ( ) - > getPlanCache ( ) ; <nl> + PlanCache * cache = CollectionQueryInfo : : get ( collection ) . getPlanCache ( ) ; <nl> ASSERT ( cache ) ; <nl> ASSERT_EQ ( cache - > get ( * cq ) . state , PlanCache : : CacheEntryState : : kNotPresent ) ; <nl> <nl> TEST_F ( QueryStageCachedPlan , QueryStageCachedPlanAddsActiveCacheEntries ) { <nl> canonicalQueryFromFilterObj ( opCtx ( ) , nss , fromjson ( " { a : { $ gte : 11 } , b : { $ gte : 11 } } " ) ) ; <nl> <nl> / / We shouldn ' t have anything in the plan cache for this shape yet . <nl> - PlanCache * cache = collection - > infoCache ( ) - > getPlanCache ( ) ; <nl> + PlanCache * cache = CollectionQueryInfo : : get ( collection ) . getPlanCache ( ) ; <nl> ASSERT ( cache ) ; <nl> ASSERT_EQ ( cache - > get ( * shapeCq ) . state , PlanCache : : CacheEntryState : : kNotPresent ) ; <nl> <nl> TEST_F ( QueryStageCachedPlan , DeactivatesEntriesOnReplan ) { <nl> canonicalQueryFromFilterObj ( opCtx ( ) , nss , fromjson ( " { a : { $ gte : 11 } , b : { $ gte : 11 } } " ) ) ; <nl> <nl> / / We shouldn ' t have anything in the plan cache for this shape yet . <nl> - PlanCache * cache = collection - > infoCache ( ) - > getPlanCache ( ) ; <nl> + PlanCache * cache = CollectionQueryInfo : : get ( collection ) . getPlanCache ( ) ; <nl> ASSERT ( cache ) ; <nl> ASSERT_EQ ( cache - > get ( * shapeCq ) . state , PlanCache : : CacheEntryState : : kNotPresent ) ; <nl> <nl> TEST_F ( QueryStageCachedPlan , EntriesAreNotDeactivatedWhenInactiveEntriesDisabled <nl> canonicalQueryFromFilterObj ( opCtx ( ) , nss , fromjson ( " { a : { $ gte : 11 } , b : { $ gte : 11 } } " ) ) ; <nl> <nl> / / We shouldn ' t have anything in the plan cache for this shape yet . <nl> - PlanCache * cache = collection - > infoCache ( ) - > getPlanCache ( ) ; <nl> + PlanCache * cache = CollectionQueryInfo : : get ( collection ) . getPlanCache ( ) ; <nl> ASSERT ( cache ) ; <nl> ASSERT_EQ ( cache - > get ( * shapeCq ) . state , PlanCache : : CacheEntryState : : kNotPresent ) ; <nl> <nl> TEST_F ( QueryStageCachedPlan , ThrowsOnYieldRecoveryWhenIndexIsDroppedBeforePlanSe <nl> const std : : unique_ptr < CanonicalQuery > cq = std : : move ( statusWithCQ . getValue ( ) ) ; <nl> <nl> / / We shouldn ' t have anything in the plan cache for this shape yet . <nl> - PlanCache * cache = collection - > infoCache ( ) - > getPlanCache ( ) ; <nl> + PlanCache * cache = CollectionQueryInfo : : get ( collection ) . getPlanCache ( ) ; <nl> ASSERT ( cache ) ; <nl> <nl> / / Get planner params . <nl> TEST_F ( QueryStageCachedPlan , DoesNotThrowOnYieldRecoveryWhenIndexIsDroppedAferPl <nl> const std : : unique_ptr < CanonicalQuery > cq = std : : move ( statusWithCQ . getValue ( ) ) ; <nl> <nl> / / We shouldn ' t have anything in the plan cache for this shape yet . <nl> - PlanCache * cache = collection - > infoCache ( ) - > getPlanCache ( ) ; <nl> + PlanCache * cache = CollectionQueryInfo : : get ( collection ) . 
getPlanCache ( ) ; <nl> ASSERT ( cache ) ; <nl> <nl> / / Get planner params . <nl> mmm a / src / mongo / dbtests / query_stage_multiplan . cpp <nl> ppp b / src / mongo / dbtests / query_stage_multiplan . cpp <nl> <nl> # include " mongo / db / json . h " <nl> # include " mongo / db / matcher / expression_parser . h " <nl> # include " mongo / db / namespace_string . h " <nl> + # include " mongo / db / query / collection_query_info . h " <nl> # include " mongo / db / query / get_executor . h " <nl> # include " mongo / db / query / mock_yield_policies . h " <nl> # include " mongo / db / query / plan_executor . h " <nl> TEST_F ( QueryStageMultiPlanTest , MPSDoesNotCreateActiveCacheEntryImmediately ) { <nl> auto mps = runMultiPlanner ( _opCtx . get ( ) , nss , coll , 7 ) ; <nl> <nl> / / Be sure that an inactive cache entry was added . <nl> - PlanCache * cache = coll - > infoCache ( ) - > getPlanCache ( ) ; <nl> + PlanCache * cache = CollectionQueryInfo : : get ( coll ) . getPlanCache ( ) ; <nl> ASSERT_EQ ( cache - > size ( ) , 1U ) ; <nl> auto entry = assertGet ( cache - > getEntry ( * cq ) ) ; <nl> ASSERT_FALSE ( entry - > isActive ) ; <nl> TEST_F ( QueryStageMultiPlanTest , MPSDoesCreatesActiveEntryWhenInactiveEntriesDisa <nl> auto mps = runMultiPlanner ( _opCtx . get ( ) , nss , coll , 7 ) ; <nl> <nl> / / Be sure that an _active_ cache entry was added . <nl> - PlanCache * cache = coll - > infoCache ( ) - > getPlanCache ( ) ; <nl> + PlanCache * cache = CollectionQueryInfo : : get ( coll ) . getPlanCache ( ) ; <nl> ASSERT_EQ ( cache - > get ( * cq ) . state , PlanCache : : CacheEntryState : : kPresentActive ) ; <nl> <nl> / / Run the multi - planner again . The entry should still be active . <nl>
|
SERVER - 40714 Change CollectionInfoCache to be a decoration ( rather than a member ) on Collection object
|
mongodb/mongo
|
455b66c00bc5f885621477b63a7bfb2997ef59ae
|
2019-08-07T18:29:56Z
|
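The record above replaces a direct Collection member (CollectionInfoCache, reached via collection->infoCache()) with a decoration: the patch declares a static handle inside CollectionQueryInfo — `inline static const auto get = Collection::declareDecoration<CollectionQueryInfo>();` — and call sites fetch the per-collection instance with `CollectionQueryInfo::get(collection)`. Below is a minimal, self-contained C++ sketch of that decoration idiom, under stated assumptions: the `Decorable`, `Decoration`, and `factories` names are illustrative, not MongoDB's actual machinery (which lives in `mongo/util/decorable.h` and also supports the `get.owner(this)` back-mapping seen in the diff, omitted here for brevity).

    // Sketch of the decoration idiom adopted by SERVER-40714 (illustrative only).
    #include <cassert>
    #include <cstddef>
    #include <memory>
    #include <vector>

    template <typename D>
    class Decorable {
    public:
        // Handle returned by declareDecoration<T>(); indexes into the owner's slots.
        template <typename T>
        class Decoration {
        public:
            explicit Decoration(std::size_t index) : _index(index) {}
            // get(owner): fetch the T that lives on this particular owner instance.
            T& operator()(D& owner) const {
                return *static_cast<T*>(owner._slots[_index].get());
            }
        private:
            std::size_t _index;
        };

        // Called once at namespace scope, before any D is constructed.
        template <typename T>
        static Decoration<T> declareDecoration() {
            factories().push_back([]() -> std::shared_ptr<void> { return std::make_shared<T>(); });
            return Decoration<T>(factories().size() - 1);
        }

    protected:
        Decorable() {
            // Every declared decoration is constructed alongside its owner.
            for (auto f : factories()) _slots.push_back(f());
        }

    private:
        using Factory = std::shared_ptr<void> (*)();
        static std::vector<Factory>& factories() {
            static std::vector<Factory> v;
            return v;
        }
        std::vector<std::shared_ptr<void>> _slots;
    };

    // Stand-ins for the classes in the patch above.
    struct CollectionQueryInfo {
        int clears = 0;
        void clearQueryCache() { ++clears; }
    };

    class Collection : public Decorable<Collection> {};

    // Mirrors the patch's declaration inside CollectionQueryInfo:
    //   inline static const auto get = Collection::declareDecoration<CollectionQueryInfo>();
    const auto getQueryInfo = Collection::declareDecoration<CollectionQueryInfo>();

    int main() {
        Collection coll;                       // decoration is built with its owner
        getQueryInfo(coll).clearQueryCache();  // like CollectionQueryInfo::get(coll).clearQueryCache()
        assert(getQueryInfo(coll).clears == 1);
    }

The payoff visible throughout the diff: Collection no longer has to expose an infoCache() accessor or forward lifecycle calls — any code holding a Collection can reach the query info through the static handle, which is why the commit could also delete the CollectionInfoCache interface, its setNs() plumbing, and the PlanCache namespace string.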
mmm a / include / swift / AST / DiagnosticsFrontend . def <nl> ppp b / include / swift / AST / DiagnosticsFrontend . def <nl> <nl> / / <nl> / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> <nl> - # if ! ( defined ( DIAG ) | | ( defined ( ERROR ) & & defined ( WARNING ) & & defined ( NOTE ) ) ) <nl> - # error Must define either DIAG or the set { ERROR , WARNING , NOTE } <nl> + # if ! ( defined ( DIAG ) | | ( defined ( ERROR ) & & defined ( WARNING ) & & defined ( NOTE ) & & \ <nl> + defined ( REMARK ) ) ) <nl> + # error Must define either DIAG or the set { ERROR , WARNING , NOTE , REMARK } <nl> # endif <nl> <nl> # ifndef ERROR <nl> <nl> DIAG ( NOTE , ID , Options , Text , Signature ) <nl> # endif <nl> <nl> + # ifndef REMARK <nl> + # define REMARK ( ID , Options , Text , Signature ) \ <nl> + DIAG ( REMARK , ID , Options , Text , Signature ) <nl> + # endif <nl> + <nl> WARNING ( warning_no_such_sdk , none , <nl> " no such SDK : ' % 0 ' " , ( StringRef ) ) <nl> <nl> ERROR ( error_extracting_flags_from_module_interface , none , <nl> ERROR ( missing_dependency_of_module_interface , none , <nl> " missing dependency ' % 0 ' of module interface ' % 1 ' : % 2 " , <nl> ( StringRef , StringRef , StringRef ) ) <nl> + REMARK ( rebuilding_module_from_interface , none , <nl> + " rebuilding module ' % 0 ' from interface ' % 1 ' " , ( StringRef , StringRef ) ) <nl> + NOTE ( out_of_date_module_here , none , <nl> + " % select { compiled | cached | forwarding | prebuilt } 0 module is out of date : ' % 1 ' " , <nl> + ( unsigned , StringRef ) ) <nl> + NOTE ( module_interface_dependency_out_of_date , none , <nl> + " dependency is out of date : ' % 0 ' " , <nl> + ( StringRef ) ) <nl> + NOTE ( compiled_module_invalid , none , <nl> + " unable to load compiled module ' % 0 ' " , <nl> + ( StringRef ) ) <nl> + NOTE ( compiled_module_invalid_reason , none , <nl> + " unable to load compiled module ' % 0 ' : % 1 " , <nl> + ( StringRef , StringRef ) ) <nl> ERROR ( error_extracting_dependencies_from_cached_module , none , <nl> " error extracting dependencies from cached module ' % 0 ' " , <nl> ( StringRef ) ) <nl> ERROR ( unknown_forced_module_loading_mode , none , <nl> # if defined ( DIAG ) <nl> # undef DIAG <nl> # endif <nl> + # undef REMARK <nl> # undef NOTE <nl> # undef WARNING <nl> # undef ERROR <nl> mmm a / include / swift / Frontend / FrontendOptions . h <nl> ppp b / include / swift / Frontend / FrontendOptions . h <nl> class FrontendOptions { <nl> / / / times ) when compiling a module interface ? <nl> bool SerializeModuleInterfaceDependencyHashes = false ; <nl> <nl> + / / / Should we warn if an imported module needed to be rebuilt from a <nl> + / / / module interface file ? <nl> + bool RemarkOnRebuildFromModuleInterface = false ; <nl> + <nl> / / / The different modes for validating TBD against the LLVM IR . <nl> enum class TBDValidationMode { <nl> Default , / / / < Do the default validation for the current platform . <nl> mmm a / include / swift / Frontend / ParseableInterfaceModuleLoader . h <nl> ppp b / include / swift / Frontend / ParseableInterfaceModuleLoader . h <nl> namespace swift { <nl> / / / directory , and loading the serialized . swiftmodules from there . 
<nl> class ParseableInterfaceModuleLoader : public SerializedModuleLoaderBase { <nl> friend class unittest : : ParseableInterfaceModuleLoaderTest ; <nl> - explicit ParseableInterfaceModuleLoader ( ASTContext & ctx , StringRef cacheDir , <nl> - StringRef prebuiltCacheDir , <nl> - DependencyTracker * tracker , <nl> - ModuleLoadingMode loadMode ) <nl> + explicit ParseableInterfaceModuleLoader ( <nl> + ASTContext & ctx , StringRef cacheDir , StringRef prebuiltCacheDir , <nl> + DependencyTracker * tracker , ModuleLoadingMode loadMode , <nl> + bool RemarkOnRebuildFromInterface ) <nl> : SerializedModuleLoaderBase ( ctx , tracker , loadMode ) , <nl> - CacheDir ( cacheDir ) , PrebuiltCacheDir ( prebuiltCacheDir ) <nl> + CacheDir ( cacheDir ) , PrebuiltCacheDir ( prebuiltCacheDir ) , <nl> + RemarkOnRebuildFromInterface ( RemarkOnRebuildFromInterface ) <nl> { } <nl> <nl> std : : string CacheDir ; <nl> std : : string PrebuiltCacheDir ; <nl> + bool RemarkOnRebuildFromInterface ; <nl> <nl> std : : error_code findModuleFilesInDirectory ( <nl> AccessPathElem ModuleID , StringRef DirPath , StringRef ModuleFilename , <nl> class ParseableInterfaceModuleLoader : public SerializedModuleLoaderBase { <nl> public : <nl> static std : : unique_ptr < ParseableInterfaceModuleLoader > <nl> create ( ASTContext & ctx , StringRef cacheDir , StringRef prebuiltCacheDir , <nl> - DependencyTracker * tracker , ModuleLoadingMode loadMode ) { <nl> + DependencyTracker * tracker , ModuleLoadingMode loadMode , <nl> + bool RemarkOnRebuildFromInterface = false ) { <nl> return std : : unique_ptr < ParseableInterfaceModuleLoader > ( <nl> new ParseableInterfaceModuleLoader ( ctx , cacheDir , prebuiltCacheDir , <nl> - tracker , loadMode ) ) ; <nl> + tracker , loadMode , <nl> + RemarkOnRebuildFromInterface ) ) ; <nl> } <nl> <nl> / / / Unconditionally build \ p InPath ( a swiftinterface file ) to \ p OutPath ( as <nl> mmm a / include / swift / Option / FrontendOptions . td <nl> ppp b / include / swift / Option / FrontendOptions . td <nl> def warn_long_expression_type_checking : Separate < [ " - " ] , " warn - long - expression - t <nl> def warn_long_expression_type_checking_EQ : Joined < [ " - " ] , " warn - long - expression - type - checking = " > , <nl> Alias < warn_long_expression_type_checking > ; <nl> <nl> + def Rmodule_interface_rebuild : Flag < [ " - " ] , " Rmodule - interface - rebuild " > , <nl> + HelpText < " Emits a remark if an imported module needs to be re - compiled from its module interface " > ; <nl> + <nl> def solver_expression_time_threshold_EQ : Joined < [ " - " ] , " solver - expression - time - threshold = " > ; <nl> <nl> def solver_disable_shrink : <nl> mmm a / lib / Frontend / ArgsToFrontendOptionsConverter . cpp <nl> ppp b / lib / Frontend / ArgsToFrontendOptionsConverter . cpp <nl> bool ArgsToFrontendOptionsConverter : : convert ( <nl> Opts . SerializeModuleInterfaceDependencyHashes | = <nl> Args . hasArg ( OPT_serialize_module_interface_dependency_hashes ) ; <nl> <nl> + Opts . RemarkOnRebuildFromModuleInterface | = <nl> + Args . hasArg ( OPT_Rmodule_interface_rebuild ) ; <nl> + <nl> computePrintStatsOptions ( ) ; <nl> computeDebugTimeOptions ( ) ; <nl> computeTBDOptions ( ) ; <nl> mmm a / lib / Frontend / DiagnosticVerifier . cpp <nl> ppp b / lib / Frontend / DiagnosticVerifier . cpp <nl> bool DiagnosticVerifier : : verifyFile ( unsigned BufferID , <nl> } else if ( MatchStart . startswith ( " expected - error " ) ) { <nl> ExpectedClassification = llvm : : SourceMgr : : DK_Error ; <nl> MatchStart = MatchStart . 
substr ( strlen ( " expected - error " ) ) ; <nl> + } else if ( MatchStart . startswith ( " expected - remark " ) ) { <nl> + ExpectedClassification = llvm : : SourceMgr : : DK_Remark ; <nl> + MatchStart = MatchStart . substr ( strlen ( " expected - remark " ) ) ; <nl> } else <nl> continue ; <nl> <nl> mmm a / lib / Frontend / Frontend . cpp <nl> ppp b / lib / Frontend / Frontend . cpp <nl> bool CompilerInstance : : setUpModuleLoaders ( ) { <nl> if ( MLM ! = ModuleLoadingMode : : OnlySerialized ) { <nl> auto const & Clang = clangImporter - > getClangInstance ( ) ; <nl> std : : string ModuleCachePath = getModuleCachePathFromClang ( Clang ) ; <nl> - StringRef PrebuiltModuleCachePath = <nl> - Invocation . getFrontendOptions ( ) . PrebuiltModuleCachePath ; <nl> - auto PIML = ParseableInterfaceModuleLoader : : create ( * Context , <nl> - ModuleCachePath , <nl> - PrebuiltModuleCachePath , <nl> - getDependencyTracker ( ) , <nl> - MLM ) ; <nl> + auto & FEOpts = Invocation . getFrontendOptions ( ) ; <nl> + StringRef PrebuiltModuleCachePath = FEOpts . PrebuiltModuleCachePath ; <nl> + auto PIML = ParseableInterfaceModuleLoader : : create ( <nl> + * Context , ModuleCachePath , PrebuiltModuleCachePath , <nl> + getDependencyTracker ( ) , MLM , FEOpts . RemarkOnRebuildFromModuleInterface ) ; <nl> Context - > addModuleLoader ( std : : move ( PIML ) ) ; <nl> } <nl> Context - > addModuleLoader ( std : : move ( SML ) ) ; <nl> mmm a / lib / Frontend / ParseableInterfaceModuleLoader . cpp <nl> ppp b / lib / Frontend / ParseableInterfaceModuleLoader . cpp <nl> class swift : : ParseableInterfaceBuilder { <nl> <nl> namespace { <nl> <nl> + / / / Keeps track of the various reasons the module interface loader needed to <nl> + / / / fall back and rebuild a module from its interface . <nl> + struct ModuleRebuildInfo { <nl> + enum class ModuleKind { <nl> + Normal , <nl> + Cached , <nl> + Forwarding , <nl> + Prebuilt <nl> + } ; <nl> + struct OutOfDateModule { <nl> + std : : string path ; <nl> + Optional < serialization : : Status > serializationStatus ; <nl> + ModuleKind kind ; <nl> + SmallVector < std : : string , 10 > outOfDateDependencies ; <nl> + } ; <nl> + SmallVector < OutOfDateModule , 3 > outOfDateModules ; <nl> + <nl> + OutOfDateModule & getOrInsertOutOfDateModule ( StringRef path ) { <nl> + for ( auto & mod : outOfDateModules ) { <nl> + if ( mod . path = = path ) return mod ; <nl> + } <nl> + outOfDateModules . push_back ( { path , None , ModuleKind : : Normal , { } } ) ; <nl> + return outOfDateModules . back ( ) ; <nl> + } <nl> + <nl> + / / / Sets the kind of a module that failed to load . <nl> + void setModuleKind ( StringRef path , ModuleKind kind ) { <nl> + getOrInsertOutOfDateModule ( path ) . kind = kind ; <nl> + } <nl> + <nl> + / / / Sets the serialization status of the module at \ c path . If this is <nl> + / / / anything other than \ c Valid , a note will be added stating why the module <nl> + / / / was invalid . <nl> + void setSerializationStatus ( StringRef path , serialization : : Status status ) { <nl> + getOrInsertOutOfDateModule ( path ) . serializationStatus = status ; <nl> + } <nl> + <nl> + / / / Registers an out - of - date dependency at \ c depPath for the module <nl> + / / / at \ c modulePath . <nl> + void addOutOfDateDependency ( StringRef modulePath , StringRef depPath ) { <nl> + getOrInsertOutOfDateModule ( modulePath ) <nl> + . outOfDateDependencies . 
push_back ( depPath ) ; <nl> + } <nl> + <nl> + const char * invalidModuleReason ( serialization : : Status status ) { <nl> + using namespace serialization ; <nl> + switch ( status ) { <nl> + case Status : : FormatTooOld : <nl> + return " compiled with an older version of the compiler " ; <nl> + case Status : : FormatTooNew : <nl> + return " compiled with a newer version of the compiler " ; <nl> + case Status : : Malformed : <nl> + return " malformed " ; <nl> + case Status : : TargetIncompatible : <nl> + return " compiled for a different target platform " ; <nl> + case Status : : TargetTooNew : <nl> + return " target platform newer than current platform " ; <nl> + default : return nullptr ; <nl> + } <nl> + } <nl> + <nl> + / / / Emits a diagnostic for all out - of - date compiled or forwarding modules <nl> + / / / encountered while trying to load a module . <nl> + void diagnose ( ASTContext & ctx , SourceLoc loc , StringRef moduleName , <nl> + StringRef interfacePath ) { <nl> + ctx . Diags . diagnose ( loc , diag : : rebuilding_module_from_interface , <nl> + moduleName , interfacePath ) ; <nl> + <nl> + / / We may have found multiple failing modules , that failed for different <nl> + / / reasons . Emit a note for each of them . <nl> + for ( auto & mod : outOfDateModules ) { <nl> + ctx . Diags . diagnose ( loc , diag : : out_of_date_module_here , <nl> + ( unsigned ) mod . kind , mod . path ) ; <nl> + <nl> + / / Diagnose any out - of - date dependencies in this module . <nl> + for ( auto & dep : mod . outOfDateDependencies ) { <nl> + ctx . Diags . diagnose ( loc , diag : : module_interface_dependency_out_of_date , <nl> + dep ) ; <nl> + } <nl> + <nl> + / / If there was a compiled module that wasn ' t able to be read , diagnose <nl> + / / the reason we couldn ' t read it . <nl> + if ( auto status = mod . serializationStatus ) { <nl> + if ( auto reason = invalidModuleReason ( * status ) ) { <nl> + ctx . Diags . diagnose ( loc , diag : : compiled_module_invalid_reason , <nl> + mod . path , reason ) ; <nl> + } else { <nl> + ctx . Diags . diagnose ( loc , diag : : compiled_module_invalid , mod . path ) ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + } ; <nl> + <nl> / / / Handles the details of loading parseable interfaces as modules , and will <nl> / / / do the necessary lookup to determine if we should be loading from the <nl> / / / normal cache , the prebuilt cache , a module adjacent to the interface , or <nl> class ParseableInterfaceModuleLoaderImpl { <nl> ASTContext & ctx ; <nl> llvm : : vfs : : FileSystem & fs ; <nl> DiagnosticEngine & diags ; <nl> + ModuleRebuildInfo rebuildInfo ; <nl> const StringRef modulePath ; <nl> const std : : string interfacePath ; <nl> const StringRef moduleName ; <nl> class ParseableInterfaceModuleLoaderImpl { <nl> const SourceLoc diagnosticLoc ; <nl> DependencyTracker * const dependencyTracker ; <nl> const ModuleLoadingMode loadMode ; <nl> + const bool remarkOnRebuildFromInterface ; <nl> <nl> ParseableInterfaceModuleLoaderImpl ( <nl> ASTContext & ctx , StringRef modulePath , StringRef interfacePath , <nl> StringRef moduleName , StringRef cacheDir , StringRef prebuiltCacheDir , <nl> - SourceLoc diagLoc , DependencyTracker * dependencyTracker = nullptr , <nl> + SourceLoc diagLoc , bool remarkOnRebuildFromInterface , <nl> + DependencyTracker * dependencyTracker = nullptr , <nl> ModuleLoadingMode loadMode = ModuleLoadingMode : : PreferSerialized ) <nl> : ctx ( ctx ) , fs ( * ctx . SourceMgr . getFileSystem ( ) ) , diags ( ctx . 
Diags ) , <nl> modulePath ( modulePath ) , interfacePath ( interfacePath ) , <nl> moduleName ( moduleName ) , prebuiltCacheDir ( prebuiltCacheDir ) , <nl> cacheDir ( cacheDir ) , diagnosticLoc ( diagLoc ) , <nl> - dependencyTracker ( dependencyTracker ) , loadMode ( loadMode ) { } <nl> + dependencyTracker ( dependencyTracker ) , loadMode ( loadMode ) , <nl> + remarkOnRebuildFromInterface ( remarkOnRebuildFromInterface ) { } <nl> <nl> / / / Construct a cache key for the . swiftmodule being generated . There is a <nl> / / / balance to be struck here between things that go in the cache key and <nl> class ParseableInterfaceModuleLoaderImpl { <nl> <nl> / / Check if all the provided file dependencies are up - to - date compared to <nl> / / what ' s currently on disk . <nl> - bool dependenciesAreUpToDate ( ArrayRef < FileDependency > deps ) { <nl> + bool dependenciesAreUpToDate ( StringRef modulePath , <nl> + ArrayRef < FileDependency > deps ) { <nl> SmallString < 128 > SDKRelativeBuffer ; <nl> for ( auto & in : deps ) { <nl> StringRef fullPath = getFullDependencyPath ( in , SDKRelativeBuffer ) ; <nl> if ( ! dependencyIsUpToDate ( in , fullPath ) ) { <nl> LLVM_DEBUG ( llvm : : dbgs ( ) < < " Dep " < < fullPath <nl> < < " is directly out of date \ n " ) ; <nl> + rebuildInfo . addOutOfDateDependency ( modulePath , fullPath ) ; <nl> return false ; <nl> } <nl> LLVM_DEBUG ( llvm : : dbgs ( ) < < " Dep " < < fullPath < < " is up to date \ n " ) ; <nl> class ParseableInterfaceModuleLoaderImpl { <nl> / / Check that the output . swiftmodule file is at least as new as all the <nl> / / dependencies it read when it was built last time . <nl> bool serializedASTBufferIsUpToDate ( <nl> - const llvm : : MemoryBuffer & buf , SmallVectorImpl < FileDependency > & allDeps ) { <nl> + StringRef path , const llvm : : MemoryBuffer & buf , <nl> + SmallVectorImpl < FileDependency > & allDeps ) { <nl> LLVM_DEBUG ( llvm : : dbgs ( ) < < " Validating deps of " < < modulePath < < " \ n " ) ; <nl> auto validationInfo = serialization : : validateSerializedAST ( <nl> buf . getBuffer ( ) , / * ExtendedValidationInfo = * / nullptr , & allDeps ) ; <nl> <nl> - if ( validationInfo . status ! = serialization : : Status : : Valid ) <nl> + if ( validationInfo . status ! = serialization : : Status : : Valid ) { <nl> + rebuildInfo . setSerializationStatus ( path , validationInfo . status ) ; <nl> return false ; <nl> + } <nl> <nl> - return dependenciesAreUpToDate ( allDeps ) ; <nl> + return dependenciesAreUpToDate ( path , allDeps ) ; <nl> } <nl> <nl> / / Check that the output . swiftmodule file is at least as new as all the <nl> class ParseableInterfaceModuleLoaderImpl { <nl> if ( ! OutBuf ) <nl> return false ; <nl> moduleBuffer = std : : move ( * OutBuf ) ; <nl> - return serializedASTBufferIsUpToDate ( * moduleBuffer , AllDeps ) ; <nl> + return serializedASTBufferIsUpToDate ( modulePath , * moduleBuffer , AllDeps ) ; <nl> } <nl> <nl> / / Check that a " forwarding " . swiftmodule file is at least as new as all the <nl> / / dependencies it read when it was built last time . Requires that the <nl> / / forwarding module has been loaded from disk . 
<nl> bool forwardingModuleIsUpToDate ( <nl> - const ForwardingModule & fwd , SmallVectorImpl < FileDependency > & deps , <nl> - std : : unique_ptr < llvm : : MemoryBuffer > & moduleBuffer ) { <nl> + StringRef path , const ForwardingModule & fwd , <nl> + SmallVectorImpl < FileDependency > & deps , <nl> + std : : unique_ptr < llvm : : MemoryBuffer > & moduleBuffer ) { <nl> / / First , make sure the underlying module path exists and is valid . <nl> auto modBuf = fs . getBufferForFile ( fwd . underlyingModulePath ) ; <nl> if ( ! modBuf | | ! serializedASTLooksValid ( * modBuf . get ( ) ) ) <nl> class ParseableInterfaceModuleLoaderImpl { <nl> dep . path , / * isSDKRelative = * / false , dep . size , <nl> dep . lastModificationTime ) ) ; <nl> } <nl> - if ( ! dependenciesAreUpToDate ( deps ) ) <nl> + if ( ! dependenciesAreUpToDate ( path , deps ) ) <nl> return false ; <nl> <nl> moduleBuffer = std : : move ( * modBuf ) ; <nl> class ParseableInterfaceModuleLoaderImpl { <nl> llvm_unreachable ( " module interface loader should not have been created " ) ; <nl> } <nl> <nl> + <nl> / / First , check the cached module path . Whatever ' s in this cache represents <nl> / / the most up - to - date knowledge we have about the module . <nl> if ( auto cachedBufOrError = fs . getBufferForFile ( cachedOutputPath ) ) { <nl> class ParseableInterfaceModuleLoaderImpl { <nl> if ( isForwardingModule ) { <nl> if ( auto forwardingModule = ForwardingModule : : load ( * buf ) ) { <nl> std : : unique_ptr < llvm : : MemoryBuffer > moduleBuffer ; <nl> - if ( forwardingModuleIsUpToDate ( * forwardingModule , deps , <nl> + if ( forwardingModuleIsUpToDate ( cachedOutputPath , <nl> + * forwardingModule , deps , <nl> moduleBuffer ) ) { <nl> LLVM_DEBUG ( llvm : : dbgs ( ) < < " Found up - to - date forwarding module at " <nl> < < cachedOutputPath < < " \ n " ) ; <nl> class ParseableInterfaceModuleLoaderImpl { <nl> <nl> LLVM_DEBUG ( llvm : : dbgs ( ) < < " Found out - of - date forwarding module at " <nl> < < cachedOutputPath < < " \ n " ) ; <nl> + rebuildInfo . setModuleKind ( cachedOutputPath , <nl> + ModuleRebuildInfo : : ModuleKind : : Forwarding ) ; <nl> } <nl> / / Otherwise , check if the AST buffer itself is up to date . <nl> - } else if ( serializedASTBufferIsUpToDate ( * buf , deps ) ) { <nl> + } else if ( serializedASTBufferIsUpToDate ( cachedOutputPath , * buf , deps ) ) { <nl> LLVM_DEBUG ( llvm : : dbgs ( ) < < " Found up - to - date cached module at " <nl> < < cachedOutputPath < < " \ n " ) ; <nl> return DiscoveredModule : : normal ( cachedOutputPath , std : : move ( buf ) ) ; <nl> } else { <nl> LLVM_DEBUG ( llvm : : dbgs ( ) < < " Found out - of - date cached module at " <nl> < < cachedOutputPath < < " \ n " ) ; <nl> + rebuildInfo . setModuleKind ( cachedOutputPath , <nl> + ModuleRebuildInfo : : ModuleKind : : Cached ) ; <nl> } <nl> } <nl> <nl> class ParseableInterfaceModuleLoaderImpl { <nl> } else { <nl> LLVM_DEBUG ( llvm : : dbgs ( ) < < " Found out - of - date prebuilt module at " <nl> < < path - > str ( ) < < " \ n " ) ; <nl> + rebuildInfo . setModuleKind ( * path , <nl> + ModuleRebuildInfo : : ModuleKind : : Prebuilt ) ; <nl> } <nl> } <nl> } <nl> class ParseableInterfaceModuleLoaderImpl { <nl> <nl> auto adjacentModuleBuffer = fs . getBufferForFile ( modulePath ) ; <nl> if ( adjacentModuleBuffer ) { <nl> - if ( serializedASTBufferIsUpToDate ( * adjacentModuleBuffer . get ( ) , deps ) ) { <nl> + if ( serializedASTBufferIsUpToDate ( modulePath , * adjacentModuleBuffer . 
get ( ) , <nl> + deps ) ) { <nl> LLVM_DEBUG ( llvm : : dbgs ( ) < < " Found up - to - date module at " <nl> < < modulePath <nl> < < " ; deferring to serialized module loader \ n " ) ; <nl> class ParseableInterfaceModuleLoaderImpl { <nl> } else { <nl> LLVM_DEBUG ( llvm : : dbgs ( ) < < " Found out - of - date module at " <nl> < < modulePath < < " \ n " ) ; <nl> + rebuildInfo . setModuleKind ( modulePath , <nl> + ModuleRebuildInfo : : ModuleKind : : Normal ) ; <nl> } <nl> } else if ( adjacentModuleBuffer . getError ( ) ! = notFoundError ) { <nl> return std : : make_error_code ( std : : errc : : not_supported ) ; <nl> class ParseableInterfaceModuleLoaderImpl { <nl> } <nl> <nl> std : : unique_ptr < llvm : : MemoryBuffer > moduleBuffer ; <nl> - / / We didn ' t discover a module corresponding to this interface . Build one . <nl> + <nl> + / / We didn ' t discover a module corresponding to this interface . <nl> + <nl> + / / Diagnose that we didn ' t find a loadable module , if we were asked to . <nl> + if ( remarkOnRebuildFromInterface ) { <nl> + rebuildInfo . diagnose ( ctx , diagnosticLoc , moduleName , <nl> + interfacePath ) ; <nl> + } <nl> + <nl> if ( builder . buildSwiftModule ( cachedOutputPath , / * shouldSerializeDeps * / true , <nl> & moduleBuffer ) ) <nl> return std : : make_error_code ( std : : errc : : invalid_argument ) ; <nl> std : : error_code ParseableInterfaceModuleLoader : : findModuleFilesInDirectory ( <nl> / / Create an instance of the Impl to do the heavy lifting . <nl> ParseableInterfaceModuleLoaderImpl Impl ( <nl> Ctx , ModPath , InPath , ModuleID . first . str ( ) , <nl> - CacheDir , PrebuiltCacheDir , ModuleID . second , dependencyTracker , <nl> + CacheDir , PrebuiltCacheDir , ModuleID . second , <nl> + RemarkOnRebuildFromInterface , dependencyTracker , <nl> LoadMode ) ; <nl> <nl> / / Ask the impl to find us a module that we can load or give us an error <nl> new file mode 100644 <nl> index 000000000000 . . 8f0b43c50156 <nl> mmm / dev / null <nl> ppp b / test / ParseableInterface / ModuleCache / RebuildRemarks / malformed - compiled - module . swift <nl> <nl> + / / RUN : % empty - directory ( % t / ModuleCache ) <nl> + / / RUN : % empty - directory ( % t / Build ) <nl> + / / RUN : % empty - directory ( % t / PrebuiltCache ) <nl> + <nl> + / / 1 . Create a dummy module <nl> + / / RUN : echo ' public func publicFunction ( ) { } ' > % t / TestModule . swift <nl> + <nl> + / / 2 . Create an interface for it <nl> + / / RUN : % target - swift - frontend - typecheck % t / TestModule . swift - emit - module - interface - path % t / Build / TestModule . swiftinterface - swift - version 5 <nl> + <nl> + / / 3 . Create an empty . swiftmodule , which will force recompiling from the interface <nl> + / / RUN : touch % t / Build / TestModule . swiftmodule <nl> + <nl> + / / 4 . Try to import the malformed compiled module <nl> + / / RUN : % target - swift - frontend - typecheck - verify % s - I % t / Build - Rmodule - interface - rebuild - module - cache - path % t / ModuleCache <nl> + <nl> + import TestModule / / expected - remark { { rebuilding module ' TestModule ' from interface } } <nl> + / / expected - note @ - 1 { { is out of date } } <nl> + / / expected - note @ - 2 { { malformed } } <nl> \ No newline at end of file <nl> new file mode 100644 <nl> index 000000000000 . . e3b71929cd18 <nl> mmm / dev / null <nl> ppp b / test / ParseableInterface / ModuleCache / RebuildRemarks / out - of - date - cached - module . 
swift <nl> <nl> + / / RUN : % empty - directory ( % t / ModuleCache ) <nl> + / / RUN : % empty - directory ( % t / Build ) <nl> + <nl> + / / 1 . Create a dummy module <nl> + / / RUN : echo ' public func publicFunction ( ) { } ' > % t / TestModule . swift <nl> + <nl> + / / 2 . Create an interface for it <nl> + / / RUN : % target - swift - frontend - typecheck % t / TestModule . swift - emit - module - interface - path % t / Build / TestModule . swiftinterface - swift - version 5 <nl> + <nl> + / / 3 . Try to import the interface , which will pass and create a cached module <nl> + / / RUN : % target - swift - frontend - typecheck % s - I % t / Build - module - cache - path % t / ModuleCache <nl> + <nl> + / / 4 . Touch the interface so the cached module is no longer up - to - date <nl> + / / RUN : touch % t / Build / TestModule . swiftinterface <nl> + <nl> + / / 5 . Try to import the now out - of - date cached module <nl> + / / RUN : % target - swift - frontend - typecheck - verify % s - I % t / Build - Rmodule - interface - rebuild - module - cache - path % t / ModuleCache - Xllvm - debug - only = textual - module - interface <nl> + <nl> + import TestModule / / expected - remark { { rebuilding module ' TestModule ' from interface } } <nl> + / / expected - note @ - 1 { { cached module is out of date } } <nl> + / / expected - note @ - 2 { { dependency is out of date } } <nl> new file mode 100644 <nl> index 000000000000 . . fe2b7fcad1ee <nl> mmm / dev / null <nl> ppp b / test / ParseableInterface / ModuleCache / RebuildRemarks / out - of - date - compiled - module . swift <nl> <nl> + / / RUN : % empty - directory ( % t / ModuleCache ) <nl> + / / RUN : % empty - directory ( % t / Build ) <nl> + <nl> + / / 1 . Create a dummy module <nl> + / / RUN : echo ' public func publicFunction ( ) { } ' > % t / TestModule . swift <nl> + <nl> + / / 2 . Create an interface for it <nl> + / / RUN : % target - swift - frontend - typecheck % t / TestModule . swift - emit - module - interface - path % t / Build / TestModule . swiftinterface - swift - version 5 <nl> + <nl> + / / 3 . Build the . swiftinterface to a . swiftmodule , which will have a dependency on the interface <nl> + / / RUN : % target - swift - frontend - compile - module - from - interface - o % t / Build / TestModule . swiftmodule % t / Build / TestModule . swiftinterface <nl> + <nl> + / / 4 . Touch the interface so the module is no longer up - to - date <nl> + / / RUN : touch % t / Build / TestModule . swiftinterface <nl> + <nl> + / / 5 . Try to import the out - of - date compiled module <nl> + / / RUN : % target - swift - frontend - typecheck - verify % s - I % t / Build - Rmodule - interface - rebuild - module - cache - path % t / ModuleCache <nl> + <nl> + import TestModule / / expected - remark { { rebuilding module ' TestModule ' from interface } } <nl> + / / expected - note @ - 1 { { compiled module is out of date } } <nl> + / / expected - note @ - 2 { { dependency is out of date } } <nl> new file mode 100644 <nl> index 000000000000 . . 680dbb1ddd64 <nl> mmm / dev / null <nl> ppp b / test / ParseableInterface / ModuleCache / RebuildRemarks / out - of - date - forwarding - module . swift <nl> <nl> + / / RUN : % empty - directory ( % t / ModuleCache ) <nl> + / / RUN : % empty - directory ( % t / Build ) <nl> + / / RUN : % empty - directory ( % t / PrebuiltCache ) <nl> + <nl> + / / 1 . Create a dummy module <nl> + / / RUN : echo ' public func publicFunction ( ) { } ' > % t / TestModule . swift <nl> + <nl> + / / 2 . 
Create an interface for it <nl> + / / RUN : % target - swift - frontend - typecheck % t / TestModule . swift - emit - module - interface - path % t / Build / TestModule . swiftinterface - swift - version 5 <nl> + <nl> + / / 3 . Build the . swiftinterface to a . swiftmodule in the prebuilt cache , which will have a dependency on the interface <nl> + / / RUN : % target - swift - frontend - compile - module - from - interface % t / Build / TestModule . swiftinterface - o % t / PrebuiltCache / TestModule . swiftmodule <nl> + <nl> + / / 5 . Try to import the prebuilt module ( this should pass ) <nl> + / / RUN : % target - swift - frontend - typecheck % s - I % t / Build - sdk % t - prebuilt - module - cache - path % t / PrebuiltCache - module - cache - path % t / ModuleCache <nl> + <nl> + / / 6 . Make sure we installed a forwarding module in the cache <nl> + / / RUN : % { python } % S / . . / Inputs / check - is - forwarding - module . py % t / ModuleCache / TestModule - * . swiftmodule <nl> + <nl> + / / 7 . Modify the interface so the forwarding module and prebuilt modules are no longer up - to - date <nl> + / / RUN : echo ' ' > > % t / Build / TestModule . swiftinterface <nl> + <nl> + / / 8 . Try to import the now out - of - date forwarding module , which will fail . <nl> + / / It will also fail to load the prebuilt module after the forwarding module <nl> + / / is rejected , meaning we ' ll get a second set of notes about the prebuilt module . <nl> + / / RUN : % target - swift - frontend - typecheck - verify % s - I % t / Build - Rmodule - interface - rebuild - sdk % t - prebuilt - module - cache - path % t / PrebuiltCache - module - cache - path % t / ModuleCache <nl> + <nl> + import TestModule / / expected - remark { { rebuilding module ' TestModule ' from interface } } <nl> + / / expected - note @ - 1 { { forwarding module is out of date } } <nl> + / / expected - note @ - 2 { { dependency is out of date } } <nl> + / / expected - note @ - 3 { { prebuilt module is out of date } } <nl> + / / expected - note @ - 4 { { dependency is out of date } } <nl>
|
Merge pull request from harlanhaskins/an-absolutely-remarkable-thing
|
apple/swift
|
f978cb0ba32acad5cc434331fa6a6b0c39ea9358
|
2019-05-03T05:45:15Z
|
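The apple/swift change above threads a `RemarkOnRebuildFromInterface` flag through the module loader and accumulates the reasons each candidate module was rejected in a `ModuleRebuildInfo`, emitting one remark plus per-reason notes only when a rebuild actually happens. The sketch below is a minimal, self-contained rendering of that collect-then-diagnose pattern; the print-based diagnostics and class shape are illustrative stand-ins, not the Swift compiler's API.

```python
from dataclasses import dataclass, field
from typing import List, Optional

@dataclass
class OutOfDateModule:
    path: str
    stale_deps: List[str] = field(default_factory=list)
    invalid_reason: Optional[str] = None  # e.g. "malformed"

class RebuildInfo:
    """Collect every reason a cached/prebuilt module was rejected, so a
    single remark (plus one note per reason) can be emitted only if the
    loader really falls back to rebuilding from the interface."""

    def __init__(self) -> None:
        self.modules: List[OutOfDateModule] = []

    def get_or_insert(self, path: str) -> OutOfDateModule:
        for m in self.modules:
            if m.path == path:
                return m
        self.modules.append(OutOfDateModule(path))
        return self.modules[-1]

    def add_stale_dependency(self, mod_path: str, dep: str) -> None:
        self.get_or_insert(mod_path).stale_deps.append(dep)

    def diagnose(self, module_name: str) -> None:
        # Emitted in one place, mirroring how the diff defers all notes
        # until the rebuild decision is final.
        print(f"remark: rebuilding module '{module_name}' from interface")
        for m in self.modules:
            print(f"note: {m.path} is out of date")
            for dep in m.stale_deps:
                print(f"note: dependency '{dep}' is out of date")
            if m.invalid_reason:
                print(f"note: {m.path} is {m.invalid_reason}")

info = RebuildInfo()
info.add_stale_dependency("ModuleCache/TestModule.swiftmodule",
                          "Build/TestModule.swiftinterface")
info.get_or_insert("Build/TestModule.swiftmodule").invalid_reason = "malformed"
info.diagnose("TestModule")
```

One point of the design worth noting: the info object is populated opportunistically while each candidate (cached, forwarding, prebuilt, adjacent) is checked, but nothing is printed unless every candidate fails, so the remark never fires on a successful load.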
mmm a / . gitignore <nl> ppp b / . gitignore <nl> googletest / m4 / lt ~ obsolete . m4 <nl> # Ignore generated directories . <nl> googlemock / fused - src / <nl> googletest / fused - src / <nl> + <nl> + # macOS files <nl> + . DS_Store <nl>
|
Ignore .DS_Store file
|
google/googletest
|
a091b753325322d329dc89262bfe5ad0ab20f1bf
|
2018-07-17T09:39:29Z
|
mmm a / drivers / python / MANIFEST . in <nl> ppp b / drivers / python / MANIFEST . in <nl> @ @ - 1 + 1 @ @ <nl> - recursive - include rethinkdb * . pb . h <nl> + include ql2 . proto <nl> mmm a / drivers / python / Makefile <nl> ppp b / drivers / python / Makefile <nl> <nl> - PROTOC = protoc <nl> RETHINKDB_HOME = . . / . . <nl> - PROTO_FILE_DIR = $ ( RETHINKDB_HOME ) / src / rdb_protocol <nl> - PROTO_BASE = ql2 <nl> - PROTO_FILE = $ ( PROTO_FILE_DIR ) / $ ( PROTO_BASE ) . proto <nl> + PROTO_FILE_SRC = $ ( RETHINKDB_HOME ) / src / rdb_protocol / ql2 . proto <nl> <nl> - PYTHON_SRC = rethinkdb <nl> - PBCPP_SRC = rethinkdb <nl> - PYTHON_PB_FILE = $ ( PYTHON_SRC ) / $ ( PROTO_BASE ) _pb2 . py <nl> - CPP_PB_FILE = $ ( PBCPP_SRC ) / $ ( PROTO_BASE ) . pb . cc <nl> - PBCPP = rethinkdb_pbcpp . so <nl> - PBCPP_BUILT = . / build / lib . linux - x86_64 - 2 . 7 / rethinkdb_pbcpp . so <nl> - PYTHON_DOCS = $ ( PYTHON_SRC ) / docs . py <nl> + PYTHON_PB_FILE = rethinkdb / ql2_pb2 . py <nl> + PROTO_FILE = ql2 . proto <nl> + PYTHON_DOCS = rethinkdb / docs . py <nl> <nl> - all : $ ( PYTHON_PB_FILE ) $ ( PBCPP ) $ ( PYTHON_DOCS ) <nl> + all : $ ( PYTHON_PB_FILE ) $ ( PYTHON_DOCS ) $ ( PROTO_FILE ) <nl> <nl> - $ ( PYTHON_DOCS ) : . . / . . / docs / rql / py_docs . json <nl> + $ ( PYTHON_DOCS ) : $ ( RETHINKDB_HOME ) / docs / rql / py_docs . json <nl> python gendocs . py > $ @ <nl> <nl> $ ( PYTHON_PB_FILE ) : $ ( PROTO_FILE ) <nl> - $ ( PROTOC ) - - python_out = $ ( PYTHON_SRC ) - I $ ( PROTO_FILE_DIR ) $ ( PROTO_FILE ) <nl> + protoc - - python_out = rethinkdb $ ( PROTO_FILE ) <nl> <nl> - $ ( CPP_PB_FILE ) : $ ( PROTO_FILE ) <nl> - $ ( PROTOC ) - - cpp_out = $ ( PBCPP_SRC ) - I $ ( PROTO_FILE_DIR ) $ ( PROTO_FILE ) <nl> - <nl> - $ ( PBCPP ) : $ ( PBCPP_BUILT ) <nl> - test ! - e $ < | | cp $ < $ @ <nl> - <nl> - $ ( PBCPP_BUILT ) : $ ( CPP_PB_FILE ) <nl> - python setup . py build <nl> + $ ( PROTO_FILE ) : $ ( PROTO_FILE_SRC ) <nl> + cp $ < $ @ <nl> <nl> clean : <nl> rm - f $ ( PYTHON_PB_FILE ) <nl> - rm - f $ ( CPP_PB_FILE ) <nl> - rm - f $ ( PBCPP ) <nl> + rm - f $ ( PROTO_FILE ) <nl> rm - f $ ( PYTHON_DOCS ) <nl> rm - rf . / build <nl> rm - rf . / dist <nl> clean : <nl> <nl> PY_PKG_DIR = $ ( RETHINKDB_HOME ) / build / packages / python <nl> <nl> - sdist : $ ( PYTHON_PB_FILE ) $ ( CPP_PB_FILE ) $ ( PYTHON_DOCS ) <nl> + sdist : $ ( PYTHON_PB_FILE ) $ ( PYTHON_DOCS ) $ ( PROTO_FILE ) <nl> + rm - rf $ ( PY_PKG_DIR ) <nl> mkdir - p $ ( PY_PKG_DIR ) <nl> cp setup . py $ ( PY_PKG_DIR ) <nl> cp MANIFEST . in $ ( PY_PKG_DIR ) <nl> cp - r rethinkdb $ ( PY_PKG_DIR ) <nl> cp $ ( PYTHON_PB_FILE ) $ ( PY_PKG_DIR ) / rethinkdb <nl> + cp $ ( PROTO_FILE ) $ ( PY_PKG_DIR ) / $ ( PROTO_FILE ) <nl> cd $ ( PY_PKG_DIR ) & & python setup . py sdist <nl> <nl> publish : sdist <nl> cd $ ( PY_PKG_DIR ) & & python setup . py register upload <nl> <nl> install : sdist <nl> - cd $ ( PY_PKG_DIR ) & & python setup . py install <nl> + cd $ ( PY_PKG_DIR ) & & PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION = cpp python setup . py install <nl> <nl> . PHONY : all clean publish sdist install <nl> new file mode 100644 <nl> index 00000000000 . . 4579946abab <nl> mmm / dev / null <nl> ppp b / drivers / python / rethinkdb / _pbcpp . cpp <nl> <nl> + / / This is an empty python module . It gets linked to ql2 . pb . o , whose functions get <nl> + / / exposed and used by the C + + implementation of the google . protobuf package . <nl> + <nl> + # include < python2 . 7 / Python . 
h > <nl> + <nl> + static PyMethodDef PbMethods [ ] = { <nl> + { NULL , NULL , 0 , NULL } <nl> + } ; <nl> + <nl> + PyMODINIT_FUNC init_pbcpp ( ) { <nl> + PyObject * m ; <nl> + m = Py_InitModule ( " rethinkdb . _pbcpp " , PbMethods ) ; <nl> + if ( m = = NULL ) <nl> + return ; <nl> + } <nl> mmm a / drivers / python / rethinkdb / net . py <nl> ppp b / drivers / python / rethinkdb / net . py <nl> <nl> # Copyright 2010 - 2012 RethinkDB , all rights reserved . <nl> <nl> - __all__ = [ ' connect ' , ' Connection ' , ' Cursor ' , ' protobuf_implementation ' ] <nl> + __all__ = [ ' connect ' , ' Connection ' , ' Cursor ' , ' protobuf_implementation ' ] <nl> <nl> import errno <nl> import socket <nl> import struct <nl> from os import environ <nl> <nl> - if ' PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION ' in environ : <nl> - protobuf_implementation = environ [ ' PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION ' ] <nl> - if environ [ ' PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION ' ] = = ' cpp ' : <nl> - import rethinkdb_pbcpp <nl> - else : <nl> - try : <nl> - # Set an environment variable telling the protobuf library <nl> - # to use the fast C + + based serializer implementation <nl> - # over the pure python one if it is available . <nl> - environ [ ' PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION ' ] = ' cpp ' <nl> - <nl> - # The cpp_message module could change between versions of the <nl> - # protobuf module <nl> - from google . protobuf . internal import cpp_message <nl> - import rethinkdb_pbcpp <nl> - protobuf_implementation = ' cpp ' <nl> - except ImportError as e : <nl> - # Default to using the python implementation of protobuf <nl> - environ [ ' PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION ' ] = ' python ' <nl> - protobuf_implementation = ' python ' <nl> - <nl> - from . import ql2_pb2 as p <nl> - <nl> - from . import repl # For the repl connection <nl> - from . errors import * <nl> - from . ast import Datum , DB , expr <nl> + try : <nl> + import rethinkdb . pbcpp <nl> + protobuf_implementation = ' cpp ' <nl> + except ImportError : <nl> + protobuf_implementation = ' python ' <nl> + <nl> + from rethinkdb import ql2_pb2 as p <nl> + <nl> + from rethinkdb import repl # For the repl connection <nl> + from rethinkdb . errors import * <nl> + from rethinkdb . ast import Datum , DB , expr <nl> <nl> class Cursor ( object ) : <nl> def __init__ ( self , conn , query , term , opts ) : <nl> deleted file mode 100644 <nl> index 2646ac9e591 . . 00000000000 <nl> mmm a / drivers / python / rethinkdb / pbcpp . cpp <nl> ppp / dev / null <nl> <nl> - # include < python2 . 7 / Python . h > <nl> - <nl> - static PyMethodDef PbMethods [ ] = { <nl> - { NULL , NULL , 0 , NULL } <nl> - } ; <nl> - <nl> - PyMODINIT_FUNC initrethinkdb_pbcpp ( ) { <nl> - PyObject * m ; <nl> - m = Py_InitModule ( " rethinkdb_pbcpp " , PbMethods ) ; <nl> - if ( m = = NULL ) <nl> - return ; <nl> - } <nl> new file mode 100644 <nl> index 00000000000 . . 8c67506153d <nl> mmm / dev / null <nl> ppp b / drivers / python / rethinkdb / pbcpp . py <nl> <nl> + # Load the C + + protobuf backend <nl> + <nl> + from os import environ <nl> + <nl> + # This rethinkdb - specific library is required to fully benefit from <nl> + # the C + + backend <nl> + import rethinkdb . _pbcpp <nl> + <nl> + # The google . protobuf package will activate the C + + backend only if this <nl> + # variable is set to ' cpp ' <nl> + environ [ ' PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION ' ] = ' cpp ' <nl> + <nl> + try : <nl> + import google . protobuf <nl> + <nl> + # In protobuf 2 . 4 . * and 2 . 5 . 
0 , this module imports correctly only if <nl> + # the C + + backend is active in the google . protobuf package <nl> + from google . protobuf . internal import cpp_message <nl> + <nl> + except ImportError : <nl> + del environ [ ' PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION ' ] <nl> + raise <nl> mmm a / drivers / python / setup . py <nl> ppp b / drivers / python / setup . py <nl> <nl> - # Copyright 2010 - 2012 RethinkDB , all rights reserved . <nl> + # Copyright 2010 - 2013 RethinkDB , all rights reserved . <nl> <nl> from setuptools import setup , Extension <nl> from distutils . command . build_ext import build_ext <nl> - from distutils . errors import DistutilsPlatformError , CCompilerError , DistutilsExecError <nl> - import sys <nl> + import os <nl> + from subprocess import check_call <nl> <nl> - class build_ext_nofail ( build_ext ) : <nl> - # This class can replace the build_ext command with one that does not fail <nl> - # when the extension fails to build . <nl> - <nl> - def run ( self ) : <nl> - try : <nl> - build_ext . run ( self ) <nl> - except DistutilsPlatformError as e : <nl> - self . _failed ( e ) <nl> + class build_ext_genproto ( build_ext ) : <nl> + # This class replaces the build_ext command with one that <nl> + # first generates the ql2 . pb . { cpp , h } files if the correct <nl> + # environment variable is set . <nl> <nl> def build_extension ( self , ext ) : <nl> - try : <nl> + if os . environ . get ( ' PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION ' ) = = ' cpp ' : <nl> + print " Calling protoc to generate ql2 . pb . cc and ql2 . pb . h " <nl> + check_call ( [ ' protoc ' , ' ql2 . proto ' , ' - - cpp_out = . ' ] ) <nl> + ext . sources = [ ' . / ql2 . pb . cc ' ] + ext . sources <nl> build_ext . build_extension ( self , ext ) <nl> - except ( CCompilerError , DistutilsExecError ) as e : <nl> - self . _failed ( e ) <nl> else : <nl> - try : <nl> - import google . protobuf . internal . cpp_message <nl> - except ImportError : <nl> - sys . stderr . write ( " * * * WARNING : The installed protobuf library does not seem to include the C + + extension \ n " ) <nl> - sys . stderr . write ( " * * * WARNING : The RethinkDB driver will fallback to using the pure python implementation \ n " ) <nl> - <nl> - def _failed ( self , e ) : <nl> - sys . stderr . write ( " * * * WARNING : Unable to compile the C + + extension \ n " ) <nl> - sys . stderr . write ( str ( e ) + " \ n " ) <nl> - sys . stderr . write ( " * * * WARNING : Defaulting to the python implementation \ n " ) <nl> + print " * * * * * * * * * * * * * * * * * " <nl> + print " * WARNING : The faster C + + protobuf backend is not enabled . " <nl> + print " * WARNING : To enable it , run ` export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION = cpp ' and reinstall the rethinkdb package . " <nl> + print " * WARNING : See http : / / rethinkdb . com / docs / driver - performance / for more information . " <nl> + print " * * * * * * * * * * * * * * * * * " <nl> <nl> setup ( name = " rethinkdb " <nl> - , version = " 1 . 10 . 0 - 0 " <nl> - , description = " This package provides the Python driver library for the RethinkDB database server . " <nl> - , url = " http : / / rethinkdb . com " <nl> - , maintainer = " RethinkDB Inc . " <nl> - , maintainer_email = " bugs @ rethinkdb . com " <nl> - , packages = [ ' rethinkdb ' ] <nl> - , install_requires = [ ' protobuf ' ] <nl> - , ext_modules = [ Extension ( ' rethinkdb_pbcpp ' , sources = [ ' . / rethinkdb / pbcpp . cpp ' , ' . / rethinkdb / ql2 . pb . cc ' ] , <nl> - include_dirs = [ ' . 
/ rethinkdb ' ] , libraries = [ ' protobuf ' ] ) ] <nl> - , cmdclass = { " build_ext " : build_ext_nofail } <nl> - , entry_points = { ' console_scripts ' : [ <nl> - ' rethinkdb - import = rethinkdb . _import : main ' , <nl> - ' rethinkdb - dump = rethinkdb . _dump : main ' , <nl> - ' rethinkdb - export = rethinkdb . _export : main ' , <nl> - ' rethinkdb - restore = rethinkdb . _restore : main ' ] } <nl> - ) <nl> + , version = " 1 . 10 . 0 - 999 " <nl> + , description = " This package provides the Python driver library for the RethinkDB database server . " <nl> + , url = " http : / / rethinkdb . com " <nl> + , maintainer = " RethinkDB Inc . " <nl> + , maintainer_email = " bugs @ rethinkdb . com " <nl> + , packages = [ ' rethinkdb ' ] <nl> + , install_requires = [ ' protobuf ' ] <nl> + , entry_points = { ' console_scripts ' : [ <nl> + ' rethinkdb - import = rethinkdb . _import : main ' , <nl> + ' rethinkdb - dump = rethinkdb . _dump : main ' , <nl> + ' rethinkdb - export = rethinkdb . _export : main ' , <nl> + ' rethinkdb - restore = rethinkdb . _restore : main ' ] } <nl> + , cmdclass = { " build_ext " : build_ext_genproto } <nl> + , ext_modules = [ Extension ( <nl> + ' rethinkdb / _pbcpp ' , <nl> + sources = [ ' . / rethinkdb / _pbcpp . cpp ' ] , <nl> + include_dirs = [ ' . / ' ] , <nl> + libraries = [ ' protobuf ' ] ) ] ) <nl>
|
Python driver: do not build C++ extension by default
|
rethinkdb/rethinkdb
|
21cefbe03325124642ea6a2e1f50d2aa55c32538
|
2013-11-13T02:58:49Z
|
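The rethinkdb/rethinkdb change above makes the C++ protobuf backend strictly opt-in: `PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp` is set, the backend is kept only if the relevant module imports cleanly, and the variable is rolled back otherwise so `google.protobuf` stays in pure-Python mode. A minimal sketch of that fallback pattern, assuming the protobuf 2.4/2.5 behavior described in the diff's comments, where `google.protobuf.internal.cpp_message` imports only when the C++ backend is usable:

```python
import os

def enable_cpp_protobuf() -> str:
    """Try to activate the protobuf C++ backend, else fall back to Python.

    The environment variable is removed again on failure so the
    google.protobuf package does not end up half-configured.
    """
    os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION'] = 'cpp'
    try:
        # Only importable when the C++ backend is active (protobuf 2.4/2.5).
        from google.protobuf.internal import cpp_message  # noqa: F401
        return 'cpp'
    except ImportError:
        del os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION']
        return 'python'

print(enable_cpp_protobuf())
```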
mmm a / tests / runner . py <nl> ppp b / tests / runner . py <nl> def do_emscripten ( self , filename , output_processor = None , append_ext = True , extra_ <nl> except : <nl> pass <nl> settings = [ ' - s % s = % s ' % ( k , json . dumps ( v ) ) for k , v in exported_settings . items ( ) ] <nl> - try : <nl> - os . getcwd ( ) <nl> - except OSError : <nl> - os . chdir ( self . get_dir ( ) ) # ensure the current working directory is valid <nl> compiler_output = timeout_run ( Popen ( [ ' python ' , EMSCRIPTEN , filename + ( ' . o . ll ' if append_ext else ' ' ) , ' - o ' , filename + ' . o . js ' ] + settings + extra_args , stdout = PIPE , stderr = STDOUT ) , TIMEOUT , ' Compiling ' ) <nl> # print compiler_output <nl> <nl> def setUp ( self ) : <nl> self . pick_llvm_opts ( 3 , True ) <nl> COMPILER_TEST_OPTS = [ ' - g ' ] <nl> shutil . rmtree ( self . get_dir ( ) ) # Useful in debugging sometimes to comment this out <nl> - self . get_dir ( ) # make sure it exists <nl> + os . chdir ( self . get_dir ( ) ) # Ensure the directory exists and go there <nl> TT = % s <nl> ' ' ' % ( fullname , compiler , llvm_opts , embetter , quantum_size , typed_arrays , fullname ) ) <nl> return TT <nl>
|
Fix bug with no current working dir
|
emscripten-core/emscripten
|
526f43d34325b01845bb98d379ff02b46b942e35
|
2011-10-05T17:32:48Z
|
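The emscripten change above replaces a scattered "is the current working directory still valid?" probe (the `os.getcwd()` try/except) with an unconditional `os.chdir` in `setUp`, since `os.getcwd()` raises `OSError` when the directory a process is sitting in has been deleted. A small sketch of the idea; the helper name `ensure_cwd` is hypothetical:

```python
import os
import tempfile

def ensure_cwd(path: str) -> None:
    """Make sure `path` exists and make it the current directory.

    Unconditionally chdir'ing to a known-good directory up front (as the
    commit does in setUp) avoids having to probe os.getcwd() for a stale
    working directory before every subprocess call.
    """
    os.makedirs(path, exist_ok=True)
    os.chdir(path)

# usage sketch
work = os.path.join(tempfile.gettempdir(), "runner_tmp")
ensure_cwd(work)
print(os.getcwd())
```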
mmm a / trunk / src / app / srs_app_thread . cpp <nl> ppp b / trunk / src / app / srs_app_thread . cpp <nl> void SrsThread : : thread_cycle ( ) <nl> <nl> / / to improve performance , donot sleep when interval is zero . <nl> / / @ see : https : / / github . com / winlinvip / simple - rtmp - server / issues / 237 <nl> - if ( cycle_interval_us > 0 ) { <nl> + if ( cycle_interval_us ! = 0 ) { <nl> st_usleep ( cycle_interval_us ) ; <nl> } <nl> } <nl>
|
Fix bug: thread does not sleep when timeout is 0.
|
ossrs/srs
|
6cbf732e1a2ed0a3473bb422207cae70a1fc3e55
|
2014-12-03T06:25:02Z
|
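The ossrs/srs one-liner above changes the sleep guard from `> 0` to `!= 0`, so only an interval of exactly zero skips `st_usleep`; negative values now reach the sleep primitive. Plausibly this is to let the `-1` sentinel (`ST_UTIME_NO_TIMEOUT` in State Threads, which blocks indefinitely) through — an assumption on my part, not stated in the diff. The predicates below contrast the two guards:

```python
ST_UTIME_NO_TIMEOUT = -1  # assumed sentinel, by analogy with State Threads

def should_sleep_old(interval_us: int) -> bool:
    # old guard: negative sentinels were silently skipped along with 0
    return interval_us > 0

def should_sleep_new(interval_us: int) -> bool:
    # new guard: only an interval of exactly 0 skips the sleep
    return interval_us != 0

for us in (0, 100, ST_UTIME_NO_TIMEOUT):
    print(us, should_sleep_old(us), should_sleep_new(us))
# -1 is the only input where the two guards disagree
```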
mmm a / documentation / sphinx / source / configuration . rst <nl> ppp b / documentation / sphinx / source / configuration . rst <nl> The ` ` foundationdb . conf ` ` file contains several sections , detailed below . Note t <nl> # # foundationdb . conf <nl> # # <nl> # # Configuration file for FoundationDB server processes <nl> - # # Full documentation is available in the FoundationDB Administration document . <nl> <nl> [ fdbmonitor ] <nl> - restart_delay = 60 <nl> user = foundationdb <nl> group = foundationdb <nl> <nl> - Contains basic configuration parameters of the ` ` fdbmonitor ` ` process . ` ` restart_delay ` ` specifies the number of seconds that ` ` fdbmonitor ` ` waits before restarting a failed process . ` ` user ` ` and ` ` group ` ` are used on Linux systems to control the privilege level of child processes . <nl> + Contains basic configuration parameters of the ` ` fdbmonitor ` ` process . ` ` user ` ` and ` ` group ` ` are used on Linux systems to control the privilege level of child processes . <nl> <nl> ` ` [ general ] ` ` section <nl> mmmmmmmmmmmmmmmmmmmmm - - <nl> Contains basic configuration parameters of the ` ` fdbmonitor ` ` process . ` ` restart <nl> <nl> [ general ] <nl> cluster_file = / etc / foundationdb / fdb . cluster <nl> + restart_delay = 60 <nl> + # # restart_backoff and restart_delay_reset_interval default to the value that is used for restart_delay <nl> + # initial_restart_delay = 0 <nl> + # restart_backoff = 60 . 0 <nl> + # restart_delay_reset_interval = 60 <nl> + # delete_envvars = <nl> + # kill_on_configuration_change = true <nl> + # disable_lifecycle_logging = false <nl> + <nl> + Contains settings applicable to all processes ( e . g . fdbserver , backup_agent ) . <nl> + <nl> + * ` ` cluster_file ` ` : Specifies the location of the cluster file . This file and the directory that contains it must be writable by all processes ( i . e . by the user or group set in the ` ` [ fdbmonitor ] ` ` section ) . <nl> + * ` ` delete_envvars ` ` : A space separated list of environment variables to remove from the environments of child processes . This can be used if the ` ` fdbmonitor ` ` process needs to be run with environment variables that are undesired in its children . <nl> + * ` ` kill_on_configuration_change ` ` : If ` ` true ` ` , affected processes will be restarted whenever the configuration file changes . Defaults to ` ` true ` ` . <nl> + * ` ` disable_lifecycle_logging ` ` : If ` ` true ` ` , ` ` fdbmonitor ` ` will not write log events when processes start or terminate . Defaults to ` ` false ` ` . <nl> + <nl> + The ` ` [ general ] ` ` section also contains some parameters to control how processes are restarted when they die . ` ` fdbmonitor ` ` uses backoff logic to prevent a process that dies repeatedly from cycling too quickly , and it also introduces up to + / - 10 % random jitter into the delay to avoid multiple processes all restarting simultaneously . ` ` fdbmonitor ` ` tracks separate backoff state for each process , so the restarting of one process will have no effect on the backoff behavior of another . <nl> + <nl> + * ` ` restart_delay ` ` : The maximum number of seconds ( subject to jitter ) that fdbmonitor will delay before restarting a failed process . <nl> + * ` ` initial_restart_delay ` ` : The number of seconds ` ` fdbmonitor ` ` waits to restart a process the first time it dies . Defaults to 0 ( i . e . the process gets restarted immediately ) . 
<nl> + * ` ` restart_backoff ` ` : Controls how quickly ` ` fdbmonitor ` ` backs off when a process dies repeatedly . The previous delay ( or 1 , if the previous delay is 0 ) is multiplied by ` ` restart_backoff ` ` to get the next delay , maxing out at the value of ` ` restart_delay ` ` . Defaults to the value of ` ` restart_delay ` ` , meaning that the second and subsequent failures will all delay ` ` restart_delay ` ` between restarts . <nl> + * ` ` restart_delay_reset_interval ` ` : The number of seconds a process must be running before resetting the backoff back to the value of ` ` initial_restart_delay ` ` . Defaults to the value of ` ` restart_delay ` ` . <nl> + <nl> + As an example , let ' s say the following parameters have been set : <nl> + <nl> + . . code - block : : ini <nl> + <nl> + restart_delay = 60 <nl> + initial_restart_delay = 0 <nl> + restart_backoff = 2 . 0 <nl> + restart_delay_reset_interval = 180 <nl> + <nl> + The progression of delays for a process that fails repeatedly would be ` ` 0 , 2 , 4 , 8 , 16 , 32 , 60 , 60 , . . . ` ` , each subject to a 10 % random jitter . After the process stays alive for 180 seconds , the backoff would reset and the next failure would restart the process immediately . <nl> <nl> - Contains settings applicable to all processes ( e . g . fdbserver , backup_agent ) . The main setting of interest is ` ` cluster_file ` ` , which specifies the location of the cluster file . This file and the directory that contains it must be writable by all processes ( i . e . by the user or group set in the [ fdbmonitor ] section ) . <nl> + Using the default parameters , a process will restart immediately if it fails and then delay ` ` restart_delay ` ` seconds if it fails again within ` ` restart_delay ` ` seconds . <nl> <nl> . . _foundationdb - conf - fdbserver : <nl> <nl> mmm a / documentation / sphinx / source / downloads . rst <nl> ppp b / documentation / sphinx / source / downloads . rst <nl> macOS <nl> <nl> The macOS installation package is supported on macOS 10 . 7 + . It includes the client and ( optionally ) the server . <nl> <nl> - * ` FoundationDB - 6 . 2 . 2 . pkg < https : / / www . foundationdb . org / downloads / 6 . 2 . 2 / macOS / installers / FoundationDB - 6 . 2 . 2 . pkg > ` _ <nl> + * ` FoundationDB - 6 . 2 . 3 . pkg < https : / / www . foundationdb . org / downloads / 6 . 2 . 3 / macOS / installers / FoundationDB - 6 . 2 . 3 . pkg > ` _ <nl> <nl> Ubuntu <nl> mmmmmm <nl> <nl> The Ubuntu packages are supported on 64 - bit Ubuntu 12 . 04 + , but beware of the Linux kernel bug in Ubuntu 12 . x . <nl> <nl> - * ` foundationdb - clients - 6 . 2 . 2 - 1_amd64 . deb < https : / / www . foundationdb . org / downloads / 6 . 2 . 2 / ubuntu / installers / foundationdb - clients_6 . 2 . 2 - 1_amd64 . deb > ` _ <nl> - * ` foundationdb - server - 6 . 2 . 2 - 1_amd64 . deb < https : / / www . foundationdb . org / downloads / 6 . 2 . 2 / ubuntu / installers / foundationdb - server_6 . 2 . 2 - 1_amd64 . deb > ` _ ( depends on the clients package ) <nl> + * ` foundationdb - clients - 6 . 2 . 3 - 1_amd64 . deb < https : / / www . foundationdb . org / downloads / 6 . 2 . 3 / ubuntu / installers / foundationdb - clients_6 . 2 . 3 - 1_amd64 . deb > ` _ <nl> + * ` foundationdb - server - 6 . 2 . 3 - 1_amd64 . deb < https : / / www . foundationdb . org / downloads / 6 . 2 . 3 / ubuntu / installers / foundationdb - server_6 . 2 . 3 - 1_amd64 . 
deb > ` _ ( depends on the clients package ) <nl> <nl> RHEL / CentOS EL6 <nl> mmmmmmmmmmmmmmm <nl> <nl> The RHEL / CentOS EL6 packages are supported on 64 - bit RHEL / CentOS 6 . x . <nl> <nl> - * ` foundationdb - clients - 6 . 2 . 2 - 1 . el6 . x86_64 . rpm < https : / / www . foundationdb . org / downloads / 6 . 2 . 2 / rhel6 / installers / foundationdb - clients - 6 . 2 . 2 - 1 . el6 . x86_64 . rpm > ` _ <nl> - * ` foundationdb - server - 6 . 2 . 2 - 1 . el6 . x86_64 . rpm < https : / / www . foundationdb . org / downloads / 6 . 2 . 2 / rhel6 / installers / foundationdb - server - 6 . 2 . 2 - 1 . el6 . x86_64 . rpm > ` _ ( depends on the clients package ) <nl> + * ` foundationdb - clients - 6 . 2 . 3 - 1 . el6 . x86_64 . rpm < https : / / www . foundationdb . org / downloads / 6 . 2 . 3 / rhel6 / installers / foundationdb - clients - 6 . 2 . 3 - 1 . el6 . x86_64 . rpm > ` _ <nl> + * ` foundationdb - server - 6 . 2 . 3 - 1 . el6 . x86_64 . rpm < https : / / www . foundationdb . org / downloads / 6 . 2 . 3 / rhel6 / installers / foundationdb - server - 6 . 2 . 3 - 1 . el6 . x86_64 . rpm > ` _ ( depends on the clients package ) <nl> <nl> RHEL / CentOS EL7 <nl> mmmmmmmmmmmmmmm <nl> <nl> The RHEL / CentOS EL7 packages are supported on 64 - bit RHEL / CentOS 7 . x . <nl> <nl> - * ` foundationdb - clients - 6 . 2 . 2 - 1 . el7 . x86_64 . rpm < https : / / www . foundationdb . org / downloads / 6 . 2 . 2 / rhel7 / installers / foundationdb - clients - 6 . 2 . 2 - 1 . el7 . x86_64 . rpm > ` _ <nl> - * ` foundationdb - server - 6 . 2 . 2 - 1 . el7 . x86_64 . rpm < https : / / www . foundationdb . org / downloads / 6 . 2 . 2 / rhel7 / installers / foundationdb - server - 6 . 2 . 2 - 1 . el7 . x86_64 . rpm > ` _ ( depends on the clients package ) <nl> + * ` foundationdb - clients - 6 . 2 . 3 - 1 . el7 . x86_64 . rpm < https : / / www . foundationdb . org / downloads / 6 . 2 . 3 / rhel7 / installers / foundationdb - clients - 6 . 2 . 3 - 1 . el7 . x86_64 . rpm > ` _ <nl> + * ` foundationdb - server - 6 . 2 . 3 - 1 . el7 . x86_64 . rpm < https : / / www . foundationdb . org / downloads / 6 . 2 . 3 / rhel7 / installers / foundationdb - server - 6 . 2 . 3 - 1 . el7 . x86_64 . rpm > ` _ ( depends on the clients package ) <nl> <nl> Windows <nl> mmmmmm - <nl> <nl> The Windows installer is supported on 64 - bit Windows XP and later . It includes the client and ( optionally ) the server . <nl> <nl> - * ` foundationdb - 6 . 2 . 2 - x64 . msi < https : / / www . foundationdb . org / downloads / 6 . 2 . 2 / windows / installers / foundationdb - 6 . 2 . 2 - x64 . msi > ` _ <nl> + * ` foundationdb - 6 . 2 . 3 - x64 . msi < https : / / www . foundationdb . org / downloads / 6 . 2 . 3 / windows / installers / foundationdb - 6 . 2 . 3 - x64 . msi > ` _ <nl> <nl> API Language Bindings <nl> = = = = = = = = = = = = = = = = = = = = = <nl> On macOS and Windows , the FoundationDB Python API bindings are installed as part <nl> <nl> If you need to use the FoundationDB Python API from other Python installations or paths , download the Python package : <nl> <nl> - * ` foundationdb - 6 . 2 . 2 . tar . gz < https : / / www . foundationdb . org / downloads / 6 . 2 . 2 / bindings / python / foundationdb - 6 . 2 . 2 . tar . gz > ` _ <nl> + * ` foundationdb - 6 . 2 . 3 . tar . gz < https : / / www . foundationdb . org / downloads / 6 . 2 . 3 / bindings / python / foundationdb - 6 . 2 . 3 . tar . gz > ` _ <nl> <nl> Ruby 1 . 9 . 3 / 2 . 0 . 0 + <nl> mmmmmmmmmmmmmmm - - <nl> <nl> - * ` fdb - 6 . 2 . 2 . gem < https : / / www . 
foundationdb . org / downloads / 6 . 2 . 2 / bindings / ruby / fdb - 6 . 2 . 2 . gem > ` _ <nl> + * ` fdb - 6 . 2 . 3 . gem < https : / / www . foundationdb . org / downloads / 6 . 2 . 3 / bindings / ruby / fdb - 6 . 2 . 3 . gem > ` _ <nl> <nl> Java 8 + <nl> mmmmmm - <nl> <nl> - * ` fdb - java - 6 . 2 . 2 . jar < https : / / www . foundationdb . org / downloads / 6 . 2 . 2 / bindings / java / fdb - java - 6 . 2 . 2 . jar > ` _ <nl> - * ` fdb - java - 6 . 2 . 2 - javadoc . jar < https : / / www . foundationdb . org / downloads / 6 . 2 . 2 / bindings / java / fdb - java - 6 . 2 . 2 - javadoc . jar > ` _ <nl> + * ` fdb - java - 6 . 2 . 3 . jar < https : / / www . foundationdb . org / downloads / 6 . 2 . 3 / bindings / java / fdb - java - 6 . 2 . 3 . jar > ` _ <nl> + * ` fdb - java - 6 . 2 . 3 - javadoc . jar < https : / / www . foundationdb . org / downloads / 6 . 2 . 3 / bindings / java / fdb - java - 6 . 2 . 3 - javadoc . jar > ` _ <nl> <nl> Go 1 . 11 + <nl> mmmmmm - - <nl> mmm a / documentation / sphinx / source / mr - status - json - schemas . rst . inc <nl> ppp b / documentation / sphinx / source / mr - status - json - schemas . rst . inc <nl> <nl> " cluster_controller " , <nl> " data_distributor " , <nl> " ratekeeper " , <nl> - " router " <nl> + " router " , <nl> + " coordinator " <nl> ] <nl> } , <nl> " data_version " : 12341234 , <nl> <nl> " limiting_queue_bytes_storage_server " : 0 , <nl> " worst_queue_bytes_storage_server " : 0 , <nl> " limiting_version_lag_storage_server " : 0 , <nl> - " worst_version_lag_storage_server " : 0 <nl> + " worst_version_lag_storage_server " : 0 , <nl> + " limiting_data_lag_storage_server " : { <nl> + " versions " : 0 , <nl> + " seconds " : 0 . 0 <nl> + } , <nl> + " worst_data_lag_storage_server " : { <nl> + " versions " : 0 , <nl> + " seconds " : 0 . 0 <nl> + } , <nl> + " limiting_durability_lag_storage_server " : { <nl> + " versions " : 0 , <nl> + " seconds " : 0 . 0 <nl> + } , <nl> + " worst_durability_lag_storage_server " : { <nl> + " versions " : 0 , <nl> + " seconds " : 0 . 0 <nl> + } <nl> } , <nl> " incompatible_connections " : [ <nl> ] , <nl> <nl> " full_replication " : true , <nl> " maintenance_zone " : " 0ccb4e0fdbdb5583010f6b77d9d10ece " , <nl> " maintenance_seconds_remaining " : 1 . 0 , <nl> + " data_distribution_disabled_for_ss_failures " : true , <nl> + " data_distribution_disabled_for_rebalance " : true , <nl> + " data_distribution_disabled " : true , <nl> " configuration " : { <nl> " log_anti_quorum " : 0 , <nl> " log_replicas " : 2 , <nl> mmm a / documentation / sphinx / source / old - release - notes / release - notes - 620 . rst <nl> ppp b / documentation / sphinx / source / old - release - notes / release - notes - 620 . rst <nl> <nl> Release Notes <nl> # # # # # # # # # # # # # <nl> <nl> - 6 . 2 . 2 <nl> + 6 . 2 . 3 <nl> = = = = = <nl> <nl> Performance <nl> Performance <nl> * Made the storage cache eviction policy configurable , and added an LRU policy . ` ( PR # 1506 ) < https : / / github . com / apple / foundationdb / pull / 1506 > ` _ . <nl> * Improved the speed of recoveries on large clusters at ` ` log_version > = 4 ` ` . ` ( PR # 1729 ) < https : / / github . com / apple / foundationdb / pull / 1729 > ` _ . <nl> * Log routers will prefer to peek from satellites at ` ` log_version > = 4 ` ` . ` ( PR # 1795 ) < https : / / github . com / apple / foundationdb / pull / 1795 > ` _ . 
<nl> + * In clusters using a region configuration , clients will read from the remote region if all of the servers in the primary region are overloaded . [ 6 . 2 . 3 ] ` ( PR # 2019 ) < https : / / github . com / apple / foundationdb / pull / 2019 > ` _ . <nl> <nl> Fixes <nl> mmm - - <nl> Fixes <nl> * In very rare scenarios , master recovery would restart because system metadata was loaded incorrectly . ` ( PR # 1919 ) < https : / / github . com / apple / foundationdb / pull / 1919 > ` _ . <nl> * Ratekeeper will aggressively throttle when unable to fetch the list of storage servers for a considerable period of time . ` ( PR # 1858 ) < https : / / github . com / apple / foundationdb / pull / 1858 > ` _ . <nl> * Proxies could become overloaded when all storage servers on a team fail . [ 6 . 2 . 1 ] ` ( PR # 1976 ) < https : / / github . com / apple / foundationdb / pull / 1976 > ` _ . <nl> + * Proxies could start too few transactions if they didn ' t receive get read version requests frequently enough . [ 6 . 2 . 3 ] ` ( PR # 1999 ) < https : / / github . com / apple / foundationdb / pull / 1999 > ` _ . <nl> + * The ` ` fileconfigure ` ` command in ` ` fdbcli ` ` could fail with an unknown error if the file did not contain a valid JSON object . ` ( PR # 2017 ) < https : / / github . com / apple / foundationdb / pull / 2017 > ` _ . <nl> + * Configuring regions would fail with an internal error if the cluster contained storage servers that didn ' t set a datacenter ID . ` ( PR # 2017 ) < https : / / github . com / apple / foundationdb / pull / 2017 > ` _ . <nl> + * Clients no longer prefer reading from servers with the same zone ID , because it could create hot shards . [ 6 . 2 . 3 ] ` ( PR # 2019 ) < https : / / github . com / apple / foundationdb / pull / 2019 > ` _ . <nl> <nl> Status <nl> mmmmmm <nl> Status <nl> * ` ` connected_clients ` ` is now only a sample of the connected clients , rather than a complete list . ` ( PR # 1902 ) < https : / / github . com / apple / foundationdb / pull / 1902 > ` _ . <nl> * Added ` ` max_protocol_clients ` ` to the ` ` supported_versions ` ` section , which provides a sample of connected clients which cannot connect to any higher protocol version . ` ( PR # 1902 ) < https : / / github . com / apple / foundationdb / pull / 1902 > ` _ . <nl> * Clients which connect without specifying their supported versions are tracked as an ` ` Unknown ` ` version in the ` ` supported_versions ` ` section . [ 6 . 2 . 2 ] ` ( PR # 1990 ) < https : / / github . com / apple / foundationdb / pull / 1990 > ` _ . <nl> + * Add ` ` coordinator ` ` to the list of roles that can be reported for a process . [ 6 . 2 . 3 ] ` ( PR # 2006 ) < https : / / github . com / apple / foundationdb / pull / 2006 > ` _ . <nl> + * Added ` ` worst_durability_lag_storage_server ` ` and ` ` limiting_durability_lag_storage_server ` ` to the ` ` cluster . qos ` ` section , each with subfields ` ` versions ` ` and ` ` seconds ` ` . These report the durability lag values being used by ratekeeper to potentially limit the transaction rate . [ 6 . 2 . 3 ] ` ( PR # 2003 ) < https : / / github . com / apple / foundationdb / pull / 2003 > ` _ . <nl> + * Added ` ` worst_data_lag_storage_server ` ` and ` ` limiting_data_lag_storage_server ` ` to the ` ` cluster . qos ` ` section , each with subfields ` ` versions ` ` and ` ` seconds ` ` . These are meant to replace ` ` worst_version_lag_storage_server ` ` and ` ` limiting_version_lag_storage_server ` ` , which are now deprecated . [ 6 . 2 . 
3 ] ` ( PR # 2003 ) < https : / / github . com / apple / foundationdb / pull / 2003 > ` _ . <nl> <nl> Bindings <nl> mmmmmm - - <nl> Fixes only impacting 6 . 2 . 0 + <nl> * Clients could crash when closing connections with incompatible servers . [ 6 . 2 . 1 ] ` ( PR # 1976 ) < https : / / github . com / apple / foundationdb / pull / 1976 > ` _ . <nl> * Do not close idle network connections with incompatible servers . [ 6 . 2 . 1 ] ` ( PR # 1976 ) < https : / / github . com / apple / foundationdb / pull / 1976 > ` _ . <nl> * In status , ` ` max_protocol_clients ` ` were incorrectly added to the ` ` connected_clients ` ` list . [ 6 . 2 . 2 ] ` ( PR # 1990 ) < https : / / github . com / apple / foundationdb / pull / 1990 > ` _ . <nl> + * Ratekeeper ignores the ( default 5 second ) MVCC window when controlling on durability lag . [ 6 . 2 . 3 ] ` ( PR # 2012 ) < https : / / github . com / apple / foundationdb / pull / 2012 > ` _ . <nl> + * The macOS client was not compatible with a Linux server . [ 6 . 2 . 3 ] ` ( PR # 2045 ) < https : / / github . com / apple / foundationdb / pull / 2045 > ` _ . <nl> + * Incompatible clients would continually reconnect with coordinators . [ 6 . 2 . 3 ] ` ( PR # 2048 ) < https : / / github . com / apple / foundationdb / pull / 2048 > ` _ . <nl> + * Connections were being closed as idle when there were still unreliable requests waiting for a response . [ 6 . 2 . 3 ] ` ( PR # 2048 ) < https : / / github . com / apple / foundationdb / pull / 2048 > ` _ . <nl> <nl> Earlier release notes <nl> mmmmmmmmmmmmmmmmmmmmm <nl> mmm a / fdbbackup / backup . actor . cpp <nl> ppp b / fdbbackup / backup . actor . cpp <nl> CSimpleOpt : : SOption g_rgDBAgentOptions [ ] = { <nl> # ifdef _WIN32 <nl> { OPT_PARENTPID , " - - parentpid " , SO_REQ_SEP } , <nl> # endif <nl> - { OPT_TRACE_LOG_GROUP , " - - loggroup " , SO_REQ_SEP } , <nl> { OPT_SOURCE_CLUSTER , " - s " , SO_REQ_SEP } , <nl> { OPT_SOURCE_CLUSTER , " - - source " , SO_REQ_SEP } , <nl> { OPT_DEST_CLUSTER , " - d " , SO_REQ_SEP } , <nl> static void printAgentUsage ( bool devhelp ) { <nl> " - - logdir PATH Specifes the output directory for trace files . If \ n " <nl> " unspecified , defaults to the current directory . Has \ n " <nl> " no effect unless - - log is specified . \ n " ) ; <nl> + printf ( " - - loggroup LOG_GROUP \ n " <nl> + " Sets the LogGroup field with the specified value for all \ n " <nl> + " events in the trace output ( defaults to ` default ' ) . \ n " ) ; <nl> printf ( " - - trace_format FORMAT \ n " <nl> " Select the format of the trace files . xml ( the default ) and json are supported . \ n " <nl> " Has no effect unless - - log is specified . \ n " ) ; <nl> static void printBackupUsage ( bool devhelp ) { <nl> " - - logdir PATH Specifes the output directory for trace files . If \ n " <nl> " unspecified , defaults to the current directory . Has \ n " <nl> " no effect unless - - log is specified . \ n " ) ; <nl> + printf ( " - - loggroup LOG_GROUP \ n " <nl> + " Sets the LogGroup field with the specified value for all \ n " <nl> + " events in the trace output ( defaults to ` default ' ) . \ n " ) ; <nl> printf ( " - - trace_format FORMAT \ n " <nl> " Select the format of the trace files . xml ( the default ) and json are supported . \ n " <nl> " Has no effect unless - - log is specified . \ n " ) ; <nl> static void printRestoreUsage ( bool devhelp ) { <nl> " - - logdir PATH Specifes the output directory for trace files . If \ n " <nl> " unspecified , defaults to the current directory . 
Has \ n " <nl> " no effect unless - - log is specified . \ n " ) ; <nl> + printf ( " - - loggroup LOG_GROUP \ n " <nl> + " Sets the LogGroup field with the specified value for all \ n " <nl> + " events in the trace output ( defaults to ` default ' ) . \ n " ) ; <nl> printf ( " - - trace_format FORMAT \ n " <nl> " Select the format of the trace files . xml ( the default ) and json are supported . \ n " <nl> " Has no effect unless - - log is specified . \ n " ) ; <nl> static void printDBAgentUsage ( bool devhelp ) { <nl> " - - logdir PATH Specifies the output directory for trace files . If \ n " <nl> " unspecified , defaults to the current directory . Has \ n " <nl> " no effect unless - - log is specified . \ n " ) ; <nl> + printf ( " - - loggroup LOG_GROUP \ n " <nl> + " Sets the LogGroup field with the specified value for all \ n " <nl> + " events in the trace output ( defaults to ` default ' ) . \ n " ) ; <nl> printf ( " - - trace_format FORMAT \ n " <nl> " Select the format of the trace files . xml ( the default ) and json are supported . \ n " <nl> " Has no effect unless - - log is specified . \ n " ) ; <nl> static void printDBBackupUsage ( bool devhelp ) { <nl> " - - logdir PATH Specifies the output directory for trace files . If \ n " <nl> " unspecified , defaults to the current directory . Has \ n " <nl> " no effect unless - - log is specified . \ n " ) ; <nl> + printf ( " - - loggroup LOG_GROUP \ n " <nl> + " Sets the LogGroup field with the specified value for all \ n " <nl> + " events in the trace output ( defaults to ` default ' ) . \ n " ) ; <nl> printf ( " - - trace_format FORMAT \ n " <nl> " Select the format of the trace files . xml ( the default ) and json are supported . \ n " <nl> " Has no effect unless - - log is specified . \ n " ) ; <nl> mmm a / fdbcli / fdbcli . actor . cpp <nl> ppp b / fdbcli / fdbcli . actor . cpp <nl> void initHelp ( ) { <nl> helpMap [ " quit " ] = CommandHelp ( ) ; <nl> helpMap [ " waitconnected " ] = CommandHelp ( ) ; <nl> helpMap [ " waitopen " ] = CommandHelp ( ) ; <nl> + helpMap [ " sleep " ] = CommandHelp ( <nl> + " sleep < SECONDS > " , <nl> + " sleep for a period of time " , <nl> + " " ) ; <nl> helpMap [ " get " ] = CommandHelp ( <nl> " get < KEY > " , <nl> " fetch the value for a given key " , <nl> void printStatus ( StatusObjectReader statusObj , StatusClient : : StatusLevel level , <nl> outputString + = " \ n \ nWARNING : A single process is both a transaction log and a storage server . \ n For best performance use dedicated disks for the transaction logs by setting process classes . " ; <nl> } <nl> <nl> + if ( statusObjCluster . has ( " data_distribution_disabled " ) ) { <nl> + outputString + = " \ n \ nWARNING : Data distribution is off . " ; <nl> + } else { <nl> + if ( statusObjCluster . has ( " data_distribution_disabled_for_ss_failures " ) ) { <nl> + outputString + = " \ n \ nWARNING : Data distribution is currently turned on but disabled for all storage server failures . " ; <nl> + } <nl> + if ( statusObjCluster . has ( " data_distribution_disabled_for_rebalance " ) ) { <nl> + outputString + = " \ n \ nWARNING : Data distribution is currently turned on but shard size balancing is currently disabled . " ; <nl> + } <nl> + } <nl> + <nl> printf ( " % s \ n " , outputString . c_str ( ) ) ; <nl> } <nl>
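The fdbcli hunk just below is the code side of the ``fileconfigure`` fix noted in the release notes above: after parsing, it also confirms that the top-level JSON value is an object before calling get_obj(), which previously threw and surfaced as an unknown error. A minimal sketch of the same two-step validation, assuming json_spirit's read_string/mValue API as used in this file; the header path and helper name are assumptions for illustration:

```cpp
#include <string>
#include "json_spirit/json_spirit_reader_template.h" // assumed include path

// Hypothetical helper mirroring fileConfigure's validation order.
bool parseConfigObject(const std::string& text, json_spirit::mObject& out) {
    json_spirit::mValue config;
    if (!json_spirit::read_string(text, config)) {
        return false; // not valid JSON at all
    }
    if (config.type() != json_spirit::obj_type) {
        return false; // valid JSON, but an array, number, or string at top level
    }
    out = config.get_obj(); // safe now: the value is known to be an object
    return true;
}
```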
<nl> ACTOR Future < bool > fileConfigure ( Database db , std : : string filePath , bool isNewDa <nl> printf ( " ERROR : Invalid JSON \ n " ) ; <nl> return true ; <nl> } <nl> + if ( config . type ( ) ! = json_spirit : : obj_type ) { <nl> + printf ( " ERROR : Configuration file must contain a JSON object \ n " ) ; <nl> + return true ; <nl> + } <nl> StatusObject configJSON = config . get_obj ( ) ; <nl> <nl> json_spirit : : mValue schema ; <nl> ACTOR Future < bool > exclude ( Database db , std : : vector < StringRef > tokens , Referenc <nl> } <nl> } <nl> <nl> - ACTOR Future < bool > createSnapshot ( Database db , StringRef snapCmd ) { <nl> + ACTOR Future < bool > createSnapshot ( Database db , std : : vector < StringRef > tokens ) { <nl> + state Standalone < StringRef > snapCmd ; <nl> + for ( int i = 1 ; i < tokens . size ( ) ; i + + ) { <nl> + snapCmd = snapCmd . withSuffix ( tokens [ i ] ) ; <nl> + if ( i ! = tokens . size ( ) - 1 ) { <nl> + snapCmd = snapCmd . withSuffix ( LiteralStringRef ( " " ) ) ; <nl> + } <nl> + } <nl> try { <nl> UID snapUID = wait ( makeInterruptable ( mgmtSnapCreate ( db , snapCmd ) ) ) ; <nl> printf ( " Snapshot command succeeded with UID % s \ n " , snapUID . toString ( ) . c_str ( ) ) ; <nl> ACTOR Future < int > cli ( CLIOptions opt , LineNoise * plinenoise ) { <nl> if ( ! opt . exec . present ( ) ) { <nl> if ( opt . initialStatusCheck ) { <nl> Future < Void > checkStatusF = checkStatus ( Void ( ) , db - > getConnectionFile ( ) ) ; <nl> - Future < Void > checkDDStatusF = checkDataDistributionStatus ( db , true ) ; <nl> - wait ( makeInterruptable ( success ( checkStatusF ) & & success ( checkDDStatusF ) ) ) ; <nl> + wait ( makeInterruptable ( success ( checkStatusF ) ) ) ; <nl> } <nl> else { <nl> printf ( " \ n " ) ; <nl> ACTOR Future < int > cli ( CLIOptions opt , LineNoise * plinenoise ) { <nl> continue ; <nl> } <nl> <nl> + if ( tokencmp ( tokens [ 0 ] , " sleep " ) ) { <nl> + if ( tokens . size ( ) ! = 2 ) { <nl> + printUsage ( tokens [ 0 ] ) ; <nl> + is_error = true ; <nl> + } else { <nl> + double v ; <nl> + int n = 0 ; <nl> + if ( sscanf ( tokens [ 1 ] . toString ( ) . c_str ( ) , " % lf % n " , & v , & n ) ! = 1 | | n ! = tokens [ 1 ] . size ( ) ) { <nl> + printUsage ( tokens [ 0 ] ) ; <nl> + is_error = true ; <nl> + } else { <nl> + wait ( delay ( v ) ) ; <nl> + } <nl> + } <nl> + continue ; <nl> + } <nl> + <nl>
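The ``sleep`` hunk above rejects arguments like ``5x`` using the classic sscanf ``%n`` idiom: ``%n`` records how many characters the scan consumed, so a full-length match means the whole token was a number. A standalone sketch of the idiom:

```cpp
#include <cstdio>
#include <cstring>

// Returns true only if the entire string parses as a floating-point number.
bool parseSeconds(const char* s, double* out) {
    int consumed = 0;
    // %lf parses the value; %n stores how many characters were consumed.
    if (sscanf(s, "%lf%n", out, &consumed) != 1) return false;
    return consumed == static_cast<int>(strlen(s)); // reject trailing characters
}

int main() {
    double v;
    printf("%d\n", parseSeconds("2.5", &v));  // prints 1: fully consumed
    printf("%d\n", parseSeconds("2.5x", &v)); // prints 0: trailing 'x' rejected
    return 0;
}
```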
if ( tokencmp ( tokens [ 0 ] , " status " ) ) { <nl> / / Warn at 7 seconds since status will spend as long as 5 seconds trying to read / write from the database <nl> warn = timeWarning ( 7 . 0 , " \ nWARNING : Long delay ( Ctrl - C to interrupt ) \ n " ) ; <nl> ACTOR Future < int > cli ( CLIOptions opt , LineNoise * plinenoise ) { <nl> } <nl> <nl> if ( tokencmp ( tokens [ 0 ] , " snapshot " ) ) { <nl> - if ( tokens . size ( ) ! = 2 ) { <nl> + if ( tokens . size ( ) < 2 ) { <nl> printUsage ( tokens [ 0 ] ) ; <nl> is_error = true ; <nl> } else { <nl> - bool err = wait ( createSnapshot ( db , tokens [ 1 ] ) ) ; <nl> + bool err = wait ( createSnapshot ( db , tokens ) ) ; <nl> if ( err ) is_error = true ; <nl> } <nl> continue ; <nl> ACTOR Future < int > cli ( CLIOptions opt , LineNoise * plinenoise ) { <nl> <nl> if ( tokencmp ( tokens [ 0 ] , " datadistribution " ) ) { <nl> if ( tokens . size ( ) ! = 2 & & tokens . size ( ) ! = 3 ) { <nl> - printf ( " Usage : datadistribution < status | on | off | disable < ssfailure | rebalance > | enable " <nl> + printf ( " Usage : datadistribution < on | off | disable < ssfailure | rebalance > | enable " <nl> " < ssfailure | rebalance > > \ n " ) ; <nl> is_error = true ; <nl> } else { <nl> - if ( tokencmp ( tokens [ 1 ] , " status " ) ) { <nl> - wait ( makeInterruptable ( checkDataDistributionStatus ( db ) ) ) ; <nl> - } else if ( tokencmp ( tokens [ 1 ] , " on " ) ) { <nl> + if ( tokencmp ( tokens [ 1 ] , " on " ) ) { <nl> wait ( success ( setDDMode ( db , 1 ) ) ) ; <nl> printf ( " Data distribution is turned on . \ n " ) ; <nl> } else if ( tokencmp ( tokens [ 1 ] , " off " ) ) { <nl> ACTOR Future < int > cli ( CLIOptions opt , LineNoise * plinenoise ) { <nl> wait ( makeInterruptable ( setDDIgnoreRebalanceSwitch ( db , true ) ) ) ; <nl> printf ( " Data distribution is disabled for rebalance . \ n " ) ; <nl> } else { <nl> - printf ( " Usage : datadistribution < status | on | off | disable < ssfailure | rebalance > | enable " <nl> + printf ( " Usage : datadistribution < on | off | disable < ssfailure | rebalance > | enable " <nl> " < ssfailure | rebalance > > \ n " ) ; <nl> is_error = true ; <nl> } <nl> ACTOR Future < int > cli ( CLIOptions opt , LineNoise * plinenoise ) { <nl> wait ( makeInterruptable ( setDDIgnoreRebalanceSwitch ( db , false ) ) ) ; <nl> printf ( " Data distribution is enabled for rebalance . \ n " ) ; <nl> } else { <nl> - printf ( " Usage : datadistribution < status | on | off | disable < ssfailure | rebalance > | enable " <nl> + printf ( " Usage : datadistribution < on | off | disable < ssfailure | rebalance > | enable " <nl> " < ssfailure | rebalance > > \ n " ) ; <nl> is_error = true ; <nl> } <nl> } else { <nl> - printf ( " Usage : datadistribution < status | on | off | disable < ssfailure | rebalance > | enable " <nl> + printf ( " Usage : datadistribution < on | off | disable < ssfailure | rebalance > | enable " <nl> " < ssfailure | rebalance > > \ n " ) ; <nl> is_error = true ; <nl> } <nl> mmm a / fdbclient / DatabaseContext . h <nl> ppp b / fdbclient / DatabaseContext . h <nl> class DatabaseContext : public ReferenceCounted < DatabaseContext > , public FastAll <nl> Counter transactionsMaybeCommitted ; <nl> Counter transactionsResourceConstrained ; <nl> Counter transactionsProcessBehind ; <nl> - Counter transactionWaitsForFullRecovery ; <nl> <nl> ContinuousSample < double > latencies , readLatencies , commitLatencies , GRVLatencies , mutationsPerCommit , bytesPerCommit ; <nl> <nl> mmm a / fdbclient / ManagementAPI . actor . cpp <nl> ppp b / fdbclient / ManagementAPI . actor . cpp <nl> ACTOR Future < ConfigurationResult : : Type > changeConfig ( Database cx , std : : map < std : <nl> for ( auto & it : newConfig . regions ) { <nl> newDcIds . insert ( it . dcId ) ; <nl> } <nl> - std : : set < Key > missingDcIds ; <nl> + std : : set < Optional < Key > > missingDcIds ; <nl> for ( auto & s : serverList ) { <nl> auto ssi = decodeServerListValue ( s . value ) ; <nl> if ( ! ssi . locality . dcId ( ) . present ( ) | | ! newDcIds . count ( ssi . locality . dcId ( ) . get ( ) ) ) { <nl> - missingDcIds . insert ( ssi . locality . dcId ( ) . get ( ) ) ; <nl> + missingDcIds . insert ( ssi . locality . dcId ( ) ) ; <nl> } <nl> } <nl> if ( missingDcIds . size ( ) > ( oldReplicationUsesDcId ?
1 : 0 ) ) { <nl> ACTOR Future < vector < AddressExclusion > > getExcludedServers ( Database cx ) { <nl> } <nl> } <nl> <nl> - ACTOR Future < Void > checkDataDistributionStatus ( Database cx , bool printWarningOnly ) { <nl> - state Transaction tr ( cx ) ; <nl> - loop { <nl> - try { <nl> - tr . setOption ( FDBTransactionOptions : : LOCK_AWARE ) ; <nl> - state Future < Optional < Value > > overallSwitchF = tr . get ( dataDistributionModeKey ) ; <nl> - state Future < Optional < Value > > healthyZoneValueF = tr . get ( healthyZoneKey ) ; <nl> - state Future < Optional < Value > > rebalanceDDIgnoreValueF = tr . get ( rebalanceDDIgnoreKey ) ; <nl> - wait ( success ( overallSwitchF ) & & success ( healthyZoneValueF ) & & success ( rebalanceDDIgnoreValueF ) ) ; <nl> - if ( overallSwitchF . get ( ) . present ( ) ) { <nl> - BinaryReader rd ( overallSwitchF . get ( ) . get ( ) , Unversioned ( ) ) ; <nl> - int currentMode ; <nl> - rd > > currentMode ; <nl> - if ( currentMode = = 0 ) { <nl> - printf ( " WARNING : Data distribution is off . \ n " ) ; <nl> - return Void ( ) ; <nl> - } <nl> - } <nl> - if ( ! printWarningOnly ) { <nl> - printf ( " Data distribution is on . \ n " ) ; <nl> - } <nl> - if ( healthyZoneValueF . get ( ) . present ( ) ) { <nl> - auto healthyZoneKV = decodeHealthyZoneValue ( healthyZoneValueF . get ( ) . get ( ) ) ; <nl> - if ( healthyZoneKV . first = = ignoreSSFailuresZoneString ) { <nl> - printf ( " WARNING : Data distribution is currently turned on but disabled for all storage server " <nl> - " failures . \ n " ) ; <nl> - } else { <nl> - printf ( " WARNING : Data distribution is currently turned on but zone % s is under maintenance and " <nl> - " will continue for % " PRId64 " seconds . \ n " , <nl> - healthyZoneKV . first . toString ( ) . c_str ( ) , <nl> - ( healthyZoneKV . second - tr . getReadVersion ( ) . get ( ) ) / CLIENT_KNOBS - > CORE_VERSIONSPERSECOND ) ; <nl> - } <nl> - } <nl> - if ( rebalanceDDIgnoreValueF . get ( ) . present ( ) ) { <nl> - printf ( " WARNING : Data distribution is currently turned on but shard size balancing is currently " <nl> - " disabled . \ n " ) ; <nl> - } <nl> - return Void ( ) ; <nl> - } catch ( Error & e ) { <nl> - wait ( tr . onError ( e ) ) ; <nl> - } <nl> - } <nl> - } <nl> - <nl> ACTOR Future < Void > printHealthyZone ( Database cx ) { <nl> state Transaction tr ( cx ) ; <nl> loop { <nl> try { <nl> tr . setOption ( FDBTransactionOptions : : LOCK_AWARE ) ; <nl> Optional < Value > val = wait ( tr . get ( healthyZoneKey ) ) ; <nl> - if ( ! val . present ( ) | | decodeHealthyZoneValue ( val . get ( ) ) . second < = tr . getReadVersion ( ) . get ( ) ) { <nl> - printf ( " No ongoing maintenance . \ n " ) ; <nl> - } else if ( val . present ( ) & & decodeHealthyZoneValue ( val . get ( ) ) . first = = ignoreSSFailuresZoneString ) { <nl> + if ( val . present ( ) & & decodeHealthyZoneValue ( val . get ( ) ) . first = = ignoreSSFailuresZoneString ) { <nl> printf ( " Data distribution has been disabled for all storage server failures in this cluster and thus " <nl> " maintenance mode is not active . \ n " ) ; <nl> + } else if ( ! val . present ( ) | | decodeHealthyZoneValue ( val . get ( ) ) . second < = tr . getReadVersion ( ) . get ( ) ) { <nl> + printf ( " No ongoing maintenance . \ n " ) ; <nl> } else { <nl> auto healthyZone = decodeHealthyZoneValue ( val . get ( ) ) ; <nl> printf ( " Maintenance for zone % s will continue for % " PRId64 " seconds . \ n " , healthyZone . first . toString ( ) . c_str ( ) , ( healthyZone . second - tr . 
getReadVersion ( ) . get ( ) ) / CLIENT_KNOBS - > CORE_VERSIONSPERSECOND ) ; <nl> ACTOR Future < std : : set < NetworkAddress > > checkForExcludingServers ( Database cx , vec <nl> return inProgressExclusion ; <nl> } <nl> <nl> - ACTOR Future < UID > mgmtSnapCreate ( Database cx , StringRef snapCmd ) { <nl> + ACTOR Future < UID > mgmtSnapCreate ( Database cx , Standalone < StringRef > snapCmd ) { <nl> state UID snapUID = deterministicRandom ( ) - > randomUniqueID ( ) ; <nl> try { <nl> wait ( snapCreate ( cx , snapCmd , snapUID ) ) ; <nl> mmm a / fdbclient / ManagementAPI . actor . h <nl> ppp b / fdbclient / ManagementAPI . actor . h <nl> ACTOR Future < int > setDDMode ( Database cx , int mode ) ; <nl> <nl> ACTOR Future < Void > forceRecovery ( Reference < ClusterConnectionFile > clusterFile , Standalone < StringRef > dcId ) ; <nl> <nl> - ACTOR Future < Void > checkDataDistributionStatus ( Database cx , bool printWarningOnly = false ) ; <nl> ACTOR Future < Void > printHealthyZone ( Database cx ) ; <nl> ACTOR Future < Void > setDDIgnoreRebalanceSwitch ( Database cx , bool ignoreRebalance ) ; <nl> ACTOR Future < bool > clearHealthyZone ( Database cx , bool printWarning = false , bool clearSSFailureZoneString = false ) ; <nl> bool schemaMatch ( json_spirit : : mValue const & schema , json_spirit : : mValue const & <nl> <nl> / / execute payload in ' snapCmd ' on all the coordinators , TLogs and <nl> / / storage nodes <nl> - ACTOR Future < UID > mgmtSnapCreate ( Database cx , StringRef snapCmd ) ; <nl> + ACTOR Future < UID > mgmtSnapCreate ( Database cx , Standalone < StringRef > snapCmd ) ; <nl> <nl> # include " flow / unactorcompiler . h " <nl> # endif <nl> mmm a / fdbclient / NativeAPI . actor . cpp <nl> ppp b / fdbclient / NativeAPI . actor . cpp <nl> DatabaseContext : : DatabaseContext ( <nl> transactionCommittedMutations ( " CommittedMutations " , cc ) , transactionCommittedMutationBytes ( " CommittedMutationBytes " , cc ) , transactionsCommitStarted ( " CommitStarted " , cc ) , <nl> transactionsCommitCompleted ( " CommitCompleted " , cc ) , transactionsTooOld ( " TooOld " , cc ) , transactionsFutureVersions ( " FutureVersions " , cc ) , <nl> transactionsNotCommitted ( " NotCommitted " , cc ) , transactionsMaybeCommitted ( " MaybeCommitted " , cc ) , transactionsResourceConstrained ( " ResourceConstrained " , cc ) , <nl> - transactionsProcessBehind ( " ProcessBehind " , cc ) , transactionWaitsForFullRecovery ( " WaitsForFullRecovery " , cc ) , outstandingWatches ( 0 ) , <nl> + transactionsProcessBehind ( " ProcessBehind " , cc ) , outstandingWatches ( 0 ) , <nl> latencies ( 1000 ) , readLatencies ( 1000 ) , commitLatencies ( 1000 ) , GRVLatencies ( 1000 ) , mutationsPerCommit ( 1000 ) , bytesPerCommit ( 1000 ) , mvCacheInsertLocation ( 0 ) , <nl> healthMetricsLastUpdated ( 0 ) , detailedHealthMetricsLastUpdated ( 0 ) , internal ( internal ) <nl> { <nl> DatabaseContext : : DatabaseContext ( const Error & err ) : deferredError ( err ) , cc ( " T <nl> transactionCommittedMutations ( " CommittedMutations " , cc ) , transactionCommittedMutationBytes ( " CommittedMutationBytes " , cc ) , transactionsCommitStarted ( " CommitStarted " , cc ) , <nl> transactionsCommitCompleted ( " CommitCompleted " , cc ) , transactionsTooOld ( " TooOld " , cc ) , transactionsFutureVersions ( " FutureVersions " , cc ) , <nl> transactionsNotCommitted ( " NotCommitted " , cc ) , transactionsMaybeCommitted ( " MaybeCommitted " , cc ) , transactionsResourceConstrained ( " ResourceConstrained " , cc ) , <nl> - 
transactionsProcessBehind ( " ProcessBehind " , cc ) , transactionWaitsForFullRecovery ( " WaitsForFullRecovery " , cc ) , latencies ( 1000 ) , readLatencies ( 1000 ) , commitLatencies ( 1000 ) , <nl> + transactionsProcessBehind ( " ProcessBehind " , cc ) , latencies ( 1000 ) , readLatencies ( 1000 ) , commitLatencies ( 1000 ) , <nl> GRVLatencies ( 1000 ) , mutationsPerCommit ( 1000 ) , bytesPerCommit ( 1000 ) , <nl> internal ( false ) { } <nl> <nl> ACTOR Future < Void > watchValue ( Future < Version > version , Key key , Optional < Value > <nl> g_traceBatch . addAttach ( " WatchValueAttachID " , info . debugID . get ( ) . first ( ) , watchValueID . get ( ) . first ( ) ) ; <nl> g_traceBatch . addEvent ( " WatchValueDebug " , watchValueID . get ( ) . first ( ) , " NativeAPI . watchValue . Before " ) ; / / . detail ( " TaskID " , g_network - > getCurrentTask ( ) ) ; <nl> } <nl> - state Version resp ; <nl> + state WatchValueReply resp ; <nl> choose { <nl> - when ( Version r = wait ( loadBalance ( ssi . second , & StorageServerInterface : : watchValue , <nl> - WatchValueRequest ( key , value , ver , watchValueID ) , <nl> - TaskPriority : : DefaultPromiseEndpoint ) ) ) { <nl> + when ( WatchValueReply r = wait ( loadBalance ( ssi . second , & StorageServerInterface : : watchValue , <nl> + WatchValueRequest ( key , value , ver , watchValueID ) , <nl> + TaskPriority : : DefaultPromiseEndpoint ) ) ) { <nl> resp = r ; <nl> } <nl> when ( wait ( cx - > connectionFile ? cx - > connectionFile - > onChange ( ) : Never ( ) ) ) { wait ( Never ( ) ) ; } <nl> ACTOR Future < Void > watchValue ( Future < Version > version , Key key , Optional < Value > <nl> <nl> / / FIXME : wait for known committed version on the storage server before replying , <nl> / / cannot do this until the storage server is notified on knownCommittedVersion changes from tlog ( faster than the current update loop ) <nl> - Version v = wait ( waitForCommittedVersion ( cx , resp ) ) ; <nl> + Version v = wait ( waitForCommittedVersion ( cx , resp . version ) ) ; <nl> <nl> - / / TraceEvent ( " WatcherCommitted " ) . detail ( " CommittedVersion " , v ) . detail ( " WatchVersion " , resp ) . detail ( " Key " , key ) . detail ( " Value " , value ) ; <nl> + / / TraceEvent ( " WatcherCommitted " ) . detail ( " CommittedVersion " , v ) . detail ( " WatchVersion " , resp . version ) . detail ( " Key " , key ) . detail ( " Value " , value ) ; <nl> <nl> - if ( v - resp < 50000000 ) / / False if there is a master failure between getting the response and getting the committed version , Dependent on SERVER_KNOBS - > MAX_VERSIONS_IN_FLIGHT <nl> - return Void ( ) ; <nl> + / / False if there is a master failure between getting the response and getting the committed version , <nl> + / / Dependent on SERVER_KNOBS - > MAX_VERSIONS_IN_FLIGHT <nl> + if ( v - resp . version < 50000000 ) return Void ( ) ; <nl> ver = v ; <nl> } catch ( Error & e ) { <nl> if ( e . code ( ) = = error_code_wrong_shard_server | | e . code ( ) = = error_code_all_alternatives_failed ) { <nl> ACTOR static Future < Void > tryCommit ( Database cx , Reference < TransactionLogInfo > <nl> if ( e . code ( ) ! = error_code_transaction_too_old <nl> & & e . code ( ) ! = error_code_not_committed <nl> & & e . code ( ) ! = error_code_database_locked <nl> - & & e . code ( ) ! = error_code_proxy_memory_limit_exceeded <nl> - & & e . code ( ) ! = error_code_transaction_not_permitted <nl> - & & e . code ( ) ! = error_code_cluster_not_fully_recovered <nl> - & & e . code ( ) ! 
= error_code_txn_exec_log_anti_quorum ) <nl> + & & e . code ( ) ! = error_code_proxy_memory_limit_exceeded ) <nl> TraceEvent ( SevError , " TryCommitError " ) . error ( e ) ; <nl> if ( trLogInfo ) <nl> trLogInfo - > addLog ( FdbClientLogEvents : : EventCommitError ( startTime , static_cast < int > ( e . code ( ) ) , req ) ) ; <nl> Future < Void > Transaction : : onError ( Error const & e ) { <nl> e . code ( ) = = error_code_commit_unknown_result | | <nl> e . code ( ) = = error_code_database_locked | | <nl> e . code ( ) = = error_code_proxy_memory_limit_exceeded | | <nl> - e . code ( ) = = error_code_process_behind | | <nl> - e . code ( ) = = error_code_cluster_not_fully_recovered ) <nl> + e . code ( ) = = error_code_process_behind ) <nl> { <nl> if ( e . code ( ) = = error_code_not_committed ) <nl> + + cx - > transactionsNotCommitted ; <nl> Future < Void > Transaction : : onError ( Error const & e ) { <nl> + + cx - > transactionsResourceConstrained ; <nl> if ( e . code ( ) = = error_code_process_behind ) <nl> + + cx - > transactionsProcessBehind ; <nl> - if ( e . code ( ) = = error_code_cluster_not_fully_recovered ) { <nl> - + + cx - > transactionWaitsForFullRecovery ; <nl> - } <nl> <nl> double backoff = getBackoff ( e . code ( ) ) ; <nl> reset ( ) ; <nl> void enableClientInfoLogging ( ) { <nl> TraceEvent ( SevInfo , " ClientInfoLoggingEnabled " ) ; <nl> } <nl> <nl> - ACTOR Future < Void > snapshotDatabase ( Reference < DatabaseContext > cx , StringRef snapPayload , UID snapUID , Optional < UID > debugID ) { <nl> - TraceEvent ( " SnapshotDatabaseEnter " ) <nl> - . detail ( " SnapPayload " , snapPayload ) <nl> - . detail ( " SnapUID " , snapUID ) ; <nl> - try { <nl> - if ( debugID . present ( ) ) { <nl> - g_traceBatch . addEvent ( " TransactionDebug " , debugID . get ( ) . first ( ) , " NativeAPI . snapshotDatabase . Before " ) ; <nl> - } <nl> - <nl> - choose { <nl> - when ( wait ( cx - > onMasterProxiesChanged ( ) ) ) { throw operation_failed ( ) ; } <nl> - when ( wait ( loadBalance ( cx - > getMasterProxies ( false ) , & MasterProxyInterface : : proxySnapReq , ProxySnapRequest ( snapPayload , snapUID , debugID ) , cx - > taskID , true / * atmostOnce * / ) ) ) { <nl> - if ( debugID . present ( ) ) <nl> - g_traceBatch . addEvent ( " TransactionDebug " , debugID . get ( ) . first ( ) , <nl> - " NativeAPI . SnapshotDatabase . After " ) ; <nl> - } <nl> - } <nl> - } catch ( Error & e ) { <nl> - TraceEvent ( " SnapshotDatabaseError " ) <nl> - . error ( e ) <nl> - . detail ( " SnapPayload " , snapPayload ) <nl> - . detail ( " SnapUID " , snapUID ) ; <nl> - throw ; <nl> - } <nl> - return Void ( ) ; <nl> - } <nl> - <nl> - ACTOR Future < Void > snapCreate ( Database cx , StringRef snapCmd , UID snapUID ) { <nl> - / / remember the client ID before the snap operation <nl> - state UID preSnapClientUID = cx - > clientInfo - > get ( ) . id ; <nl> - <nl> + ACTOR Future < Void > snapCreate ( Database cx , Standalone < StringRef > snapCmd , UID snapUID ) { <nl> TraceEvent ( " SnapCreateEnter " ) <nl> . detail ( " SnapCmd " , snapCmd . toString ( ) ) <nl> - . detail ( " UID " , snapUID ) <nl> - . detail ( " PreSnapClientUID " , preSnapClientUID ) ; <nl> - <nl> - StringRef snapCmdArgs = snapCmd ; <nl> - StringRef snapCmdPart = snapCmdArgs . eat ( " : " ) ; <nl> - Standalone < StringRef > snapUIDRef ( snapUID . toString ( ) ) ; <nl> - Standalone < StringRef > snapPayloadRef = snapCmdPart <nl> - . withSuffix ( LiteralStringRef ( " : uid = " ) ) <nl> - . withSuffix ( snapUIDRef ) <nl> - . 
withSuffix ( LiteralStringRef ( " , " ) ) <nl> - . withSuffix ( snapCmdArgs ) ; <nl> - <nl> + . detail ( " UID " , snapUID ) ; <nl> try { <nl> - Future < Void > exec = snapshotDatabase ( Reference < DatabaseContext > : : addRef ( cx . getPtr ( ) ) , snapPayloadRef , snapUID , snapUID ) ; <nl> - wait ( exec ) ; <nl> + loop { <nl> + choose { <nl> + when ( wait ( cx - > onMasterProxiesChanged ( ) ) ) { } <nl> + when ( wait ( loadBalance ( cx - > getMasterProxies ( false ) , & MasterProxyInterface : : proxySnapReq , ProxySnapRequest ( snapCmd , snapUID , snapUID ) , cx - > taskID , true / * atmostOnce * / ) ) ) { <nl> + TraceEvent ( " SnapCreateExit " ) <nl> + . detail ( " SnapCmd " , snapCmd . toString ( ) ) <nl> + . detail ( " UID " , snapUID ) ; <nl> + return Void ( ) ; <nl> + } <nl> + } <nl> + } <nl> } catch ( Error & e ) { <nl> TraceEvent ( " SnapCreateError " ) <nl> . detail ( " SnapCmd " , snapCmd . toString ( ) ) <nl> ACTOR Future < Void > snapCreate ( Database cx , StringRef snapCmd , UID snapUID ) { <nl> . error ( e ) ; <nl> throw ; <nl> } <nl> - <nl> - UID postSnapClientUID = cx - > clientInfo - > get ( ) . id ; <nl> - if ( preSnapClientUID ! = postSnapClientUID ) { <nl> - / / if the client IDs changed then we fail the snapshot <nl> - TraceEvent ( " SnapCreateUIDMismatch " ) <nl> - . detail ( " SnapPreSnapClientUID " , preSnapClientUID ) <nl> - . detail ( " SnapPostSnapClientUID " , postSnapClientUID ) ; <nl> - throw coordinators_changed ( ) ; <nl> - } <nl> - <nl> - TraceEvent ( " SnapCreateExit " ) <nl> - . detail ( " SnapCmd " , snapCmd . toString ( ) ) <nl> - . detail ( " UID " , snapUID ) <nl> - . detail ( " PreSnapClientUID " , preSnapClientUID ) ; <nl> - return Void ( ) ; <nl> } <nl> mmm a / fdbclient / NativeAPI . actor . h <nl> ppp b / fdbclient / NativeAPI . actor . h <nl> int64_t extractIntOption ( Optional < StringRef > value , int64_t minValue = std : : num <nl> <nl> / / Takes a snapshot of the cluster , specifically the following persistent <nl> / / states : coordinator , TLog and storage state <nl> - ACTOR Future < Void > snapCreate ( Database cx , StringRef snapCmd , UID snapUID ) ; <nl> + ACTOR Future < Void > snapCreate ( Database cx , Standalone < StringRef > snapCmd , UID snapUID ) ; <nl> <nl> # include " flow / unactorcompiler . h " <nl> # endif <nl> mmm a / fdbclient / Schemas . cpp <nl> ppp b / fdbclient / Schemas . cpp <nl> const KeyRef JSONSchemas : : statusSchema = LiteralStringRef ( R " statusSchema ( <nl> " cluster_controller " , <nl> " data_distributor " , <nl> " ratekeeper " , <nl> - " router " <nl> + " router " , <nl> + " coordinator " <nl> ] <nl> } , <nl> " data_version " : 12341234 , <nl> const KeyRef JSONSchemas : : statusSchema = LiteralStringRef ( R " statusSchema ( <nl> " limiting_queue_bytes_storage_server " : 0 , <nl> " worst_queue_bytes_storage_server " : 0 , <nl> " limiting_version_lag_storage_server " : 0 , <nl> - " worst_version_lag_storage_server " : 0 <nl> + " worst_version_lag_storage_server " : 0 , <nl> + " limiting_data_lag_storage_server " : { <nl> + " versions " : 0 , <nl> + " seconds " : 0 . 0 <nl> + } , <nl> + " worst_data_lag_storage_server " : { <nl> + " versions " : 0 , <nl> + " seconds " : 0 . 0 <nl> + } , <nl> + " limiting_durability_lag_storage_server " : { <nl> + " versions " : 0 , <nl> + " seconds " : 0 . 0 <nl> + } , <nl> + " worst_durability_lag_storage_server " : { <nl> + " versions " : 0 , <nl> + " seconds " : 0 . 
0 <nl> + } <nl> } , <nl> " incompatible_connections " : [ <nl> <nl> const KeyRef JSONSchemas : : statusSchema = LiteralStringRef ( R " statusSchema ( <nl> " full_replication " : true , <nl> " maintenance_zone " : " 0ccb4e0fdbdb5583010f6b77d9d10ece " , <nl> " maintenance_seconds_remaining " : 1 . 0 , <nl> + " data_distribution_disabled_for_ss_failures " : true , <nl> + " data_distribution_disabled_for_rebalance " : true , <nl> + " data_distribution_disabled " : true , <nl> " configuration " : { <nl> " log_anti_quorum " : 0 , <nl> " log_replicas " : 2 , <nl> mmm a / fdbclient / StorageServerInterface . h <nl> ppp b / fdbclient / StorageServerInterface . h <nl> <nl> # include " flow / Stats . h " <nl> # include " fdbrpc / TimedRequest . h " <nl> <nl> + / / Dead code , removed in the next protocol version <nl> + struct VersionReply { <nl> + constexpr static FileIdentifier file_identifier = 3 ; <nl> + <nl> + Version version ; <nl> + VersionReply ( ) = default ; <nl> + explicit VersionReply ( Version version ) : version ( version ) { } <nl> + <nl> + template < class Ar > <nl> + void serialize ( Ar & ar ) { <nl> + serializer ( ar , version ) ; <nl> + } <nl> + } ; <nl> + <nl> struct StorageServerInterface { <nl> constexpr static FileIdentifier file_identifier = 15302073 ; <nl> enum { BUSY_ALLOWED = 0 , BUSY_FORCE = 1 , BUSY_LOCAL = 2 } ; <nl> struct StorageServerInterface { <nl> LocalityData locality ; <nl> UID uniqueID ; <nl> <nl> - RequestStream < ReplyPromise < Version > > getVersion ; <nl> + RequestStream < ReplyPromise < VersionReply > > getVersion ; <nl> RequestStream < struct GetValueRequest > getValue ; <nl> RequestStream < struct GetKeyRequest > getKey ; <nl> <nl> struct GetValueRequest : TimedRequest { <nl> } <nl> } ; <nl> <nl> + struct WatchValueReply { <nl> + constexpr static FileIdentifier file_identifier = 3 ; <nl> + <nl> + Version version ; <nl> + WatchValueReply ( ) = default ; <nl> + explicit WatchValueReply ( Version version ) : version ( version ) { } <nl> + <nl> + template < class Ar > <nl> + void serialize ( Ar & ar ) { <nl> + serializer ( ar , version ) ; <nl> + } <nl> + } ; <nl> + <nl> struct WatchValueRequest { <nl> constexpr static FileIdentifier file_identifier = 14747733 ; <nl> Key key ; <nl> Optional < Value > value ; <nl> Version version ; <nl> Optional < UID > debugID ; <nl> - ReplyPromise < Version > reply ; <nl> - <nl> + ReplyPromise < WatchValueReply > reply ; <nl> + <nl> WatchValueRequest ( ) { } <nl> WatchValueRequest ( const Key & key , Optional < Value > value , Version ver , Optional < UID > debugID ) : key ( key ) , value ( value ) , version ( ver ) , debugID ( debugID ) { } <nl> <nl> struct GetKeyRequest : TimedRequest { <nl> } <nl> } ; <nl> <nl> + struct GetShardStateReply { <nl> + constexpr static FileIdentifier file_identifier = 0 ; <nl> + <nl> + Version first ; <nl> + Version second ; <nl> + GetShardStateReply ( ) = default ; <nl> + GetShardStateReply ( Version first , Version second ) : first ( first ) , second ( second ) { } <nl> + <nl> + template < class Ar > <nl> + void serialize ( Ar & ar ) { <nl> + serializer ( ar , first , second ) ; <nl> + } <nl> + } ; <nl> + <nl>
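GetShardStateReply above, like VersionReply and WatchValueReply earlier in this header, follows one refactoring rule: any type carried by ReplyPromise or RequestStream needs its own static file_identifier for the flatbuffers serializer, which a bare Version, int, or std::pair cannot supply. A sketch of the general shape such a wire type takes; the struct name and identifier value are invented for illustration, and FileIdentifier/serializer come from flow's serialization headers rather than being standalone:

```cpp
// Hypothetical wire type; messages sent via ReplyPromise<T> / RequestStream<T>
// are expected to look like this.
struct ExampleReply {
    // Unique tag consumed by the flatbuffers serializer (value invented here).
    constexpr static FileIdentifier file_identifier = 424242;

    int64_t value;

    ExampleReply() = default;
    explicit ExampleReply(int64_t value) : value(value) {}

    // Single hook used by both the binary and flatbuffers serialization paths.
    template <class Ar>
    void serialize(Ar& ar) {
        serializer(ar, value);
    }
};
```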
struct GetShardStateRequest { <nl> constexpr static FileIdentifier file_identifier = 15860168 ; <nl> enum waitMode { <nl> struct GetShardStateRequest { <nl> <nl> KeyRange keys ; <nl> int32_t mode ; <nl> - ReplyPromise < std : : pair < Version , Version > > reply ; <nl> + ReplyPromise < GetShardStateReply > reply ; <nl> GetShardStateRequest ( ) { } <nl> GetShardStateRequest ( KeyRange const & keys , waitMode mode ) : keys ( keys ) , mode ( mode ) { } <nl> <nl> mmm a / fdbrpc / FlowTests . actor . cpp <nl> ppp b / fdbrpc / FlowTests . actor . cpp <nl> TEST_CASE ( " / flow / flow / cancel2 " ) <nl> return Void ( ) ; <nl> } <nl> <nl> + namespace { <nl> + / / Simple message for flatbuffers unittests <nl> + struct Int { <nl> + constexpr static FileIdentifier file_identifier = 12345 ; <nl> + uint32_t value ; <nl> + Int ( ) = default ; <nl> + Int ( uint32_t value ) : value ( value ) { } <nl> + template < class Ar > <nl> + void serialize ( Ar & ar ) { <nl> + serializer ( ar , value ) ; <nl> + } <nl> + } ; <nl> + } / / namespace <nl> + <nl> TEST_CASE ( " / flow / flow / nonserializable futures " ) <nl> { <nl> / / Types no longer need to be statically serializable to make futures , promises , actors <nl> TEST_CASE ( " / flow / flow / nonserializable futures " ) <nl> <nl> / / ReplyPromise can be used like a normal promise <nl> { <nl> - ReplyPromise < int > rpInt ; <nl> - Future < int > f = rpInt . getFuture ( ) ; <nl> + ReplyPromise < Int > rpInt ; <nl> + Future < Int > f = rpInt . getFuture ( ) ; <nl> ASSERT ( ! f . isReady ( ) ) ; <nl> rpInt . send ( 123 ) ; <nl> - ASSERT ( f . get ( ) = = 123 ) ; <nl> + ASSERT ( f . get ( ) . value = = 123 ) ; <nl> } <nl> <nl> { <nl> - RequestStream < int > rsInt ; <nl> - FutureStream < int > f = rsInt . getFuture ( ) ; <nl> + RequestStream < Int > rsInt ; <nl> + FutureStream < Int > f = rsInt . getFuture ( ) ; <nl> rsInt . send ( 1 ) ; <nl> rsInt . send ( 2 ) ; <nl> - ASSERT ( f . pop ( ) = = 1 ) ; <nl> - ASSERT ( f . pop ( ) = = 2 ) ; <nl> + ASSERT ( f . pop ( ) . value = = 1 ) ; <nl> + ASSERT ( f . pop ( ) . value = = 2 ) ; <nl> } <nl> <nl> return Void ( ) ; <nl> TEST_CASE ( " / flow / flow / networked futures " ) <nl> { <nl> / / RequestStream can be serialized <nl> { <nl> - RequestStream < int > locInt ; <nl> + RequestStream < Int > locInt ; <nl> BinaryWriter wr ( IncludeVersion ( ) ) ; <nl> wr < < locInt ; <nl> <nl> ASSERT ( locInt . getEndpoint ( ) . isValid ( ) & & locInt . getEndpoint ( ) . isLocal ( ) & & locInt . getEndpoint ( ) . getPrimaryAddress ( ) = = FlowTransport : : transport ( ) . getLocalAddress ( ) ) ; <nl> <nl> BinaryReader rd ( wr . toValue ( ) , IncludeVersion ( ) ) ; <nl> - RequestStream < int > remoteInt ; <nl> + RequestStream < Int > remoteInt ; <nl> rd > > remoteInt ; <nl> <nl> ASSERT ( remoteInt . getEndpoint ( ) = = locInt . getEndpoint ( ) ) ; <nl> TEST_CASE ( " / flow / flow / networked futures " ) <nl> / / ReplyPromise can be serialized <nl> / / TODO : This needs to fiddle with g_currentDeliveryPeerAddress <nl> if ( 0 ) { <nl> - ReplyPromise < int > locInt ; <nl> + ReplyPromise < Int > locInt ; <nl> BinaryWriter wr ( IncludeVersion ( ) ) ; <nl> wr < < locInt ; <nl> <nl> ASSERT ( locInt . getEndpoint ( ) . isValid ( ) & & locInt . getEndpoint ( ) . isLocal ( ) ) ; <nl> <nl> BinaryReader rd ( wr . toValue ( ) , IncludeVersion ( ) ) ; <nl> - ReplyPromise < int > remoteInt ; <nl> + ReplyPromise < Int > remoteInt ; <nl> rd > > remoteInt ; <nl> <nl> ASSERT ( remoteInt . getEndpoint ( ) = = locInt . getEndpoint ( ) ) ; <nl> mmm a / fdbrpc / FlowTransport . actor .
cpp <nl> struct ConnectPacket { <nl> ACTOR static Future < Void > connectionReader ( TransportData * transport , Reference < IConnection > conn , Reference < struct Peer > peer , <nl> Promise < Reference < struct Peer > > onConnected ) ; <nl> <nl> - static PacketID sendPacket ( TransportData * self , ISerializeSource const & what , const Endpoint & destination , bool reliable , bool openConnection ) ; <nl> - <nl> - struct Peer : public ReferenceCounted < Peer > { <nl> - TransportData * transport ; <nl> - NetworkAddress destination ; <nl> - UnsentPacketQueue unsent ; <nl> - ReliablePacketList reliable ; <nl> - AsyncTrigger dataToSend ; / / Triggered when unsent . empty ( ) becomes false <nl> - Future < Void > connect ; <nl> - AsyncTrigger resetPing ; <nl> - bool compatible ; <nl> - bool outgoingConnectionIdle ; / / We don ' t actually have a connection open and aren ' t trying to open one because we don ' t have anything to send <nl> - double lastConnectTime ; <nl> - double reconnectionDelay ; <nl> - int peerReferences ; <nl> - bool incompatibleProtocolVersionNewer ; <nl> - int64_t bytesReceived ; <nl> - double lastDataPacketSentTime ; <nl> - <nl> - explicit Peer ( TransportData * transport , NetworkAddress const & destination ) <nl> - : transport ( transport ) , destination ( destination ) , outgoingConnectionIdle ( false ) , lastConnectTime ( 0 . 0 ) , <nl> - reconnectionDelay ( FLOW_KNOBS - > INITIAL_RECONNECTION_TIME ) , compatible ( true ) , <nl> - incompatibleProtocolVersionNewer ( false ) , peerReferences ( - 1 ) , bytesReceived ( 0 ) , lastDataPacketSentTime ( now ( ) ) { } <nl> - <nl> - void send ( PacketBuffer * pb , ReliablePacket * rp , bool firstUnsent ) { <nl> - unsent . setWriteBuffer ( pb ) ; <nl> - if ( rp ) reliable . insert ( rp ) ; <nl> - if ( firstUnsent ) dataToSend . trigger ( ) ; <nl> - } <nl> + static void sendLocal ( TransportData * self , ISerializeSource const & what , const Endpoint & destination ) ; <nl> + static ReliablePacket * sendPacket ( TransportData * self , Reference < Peer > peer , ISerializeSource const & what , const Endpoint & destination , bool reliable ) ; <nl> <nl> - void prependConnectPacket ( ) { <nl> - / / Send the ConnectPacket expected at the beginning of a new connection <nl> - ConnectPacket pkt ; <nl> - if ( transport - > localAddresses . address . isTLS ( ) = = destination . isTLS ( ) ) { <nl> - pkt . canonicalRemotePort = transport - > localAddresses . address . port ; <nl> - pkt . setCanonicalRemoteIp ( transport - > localAddresses . address . ip ) ; <nl> - } else if ( transport - > localAddresses . secondaryAddress . present ( ) ) { <nl> - pkt . canonicalRemotePort = transport - > localAddresses . secondaryAddress . get ( ) . port ; <nl> - pkt . setCanonicalRemoteIp ( transport - > localAddresses . secondaryAddress . get ( ) . ip ) ; <nl> - } else { <nl> - / / a " mixed " TLS / non - TLS connection is like a client / server connection - there ' s no way to reverse it <nl> - pkt . canonicalRemotePort = 0 ; <nl> - pkt . setCanonicalRemoteIp ( IPAddress ( 0 ) ) ; <nl> + ACTOR Future < Void > connectionMonitor ( Reference < Peer > peer ) { <nl> + state Endpoint remotePingEndpoint ( { peer - > destination } , WLTOKEN_PING_PACKET ) ; <nl> + loop { <nl> + if ( ! FlowTransport : : transport ( ) . isClient ( ) & & ! peer - > destination . isPublic ( ) & & peer - > compatible ) { <nl> + / / Don ' t send ping messages to clients unless necessary . Instead monitor incoming client pings . 
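// The loop below is a progress check on a monotone counter: peer->bytesReceived
// only ever grows, lastRefreshed records when it last grew, and only after
// CONNECTION_MONITOR_IDLE_TIMEOUT * CONNECTION_MONITOR_INCOMING_IDLE_MULTIPLIER
// seconds without growth does the monitor fall through to the explicit ping
// probe further down, rather than declaring the connection failed outright.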
<nl> + / / We ignore this block for incompatible clients because pings from server would trigger the <nl> + / / peer - > resetPing and prevent ' connection_failed ' due to ping timeout . <nl> + state double lastRefreshed = now ( ) ; <nl> + state int64_t lastBytesReceived = peer - > bytesReceived ; <nl> + loop { <nl> + wait ( delay ( FLOW_KNOBS - > CONNECTION_MONITOR_LOOP_TIME ) ) ; <nl> + if ( lastBytesReceived < peer - > bytesReceived ) { <nl> + lastRefreshed = now ( ) ; <nl> + lastBytesReceived = peer - > bytesReceived ; <nl> + } else if ( lastRefreshed < now ( ) - FLOW_KNOBS - > CONNECTION_MONITOR_IDLE_TIMEOUT * <nl> + FLOW_KNOBS - > CONNECTION_MONITOR_INCOMING_IDLE_MULTIPLIER ) { <nl> + / / If we have not received anything in this period , client must have closed <nl> + / / connection by now . Break loop to check if it is still alive by sending a ping . <nl> + break ; <nl> + } <nl> + } <nl> } <nl> <nl> - pkt . connectPacketLength = sizeof ( pkt ) - sizeof ( pkt . connectPacketLength ) ; <nl> - pkt . protocolVersion = currentProtocolVersion ; <nl> - if ( FLOW_KNOBS - > USE_OBJECT_SERIALIZER ) { <nl> - pkt . protocolVersion . addObjectSerializerFlag ( ) ; <nl> + / / We cannot let an error be thrown from connectionMonitor while still on the stack from scanPackets in connectionReader <nl> + / / because then it would not call the destructor of connectionReader when connectionReader is cancelled . <nl> + wait ( delay ( 0 ) ) ; <nl> + <nl> + if ( peer - > reliable . empty ( ) & & peer - > unsent . empty ( ) & & peer - > outstandingReplies = = 0 ) { <nl> + if ( peer - > peerReferences = = 0 & & <nl> + ( peer - > lastDataPacketSentTime < now ( ) - FLOW_KNOBS - > CONNECTION_MONITOR_UNREFERENCED_CLOSE_DELAY ) ) { <nl> + / / TODO : What about when peerReference = = - 1 ? <nl> + throw connection_unreferenced ( ) ; <nl> + } else if ( FlowTransport : : transport ( ) . isClient ( ) & & peer - > compatible & & peer - > destination . isPublic ( ) & & <nl> + ( peer - > lastConnectTime < now ( ) - FLOW_KNOBS - > CONNECTION_MONITOR_IDLE_TIMEOUT ) & & <nl> + ( peer - > lastDataPacketSentTime < now ( ) - FLOW_KNOBS - > CONNECTION_MONITOR_IDLE_TIMEOUT ) ) { <nl> + / / First condition is necessary because we may get here if we are server . <nl> + throw connection_idle ( ) ; <nl> + } <nl> } <nl> - pkt . connectionId = transport - > transportId ; <nl> <nl> - PacketBuffer * pb_first = PacketBuffer : : create ( ) ; <nl> - PacketWriter wr ( pb_first , nullptr , Unversioned ( ) ) ; <nl> - pkt . serialize ( wr ) ; <nl> - unsent . prependWriteBuffer ( pb_first , wr . finish ( ) ) ; <nl> - } <nl> + wait ( delayJittered ( FLOW_KNOBS - > CONNECTION_MONITOR_LOOP_TIME ) ) ; <nl> <nl> - void discardUnreliablePackets ( ) { <nl> - / / Throw away the current unsent list , dropping the reference count on each PacketBuffer that accounts for presence in the unsent list <nl> - unsent . discardAll ( ) ; <nl> - <nl> - / / If there are reliable packets , compact reliable packets into a new unsent range <nl> - if ( ! reliable . empty ( ) ) { <nl> - PacketBuffer * pb = unsent . getWriteBuffer ( ) ; <nl> - pb = reliable . compact ( pb , nullptr ) ; <nl> - unsent . setWriteBuffer ( pb ) ; <nl> + / / TODO : Stop monitoring and close the connection with no onDisconnect requests outstanding <nl> + state ReplyPromise < Void > reply ; <nl> + FlowTransport : : transport ( ) . 
sendUnreliable ( SerializeSource < ReplyPromise < Void > > ( reply ) , remotePingEndpoint , true ) ; <nl> + state int64_t startingBytes = peer - > bytesReceived ; <nl> + state int timeouts = 0 ; <nl> + loop { <nl> + choose { <nl> + when ( wait ( delay ( FLOW_KNOBS - > CONNECTION_MONITOR_TIMEOUT ) ) ) { <nl> + if ( startingBytes = = peer - > bytesReceived ) { <nl> + TraceEvent ( " ConnectionTimeout " ) . suppressFor ( 1 . 0 ) . detail ( " WithAddr " , peer - > destination ) ; <nl> + throw connection_failed ( ) ; <nl> + } <nl> + if ( timeouts > 1 ) { <nl> + TraceEvent ( SevWarnAlways , " ConnectionSlowPing " ) <nl> + . suppressFor ( 1 . 0 ) <nl> + . detail ( " WithAddr " , peer - > destination ) <nl> + . detail ( " Timeouts " , timeouts ) ; <nl> + } <nl> + startingBytes = peer - > bytesReceived ; <nl> + timeouts + + ; <nl> + } <nl> + when ( wait ( reply . getFuture ( ) ) ) { <nl> + break ; <nl> + } <nl> + when ( wait ( peer - > resetPing . onTrigger ( ) ) ) { <nl> + break ; <nl> + } <nl> + } <nl> } <nl> } <nl> + } <nl> <nl> - void onIncomingConnection ( Reference < Peer > self , Reference < IConnection > conn , Future < Void > reader ) { <nl> - / / In case two processes are trying to connect to each other simultaneously , the process with the larger canonical NetworkAddress <nl> - / / gets to keep its outgoing connection . <nl> - if ( ! destination . isPublic ( ) & & ! outgoingConnectionIdle ) throw address_in_use ( ) ; <nl> - NetworkAddress compatibleAddr = transport - > localAddresses . address ; <nl> - if ( transport - > localAddresses . secondaryAddress . present ( ) & & transport - > localAddresses . secondaryAddress . get ( ) . isTLS ( ) = = destination . isTLS ( ) ) { <nl> - compatibleAddr = transport - > localAddresses . secondaryAddress . get ( ) ; <nl> - } <nl> - <nl> - if ( ! destination . isPublic ( ) | | outgoingConnectionIdle | | destination > compatibleAddr ) { <nl> - / / Keep the new connection <nl> - TraceEvent ( " IncomingConnection " , conn - > getDebugID ( ) ) <nl> - . suppressFor ( 1 . 0 ) <nl> - . detail ( " FromAddr " , conn - > getPeerAddress ( ) ) <nl> - . detail ( " CanonicalAddr " , destination ) <nl> - . detail ( " IsPublic " , destination . isPublic ( ) ) ; <nl> + ACTOR Future < Void > connectionWriter ( Reference < Peer > self , Reference < IConnection > conn ) { <nl> + state double lastWriteTime = now ( ) ; <nl> + loop { <nl> + / / wait ( delay ( 0 , TaskPriority : : WriteSocket ) ) ; <nl> + wait ( delayJittered ( std : : max < double > ( FLOW_KNOBS - > MIN_COALESCE_DELAY , FLOW_KNOBS - > MAX_COALESCE_DELAY - ( now ( ) - lastWriteTime ) ) , TaskPriority : : WriteSocket ) ) ; <nl> + / / wait ( delay ( 500e - 6 , TaskPriority : : WriteSocket ) ) ; <nl> + / / wait ( yield ( TaskPriority : : WriteSocket ) ) ; <nl> <nl> - connect . cancel ( ) ; <nl> - prependConnectPacket ( ) ; <nl> - connect = connectionKeeper ( self , conn , reader ) ; <nl> - } else { <nl> - TraceEvent ( " RedundantConnection " , conn - > getDebugID ( ) ) <nl> - . suppressFor ( 1 . 0 ) <nl> - . detail ( " FromAddr " , conn - > getPeerAddress ( ) . toString ( ) ) <nl> - . detail ( " CanonicalAddr " , destination ) <nl> - . detail ( " LocalAddr " , compatibleAddr ) ; <nl> + / / Send until there is nothing left to send <nl> + loop { <nl> + lastWriteTime = now ( ) ; <nl> <nl> - / / Keep our prior connection <nl> - reader . cancel ( ) ; <nl> - conn - > close ( ) ; <nl> + int sent = conn - > write ( self - > unsent . 
getUnsent ( ) , / * limit = * / FLOW_KNOBS - > MAX_PACKET_SEND_BYTES ) ; <nl> + if ( sent ) { <nl> + self - > transport - > bytesSent + = sent ; <nl> + self - > unsent . sent ( sent ) ; <nl> + } <nl> + if ( self - > unsent . empty ( ) ) break ; <nl> <nl> - / / Send an ( ignored ) packet to make sure that , if our outgoing connection died before the peer made this connection attempt , <nl> - / / we eventually find out that our connection is dead , close it , and then respond to the next connection reattempt from peer . <nl> + TEST ( true ) ; / / We didn ' t write everything , so apparently the write buffer is full . Wait for it to be nonfull . <nl> + wait ( conn - > onWritable ( ) ) ; <nl> + wait ( yield ( TaskPriority : : WriteSocket ) ) ; <nl> } <nl> + <nl> + / / Wait until there is something to send <nl> + while ( self - > unsent . empty ( ) ) <nl> + wait ( self - > dataToSend . onTrigger ( ) ) ; <nl> } <nl> + } <nl> <nl> - ACTOR static Future < Void > connectionMonitor ( Reference < Peer > peer ) { <nl> - state Endpoint remotePingEndpoint ( { peer - > destination } , WLTOKEN_PING_PACKET ) ; <nl> - loop { <nl> - if ( ! FlowTransport : : transport ( ) . isClient ( ) & & ! peer - > destination . isPublic ( ) ) { <nl> - / / Don ' t send ping messages to clients unless necessary . Instead monitor incoming client pings . <nl> - state double lastRefreshed = now ( ) ; <nl> - state int64_t lastBytesReceived = peer - > bytesReceived ; <nl> - loop { <nl> - wait ( delay ( FLOW_KNOBS - > CONNECTION_MONITOR_LOOP_TIME ) ) ; <nl> - if ( lastBytesReceived < peer - > bytesReceived ) { <nl> - lastRefreshed = now ( ) ; <nl> - lastBytesReceived = peer - > bytesReceived ; <nl> - } else if ( lastRefreshed < now ( ) - FLOW_KNOBS - > CONNECTION_MONITOR_IDLE_TIMEOUT * <nl> - FLOW_KNOBS - > CONNECTION_MONITOR_INCOMING_IDLE_MULTIPLIER ) { <nl> - / / If we have not received anything in this period , client must have closed <nl> - / / connection by now . Break loop to check if it is still alive by sending a ping . <nl> + ACTOR Future < Void > connectionKeeper ( Reference < Peer > self , <nl> + Reference < IConnection > conn = Reference < IConnection > ( ) , <nl> + Future < Void > reader = Void ( ) ) { <nl> + TraceEvent ( SevDebug , " ConnectionKeeper " , conn ? conn - > getDebugID ( ) : UID ( ) ) <nl> + . detail ( " PeerAddr " , self - > destination ) <nl> + . detail ( " ConnSet " , ( bool ) conn ) ; <nl> + <nl> + / / This is used only at client side and is used to override waiting for unsent data to update failure monitoring <nl> + / / status . At client , if an existing connection fails , we retry making a connection and if that fails , then only <nl> + / / we report that address as failed . <nl> + state bool clientReconnectDelay = false ; <nl> + loop { <nl> + try { <nl> + if ( ! conn ) { / / Always , except for the first loop with an incoming connection <nl> + self - > outgoingConnectionIdle = true ; <nl> + <nl> + / / Wait until there is something to send . <nl> + while ( self - > unsent . empty ( ) ) { <nl> + if ( FlowTransport : : transport ( ) . isClient ( ) & & self - > destination . isPublic ( ) & & <nl> + clientReconnectDelay ) { <nl> break ; <nl> } <nl> + wait ( self - > dataToSend . onTrigger ( ) ) ; <nl> } <nl> - } <nl> <nl> - / / We cannot let an error be thrown from connectionMonitor while still on the stack from scanPackets in connectionReader <nl> - / / because then it would not call the destructor of connectionReader when connectionReader is cancelled . 
<nl> - wait ( delay ( 0 ) ) ; <nl> - <nl> - if ( peer - > reliable . empty ( ) & & peer - > unsent . empty ( ) ) { <nl> - if ( peer - > peerReferences = = 0 & & <nl> - ( peer - > lastDataPacketSentTime < now ( ) - FLOW_KNOBS - > CONNECTION_MONITOR_UNREFERENCED_CLOSE_DELAY ) ) { <nl> - / / TODO : What about when peerReference = = - 1 ? <nl> - throw connection_unreferenced ( ) ; <nl> - } else if ( FlowTransport : : transport ( ) . isClient ( ) & & peer - > compatible & & peer - > destination . isPublic ( ) & & <nl> - ( peer - > lastConnectTime < now ( ) - FLOW_KNOBS - > CONNECTION_MONITOR_IDLE_TIMEOUT ) & & <nl> - ( peer - > lastDataPacketSentTime < now ( ) - FLOW_KNOBS - > CONNECTION_MONITOR_IDLE_TIMEOUT ) ) { <nl> - / / First condition is necessary because we may get here if we are server . <nl> - throw connection_idle ( ) ; <nl> - } <nl> - } <nl> - <nl> - wait ( delayJittered ( FLOW_KNOBS - > CONNECTION_MONITOR_LOOP_TIME ) ) ; <nl> - <nl> - / / TODO : Stop monitoring and close the connection with no onDisconnect requests outstanding <nl> - state ReplyPromise < Void > reply ; <nl> - FlowTransport : : transport ( ) . sendUnreliable ( SerializeSource < ReplyPromise < Void > > ( reply ) , remotePingEndpoint ) ; <nl> - state int64_t startingBytes = peer - > bytesReceived ; <nl> - state int timeouts = 0 ; <nl> - loop { <nl> - choose { <nl> - when ( wait ( delay ( FLOW_KNOBS - > CONNECTION_MONITOR_TIMEOUT ) ) ) { <nl> - if ( startingBytes = = peer - > bytesReceived ) { <nl> - TraceEvent ( " ConnectionTimeout " ) . suppressFor ( 1 . 0 ) . detail ( " WithAddr " , peer - > destination ) ; <nl> - throw connection_failed ( ) ; <nl> - } <nl> - if ( timeouts > 1 ) { <nl> - TraceEvent ( SevWarnAlways , " ConnectionSlowPing " ) <nl> - . suppressFor ( 1 . 0 ) <nl> - . detail ( " WithAddr " , peer - > destination ) <nl> - . detail ( " Timeouts " , timeouts ) ; <nl> - } <nl> - startingBytes = peer - > bytesReceived ; <nl> - timeouts + + ; <nl> + ASSERT ( self - > destination . isPublic ( ) ) ; <nl> + self - > outgoingConnectionIdle = false ; <nl> + wait ( delayJittered ( <nl> + std : : max ( 0 . 0 , self - > lastConnectTime + self - > reconnectionDelay - <nl> + now ( ) ) ) ) ; / / Don ' t connect ( ) to the same peer more than once per 2 sec <nl> + self - > lastConnectTime = now ( ) ; <nl> + <nl> + TraceEvent ( " ConnectingTo " , conn ? conn - > getDebugID ( ) : UID ( ) ) . suppressFor ( 1 . 0 ) . detail ( " PeerAddr " , self - > destination ) ; <nl> + Reference < IConnection > _conn = wait ( timeout ( INetworkConnections : : net ( ) - > connect ( self - > destination ) , FLOW_KNOBS - > CONNECTION_MONITOR_TIMEOUT , Reference < IConnection > ( ) ) ) ; <nl> + if ( _conn ) { <nl> + if ( FlowTransport : : transport ( ) . isClient ( ) ) { <nl> + IFailureMonitor : : failureMonitor ( ) . setStatus ( self - > destination , FailureStatus ( false ) ) ; <nl> } <nl> - when ( wait ( reply . getFuture ( ) ) ) { <nl> - break ; <nl> + if ( self - > unsent . empty ( ) ) { <nl> + _conn - > close ( ) ; <nl> + clientReconnectDelay = false ; <nl> + continue ; <nl> + } else { <nl> + conn = _conn ; <nl> + TraceEvent ( " ConnectionExchangingConnectPacket " , conn - > getDebugID ( ) ) <nl> + . suppressFor ( 1 . 0 ) <nl> + . detail ( " PeerAddr " , self - > destination ) ; <nl> + self - > prependConnectPacket ( ) ; <nl> } <nl> - when ( wait ( peer - > resetPing . onTrigger ( ) ) ) { <nl> - break ; <nl> + } else { <nl> + TraceEvent ( " ConnectionTimedOut " , conn ? conn - > getDebugID ( ) : UID ( ) ) . suppressFor ( 1 . 0 ) . 
detail ( " PeerAddr " , self - > destination ) ; <nl> + if ( FlowTransport : : transport ( ) . isClient ( ) ) { <nl> + IFailureMonitor : : failureMonitor ( ) . setStatus ( self - > destination , FailureStatus ( true ) ) ; <nl> } <nl> + throw connection_failed ( ) ; <nl> } <nl> + <nl> + reader = connectionReader ( self - > transport , conn , self , Promise < Reference < Peer > > ( ) ) ; <nl> + } else { <nl> + self - > outgoingConnectionIdle = false ; <nl> } <nl> - } <nl> - } <nl> <nl> - ACTOR static Future < Void > connectionWriter ( Reference < Peer > self , Reference < IConnection > conn ) { <nl> - state double lastWriteTime = now ( ) ; <nl> - loop { <nl> - / / wait ( delay ( 0 , TaskPriority : : WriteSocket ) ) ; <nl> - wait ( delayJittered ( std : : max < double > ( FLOW_KNOBS - > MIN_COALESCE_DELAY , FLOW_KNOBS - > MAX_COALESCE_DELAY - ( now ( ) - lastWriteTime ) ) , TaskPriority : : WriteSocket ) ) ; <nl> - / / wait ( delay ( 500e - 6 , TaskPriority : : WriteSocket ) ) ; <nl> - / / wait ( yield ( TaskPriority : : WriteSocket ) ) ; <nl> + try { <nl> + self - > transport - > countConnEstablished + + ; <nl> + wait ( connectionWriter ( self , conn ) | | reader | | connectionMonitor ( self ) ) ; <nl> + } catch ( Error & e ) { <nl> + if ( e . code ( ) = = error_code_connection_failed | | e . code ( ) = = error_code_actor_cancelled | | <nl> + e . code ( ) = = error_code_connection_unreferenced | | <nl> + ( g_network - > isSimulated ( ) & & e . code ( ) = = error_code_checksum_failed ) ) <nl> + self - > transport - > countConnClosedWithoutError + + ; <nl> + else <nl> + self - > transport - > countConnClosedWithError + + ; <nl> + throw e ; <nl> + } <nl> <nl> - / / Send until there is nothing left to send <nl> - loop { <nl> - lastWriteTime = now ( ) ; <nl> + ASSERT ( false ) ; <nl> + } catch ( Error & e ) { <nl> + if ( now ( ) - self - > lastConnectTime > FLOW_KNOBS - > RECONNECTION_RESET_TIME ) { <nl> + self - > reconnectionDelay = FLOW_KNOBS - > INITIAL_RECONNECTION_TIME ; <nl> + } else { <nl> + self - > reconnectionDelay = std : : min ( FLOW_KNOBS - > MAX_RECONNECTION_TIME , self - > reconnectionDelay * FLOW_KNOBS - > RECONNECTION_TIME_GROWTH_RATE ) ; <nl> + } <nl> + self - > discardUnreliablePackets ( ) ; <nl> + reader = Future < Void > ( ) ; <nl> + bool ok = e . code ( ) = = error_code_connection_failed | | e . code ( ) = = error_code_actor_cancelled | | <nl> + e . code ( ) = = error_code_connection_unreferenced | | e . code ( ) = = error_code_connection_idle | | <nl> + ( g_network - > isSimulated ( ) & & e . code ( ) = = error_code_checksum_failed ) ; <nl> + <nl> + if ( self - > compatible ) { <nl> + TraceEvent ( ok ? SevInfo : SevWarnAlways , " ConnectionClosed " , conn ? conn - > getDebugID ( ) : UID ( ) ) <nl> + . error ( e , true ) <nl> + . suppressFor ( 1 . 0 ) <nl> + . detail ( " PeerAddr " , self - > destination ) ; <nl> + } <nl> + else { <nl> + TraceEvent ( ok ? SevInfo : SevWarnAlways , " IncompatibleConnectionClosed " , <nl> + conn ? conn - > getDebugID ( ) : UID ( ) ) <nl> + . error ( e , true ) <nl> + . suppressFor ( 1 . 0 ) <nl> + . detail ( " PeerAddr " , self - > destination ) ; <nl> + } <nl> <nl> - int sent = conn - > write ( self - > unsent . getUnsent ( ) , / * limit = * / FLOW_KNOBS - > MAX_PACKET_SEND_BYTES ) ; <nl> - if ( sent ) { <nl> - self - > transport - > bytesSent + = sent ; <nl> - self - > unsent . sent ( sent ) ; <nl> + if ( self - > destination . isPublic ( ) & & IFailureMonitor : : failureMonitor ( ) . getState ( self - > destination ) . 
isAvailable ( ) ) { <nl> + auto & it = self - > transport - > closedPeers [ self - > destination ] ; <nl> + if ( now ( ) - it . second > FLOW_KNOBS - > TOO_MANY_CONNECTIONS_CLOSED_RESET_DELAY ) { <nl> + it . first = now ( ) ; <nl> + } else if ( now ( ) - it . first > FLOW_KNOBS - > TOO_MANY_CONNECTIONS_CLOSED_TIMEOUT ) { <nl> + TraceEvent ( SevWarnAlways , " TooManyConnectionsClosed " , conn ? conn - > getDebugID ( ) : UID ( ) ) <nl> + . suppressFor ( 5 . 0 ) <nl> + . detail ( " PeerAddr " , self - > destination ) ; <nl> + self - > transport - > degraded - > set ( true ) ; <nl> } <nl> - if ( self - > unsent . empty ( ) ) break ; <nl> + it . second = now ( ) ; <nl> + } <nl> <nl> - TEST ( true ) ; / / We didn ' t write everything , so apparently the write buffer is full . Wait for it to be nonfull . <nl> - wait ( conn - > onWritable ( ) ) ; <nl> - wait ( yield ( TaskPriority : : WriteSocket ) ) ; <nl> + if ( conn ) { <nl> + if ( FlowTransport : : transport ( ) . isClient ( ) & & e . code ( ) ! = error_code_connection_idle ) { <nl> + clientReconnectDelay = true ; <nl> + } <nl> + conn - > close ( ) ; <nl> + conn = Reference < IConnection > ( ) ; <nl> } <nl> <nl> - / / Wait until there is something to send <nl> - while ( self - > unsent . empty ( ) ) <nl> - wait ( self - > dataToSend . onTrigger ( ) ) ; <nl> + / / Clients might send more packets in response , which needs to go out on the next connection <nl> + IFailureMonitor : : failureMonitor ( ) . notifyDisconnect ( self - > destination ) ; <nl> + <nl> + if ( e . code ( ) = = error_code_actor_cancelled ) throw ; <nl> + / / Try to recover , even from serious errors , by retrying <nl> + <nl> + if ( self - > peerReferences < = 0 & & self - > reliable . empty ( ) & & self - > unsent . empty ( ) & & self - > outstandingReplies = = 0 ) { <nl> + TraceEvent ( " PeerDestroy " ) . error ( e ) . suppressFor ( 1 . 0 ) . detail ( " PeerAddr " , self - > destination ) ; <nl> + self - > connect . cancel ( ) ; <nl> + self - > transport - > peers . erase ( self - > destination ) ; <nl> + return Void ( ) ; <nl> + } <nl> } <nl> } <nl> + } <nl> <nl> - ACTOR static Future < Void > connectionKeeper ( Reference < Peer > self , <nl> - Reference < IConnection > conn = Reference < IConnection > ( ) , <nl> - Future < Void > reader = Void ( ) ) { <nl> - TraceEvent ( SevDebug , " ConnectionKeeper " , conn ? conn - > getDebugID ( ) : UID ( ) ) <nl> - . detail ( " PeerAddr " , self - > destination ) <nl> - . detail ( " ConnSet " , ( bool ) conn ) ; <nl> - <nl> - / / This is used only at client side and is used to override waiting for unsent data to update failure monitoring <nl> - / / status . At client , if an existing connection fails , we retry making a connection and if that fails , then only <nl> - / / we report that address as failed . <nl> - state bool clientReconnectDelay = false ; <nl> - loop { <nl> - try { <nl> - if ( ! conn ) { / / Always , except for the first loop with an incoming connection <nl> - self - > outgoingConnectionIdle = true ; <nl> - <nl> - / / Wait until there is something to send . <nl> - while ( self - > unsent . empty ( ) ) { <nl> - if ( FlowTransport : : transport ( ) . isClient ( ) & & self - > destination . isPublic ( ) & & <nl> - clientReconnectDelay ) { <nl> - break ; <nl> - } <nl> - wait ( self - > dataToSend . onTrigger ( ) ) ; <nl> - } <nl> - <nl> - ASSERT ( self - > destination . isPublic ( ) ) ; <nl> - self - > outgoingConnectionIdle = false ; <nl> - wait ( delayJittered ( <nl> - std : : max ( 0 . 
0 , self - > lastConnectTime + self - > reconnectionDelay - <nl> - now ( ) ) ) ) ; / / Don ' t connect ( ) to the same peer more than once per 2 sec <nl> - self - > lastConnectTime = now ( ) ; <nl> - <nl> - TraceEvent ( " ConnectingTo " , conn ? conn - > getDebugID ( ) : UID ( ) ) . suppressFor ( 1 . 0 ) . detail ( " PeerAddr " , self - > destination ) ; <nl> - Reference < IConnection > _conn = wait ( timeout ( INetworkConnections : : net ( ) - > connect ( self - > destination ) , FLOW_KNOBS - > CONNECTION_MONITOR_TIMEOUT , Reference < IConnection > ( ) ) ) ; <nl> - if ( _conn ) { <nl> - if ( FlowTransport : : transport ( ) . isClient ( ) ) { <nl> - IFailureMonitor : : failureMonitor ( ) . setStatus ( self - > destination , FailureStatus ( false ) ) ; <nl> - } <nl> - if ( self - > unsent . empty ( ) ) { <nl> - _conn - > close ( ) ; <nl> - clientReconnectDelay = false ; <nl> - continue ; <nl> - } else { <nl> - conn = _conn ; <nl> - TraceEvent ( " ConnectionExchangingConnectPacket " , conn - > getDebugID ( ) ) <nl> - . suppressFor ( 1 . 0 ) <nl> - . detail ( " PeerAddr " , self - > destination ) ; <nl> - self - > prependConnectPacket ( ) ; <nl> - } <nl> - } else { <nl> - TraceEvent ( " ConnectionTimedOut " , conn ? conn - > getDebugID ( ) : UID ( ) ) . suppressFor ( 1 . 0 ) . detail ( " PeerAddr " , self - > destination ) ; <nl> - if ( FlowTransport : : transport ( ) . isClient ( ) ) { <nl> - IFailureMonitor : : failureMonitor ( ) . setStatus ( self - > destination , FailureStatus ( true ) ) ; <nl> - } <nl> - throw connection_failed ( ) ; <nl> - } <nl> + void Peer : : send ( PacketBuffer * pb , ReliablePacket * rp , bool firstUnsent ) { <nl> + unsent . setWriteBuffer ( pb ) ; <nl> + if ( rp ) reliable . insert ( rp ) ; <nl> + if ( firstUnsent ) dataToSend . trigger ( ) ; <nl> + } <nl> <nl> - reader = connectionReader ( self - > transport , conn , self , Promise < Reference < Peer > > ( ) ) ; <nl> - } else { <nl> - self - > outgoingConnectionIdle = false ; <nl> - } <nl> + void Peer : : prependConnectPacket ( ) { <nl> + / / Send the ConnectPacket expected at the beginning of a new connection <nl> + ConnectPacket pkt ; <nl> + if ( transport - > localAddresses . address . isTLS ( ) = = destination . isTLS ( ) ) { <nl> + pkt . canonicalRemotePort = transport - > localAddresses . address . port ; <nl> + pkt . setCanonicalRemoteIp ( transport - > localAddresses . address . ip ) ; <nl> + } else if ( transport - > localAddresses . secondaryAddress . present ( ) ) { <nl> + pkt . canonicalRemotePort = transport - > localAddresses . secondaryAddress . get ( ) . port ; <nl> + pkt . setCanonicalRemoteIp ( transport - > localAddresses . secondaryAddress . get ( ) . ip ) ; <nl> + } else { <nl> + / / a " mixed " TLS / non - TLS connection is like a client / server connection - there ' s no way to reverse it <nl> + pkt . canonicalRemotePort = 0 ; <nl> + pkt . setCanonicalRemoteIp ( IPAddress ( 0 ) ) ; <nl> + } <nl> <nl> - try { <nl> - self - > transport - > countConnEstablished + + ; <nl> - wait ( connectionWriter ( self , conn ) | | reader | | connectionMonitor ( self ) ) ; <nl> - } catch ( Error & e ) { <nl> - if ( e . code ( ) = = error_code_connection_failed | | e . code ( ) = = error_code_actor_cancelled | | <nl> - e . code ( ) = = error_code_connection_unreferenced | | <nl> - ( g_network - > isSimulated ( ) & & e . 
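
The removed block above shows the other half of the reconnection policy: connect attempts are paced by reconnectionDelay, which the catch handler earlier in connectionKeeper grows geometrically on rapid failures and resets after a long-lived connection. A compact sketch, with invented values standing in for the four knobs (INITIAL_RECONNECTION_TIME, MAX_RECONNECTION_TIME, RECONNECTION_TIME_GROWTH_RATE, RECONNECTION_RESET_TIME):

    #include <algorithm>

    struct ReconnectPolicy {
        double initial = 0.05, maxDelay = 0.5, growth = 1.2, resetAfter = 5.0; // assumed values
        double delay = 0.05;

        // Called when a connection fails; connectionAge is now() - lastConnectTime.
        void onFailure(double connectionAge) {
            if (connectionAge > resetAfter)
                delay = initial;                            // long-lived connection: start over
            else
                delay = std::min(maxDelay, delay * growth); // rapid failures: back off
        }
    };
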
code ( ) = = error_code_checksum_failed ) ) <nl> - self - > transport - > countConnClosedWithoutError + + ; <nl> - else <nl> - self - > transport - > countConnClosedWithError + + ; <nl> - throw e ; <nl> - } <nl> + pkt . connectPacketLength = sizeof ( pkt ) - sizeof ( pkt . connectPacketLength ) ; <nl> + pkt . protocolVersion = currentProtocolVersion ; <nl> + if ( FLOW_KNOBS - > USE_OBJECT_SERIALIZER ) { <nl> + pkt . protocolVersion . addObjectSerializerFlag ( ) ; <nl> + } <nl> + pkt . connectionId = transport - > transportId ; <nl> <nl> - ASSERT ( false ) ; <nl> - } catch ( Error & e ) { <nl> - if ( now ( ) - self - > lastConnectTime > FLOW_KNOBS - > RECONNECTION_RESET_TIME ) { <nl> - self - > reconnectionDelay = FLOW_KNOBS - > INITIAL_RECONNECTION_TIME ; <nl> - } else { <nl> - self - > reconnectionDelay = std : : min ( FLOW_KNOBS - > MAX_RECONNECTION_TIME , self - > reconnectionDelay * FLOW_KNOBS - > RECONNECTION_TIME_GROWTH_RATE ) ; <nl> - } <nl> - self - > discardUnreliablePackets ( ) ; <nl> - reader = Future < Void > ( ) ; <nl> - bool ok = e . code ( ) = = error_code_connection_failed | | e . code ( ) = = error_code_actor_cancelled | | <nl> - e . code ( ) = = error_code_connection_unreferenced | | e . code ( ) = = error_code_connection_idle | | <nl> - ( g_network - > isSimulated ( ) & & e . code ( ) = = error_code_checksum_failed ) ; <nl> - <nl> - if ( self - > compatible ) { <nl> - TraceEvent ( ok ? SevInfo : SevWarnAlways , " ConnectionClosed " , conn ? conn - > getDebugID ( ) : UID ( ) ) <nl> - . error ( e , true ) <nl> - . suppressFor ( 1 . 0 ) <nl> - . detail ( " PeerAddr " , self - > destination ) ; <nl> - } <nl> - else { <nl> - TraceEvent ( ok ? SevInfo : SevWarnAlways , " IncompatibleConnectionClosed " , <nl> - conn ? conn - > getDebugID ( ) : UID ( ) ) <nl> - . error ( e , true ) <nl> - . suppressFor ( 1 . 0 ) <nl> - . detail ( " PeerAddr " , self - > destination ) ; <nl> - } <nl> + PacketBuffer * pb_first = PacketBuffer : : create ( ) ; <nl> + PacketWriter wr ( pb_first , nullptr , Unversioned ( ) ) ; <nl> + pkt . serialize ( wr ) ; <nl> + unsent . prependWriteBuffer ( pb_first , wr . finish ( ) ) ; <nl> + } <nl> <nl> - if ( self - > destination . isPublic ( ) & & IFailureMonitor : : failureMonitor ( ) . getState ( self - > destination ) . isAvailable ( ) ) { <nl> - auto & it = self - > transport - > closedPeers [ self - > destination ] ; <nl> - if ( now ( ) - it . second > FLOW_KNOBS - > TOO_MANY_CONNECTIONS_CLOSED_RESET_DELAY ) { <nl> - it . first = now ( ) ; <nl> - } else if ( now ( ) - it . first > FLOW_KNOBS - > TOO_MANY_CONNECTIONS_CLOSED_TIMEOUT ) { <nl> - TraceEvent ( SevWarnAlways , " TooManyConnectionsClosed " , conn ? conn - > getDebugID ( ) : UID ( ) ) <nl> - . suppressFor ( 5 . 0 ) <nl> - . detail ( " PeerAddr " , self - > destination ) ; <nl> - self - > transport - > degraded - > set ( true ) ; <nl> - } <nl> - it . second = now ( ) ; <nl> - } <nl> + void Peer : : discardUnreliablePackets ( ) { <nl> + / / Throw away the current unsent list , dropping the reference count on each PacketBuffer that accounts for presence in the unsent list <nl> + unsent . discardAll ( ) ; <nl> <nl> - if ( conn ) { <nl> - if ( FlowTransport : : transport ( ) . isClient ( ) & & e . code ( ) ! = error_code_connection_idle ) { <nl> - clientReconnectDelay = true ; <nl> - } <nl> - conn - > close ( ) ; <nl> - conn = Reference < IConnection > ( ) ; <nl> - } <nl> + / / If there are reliable packets , compact reliable packets into a new unsent range <nl> + if ( ! reliable . 
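
prependConnectPacket() advertises a canonical address the remote side could dial back. The sketch below restates that selection under simplified types (Addr is a stand-in for NetworkAddress): prefer the local address whose TLS mode matches the destination, fall back to the secondary listener, else advertise port 0 because a mixed TLS/non-TLS link cannot be reversed.

    #include <optional>

    struct Addr { unsigned ip = 0; int port = 0; bool tls = false; };

    Addr canonicalAddressFor(const Addr& primary, const std::optional<Addr>& secondary,
                             bool destIsTLS) {
        if (primary.tls == destIsTLS) return primary; // same transport: reachable in reverse
        if (secondary) return *secondary;             // advertise the other listener
        return Addr{0, 0, destIsTLS};                 // mixed link: port 0 signals "can't dial back"
    }
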
empty ( ) ) { <nl> + PacketBuffer * pb = unsent . getWriteBuffer ( ) ; <nl> + pb = reliable . compact ( pb , nullptr ) ; <nl> + unsent . setWriteBuffer ( pb ) ; <nl> + } <nl> + } <nl> <nl> - / / Clients might send more packets in response , which needs to go out on the next connection <nl> - IFailureMonitor : : failureMonitor ( ) . notifyDisconnect ( self - > destination ) ; <nl> + void Peer : : onIncomingConnection ( Reference < Peer > self , Reference < IConnection > conn , Future < Void > reader ) { <nl> + / / In case two processes are trying to connect to each other simultaneously , the process with the larger canonical NetworkAddress <nl> + / / gets to keep its outgoing connection . <nl> + if ( ! destination . isPublic ( ) & & ! outgoingConnectionIdle ) throw address_in_use ( ) ; <nl> + NetworkAddress compatibleAddr = transport - > localAddresses . address ; <nl> + if ( transport - > localAddresses . secondaryAddress . present ( ) & & transport - > localAddresses . secondaryAddress . get ( ) . isTLS ( ) = = destination . isTLS ( ) ) { <nl> + compatibleAddr = transport - > localAddresses . secondaryAddress . get ( ) ; <nl> + } <nl> <nl> - if ( e . code ( ) = = error_code_actor_cancelled ) throw ; <nl> - / / Try to recover , even from serious errors , by retrying <nl> + if ( ! destination . isPublic ( ) | | outgoingConnectionIdle | | destination > compatibleAddr ) { <nl> + / / Keep the new connection <nl> + TraceEvent ( " IncomingConnection " , conn - > getDebugID ( ) ) <nl> + . suppressFor ( 1 . 0 ) <nl> + . detail ( " FromAddr " , conn - > getPeerAddress ( ) ) <nl> + . detail ( " CanonicalAddr " , destination ) <nl> + . detail ( " IsPublic " , destination . isPublic ( ) ) ; <nl> + <nl> + connect . cancel ( ) ; <nl> + prependConnectPacket ( ) ; <nl> + connect = connectionKeeper ( self , conn , reader ) ; <nl> + } else { <nl> + TraceEvent ( " RedundantConnection " , conn - > getDebugID ( ) ) <nl> + . suppressFor ( 1 . 0 ) <nl> + . detail ( " FromAddr " , conn - > getPeerAddress ( ) . toString ( ) ) <nl> + . detail ( " CanonicalAddr " , destination ) <nl> + . detail ( " LocalAddr " , compatibleAddr ) ; <nl> + <nl> + / / Keep our prior connection <nl> + reader . cancel ( ) ; <nl> + conn - > close ( ) ; <nl> <nl> - if ( self - > peerReferences < = 0 & & self - > reliable . empty ( ) & & self - > unsent . empty ( ) ) { <nl> - TraceEvent ( " PeerDestroy " ) . error ( e ) . suppressFor ( 1 . 0 ) . detail ( " PeerAddr " , self - > destination ) ; <nl> - self - > connect . cancel ( ) ; <nl> - self - > transport - > peers . erase ( self - > destination ) ; <nl> - return Void ( ) ; <nl> - } <nl> - } <nl> - } <nl> + / / Send an ( ignored ) packet to make sure that , if our outgoing connection died before the peer made this connection attempt , <nl> + / / we eventually find out that our connection is dead , close it , and then respond to the next connection reattempt from peer . <nl> } <nl> - } ; <nl> + } <nl> <nl> TransportData : : ~ TransportData ( ) { <nl> for ( auto & p : peers ) { <nl> ACTOR static void deliver ( TransportData * self , Endpoint destination , ArenaReader <nl> } else if ( destination . token . first ( ) & TOKEN_STREAM_FLAG ) { <nl> / / We don ' t have the ( stream ) endpoint ' token ' , notify the remote machine <nl> if ( destination . token . first ( ) ! = - 1 ) { <nl> - sendPacket ( self , <nl> - SerializeSource < Endpoint > ( Endpoint ( self - > localAddresses , destination . token ) ) , <nl> - Endpoint ( destination . 
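
onIncomingConnection() resolves simultaneous dials deterministically: both sides compare the peer's canonical address with their own compatible local address, and only the side with the larger address keeps its outgoing connection. The predicate below restates that decision with plain integers standing in for NetworkAddress ordering:

    // Returns true when the incoming connection should replace our outgoing one.
    bool shouldKeepIncoming(bool destinationIsPublic, bool outgoingIdle,
                            long destinationAddr, long compatibleLocalAddr) {
        return !destinationIsPublic                       // private peers can't be re-dialed
            || outgoingIdle                               // our outgoing side was unused anyway
            || destinationAddr > compatibleLocalAddr;     // larger address wins its outgoing conn
    }
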
addresses , WLTOKEN_ENDPOINT_NOT_FOUND ) , false , true ) ; <nl> + if ( self - > isLocalAddress ( destination . getPrimaryAddress ( ) ) ) { <nl> + sendLocal ( self , SerializeSource < Endpoint > ( Endpoint ( self - > localAddresses , destination . token ) ) , Endpoint ( destination . addresses , WLTOKEN_ENDPOINT_NOT_FOUND ) ) ; <nl> + } else { <nl> + Reference < Peer > peer = self - > getPeer ( destination . getPrimaryAddress ( ) ) ; <nl> + sendPacket ( self , peer , SerializeSource < Endpoint > ( Endpoint ( self - > localAddresses , destination . token ) ) , Endpoint ( destination . addresses , WLTOKEN_ENDPOINT_NOT_FOUND ) , false ) ; <nl> + } <nl> } <nl> } <nl> <nl> Reference < Peer > TransportData : : getPeer ( NetworkAddress const & address , bool open <nl> return Reference < Peer > ( ) ; <nl> } <nl> Reference < Peer > newPeer = Reference < Peer > ( new Peer ( this , address ) ) ; <nl> - newPeer - > connect = Peer : : connectionKeeper ( newPeer ) ; <nl> + newPeer - > connect = connectionKeeper ( newPeer ) ; <nl> peers [ address ] = newPeer ; <nl> return newPeer ; <nl> } <nl> void FlowTransport : : removePeerReference ( const Endpoint & endpoint , bool isStream ) <nl> . detail ( " Address " , endpoint . getPrimaryAddress ( ) ) <nl> . detail ( " Token " , endpoint . token ) ; <nl> } <nl> - if ( peer - > peerReferences = = 0 & & peer - > reliable . empty ( ) & & peer - > unsent . empty ( ) ) { <nl> + if ( peer - > peerReferences = = 0 & & peer - > reliable . empty ( ) & & peer - > unsent . empty ( ) & & peer - > outstandingReplies = = 0 ) { <nl> peer - > resetPing . trigger ( ) ; <nl> } <nl> } <nl> void FlowTransport : : addWellKnownEndpoint ( Endpoint & endpoint , NetworkMessageRece <nl> ASSERT ( endpoint . token = = otoken ) ; <nl> } <nl> <nl> - static PacketID sendPacket ( TransportData * self , ISerializeSource const & what , const Endpoint & destination , bool reliable , bool openConnection ) { <nl> - if ( self - > isLocalAddress ( destination . getPrimaryAddress ( ) ) ) { <nl> - TEST ( true ) ; / / " Loopback " delivery <nl> - / / SOMEDAY : Would it be better to avoid ( de ) serialization by doing this check in flow ? <nl> - <nl> - Standalone < StringRef > copy ; <nl> - if ( FLOW_KNOBS - > USE_OBJECT_SERIALIZER ) { <nl> - ObjectWriter wr ( AssumeVersion ( currentProtocolVersion ) ) ; <nl> - what . serializeObjectWriter ( wr ) ; <nl> - copy = wr . toStringRef ( ) ; <nl> - } else { <nl> - BinaryWriter wr ( AssumeVersion ( currentProtocolVersion ) ) ; <nl> - what . serializeBinaryWriter ( wr ) ; <nl> - copy = wr . toValue ( ) ; <nl> - } <nl> + static void sendLocal ( TransportData * self , ISerializeSource const & what , const Endpoint & destination ) { <nl> + TEST ( true ) ; / / " Loopback " delivery <nl> + / / SOMEDAY : Would it be better to avoid ( de ) serialization by doing this check in flow ? <nl> + <nl> + Standalone < StringRef > copy ; <nl> + if ( FLOW_KNOBS - > USE_OBJECT_SERIALIZER ) { <nl> + ObjectWriter wr ( AssumeVersion ( currentProtocolVersion ) ) ; <nl> + what . serializeObjectWriter ( wr ) ; <nl> + copy = wr . toStringRef ( ) ; <nl> + } else { <nl> + BinaryWriter wr ( AssumeVersion ( currentProtocolVersion ) ) ; <nl> + what . serializeBinaryWriter ( wr ) ; <nl> + copy = wr . toValue ( ) ; <nl> + } <nl> # if VALGRIND <nl> VALGRIND_CHECK_MEM_IS_DEFINED ( copy . begin ( ) , copy . size ( ) ) ; <nl> # endif <nl> <nl> - ASSERT ( copy . size ( ) > 0 ) ; <nl> - deliver ( self , destination , ArenaReader ( copy . 
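
Both the PeerDestroy path in connectionKeeper and removePeerReference() above now gate peer teardown on outstandingReplies, closing a window where a peer could be destroyed while a tryGetReply was still waiting on it. The predicate, isolated into a sketch whose field names mirror Peer:

    struct PeerState {
        int peerReferences = 0;
        int outstandingReplies = 0;
        bool reliableEmpty = true, unsentEmpty = true;

        // Only tear down when nothing references the peer, nothing is queued,
        // and (new in this change) no replies are still in flight.
        bool canDestroy() const {
            return peerReferences <= 0 && reliableEmpty && unsentEmpty &&
                   outstandingReplies == 0;
        }
    };
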
arena ( ) , copy , AssumeVersion ( currentProtocolVersion ) ) , false ) ; <nl> + ASSERT ( copy . size ( ) > 0 ) ; <nl> + deliver ( self , destination , ArenaReader ( copy . arena ( ) , copy , AssumeVersion ( currentProtocolVersion ) ) , false ) ; <nl> + } <nl> <nl> - return ( PacketID ) nullptr ; <nl> - } else { <nl> - const bool checksumEnabled = ! destination . getPrimaryAddress ( ) . isTLS ( ) ; <nl> - + + self - > countPacketsGenerated ; <nl> + static ReliablePacket * sendPacket ( TransportData * self , Reference < Peer > peer , ISerializeSource const & what , const Endpoint & destination , bool reliable ) { <nl> + const bool checksumEnabled = ! destination . getPrimaryAddress ( ) . isTLS ( ) ; <nl> + + + self - > countPacketsGenerated ; <nl> <nl> - Reference < Peer > peer = self - > getPeer ( destination . getPrimaryAddress ( ) , openConnection ) ; <nl> + / / If there isn ' t an open connection , a public address , or the peer isn ' t compatible , we can ' t send <nl> + if ( ! peer | | ( peer - > outgoingConnectionIdle & & ! destination . getPrimaryAddress ( ) . isPublic ( ) ) | | ( peer - > incompatibleProtocolVersionNewer & & destination . token ! = WLTOKEN_PING_PACKET ) ) { <nl> + TEST ( true ) ; / / Can ' t send to private address without a compatible open connection <nl> + return nullptr ; <nl> + } <nl> <nl> - / / If there isn ' t an open connection , a public address , or the peer isn ' t compatible , we can ' t send <nl> - if ( ! peer | | ( peer - > outgoingConnectionIdle & & ! destination . getPrimaryAddress ( ) . isPublic ( ) ) | | ( peer - > incompatibleProtocolVersionNewer & & destination . token ! = WLTOKEN_PING_PACKET ) ) { <nl> - TEST ( true ) ; / / Can ' t send to private address without a compatible open connection <nl> - return ( PacketID ) nullptr ; <nl> - } <nl> + bool firstUnsent = peer - > unsent . empty ( ) ; <nl> <nl> - bool firstUnsent = peer - > unsent . empty ( ) ; <nl> + PacketBuffer * pb = peer - > unsent . getWriteBuffer ( ) ; <nl> + ReliablePacket * rp = reliable ? new ReliablePacket : 0 ; <nl> <nl> - PacketBuffer * pb = peer - > unsent . getWriteBuffer ( ) ; <nl> - ReliablePacket * rp = reliable ? new ReliablePacket : 0 ; <nl> + int prevBytesWritten = pb - > bytes_written ; <nl> + PacketBuffer * checksumPb = pb ; <nl> <nl> - int prevBytesWritten = pb - > bytes_written ; <nl> - PacketBuffer * checksumPb = pb ; <nl> + PacketWriter wr ( pb , rp , AssumeVersion ( currentProtocolVersion ) ) ; / / SOMEDAY : Can we downgrade to talk to older peers ? <nl> <nl> - PacketWriter wr ( pb , rp , AssumeVersion ( currentProtocolVersion ) ) ; / / SOMEDAY : Can we downgrade to talk to older peers ? <nl> + / / Reserve some space for packet length and checksum , write them after serializing data <nl> + SplitBuffer packetInfoBuffer ; <nl> + uint32_t len , checksum = 0 ; <nl> + int packetInfoSize = sizeof ( len ) ; <nl> + if ( checksumEnabled ) { <nl> + packetInfoSize + = sizeof ( checksum ) ; <nl> + } <nl> <nl> - / / Reserve some space for packet length and checksum , write them after serializing data <nl> - SplitBuffer packetInfoBuffer ; <nl> - uint32_t len , checksum = 0 ; <nl> - int packetInfoSize = sizeof ( len ) ; <nl> - if ( checksumEnabled ) { <nl> - packetInfoSize + = sizeof ( checksum ) ; <nl> + wr . writeAhead ( packetInfoSize , & packetInfoBuffer ) ; <nl> + wr < < destination . token ; <nl> + what . serializePacketWriter ( wr , FLOW_KNOBS - > USE_OBJECT_SERIALIZER ) ; <nl> + pb = wr . finish ( ) ; <nl> + len = wr . 
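
sendPacket() reserves header space before the payload length is known and backfills it afterwards, the writeAhead() pattern. A minimal sketch with a flat byte vector standing in for PacketWriter and SplitBuffer; the checksum value is left as a placeholder since the real computation follows below:

    #include <cstdint>
    #include <cstring>
    #include <vector>

    std::vector<uint8_t> frame(const std::vector<uint8_t>& payload, bool checksumEnabled) {
        size_t headerSize = sizeof(uint32_t) + (checksumEnabled ? sizeof(uint32_t) : 0);
        std::vector<uint8_t> out(headerSize);                  // writeAhead(packetInfoSize)
        out.insert(out.end(), payload.begin(), payload.end()); // serialize the body
        uint32_t len = uint32_t(out.size() - headerSize);
        std::memcpy(out.data(), &len, sizeof(len));            // backfill the length
        if (checksumEnabled) {
            uint32_t checksum = 0; // computed over the body in the real code
            std::memcpy(out.data() + sizeof(len), &checksum, sizeof(checksum));
        }
        return out;
    }
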
size ( ) - packetInfoSize ; <nl> + <nl> + if ( checksumEnabled ) { <nl> + / / Find the correct place to start calculating checksum <nl> + uint32_t checksumUnprocessedLength = len ; <nl> + prevBytesWritten + = packetInfoSize ; <nl> + if ( prevBytesWritten > = checksumPb - > bytes_written ) { <nl> + prevBytesWritten - = checksumPb - > bytes_written ; <nl> + checksumPb = checksumPb - > nextPacketBuffer ( ) ; <nl> } <nl> <nl> - wr . writeAhead ( packetInfoSize , & packetInfoBuffer ) ; <nl> - wr < < destination . token ; <nl> - what . serializePacketWriter ( wr , FLOW_KNOBS - > USE_OBJECT_SERIALIZER ) ; <nl> - pb = wr . finish ( ) ; <nl> - len = wr . size ( ) - packetInfoSize ; <nl> - <nl> - if ( checksumEnabled ) { <nl> - / / Find the correct place to start calculating checksum <nl> - uint32_t checksumUnprocessedLength = len ; <nl> - prevBytesWritten + = packetInfoSize ; <nl> - if ( prevBytesWritten > = checksumPb - > bytes_written ) { <nl> - prevBytesWritten - = checksumPb - > bytes_written ; <nl> - checksumPb = checksumPb - > nextPacketBuffer ( ) ; <nl> - } <nl> - <nl> - / / Checksum calculation <nl> - while ( checksumUnprocessedLength > 0 ) { <nl> - uint32_t processLength = <nl> - std : : min ( checksumUnprocessedLength , ( uint32_t ) ( checksumPb - > bytes_written - prevBytesWritten ) ) ; <nl> - checksum = crc32c_append ( checksum , checksumPb - > data ( ) + prevBytesWritten , processLength ) ; <nl> - checksumUnprocessedLength - = processLength ; <nl> - checksumPb = checksumPb - > nextPacketBuffer ( ) ; <nl> - prevBytesWritten = 0 ; <nl> - } <nl> - } <nl> - <nl> - / / Write packet length and checksum into packet buffer <nl> - packetInfoBuffer . write ( & len , sizeof ( len ) ) ; <nl> - if ( checksumEnabled ) { <nl> - packetInfoBuffer . write ( & checksum , sizeof ( checksum ) , sizeof ( len ) ) ; <nl> + / / Checksum calculation <nl> + while ( checksumUnprocessedLength > 0 ) { <nl> + uint32_t processLength = <nl> + std : : min ( checksumUnprocessedLength , ( uint32_t ) ( checksumPb - > bytes_written - prevBytesWritten ) ) ; <nl> + checksum = crc32c_append ( checksum , checksumPb - > data ( ) + prevBytesWritten , processLength ) ; <nl> + checksumUnprocessedLength - = processLength ; <nl> + checksumPb = checksumPb - > nextPacketBuffer ( ) ; <nl> + prevBytesWritten = 0 ; <nl> } <nl> + } <nl> <nl> - if ( len > FLOW_KNOBS - > PACKET_LIMIT ) { <nl> - TraceEvent ( SevError , " Net2_PacketLimitExceeded " ) . detail ( " ToPeer " , destination . getPrimaryAddress ( ) ) . detail ( " Length " , ( int ) len ) ; <nl> - / / throw platform_error ( ) ; / / FIXME : How to recover from this situation ? <nl> - } <nl> - else if ( len > FLOW_KNOBS - > PACKET_WARNING ) { <nl> - TraceEvent ( self - > warnAlwaysForLargePacket ? SevWarnAlways : SevWarn , " Net2_LargePacket " ) <nl> - . suppressFor ( 1 . 0 ) <nl> - . detail ( " ToPeer " , destination . getPrimaryAddress ( ) ) <nl> - . detail ( " Length " , ( int ) len ) <nl> - . detail ( " Token " , destination . token ) <nl> - . backtrace ( ) ; <nl> + / / Write packet length and checksum into packet buffer <nl> + packetInfoBuffer . write ( & len , sizeof ( len ) ) ; <nl> + if ( checksumEnabled ) { <nl> + packetInfoBuffer . write ( & checksum , sizeof ( checksum ) , sizeof ( len ) ) ; <nl> + } <nl> <nl> - if ( g_network - > isSimulated ( ) ) <nl> - self - > warnAlwaysForLargePacket = false ; <nl> - } <nl> + if ( len > FLOW_KNOBS - > PACKET_LIMIT ) { <nl> + TraceEvent ( SevError , " Net2_PacketLimitExceeded " ) . detail ( " ToPeer " , destination . 
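
Because the serialized packet can span several PacketBuffers and begin partway into the first, the checksum loop above clamps each step to the bytes remaining in the current buffer and resets the offset for each following buffer. The walk, isolated, with a toy mixing function standing in as a placeholder for crc32c_append():

    #include <algorithm>
    #include <cstdint>

    struct Buf { const uint8_t* data; uint32_t written; Buf* next; };

    // Placeholder mix, not a real CRC; only the traversal shape matters here.
    uint32_t crc32_step(uint32_t crc, const uint8_t* p, uint32_t n) {
        while (n--) crc = (crc >> 1) ^ (*p++ * 0x9E3779B9u);
        return crc;
    }

    uint32_t checksumSpan(Buf* b, uint32_t offset, uint32_t len) {
        uint32_t crc = 0;
        while (len > 0) {
            uint32_t step = std::min(len, b->written - offset);
            crc = crc32_step(crc, b->data + offset, step);
            len -= step;
            b = b->next;
            offset = 0; // subsequent buffers are consumed from their start
        }
        return crc;
    }
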
getPrimaryAddress ( ) ) . detail ( " Length " , ( int ) len ) ; <nl> + / / throw platform_error ( ) ; / / FIXME : How to recover from this situation ? <nl> + } <nl> + else if ( len > FLOW_KNOBS - > PACKET_WARNING ) { <nl> + TraceEvent ( self - > warnAlwaysForLargePacket ? SevWarnAlways : SevWarn , " Net2_LargePacket " ) <nl> + . suppressFor ( 1 . 0 ) <nl> + . detail ( " ToPeer " , destination . getPrimaryAddress ( ) ) <nl> + . detail ( " Length " , ( int ) len ) <nl> + . detail ( " Token " , destination . token ) <nl> + . backtrace ( ) ; <nl> + <nl> + if ( g_network - > isSimulated ( ) ) <nl> + self - > warnAlwaysForLargePacket = false ; <nl> + } <nl> <nl> # if VALGRIND <nl> - SendBuffer * checkbuf = pb ; <nl> - while ( checkbuf ) { <nl> - int size = checkbuf - > bytes_written ; <nl> - const uint8_t * data = checkbuf - > data ; <nl> - VALGRIND_CHECK_MEM_IS_DEFINED ( data , size ) ; <nl> - checkbuf = checkbuf - > next ; <nl> - } <nl> + SendBuffer * checkbuf = pb ; <nl> + while ( checkbuf ) { <nl> + int size = checkbuf - > bytes_written ; <nl> + const uint8_t * data = checkbuf - > data ; <nl> + VALGRIND_CHECK_MEM_IS_DEFINED ( data , size ) ; <nl> + checkbuf = checkbuf - > next ; <nl> + } <nl> # endif <nl> <nl> - peer - > send ( pb , rp , firstUnsent ) ; <nl> - if ( destination . token ! = WLTOKEN_PING_PACKET ) { <nl> - peer - > lastDataPacketSentTime = now ( ) ; <nl> - } <nl> - return ( PacketID ) rp ; <nl> + peer - > send ( pb , rp , firstUnsent ) ; <nl> + if ( destination . token ! = WLTOKEN_PING_PACKET ) { <nl> + peer - > lastDataPacketSentTime = now ( ) ; <nl> } <nl> + return rp ; <nl> } <nl> <nl> - PacketID FlowTransport : : sendReliable ( ISerializeSource const & what , const Endpoint & destination ) { <nl> - return sendPacket ( self , what , destination , true , true ) ; <nl> + ReliablePacket * FlowTransport : : sendReliable ( ISerializeSource const & what , const Endpoint & destination ) { <nl> + if ( self - > isLocalAddress ( destination . getPrimaryAddress ( ) ) ) { <nl> + sendLocal ( self , what , destination ) ; <nl> + return nullptr ; <nl> + } <nl> + Reference < Peer > peer = self - > getPeer ( destination . getPrimaryAddress ( ) ) ; <nl> + return sendPacket ( self , peer , what , destination , true ) ; <nl> } <nl> <nl> - void FlowTransport : : cancelReliable ( PacketID pid ) { <nl> - ReliablePacket * p = ( ReliablePacket * ) pid ; <nl> + void FlowTransport : : cancelReliable ( ReliablePacket * p ) { <nl> if ( p ) p - > remove ( ) ; <nl> / / SOMEDAY : Call reliable . compact ( ) if a lot of memory is wasted in PacketBuffers by formerly reliable packets mixed with a few reliable ones . Don ' t forget to delref the new PacketBuffers since they are unsent . <nl> } <nl> <nl> - void FlowTransport : : sendUnreliable ( ISerializeSource const & what , const Endpoint & destination , bool openConnection ) { <nl> - sendPacket ( self , what , destination , false , openConnection ) ; <nl> + Reference < Peer > FlowTransport : : sendUnreliable ( ISerializeSource const & what , const Endpoint & destination , bool openConnection ) { <nl> + if ( self - > isLocalAddress ( destination . getPrimaryAddress ( ) ) ) { <nl> + sendLocal ( self , what , destination ) ; <nl> + return Reference < Peer > ( ) ; <nl> + } <nl> + Reference < Peer > peer = self - > getPeer ( destination . getPrimaryAddress ( ) , openConnection ) ; <nl> + sendPacket ( self , peer , what , destination , false ) ; <nl> + return peer ; <nl> } <nl> <nl> int FlowTransport : : getEndpointCount ( ) { <nl> mmm a / fdbrpc / FlowTransport . 
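
Replacing the opaque PacketID with a typed ReliablePacket* makes the reliable-send contract explicit: the packet stays in the peer's reliable list, and is recompacted into unsent on reconnect, until cancelReliable() unlinks it. A reduced sketch; ownership is simplified here, since the real code recycles PacketBuffers rather than deleting the packet outright:

    #include <set>

    struct ReliablePacket;
    std::set<ReliablePacket*> reliableList; // stand-in for Peer::reliable

    struct ReliablePacket {
        void remove() { reliableList.erase(this); } // unlink from the retry set
    };

    ReliablePacket* sendReliable() {
        auto* rp = new ReliablePacket;
        reliableList.insert(rp); // re-sent on every reconnect until removed
        return rp;
    }

    void cancelReliable(ReliablePacket* rp) {
        if (rp) { rp->remove(); delete rp; } // the sketch owns the allocation
    }
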
h <nl> ppp b / fdbrpc / FlowTransport . h <nl> <nl> # include " flow / genericactors . actor . h " <nl> # include " flow / network . h " <nl> # include " flow / FileIdentifier . h " <nl> + # include " flow / Net2Packet . h " <nl> <nl> # pragma pack ( push , 4 ) <nl> class Endpoint { <nl> class NetworkMessageReceiver { <nl> virtual bool isStream ( ) const { return false ; } <nl> } ; <nl> <nl> - typedef struct NetworkPacket * PacketID ; <nl> + struct TransportData ; <nl> + <nl> + struct Peer : public ReferenceCounted < Peer > { <nl> + TransportData * transport ; <nl> + NetworkAddress destination ; <nl> + UnsentPacketQueue unsent ; <nl> + ReliablePacketList reliable ; <nl> + AsyncTrigger dataToSend ; / / Triggered when unsent . empty ( ) becomes false <nl> + Future < Void > connect ; <nl> + AsyncTrigger resetPing ; <nl> + bool compatible ; <nl> + bool outgoingConnectionIdle ; / / We don ' t actually have a connection open and aren ' t trying to open one because we don ' t have anything to send <nl> + double lastConnectTime ; <nl> + double reconnectionDelay ; <nl> + int peerReferences ; <nl> + bool incompatibleProtocolVersionNewer ; <nl> + int64_t bytesReceived ; <nl> + double lastDataPacketSentTime ; <nl> + int outstandingReplies ; <nl> + <nl> + explicit Peer ( TransportData * transport , NetworkAddress const & destination ) <nl> + : transport ( transport ) , destination ( destination ) , outgoingConnectionIdle ( false ) , lastConnectTime ( 0 . 0 ) , <nl> + reconnectionDelay ( FLOW_KNOBS - > INITIAL_RECONNECTION_TIME ) , compatible ( true ) , outstandingReplies ( 0 ) , <nl> + incompatibleProtocolVersionNewer ( false ) , peerReferences ( - 1 ) , bytesReceived ( 0 ) , lastDataPacketSentTime ( now ( ) ) { } <nl> + <nl> + void send ( PacketBuffer * pb , ReliablePacket * rp , bool firstUnsent ) ; <nl> + <nl> + void prependConnectPacket ( ) ; <nl> + <nl> + void discardUnreliablePackets ( ) ; <nl> + <nl> + void onIncomingConnection ( Reference < Peer > self , Reference < IConnection > conn , Future < Void > reader ) ; <nl> + } ; <nl> <nl> class FlowTransport { <nl> public : <nl> class FlowTransport { <nl> / / Sets endpoint to a new local endpoint ( without changing its token ) which delivers messages to the given receiver <nl> / / Implementations may have limitations on when this function is called and what endpoint . token may be ! <nl> <nl> - PacketID sendReliable ( ISerializeSource const & what , const Endpoint & destination ) ; <nl> + ReliablePacket * sendReliable ( ISerializeSource const & what , const Endpoint & destination ) ; <nl> / / sendReliable will keep trying to deliver the data to the destination until cancelReliable is <nl> / / called . It will retry sending if the connection is closed or the failure manager reports <nl> / / the destination become available ( edge triggered ) . <nl> <nl> - void cancelReliable ( PacketID ) ; <nl> - / / Makes PacketID " unreliable " ( either the data or a connection close event will be delivered <nl> + void cancelReliable ( ReliablePacket * ) ; <nl> + / / Makes Packet " unreliable " ( either the data or a connection close event will be delivered <nl> / / eventually ) . It can still be used safely to send a reply to a " reliable " request . <nl> <nl> Reference < AsyncVar < bool > > getDegraded ( ) ; <nl> / / This async var will be set to true when the process cannot connect to a public network address that the failure monitor thinks is healthy . 
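
The Peer fields above encode a lazy-connect handshake: outgoingConnectionIdle means no socket is open because nothing is queued, and dataToSend is pulsed only on the empty-to-non-empty transition (the firstUnsent flag in Peer::send), so the connection keeper wakes exactly once per burst. A sketch with a bool standing in for AsyncTrigger:

    #include <deque>
    #include <string>

    struct LazyPeer {
        std::deque<std::string> unsent;
        bool outgoingConnectionIdle = true; // no socket yet: nothing to say
        bool triggered = false;             // stand-in for AsyncTrigger dataToSend

        void send(std::string pkt) {
            bool firstUnsent = unsent.empty();
            unsent.push_back(std::move(pkt));
            if (firstUnsent) triggered = true; // wake connectionKeeper exactly once
        }
    };
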
<nl> <nl> - void sendUnreliable ( ISerializeSource const & what , const Endpoint & destination , bool openConnection = true ) ; / / { cancelReliable ( sendReliable ( what , destination ) ) ; } <nl> + Reference < Peer > sendUnreliable ( ISerializeSource const & what , const Endpoint & destination , bool openConnection ) ; / / { cancelReliable ( sendReliable ( what , destination ) ) ; } <nl> <nl> int getEndpointCount ( ) ; <nl> / / for tracing only <nl> mmm a / fdbrpc / LoadBalance . actor . h <nl> ppp b / fdbrpc / LoadBalance . actor . h <nl> Future < REPLY_TYPE ( Request ) > loadBalance ( <nl> double nextMetric = 1e9 ; <nl> double bestTime = 1e9 ; <nl> double nextTime = 1e9 ; <nl> + int badServers = 0 ; <nl> + <nl> for ( int i = 0 ; i < alternatives - > size ( ) ; i + + ) { <nl> - if ( bestMetric < 1e8 & & i = = alternatives - > countBest ( ) ) { <nl> + if ( badServers < std : : min ( i , FLOW_KNOBS - > LOAD_BALANCE_MAX_BAD_OPTIONS + 1 ) & & i = = alternatives - > countBest ( ) ) { <nl> break ; <nl> } <nl> <nl> Future < REPLY_TYPE ( Request ) > loadBalance ( <nl> if ( now ( ) > qd . failedUntil ) { <nl> double thisMetric = qd . smoothOutstanding . smoothTotal ( ) ; <nl> double thisTime = qd . latency ; <nl> + if ( FLOW_KNOBS - > LOAD_BALANCE_PENALTY_IS_BAD & & qd . penalty > 1 . 001 ) { <nl> + + + badServers ; <nl> + } <nl> <nl> if ( thisMetric < bestMetric ) { <nl> if ( i ! = bestAlt ) { <nl> Future < REPLY_TYPE ( Request ) > loadBalance ( <nl> nextMetric = thisMetric ; <nl> nextTime = thisTime ; <nl> } <nl> + } else { <nl> + + + badServers ; <nl> } <nl> + } else { <nl> + + + badServers ; <nl> } <nl> } <nl> if ( nextMetric > 1e8 ) { <nl> mmm a / fdbrpc / fdbrpc . h <nl> ppp b / fdbrpc / fdbrpc . h <nl> class RequestStream { <nl> <nl> void send ( const T & value ) const { <nl> if ( queue - > isRemoteEndpoint ( ) ) { <nl> - FlowTransport : : transport ( ) . sendUnreliable ( SerializeSource < T > ( value ) , getEndpoint ( ) ) ; <nl> + FlowTransport : : transport ( ) . sendUnreliable ( SerializeSource < T > ( value ) , getEndpoint ( ) , true ) ; <nl> } <nl> else <nl> queue - > send ( value ) ; <nl> class RequestStream { <nl> if ( disc . isReady ( ) ) { <nl> return ErrorOr < REPLY_TYPE ( X ) > ( request_maybe_delivered ( ) ) ; <nl> } <nl> - FlowTransport : : transport ( ) . sendUnreliable ( SerializeSource < T > ( value ) , getEndpoint ( taskID ) ) ; <nl> + Reference < Peer > peer = FlowTransport : : transport ( ) . sendUnreliable ( SerializeSource < T > ( value ) , getEndpoint ( taskID ) , true ) ; <nl> auto & p = getReplyPromise ( value ) ; <nl> - return waitValueOrSignal ( p . getFuture ( ) , disc , getEndpoint ( taskID ) , p ) ; <nl> + return waitValueOrSignal ( p . getFuture ( ) , disc , getEndpoint ( taskID ) , p , peer ) ; <nl> } <nl> send ( value ) ; <nl> auto & p = getReplyPromise ( value ) ; <nl> class RequestStream { <nl> if ( disc . isReady ( ) ) { <nl> return ErrorOr < REPLY_TYPE ( X ) > ( request_maybe_delivered ( ) ) ; <nl> } <nl> - FlowTransport : : transport ( ) . sendUnreliable ( SerializeSource < T > ( value ) , getEndpoint ( ) ) ; <nl> + Reference < Peer > peer = FlowTransport : : transport ( ) . sendUnreliable ( SerializeSource < T > ( value ) , getEndpoint ( ) , true ) ; <nl> auto & p = getReplyPromise ( value ) ; <nl> - return waitValueOrSignal ( p . getFuture ( ) , disc , getEndpoint ( ) , p ) ; <nl> + return waitValueOrSignal ( p . getFuture ( ) , disc , getEndpoint ( ) , p , peer ) ; <nl> } <nl> else { <nl> send ( value ) ; <nl> mmm a / fdbrpc / genericactors . 
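
The loadBalance() change above replaces the old bestMetric early-exit with a badServers count: failed alternatives and, when LOAD_BALANCE_PENALTY_IS_BAD is set, penalized ones extend the scan past the "best" group, bounded by LOAD_BALANCE_MAX_BAD_OPTIONS. The scan-limit logic restated as a sketch, returning how many alternatives get examined:

    #include <algorithm>
    #include <vector>

    struct Option { bool failed; double penalty; };

    int pickScanLimit(const std::vector<Option>& opts, int countBest, int maxBadOptions) {
        int badServers = 0;
        for (int i = 0; i < (int)opts.size(); i++) {
            // Stop at the best group only if few enough bad options were seen so far.
            if (badServers < std::min(i, maxBadOptions + 1) && i == countBest) return i;
            if (opts[i].failed || opts[i].penalty > 1.001) ++badServers;
        }
        return (int)opts.size();
    }
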
actor . h <nl> ppp b / fdbrpc / genericactors . actor . h <nl> ACTOR template < class T > Future < Void > incrementalBroadcast ( Future < T > input , std <nl> / / Needed for the call to endpointNotFound ( ) <nl> # include " fdbrpc / FailureMonitor . h " <nl> <nl> + struct PeerHolder { <nl> + Reference < Peer > peer ; <nl> + explicit PeerHolder ( Reference < Peer > peer ) : peer ( peer ) { <nl> + if ( peer ) { <nl> + peer - > outstandingReplies + + ; <nl> + } <nl> + } <nl> + ~ PeerHolder ( ) { <nl> + if ( peer ) { <nl> + peer - > outstandingReplies - - ; <nl> + } <nl> + } <nl> + } ; <nl> + <nl> / / Implements tryGetReply , getReplyUnlessFailedFor <nl> ACTOR template < class X > <nl> - Future < ErrorOr < X > > waitValueOrSignal ( Future < X > value , Future < Void > signal , Endpoint endpoint , ReplyPromise < X > holdme = ReplyPromise < X > ( ) ) { <nl> + Future < ErrorOr < X > > waitValueOrSignal ( Future < X > value , Future < Void > signal , Endpoint endpoint , ReplyPromise < X > holdme = ReplyPromise < X > ( ) , Reference < Peer > peer = Reference < Peer > ( ) ) { <nl> + state PeerHolder holder = PeerHolder ( peer ) ; <nl> loop { <nl> try { <nl> choose { <nl> Future < ErrorOr < X > > waitValueOrSignal ( Future < X > value , Future < Void > signal , Endp <nl> } <nl> <nl> ACTOR template < class T > <nl> - Future < T > sendCanceler ( ReplyPromise < T > reply , PacketID send , Endpoint endpoint ) { <nl> + Future < T > sendCanceler ( ReplyPromise < T > reply , ReliablePacket * send , Endpoint endpoint ) { <nl> try { <nl> T t = wait ( reply . getFuture ( ) ) ; <nl> FlowTransport : : transport ( ) . cancelReliable ( send ) ; <nl> mmm a / fdbrpc / networksender . actor . h <nl> ppp b / fdbrpc / networksender . actor . h <nl> void networkSender ( Future < T > input , Endpoint endpoint ) { <nl> try { <nl> T value = wait ( input ) ; <nl> if ( FLOW_KNOBS - > USE_OBJECT_SERIALIZER ) { <nl> - FlowTransport : : transport ( ) . sendUnreliable ( SerializeSource < ErrorOr < EnsureTable < T > > > ( value ) , endpoint ) ; <nl> + FlowTransport : : transport ( ) . sendUnreliable ( SerializeSource < ErrorOr < EnsureTable < T > > > ( value ) , endpoint , false ) ; <nl> } else { <nl> FlowTransport : : transport ( ) . sendUnreliable ( SerializeBoolAnd < T > ( true , value ) , endpoint , false ) ; <nl> } <nl> void networkSender ( Future < T > input , Endpoint endpoint ) { <nl> / / if ( err . code ( ) = = error_code_broken_promise ) return ; <nl> ASSERT ( err . code ( ) ! = error_code_actor_cancelled ) ; <nl> if ( FLOW_KNOBS - > USE_OBJECT_SERIALIZER ) { <nl> - FlowTransport : : transport ( ) . sendUnreliable ( SerializeSource < ErrorOr < EnsureTable < T > > > ( err ) , endpoint ) ; <nl> + FlowTransport : : transport ( ) . sendUnreliable ( SerializeSource < ErrorOr < EnsureTable < T > > > ( err ) , endpoint , false ) ; <nl> } else { <nl> FlowTransport : : transport ( ) . sendUnreliable ( SerializeBoolAnd < Error > ( false , err ) , endpoint , false ) ; <nl> } <nl> mmm a / fdbserver / Coordination . actor . cpp <nl> ppp b / fdbserver / Coordination . actor . cpp <nl> ACTOR Future < Void > leaderRegister ( LeaderElectionRegInterface interf , Key key ) { <nl> / / TODO : use notify to only send a heartbeat once per interval <nl> availableLeaders . erase ( LeaderInfo ( req . prevChangeID ) ) ; <nl> availableLeaders . insert ( req . myInfo ) ; <nl> - req . reply . send ( currentNominee . present ( ) & & currentNominee . get ( ) . equalInternalId ( req . myInfo ) ) ; <nl> + req . reply . 
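
PeerHolder is a classic RAII guard: construction pins the peer by bumping outstandingReplies, destruction releases it, so every exit path of waitValueOrSignal(), normal return, error, or cancellation, balances the count. Reduced to its essentials, with copying disabled so the increment and decrement stay paired:

    struct Peer { int outstandingReplies = 0; };

    struct PeerHolder {
        Peer* peer;
        explicit PeerHolder(Peer* p) : peer(p) { if (peer) peer->outstandingReplies++; }
        ~PeerHolder() { if (peer) peer->outstandingReplies--; }
        PeerHolder(const PeerHolder&) = delete;            // one increment, one decrement
        PeerHolder& operator=(const PeerHolder&) = delete;
    };
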
send ( <nl> + LeaderHeartbeatReply { currentNominee . present ( ) & & currentNominee . get ( ) . equalInternalId ( req . myInfo ) } ) ; <nl> } <nl> when ( ForwardRequest req = waitNext ( interf . forward . getFuture ( ) ) ) { <nl> LeaderInfo newInfo ; <nl> ACTOR Future < Void > leaderServer ( LeaderElectionRegInterface interf , OnDemandStore <nl> when ( LeaderHeartbeatRequest req = waitNext ( interf . leaderHeartbeat . getFuture ( ) ) ) { <nl> Optional < LeaderInfo > forward = regs . getForward ( req . key ) ; <nl> if ( forward . present ( ) ) <nl> - req . reply . send ( false ) ; <nl> + req . reply . send ( LeaderHeartbeatReply { false } ) ; <nl> else <nl> regs . getInterface ( req . key , id ) . leaderHeartbeat . send ( req ) ; <nl> } <nl> mmm a / fdbserver / CoordinationInterface . h <nl> ppp b / fdbserver / CoordinationInterface . h <nl> struct CandidacyRequest { <nl> } <nl> } ; <nl> <nl> + struct LeaderHeartbeatReply { <nl> + constexpr static FileIdentifier file_identifier = 11 ; <nl> + <nl> + bool value = false ; <nl> + LeaderHeartbeatReply ( ) = default ; <nl> + explicit LeaderHeartbeatReply ( bool value ) : value ( value ) { } <nl> + <nl> + template < class Ar > <nl> + void serialize ( Ar & ar ) { <nl> + serializer ( ar , value ) ; <nl> + } <nl> + } ; <nl> + <nl> + inline bool operator = = ( const LeaderHeartbeatReply & lhs , const LeaderHeartbeatReply & rhs ) { <nl> + return lhs . value = = rhs . value ; <nl> + } <nl> + <nl> struct LeaderHeartbeatRequest { <nl> constexpr static FileIdentifier file_identifier = 9495992 ; <nl> Key key ; <nl> LeaderInfo myInfo ; <nl> UID prevChangeID ; <nl> - ReplyPromise < bool > reply ; <nl> + ReplyPromise < LeaderHeartbeatReply > reply ; <nl> <nl> LeaderHeartbeatRequest ( ) { } <nl> explicit LeaderHeartbeatRequest ( Key key , LeaderInfo const & myInfo , UID prevChangeID ) : key ( key ) , myInfo ( myInfo ) , prevChangeID ( prevChangeID ) { } <nl> mmm a / fdbserver / DataDistribution . actor . cpp <nl> ppp b / fdbserver / DataDistribution . actor . cpp <nl> ACTOR Future < Void > teamTracker ( DDTeamCollection * self , Reference < TCTeamInfo > tea <nl> } <nl> } catch ( Error & e ) { <nl> if ( logTeamEvents ) { <nl> - TraceEvent ( " TeamTrackerStopping " , self - > distributorId ) . detail ( " Team " , team - > getDesc ( ) ) ; <nl> + TraceEvent ( " TeamTrackerStopping " , self - > distributorId ) . detail ( " Team " , team - > getDesc ( ) ) . detail ( " Priority " , team - > getPriority ( ) ) ; <nl> } <nl> self - > priority_teams [ team - > getPriority ( ) ] - - ; <nl> if ( team - > isHealthy ( ) ) { <nl> ACTOR Future < Void > ddSnapCreateCore ( DistributorSnapRequest snapReq , Reference < As <nl> std : : vector < Future < Void > > disablePops ; <nl> for ( const auto & tlog : tlogs ) { <nl> disablePops . push_back ( <nl> - transformErrors ( throwErrorOr ( tlog . disablePopRequest . tryGetReply ( TLogDisablePopRequest ( snapReq . snapUID ) ) ) , operation_failed ( ) ) <nl> + transformErrors ( throwErrorOr ( tlog . disablePopRequest . tryGetReply ( TLogDisablePopRequest ( snapReq . snapUID ) ) ) , snap_disable_tlog_pop_failed ( ) ) <nl> ) ; <nl> } <nl> wait ( waitForAll ( disablePops ) ) ; <nl> ACTOR Future < Void > ddSnapCreateCore ( DistributorSnapRequest snapReq , Reference < As <nl> . detail ( " SnapPayload " , snapReq . snapPayload ) <nl> . detail ( " SnapUID " , snapReq . 
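
LeaderHeartbeatReply (and TLogRejoinReply later in this change) follow one pattern: a bare bool reply is wrapped in a one-field struct, presumably so it carries a file_identifier for the object serializer, and so it can define operator== for use with onEqual(). A generic restatement of the shape, with an assumed identifier value:

    #include <cstdint>

    struct ExampleBoolReply {
        static constexpr uint64_t file_identifier = 11; // per-type tag, value assumed
        bool value = false;
        ExampleBoolReply() = default;
        explicit ExampleBoolReply(bool v) : value(v) {}
    };

    inline bool operator==(const ExampleBoolReply& a, const ExampleBoolReply& b) {
        return a.value == b.value; // needed so onEqual() can compare replies
    }
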
snapUID ) ; <nl> / / snap local storage nodes <nl> - std : : vector < WorkerInterface > storageWorkers = wait ( getStorageWorkers ( cx , db , true / * localOnly * / ) ) ; <nl> + std : : vector < WorkerInterface > storageWorkers = wait ( transformErrors ( getStorageWorkers ( cx , db , true / * localOnly * / ) , snap_storage_failed ( ) ) ) ; <nl> TraceEvent ( " SnapDataDistributor_GotStorageWorkers " ) <nl> . detail ( " SnapPayload " , snapReq . snapPayload ) <nl> . detail ( " SnapUID " , snapReq . snapUID ) ; <nl> std : : vector < Future < Void > > storageSnapReqs ; <nl> for ( const auto & worker : storageWorkers ) { <nl> storageSnapReqs . push_back ( <nl> - transformErrors ( throwErrorOr ( worker . workerSnapReq . tryGetReply ( WorkerSnapRequest ( snapReq . snapPayload , snapReq . snapUID , LiteralStringRef ( " storage " ) ) ) ) , operation_failed ( ) ) <nl> + transformErrors ( throwErrorOr ( worker . workerSnapReq . tryGetReply ( WorkerSnapRequest ( snapReq . snapPayload , snapReq . snapUID , LiteralStringRef ( " storage " ) ) ) ) , snap_storage_failed ( ) ) <nl> ) ; <nl> } <nl> wait ( waitForAll ( storageSnapReqs ) ) ; <nl> ACTOR Future < Void > ddSnapCreateCore ( DistributorSnapRequest snapReq , Reference < As <nl> std : : vector < Future < Void > > tLogSnapReqs ; <nl> for ( const auto & tlog : tlogs ) { <nl> tLogSnapReqs . push_back ( <nl> - transformErrors ( throwErrorOr ( tlog . snapRequest . tryGetReply ( TLogSnapRequest ( snapReq . snapPayload , snapReq . snapUID , LiteralStringRef ( " tlog " ) ) ) ) , operation_failed ( ) ) <nl> + transformErrors ( throwErrorOr ( tlog . snapRequest . tryGetReply ( TLogSnapRequest ( snapReq . snapPayload , snapReq . snapUID , LiteralStringRef ( " tlog " ) ) ) ) , snap_tlog_failed ( ) ) <nl> ) ; <nl> } <nl> wait ( waitForAll ( tLogSnapReqs ) ) ; <nl> ACTOR Future < Void > ddSnapCreateCore ( DistributorSnapRequest snapReq , Reference < As <nl> std : : vector < Future < Void > > enablePops ; <nl> for ( const auto & tlog : tlogs ) { <nl> enablePops . push_back ( <nl> - transformErrors ( throwErrorOr ( tlog . enablePopRequest . tryGetReply ( TLogEnablePopRequest ( snapReq . snapUID ) ) ) , operation_failed ( ) ) <nl> + transformErrors ( throwErrorOr ( tlog . enablePopRequest . tryGetReply ( TLogEnablePopRequest ( snapReq . snapUID ) ) ) , snap_enable_tlog_pop_failed ( ) ) <nl> ) ; <nl> } <nl> wait ( waitForAll ( enablePops ) ) ; <nl> ACTOR Future < Void > ddSnapCreateCore ( DistributorSnapRequest snapReq , Reference < As <nl> std : : vector < Future < Void > > coordSnapReqs ; <nl> for ( const auto & worker : coordWorkers ) { <nl> coordSnapReqs . push_back ( <nl> - transformErrors ( throwErrorOr ( worker . workerSnapReq . tryGetReply ( WorkerSnapRequest ( snapReq . snapPayload , snapReq . snapUID , LiteralStringRef ( " coord " ) ) ) ) , operation_failed ( ) ) <nl> + transformErrors ( throwErrorOr ( worker . workerSnapReq . tryGetReply ( WorkerSnapRequest ( snapReq . snapPayload , snapReq . snapUID , LiteralStringRef ( " coord " ) ) ) ) , snap_coord_failed ( ) ) <nl> ) ; <nl> } <nl> wait ( waitForAll ( coordSnapReqs ) ) ; <nl> TraceEvent ( " SnapDataDistributor_AfterSnapCoords " ) <nl> . detail ( " SnapPayload " , snapReq . snapPayload ) <nl> . detail ( " SnapUID " , snapReq . snapUID ) ; <nl> - } catch ( Error & e ) { <nl> + } catch ( Error & err ) { <nl> + state Error e = err ; <nl> TraceEvent ( " SnapDataDistributor_SnapReqExit " ) <nl> . detail ( " SnapPayload " , snapReq . snapPayload ) <nl> . detail ( " SnapUID " , snapReq . snapUID ) <nl> . 
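
Each snapshot phase above now maps its generic failure to a phase-specific error (snap_disable_tlog_pop_failed, snap_storage_failed, snap_tlog_failed, ...), so callers learn which step broke. A sketch of that wrapping using exceptions in place of flow's transformErrors(); the pass-through of cancellation in the real code is omitted here:

    #include <functional>
    #include <stdexcept>

    struct SnapStorageFailed : std::runtime_error {
        SnapStorageFailed() : std::runtime_error("snap_storage_failed") {}
    };

    // Run one phase; rename any failure to the phase-specific error.
    void runPhase(const std::function<void()>& op) {
        try {
            op();
        } catch (const std::exception&) {
            throw SnapStorageFailed();
        }
    }
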
error ( e , true / * includeCancelled * / ) ; <nl> + if ( e . code ( ) = = error_code_snap_storage_failed <nl> + | | e . code ( ) = = error_code_snap_tlog_failed <nl> + | | e . code ( ) = = error_code_operation_cancelled ) { <nl> + / / enable tlog pop on local tlog nodes <nl> + std : : vector < TLogInterface > tlogs = db - > get ( ) . logSystemConfig . allLocalLogs ( false ) ; <nl> + try { <nl> + std : : vector < Future < Void > > enablePops ; <nl> + for ( const auto & tlog : tlogs ) { <nl> + enablePops . push_back ( <nl> + transformErrors ( throwErrorOr ( tlog . enablePopRequest . tryGetReply ( TLogEnablePopRequest ( snapReq . snapUID ) ) ) , snap_enable_tlog_pop_failed ( ) ) <nl> + ) ; <nl> + } <nl> + wait ( waitForAll ( enablePops ) ) ; <nl> + } catch ( Error & error ) { <nl> + TraceEvent ( SevDebug , " IgnoreEnableTLogPopFailure " ) ; <nl> + } <nl> + } <nl> throw e ; <nl> } <nl> return Void ( ) ; <nl> ACTOR Future < Void > ddSnapCreate ( DistributorSnapRequest snapReq , Reference < AsyncV <nl> TraceEvent ( " SnapDDCreateDBInfoChanged " ) <nl> . detail ( " SnapPayload " , snapReq . snapPayload ) <nl> . detail ( " SnapUID " , snapReq . snapUID ) ; <nl> - snapReq . reply . sendError ( operation_failed ( ) ) ; <nl> + snapReq . reply . sendError ( snap_with_recovery_unsupported ( ) ) ; <nl> } <nl> when ( wait ( ddSnapCreateCore ( snapReq , db ) ) ) { <nl> TraceEvent ( " SnapDDCreateSuccess " ) <nl> mmm a / fdbserver / DataDistributionQueue . actor . cpp <nl> ppp b / fdbserver / DataDistributionQueue . actor . cpp <nl> ACTOR Future < Void > dataDistributionRelocator ( DDQueueData * self , RelocateData rd <nl> state bool allHealthy = true ; <nl> state bool anyWithSource = false ; <nl> state std : : vector < std : : pair < Reference < IDataDistributionTeam > , bool > > bestTeams ; <nl> + state double startTime = now ( ) ; <nl> + state std : : vector < UID > destIds ; <nl> <nl> try { <nl> if ( now ( ) - self - > lastInterval < 1 . 0 ) { <nl> ACTOR Future < Void > dataDistributionRelocator ( DDQueueData * self , RelocateData rd <nl> wait ( delay ( SERVER_KNOBS - > BEST_TEAM_STUCK_DELAY , TaskPriority : : DataDistributionLaunch ) ) ; <nl> } <nl> <nl> - state std : : vector < UID > destIds ; <nl> + destIds . clear ( ) ; <nl> state std : : vector < UID > healthyIds ; <nl> state std : : vector < UID > extraIds ; <nl> state std : : vector < ShardsAffectedByTeamFailure : : Team > destinationTeams ; <nl> ACTOR Future < Void > dataDistributionRelocator ( DDQueueData * self , RelocateData rd <nl> <nl> / / onFinished . send ( rs ) ; <nl> if ( ! error . code ( ) ) { <nl> - TraceEvent ( relocateShardInterval . end ( ) , distributorId ) . detail ( " Result " , " Success " ) ; <nl> + TraceEvent ( relocateShardInterval . end ( ) , distributorId ) . detail ( " Duration " , now ( ) - startTime ) . detail ( " Result " , " Success " ) ; <nl> + if ( now ( ) - startTime > 600 ) { <nl> + TraceEvent ( SevWarnAlways , " RelocateShardTooLong " ) . detail ( " Duration " , now ( ) - startTime ) . detail ( " Dest " , describe ( destIds ) ) . detail ( " Src " , describe ( rd . src ) ) ; <nl> + } <nl> if ( rd . keys . begin = = keyServersPrefix ) { <nl> TraceEvent ( " MovedKeyServerKeys " ) . detail ( " Dest " , describe ( destIds ) ) . trackLatest ( " MovedKeyServers " ) ; <nl> } <nl> ACTOR Future < Void > dataDistributionRelocator ( DDQueueData * self , RelocateData rd <nl> } <nl> } <nl> } catch ( Error & e ) { <nl> - TraceEvent ( relocateShardInterval . end ( ) , distributorId ) . 
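
The new catch block performs compensating cleanup: if the snapshot failed after TLog pops were disabled, it best-effort re-enables them, swallowing any secondary error (the IgnoreEnableTLogPopFailure trace), before rethrowing the original failure. The shape of that pattern as a sketch:

    #include <functional>
    #include <vector>

    void snapshotWithCleanup(std::vector<std::function<void()>>& enablePops,
                             const std::function<void()>& doSnapshot) {
        try {
            doSnapshot();
        } catch (...) {
            for (auto& enable : enablePops) {
                try { enable(); } catch (...) { /* IgnoreEnableTLogPopFailure */ }
            }
            throw; // surface the original snapshot error
        }
    }
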
error ( e , true ) ; <nl> + TraceEvent ( relocateShardInterval . end ( ) , distributorId ) . error ( e , true ) . detail ( " Duration " , now ( ) - startTime ) ; <nl> + if ( now ( ) - startTime > 600 ) { <nl> + TraceEvent ( SevWarnAlways , " RelocateShardTooLong " ) . error ( e , true ) . detail ( " Duration " , now ( ) - startTime ) . detail ( " Dest " , describe ( destIds ) ) . detail ( " Src " , describe ( rd . src ) ) ; <nl> + } <nl> if ( ! signalledTransferComplete ) <nl> dataTransferComplete . send ( rd ) ; <nl> <nl> mmm a / fdbserver / FDBExecHelper . actor . cpp <nl> ppp b / fdbserver / FDBExecHelper . actor . cpp <nl> ExecCmdValueString : : ExecCmdValueString ( StringRef pCmdValueString ) { <nl> void ExecCmdValueString : : setCmdValueString ( StringRef pCmdValueString ) { <nl> / / reset everything <nl> binaryPath = StringRef ( ) ; <nl> - keyValueMap . clear ( ) ; <nl> <nl> / / set the new cmdValueString <nl> cmdValueString = pCmdValueString ; <nl> VectorRef < StringRef > ExecCmdValueString : : getBinaryArgs ( ) { <nl> return binaryArgs ; <nl> } <nl> <nl> - StringRef ExecCmdValueString : : getBinaryArgValue ( StringRef key ) { <nl> - StringRef res ; <nl> - if ( keyValueMap . find ( key ) ! = keyValueMap . end ( ) ) { <nl> - res = keyValueMap [ key ] ; <nl> - } <nl> - return res ; <nl> - } <nl> - <nl> void ExecCmdValueString : : parseCmdValue ( ) { <nl> StringRef param = this - > cmdValueString ; <nl> / / get the binary path <nl> - this - > binaryPath = param . eat ( LiteralStringRef ( " : " ) ) ; <nl> + this - > binaryPath = param . eat ( LiteralStringRef ( " " ) ) ; <nl> <nl> / / no arguments provided <nl> if ( param = = StringRef ( ) ) { <nl> void ExecCmdValueString : : parseCmdValue ( ) { <nl> <nl> / / extract the arguments <nl> while ( param ! = StringRef ( ) ) { <nl> - StringRef token = param . eat ( LiteralStringRef ( " , " ) ) ; <nl> + StringRef token = param . eat ( LiteralStringRef ( " " ) ) ; <nl> this - > binaryArgs . push_back ( this - > binaryArgs . arena ( ) , token ) ; <nl> - <nl> - StringRef key = token . eat ( LiteralStringRef ( " = " ) ) ; <nl> - keyValueMap . insert ( std : : make_pair ( key , token ) ) ; <nl> } <nl> return ; <nl> } <nl> ACTOR Future < int > spawnProcess ( std : : string binPath , std : : vector < std : : string > par <nl> } <nl> # endif <nl> <nl> - ACTOR Future < int > execHelper ( ExecCmdValueString * execArg , std : : string folder , std : : string role ) { <nl> - state StringRef uidStr = execArg - > getBinaryArgValue ( LiteralStringRef ( " uid " ) ) ; <nl> + ACTOR Future < int > execHelper ( ExecCmdValueString * execArg , UID snapUID , std : : string folder , std : : string role ) { <nl> + state Standalone < StringRef > uidStr = snapUID . toString ( ) ; <nl> state int err = 0 ; <nl> state Future < int > cmdErr ; <nl> state double maxWaitTime = SERVER_KNOBS - > SNAP_CREATE_MAX_TIMEOUT ; <nl> if ( ! g_network - > isSimulated ( ) ) { <nl> / / get bin path <nl> auto snapBin = execArg - > getBinaryPath ( ) ; <nl> - auto dataFolder = " path = " + folder ; <nl> std : : vector < std : : string > paramList ; <nl> - paramList . push_back ( snapBin . toString ( ) ) ; <nl> / / get user passed arguments <nl> auto listArgs = execArg - > getBinaryArgs ( ) ; <nl> for ( auto elem : listArgs ) { <nl> paramList . push_back ( elem . toString ( ) ) ; <nl> } <nl> / / get additional arguments <nl> - paramList . push_back ( dataFolder ) ; <nl> + paramList . push_back ( " - - path " ) ; <nl> + paramList . 
push_back ( folder ) ; <nl> const char * version = FDB_VT_VERSION ; <nl> - std : : string versionString = " version = " ; <nl> - versionString + = version ; <nl> - paramList . push_back ( versionString ) ; <nl> + paramList . push_back ( " - - version " ) ; <nl> + paramList . push_back ( version ) ; <nl> + paramList . push_back ( " - - role " ) ; <nl> paramList . push_back ( role ) ; <nl> + paramList . push_back ( " - - uid " ) ; <nl> + paramList . push_back ( uidStr . toString ( ) ) ; <nl> cmdErr = spawnProcess ( snapBin . toString ( ) , paramList , maxWaitTime , false / * isSync * / , 0 ) ; <nl> wait ( success ( cmdErr ) ) ; <nl> err = cmdErr . get ( ) ; <nl> mmm a / fdbserver / FDBExecHelper . actor . h <nl> ppp b / fdbserver / FDBExecHelper . actor . h <nl> class ExecCmdValueString { <nl> public : / / interfaces <nl> StringRef getBinaryPath ( ) ; <nl> VectorRef < StringRef > getBinaryArgs ( ) ; <nl> - StringRef getBinaryArgValue ( StringRef key ) ; <nl> void setCmdValueString ( StringRef cmdValueString ) ; <nl> StringRef getCmdValueString ( void ) ; <nl> <nl> class ExecCmdValueString { <nl> Standalone < StringRef > cmdValueString ; <nl> Standalone < VectorRef < StringRef > > binaryArgs ; <nl> StringRef binaryPath ; <nl> - std : : map < StringRef , StringRef > keyValueMap ; <nl> } ; <nl> <nl> / / FIXME : move this function to a common location <nl> class ExecCmdValueString { <nl> ACTOR Future < int > spawnProcess ( std : : string binPath , std : : vector < std : : string > paramList , double maxWaitTime , bool isSync , double maxSimDelayTime ) ; <nl> <nl> / / helper to run all the work related to running the exec command <nl> - ACTOR Future < int > execHelper ( ExecCmdValueString * execArg , std : : string folder , std : : string role ) ; <nl> + ACTOR Future < int > execHelper ( ExecCmdValueString * execArg , UID snapUID , std : : string folder , std : : string role ) ; <nl> <nl> / / returns true if the execUID op is in progress <nl> bool isExecOpInProgress ( UID execUID ) ; <nl> mmm a / fdbserver / Knobs . cpp <nl> ppp b / fdbserver / Knobs . cpp <nl> ServerKnobs : : ServerKnobs ( bool randomize , ClientKnobs * clientKnobs ) { <nl> init ( MAX_TEAMS_PER_SERVER , 5 * DESIRED_TEAMS_PER_SERVER ) ; <nl> init ( DD_SHARD_SIZE_GRANULARITY , 5000000 ) ; <nl> init ( DD_SHARD_SIZE_GRANULARITY_SIM , 500000 ) ; if ( randomize & & BUGGIFY ) DD_SHARD_SIZE_GRANULARITY_SIM = 0 ; <nl> - init ( DD_MOVE_KEYS_PARALLELISM , 20 ) ; if ( randomize & & BUGGIFY ) DD_MOVE_KEYS_PARALLELISM = 1 ; <nl> + init ( DD_MOVE_KEYS_PARALLELISM , 15 ) ; if ( randomize & & BUGGIFY ) DD_MOVE_KEYS_PARALLELISM = 1 ; <nl> init ( DD_MERGE_LIMIT , 2000 ) ; if ( randomize & & BUGGIFY ) DD_MERGE_LIMIT = 2 ; <nl> init ( DD_SHARD_METRICS_TIMEOUT , 60 . 0 ) ; if ( randomize & & BUGGIFY ) DD_SHARD_METRICS_TIMEOUT = 0 . 1 ; <nl> init ( DD_LOCATION_CACHE_SIZE , 2000000 ) ; if ( randomize & & BUGGIFY ) DD_LOCATION_CACHE_SIZE = 3 ; <nl> ServerKnobs : : ServerKnobs ( bool randomize , ClientKnobs * clientKnobs ) { <nl> init ( RATEKEEPER_FAILURE_TIME , 1 . 0 ) ; <nl> init ( REPLACE_INTERFACE_DELAY , 60 . 0 ) ; <nl> init ( REPLACE_INTERFACE_CHECK_DELAY , 5 . 0 ) ; <nl> - init ( COORDINATOR_REGISTER_INTERVAL , 30 . 0 ) ; <nl> - init ( CLIENT_REGISTER_INTERVAL , 300 . 0 ) ; <nl> + init ( COORDINATOR_REGISTER_INTERVAL , 5 . 0 ) ; <nl> + init ( CLIENT_REGISTER_INTERVAL , 600 . 0 ) ; <nl> <nl> init ( INCOMPATIBLE_PEERS_LOGGING_INTERVAL , 600 ) ; if ( randomize & & BUGGIFY ) INCOMPATIBLE_PEERS_LOGGING_INTERVAL = 60 . 
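
The exec-helper rework drops the old "key=value," argument encoding in favor of whitespace-separated user arguments plus standard flags appended by the worker. A sketch of the final argument vector handed to spawnProcess(); flag names and ordering follow the code above, while the values in the usage line are illustrative:

    #include <string>
    #include <vector>

    std::vector<std::string> buildSnapArgs(const std::vector<std::string>& userArgs,
                                           const std::string& folder,
                                           const std::string& version,
                                           const std::string& role,
                                           const std::string& uid) {
        std::vector<std::string> args = userArgs;
        args.insert(args.end(), { "--path", folder, "--version", version,
                                  "--role", role, "--uid", uid });
        return args;
    }

    // e.g. buildSnapArgs({"--verbose"}, "/data/4500", "6.2.0", "tlog", uidStr)
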
0 ; <nl> init ( EXPECTED_MASTER_FITNESS , ProcessClass : : UnsetFit ) ; <nl> ServerKnobs : : ServerKnobs ( bool randomize , ClientKnobs * clientKnobs ) { <nl> <nl> init ( MAX_TPS_HISTORY_SAMPLES , 600 ) ; <nl> init ( NEEDED_TPS_HISTORY_SAMPLES , 200 ) ; <nl> - init ( TARGET_DURABILITY_LAG_VERSIONS , 200e6 ) ; <nl> - init ( TARGET_DURABILITY_LAG_VERSIONS_BATCH , 100e6 ) ; <nl> + init ( TARGET_DURABILITY_LAG_VERSIONS , 350e6 ) ; / / Should be larger than STORAGE_DURABILITY_LAG_SOFT_MAX <nl> + init ( TARGET_DURABILITY_LAG_VERSIONS_BATCH , 250e6 ) ; / / Should be larger than STORAGE_DURABILITY_LAG_SOFT_MAX <nl> init ( DURABILITY_LAG_UNLIMITED_THRESHOLD , 50e6 ) ; <nl> init ( INITIAL_DURABILITY_LAG_MULTIPLIER , 1 . 02 ) ; <nl> init ( DURABILITY_LAG_REDUCTION_RATE , 0 . 9999 ) ; <nl> mmm a / fdbserver / LeaderElection . actor . cpp <nl> ppp b / fdbserver / LeaderElection . actor . cpp <nl> ACTOR Future < Void > tryBecomeLeaderInternal ( ServerCoordinators coordinators , Valu <nl> state vector < Future < Void > > true_heartbeats ; <nl> state vector < Future < Void > > false_heartbeats ; <nl> for ( int i = 0 ; i < coordinators . leaderElectionServers . size ( ) ; i + + ) { <nl> - Future < bool > hb = retryBrokenPromise ( coordinators . leaderElectionServers [ i ] . leaderHeartbeat , LeaderHeartbeatRequest ( coordinators . clusterKey , myInfo , prevChangeID ) , TaskPriority : : CoordinationReply ) ; <nl> - true_heartbeats . push_back ( onEqual ( hb , true ) ) ; <nl> - false_heartbeats . push_back ( onEqual ( hb , false ) ) ; <nl> + Future < LeaderHeartbeatReply > hb = retryBrokenPromise ( <nl> + coordinators . leaderElectionServers [ i ] . leaderHeartbeat , <nl> + LeaderHeartbeatRequest ( coordinators . clusterKey , myInfo , prevChangeID ) , TaskPriority : : CoordinationReply ) ; <nl> + true_heartbeats . push_back ( onEqual ( hb , LeaderHeartbeatReply { true } ) ) ; <nl> + false_heartbeats . push_back ( onEqual ( hb , LeaderHeartbeatReply { false } ) ) ; <nl> } <nl> <nl> state Future < Void > rate = delay ( SERVER_KNOBS - > HEARTBEAT_FREQUENCY , TaskPriority : : CoordinationReply ) | | asyncPriorityInfo - > onChange ( ) ; / / SOMEDAY : Move to server side ? <nl> mmm a / fdbserver / MasterInterface . h <nl> ppp b / fdbserver / MasterInterface . h <nl> struct MasterInterface { <nl> } <nl> } ; <nl> <nl> + struct TLogRejoinReply { <nl> + constexpr static FileIdentifier file_identifier = 11 ; <nl> + <nl> + / / false means someone else registered , so we should re - register . true means this master is recovered , so don ' t <nl> + / / send again to the same master . <nl> + bool masterIsRecovered ; <nl> + TLogRejoinReply ( ) = default ; <nl> + explicit TLogRejoinReply ( bool masterIsRecovered ) : masterIsRecovered ( masterIsRecovered ) { } <nl> + <nl> + template < class Ar > <nl> + void serialize ( Ar & ar ) { <nl> + serializer ( ar , masterIsRecovered ) ; <nl> + } <nl> + } ; <nl> + <nl> struct TLogRejoinRequest { <nl> constexpr static FileIdentifier file_identifier = 15692200 ; <nl> TLogInterface myInterface ; <nl> - ReplyPromise < bool > reply ; / / false means someone else registered , so we should re - register . true means this master is recovered , so don ' t send again to the same master . <nl> + ReplyPromise < TLogRejoinReply > reply ; <nl> <nl> TLogRejoinRequest ( ) { } <nl> explicit TLogRejoinRequest ( const TLogInterface & interf ) : myInterface ( interf ) { } <nl> mmm a / fdbserver / MasterProxyServer . actor . cpp <nl> ppp b / fdbserver / MasterProxyServer . actor . 
cpp <nl> ACTOR Future < Void > getRate ( UID myID , Reference < AsyncVar < ServerDBInfo > > db , int64 <nl> when ( wait ( leaseTimeout ) ) { <nl> * outTransactionRate = 0 ; <nl> * outBatchTransactionRate = 0 ; <nl> - / / TraceEvent ( " MasterProxyRate " , myID ) . detail ( " Rate " , 0 ) . detail ( " BatchRate " , 0 ) . detail ( " Lease " , " Expired " ) ; <nl> + / / TraceEvent ( " MasterProxyRate " , myID ) . detail ( " Rate " , 0 . 0 ) . detail ( " BatchRate " , 0 . 0 ) . detail ( " Lease " , " Expired " ) ; <nl> leaseTimeout = Never ( ) ; <nl> } <nl> } <nl> ACTOR Future < Void > queueTransactionStartRequests ( <nl> stats - > txnBatchPriorityStartIn + = req . transactionCount ; <nl> <nl> if ( transactionQueue - > empty ( ) ) { <nl> - if ( now ( ) - * lastGRVTime > * GRVBatchTime ) <nl> - * lastGRVTime = now ( ) - * GRVBatchTime ; <nl> - <nl> - forwardPromise ( GRVTimer , delayJittered ( * GRVBatchTime - ( now ( ) - * lastGRVTime ) , TaskPriority : : ProxyGRVTimer ) ) ; <nl> + forwardPromise ( GRVTimer , delayJittered ( std : : max ( 0 . 0 , * GRVBatchTime - ( now ( ) - * lastGRVTime ) ) , TaskPriority : : ProxyGRVTimer ) ) ; <nl> } <nl> <nl> transactionQueue - > push ( std : : make_pair ( req , counter - - ) ) ; <nl> struct TransactionRateInfo { <nl> TransactionRateInfo ( double rate ) : rate ( rate ) , limit ( 0 ) { } <nl> <nl> void reset ( double elapsed ) { <nl> - limit = std : : min ( 0 . 0 , limit ) + std : : min ( rate * elapsed , SERVER_KNOBS - > START_TRANSACTION_MAX_TRANSACTIONS_TO_START ) ; <nl> + limit = std : : min ( 0 . 0 , limit ) + rate * elapsed ; / / Adjust the limit based on the full elapsed interval in order to properly erase a deficit <nl> + limit = std : : min ( limit , rate * SERVER_KNOBS - > START_TRANSACTION_BATCH_INTERVAL_MAX ) ; / / Don ' t allow the rate to exceed what would be allowed in the maximum batch interval <nl> + limit = std : : min ( limit , SERVER_KNOBS - > START_TRANSACTION_MAX_TRANSACTIONS_TO_START ) ; <nl> } <nl> <nl> bool canStart ( int64_t numAlreadyStarted ) { <nl> ACTOR static Future < Void > transactionStarter ( <nl> waitNext ( GRVTimer . getFuture ( ) ) ; <nl> / / Select zero or more transactions to start <nl> double t = now ( ) ; <nl> - double elapsed = std : : min < double > ( now ( ) - lastGRVTime , SERVER_KNOBS - > START_TRANSACTION_BATCH_INTERVAL_MAX ) ; <nl> + double elapsed = now ( ) - lastGRVTime ; <nl> lastGRVTime = t ; <nl> <nl> if ( elapsed = = 0 ) elapsed = 1e - 15 ; / / resolve a possible indeterminant multiplication with infinite transaction rate <nl> ACTOR Future < Void > proxySnapCreate ( ProxySnapRequest snapReq , ProxyCommitData * co <nl> TraceEvent ( " SnapMasterProxy_WhiteListCheckFailed " ) <nl> . detail ( " SnapPayload " , snapReq . snapPayload ) <nl> . detail ( " SnapUID " , snapReq . snapUID ) ; <nl> - throw transaction_not_permitted ( ) ; <nl> + throw snap_path_not_whitelisted ( ) ; <nl> } <nl> / / db fully recovered check <nl> if ( commitData - > db - > get ( ) . recoveryState ! = RecoveryState : : FULLY_RECOVERED ) { <nl> ACTOR Future < Void > proxySnapCreate ( ProxySnapRequest snapReq , ProxyCommitData * co <nl> TraceEvent ( " SnapMasterProxy_ClusterNotFullyRecovered " ) <nl> . detail ( " SnapPayload " , snapReq . snapPayload ) <nl> . detail ( " SnapUID " , snapReq . 
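
TransactionRateInfo::reset() is the proxy's GRV budget refill, and the rewrite fixes a subtle deficit bug: the refill now uses the full elapsed interval, so a negative limit is actually paid off, and the result is then clamped both by what one maximum batch interval allows and by the absolute per-batch start cap. Restated as a sketch with invented knob values:

    #include <algorithm>

    struct RateBudget {
        double rate;                   // transactions per second granted by ratekeeper
        double limit = 0;              // remaining starts this batch (may go negative)
        double maxBatchInterval = 0.5; // stand-in for START_TRANSACTION_BATCH_INTERVAL_MAX
        double maxStarts = 100000;     // stand-in for START_TRANSACTION_MAX_TRANSACTIONS_TO_START

        void reset(double elapsed) {
            limit = std::min(0.0, limit) + rate * elapsed;    // erase deficit over full interval
            limit = std::min(limit, rate * maxBatchInterval); // cap accumulation
            limit = std::min(limit, maxStarts);
        }
    };
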
snapUID ) ; <nl> - throw cluster_not_fully_recovered ( ) ; <nl> + throw snap_not_fully_recovered_unsupported ( ) ; <nl> } <nl> <nl> auto result = <nl> ACTOR Future < Void > proxySnapCreate ( ProxySnapRequest snapReq , ProxyCommitData * co <nl> TraceEvent ( " SnapMasterProxy_LogAnitQuorumNotSupported " ) <nl> . detail ( " SnapPayload " , snapReq . snapPayload ) <nl> . detail ( " SnapUID " , snapReq . snapUID ) ; <nl> - throw txn_exec_log_anti_quorum ( ) ; <nl> + throw snap_log_anti_quorum_unsupported ( ) ; <nl> } <nl> <nl> / / send a snap request to DD <nl> mmm a / fdbserver / MoveKeys . actor . cpp <nl> ppp b / fdbserver / MoveKeys . actor . cpp <nl> Future < Void > checkMoveKeysLockReadOnly ( Transaction * tr , MoveKeysLock lock ) { <nl> return checkMoveKeysLock ( tr , lock , false ) ; <nl> } <nl> <nl> - ACTOR Future < Optional < UID > > checkReadWrite ( Future < ErrorOr < std : : pair < Version , Version > > > fReply , UID uid , Version version ) { <nl> - ErrorOr < std : : pair < Version , Version > > reply = wait ( fReply ) ; <nl> + ACTOR Future < Optional < UID > > checkReadWrite ( Future < ErrorOr < GetShardStateReply > > fReply , UID uid , Version version ) { <nl> + ErrorOr < GetShardStateReply > reply = wait ( fReply ) ; <nl> if ( ! reply . present ( ) | | reply . get ( ) . first < version ) <nl> return Optional < UID > ( ) ; <nl> return Optional < UID > ( uid ) ; <nl> ACTOR Future < vector < vector < UID > > > additionalSources ( Standalone < RangeResultRef > s <nl> return result ; <nl> } <nl> <nl> + ACTOR Future < Void > logWarningAfter ( const char * context , double duration , vector < UID > servers ) { <nl> + state double startTime = now ( ) ; <nl> + loop { <nl> + wait ( delay ( duration ) ) ; <nl> + TraceEvent ( SevWarnAlways , context ) . detail ( " Duration " , now ( ) - startTime ) . detail ( " Servers " , describe ( servers ) ) ; <nl> + } <nl> + } <nl> + <nl> / / Set keyServers [ keys ] . dest = servers <nl> / / Set serverKeys [ servers ] [ keys ] = active for each subrange of keys that the server did not already have , complete for each subrange that it already has <nl> / / Set serverKeys [ dest ] [ keys ] = " " for the dest servers of each existing shard in keys ( unless that destination is a member of servers OR if the source list is sufficiently degraded ) <nl> ACTOR Future < Void > startMoveKeys ( Database occ , KeyRange keys , vector < UID > servers , MoveKeysLock lock , FlowLock * startMoveKeysLock , UID relocationIntervalId ) { <nl> state TraceInterval interval ( " RelocateShard_StartMoveKeys " ) ; <nl> + state Future < Void > warningLogger = logWarningAfter ( " StartMoveKeysTooLong " , 600 , servers ) ; <nl> / / state TraceInterval waitInterval ( " " ) ; <nl> <nl> wait ( startMoveKeysLock - > take ( TaskPriority : : DataDistributionLaunch ) ) ; <nl> ACTOR Future < Void > startMoveKeys ( Database occ , KeyRange keys , vector < UID > serve <nl> ACTOR Future < Void > waitForShardReady ( StorageServerInterface server , KeyRange keys , Version minVersion , GetShardStateRequest : : waitMode mode ) { <nl> loop { <nl> try { <nl> - std : : pair < Version , Version > rep = wait ( server . getShardState . getReply ( GetShardStateRequest ( keys , mode ) , TaskPriority : : MoveKeys ) ) ; <nl> + GetShardStateReply rep = <nl> + wait ( server . getShardState . getReply ( GetShardStateRequest ( keys , mode ) , TaskPriority : : MoveKeys ) ) ; <nl> if ( rep . 
first > = minVersion ) { <nl> return Void ( ) ; <nl> } <nl> ACTOR Future < Void > finishMoveKeys ( Database occ , KeyRange keys , vector < UID > dest <nl> { <nl> state TraceInterval interval ( " RelocateShard_FinishMoveKeys " ) ; <nl> state TraceInterval waitInterval ( " " ) ; <nl> + state Future < Void > warningLogger = logWarningAfter ( " FinishMoveKeysTooLong " , 600 , destinationTeam ) ; <nl> state Key begin = keys . begin ; <nl> state Key endKey ; <nl> state int retries = 0 ; <nl> mmm a / fdbserver / OldTLogServer_4_6 . actor . cpp <nl> ppp b / fdbserver / OldTLogServer_4_6 . actor . cpp <nl> namespace oldTLog_4_6 { <nl> req . myInterface = tli ; <nl> TraceEvent ( " TLogRejoining " , self - > dbgid ) . detail ( " Master " , self - > dbInfo - > get ( ) . master . id ( ) ) ; <nl> choose { <nl> - when ( bool success = wait ( brokenPromiseToNever ( self - > dbInfo - > get ( ) . master . tlogRejoin . getReply ( req ) ) ) ) { <nl> - if ( success ) <nl> - lastMasterID = self - > dbInfo - > get ( ) . master . id ( ) ; <nl> - } <nl> - when ( wait ( self - > dbInfo - > onChange ( ) ) ) { } <nl> + when ( TLogRejoinReply rep = <nl> + wait ( brokenPromiseToNever ( self - > dbInfo - > get ( ) . master . tlogRejoin . getReply ( req ) ) ) ) { <nl> + if ( rep . masterIsRecovered ) lastMasterID = self - > dbInfo - > get ( ) . master . id ( ) ; <nl> + } <nl> + when ( wait ( self - > dbInfo - > onChange ( ) ) ) { } <nl> } <nl> } else { <nl> wait ( self - > dbInfo - > onChange ( ) ) ; <nl> mmm a / fdbserver / OldTLogServer_6_0 . actor . cpp <nl> ppp b / fdbserver / OldTLogServer_6_0 . actor . cpp <nl> ACTOR Future < Void > rejoinMasters ( TLogData * self , TLogInterface tli , DBRecoveryC <nl> TLogRejoinRequest req ( tli ) ; <nl> TraceEvent ( " TLogRejoining " , self - > dbgid ) . detail ( " Master " , self - > dbInfo - > get ( ) . master . id ( ) ) ; <nl> choose { <nl> - when ( bool success = wait ( brokenPromiseToNever ( self - > dbInfo - > get ( ) . master . tlogRejoin . getReply ( req ) ) ) ) { <nl> - if ( success ) <nl> - lastMasterID = self - > dbInfo - > get ( ) . master . id ( ) ; <nl> + when ( TLogRejoinReply rep = <nl> + wait ( brokenPromiseToNever ( self - > dbInfo - > get ( ) . master . tlogRejoin . getReply ( req ) ) ) ) { <nl> + if ( rep . masterIsRecovered ) lastMasterID = self - > dbInfo - > get ( ) . master . id ( ) ; <nl> } <nl> when ( wait ( self - > dbInfo - > onChange ( ) ) ) { } <nl> } <nl> tLogSnapCreate ( TLogSnapRequest snapReq , TLogData * self , Reference < LogData > logDa <nl> } <nl> ExecCmdValueString snapArg ( snapReq . snapPayload ) ; <nl> try { <nl> - Standalone < StringRef > role = LiteralStringRef ( " role = " ) . withSuffix ( snapReq . role ) ; <nl> - int err = wait ( execHelper ( & snapArg , self - > dataFolder , role . toString ( ) ) ) ; <nl> + int err = wait ( execHelper ( & snapArg , snapReq . snapUID , self - > dataFolder , snapReq . role . toString ( ) ) ) ; <nl> <nl> std : : string uidStr = snapReq . snapUID . toString ( ) ; <nl> TraceEvent ( " ExecTraceTLog " ) <nl> mmm a / fdbserver / Ratekeeper . actor . cpp <nl> ppp b / fdbserver / Ratekeeper . actor . 
cpp <nl> struct StorageQueueInfo { <nl> Smoother verySmoothDurableVersion , smoothLatestVersion ; <nl> Smoother smoothFreeSpace ; <nl> Smoother smoothTotalSpace ; <nl> - double localRateLimit ; <nl> limitReason_t limitReason ; <nl> StorageQueueInfo ( UID id , LocalityData locality ) : valid ( false ) , id ( id ) , locality ( locality ) , smoothDurableBytes ( SERVER_KNOBS - > SMOOTHING_AMOUNT ) , <nl> smoothInputBytes ( SERVER_KNOBS - > SMOOTHING_AMOUNT ) , verySmoothDurableBytes ( SERVER_KNOBS - > SLOW_SMOOTHING_AMOUNT ) , <nl> struct RatekeeperLimits { <nl> logTargetBytes ( logTargetBytes ) , <nl> logSpringBytes ( logSpringBytes ) , <nl> maxVersionDifference ( maxVersionDifference ) , <nl> - durabilityLagTargetVersions ( durabilityLagTargetVersions ) , <nl> + durabilityLagTargetVersions ( durabilityLagTargetVersions + SERVER_KNOBS - > MAX_READ_TRANSACTION_LIFE_VERSIONS ) , / / The read transaction life versions are expected to not be durable on the storage servers <nl> durabilityLagLimit ( std : : numeric_limits < double > : : infinity ( ) ) , <nl> lastDurabilityLag ( 0 ) , <nl> context ( context ) <nl> ACTOR Future < Void > trackStorageServerQueueInfo ( RatekeeperData * self , StorageSer <nl> myQueueInfo - > value . valid = true ; <nl> myQueueInfo - > value . prevReply = myQueueInfo - > value . lastReply ; <nl> myQueueInfo - > value . lastReply = reply . get ( ) ; <nl> - myQueueInfo - > value . localRateLimit = reply . get ( ) . localRateLimit ; <nl> if ( myQueueInfo - > value . prevReply . instanceID ! = reply . get ( ) . instanceID ) { <nl> myQueueInfo - > value . smoothDurableBytes . reset ( reply . get ( ) . bytesDurable ) ; <nl> myQueueInfo - > value . verySmoothDurableBytes . reset ( reply . get ( ) . bytesDurable ) ; <nl> void updateRate ( RatekeeperData * self , RatekeeperLimits * limits ) { <nl> int64_t worstStorageQueueStorageServer = 0 ; <nl> int64_t limitingStorageQueueStorageServer = 0 ; <nl> int64_t worstDurabilityLag = 0 ; <nl> - double worstStorageLocalLimit = 0 ; <nl> - double limitingStorageLocalLimit = 0 ; <nl> <nl> std : : multimap < double , StorageQueueInfo * > storageTpsLimitReverseIndex ; <nl> std : : multimap < int64_t , StorageQueueInfo * > storageDurabilityLagReverseIndex ; <nl> void updateRate ( RatekeeperData * self , RatekeeperLimits * limits ) { <nl> <nl> int64_t storageQueue = ss . lastReply . bytesInput - ss . smoothDurableBytes . smoothTotal ( ) ; <nl> worstStorageQueueStorageServer = std : : max ( worstStorageQueueStorageServer , storageQueue ) ; <nl> - worstStorageLocalLimit = std : : min ( worstStorageLocalLimit , ss . localRateLimit ) ; <nl> <nl> int64_t storageDurabilityLag = ss . smoothLatestVersion . smoothTotal ( ) - ss . verySmoothDurableVersion . smoothTotal ( ) ; <nl> worstDurabilityLag = std : : max ( worstDurabilityLag , storageDurabilityLag ) ; <nl> void updateRate ( RatekeeperData * self , RatekeeperLimits * limits ) { <nl> } <nl> <nl> limitingStorageQueueStorageServer = ss - > second - > lastReply . bytesInput - ss - > second - > smoothDurableBytes . smoothTotal ( ) ; <nl> - limitingStorageLocalLimit = ss - > second - > lastReply . localRateLimit ; <nl> limits - > tpsLimit = ss - > first ; <nl> reasonID = storageTpsLimitReverseIndex . begin ( ) - > second - > id ; / / Although we aren ' t controlling based on the worst SS , we still report it as the limiting process <nl> limitReason = ssReasons [ reasonID ] ; <nl> void updateRate ( RatekeeperData * self , RatekeeperLimits * limits ) { <nl> . 
detail ( " WorstFreeSpaceTLog " , worstFreeSpaceTLog ) <nl> . detail ( " WorstStorageServerQueue " , worstStorageQueueStorageServer ) <nl> . detail ( " LimitingStorageServerQueue " , limitingStorageQueueStorageServer ) <nl> - . detail ( " WorstStorageLocalLimit " , worstStorageLocalLimit ) <nl> - . detail ( " LimitingStorageLocalLimit " , limitingStorageLocalLimit ) <nl> . detail ( " WorstTLogQueue " , worstStorageQueueTLog ) <nl> . detail ( " TotalDiskUsageBytes " , totalDiskUsageBytes ) <nl> . detail ( " WorstStorageServerVersionLag " , worstVersionLag ) <nl> . detail ( " LimitingStorageServerVersionLag " , limitingVersionLag ) <nl> - . detail ( " WorstDurabilityLag " , worstDurabilityLag ) <nl> - . detail ( " LimitingDurabilityLag " , limitingDurabilityLag ) <nl> + . detail ( " WorstStorageServerDurabilityLag " , worstDurabilityLag ) <nl> + . detail ( " LimitingStorageServerDurabilityLag " , limitingDurabilityLag ) <nl> . trackLatest ( name . c_str ( ) ) ; <nl> } <nl> } <nl> mmm a / fdbserver / ResolverInterface . h <nl> ppp b / fdbserver / ResolverInterface . h <nl> struct ResolveTransactionBatchRequest { <nl> } <nl> } ; <nl> <nl> + struct ResolutionMetricsReply { <nl> + constexpr static FileIdentifier file_identifier = 3 ; <nl> + <nl> + int64_t value ; <nl> + ResolutionMetricsReply ( ) = default ; <nl> + explicit ResolutionMetricsReply ( int64_t value ) : value ( value ) { } <nl> + <nl> + template < class Ar > <nl> + void serialize ( Ar & ar ) { <nl> + serializer ( ar , value ) ; <nl> + } <nl> + } ; <nl> + <nl> struct ResolutionMetricsRequest { <nl> constexpr static FileIdentifier file_identifier = 11663527 ; <nl> - ReplyPromise < int64_t > reply ; <nl> + ReplyPromise < ResolutionMetricsReply > reply ; <nl> <nl> template < class Archive > <nl> void serialize ( Archive & ar ) { <nl> mmm a / fdbserver / Status . actor . cpp <nl> ppp b / fdbserver / Status . actor . cpp <nl> static JsonBuilderObject machineStatusFetcher ( WorkerEvents mMetrics , vector < Work <nl> return machineMap ; <nl> } <nl> <nl> + JsonBuilderObject getLagObject ( int64_t versions ) { <nl> + JsonBuilderObject lag ; <nl> + lag [ " versions " ] = versions ; <nl> + lag [ " seconds " ] = versions / ( double ) SERVER_KNOBS - > VERSIONS_PER_SECOND ; <nl> + return lag ; <nl> + } <nl> + <nl> struct MachineMemoryInfo { <nl> double memoryUsage ; <nl> double numProcesses ; <nl> struct RolesInfo { <nl> obj [ " read_latency_bands " ] = addLatencyBandInfo ( readLatencyMetrics ) ; <nl> } <nl> <nl> - JsonBuilderObject dataLag ; <nl> - dataLag [ " versions " ] = versionLag ; <nl> - dataLagSeconds = versionLag / ( double ) SERVER_KNOBS - > VERSIONS_PER_SECOND ; <nl> - dataLag [ " seconds " ] = dataLagSeconds ; <nl> - <nl> - JsonBuilderObject durabilityLag ; <nl> - durabilityLag [ " versions " ] = version - durableVersion ; <nl> - durabilityLag [ " seconds " ] = ( version - durableVersion ) / ( double ) SERVER_KNOBS - > VERSIONS_PER_SECOND ; <nl> - <nl> - obj [ " data_lag " ] = dataLag ; <nl> - obj [ " durability_lag " ] = durabilityLag ; <nl> + obj [ " data_lag " ] = getLagObject ( versionLag ) ; <nl> + obj [ " durability_lag " ] = getLagObject ( version - durableVersion ) ; <nl> <nl> } catch ( Error & e ) { <nl> if ( e . code ( ) ! = error_code_attribute_not_found ) <nl> struct RolesInfo { <nl> JsonBuilderObject & addRole ( std : : string const & role , InterfaceType & iface ) { <nl> return addRole ( iface . address ( ) , role , iface . 
id ( ) ) ; <nl> } <nl> + JsonBuilderObject & addCoordinatorRole ( NetworkAddress addr ) { <nl> + JsonBuilderObject obj ; <nl> + obj [ " role " ] = " coordinator " ; <nl> + return roles . insert ( std : : make_pair ( addr , obj ) ) - > second ; <nl> + } <nl> JsonBuilderArray getStatusForAddress ( NetworkAddress a ) { <nl> JsonBuilderArray v ; <nl> auto it = roles . lower_bound ( a ) ; <nl> struct RolesInfo { <nl> ACTOR static Future < JsonBuilderObject > processStatusFetcher ( <nl> Reference < AsyncVar < struct ServerDBInfo > > db , std : : vector < WorkerDetails > workers , WorkerEvents pMetrics , <nl> WorkerEvents mMetrics , WorkerEvents nMetrics , WorkerEvents errors , WorkerEvents traceFileOpenErrors , <nl> - WorkerEvents programStarts , std : : map < std : : string , std : : vector < JsonBuilderObject > > processIssues , <nl> + WorkerEvents programStarts , std : : map < std : : string , std : : vector < JsonBuilderObject > > processIssues , <nl> vector < std : : pair < StorageServerInterface , EventMap > > storageServers , <nl> vector < std : : pair < TLogInterface , EventMap > > tLogs , vector < std : : pair < MasterProxyInterface , EventMap > > proxies , <nl> - Database cx , Optional < DatabaseConfiguration > configuration , Optional < Key > healthyZone , std : : set < std : : string > * incomplete_reasons ) { <nl> + ServerCoordinators coordinators , Database cx , Optional < DatabaseConfiguration > configuration , <nl> + Optional < Key > healthyZone , std : : set < std : : string > * incomplete_reasons ) { <nl> <nl> state JsonBuilderObject processMap ; <nl> <nl> ACTOR static Future < JsonBuilderObject > processStatusFetcher ( <nl> } <nl> } <nl> <nl> + for ( auto & coordinator : coordinators . ccf - > getConnectionString ( ) . coordinators ( ) ) { <nl> + roles . addCoordinatorRole ( coordinator ) ; <nl> + } <nl> + <nl> state std : : vector < std : : pair < MasterProxyInterface , EventMap > > : : iterator proxy ; <nl> for ( proxy = proxies . begin ( ) ; proxy ! = proxies . end ( ) ; + + proxy ) { <nl> roles . addRole ( " proxy " , proxy - > first , proxy - > second ) ; <nl> struct LoadConfigurationResult { <nl> Optional < Key > healthyZone ; <nl> double healthyZoneSeconds ; <nl> bool rebalanceDDIgnored ; <nl> + bool dataDistributionDisabled ; <nl> <nl> - LoadConfigurationResult ( ) : fullReplication ( true ) , healthyZoneSeconds ( 0 ) , rebalanceDDIgnored ( false ) { } <nl> + LoadConfigurationResult ( ) : fullReplication ( true ) , healthyZoneSeconds ( 0 ) , rebalanceDDIgnored ( false ) , dataDistributionDisabled ( false ) { } <nl> } ; <nl> <nl> ACTOR static Future < std : : pair < Optional < DatabaseConfiguration > , Optional < LoadConfigurationResult > > > loadConfiguration ( Database cx , JsonBuilderArray * messages , std : : set < std : : string > * status_incomplete_reasons ) { <nl> ACTOR static Future < std : : pair < Optional < DatabaseConfiguration > , Optional < LoadConfi <nl> } <nl> state Future < Optional < Value > > healthyZoneValue = tr . get ( healthyZoneKey ) ; <nl> state Future < Optional < Value > > rebalanceDDIgnored = tr . get ( rebalanceDDIgnoreKey ) ; <nl> + state Future < Optional < Value > > ddModeKey = tr . 
get ( dataDistributionModeKey ) ; <nl> <nl> choose { <nl> - when ( wait ( waitForAll ( replicasFutures ) & & success ( healthyZoneValue ) & & success ( rebalanceDDIgnored ) ) ) { <nl> + when ( wait ( waitForAll ( replicasFutures ) & & success ( healthyZoneValue ) & & success ( rebalanceDDIgnored ) & & success ( ddModeKey ) ) ) { <nl> int unreplicated = 0 ; <nl> for ( int i = 0 ; i < result . get ( ) . regions . size ( ) ; i + + ) { <nl> - if ( ! replicasFutures [ i ] . get ( ) . present ( ) | | decodeDatacenterReplicasValue ( replicasFutures [ i ] . get ( ) . get ( ) ) < result . get ( ) . storageTeamSize ) { <nl> + if ( ! replicasFutures [ i ] . get ( ) . present ( ) | | decodeDatacenterReplicasValue ( replicasFutures [ i ] . get ( ) . get ( ) ) < result . get ( ) . storageTeamSize ) { <nl> unreplicated + + ; <nl> } <nl> } <nl> ACTOR static Future < std : : pair < Optional < DatabaseConfiguration > , Optional < LoadConfi <nl> res . fullReplication = ( ! unreplicated | | ( result . get ( ) . usableRegions = = 1 & & unreplicated < result . get ( ) . regions . size ( ) ) ) ; <nl> if ( healthyZoneValue . get ( ) . present ( ) ) { <nl> auto healthyZone = decodeHealthyZoneValue ( healthyZoneValue . get ( ) . get ( ) ) ; <nl> - if ( healthyZone . second > tr . getReadVersion ( ) . get ( ) ) { <nl> + if ( healthyZone . first = = ignoreSSFailuresZoneString ) { <nl> + res . healthyZone = healthyZone . first ; <nl> + } <nl> + else if ( healthyZone . second > tr . getReadVersion ( ) . get ( ) ) { <nl> res . healthyZone = healthyZone . first ; <nl> res . healthyZoneSeconds = ( healthyZone . second - tr . getReadVersion ( ) . get ( ) ) / CLIENT_KNOBS - > CORE_VERSIONSPERSECOND ; <nl> } <nl> } <nl> res . rebalanceDDIgnored = rebalanceDDIgnored . get ( ) . present ( ) ; <nl> + if ( ddModeKey . get ( ) . present ( ) ) { <nl> + BinaryReader rd ( ddModeKey . get ( ) . get ( ) , Unversioned ( ) ) ; <nl> + int currentMode ; <nl> + rd > > currentMode ; <nl> + if ( currentMode = = 0 ) { <nl> + res . dataDistributionDisabled = true ; <nl> + } <nl> + } <nl> loadResult = res ; <nl> } <nl> when ( wait ( getConfTimeout ) ) { <nl> ACTOR static Future < JsonBuilderObject > workloadStatusFetcher ( Reference < AsyncVar < <nl> ( * data_overlay ) [ " least_operating_space_bytes_storage_server " ] = std : : max ( worstFreeSpaceStorageServer , ( int64_t ) 0 ) ; <nl> ( * qos ) . setKeyRawNumber ( " worst_queue_bytes_storage_server " , ratekeeper . getValue ( " WorstStorageServerQueue " ) ) ; <nl> ( * qos ) . setKeyRawNumber ( " limiting_queue_bytes_storage_server " , ratekeeper . getValue ( " LimitingStorageServerQueue " ) ) ; <nl> + <nl> + / / TODO : These can be removed in the next release after 6 . 2 <nl> ( * qos ) . setKeyRawNumber ( " worst_version_lag_storage_server " , ratekeeper . getValue ( " WorstStorageServerVersionLag " ) ) ; <nl> ( * qos ) . setKeyRawNumber ( " limiting_version_lag_storage_server " , ratekeeper . getValue ( " LimitingStorageServerVersionLag " ) ) ; <nl> + <nl> + ( * qos ) [ " worst_data_lag_storage_server " ] = getLagObject ( ratekeeper . getInt64 ( " WorstStorageServerVersionLag " ) ) ; <nl> + ( * qos ) [ " limiting_data_lag_storage_server " ] = getLagObject ( ratekeeper . getInt64 ( " LimitingStorageServerVersionLag " ) ) ; <nl> + ( * qos ) [ " worst_durability_lag_storage_server " ] = getLagObject ( ratekeeper . getInt64 ( " WorstStorageServerDurabilityLag " ) ) ; <nl> + ( * qos ) [ " limiting_durability_lag_storage_server " ] = getLagObject ( ratekeeper . 
getInt64 ( " LimitingStorageServerDurabilityLag " ) ) ; <nl> } <nl> <nl> if ( tlogCount > 0 ) { <nl> ACTOR Future < StatusReply > clusterGetStatus ( <nl> if ( loadResult . get ( ) . rebalanceDDIgnored ) { <nl> statusObj [ " data_distribution_disabled_for_rebalance " ] = true ; <nl> } <nl> + if ( loadResult . get ( ) . dataDistributionDisabled ) { <nl> + statusObj [ " data_distribution_disabled " ] = true ; <nl> + } <nl> } <nl> <nl> statusObj [ " machines " ] = machineStatusFetcher ( mMetrics , workers , configuration , & status_incomplete_reasons ) ; <nl> ACTOR Future < StatusReply > clusterGetStatus ( <nl> futures2 . push_back ( layerStatusFetcher ( cx , & messages , & status_incomplete_reasons ) ) ; <nl> futures2 . push_back ( lockedStatusFetcher ( db , & messages , & status_incomplete_reasons ) ) ; <nl> futures2 . push_back ( clusterSummaryStatisticsFetcher ( pMetrics , storageServerFuture , tLogFuture , & status_incomplete_reasons ) ) ; <nl> - <nl> state std : : vector < JsonBuilderObject > workerStatuses = wait ( getAll ( futures2 ) ) ; <nl> <nl> int oldLogFaultTolerance = 100 ; <nl> ACTOR Future < StatusReply > clusterGetStatus ( <nl> <nl> JsonBuilderObject processStatus = wait ( processStatusFetcher ( db , workers , pMetrics , mMetrics , networkMetrics , <nl> latestError , traceFileOpenErrors , programStarts , <nl> - processIssues , storageServers , tLogs , proxies , cx , <nl> - configuration , loadResult . present ( ) ? loadResult . get ( ) . healthyZone : Optional < Key > ( ) , <nl> + processIssues , storageServers , tLogs , proxies , <nl> + coordinators , cx , configuration , <nl> + loadResult . present ( ) ? loadResult . get ( ) . healthyZone : Optional < Key > ( ) , <nl> & status_incomplete_reasons ) ) ; <nl> statusObj [ " processes " ] = processStatus ; <nl> statusObj [ " clients " ] = clientStatusFetcher ( clientStatus ) ; <nl> ACTOR Future < StatusReply > clusterGetStatus ( <nl> incompatibleConnectionsArray . push_back ( it . toString ( ) ) ; <nl> } <nl> statusObj [ " incompatible_connections " ] = incompatibleConnectionsArray ; <nl> - <nl> - StatusObject datacenterLag ; <nl> - datacenterLag [ " versions " ] = datacenterVersionDifference ; <nl> - datacenterLag [ " seconds " ] = datacenterVersionDifference / ( double ) SERVER_KNOBS - > VERSIONS_PER_SECOND ; <nl> - statusObj [ " datacenter_lag " ] = datacenterLag ; <nl> + statusObj [ " datacenter_lag " ] = getLagObject ( datacenterVersionDifference ) ; <nl> <nl> int totalDegraded = 0 ; <nl> for ( auto & it : workers ) { <nl> mmm a / fdbserver / TLogInterface . h <nl> ppp b / fdbserver / TLogInterface . h <nl> struct TagMessagesRef { <nl> } <nl> } ; <nl> <nl> + struct TLogCommitReply { <nl> + constexpr static FileIdentifier file_identifier = 3 ; <nl> + <nl> + Version version ; <nl> + TLogCommitReply ( ) = default ; <nl> + explicit TLogCommitReply ( Version version ) : version ( version ) { } <nl> + <nl> + template < class Ar > <nl> + void serialize ( Ar & ar ) { <nl> + serializer ( ar , version ) ; <nl> + } <nl> + } ; <nl> + <nl> struct TLogCommitRequest { <nl> constexpr static FileIdentifier file_identifier = 4022206 ; <nl> Arena arena ; <nl> struct TLogCommitRequest { <nl> <nl> StringRef messages ; / / Each message prefixed by a 4 - byte length <nl> <nl> - ReplyPromise < Version > reply ; <nl> + ReplyPromise < TLogCommitReply > reply ; <nl> Optional < UID > debugID ; <nl> <nl> TLogCommitRequest ( ) { } <nl> mmm a / fdbserver / TLogServer . actor . cpp <nl> ppp b / fdbserver / TLogServer . actor . 
cpp <nl> ACTOR Future < Void > rejoinMasters ( TLogData * self , TLogInterface tli , DBRecoveryC <nl> TLogRejoinRequest req ( tli ) ; <nl> TraceEvent ( " TLogRejoining " , self - > dbgid ) . detail ( " Master " , self - > dbInfo - > get ( ) . master . id ( ) ) ; <nl> choose { <nl> - when ( bool success = wait ( brokenPromiseToNever ( self - > dbInfo - > get ( ) . master . tlogRejoin . getReply ( req ) ) ) ) { <nl> - if ( success ) <nl> - lastMasterID = self - > dbInfo - > get ( ) . master . id ( ) ; <nl> + when ( TLogRejoinReply rep = <nl> + wait ( brokenPromiseToNever ( self - > dbInfo - > get ( ) . master . tlogRejoin . getReply ( req ) ) ) ) { <nl> + if ( rep . masterIsRecovered ) lastMasterID = self - > dbInfo - > get ( ) . master . id ( ) ; <nl> } <nl> when ( wait ( self - > dbInfo - > onChange ( ) ) ) { } <nl> } <nl> tLogSnapCreate ( TLogSnapRequest snapReq , TLogData * self , Reference < LogData > logDa <nl> } <nl> ExecCmdValueString snapArg ( snapReq . snapPayload ) ; <nl> try { <nl> - Standalone < StringRef > role = LiteralStringRef ( " role = " ) . withSuffix ( snapReq . role ) ; <nl> - int err = wait ( execHelper ( & snapArg , self - > dataFolder , role . toString ( ) ) ) ; <nl> + int err = wait ( execHelper ( & snapArg , snapReq . snapUID , self - > dataFolder , snapReq . role . toString ( ) ) ) ; <nl> <nl> std : : string uidStr = snapReq . snapUID . toString ( ) ; <nl> TraceEvent ( " ExecTraceTLog " ) <nl> mmm a / fdbserver / TagPartitionedLogSystem . actor . cpp <nl> ppp b / fdbserver / TagPartitionedLogSystem . actor . cpp <nl> <nl> # include " fdbserver / RecoveryState . h " <nl> # include " flow / actorcompiler . h " / / This must be the last # include . <nl> <nl> - ACTOR Future < Version > minVersionWhenReady ( Future < Void > f , std : : vector < Future < Version > > replies ) { <nl> + ACTOR Future < Version > minVersionWhenReady ( Future < Void > f , std : : vector < Future < TLogCommitReply > > replies ) { <nl> wait ( f ) ; <nl> Version minVersion = std : : numeric_limits < Version > : : max ( ) ; <nl> for ( auto & reply : replies ) { <nl> if ( reply . isReady ( ) & & ! reply . isError ( ) ) { <nl> - minVersion = std : : min ( minVersion , reply . get ( ) ) ; <nl> + minVersion = std : : min ( minVersion , reply . get ( ) . version ) ; <nl> } <nl> } <nl> return minVersion ; <nl> struct TagPartitionedLogSystem : ILogSystem , ReferenceCounted < TagPartitionedLogS <nl> virtual Future < Version > push ( Version prevVersion , Version version , Version knownCommittedVersion , Version minKnownCommittedVersion , LogPushData & data , Optional < UID > debugID ) { <nl> / / FIXME : Randomize request order as in LegacyLogSystem ? <nl> vector < Future < Void > > quorumResults ; <nl> - vector < Future < Version > > allReplies ; <nl> + vector < Future < TLogCommitReply > > allReplies ; <nl> int location = 0 ; <nl> for ( auto & it : tLogs ) { <nl> if ( it - > isLocal & & it - > logServers . 
size ( ) ) { <nl> struct TagPartitionedLogSystem : ILogSystem , ReferenceCounted < TagPartitionedLogS <nl> } <nl> <nl> ACTOR static Future < Void > trackRejoins ( UID dbgid , std : : vector < Reference < AsyncVar < OptionalInterface < TLogInterface > > > > logServers , FutureStream < struct TLogRejoinRequest > rejoinRequests ) { <nl> - state std : : map < UID , ReplyPromise < bool > > lastReply ; <nl> + state std : : map < UID , ReplyPromise < TLogRejoinReply > > lastReply ; <nl> <nl> try { <nl> loop { <nl> struct TagPartitionedLogSystem : ILogSystem , ReferenceCounted < TagPartitionedLogS <nl> TraceEvent ( " TLogJoinedMe " , dbgid ) . detail ( " TLog " , req . myInterface . id ( ) ) . detail ( " Address " , req . myInterface . commit . getEndpoint ( ) . getPrimaryAddress ( ) . toString ( ) ) ; <nl> if ( ! logServers [ pos ] - > get ( ) . present ( ) | | req . myInterface . commit . getEndpoint ( ) ! = logServers [ pos ] - > get ( ) . interf ( ) . commit . getEndpoint ( ) ) <nl> logServers [ pos ] - > setUnconditional ( OptionalInterface < TLogInterface > ( req . myInterface ) ) ; <nl> - lastReply [ req . myInterface . id ( ) ] . send ( false ) ; <nl> + lastReply [ req . myInterface . id ( ) ] . send ( TLogRejoinReply { false } ) ; <nl> lastReply [ req . myInterface . id ( ) ] = req . reply ; <nl> } <nl> else { <nl> struct TagPartitionedLogSystem : ILogSystem , ReferenceCounted < TagPartitionedLogS <nl> } <nl> } <nl> } catch ( . . . ) { <nl> - for ( auto it = lastReply . begin ( ) ; it ! = lastReply . end ( ) ; + + it ) <nl> - it - > second . send ( true ) ; <nl> + for ( auto it = lastReply . begin ( ) ; it ! = lastReply . end ( ) ; + + it ) it - > second . send ( TLogRejoinReply { true } ) ; <nl> throw ; <nl> } <nl> } <nl> mmm a / fdbserver / TesterInterface . actor . h <nl> ppp b / fdbserver / TesterInterface . actor . h <nl> <nl> # include " fdbrpc / PerfMetric . h " <nl> # include " fdbclient / NativeAPI . actor . h " <nl> # include " flow / actorcompiler . h " / / has to be last include <nl> + struct CheckReply { <nl> + constexpr static FileIdentifier file_identifier = 11 ; <nl> + <nl> + bool value = false ; <nl> + <nl> + template < class Ar > <nl> + void serialize ( Ar & ar ) { <nl> + serializer ( ar , value ) ; <nl> + } <nl> + } ; <nl> <nl> struct WorkloadInterface { <nl> constexpr static FileIdentifier file_identifier = 4454551 ; <nl> RequestStream < ReplyPromise < Void > > setup ; <nl> RequestStream < ReplyPromise < Void > > start ; <nl> - RequestStream < ReplyPromise < bool > > check ; <nl> + RequestStream < ReplyPromise < CheckReply > > check ; <nl> RequestStream < ReplyPromise < std : : vector < PerfMetric > > > metrics ; <nl> RequestStream < ReplyPromise < Void > > stop ; <nl> <nl> struct WorkloadRequest { <nl> <nl> VectorRef < VectorRef < KeyValueRef > > options ; <nl> <nl> - int clientId ; / / the " id " of the client recieving the request ( 0 indexed ) <nl> + int clientId ; / / the " id " of the client receiving the request ( 0 indexed ) <nl> int clientCount ; / / the total number of test clients participating in the workload <nl> ReplyPromise < struct WorkloadInterface > reply ; <nl> <nl> mmm a / fdbserver / fdbserver . actor . cpp <nl> ppp b / fdbserver / fdbserver . actor . cpp <nl> static void printUsage ( const char * name , bool devhelp ) { <nl> " Delete the oldest log file when the total size of all log \ n " <nl> " files exceeds SIZE bytes . If set to 0 , old log files will not \ n " <nl> " be deleted . The default value is 100MiB . 
\ n " ) ; <nl> + printf ( " - - loggroup LOG_GROUP \ n " <nl> + " Sets the LogGroup field with the specified value for all \ n " <nl> + " events in the trace output ( defaults to ` default ' ) . \ n " ) ; <nl> printf ( " - - trace_format FORMAT \ n " <nl> " Select the format of the log files . xml ( the default ) and json \ n " <nl> " are supported . \ n " ) ; <nl> mmm a / fdbserver / masterserver . actor . cpp <nl> ppp b / fdbserver / masterserver . actor . cpp <nl> ACTOR Future < Void > resolutionBalancing ( Reference < MasterData > self ) { <nl> wait ( delay ( SERVER_KNOBS - > MIN_BALANCE_TIME , TaskPriority : : ResolutionMetrics ) ) ; <nl> while ( self - > resolverChanges . get ( ) . size ( ) ) <nl> wait ( self - > resolverChanges . onChange ( ) ) ; <nl> - state std : : vector < Future < int64_t > > futures ; <nl> + state std : : vector < Future < ResolutionMetricsReply > > futures ; <nl> for ( auto & p : self - > resolvers ) <nl> futures . push_back ( brokenPromiseToNever ( p . metrics . getReply ( ResolutionMetricsRequest ( ) , TaskPriority : : ResolutionMetrics ) ) ) ; <nl> wait ( waitForAll ( futures ) ) ; <nl> ACTOR Future < Void > resolutionBalancing ( Reference < MasterData > self ) { <nl> <nl> int64_t total = 0 ; <nl> for ( int i = 0 ; i < futures . size ( ) ; i + + ) { <nl> - total + = futures [ i ] . get ( ) ; <nl> - metrics . insert ( std : : make_pair ( futures [ i ] . get ( ) , i ) , NoMetric ( ) ) ; <nl> + total + = futures [ i ] . get ( ) . value ; <nl> + metrics . insert ( std : : make_pair ( futures [ i ] . get ( ) . value , i ) , NoMetric ( ) ) ; <nl> / / TraceEvent ( " ResolverMetric " ) . detail ( " I " , i ) . detail ( " Metric " , futures [ i ] . get ( ) ) ; <nl> } <nl> if ( metrics . lastItem ( ) - > first - metrics . begin ( ) - > first > SERVER_KNOBS - > MIN_BALANCE_DIFFERENCE ) { <nl> mmm a / fdbserver / storageserver . actor . cpp <nl> ppp b / fdbserver / storageserver . actor . cpp <nl> ACTOR Future < Void > watchValue_impl ( StorageServer * data , WatchValueRequest req ) <nl> g_traceBatch . addEvent ( " WatchValueDebug " , req . debugID . get ( ) . first ( ) , " watchValueQ . AfterRead " ) ; / / . detail ( " TaskID " , g_network - > getCurrentTask ( ) ) ; <nl> <nl> if ( reply . value ! = req . value ) { <nl> - req . reply . send ( latest ) ; <nl> + req . reply . send ( WatchValueReply { latest } ) ; <nl> return Void ( ) ; <nl> } <nl> <nl> ACTOR Future < Void > getShardState_impl ( StorageServer * data , GetShardStateRequest <nl> } <nl> <nl> if ( ! onChange . size ( ) ) { <nl> - req . reply . send ( std : : make_pair ( data - > version . get ( ) , data - > durableVersion . get ( ) ) ) ; <nl> + req . reply . send ( GetShardStateReply { data - > version . get ( ) , data - > durableVersion . get ( ) } ) ; <nl> return Void ( ) ; <nl> } <nl> <nl> void splitMutation ( StorageServer * data , KeyRangeMap < T > & map , MutationRef const & <nl> ASSERT ( false ) ; / / Unknown mutation type in splitMutations <nl> } <nl> <nl> + ACTOR Future < Void > logFetchKeysWarning ( AddingShard * shard ) { <nl> + state double startTime = now ( ) ; <nl> + loop { <nl> + wait ( delay ( 600 ) ) ; <nl> + TraceEvent ( SevWarnAlways , " FetchKeysTooLong " ) . detail ( " Duration " , now ( ) - startTime ) . detail ( " Phase " , shard - > phase ) . detail ( " Begin " , shard - > keys . begin . printable ( ) ) . detail ( " End " , shard - > keys . end . 
printable ( ) ) ; <nl> + } <nl> + } <nl> + <nl> ACTOR Future < Void > fetchKeys ( StorageServer * data , AddingShard * shard ) { <nl> state TraceInterval interval ( " FetchKeys " ) ; <nl> state KeyRange keys = shard - > keys ; <nl> + state Future < Void > warningLogger = logFetchKeysWarning ( shard ) ; <nl> state double startt = now ( ) ; <nl> state int fetchBlockBytes = BUGGIFY ? SERVER_KNOBS - > BUGGIFY_BLOCK_BYTES : SERVER_KNOBS - > FETCH_BLOCK_BYTES ; <nl> <nl> ACTOR Future < Void > waitMetrics ( StorageServerMetrics * self , WaitMetricsRequest r <nl> when ( StorageMetrics c = waitNext ( change . getFuture ( ) ) ) { <nl> metrics + = c ; <nl> <nl> - / / SOMEDAY : validation ! The changes here are possibly partial changes ( we recieve multiple messages per <nl> + / / SOMEDAY : validation ! The changes here are possibly partial changes ( we receive multiple messages per <nl> / / update to our requested range ) . This means that the validation would have to occur after all <nl> / / the messages for one clear or set have been dispatched . <nl> <nl> ACTOR Future < Void > storageServerCore ( StorageServer * self , StorageServerInterfac <nl> when ( GetValueRequest req = waitNext ( ssi . getValue . getFuture ( ) ) ) { <nl> / / Warning : This code is executed at extremely high priority ( TaskPriority : : LoadBalancedEndpoint ) , so downgrade before doing real work <nl> if ( req . debugID . present ( ) ) <nl> - g_traceBatch . addEvent ( " GetValueDebug " , req . debugID . get ( ) . first ( ) , " storageServer . recieved " ) ; / / . detail ( " TaskID " , g_network - > getCurrentTask ( ) ) ; <nl> + g_traceBatch . addEvent ( " GetValueDebug " , req . debugID . get ( ) . first ( ) , " storageServer . received " ) ; / / . detail ( " TaskID " , g_network - > getCurrentTask ( ) ) ; <nl> <nl> if ( SHORT_CIRCUT_ACTUAL_STORAGE & & normalKeys . contains ( req . key ) ) <nl> req . reply . send ( GetValueReply ( ) ) ; <nl> ACTOR Future < Void > storageServerCore ( StorageServer * self , StorageServerInterfac <nl> when ( GetShardStateRequest req = waitNext ( ssi . getShardState . getFuture ( ) ) ) { <nl> if ( req . mode = = GetShardStateRequest : : NO_WAIT ) { <nl> if ( self - > isReadable ( req . keys ) ) <nl> - req . reply . send ( std : : make_pair ( self - > version . get ( ) , self - > durableVersion . get ( ) ) ) ; <nl> + req . reply . send ( GetShardStateReply { self - > version . get ( ) , self - > durableVersion . get ( ) } ) ; <nl> else <nl> req . reply . sendError ( wrong_shard_server ( ) ) ; <nl> } else { <nl> ACTOR Future < Void > storageServerCore ( StorageServer * self , StorageServerInterfac <nl> when ( StorageQueuingMetricsRequest req = waitNext ( ssi . getQueuingMetrics . getFuture ( ) ) ) { <nl> getQueuingMetrics ( self , req ) ; <nl> } <nl> - when ( ReplyPromise < Version > reply = waitNext ( ssi . getVersion . getFuture ( ) ) ) { <nl> + when ( ReplyPromise < VersionReply > reply = waitNext ( ssi . getVersion . getFuture ( ) ) ) { <nl> reply . send ( self - > version . get ( ) ) ; <nl> } <nl> when ( ReplyPromise < KeyValueStoreType > reply = waitNext ( ssi . getKeyValueStoreType . getFuture ( ) ) ) { <nl> mmm a / fdbserver / tester . actor . cpp <nl> ppp b / fdbserver / tester . actor . 
cpp <nl> ACTOR Future < Void > runWorkloadAsync ( Database cx , WorkloadInterface workIface , T <nl> state unique_ptr < TestWorkload > delw ( workload ) ; <nl> state Optional < ErrorOr < Void > > setupResult ; <nl> state Optional < ErrorOr < Void > > startResult ; <nl> - state Optional < ErrorOr < bool > > checkResult ; <nl> + state Optional < ErrorOr < CheckReply > > checkResult ; <nl> state ReplyPromise < Void > setupReq ; <nl> state ReplyPromise < Void > startReq ; <nl> - state ReplyPromise < bool > checkReq ; <nl> + state ReplyPromise < CheckReply > checkReq ; <nl> <nl> TraceEvent ( " TestBeginAsync " , workIface . id ( ) ) . detail ( " Workload " , workload - > description ( ) ) . detail ( " DatabasePingDelay " , databasePingDelay ) ; <nl> <nl> ACTOR Future < Void > runWorkloadAsync ( Database cx , WorkloadInterface workIface , T <nl> } <nl> sendResult ( startReq , startResult ) ; <nl> } <nl> - when ( ReplyPromise < bool > req = waitNext ( workIface . check . getFuture ( ) ) ) { <nl> + when ( ReplyPromise < CheckReply > req = waitNext ( workIface . check . getFuture ( ) ) ) { <nl> checkReq = req ; <nl> if ( ! checkResult . present ( ) ) { <nl> try { <nl> bool check = wait ( timeoutError ( workload - > check ( cx ) , workload - > getCheckTimeout ( ) ) ) ; <nl> - checkResult = ( ! startResult . present ( ) | | ! startResult . get ( ) . isError ( ) ) & & check ; <nl> + checkResult = CheckReply { ( ! startResult . present ( ) | | ! startResult . get ( ) . isError ( ) ) & & check } ; <nl> } catch ( Error & e ) { <nl> checkResult = operation_failed ( ) ; / / was : checkResult = false ; <nl> if ( e . code ( ) = = error_code_please_reboot | | e . code ( ) = = error_code_please_reboot_delete ) throw ; <nl> ACTOR Future < DistributedTestResults > runWorkload ( Database cx , std : : vector < Test <nl> wait ( delay ( 3 . 0 ) ) ; <nl> } <nl> <nl> - state std : : vector < Future < ErrorOr < bool > > > checks ; <nl> + state std : : vector < Future < ErrorOr < CheckReply > > > checks ; <nl> TraceEvent ( " CheckingResults " ) ; <nl> printf ( " checking test ( % s ) . . . \ n " , printable ( spec . title ) . c_str ( ) ) ; <nl> for ( int i = 0 ; i < workloads . size ( ) ; i + + ) <nl> - checks . push_back ( workloads [ i ] . check . template getReplyUnlessFailedFor < bool > ( waitForFailureTime , 0 ) ) ; <nl> + checks . push_back ( workloads [ i ] . check . template getReplyUnlessFailedFor < CheckReply > ( waitForFailureTime , 0 ) ) ; <nl> wait ( waitForAll ( checks ) ) ; <nl> throwIfError ( checks , " CheckFailedForWorkload " + printable ( spec . title ) ) ; <nl> <nl> for ( int i = 0 ; i < checks . size ( ) ; i + + ) { <nl> - if ( checks [ i ] . get ( ) . get ( ) ) <nl> + if ( checks [ i ] . get ( ) . get ( ) . value ) <nl> success + + ; <nl> else <nl> failure + + ; <nl> mmm a / fdbserver / worker . actor . cpp <nl> ppp b / fdbserver / worker . actor . cpp <nl> void endRole ( const Role & role , UID id , std : : string reason , bool ok , Error e ) { <nl> ACTOR Future < Void > workerSnapCreate ( WorkerSnapRequest snapReq , StringRef snapFolder ) { <nl> state ExecCmdValueString snapArg ( snapReq . snapPayload ) ; <nl> try { <nl> - Standalone < StringRef > role = LiteralStringRef ( " role = " ) . withSuffix ( snapReq . role ) ; <nl> - int err = wait ( execHelper ( & snapArg , snapFolder . toString ( ) , role . toString ( ) ) ) ; <nl> + int err = wait ( execHelper ( & snapArg , snapReq . snapUID , snapFolder . toString ( ) , snapReq . role . toString ( ) ) ) ; <nl> std : : string uidStr = snapReq . 
snapUID . toString ( ) ; <nl> TraceEvent ( " ExecTraceWorker " ) <nl> . detail ( " Uid " , uidStr ) <nl> mmm a / fdbserver / workloads / SnapTest . actor . cpp <nl> ppp b / fdbserver / workloads / SnapTest . actor . cpp <nl> struct SnapTestWorkload : TestWorkload { <nl> wait ( status ) ; <nl> break ; <nl> } catch ( Error & e ) { <nl> - if ( e . code ( ) = = error_code_txn_exec_log_anti_quorum ) { <nl> + if ( e . code ( ) = = error_code_snap_log_anti_quorum_unsupported ) { <nl> snapFailed = true ; <nl> break ; <nl> } <nl> struct SnapTestWorkload : TestWorkload { <nl> wait ( status ) ; <nl> break ; <nl> } catch ( Error & e ) { <nl> - if ( e . code ( ) = = error_code_cluster_not_fully_recovered | | <nl> - e . code ( ) = = error_code_txn_exec_log_anti_quorum ) { <nl> + if ( e . code ( ) = = error_code_snap_not_fully_recovered_unsupported | | <nl> + e . code ( ) = = error_code_snap_log_anti_quorum_unsupported ) { <nl> snapFailed = true ; <nl> break ; <nl> } <nl> - if ( e . code ( ) = = error_code_transaction_not_permitted ) { <nl> + if ( e . code ( ) = = error_code_snap_path_not_whitelisted ) { <nl> testedFailure = true ; <nl> break ; <nl> } <nl> mmm a / flow / AsioReactor . h <nl> ppp b / flow / AsioReactor . h <nl> class ASIOReactor { <nl> public : <nl> explicit ASIOReactor ( Net2 * ) ; <nl> <nl> - void sleepAndReact ( double timeout ) ; <nl> + void sleep ( double timeout ) ; <nl> + void react ( ) ; <nl> <nl> void wake ( ) ; <nl> <nl> mmm a / flow / FileIdentifier . h <nl> ppp b / flow / FileIdentifier . h <nl> template < class T , uint32_t B > <nl> struct ComposedIdentifierExternal < T , B , true > { <nl> static constexpr FileIdentifier value = ComposedIdentifier < T , B > : : file_identifier ; <nl> } ; <nl> - <nl> - template < > <nl> - struct FileIdentifierFor < int > { <nl> - constexpr static FileIdentifier value = 1 ; <nl> - } ; <nl> - <nl> - template < > <nl> - struct FileIdentifierFor < unsigned > { <nl> - constexpr static FileIdentifier value = 2 ; <nl> - } ; <nl> - <nl> - template < > <nl> - struct FileIdentifierFor < long > { <nl> - constexpr static FileIdentifier value = 3 ; <nl> - } ; <nl> - <nl> - template < > <nl> - struct FileIdentifierFor < unsigned long > { <nl> - constexpr static FileIdentifier value = 4 ; <nl> - } ; <nl> - <nl> - template < > <nl> - struct FileIdentifierFor < long long > { <nl> - constexpr static FileIdentifier value = 5 ; <nl> - } ; <nl> - <nl> - template < > <nl> - struct FileIdentifierFor < unsigned long long > { <nl> - constexpr static FileIdentifier value = 6 ; <nl> - } ; <nl> - <nl> - template < > <nl> - struct FileIdentifierFor < short > { <nl> - constexpr static FileIdentifier value = 7 ; <nl> - } ; <nl> - <nl> - template < > <nl> - struct FileIdentifierFor < unsigned short > { <nl> - constexpr static FileIdentifier value = 8 ; <nl> - } ; <nl> - <nl> - template < > <nl> - struct FileIdentifierFor < signed char > { <nl> - constexpr static FileIdentifier value = 9 ; <nl> - } ; <nl> - <nl> - template < > <nl> - struct FileIdentifierFor < unsigned char > { <nl> - constexpr static FileIdentifier value = 10 ; <nl> - } ; <nl> - <nl> - template < > <nl> - struct FileIdentifierFor < bool > { <nl> - constexpr static FileIdentifier value = 11 ; <nl> - } ; <nl> - <nl> - template < > <nl> - struct FileIdentifierFor < float > { <nl> - constexpr static FileIdentifier value = 7266212 ; <nl> - } ; <nl> - <nl> - template < > <nl> - struct FileIdentifierFor < double > { <nl> - constexpr static FileIdentifier value = 9348150 ; <nl> - } ; <nl> mmm a / flow / Knobs . 
cpp <nl> ppp b / flow / Knobs . cpp <nl> FlowKnobs : : FlowKnobs ( bool randomize , bool isSimulated ) { <nl> init ( METRIC_LIMIT_RESPONSE_FACTOR , 10 ) ; / / The additional queue size at which to disable logging of another level ( higher = = less restrictive ) <nl> <nl> / / Load Balancing <nl> - init ( LOAD_BALANCE_ZONE_ID_LOCALITY_ENABLED , 1 ) ; <nl> + init ( LOAD_BALANCE_ZONE_ID_LOCALITY_ENABLED , 0 ) ; <nl> init ( LOAD_BALANCE_DC_ID_LOCALITY_ENABLED , 1 ) ; <nl> init ( LOAD_BALANCE_MAX_BACKOFF , 5 . 0 ) ; <nl> init ( LOAD_BALANCE_START_BACKOFF , 0 . 01 ) ; <nl> FlowKnobs : : FlowKnobs ( bool randomize , bool isSimulated ) { <nl> init ( FUTURE_VERSION_INITIAL_BACKOFF , 1 . 0 ) ; <nl> init ( FUTURE_VERSION_MAX_BACKOFF , 8 . 0 ) ; <nl> init ( FUTURE_VERSION_BACKOFF_GROWTH , 2 . 0 ) ; <nl> + init ( LOAD_BALANCE_MAX_BAD_OPTIONS , 1 ) ; / / should be the same as MAX_MACHINES_FALLING_BEHIND <nl> + init ( LOAD_BALANCE_PENALTY_IS_BAD , true ) ; <nl> } <nl> <nl> static std : : string toLower ( std : : string const & name ) { <nl> mmm a / flow / Knobs . h <nl> ppp b / flow / Knobs . h <nl> class FlowKnobs : public Knobs { <nl> double FUTURE_VERSION_INITIAL_BACKOFF ; <nl> double FUTURE_VERSION_MAX_BACKOFF ; <nl> double FUTURE_VERSION_BACKOFF_GROWTH ; <nl> + int LOAD_BALANCE_MAX_BAD_OPTIONS ; <nl> + bool LOAD_BALANCE_PENALTY_IS_BAD ; <nl> <nl> FlowKnobs ( bool randomize = false , bool isSimulated = false ) ; <nl> } ; <nl> mmm a / flow / Net2 . actor . cpp <nl> ppp b / flow / Net2 . actor . cpp <nl> class Net2 sealed : public INetwork , public INetworkConnections { <nl> Int64MetricHandle countASIOEvents ; <nl> Int64MetricHandle countSlowTaskSignals ; <nl> Int64MetricHandle priorityMetric ; <nl> + DoubleMetricHandle countLaunchTime ; <nl> + DoubleMetricHandle countReactTime ; <nl> BoolMetricHandle awakeMetric ; <nl> <nl> EventMetricHandle < SlowTask > slowTaskMetric ; <nl> void Net2 : : initMetrics ( ) { <nl> priorityMetric . init ( LiteralStringRef ( " Net2 . Priority " ) ) ; <nl> awakeMetric . init ( LiteralStringRef ( " Net2 . Awake " ) ) ; <nl> slowTaskMetric . init ( LiteralStringRef ( " Net2 . SlowTask " ) ) ; <nl> + countLaunchTime . init ( LiteralStringRef ( " Net2 . CountLaunchTime " ) ) ; <nl> + countReactTime . init ( LiteralStringRef ( " Net2 . CountReactTime " ) ) ; <nl> } <nl> <nl> void Net2 : : run ( ) { <nl> void Net2 : : run ( ) { <nl> taskBegin = nnow ; <nl> trackMinPriority ( TaskPriority : : RunCycleFunction , taskBegin ) ; <nl> runFunc ( ) ; <nl> - checkForSlowTask ( tsc_begin , __rdtsc ( ) , timer_monotonic ( ) - taskBegin , TaskPriority : : RunCycleFunction ) ; <nl> + double taskEnd = timer_monotonic ( ) ; <nl> + countLaunchTime + = taskEnd - taskBegin ; <nl> + checkForSlowTask ( tsc_begin , __rdtsc ( ) , taskEnd - taskBegin , TaskPriority : : RunCycleFunction ) ; <nl> } <nl> <nl> double sleepTime = 0 ; <nl> void Net2 : : run ( ) { <nl> if ( ! timers . empty ( ) ) { <nl> sleepTime = timers . top ( ) . at - sleepStart ; / / + 500e - 6 ? <nl> } <nl> - trackMinPriority ( TaskPriority : : Zero , sleepStart ) ; <nl> + if ( sleepTime > 0 ) { <nl> + trackMinPriority ( TaskPriority : : Zero , sleepStart ) ; <nl> + awakeMetric = false ; <nl> + priorityMetric = 0 ; <nl> + reactor . sleep ( sleepTime ) ; <nl> + awakeMetric = true ; <nl> + } <nl> } <nl> <nl> - awakeMetric = false ; <nl> - if ( sleepTime > 0 ) <nl> - priorityMetric = 0 ; <nl> - reactor . 
sleepAndReact ( sleepTime ) ; <nl> - awakeMetric = true ; <nl> - <nl> + tsc_begin = __rdtsc ( ) ; <nl> + taskBegin = timer_monotonic ( ) ; <nl> + trackMinPriority ( TaskPriority : : ASIOReactor , taskBegin ) ; <nl> + reactor . react ( ) ; <nl> + <nl> updateNow ( ) ; <nl> double now = this - > currentTime ; <nl> <nl> + countReactTime + = now - taskBegin ; <nl> + checkForSlowTask ( tsc_begin , __rdtsc ( ) , now - taskBegin , TaskPriority : : ASIOReactor ) ; <nl> + <nl> if ( ( now - nnow ) > FLOW_KNOBS - > SLOW_LOOP_CUTOFF & & nondeterministicRandom ( ) - > random01 ( ) < ( now - nnow ) * FLOW_KNOBS - > SLOW_LOOP_SAMPLING_RATE ) <nl> TraceEvent ( " SomewhatSlowRunLoopTop " ) . detail ( " Elapsed " , now - nnow ) ; <nl> <nl> ASIOReactor : : ASIOReactor ( Net2 * net ) <nl> # endif <nl> } <nl> <nl> - void ASIOReactor : : sleepAndReact ( double sleepTime ) { <nl> + void ASIOReactor : : sleep ( double sleepTime ) { <nl> if ( sleepTime > FLOW_KNOBS - > BUSY_WAIT_THRESHOLD ) { <nl> if ( FLOW_KNOBS - > REACTOR_FLAGS & 4 ) { <nl> # ifdef __linux <nl> void ASIOReactor : : sleepAndReact ( double sleepTime ) { <nl> if ( ! ( FLOW_KNOBS - > REACTOR_FLAGS & 8 ) ) <nl> threadYield ( ) ; <nl> } <nl> + } <nl> + <nl> + void ASIOReactor : : react ( ) { <nl> while ( ios . poll_one ( ) ) + + network - > countASIOEvents ; / / Make this a task ? <nl> } <nl> <nl> mmm a / flow / SystemMonitor . cpp <nl> ppp b / flow / SystemMonitor . cpp <nl> SystemStatistics customSystemMonitor ( std : : string eventName , StatisticsState * sta <nl> . detail ( " WriteProbes " , netData . countWriteProbes - statState - > networkState . countWriteProbes ) <nl> . detail ( " PacketsRead " , netData . countPacketsReceived - statState - > networkState . countPacketsReceived ) <nl> . detail ( " PacketsGenerated " , netData . countPacketsGenerated - statState - > networkState . countPacketsGenerated ) <nl> - . detail ( " WouldBlock " , netData . countWouldBlock - statState - > networkState . countWouldBlock ) ; <nl> + . detail ( " WouldBlock " , netData . countWouldBlock - statState - > networkState . countWouldBlock ) <nl> + . detail ( " LaunchTime " , netData . countLaunchTime - statState - > networkState . countLaunchTime ) <nl> + . detail ( " ReactTime " , netData . countReactTime - statState - > networkState . countReactTime ) ; <nl> <nl> for ( int i = 0 ; i < NetworkMetrics : : SLOW_EVENT_BINS ; i + + ) { <nl> if ( int c = g_network - > networkMetrics . countSlowEvents [ i ] - statState - > networkMetricsState . countSlowEvents [ i ] ) { <nl> mmm a / flow / SystemMonitor . h <nl> ppp b / flow / SystemMonitor . h <nl> struct NetworkData { <nl> int64_t countConnEstablished ; <nl> int64_t countConnClosedWithError ; <nl> int64_t countConnClosedWithoutError ; <nl> + double countLaunchTime ; <nl> + double countReactTime ; <nl> <nl> void init ( ) { <nl> - auto getValue = [ ] ( StringRef name ) - > int64_t { <nl> - Reference < Int64Metric > r = Int64Metric : : getOrCreateInstance ( name ) ; <nl> - int64_t v = 0 ; <nl> - if ( r ) <nl> - v = r - > getValue ( ) ; <nl> - return v ; <nl> - } ; <nl> - <nl> - bytesSent = getValue ( LiteralStringRef ( " Net2 . BytesSent " ) ) ; <nl> - countPacketsReceived = getValue ( LiteralStringRef ( " Net2 . CountPacketsReceived " ) ) ; <nl> - countPacketsGenerated = getValue ( LiteralStringRef ( " Net2 . CountPacketsGenerated " ) ) ; <nl> - bytesReceived = getValue ( LiteralStringRef ( " Net2 . BytesReceived " ) ) ; <nl> - countWriteProbes = getValue ( LiteralStringRef ( " Net2 . 
CountWriteProbes " ) ) ; <nl> - countReadProbes = getValue ( LiteralStringRef ( " Net2 . CountReadProbes " ) ) ; <nl> - countReads = getValue ( LiteralStringRef ( " Net2 . CountReads " ) ) ; <nl> - countWouldBlock = getValue ( LiteralStringRef ( " Net2 . CountWouldBlock " ) ) ; <nl> - countWrites = getValue ( LiteralStringRef ( " Net2 . CountWrites " ) ) ; <nl> - countRunLoop = getValue ( LiteralStringRef ( " Net2 . CountRunLoop " ) ) ; <nl> - countCantSleep = getValue ( LiteralStringRef ( " Net2 . CountCantSleep " ) ) ; <nl> - countWontSleep = getValue ( LiteralStringRef ( " Net2 . CountWontSleep " ) ) ; <nl> - countTimers = getValue ( LiteralStringRef ( " Net2 . CountTimers " ) ) ; <nl> - countTasks = getValue ( LiteralStringRef ( " Net2 . CountTasks " ) ) ; <nl> - countYields = getValue ( LiteralStringRef ( " Net2 . CountYields " ) ) ; <nl> - countYieldBigStack = getValue ( LiteralStringRef ( " Net2 . CountYieldBigStack " ) ) ; <nl> - countYieldCalls = getValue ( LiteralStringRef ( " Net2 . CountYieldCalls " ) ) ; <nl> - countASIOEvents = getValue ( LiteralStringRef ( " Net2 . CountASIOEvents " ) ) ; <nl> - countYieldCallsTrue = getValue ( LiteralStringRef ( " Net2 . CountYieldCallsTrue " ) ) ; <nl> - countSlowTaskSignals = getValue ( LiteralStringRef ( " Net2 . CountSlowTaskSignals " ) ) ; <nl> - countConnEstablished = getValue ( LiteralStringRef ( " Net2 . CountConnEstablished " ) ) ; <nl> - countConnClosedWithError = getValue ( LiteralStringRef ( " Net2 . CountConnClosedWithError " ) ) ; <nl> - countConnClosedWithoutError = getValue ( LiteralStringRef ( " Net2 . CountConnClosedWithoutError " ) ) ; <nl> - countFileLogicalWrites = getValue ( LiteralStringRef ( " AsyncFile . CountLogicalWrites " ) ) ; <nl> - countFileLogicalReads = getValue ( LiteralStringRef ( " AsyncFile . CountLogicalReads " ) ) ; <nl> - countAIOSubmit = getValue ( LiteralStringRef ( " AsyncFile . CountAIOSubmit " ) ) ; <nl> - countAIOCollect = getValue ( LiteralStringRef ( " AsyncFile . CountAIOCollect " ) ) ; <nl> - countFileCacheWrites = getValue ( LiteralStringRef ( " AsyncFile . CountCacheWrites " ) ) ; <nl> - countFileCacheReads = getValue ( LiteralStringRef ( " AsyncFile . CountCacheReads " ) ) ; <nl> - countFileCacheWritesBlocked = getValue ( LiteralStringRef ( " AsyncFile . CountCacheWritesBlocked " ) ) ; <nl> - countFileCacheReadsBlocked = getValue ( LiteralStringRef ( " AsyncFile . CountCacheReadsBlocked " ) ) ; <nl> - countFileCachePageReadsMerged = getValue ( LiteralStringRef ( " AsyncFile . CountCachePageReadsMerged " ) ) ; <nl> - countFileCacheFinds = getValue ( LiteralStringRef ( " AsyncFile . CountCacheFinds " ) ) ; <nl> - countFileCacheReadBytes = getValue ( LiteralStringRef ( " AsyncFile . CountCacheReadBytes " ) ) ; <nl> - countFilePageCacheHits = getValue ( LiteralStringRef ( " AsyncFile . CountCachePageReadsHit " ) ) ; <nl> - countFilePageCacheMisses = getValue ( LiteralStringRef ( " AsyncFile . CountCachePageReadsMissed " ) ) ; <nl> - countFilePageCacheEvictions = getValue ( LiteralStringRef ( " EvictablePageCache . CacheEvictions " ) ) ; <nl> + bytesSent = Int64Metric : : getValueOrDefault ( LiteralStringRef ( " Net2 . BytesSent " ) ) ; <nl> + countPacketsReceived = Int64Metric : : getValueOrDefault ( LiteralStringRef ( " Net2 . CountPacketsReceived " ) ) ; <nl> + countPacketsGenerated = Int64Metric : : getValueOrDefault ( LiteralStringRef ( " Net2 . CountPacketsGenerated " ) ) ; <nl> + bytesReceived = Int64Metric : : getValueOrDefault ( LiteralStringRef ( " Net2 . 
BytesReceived " ) ) ; <nl> + countWriteProbes = Int64Metric : : getValueOrDefault ( LiteralStringRef ( " Net2 . CountWriteProbes " ) ) ; <nl> + countReadProbes = Int64Metric : : getValueOrDefault ( LiteralStringRef ( " Net2 . CountReadProbes " ) ) ; <nl> + countReads = Int64Metric : : getValueOrDefault ( LiteralStringRef ( " Net2 . CountReads " ) ) ; <nl> + countWouldBlock = Int64Metric : : getValueOrDefault ( LiteralStringRef ( " Net2 . CountWouldBlock " ) ) ; <nl> + countWrites = Int64Metric : : getValueOrDefault ( LiteralStringRef ( " Net2 . CountWrites " ) ) ; <nl> + countRunLoop = Int64Metric : : getValueOrDefault ( LiteralStringRef ( " Net2 . CountRunLoop " ) ) ; <nl> + countCantSleep = Int64Metric : : getValueOrDefault ( LiteralStringRef ( " Net2 . CountCantSleep " ) ) ; <nl> + countWontSleep = Int64Metric : : getValueOrDefault ( LiteralStringRef ( " Net2 . CountWontSleep " ) ) ; <nl> + countTimers = Int64Metric : : getValueOrDefault ( LiteralStringRef ( " Net2 . CountTimers " ) ) ; <nl> + countTasks = Int64Metric : : getValueOrDefault ( LiteralStringRef ( " Net2 . CountTasks " ) ) ; <nl> + countYields = Int64Metric : : getValueOrDefault ( LiteralStringRef ( " Net2 . CountYields " ) ) ; <nl> + countYieldBigStack = Int64Metric : : getValueOrDefault ( LiteralStringRef ( " Net2 . CountYieldBigStack " ) ) ; <nl> + countYieldCalls = Int64Metric : : getValueOrDefault ( LiteralStringRef ( " Net2 . CountYieldCalls " ) ) ; <nl> + countASIOEvents = Int64Metric : : getValueOrDefault ( LiteralStringRef ( " Net2 . CountASIOEvents " ) ) ; <nl> + countYieldCallsTrue = Int64Metric : : getValueOrDefault ( LiteralStringRef ( " Net2 . CountYieldCallsTrue " ) ) ; <nl> + countSlowTaskSignals = Int64Metric : : getValueOrDefault ( LiteralStringRef ( " Net2 . CountSlowTaskSignals " ) ) ; <nl> + countConnEstablished = Int64Metric : : getValueOrDefault ( LiteralStringRef ( " Net2 . CountConnEstablished " ) ) ; <nl> + countConnClosedWithError = Int64Metric : : getValueOrDefault ( LiteralStringRef ( " Net2 . CountConnClosedWithError " ) ) ; <nl> + countConnClosedWithoutError = Int64Metric : : getValueOrDefault ( LiteralStringRef ( " Net2 . CountConnClosedWithoutError " ) ) ; <nl> + countLaunchTime = DoubleMetric : : getValueOrDefault ( LiteralStringRef ( " Net2 . CountLaunchTime " ) ) ; <nl> + countReactTime = DoubleMetric : : getValueOrDefault ( LiteralStringRef ( " Net2 . CountReactTime " ) ) ; <nl> + countFileLogicalWrites = Int64Metric : : getValueOrDefault ( LiteralStringRef ( " AsyncFile . CountLogicalWrites " ) ) ; <nl> + countFileLogicalReads = Int64Metric : : getValueOrDefault ( LiteralStringRef ( " AsyncFile . CountLogicalReads " ) ) ; <nl> + countAIOSubmit = Int64Metric : : getValueOrDefault ( LiteralStringRef ( " AsyncFile . CountAIOSubmit " ) ) ; <nl> + countAIOCollect = Int64Metric : : getValueOrDefault ( LiteralStringRef ( " AsyncFile . CountAIOCollect " ) ) ; <nl> + countFileCacheWrites = Int64Metric : : getValueOrDefault ( LiteralStringRef ( " AsyncFile . CountCacheWrites " ) ) ; <nl> + countFileCacheReads = Int64Metric : : getValueOrDefault ( LiteralStringRef ( " AsyncFile . CountCacheReads " ) ) ; <nl> + countFileCacheWritesBlocked = Int64Metric : : getValueOrDefault ( LiteralStringRef ( " AsyncFile . CountCacheWritesBlocked " ) ) ; <nl> + countFileCacheReadsBlocked = Int64Metric : : getValueOrDefault ( LiteralStringRef ( " AsyncFile . CountCacheReadsBlocked " ) ) ; <nl> + countFileCachePageReadsMerged = Int64Metric : : getValueOrDefault ( LiteralStringRef ( " AsyncFile . 
CountCachePageReadsMerged " ) ) ; <nl> + countFileCacheFinds = Int64Metric : : getValueOrDefault ( LiteralStringRef ( " AsyncFile . CountCacheFinds " ) ) ; <nl> + countFileCacheReadBytes = Int64Metric : : getValueOrDefault ( LiteralStringRef ( " AsyncFile . CountCacheReadBytes " ) ) ; <nl> + countFilePageCacheHits = Int64Metric : : getValueOrDefault ( LiteralStringRef ( " AsyncFile . CountCachePageReadsHit " ) ) ; <nl> + countFilePageCacheMisses = Int64Metric : : getValueOrDefault ( LiteralStringRef ( " AsyncFile . CountCachePageReadsMissed " ) ) ; <nl> + countFilePageCacheEvictions = Int64Metric : : getValueOrDefault ( LiteralStringRef ( " EvictablePageCache . CacheEvictions " ) ) ; <nl> } <nl> } ; <nl> <nl> mmm a / flow / TDMetric . actor . h <nl> ppp b / flow / TDMetric . actor . h <nl> struct MetricUtil { <nl> return m ; <nl> } <nl> <nl> + static ValueType getValueOrDefault ( StringRef const & name , StringRef const & id = StringRef ( ) , ValueType defaultValue = ValueType ( ) ) { <nl> + Reference < T > r = getOrCreateInstance ( name , id ) ; <nl> + if ( r ) { <nl> + return r - > getValue ( ) ; <nl> + } <nl> + return defaultValue ; <nl> + } <nl> + <nl> / / Lookup the T metric by name and return its value ( or nullptr if it doesn ' t exist ) <nl> static T * lookupMetric ( MetricNameRef const & name ) { <nl> auto it = T : : metricMap ( ) . find ( name ) ; <nl> struct ContinuousMetric : NonCopyable , ReferenceCounted < ContinuousMetric < T > > , Met <nl> } ; <nl> <nl> typedef ContinuousMetric < int64_t > Int64Metric ; <nl> + typedef ContinuousMetric < double > DoubleMetric ; <nl> typedef Int64Metric VersionMetric ; <nl> typedef ContinuousMetric < bool > BoolMetric ; <nl> typedef ContinuousMetric < Standalone < StringRef > > StringMetric ; <nl> typedef MetricHandle < Int64Metric > Int64MetricHandle ; <nl> typedef MetricHandle < VersionMetric > VersionMetricHandle ; <nl> typedef MetricHandle < BoolMetric > BoolMetricHandle ; <nl> typedef MetricHandle < StringMetric > StringMetricHandle ; <nl> + typedef MetricHandle < DoubleMetric > DoubleMetricHandle ; <nl> <nl> template < typename E > <nl> using EventMetricHandle = MetricHandle < EventMetric < E > > ; <nl> mmm a / flow / TDMetric . cpp <nl> ppp b / flow / TDMetric . cpp <nl> <nl> <nl> const StringRef BaseEventMetric : : metricType = LiteralStringRef ( " Event " ) ; <nl> template < > const StringRef Int64Metric : : metricType = LiteralStringRef ( " Int64 " ) ; <nl> + template < > const StringRef DoubleMetric : : metricType = LiteralStringRef ( " Double " ) ; <nl> template < > const StringRef BoolMetric : : metricType = LiteralStringRef ( " Bool " ) ; <nl> template < > const StringRef StringMetric : : metricType = LiteralStringRef ( " String " ) ; <nl> <nl> mmm a / flow / error_definitions . h <nl> ppp b / flow / error_definitions . 
h <nl> ERROR ( lookup_failed , 1041 , " DNS lookup failed " ) <nl> ERROR ( proxy_memory_limit_exceeded , 1042 , " Proxy commit memory limit exceeded " ) <nl> ERROR ( shutdown_in_progress , 1043 , " Operation no longer supported due to shutdown " ) <nl> ERROR ( serialization_failed , 1044 , " Failed to deserialize an object " ) <nl> - ERROR ( transaction_not_permitted , 1045 , " Operation not permitted " ) <nl> - ERROR ( cluster_not_fully_recovered , 1046 , " Cluster not fully recovered " ) <nl> - ERROR ( txn_exec_log_anti_quorum , 1047 , " Execute Transaction not supported when log anti quorum is configured " ) <nl> ERROR ( connection_unreferenced , 1048 , " No peer references for connection " ) <nl> ERROR ( connection_idle , 1049 , " Connection closed after idle timeout " ) <nl> ERROR ( disk_adapter_reset , 1050 , " The disk queue adpater reset " ) <nl> ERROR ( key_not_found , 2400 , " Expected key is missing " ) <nl> ERROR ( json_malformed , 2401 , " JSON string was malformed " ) <nl> ERROR ( json_eof_expected , 2402 , " JSON string did not terminate where expected " ) <nl> <nl> + / / 2500 - disk snapshot based backup errors <nl> + ERROR ( snap_disable_tlog_pop_failed , 2500 , " Disk Snapshot error " ) <nl> + ERROR ( snap_storage_failed , 2501 , " Failed to snapshot storage nodes " ) <nl> + ERROR ( snap_tlog_failed , 2502 , " Failed to snapshot TLog nodes " ) <nl> + ERROR ( snap_coord_failed , 2503 , " Failed to snapshot coordinator nodes " ) <nl> + ERROR ( snap_enable_tlog_pop_failed , 2504 , " Disk Snapshot error " ) <nl> + ERROR ( snap_path_not_whitelisted , 2505 , " Snapshot create binary path not whitelisted " ) <nl> + ERROR ( snap_not_fully_recovered_unsupported , 2506 , " Unsupported when the cluster is not fully recovered " ) <nl> + ERROR ( snap_log_anti_quorum_unsupported , 2507 , " Unsupported when log anti quorum is configured " ) <nl> + ERROR ( snap_with_recovery_unsupported , 2508 , " Cluster recovery during snapshot operation not supported " ) <nl> + <nl> / / 4xxx Internal errors ( those that should be generated only by bugs ) are decimal 4xxx <nl> ERROR ( unknown_error , 4000 , " An unknown error occurred " ) / / C + + exception not of type Error <nl> ERROR ( internal_error , 4100 , " An internal error occurred " ) <nl> mmm a / flow / flat_buffers . h <nl> ppp b / flow / flat_buffers . h <nl> inline FileIdentifier read_file_identifier ( const uint8_t * in ) { <nl> return result ; <nl> } <nl> <nl> + namespace detail { <nl> + template < class T > <nl> + struct YesFileIdentifier { <nl> + constexpr static FileIdentifier file_identifier = FileIdentifierFor < T > : : value ; <nl> + } ; <nl> + struct NoFileIdentifier { } ; <nl> + } ; / / namespace detail <nl> + <nl> / / members of unions must be tables in flatbuffers , so you can use this to <nl> / / introduce the indirection only when necessary . <nl> template < class T > <nl> - struct EnsureTable { <nl> - static_assert ( HasFileIdentifier < T > : : value ) ; <nl> - constexpr static FileIdentifier file_identifier = FileIdentifierFor < T > : : value ; <nl> + struct EnsureTable <nl> + : std : : conditional_t < HasFileIdentifier < T > : : value , detail : : YesFileIdentifier < T > , detail : : NoFileIdentifier > { <nl> EnsureTable ( ) = default ; <nl> EnsureTable ( const T & t ) : t ( t ) { } <nl> template < class Archive > <nl> mmm a / flow / flow . cpp <nl> ppp b / flow / flow . 
cpp <nl> void enableBuggify ( bool enabled , BuggifyType type ) { <nl> buggifyActivated [ int ( type ) ] = enabled ; <nl> } <nl> <nl> + namespace { <nl> + / / Simple message for flatbuffers unittests <nl> + struct Int { <nl> + constexpr static FileIdentifier file_identifier = 12345 ; <nl> + uint32_t value ; <nl> + Int ( ) = default ; <nl> + Int ( uint32_t value ) : value ( value ) { } <nl> + template < class Ar > <nl> + void serialize ( Ar & ar ) { <nl> + serializer ( ar , value ) ; <nl> + } <nl> + } ; <nl> + } / / namespace <nl> + <nl> TEST_CASE ( " / flow / FlatBuffers / ErrorOr " ) { <nl> { <nl> - ErrorOr < int > in ( worker_removed ( ) ) ; <nl> - ErrorOr < int > out ; <nl> + ErrorOr < Int > in ( worker_removed ( ) ) ; <nl> + ErrorOr < Int > out ; <nl> ObjectWriter writer ( Unversioned ( ) ) ; <nl> writer . serialize ( in ) ; <nl> Standalone < StringRef > copy = writer . toStringRef ( ) ; <nl> TEST_CASE ( " / flow / FlatBuffers / ErrorOr " ) { <nl> ASSERT ( out . getError ( ) . code ( ) = = in . getError ( ) . code ( ) ) ; <nl> } <nl> { <nl> - ErrorOr < uint32_t > in ( deterministicRandom ( ) - > randomUInt32 ( ) ) ; <nl> - ErrorOr < uint32_t > out ; <nl> + ErrorOr < Int > in ( deterministicRandom ( ) - > randomUInt32 ( ) ) ; <nl> + ErrorOr < Int > out ; <nl> ObjectWriter writer ( Unversioned ( ) ) ; <nl> writer . serialize ( in ) ; <nl> Standalone < StringRef > copy = writer . toStringRef ( ) ; <nl> ArenaObjectReader reader ( copy . arena ( ) , copy , Unversioned ( ) ) ; <nl> reader . deserialize ( out ) ; <nl> ASSERT ( ! out . isError ( ) ) ; <nl> - ASSERT ( out . get ( ) = = in . get ( ) ) ; <nl> + ASSERT ( out . get ( ) . value = = in . get ( ) . value ) ; <nl> } <nl> return Void ( ) ; <nl> } <nl> <nl> TEST_CASE ( " / flow / FlatBuffers / Optional " ) { <nl> { <nl> - Optional < int > in ; <nl> - Optional < int > out ; <nl> + Optional < Int > in ; <nl> + Optional < Int > out ; <nl> ObjectWriter writer ( Unversioned ( ) ) ; <nl> writer . serialize ( in ) ; <nl> Standalone < StringRef > copy = writer . toStringRef ( ) ; <nl> TEST_CASE ( " / flow / FlatBuffers / Optional " ) { <nl> ASSERT ( ! out . present ( ) ) ; <nl> } <nl> { <nl> - Optional < uint32_t > in ( deterministicRandom ( ) - > randomUInt32 ( ) ) ; <nl> - Optional < uint32_t > out ; <nl> + Optional < Int > in ( deterministicRandom ( ) - > randomUInt32 ( ) ) ; <nl> + Optional < Int > out ; <nl> ObjectWriter writer ( Unversioned ( ) ) ; <nl> writer . serialize ( in ) ; <nl> Standalone < StringRef > copy = writer . toStringRef ( ) ; <nl> ArenaObjectReader reader ( copy . arena ( ) , copy , Unversioned ( ) ) ; <nl> reader . deserialize ( out ) ; <nl> ASSERT ( out . present ( ) ) ; <nl> - ASSERT ( out . get ( ) = = in . get ( ) ) ; <nl> + ASSERT ( out . get ( ) . value = = in . get ( ) . value ) ; <nl> } <nl> return Void ( ) ; <nl> } <nl> mmm a / flow / network . h <nl> ppp b / flow / network . h <nl> <nl> <nl> enum class TaskPriority { <nl> Max = 1000000 , <nl> + ASIOReactor = 20001 , <nl> RunCycleFunction = 20000 , <nl> FlushTrace = 10500 , <nl> WriteSocket = 10000 , <nl> mmm a / packaging / msi / FDBInstaller . wxs <nl> ppp b / packaging / msi / FDBInstaller . wxs <nl> <nl> <nl> < Wix xmlns = ' http : / / schemas . microsoft . com / wix / 2006 / wi ' > <nl> < Product Name = ' $ ( var . Title ) ' <nl> - Id = ' { E2FB8839 - 9C35 - 4E40 - AFB1 - 7409961781F7 } ' <nl> + Id = ' { 7AD1AE5E - FD5B - 42F3 - A638 - A81A963B1CE4 } ' <nl> UpgradeCode = ' { A95EA002 - 686E - 4164 - 8356 - C715B7F8B1C8 } ' <nl> Version = ' $ ( var . 
Version ) ' <nl> Manufacturer = ' $ ( var . Manufacturer ) ' <nl>
|
Merge pull request from etschannen / master
|
apple/foundationdb
|
8f912ca4a609275c135fb4c0dbd135e9de5a05f5
|
2019-09-03T18:35:14Z
|
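The FoundationDB change above has two instructive pieces. First, metric reads go through a new MetricUtil::getValueOrDefault, which tolerates a metric that has no live instance instead of dereferencing a null reference, and a DoubleMetric/DoubleMetricHandle pair is added for the new Net2.CountLaunchTime and Net2.CountReactTime values. A minimal sketch of the fallback pattern, using an illustrative std::map registry and a hypothetical SimpleMetric in place of FDB's reference-counted metric map:

#include <cstdint>
#include <iostream>
#include <map>
#include <memory>
#include <string>

// SimpleMetric is a hypothetical stand-in for FDB's ContinuousMetric /
// MetricUtil machinery; the registry is a plain map of shared_ptrs.
template <class T>
struct SimpleMetric {
    T value{};
    T getValue() const { return value; }

    // Registry of live metric instances, keyed by name.
    static std::map<std::string, std::shared_ptr<SimpleMetric>>& metricMap() {
        static std::map<std::string, std::shared_ptr<SimpleMetric>> m;
        return m;
    }

    // Core of the patch: look the metric up and fall back to a default
    // value when no instance exists, instead of dereferencing null.
    static T getValueOrDefault(const std::string& name, T defaultValue = T()) {
        auto it = metricMap().find(name);
        if (it != metricMap().end() && it->second) return it->second->getValue();
        return defaultValue;
    }
};

using Int64Metric = SimpleMetric<int64_t>;
using DoubleMetric = SimpleMetric<double>;  // analogue of the new typedef

int main() {
    Int64Metric::metricMap()["Net2.CountReads"] =
        std::make_shared<Int64Metric>(Int64Metric{42});
    std::cout << Int64Metric::getValueOrDefault("Net2.CountReads") << "\n";       // 42
    std::cout << DoubleMetric::getValueOrDefault("Net2.CountReactTime") << "\n";  // 0
}

Second, the flat_buffers.h hunk makes EnsureTable<T> expose a file_identifier only when T itself has one, by selecting the base class with std::conditional_t instead of hard-failing a static_assert. A compressed illustration of that conditional-member trick (HasId, WithId, and Wrapper are invented for the sketch):

#include <type_traits>

// Detection idiom: HasId<T> is true when T declares a static member id.
template <class T, class = void>
struct HasId : std::false_type {};
template <class T>
struct HasId<T, std::void_t<decltype(T::id)>> : std::true_type {};

template <class T>
struct WithId { static constexpr int id = T::id; };  // re-exports T's id
struct WithoutId {};                                 // contributes nothing

// Like the patched EnsureTable: the wrapper has an id member only when
// the wrapped type does, chosen via the base class.
template <class T>
struct Wrapper : std::conditional_t<HasId<T>::value, WithId<T>, WithoutId> {
    T t;
};

struct A { static constexpr int id = 7; };
struct B {};

static_assert(Wrapper<A>::id == 7);      // id forwarded from A
int main() { Wrapper<B> b{}; (void)b; }  // fine: no id member, still usable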
mmm a / atom / browser / api / atom_api_web_view_manager . cc <nl> ppp b / atom / browser / api / atom_api_web_view_manager . cc <nl> <nl> / / Use of this source code is governed by the MIT license that can be <nl> / / found in the LICENSE file . <nl> <nl> - # include " atom / browser / api / atom_api_web_contents . h " <nl> # include " atom / browser / web_contents_preferences . h " <nl> # include " atom / browser / web_view_manager . h " <nl> + # include " atom / common / native_mate_converters / content_converter . h " <nl> # include " atom / common / native_mate_converters / value_converter . h " <nl> # include " atom / common / node_includes . h " <nl> # include " content / public / browser / browser_context . h " <nl> <nl> <nl> using atom : : WebContentsPreferences ; <nl> <nl> - namespace mate { <nl> - <nl> - template < > <nl> - struct Converter < content : : WebContents * > { <nl> - static bool FromV8 ( v8 : : Isolate * isolate , v8 : : Local < v8 : : Value > val , <nl> - content : : WebContents * * out ) { <nl> - atom : : api : : WebContents * contents ; <nl> - if ( ! Converter < atom : : api : : WebContents * > : : FromV8 ( isolate , val , & contents ) ) <nl> - return false ; <nl> - * out = contents - > web_contents ( ) ; <nl> - return true ; <nl> - } <nl> - } ; <nl> - <nl> - } / / namespace mate <nl> - <nl> namespace { <nl> <nl> atom : : WebViewManager * GetWebViewManager ( content : : WebContents * web_contents ) { <nl>
|
Remove duplicated converter for content : : WebContents
|
electron/electron
|
4f21a50d235db3b2cc32b4135a0bc20f20951b83
|
2016-04-26T07:31:56Z
|
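The Electron commit deletes a file-local specialization of mate::Converter<content::WebContents*> and includes the shared one from content_converter.h instead; two identical specializations in separate translation units are duplicated maintenance at best and an ODR violation at worst. A toy illustration of the converter-specialization pattern itself (the Value type and the int converter are invented for the sketch; mate's real FromV8 works on v8::Local<v8::Value> with an isolate argument):

#include <iostream>
#include <string>

struct Value { std::string payload; };  // pretend V8 value, invented here

// Primary template left undefined: converting an unsupported type is a
// compile-time error rather than a silent misconversion.
template <typename T>
struct Converter;

// Exactly one specialization per convertible type. The Electron patch
// removes a second, duplicated specialization for content::WebContents*.
template <>
struct Converter<int> {
    static bool FromV8(const Value& val, int* out) {
        *out = static_cast<int>(val.payload.size());
        return true;
    }
};

int main() {
    int n = 0;
    if (Converter<int>::FromV8(Value{"hello"}, &n)) std::cout << n << "\n";  // 5
}

With this layout each type crossing the JS/C++ boundary has exactly one FromV8 definition, and any file needing the conversion includes the shared header rather than re-specializing.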
mmm a / modules / planning / conf / scenario / pull_over_config . pb . txt <nl> ppp b / modules / planning / conf / scenario / pull_over_config . pb . txt <nl> stage_config : { <nl> task_config : { <nl> task_type : OPEN_SPACE_PRE_STOP_DECIDER <nl> open_space_pre_stop_decider_config { <nl> - stop_type : PARKING <nl> + stop_type : PULL_OVER <nl> + rightaway_stop_distance : 1 . 0 <nl> } <nl> } <nl> task_config : { <nl> mmm a / modules / planning / tasks / deciders / open_space_decider / open_space_pre_stop_decider . cc <nl> ppp b / modules / planning / tasks / deciders / open_space_decider / open_space_pre_stop_decider . cc <nl> Status OpenSpacePreStopDecider : : Process ( <nl> AERROR < < msg ; <nl> return Status ( ErrorCode : : PLANNING_ERROR , msg ) ; <nl> } <nl> + SetParkingSpotStopFence ( target_s , frame , reference_line_info ) ; <nl> break ; <nl> case OpenSpacePreStopDeciderConfig : : PULL_OVER : <nl> if ( ! CheckPullOverPreStop ( frame , reference_line_info , & target_s ) ) { <nl> Status OpenSpacePreStopDecider : : Process ( <nl> AERROR < < msg ; <nl> return Status ( ErrorCode : : PLANNING_ERROR , msg ) ; <nl> } <nl> + SetPullOverStopFence ( target_s , frame , reference_line_info ) ; <nl> break ; <nl> default : <nl> const std : : string msg = " This stop type not implemented " ; <nl> AERROR < < msg ; <nl> return Status ( ErrorCode : : PLANNING_ERROR , msg ) ; <nl> } <nl> - SetStopFence ( target_s , frame , reference_line_info ) ; <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> bool OpenSpacePreStopDecider : : CheckParkingSpotPreStop ( <nl> return true ; <nl> } <nl> <nl> - void OpenSpacePreStopDecider : : SetStopFence ( <nl> + void OpenSpacePreStopDecider : : SetParkingSpotStopFence ( <nl> const double target_s , Frame * const frame , <nl> ReferenceLineInfo * const reference_line_info ) { <nl> const auto & nearby_path = reference_line_info - > reference_line ( ) . map_path ( ) ; <nl> void OpenSpacePreStopDecider : : SetStopFence ( <nl> reference_line_info ) ; <nl> } <nl> <nl> + void OpenSpacePreStopDecider : : SetPullOverStopFence ( <nl> + const double target_s , Frame * const frame , <nl> + ReferenceLineInfo * const reference_line_info ) { <nl> + const auto & nearby_path = reference_line_info - > reference_line ( ) . map_path ( ) ; <nl> + const double adc_front_edge_s = reference_line_info - > AdcSlBoundary ( ) . end_s ( ) ; <nl> + const VehicleState & vehicle_state = frame - > vehicle_state ( ) ; <nl> + double stop_line_s = 0 . 0 ; <nl> + double stop_distance_to_target = <nl> + open_space_pre_stop_decider_config_ . stop_distance_to_target ( ) ; <nl> + double static_linear_velocity_epsilon = 1 . 0e - 2 ; <nl> + CHECK_GE ( stop_distance_to_target , 1 . 0e - 8 ) ; <nl> + double target_vehicle_offset = target_s - adc_front_edge_s ; <nl> + if ( target_vehicle_offset > stop_distance_to_target ) { <nl> + stop_line_s = target_s - stop_distance_to_target ; <nl> + } else { <nl> + if ( ! frame - > open_space_info ( ) . pre_stop_rightaway_flag ( ) ) { <nl> + / / TODO ( Jinyun ) Use constant comfortable deacceleration rather than <nl> + / / distance by config to set stop fence <nl> + stop_line_s = <nl> + adc_front_edge_s + <nl> + open_space_pre_stop_decider_config_ . rightaway_stop_distance ( ) ; <nl> + if ( std : : abs ( vehicle_state . linear_velocity ( ) ) < <nl> + static_linear_velocity_epsilon ) { <nl> + stop_line_s = adc_front_edge_s ; <nl> + } <nl> + * ( frame - > mutable_open_space_info ( ) - > mutable_pre_stop_rightaway_point ( ) ) = <nl> + nearby_path . 
GetSmoothPoint ( stop_line_s ) ; <nl> + frame - > mutable_open_space_info ( ) - > set_pre_stop_rightaway_flag ( true ) ; <nl> + } else { <nl> + double stop_point_s = 0 . 0 ; <nl> + double stop_point_l = 0 . 0 ; <nl> + nearby_path . GetNearestPoint ( <nl> + frame - > open_space_info ( ) . pre_stop_rightaway_point ( ) , & stop_point_s , <nl> + & stop_point_l ) ; <nl> + stop_line_s = stop_point_s ; <nl> + } <nl> + } <nl> + <nl> + const std : : string stop_wall_id = OPEN_SPACE_STOP_ID ; <nl> + std : : vector < std : : string > wait_for_obstacles ; <nl> + frame - > mutable_open_space_info ( ) - > set_open_space_pre_stop_fence_s ( <nl> + stop_line_s ) ; <nl> + util : : BuildStopDecision ( stop_wall_id , stop_line_s , 0 . 0 , <nl> + StopReasonCode : : STOP_REASON_PRE_OPEN_SPACE_STOP , <nl> + wait_for_obstacles , " OpenSpacePreStopDecider " , frame , <nl> + reference_line_info ) ; <nl> + } <nl> } / / namespace planning <nl> } / / namespace apollo <nl> mmm a / modules / planning / tasks / deciders / open_space_decider / open_space_pre_stop_decider . h <nl> ppp b / modules / planning / tasks / deciders / open_space_decider / open_space_pre_stop_decider . h <nl> class OpenSpacePreStopDecider : public Decider { <nl> ReferenceLineInfo * const reference_line_info , <nl> double * target_s ) ; <nl> <nl> - void SetStopFence ( const double target_s , Frame * const frame , <nl> + void SetParkingSpotStopFence ( const double target_s , Frame * const frame , <nl> + ReferenceLineInfo * const reference_line_info ) ; <nl> + <nl> + void SetPullOverStopFence ( const double target_s , Frame * const frame , <nl> ReferenceLineInfo * const reference_line_info ) ; <nl> <nl> private : <nl>
|
Planning : fix open space pre stop in pull over
|
ApolloAuto/apollo
|
80955469a268beecc3b4e1f42f3dbcb8ca862475
|
2019-06-07T19:30:33Z
|
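The Apollo patch splits the old SetStopFence into parking-spot and pull-over variants. The pull-over variant latches a "right-away" stop point on the first planning cycle in which the vehicle is already within stop_distance_to_target of the target, then reuses that point on later cycles so the fence does not creep forward with the ego's front edge. A simplified sketch of the latch (names are illustrative; the real code stores a smoothed map point and re-projects it each cycle with GetNearestPoint rather than caching a station value):

#include <cmath>
#include <iostream>

// Illustrative names, not Apollo's actual API. The state persists across
// planning cycles, as open_space_info does in the real frame.
struct PreStopState {
    bool rightaway_flag = false;
    double rightaway_stop_s = 0.0;  // latched stop-line station (s)
};

double PullOverStopLineS(double target_s, double adc_front_edge_s,
                         double speed, PreStopState& state,
                         double stop_distance_to_target = 5.0,
                         double rightaway_stop_distance = 1.0) {
    const double kStaticSpeedEps = 1e-2;
    // Normal case: far enough away to place the fence short of the target.
    if (target_s - adc_front_edge_s > stop_distance_to_target) {
        return target_s - stop_distance_to_target;
    }
    if (!state.rightaway_flag) {
        // First cycle inside the threshold: latch a fence slightly ahead,
        // or exactly at the front edge if already standing still.
        state.rightaway_stop_s =
            std::abs(speed) < kStaticSpeedEps
                ? adc_front_edge_s
                : adc_front_edge_s + rightaway_stop_distance;
        state.rightaway_flag = true;
    }
    return state.rightaway_stop_s;  // reuse the latched fence afterwards
}

int main() {
    PreStopState state;
    std::cout << PullOverStopLineS(100.0, 90.0, 3.0, state) << "\n";  // 95
    std::cout << PullOverStopLineS(100.0, 96.0, 3.0, state) << "\n";  // 97, latched
    std::cout << PullOverStopLineS(100.0, 96.5, 1.0, state) << "\n";  // 97, reused
}

The TODO in the diff notes that the 1.0 m rightaway_stop_distance should eventually be derived from a comfortable deceleration rather than read from config.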
mmm a / src / mongo / db / storage / mmap_v1 / repair_database . cpp <nl> ppp b / src / mongo / db / storage / mmap_v1 / repair_database . cpp <nl> <nl> # include " mongo / util / file_allocator . h " <nl> # include " mongo / util / log . h " <nl> # include " mongo / util / mmap . h " <nl> + # include " mongo / util / scopeguard . h " <nl> <nl> namespace mongo { <nl> <nl> namespace mongo { <nl> <nl> scoped_ptr < MMAPV1DatabaseCatalogEntry > dbEntry ; <nl> scoped_ptr < Database > tempDatabase ; <nl> + <nl> + / / Must syncDataAndTruncateJournal before closing files as done by <nl> + / / MMAPV1DatabaseCatalogEntry ' s destructor . <nl> + ON_BLOCK_EXIT ( & RecoveryUnit : : syncDataAndTruncateJournal , txn - > recoveryUnit ( ) ) ; <nl> + <nl> { <nl> WriteUnitOfWork wunit ( txn ) ; <nl> dbEntry . reset ( new MMAPV1DatabaseCatalogEntry ( txn , <nl>
|
Fix repair5 . js failures when killOp comes after first file is allocated
|
mongodb/mongo
|
90d025ab9387d08cbb6b2bf369358653ae8977be
|
2014-08-26T18:01:51Z
|
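The MongoDB fix is a scope-guard placement: ON_BLOCK_EXIT registers RecoveryUnit::syncDataAndTruncateJournal so it runs whenever repairDatabase's scope unwinds, including the early exit when killOp interrupts the repair. Because C++ destroys locals in reverse declaration order, declaring the guard after the scoped_ptrs guarantees the journal is truncated before MMAPV1DatabaseCatalogEntry's destructor closes the data files. A self-contained sketch of the idiom with stand-in types (mongo's real macro lives in util/scopeguard.h):

#include <iostream>
#include <utility>

// Minimal scope guard in the spirit of ON_BLOCK_EXIT: run a callable when
// the enclosing block exits, normally or via exception.
template <class F>
class ScopeGuard {
public:
    explicit ScopeGuard(F f) : f_(std::move(f)) {}
    ~ScopeGuard() { f_(); }
    ScopeGuard(const ScopeGuard&) = delete;
    ScopeGuard& operator=(const ScopeGuard&) = delete;
private:
    F f_;
};

struct RecoveryUnit {
    void syncDataAndTruncateJournal() {
        std::cout << "journal synced and truncated\n";
    }
};

int main() {
    RecoveryUnit ru;
    {
        struct FileHolder {  // stand-in for dbEntry: its destructor closes files
            ~FileHolder() { std::cout << "data files closed\n"; }
        } dbEntry;
        // Declared after dbEntry, so destroyed before it: the sync is
        // guaranteed to run before the destructor above closes the files.
        ScopeGuard guard([&ru] { ru.syncDataAndTruncateJournal(); });
        std::cout << "repair work (may throw or return early)\n";
    }
}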
mmm a / osquery / tables / system / darwin / usb_devices . cpp <nl> ppp b / osquery / tables / system / darwin / usb_devices . cpp <nl> <nl> * <nl> * / <nl> <nl> + # include < iomanip > <nl> + # include < sstream > <nl> + <nl> # include < IOKit / IOKitLib . h > <nl> # include < IOKit / usb / IOUSBLib . h > <nl> <nl> std : : string getUSBProperty ( const CFMutableDictionaryRef & details , <nl> return " " ; <nl> } <nl> <nl> + inline void idToHex ( std : : string & id ) { <nl> + int base = AS_LITERAL ( int , id ) ; <nl> + std : : stringstream hex_id ; <nl> + hex_id < < std : : hex < < std : : setw ( 4 ) < < std : : setfill ( ' 0 ' ) < < ( base & 0xFFFF ) ; <nl> + id = hex_id . str ( ) ; <nl> + } <nl> + <nl> void genUSBDevice ( const io_service_t & device , QueryData & results ) { <nl> Row r ; <nl> <nl> void genUSBDevice ( const io_service_t & device , QueryData & results ) { <nl> r [ " usb_port " ] = getUSBProperty ( details , " PortNum " ) ; <nl> <nl> r [ " model " ] = getUSBProperty ( details , " USB Product Name " ) ; <nl> + if ( r . at ( " model " ) . size ( ) = = 0 ) { <nl> + / / Could not find the model name from IOKit , use the label . <nl> + io_name_t name ; <nl> + if ( IORegistryEntryGetName ( device , name ) = = KERN_SUCCESS ) { <nl> + r [ " model " ] = std : : string ( name ) ; <nl> + } <nl> + } <nl> + <nl> r [ " model_id " ] = getUSBProperty ( details , " idProduct " ) ; <nl> r [ " vendor " ] = getUSBProperty ( details , " USB Vendor Name " ) ; <nl> r [ " vendor_id " ] = getUSBProperty ( details , " idVendor " ) ; <nl> - r [ " serial " ] = getUSBProperty ( details , " iSerialNumber " ) ; <nl> + <nl> + r [ " serial " ] = getUSBProperty ( details , " USB Serial Number " ) ; <nl> + if ( r . at ( " serial " ) . size ( ) = = 0 ) { <nl> + r [ " serial " ] = getUSBProperty ( details , " iSerialNumber " ) ; <nl> + } <nl> <nl> auto non_removable = getUSBProperty ( details , " non - removable " ) ; <nl> r [ " removable " ] = ( non_removable = = " yes " ) ? " 0 " : " 1 " ; <nl> <nl> - results . push_back ( r ) ; <nl> + if ( r . at ( " vendor_id " ) . size ( ) > 0 & & r . at ( " model_id " ) . size ( ) > 0 ) { <nl> + / / Only add the USB device on OS X if it contains a Vendor and Model ID . <nl> + / / On OS X 10 . 11 the simulation hubs are PCI devices within IOKit and <nl> + / / lack the useful USB metadata . <nl> + idToHex ( r [ " vendor_id " ] ) ; <nl> + idToHex ( r [ " model_id " ] ) ; <nl> + results . push_back ( r ) ; <nl> + } <nl> CFRelease ( details ) ; <nl> } <nl> <nl>
|
[ Fix ] Improve OS X USB device reporting
|
osquery/osquery
|
014e504fba90468a3758a4baeb9c54e8b4395b9c
|
2015-08-27T23:36:54Z
|
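In the osquery patch, idToHex rewrites the decimal idVendor/idProduct strings IOKit reports into the conventional zero-padded four-digit hex form, and the final guard skips IOKit entries that lack both IDs (the OS X 10.11 simulation hubs surfaced as PCI devices without USB metadata). A standalone version of the conversion, substituting std::stoi for osquery's AS_LITERAL helper:

#include <iomanip>
#include <iostream>
#include <sstream>
#include <string>

// std::stoi stands in for osquery's AS_LITERAL conversion helper.
inline void idToHex(std::string& id) {
    int base = std::stoi(id);
    std::stringstream hex_id;
    // Render as four zero-padded hex digits, the usual lsusb-style form.
    hex_id << std::hex << std::setw(4) << std::setfill('0') << (base & 0xFFFF);
    id = hex_id.str();
}

int main() {
    std::string vendor_id = "1452";  // Apple's decimal idVendor
    idToHex(vendor_id);
    std::cout << vendor_id << "\n";  // prints "05ac"
}

Masking with 0xFFFF keeps the result inside the 16-bit range USB allocates for vendor and product IDs, so the output always fits in four hex digits.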