| diff (string, 41 to 2.03M chars) | msg (string, 1 to 1.5k chars, ⌀ may be null) | repo (string, 5 to 40 chars) | sha (string, 40 chars) | time (string, 20 chars) |
|---|---|---|---|---|
mmm a / buildscripts / resmokeconfig / suites / sharding_continuous_config_stepdown . yml <nl> ppp b / buildscripts / resmokeconfig / suites / sharding_continuous_config_stepdown . yml <nl> selector : <nl> # TODO ( SERVER - 42143 ) : The tests below need transactional support for refineCollectionShardKey . <nl> - jstests / sharding / refine_collection_shard_key_basic . js <nl> - jstests / sharding / refine_collection_shard_key_jumbo . js <nl> + - jstests / sharding / refine_collection_shard_key_drops_chunks . js <nl> <nl> executor : <nl> config : <nl> mmm a / buildscripts / resmokeconfig / suites / sharding_last_stable_mongos_and_mixed_shards . yml <nl> ppp b / buildscripts / resmokeconfig / suites / sharding_last_stable_mongos_and_mixed_shards . yml <nl> selector : <nl> - jstests / sharding / explain_exec_stats_on_shards . js <nl> - jstests / sharding / refine_collection_shard_key_basic . js <nl> - jstests / sharding / refine_collection_shard_key_jumbo . js <nl> + - jstests / sharding / refine_collection_shard_key_drops_chunks . js <nl> - jstests / sharding / move_primary_clone_test . js <nl> - jstests / sharding / database_versioning_safe_secondary_reads . js <nl> - jstests / sharding / clone_catalog_data . js <nl> new file mode 100644 <nl> index 000000000000 . . 41d8fd1c27d1 <nl> mmm / dev / null <nl> ppp b / jstests / sharding / refine_collection_shard_key_drops_chunks . js <nl> <nl> + / / <nl> + / / Tests that refineCollectionShardKey deletes all existing chunks in the persisted routing table <nl> + / / cache . <nl> + / / <nl> + <nl> + ( function ( ) { <nl> + ' use strict ' ; <nl> + load ( ' jstests / sharding / libs / sharded_transactions_helpers . js ' ) ; <nl> + <nl> + const st = new ShardingTest ( { shards : 1 } ) ; <nl> + const mongos = st . s0 ; <nl> + const shard = st . shard0 ; <nl> + const kDbName = ' db ' ; <nl> + const kCollName = ' foo ' ; <nl> + const kNsName = kDbName + ' . ' + kCollName ; <nl> + const kConfigCacheChunks = ' config . cache . chunks . ' + kNsName ; <nl> + const oldKeyDoc = { <nl> + a : 1 , <nl> + b : 1 <nl> + } ; <nl> + const newKeyDoc = { <nl> + a : 1 , <nl> + b : 1 , <nl> + c : 1 , <nl> + d : 1 <nl> + } ; <nl> + <nl> + assert . commandWorked ( mongos . adminCommand ( { enableSharding : kDbName } ) ) ; <nl> + assert . commandWorked ( mongos . adminCommand ( { shardCollection : kNsName , key : oldKeyDoc } ) ) ; <nl> + assert . commandWorked ( mongos . getCollection ( kNsName ) . createIndex ( newKeyDoc ) ) ; <nl> + <nl> + / / Ensure that there exist three chunks belonging to ' db . foo ' covering the entire key range . <nl> + / / <nl> + / / Chunk 1 : { a : MinKey , b : MinKey } - - > > { a : 0 , b : 0 } <nl> + / / Chunk 2 : { a : 0 , b : 0 } - - > > { a : 5 , b : 5 } <nl> + / / Chunk 3 : { a : 5 , b : 5 } - - > > { a : MaxKey , b : MaxKey } <nl> + assert . commandWorked ( mongos . adminCommand ( { split : kNsName , middle : { a : 0 , b : 0 } } ) ) ; <nl> + assert . commandWorked ( mongos . adminCommand ( { split : kNsName , middle : { a : 5 , b : 5 } } ) ) ; <nl> + <nl> + / / Flush the routing table cache and verify that ' config . cache . chunks . db . foo ' is as expected <nl> + / / before refineCollectionShardKey . <nl> + assert . commandWorked ( shard . adminCommand ( { _flushRoutingTableCacheUpdates : kNsName } ) ) ; <nl> + let chunkArr = shard . getCollection ( kConfigCacheChunks ) . find ( { } ) . sort ( { min : 1 } ) . toArray ( ) ; <nl> + assert . eq ( 3 , chunkArr . length ) ; <nl> + assert . eq ( { a : MinKey , b : MinKey } , chunkArr [ 0 ] . 
_id ) ; <nl> + assert . eq ( { a : 0 , b : 0 } , chunkArr [ 0 ] . max ) ; <nl> + assert . eq ( { a : 0 , b : 0 } , chunkArr [ 1 ] . _id ) ; <nl> + assert . eq ( { a : 5 , b : 5 } , chunkArr [ 1 ] . max ) ; <nl> + assert . eq ( { a : 5 , b : 5 } , chunkArr [ 2 ] . _id ) ; <nl> + assert . eq ( { a : MaxKey , b : MaxKey } , chunkArr [ 2 ] . max ) ; <nl> + <nl> + assert . commandWorked ( mongos . adminCommand ( { refineCollectionShardKey : kNsName , key : newKeyDoc } ) ) ; <nl> + <nl> + / / Enable failpoint ' hangPersistCollectionAndChangedChunksAfterDropChunks ' and flush the routing <nl> + / / table cache . <nl> + assert . commandWorked ( shard . adminCommand ( { <nl> + configureFailPoint : ' hangPersistCollectionAndChangedChunksAfterDropChunks ' , <nl> + mode : ' alwaysOn ' <nl> + } ) ) ; <nl> + const awaitShellToFlushRoutingTableCacheUpdates = startParallelShell ( ( ) = > { <nl> + assert . commandWorked ( db . adminCommand ( { _flushRoutingTableCacheUpdates : ' db . foo ' } ) ) ; <nl> + } , st . rs0 . getPrimary ( ) . port ) ; <nl> + <nl> + / / Verify that all chunks belonging to ' db . foo ' have been deleted . <nl> + waitForFailpoint ( ' Hit hangPersistCollectionAndChangedChunksAfterDropChunks ' , 1 ) ; <nl> + chunkArr = shard . getCollection ( kConfigCacheChunks ) . find ( { } ) . sort ( { min : 1 } ) . toArray ( ) ; <nl> + assert . eq ( 0 , chunkArr . length ) ; <nl> + <nl> + / / Disable failpoint ' hangPersistCollectionAndChangedChunksAfterDropChunks ' and continue <nl> + / / flushing the routing table cache . <nl> + assert . commandWorked ( shard . adminCommand ( <nl> + { configureFailPoint : ' hangPersistCollectionAndChangedChunksAfterDropChunks ' , mode : ' off ' } ) ) ; <nl> + awaitShellToFlushRoutingTableCacheUpdates ( ) ; <nl> + <nl> + / / Verify that ' config . cache . chunks . db . foo ' is as expected after refineCollectionShardKey . <nl> + chunkArr = shard . getCollection ( kConfigCacheChunks ) . find ( { } ) . sort ( { min : 1 } ) . toArray ( ) ; <nl> + assert . eq ( 3 , chunkArr . length ) ; <nl> + assert . eq ( { a : MinKey , b : MinKey , c : MinKey , d : MinKey } , chunkArr [ 0 ] . _id ) ; <nl> + assert . eq ( { a : 0 , b : 0 , c : MinKey , d : MinKey } , chunkArr [ 0 ] . max ) ; <nl> + assert . eq ( { a : 0 , b : 0 , c : MinKey , d : MinKey } , chunkArr [ 1 ] . _id ) ; <nl> + assert . eq ( { a : 5 , b : 5 , c : MinKey , d : MinKey } , chunkArr [ 1 ] . max ) ; <nl> + assert . eq ( { a : 5 , b : 5 , c : MinKey , d : MinKey } , chunkArr [ 2 ] . _id ) ; <nl> + assert . eq ( { a : MaxKey , b : MaxKey , c : MaxKey , d : MaxKey } , chunkArr [ 2 ] . max ) ; <nl> + <nl> + st . stop ( ) ; <nl> + } ) ( ) ; <nl> \ No newline at end of file <nl> mmm a / src / mongo / db / s / shard_metadata_util . cpp <nl> ppp b / src / mongo / db / s / shard_metadata_util . cpp <nl> Status dropChunksAndDeleteCollectionsEntry ( OperationContext * opCtx , const Namesp <nl> } <nl> } <nl> <nl> + void dropChunks ( OperationContext * opCtx , const NamespaceString & nss ) { <nl> + DBDirectClient client ( opCtx ) ; <nl> + <nl> + / / Drop the config . chunks collection associated with namespace ' nss ' . <nl> + BSONObj result ; <nl> + if ( ! client . dropCollection ( ChunkType : : ShardNSPrefix + nss . ns ( ) , kLocalWriteConcern , & result ) ) { <nl> + auto status = getStatusFromCommandResult ( result ) ; <nl> + if ( status ! 
= ErrorCodes : : NamespaceNotFound ) { <nl> + uassertStatusOK ( status ) ; <nl> + } <nl> + } <nl> + <nl> + LOG ( 1 ) < < " Successfully cleared persisted chunk metadata for collection ' " < < nss < < " ' . " ; <nl> + } <nl> + <nl> Status deleteDatabasesEntry ( OperationContext * opCtx , StringData dbName ) { <nl> try { <nl> DBDirectClient client ( opCtx ) ; <nl> mmm a / src / mongo / db / s / shard_metadata_util . h <nl> ppp b / src / mongo / db / s / shard_metadata_util . h <nl> Status updateShardChunks ( OperationContext * opCtx , <nl> * / <nl> Status dropChunksAndDeleteCollectionsEntry ( OperationContext * opCtx , const NamespaceString & nss ) ; <nl> <nl> + / * * <nl> + * Drops locally persisted chunk metadata associated with ' nss ' : only drops the chunks collection . <nl> + * / <nl> + void dropChunks ( OperationContext * opCtx , const NamespaceString & nss ) ; <nl> + <nl> / * * <nl> * Deletes locally persisted database metadata associated with ' dbName ' : removes the databases <nl> * collection entry . <nl> mmm a / src / mongo / db / s / shard_server_catalog_cache_loader . cpp <nl> ppp b / src / mongo / db / s / shard_server_catalog_cache_loader . cpp <nl> <nl> # include " mongo / s / catalog / type_shard_database . h " <nl> # include " mongo / s / client / shard_registry . h " <nl> # include " mongo / s / grid . h " <nl> + # include " mongo / util / fail_point_service . h " <nl> # include " mongo / util / log . h " <nl> <nl> namespace mongo { <nl> using CollectionAndChangedChunks = CatalogCacheLoader : : CollectionAndChangedChunk <nl> <nl> namespace { <nl> <nl> + MONGO_FAIL_POINT_DEFINE ( hangPersistCollectionAndChangedChunksAfterDropChunks ) ; <nl> + <nl> AtomicWord < unsigned long long > taskIdGenerator { 0 } ; <nl> <nl> / * * <nl> ThreadPool : : Options makeDefaultThreadPoolOptions ( ) { <nl> return options ; <nl> } <nl> <nl> + void dropChunksIfEpochChanged ( OperationContext * opCtx , <nl> + const NamespaceString & nss , <nl> + const CollectionAndChangedChunks & collAndChunks , <nl> + const ChunkVersion & maxLoaderVersion ) { <nl> + if ( collAndChunks . epoch ! = maxLoaderVersion . epoch ( ) & & <nl> + maxLoaderVersion ! = ChunkVersion : : UNSHARDED ( ) ) { <nl> + / / If the collection has a new epoch , delete all existing chunks in the persisted routing <nl> + / / table cache . <nl> + dropChunks ( opCtx , nss ) ; <nl> + <nl> + if ( MONGO_FAIL_POINT ( hangPersistCollectionAndChangedChunksAfterDropChunks ) ) { <nl> + log ( ) < < " Hit hangPersistCollectionAndChangedChunksAfterDropChunks failpoint " ; <nl> + MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED ( <nl> + opCtx , hangPersistCollectionAndChangedChunksAfterDropChunks ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> / * * <nl> * Takes a CollectionAndChangedChunks object and persists the changes to the shard ' s metadata <nl> * collections . <nl> ThreadPool : : Options makeDefaultThreadPoolOptions ( ) { <nl> * / <nl> Status persistCollectionAndChangedChunks ( OperationContext * opCtx , <nl> const NamespaceString & nss , <nl> - const CollectionAndChangedChunks & collAndChunks ) { <nl> + const CollectionAndChangedChunks & collAndChunks , <nl> + const ChunkVersion & maxLoaderVersion ) { <nl> / / Update the collections collection entry for ' nss ' in case there are any new updates . <nl> ShardCollectionType update = ShardCollectionType ( <nl> nss , collAndChunks . epoch , collAndChunks . shardKeyPattern , collAndChunks . 
shardKeyIsUnique ) ; <nl> Status persistCollectionAndChangedChunks ( OperationContext * opCtx , <nl> } <nl> <nl> / / Update the chunks . <nl> + try { <nl> + dropChunksIfEpochChanged ( opCtx , nss , collAndChunks , maxLoaderVersion ) ; <nl> + } catch ( const DBException & ex ) { <nl> + return ex . toStatus ( ) ; <nl> + } <nl> + <nl> status = updateShardChunks ( opCtx , nss , collAndChunks . changedChunks , collAndChunks . epoch ) ; <nl> if ( ! status . isOK ( ) ) { <nl> return status ; <nl> void ShardServerCatalogCacheLoader : : _updatePersistedCollAndChunksMetadata ( <nl> } <nl> <nl> uassertStatusOKWithContext ( <nl> - persistCollectionAndChangedChunks ( opCtx , nss , * task . collectionAndChangedChunks ) , <nl> + persistCollectionAndChangedChunks ( <nl> + opCtx , nss , * task . collectionAndChangedChunks , task . minQueryVersion ) , <nl> str : : stream ( ) < < " Failed to update the persisted chunk metadata for collection ' " <nl> < < nss . ns ( ) < < " ' from ' " < < task . minQueryVersion . toString ( ) < < " ' to ' " <nl> < < task . maxQueryVersion . toString ( ) < < " ' . Will be retried . " ) ; <nl> | SERVER - 42152 Delete existing chunks on new epoch in persisted routing table cache | mongodb/mongo | 5cbe01663884b321c45145c29bb9d9668125392e | 2019-08-01T15:09:29Z |
mmm a / tests / test_browser . py <nl> ppp b / tests / test_browser . py <nl> <nl> - import BaseHTTPServer , multiprocessing , os , shutil , subprocess , unittest , zlib <nl> + import BaseHTTPServer , multiprocessing , os , shutil , subprocess , unittest , zlib , webbrowser , time , shlex <nl> from runner import BrowserCore , path_from_root <nl> from tools . shared import * <nl> <nl> - ' ' ' Enable this code to run in another browser than webbrowser detects as default <nl> - def run_in_other_browser ( url ) : <nl> - execute ( [ ' yourbrowser ' , url ] ) <nl> - webbrowser . open_new = run_in_other_browser <nl> - ' ' ' <nl> + # User can specify an environment variable EMSCRIPTEN_BROWSER to force the browser test suite to <nl> + # run using another browser command line than the default system browser . <nl> + emscripten_browser = os . environ . get ( ' EMSCRIPTEN_BROWSER ' ) <nl> + if emscripten_browser : <nl> + cmd = shlex . split ( emscripten_browser ) <nl> + def run_in_other_browser ( url ) : <nl> + Popen ( cmd + [ url ] ) <nl> + webbrowser . open_new = run_in_other_browser <nl> <nl> def test_chunked_synchronous_xhr_server ( support_byte_ranges , chunkSize , data , checksum ) : <nl> class ChunkedServerHandler ( BaseHTTPServer . BaseHTTPRequestHandler ) : <nl> def test_html_source_map ( self ) : <nl> cwd = self . get_dir ( ) ) . communicate ( ) <nl> assert os . path . exists ( html_file ) <nl> assert os . path . exists ( html_file + ' . map ' ) <nl> - import webbrowser , time <nl> webbrowser . open_new ( ' file : / / ' + html_file ) <nl> time . sleep ( 1 ) <nl> print ' ' ' <nl> | Merge pull request from juj / custom_harness_browser | emscripten-core/emscripten | b4c532b5443a34cafefd5c4e09019f861cbed06b | 2013-11-18T19:04:57Z |
mmm a / vsprojects / third_party / openssl / OpenSSL . mak <nl> ppp b / vsprojects / third_party / openssl / OpenSSL . mak <nl> EX_LIBS = ws2_32 . lib gdi32 . lib advapi32 . lib crypt32 . lib user32 . lib <nl> <nl> # The OpenSSL directory <nl> SRC_D = . <nl> - GEN_INC_D = . . \ . . \ vsprojects \ openssl <nl> + GEN_INC_D = . . \ . . \ vsprojects \ third_party \ openssl <nl> <nl> LINK = link <nl> LFLAGS = / nologo / subsystem : console / opt : ref / debug <nl> $ ( INC_D ) : <nl> # This needs to be invoked once , when the makefile is first constructed , or <nl> # after cleaning . <nl> init : $ ( TMP_D ) $ ( LIB_D ) $ ( INC_D ) $ ( INCO_D ) $ ( BIN_D ) $ ( TEST_D ) headers <nl> - $ ( CP ) " $ ( SRC_D ) / crypto / opensslconf . h " " $ ( INCO_D ) / opensslconf . h " <nl> <nl> headers : $ ( HEADER ) $ ( EXHEADER ) <nl> <nl> mmm a / vsprojects / vs2013 / build_openssl_x86 . bat <nl> ppp b / vsprojects / vs2013 / build_openssl_x86 . bat <nl> <nl> @ call " % VS120COMNTOOLS % \ . . \ . . \ vc \ vcvarsall . bat " x86 <nl> <nl> cd . . \ . . \ third_party \ openssl <nl> - nmake / F . . \ . . \ vsprojects \ third_party \ openssl \ OpenSSL . mak out32 \ ssleay32 . lib out32 \ libeay32 . lib <nl> + nmake / F . . \ . . \ vsprojects \ third_party \ openssl \ OpenSSL . mak init out32 \ ssleay32 . lib out32 \ libeay32 . lib <nl> + <nl> + pause <nl> | Fix for the exposed problems after trying a full clean build . | grpc/grpc | d8fd853cba184ecf398134c746858ad38bd32cb3 | 2014-12-17T01:03:01Z |
mmm a / core / bind / core_bind . cpp <nl> ppp b / core / bind / core_bind . cpp <nl> Error _Directory : : change_dir ( String p_dir ) { <nl> ERR_FAIL_COND_V_MSG ( ! d , ERR_UNCONFIGURED , " Directory must be opened before use . " ) ; <nl> return d - > change_dir ( p_dir ) ; <nl> } <nl> - String _Directory : : get_current_dir ( ) { <nl> + String _Directory : : get_current_dir ( bool p_include_drive ) { <nl> <nl> ERR_FAIL_COND_V_MSG ( ! d , " " , " Directory must be opened before use . " ) ; <nl> - return d - > get_current_dir ( ) ; <nl> + return d - > get_current_dir ( p_include_drive ) ; <nl> } <nl> Error _Directory : : make_dir ( String p_dir ) { <nl> <nl> void _Directory : : _bind_methods ( ) { <nl> ClassDB : : bind_method ( D_METHOD ( " get_drive " , " idx " ) , & _Directory : : get_drive ) ; <nl> ClassDB : : bind_method ( D_METHOD ( " get_current_drive " ) , & _Directory : : get_current_drive ) ; <nl> ClassDB : : bind_method ( D_METHOD ( " change_dir " , " todir " ) , & _Directory : : change_dir ) ; <nl> - ClassDB : : bind_method ( D_METHOD ( " get_current_dir " ) , & _Directory : : get_current_dir ) ; <nl> + ClassDB : : bind_method ( D_METHOD ( " get_current_dir " , " include_drive " ) , & _Directory : : get_current_dir , DEFVAL ( true ) ) ; <nl> ClassDB : : bind_method ( D_METHOD ( " make_dir " , " path " ) , & _Directory : : make_dir ) ; <nl> ClassDB : : bind_method ( D_METHOD ( " make_dir_recursive " , " path " ) , & _Directory : : make_dir_recursive ) ; <nl> ClassDB : : bind_method ( D_METHOD ( " file_exists " , " path " ) , & _Directory : : file_exists ) ; <nl> mmm a / core / bind / core_bind . h <nl> ppp b / core / bind / core_bind . h <nl> class _Directory : public Reference { <nl> int get_current_drive ( ) ; <nl> <nl> Error change_dir ( String p_dir ) ; / / Can be relative or absolute , return false on success . <nl> - String get_current_dir ( ) ; / / Return current dir location . <nl> + String get_current_dir ( bool p_include_drive = true ) ; / / Return current dir location . <nl> <nl> Error make_dir ( String p_dir ) ; <nl> Error make_dir_recursive ( String p_dir ) ; <nl> mmm a / core / io / file_access_pack . cpp <nl> ppp b / core / io / file_access_pack . cpp <nl> Error DirAccessPack : : change_dir ( String p_dir ) { <nl> return OK ; <nl> } <nl> <nl> - String DirAccessPack : : get_current_dir ( ) { <nl> + String DirAccessPack : : get_current_dir ( bool p_include_drive ) { <nl> <nl> PackedData : : PackedDir * pd = current ; <nl> String p = current - > name ; <nl> mmm a / core / io / file_access_pack . h <nl> ppp b / core / io / file_access_pack . h <nl> class DirAccessPack : public DirAccess { <nl> virtual String get_drive ( int p_drive ) ; <nl> <nl> virtual Error change_dir ( String p_dir ) ; <nl> - virtual String get_current_dir ( ) ; <nl> + virtual String get_current_dir ( bool p_include_drive = true ) ; <nl> <nl> virtual bool file_exists ( String p_file ) ; <nl> virtual bool dir_exists ( String p_dir ) ; <nl> mmm a / core / os / dir_access . cpp <nl> ppp b / core / os / dir_access . cpp <nl> int DirAccess : : get_current_drive ( ) { <nl> return 0 ; <nl> } <nl> <nl> + bool DirAccess : : drives_are_shortcuts ( ) { <nl> + <nl> + return false ; <nl> + } <nl> + <nl> static Error _erase_recursive ( DirAccess * da ) { <nl> <nl> List < String > dirs ; <nl> mmm a / core / os / dir_access . h <nl> ppp b / core / os / dir_access . 
h <nl> class DirAccess { <nl> virtual int get_drive_count ( ) = 0 ; <nl> virtual String get_drive ( int p_drive ) = 0 ; <nl> virtual int get_current_drive ( ) ; <nl> + virtual bool drives_are_shortcuts ( ) ; <nl> <nl> virtual Error change_dir ( String p_dir ) = 0 ; / / / < can be relative or absolute , return false on success <nl> - virtual String get_current_dir ( ) = 0 ; / / / < return current dir location <nl> + virtual String get_current_dir ( bool p_include_drive = true ) = 0 ; / / / < return current dir location <nl> virtual Error make_dir ( String p_dir ) = 0 ; <nl> virtual Error make_dir_recursive ( String p_dir ) ; <nl> virtual Error erase_contents_recursive ( ) ; / / super dangerous , use with care ! <nl> mmm a / doc / classes / Directory . xml <nl> ppp b / doc / classes / Directory . xml <nl> <nl> < method name = " get_current_dir " > <nl> < return type = " String " > <nl> < / return > <nl> + < argument index = " 0 " name = " include_drive " type = " bool " default = " true " > <nl> + < / argument > <nl> < description > <nl> Returns the absolute path to the currently opened directory ( e . g . [ code ] res : / / folder [ / code ] or [ code ] C : \ tmp \ folder [ / code ] ) . <nl> + On Windows , if [ code ] include_drive [ / code ] is [ code ] false [ / code ] , the leading drive specificator is omitted from the returned value ( e . g . [ code ] \ tmp \ folder [ / code ] ) . <nl> < / description > <nl> < / method > <nl> < method name = " get_current_drive " > <nl> mmm a / drivers / unix / dir_access_unix . cpp <nl> ppp b / drivers / unix / dir_access_unix . cpp <nl> String DirAccessUnix : : get_drive ( int p_drive ) { <nl> return list [ p_drive ] ; <nl> } <nl> <nl> + bool DirAccessUnix : : drives_are_shortcuts ( ) { <nl> + <nl> + return true ; <nl> + } <nl> + <nl> Error DirAccessUnix : : make_dir ( String p_dir ) { <nl> <nl> GLOBAL_LOCK_FUNCTION <nl> Error DirAccessUnix : : change_dir ( String p_dir ) { <nl> return OK ; <nl> } <nl> <nl> - String DirAccessUnix : : get_current_dir ( ) { <nl> + String DirAccessUnix : : get_current_dir ( bool p_include_drive ) { <nl> <nl> String base = _get_root_path ( ) ; <nl> if ( base ! = " " ) { <nl> mmm a / drivers / unix / dir_access_unix . h <nl> ppp b / drivers / unix / dir_access_unix . h <nl> class DirAccessUnix : public DirAccess { <nl> <nl> virtual int get_drive_count ( ) ; <nl> virtual String get_drive ( int p_drive ) ; <nl> + virtual bool drives_are_shortcuts ( ) ; <nl> <nl> virtual Error change_dir ( String p_dir ) ; / / / < can be relative or absolute , return false on success <nl> - virtual String get_current_dir ( ) ; / / / < return current dir location <nl> + virtual String get_current_dir ( bool p_include_drive = true ) ; / / / < return current dir location <nl> virtual Error make_dir ( String p_dir ) ; <nl> <nl> virtual bool file_exists ( String p_file ) ; <nl> mmm a / drivers / windows / dir_access_windows . cpp <nl> ppp b / drivers / windows / dir_access_windows . cpp <nl> Error DirAccessWindows : : make_dir ( String p_dir ) { <nl> return ERR_CANT_CREATE ; <nl> } <nl> <nl> - String DirAccessWindows : : get_current_dir ( ) { <nl> + String DirAccessWindows : : get_current_dir ( bool p_include_drive ) { <nl> <nl> String base = _get_root_path ( ) ; <nl> if ( base ! = " " ) { <nl> String DirAccessWindows : : get_current_dir ( ) { <nl> } else { <nl> } <nl> <nl> - return current_dir ; <nl> + if ( p_include_drive ) { <nl> + return current_dir ; <nl> + } else { <nl> + return current_dir . right ( current_dir . 
find ( " : " ) + 1 ) ; <nl> + } <nl> } <nl> <nl> bool DirAccessWindows : : file_exists ( String p_file ) { <nl> mmm a / drivers / windows / dir_access_windows . h <nl> ppp b / drivers / windows / dir_access_windows . h <nl> class DirAccessWindows : public DirAccess { <nl> virtual String get_drive ( int p_drive ) ; <nl> <nl> virtual Error change_dir ( String p_dir ) ; / / / < can be relative or absolute , return false on success <nl> - virtual String get_current_dir ( ) ; / / / < return current dir location <nl> + virtual String get_current_dir ( bool p_include_drive = true ) ; / / / < return current dir location <nl> <nl> virtual bool file_exists ( String p_file ) ; <nl> virtual bool dir_exists ( String p_dir ) ; <nl> mmm a / editor / editor_file_dialog . cpp <nl> ppp b / editor / editor_file_dialog . cpp <nl> Vector < String > EditorFileDialog : : get_selected_files ( ) const { <nl> <nl> void EditorFileDialog : : update_dir ( ) { <nl> <nl> - dir - > set_text ( dir_access - > get_current_dir ( ) ) ; <nl> + if ( drives - > is_visible ( ) ) { <nl> + drives - > select ( dir_access - > get_current_drive ( ) ) ; <nl> + } <nl> + dir - > set_text ( dir_access - > get_current_dir ( false ) ) ; <nl> <nl> / / Disable " Open " button only when selecting file ( s ) mode . <nl> get_ok ( ) - > set_disabled ( _is_open_should_be_disabled ( ) ) ; <nl> void EditorFileDialog : : add_filter ( const String & p_filter ) { <nl> <nl> String EditorFileDialog : : get_current_dir ( ) const { <nl> <nl> - return dir - > get_text ( ) ; <nl> + return dir_access - > get_current_dir ( ) ; <nl> } <nl> String EditorFileDialog : : get_current_file ( ) const { <nl> <nl> String EditorFileDialog : : get_current_file ( ) const { <nl> } <nl> String EditorFileDialog : : get_current_path ( ) const { <nl> <nl> - return dir - > get_text ( ) . plus_file ( file - > get_text ( ) ) ; <nl> + return dir_access - > get_current_dir ( ) . plus_file ( file - > get_text ( ) ) ; <nl> } <nl> void EditorFileDialog : : set_current_dir ( const String & p_dir ) { <nl> <nl> void EditorFileDialog : : _update_drives ( ) { <nl> drives - > hide ( ) ; <nl> } else { <nl> drives - > clear ( ) ; <nl> + Node * dp = drives - > get_parent ( ) ; <nl> + if ( dp ) { <nl> + dp - > remove_child ( drives ) ; <nl> + } <nl> + dp = dir_access - > drives_are_shortcuts ( ) ? shortcuts_container : drives_container ; <nl> + dp - > add_child ( drives ) ; <nl> drives - > show ( ) ; <nl> <nl> for ( int i = 0 ; i < dir_access - > get_drive_count ( ) ; i + + ) { <nl> EditorFileDialog : : EditorFileDialog ( ) { <nl> <nl> pathhb - > add_child ( memnew ( Label ( TTR ( " Path : " ) ) ) ) ; <nl> <nl> + drives_container = memnew ( HBoxContainer ) ; <nl> + pathhb - > add_child ( drives_container ) ; <nl> + <nl> dir = memnew ( LineEdit ) ; <nl> pathhb - > add_child ( dir ) ; <nl> dir - > set_h_size_flags ( SIZE_EXPAND_FILL ) ; <nl> EditorFileDialog : : EditorFileDialog ( ) { <nl> mode_list - > set_tooltip ( TTR ( " View items as a list . " ) ) ; <nl> pathhb - > add_child ( mode_list ) ; <nl> <nl> + shortcuts_container = memnew ( HBoxContainer ) ; <nl> + pathhb - > add_child ( shortcuts_container ) ; <nl> + <nl> drives = memnew ( OptionButton ) ; <nl> - pathhb - > add_child ( drives ) ; <nl> drives - > connect ( " item_selected " , callable_mp ( this , & EditorFileDialog : : _select_drive ) ) ; <nl> <nl> makedir = memnew ( Button ) ; <nl> mmm a / editor / editor_file_dialog . h <nl> ppp b / editor / editor_file_dialog . 
h <nl> class EditorFileDialog : public ConfirmationDialog { <nl> ToolButton * dir_next ; <nl> ToolButton * dir_up ; <nl> <nl> + HBoxContainer * drives_container ; <nl> + HBoxContainer * shortcuts_container ; <nl> OptionButton * drives ; <nl> ItemList * item_list ; <nl> PopupMenu * item_menu ; <nl> mmm a / platform / android / dir_access_jandroid . cpp <nl> ppp b / platform / android / dir_access_jandroid . cpp <nl> Error DirAccessJAndroid : : change_dir ( String p_dir ) { <nl> return OK ; <nl> } <nl> <nl> - String DirAccessJAndroid : : get_current_dir ( ) { <nl> + String DirAccessJAndroid : : get_current_dir ( bool p_include_drive ) { <nl> <nl> return " res : / / " + current_dir ; <nl> } <nl> mmm a / platform / android / dir_access_jandroid . h <nl> ppp b / platform / android / dir_access_jandroid . h <nl> class DirAccessJAndroid : public DirAccess { <nl> virtual String get_drive ( int p_drive ) ; <nl> <nl> virtual Error change_dir ( String p_dir ) ; / / / < can be relative or absolute , return false on success <nl> - virtual String get_current_dir ( ) ; / / / < return current dir location <nl> + virtual String get_current_dir ( bool p_include_drive = true ) ; / / / < return current dir location <nl> <nl> virtual bool file_exists ( String p_file ) ; <nl> virtual bool dir_exists ( String p_dir ) ; <nl> mmm a / scene / gui / file_dialog . cpp <nl> ppp b / scene / gui / file_dialog . cpp <nl> Vector < String > FileDialog : : get_selected_files ( ) const { <nl> <nl> void FileDialog : : update_dir ( ) { <nl> <nl> - dir - > set_text ( dir_access - > get_current_dir ( ) ) ; <nl> + dir - > set_text ( dir_access - > get_current_dir ( false ) ) ; <nl> + <nl> if ( drives - > is_visible ( ) ) { <nl> drives - > select ( dir_access - > get_current_drive ( ) ) ; <nl> } <nl> void FileDialog : : _update_drives ( ) { <nl> drives - > hide ( ) ; <nl> } else { <nl> drives - > clear ( ) ; <nl> + Node * dp = drives - > get_parent ( ) ; <nl> + if ( dp ) { <nl> + dp - > remove_child ( drives ) ; <nl> + } <nl> + dp = dir_access - > drives_are_shortcuts ( ) ? shortcuts_container : drives_container ; <nl> + dp - > add_child ( drives ) ; <nl> drives - > show ( ) ; <nl> <nl> for ( int i = 0 ; i < dir_access - > get_drive_count ( ) ; i + + ) { <nl> FileDialog : : FileDialog ( ) { <nl> hbc - > add_child ( dir_up ) ; <nl> dir_up - > connect ( " pressed " , callable_mp ( this , & FileDialog : : _go_up ) ) ; <nl> <nl> + hbc - > add_child ( memnew ( Label ( RTR ( " Path : " ) ) ) ) ; <nl> + <nl> + drives_container = memnew ( HBoxContainer ) ; <nl> + hbc - > add_child ( drives_container ) ; <nl> + <nl> drives = memnew ( OptionButton ) ; <nl> - hbc - > add_child ( drives ) ; <nl> drives - > connect ( " item_selected " , callable_mp ( this , & FileDialog : : _select_drive ) ) ; <nl> <nl> - hbc - > add_child ( memnew ( Label ( RTR ( " Path : " ) ) ) ) ; <nl> dir = memnew ( LineEdit ) ; <nl> hbc - > add_child ( dir ) ; <nl> dir - > set_h_size_flags ( SIZE_EXPAND_FILL ) ; <nl> FileDialog : : FileDialog ( ) { <nl> show_hidden - > connect ( " toggled " , callable_mp ( this , & FileDialog : : set_show_hidden_files ) ) ; <nl> hbc - > add_child ( show_hidden ) ; <nl> <nl> + shortcuts_container = memnew ( HBoxContainer ) ; <nl> + hbc - > add_child ( shortcuts_container ) ; <nl> + <nl> makedir = memnew ( Button ) ; <nl> makedir - > set_text ( RTR ( " Create Folder " ) ) ; <nl> makedir - > connect ( " pressed " , callable_mp ( this , & FileDialog : : _make_dir ) ) ; <nl> mmm a / scene / gui / file_dialog . 
h <nl> ppp b / scene / gui / file_dialog . h <nl> class FileDialog : public ConfirmationDialog { <nl> VBoxContainer * vbox ; <nl> Mode mode ; <nl> LineEdit * dir ; <nl> + HBoxContainer * drives_container ; <nl> + HBoxContainer * shortcuts_container ; <nl> OptionButton * drives ; <nl> Tree * tree ; <nl> HBoxContainer * file_box ; <nl> | Improve UX of drive letters | godotengine/godot | aee586553a4be72b72b669fb489fae72337ab7ad | 2020-03-03T09:38:34Z |
mmm a / test / pages / components / recycler . vue <nl> ppp b / test / pages / components / recycler . vue <nl> <nl> : show - scrollbar = " showScrollbar " : scrollable = " scrollable " <nl> @ scroll = " recylerScroll " @ loadmore = " loadmore " loadmoreoffset = 3000 <nl> > <nl> - < refresh class = " refresh " @ refresh = " onrefresh " @ pullingdown = " onpullingdown " : display = " refreshing ? ' show ' : ' hide ' " > <nl> + < ! - - < refresh class = " refresh " @ refresh = " onrefresh " @ pullingdown = " onpullingdown " : display = " refreshing ? ' show ' : ' hide ' " > <nl> < loading - indicator class = " indicator " > < / loading - indicator > <nl> < text class = " refreshText " > { { refreshText } } < / text > <nl> - < / refresh > <nl> + < / refresh > - - > <nl> < header class = " header " ref = " header " v - if = " showHeader " > <nl> - < image class = " banner " src = " https : / / gw . alicdn . com / tps / TB1ESN1PFXXXXX1apXXXXXXXXXX - 1000 - 600 . jpg " resize = " cover " > <nl> - < div class = " bannerInfo " > <nl> + < div class = " banner " > <nl> + < image class = " absolute " src = " https : / / gw . alicdn . com / tps / TB1ESN1PFXXXXX1apXXXXXXXXXX - 1000 - 600 . jpg " resize = " cover " > < / image > <nl> + < div class = " bannerInfo " > <nl> < image class = " avatar " src = " https : / / gw . alicdn . com / tps / TB1EP9bPFXXXXbpXVXXXXXXXXXX - 150 - 110 . jpg " resize = " cover " > < / image > <nl> < text class = " name " > Adam Cat < / text > <nl> < div class = " titleWrap " > <nl> <nl> < div class = " bannerPhotoWrap " > <nl> < image class = " bannerPhoto " v - for = " photo in banner . photos " : src = " photo . src " > < / image > <nl> < / div > <nl> - < / image > <nl> + < / div > <nl> < / header > <nl> - < header class = " stickyHeader " @ click = " showOrRemoveHeader " > <nl> + < header class = " stickyHeader " > <nl> < div v - if = " stickyHeaderType = = = ' none ' " class = " stickyWrapper " > <nl> < text class = " stickyText " > Sticky Header < / text > <nl> < / div > <nl> <nl> width : 40 ; <nl> margin - right : 30 ; <nl> } <nl> + . absolute { <nl> + position : absolute ; <nl> + top : 0px ; <nl> + width : 750 ; <nl> + height : 377 ; <nl> + } <nl> . banner { <nl> height : 377 ; <nl> flex - direction : row ; <nl> | * [ android ] recycler demo bugfix | apache/incubator-weex | 5c543fa40dbb3240aa37e998962fcc3b3c22474a | 2017-03-28T11:13:42Z |
mmm a / src / csharp / ext / std + + compat . cc <nl> ppp b / src / csharp / ext / std + + compat . cc <nl> namespace std { <nl> <nl> / / CentOS 7 ( GLIBC_2 . 17 / GLIBCXX_3 . 4 . 19 ) doesn ' t have a following symbol <nl> / / which was added to GLIBCXX_3 . 4 . 20 . gRPC uses Debian 8 which has <nl> - / / GLIBCXX_3 . 4 . 20 when buliding . net artifacts so artifacts can have symbols <nl> + / / GLIBCXX_3 . 4 . 20 when building . net artifacts so artifacts can have symbols <nl> / / which are not available on CentOS 7 . <nl> / / To support CentOS 7 , missing symbols are provided as weak symbols . <nl> void __attribute__ ( ( weak ) ) __throw_out_of_range_fmt ( char const * fmt , . . . ) { <nl> | Update by review | grpc/grpc | 60f45833e91ef730142cadf02e3f4dfd245ee961 | 2019-12-16T19:54:24Z |
mmm a / libraries / wasm - jit / CMakeLists . txt <nl> ppp b / libraries / wasm - jit / CMakeLists . txt <nl> endif ( ) <nl> <nl> add_subdirectory ( Include / Inline ) <nl> <nl> - add_subdirectory ( Source / Emscripten ) <nl> + # add_subdirectory ( Source / Emscripten ) <nl> add_subdirectory ( Source / IR ) <nl> add_subdirectory ( Source / Logging ) <nl> add_subdirectory ( Source / Platform ) <nl> - add_subdirectory ( Source / Programs ) <nl> + # add_subdirectory ( Source / Programs ) <nl> add_subdirectory ( Source / Runtime ) <nl> add_subdirectory ( Source / WASM ) <nl> add_subdirectory ( Source / WAST ) <nl> | Disable building Programs and Emscripten stuff | EOSIO/eos | ce9a01ab6b150c7d00c8aba3cad73b8e02b84526 | 2019-04-15T19:51:09Z |
mmm a / src / core / console_user_server / include / menu_process_manager . hpp <nl> ppp b / src / core / console_user_server / include / menu_process_manager . hpp <nl> <nl> # pragma once <nl> <nl> # include " application_launcher . hpp " <nl> - # include " boost_utility . hpp " <nl> # include " configuration_monitor . hpp " <nl> <nl> namespace krbn { <nl> | update # include | pqrs-org/Karabiner-Elements | 81e9571417f06683c98c40b7a0b1d5f0bee3fb29 | 2018-08-12T14:48:30Z |
mmm a / python - package / lightgbm / basic . py <nl> ppp b / python - package / lightgbm / basic . py <nl> def _data_from_pandas ( data , feature_name , categorical_feature , pandas_categorica <nl> raise ValueError ( ' Input data must be 2 dimensional and non empty . ' ) <nl> if feature_name = = ' auto ' or feature_name is None : <nl> data = data . rename ( columns = str ) <nl> - cat_cols = data . select_dtypes ( include = [ ' category ' ] ) . columns <nl> + cat_cols = list ( data . select_dtypes ( include = [ ' category ' ] ) . columns ) <nl> + cat_cols_not_ordered = [ col for col in cat_cols if not data [ col ] . cat . ordered ] <nl> if pandas_categorical is None : # train dataset <nl> pandas_categorical = [ list ( data [ col ] . cat . categories ) for col in cat_cols ] <nl> else : <nl> def _data_from_pandas ( data , feature_name , categorical_feature , pandas_categorica <nl> for col , category in zip_ ( cat_cols , pandas_categorical ) : <nl> if list ( data [ col ] . cat . categories ) ! = list ( category ) : <nl> data [ col ] = data [ col ] . cat . set_categories ( category ) <nl> - if len ( cat_cols ) : # cat_cols is pandas Index object <nl> + if len ( cat_cols ) : # cat_cols is list <nl> data = data . copy ( ) # not alter origin DataFrame <nl> data [ cat_cols ] = data [ cat_cols ] . apply ( lambda x : x . cat . codes ) . replace ( { - 1 : np . nan } ) <nl> if categorical_feature is not None : <nl> if feature_name is None : <nl> feature_name = list ( data . columns ) <nl> if categorical_feature = = ' auto ' : <nl> - categorical_feature = list ( cat_cols ) <nl> + categorical_feature = cat_cols_not_ordered <nl> else : <nl> - categorical_feature = list ( categorical_feature ) + list ( cat_cols ) <nl> + categorical_feature = list ( categorical_feature ) + cat_cols_not_ordered <nl> if feature_name = = ' auto ' : <nl> feature_name = list ( data . columns ) <nl> data_dtypes = data . dtypes <nl> if not all ( dtype . name in PANDAS_DTYPE_MAPPER for dtype in data_dtypes ) : <nl> bad_fields = [ data . columns [ i ] for i , dtype in <nl> enumerate ( data_dtypes ) if dtype . name not in PANDAS_DTYPE_MAPPER ] <nl> - <nl> - msg = ( " DataFrame . dtypes for data must be int , float or bool . \ n " <nl> - " Did not expect the data types in fields " ) <nl> - raise ValueError ( msg + ' , ' . join ( bad_fields ) ) <nl> + raise ValueError ( " DataFrame . dtypes for data must be int , float or bool . \ n " <nl> + " Did not expect the data types in fields " <nl> + + ' , ' . join ( bad_fields ) ) <nl> data = data . values . astype ( ' float ' ) <nl> else : <nl> if feature_name = = ' auto ' : <nl> def __init__ ( self , data , label = None , reference = None , <nl> Categorical features . <nl> If list of int , interpreted as indices . <nl> If list of strings , interpreted as feature names ( need to specify ` ` feature_name ` ` as well ) . <nl> - If ' auto ' and data is pandas DataFrame , pandas categorical columns are used . <nl> + If ' auto ' and data is pandas DataFrame , pandas unordered categorical columns are used . <nl> All values in categorical features should be less than int32 max value ( 2147483647 ) . <nl> Large values could be memory consuming . Consider using consecutive integers starting from zero . <nl> All negative values in categorical features will be treated as missing values . <nl> mmm a / python - package / lightgbm / engine . py <nl> ppp b / python - package / lightgbm / engine . py <nl> def train ( params , train_set , num_boost_round = 100 , <nl> Categorical features . 
<nl> If list of int , interpreted as indices . <nl> If list of strings , interpreted as feature names ( need to specify ` ` feature_name ` ` as well ) . <nl> - If ' auto ' and data is pandas DataFrame , pandas categorical columns are used . <nl> + If ' auto ' and data is pandas DataFrame , pandas unordered categorical columns are used . <nl> All values in categorical features should be less than int32 max value ( 2147483647 ) . <nl> Large values could be memory consuming . Consider using consecutive integers starting from zero . <nl> All negative values in categorical features will be treated as missing values . <nl> def cv ( params , train_set , num_boost_round = 100 , <nl> Categorical features . <nl> If list of int , interpreted as indices . <nl> If list of strings , interpreted as feature names ( need to specify ` ` feature_name ` ` as well ) . <nl> - If ' auto ' and data is pandas DataFrame , pandas categorical columns are used . <nl> + If ' auto ' and data is pandas DataFrame , pandas unordered categorical columns are used . <nl> All values in categorical features should be less than int32 max value ( 2147483647 ) . <nl> Large values could be memory consuming . Consider using consecutive integers starting from zero . <nl> All negative values in categorical features will be treated as missing values . <nl> mmm a / python - package / lightgbm / sklearn . py <nl> ppp b / python - package / lightgbm / sklearn . py <nl> def fit ( self , X , y , <nl> Categorical features . <nl> If list of int , interpreted as indices . <nl> If list of strings , interpreted as feature names ( need to specify ` ` feature_name ` ` as well ) . <nl> - If ' auto ' and data is pandas DataFrame , pandas categorical columns are used . <nl> + If ' auto ' and data is pandas DataFrame , pandas unordered categorical columns are used . <nl> All values in categorical features should be less than int32 max value ( 2147483647 ) . <nl> Large values could be memory consuming . Consider using consecutive integers starting from zero . <nl> All negative values in categorical features will be treated as missing values . <nl> mmm a / tests / python_package_test / test_engine . py <nl> ppp b / tests / python_package_test / test_engine . py <nl> def test_template ( init_model = None , return_model = False ) : <nl> @ unittest . skipIf ( not lgb . compat . PANDAS_INSTALLED , ' pandas is not installed ' ) <nl> def test_pandas_categorical ( self ) : <nl> import pandas as pd <nl> + np . random . seed ( 42 ) # sometimes there is no difference how E col is treated ( cat or not cat ) <nl> X = pd . DataFrame ( { " A " : np . random . permutation ( [ ' a ' , ' b ' , ' c ' , ' d ' ] * 75 ) , # str <nl> " B " : np . random . permutation ( [ 1 , 2 , 3 ] * 100 ) , # int <nl> " C " : np . random . permutation ( [ 0 . 1 , 0 . 2 , - 0 . 1 , - 0 . 1 , 0 . 2 ] * 60 ) , # float <nl> - " D " : np . random . permutation ( [ True , False ] * 150 ) } ) # bool <nl> + " D " : np . random . permutation ( [ True , False ] * 150 ) , # bool <nl> + " E " : pd . Categorical ( np . random . permutation ( [ ' z ' , ' y ' , ' x ' , ' w ' , ' v ' ] * 60 ) , <nl> + ordered = True ) } ) # str and ordered categorical <nl> y = np . random . permutation ( [ 0 , 1 ] * 150 ) <nl> - X_test = pd . DataFrame ( { " A " : np . random . permutation ( [ ' a ' , ' b ' , ' e ' ] * 20 ) , <nl> + X_test = pd . DataFrame ( { " A " : np . random . permutation ( [ ' a ' , ' b ' , ' e ' ] * 20 ) , # unseen category <nl> " B " : np . random . 
permutation ( [ 1 , 3 ] * 30 ) , <nl> " C " : np . random . permutation ( [ 0 . 1 , - 0 . 1 , 0 . 2 , 0 . 2 ] * 15 ) , <nl> - " D " : np . random . permutation ( [ True , False ] * 30 ) } ) <nl> - cat_cols = [ ] <nl> - for col in [ " A " , " B " , " C " , " D " ] : <nl> - X [ col ] = X [ col ] . astype ( ' category ' ) <nl> - X_test [ col ] = X_test [ col ] . astype ( ' category ' ) <nl> - cat_cols . append ( X [ col ] . cat . categories . tolist ( ) ) <nl> + " D " : np . random . permutation ( [ True , False ] * 30 ) , <nl> + " E " : pd . Categorical ( pd . np . random . permutation ( [ ' z ' , ' y ' ] * 30 ) , <nl> + ordered = True ) } ) <nl> + np . random . seed ( ) # reset seed <nl> + cat_cols_actual = [ " A " , " B " , " C " , " D " ] <nl> + cat_cols_to_store = cat_cols_actual + [ " E " ] <nl> + X [ cat_cols_actual ] = X [ cat_cols_actual ] . astype ( ' category ' ) <nl> + X_test [ cat_cols_actual ] = X_test [ cat_cols_actual ] . astype ( ' category ' ) <nl> + cat_values = [ X [ col ] . cat . categories . tolist ( ) for col in cat_cols_to_store ] <nl> params = { <nl> ' objective ' : ' binary ' , <nl> ' metric ' : ' binary_logloss ' , <nl> ' verbose ' : - 1 <nl> } <nl> lgb_train = lgb . Dataset ( X , y ) <nl> - gbm0 = lgb . train ( params , lgb_train , num_boost_round = 10 , verbose_eval = False ) <nl> + gbm0 = lgb . train ( params , lgb_train , num_boost_round = 10 ) <nl> pred0 = gbm0 . predict ( X_test ) <nl> lgb_train = lgb . Dataset ( X , pd . DataFrame ( y ) ) # also test that label can be one - column pd . DataFrame <nl> - gbm1 = lgb . train ( params , lgb_train , num_boost_round = 10 , verbose_eval = False , <nl> - categorical_feature = [ 0 ] ) <nl> + gbm1 = lgb . train ( params , lgb_train , num_boost_round = 10 , categorical_feature = [ 0 ] ) <nl> pred1 = gbm1 . predict ( X_test ) <nl> lgb_train = lgb . Dataset ( X , pd . Series ( y ) ) # also test that label can be pd . Series <nl> - gbm2 = lgb . train ( params , lgb_train , num_boost_round = 10 , verbose_eval = False , <nl> - categorical_feature = [ ' A ' ] ) <nl> + gbm2 = lgb . train ( params , lgb_train , num_boost_round = 10 , categorical_feature = [ ' A ' ] ) <nl> pred2 = gbm2 . predict ( X_test ) <nl> lgb_train = lgb . Dataset ( X , y ) <nl> - gbm3 = lgb . train ( params , lgb_train , num_boost_round = 10 , verbose_eval = False , <nl> - categorical_feature = [ ' A ' , ' B ' , ' C ' , ' D ' ] ) <nl> + gbm3 = lgb . train ( params , lgb_train , num_boost_round = 10 , categorical_feature = [ ' A ' , ' B ' , ' C ' , ' D ' ] ) <nl> pred3 = gbm3 . predict ( X_test ) <nl> gbm3 . save_model ( ' categorical . model ' ) <nl> gbm4 = lgb . Booster ( model_file = ' categorical . model ' ) <nl> def test_pandas_categorical ( self ) : <nl> pred5 = gbm4 . predict ( X_test ) <nl> gbm5 = lgb . Booster ( model_str = model_str ) <nl> pred6 = gbm5 . predict ( X_test ) <nl> + lgb_train = lgb . Dataset ( X , y ) <nl> + gbm6 = lgb . train ( params , lgb_train , num_boost_round = 10 , categorical_feature = [ ' E ' ] ) <nl> + pred7 = gbm6 . predict ( X_test ) <nl> np . testing . assert_almost_equal ( pred0 , pred1 ) <nl> np . testing . assert_almost_equal ( pred0 , pred2 ) <nl> np . testing . assert_almost_equal ( pred0 , pred3 ) <nl> np . testing . assert_almost_equal ( pred0 , pred4 ) <nl> np . testing . assert_almost_equal ( pred0 , pred5 ) <nl> np . testing . assert_almost_equal ( pred0 , pred6 ) <nl> - self . assertListEqual ( gbm0 . pandas_categorical , cat_cols ) <nl> - self . assertListEqual ( gbm1 . 
pandas_categorical , cat_cols ) <nl> - self . assertListEqual ( gbm2 . pandas_categorical , cat_cols ) <nl> - self . assertListEqual ( gbm3 . pandas_categorical , cat_cols ) <nl> - self . assertListEqual ( gbm4 . pandas_categorical , cat_cols ) <nl> - self . assertListEqual ( gbm5 . pandas_categorical , cat_cols ) <nl> + self . assertRaises ( AssertionError , <nl> + np . testing . assert_almost_equal , <nl> + pred0 , pred7 ) # ordered cat features aren ' t treated as cat features by default <nl> + self . assertListEqual ( gbm0 . pandas_categorical , cat_values ) <nl> + self . assertListEqual ( gbm1 . pandas_categorical , cat_values ) <nl> + self . assertListEqual ( gbm2 . pandas_categorical , cat_values ) <nl> + self . assertListEqual ( gbm3 . pandas_categorical , cat_values ) <nl> + self . assertListEqual ( gbm4 . pandas_categorical , cat_values ) <nl> + self . assertListEqual ( gbm5 . pandas_categorical , cat_values ) <nl> + self . assertListEqual ( gbm6 . pandas_categorical , cat_values ) <nl> <nl> def test_reference_chain ( self ) : <nl> X = np . random . normal ( size = ( 100 , 2 ) ) <nl> mmm a / tests / python_package_test / test_sklearn . py <nl> ppp b / tests / python_package_test / test_sklearn . py <nl> def test_sklearn_integration ( self ) : <nl> @ unittest . skipIf ( not lgb . compat . PANDAS_INSTALLED , ' pandas is not installed ' ) <nl> def test_pandas_categorical ( self ) : <nl> import pandas as pd <nl> + np . random . seed ( 42 ) # sometimes there is no difference how E col is treated ( cat or not cat ) <nl> X = pd . DataFrame ( { " A " : np . random . permutation ( [ ' a ' , ' b ' , ' c ' , ' d ' ] * 75 ) , # str <nl> " B " : np . random . permutation ( [ 1 , 2 , 3 ] * 100 ) , # int <nl> " C " : np . random . permutation ( [ 0 . 1 , 0 . 2 , - 0 . 1 , - 0 . 1 , 0 . 2 ] * 60 ) , # float <nl> - " D " : np . random . permutation ( [ True , False ] * 150 ) } ) # bool <nl> + " D " : np . random . permutation ( [ True , False ] * 150 ) , # bool <nl> + " E " : pd . Categorical ( np . random . permutation ( [ ' z ' , ' y ' , ' x ' , ' w ' , ' v ' ] * 60 ) , <nl> + ordered = True ) } ) # str and ordered categorical <nl> y = np . random . permutation ( [ 0 , 1 ] * 150 ) <nl> - X_test = pd . DataFrame ( { " A " : np . random . permutation ( [ ' a ' , ' b ' , ' e ' ] * 20 ) , <nl> + X_test = pd . DataFrame ( { " A " : np . random . permutation ( [ ' a ' , ' b ' , ' e ' ] * 20 ) , # unseen category <nl> " B " : np . random . permutation ( [ 1 , 3 ] * 30 ) , <nl> " C " : np . random . permutation ( [ 0 . 1 , - 0 . 1 , 0 . 2 , 0 . 2 ] * 15 ) , <nl> - " D " : np . random . permutation ( [ True , False ] * 30 ) } ) <nl> - cat_cols = [ ] <nl> - for col in [ " A " , " B " , " C " , " D " ] : <nl> - X [ col ] = X [ col ] . astype ( ' category ' ) <nl> - X_test [ col ] = X_test [ col ] . astype ( ' category ' ) <nl> - cat_cols . append ( X [ col ] . cat . categories . tolist ( ) ) <nl> + " D " : np . random . permutation ( [ True , False ] * 30 ) , <nl> + " E " : pd . Categorical ( pd . np . random . permutation ( [ ' z ' , ' y ' ] * 30 ) , <nl> + ordered = True ) } ) <nl> + np . random . seed ( ) # reset seed <nl> + cat_cols_actual = [ " A " , " B " , " C " , " D " ] <nl> + cat_cols_to_store = cat_cols_actual + [ " E " ] <nl> + X [ cat_cols_actual ] = X [ cat_cols_actual ] . astype ( ' category ' ) <nl> + X_test [ cat_cols_actual ] = X_test [ cat_cols_actual ] . astype ( ' category ' ) <nl> + cat_values = [ X [ col ] . cat . categories . 
tolist ( ) for col in cat_cols_to_store ] <nl> gbm0 = lgb . sklearn . LGBMClassifier ( ) . fit ( X , y ) <nl> pred0 = gbm0 . predict ( X_test ) <nl> + pred_prob = gbm0 . predict_proba ( X_test ) [ : , 1 ] <nl> gbm1 = lgb . sklearn . LGBMClassifier ( ) . fit ( X , pd . Series ( y ) , categorical_feature = [ 0 ] ) <nl> pred1 = gbm1 . predict ( X_test ) <nl> gbm2 = lgb . sklearn . LGBMClassifier ( ) . fit ( X , y , categorical_feature = [ ' A ' ] ) <nl> def test_pandas_categorical ( self ) : <nl> gbm3 . booster_ . save_model ( ' categorical . model ' ) <nl> gbm4 = lgb . Booster ( model_file = ' categorical . model ' ) <nl> pred4 = gbm4 . predict ( X_test ) <nl> - pred_prob = gbm0 . predict_proba ( X_test ) [ : , 1 ] <nl> + gbm5 = lgb . sklearn . LGBMClassifier ( ) . fit ( X , y , categorical_feature = [ ' E ' ] ) <nl> + pred5 = gbm5 . predict ( X_test ) <nl> np . testing . assert_almost_equal ( pred0 , pred1 ) <nl> np . testing . assert_almost_equal ( pred0 , pred2 ) <nl> np . testing . assert_almost_equal ( pred0 , pred3 ) <nl> np . testing . assert_almost_equal ( pred_prob , pred4 ) <nl> - self . assertListEqual ( gbm0 . booster_ . pandas_categorical , cat_cols ) <nl> - self . assertListEqual ( gbm1 . booster_ . pandas_categorical , cat_cols ) <nl> - self . assertListEqual ( gbm2 . booster_ . pandas_categorical , cat_cols ) <nl> - self . assertListEqual ( gbm3 . booster_ . pandas_categorical , cat_cols ) <nl> - self . assertListEqual ( gbm4 . pandas_categorical , cat_cols ) <nl> + self . assertRaises ( AssertionError , <nl> + np . testing . assert_almost_equal , <nl> + pred0 , pred5 ) # ordered cat features aren ' t treated as cat features by default <nl> + self . assertListEqual ( gbm0 . booster_ . pandas_categorical , cat_values ) <nl> + self . assertListEqual ( gbm1 . booster_ . pandas_categorical , cat_values ) <nl> + self . assertListEqual ( gbm2 . booster_ . pandas_categorical , cat_values ) <nl> + self . assertListEqual ( gbm3 . booster_ . pandas_categorical , cat_values ) <nl> + self . assertListEqual ( gbm4 . pandas_categorical , cat_values ) <nl> + self . assertListEqual ( gbm5 . booster_ . pandas_categorical , cat_values ) <nl> <nl> def test_predict ( self ) : <nl> iris = load_iris ( ) <nl> | [ python ] ignore pandas ordered categorical columns by default ( ) | microsoft/LightGBM | d115769c2a2ddffadc76c7b84739a47937114c77 | 2019-04-19T11:08:00Z |
mmm a / core / math / a_star . cpp <nl> ppp b / core / math / a_star . cpp <nl> int AStar : : get_available_point_id ( ) const { <nl> <nl> void AStar : : add_point ( int p_id , const Vector3 & p_pos , real_t p_weight_scale ) { <nl> ERR_FAIL_COND ( p_id < 0 ) ; <nl> + ERR_FAIL_COND ( p_weight_scale < 1 ) ; <nl> if ( ! points . has ( p_id ) ) { <nl> Point * pt = memnew ( Point ) ; <nl> pt - > id = p_id ; <nl> bool AStar : : _solve ( Point * begin_point , Point * end_point ) { <nl> <nl> real_t cost = p - > distance ; <nl> cost + = _estimate_cost ( p - > id , end_point - > id ) ; <nl> - cost * = p - > weight_scale ; <nl> <nl> if ( cost < least_cost ) { <nl> <nl> mmm a / doc / base / classes . xml <nl> ppp b / doc / base / classes . xml <nl> <nl> < argument index = " 1 " name = " pos " type = " Vector3 " > <nl> < / argument > <nl> < argument index = " 2 " name = " weight_scale " type = " float " default = " 1 " > <nl> + Weight scale has to be 1 or larger . <nl> < / argument > <nl> < description > <nl> < / description > <nl> | Merge pull request from tagcup / astar_overestimate | godotengine/godot | 6dfab3c7e92992293c536248d2244c12da8aef38 | 2017-05-22T13:15:25Z |
mmm a / xbmc / guilib / GUIListItem . cpp <nl> ppp b / xbmc / guilib / GUIListItem . cpp <nl> void CGUIListItem : : SetArt ( const ArtMap & art ) <nl> SetInvalid ( ) ; <nl> } <nl> <nl> + void CGUIListItem : : AppendArt ( const ArtMap & art ) <nl> + { <nl> + for ( ArtMap : : const_iterator i = art . begin ( ) ; i ! = art . end ( ) ; + + i ) <nl> + SetArt ( i - > first , i - > second ) ; <nl> + } <nl> + <nl> std : : string CGUIListItem : : GetArt ( const std : : string & type ) const <nl> { <nl> ArtMap : : const_iterator i = m_art . find ( type ) ; <nl> mmm a / xbmc / guilib / GUIListItem . h <nl> ppp b / xbmc / guilib / GUIListItem . h <nl> class CGUIListItem <nl> * / <nl> void SetArt ( const ArtMap & art ) ; <nl> <nl> + / * ! \ brief append artwork to an item <nl> + \ param art a type : url map for artwork <nl> + \ sa GetArt <nl> + * / <nl> + void AppendArt ( const ArtMap & art ) ; <nl> + <nl> / * ! \ brief Get a particular art type for an item <nl> \ param type type of art to fetch . <nl> \ return the art URL , if available , else empty . <nl> | adds AppendArt to CGUIListItem | xbmc/xbmc | b51a768af9dbe81c9e4da78dc9b25f4a162c798b | 2012-10-15T22:43:13Z |
mmm a / src / webui / www / public / scripts / dynamicTable . js <nl> ppp b / src / webui / www / public / scripts / dynamicTable . js <nl> <nl> * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> <nl> var DynamicTableHeaderContextMenuClass = null ; <nl> + var ProgressColumnWidth = - 1 ; <nl> <nl> var DynamicTable = new Class ( { <nl> <nl> var TorrentsTable = new Class ( { <nl> <nl> if ( td . getChildren ( ' div ' ) . length ) { <nl> var div = td . getChildren ( ' div ' ) [ 0 ] ; <nl> - var newWidth = td . offsetWidth - 5 ; <nl> - if ( div . lastWidth ! = = newWidth ) { <nl> - div . setWidth ( newWidth ) ; <nl> - div . lastWidth = newWidth ; <nl> + if ( td . resized ) { <nl> + td . resized = false ; <nl> + div . setWidth ( ProgressColumnWidth - 5 ) ; <nl> } <nl> if ( div . getValue ( ) ! = progressFormated ) <nl> div . setValue ( progressFormated ) ; <nl> } <nl> - else <nl> + else { <nl> + if ( ProgressColumnWidth < 0 ) <nl> + ProgressColumnWidth = td . offsetWidth ; <nl> td . adopt ( new ProgressBar ( progressFormated . toFloat ( ) , { <nl> - ' width ' : td . offsetWidth - 5 <nl> + ' width ' : ProgressColumnWidth - 5 <nl> } ) ) ; <nl> + td . resized = false ; <nl> + } <nl> } ; <nl> <nl> this . columns [ ' progress ' ] . onResize = function ( columnName ) { <nl> var pos = this . getColumnPos ( columnName ) ; <nl> var trs = this . tableBody . getElements ( ' tr ' ) ; <nl> - for ( var i = 0 ; i < trs . length ; i + + ) <nl> - this . columns [ columnName ] . updateTd ( trs [ i ] . getElements ( ' td ' ) [ pos ] , this . rows . get ( trs [ i ] . rowId ) ) ; <nl> + ProgressColumnWidth = - 1 ; <nl> + for ( var i = 0 ; i < trs . length ; i + + ) { <nl> + var td = trs [ i ] . getElements ( ' td ' ) [ pos ] ; <nl> + if ( ProgressColumnWidth < 0 ) <nl> + ProgressColumnWidth = td . offsetWidth ; <nl> + td . resized = true ; <nl> + this . columns [ columnName ] . updateTd ( td , this . rows . get ( trs [ i ] . rowId ) ) ; <nl> + } <nl> } . bind ( this ) ; <nl> <nl> / / num_seeds <nl> | Merge pull request from buinsky / master | qbittorrent/qBittorrent | 922fec44d2a59daaee934a5e7c03bc30535428a8 | 2017-03-05T12:46:30Z |
mmm a / xbmc / cores / dvdplayer / DVDCodecs / Video / DVDVideoCodecVDA . cpp <nl> ppp b / xbmc / cores / dvdplayer / DVDCodecs / Video / DVDVideoCodecVDA . cpp <nl> bool CDVDVideoCodecVDA : : Open ( CDVDStreamInfo & hints , CDVDCodecOptions & options ) <nl> { <nl> case CODEC_ID_H264 : <nl> / / TODO : need to quality h264 encoding ( profile , level and number of reference frame ) <nl> - / / source must be H . 264 with valid avcC atom in extradata <nl> + / / source must be H . 264 with valid avcC atom data in extradata <nl> if ( hints . extrasize < 7 | | hints . extradata = = NULL ) <nl> return false ; <nl> m_format = ' avc1 ' ; <nl> | correct comment , avcC atom data goes in extradata and not the atom itself | xbmc/xbmc | f78d6648db8c64bd81958308dca6e9ae5457bc2c | 2010-04-29T19:02:19Z |
mmm a / src / arm / simulator - arm . h <nl> ppp b / src / arm / simulator - arm . h <nl> namespace internal { <nl> ( entry ( p0 , p1 , p2 , p3 , p4 ) ) <nl> <nl> typedef int ( * arm_regexp_matcher ) ( String * , int , const byte * , const byte * , <nl> - void * , int * , Address , int ) ; <nl> + void * , int * , Address , int , Isolate * ) ; <nl> <nl> <nl> / / Call the generated regexp code directly . The code at the entry address <nl> | Try fix arm build . | v8/v8 | 14216b357abdb0bce3eaecd246ca16d79927a3c1 | 2011-03-18T22:47:09Z |
mmm a / test / test262 / test262 . status <nl> ppp b / test / test262 / test262 . status <nl> <nl> ' intl402 / NumberFormat / currency - digits ' : [ FAIL ] , <nl> <nl> # https : / / bugs . chromium . org / p / v8 / issues / detail ? id = 7473 <nl> - ' intl402 / language - tags - canonicalized ' : [ FAIL ] , <nl> + ' intl402 / language - tags - canonicalized ' : [ SKIP ] , <nl> <nl> # https : / / bugs . chromium . org / p / v8 / issues / detail ? id = 7474 <nl> ' intl402 / NumberFormat / prototype / format / format - fraction - digits ' : [ FAIL ] , <nl> <nl> # https : / / bugs . chromium . org / p / v8 / issues / detail ? id = 5012 <nl> ' intl402 / Intl / getCanonicalLocales / canonicalized - tags ' : [ FAIL ] , <nl> ' intl402 / Intl / getCanonicalLocales / preferred - grandfathered ' : [ FAIL ] , <nl> - ' intl402 / Intl / getCanonicalLocales / preferred - variant ' : [ FAIL ] , <nl> + ' intl402 / Intl / getCanonicalLocales / preferred - variant ' : [ SKIP ] , <nl> <nl> # https : / / bugs . chromium . org / p / v8 / issues / detail ? id = 7513 <nl> ' built - ins / TypedArrays / ctors / buffer - arg / buffer - arg - bufferbyteoffset - throws - from - modulo - element - size ' : [ FAIL ] , <nl> | Skip two tests temporarily to prepare for ICU roll | v8/v8 | c6d2daace321536801fd0303b5d4cb479e03db70 | 2018-04-25T20:40:39Z |
mmm a / downloads . md <nl> ppp b / downloads . md <nl> Windows ( MSVC 2013 ) | [ 32 - bit ] ( http : / / downloads . sourceforge . net / project / wkhtml <nl> Windows ( MinGW - w64 ) | [ 32 - bit ] ( http : / / downloads . sourceforge . net / project / wkhtmltopdf / 0 . 12 . 1 - dev / wkhtmltox - 0 . 12 . 1 - c22928d_mingw - w64 - cross - win32 . exe ) & emsp ; [ 64 - bit ] ( http : / / downloads . sourceforge . net / project / wkhtmltopdf / 0 . 12 . 1 - dev / wkhtmltox - 0 . 12 . 1 - c22928d_mingw - w64 - cross - win64 . exe ) | will work on Windows XP / 2003 ; slower <nl> Linux ( Debian Wheezy ) | [ 32 - bit ] ( http : / / downloads . sourceforge . net / project / wkhtmltopdf / 0 . 12 . 1 - dev / wkhtmltox - 0 . 12 . 1 - c22928d_linux - wheezy - i386 . tar . xz ) & emsp ; [ 64 - bit ] ( http : / / downloads . sourceforge . net / project / wkhtmltopdf / 0 . 12 . 1 - dev / wkhtmltox - 0 . 12 . 1 - c22928d_linux - wheezy - amd64 . tar . xz ) | for recent distributions ( i . e . glibc 2 . 13 or later ) <nl> Linux ( CentOS 5 ) | [ 32 - bit ] ( http : / / downloads . sourceforge . net / project / wkhtmltopdf / 0 . 12 . 1 - dev / wkhtmltox - 0 . 12 . 1 - c22928d_linux - centos5 - i386 . tar . xz ) & emsp ; [ 64 - bit ] ( http : / / downloads . sourceforge . net / project / wkhtmltopdf / 0 . 12 . 1 - dev / wkhtmltox - 0 . 12 . 1 - c22928d_linux - centos5 - amd64 . tar . xz ) | for old distributions ( i . e . glibc 2 . 5 or later ) <nl> - OS X | not available | should be available in a few days <nl> - <nl> - You can try two unofficial builds for OS X : <nl> - <nl> - * [ 32 - bit Carbon build ] ( https : / / www . dropbox . com / s / y8mfc4xd3gcxe8b / wkhtmltox - 0 . 12 . 1 - 08b0817_mac - osx - i386 . zip ) made on OS X 10 . 6 . 8 by @ mn4367 which supports selectable text with proper font - embedding / subsetting <nl> - * [ 64 - bit Cocoa build ] ( http : / / downloads . sourceforge . net / project / wkhtmltopdf / 0 . 12 . 1 - dev / wkhtmltox - 0 . 12 . 1 - b8ea0f4_mac - osx - x86 - 64 . tar . xz ) made on OS X 10 . 9 . 2 by @ npinchot which doesn ' t support selectable text <nl> + OS X 10 . 8 + ( Carbon ) | [ 32 - bit ] ( http : / / downloads . sourceforge . net / project / wkhtmltopdf / 0 . 12 . 1 - dev / wkhtmltox - 0 . 12 . 1 - 7fac78c_osx - 10 . 9 - carbon - i386 . tar . xz ) | produces selectable text and smaller file sizes <nl> + OS X 10 . 8 + ( Cocoa ) | [ 64 - bit ] ( http : / / downloads . sourceforge . net / project / wkhtmltopdf / 0 . 12 . 1 - dev / wkhtmltox - 0 . 12 . 1 - 7fac78c_osx - 10 . 9 - cocoa - x86 - 64 . tar . xz ) | not recommended ; output file size is very large <nl> <nl> The next release will be made once the [ preparations for 0 . 12 . 1 ] ( https : / / github . com / wkhtmltopdf / wkhtmltopdf / issues / 1663 ) have been completed . <nl> <nl> | link to OS X builds which should work on 10 . 8 and above | wkhtmltopdf/wkhtmltopdf | 3ec88b352bf0e484cce3fe70f522bd9c11dd667c | 2014-05-16T08:59:11Z |
mmm a / doc / cascadia / SettingsSchema . md <nl> ppp b / doc / cascadia / SettingsSchema . md <nl> Properties listed below affect the entire window , regardless of the profile sett <nl> | Property | Necessity | Type | Default | Description | <nl> | mmmmmm - - | mmmmmmmmm | mmm - | mmmmmm - | mmmmmmmmm - - | <nl> | ` alwaysShowTabs ` | _Required_ | Boolean | ` true ` | When set to ` true ` , tabs are always displayed . When set to ` false ` and ` showTabsInTitlebar ` is set to ` false ` , tabs only appear after typing < kbd > Ctrl < / kbd > + < kbd > T < / kbd > . | <nl> + | ` copyOnSelect ` | Optional | Boolean | ` false ` | When set to ` true ` , a selection is immediately copied to your clipboard upon creation . When set to ` false ` , the selection persists and awaits further action . | <nl> | ` defaultProfile ` | _Required_ | String | PowerShell guid | Sets the default profile . Opens by typing < kbd > Ctrl < / kbd > + < kbd > T < / kbd > or by clicking the ' + ' icon . The guid of the desired default profile is used as the value . | <nl> | ` initialCols ` | _Required_ | Integer | ` 120 ` | The number of columns displayed in the window upon first load . | <nl> | ` initialRows ` | _Required_ | Integer | ` 30 ` | The number of rows displayed in the window upon first load . | <nl> mmm a / src / cascadia / TerminalApp / GlobalAppSettings . cpp <nl> ppp b / src / cascadia / TerminalApp / GlobalAppSettings . cpp <nl> static constexpr std : : string_view ShowTitleInTitlebarKey { " showTerminalTitleInTi <nl> static constexpr std : : string_view RequestedThemeKey { " requestedTheme " } ; <nl> static constexpr std : : string_view ShowTabsInTitlebarKey { " showTabsInTitlebar " } ; <nl> static constexpr std : : string_view WordDelimitersKey { " wordDelimiters " } ; <nl> + static constexpr std : : string_view CopyOnSelectKey { " copyOnSelect " } ; <nl> <nl> static constexpr std : : wstring_view LightThemeValue { L " light " } ; <nl> static constexpr std : : wstring_view DarkThemeValue { L " dark " } ; <nl> GlobalAppSettings : : GlobalAppSettings ( ) : <nl> _showTitleInTitlebar { true } , <nl> _showTabsInTitlebar { true } , <nl> _requestedTheme { ElementTheme : : Default } , <nl> - _wordDelimiters { DEFAULT_WORD_DELIMITERS } <nl> + _wordDelimiters { DEFAULT_WORD_DELIMITERS } , <nl> + _copyOnSelect { false } <nl> { <nl> } <nl> <nl> void GlobalAppSettings : : SetWordDelimiters ( const std : : wstring wordDelimiters ) noe <nl> _wordDelimiters = wordDelimiters ; <nl> } <nl> <nl> + bool GlobalAppSettings : : GetCopyOnSelect ( ) const noexcept <nl> + { <nl> + return _copyOnSelect ; <nl> + } <nl> + <nl> + void GlobalAppSettings : : SetCopyOnSelect ( const bool copyOnSelect ) noexcept <nl> + { <nl> + _copyOnSelect = copyOnSelect ; <nl> + } <nl> + <nl> # pragma region ExperimentalSettings <nl> bool GlobalAppSettings : : GetShowTabsInTitlebar ( ) const noexcept <nl> { <nl> void GlobalAppSettings : : ApplyToSettings ( TerminalSettings & settings ) const noexce <nl> settings . InitialRows ( _initialRows ) ; <nl> settings . InitialCols ( _initialCols ) ; <nl> settings . WordDelimiters ( _wordDelimiters ) ; <nl> + settings . 
CopyOnSelect ( _copyOnSelect ) ; <nl> } <nl> <nl> / / Method Description : <nl> Json : : Value GlobalAppSettings : : ToJson ( ) const <nl> jsonObject [ JsonKey ( ShowTitleInTitlebarKey ) ] = _showTitleInTitlebar ; <nl> jsonObject [ JsonKey ( ShowTabsInTitlebarKey ) ] = _showTabsInTitlebar ; <nl> jsonObject [ JsonKey ( WordDelimitersKey ) ] = winrt : : to_string ( _wordDelimiters ) ; <nl> + jsonObject [ JsonKey ( CopyOnSelectKey ) ] = _copyOnSelect ; <nl> jsonObject [ JsonKey ( RequestedThemeKey ) ] = winrt : : to_string ( _SerializeTheme ( _requestedTheme ) ) ; <nl> jsonObject [ JsonKey ( KeybindingsKey ) ] = AppKeyBindingsSerialization : : ToJson ( _keybindings ) ; <nl> <nl> GlobalAppSettings GlobalAppSettings : : FromJson ( const Json : : Value & json ) <nl> result . _wordDelimiters = GetWstringFromJson ( wordDelimiters ) ; <nl> } <nl> <nl> + if ( auto copyOnSelect { json [ JsonKey ( CopyOnSelectKey ) ] } ) <nl> + { <nl> + result . _copyOnSelect = copyOnSelect . asBool ( ) ; <nl> + } <nl> + <nl> if ( auto requestedTheme { json [ JsonKey ( RequestedThemeKey ) ] } ) <nl> { <nl> result . _requestedTheme = _ParseTheme ( GetWstringFromJson ( requestedTheme ) ) ; <nl> mmm a / src / cascadia / TerminalApp / GlobalAppSettings . h <nl> ppp b / src / cascadia / TerminalApp / GlobalAppSettings . h <nl> class TerminalApp : : GlobalAppSettings final <nl> std : : wstring GetWordDelimiters ( ) const noexcept ; <nl> void SetWordDelimiters ( const std : : wstring wordDelimiters ) noexcept ; <nl> <nl> + bool GetCopyOnSelect ( ) const noexcept ; <nl> + void SetCopyOnSelect ( const bool copyOnSelect ) noexcept ; <nl> + <nl> winrt : : Windows : : UI : : Xaml : : ElementTheme GetRequestedTheme ( ) const noexcept ; <nl> <nl> Json : : Value ToJson ( ) const ; <nl> class TerminalApp : : GlobalAppSettings final <nl> <nl> bool _showTabsInTitlebar ; <nl> std : : wstring _wordDelimiters ; <nl> + bool _copyOnSelect ; <nl> winrt : : Windows : : UI : : Xaml : : ElementTheme _requestedTheme ; <nl> <nl> static winrt : : Windows : : UI : : Xaml : : ElementTheme _ParseTheme ( const std : : wstring & themeString ) noexcept ; <nl> mmm a / src / cascadia / TerminalControl / TermControl . cpp <nl> ppp b / src / cascadia / TerminalControl / TermControl . cpp <nl> namespace winrt : : Microsoft : : Terminal : : TerminalControl : : implementation <nl> } <nl> else if ( point . Properties ( ) . IsRightButtonPressed ( ) ) <nl> { <nl> - / / copy selection , if one exists <nl> - if ( _terminal - > IsSelectionActive ( ) ) <nl> + / / copyOnSelect causes right - click to always paste <nl> + if ( _terminal - > IsCopyOnSelectActive ( ) | | ! _terminal - > IsSelectionActive ( ) ) <nl> { <nl> - CopySelectionToClipboard ( ! shiftEnabled ) ; <nl> + PasteTextFromClipboard ( ) ; <nl> } <nl> - / / paste selection , otherwise <nl> else <nl> { <nl> - PasteTextFromClipboard ( ) ; <nl> + CopySelectionToClipboard ( ! shiftEnabled ) ; <nl> } <nl> } <nl> } <nl> namespace winrt : : Microsoft : : Terminal : : TerminalControl : : implementation <nl> <nl> if ( ptr . PointerDeviceType ( ) = = Windows : : Devices : : Input : : PointerDeviceType : : Mouse ) <nl> { <nl> - if ( _terminal - > IsSelectionActive ( ) & & point . Properties ( ) . IsLeftButtonPressed ( ) ) <nl> + if ( point . Properties ( ) . IsLeftButtonPressed ( ) ) <nl> { <nl> const auto cursorPosition = point . 
Position ( ) ; <nl> _SetEndSelectionPointAtCursor ( cursorPosition ) ; <nl> namespace winrt : : Microsoft : : Terminal : : TerminalControl : : implementation <nl> <nl> const auto ptr = args . Pointer ( ) ; <nl> <nl> - if ( ptr . PointerDeviceType ( ) = = Windows : : Devices : : Input : : PointerDeviceType : : Touch ) <nl> + if ( ptr . PointerDeviceType ( ) = = Windows : : Devices : : Input : : PointerDeviceType : : Mouse ) <nl> + { <nl> + const auto modifiers = static_cast < uint32_t > ( args . KeyModifiers ( ) ) ; <nl> + / / static_cast to a uint32_t because we can ' t use the WI_IsFlagSet <nl> + / / macro directly with a VirtualKeyModifiers <nl> + const auto shiftEnabled = WI_IsFlagSet ( modifiers , static_cast < uint32_t > ( VirtualKeyModifiers : : Shift ) ) ; <nl> + <nl> + if ( _terminal - > IsCopyOnSelectActive ( ) ) <nl> + { <nl> + CopySelectionToClipboard ( ! shiftEnabled ) ; <nl> + } <nl> + } <nl> + else if ( ptr . PointerDeviceType ( ) = = Windows : : Devices : : Input : : PointerDeviceType : : Touch ) <nl> { <nl> _touchAnchor = std : : nullopt ; <nl> } <nl> namespace winrt : : Microsoft : : Terminal : : TerminalControl : : implementation <nl> } <nl> <nl> / / Method Description : <nl> - / / - get text from buffer and send it to the Windows Clipboard ( CascadiaWin32 : main . cpp ) . Also removes rendering of selection . <nl> + / / - Given a copy - able selection , get the selected text from the buffer and send it to the <nl> + / / Windows Clipboard ( CascadiaWin32 : main . cpp ) . <nl> + / / - CopyOnSelect does NOT clear the selection <nl> / / Arguments : <nl> / / - trimTrailingWhitespace : enable removing any whitespace from copied selection <nl> / / and get text to appear on separate lines . <nl> bool TermControl : : CopySelectionToClipboard ( bool trimTrailingWhitespace ) <nl> { <nl> - if ( _terminal ! = nullptr & & _terminal - > IsSelectionActive ( ) ) <nl> + / / no selection - - > nothing to copy <nl> + if ( _terminal = = nullptr | | ! _terminal - > IsSelectionActive ( ) ) <nl> { <nl> - / / extract text from buffer <nl> - const auto bufferData = _terminal - > RetrieveSelectedTextFromBuffer ( trimTrailingWhitespace ) ; <nl> + return false ; <nl> + } <nl> + / / extract text from buffer <nl> + const auto bufferData = _terminal - > RetrieveSelectedTextFromBuffer ( trimTrailingWhitespace ) ; <nl> <nl> - / / convert text : vector < string > - - > string <nl> - std : : wstring textData ; <nl> - for ( const auto & text : bufferData . text ) <nl> - { <nl> - textData + = text ; <nl> - } <nl> + / / convert text : vector < string > - - > string <nl> + std : : wstring textData ; <nl> + for ( const auto & text : bufferData . text ) <nl> + { <nl> + textData + = text ; <nl> + } <nl> <nl> - / / convert text to HTML format <nl> - const auto htmlData = TextBuffer : : GenHTML ( bufferData , _actualFont . GetUnscaledSize ( ) . Y , _actualFont . GetFaceName ( ) , " Windows Terminal " ) ; <nl> + / / convert text to HTML format <nl> + const auto htmlData = TextBuffer : : GenHTML ( bufferData , _actualFont . GetUnscaledSize ( ) . Y , _actualFont . GetFaceName ( ) , " Windows Terminal " ) ; <nl> <nl> + if ( ! _terminal - > IsCopyOnSelectActive ( ) ) <nl> + { <nl> _terminal - > ClearSelection ( ) ; <nl> - <nl> - / / send data up for clipboard <nl> - auto copyArgs = winrt : : make_self < CopyToClipboardEventArgs > ( winrt : : hstring ( textData . data ( ) , textData . 
size ( ) ) , winrt : : to_hstring ( htmlData ) ) ; <nl> - _clipboardCopyHandlers ( * this , * copyArgs ) ; <nl> - return true ; <nl> } <nl> - return false ; <nl> + <nl> + / / send data up for clipboard <nl> + auto copyArgs = winrt : : make_self < CopyToClipboardEventArgs > ( winrt : : hstring ( textData . data ( ) , textData . size ( ) ) , winrt : : to_hstring ( htmlData ) ) ; <nl> + _clipboardCopyHandlers ( * this , * copyArgs ) ; <nl> + return true ; <nl> } <nl> <nl> / / Method Description : <nl> mmm a / src / cascadia / TerminalCore / Terminal . cpp <nl> ppp b / src / cascadia / TerminalCore / Terminal . cpp <nl> Terminal : : Terminal ( ) : <nl> _snapOnInput { true } , <nl> _boxSelection { false } , <nl> _selectionActive { false } , <nl> + _allowSingleCharSelection { false } , <nl> + _copyOnSelect { false } , <nl> _selectionAnchor { 0 , 0 } , <nl> _endSelectionPosition { 0 , 0 } <nl> { <nl> void Terminal : : UpdateSettings ( winrt : : Microsoft : : Terminal : : Settings : : ICoreSetting <nl> <nl> _wordDelimiters = settings . WordDelimiters ( ) ; <nl> <nl> + _copyOnSelect = settings . CopyOnSelect ( ) ; <nl> + <nl> / / TODO : MSFT : 21327402 - if HistorySize has changed , resize the buffer so we <nl> / / have a smaller scrollback . We should do this carefully - if the new buffer <nl> / / size is smaller than where the mutable viewport currently is , we ' ll want <nl> mmm a / src / cascadia / TerminalCore / Terminal . hpp <nl> ppp b / src / cascadia / TerminalCore / Terminal . hpp <nl> class Microsoft : : Terminal : : Core : : Terminal final : <nl> <nl> # pragma region TextSelection <nl> / / These methods are defined in TerminalSelection . cpp <nl> + const bool IsCopyOnSelectActive ( ) const noexcept ; <nl> void DoubleClickSelection ( const COORD position ) ; <nl> void TripleClickSelection ( const COORD position ) ; <nl> void SetSelectionAnchor ( const COORD position ) ; <nl> class Microsoft : : Terminal : : Core : : Terminal final : <nl> COORD _endSelectionPosition ; <nl> bool _boxSelection ; <nl> bool _selectionActive ; <nl> + bool _allowSingleCharSelection ; <nl> + bool _copyOnSelect ; <nl> SHORT _selectionAnchor_YOffset ; <nl> SHORT _endSelectionPosition_YOffset ; <nl> std : : wstring _wordDelimiters ; <nl> class Microsoft : : Terminal : : Core : : Terminal final : <nl> COORD _ExpandDoubleClickSelectionRight ( const COORD position ) const ; <nl> const bool _isWordDelimiter ( std : : wstring_view cellChar ) const ; <nl> const COORD _ConvertToBufferCell ( const COORD viewportPos ) const ; <nl> + const bool _isSingleCellSelection ( ) const noexcept ; <nl> # pragma endregion <nl> } ; <nl> mmm a / src / cascadia / TerminalCore / TerminalSelection . cpp <nl> ppp b / src / cascadia / TerminalCore / TerminalSelection . cpp <nl> std : : vector < SMALL_RECT > Terminal : : _GetSelectionRects ( ) const <nl> { <nl> std : : vector < SMALL_RECT > selectionArea ; <nl> <nl> - if ( ! _selectionActive ) <nl> + if ( ! 
IsSelectionActive ( ) ) <nl> { <nl> return selectionArea ; <nl> } <nl> std : : vector < SMALL_RECT > Terminal : : _GetSelectionRects ( ) const <nl> if ( _multiClickSelectionMode = = SelectionExpansionMode : : Word ) <nl> { <nl> const auto cellChar = _buffer - > GetCellDataAt ( selectionAnchorWithOffset ) - > Chars ( ) ; <nl> - if ( _selectionAnchor = = _endSelectionPosition & & _isWordDelimiter ( cellChar ) ) <nl> + if ( _isSingleCellSelection ( ) & & _isWordDelimiter ( cellChar ) ) <nl> { <nl> / / only highlight the cell if you double click a delimiter <nl> } <nl> const SHORT Terminal : : _ExpandWideGlyphSelectionRight ( const SHORT xPos , const SHO <nl> return position . X ; <nl> } <nl> <nl> + / / Method Description : <nl> + / / - Checks if selection is on a single cell <nl> + / / Return Value : <nl> + / / - bool representing if selection is only a single cell . Used for copyOnSelect <nl> + const bool Terminal : : _isSingleCellSelection ( ) const noexcept <nl> + { <nl> + return ( _selectionAnchor = = _endSelectionPosition ) ; <nl> + } <nl> + <nl> / / Method Description : <nl> / / - Checks if selection is active <nl> / / Return Value : <nl> / / - bool representing if selection is active . Used to decide copy / paste on right click <nl> const bool Terminal : : IsSelectionActive ( ) const noexcept <nl> { <nl> + / / A single cell selection is not considered an active selection , <nl> + / / if it ' s not allowed <nl> + if ( ! _allowSingleCharSelection & & _isSingleCellSelection ( ) ) <nl> + { <nl> + return false ; <nl> + } <nl> return _selectionActive ; <nl> } <nl> <nl> + / / Method Description : <nl> + / / - Checks if the CopyOnSelect setting is active <nl> + / / Return Value : <nl> + / / - true if feature is active , false otherwise . <nl> + const bool Terminal : : IsCopyOnSelectActive ( ) const noexcept <nl> + { <nl> + return _copyOnSelect ; <nl> + } <nl> + <nl> / / Method Description : <nl> / / - Select the sequence between delimiters defined in Settings <nl> / / Arguments : <nl> void Terminal : : SetSelectionAnchor ( const COORD position ) <nl> _selectionAnchor_YOffset = gsl : : narrow < SHORT > ( _ViewStartIndex ( ) ) ; <nl> <nl> _selectionActive = true ; <nl> + _allowSingleCharSelection = ( _copyOnSelect ) ? false : true ; <nl> + <nl> SetEndSelectionPosition ( position ) ; <nl> <nl> _multiClickSelectionMode = SelectionExpansionMode : : Cell ; <nl> void Terminal : : SetEndSelectionPosition ( const COORD position ) <nl> / / copy value of ViewStartIndex to support scrolling <nl> / / and update on new buffer output ( used in _GetSelectionRects ( ) ) <nl> _endSelectionPosition_YOffset = gsl : : narrow < SHORT > ( _ViewStartIndex ( ) ) ; <nl> + <nl> + if ( _copyOnSelect & & ! _isSingleCellSelection ( ) ) <nl> + { <nl> + _allowSingleCharSelection = true ; <nl> + } <nl> } <nl> <nl> / / Method Description : <nl> void Terminal : : SetBoxSelection ( const bool isEnabled ) noexcept <nl> void Terminal : : ClearSelection ( ) <nl> { <nl> _selectionActive = false ; <nl> + _allowSingleCharSelection = false ; <nl> _selectionAnchor = { 0 , 0 } ; <nl> _endSelectionPosition = { 0 , 0 } ; <nl> _selectionAnchor_YOffset = 0 ; <nl> mmm a / src / cascadia / TerminalSettings / ICoreSettings . idl <nl> ppp b / src / cascadia / TerminalSettings / ICoreSettings . idl <nl> namespace Microsoft . Terminal . Settings <nl> CursorStyle CursorShape ; <nl> UInt32 CursorHeight ; <nl> String WordDelimiters ; <nl> + Boolean CopyOnSelect ; <nl> } ; <nl> <nl> } <nl> mmm a / src / cascadia / TerminalSettings / TerminalSettings . 
cpp <nl> ppp b / src / cascadia / TerminalSettings / TerminalSettings . cpp <nl> namespace winrt : : Microsoft : : Terminal : : Settings : : implementation <nl> _cursorShape { CursorStyle : : Vintage } , <nl> _cursorHeight { DEFAULT_CURSOR_HEIGHT } , <nl> _wordDelimiters { DEFAULT_WORD_DELIMITERS } , <nl> + _copyOnSelect { false } , <nl> _useAcrylic { false } , <nl> _closeOnExit { true } , <nl> _tintOpacity { 0 . 5 } , <nl> namespace winrt : : Microsoft : : Terminal : : Settings : : implementation <nl> _wordDelimiters = value ; <nl> } <nl> <nl> + bool TerminalSettings : : CopyOnSelect ( ) <nl> + { <nl> + return _copyOnSelect ; <nl> + } <nl> + <nl> + void TerminalSettings : : CopyOnSelect ( bool value ) <nl> + { <nl> + _copyOnSelect = value ; <nl> + } <nl> + <nl> bool TerminalSettings : : UseAcrylic ( ) <nl> { <nl> return _useAcrylic ; <nl> mmm a / src / cascadia / TerminalSettings / terminalsettings . h <nl> ppp b / src / cascadia / TerminalSettings / terminalsettings . h <nl> namespace winrt : : Microsoft : : Terminal : : Settings : : implementation <nl> void CursorHeight ( uint32_t value ) ; <nl> hstring WordDelimiters ( ) ; <nl> void WordDelimiters ( hstring const & value ) ; <nl> + bool CopyOnSelect ( ) ; <nl> + void CopyOnSelect ( bool value ) ; <nl> / / mmmmmmmmmmmmmmmmmmmmmmmm End of Core Settings mmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> bool UseAcrylic ( ) ; <nl> namespace winrt : : Microsoft : : Terminal : : Settings : : implementation <nl> winrt : : Windows : : UI : : Xaml : : Media : : Stretch _backgroundImageStretchMode ; <nl> winrt : : Windows : : UI : : Xaml : : HorizontalAlignment _backgroundImageHorizontalAlignment ; <nl> winrt : : Windows : : UI : : Xaml : : VerticalAlignment _backgroundImageVerticalAlignment ; <nl> + bool _copyOnSelect ; <nl> hstring _commandline ; <nl> hstring _startingDir ; <nl> hstring _startingTitle ; <nl> mmm a / src / cascadia / UnitTests_TerminalCore / MockTermSettings . h <nl> ppp b / src / cascadia / UnitTests_TerminalCore / MockTermSettings . h <nl> namespace TerminalCoreUnitTests <nl> CursorStyle CursorShape ( ) const noexcept { return CursorStyle : : Vintage ; } <nl> uint32_t CursorHeight ( ) { return 42UL ; } <nl> winrt : : hstring WordDelimiters ( ) { return winrt : : to_hstring ( DEFAULT_WORD_DELIMITERS . c_str ( ) ) ; } <nl> + bool CopyOnSelect ( ) { return _copyOnSelect ; } <nl> <nl> / / other implemented methods <nl> uint32_t GetColorTableEntry ( int32_t ) const { return 123 ; } <nl> namespace TerminalCoreUnitTests <nl> void CursorShape ( CursorStyle const & ) noexcept { } <nl> void CursorHeight ( uint32_t ) { } <nl> void WordDelimiters ( winrt : : hstring ) { } <nl> + void CopyOnSelect ( bool copyOnSelect ) { _copyOnSelect = copyOnSelect ; } <nl> <nl> / / other unimplemented methods <nl> void SetColorTableEntry ( int32_t / * index * / , uint32_t / * value * / ) { } <nl> namespace TerminalCoreUnitTests <nl> int32_t _historySize ; <nl> int32_t _initialRows ; <nl> int32_t _initialCols ; <nl> + bool _copyOnSelect { false } ; <nl> } ; <nl> } <nl> mmm a / src / cascadia / UnitTests_TerminalCore / SelectionTest . cpp <nl> ppp b / src / cascadia / UnitTests_TerminalCore / SelectionTest . cpp <nl> namespace TerminalCoreUnitTests <nl> selection = term . GetViewport ( ) . ConvertToOrigin ( selectionRects . at ( 1 ) ) . ToInclusive ( ) ; <nl> VERIFY_ARE_EQUAL ( selection , SMALL_RECT ( { 0 , 11 , 99 , 11 } ) ) ; <nl> } <nl> + <nl> + TEST_METHOD ( CopyOnSelect ) <nl> + { <nl> + Terminal term ; <nl> + DummyRenderTarget emptyRT ; <nl> + term . 
Create ( { 100 , 100 } , 0 , emptyRT ) ; <nl> + <nl> + / / set copyOnSelect for terminal <nl> + auto settings = winrt : : make < MockTermSettings > ( 0 , 100 , 100 ) ; <nl> + settings . CopyOnSelect ( true ) ; <nl> + term . UpdateSettings ( settings ) ; <nl> + <nl> + / / Simulate click at ( x , y ) = ( 5 , 10 ) <nl> + term . SetSelectionAnchor ( { 5 , 10 } ) ; <nl> + <nl> + / / Simulate move to ( x , y ) = ( 5 , 10 ) <nl> + / / ( So , no movement ) <nl> + term . SetEndSelectionPosition ( { 5 , 10 } ) ; <nl> + <nl> + / / Case 1 : single cell selection not allowed <nl> + { <nl> + / / Simulate renderer calling TriggerSelection and acquiring selection area <nl> + auto selectionRects = term . GetSelectionRects ( ) ; <nl> + <nl> + / / Validate selection area <nl> + VERIFY_ARE_EQUAL ( selectionRects . size ( ) , static_cast < size_t > ( 0 ) ) ; <nl> + <nl> + / / single cell selection should not be allowed <nl> + / / thus , selection is NOT active <nl> + VERIFY_IS_FALSE ( term . IsSelectionActive ( ) ) ; <nl> + } <nl> + <nl> + / / Case 2 : move off of single cell <nl> + term . SetEndSelectionPosition ( { 6 , 10 } ) ; <nl> + { / / Simulate renderer calling TriggerSelection and acquiring selection area <nl> + auto selectionRects = term . GetSelectionRects ( ) ; <nl> + <nl> + / / Validate selection area <nl> + VERIFY_ARE_EQUAL ( selectionRects . size ( ) , static_cast < size_t > ( 1 ) ) ; <nl> + auto selection = term . GetViewport ( ) . ConvertToOrigin ( selectionRects . at ( 0 ) ) . ToInclusive ( ) ; <nl> + VERIFY_ARE_EQUAL ( selection , SMALL_RECT ( { 5 , 10 , 6 , 10 } ) ) ; <nl> + VERIFY_IS_TRUE ( term . IsSelectionActive ( ) ) ; <nl> + } <nl> + <nl> + / / Case 3 : move back onto single cell ( now allowed ) <nl> + term . SetEndSelectionPosition ( { 5 , 10 } ) ; <nl> + { / / Simulate renderer calling TriggerSelection and acquiring selection area <nl> + auto selectionRects = term . GetSelectionRects ( ) ; <nl> + <nl> + / / Validate selection area <nl> + VERIFY_ARE_EQUAL ( selectionRects . size ( ) , static_cast < size_t > ( 1 ) ) ; <nl> + auto selection = term . GetViewport ( ) . ConvertToOrigin ( selectionRects . at ( 0 ) ) . ToInclusive ( ) ; <nl> + VERIFY_ARE_EQUAL ( selection , SMALL_RECT ( { 5 , 10 , 5 , 10 } ) ) ; <nl> + <nl> + / / single cell selection should now be allowed <nl> + VERIFY_IS_TRUE ( term . IsSelectionActive ( ) ) ; <nl> + } <nl> + } <nl> } ; <nl> } <nl> | Added CopyOnSelect as a Global Setting ( ) | microsoft/terminal | ff87190823ea699a7eee4dafda34358ee0caffa4 | 2019-08-20T16:42:17Z |
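The Terminal change threads one boolean, `copyOnSelect`, from the JSON globals through `GlobalAppSettings` and `TerminalSettings` into the core `Terminal`, and the control then branches on it: releasing the left button copies the selection immediately, and a right click always pastes when the feature is on. A compact C++ sketch of that plumbing and the two mouse decisions, using invented stand-in types rather than the real WinRT/TerminalCore classes:

```cpp
#include <iostream>

// Simplified stand-ins for GlobalAppSettings -> TerminalSettings -> Terminal.
struct TerminalSettings { bool copyOnSelect = false; };

class Terminal {
public:
    void UpdateSettings(const TerminalSettings &s) { copyOnSelect_ = s.copyOnSelect; }
    bool IsCopyOnSelectActive() const { return copyOnSelect_; }
    bool IsSelectionActive() const { return selectionActive_; }
    void SetSelection(bool active) { selectionActive_ = active; }
private:
    bool copyOnSelect_ = false;
    bool selectionActive_ = false;
};

// Mirrors the control-level logic in the diff: with copyOnSelect, mouse-up
// copies right away, and a right click always pastes instead of re-copying.
void OnLeftButtonUp(Terminal &term) {
    if (term.IsCopyOnSelectActive() && term.IsSelectionActive())
        std::cout << "copy selection to clipboard\n";
}

void OnRightButtonDown(Terminal &term) {
    if (term.IsCopyOnSelectActive() || !term.IsSelectionActive())
        std::cout << "paste from clipboard\n";
    else
        std::cout << "copy selection to clipboard\n";
}

int main() {
    TerminalSettings settings;
    settings.copyOnSelect = true;   // like "copyOnSelect": true in the JSON globals
    Terminal term;
    term.UpdateSettings(settings);
    term.SetSelection(true);
    OnLeftButtonUp(term);     // copies, because copy-on-select is enabled
    OnRightButtonDown(term);  // pastes, never re-copies, when copy-on-select is enabled
    return 0;
}
```

Keeping the flag in the core `Terminal` rather than only in the control is what lets the selection logic itself (for example the single-cell-selection handling in `IsSelectionActive()`) behave differently when copy-on-select is enabled.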
new file mode 100644 <nl> index 000000000 . . 0a45f9bd5 <nl> mmm / dev / null <nl> ppp b / CODE_OF_CONDUCT . md <nl> <nl> + # Code of Conduct <nl> + <nl> + Facebook has adopted a Code of Conduct that we expect project participants to adhere to . Please [ read the full text ] ( https : / / code . facebook . com / codeofconduct ) so that you can understand what actions will and will not be tolerated . <nl> mmm a / README . markdown <nl> ppp b / README . markdown <nl> Head on over to https : / / facebook . github . io / watchman / <nl> Watchman is made available under the terms of the Apache License 2 . 0 . See the <nl> LICENSE file that accompanies this distribution for the full text of the <nl> license . <nl> + <nl> + # # Contributing <nl> + <nl> + Please see the [ contributing guide ] ( https : / / facebook . github . io / watchman / contributing . html ) . <nl> mmm a / website / _docs / contributing . markdown <nl> ppp b / website / _docs / contributing . markdown <nl> If we ask you to fill out a CLA we ' ll direct you to [ our online CLA <nl> page ] ( https : / / code . facebook . com / cla ) where you can complete it <nl> easily . We use the same form as the Apache CLA so that friction is minimal . <nl> <nl> + Facebook Open Source provides a Code of Conduct statement for all <nl> + projects to follow , to promote a welcoming and safe open source community . <nl> + Please [ read the full text ] ( https : / / code . facebook . com / codeofconduct ) so that you can understand what actions will and will not be tolerated . <nl> + <nl> # # # Getting Started <nl> <nl> You need to be able to build watchman from source and run its test suite . <nl> | Add Code of Conduct | facebook/watchman | 2f9359ffb1a271f316ab335293a55ded1dac1359 | 2017-12-07T03:32:42Z |
mmm a / src / core / lib / security / transport / security_handshaker . c <nl> ppp b / src / core / lib / security / transport / security_handshaker . c <nl> static void on_peer_checked_inner ( grpc_exec_ctx * exec_ctx , <nl> / / Create zero - copy frame protector , if implemented . <nl> tsi_zero_copy_grpc_protector * zero_copy_protector = NULL ; <nl> tsi_result result = tsi_handshaker_result_create_zero_copy_grpc_protector ( <nl> - h - > handshaker_result , NULL , & zero_copy_protector ) ; <nl> + exec_ctx , h - > handshaker_result , NULL , & zero_copy_protector ) ; <nl> if ( result ! = TSI_OK & & result ! = TSI_UNIMPLEMENTED ) { <nl> error = grpc_set_tsi_error_result ( <nl> GRPC_ERROR_CREATE_FROM_STATIC_STRING ( <nl> mmm a / src / core / tsi / fake_transport_security . c <nl> ppp b / src / core / tsi / fake_transport_security . c <nl> static tsi_result fake_handshaker_result_extract_peer ( <nl> } <nl> <nl> static tsi_result fake_handshaker_result_create_zero_copy_grpc_protector ( <nl> - const tsi_handshaker_result * self , size_t * max_output_protected_frame_size , <nl> + void * exec_ctx , const tsi_handshaker_result * self , <nl> + size_t * max_output_protected_frame_size , <nl> tsi_zero_copy_grpc_protector * * protector ) { <nl> * protector = <nl> tsi_create_fake_zero_copy_grpc_protector ( max_output_protected_frame_size ) ; <nl> mmm a / src / core / tsi / transport_security . h <nl> ppp b / src / core / tsi / transport_security . h <nl> struct tsi_handshaker { <nl> } ; <nl> <nl> / * Base for tsi_handshaker_result implementations . <nl> - See transport_security_interface . h for documentation . * / <nl> + See transport_security_interface . h for documentation . <nl> + The exec_ctx parameter in create_zero_copy_grpc_protector is supposed to be <nl> + of type grpc_exec_ctx * , but we ' re using void * instead to avoid making the TSI <nl> + API depend on grpc . The create_zero_copy_grpc_protector ( ) method is only used <nl> + in grpc , where we do need the exec_ctx passed through , but the API still <nl> + needs to compile in other applications , where grpc_exec_ctx is not defined . <nl> + * / <nl> typedef struct { <nl> tsi_result ( * extract_peer ) ( const tsi_handshaker_result * self , tsi_peer * peer ) ; <nl> tsi_result ( * create_zero_copy_grpc_protector ) ( <nl> - const tsi_handshaker_result * self , <nl> + void * exec_ctx , const tsi_handshaker_result * self , <nl> size_t * max_output_protected_frame_size , <nl> tsi_zero_copy_grpc_protector * * protector ) ; <nl> tsi_result ( * create_frame_protector ) ( const tsi_handshaker_result * self , <nl> mmm a / src / core / tsi / transport_security_grpc . c <nl> ppp b / src / core / tsi / transport_security_grpc . c <nl> <nl> <nl> / * This method creates a tsi_zero_copy_grpc_protector object . 
* / <nl> tsi_result tsi_handshaker_result_create_zero_copy_grpc_protector ( <nl> - const tsi_handshaker_result * self , size_t * max_output_protected_frame_size , <nl> + grpc_exec_ctx * exec_ctx , const tsi_handshaker_result * self , <nl> + size_t * max_output_protected_frame_size , <nl> tsi_zero_copy_grpc_protector * * protector ) { <nl> - if ( self = = NULL | | self - > vtable = = NULL | | protector = = NULL ) { <nl> + if ( exec_ctx = = NULL | | self = = NULL | | self - > vtable = = NULL | | <nl> + protector = = NULL ) { <nl> return TSI_INVALID_ARGUMENT ; <nl> } <nl> if ( self - > vtable - > create_zero_copy_grpc_protector = = NULL ) { <nl> return TSI_UNIMPLEMENTED ; <nl> } <nl> return self - > vtable - > create_zero_copy_grpc_protector ( <nl> - self , max_output_protected_frame_size , protector ) ; <nl> + exec_ctx , self , max_output_protected_frame_size , protector ) ; <nl> } <nl> <nl> / * mmm tsi_zero_copy_grpc_protector common implementation . mmm <nl> mmm a / src / core / tsi / transport_security_grpc . h <nl> ppp b / src / core / tsi / transport_security_grpc . h <nl> extern " C " { <nl> assuming there is no fatal error . <nl> The caller is responsible for destroying the protector . * / <nl> tsi_result tsi_handshaker_result_create_zero_copy_grpc_protector ( <nl> - const tsi_handshaker_result * self , size_t * max_output_protected_frame_size , <nl> + grpc_exec_ctx * exec_ctx , const tsi_handshaker_result * self , <nl> + size_t * max_output_protected_frame_size , <nl> tsi_zero_copy_grpc_protector * * protector ) ; <nl> <nl> / * - - tsi_zero_copy_grpc_protector object - - * / <nl> | Pass exec_ctx in TSI zero_copy_protector create | grpc/grpc | f68978c41f13197171325d944f971db7d9d85a54 | 2017-09-14T16:18:24Z |
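The gRPC change has to pass an execution context through a TSI vtable without making the TSI headers depend on gRPC, so the new parameter is declared as `void *` and documented as really being a `grpc_exec_ctx *`. Below is a small C++ sketch of that decoupling trick, with invented names standing in for the actual TSI types:

```cpp
#include <iostream>

// The "interface" header knows nothing about the caller's context type, so the
// vtable slot takes a void* -- the same trick used for
// create_zero_copy_grpc_protector in the diff above.
struct HandshakerResultVTable {
    int (*create_protector)(void *exec_ctx, const void *self);
};

struct HandshakerResult {
    const HandshakerResultVTable *vtable;
};

int create_protector_checked(void *exec_ctx, const HandshakerResult *self) {
    if (exec_ctx == nullptr || self == nullptr || self->vtable == nullptr)
        return -1;                                   // TSI_INVALID_ARGUMENT-style check
    if (self->vtable->create_protector == nullptr)
        return -2;                                   // TSI_UNIMPLEMENTED-style check
    return self->vtable->create_protector(exec_ctx, self);
}

// The implementation side *does* know the concrete context type and casts back.
struct ExecCtx { int flushes = 0; };

int fake_create_protector(void *exec_ctx, const void * /*self*/) {
    auto *ctx = static_cast<ExecCtx *>(exec_ctx);
    ctx->flushes++;          // pretend we scheduled work on the context
    return 0;                // TSI_OK-style success
}

int main() {
    HandshakerResultVTable vtable{fake_create_protector};
    HandshakerResult result{&vtable};
    ExecCtx ctx;
    std::cout << create_protector_checked(&ctx, &result) << "\n";    // 0: success
    std::cout << create_protector_checked(nullptr, &result) << "\n"; // -1: null context rejected
    return 0;
}
```

The cost of the `void *` is that type safety moves into convention and documentation, which is exactly why the diff adds the explanatory comment to `transport_security.h` alongside the new parameter.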
mmm a / emcc <nl> ppp b / emcc <nl> try : <nl> assert has_source_inputs or has_header_inputs , ' Must have source code or header inputs to use - c ' <nl> target = target_basename + ' . o ' <nl> final_suffix = ' o ' <nl> + final_ending = ( ' . ' + final_suffix ) if len ( final_suffix ) > 0 else ' ' <nl> <nl> # Find library files <nl> for lib in libs : <nl> try : <nl> sys . exit ( 1 ) <nl> <nl> def get_bitcode_file ( input_file ) : <nl> - if final_suffix = = ' o ' : <nl> + if final_suffix not in JS_CONTAINING_SUFFIXES : <nl> # no need for a temp file , just emit to the right place <nl> if len ( input_files ) = = 1 : <nl> # can just emit directly to the target <nl> - if specified_target : return specified_target <nl> - return unsuffixed ( input_file ) + ' . ' + final_suffix <nl> - return unsuffixed ( input_file ) + ' . o ' <nl> - return in_temp ( unsuffixed ( uniquename ( input_file ) ) + ' . o ' ) <nl> + if specified_target : <nl> + if specified_target . endswith ( ' / ' ) or specified_target . endswith ( ' \ \ ' ) or os . path . isdir ( specified_target ) : <nl> + return os . path . join ( specified_target , os . path . basename ( unsuffixed ( input_file ) ) ) + default_object_extension <nl> + return specified_target <nl> + return unsuffixed ( input_file ) + final_ending <nl> + return unsuffixed ( input_file ) + default_object_extension <nl> + return in_temp ( unsuffixed ( uniquename ( input_file ) ) + default_object_extension ) <nl> <nl> # First , generate LLVM bitcode . For each input file , we get base . o with bitcode <nl> for input_file in input_files : <nl> try : <nl> if final_suffix not in JS_CONTAINING_SUFFIXES : <nl> if not specified_target : <nl> for input_file in input_files : <nl> - safe_move ( get_bitcode_file ( input_file ) , unsuffixed_basename ( input_file ) + ' . ' + final_suffix ) <nl> + safe_move ( get_bitcode_file ( input_file ) , unsuffixed_basename ( input_file ) + final_ending ) <nl> else : <nl> if len ( input_files ) = = 1 : <nl> temp_output_base = unsuffixed ( get_bitcode_file ( input_files [ 0 ] ) ) <nl> - if specified_target . endswith ( ' / ' ) or specified_target . endswith ( ' \ \ ' ) or os . path . isdir ( specified_target ) : # User passed ' - o < directory ' as the location to output to . <nl> - obj_output_name = os . path . join ( specified_target , os . path . splitext ( os . path . basename ( input_file ) ) [ 0 ] + default_object_extension ) <nl> - logging . debug ( ' User specified - o < directoryname > as the location of the output . Generating output file ' + obj_output_name ) <nl> - try : <nl> - safe_move ( temp_output_base + ' . o ' , obj_output_name ) <nl> - except IOError , e : <nl> - logging . error ( ' Could not write to output file ' + obj_output_name + ' . Perhaps the output directory does not exist ? ' ) <nl> - exit ( 1 ) <nl> - else : # User passed ' - o < filename > ' as the location to output to . <nl> - safe_move ( temp_output_base + ' . o ' , specified_target ) <nl> + if not specified_target : <nl> + safe_move ( get_bitcode_file ( input_file ) , unsuffixed_basename ( input_file ) + final_ending ) <nl> if os . path . exists ( temp_output_base + ' . d ' ) : <nl> # There was a . d file generated , from - MD or - MMD and friends , save a copy of it to where the output resides , <nl> # adjusting the target name away from the temporary file name to the specified target . <nl> # It will be deleted with the rest of the temporary directory . <nl> deps = open ( temp_output_base + ' . d ' ) . read ( ) <nl> - deps = deps . 
replace ( temp_output_base + ' . o ' , specified_target ) <nl> + deps = deps . replace ( temp_output_base + default_object_ext , specified_target ) <nl> with open ( os . path . join ( os . path . dirname ( specified_target ) , os . path . basename ( unsuffixed ( input_files [ 0 ] ) + ' . d ' ) ) , " w " ) as out_dep : <nl> out_dep . write ( deps ) <nl> else : <nl> mmm a / tests / test_other . py <nl> ppp b / tests / test_other . py <nl> def test_emcc ( self ) : <nl> # emcc src . cpp - c and emcc src . cpp - o src . [ o | bc ] = = > should give a . bc file <nl> # regression check : - o js should create " js " , with bitcode content <nl> for args in [ [ ' - c ' ] , [ ' - o ' , ' src . o ' ] , [ ' - o ' , ' src . bc ' ] , [ ' - o ' , ' src . so ' ] , [ ' - o ' , ' js ' ] ] : <nl> + print ' - c stuff ' , args <nl> target = args [ 1 ] if len ( args ) = = 2 else ' hello_world . o ' <nl> self . clear ( ) <nl> Popen ( [ PYTHON , compiler , path_from_root ( ' tests ' , ' hello_world ' + suffix ) ] + args , stdout = PIPE , stderr = PIPE ) . communicate ( ) <nl> def measure_funcs ( filename ) : <nl> args = [ ' - I ' + path_from_root ( ' tests ' , ' zlib ' ) ] , suffix = ' c ' ) <nl> <nl> def test_symlink ( self ) : <nl> + self . clear ( ) <nl> if os . name = = ' nt ' : <nl> return self . skip ( ' Windows FS does not need to be tested for symlinks support , since it does not have them . ' ) <nl> open ( os . path . join ( self . get_dir ( ) , ' foobar . xxx ' ) , ' w ' ) . write ( ' int main ( ) { return 0 ; } ' ) <nl> os . symlink ( os . path . join ( self . get_dir ( ) , ' foobar . xxx ' ) , os . path . join ( self . get_dir ( ) , ' foobar . c ' ) ) <nl> - Popen ( [ PYTHON , EMCC , os . path . join ( self . get_dir ( ) , ' foobar . c ' ) , ' - o ' , os . path . join ( self . get_dir ( ) , ' foobar ' ) ] , stdout = PIPE , stderr = PIPE ) . communicate ( ) <nl> + Popen ( [ PYTHON , EMCC , os . path . join ( self . get_dir ( ) , ' foobar . c ' ) , ' - o ' , os . path . join ( self . get_dir ( ) , ' foobar ' ) ] ) . communicate ( ) <nl> assert os . path . exists ( os . path . join ( self . get_dir ( ) , ' foobar ' ) ) <nl> try_delete ( os . path . join ( self . get_dir ( ) , ' foobar ' ) ) <nl> try_delete ( os . path . join ( self . get_dir ( ) , ' foobar . xxx ' ) ) <nl> def test_symlink ( self ) : <nl> <nl> open ( os . path . join ( self . get_dir ( ) , ' foobar . c ' ) , ' w ' ) . write ( ' int main ( ) { return 0 ; } ' ) <nl> os . symlink ( os . path . join ( self . get_dir ( ) , ' foobar . c ' ) , os . path . join ( self . get_dir ( ) , ' foobar . xxx ' ) ) <nl> - Popen ( [ PYTHON , EMCC , os . path . join ( self . get_dir ( ) , ' foobar . xxx ' ) , ' - o ' , os . path . join ( self . get_dir ( ) , ' foobar ' ) ] , stdout = PIPE , stderr = PIPE ) . communicate ( ) <nl> + Popen ( [ PYTHON , EMCC , os . path . join ( self . get_dir ( ) , ' foobar . xxx ' ) , ' - o ' , os . path . join ( self . get_dir ( ) , ' foobar ' ) ] ) . communicate ( ) <nl> assert os . path . exists ( os . path . join ( self . get_dir ( ) , ' foobar ' ) ) <nl> try_delete ( os . path . join ( self . get_dir ( ) , ' foobar ' ) ) <nl> try_delete ( os . path . join ( self . get_dir ( ) , ' foobar . xxx ' ) ) <nl> def test_float_h ( self ) : <nl> <nl> def test_default_obj_ext ( self ) : <nl> outdir = os . path . join ( self . get_dir ( ) , ' out_dir ' ) + ' / ' <nl> + <nl> + self . clear ( ) <nl> os . mkdir ( outdir ) <nl> - process = Popen ( [ PYTHON , EMCC , ' - c ' , path_from_root ( ' tests ' , ' hello_world . 
c ' ) , ' - o ' , outdir ] , stdout = PIPE , stderr = PIPE ) <nl> + process = Popen ( [ PYTHON , EMCC , ' - c ' , path_from_root ( ' tests ' , ' hello_world . c ' ) , ' - o ' , outdir ] ) <nl> process . communicate ( ) <nl> assert ( os . path . isfile ( outdir + ' hello_world . o ' ) ) <nl> - process = Popen ( [ PYTHON , EMCC , ' - c ' , path_from_root ( ' tests ' , ' hello_world . c ' ) , ' - o ' , outdir , ' - - default - obj - ext ' , ' obj ' ] , stdout = PIPE , stderr = PIPE ) <nl> + <nl> + self . clear ( ) <nl> + os . mkdir ( outdir ) <nl> + process = Popen ( [ PYTHON , EMCC , ' - c ' , path_from_root ( ' tests ' , ' hello_world . c ' ) , ' - o ' , outdir , ' - - default - obj - ext ' , ' obj ' ] ) <nl> process . communicate ( ) <nl> assert ( os . path . isfile ( outdir + ' hello_world . obj ' ) ) <nl> <nl> | handle - - default - object - ext properly , and other emcc fixes | emscripten-core/emscripten | c4038a1fc9adba8c1a833717ec3145da98365c09 | 2014-03-26T17:43:07Z |
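The emcc fix centralizes the rule for where an object file lands when compiling with `-c`: if `-o` names a directory (ends in a slash or exists as one), the output becomes the input's basename plus the default object extension inside that directory; otherwise the `-o` value is used as the output file itself. A C++17 sketch of that path rule using `std::filesystem` (emcc itself is Python, and `object_output_path` is a made-up helper name, not part of emcc):

```cpp
#include <filesystem>
#include <iostream>
#include <string>

namespace fs = std::filesystem;

// Roughly the rule get_bitcode_file() applies in the diff: a directory-like -o
// target yields <dir>/<input basename><default_ext>; an explicit file target
// is used verbatim; no -o at all falls back to <input basename><default_ext>.
std::string object_output_path(const std::string &input_file,
                               const std::string &specified_target,
                               const std::string &default_ext = ".o") {
    fs::path input(input_file);
    if (specified_target.empty())
        return input.stem().string() + default_ext;

    fs::path target(specified_target);
    bool looks_like_dir = specified_target.back() == '/' ||
                          specified_target.back() == '\\' ||
                          fs::is_directory(target);
    if (looks_like_dir)
        return (target / (input.stem().string() + default_ext)).string();
    return specified_target;
}

int main() {
    std::cout << object_output_path("hello_world.c", "out_dir/") << "\n";         // e.g. out_dir/hello_world.o
    std::cout << object_output_path("hello_world.c", "out_dir/", ".obj") << "\n"; // a non-default extension
    std::cout << object_output_path("hello_world.c", "custom.o") << "\n";         // custom.o, used verbatim
    return 0;
}
```

This is the behaviour the updated `test_default_obj_ext` exercises: with `--default-obj-ext obj` only the extension changes, while the directory handling stays the same.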
mmm a / test / ClangImporter / serialization - sil . swift <nl> ppp b / test / ClangImporter / serialization - sil . swift <nl> <nl> - <nl> / / RUN : % empty - directory ( % t ) <nl> - / / RUN : % target - swift - frontend - emit - module - path % t / Test . swiftmodule - emit - sil - o / dev / null - module - name Test % s - sdk " " - import - objc - header % S / Inputs / serialization - sil . h - enable - sil - ownership <nl> + / / RUN : % target - swift - frontend - emit - module - path % t / Test . swiftmodule - emit - sil - o / dev / null - module - name Test % s - sdk " " - import - objc - header % S / Inputs / serialization - sil . h <nl> / / RUN : % target - sil - func - extractor % t / Test . swiftmodule - sil - print - debuginfo - func = ' $ s4Test16testPartialApplyyySoAA_pF ' - o - | % FileCheck % s <nl> <nl> / / REQUIRES : objc_interop <nl> mmm a / test / IRGen / keypath_witness_overrides . swift <nl> ppp b / test / IRGen / keypath_witness_overrides . swift <nl> <nl> / / RUN : % empty - directory ( % t ) <nl> - / / RUN : % target - swift - frontend - module - name protocol_overrides - emit - module - enable - sil - ownership - enable - resilience - emit - module - path = % t / protocol_overrides . swiftmodule % S / . . / SILGen / Inputs / protocol_overrides . swift <nl> + / / RUN : % target - swift - frontend - module - name protocol_overrides - emit - module - enable - resilience - emit - module - path = % t / protocol_overrides . swiftmodule % S / . . / SILGen / Inputs / protocol_overrides . swift <nl> / / RUN : % target - swift - frontend - module - name keypath_witness_overrides - emit - ir % s - I % t | % FileCheck % s <nl> <nl> import protocol_overrides <nl> mmm a / test / IRGen / outlined_copy_addr . swift <nl> ppp b / test / IRGen / outlined_copy_addr . swift <nl> <nl> - / / RUN : % target - swift - frontend - emit - ir - enable - sil - ownership - module - name outcopyaddr - primary - file % s | % FileCheck % s <nl> + / / RUN : % target - swift - frontend - emit - ir - module - name outcopyaddr - primary - file % s | % FileCheck % s <nl> <nl> public protocol BaseProt { <nl> } <nl> mmm a / test / Profiler / instrprof_basic . swift <nl> ppp b / test / Profiler / instrprof_basic . swift <nl> <nl> - / / RUN : % target - swift - frontend - parse - as - library - enable - sil - ownership - emit - silgen - profile - generate % s | % FileCheck % s <nl> + / / RUN : % target - swift - frontend - parse - as - library - emit - silgen - profile - generate % s | % FileCheck % s <nl> <nl> / / CHECK : sil hidden [ ossa ] @ [ [ F_EMPTY : . * empty . * ] ] : <nl> / / CHECK : % [ [ NAME : . * ] ] = string_literal utf8 " { { . * } } instrprof_basic . swift : [ [ F_EMPTY ] ] " <nl> mmm a / test / Profiler / instrprof_operators . swift <nl> ppp b / test / Profiler / instrprof_operators . swift <nl> <nl> - / / RUN : % target - swift - frontend - parse - as - library - emit - silgen - enable - sil - ownership - profile - generate % s | % FileCheck % s <nl> + / / RUN : % target - swift - frontend - parse - as - library - emit - silgen - profile - generate % s | % FileCheck % s <nl> <nl> / / CHECK : sil hidden [ ossa ] @ [ [ F_OPERATORS : . * operators . * ] ] : <nl> / / CHECK : % [ [ NAME : . * ] ] = string_literal utf8 " { { . * } } instrprof_operators . swift : [ [ F_OPERATORS ] ] " <nl> mmm a / test / Profiler / pgo_switchenum . swift <nl> ppp b / test / Profiler / pgo_switchenum . swift <nl> <nl> <nl> / / RUN : % llvm - profdata merge % t / default . profraw - o % t / default . 
profdata <nl> / / need to move counts attached to expr for this <nl> - / / RUN : % target - swift - frontend % s - Xllvm - sil - full - demangle - profile - use = % t / default . profdata - enable - sil - ownership - emit - sorted - sil - emit - sil - module - name pgo_switchenum - o - | % FileCheck % s - - check - prefix = SIL <nl> + / / RUN : % target - swift - frontend % s - Xllvm - sil - full - demangle - profile - use = % t / default . profdata - emit - sorted - sil - emit - sil - module - name pgo_switchenum - o - | % FileCheck % s - - check - prefix = SIL <nl> / / need to lower switch_enum ( addr ) into IR for this <nl> - / / % target - swift - frontend % s - enable - sil - ownership - Xllvm - sil - full - demangle - profile - use = % t / default . profdata - emit - ir - module - name pgo_switchenum - o - | % FileCheck % s - - check - prefix = IR <nl> + / / % target - swift - frontend % s - Xllvm - sil - full - demangle - profile - use = % t / default . profdata - emit - ir - module - name pgo_switchenum - o - | % FileCheck % s - - check - prefix = IR <nl> / / need to check Opt support <nl> - / / % target - swift - frontend % s - enable - sil - ownership - Xllvm - sil - full - demangle - profile - use = % t / default . profdata - O - emit - sorted - sil - emit - sil - module - name pgo_switchenum - o - | % FileCheck % s - - check - prefix = SIL - OPT <nl> + / / % target - swift - frontend % s - Xllvm - sil - full - demangle - profile - use = % t / default . profdata - O - emit - sorted - sil - emit - sil - module - name pgo_switchenum - o - | % FileCheck % s - - check - prefix = SIL - OPT <nl> / / need to lower switch_enum ( addr ) into IR for this <nl> - / / % target - swift - frontend - enable - sil - ownership % s - Xllvm - sil - full - demangle - profile - use = % t / default . profdata - O - emit - ir - module - name pgo_switchenum - o - | % FileCheck % s - - check - prefix = IR - OPT <nl> + / / % target - swift - frontend % s - Xllvm - sil - full - demangle - profile - use = % t / default . profdata - O - emit - ir - module - name pgo_switchenum - o - | % FileCheck % s - - check - prefix = IR - OPT <nl> <nl> / / REQUIRES : profile_runtime <nl> / / REQUIRES : executable_test <nl> mmm a / test / SILGen / apply_abstraction_nested . swift <nl> ppp b / test / SILGen / apply_abstraction_nested . swift <nl> <nl> - <nl> - / / RUN : % target - swift - frontend - enable - sil - ownership - emit - silgen % s | % FileCheck % s <nl> + / / RUN : % target - swift - emit - silgen - emit - silgen % s | % FileCheck % s <nl> <nl> infix operator ~ > <nl> <nl> mmm a / test / SILGen / codable / struct_codable_member_type_lookup . swift <nl> ppp b / test / SILGen / codable / struct_codable_member_type_lookup . swift <nl> <nl> - / / RUN : % target - swift - frontend - emit - silgen - enable - sil - ownership % s | % FileCheck % s <nl> + / / RUN : % target - swift - frontend - emit - silgen % s | % FileCheck % s <nl> <nl> / / Make sure we have an int , not a float . <nl> / / <nl> mmm a / test / SILGen / global_init_attribute . swift <nl> ppp b / test / SILGen / global_init_attribute . swift <nl> <nl> / / RUN : % empty - directory ( % t ) <nl> - / / RUN : % target - swift - frontend - Xllvm - sil - full - demangle - emit - module - o % t % S / Inputs / def_global . swift - enable - sil - ownership <nl> + / / RUN : % target - swift - frontend - Xllvm - sil - full - demangle - emit - module - o % t % S / Inputs / def_global . 
swift <nl> / / RUN : % target - swift - emit - silgen - Xllvm - sil - full - demangle - parse - as - library - I % t % s | % FileCheck % s <nl> / / <nl> / / Test that SILGen uses the " global_init " attribute for all global <nl> mmm a / test / SILGen / global_resilience . swift <nl> ppp b / test / SILGen / global_resilience . swift <nl> <nl> / / RUN : % empty - directory ( % t ) <nl> - / / RUN : % target - swift - frontend - emit - module - enable - sil - ownership - enable - resilience - emit - module - path = % t / resilient_global . swiftmodule - module - name = resilient_global % S / . . / Inputs / resilient_global . swift <nl> - / / RUN : % target - swift - emit - silgen - I % t - enable - resilience - enable - sil - ownership - parse - as - library % s | % FileCheck % s <nl> + / / RUN : % target - swift - frontend - emit - module - enable - resilience - emit - module - path = % t / resilient_global . swiftmodule - module - name = resilient_global % S / . . / Inputs / resilient_global . swift <nl> + / / RUN : % target - swift - emit - silgen - I % t - enable - resilience - parse - as - library % s | % FileCheck % s <nl> / / RUN : % target - swift - emit - sil - I % t - O - enable - resilience - parse - as - library % s | % FileCheck - - check - prefix = CHECK - OPT % s <nl> <nl> import resilient_global <nl> mmm a / test / SILGen / keypath_covariant_override . swift <nl> ppp b / test / SILGen / keypath_covariant_override . swift <nl> <nl> / / RUN : % target - swift - emit - silgen - enable - sil - ownership % s | % FileCheck % s <nl> / / RUN : % target - swift - emit - silgen - enable - sil - ownership - enable - resilience % s | % FileCheck % s <nl> <nl> - / / RUN : % target - swift - frontend - emit - ir - enable - sil - ownership % s <nl> - / / RUN : % target - swift - frontend - emit - ir - enable - sil - ownership - enable - resilience % s <nl> + / / RUN : % target - swift - frontend - emit - ir % s <nl> + / / RUN : % target - swift - frontend - emit - ir - enable - resilience % s <nl> <nl> public class C : Hashable { <nl> public static func = = ( lhs : C , rhs : C ) - > Bool { return lhs = = = rhs } <nl> mmm a / test / SILGen / keypath_witness_overrides . swift <nl> ppp b / test / SILGen / keypath_witness_overrides . swift <nl> <nl> / / RUN : % empty - directory ( % t ) <nl> - / / RUN : % target - swift - frontend - module - name protocol_overrides - emit - module - enable - sil - ownership - enable - resilience - emit - module - path = % t / protocol_overrides . swiftmodule % S / Inputs / protocol_overrides . swift <nl> + / / RUN : % target - swift - frontend - module - name protocol_overrides - emit - module - enable - resilience - emit - module - path = % t / protocol_overrides . swiftmodule % S / Inputs / protocol_overrides . swift <nl> / / RUN : % target - swift - emit - silgen % s - I % t | % FileCheck % s <nl> <nl> / / Check that keypath formation properly records the point at which the witness <nl> mmm a / test / SILGen / keypaths_inlinable . swift <nl> ppp b / test / SILGen / keypaths_inlinable . 
swift <nl> <nl> / / RUN : % target - swift - emit - silgen % s | % FileCheck % s - - check - prefix = CHECK - - check - prefix = FRAGILE <nl> / / RUN : % target - swift - emit - silgen - enable - resilience % s | % FileCheck % s - - check - prefix = CHECK - - check - prefix = RESILIENT <nl> <nl> - / / RUN : % target - swift - frontend - emit - ir - enable - sil - ownership % s <nl> - / / RUN : % target - swift - frontend - emit - ir - enable - sil - ownership - enable - resilience % s <nl> + / / RUN : % target - swift - frontend - emit - ir % s <nl> + / / RUN : % target - swift - frontend - emit - ir - enable - resilience % s <nl> <nl> public struct KeypathStruct { <nl> public var stored : Int = 0 <nl> mmm a / test / SILGen / mangling . swift <nl> ppp b / test / SILGen / mangling . swift <nl> <nl> - <nl> - / / RUN : % target - swift - frontend - module - name mangling - Xllvm - sil - full - demangle - sdk % S / Inputs - I % S / Inputs - enable - source - import % s - emit - silgen - enable - sil - ownership | % FileCheck % s <nl> + / / RUN : % target - swift - frontend - module - name mangling - Xllvm - sil - full - demangle - sdk % S / Inputs - I % S / Inputs - enable - source - import % s - emit - silgen | % FileCheck % s <nl> <nl> / / REQUIRES : objc_interop <nl> <nl> mmm a / test / SILGen / mangling_ext_structA . swift <nl> ppp b / test / SILGen / mangling_ext_structA . swift <nl> <nl> / / RUN : % empty - directory ( % t ) <nl> - / / RUN : % target - swift - frontend - emit - module - enable - sil - ownership - o % t % S / Inputs / def_structA . swift <nl> - / / RUN : % target - swift - emit - silgen - enable - sil - ownership - module - name ext_structA - I % t % s | % FileCheck % s <nl> + / / RUN : % target - swift - frontend - emit - module - o % t % S / Inputs / def_structA . swift <nl> + / / RUN : % target - swift - emit - silgen - module - name ext_structA - I % t % s | % FileCheck % s <nl> <nl> / / Ensure that members of extensions of types from another module are mangled <nl> / / correctly . <nl> mmm a / test / SILGen / mangling_private . swift <nl> ppp b / test / SILGen / mangling_private . swift <nl> <nl> / / RUN : % empty - directory ( % t ) <nl> - / / RUN : % target - swift - frontend - enable - sil - ownership - emit - module - o % t % S / Inputs / mangling_private_helper . swift <nl> - / / RUN : % target - swift - emit - silgen - enable - sil - ownership % S / Inputs / mangling_private_helper . swift | % FileCheck % s - check - prefix = CHECK - BASE <nl> + / / RUN : % target - swift - frontend - emit - module - o % t % S / Inputs / mangling_private_helper . swift <nl> + / / RUN : % target - swift - emit - silgen % S / Inputs / mangling_private_helper . swift | % FileCheck % s - check - prefix = CHECK - BASE <nl> <nl> - / / RUN : % target - swift - emit - silgen % s - I % t - enable - sil - ownership | % FileCheck % s <nl> + / / RUN : % target - swift - emit - silgen % s - I % t | % FileCheck % s <nl> <nl> / / RUN : cp % s % t <nl> - / / RUN : % target - swift - emit - silgen % t / mangling_private . swift - I % t - enable - sil - ownership | % FileCheck % s <nl> + / / RUN : % target - swift - emit - silgen % t / mangling_private . swift - I % t | % FileCheck % s <nl> <nl> / / RUN : cp % s % t / other_name . swift <nl> - / / RUN : % target - swift - emit - silgen % t / other_name . swift - I % t - enable - sil - ownership - module - name mangling_private | % FileCheck % s - check - prefix = OTHER - NAME <nl> + / / RUN : % target - swift - emit - silgen % t / other_name . 
swift - I % t - module - name mangling_private | % FileCheck % s - check - prefix = OTHER - NAME <nl> <nl> import mangling_private_helper <nl> <nl> mmm a / test / SILGen / mangling_retroactive . swift <nl> ppp b / test / SILGen / mangling_retroactive . swift <nl> <nl> / / RUN : % empty - directory ( % t ) <nl> - / / RUN : % target - swift - frontend - emit - module - enable - sil - ownership - o % t % S / Inputs / RetroactiveA . swift <nl> - / / RUN : % target - swift - frontend - emit - module - enable - sil - ownership - o % t % S / Inputs / RetroactiveB . swift <nl> - / / RUN : % target - swift - emit - silgen - enable - sil - ownership - I % t % s | % FileCheck % s <nl> + / / RUN : % target - swift - frontend - emit - module - o % t % S / Inputs / RetroactiveA . swift <nl> + / / RUN : % target - swift - frontend - emit - module - o % t % S / Inputs / RetroactiveB . swift <nl> + / / RUN : % target - swift - emit - silgen - I % t % s | % FileCheck % s <nl> <nl> <nl> import RetroactiveA <nl> mmm a / test / SILGen / nsmanaged - witness - multi . swift <nl> ppp b / test / SILGen / nsmanaged - witness - multi . swift <nl> <nl> / / RUN : % empty - directory ( % t ) <nl> <nl> - / / RUN : % target - swift - frontend - module - name main - emit - silgen - enable - sil - ownership - sdk % S / Inputs - primary - file % s % S / Inputs / nsmanaged - witness - multi - other . swift - I % S / Inputs - I % t - enable - source - import | % FileCheck % s <nl> + / / RUN : % target - swift - frontend - module - name main - emit - silgen - sdk % S / Inputs - primary - file % s % S / Inputs / nsmanaged - witness - multi - other . swift - I % S / Inputs - I % t - enable - source - import | % FileCheck % s <nl> <nl> - / / RUN : % target - swift - frontend - module - name main - emit - silgen - enable - sil - ownership - sdk % S / Inputs - primary - file % s - primary - file % S / Inputs / nsmanaged - witness - multi - other . swift - I % S / Inputs - I % t - enable - source - import | % FileCheck % s <nl> + / / RUN : % target - swift - frontend - module - name main - emit - silgen - sdk % S / Inputs - primary - file % s - primary - file % S / Inputs / nsmanaged - witness - multi - other . swift - I % S / Inputs - I % t - enable - source - import | % FileCheck % s <nl> <nl> - / / RUN : % target - swift - frontend - module - name main - emit - silgen - enable - sil - ownership - sdk % S / Inputs % s % S / Inputs / nsmanaged - witness - multi - other . swift - I % S / Inputs - I % t - enable - source - import | % FileCheck % s <nl> + / / RUN : % target - swift - frontend - module - name main - emit - silgen - sdk % S / Inputs % s % S / Inputs / nsmanaged - witness - multi - other . swift - I % S / Inputs - I % t - enable - source - import | % FileCheck % s <nl> <nl> / / REQUIRES : objc_interop <nl> import Foundation <nl> mmm a / test / SILGen / objc_required_designated_init . swift <nl> ppp b / test / SILGen / objc_required_designated_init . swift <nl> <nl> / / RUN : % empty - directory ( % t ) <nl> - / / RUN : % target - swift - frontend - emit - module - enable - sil - ownership % S / Inputs / objc_required_designated_init_2 . swift - module - name Booms - o % t / Booms . swiftmodule - import - objc - header % S / Inputs / objc_required_designated_init . h <nl> - / / RUN : % target - swift - emit - silgen - I % t - enable - sil - ownership - verify % s - import - objc - header % S / Inputs / objc_required_designated_init . 
h | % FileCheck % s <nl> + / / RUN : % target - swift - frontend - emit - module % S / Inputs / objc_required_designated_init_2 . swift - module - name Booms - o % t / Booms . swiftmodule - import - objc - header % S / Inputs / objc_required_designated_init . h <nl> + / / RUN : % target - swift - emit - silgen - I % t - verify % s - import - objc - header % S / Inputs / objc_required_designated_init . h | % FileCheck % s <nl> / / RUN : % target - swift - emit - ir - I % t % s - import - objc - header % S / Inputs / objc_required_designated_init . h <nl> <nl> / / REQUIRES : objc_interop <nl> mmm a / test / SILGen / private_import . swift <nl> ppp b / test / SILGen / private_import . swift <nl> <nl> / / RUN : % empty - directory ( % t ) <nl> <nl> - / / RUN : % target - swift - frontend - module - name Mod - emit - module - enable - private - imports - enable - sil - ownership - swift - version 5 - o % t % S / Inputs / private_import_module . swift <nl> - / / RUN : % target - swift - emit - silgen - enable - sil - ownership - I % t - primary - file % s % S / private_import_other . swift - module - name main - swift - version 5 | % FileCheck % s <nl> - / / RUN : % target - swift - emit - silgen - enable - sil - ownership - I % t % s % S / private_import_other . swift - module - name main - swift - version 5 | % FileCheck % s <nl> - / / RUN : % target - swift - emit - silgen - enable - sil - ownership - I % t % S / private_import_other . swift % s - module - name main - swift - version 5 | % FileCheck % s <nl> + / / RUN : % target - swift - frontend - module - name Mod - emit - module - enable - private - imports - swift - version 5 - o % t % S / Inputs / private_import_module . swift <nl> + / / RUN : % target - swift - emit - silgen - I % t - primary - file % s % S / private_import_other . swift - module - name main - swift - version 5 | % FileCheck % s <nl> + / / RUN : % target - swift - emit - silgen - I % t % s % S / private_import_other . swift - module - name main - swift - version 5 | % FileCheck % s <nl> + / / RUN : % target - swift - emit - silgen - I % t % S / private_import_other . swift % s - module - name main - swift - version 5 | % FileCheck % s <nl> / / RUN : % target - swift - emit - ir - enable - sil - ownership - I % t - primary - file % s % S / private_import_other . swift - module - name main - o / dev / null <nl> / / RUN : % target - swift - emit - ir - enable - sil - ownership - I % t - O - primary - file % s % S / private_import_other . swift - module - name main - o / dev / null <nl> <nl> mmm a / test / SILGen / private_import_other . swift <nl> ppp b / test / SILGen / private_import_other . swift <nl> <nl> / / RUN : % empty - directory ( % t ) <nl> <nl> - / / RUN : % target - swift - frontend - module - name Mod - emit - module - enable - private - imports - enable - sil - ownership - swift - version 5 - o % t % S / Inputs / private_import_module . swift <nl> - / / RUN : % target - swift - emit - silgen - enable - sil - ownership - I % t - primary - file % s % S / private_import . swift - module - name main - swift - version 5 | % FileCheck % s <nl> - / / RUN : % target - swift - emit - silgen - enable - sil - ownership - I % t % s % S / private_import . swift - module - name main - swift - version 5 | % FileCheck % s <nl> - / / RUN : % target - swift - emit - silgen - enable - sil - ownership - I % t % S / private_import . 
swift % s - module - name main - swift - version 5 | % FileCheck % s <nl> + / / RUN : % target - swift - frontend - module - name Mod - emit - module - enable - private - imports - swift - version 5 - o % t % S / Inputs / private_import_module . swift <nl> + / / RUN : % target - swift - emit - silgen - I % t - primary - file % s % S / private_import . swift - module - name main - swift - version 5 | % FileCheck % s <nl> + / / RUN : % target - swift - emit - silgen - I % t % s % S / private_import . swift - module - name main - swift - version 5 | % FileCheck % s <nl> + / / RUN : % target - swift - emit - silgen - I % t % S / private_import . swift % s - module - name main - swift - version 5 | % FileCheck % s <nl> / / RUN : % target - swift - emit - ir - enable - sil - ownership - I % t - primary - file % s % S / private_import . swift - module - name main - o / dev / null <nl> / / RUN : % target - swift - emit - ir - enable - sil - ownership - I % t - O - primary - file % s % S / private_import . swift - module - name main - o / dev / null <nl> <nl> mmm a / test / SILGen / protocol_resilience . swift <nl> ppp b / test / SILGen / protocol_resilience . swift <nl> <nl> <nl> / / RUN : % empty - directory ( % t ) <nl> - / / RUN : % target - swift - frontend - module - name protocol_resilience - emit - module - enable - sil - ownership - enable - resilience - emit - module - path = % t / resilient_protocol . swiftmodule - module - name = resilient_protocol % S / . . / Inputs / resilient_protocol . swift <nl> + / / RUN : % target - swift - frontend - module - name protocol_resilience - emit - module - enable - resilience - emit - module - path = % t / resilient_protocol . swiftmodule - module - name = resilient_protocol % S / . . / Inputs / resilient_protocol . swift <nl> / / RUN : % target - swift - emit - silgen - module - name protocol_resilience - I % t - enable - sil - ownership - enable - resilience % s | % FileCheck % s <nl> <nl> import resilient_protocol <nl> mmm a / test / SILGen / protocol_with_superclass . swift <nl> ppp b / test / SILGen / protocol_with_superclass . swift <nl> <nl> - / / RUN : % target - swift - emit - silgen - enable - sil - ownership % s | % FileCheck % s <nl> - / / RUN : % target - swift - frontend - emit - ir - enable - sil - ownership % s <nl> + / / RUN : % target - swift - emit - silgen % s | % FileCheck % s <nl> + / / RUN : % target - swift - frontend - emit - ir % s <nl> <nl> / / Protocols with superclass - constrained Self . <nl> <nl> mmm a / test / SILGen / protocol_with_superclass_where_clause . swift <nl> ppp b / test / SILGen / protocol_with_superclass_where_clause . swift <nl> <nl> / / RUN : % target - swift - emit - silgen - enable - sil - ownership - module - name protocol_with_superclass % s | % FileCheck % s <nl> - / / RUN : % target - swift - frontend - emit - ir - enable - sil - ownership % s <nl> + / / RUN : % target - swift - frontend - emit - ir % s <nl> <nl> / / Protocols with superclass - constrained Self , written using a ' where ' clause . <nl> <nl> mmm a / test / SILGen / struct_resilience . swift <nl> ppp b / test / SILGen / struct_resilience . swift <nl> <nl> - <nl> / / RUN : % empty - directory ( % t ) <nl> - / / RUN : % target - swift - frontend - emit - module - enable - resilience - emit - module - path = % t / resilient_struct . swiftmodule - enable - sil - ownership % S / . . / Inputs / resilient_struct . 
swift <nl> - / / RUN : % target - swift - emit - silgen - I % t - enable - sil - ownership - enable - resilience % s | % FileCheck % s <nl> + / / RUN : % target - swift - frontend - emit - module - enable - resilience - emit - module - path = % t / resilient_struct . swiftmodule % S / . . / Inputs / resilient_struct . swift <nl> + / / RUN : % target - swift - emit - silgen - I % t - enable - resilience % s | % FileCheck % s <nl> <nl> import resilient_struct <nl> <nl> mmm a / test / SILGen / struct_resilience_testable . swift <nl> ppp b / test / SILGen / struct_resilience_testable . swift <nl> <nl> - <nl> / / RUN : % empty - directory ( % t ) <nl> - / / RUN : % target - swift - frontend - emit - module - enable - resilience - enable - testing - emit - module - path = % t / resilient_struct . swiftmodule - enable - sil - ownership % S / . . / Inputs / resilient_struct . swift <nl> - / / RUN : % target - swift - emit - silgen - I % t - enable - sil - ownership % s | % FileCheck % s <nl> + / / RUN : % target - swift - frontend - emit - module - enable - resilience - enable - testing - emit - module - path = % t / resilient_struct . swiftmodule % S / . . / Inputs / resilient_struct . swift <nl> + / / RUN : % target - swift - emit - silgen - I % t % s | % FileCheck % s <nl> <nl> @ testable import resilient_struct <nl> <nl> mmm a / test / SILGen / subscript_accessor . swift <nl> ppp b / test / SILGen / subscript_accessor . swift <nl> <nl> - / / RUN : % target - swift - frontend - enable - sil - ownership - O - emit - sil - primary - file % s | % FileCheck % s <nl> + / / RUN : % target - swift - frontend - O - emit - sil - primary - file % s | % FileCheck % s <nl> <nl> / / CHECK - LABEL : sil hidden [ transparent ] @ $ s18subscript_accessor1XVxSgyciM <nl> / / CHECK : [ [ SETTER : % . * ] ] = function_ref @ $ s18subscript_accessor1XVxSgycis <nl> mmm a / test / SILGen / testable - multifile . swift <nl> ppp b / test / SILGen / testable - multifile . swift <nl> <nl> / / This test is paired with testable - multifile - other . swift . <nl> <nl> / / RUN : % empty - directory ( % t ) <nl> - / / RUN : % target - swift - frontend - emit - module % S / Inputs / TestableMultifileHelper . swift - enable - testing - enable - sil - ownership - o % t <nl> + / / RUN : % target - swift - frontend - emit - module % S / Inputs / TestableMultifileHelper . swift - enable - testing - o % t <nl> <nl> - / / RUN : % target - swift - emit - silgen - enable - sil - ownership - I % t % s % S / testable - multifile - other . swift - module - name main | % FileCheck % s <nl> - / / RUN : % target - swift - emit - silgen - enable - sil - ownership - I % t % S / testable - multifile - other . swift % s - module - name main | % FileCheck % s <nl> - / / RUN : % target - swift - emit - silgen - enable - sil - ownership - I % t - primary - file % s % S / testable - multifile - other . swift - module - name main | % FileCheck % s <nl> + / / RUN : % target - swift - emit - silgen - I % t % s % S / testable - multifile - other . swift - module - name main | % FileCheck % s <nl> + / / RUN : % target - swift - emit - silgen - I % t % S / testable - multifile - other . swift % s - module - name main | % FileCheck % s <nl> + / / RUN : % target - swift - emit - silgen - I % t - primary - file % s % S / testable - multifile - other . swift - module - name main | % FileCheck % s <nl> <nl> / / Just make sure we don ' t crash later on . 
<nl> / / RUN : % target - swift - emit - ir - enable - sil - ownership - I % t - primary - file % s % S / testable - multifile - other . swift - module - name main - o / dev / null <nl> mmm a / test / SILGen / witness_tables_serialized_import . swift <nl> ppp b / test / SILGen / witness_tables_serialized_import . swift <nl> <nl> / / RUN : % empty - directory ( % t ) <nl> - / / RUN : % target - swift - frontend - enable - sil - ownership - emit - module % S / witness_tables_serialized . swift - o % t - enable - resilience <nl> - / / RUN : % target - swift - emit - silgen - enable - sil - ownership - I % t % s | % FileCheck % s <nl> + / / RUN : % target - swift - frontend - emit - module % S / witness_tables_serialized . swift - o % t - enable - resilience <nl> + / / RUN : % target - swift - emit - silgen - I % t % s | % FileCheck % s <nl> <nl> import witness_tables_serialized <nl> <nl> mmm a / test / SILOptimizer / access_enforcement_noescape . swift <nl> ppp b / test / SILOptimizer / access_enforcement_noescape . swift <nl> <nl> - / / RUN : % target - swift - frontend - module - name access_enforcement_noescape - enable - sil - ownership - enforce - exclusivity = checked - Onone - emit - sil - swift - version 4 - parse - as - library % s | % FileCheck % s <nl> + / / RUN : % target - swift - frontend - module - name access_enforcement_noescape - enforce - exclusivity = checked - Onone - emit - sil - swift - version 4 - parse - as - library % s | % FileCheck % s <nl> <nl> / / This tests SILGen and AccessEnforcementSelection as a single set of tests . <nl> / / ( Some static / dynamic enforcement selection is done in SILGen , and some is <nl> mmm a / test / SILOptimizer / access_enforcement_noescape_error . swift <nl> ppp b / test / SILOptimizer / access_enforcement_noescape_error . swift <nl> <nl> - / / RUN : % target - swift - frontend - module - name access_enforcement_noescape - enable - sil - ownership - enforce - exclusivity = checked - Onone - emit - sil - swift - version 4 - verify - parse - as - library % s <nl> + / / RUN : % target - swift - frontend - module - name access_enforcement_noescape - enforce - exclusivity = checked - Onone - emit - sil - swift - version 4 - verify - parse - as - library % s <nl> / / REQUIRES : asserts <nl> <nl> / / This is the subset of tests from access_enforcement_noescape . swift <nl> mmm a / test / SILOptimizer / access_enforcement_options . swift <nl> ppp b / test / SILOptimizer / access_enforcement_options . 
swift <nl> <nl> - / / RUN : % target - swift - frontend - enable - sil - ownership - Onone - emit - sil - parse - as - library % s | % FileCheck % s - - check - prefix = CHECK - - check - prefix = NONE <nl> - / / RUN : % target - swift - frontend - enable - sil - ownership - Osize - emit - sil - parse - as - library % s | % FileCheck % s - - check - prefix = CHECK - - check - prefix = OPT <nl> - / / RUN : % target - swift - frontend - enable - sil - ownership - O - emit - sil - parse - as - library % s | % FileCheck % s - - check - prefix = CHECK - - check - prefix = OPT <nl> - / / RUN : % target - swift - frontend - enable - sil - ownership - Ounchecked - emit - sil - parse - as - library % s | % FileCheck % s - - check - prefix = CHECK - - check - prefix = UNCHECKED <nl> + / / RUN : % target - swift - frontend - Onone - emit - sil - parse - as - library % s | % FileCheck % s - - check - prefix = CHECK - - check - prefix = NONE <nl> + / / RUN : % target - swift - frontend - Osize - emit - sil - parse - as - library % s | % FileCheck % s - - check - prefix = CHECK - - check - prefix = OPT <nl> + / / RUN : % target - swift - frontend - O - emit - sil - parse - as - library % s | % FileCheck % s - - check - prefix = CHECK - - check - prefix = OPT <nl> + / / RUN : % target - swift - frontend - Ounchecked - emit - sil - parse - as - library % s | % FileCheck % s - - check - prefix = CHECK - - check - prefix = UNCHECKED <nl> <nl> @ inline ( never ) <nl> func takesInoutAndEscaping ( _ : inout Int , _ f : @ escaping ( ) - > ( ) ) { <nl> mmm a / test / SILOptimizer / access_marker_mandatory . swift <nl> ppp b / test / SILOptimizer / access_marker_mandatory . swift <nl> <nl> - <nl> - / / RUN : % target - swift - frontend - module - name access_marker_mandatory - enable - sil - ownership - parse - as - library - Xllvm - sil - full - demangle - emit - sil - Onone - enforce - exclusivity = checked % s | % FileCheck % s <nl> + / / RUN : % target - swift - frontend - module - name access_marker_mandatory - parse - as - library - Xllvm - sil - full - demangle - emit - sil - Onone - enforce - exclusivity = checked % s | % FileCheck % s <nl> <nl> public struct S { <nl> var i : Int <nl> mmm a / test / SILOptimizer / access_marker_verify . swift <nl> ppp b / test / SILOptimizer / access_marker_verify . 
swift <nl> <nl> - <nl> - / / RUN : % target - swift - frontend - module - name access_marker_verify - enable - verify - exclusivity - enforce - exclusivity = checked - enable - sil - ownership - emit - silgen - swift - version 4 - parse - as - library % s | % FileCheck % s <nl> - / / RUN : % target - swift - frontend - module - name access_marker_verify - enable - verify - exclusivity - enforce - exclusivity = checked - enable - sil - ownership - Onone - emit - sil - swift - version 4 - parse - as - library % s - o / dev / null <nl> - / / RUN : % target - swift - frontend - module - name access_marker_verify - enable - verify - exclusivity - enforce - exclusivity = checked - enable - sil - ownership - O - emit - sil - swift - version 4 - parse - as - library % s - o / dev / null <nl> + / / RUN : % target - swift - frontend - module - name access_marker_verify - enable - verify - exclusivity - enforce - exclusivity = checked - emit - silgen - swift - version 4 - parse - as - library % s | % FileCheck % s <nl> + / / RUN : % target - swift - frontend - module - name access_marker_verify - enable - verify - exclusivity - enforce - exclusivity = checked - Onone - emit - sil - swift - version 4 - parse - as - library % s - o / dev / null <nl> + / / RUN : % target - swift - frontend - module - name access_marker_verify - enable - verify - exclusivity - enforce - exclusivity = checked - O - emit - sil - swift - version 4 - parse - as - library % s - o / dev / null <nl> / / REQUIRES : asserts <nl> <nl> / / Test the combination of SILGen + DiagnoseStaticExclusivity with verification . <nl> mmm a / test / SILOptimizer / access_marker_verify_objc . swift <nl> ppp b / test / SILOptimizer / access_marker_verify_objc . swift <nl> <nl> - / / RUN : % target - swift - frontend - enforce - exclusivity = checked - enable - sil - ownership - import - objc - header % S / Inputs / access_marker_verify_objc . h - Onone - emit - silgen - swift - version 4 - parse - as - library % s | % FileCheck % s <nl> - / / RUN : % target - swift - frontend - enable - verify - exclusivity - enforce - exclusivity = checked - enable - sil - ownership - import - objc - header % S / Inputs / access_marker_verify_objc . h - Onone - emit - sil - swift - version 4 - parse - as - library % s <nl> + / / RUN : % target - swift - frontend - enforce - exclusivity = checked - import - objc - header % S / Inputs / access_marker_verify_objc . h - Onone - emit - silgen - swift - version 4 - parse - as - library % s | % FileCheck % s <nl> + / / RUN : % target - swift - frontend - enable - verify - exclusivity - enforce - exclusivity = checked - import - objc - header % S / Inputs / access_marker_verify_objc . h - Onone - emit - sil - swift - version 4 - parse - as - library % s <nl> / / REQUIRES : asserts <nl> / / REQUIRES : OS = macosx <nl> <nl> mmm a / test / SILOptimizer / allocbox_to_stack_not_crash_ownership . swift <nl> ppp b / test / SILOptimizer / allocbox_to_stack_not_crash_ownership . swift <nl> <nl> - / / RUN : % target - swift - frontend % s - emit - ir - verify - enable - sil - ownership <nl> + / / RUN : % target - swift - frontend % s - emit - ir - verify <nl> <nl> / / Verify we don ' t crash on this . <nl> / / rdar : / / 15595118 <nl> mmm a / test / SILOptimizer / bridged_casts_folding . sil <nl> ppp b / test / SILOptimizer / bridged_casts_folding . 
sil <nl> <nl> - / / RUN : % target - swift - frontend - module - name bridged_casts_folding - O - enable - sil - ownership - emit - sil % s | % FileCheck % s <nl> + / / RUN : % target - swift - frontend - module - name bridged_casts_folding - O - emit - sil % s | % FileCheck % s <nl> <nl> / / REQUIRES : objc_interop <nl> <nl> mmm a / test / SILOptimizer / capture_promotion_generic_context_ownership . sil <nl> ppp b / test / SILOptimizer / capture_promotion_generic_context_ownership . sil <nl> <nl> - <nl> - / / RUN : % target - swift - frontend - enable - sil - ownership - emit - sil - O - Xllvm - sil - fso - enable - generics = false % s | % FileCheck % s <nl> + / / RUN : % target - swift - frontend - emit - sil - O - Xllvm - sil - fso - enable - generics = false % s | % FileCheck % s <nl> <nl> sil_stage raw <nl> <nl> mmm a / test / SILOptimizer / capture_promotion_ownership . swift <nl> ppp b / test / SILOptimizer / capture_promotion_ownership . swift <nl> <nl> - / / RUN : % target - swift - frontend % s - enable - sil - ownership - emit - sil - o - | % FileCheck % s <nl> + / / RUN : % target - swift - frontend % s - emit - sil - o - | % FileCheck % s <nl> <nl> class Foo { <nl> func foo ( ) - > Int { <nl> mmm a / test / SILOptimizer / closure_lifetime_fixup . swift <nl> ppp b / test / SILOptimizer / closure_lifetime_fixup . swift <nl> <nl> / / RUN : % empty - directory ( % t ) <nl> / / RUN : % target - swift - frontend % S / . . / Inputs / resilient_struct . swift - enable - resilience - emit - module - emit - module - path % t / resilient_struct . swiftmodule <nl> / / RUN : % target - swift - frontend % S / . . / Inputs / resilient_enum . swift - I % t - enable - resilience - emit - module - emit - module - path % t / resilient_enum . swiftmodule <nl> - / / RUN : % target - swift - frontend % s - sil - verify - all - enable - sil - ownership - emit - sil - I % t - o - | % FileCheck % s - - check - prefix = CHECK - - check - prefix = % target - os <nl> + / / RUN : % target - swift - frontend % s - sil - verify - all - emit - sil - I % t - o - | % FileCheck % s - - check - prefix = CHECK - - check - prefix = % target - os <nl> <nl> import resilient_struct <nl> import resilient_enum <nl> mmm a / test / SILOptimizer / closure_lifetime_fixup_objc . swift <nl> ppp b / test / SILOptimizer / closure_lifetime_fixup_objc . swift <nl> <nl> - / / RUN : % target - swift - frontend % s - enable - sil - ownership - sil - verify - all - emit - sil - o - - I % S / Inputs / usr / include | % FileCheck % s <nl> + / / RUN : % target - swift - frontend % s - sil - verify - all - emit - sil - o - - I % S / Inputs / usr / include | % FileCheck % s <nl> / / REQUIRES : objc_interop <nl> <nl> import Foundation <nl> mmm a / test / SILOptimizer / definite - init - convert - to - escape . swift <nl> ppp b / test / SILOptimizer / definite - init - convert - to - escape . swift <nl> <nl> - / / RUN : % target - swift - frontend - module - name A - verify - emit - sil - import - objc - header % S / Inputs / Closure . h - disable - objc - attr - requires - foundation - module - enable - sil - ownership % s | % FileCheck % s <nl> - / / RUN : % target - swift - frontend - module - name A - verify - emit - sil - import - objc - header % S / Inputs / Closure . 
h - disable - objc - attr - requires - foundation - module - enable - sil - ownership - Xllvm - sil - disable - convert - escape - to - noescape - switch - peephole % s | % FileCheck % s - - check - prefix = NOPEEPHOLE <nl> + / / RUN : % target - swift - frontend - module - name A - verify - emit - sil - import - objc - header % S / Inputs / Closure . h - disable - objc - attr - requires - foundation - module % s | % FileCheck % s <nl> + / / RUN : % target - swift - frontend - module - name A - verify - emit - sil - import - objc - header % S / Inputs / Closure . h - disable - objc - attr - requires - foundation - module - Xllvm - sil - disable - convert - escape - to - noescape - switch - peephole % s | % FileCheck % s - - check - prefix = NOPEEPHOLE <nl> <nl> / / REQUIRES : objc_interop <nl> <nl> mmm a / test / SILOptimizer / definite_init . swift <nl> ppp b / test / SILOptimizer / definite_init . swift <nl> <nl> - / / RUN : % target - swift - frontend - emit - sil - enable - sil - ownership % s - o / dev / null <nl> + / / RUN : % target - swift - frontend - emit - sil % s - o / dev / null <nl> <nl> class SomeClass { } <nl> <nl> mmm a / test / SILOptimizer / definite_init_diagnostics . swift <nl> ppp b / test / SILOptimizer / definite_init_diagnostics . swift <nl> <nl> - / / RUN : % target - swift - frontend - emit - sil - enable - sil - ownership - primary - file % s - o / dev / null - verify <nl> + / / RUN : % target - swift - frontend - emit - sil - primary - file % s - o / dev / null - verify <nl> <nl> import Swift <nl> <nl> class WeakCycle { <nl> self . c = self / / expected - error { { variable ' self . d ' used before being initialized } } <nl> self . d = 10 <nl> } <nl> - } <nl> \ No newline at end of file <nl> + } <nl> mmm a / test / SILOptimizer / definite_init_diagnostics_globals . swift <nl> ppp b / test / SILOptimizer / definite_init_diagnostics_globals . swift <nl> <nl> - / / RUN : % target - swift - frontend - emit - sil - enable - sil - ownership - primary - file % s - o / dev / null - verify <nl> + / / RUN : % target - swift - frontend - emit - sil - primary - file % s - o / dev / null - verify <nl> <nl> import Swift <nl> <nl> mmm a / test / SILOptimizer / definite_init_diagnostics_objc . swift <nl> ppp b / test / SILOptimizer / definite_init_diagnostics_objc . swift <nl> <nl> - / / RUN : % target - swift - frontend - emit - sil - enable - sil - ownership - sdk % S / . . / SILGen / Inputs % s - I % S / . . / SILGen / Inputs - enable - source - import - parse - stdlib - o / dev / null - verify <nl> + / / RUN : % target - swift - frontend - emit - sil - sdk % S / . . / SILGen / Inputs % s - I % S / . . / SILGen / Inputs - enable - source - import - parse - stdlib - o / dev / null - verify <nl> / / REQUIRES : objc_interop <nl> <nl> import Swift <nl> mmm a / test / SILOptimizer / definite_init_existential_let . swift <nl> ppp b / test / SILOptimizer / definite_init_existential_let . swift <nl> <nl> - / / RUN : % target - swift - frontend - emit - sil - enable - sil - ownership - verify % s <nl> + / / RUN : % target - swift - frontend - emit - sil - verify % s <nl> <nl> / / rdar : / / problem / 29716016 - Check that we properly enforce DI on ` let ` <nl> / / variables and class properties . <nl> mmm a / test / SILOptimizer / definite_init_extension . swift <nl> ppp b / test / SILOptimizer / definite_init_extension . 
swift <nl> <nl> - / / RUN : % target - swift - frontend - emit - sil - enable - sil - ownership - verify % s - o / dev / null <nl> + / / RUN : % target - swift - frontend - emit - sil - verify % s - o / dev / null <nl> <nl> struct S < T > { <nl> let t : T / / expected - note { { ' self . t . 1 ' not initialized } } <nl> mmm a / test / SILOptimizer / definite_init_failable_initializers . swift <nl> ppp b / test / SILOptimizer / definite_init_failable_initializers . swift <nl> <nl> - / / RUN : % target - swift - frontend - emit - sil - enable - sil - ownership % s | % FileCheck % s <nl> + / / RUN : % target - swift - frontend - emit - sil % s | % FileCheck % s <nl> <nl> / / High - level tests that DI handles early returns from failable and throwing <nl> / / initializers properly . The main complication is conditional release of self <nl> mmm a / test / SILOptimizer / definite_init_failable_initializers_diagnostics . swift <nl> ppp b / test / SILOptimizer / definite_init_failable_initializers_diagnostics . swift <nl> <nl> - / / RUN : % target - swift - frontend - emit - sil - enable - sil - ownership - disable - objc - attr - requires - foundation - module - verify % s <nl> + / / RUN : % target - swift - frontend - emit - sil - disable - objc - attr - requires - foundation - module - verify % s <nl> <nl> / / High - level tests that DI rejects certain invalid idioms for early <nl> / / return from initializers . <nl> mmm a / test / SILOptimizer / definite_init_failable_initializers_objc . swift <nl> ppp b / test / SILOptimizer / definite_init_failable_initializers_objc . swift <nl> <nl> - / / RUN : % target - swift - frontend - emit - sil - enable - sil - ownership - disable - objc - attr - requires - foundation - module % s | % FileCheck % s <nl> + / / RUN : % target - swift - frontend - emit - sil - disable - objc - attr - requires - foundation - module % s | % FileCheck % s <nl> <nl> / / REQUIRES : objc_interop <nl> <nl> mmm a / test / SILOptimizer / definite_init_hang . swift <nl> ppp b / test / SILOptimizer / definite_init_hang . swift <nl> <nl> - / / RUN : % target - swift - frontend - emit - sil - enable - sil - ownership % s - parse - as - library - o / dev / null - verify <nl> + / / RUN : % target - swift - frontend - emit - sil % s - parse - as - library - o / dev / null - verify <nl> <nl> var gg : Bool = false <nl> var rg : Int = 0 <nl> mmm a / test / SILOptimizer / definite_init_lvalue_let_witness_methods . swift <nl> ppp b / test / SILOptimizer / definite_init_lvalue_let_witness_methods . swift <nl> <nl> - / / RUN : % target - swift - frontend - emit - sil - enable - sil - ownership - disable - objc - attr - requires - foundation - module - verify % s <nl> + / / RUN : % target - swift - frontend - emit - sil - disable - objc - attr - requires - foundation - module - verify % s <nl> <nl> / / High - level tests that DI rejects passing let constants to <nl> / / mutating witness methods <nl> mmm a / test / SILOptimizer / definite_init_protocol_init . swift <nl> ppp b / test / SILOptimizer / definite_init_protocol_init . swift <nl> <nl> - / / RUN : % target - swift - frontend - emit - sil - enable - sil - ownership % s - swift - version 5 - verify | % FileCheck % s <nl> + / / RUN : % target - swift - frontend - emit - sil % s - swift - version 5 - verify | % FileCheck % s <nl> <nl> / / Ensure that convenience initializers on concrete types can <nl> / / delegate to factory initializers defined in protocol <nl> mmm a / test / SILOptimizer / definite_init_value_types . 
swift <nl> ppp b / test / SILOptimizer / definite_init_value_types . swift <nl> <nl> - / / RUN : % target - swift - frontend - emit - sil - enable - sil - ownership % s | % FileCheck % s <nl> + / / RUN : % target - swift - frontend - emit - sil % s | % FileCheck % s <nl> <nl> enum ValueEnum { <nl> case a ( String ) <nl> mmm a / test / SILOptimizer / definite_init_value_types_diagnostics . swift <nl> ppp b / test / SILOptimizer / definite_init_value_types_diagnostics . swift <nl> <nl> - / / RUN : % target - swift - frontend - emit - sil - enable - sil - ownership % s - o / dev / null - verify <nl> + / / RUN : % target - swift - frontend - emit - sil % s - o / dev / null - verify <nl> <nl> struct EmptyStruct { } <nl> <nl> mmm a / test / SILOptimizer / mandatory_inlining . swift <nl> ppp b / test / SILOptimizer / mandatory_inlining . swift <nl> <nl> - <nl> - / / RUN : % target - swift - frontend - enable - sil - ownership - sil - verify - all - primary - file % s - emit - sil - o - - verify | % FileCheck % s <nl> + / / RUN : % target - swift - frontend - sil - verify - all - primary - file % s - emit - sil - o - - verify | % FileCheck % s <nl> <nl> / / These tests are deliberately shallow , because I do not want to depend on the <nl> / / specifics of SIL generation , which might change for reasons unrelated to this <nl> mmm a / test / SILOptimizer / mandatory_inlining_circular . swift <nl> ppp b / test / SILOptimizer / mandatory_inlining_circular . swift <nl> <nl> - / / RUN : % target - swift - frontend - enable - sil - ownership - sil - verify - all - emit - sil % s - o / dev / null - verify <nl> + / / RUN : % target - swift - frontend - sil - verify - all - emit - sil % s - o / dev / null - verify <nl> <nl> @ _transparent func waldo ( _ x : Double ) - > Double { <nl> return fred ( x ) ; / / expected - error { { inlining ' transparent ' functions forms circular loop } } expected - note 1 { { while inlining here } } <nl> mmm a / test / SILOptimizer / mandatory_inlining_devirt . swift <nl> ppp b / test / SILOptimizer / mandatory_inlining_devirt . swift <nl> <nl> - / / RUN : % target - swift - frontend - enable - sil - ownership - sil - verify - all % s - module - name test - emit - sil - o - - verify | % FileCheck % s <nl> + / / RUN : % target - swift - frontend - sil - verify - all % s - module - name test - emit - sil - o - - verify | % FileCheck % s <nl> <nl> <nl> / / Constructor calls are dispatched dynamically for open classes , even if <nl> mmm a / test / SILOptimizer / mandatory_inlining_devirt_multifile . swift <nl> ppp b / test / SILOptimizer / mandatory_inlining_devirt_multifile . swift <nl> <nl> - / / RUN : % target - swift - frontend - enable - sil - ownership - module - name test - primary - file % s % S / Inputs / mandatory_inlining_devirt_other . swift - emit - sil - o - | % FileCheck % s <nl> + / / RUN : % target - swift - frontend - module - name test - primary - file % s % S / Inputs / mandatory_inlining_devirt_other . swift - emit - sil - o - | % FileCheck % s <nl> <nl> / / rdar : / / 45110471 <nl> <nl> mmm a / test / SILOptimizer / mandatory_inlining_dynamic_method . swift <nl> ppp b / test / SILOptimizer / mandatory_inlining_dynamic_method . 
swift <nl> <nl> - / / RUN : % target - swift - frontend - enable - sil - ownership - sil - verify - all - emit - sil % s - o / dev / null - verify <nl> + / / RUN : % target - swift - frontend - sil - verify - all - emit - sil % s - o / dev / null - verify <nl> / / REQUIRES : objc_interop <nl> <nl> import Foundation <nl> mmm a / test / SILOptimizer / mandatory_nil_comparison_inlining . swift <nl> ppp b / test / SILOptimizer / mandatory_nil_comparison_inlining . swift <nl> <nl> - / / RUN : % target - swift - frontend - enable - sil - ownership - sil - verify - all - primary - file % s - module - name = test - emit - sil - o - - verify | % FileCheck % s <nl> + / / RUN : % target - swift - frontend - sil - verify - all - primary - file % s - module - name = test - emit - sil - o - - verify | % FileCheck % s <nl> <nl> <nl> / / CHECK - LABEL : sil { { . * } } @ { { . * } } generic_func <nl> mmm a / test / SILOptimizer / stack - nesting - wrong - scope . swift <nl> ppp b / test / SILOptimizer / stack - nesting - wrong - scope . swift <nl> <nl> - / / RUN : % target - swift - frontend - emit - sil - enable - sil - ownership % s - Onone - Xllvm \ <nl> + / / RUN : % target - swift - frontend - emit - sil % s - Onone - Xllvm \ <nl> / / RUN : - sil - print - after = allocbox - to - stack - Xllvm \ <nl> / / RUN : - sil - print - only - functions = $ s3red19ThrowAddrOnlyStructV016throwsOptionalToG0ACyxGSgSi_tcfC \ <nl> / / RUN : - Xllvm - sil - print - debuginfo - o % t - module - name red 2 > & 1 | % FileCheck % s <nl> | Merge remote - tracking branch ' origin / master ' into master - next | apple/swift | 6e11834a4a9287f95d79d9211906a60e12ac7570 | 2019-03-13T08:08:58Z |
mmm a / tests / api_tests / api_tests . cpp <nl> ppp b / tests / api_tests / api_tests . cpp <nl> TEST_CASE_TABLE_TYPE_FAILURE ( test_table_load_fail_i64i64i64_with_str , ldiiinotst <nl> { try { \ <nl> auto wasm = assemble_wast ( test_api_wast ) ; \ <nl> \ <nl> - Make_Blockchain ( chain , 500 , \ <nl> + Make_Blockchain ( chain , 5000 , \ <nl> : : eosio : : chain_plugin : : default_received_block_transaction_execution_time , \ <nl> : : eosio : : chain_plugin : : default_create_block_transaction_execution_time , chain_controller : : txn_msg_limits { } ) ; \ <nl> chain . produce_blocks ( 2 ) ; \ <nl> | Increase time for db api tests . | EOSIO/eos | 315f35295312c67c207bcda6d8c46aba4a0b5a37 | 2017-11-20T20:24:06Z |
mmm a / src / library_syscall . js <nl> ppp b / src / library_syscall . js <nl> var SyscallsLibrary = { <nl> } , <nl> __syscall20__deps : [ ' $ PROCINFO ' ] , <nl> __syscall20__nothrow : true , <nl> + __syscall20__proxy : false , <nl> __syscall20 : function ( ) { / / getpid <nl> return PROCINFO . pid ; <nl> } , <nl> var SyscallsLibrary = { <nl> path = SYSCALLS . getStr ( path ) ; <nl> return SYSCALLS . doAccess ( path , amode ) ; <nl> } , <nl> + __syscall34__proxy : false , <nl> __syscall34 : function ( inc ) { / / nice <nl> return - { { { cDefine ( ' EPERM ' ) } } } ; / / no meaning to nice for our single - process environment <nl> } , <nl> + __syscall36__proxy : false , <nl> __syscall36__nothrow : true , <nl> __syscall36 : function ( ) { / / sync <nl> return 0 ; <nl> var SyscallsLibrary = { <nl> return 0 ; <nl> } , <nl> __syscall51__nothrow : true , <nl> + __syscall51__proxy : false , <nl> __syscall51 : function ( filename ) { / / acct <nl> return - { { { cDefine ( ' ENOSYS ' ) } } } ; / / unsupported features <nl> } , <nl> var SyscallsLibrary = { <nl> } , <nl> __syscall64__deps : [ ' $ PROCINFO ' ] , <nl> __syscall64__nothrow : true , <nl> + __syscall64__proxy : false , <nl> __syscall64 : function ( ) { / / getppid <nl> return PROCINFO . ppid ; <nl> } , <nl> __syscall65__deps : [ ' $ PROCINFO ' ] , <nl> __syscall65__nothrow : true , <nl> + __syscall65__proxy : false , <nl> __syscall65 : function ( ) { / / getpgrp <nl> return PROCINFO . pgid ; <nl> } , <nl> __syscall66__nothrow : true , <nl> + __syscall66__proxy : false , <nl> __syscall66 : function ( ) { / / setsid <nl> return 0 ; / / no - op <nl> } , <nl> __syscall75__nothrow : true , <nl> + __syscall75__proxy : false , <nl> __syscall75 : function ( varargs ) { / / setrlimit <nl> return 0 ; / / no - op <nl> } , <nl> var SyscallsLibrary = { <nl> return 0 ; <nl> } , <nl> __syscall96__nothrow : true , <nl> + __syscall96__proxy : false , <nl> __syscall96 : function ( ) { / / getpriority <nl> return 0 ; <nl> } , <nl> __syscall97__nothrow : true , <nl> + __syscall97__proxy : false , <nl> __syscall97 : function ( ) { / / setpriority <nl> return - { { { cDefine ( ' EPERM ' ) } } } ; <nl> } , <nl> var SyscallsLibrary = { <nl> } , <nl> # endif / / ~ PROXY_POSIX_SOCKETS = = 0 <nl> __syscall104__nothrow : true , <nl> + __syscall104__proxy : false , <nl> __syscall104 : function ( which , new_value , old_value ) { / / setitimer <nl> return - { { { cDefine ( ' ENOSYS ' ) } } } ; / / unsupported feature <nl> } , <nl> + __syscall114__proxy : false , <nl> __syscall114 : function ( pid , wstart , options , rusage ) { / / wait4 <nl> abort ( ' cannot wait on child processes ' ) ; <nl> } , <nl> __syscall121__nothrow : true , <nl> + __syscall121__proxy : false , <nl> __syscall121 : function ( name , size ) { / / setdomainname <nl> return - { { { cDefine ( ' EPERM ' ) } } } ; <nl> } , <nl> # if MINIMAL_RUNTIME <nl> __syscall122__deps : [ ' $ writeAsciiToMemory ' ] , <nl> # endif <nl> + __syscall122__proxy : false , <nl> __syscall122 : function ( buf ) { / / uname <nl> if ( ! buf ) return - { { { cDefine ( ' EFAULT ' ) } } } <nl> var layout = { { { JSON . stringify ( C_STRUCTS . 
utsname ) } } } ; <nl> var SyscallsLibrary = { <nl> copyString ( ' machine ' , ' x86 - JS ' ) ; <nl> return 0 ; <nl> } , <nl> + __syscall125__proxy : false , <nl> __syscall125__nothrow : true , <nl> __syscall125 : function ( addr , len , size ) { / / mprotect <nl> return 0 ; / / let ' s not and say we did <nl> } , <nl> __syscall132__deps : [ ' $ PROCINFO ' ] , <nl> + __syscall132__proxy : false , <nl> __syscall132 : function ( pid ) { / / getpgid <nl> if ( pid & & pid ! = = PROCINFO . pid ) return - { { { cDefine ( ' ESRCH ' ) } } } ; <nl> return PROCINFO . pgid ; <nl> var SyscallsLibrary = { <nl> return 0 ; <nl> } , <nl> __syscall147__deps : [ ' $ PROCINFO ' ] , <nl> + __syscall147__proxy : false , <nl> __syscall147 : function ( pid ) { / / getsid <nl> if ( pid & & pid ! = = PROCINFO . pid ) return - { { { cDefine ( ' ESRCH ' ) } } } ; <nl> return PROCINFO . sid ; <nl> var SyscallsLibrary = { <nl> var stream = SYSCALLS . getStreamFromFD ( fd ) ; <nl> return 0 ; / / we can ' t do anything synchronously ; the in - memory FS is already synced to <nl> } , <nl> + __syscall150__proxy : false , <nl> __syscall150__sig : ' iii ' , <nl> __syscall150 : ' __syscall153 ' , / / mlock <nl> + __syscall151__proxy : false , <nl> __syscall151__sig : ' iii ' , <nl> __syscall151 : ' __syscall153 ' , / / munlock <nl> + __syscall152__proxy : false , <nl> __syscall152__sig : ' iii ' , <nl> __syscall152 : ' __syscall153 ' , / / mlockall <nl> __syscall153__nothrow : true , <nl> + __syscall153__proxy : false , <nl> __syscall153 : function ( ) { / / munlockall <nl> return 0 ; <nl> } , <nl> __syscall163__nothrow : true , <nl> + __syscall163__proxy : false , <nl> __syscall163 : function ( old_addr , old_size , new_size , flags ) { / / mremap <nl> return - { { { cDefine ( ' ENOMEM ' ) } } } ; / / never succeed <nl> } , <nl> var SyscallsLibrary = { <nl> return nonzero ; <nl> } , <nl> __syscall178__nothrow : true , <nl> + __syscall178__proxy : false , <nl> __syscall178 : function ( tgid , pid , uinfo ) { / / rt_sigqueueinfo <nl> # if SYSCALL_DEBUG <nl> err ( ' warning : ignoring SYS_rt_sigqueueinfo ' ) ; <nl> var SyscallsLibrary = { <nl> return 0 ; <nl> } , <nl> __syscall199__sig : ' i ' , <nl> + __syscall199__proxy : false , <nl> __syscall199 : ' __syscall202 ' , / / getuid32 <nl> __syscall200__sig : ' i ' , <nl> + __syscall200__proxy : false , <nl> __syscall200 : ' __syscall202 ' , / / getgid32 <nl> __syscall201__sig : ' i ' , <nl> + __syscall201__proxy : false , <nl> __syscall201 : ' __syscall202 ' , / / geteuid32 <nl> __syscall202__nothrow : true , <nl> + __syscall202__proxy : false , <nl> __syscall202 : function ( ) { / / getgid32 <nl> return 0 ; <nl> } , <nl> var SyscallsLibrary = { <nl> return 0 ; <nl> } , <nl> __syscall203__sig : ' ii ' , <nl> + __syscall203__proxy : false , <nl> __syscall203 : ' __sysicall214 ' , / / setreuid32 <nl> __syscall204__sig : ' ii ' , <nl> + __syscall204__proxy : false , <nl> __syscall204 : ' __syscall214 ' , / / setregid32 <nl> __syscall213__sig : ' ii ' , <nl> + __syscall213__proxy : false , <nl> __syscall213 : ' __syscall214 ' , / / setuid32 <nl> + __syscall214__proxy : false , <nl> __syscall214 : function ( uid ) { / / setgid32 <nl> if ( uid ! 
= = 0 ) return - { { { cDefine ( ' EPERM ' ) } } } ; <nl> return 0 ; <nl> } , <nl> + __syscall205__proxy : false , <nl> __syscall205 : function ( size , list ) { / / getgroups32 <nl> if ( size < 1 ) return - { { { cDefine ( ' EINVAL ' ) } } } ; <nl> { { { makeSetValue ( ' list ' , ' 0 ' , ' 0 ' , ' i32 ' ) } } } ; <nl> return 1 ; <nl> } , <nl> + __syscall208__proxy : false , <nl> __syscall208__sig : ' iiii ' , <nl> __syscall208 : ' __syscall210 ' , / / setresuid32 <nl> + __syscall210__proxy : false , <nl> __syscall210 : function ( ruid , euid , suid ) { / / setresgid32 <nl> if ( euid ! = = 0 ) return - { { { cDefine ( ' EPERM ' ) } } } ; <nl> return 0 ; <nl> } , <nl> __syscall209__sig : ' iiii ' , <nl> + __syscall209__proxy : false , <nl> __syscall209 : ' __syscall211 ' , / / getresuid <nl> + __syscall211__proxy : false , <nl> __syscall211 : function ( ruid , euid , suid ) { / / getresgid32 <nl> # if SYSCALL_DEBUG <nl> err ( ' warning : untested syscall ' ) ; <nl> var SyscallsLibrary = { <nl> return 0 ; <nl> } , <nl> __syscall218__nothrow : true , <nl> + __syscall218__proxy : false , <nl> __syscall218 : function ( addr , length , vec ) { / / mincore <nl> return - { { { cDefine ( ' ENOSYS ' ) } } } ; / / unsupported feature <nl> } , <nl> __syscall219__nothrow : true , <nl> + __syscall219__proxy : false , <nl> __syscall219 : function ( addr , length , advice ) { / / madvise <nl> return 0 ; / / advice is welcome , but ignored <nl> } , <nl> var SyscallsLibrary = { <nl> return ___syscall ( [ 268 , 0 , size , buf ] , 0 ) ; <nl> } , <nl> __syscall272__nothrow : true , <nl> + __syscall272__proxy : false , <nl> __syscall272 : function ( fd , offset , len , advice ) { / / fadvise64_64 <nl> return 0 ; / / your advice is important to us ( but we can ' t use it ) <nl> } , <nl> var SyscallsLibrary = { <nl> return 0 ; <nl> } , <nl> __syscall303__nothrow : true , <nl> + __syscall303__proxy : false , <nl> __syscall303 : function ( olddirfd , oldpath , newdirfd , newpath , flags ) { / / linkat <nl> return - { { { cDefine ( ' EMLINK ' ) } } } ; / / no hardlinks for us <nl> } , <nl> var SyscallsLibrary = { <nl> return SYSCALLS . doDup ( old . path , old . flags , suggestFD ) ; <nl> } , <nl> __syscall331__nothrow : true , <nl> + __syscall331__proxy : false , <nl> __syscall331 : function ( fds , flags ) { / / pipe2 <nl> return - { { { cDefine ( ' ENOSYS ' ) } } } ; / / unsupported feature <nl> } , <nl> var SyscallsLibrary = { <nl> return 0 ; <nl> } , <nl> __syscall345__nothrow : true , <nl> + __syscall345__proxy : false , <nl> __syscall345 : function ( sockfd , msg , flags ) { / / sendmmsg <nl> # if SYSCALL_DEBUG <nl> err ( ' warning : ignoring SYS_sendmmsg ' ) ; <nl> for ( var x in SyscallsLibrary ) { <nl> if ( ! SyscallsLibrary [ x + ' __deps ' ] ) SyscallsLibrary [ x + ' __deps ' ] = [ ] ; <nl> SyscallsLibrary [ x + ' __deps ' ] . push ( ' $ SYSCALLS ' ) ; <nl> # if USE_PTHREADS <nl> - / / proxy all syscalls synchronously , for their return values <nl> - SyscallsLibrary [ x + ' __proxy ' ] = ' sync ' ; <nl> + / / Most syscalls need to happen on the main JS thread ( e . g . because the <nl> + / / filesystem is in JS and on that thread ) . Proxy synchronously to there . 
<nl> + / / There are some exceptions , syscalls that we know are ok to just run in <nl> + / / any thread ; those are marked as not being proxied with <nl> + / / __proxy : false <nl> + / / A syscall without a return value could perhaps be proxied asynchronously <nl> + / / instead of synchronously , and marked with <nl> + / / __proxy : ' async ' <nl> + / / ( but essentially all syscalls do have return values ) . <nl> + if ( SyscallsLibrary [ x + ' __proxy ' ] = = = undefined ) { <nl> + SyscallsLibrary [ x + ' __proxy ' ] = ' sync ' ; <nl> + } <nl> # endif <nl> } <nl> <nl> | Mark syscalls which do not need proxying ( ) | emscripten-core/emscripten | b3808332645ce238da5cd8a1763ba3c9142440f4 | 2020-02-20T03:12:30Z |
mmm a / src / library . js <nl> ppp b / src / library . js <nl> LibraryManager . library = { <nl> inet_pton__deps : [ ' __setErrNo ' , ' $ ERRNO_CODES ' , ' inet_addr ' ] , <nl> inet_pton : function ( af , src , dst ) { <nl> / / int af , const char * src , void * dst <nl> - if ( ( af ^ { { { cDefine ( " AF_INET " ) } } } ) ! = = 0 ) { ___setErrNo ( ERRNO_CODES . EAFNOSUPPORT ) ; return - 1 ; } <nl> + if ( ( af ^ { { { cDefine ( ' AF_INET ' ) } } } ) ! = = 0 ) { ___setErrNo ( ERRNO_CODES . EAFNOSUPPORT ) ; return - 1 ; } <nl> var ret = _inet_addr ( src ) ; <nl> if ( ret = = - 1 | | isNaN ( ret ) ) return 0 ; <nl> setValue ( dst , ret , ' i32 ' ) ; <nl> LibraryManager . library = { <nl> var aliasesBuf = _malloc ( 4 ) ; <nl> setValue ( aliasesBuf , 0 , ' i8 * ' ) ; <nl> setValue ( ret + ___hostent_struct_layout . h_aliases , aliasesBuf , ' i8 * * ' ) ; <nl> - setValue ( ret + ___hostent_struct_layout . h_addrtype , { { { cDefine ( " AF_INET " ) } } } , ' i32 ' ) ; <nl> + setValue ( ret + ___hostent_struct_layout . h_addrtype , { { { cDefine ( ' AF_INET ' ) } } } , ' i32 ' ) ; <nl> setValue ( ret + ___hostent_struct_layout . h_length , 4 , ' i32 ' ) ; <nl> var addrListBuf = _malloc ( 12 ) ; <nl> setValue ( addrListBuf , addrListBuf + 8 , ' i32 * ' ) ; <nl> | Merge pull request from waywardmonkeys / cdefine - single - quotes | emscripten-core/emscripten | 4dc7e36c1e1677f56982450a18e99b9eaa2b2d5d | 2013-08-04T04:50:47Z |
mmm a / drivers / gles3 / rasterizer_canvas_gles3 . cpp <nl> ppp b / drivers / gles3 / rasterizer_canvas_gles3 . cpp <nl> void RasterizerCanvasGLES3 : : canvas_render_items ( Item * p_item_list , int p_z , cons <nl> glActiveTexture ( GL_TEXTURE0 + storage - > config . max_texture_image_units - 1 ) ; <nl> glBindTexture ( GL_TEXTURE_2D , skeleton - > texture ) ; <nl> state . using_skeleton = true ; <nl> - state . canvas_shader . set_uniform ( CanvasShaderGLES3 : : SKELETON_TRANSFORM , state . skeleton_transform ) ; <nl> - state . canvas_shader . set_uniform ( CanvasShaderGLES3 : : SKELETON_TRANSFORM_INVERSE , state . skeleton_transform_inverse ) ; <nl> } else { <nl> state . using_skeleton = false ; <nl> } <nl> void RasterizerCanvasGLES3 : : canvas_render_items ( Item * p_item_list , int p_z , cons <nl> state . final_transform = ci - > final_transform ; <nl> state . extra_matrix = Transform2D ( ) ; <nl> <nl> + if ( state . using_skeleton ) { <nl> + state . canvas_shader . set_uniform ( CanvasShaderGLES3 : : SKELETON_TRANSFORM , state . skeleton_transform ) ; <nl> + state . canvas_shader . set_uniform ( CanvasShaderGLES3 : : SKELETON_TRANSFORM_INVERSE , state . skeleton_transform_inverse ) ; <nl> + } <nl> + <nl> state . canvas_shader . set_uniform ( CanvasShaderGLES3 : : FINAL_MODULATE , state . canvas_item_modulate ) ; <nl> state . canvas_shader . set_uniform ( CanvasShaderGLES3 : : MODELVIEW_MATRIX , state . final_transform ) ; <nl> state . canvas_shader . set_uniform ( CanvasShaderGLES3 : : EXTRA_MATRIX , state . extra_matrix ) ; <nl> | Merge pull request from MadEqua / fix - skeleton - transform | godotengine/godot | d711c57d767734887fbf0955a7b9902c54498a0d | 2019-12-23T21:36:09Z |
deleted file mode 100644 <nl> index 9c57fd09c318 . . 000000000000 <nl> mmm a / native_mate / constructor . cc <nl> ppp / dev / null <nl> <nl> - / / Copyright 2014 The Chromium Authors . All rights reserved . <nl> - / / Use of this source code is governed by a BSD - style license that can be <nl> - / / found in the LICENSE . chromium file . <nl> - <nl> - # include " native_mate / constructor . h " <nl> - <nl> - # include " base / bind . h " <nl> - # include " base / strings / string_piece . h " <nl> - # include " native_mate / arguments . h " <nl> - # include " native_mate / function_template . h " <nl> - <nl> - namespace mate { <nl> - <nl> - Constructor : : Constructor ( const base : : StringPiece & name , <nl> - const WrappableFactoryFunction & factory ) <nl> - : name_ ( name ) , factory_ ( factory ) { <nl> - } <nl> - <nl> - virtual Constructor : : ~ Constructor ( ) { <nl> - constructor_ . Reset ( ) ; <nl> - } <nl> - <nl> - v8 : : Handle < v8 : : FunctionTemplate > Constructor : : GetFunctionTemplate ( <nl> - v8 : : Isolate * isolate ) { <nl> - if ( constructor_ . IsEmpty ( ) ) { <nl> - v8 : : Local < v8 : : FunctionTemplate > constructor = CreateFunctionTemplate ( <nl> - isolate , base : : Bind ( & Constructor : : New , base : : Unretained ( this ) ) ) ; <nl> - constructor - > InstanceTemplate ( ) - > SetInternalFieldCount ( 1 ) ; <nl> - constructor - > SetClassName ( StringToV8 ( isolate , name_ ) ) ; <nl> - constructor_ . Reset ( isolate , constructor ) ; <nl> - } <nl> - <nl> - return MATE_PERSISTENT_TO_LOCAL ( v8 : : FunctionTemplate , isolate , constructor_ ) ; <nl> - } <nl> - <nl> - void Constructor : : New ( mate : : Arguments * args ) { <nl> - MATE_SET_INTERNAL_FIELD_POINTER ( args - > GetThis ( ) , 0 , factory_ . Run ( ) ) ; <nl> - } <nl> - <nl> - } / / namespace mate <nl> mmm a / native_mate / constructor . h <nl> ppp b / native_mate / constructor . h <nl> <nl> <nl> # include " base / bind . h " <nl> # include " base / compiler_specific . h " <nl> - # include " base / strings / string_piece . h " <nl> # include " native_mate / function_template . h " <nl> - # include " v8 / include / v8 . h " <nl> <nl> namespace mate { <nl> <nl> mmm a / native_mate / scoped_persistent . h <nl> ppp b / native_mate / scoped_persistent . h <nl> class RefCountedPersistent : public ScopedPersistent < T > , <nl> DISALLOW_COPY_AND_ASSIGN ( RefCountedPersistent ) ; <nl> } ; <nl> <nl> + typedef scoped_refptr < RefCountedPersistent < v8 : : Object > > RefCountedV8Object ; <nl> + <nl> } / / namespace mate <nl> <nl> # endif / / NATIVE_MATE_SCOPED_PERSISTENT_H_ <nl> | Cleanup . | electron/electron | bdfbef41c6d81121f06c5bc2454954f4db706753 | 2014-04-16T02:25:22Z |
mmm a / xbmc / interfaces / json - rpc / JSONRPC . cpp <nl> ppp b / xbmc / interfaces / json - rpc / JSONRPC . cpp <nl> JsonRpcMethodMap CJSONRPC : : m_methodMaps [ ] = { <nl> / / TODO <nl> <nl> / / Playlist <nl> - / / { " Playlist . Create " , CPlaylistOperations : : Create } , <nl> - / / { " Playlist . Destroy " , CPlaylistOperations : : Destroy } , <nl> - / / <nl> - / / { " Playlist . GetItems " , CPlaylistOperations : : GetItems } , <nl> - / / { " Playlist . Add " , CPlaylistOperations : : Add } , <nl> - / / { " Playlist . Remove " , CPlaylistOperations : : Remove } , <nl> - / / { " Playlist . Swap " , CPlaylistOperations : : Swap } , <nl> - / / { " Playlist . Shuffle " , CPlaylistOperations : : Shuffle } , <nl> - / / { " Playlist . UnShuffle " , CPlaylistOperations : : UnShuffle } , <nl> + { " Playlist . Create " , CPlaylistOperations : : Create } , <nl> + { " Playlist . Destroy " , CPlaylistOperations : : Destroy } , <nl> + <nl> + { " Playlist . GetItems " , CPlaylistOperations : : GetItems } , <nl> + { " Playlist . Add " , CPlaylistOperations : : Add } , <nl> + { " Playlist . Remove " , CPlaylistOperations : : Remove } , <nl> + { " Playlist . Swap " , CPlaylistOperations : : Swap } , <nl> + { " Playlist . Clear " , CPlaylistOperations : : Clear } , <nl> + { " Playlist . Shuffle " , CPlaylistOperations : : Shuffle } , <nl> + { " Playlist . UnShuffle " , CPlaylistOperations : : UnShuffle } , <nl> <nl> / / Files <nl> { " Files . GetSources " , CFileOperations : : GetRootDirectory } , <nl> JsonRpcMethodMap CJSONRPC : : m_methodMaps [ ] = { <nl> { " AudioPlaylist . Shuffle " , CAVPlaylistOperations : : Shuffle , Response , ControlPlayback , " Shuffle audio playlist " } , <nl> { " AudioPlaylist . UnShuffle " , CAVPlaylistOperations : : UnShuffle , Response , ControlPlayback , " UnShuffle audio playlist " } , <nl> { " AudioPlaylist . Remove " , CAVPlaylistOperations : : Remove , Response , ControlPlayback , " Remove entry from playlist " } , <nl> - <nl> - / / Playlist <nl> - { " Playlist . Create " , CPlaylistOperations : : Create , Response , ReadData , " Creates a virtual playlist from a given one from a file " } , <nl> - { " Playlist . Destroy " , CPlaylistOperations : : Destroy , Response , ReadData , " Destroys a virtual playlist " } , <nl> - <nl> - { " Playlist . GetItems " , CPlaylistOperations : : GetItems , Response , ReadData , " Retrieve items in the playlist . Parameter example { \ " playlist \ " : \ " music \ " } . playlist optional . " } , <nl> - { " Playlist . Add " , CPlaylistOperations : : Add , Response , ControlPlayback , " Add items to the playlist . Parameter example { \ " playlist \ " : \ " music \ " , \ " file \ " : \ " / foo / bar . mp3 \ " } . playlist optional . " } , <nl> - { " Playlist . Remove " , CPlaylistOperations : : Remove , Response , ControlPlayback , " Remove items in the playlist . Parameter example { \ " playlist \ " : \ " music \ " , \ " item \ " : 0 } . playlist optional . " } , <nl> - { " Playlist . Swap " , CPlaylistOperations : : Swap , Response , ControlPlayback , " Swap items in the playlist . Parameter example { \ " playlist \ " : \ " music \ " , \ " item1 \ " : 0 , \ " item2 \ " : 1 } . playlist optional . " } , <nl> - { " Playlist . Shuffle " , CPlaylistOperations : : Shuffle , Response , ControlPlayback , " Shuffle playlist " } , <nl> - { " Playlist . 
UnShuffle " , CPlaylistOperations : : UnShuffle , Response , ControlPlayback , " UnShuffle playlist " } , <nl> } ; * / <nl> <nl> void CJSONRPC : : Initialize ( ) <nl> mmm a / xbmc / interfaces / json - rpc / PlaylistOperations . cpp <nl> ppp b / xbmc / interfaces / json - rpc / PlaylistOperations . cpp <nl> using namespace JSONRPC ; <nl> using namespace PLAYLIST ; <nl> using namespace std ; <nl> <nl> - # define PLAYLIST_MEMBER_VIRTUAL " playlist - virtual " <nl> - # define PLAYLIST_MEMBER_FILE " playlist - file " <nl> + # define PLAYLIST_MEMBER_VIRTUAL " id " <nl> + # define PLAYLIST_MEMBER_FILE " file " <nl> <nl> map < CStdString , CPlayListPtr > CPlaylistOperations : : VirtualPlaylists ; <nl> CCriticalSection CPlaylistOperations : : VirtualCriticalSection ; <nl> <nl> JSON_STATUS CPlaylistOperations : : Create ( const CStdString & method , ITransportLayer * transport , IClient * client , const Value & parameterObject , Value & result ) <nl> { <nl> - if ( ! ( parameterObject . isString ( ) | | parameterObject . isNull ( ) | | parameterObject . isObject ( ) ) ) <nl> - return InvalidParams ; <nl> - <nl> CStdString file = " " ; <nl> CStdString id = " " ; <nl> <nl> - if ( parameterObject . isObject ( ) ) <nl> - { <nl> - if ( parameterObject . isMember ( PLAYLIST_MEMBER_FILE ) & & parameterObject [ PLAYLIST_MEMBER_FILE ] . isString ( ) ) <nl> - file = parameterObject [ PLAYLIST_MEMBER_FILE ] . asString ( ) ; <nl> + if ( parameterObject [ " playlist " ] . isMember ( PLAYLIST_MEMBER_FILE ) & & parameterObject [ " playlist " ] [ PLAYLIST_MEMBER_FILE ] . isString ( ) ) <nl> + file = parameterObject [ " playlist " ] [ PLAYLIST_MEMBER_FILE ] . asString ( ) ; <nl> <nl> - if ( parameterObject . isMember ( PLAYLIST_MEMBER_VIRTUAL ) & & parameterObject [ PLAYLIST_MEMBER_VIRTUAL ] . isString ( ) ) <nl> - id = parameterObject [ PLAYLIST_MEMBER_VIRTUAL ] . asString ( ) ; <nl> - } <nl> + if ( parameterObject [ " playlist " ] . isMember ( PLAYLIST_MEMBER_VIRTUAL ) & & parameterObject [ " playlist " ] [ PLAYLIST_MEMBER_VIRTUAL ] . isString ( ) ) <nl> + id = parameterObject [ " playlist " ] [ PLAYLIST_MEMBER_VIRTUAL ] . asString ( ) ; <nl> <nl> CPlayListPtr playlist ; <nl> <nl> JSON_STATUS CPlaylistOperations : : Create ( const CStdString & method , ITransportLaye <nl> <nl> CSingleLock lock ( VirtualCriticalSection ) ; <nl> VirtualPlaylists [ id ] = playlist ; <nl> - result [ PLAYLIST_MEMBER_VIRTUAL ] = id ; <nl> + result [ " playlistid " ] = id ; <nl> <nl> return OK ; <nl> } <nl> <nl> JSON_STATUS CPlaylistOperations : : Destroy ( const CStdString & method , ITransportLayer * transport , IClient * client , const Value & parameterObject , Value & result ) <nl> { <nl> - if ( ! parameterObject . isString ( ) ) <nl> - return InvalidParams ; <nl> - <nl> CSingleLock lock ( VirtualCriticalSection ) ; <nl> - VirtualPlaylists . erase ( parameterObject . asString ( ) ) ; <nl> + if ( VirtualPlaylists . erase ( parameterObject [ " playlistid " ] . asString ( ) ) < = 0 ) <nl> + return InvalidParams ; <nl> <nl> return ACK ; <nl> } <nl> JSON_STATUS CPlaylistOperations : : Add ( const CStdString & method , ITransportLayer * <nl> <nl> JSON_STATUS CPlaylistOperations : : Remove ( const CStdString & method , ITransportLayer * transport , IClient * client , const Value & parameterObject , Value & result ) <nl> { <nl> - if ( ! ( parameterObject [ " item " ] . isInt ( ) | | parameterObject [ " item " ] . 
isString ( ) ) ) <nl> - return InvalidParams ; <nl> - <nl> CSingleLock lock ( VirtualCriticalSection ) ; <nl> CPlayListPtr playlist = GetPlaylist ( parameterObject ) ; <nl> <nl> JSON_STATUS CPlaylistOperations : : Remove ( const CStdString & method , ITransportLaye <nl> playlist - > Remove ( parameterObject [ " item " ] . asInt ( ) ) ; <nl> else if ( parameterObject [ " item " ] . isString ( ) ) <nl> playlist - > Remove ( parameterObject [ " item " ] . asString ( ) ) ; <nl> + <nl> return ACK ; <nl> } <nl> <nl> JSON_STATUS CPlaylistOperations : : Remove ( const CStdString & method , ITransportLaye <nl> <nl> JSON_STATUS CPlaylistOperations : : Swap ( const CStdString & method , ITransportLayer * transport , IClient * client , const Value & parameterObject , Value & result ) <nl> { <nl> - if ( ! parameterObject [ " item1 " ] . isInt ( ) & & ! parameterObject [ " item2 " ] . isInt ( ) ) <nl> - return InvalidParams ; <nl> - <nl> CSingleLock lock ( VirtualCriticalSection ) ; <nl> CPlayListPtr playlist = GetPlaylist ( parameterObject ) ; <nl> <nl> bool CPlaylistOperations : : FillFileItemList ( const Value & parameterObject , CFileIt <nl> { <nl> bool found = false ; <nl> <nl> - if ( parameterObject [ PLAYLIST_MEMBER_FILE ] . isString ( ) ) <nl> + if ( parameterObject [ " playlist " ] . isMember ( PLAYLIST_MEMBER_FILE ) & & parameterObject [ " playlist " ] [ PLAYLIST_MEMBER_FILE ] . isString ( ) ) <nl> { <nl> - CStdString file = parameterObject [ PLAYLIST_MEMBER_FILE ] . asString ( ) ; <nl> + CStdString file = parameterObject [ " playlist " ] [ PLAYLIST_MEMBER_FILE ] . asString ( ) ; <nl> CPlayListPtr playlist = CPlayListPtr ( CPlayListFactory : : Create ( file ) ) ; <nl> if ( playlist & & playlist - > Load ( file ) ) <nl> { <nl> bool CPlaylistOperations : : FillFileItemList ( const Value & parameterObject , CFileIt <nl> } <nl> <nl> CSingleLock lock ( VirtualCriticalSection ) ; <nl> - if ( parameterObject [ PLAYLIST_MEMBER_VIRTUAL ] . isString ( ) ) <nl> + if ( parameterObject [ " playlist " ] . isMember ( PLAYLIST_MEMBER_VIRTUAL ) & & parameterObject [ " playlist " ] [ PLAYLIST_MEMBER_VIRTUAL ] . isString ( ) ) <nl> { <nl> - CStdString id = parameterObject [ PLAYLIST_MEMBER_VIRTUAL ] . asString ( ) ; <nl> + CStdString id = parameterObject [ " playlist " ] [ PLAYLIST_MEMBER_VIRTUAL ] . asString ( ) ; <nl> CPlayListPtr playlist = VirtualPlaylists [ id ] ; <nl> if ( playlist ) <nl> { <nl> bool CPlaylistOperations : : FillFileItemList ( const Value & parameterObject , CFileIt <nl> <nl> CPlayListPtr CPlaylistOperations : : GetPlaylist ( const Value & parameterObject ) <nl> { <nl> - if ( parameterObject [ PLAYLIST_MEMBER_VIRTUAL ] . isString ( ) ) <nl> + if ( parameterObject [ " playlist " ] . isMember ( PLAYLIST_MEMBER_VIRTUAL ) & & parameterObject [ " playlist " ] [ PLAYLIST_MEMBER_VIRTUAL ] . isString ( ) ) <nl> { <nl> - CStdString id = parameterObject [ PLAYLIST_MEMBER_VIRTUAL ] . asString ( ) ; <nl> + CStdString id = parameterObject [ " playlist " ] [ PLAYLIST_MEMBER_VIRTUAL ] . asString ( ) ; <nl> return VirtualPlaylists [ id ] ; <nl> } <nl> - else if ( parameterObject [ PLAYLIST_MEMBER_FILE ] . isString ( ) ) <nl> + else if ( parameterObject [ " playlist " ] . isMember ( PLAYLIST_MEMBER_FILE ) & & parameterObject [ " playlist " ] [ PLAYLIST_MEMBER_FILE ] . isString ( ) ) <nl> { <nl> - CStdString file = parameterObject [ PLAYLIST_MEMBER_FILE ] . asString ( ) ; <nl> + CStdString file = parameterObject [ " playlist " ] [ PLAYLIST_MEMBER_FILE ] . 
asString ( ) ; <nl> CPlayListPtr playlist = CPlayListPtr ( CPlayListFactory : : Create ( file ) ) ; <nl> if ( playlist & & playlist - > Load ( file ) ) <nl> return playlist ; <nl> mmm a / xbmc / interfaces / json - rpc / ServiceDescription . h <nl> ppp b / xbmc / interfaces / json - rpc / ServiceDescription . h <nl> namespace JSONRPC <nl> " \ " params \ " : [ ] , " <nl> " \ " returns \ " : \ " string \ " " <nl> " } , " <nl> + " \ " Playlist . Create \ " : { " <nl> + " \ " type \ " : \ " method \ " , " <nl> + " \ " description \ " : \ " Creates a virtual playlist from a given one from a file \ " , " <nl> + " \ " transport \ " : \ " Response \ " , " <nl> + " \ " permission \ " : \ " ReadData \ " , " <nl> + " \ " statechanging \ " : true , " <nl> + " \ " params \ " : [ " <nl> + " { \ " name \ " : \ " playlist \ " , \ " type \ " : \ " object \ " , \ " id \ " : \ " Playlist . Id \ " , \ " required \ " : true , " <nl> + " \ " properties \ " : { " <nl> + " \ " id \ " : { \ " type \ " : \ " string \ " , \ " description \ " : \ " Identification of a virtual playlist \ " } , " <nl> + " \ " file \ " : { \ " type \ " : \ " string \ " , \ " description \ " : \ " File from which to load a playlist \ " } " <nl> + " } " <nl> + " } " <nl> + " ] , " <nl> + " \ " returns \ " : { \ " type \ " : \ " object \ " , " <nl> + " \ " properties \ " : { " <nl> + " \ " playlistid \ " : { \ " type \ " : \ " string \ " , \ " required \ " : true } " <nl> + " } " <nl> + " } " <nl> + " } , " <nl> + " \ " Playlist . Destroy \ " : { " <nl> + " \ " type \ " : \ " method \ " , " <nl> + " \ " description \ " : \ " Destroys a virtual playlist \ " , " <nl> + " \ " transport \ " : \ " Response \ " , " <nl> + " \ " permission \ " : \ " ReadData \ " , " <nl> + " \ " statechanging \ " : true , " <nl> + " \ " params \ " : [ " <nl> + " { \ " name \ " : \ " playlistid \ " , \ " type \ " : \ " string \ " , \ " required \ " : true , \ " description \ " : \ " Identification of the playlist \ " } " <nl> + " ] , " <nl> + " \ " returns \ " : \ " string \ " " <nl> + " } , " <nl> + " \ " Playlist . GetItems \ " : { " <nl> + " \ " type \ " : \ " method \ " , " <nl> + " \ " description \ " : \ " Retrieve items in the playlist \ " , " <nl> + " \ " transport \ " : \ " Response \ " , " <nl> + " \ " permission \ " : \ " ReadData \ " , " <nl> + " \ " statechanging \ " : false , " <nl> + " \ " params \ " : [ " <nl> + " { \ " name \ " : \ " playlist \ " , \ " $ ref \ " : \ " Playlist . Id \ " , \ " required \ " : true } , " <nl> + " { \ " name \ " : \ " fields \ " , \ " type \ " : \ " array \ " , \ " id \ " : \ " List . Fields . 
All \ " , " <nl> + " \ " items \ " : { \ " type \ " : \ " string \ " , \ " uniqueItems \ " : true , " <nl> + " \ " enum \ " : [ \ " title \ " , \ " artist \ " , \ " albumartist \ " , \ " genre \ " , \ " year \ " , \ " rating \ " , " <nl> + " \ " album \ " , \ " track \ " , \ " duration \ " , \ " comment \ " , \ " lyrics \ " , \ " musicbrainztrackid \ " , " <nl> + " \ " musicbrainzartistid \ " , \ " musicbrainzalbumid \ " , \ " musicbrainzalbumartistid \ " , " <nl> + " \ " playcount \ " , \ " fanart \ " , \ " director \ " , \ " trailer \ " , \ " tagline \ " , \ " plot \ " , " <nl> + " \ " plotoutline \ " , \ " originaltitle \ " , \ " lastplayed \ " , \ " writer \ " , \ " studio \ " , " <nl> + " \ " mpaa \ " , \ " cast \ " , \ " country \ " , \ " imdbnumber \ " , \ " premiered \ " , \ " productioncode \ " , " <nl> + " \ " runtime \ " , \ " set \ " , \ " showlink \ " , \ " streamDetails \ " , \ " top250 \ " , \ " votes \ " , " <nl> + " \ " writingcredits \ " , \ " firstaired \ " , \ " season \ " , \ " episode \ " , \ " showtitle \ " ] " <nl> + " } " <nl> + " } , " <nl> + " { \ " name \ " : \ " limits \ " , \ " $ ref \ " : \ " List . Limits \ " } , " <nl> + " { \ " name \ " : \ " sort \ " , \ " $ ref \ " : \ " List . Sort \ " } " <nl> + " ] , " <nl> + " \ " returns \ " : { \ " type \ " : \ " object \ " , " <nl> + " \ " properties \ " : { " <nl> + " \ " name \ " : { \ " type \ " : \ " string \ " , \ " description \ " : \ " Name of the playlist ( if available ) \ " } , " <nl> + " \ " items \ " : { \ " type \ " : \ " array \ " , \ " required \ " : true , \ " id \ " : \ " List . Items . All \ " , " <nl> + " \ " items \ " : { \ " type \ " : \ " object \ " , " <nl> + " \ " properties \ " : { " <nl> + " \ " label \ " : { \ " type \ " : \ " string \ " , \ " required \ " : true } , " <nl> + " \ " file \ " : { \ " type \ " : \ " string \ " } , " <nl> + " \ " fanart \ " : { \ " type \ " : \ " string \ " } , " <nl> + " \ " thumbnail \ " : { \ " type \ " : \ " string \ " } , " <nl> + " \ " title \ " : { \ " type \ " : \ " string \ " } , " <nl> + " \ " artist \ " : { \ " type \ " : \ " string \ " } , " <nl> + " \ " albumartist \ " : { \ " type \ " : \ " string \ " } , " <nl> + " \ " genre \ " : { \ " type \ " : \ " string \ " } , " <nl> + " \ " year \ " : { \ " type \ " : \ " integer \ " } , " <nl> + " \ " rating \ " : { \ " type \ " : \ " number \ " } , " <nl> + " \ " album \ " : { \ " type \ " : \ " string \ " } , " <nl> + " \ " track \ " : { \ " type \ " : \ " integer \ " } , " <nl> + " \ " duration \ " : { \ " type \ " : \ " integer \ " } , " <nl> + " \ " comment \ " : { \ " type \ " : \ " string \ " } , " <nl> + " \ " lyrics \ " : { \ " type \ " : \ " string \ " } , " <nl> + " \ " playcount \ " : { \ " type \ " : \ " integer \ " } , " <nl> + " \ " musicbrainztrackid \ " : { \ " type \ " : \ " string \ " } , " <nl> + " \ " musicbrainzartistid \ " : { \ " type \ " : \ " string \ " } , " <nl> + " \ " musicbrainzalbumid \ " : { \ " type \ " : \ " string \ " } , " <nl> + " \ " musicbrainzalbumartistid \ " : { \ " type \ " : \ " string \ " } , " <nl> + " \ " director \ " : { \ " type \ " : \ " string \ " } , " <nl> + " \ " trailer \ " : { \ " type \ " : \ " string \ " } , " <nl> + " \ " tagline \ " : { \ " type \ " : \ " string \ " } , " <nl> + " \ " plot \ " : { \ " type \ " : \ " string \ " } , " <nl> + " \ " plotoutline \ " : { \ " type \ " : \ " string \ " } , " <nl> + " \ " originaltitle \ " : { \ " type \ " : \ " string \ " } , " <nl> + " \ " lastplayed \ " : { \ " type \ " : \ " string \ " } , " <nl> 
+ " \ " writer \ " : { \ " type \ " : \ " string \ " } , " <nl> + " \ " studio \ " : { \ " type \ " : \ " string \ " } , " <nl> + " \ " mpaa \ " : { \ " type \ " : \ " string \ " } , " <nl> + " \ " cast \ " : { \ " $ ref \ " : \ " Video . Cast \ " } , " <nl> + " \ " country \ " : { \ " type \ " : \ " string \ " } , " <nl> + " \ " imdbnumber \ " : { \ " type \ " : \ " string \ " } , " <nl> + " \ " premiered \ " : { \ " type \ " : \ " string \ " } , " <nl> + " \ " productioncode \ " : { \ " type \ " : \ " string \ " } , " <nl> + " \ " runtime \ " : { \ " type \ " : \ " string \ " } , " <nl> + " \ " set \ " : { \ " type \ " : \ " string \ " } , " <nl> + " \ " showlink \ " : { \ " type \ " : \ " string \ " } , " <nl> + " \ " streamDetails \ " : { \ " $ ref \ " : \ " Video . Streams \ " } , " <nl> + " \ " top250 \ " : { \ " type \ " : \ " integer \ " } , " <nl> + " \ " votes \ " : { \ " type \ " : \ " string \ " } , " <nl> + " \ " writingcredits \ " : { \ " type \ " : \ " string \ " } , " <nl> + " \ " firstaired \ " : { \ " type \ " : \ " string \ " } , " <nl> + " \ " season \ " : { \ " type \ " : \ " integer \ " } , " <nl> + " \ " episode \ " : { \ " type \ " : \ " integer \ " } , " <nl> + " \ " showtitle \ " : { \ " type \ " : \ " string \ " } " <nl> + " } " <nl> + " } " <nl> + " } " <nl> + " } " <nl> + " } " <nl> + " } , " <nl> + " \ " Playlist . Add \ " : { " <nl> + " \ " type \ " : \ " method \ " , " <nl> + " \ " description \ " : \ " Add items to the playlist \ " , " <nl> + " \ " transport \ " : \ " Response \ " , " <nl> + " \ " permission \ " : \ " ControlPlayback \ " , " <nl> + " \ " statechanging \ " : true , " <nl> + " \ " params \ " : [ " <nl> + " { \ " name \ " : \ " playlist \ " , \ " $ ref \ " : \ " Playlist . Id \ " , \ " required \ " : true } , " <nl> + " { \ " name \ " : \ " items \ " , \ " $ ref \ " : \ " Playlist . Id \ " , \ " required \ " : true , \ " description \ " : \ " Adds items from given virtual and / or file based playlist \ " } " <nl> + " ] , " <nl> + " \ " returns \ " : \ " string \ " " <nl> + " } , " <nl> + " \ " Playlist . Remove \ " : { " <nl> + " \ " type \ " : \ " method \ " , " <nl> + " \ " description \ " : \ " Remove item from the playlist \ " , " <nl> + " \ " transport \ " : \ " Response \ " , " <nl> + " \ " permission \ " : \ " ControlPlayback \ " , " <nl> + " \ " statechanging \ " : true , " <nl> + " \ " params \ " : [ " <nl> + " { \ " name \ " : \ " playlist \ " , \ " $ ref \ " : \ " Playlist . Id \ " , \ " required \ " : true } , " <nl> + " { \ " name \ " : \ " item \ " , \ " type \ " : [ \ " integer \ " , \ " string \ " ] , \ " required \ " : true } " <nl> + " ] , " <nl> + " \ " returns \ " : \ " string \ " " <nl> + " } , " <nl> + " \ " Playlist . Swap \ " : { " <nl> + " \ " type \ " : \ " method \ " , " <nl> + " \ " description \ " : \ " Swap items in the playlist \ " , " <nl> + " \ " transport \ " : \ " Response \ " , " <nl> + " \ " permission \ " : \ " ControlPlayback \ " , " <nl> + " \ " statechanging \ " : true , " <nl> + " \ " params \ " : [ " <nl> + " { \ " name \ " : \ " playlist \ " , \ " $ ref \ " : \ " Playlist . Id \ " , \ " required \ " : true } , " <nl> + " { \ " name \ " : \ " item1 \ " , \ " $ ref \ " : \ " Playlist . Item . Position \ " , \ " required \ " : true } , " <nl> + " { \ " name \ " : \ " item2 \ " , \ " $ ref \ " : \ " Playlist . Item . Position \ " , \ " required \ " : true } " <nl> + " ] , " <nl> + " \ " returns \ " : \ " string \ " " <nl> + " } , " <nl> + " \ " Playlist . 
Clear \ " : { " <nl> + " \ " type \ " : \ " method \ " , " <nl> + " \ " description \ " : \ " Clear playlist \ " , " <nl> + " \ " transport \ " : \ " Response \ " , " <nl> + " \ " permission \ " : \ " ControlPlayback \ " , " <nl> + " \ " statechanging \ " : true , " <nl> + " \ " params \ " : [ " <nl> + " { \ " name \ " : \ " playlist \ " , \ " $ ref \ " : \ " Playlist . Id \ " , \ " required \ " : true } " <nl> + " ] , " <nl> + " \ " returns \ " : \ " string \ " " <nl> + " } , " <nl> + " \ " Playlist . Shuffle \ " : { " <nl> + " \ " type \ " : \ " method \ " , " <nl> + " \ " description \ " : \ " Shuffle playlist \ " , " <nl> + " \ " transport \ " : \ " Response \ " , " <nl> + " \ " permission \ " : \ " ControlPlayback \ " , " <nl> + " \ " statechanging \ " : true , " <nl> + " \ " params \ " : [ " <nl> + " { \ " name \ " : \ " playlist \ " , \ " $ ref \ " : \ " Playlist . Id \ " , \ " required \ " : true } " <nl> + " ] , " <nl> + " \ " returns \ " : \ " string \ " " <nl> + " } , " <nl> + " \ " Playlist . UnShuffle \ " : { " <nl> + " \ " type \ " : \ " method \ " , " <nl> + " \ " description \ " : \ " Unshuffle playlist \ " , " <nl> + " \ " transport \ " : \ " Response \ " , " <nl> + " \ " permission \ " : \ " ControlPlayback \ " , " <nl> + " \ " statechanging \ " : true , " <nl> + " \ " params \ " : [ " <nl> + " { \ " name \ " : \ " playlist \ " , \ " $ ref \ " : \ " Playlist . Id \ " , \ " required \ " : true } " <nl> + " ] , " <nl> + " \ " returns \ " : \ " string \ " " <nl> + " } , " <nl> <nl> " \ " Files . GetSources \ " : { " <nl> " \ " type \ " : \ " method \ " , " <nl> namespace JSONRPC <nl> " \ " statechanging \ " : false , " <nl> " \ " params \ " : [ " <nl> " { \ " name \ " : \ " directory \ " , \ " type \ " : \ " string \ " , \ " required \ " : true } , " <nl> - " { \ " name \ " : \ " media \ " , \ " $ ref \ " : \ " Files . Media \ " , \ " default \ " : \ " files \ " } " <nl> + " { \ " name \ " : \ " media \ " , \ " $ ref \ " : \ " Files . Media \ " , \ " default \ " : \ " files \ " } , " <nl> + " { \ " name \ " : \ " fields \ " , \ " $ ref \ " : \ " List . Fields . All \ " } , " <nl> + " { \ " name \ " : \ " sort \ " , \ " $ ref \ " : \ " List . Sort \ " } " <nl> " ] , " <nl> " \ " returns \ " : { " <nl> " \ " type \ " : \ " object \ " , " <nl> | JSONRPC : Migration of Playlist | xbmc/xbmc | 5265fa56fae3e387e0b0b946d9812427dc36a479 | 2011-04-01T19:13:52Z |
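The xbmc commit above re-enables the Playlist.* methods in the JSON-RPC method map and moves playlist identification into a nested "playlist" object whose members are now "id" and "file" instead of the old top-level "playlist-virtual"/"playlist-file" keys. As a rough caller-side sketch only — assuming the jsoncpp-style Value type these handlers receive, with header path and the id/file values being illustrative, not taken from the commit:

#include "json/value.h"  // jsoncpp; exact header location is an assumption

// Parameter object in the nested shape CPlaylistOperations::Create() now expects.
static Json::Value BuildPlaylistCreateParams()
{
  Json::Value parameterObject;
  parameterObject["playlist"]["id"]   = "party";           // PLAYLIST_MEMBER_VIRTUAL, now "id"
  parameterObject["playlist"]["file"] = "/music/mix.m3u";  // PLAYLIST_MEMBER_FILE, now "file"
  return parameterObject;
}

On success the handler registers the virtual playlist under the given id and answers with an object carrying a "playlistid" member, matching the returns schema added to ServiceDescription.h.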
mmm a / arangod / Replication / Syncer . cpp <nl> ppp b / arangod / Replication / Syncer . cpp <nl> int Syncer : : getMasterState ( string & errorMsg ) { <nl> <nl> int Syncer : : handleStateResponse ( TRI_json_t const * json , <nl> string & errorMsg ) { <nl> + const string endpointString = " from endpoint ' " + string ( _masterInfo . _endpoint ) + " ' " ; <nl> <nl> / / process " state " section <nl> TRI_json_t const * state = JsonHelper : : getArrayElement ( json , " state " ) ; <nl> <nl> if ( ! JsonHelper : : isArray ( state ) ) { <nl> - errorMsg = " state section is missing from response " ; <nl> + errorMsg = " state section is missing in response " + endpointString ; <nl> <nl> return TRI_ERROR_REPLICATION_INVALID_RESPONSE ; <nl> } <nl> int Syncer : : handleStateResponse ( TRI_json_t const * json , <nl> TRI_json_t const * tick = JsonHelper : : getArrayElement ( state , " lastLogTick " ) ; <nl> <nl> if ( ! JsonHelper : : isString ( tick ) ) { <nl> - errorMsg = " lastLogTick is missing from response " ; <nl> + errorMsg = " lastLogTick is missing in response " + endpointString ; <nl> <nl> return TRI_ERROR_REPLICATION_INVALID_RESPONSE ; <nl> } <nl> int Syncer : : handleStateResponse ( TRI_json_t const * json , <nl> TRI_json_t const * server = JsonHelper : : getArrayElement ( json , " server " ) ; <nl> <nl> if ( ! JsonHelper : : isArray ( server ) ) { <nl> - errorMsg = " server section is missing from response " ; <nl> + errorMsg = " server section is missing in response " + endpointString ; <nl> <nl> return TRI_ERROR_REPLICATION_INVALID_RESPONSE ; <nl> } <nl> int Syncer : : handleStateResponse ( TRI_json_t const * json , <nl> TRI_json_t const * version = JsonHelper : : getArrayElement ( server , " version " ) ; <nl> <nl> if ( ! JsonHelper : : isString ( version ) ) { <nl> - errorMsg = " server version is missing from response " ; <nl> + errorMsg = " server version is missing in response " + endpointString ; <nl> <nl> return TRI_ERROR_REPLICATION_INVALID_RESPONSE ; <nl> } <nl> int Syncer : : handleStateResponse ( TRI_json_t const * json , <nl> TRI_json_t const * serverId = JsonHelper : : getArrayElement ( server , " serverId " ) ; <nl> <nl> if ( ! JsonHelper : : isString ( serverId ) ) { <nl> - errorMsg = " server id is missing from response " ; <nl> + errorMsg = " server id is missing in response " + endpointString ; <nl> <nl> return TRI_ERROR_REPLICATION_INVALID_RESPONSE ; <nl> } <nl> int Syncer : : handleStateResponse ( TRI_json_t const * json , <nl> <nl> if ( masterId = = 0 ) { <nl> / / invalid master id <nl> - errorMsg = " server id in response is invalid " ; <nl> + errorMsg = " invalid server id in response " + endpointString ; <nl> <nl> return TRI_ERROR_REPLICATION_INVALID_RESPONSE ; <nl> } <nl> <nl> if ( masterIdString = = _localServerIdString ) { <nl> / / master and replica are the same instance . this is not supported . <nl> - errorMsg = " master ' s id is the same as the applier server ' s id " ; <nl> + errorMsg = " got same server id ( " + _localServerIdString + " ) " + endpointString + <nl> + " as the local applier server ' s id " ; <nl> <nl> return TRI_ERROR_REPLICATION_LOOP ; <nl> } <nl> int Syncer : : handleStateResponse ( TRI_json_t const * json , <nl> const string versionString = string ( version - > _value . _string . data , version - > _value . _string . length - 1 ) ; <nl> <nl> if ( sscanf ( versionString . c_str ( ) , " % d . % d " , & major , & minor ) ! 
= 2 ) { <nl> - errorMsg = " invalid master version info : " + versionString ; <nl> + errorMsg = " invalid master version info " + endpointString + " : ' " + versionString + " ' " ; <nl> <nl> return TRI_ERROR_REPLICATION_MASTER_INCOMPATIBLE ; <nl> } <nl> int Syncer : : handleStateResponse ( TRI_json_t const * json , <nl> if ( major ! = 1 | | <nl> ( major = = 1 & & minor < 4 ) ) { <nl> / / we can connect to 1 . 4 and higher only <nl> - errorMsg = " incompatible master version : " + versionString ; <nl> + errorMsg = " got incompatible master version " + endpointString + " : ' " + versionString + " ' " ; <nl> <nl> return TRI_ERROR_REPLICATION_MASTER_INCOMPATIBLE ; <nl> } <nl> mmm a / arangod / RestServer / ArangoServer . cpp <nl> ppp b / arangod / RestServer / ArangoServer . cpp <nl> static bool handleUserDatabase ( TRI_doc_mptr_t const * document , <nl> } <nl> <nl> string dbName = doc . getStringValue ( " name " , " " ) ; <nl> - if ( dbName = = " " ) { <nl> - / / database name not found <nl> - LOG_ERROR ( " Database name not found . User database not loaded ! " ) ; <nl> - return true ; <nl> - } <nl> - <nl> string dbPath = doc . getStringValue ( " path " , " " ) ; <nl> - if ( dbPath = = " " ) { <nl> - / / database path not found <nl> - LOG_ERROR ( " Database path not found . User database not loaded ! " ) ; <nl> - return true ; <nl> - } <nl> <nl> - if ( ! VocbaseManager : : manager . canAddVocbase ( dbName , dbPath ) ) { <nl> - LOG_ERROR ( " Cannot add database . ( Wrong name or path ) " ) ; <nl> + int res = VocbaseManager : : manager . canAddVocbase ( dbName , dbPath , false ) ; <nl> + <nl> + if ( res ! = TRI_ERROR_NO_ERROR ) { <nl> + LOGGER_ERROR ( " cannot load database : " < < string ( TRI_errno_string ( res ) ) ) ; <nl> return true ; <nl> } <nl> <nl> static bool handleUserDatabase ( TRI_doc_mptr_t const * document , <nl> <nl> if ( userVocbase ) { <nl> VocbaseManager : : manager . addUserVocbase ( userVocbase ) ; <nl> - } <nl> <nl> - LOGGER_INFO ( " loaded user database ' " < < dbName < < " ' from ' " < < dbPath < < " ' " ) ; <nl> + LOGGER_INFO ( " loaded database ' " < < dbName < < " ' from ' " < < dbPath < < " ' " ) ; <nl> + } <nl> + else { <nl> + LOGGER_ERROR ( " unable to load database ' " < < dbName < < " ' from ' " < < dbPath < < " ' " ) ; <nl> + } <nl> <nl> return true ; <nl> } <nl> mmm a / arangod / RestServer / VocbaseManager . cpp <nl> ppp b / arangod / RestServer / VocbaseManager . cpp <nl> <nl> # include " Logger / Logger . h " <nl> # include " Rest / ConnectionInfo . h " <nl> # include " Basics / StringUtils . h " <nl> - # include " RestServer / VocbaseContext . h " <nl> + # include " BasicsC / files . h " <nl> # include " BasicsC / tri - strings . h " <nl> + # include " RestServer / VocbaseContext . h " <nl> # include " VocBase / auth . h " <nl> # include " Actions / actions . h " <nl> <nl> TRI_vocbase_t * VocbaseManager : : lookupVocbaseByName ( string const & name ) { <nl> / / / @ brief check if name and path is not used <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - bool VocbaseManager : : canAddVocbase ( std : : string const & name , <nl> - std : : string const & path ) { <nl> + int VocbaseManager : : canAddVocbase ( std : : string const & name , <nl> + std : : string const & path , <nl> + bool checkPath ) { <nl> + if ( ! isValidName ( name ) ) { <nl> + return TRI_ERROR_ARANGO_DATABASE_NAME_INVALID ; <nl> + } <nl> + <nl> + if ( path . 
empty ( ) ) { <nl> + return TRI_ERROR_ARANGO_DATABASE_PATH_INVALID ; <nl> + } <nl> + <nl> / / loop over all vocbases and check name and path <nl> - <nl> READ_LOCKER ( _rwLock ) ; <nl> <nl> / / system vocbase <nl> if ( name = = string ( _vocbase - > _name ) ) { <nl> - return false ; <nl> + return TRI_ERROR_ARANGO_DATABASE_NAME_USED ; <nl> } <nl> if ( path = = string ( _vocbase - > _path ) ) { <nl> - return false ; <nl> + return TRI_ERROR_ARANGO_DATABASE_PATH_USED ; <nl> } <nl> <nl> / / user vocbases <nl> bool VocbaseManager : : canAddVocbase ( std : : string const & name , <nl> TRI_vocbase_t * vocbase = i - > second ; <nl> <nl> if ( name = = string ( vocbase - > _name ) ) { <nl> - return false ; <nl> + return TRI_ERROR_ARANGO_DATABASE_NAME_USED ; <nl> } <nl> if ( path = = string ( vocbase - > _path ) ) { <nl> - return false ; <nl> + return TRI_ERROR_ARANGO_DATABASE_PATH_USED ; <nl> } <nl> } <nl> + <nl> + / / check if the path already exists <nl> + if ( checkPath & & TRI_ExistsFile ( path . c_str ( ) ) ) { <nl> + return TRI_ERROR_ARANGO_DATABASE_PATH_USED ; <nl> + } <nl> <nl> - return true ; <nl> + return TRI_ERROR_NO_ERROR ; <nl> + } <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief check if a collection name is valid <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + bool VocbaseManager : : isValidName ( std : : string const & name ) const { <nl> + return TRI_IsAllowedCollectionName ( false , name . c_str ( ) ) ; <nl> } <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / arangod / RestServer / VocbaseManager . h <nl> ppp b / arangod / RestServer / VocbaseManager . 
h <nl> namespace triagens { <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> class VocbaseManager { <nl> + <nl> private : <nl> VocbaseManager ( ) : _vocbase ( 0 ) , _startupLoader ( 0 ) , _endpointServer ( 0 ) { } ; <nl> VocbaseManager ( const VocbaseManager & ) ; <nl> namespace triagens { <nl> / / / @ brief add system vocbase <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - void addSystemVocbase ( TRI_vocbase_t * vocbase ) ; <nl> + void addSystemVocbase ( TRI_vocbase_t * ) ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief get system vocbase <nl> namespace triagens { <nl> / / / @ brief add user vocbase <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - void addUserVocbase ( TRI_vocbase_t * vocbase ) ; <nl> + void addUserVocbase ( TRI_vocbase_t * ) ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief close user vocbases <nl> namespace triagens { <nl> / / / @ brief lookup vocbase by name <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - TRI_vocbase_t * lookupVocbaseByName ( std : : string const & name ) ; <nl> + TRI_vocbase_t * lookupVocbaseByName ( std : : string const & ) ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief check name and path <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - bool canAddVocbase ( std : : string const & name , std : : string const & path ) ; <nl> + int canAddVocbase ( std : : string const & , <nl> + std : : string const & , <nl> + bool ) ; <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief check if a collection name is valid <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + bool isValidName ( std : : string const & ) const ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief add the startup loader <nl> mmm a / arangod / V8Server / v8 - vocbase . cpp <nl> ppp b / arangod / V8Server / v8 - vocbase . cpp <nl> static v8 : : Handle < v8 : : Value > JS_UseVocbase ( v8 : : Arguments const & argv ) { <nl> v8 : : HandleScope scope ; <nl> <nl> if ( argv . Length ( ) ! 
= 1 ) { <nl> - TRI_V8_EXCEPTION_USAGE ( scope , " USE_DATABASE ( < database name > ) " ) ; <nl> + TRI_V8_EXCEPTION_USAGE ( scope , " USE_DATABASE ( < name > ) " ) ; <nl> } <nl> <nl> string name = TRI_ObjectToString ( argv [ 0 ] ) ; <nl> <nl> TRI_vocbase_t * vocbase = VocbaseManager : : manager . lookupVocbaseByName ( name ) ; <nl> + <nl> if ( vocbase ) { <nl> TRI_v8_global_t * v8g = ( TRI_v8_global_t * ) v8 : : Isolate : : GetCurrent ( ) - > GetData ( ) ; <nl> v8g - > _vocbase = vocbase ; <nl> <nl> return scope . Close ( WrapVocBase ( vocbase ) ) ; <nl> } <nl> - <nl> - return scope . Close ( v8 : : Boolean : : New ( false ) ) ; <nl> + <nl> + TRI_V8_EXCEPTION ( scope , TRI_ERROR_ARANGO_DATABASE_NOT_FOUND ) ; <nl> } <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> static v8 : : Handle < v8 : : Value > JS_ListVocbases ( v8 : : Arguments const & argv ) { <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> static v8 : : Handle < v8 : : Value > saveToCollection ( TRI_vocbase_t * vocbase , <nl> - std : : string const & collectionName , v8 : : Handle < v8 : : Object > newDoc ) { <nl> + std : : string const & collectionName , <nl> + std : : string const & key , <nl> + v8 : : Handle < v8 : : Object > newDoc ) { <nl> v8 : : HandleScope scope ; <nl> <nl> - TRI_vocbase_col_t * col = <nl> - TRI_LookupCollectionByNameVocBase ( vocbase , collectionName . c_str ( ) ) ; <nl> + TRI_vocbase_col_t * col = TRI_LookupCollectionByNameVocBase ( vocbase , collectionName . c_str ( ) ) ; <nl> <nl> if ( col = = 0 ) { <nl> TRI_V8_EXCEPTION_INTERNAL ( scope , " cannot extract collection " ) ; <nl> static v8 : : Handle < v8 : : Value > saveToCollection ( TRI_vocbase_t * vocbase , <nl> TRI_primary_collection_t * primary = trx . primaryCollection ( ) ; <nl> TRI_shaped_json_t * shaped = TRI_ShapedJsonV8Object ( newDoc , primary - > _shaper ) ; <nl> <nl> - if ( ! holder . registerShapedJson ( primary - > _shaper , shaped ) ) { <nl> - TRI_V8_EXCEPTION_MESSAGE ( scope , TRI_errno ( ) , <nl> - " < data > cannot be converted into JSON shape " ) ; <nl> + if ( ! holder . registerShapedJson ( primary - > _shaper , shaped ) ) { <nl> + TRI_V8_EXCEPTION_MESSAGE ( scope , TRI_errno ( ) , " < data > cannot be converted into JSON shape " ) ; <nl> } <nl> <nl> TRI_doc_mptr_t document ; <nl> - TRI_voc_key_t key = 0 ; <nl> - res = trx . createDocument ( key , & document , shaped , true ) ; <nl> + <nl> + if ( key . empty ( ) ) { <nl> + res = trx . createDocument ( 0 , & document , shaped , true ) ; <nl> + } <nl> + else { <nl> + res = trx . createDocument ( ( const TRI_voc_key_t ) key . c_str ( ) , & document , shaped , true ) ; <nl> + } <nl> <nl> res = trx . finish ( res ) ; <nl> <nl> static v8 : : Handle < v8 : : Value > saveToCollection ( TRI_vocbase_t * vocbase , <nl> return scope . Close ( result ) ; <nl> } <nl> <nl> - TRI_V8_EXCEPTION_MESSAGE ( scope , res , " cannot save document into collection . 
" ) ; <nl> + TRI_V8_EXCEPTION_MESSAGE ( scope , res , " cannot save document into collection " ) ; <nl> } <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> static v8 : : Handle < v8 : : Value > JS_CreateUserVocbase ( v8 : : Arguments const & argv ) { <nl> v8 : : HandleScope scope ; <nl> <nl> if ( argv . Length ( ) < 2 ) { <nl> - TRI_V8_EXCEPTION_USAGE ( scope , " CREATE_DATABASE ( < database name > , < database path > , < database options > ) " ) ; <nl> + TRI_V8_EXCEPTION_USAGE ( scope , " CREATE_DATABASE ( < name > , < path > , < options > ) " ) ; <nl> } <nl> <nl> TRI_vocbase_t * vocbase = UnwrapVocBase ( argv . Holder ( ) ) ; <nl> <nl> - if ( ! vocbase - > _isSystem ) { <nl> - TRI_V8_EXCEPTION_INTERNAL ( scope , " current database is not the system database " ) ; <nl> + if ( ! vocbase - > _isSystem ) { <nl> + TRI_V8_EXCEPTION ( scope , TRI_ERROR_ARANGO_USE_SYSTEM_DATABASE ) ; <nl> } <nl> <nl> string name = TRI_ObjectToString ( argv [ 0 ] ) ; <nl> string path = TRI_ObjectToString ( argv [ 1 ] ) ; <nl> <nl> - if ( ! VocbaseManager : : manager . canAddVocbase ( name , path ) ) { <nl> - TRI_V8_EXCEPTION_INTERNAL ( scope , " cannot create database with that name and path " ) ; <nl> + int res = VocbaseManager : : manager . canAddVocbase ( name , path , true ) ; <nl> + <nl> + if ( res ! = TRI_ERROR_NO_ERROR ) { <nl> + TRI_V8_EXCEPTION ( scope , res ) ; <nl> } <nl> <nl> v8 : : Local < v8 : : String > keyName = v8 : : String : : New ( " name " ) ; <nl> static v8 : : Handle < v8 : : Value > JS_CreateUserVocbase ( v8 : : Arguments const & argv ) { <nl> } <nl> } <nl> <nl> + / / now create the directory <nl> + res = TRI_CreateDirectory ( path . c_str ( ) ) ; <nl> + <nl> + if ( res ! = TRI_ERROR_NO_ERROR ) { <nl> + TRI_V8_EXCEPTION ( scope , res ) ; <nl> + } <nl> + <nl> + <nl> / / load vocbase with defaults <nl> TRI_vocbase_t * userVocbase = TRI_OpenVocBase ( path . c_str ( ) , name . c_str ( ) , & defaults ) ; <nl> <nl> static v8 : : Handle < v8 : : Value > JS_CreateUserVocbase ( v8 : : Arguments const & argv ) { <nl> } <nl> <nl> bool vocbaseOk = VocbaseManager : : manager . runVersionCheck ( userVocbase , v8 : : Context : : GetCurrent ( ) ) ; <nl> - if ( ! vocbaseOk ) { <nl> + <nl> + if ( ! vocbaseOk ) { <nl> / / unload vocbase <nl> TRI_DestroyVocBase ( userVocbase ) ; <nl> TRI_Free ( TRI_UNKNOWN_MEM_ZONE , userVocbase ) ; <nl> static v8 : : Handle < v8 : : Value > JS_CreateUserVocbase ( v8 : : Arguments const & argv ) { <nl> v8 : : Handle < v8 : : Value > result ; <nl> <nl> try { <nl> - result = saveToCollection ( vocbase , TRI_COL_NAME_DATABASES , newDoc ) ; <nl> + result = saveToCollection ( vocbase , TRI_COL_NAME_DATABASES , name . c_str ( ) , newDoc ) ; <nl> } <nl> catch ( . . . ) { <nl> } <nl> static v8 : : Handle < v8 : : Value > JS_AddEndpoint ( v8 : : Arguments const & argv ) { <nl> v8 : : Handle < v8 : : Object > newDoc = v8 : : Object : : New ( ) ; <nl> newDoc - > Set ( keyEndpoint , TRI_V8_SYMBOL ( endpoint . 
c_str ( ) ) ) ; <nl> <nl> - return saveToCollection ( vocbase , TRI_COL_NAME_ENDPOINTS , newDoc ) ; <nl> + return saveToCollection ( vocbase , TRI_COL_NAME_ENDPOINTS , " " , newDoc ) ; <nl> } <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> static v8 : : Handle < v8 : : Value > JS_AddPrefixMapping ( v8 : : Arguments const & argv ) { <nl> v8 : : Handle < v8 : : Value > result ; <nl> <nl> try { <nl> - result = saveToCollection ( vocbase , TRI_COL_NAME_PREFIXES , newDoc ) ; <nl> + result = saveToCollection ( vocbase , TRI_COL_NAME_PREFIXES , " " , newDoc ) ; <nl> } <nl> catch ( . . . ) { <nl> } <nl> void TRI_InitV8VocBridge ( v8 : : Handle < v8 : : Context > context , <nl> / / generate global functions <nl> / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> <nl> - TRI_AddGlobalFunctionVocbase ( context , " AHUACATL_RUN " , JS_RunAhuacatl ) ; <nl> - TRI_AddGlobalFunctionVocbase ( context , " AHUACATL_EXPLAIN " , JS_ExplainAhuacatl ) ; <nl> - TRI_AddGlobalFunctionVocbase ( context , " AHUACATL_PARSE " , JS_ParseAhuacatl ) ; <nl> + / / AQL functions . not intended to be used by end users <nl> + TRI_AddGlobalFunctionVocbase ( context , " AHUACATL_RUN " , JS_RunAhuacatl , true ) ; <nl> + TRI_AddGlobalFunctionVocbase ( context , " AHUACATL_EXPLAIN " , JS_ExplainAhuacatl , true ) ; <nl> + TRI_AddGlobalFunctionVocbase ( context , " AHUACATL_PARSE " , JS_ParseAhuacatl , true ) ; <nl> <nl> - TRI_AddGlobalFunctionVocbase ( context , " CURSOR " , JS_Cursor ) ; <nl> - TRI_AddGlobalFunctionVocbase ( context , " CREATE_CURSOR " , JS_CreateCursor ) ; <nl> - TRI_AddGlobalFunctionVocbase ( context , " DELETE_CURSOR " , JS_DeleteCursor ) ; <nl> + / / cursor functions . not intended to be used by end users <nl> + TRI_AddGlobalFunctionVocbase ( context , " CURSOR " , JS_Cursor , true ) ; <nl> + TRI_AddGlobalFunctionVocbase ( context , " CREATE_CURSOR " , JS_CreateCursor , true ) ; <nl> + TRI_AddGlobalFunctionVocbase ( context , " DELETE_CURSOR " , JS_DeleteCursor , true ) ; <nl> <nl> - TRI_AddGlobalFunctionVocbase ( context , " REPLICATION_LOGGER_START " , JS_StartLoggerReplication ) ; <nl> - TRI_AddGlobalFunctionVocbase ( context , " REPLICATION_LOGGER_STOP " , JS_StopLoggerReplication ) ; <nl> - TRI_AddGlobalFunctionVocbase ( context , " REPLICATION_LOGGER_STATE " , JS_StateLoggerReplication ) ; <nl> - TRI_AddGlobalFunctionVocbase ( context , " REPLICATION_LOGGER_CONFIGURE " , JS_ConfigureLoggerReplication ) ; <nl> - TRI_AddGlobalFunctionVocbase ( context , " REPLICATION_SYNCHRONISE " , JS_SynchroniseReplication ) ; <nl> - TRI_AddGlobalFunctionVocbase ( context , " REPLICATION_SERVER_ID " , JS_ServerIdReplication ) ; <nl> - TRI_AddGlobalFunctionVocbase ( context , " REPLICATION_APPLIER_CONFIGURE " , JS_ConfigureApplierReplication ) ; <nl> - TRI_AddGlobalFunctionVocbase ( context , " REPLICATION_APPLIER_START " , JS_StartApplierReplication ) ; <nl> - TRI_AddGlobalFunctionVocbase ( context , " REPLICATION_APPLIER_STOP " , JS_StopApplierReplication ) ; <nl> - TRI_AddGlobalFunctionVocbase ( context , " REPLICATION_APPLIER_STATE " , JS_StateApplierReplication ) ; <nl> - TRI_AddGlobalFunctionVocbase ( context , " REPLICATION_APPLIER_FORGET " , JS_ForgetApplierReplication ) ; <nl> + / / replication functions . 
not intended to be used by end users <nl> + TRI_AddGlobalFunctionVocbase ( context , " REPLICATION_LOGGER_START " , JS_StartLoggerReplication , true ) ; <nl> + TRI_AddGlobalFunctionVocbase ( context , " REPLICATION_LOGGER_STOP " , JS_StopLoggerReplication , true ) ; <nl> + TRI_AddGlobalFunctionVocbase ( context , " REPLICATION_LOGGER_STATE " , JS_StateLoggerReplication , true ) ; <nl> + TRI_AddGlobalFunctionVocbase ( context , " REPLICATION_LOGGER_CONFIGURE " , JS_ConfigureLoggerReplication , true ) ; <nl> + TRI_AddGlobalFunctionVocbase ( context , " REPLICATION_SYNCHRONISE " , JS_SynchroniseReplication , true ) ; <nl> + TRI_AddGlobalFunctionVocbase ( context , " REPLICATION_SERVER_ID " , JS_ServerIdReplication , true ) ; <nl> + TRI_AddGlobalFunctionVocbase ( context , " REPLICATION_APPLIER_CONFIGURE " , JS_ConfigureApplierReplication , true ) ; <nl> + TRI_AddGlobalFunctionVocbase ( context , " REPLICATION_APPLIER_START " , JS_StartApplierReplication , true ) ; <nl> + TRI_AddGlobalFunctionVocbase ( context , " REPLICATION_APPLIER_STOP " , JS_StopApplierReplication , true ) ; <nl> + TRI_AddGlobalFunctionVocbase ( context , " REPLICATION_APPLIER_STATE " , JS_StateApplierReplication , true ) ; <nl> + TRI_AddGlobalFunctionVocbase ( context , " REPLICATION_APPLIER_FORGET " , JS_ForgetApplierReplication , true ) ; <nl> <nl> TRI_AddGlobalFunctionVocbase ( context , " COMPARE_STRING " , JS_compare_string ) ; <nl> TRI_AddGlobalFunctionVocbase ( context , " NORMALIZE_STRING " , JS_normalize_string ) ; <nl> mmm a / arangod / VocBase / collection . c <nl> ppp b / arangod / VocBase / collection . c <nl> TRI_collection_t * TRI_CreateCollection ( TRI_vocbase_t * vocbase , <nl> char const * path , <nl> const TRI_col_info_t * const parameter ) { <nl> char * filename ; <nl> + int res ; <nl> <nl> / / sanity check <nl> if ( sizeof ( TRI_df_header_marker_t ) + sizeof ( TRI_df_footer_marker_t ) > parameter - > _maximalSize ) { <nl> TRI_collection_t * TRI_CreateCollection ( TRI_vocbase_t * vocbase , <nl> } <nl> <nl> / / create directory <nl> - if ( ! TRI_CreateDirectory ( filename ) ) { <nl> + res = TRI_CreateDirectory ( filename ) ; <nl> + <nl> + if ( res ! = TRI_ERROR_NO_ERROR ) { <nl> LOG_ERROR ( " cannot create collection ' % s ' in ' % s ' as ' % s ' : % s " , <nl> parameter - > _name , <nl> path , <nl> filename , <nl> - TRI_last_error ( ) ) ; <nl> + TRI_errno_string ( res ) ) ; <nl> <nl> TRI_FreeString ( TRI_CORE_MEM_ZONE , filename ) ; <nl> <nl> mmm a / arangod / VocBase / replication - applier . c <nl> ppp b / arangod / VocBase / replication - applier . c <nl> static int SetError ( TRI_replication_applier_t * applier , <nl> } <nl> <nl> / / log error message <nl> - if ( errorCode ! = TRI_ERROR_REPLICATION_NO_RESPONSE & & <nl> - errorCode ! = TRI_ERROR_REPLICATION_APPLIER_STOPPED ) { <nl> - LOG_WARNING ( " replication applier error for database ' % s ' : % s " , applier - > _databaseName , realMsg ) ; <nl> + if ( errorCode ! = TRI_ERROR_REPLICATION_APPLIER_STOPPED ) { <nl> + LOG_ERROR ( " replication applier error for database ' % s ' : % s " , applier - > _databaseName , realMsg ) ; <nl> } <nl> <nl> state = & applier - > _state ; <nl> mmm a / arangod / VocBase / vocbase . c <nl> ppp b / arangod / VocBase / vocbase . c <nl> static TRI_vocbase_col_t * AddCollection ( TRI_vocbase_t * vocbase , <nl> <nl> / / the replication collection cannot be unloaded manually ) <nl> / / ( this would make the server hang ) <nl> - init . _canUnload = ! TRI_EqualString ( name , TRI_COL_NAME_REPLICATION ) ; <nl> + init . 
_canUnload = ! <nl> + ( TRI_EqualString ( name , TRI_COL_NAME_REPLICATION ) | | <nl> + TRI_EqualString ( name , TRI_COL_NAME_DATABASES ) ) ; <nl> } <nl> } <nl> <nl> mmm a / html / admin / js / bootstrap / errors . js <nl> ppp b / html / admin / js / bootstrap / errors . js <nl> <nl> " ERROR_ARANGO_DATABASE_NAME_USED " : { " code " : 1229 , " message " : " database name already used " } , <nl> " ERROR_ARANGO_DATABASE_PATH_USED " : { " code " : 1230 , " message " : " database path already used " } , <nl> " ERROR_ARANGO_DATABASE_NAME_INVALID " : { " code " : 1231 , " message " : " database name invalid " } , <nl> - " ERROR_ARANGO_USE_SYSTEM_DATABASE " : { " code " : 1232 , " message " : " operation only allowed in system database " } , <nl> + " ERROR_ARANGO_DATABASE_PATH_INVALID " : { " code " : 1232 , " message " : " database path invalid " } , <nl> + " ERROR_ARANGO_USE_SYSTEM_DATABASE " : { " code " : 1233 , " message " : " operation only allowed in system database " } , <nl> " ERROR_ARANGO_DATAFILE_FULL " : { " code " : 1300 , " message " : " datafile full " } , <nl> " ERROR_REPLICATION_NO_RESPONSE " : { " code " : 1400 , " message " : " no response " } , <nl> " ERROR_REPLICATION_INVALID_RESPONSE " : { " code " : 1401 , " message " : " invalid response " } , <nl> mmm a / js / common / bootstrap / errors . js <nl> ppp b / js / common / bootstrap / errors . js <nl> <nl> " ERROR_ARANGO_DATABASE_NAME_USED " : { " code " : 1229 , " message " : " database name already used " } , <nl> " ERROR_ARANGO_DATABASE_PATH_USED " : { " code " : 1230 , " message " : " database path already used " } , <nl> " ERROR_ARANGO_DATABASE_NAME_INVALID " : { " code " : 1231 , " message " : " database name invalid " } , <nl> - " ERROR_ARANGO_USE_SYSTEM_DATABASE " : { " code " : 1232 , " message " : " operation only allowed in system database " } , <nl> + " ERROR_ARANGO_DATABASE_PATH_INVALID " : { " code " : 1232 , " message " : " database path invalid " } , <nl> + " ERROR_ARANGO_USE_SYSTEM_DATABASE " : { " code " : 1233 , " message " : " operation only allowed in system database " } , <nl> " ERROR_ARANGO_DATAFILE_FULL " : { " code " : 1300 , " message " : " datafile full " } , <nl> " ERROR_REPLICATION_NO_RESPONSE " : { " code " : 1400 , " message " : " no response " } , <nl> " ERROR_REPLICATION_INVALID_RESPONSE " : { " code " : 1401 , " message " : " invalid response " } , <nl> mmm a / lib / BasicsC / errors . dat <nl> ppp b / lib / BasicsC / errors . dat <nl> ERROR_ARANGO_DATABASE_NOT_FOUND , 1228 , " database not found " , " Will be raised when a <nl> ERROR_ARANGO_DATABASE_NAME_USED , 1229 , " database name already used " , " Will be raised when a duplicate database name is used . " <nl> ERROR_ARANGO_DATABASE_PATH_USED , 1230 , " database path already used " , " Will be raised when a duplicate database path is used . " <nl> ERROR_ARANGO_DATABASE_NAME_INVALID , 1231 , " database name invalid " , " Will be raised when an invalid database name is used . " <nl> - ERROR_ARANGO_USE_SYSTEM_DATABASE , 1232 , " operation only allowed in system database " , " Will be raised when an operation is requested in a database other than the system database . " <nl> + ERROR_ARANGO_DATABASE_PATH_INVALID , 1232 , " database path invalid " , " Will be raised when an invalid database path is used . " <nl> + ERROR_ARANGO_USE_SYSTEM_DATABASE , 1233 , " operation only allowed in system database " , " Will be raised when an operation is requested in a database other than the system database . 
" <nl> <nl> # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> # # ArangoDB storage errors <nl> mmm a / lib / BasicsC / files . c <nl> ppp b / lib / BasicsC / files . c <nl> int TRI_CreateRecursiveDirectory ( char const * path ) { <nl> / / / @ brief creates a directory <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - bool TRI_CreateDirectory ( char const * path ) { <nl> + int TRI_CreateDirectory ( char const * path ) { <nl> int res ; <nl> <nl> + / / reset error flag <nl> + TRI_set_errno ( TRI_ERROR_NO_ERROR ) ; <nl> + <nl> res = TRI_MKDIR ( path , 0777 ) ; <nl> <nl> - if ( res ! = 0 ) { <nl> - TRI_set_errno ( TRI_ERROR_SYS_ERROR ) ; <nl> - return false ; <nl> + if ( res ! = TRI_ERROR_NO_ERROR ) { <nl> + / / check errno <nl> + res = TRI_errno ( ) ; <nl> + <nl> + / / if errno doesn ' t indicate an error , return a system error <nl> + if ( res = = TRI_ERROR_NO_ERROR ) { <nl> + res = TRI_ERROR_SYS_ERROR ; <nl> + } <nl> } <nl> <nl> - return true ; <nl> + return res ; <nl> } <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / lib / BasicsC / files . h <nl> ppp b / lib / BasicsC / files . h <nl> int TRI_CreateRecursiveDirectory ( char const * path ) ; <nl> / / / @ brief creates a directory <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - bool TRI_CreateDirectory ( char const * path ) ; <nl> + int TRI_CreateDirectory ( char const * path ) ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief removes an empty directory <nl> mmm a / lib / BasicsC / voc - errors . c <nl> ppp b / lib / BasicsC / voc - errors . c <nl> void TRI_InitialiseErrorMessages ( void ) { <nl> REG_ERROR ( ERROR_ARANGO_DATABASE_NAME_USED , " database name already used " ) ; <nl> REG_ERROR ( ERROR_ARANGO_DATABASE_PATH_USED , " database path already used " ) ; <nl> REG_ERROR ( ERROR_ARANGO_DATABASE_NAME_INVALID , " database name invalid " ) ; <nl> + REG_ERROR ( ERROR_ARANGO_DATABASE_PATH_INVALID , " database path invalid " ) ; <nl> REG_ERROR ( ERROR_ARANGO_USE_SYSTEM_DATABASE , " operation only allowed in system database " ) ; <nl> REG_ERROR ( ERROR_ARANGO_DATAFILE_FULL , " datafile full " ) ; <nl> REG_ERROR ( ERROR_REPLICATION_NO_RESPONSE , " no response " ) ; <nl> mmm a / lib / BasicsC / voc - errors . h <nl> ppp b / lib / BasicsC / voc - errors . h <nl> extern " C " { <nl> / / / Will be raised when a duplicate database path is used . <nl> / / / - 1231 : @ LIT { database name invalid } <nl> / / / Will be raised when an invalid database name is used . <nl> - / / / - 1232 : @ LIT { operation only allowed in system database } <nl> + / / / - 1232 : @ LIT { database path invalid } <nl> + / / / Will be raised when an invalid database path is used . <nl> + / / / - 1233 : @ LIT { operation only allowed in system database } <nl> / / / Will be raised when an operation is requested in a database other than <nl> / / / the system database . 
<nl> / / / - 1300 : @ LIT { datafile full } <nl> void TRI_InitialiseErrorMessages ( void ) ; <nl> # define TRI_ERROR_ARANGO_DATABASE_NAME_INVALID ( 1231 ) <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief 1232 : ERROR_ARANGO_USE_SYSTEM_DATABASE <nl> + / / / @ brief 1232 : ERROR_ARANGO_DATABASE_PATH_INVALID <nl> + / / / <nl> + / / / database path invalid <nl> + / / / <nl> + / / / Will be raised when an invalid database path is used . <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + # define TRI_ERROR_ARANGO_DATABASE_PATH_INVALID ( 1232 ) <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief 1233 : ERROR_ARANGO_USE_SYSTEM_DATABASE <nl> / / / <nl> / / / operation only allowed in system database <nl> / / / <nl> void TRI_InitialiseErrorMessages ( void ) ; <nl> / / / system database . <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - # define TRI_ERROR_ARANGO_USE_SYSTEM_DATABASE ( 1232 ) <nl> + # define TRI_ERROR_ARANGO_USE_SYSTEM_DATABASE ( 1233 ) <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief 1300 : ERROR_ARANGO_DATAFILE_FULL <nl> mmm a / lib / V8 / v8 - globals . cpp <nl> ppp b / lib / V8 / v8 - globals . 
cpp <nl> void TRI_AddMethodVocbase ( v8 : : Handle < v8 : : ObjectTemplate > tpl , <nl> <nl> void TRI_AddGlobalFunctionVocbase ( v8 : : Handle < v8 : : Context > context , <nl> const char * const name , <nl> - v8 : : Handle < v8 : : Value > ( * func ) ( v8 : : Arguments const & ) ) { <nl> + v8 : : Handle < v8 : : Value > ( * func ) ( v8 : : Arguments const & ) , <nl> + const bool isHidden ) { <nl> / / all global functions are read - only <nl> - context - > Global ( ) - > Set ( TRI_V8_SYMBOL ( name ) , v8 : : FunctionTemplate : : New ( func ) - > GetFunction ( ) , v8 : : ReadOnly ) ; <nl> + if ( isHidden ) { <nl> + context - > Global ( ) - > Set ( TRI_V8_SYMBOL ( name ) , v8 : : FunctionTemplate : : New ( func ) - > GetFunction ( ) , static_cast < v8 : : PropertyAttribute > ( v8 : : ReadOnly | v8 : : DontEnum ) ) ; <nl> + } <nl> + else { <nl> + context - > Global ( ) - > Set ( TRI_V8_SYMBOL ( name ) , v8 : : FunctionTemplate : : New ( func ) - > GetFunction ( ) , v8 : : ReadOnly ) ; <nl> + } <nl> } <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> void TRI_AddGlobalFunctionVocbase ( v8 : : Handle < v8 : : Context > context , <nl> <nl> void TRI_AddGlobalFunctionVocbase ( v8 : : Handle < v8 : : Context > context , <nl> const char * const name , <nl> - v8 : : Handle < v8 : : Function > func ) { <nl> + v8 : : Handle < v8 : : Function > func , <nl> + const bool isHidden ) { <nl> / / all global functions are read - only <nl> - context - > Global ( ) - > Set ( TRI_V8_SYMBOL ( name ) , func , v8 : : ReadOnly ) ; <nl> + if ( isHidden ) { <nl> + context - > Global ( ) - > Set ( TRI_V8_SYMBOL ( name ) , func , static_cast < v8 : : PropertyAttribute > ( v8 : : ReadOnly | v8 : : DontEnum ) ) ; <nl> + } <nl> + else { <nl> + context - > Global ( ) - > Set ( TRI_V8_SYMBOL ( name ) , func , v8 : : ReadOnly ) ; <nl> + } <nl> } <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> void TRI_AddGlobalFunctionVocbase ( v8 : : Handle < v8 : : Context > context , <nl> void TRI_AddGlobalVariableVocbase ( v8 : : Handle < v8 : : Context > context , <nl> const char * const name , <nl> v8 : : Handle < v8 : : Value > value ) { <nl> - / / all global functions are read - only <nl> + / / all global variables are read - only <nl> context - > Global ( ) - > Set ( TRI_V8_SYMBOL ( name ) , value , v8 : : ReadOnly ) ; <nl> } <nl> <nl> mmm a / lib / V8 / v8 - globals . h <nl> ppp b / lib / V8 / v8 - globals . 
h <nl> void TRI_AddMethodVocbase ( v8 : : Handle < v8 : : ObjectTemplate > tpl , <nl> <nl> void TRI_AddGlobalFunctionVocbase ( v8 : : Handle < v8 : : Context > context , <nl> const char * const name , <nl> - v8 : : Handle < v8 : : Value > ( * func ) ( v8 : : Arguments const & ) ) ; <nl> + v8 : : Handle < v8 : : Value > ( * func ) ( v8 : : Arguments const & ) , <nl> + const bool isHidden = false ) ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief adds a global function to the given context <nl> void TRI_AddGlobalFunctionVocbase ( v8 : : Handle < v8 : : Context > context , <nl> <nl> void TRI_AddGlobalFunctionVocbase ( v8 : : Handle < v8 : : Context > context , <nl> const char * const name , <nl> - v8 : : Handle < v8 : : Function > func ) ; <nl> + v8 : : Handle < v8 : : Function > func , <nl> + const bool isHidden = false ) ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief adds a global variable to the given context <nl> mmm a / lib / V8 / v8 - utils . cpp <nl> ppp b / lib / V8 / v8 - utils . cpp <nl> static v8 : : Handle < v8 : : Value > JS_MakeDirectory ( v8 : : Arguments const & argv ) { <nl> TRI_V8_TYPE_ERROR ( scope , " < path > must be a string " ) ; <nl> } <nl> <nl> - bool result = TRI_CreateDirectory ( * name ) ; <nl> + int res = TRI_CreateDirectory ( * name ) ; <nl> <nl> - if ( ! result ) { <nl> - TRI_V8_EXCEPTION_SYS ( scope , " cannot create directory " ) ; <nl> + if ( res ! = TRI_ERROR_NO_ERROR ) { <nl> + TRI_V8_EXCEPTION ( scope , res ) ; <nl> } <nl> <nl> return scope . Close ( v8 : : Undefined ( ) ) ; <nl> | improved error messages | arangodb/arangodb | 3871fb7af32b8b770b5a3c442d1583d26a52019a | 2013-08-20T09:48:08Z |
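The arangodb commit above changes TRI_CreateDirectory() to return an error code instead of a bool, and canAddVocbase() likewise now reports a specific TRI_ERROR_* value that callers log via TRI_errno_string(). A minimal sketch of the new calling convention, mirroring the updated call sites in the diff (include names and the directory path are assumptions):

#include "BasicsC/files.h"       // TRI_CreateDirectory
#include "BasicsC/voc-errors.h"  // TRI_ERROR_NO_ERROR
#include "BasicsC/errors.h"      // TRI_errno_string; header name assumed
#include "BasicsC/logging.h"     // LOG_ERROR; header name assumed

static int CreateDatabaseDirectory (char const* path) {
  int res = TRI_CreateDirectory(path);

  if (res != TRI_ERROR_NO_ERROR) {
    // res carries an errno-derived code, or TRI_ERROR_SYS_ERROR as a fallback.
    LOG_ERROR("cannot create directory '%s': %s", path, TRI_errno_string(res));
  }

  return res;
}

Propagating the raw error code rather than true/false is what lets JS_CreateUserVocbase() and JS_MakeDirectory() surface the failure directly through TRI_V8_EXCEPTION(scope, res).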
mmm a / stdlib / public / core / Bool . swift <nl> ppp b / stdlib / public / core / Bool . swift <nl> public struct Bool { <nl> @ _transparent <nl> public init ( ) { <nl> let zero : Int8 = 0 <nl> - self . _value = Builtin . trunc_Int8_Int1 ( zero . _value ) <nl> + self . _value = Builtin . trunc_Int8_Int1 ( zero . _storage ) <nl> } <nl> <nl> @ _versioned <nl> mmm a / stdlib / public / core / CMakeLists . txt <nl> ppp b / stdlib / public / core / CMakeLists . txt <nl> set ( SWIFTLIB_ESSENTIAL <nl> ErrorType . swift <nl> Existential . swift <nl> Filter . swift . gyb <nl> - FixedPoint . swift . gyb <nl> FlatMap . swift <nl> Flatten . swift . gyb <nl> FloatingPoint . swift . gyb <nl> mmm a / stdlib / public / core / ClosedRange . swift <nl> ppp b / stdlib / public / core / ClosedRange . swift <nl> internal enum _ClosedRangeIndexRepresentation < Bound > <nl> where <nl> / / WORKAROUND rdar : / / 25214598 - should be Bound : Strideable <nl> Bound : _Strideable & Comparable , <nl> - Bound . Stride : Integer { <nl> + Bound . Stride : BinaryInteger { <nl> case pastEnd <nl> case inRange ( Bound ) <nl> } <nl> mmm a / stdlib / public / core / FloatingPoint . swift . gyb <nl> ppp b / stdlib / public / core / FloatingPoint . swift . gyb <nl> extension FloatingPoint { <nl> } <nl> % end <nl> <nl> + / / FIXME ( integers ) : maybe uncomment it back <nl> + # if false <nl> @ _transparent <nl> public func negated ( ) - > Self { <nl> var rhs = self <nl> rhs . negate ( ) <nl> return rhs <nl> } <nl> + # endif <nl> + <nl> } <nl> <nl> extension BinaryFloatingPoint { <nl> mmm a / stdlib / public / core / FloatingPointTypes . swift . gyb <nl> ppp b / stdlib / public / core / FloatingPointTypes . swift . gyb <nl> def intFormatFix ( bits ) : <nl> return bits <nl> } % <nl> <nl> - / / TODO : remove once integer proposal is available mmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> + / / TODO : remove once integer proposal is available mmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> + / / FIXME ( integers ) : ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ <nl> % for bits in [ 32 , 64 ] : <nl> extension UInt $ { bits } { <nl> var signBitIndex : Int { <nl> - return $ { bits - 1 } - Int ( Int $ { bits } ( Builtin . int_ctlz_Int $ { bits } ( self . _value , false . _value ) ) ) <nl> + return $ { bits - 1 } - Int ( Int $ { bits } ( Builtin . int_ctlz_Int $ { bits } ( self . _storage , false . _value ) ) ) <nl> } <nl> var countTrailingZeros : Int { <nl> - return Int ( Int $ { bits } ( Builtin . int_cttz_Int $ { bits } ( self . _value , false . _value ) ) ) <nl> + return Int ( Int $ { bits } ( Builtin . int_cttz_Int $ { bits } ( self . _storage , false . _value ) ) ) <nl> } <nl> } <nl> % end <nl> public struct $ { Self } { <nl> var _value : Builtin . FPIEEE $ { bits } <nl> <nl> / / / Create an instance initialized to zero . <nl> - @ _transparent public <nl> - init ( ) { <nl> + @ _transparent <nl> + public init ( ) { <nl> let zero : Int64 = 0 <nl> - self . _value = Builtin . sitofp_Int64_FPIEEE $ { bits } ( zero . _value ) <nl> + self . _value = Builtin . sitofp_Int64_FPIEEE $ { bits } ( zero . _storage ) <nl> } <nl> <nl> @ _transparent <nl> extension $ { Self } : BinaryFloatingPoint { <nl> } <nl> <nl> public init ( bitPattern : UInt $ { bits } ) { <nl> - self . init ( _bits : Builtin . bitcast_Int $ { bits } _FPIEEE $ { bits } ( bitPattern . _value ) ) <nl> + self . init ( _bits : Builtin . bitcast_Int $ { bits } _FPIEEE $ { bits } ( bitPattern . 
_storage ) ) <nl> } <nl> <nl> public var sign : FloatingPointSign { <nl> extension $ { Self } : _ExpressibleByBuiltinIntegerLiteral , ExpressibleByIntegerLit <nl> <nl> / / / Create an instance initialized to ` value ` . <nl> public init ( integerLiteral value : Int64 ) { <nl> - self = $ { Self } ( _bits : Builtin . sitofp_Int64_FPIEEE $ { bits } ( value . _value ) ) <nl> + self = $ { Self } ( _bits : Builtin . sitofp_Int64_FPIEEE $ { bits } ( value . _storage ) ) <nl> } <nl> } <nl> <nl> extension $ { Self } : Hashable { <nl> } <nl> } <nl> <nl> - extension $ { Self } : Arithmetic { <nl> + extension $ { Self } : SignedArithmetic { <nl> @ _transparent <nl> public var magnitude : $ { Self } { <nl> return $ { Self } ( _bits : Builtin . int_fabs_FPIEEE $ { bits } ( _value ) ) <nl> extension $ { Self } { <nl> % ThatBuiltinName = builtinIntName ( srcBits ) <nl> % sign = ' s ' if srcSigned else ' u ' <nl> public init ( _ v : $ { That } ) { <nl> - _value = Builtin . $ { sign } itofp_ $ { ThatBuiltinName } _FPIEEE $ { bits } ( v . _value ) <nl> + _value = Builtin . $ { sign } itofp_ $ { ThatBuiltinName } _FPIEEE $ { bits } ( v . _storage ) <nl> } <nl> % end <nl> } <nl> extension $ { Self } { <nl> " $ { That } value cannot be converted to $ { Self } because the result would be less than $ { Self } . min " ) <nl> _precondition ( other < $ { str ( upper ) } . 0 , <nl> " $ { That } value cannot be converted to $ { Self } because the result would be greater than $ { Self } . max " ) <nl> - self . _value = Builtin . fpto $ { sign } i_FPIEEE $ { srcBits } _ $ { BuiltinName } ( other . _value ) <nl> + self . _storage = Builtin . fpto $ { sign } i_FPIEEE $ { srcBits } _ $ { BuiltinName } ( other . _value ) <nl> } <nl> % if srcBits = = 80 : <nl> # endif <nl> mmm a / stdlib / public / core / IntegerParsing . swift . gyb <nl> ppp b / stdlib / public / core / IntegerParsing . swift . gyb <nl> UIntMax = ' UInt % s ' % int_max_bits <nl> <nl> / / = = = mmm Parsing helpers mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - = = = / / <nl> <nl> + / / FIXME ( integers ) : Needs to get rid of ` IntMax ` <nl> + # if false <nl> / / / If text is an ASCII representation in the given ` radix ` of a <nl> / / / non - negative number < = ` maximum ` , return that number . Otherwise , <nl> / / / return ` nil ` . <nl> internal func _parseAsciiAsIntMax ( <nl> / / Convert to signed . <nl> return IntMax ( bitPattern : hasMinus ? 0 & - absValue : absValue ) <nl> } <nl> + # endif <nl> <nl> / / / Strip an optional single leading ASCII plus / minus sign from ` utf16 ` . <nl> private func _parseOptionalAsciiSign ( <nl> extension $ { Self } { <nl> / / / " [ + - ] ? [ 0 - 9a - zA - Z ] + " , or the value it denotes in the given ` radix ` <nl> / / / is not representable , the result is ` nil ` . <nl> public init ? ( _ text : String , radix : Int = 10 ) { <nl> - if let value = _parseAsciiAs $ { ' ' if signed else ' U ' } IntMax ( <nl> - text . utf16 , radix , $ { ' ' if signed else ' U ' } IntMax ( $ { Self } . max ) ) { <nl> - self . init ( <nl> - $ { ' ' if Self in ( IntMax , UIntMax ) else ' truncatingBitPattern : ' } value ) <nl> - } <nl> - else { <nl> - return nil <nl> - } <nl> + / / FIXME ( integers ) : implement <nl> + fatalError ( ) <nl> + / / if let value = _parseAsciiAs $ { ' ' if signed else ' U ' } IntMax ( <nl> + / / text . utf16 , radix , $ { ' ' if signed else ' U ' } IntMax ( $ { Self } . max ) ) { <nl> + / / self . 
init ( <nl> + / / $ { ' ' if Self in ( IntMax , UIntMax ) else ' truncatingBitPattern : ' } value ) <nl> + / / } <nl> + / / else { <nl> + / / return nil <nl> + / / } <nl> } <nl> } <nl> <nl> mmm a / stdlib / public / core / Integers . swift . gyb <nl> ppp b / stdlib / public / core / Integers . swift . gyb <nl> <nl> # Utility code for later in this template <nl> # <nl> <nl> - from SwiftIntTypes import all_integer_types <nl> + from SwiftIntTypes import all_integer_types , int_max_bits <nl> <nl> from string import maketrans , capitalize <nl> <nl> maskingShifts = [ <nl> operator = ' & < < ' , nonMaskingOperator = ' < < ' , description = ' left shift ' , <nl> name = ' maskingShiftLeft ' , llvmName = lambda _ : ' shl ' ) , <nl> ] <nl> + <nl> + IntMax = ' Int % s ' % int_max_bits <nl> + UIntMax = ' UInt % s ' % int_max_bits <nl> } % <nl> <nl> + / / FIXME ( integers ) : remove these two aliases <nl> + / / / The largest native signed integer type . <nl> + public typealias IntMax = $ { IntMax } <nl> + / / / The largest native unsigned integer type . <nl> + public typealias UIntMax = $ { UIntMax } <nl> + <nl> infix operator & < < : BitwiseShiftPrecedence <nl> infix operator & < < = : AssignmentPrecedence <nl> infix operator & > > : BitwiseShiftPrecedence <nl> public protocol SignedArithmetic : Arithmetic { <nl> } <nl> <nl> extension SignedArithmetic { <nl> + @ _transparent <nl> public func negated ( ) - > Self { <nl> return Self ( ) . subtracting ( self ) <nl> } <nl> + <nl> + @ _transparent <nl> public mutating func negate ( ) { <nl> self = negated ( ) <nl> } <nl> public prefix func - < T : SignedArithmetic > ( x : T ) - > T { <nl> / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> <nl> public protocol BinaryInteger : <nl> - Comparable , Arithmetic , CustomStringConvertible { <nl> + Comparable , Hashable , Arithmetic , CustomStringConvertible , Strideable { <nl> <nl> static var isSigned : Bool { get } <nl> <nl> extension BinaryInteger { <nl> } <nl> } <nl> <nl> + / / Strideable conformance <nl> + extension BinaryInteger { <nl> + / / FIXME ( ABI ) : using Int as the return value is wrong . <nl> + @ _transparent <nl> + public func distance ( to other : Self ) - > Int { <nl> + / / FIXME ( integers ) <nl> + fatalError ( ) <nl> + } <nl> + <nl> + / / FIXME ( ABI ) : using Int as the return value is wrong . <nl> + @ _transparent <nl> + public func advanced ( by n : Int ) - > Self { <nl> + / / FIXME ( integers ) <nl> + fatalError ( ) <nl> + } <nl> + } <nl> + <nl> + <nl> + <nl> / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> / / = = = mmm Homogeneous comparison mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> public func & $ { x . operator } < T : FixedWidthInteger > ( lhs : T , rhs : T ) - > T { <nl> / / = = = mmm UnsignedInteger mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - = = = / / <nl> / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> <nl> - public protocol UnsignedInteger : BinaryInteger { <nl> + / / / This protocol is an implementation detail of ` UnsignedInteger ` ; <nl> + / / / do not use it directly . 
<nl> + @ _show_in_interface <nl> + public protocol _DisallowMixedSignArithmetic : BinaryInteger { <nl> + / / Used to create a deliberate ambiguity in cases like UInt ( 1 ) + <nl> + / / Int ( 1 ) , which would otherwise compile due to the arithmetic <nl> + / / operators defined for Strideable types ( unsigned types are <nl> + / / Strideable ) . <nl> + associatedtype _DisallowMixedSignArithmetic : SignedInteger = Int <nl> + } <nl> + <nl> + public protocol UnsignedInteger : _DisallowMixedSignArithmetic , BinaryInteger { <nl> associatedtype Magnitude : BinaryInteger <nl> } <nl> <nl> extension SignedInteger where Self : FixedWidthInteger { <nl> % for self_type in all_integer_types ( word_bits ) : <nl> % bits = self_type . bits <nl> % signed = self_type . is_signed <nl> + % BuiltinName = self_type . builtin_name <nl> % Self = self_type . stdlib_name <nl> + % OtherSelf = self_type . get_opposite_signedness ( ) . stdlib_name <nl> % Unsigned = ' Signed ' if signed else ' Unsigned ' <nl> % u = ' s ' if signed else ' u ' <nl> % U = ' U ' if signed else ' ' <nl> public struct $ { Self } <nl> _storage ) , x ) ) <nl> } <nl> <nl> + / / / Construct a ` $ { Self } ` having the same memory representation as <nl> + / / / the ` $ { OtherSelf } ` ` bitPattern ` . No range or overflow checking <nl> + / / / occurs , and the resulting ` $ { Self } ` may not have the same numeric <nl> + / / / value as ` bitPattern ` - - it is only guaranteed to use the same <nl> + / / / pattern of bits . <nl> @ _transparent <nl> - public init ( bitPattern x : $ { U } Int $ { bits } ) { <nl> + public init ( bitPattern x : $ { OtherSelf } ) { <nl> _storage = x . _storage <nl> } <nl> <nl> public struct $ { Self } <nl> } <nl> <nl> % if signed : <nl> + public typealias Magnitude = U $ { Self } <nl> + <nl> @ _transparent <nl> public var magnitude : U $ { Self } { <nl> let base = U $ { Self } ( _storage ) <nl> public struct $ { Self } <nl> } <nl> <nl> public var _storage : Builtin . Int $ { bits } <nl> + <nl> + / / Implementation details <nl> + <nl> + % if self_type . is_word : <nl> + @ _transparent <nl> + public / / @ testable <nl> + init ( _ _v : Builtin . Word ) { <nl> + % if BuiltinName = = ' Int32 ' : <nl> + self . _storage = Builtin . truncOrBitCast_Word_Int32 ( _v ) <nl> + % elif BuiltinName = = ' Int64 ' : <nl> + self . _storage = Builtin . zextOrBitCast_Word_Int64 ( _v ) <nl> + % end <nl> + } <nl> + <nl> + @ _transparent <nl> + public / / @ testable <nl> + var _builtinWordValue : Builtin . Word { <nl> + % if BuiltinName = = ' Int32 ' : <nl> + return Builtin . zextOrBitCast_Int32_Word ( _storage ) <nl> + % elif BuiltinName = = ' Int64 ' : <nl> + return Builtin . truncOrBitCast_Int64_Word ( _storage ) <nl> + % end <nl> + } <nl> + % end <nl> + } <nl> + % # end of concrete type : $ { Self } <nl> + <nl> + extension $ { Self } : Hashable { <nl> + / / / The hash value . <nl> + / / / <nl> + / / / * * Axiom : * * ` x = = y ` implies ` x . hashValue = = y . hashValue ` . <nl> + / / / <nl> + / / / - Note : The hash value is not guaranteed to be stable across <nl> + / / / different invocations of the same program . Do not persist the <nl> + / / / hash value across program runs . <nl> + public var hashValue : Int { <nl> + @ inline ( __always ) <nl> + get { <nl> + % if bits < = word_bits and signed : <nl> + / / Sign extend the value . <nl> + return Int ( self ) <nl> + % elif bits < = word_bits and not signed : <nl> + / / Sign extend the value . 
<nl> + return Int ( $ { OtherSelf } ( bitPattern : self ) ) <nl> + % elif bits = = word_bits * 2 : <nl> + / / We have twice as many bits as we need to return . <nl> + return <nl> + Int ( extendingOrTruncating : self ) ^ <nl> + Int ( extendingOrTruncating : self & > > 32 ) <nl> + % else : <nl> + _Unimplemented ( ) <nl> + % end <nl> + } <nl> + } <nl> + } <nl> + <nl> + <nl> + / / Create an ambiguity when indexing or slicing <nl> + / / Range [ OfStrideable ] < $ { Self } > outside a generic context . See <nl> + / / Range . swift for details . <nl> + extension $ { Self } { <nl> + public typealias _DisabledRangeIndex = $ { Self } <nl> + } <nl> + <nl> + % if signed : <nl> + / / TODO : Consider removing the underscore . <nl> + / / / Returns the argument and specifies that the value is not negative . <nl> + / / / It has only an effect if the argument is a load or call . <nl> + @ _transparent <nl> + public func _assumeNonNegative ( _ x : $ { Self } ) - > $ { Self } { <nl> + _sanityCheck ( x > = 0 ) <nl> + return $ { Self } ( Builtin . assumeNonNegative_ $ { BuiltinName } ( x . _storage ) ) <nl> } <nl> + % end <nl> <nl> % end <nl> + <nl> + / / FIXME ( integers ) : inline manually everywhere <nl> + public func numericCast < T : BinaryInteger , U : BinaryInteger > ( _ x : T ) - > U { <nl> + return U ( x ) <nl> + } <nl> mmm a / stdlib / public / core / Mirror . swift <nl> ppp b / stdlib / public / core / Mirror . swift <nl> public protocol CustomLeafReflectable : CustomReflectable { } <nl> / / / Do not declare new conformances to this protocol ; they will not <nl> / / / work as expected . <nl> public protocol MirrorPath { } <nl> - extension IntMax : MirrorPath { } <nl> extension Int : MirrorPath { } <nl> extension String : MirrorPath { } <nl> <nl> mmm a / stdlib / public / core / Range . swift . gyb <nl> ppp b / stdlib / public / core / Range . swift . gyb <nl> public func . . < < Bound > ( <nl> where <nl> / / WORKAROUND rdar : / / 25214598 - should be just Bound : Strideable <nl> Bound : _Strideable & Comparable , <nl> - Bound . Stride : Integer { <nl> + Bound . Stride : BinaryInteger { <nl> <nl> / / FIXME : swift - 3 - indexing - model : tests for traps . <nl> _precondition ( minimum < = maximum , <nl> mmm a / stdlib / public / core / Stride . swift . gyb <nl> ppp b / stdlib / public / core / Stride . swift . gyb <nl> public protocol $ { Self } : $ { Conformance } { <nl> / / FIXME ( ABI ) ( compiler limitation ) : We ' d like to name this type " Distance " <nl> / / but for < rdar : / / problem / 17619038 > <nl> / / / A type that can represent the distance between two values of ` Self ` . <nl> - associatedtype Stride : SignedNumber <nl> + associatedtype Stride : SignedArithmetic <nl> <nl> / / / Returns a stride ` x ` such that ` self . advanced ( by : x ) ` approximates <nl> / / / ` other ` . <nl> public func = = < T : Strideable > ( x : T , y : T ) - > Bool { <nl> return x . distance ( to : y ) = = 0 <nl> } <nl> <nl> + / / FIXME ( integers ) : uncomment the block <nl> + # if false <nl> public func + < T : Strideable > ( lhs : T , rhs : T . Stride ) - > T { <nl> return lhs . advanced ( by : rhs ) <nl> } <nl> public func + = < T : Strideable > ( lhs : inout T , rhs : T . Stride ) { <nl> public func - = < T : Strideable > ( lhs : inout T , rhs : T . Stride ) { <nl> lhs = lhs . advanced ( by : - rhs ) <nl> } <nl> + # endif <nl> <nl> / / = = = mmm Deliberately - ambiguous operators for UnsignedIntegerTypes mmmmmm - - = = = / / <nl> / / The UnsignedIntegerTypes all have a signed Stride type . 
Without these / / <nl> mmm a / stdlib / public / core / StringLegacy . swift <nl> ppp b / stdlib / public / core / StringLegacy . swift <nl> extension String { <nl> / / / let max = String ( Int . max ) <nl> / / / print ( " \ ( max ) has \ ( max . utf16 . count ) digits . " ) <nl> / / / / / Prints " 9223372036854775807 has 19 digits . " <nl> - public init < T : _SignedInteger > ( _ v : T ) { <nl> - self = _int64ToString ( v . toIntMax ( ) ) <nl> + public init < T : SignedInteger > ( _ v : T ) { <nl> + / / FIXME ( integers ) : fix toIntMax <nl> + fatalError ( ) <nl> + / / self = _int64ToString ( v . toIntMax ( ) ) <nl> } <nl> <nl> / / / Creates a string representing the given value in base 10 . <nl> extension String { <nl> / / / - uppercase : Pass ` true ` to use uppercase letters to represent numerals <nl> / / / greater than 9 , or ` false ` to use lowercase letters . The default is <nl> / / / ` false ` . <nl> - public init < T : _SignedInteger > ( <nl> + public init < T : SignedInteger > ( <nl> _ value : T , radix : Int , uppercase : Bool = false <nl> ) { <nl> - _precondition ( radix > 1 , " Radix must be greater than 1 " ) <nl> - self = _int64ToString ( <nl> - value . toIntMax ( ) , radix : Int64 ( radix ) , uppercase : uppercase ) <nl> + / / FIXME ( integers ) : fix toIntMax <nl> + fatalError ( ) <nl> + / / _precondition ( radix > 1 , " Radix must be greater than 1 " ) <nl> + / / self = _int64ToString ( <nl> + / / value . toIntMax ( ) , radix : Int64 ( radix ) , uppercase : uppercase ) <nl> } <nl> <nl> / / / Creates a string representing the given value in the specified base . <nl> | WIP eliminating compilation errors one by one . . . | apple/swift | 1867ca4a0f0f9872e629ec42bf73b9731ee3b1d0 | 2016-07-29T10:31:21Z |
mmm a / scripts / time_sync . sh <nl> ppp b / scripts / time_sync . sh <nl> if [ $ ? - eq 1 ] ; then <nl> fi <nl> <nl> # ntpdate running log at / var / log / syslog <nl> + <nl> + sudo ntpdate - v - u us . pool . ntp . org <nl> | script : time sync script calls ntp once besides crontab job . ( ) | ApolloAuto/apollo | 46059af68a28e4dd0d6f223037d4f378903cb124 | 2017-11-03T07:38:24Z |
mmm a / tests / Makefile <nl> ppp b / tests / Makefile <nl> LDPFALGS = - pthread <nl> CPP_FILES : = $ ( wildcard * . cpp ) <nl> OBJ_FILES : = $ ( addprefix . / , $ ( notdir $ ( CPP_FILES : . cpp = . o ) ) ) <nl> <nl> - <nl> - tests : $ ( OBJ_FILES ) <nl> + <nl> + tests : $ ( OBJ_FILES ) <nl> $ ( CXX ) $ ( CXXFLAGS ) $ ( LDPFALGS ) - o $ @ $ ^ <nl> + mkdir - p logs <nl> <nl> % . o : % . cpp <nl> $ ( CXX ) $ ( CXXFLAGS ) - c - o $ @ $ < <nl> <nl> clean : <nl> - rm - f tests * . o logs / * <nl> + rm - f tests * . o logs / * . txt <nl> <nl> rebuild : clean tests <nl> <nl> | fixed tests makefile | gabime/spdlog | 319a62d73f0d6fd8262c606b071499be26f472b1 | 2015-05-15T20:04:09Z |
mmm a / test / functional / interface_bitcoin_cli . py <nl> ppp b / test / functional / interface_bitcoin_cli . py <nl> def run_test ( self ) : <nl> assert_equal ( [ " foo " , " bar " ] , self . nodes [ 0 ] . cli ( ' - rpcuser = % s ' % user , ' - stdin ' , ' - stdinrpcpass ' , input = password + " \ nfoo \ nbar " ) . echo ( ) ) <nl> assert_raises_process_error ( 1 , " Incorrect rpcuser or rpcpassword " , self . nodes [ 0 ] . cli ( ' - rpcuser = % s ' % user , ' - stdin ' , ' - stdinrpcpass ' , input = " foo " ) . echo ) <nl> <nl> + self . log . info ( " Test connecting to a non - existing server " ) <nl> + assert_raises_process_error ( 1 , " Could not connect to the server " , self . nodes [ 0 ] . cli ( ' - rpcport = 1 ' ) . echo ) <nl> + <nl> self . log . info ( " Make sure that - getinfo with arguments fails " ) <nl> assert_raises_process_error ( 1 , " - getinfo takes no arguments " , self . nodes [ 0 ] . cli ( ' - getinfo ' ) . help ) <nl> <nl> | tests : Test connecting to a non - existing server | bitcoin/bitcoin | a2b2476e96b34cba2e413bb099bcef5e4165b29a | 2018-03-19T21:44:43Z |
mmm a / tensorflow / compiler / jit / BUILD <nl> ppp b / tensorflow / compiler / jit / BUILD <nl> cc_library ( <nl> " / / tensorflow / compiler / jit / ops : parallel_check_op " , <nl> " / / tensorflow / compiler / jit / ops : xla_ops " , <nl> " / / tensorflow / compiler / tf2xla : dump_graph " , <nl> + " / / tensorflow / compiler / tf2xla : validate_control_flow " , <nl> " / / tensorflow / compiler / tf2xla : xla_compiler " , <nl> " / / tensorflow / compiler / xla : status_macros " , <nl> " / / tensorflow / core : core_cpu " , <nl> mmm a / tensorflow / compiler / jit / encapsulate_subgraphs_pass . cc <nl> ppp b / tensorflow / compiler / jit / encapsulate_subgraphs_pass . cc <nl> limitations under the License . <nl> # include " tensorflow / compiler / jit / shape_inference_helpers . h " <nl> # include " tensorflow / compiler / tf2xla / const_analysis . h " <nl> # include " tensorflow / compiler / tf2xla / dump_graph . h " <nl> + # include " tensorflow / compiler / tf2xla / validate_control_flow . h " <nl> # include " tensorflow / compiler / xla / status_macros . h " <nl> # include " tensorflow / core / common_runtime / function . h " <nl> # include " tensorflow / core / common_runtime / optimization_registry . h " <nl> Status Encapsulator : : SplitIntoSubgraphs ( ) { <nl> for ( auto & entry : subgraphs_ ) { <nl> Subgraph & subgraph = entry . second ; <nl> FixupSourceAndSinkEdges ( subgraph . GetGraph ( ) ) ; <nl> + / / Verify that the graph has well - formed control flow structure to be <nl> + / / functionalized . <nl> + std : : vector < ControlFlowInfo > dummy ; <nl> + TF_RETURN_IF_ERROR ( <nl> + BuildAndValidateControlFlowInfo ( subgraph . GetGraph ( ) , & dummy ) ) ; <nl> } <nl> <nl> return s ; <nl> Status EncapsulateSubgraphsPass : : Run ( <nl> return Status : : OK ( ) ; <nl> } ; <nl> <nl> - TF_RETURN_IF_ERROR ( EncapsulateSubgraphsInFunctions ( <nl> - kXlaClusterAttr , kXlaOutsideCompilationAttr , * * options . graph , <nl> - rewrite_subgraph , <nl> - / * reuse_existing_functions = * / false , & graph_out , library ) ) ; <nl> + TF_RETURN_WITH_CONTEXT_IF_ERROR ( <nl> + EncapsulateSubgraphsInFunctions ( <nl> + kXlaClusterAttr , kXlaOutsideCompilationAttr , * * options . graph , <nl> + rewrite_subgraph , / * reuse_existing_functions = * / false , & graph_out , <nl> + library ) , <nl> + " EncapsulateSubgraphsPass failed " ) ; <nl> <nl> if ( VLOG_IS_ON ( 1 ) ) { <nl> dump_graph : : DumpGraphToFile ( " after_encapsulate_subgraphs " , * graph_out , <nl> mmm a / tensorflow / compiler / tf2xla / BUILD <nl> ppp b / tensorflow / compiler / tf2xla / BUILD <nl> cc_library ( <nl> ] , <nl> ) <nl> <nl> + cc_library ( <nl> + name = " validate_control_flow " , <nl> + srcs = [ " validate_control_flow . cc " ] , <nl> + hdrs = [ " validate_control_flow . h " ] , <nl> + deps = [ <nl> + " / / tensorflow / core : graph " , <nl> + " / / tensorflow / core : lib " , <nl> + ] , <nl> + ) <nl> + <nl> + tf_cc_test ( <nl> + name = " validate_control_flow_test " , <nl> + srcs = [ " validate_control_flow_test . 
cc " ] , <nl> + deps = [ <nl> + " : validate_control_flow " , <nl> + " / / tensorflow / cc : cc_ops " , <nl> + " / / tensorflow / cc : ops " , <nl> + " / / tensorflow / cc : while_loop " , <nl> + " / / tensorflow / core : lib " , <nl> + " / / tensorflow / core : ops " , <nl> + " / / tensorflow / core : protos_all_cc " , <nl> + " / / tensorflow / core : test " , <nl> + " / / tensorflow / core : test_main " , <nl> + ] , <nl> + ) <nl> + <nl> cc_library ( <nl> name = " functionalize_control_flow " , <nl> srcs = [ " functionalize_control_flow . cc " ] , <nl> hdrs = [ " functionalize_control_flow . h " ] , <nl> deps = [ <nl> " : tf2xla_util " , <nl> + " : validate_control_flow " , <nl> " / / tensorflow / compiler / jit : union_find " , <nl> " / / tensorflow / compiler / tf2xla : dump_graph " , <nl> " / / tensorflow / compiler / tf2xla / ops : xla_ops " , <nl> mmm a / tensorflow / compiler / tf2xla / functionalize_control_flow . cc <nl> ppp b / tensorflow / compiler / tf2xla / functionalize_control_flow . cc <nl> limitations under the License . <nl> # include " tensorflow / compiler / jit / union_find . h " <nl> # include " tensorflow / compiler / tf2xla / dump_graph . h " <nl> # include " tensorflow / compiler / tf2xla / tf2xla_util . h " <nl> + # include " tensorflow / compiler / tf2xla / validate_control_flow . h " <nl> # include " tensorflow / compiler / xla / ptr_util . h " <nl> # include " tensorflow / compiler / xla / status_macros . h " <nl> # include " tensorflow / core / common_runtime / function . h " <nl> Status FunctionalizeControlFlow ( const FunctionLibraryDefinition * lookup_library , <nl> / / invariant . <nl> std : : vector < ControlFlowInfo > cf_info ; <nl> std : : vector < string > unreachable_nodes ; <nl> - TF_RETURN_IF_ERROR ( BuildControlFlowInfo ( graph , & cf_info , & unreachable_nodes ) ) ; <nl> + TF_RETURN_WITH_CONTEXT_IF_ERROR ( <nl> + BuildAndValidateControlFlowInfo ( graph , & cf_info , & unreachable_nodes ) , <nl> + " FunctionalizeControlFlow failed " ) ; <nl> if ( ! unreachable_nodes . empty ( ) ) { <nl> return errors : : InvalidArgument ( <nl> " The following nodes are unreachable from the source in the graph : " , <nl> Status FunctionalizeControlFlow ( const FunctionLibraryDefinition * lookup_library , <nl> frame . parent = parent ; <nl> frame . name = cf . frame_name ; <nl> + + parent - > num_children ; <nl> - } else if ( frame . parent ! = parent ) { <nl> - return errors : : InvalidArgument ( " Mismatched parent frames for " , <nl> - cf . frame - > id ( ) , " : " , parent - > name , " vs " , <nl> - frame . parent - > name ) ; <nl> } <nl> <nl> if ( IsEnter ( node ) ) { <nl> Status FunctionalizeControlFlow ( const FunctionLibraryDefinition * lookup_library , <nl> & arg . is_loop_invariant ) ) ; <nl> frame . args . push_back ( arg ) ; <nl> } else if ( IsLoopCond ( node ) ) { <nl> - if ( frame . loop_cond ) { <nl> - return errors : : InvalidArgument ( <nl> - " Loop " , cf . frame_name , <nl> - " has more than one LoopCond node : " , node - > name ( ) , " and " , <nl> - frame . loop_cond - > name ( ) ) ; <nl> - } <nl> frame . loop_cond = node ; <nl> } <nl> frame . nodes . insert ( node ) ; <nl> new file mode 100644 <nl> index 0000000000000 . . 1b3be4cfa4aff <nl> mmm / dev / null <nl> ppp b / tensorflow / compiler / tf2xla / validate_control_flow . cc <nl> <nl> + / * Copyright 2018 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 
0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + # include " tensorflow / compiler / tf2xla / validate_control_flow . h " <nl> + <nl> + # include < vector > <nl> + <nl> + # include " tensorflow / core / graph / node_builder . h " <nl> + # include " tensorflow / core / lib / core / errors . h " <nl> + <nl> + namespace tensorflow { <nl> + namespace { <nl> + / / Information about a loop frame structure . <nl> + struct Frame { <nl> + string name ; <nl> + <nl> + / / Pointer to the parent frame . The root frame has a pointer to itself . <nl> + Frame * parent = nullptr ; <nl> + <nl> + / / The loop condition of the loop . There should be exactly one loop condition <nl> + / / in every loop . <nl> + const Node * loop_cond = nullptr ; <nl> + } ; <nl> + <nl> + / / Verify that the ControlFlowInfo of the graph has valid loop structure . <nl> + Status ValidateControlFlowInfo ( const Graph * graph , <nl> + const std : : vector < ControlFlowInfo > & cf_info ) { <nl> + std : : unordered_map < string , Frame > frames ; <nl> + for ( const Node * node : graph - > op_nodes ( ) ) { <nl> + const ControlFlowInfo & cf = cf_info [ node - > id ( ) ] ; <nl> + if ( ! cf . frame | | ! cf . parent_frame ) { <nl> + / / Skip nodes unreachable from the source node . They might be pruned <nl> + / / later . <nl> + continue ; <nl> + } <nl> + <nl> + Frame & frame = frames [ cf . frame_name ] ; <nl> + Frame * parent = & frames [ cf_info [ cf . parent_frame - > id ( ) ] . frame_name ] ; <nl> + if ( frame . parent = = nullptr ) { <nl> + frame . parent = parent ; <nl> + frame . name = cf . frame_name ; <nl> + } else if ( frame . parent ! = parent ) { <nl> + return errors : : InvalidArgument ( <nl> + " Invalid loop structure : Mismatched parent frames for \ " " , <nl> + cf . frame_name , " \ " : \ " " , parent - > name , " \ " vs \ " " , frame . parent - > name , <nl> + " \ " . This is an internal bug , please file a bug report with " <nl> + " instructions on how to reproduce the error . " ) ; <nl> + } <nl> + if ( IsLoopCond ( node ) ) { <nl> + if ( frame . loop_cond ) { <nl> + return errors : : InvalidArgument ( <nl> + " Invalid loop structure : Loop \ " " , cf . frame_name , <nl> + " \ " has more than one LoopCond node : \ " " , node - > name ( ) , " \ " and \ " " , <nl> + frame . loop_cond - > name ( ) , <nl> + " \ " . This is an internal bug , please file a bug report with " <nl> + " instructions on how to reproduce the error . " ) ; <nl> + } <nl> + frame . 
loop_cond = node ; <nl> + } <nl> + } <nl> + return Status : : OK ( ) ; <nl> + } <nl> + } / / namespace <nl> + <nl> + Status BuildAndValidateControlFlowInfo ( const Graph * graph , <nl> + std : : vector < ControlFlowInfo > * info , <nl> + std : : vector < string > * unreachable_nodes ) { <nl> + TF_RETURN_IF_ERROR ( BuildControlFlowInfo ( graph , info , unreachable_nodes ) ) ; <nl> + return ValidateControlFlowInfo ( graph , * info ) ; <nl> + } <nl> + <nl> + } / / namespace tensorflow <nl> new file mode 100644 <nl> index 0000000000000 . . 74159dc9291bf <nl> mmm / dev / null <nl> ppp b / tensorflow / compiler / tf2xla / validate_control_flow . h <nl> <nl> + / * Copyright 2018 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + # ifndef TENSORFLOW_COMPILER_TF2XLA_VALIDATE_CONTROL_FLOW_H_ <nl> + # define TENSORFLOW_COMPILER_TF2XLA_VALIDATE_CONTROL_FLOW_H_ <nl> + <nl> + # include < vector > <nl> + <nl> + # include " tensorflow / core / graph / control_flow . h " <nl> + # include " tensorflow / core / graph / graph . h " <nl> + # include " tensorflow / core / lib / core / status . h " <nl> + <nl> + namespace tensorflow { <nl> + <nl> + / / Populate the control flow frame info of each node in the graph . Verify that <nl> + / / the graph has well - formed control flow strcuture that can be functionalized . <nl> + / / If unreachable_nodes is not nullptr , append to it the names of nodes <nl> + / / unreachable from the source node . <nl> + Status BuildAndValidateControlFlowInfo ( <nl> + const Graph * graph , std : : vector < ControlFlowInfo > * info , <nl> + std : : vector < string > * unreachable_nodes = nullptr ) ; <nl> + <nl> + } / / namespace tensorflow <nl> + <nl> + # endif / / TENSORFLOW_COMPILER_TF2XLA_VALIDATE_CONTROL_FLOW_H_ <nl> new file mode 100644 <nl> index 0000000000000 . . 74c9f4b86cae4 <nl> mmm / dev / null <nl> ppp b / tensorflow / compiler / tf2xla / validate_control_flow_test . cc <nl> <nl> + / * Copyright 2018 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . 
<nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + # include " tensorflow / compiler / tf2xla / validate_control_flow . h " <nl> + <nl> + # include < string > <nl> + # include < vector > <nl> + <nl> + # include " tensorflow / cc / ops / standard_ops . h " <nl> + # include " tensorflow / cc / ops / while_loop . h " <nl> + # include " tensorflow / core / lib / core / status_test_util . h " <nl> + # include " tensorflow / core / lib / strings / str_util . h " <nl> + # include " tensorflow / core / platform / test . h " <nl> + <nl> + namespace tensorflow { <nl> + namespace { <nl> + Status LessThanTenCond ( const Scope & scope , const std : : vector < Output > & inputs , <nl> + Output * output ) { <nl> + * output = ops : : Less ( scope , inputs [ 0 ] , 10 ) ; <nl> + return scope . status ( ) ; <nl> + } <nl> + <nl> + Status AddOneBody ( const Scope & scope , const std : : vector < Output > & inputs , <nl> + std : : vector < Output > * outputs ) { <nl> + outputs - > push_back ( ops : : AddN ( scope , { inputs [ 0 ] , 1 } ) ) ; <nl> + return scope . status ( ) ; <nl> + } <nl> + <nl> + Status NestedLoopBody ( const Scope & scope , const std : : vector < Output > & inputs , <nl> + std : : vector < Output > * outputs ) { <nl> + return ops : : BuildWhileLoop ( scope . NewSubScope ( " inner " ) , inputs , <nl> + LessThanTenCond , AddOneBody , " inner_loop " , <nl> + outputs ) ; <nl> + } <nl> + <nl> + TEST ( ValidateControlFlowTest , InputsFromDifferentFrames ) { <nl> + Scope scope = Scope : : NewRootScope ( ) . ExitOnError ( ) ; <nl> + std : : vector < Output > inputs ; <nl> + inputs . push_back ( ops : : Placeholder ( scope , DT_INT32 ) ) ; <nl> + std : : vector < Output > outputs ; <nl> + TF_ASSERT_OK ( ops : : BuildWhileLoop ( scope . NewSubScope ( " outer " ) , inputs , <nl> + LessThanTenCond , NestedLoopBody , <nl> + " outer_loop " , & outputs ) ) ; <nl> + std : : unique_ptr < Graph > graph ( new Graph ( OpRegistry : : Global ( ) ) ) ; <nl> + TF_ASSERT_OK ( scope . ToGraph ( graph . get ( ) ) ) ; <nl> + / / { inner / Enter ' , ' outer / Switch ' } - - > ' inner / Merge ' . ' inner / Enter ' is in frame <nl> + / / ' inner_loop ' . ' outer / Switch ' is in frame ' outer_loop ' . <nl> + std : : vector < ControlFlowInfo > info ; <nl> + Status status = BuildAndValidateControlFlowInfo ( graph . get ( ) , & info ) ; <nl> + EXPECT_FALSE ( status . ok ( ) ) ; <nl> + EXPECT_TRUE ( str_util : : StrContains ( status . error_message ( ) , <nl> + " has inputs from different frames " ) ) <nl> + < < status . error_message ( ) ; <nl> + } <nl> + <nl> + TEST ( ValidateControlFlowTest , MismatchedParentFrames ) { <nl> + Scope scope = Scope : : NewRootScope ( ) . ExitOnError ( ) ; <nl> + std : : vector < Output > inputs ; <nl> + inputs . push_back ( ops : : Placeholder ( scope , DT_INT32 ) ) ; <nl> + std : : vector < Output > outputs ; <nl> + TF_ASSERT_OK ( ops : : BuildWhileLoop ( scope , inputs , LessThanTenCond , AddOneBody , <nl> + " test_loop " , & outputs ) ) ; <nl> + std : : unique_ptr < Graph > graph ( new Graph ( OpRegistry : : Global ( ) ) ) ; <nl> + TF_ASSERT_OK ( scope . ToGraph ( graph . get ( ) ) ) ; <nl> + Node * enter_1 = nullptr ; <nl> + for ( Node * node : graph - > op_nodes ( ) ) { <nl> + if ( IsEnter ( node ) ) { <nl> + enter_1 = node ; <nl> + } <nl> + } <nl> + ASSERT_TRUE ( enter_1 ! = nullptr ) ; <nl> + <nl> + NodeDef enter ; <nl> + enter . 
set_name ( " Enter2 " ) ; <nl> + enter . set_op ( " Enter " ) ; <nl> + ( * enter . mutable_attr ( ) ) [ " T " ] . set_type ( DT_INT32 ) ; <nl> + ( * enter . mutable_attr ( ) ) [ " frame_name " ] . set_s ( " test_loop " ) ; <nl> + * enter . add_input ( ) = " Enter " ; <nl> + Status status ; <nl> + Node * enter_2 = graph - > AddNode ( enter , & status ) ; <nl> + TF_ASSERT_OK ( status ) ; <nl> + graph - > AddControlEdge ( enter_1 , enter_2 ) ; <nl> + <nl> + / / SOURCE ( " " ) - - > Enter ( " test_loop " ) - - > Enter2 ( " test_loop " ) <nl> + / / For node ' Enter ' , the parent frame of " test_loop " is empty . <nl> + / / For node ' Enter2 ' , the parent frame of " test_loop " is " test_loop " . <nl> + std : : vector < ControlFlowInfo > info ; <nl> + status = BuildAndValidateControlFlowInfo ( graph . get ( ) , & info ) ; <nl> + EXPECT_FALSE ( status . ok ( ) ) ; <nl> + EXPECT_TRUE ( <nl> + str_util : : StrContains ( status . error_message ( ) , " Mismatched parent frames " ) ) <nl> + < < status . error_message ( ) ; <nl> + } <nl> + <nl> + TEST ( ValidateControlFlowTest , TwoLoopCond ) { <nl> + / / Test that one frame has at most one LoopCond node . This is necessary for <nl> + / / functionalize control flow . <nl> + Scope scope = Scope : : NewRootScope ( ) . ExitOnError ( ) ; <nl> + std : : vector < Output > inputs ; <nl> + inputs . push_back ( ops : : Placeholder ( scope , DT_INT32 ) ) ; <nl> + std : : vector < Output > outputs ; <nl> + TF_ASSERT_OK ( ops : : BuildWhileLoop ( scope , inputs , LessThanTenCond , AddOneBody , <nl> + " test_loop " , & outputs ) ) ; <nl> + outputs . clear ( ) ; <nl> + TF_ASSERT_OK ( ops : : BuildWhileLoop ( scope . NewSubScope ( " sub " ) , inputs , <nl> + LessThanTenCond , AddOneBody , " test_loop " , <nl> + & outputs , false ) ) ; <nl> + std : : unique_ptr < Graph > graph ( new Graph ( OpRegistry : : Global ( ) ) ) ; <nl> + TF_ASSERT_OK ( scope . ToGraph ( graph . get ( ) ) ) ; <nl> + std : : vector < ControlFlowInfo > info ; <nl> + Status status = BuildAndValidateControlFlowInfo ( graph . get ( ) , & info ) ; <nl> + EXPECT_FALSE ( status . ok ( ) ) ; <nl> + EXPECT_TRUE ( str_util : : StrContains ( status . error_message ( ) , <nl> + " more than one LoopCond node " ) ) <nl> + < < status . error_message ( ) ; <nl> + } <nl> + <nl> + } / / namespace <nl> + } / / namespace tensorflow <nl> | [ TF : XLA ] Validate the control flow structure in encapsulate_subgraphs_pass and encapsulate_tpu_computations_pass , in order to detect errors earlier . | tensorflow/tensorflow | c9a2034f93981e17eef5f96fbd2894202b8fc2c1 | 2018-06-15T17:29:10Z |
mmm a / Makefile <nl> ppp b / Makefile <nl> LIBINTEROP_SERVER_MAIN_SRC = \ <nl> $ ( GENDIR ) / src / proto / grpc / testing / empty . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / empty . grpc . pb . cc \ <nl> $ ( GENDIR ) / src / proto / grpc / testing / messages . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / messages . grpc . pb . cc \ <nl> $ ( GENDIR ) / src / proto / grpc / testing / test . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / test . grpc . pb . cc \ <nl> - test / cpp / interop / server_main . cc \ <nl> + test / cpp / interop / interop_server . cc \ <nl> <nl> PUBLIC_HEADERS_CXX + = \ <nl> <nl> ifneq ( $ ( NO_DEPS ) , true ) <nl> - include $ ( LIBINTEROP_SERVER_MAIN_OBJS : . o = . dep ) <nl> endif <nl> endif <nl> - $ ( OBJDIR ) / $ ( CONFIG ) / test / cpp / interop / server_main . o : $ ( GENDIR ) / src / proto / grpc / testing / empty . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / empty . grpc . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / messages . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / messages . grpc . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / test . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / test . grpc . pb . cc <nl> + $ ( OBJDIR ) / $ ( CONFIG ) / test / cpp / interop / interop_server . o : $ ( GENDIR ) / src / proto / grpc / testing / empty . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / empty . grpc . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / messages . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / messages . grpc . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / test . pb . cc $ ( GENDIR ) / src / proto / grpc / testing / test . grpc . pb . cc <nl> <nl> <nl> LIBQPS_SRC = \ <nl> test / cpp / end2end / test_service_impl . cc : $ ( OPENSSL_DEP ) <nl> test / cpp / interop / client . cc : $ ( OPENSSL_DEP ) <nl> test / cpp / interop / client_helper . cc : $ ( OPENSSL_DEP ) <nl> test / cpp / interop / interop_client . cc : $ ( OPENSSL_DEP ) <nl> + test / cpp / interop / interop_server . cc : $ ( OPENSSL_DEP ) <nl> test / cpp / interop / server_helper . cc : $ ( OPENSSL_DEP ) <nl> - test / cpp / interop / server_main . cc : $ ( OPENSSL_DEP ) <nl> test / cpp / qps / client_async . cc : $ ( OPENSSL_DEP ) <nl> test / cpp / qps / client_sync . cc : $ ( OPENSSL_DEP ) <nl> test / cpp / qps / driver . cc : $ ( OPENSSL_DEP ) <nl> mmm a / PYTHON - MANIFEST . in <nl> ppp b / PYTHON - MANIFEST . in <nl> <nl> recursive - include src / python / grpcio / grpc * . c * . h * . py * . pyx * . pxd * . pxi * . python * . pem <nl> recursive - exclude src / python / grpcio / grpc / _cython * . so * . pyd <nl> graft src / python / grpcio / tests <nl> + graft src / python / grpcio / grpcio . egg - info <nl> graft src / core <nl> graft src / boringssl <nl> graft include / grpc <nl> mmm a / Rakefile <nl> ppp b / Rakefile <nl> task ' dlls ' do <nl> grpc_config = ENV [ ' GRPC_CONFIG ' ] | | ' opt ' <nl> verbose = ENV [ ' V ' ] | | ' 0 ' <nl> <nl> - env = ' CPPFLAGS = " - D_WIN32_WINNT = 0x600 - DUNICODE - D_UNICODE " ' <nl> + env = ' CPPFLAGS = " - D_WIN32_WINNT = 0x600 - DUNICODE - D_UNICODE - Wno - unused - variable - Wno - unused - result " ' <nl> env + = ' LDFLAGS = - static ' <nl> env + = ' SYSTEM = MINGW32 ' <nl> env + = ' EMBED_ZLIB = true ' <nl> mmm a / build . yaml <nl> ppp b / build . yaml <nl> libs : <nl> - src / proto / grpc / testing / empty . proto <nl> - src / proto / grpc / testing / messages . proto <nl> - src / proto / grpc / testing / test . 
proto <nl> - - test / cpp / interop / server_main . cc <nl> + - test / cpp / interop / interop_server . cc <nl> deps : <nl> - interop_server_helper <nl> - grpc + + _test_util <nl> mmm a / doc / PROTOCOL - HTTP2 . md <nl> ppp b / doc / PROTOCOL - HTTP2 . md <nl> Request - Headers are delivered as HTTP2 headers in HEADERS + CONTINUATION frames . <nl> * * * Nanosecond * * → " n " <nl> * * * Content - Type * * → " content - type " " application / grpc " [ ( " + proto " / " + json " / { _custom_ } ) ] <nl> * * * Content - Coding * * → " identity " / " gzip " / " deflate " / " snappy " / { _custom_ } <nl> - * * * Message - Encoding * * → " grpc - encoding " Content - Coding <nl> + * < a name = " message - encoding " > < / a > * * Message - Encoding * * → " grpc - encoding " Content - Coding <nl> * * * Message - Accept - Encoding * * → " grpc - accept - encoding " Content - Coding \ * ( " , " Content - Coding ) <nl> * * * User - Agent * * → " user - agent " { _structured user - agent string_ } <nl> * * * Message - Type * * → " grpc - message - type " { _type name for message schema_ } <nl> binary values ' lengths being post - Base64 . <nl> The repeated sequence of * * Length - Prefixed - Message * * items is delivered in DATA frames <nl> <nl> * * * Length - Prefixed - Message * * → Compressed - Flag Message - Length Message <nl> - * * * Compressed - Flag * * → 0 / 1 # encoded as 1 byte unsigned integer <nl> + * < a name = " compressed - flag " > < / a > * * Compressed - Flag * * → 0 / 1 # encoded as 1 byte unsigned integer <nl> * * * Message - Length * * → { _length of Message_ } # encoded as 4 byte unsigned integer <nl> * * * Message * * → \ * { binary octet } <nl> <nl> new file mode 100644 <nl> index 00000000000 . . 15fae4d29bf <nl> mmm / dev / null <nl> ppp b / doc / compression . md <nl> <nl> + # # * * gRPC Compression * * <nl> + <nl> + The keywords " MUST " , " MUST NOT " , " REQUIRED " , " SHALL " , " SHALL NOT " , " SHOULD " , <nl> + " SHOULD NOT " , " RECOMMENDED " , " MAY " , and " OPTIONAL " in this document are to be <nl> + interpreted as described in [ RFC 2119 ] ( http : / / www . ietf . org / rfc / rfc2119 . txt ) . <nl> + <nl> + # # # Intent <nl> + <nl> + Compression is used to reduce the amount of bandwidth used between peers . The <nl> + compression supported by gRPC acts _at the individual message level_ , taking <nl> + _message_ [ as defined in the wire format <nl> + document ] ( PROTOCOL - HTTP2 . md ) . <nl> + <nl> + The implementation supports different compression algorithms . A _default <nl> + compression level_ , to be used in the absence of message - specific settings , MAY <nl> + be specified for during channel creation . <nl> + <nl> + The ability to control compression settings per call and to enable / disable <nl> + compression on a per message basis MAY be used to prevent CRIME / BEAST attacks . <nl> + It also allows for asymmetric compression communication , whereby a response MAY <nl> + be compressed differently , if at all . <nl> + <nl> + # # # Specification <nl> + <nl> + Compression MAY be configured by the Client Application by calling the <nl> + appropriate API method . There are two scenarios where compression MAY be <nl> + configured : <nl> + <nl> + + At channel creation time , which sets the channel default compression and <nl> + therefore the compression that SHALL be used in the absence of per - RPC <nl> + compression configuration . <nl> + + At response time , via : <nl> + + For unary RPCs , the { Client , Server } Context instance . 
<nl> + + For streaming RPCs , the { Client , Server } Writer instance . In this case , <nl> + configuration is reduced to disabling compression altogether . <nl> + <nl> + # # # Compression Method Asymmetry Between Peers <nl> + <nl> + A gRPC peer MAY choose to respond using a different compression method to that <nl> + of the request , including not performing any compression , regardless of channel <nl> + and RPC settings ( for example , if compression would result in small or negative <nl> + gains ) . <nl> + <nl> + When a message from a client compressed with an unsupported algorithm is <nl> + processed by a server , it WILL result in an INVALID \ _ARGUMENT error on the <nl> + server . The server will then include in its response a ` grpc - accept - encoding ` <nl> + header specifying the algorithms it does accept . If an INTERNAL error is <nl> + returned from the server despite having used one of the algorithms from the <nl> + ` grpc - accept - encoding ` header , the cause MUST NOT be related to compression . <nl> + Data sent from a server compressed with an algorithm not supported by the client <nl> + WILL result in an INTERNAL error on the client side . <nl> + <nl> + Note that a peer MAY choose to not disclose all the encodings it supports . <nl> + However , if it receives a message compressed in an undisclosed but supported <nl> + encoding , it MUST include said encoding in the response ' s ` grpc - accept - encoding <nl> + h ` eader . <nl> + <nl> + For every message a server is requested to compress using an algorithm it knows <nl> + the client doesn ' t support ( as indicated by the last ` grpc - accept - encoding ` <nl> + header received from the client ) , it SHALL send the message uncompressed . <nl> + <nl> + # # # Specific Disabling of Compression <nl> + <nl> + If the user ( through the previously described mechanisms ) requests to disable <nl> + compression the next message MUST be sent uncompressed . This is instrumental in <nl> + preventing BEAST / CRIME attacks . This applies to both the the unary and streaming <nl> + cases . <nl> + <nl> + # # # Compression Levels and Algorithms <nl> + <nl> + The set of supported algorithm is implementation dependent . In order to simplify <nl> + the public API and to operate seamlessly across implementations ( both in terms <nl> + of languages but also different version of the same one ) , we introduce the idea <nl> + of _compression levels_ ( such as " low " , " medium " , " high " ) . <nl> + <nl> + Levels map to concrete algorithms and / or their settings ( such as " low " mapping <nl> + to " gzip - 3 " and " high " mapping to " gzip - 9 " ) automatically depending on what a <nl> + peer is known to support . A server is always aware of what its clients support , <nl> + as clients disclose it in their Message - Accept - Encoding header as part of their <nl> + initial call . A client doesn ' t a priori ( presently ) know which algorithms a <nl> + server supports . This issue can be addressed with an initial negotiation of <nl> + capabilities or an automatic retry mechanism . These features will be implemented <nl> + in the future . Currently however , compression levels are only supported at the <nl> + server side , which is aware of the client ' s capabilities through the incoming <nl> + Message - Accept - Encoding header . <nl> + <nl> + # # # Propagation to child RPCs <nl> + <nl> + The inheritance of the compression configuration by child RPCs is left up to the <nl> + implementation . 
Note that in the absence of changes to the parent channel , its <nl> + configuration will be used . <nl> + <nl> + # # # Test cases <nl> + <nl> + 1 . When a compression level is not specified for either the channel or the <nl> + message , the default channel level _none_ is considered : data MUST NOT be <nl> + compressed . <nl> + 1 . When per - RPC compression configuration isn ' t present for a message , the <nl> + channel compression configuration MUST be used . <nl> + 1 . When a compression method ( including no compression ) is specified for an <nl> + outgoing message , the message MUST be compressed accordingly . <nl> + 1 . A message compressed in a way not supported by its endpoint MUST fail with <nl> + INVALID \ _ARGUMENT status , its associated description indicating the unsupported <nl> + condition as well as the supported ones . The returned ` grpc - accept - encoding ` <nl> + header MUST NOT contain the compression method ( encoding ) used . <nl> + 1 . An ill - constructed message with its [ Compressed - Flag <nl> + bit ] ( PROTOCOL - HTTP2 . md # compressed - flag ) <nl> + set but lacking a <nl> + " [ grpc - encoding ] ( PROTOCOL - HTTP2 . md # message - encoding ) " <nl> + entry different from _identity_ in its metadata MUST fail with INTERNAL status , <nl> + its associated description indicating the invalid Compressed - Flag condition . <nl> mmm a / doc / interop - test - descriptions . md <nl> ppp b / doc / interop - test - descriptions . md <nl> control ( even if compression is enabled on the channel ) . <nl> <nl> Server features : <nl> * [ UnaryCall ] [ ] <nl> - * [ Compressable Payload ] [ ] <nl> <nl> Procedure : <nl> 1 . Client calls UnaryCall with : <nl> <nl> ` ` ` <nl> { <nl> - response_type : COMPRESSABLE <nl> response_size : 314159 <nl> payload : { <nl> body : 271828 bytes of zeros <nl> Procedure : <nl> <nl> Client asserts : <nl> * call was successful <nl> - * response payload type is COMPRESSABLE <nl> * response payload body is 314159 bytes in size <nl> * clients are free to assert that the response payload body contents are zero <nl> and comparing the entire response message against a golden response <nl> <nl> - # # # large_compressed_unary <nl> - <nl> - This test verifies compressed unary calls succeed in sending messages . It <nl> - sends one unary request for every payload type , with and without requesting a <nl> - compressed response from the server . <nl> - <nl> - In all scenarios , whether compression was actually performed is determined by <nl> - the compression bit in the response ' s message flags . <nl> + # # # client_compressed_unary <nl> <nl> + This test verifies the client can compress unary messages by sending two unary <nl> + calls , for compressed and uncompressed payloads . It also sends an initial <nl> + probing request to verify whether the server supports the [ CompressedRequest ] [ ] <nl> + feature by checking if the probing call fails with an ` INVALID_ARGUMENT ` status . <nl> <nl> Server features : <nl> * [ UnaryCall ] [ ] <nl> - * [ Compressable Payload ] [ ] <nl> - * [ Uncompressable Payload ] [ ] <nl> + * [ CompressedRequest ] [ ] <nl> <nl> Procedure : <nl> - 1 . Client calls UnaryCall with : <nl> + 1 . Client calls UnaryCall with the feature probe , an * uncompressed * message : <nl> + ` ` ` <nl> + { <nl> + expect_compressed : { <nl> + value : true <nl> + } <nl> + response_size : 314159 <nl> + payload : { <nl> + body : 271828 bytes of zeros <nl> + } <nl> + } <nl> + ` ` ` <nl> + <nl> + 1 . 
Client calls UnaryCall with the * compressed * message : <nl> + <nl> + ` ` ` <nl> + { <nl> + expect_compressed : { <nl> + value : true <nl> + } <nl> + response_size : 314159 <nl> + payload : { <nl> + body : 271828 bytes of zeros <nl> + } <nl> + } <nl> + ` ` ` <nl> + <nl> + 1 . Client calls UnaryCall with the * uncompressed * message : <nl> <nl> ` ` ` <nl> { <nl> - request_compressed_response : bool <nl> - response_type : COMPRESSABLE <nl> + expect_compressed : { <nl> + value : false <nl> + } <nl> response_size : 314159 <nl> payload : { <nl> body : 271828 bytes of zeros <nl> } <nl> } <nl> ` ` ` <nl> + <nl> Client asserts : <nl> - * call was successful <nl> - * response payload type is COMPRESSABLE <nl> - * if ` request_compressed_response ` is false , the response MUST NOT have the <nl> - compressed message flag set . <nl> - * if ` request_compressed_response ` is true , the response MUST have the <nl> - compressed message flag set . <nl> - * response payload body is 314159 bytes in size <nl> - * clients are free to assert that the response payload body contents are <nl> - zero and comparing the entire response message against a golden response <nl> + * First call failed with ` INVALID_ARGUMENT ` status . <nl> + * Subsequent calls were successful . <nl> + * Response payload body is 314159 bytes in size . <nl> + * Clients are free to assert that the response payload body contents are <nl> + zeros and comparing the entire response message against a golden response . <nl> <nl> <nl> - 2 . Client calls UnaryCall with : <nl> + # # # server_compressed_unary <nl> + <nl> + This test verifies the server can compress unary messages . It sends two unary <nl> + requests , expecting the server ' s response to be compressed or not according to <nl> + the ` response_compressed ` boolean . <nl> + <nl> + Whether compression was actually performed is determined by the compression bit <nl> + in the response ' s message flags . * Note that some languages may not have access <nl> + to the message flags * . <nl> + <nl> + <nl> + Server features : <nl> + * [ UnaryCall ] [ ] <nl> + * [ CompressedResponse ] [ ] <nl> + <nl> + Procedure : <nl> + 1 . Client calls UnaryCall with ` SimpleRequest ` : <nl> + <nl> + ` ` ` <nl> + { <nl> + response_compressed : { <nl> + value : true <nl> + } <nl> + response_size : 314159 <nl> + payload : { <nl> + body : 271828 bytes of zeros <nl> + } <nl> + } <nl> + ` ` ` <nl> + <nl> ` ` ` <nl> { <nl> - request_compressed_response : bool <nl> - response_type : UNCOMPRESSABLE <nl> + response_compressed : { <nl> + value : false <nl> + } <nl> response_size : 314159 <nl> payload : { <nl> body : 271828 bytes of zeros <nl> Procedure : <nl> ` ` ` <nl> Client asserts : <nl> * call was successful <nl> - * response payload type is UNCOMPRESSABLE <nl> - * the response MAY have the compressed message flag set . Some <nl> - implementations will choose to compress the payload even when the output <nl> - size if larger than the input . <nl> - * response payload body is 314159 bytes in size <nl> + * when ` response_compressed ` is true , the response MUST have the <nl> + compressed message flag set . <nl> + * when ` response_compressed ` is false , the response MUST NOT have <nl> + the compressed message flag set . <nl> + * response payload body is 314159 bytes in size in both cases . 
<nl> + * clients are free to assert that the response payload body contents are <nl> + zero and comparing the entire response message against a golden response <nl> <nl> <nl> # # # client_streaming <nl> This test verifies that client - only streaming succeeds . <nl> <nl> Server features : <nl> * [ StreamingInputCall ] [ ] <nl> - * [ Compressable Payload ] [ ] <nl> <nl> Procedure : <nl> 1 . Client calls StreamingInputCall <nl> Client asserts : <nl> * call was successful <nl> * response aggregated_payload_size is 74922 <nl> <nl> + <nl> + # # # client_compressed_streaming <nl> + <nl> + This test verifies the client can compress requests on per - message basis by <nl> + performing a two - request streaming call . It also sends an initial probing <nl> + request to verify whether the server supports the [ CompressedRequest ] [ ] feature <nl> + by checking if the probing call fails with an ` INVALID_ARGUMENT ` status . <nl> + <nl> + Procedure : <nl> + 1 . Client calls ` StreamingInputCall ` and sends the following feature - probing <nl> + * uncompressed * ` StreamingInputCallRequest ` message <nl> + <nl> + ` ` ` <nl> + { <nl> + expect_compressed : { <nl> + value : true <nl> + } <nl> + payload : { <nl> + body : 27182 bytes of zeros <nl> + } <nl> + } <nl> + ` ` ` <nl> + If the call fails with ` INVALID_ARGUMENT ` , the test fails . Otherwise , we <nl> + continue . <nl> + <nl> + 1 . Client calls ` StreamingInputCall ` again , sending the * compressed * message <nl> + <nl> + ` ` ` <nl> + { <nl> + expect_compressed : { <nl> + value : true <nl> + } <nl> + payload : { <nl> + body : 27182 bytes of zeros <nl> + } <nl> + } <nl> + ` ` ` <nl> + <nl> + 1 . And finally , the * uncompressed * message <nl> + ` ` ` <nl> + { <nl> + expect_compressed : { <nl> + value : false <nl> + } <nl> + payload : { <nl> + body : 45904 bytes of zeros <nl> + } <nl> + } <nl> + ` ` ` <nl> + <nl> + 1 . Client half - closes <nl> + <nl> + Client asserts : <nl> + * First call fails with ` INVALID_ARGUMENT ` . <nl> + * Next calls succeeds . <nl> + * Response aggregated payload size is 73086 . <nl> + <nl> + <nl> # # # server_streaming <nl> <nl> This test verifies that server - only streaming succeeds . <nl> <nl> Server features : <nl> * [ StreamingOutputCall ] [ ] <nl> - * [ Compressable Payload ] [ ] <nl> <nl> Procedure : <nl> - 1 . Client calls StreamingOutputCall with : <nl> + 1 . Client calls StreamingOutputCall with ` StreamingOutputCallRequest ` : <nl> <nl> ` ` ` <nl> { <nl> - response_type : COMPRESSABLE <nl> response_parameters : { <nl> size : 31415 <nl> } <nl> response_parameters : { <nl> - size : 59 <nl> + size : 9 <nl> } <nl> response_parameters : { <nl> size : 2653 <nl> Procedure : <nl> Client asserts : <nl> * call was successful <nl> * exactly four responses <nl> - * response payloads are COMPRESSABLE <nl> * response payload bodies are sized ( in order ) : 31415 , 9 , 2653 , 58979 <nl> * clients are free to assert that the response payload body contents are zero <nl> and comparing the entire response messages against golden responses <nl> <nl> # # # server_compressed_streaming <nl> <nl> - This test verifies that server - only compressed streaming succeeds . <nl> + This test verifies that the server can compress streaming messages and disable <nl> + compression on individual messages . <nl> <nl> Server features : <nl> * [ StreamingOutputCall ] [ ] <nl> - * [ Compressable Payload ] [ ] <nl> - * [ Uncompressable Payload ] [ ] <nl> + * [ CompressedResponse ] [ ] <nl> <nl> <nl> Procedure : <nl> - 1 . 
Client calls StreamingOutputCall with : <nl> + 1 . Client calls StreamingOutputCall with ` StreamingOutputCallRequest ` : <nl> <nl> ` ` ` <nl> { <nl> - request_compressed_response : bool <nl> - response_type : COMPRESSABLE <nl> response_parameters : { <nl> + compressed : { <nl> + value : true <nl> + } <nl> size : 31415 <nl> } <nl> response_parameters : { <nl> - size : 59 <nl> - } <nl> - response_parameters : { <nl> - size : 2653 <nl> - } <nl> - response_parameters : { <nl> - size : 58979 <nl> + compressed : { <nl> + value : false <nl> + } <nl> + size : 92653 <nl> } <nl> } <nl> ` ` ` <nl> <nl> Client asserts : <nl> * call was successful <nl> - * exactly four responses <nl> - * response payloads are COMPRESSABLE <nl> - * if ` request_compressed_response ` is false , the response ' s messages MUST <nl> + * exactly two responses <nl> + * when ` response_compressed ` is false , the response ' s messages MUST <nl> NOT have the compressed message flag set . <nl> - * if ` request_compressed_response ` is true , the response ' s messages MUST <nl> + * when ` response_compressed ` is true , the response ' s messages MUST <nl> have the compressed message flag set . <nl> - * response payload bodies are sized ( in order ) : 31415 , 59 , 2653 , 58979 <nl> + * response payload bodies are sized ( in order ) : 31415 , 92653 <nl> * clients are free to assert that the response payload body contents are <nl> zero and comparing the entire response messages against golden responses <nl> <nl> <nl> - 2 . Client calls StreamingOutputCall with : <nl> - <nl> - ` ` ` <nl> - { <nl> - request_compressed_response : bool <nl> - response_type : UNCOMPRESSABLE <nl> - response_parameters : { <nl> - size : 31415 <nl> - } <nl> - response_parameters : { <nl> - size : 59 <nl> - } <nl> - response_parameters : { <nl> - size : 2653 <nl> - } <nl> - response_parameters : { <nl> - size : 58979 <nl> - } <nl> - } <nl> - ` ` ` <nl> - <nl> - Client asserts : <nl> - * call was successful <nl> - * exactly four responses <nl> - * response payloads are UNCOMPRESSABLE <nl> - * the response MAY have the compressed message flag set . Some <nl> - implementations will choose to compress the payload even when the output <nl> - size if larger than the input . <nl> - * response payload bodies are sized ( in order ) : 31415 , 59 , 2653 , 58979 <nl> - * clients are free to assert that the body of the responses are identical to <nl> - the golden uncompressable data at ` test / cpp / interop / rnd . dat ` . <nl> - <nl> - <nl> # # # ping_pong <nl> <nl> This test verifies that full duplex bidi is supported . <nl> <nl> Server features : <nl> * [ FullDuplexCall ] [ ] <nl> - * [ Compressable Payload ] [ ] <nl> <nl> Procedure : <nl> 1 . 
Client calls FullDuplexCall with : <nl> <nl> ` ` ` <nl> { <nl> - response_type : COMPRESSABLE <nl> response_parameters : { <nl> size : 31415 <nl> } <nl> Procedure : <nl> <nl> ` ` ` <nl> { <nl> - response_type : COMPRESSABLE <nl> response_parameters : { <nl> - size : 59 <nl> + size : 9 <nl> } <nl> payload : { <nl> body : 8 bytes of zeros <nl> Procedure : <nl> <nl> ` ` ` <nl> { <nl> - response_type : COMPRESSABLE <nl> response_parameters : { <nl> size : 2653 <nl> } <nl> Procedure : <nl> <nl> ` ` ` <nl> { <nl> - response_type : COMPRESSABLE <nl> response_parameters : { <nl> size : 58979 <nl> } <nl> Procedure : <nl> Client asserts : <nl> * call was successful <nl> * exactly four responses <nl> - * response payloads are COMPRESSABLE <nl> * response payload bodies are sized ( in order ) : 31415 , 9 , 2653 , 58979 <nl> * clients are free to assert that the response payload body contents are zero <nl> and comparing the entire response messages against golden responses <nl> with desired oauth scope . <nl> <nl> The test uses ` - - default_service_account ` with GCE service account email and <nl> ` - - oauth_scope ` with the OAuth scope to use . For testing against <nl> - grpc - test . sandbox . googleapis . com , " https : / / www . googleapis . com / auth / xapi . zoo " should <nl> + grpc - test . sandbox . googleapis . com , " https : / / www . googleapis . com / auth / xapi . zoo " <nl> + should <nl> be passed in as ` - - oauth_scope ` . <nl> <nl> Server features : <nl> * [ UnaryCall ] [ ] <nl> - * [ Compressable Payload ] [ ] <nl> * [ Echo Authenticated Username ] [ ] <nl> * [ Echo OAuth Scope ] [ ] <nl> <nl> Procedure : <nl> <nl> ` ` ` <nl> { <nl> - response_type : COMPRESSABLE <nl> response_size : 314159 <nl> payload : { <nl> body : 271828 bytes of zeros <nl> Procedure : <nl> <nl> Client asserts : <nl> * call was successful <nl> - * received SimpleResponse . username equals the value of ` - - default_service_account ` flag <nl> + * received SimpleResponse . username equals the value of <nl> + ` - - default_service_account ` flag <nl> * received SimpleResponse . oauth_scope is in ` - - oauth_scope ` <nl> * response payload body is 314159 bytes in size <nl> * clients are free to assert that the response payload body contents are zero <nl> variable GOOGLE_APPLICATION_CREDENTIALS . <nl> <nl> Server features : <nl> * [ UnaryCall ] [ ] <nl> - * [ Compressable Payload ] [ ] <nl> * [ Echo Authenticated Username ] [ ] <nl> * [ Echo OAuth Scope ] [ ] <nl> <nl> Procedure : <nl> <nl> ` ` ` <nl> { <nl> - response_type : COMPRESSABLE <nl> response_size : 314159 <nl> payload : { <nl> body : 271828 bytes of zeros <nl> Client asserts : <nl> * call was successful <nl> * received SimpleResponse . username is not empty and is in the json key file used <nl> by the auth library . The client can optionally check the username matches the <nl> - email address in the key file or equals the value of ` - - default_service_account ` flag . <nl> + email address in the key file or equals the value of ` - - default_service_account ` <nl> + flag . <nl> * response payload body is 314159 bytes in size <nl> * clients are free to assert that the response payload body contents are zero <nl> and comparing the entire response message against a golden response <nl> variable GOOGLE_APPLICATION_CREDENTIALS , * OR * if GCE credentials is used to <nl> fetch the token , ` - - default_service_account ` can be used to pass in GCE service <nl> account email . <nl> - uses the flag ` - - oauth_scope ` for the oauth scope . 
For testing against <nl> - grpc - test . sandbox . googleapis . com , " https : / / www . googleapis . com / auth / xapi . zoo " should <nl> - be passed as the ` - - oauth_scope ` . <nl> + grpc - test . sandbox . googleapis . com , " https : / / www . googleapis . com / auth / xapi . zoo " <nl> + should be passed as the ` - - oauth_scope ` . <nl> <nl> Server features : <nl> * [ UnaryCall ] [ ] <nl> - * [ Compressable Payload ] [ ] <nl> * [ Echo Authenticated Username ] [ ] <nl> * [ Echo OAuth Scope ] [ ] <nl> <nl> Procedure : <nl> 1 . Client uses the auth library to obtain an authorization token <nl> - 2 . Client configures the channel to use AccessTokenCredentials with the access token obtained in step 1 <nl> + 2 . Client configures the channel to use AccessTokenCredentials with the access <nl> + token obtained in step 1 <nl> 3 . Client calls UnaryCall with the following message <nl> <nl> ` ` ` <nl> json key file or GCE default service account email . <nl> <nl> Similar to the other auth tests , this test is only for cloud - to - prod path . <nl> <nl> - This test verifies unary calls succeed in sending messages using a JWT or a service account <nl> - credentials set on the RPC . <nl> + This test verifies unary calls succeed in sending messages using a JWT or a <nl> + service account credentials set on the RPC . <nl> <nl> The test <nl> - uses the flag ` - - service_account_key_file ` with the path to a json key file <nl> downloaded from https : / / console . developers . google . com . Alternately , if using a <nl> usable auth implementation , it may specify the file location in the environment <nl> variable GOOGLE_APPLICATION_CREDENTIALS <nl> - - optionally uses the flag ` - - oauth_scope ` for the oauth scope if implementator <nl> + - optionally uses the flag ` - - oauth_scope ` for the oauth scope if implementator <nl> wishes to use service account credential instead of JWT credential . For testing <nl> - against grpc - test . sandbox . googleapis . com , oauth scope <nl> + against grpc - test . sandbox . googleapis . com , oauth scope <nl> " https : / / www . googleapis . com / auth / xapi . zoo " should be used . <nl> <nl> Server features : <nl> * [ UnaryCall ] [ ] <nl> - * [ Compressable Payload ] [ ] <nl> * [ Echo Authenticated Username ] [ ] <nl> * [ Echo OAuth Scope ] [ ] <nl> <nl> by the server . <nl> Server features : <nl> * [ UnaryCall ] [ ] <nl> * [ FullDuplexCall ] [ ] <nl> - * [ Compressable Payload ] [ ] <nl> * [ Echo Metadata ] [ ] <nl> <nl> Procedure : <nl> Procedure : <nl> <nl> ` ` ` <nl> { <nl> - response_type : COMPRESSABLE <nl> response_size : 314159 <nl> payload : { <nl> body : 271828 bytes of zeros <nl> Procedure : <nl> <nl> ` ` ` <nl> { <nl> - response_type : COMPRESSABLE <nl> response_size : 314159 <nl> payload : { <nl> body : 271828 bytes of zeros <nl> from the server . <nl> <nl> Server features : <nl> * [ FullDuplexCall ] [ ] <nl> - * [ Compressable Payload ] [ ] <nl> <nl> Procedure : <nl> 1 . Client starts FullDuplexCall with <nl> <nl> ` ` ` <nl> { <nl> - response_type : COMPRESSABLE <nl> response_parameters : { <nl> size : 31415 <nl> } <nl> payload body of size ` SimpleRequest . response_size ` bytes and type as appropriate <nl> for the ` SimpleRequest . response_type ` . If the server does not support the <nl> ` response_type ` , then it should fail the RPC with ` INVALID_ARGUMENT ` . 
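For C# implementations, the sketch below illustrates one way a client might ask the Grpc.Core runtime to gzip-compress the messages it sends on a single call, mirroring the `CanReadCompressedMessages` test added elsewhere in this change; it is a minimal sketch, not part of the interop specification. The `demo.EchoService`/`Echo` method, the string marshaller, and the direct use of the internal `grpc-internal-encoding-request` metadata key outside test code are assumptions made for the example only.

```
using Grpc.Core;

static class CompressedCallSketch
{
    // Hypothetical echo-style unary method; a real interop client would use the
    // generated grpc.testing.TestService stubs instead.
    static readonly Method<string, string> EchoMethod = new Method<string, string>(
        MethodType.Unary, "demo.EchoService", "Echo",
        Marshallers.StringMarshaller, Marshallers.StringMarshaller);

    public static string CallWithGzip(Channel channel, string request)
    {
        // The same internal metadata key the new C# CompressionTest uses to ask
        // the runtime to gzip-compress messages sent on this call.
        var compressionMetadata = new Metadata
        {
            new Metadata.Entry("grpc-internal-encoding-request", "gzip")
        };

        var call = new CallInvocationDetails<string, string>(
            channel, EchoMethod, new CallOptions(headers: compressionMetadata));

        // Blocking unary call; a compressed response is decompressed transparently
        // by the runtime before being returned here.
        return Calls.BlockingUnaryCall(call, request);
    }
}
```

This transport-level setting is separate from the `response_compressed` and `expect_compressed` booleans used in the compression test procedures above, which are ordinary fields inside the test request messages that tell the server what to produce or what to expect.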
<nl> <nl> + # # # CompressedResponse <nl> + [ CompressedResponse ] : # compressedresponse <nl> + <nl> + When the client sets ` response_compressed ` to true , the server ' s response is <nl> + sent back compressed . Note that ` response_compressed ` is present on both <nl> + ` SimpleRequest ` ( unary ) and ` StreamingOutputCallRequest ` ( streaming ) . <nl> + <nl> + # # # CompressedRequest <nl> + [ CompressedRequest ] : # compressedrequest <nl> + <nl> + When the client sets ` expect_compressed ` to true , the server expects the client <nl> + request to be compressed . If it ' s not , it fails the RPC with ` INVALID_ARGUMENT ` . <nl> + Note that ` response_compressed ` is present on both ` SimpleRequest ` ( unary ) and <nl> + ` StreamingOutputCallRequest ` ( streaming ) . <nl> + <nl> # # # StreamingInputCall <nl> [ StreamingInputCall ] : # streaminginputcall <nl> <nl> payload body of size ResponseParameters . size bytes , as specified by its <nl> respective ResponseParameters . After receiving half close and sending all <nl> responses , it closes with OK . <nl> <nl> - # # # Compressable Payload <nl> - [ Compressable Payload ] : # compressable - payload <nl> - <nl> - When the client requests COMPRESSABLE payload , the response includes a payload <nl> - of the size requested containing all zeros and the payload type is <nl> - COMPRESSABLE . <nl> - <nl> - # # # Uncompressable Payload <nl> - [ Uncompressable Payload ] : # uncompressable - payload <nl> - <nl> - When the client requests UNCOMPRESSABLE payload , the response includes a payload <nl> - of the size requested containing uncompressable data and the payload type is <nl> - UNCOMPRESSABLE . <nl> - <nl> # # # Echo Status <nl> [ Echo Status ] : # echo - status <nl> When the client sends a response_status in the request payload , the server closes <nl> mmm a / examples / objective - c / auth_sample / AuthSample . xcodeproj / project . pbxproj <nl> ppp b / examples / objective - c / auth_sample / AuthSample . xcodeproj / project . pbxproj <nl> <nl> isa = PBXNativeTarget ; <nl> buildConfigurationList = 63E1E9A21B28CB2100EF0978 / * Build configuration list for PBXNativeTarget " AuthSample " * / ; <nl> buildPhases = ( <nl> - DAABBA7B5788A39108D7CA83 / * Check Pods Manifest . lock * / , <nl> + DAABBA7B5788A39108D7CA83 / * [ CP ] Check Pods Manifest . lock * / , <nl> 63E1E9781B28CB2000EF0978 / * Sources * / , <nl> 63E1E9791B28CB2000EF0978 / * Frameworks * / , <nl> 63E1E97A1B28CB2000EF0978 / * Resources * / , <nl> - AEFCCC69DD59CE8F6EB769D7 / * Copy Pods Resources * / , <nl> + AEFCCC69DD59CE8F6EB769D7 / * [ CP ] Copy Pods Resources * / , <nl> + D24F6598302C412D4B863D6F / * [ CP ] Embed Pods Frameworks * / , <nl> ) ; <nl> buildRules = ( <nl> ) ; <nl> <nl> / * End PBXResourcesBuildPhase section * / <nl> <nl> / * Begin PBXShellScriptBuildPhase section * / <nl> - AEFCCC69DD59CE8F6EB769D7 / * Copy Pods Resources * / = { <nl> + AEFCCC69DD59CE8F6EB769D7 / * [ CP ] Copy Pods Resources * / = { <nl> isa = PBXShellScriptBuildPhase ; <nl> buildActionMask = 2147483647 ; <nl> files = ( <nl> ) ; <nl> inputPaths = ( <nl> ) ; <nl> - name = " Copy Pods Resources " ; <nl> + name = " [ CP ] Copy Pods Resources " ; <nl> outputPaths = ( <nl> ) ; <nl> runOnlyForDeploymentPostprocessing = 0 ; <nl> <nl> shellScript = " \ " $ { SRCROOT } / Pods / Target Support Files / Pods - AuthSample / Pods - AuthSample - resources . sh \ " \ n " ; <nl> showEnvVarsInLog = 0 ; <nl> } ; <nl> - DAABBA7B5788A39108D7CA83 / * Check Pods Manifest . 
lock * / = { <nl> + D24F6598302C412D4B863D6F / * [ CP ] Embed Pods Frameworks * / = { <nl> isa = PBXShellScriptBuildPhase ; <nl> buildActionMask = 2147483647 ; <nl> files = ( <nl> ) ; <nl> inputPaths = ( <nl> ) ; <nl> - name = " Check Pods Manifest . lock " ; <nl> + name = " [ CP ] Embed Pods Frameworks " ; <nl> + outputPaths = ( <nl> + ) ; <nl> + runOnlyForDeploymentPostprocessing = 0 ; <nl> + shellPath = / bin / sh ; <nl> + shellScript = " \ " $ { SRCROOT } / Pods / Target Support Files / Pods - AuthSample / Pods - AuthSample - frameworks . sh \ " \ n " ; <nl> + showEnvVarsInLog = 0 ; <nl> + } ; <nl> + DAABBA7B5788A39108D7CA83 / * [ CP ] Check Pods Manifest . lock * / = { <nl> + isa = PBXShellScriptBuildPhase ; <nl> + buildActionMask = 2147483647 ; <nl> + files = ( <nl> + ) ; <nl> + inputPaths = ( <nl> + ) ; <nl> + name = " [ CP ] Check Pods Manifest . lock " ; <nl> outputPaths = ( <nl> ) ; <nl> runOnlyForDeploymentPostprocessing = 0 ; <nl> mmm a / examples / objective - c / auth_sample / AuthTestService . podspec <nl> ppp b / examples / objective - c / auth_sample / AuthTestService . podspec <nl> Pod : : Spec . new do | s | <nl> s . name = " AuthTestService " <nl> s . version = " 0 . 0 . 1 " <nl> s . license = " New BSD " <nl> + s . authors = { ' gRPC contributors ' = > ' grpc - io @ googlegroups . com ' } <nl> + s . homepage = " http : / / www . grpc . io / " <nl> + s . summary = " AuthTestService example " <nl> + s . source = { : git = > ' https : / / github . com / grpc / grpc . git ' } <nl> <nl> s . ios . deployment_target = " 7 . 1 " <nl> s . osx . deployment_target = " 10 . 9 " <nl> mmm a / examples / objective - c / helloworld / HelloWorld . podspec <nl> ppp b / examples / objective - c / helloworld / HelloWorld . podspec <nl> Pod : : Spec . new do | s | <nl> s . name = " HelloWorld " <nl> s . version = " 0 . 0 . 1 " <nl> s . license = " New BSD " <nl> + s . authors = { ' gRPC contributors ' = > ' grpc - io @ googlegroups . com ' } <nl> + s . homepage = " http : / / www . grpc . io / " <nl> + s . summary = " HelloWorld example " <nl> + s . source = { : git = > ' https : / / github . com / grpc / grpc . git ' } <nl> <nl> s . ios . deployment_target = " 7 . 1 " <nl> s . osx . deployment_target = " 10 . 9 " <nl> mmm a / examples / objective - c / helloworld / HelloWorld . xcodeproj / project . pbxproj <nl> ppp b / examples / objective - c / helloworld / HelloWorld . xcodeproj / project . pbxproj <nl> <nl> objects = { <nl> <nl> / * Begin PBXBuildFile section * / <nl> - 3EF35C14BDC2B65E21837F02 / * libPods . a in Frameworks * / = { isa = PBXBuildFile ; fileRef = 43AB08B32839A6700EA00DD4 / * libPods . a * / ; } ; <nl> 5E3690661B2A23800040F884 / * main . m in Sources * / = { isa = PBXBuildFile ; fileRef = 5E3690651B2A23800040F884 / * main . m * / ; } ; <nl> 5E3690691B2A23800040F884 / * AppDelegate . m in Sources * / = { isa = PBXBuildFile ; fileRef = 5E3690681B2A23800040F884 / * AppDelegate . m * / ; } ; <nl> 5E36906C1B2A23800040F884 / * ViewController . m in Sources * / = { isa = PBXBuildFile ; fileRef = 5E36906B1B2A23800040F884 / * ViewController . m * / ; } ; <nl> <nl> <nl> / * Begin PBXFileReference section * / <nl> 0C432EF610DB15C0F47A66BB / * Pods - HelloWorld . release . xcconfig * / = { isa = PBXFileReference ; includeInIndex = 1 ; lastKnownFileType = text . xcconfig ; name = " Pods - HelloWorld . release . xcconfig " ; path = " Pods / Target Support Files / Pods - HelloWorld / Pods - HelloWorld . release . 
xcconfig " ; sourceTree = " < group > " ; } ; <nl> - 43AB08B32839A6700EA00DD4 / * libPods . a * / = { isa = PBXFileReference ; explicitFileType = archive . ar ; includeInIndex = 0 ; path = libPods . a ; sourceTree = BUILT_PRODUCTS_DIR ; } ; <nl> 5E3690601B2A23800040F884 / * HelloWorld . app * / = { isa = PBXFileReference ; explicitFileType = wrapper . application ; includeInIndex = 0 ; path = HelloWorld . app ; sourceTree = BUILT_PRODUCTS_DIR ; } ; <nl> 5E3690641B2A23800040F884 / * Info . plist * / = { isa = PBXFileReference ; lastKnownFileType = text . plist . xml ; path = Info . plist ; sourceTree = " < group > " ; } ; <nl> 5E3690651B2A23800040F884 / * main . m * / = { isa = PBXFileReference ; lastKnownFileType = sourcecode . c . objc ; path = main . m ; sourceTree = " < group > " ; } ; <nl> <nl> buildActionMask = 2147483647 ; <nl> files = ( <nl> EF61CF6AE2536A31D47F0E63 / * libPods - HelloWorld . a in Frameworks * / , <nl> - 3EF35C14BDC2B65E21837F02 / * libPods . a in Frameworks * / , <nl> ) ; <nl> runOnlyForDeploymentPostprocessing = 0 ; <nl> } ; <nl> <nl> isa = PBXGroup ; <nl> children = ( <nl> 6B4E1F55F8A2EC95A0E7EE88 / * libPods - HelloWorld . a * / , <nl> - 43AB08B32839A6700EA00DD4 / * libPods . a * / , <nl> ) ; <nl> name = Frameworks ; <nl> sourceTree = " < group > " ; <nl> <nl> isa = PBXNativeTarget ; <nl> buildConfigurationList = 5E3690831B2A23810040F884 / * Build configuration list for PBXNativeTarget " HelloWorld " * / ; <nl> buildPhases = ( <nl> - ACF9162361FB8F24C70657DE / * Check Pods Manifest . lock * / , <nl> + ACF9162361FB8F24C70657DE / * [ CP ] Check Pods Manifest . lock * / , <nl> 5E36905C1B2A23800040F884 / * Sources * / , <nl> 5E36905D1B2A23800040F884 / * Frameworks * / , <nl> 5E36905E1B2A23800040F884 / * Resources * / , <nl> - 4C7D815378D98AB3BFC1A7D5 / * Copy Pods Resources * / , <nl> - BB76529986A8BFAF19A385B1 / * Embed Pods Frameworks * / , <nl> + 4C7D815378D98AB3BFC1A7D5 / * [ CP ] Copy Pods Resources * / , <nl> + BB76529986A8BFAF19A385B1 / * [ CP ] Embed Pods Frameworks * / , <nl> ) ; <nl> buildRules = ( <nl> ) ; <nl> <nl> / * End PBXResourcesBuildPhase section * / <nl> <nl> / * Begin PBXShellScriptBuildPhase section * / <nl> - 4C7D815378D98AB3BFC1A7D5 / * Copy Pods Resources * / = { <nl> + 4C7D815378D98AB3BFC1A7D5 / * [ CP ] Copy Pods Resources * / = { <nl> isa = PBXShellScriptBuildPhase ; <nl> buildActionMask = 2147483647 ; <nl> files = ( <nl> ) ; <nl> inputPaths = ( <nl> ) ; <nl> - name = " Copy Pods Resources " ; <nl> + name = " [ CP ] Copy Pods Resources " ; <nl> outputPaths = ( <nl> ) ; <nl> runOnlyForDeploymentPostprocessing = 0 ; <nl> <nl> shellScript = " \ " $ { SRCROOT } / Pods / Target Support Files / Pods - HelloWorld / Pods - HelloWorld - resources . sh \ " \ n " ; <nl> showEnvVarsInLog = 0 ; <nl> } ; <nl> - ACF9162361FB8F24C70657DE / * Check Pods Manifest . lock * / = { <nl> + ACF9162361FB8F24C70657DE / * [ CP ] Check Pods Manifest . lock * / = { <nl> isa = PBXShellScriptBuildPhase ; <nl> buildActionMask = 2147483647 ; <nl> files = ( <nl> ) ; <nl> inputPaths = ( <nl> ) ; <nl> - name = " Check Pods Manifest . lock " ; <nl> + name = " [ CP ] Check Pods Manifest . lock " ; <nl> outputPaths = ( <nl> ) ; <nl> runOnlyForDeploymentPostprocessing = 0 ; <nl> <nl> shellScript = " diff \ " $ { PODS_ROOT } / . . / Podfile . lock \ " \ " $ { PODS_ROOT } / Manifest . lock \ " > / dev / null \ nif [ [ $ ? ! = 0 ] ] ; then \ n cat < < EOM \ nerror : The sandbox is not in sync with the Podfile . lock . Run ' pod install ' or update your CocoaPods installation . 
\ nEOM \ n exit 1 \ nfi \ n " ; <nl> showEnvVarsInLog = 0 ; <nl> } ; <nl> - BB76529986A8BFAF19A385B1 / * Embed Pods Frameworks * / = { <nl> + BB76529986A8BFAF19A385B1 / * [ CP ] Embed Pods Frameworks * / = { <nl> isa = PBXShellScriptBuildPhase ; <nl> buildActionMask = 2147483647 ; <nl> files = ( <nl> ) ; <nl> inputPaths = ( <nl> ) ; <nl> - name = " Embed Pods Frameworks " ; <nl> + name = " [ CP ] Embed Pods Frameworks " ; <nl> outputPaths = ( <nl> ) ; <nl> runOnlyForDeploymentPostprocessing = 0 ; <nl> shellPath = / bin / sh ; <nl> - shellScript = " \ " $ { SRCROOT } / Pods / Target Support Files / Pods / Pods - frameworks . sh \ " \ n " ; <nl> + shellScript = " \ " $ { SRCROOT } / Pods / Target Support Files / Pods - HelloWorld / Pods - HelloWorld - frameworks . sh \ " \ n " ; <nl> showEnvVarsInLog = 0 ; <nl> } ; <nl> / * End PBXShellScriptBuildPhase section * / <nl> mmm a / examples / objective - c / route_guide / RouteGuide . podspec <nl> ppp b / examples / objective - c / route_guide / RouteGuide . podspec <nl> Pod : : Spec . new do | s | <nl> s . name = " RouteGuide " <nl> s . version = " 0 . 0 . 1 " <nl> s . license = " New BSD " <nl> + s . authors = { ' gRPC contributors ' = > ' grpc - io @ googlegroups . com ' } <nl> + s . homepage = " http : / / www . grpc . io / " <nl> + s . summary = " RouteGuide example " <nl> + s . source = { : git = > ' https : / / github . com / grpc / grpc . git ' } <nl> <nl> s . ios . deployment_target = " 7 . 1 " <nl> s . osx . deployment_target = " 10 . 9 " <nl> mmm a / examples / objective - c / route_guide / RouteGuideClient . xcodeproj / project . pbxproj <nl> ppp b / examples / objective - c / route_guide / RouteGuideClient . xcodeproj / project . pbxproj <nl> <nl> isa = PBXNativeTarget ; <nl> buildConfigurationList = 632527A31B1D0396003073D9 / * Build configuration list for PBXNativeTarget " RouteGuideClient " * / ; <nl> buildPhases = ( <nl> - C6FC30AD2376EC04317237C5 / * Check Pods Manifest . lock * / , <nl> + C6FC30AD2376EC04317237C5 / * [ CP ] Check Pods Manifest . lock * / , <nl> 632527791B1D0395003073D9 / * Sources * / , <nl> 6325277A1B1D0395003073D9 / * Frameworks * / , <nl> 6325277B1B1D0395003073D9 / * Resources * / , <nl> - FFE0BCF30339E7A50A989EAB / * Copy Pods Resources * / , <nl> - B5388EC5A25E89021740B916 / * Embed Pods Frameworks * / , <nl> + FFE0BCF30339E7A50A989EAB / * [ CP ] Copy Pods Resources * / , <nl> + B5388EC5A25E89021740B916 / * [ CP ] Embed Pods Frameworks * / , <nl> ) ; <nl> buildRules = ( <nl> ) ; <nl> <nl> / * End PBXResourcesBuildPhase section * / <nl> <nl> / * Begin PBXShellScriptBuildPhase section * / <nl> - B5388EC5A25E89021740B916 / * Embed Pods Frameworks * / = { <nl> + B5388EC5A25E89021740B916 / * [ CP ] Embed Pods Frameworks * / = { <nl> isa = PBXShellScriptBuildPhase ; <nl> buildActionMask = 2147483647 ; <nl> files = ( <nl> ) ; <nl> inputPaths = ( <nl> ) ; <nl> - name = " Embed Pods Frameworks " ; <nl> + name = " [ CP ] Embed Pods Frameworks " ; <nl> outputPaths = ( <nl> ) ; <nl> runOnlyForDeploymentPostprocessing = 0 ; <nl> <nl> shellScript = " \ " $ { SRCROOT } / Pods / Target Support Files / Pods - RouteGuideClient / Pods - RouteGuideClient - frameworks . sh \ " \ n " ; <nl> showEnvVarsInLog = 0 ; <nl> } ; <nl> - C6FC30AD2376EC04317237C5 / * Check Pods Manifest . lock * / = { <nl> + C6FC30AD2376EC04317237C5 / * [ CP ] Check Pods Manifest . 
lock * / = { <nl> isa = PBXShellScriptBuildPhase ; <nl> buildActionMask = 2147483647 ; <nl> files = ( <nl> ) ; <nl> inputPaths = ( <nl> ) ; <nl> - name = " Check Pods Manifest . lock " ; <nl> + name = " [ CP ] Check Pods Manifest . lock " ; <nl> outputPaths = ( <nl> ) ; <nl> runOnlyForDeploymentPostprocessing = 0 ; <nl> <nl> shellScript = " diff \ " $ { PODS_ROOT } / . . / Podfile . lock \ " \ " $ { PODS_ROOT } / Manifest . lock \ " > / dev / null \ nif [ [ $ ? ! = 0 ] ] ; then \ n cat < < EOM \ nerror : The sandbox is not in sync with the Podfile . lock . Run ' pod install ' or update your CocoaPods installation . \ nEOM \ n exit 1 \ nfi \ n " ; <nl> showEnvVarsInLog = 0 ; <nl> } ; <nl> - FFE0BCF30339E7A50A989EAB / * Copy Pods Resources * / = { <nl> + FFE0BCF30339E7A50A989EAB / * [ CP ] Copy Pods Resources * / = { <nl> isa = PBXShellScriptBuildPhase ; <nl> buildActionMask = 2147483647 ; <nl> files = ( <nl> ) ; <nl> inputPaths = ( <nl> ) ; <nl> - name = " Copy Pods Resources " ; <nl> + name = " [ CP ] Copy Pods Resources " ; <nl> outputPaths = ( <nl> ) ; <nl> runOnlyForDeploymentPostprocessing = 0 ; <nl> mmm a / include / grpc + + / impl / codegen / client_context . h <nl> ppp b / include / grpc + + / impl / codegen / client_context . h <nl> class ClientContext { <nl> / / / <nl> / / / \ return A multimap of initial metadata key - value pairs from the server . <nl> const std : : multimap < grpc : : string_ref , grpc : : string_ref > & <nl> - GetServerInitialMetadata ( ) { <nl> + GetServerInitialMetadata ( ) const { <nl> GPR_CODEGEN_ASSERT ( initial_metadata_received_ ) ; <nl> return recv_initial_metadata_ ; <nl> } <nl> class ClientContext { <nl> / / / <nl> / / / \ return A multimap of metadata trailing key - value pairs from the server . <nl> const std : : multimap < grpc : : string_ref , grpc : : string_ref > & <nl> - GetServerTrailingMetadata ( ) { <nl> + GetServerTrailingMetadata ( ) const { <nl> / / TODO ( yangg ) check finished <nl> return trailing_metadata_ ; <nl> } <nl> class ClientContext { <nl> <nl> # ifndef GRPC_CXX0X_NO_CHRONO <nl> / / / Return the deadline for the client call . <nl> - std : : chrono : : system_clock : : time_point deadline ( ) { <nl> + std : : chrono : : system_clock : : time_point deadline ( ) const { <nl> return Timespec2Timepoint ( deadline_ ) ; <nl> } <nl> # endif / / ! GRPC_CXX0X_NO_CHRONO <nl> <nl> / / / Return a \ a gpr_timespec representation of the client call ' s deadline . <nl> - gpr_timespec raw_deadline ( ) { return deadline_ ; } <nl> + gpr_timespec raw_deadline ( ) const { return deadline_ ; } <nl> <nl> / / / Set the per call authority header ( see <nl> / / / https : / / tools . ietf . org / html / rfc7540 # section - 8 . 1 . 2 . 3 ) . <nl> class ClientContext { <nl> const InputMessage & request , <nl> OutputMessage * result ) ; <nl> <nl> - grpc_call * call ( ) { return call_ ; } <nl> + grpc_call * call ( ) const { return call_ ; } <nl> void set_call ( grpc_call * call , const std : : shared_ptr < Channel > & channel ) ; <nl> <nl> uint32_t initial_metadata_flags ( ) const { <nl> mmm a / include / grpc + + / impl / codegen / server_context . h <nl> ppp b / include / grpc + + / impl / codegen / server_context . h <nl> class ServerContext { <nl> ~ ServerContext ( ) ; <nl> <nl> # ifndef GRPC_CXX0X_NO_CHRONO <nl> - std : : chrono : : system_clock : : time_point deadline ( ) { <nl> + std : : chrono : : system_clock : : time_point deadline ( ) const { <nl> return Timespec2Timepoint ( deadline_ ) ; <nl> } <nl> # endif / / ! 
GRPC_CXX0X_NO_CHRONO <nl> <nl> - gpr_timespec raw_deadline ( ) { return deadline_ ; } <nl> + gpr_timespec raw_deadline ( ) const { return deadline_ ; } <nl> <nl> void AddInitialMetadata ( const grpc : : string & key , const grpc : : string & value ) ; <nl> void AddTrailingMetadata ( const grpc : : string & key , const grpc : : string & value ) ; <nl> class ServerContext { <nl> / / was called . <nl> void TryCancel ( ) const ; <nl> <nl> - const std : : multimap < grpc : : string_ref , grpc : : string_ref > & client_metadata ( ) { <nl> + const std : : multimap < grpc : : string_ref , grpc : : string_ref > & client_metadata ( ) <nl> + const { <nl> return client_metadata_ ; <nl> } <nl> <nl> mmm a / src / core / lib / iomgr / tcp_server_windows . c <nl> ppp b / src / core / lib / iomgr / tcp_server_windows . c <nl> static grpc_error * add_socket_to_server ( grpc_tcp_server * s , SOCKET sock , <nl> size_t addr_len , unsigned port_index , <nl> grpc_tcp_listener * * listener ) { <nl> grpc_tcp_listener * sp = NULL ; <nl> - int port ; <nl> + int port = - 1 ; <nl> int status ; <nl> GUID guid = WSAID_ACCEPTEX ; <nl> DWORD ioctl_num_bytes ; <nl> mmm a / src / csharp / . gitignore <nl> ppp b / src / csharp / . gitignore <nl> <nl> + * . xproj . user <nl> * . userprefs <nl> * . csproj . user <nl> + * . lock . json <nl> StyleCop . Cache <nl> test - results <nl> packages <nl> mmm a / src / csharp / Grpc . Auth / Grpc . Auth . csproj <nl> ppp b / src / csharp / Grpc . Auth / Grpc . Auth . csproj <nl> <nl> < / ItemGroup > <nl> < ItemGroup > <nl> < None Include = " Grpc . Auth . nuspec " / > <nl> + < None Include = " Grpc . Auth . project . json " / > <nl> < None Include = " packages . config " / > <nl> < / ItemGroup > <nl> < / Project > <nl> \ No newline at end of file <nl> new file mode 100644 <nl> index 00000000000 . . c2f5bcb1637 <nl> mmm / dev / null <nl> ppp b / src / csharp / Grpc . Auth / Grpc . Auth . project . json <nl> <nl> + { <nl> + " frameworks " : { <nl> + " net45 " : { } <nl> + } , <nl> + " runtimes " : { <nl> + " win " : { } <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . dd3d94c574a <nl> mmm / dev / null <nl> ppp b / src / csharp / Grpc . Auth / Grpc . Auth . xproj <nl> <nl> + < ? xml version = " 1 . 0 " encoding = " utf - 8 " ? > <nl> + < Project ToolsVersion = " 14 . 0 . 25123 " DefaultTargets = " Build " xmlns = " http : / / schemas . microsoft . com / developer / msbuild / 2003 " > <nl> + < PropertyGroup > <nl> + < VisualStudioVersion Condition = " ' $ ( VisualStudioVersion ) ' = = ' ' " > 14 . 0 . 25123 < / VisualStudioVersion > <nl> + < VSToolsPath Condition = " ' $ ( VSToolsPath ) ' = = ' ' " > $ ( MSBuildExtensionsPath32 ) \ Microsoft \ VisualStudio \ v $ ( VisualStudioVersion ) < / VSToolsPath > <nl> + < / PropertyGroup > <nl> + < Import Project = " $ ( VSToolsPath ) \ DNX \ Microsoft . DNX . Props " Condition = " ' $ ( VSToolsPath ) ' ! = ' ' " / > <nl> + < PropertyGroup Label = " Globals " > <nl> + < ProjectGuid > c82631ed - 06d1 - 4458 - 87bc - 8257d12307a8 < / ProjectGuid > <nl> + < RootNamespace > Grpc . Auth < / RootNamespace > <nl> + < BaseIntermediateOutputPath Condition = " ' $ ( BaseIntermediateOutputPath ) ' = = ' ' " > . . \ Grpc . Core \ artifacts \ obj \ $ ( MSBuildProjectName ) < / BaseIntermediateOutputPath > <nl> + < OutputPath Condition = " ' $ ( OutputPath ) ' = = ' ' " > . \ bin \ < / OutputPath > <nl> + < / PropertyGroup > <nl> + < PropertyGroup > <nl> + < SchemaVersion > 2 . 
0 < / SchemaVersion > <nl> + < / PropertyGroup > <nl> + < Import Project = " $ ( VSToolsPath ) \ DNX \ Microsoft . DNX . targets " Condition = " ' $ ( VSToolsPath ) ' ! = ' ' " / > <nl> + < / Project > <nl> \ No newline at end of file <nl> new file mode 100644 <nl> index 00000000000 . . 1677565824b <nl> mmm / dev / null <nl> ppp b / src / csharp / Grpc . Auth / project . json <nl> <nl> + { <nl> + " version " : " 0 . 15 . 0 - dev " , <nl> + " title " : " gRPC C # Auth " , <nl> + " authors " : [ " Google Inc . " ] , <nl> + " copyright " : " Copyright 2015 , Google Inc . " , <nl> + " packOptions " : { <nl> + " summary " : " Auth library for C # implementation of gRPC - an RPC library and framework " , <nl> + " description " : " Auth library for C # implementation of gRPC - an RPC library and framework . See project site for more info . " , <nl> + " owners " : [ " grpc - packages " ] , <nl> + " licenseUrl " : " https : / / github . com / grpc / grpc / blob / master / LICENSE " , <nl> + " projectUrl " : " https : / / github . com / grpc / grpc " , <nl> + " requireLicenseAcceptance " : false , <nl> + " tags " : [ " gRPC RPC Protocol HTTP / 2 Auth OAuth2 " ] , <nl> + } , <nl> + " dependencies " : { <nl> + " Grpc . Core " : " 0 . 15 . 0 - dev " , <nl> + " Google . Apis . Auth " : " 1 . 11 . 1 " <nl> + } , <nl> + " frameworks " : { <nl> + " net45 " : { } , <nl> + " netstandard1 . 5 " : { <nl> + " imports " : [ <nl> + " net45 " <nl> + ] , <nl> + " dependencies " : { <nl> + " Microsoft . NETCore . Portable . Compatibility " : " 1 . 0 . 1 - rc2 - 24027 " , <nl> + " NETStandard . Library " : " 1 . 5 . 0 - rc2 - 24027 " , <nl> + " System . Threading . Tasks " : " 4 . 0 . 11 - rc2 - 24027 " <nl> + } <nl> + } <nl> + } <nl> + } <nl> mmm a / src / csharp / Grpc . Core . Tests / AppDomainUnloadTest . cs <nl> ppp b / src / csharp / Grpc . Core . Tests / AppDomainUnloadTest . cs <nl> <nl> # endregion <nl> <nl> using System ; <nl> - using System . Diagnostics ; <nl> - using System . Linq ; <nl> - using System . Reflection ; <nl> - using System . Threading ; <nl> using System . Threading . Tasks ; <nl> - using Grpc . Core ; <nl> - using Grpc . Core . Internal ; <nl> using Grpc . Core . Utils ; <nl> using NUnit . Framework ; <nl> <nl> namespace Grpc . Core . Tests <nl> { <nl> public class AppDomainUnloadTest <nl> { <nl> + # if NETSTANDARD1_5 <nl> + [ Test ] <nl> + [ Ignore ( " Not supported for CoreCLR " ) ] <nl> + public void AppDomainUnloadHookCanCleanupAbandonedCall ( ) <nl> + { <nl> + } <nl> + # else <nl> [ Test ] <nl> public void AppDomainUnloadHookCanCleanupAbandonedCall ( ) <nl> { <nl> public AppDomainTestClass ( ) <nl> readyToShutdown . Task . Wait ( ) ; / / make sure handler is running <nl> } <nl> } <nl> + # endif <nl> } <nl> } <nl> mmm a / src / csharp / Grpc . Core . Tests / CompressionTest . cs <nl> ppp b / src / csharp / Grpc . Core . Tests / CompressionTest . cs <nl> <nl> using System ; <nl> using System . Diagnostics ; <nl> using System . Linq ; <nl> + using System . Text ; <nl> using System . Threading ; <nl> using System . Threading . Tasks ; <nl> using Grpc . Core ; <nl> public async Task WriteOptions_DuplexStreaming ( ) <nl> <nl> await call . ResponseStream . ToListAsync ( ) ; <nl> } <nl> + <nl> + [ Test ] <nl> + public void CanReadCompressedMessages ( ) <nl> + { <nl> + var compressionMetadata = new Metadata <nl> + { <nl> + { new Metadata . Entry ( Metadata . CompressionRequestAlgorithmMetadataKey , " gzip " ) } <nl> + } ; <nl> + <nl> + helper . 
UnaryHandler = new UnaryServerMethod < string , string > ( async ( req , context ) = > <nl> + { <nl> + await context . WriteResponseHeadersAsync ( compressionMetadata ) ; <nl> + return req ; <nl> + } ) ; <nl> + <nl> + var stringBuilder = new StringBuilder ( ) ; <nl> + for ( int i = 0 ; i < 200000 ; i + + ) <nl> + { <nl> + stringBuilder . Append ( ' a ' ) ; <nl> + } <nl> + var request = stringBuilder . ToString ( ) ; <nl> + var response = Calls . BlockingUnaryCall ( helper . CreateUnaryCall ( new CallOptions ( compressionMetadata ) ) , request ) ; <nl> + <nl> + Assert . AreEqual ( request , response ) ; <nl> + } <nl> } <nl> } <nl> mmm a / src / csharp / Grpc . Core . Tests / Grpc . Core . Tests . csproj <nl> ppp b / src / csharp / Grpc . Core . Tests / Grpc . Core . Tests . csproj <nl> <nl> < / ProjectReference > <nl> < / ItemGroup > <nl> < ItemGroup > <nl> + < None Include = " Grpc . Core . Tests . project . json " / > <nl> < None Include = " packages . config " > <nl> < SubType > Designer < / SubType > <nl> < / None > <nl> new file mode 100644 <nl> index 00000000000 . . c2f5bcb1637 <nl> mmm / dev / null <nl> ppp b / src / csharp / Grpc . Core . Tests / Grpc . Core . Tests . project . json <nl> <nl> + { <nl> + " frameworks " : { <nl> + " net45 " : { } <nl> + } , <nl> + " runtimes " : { <nl> + " win " : { } <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 05823291542 <nl> mmm / dev / null <nl> ppp b / src / csharp / Grpc . Core . Tests / Grpc . Core . Tests . xproj <nl> <nl> + < ? xml version = " 1 . 0 " encoding = " utf - 8 " ? > <nl> + < Project ToolsVersion = " 14 . 0 . 25123 " DefaultTargets = " Build " xmlns = " http : / / schemas . microsoft . com / developer / msbuild / 2003 " > <nl> + < PropertyGroup > <nl> + < VisualStudioVersion Condition = " ' $ ( VisualStudioVersion ) ' = = ' ' " > 14 . 0 . 25123 < / VisualStudioVersion > <nl> + < VSToolsPath Condition = " ' $ ( VSToolsPath ) ' = = ' ' " > $ ( MSBuildExtensionsPath32 ) \ Microsoft \ VisualStudio \ v $ ( VisualStudioVersion ) < / VSToolsPath > <nl> + < / PropertyGroup > <nl> + < Import Project = " $ ( VSToolsPath ) \ DNX \ Microsoft . DNX . Props " Condition = " ' $ ( VSToolsPath ) ' ! = ' ' " / > <nl> + < PropertyGroup Label = " Globals " > <nl> + < ProjectGuid > 759e23b2 - fc04 - 4695 - 902d - b073cded3599 < / ProjectGuid > <nl> + < RootNamespace > Grpc . Core . Tests < / RootNamespace > <nl> + < BaseIntermediateOutputPath Condition = " ' $ ( BaseIntermediateOutputPath ) ' = = ' ' " > . . \ artifacts \ obj \ $ ( MSBuildProjectName ) < / BaseIntermediateOutputPath > <nl> + < OutputPath Condition = " ' $ ( OutputPath ) ' = = ' ' " > . \ bin \ < / OutputPath > <nl> + < / PropertyGroup > <nl> + < PropertyGroup > <nl> + < SchemaVersion > 2 . 0 < / SchemaVersion > <nl> + < / PropertyGroup > <nl> + < Import Project = " $ ( VSToolsPath ) \ DNX \ Microsoft . DNX . targets " Condition = " ' $ ( VSToolsPath ) ' ! = ' ' " / > <nl> + < / Project > <nl> \ No newline at end of file <nl> mmm a / src / csharp / Grpc . Core . Tests / NUnitMain . cs <nl> ppp b / src / csharp / Grpc . Core . Tests / NUnitMain . cs <nl> public static int Main ( string [ ] args ) <nl> { <nl> / / Make logger immune to NUnit capturing stdout and stderr to workaround https : / / github . com / nunit / nunit / issues / 1406 . <nl> GrpcEnvironment . SetLogger ( new TextWriterLogger ( Console . Error ) ) ; <nl> - # if DOTNET5_4 <nl> + # if NETSTANDARD1_5 <nl> return new AutoRun ( typeof ( NUnitMain ) . GetTypeInfo ( ) . Assembly ) . 
Execute ( args , new ExtendedTextWrapper ( Console . Out ) , Console . In ) ; <nl> # else <nl> return new AutoRun ( ) . Execute ( args ) ; <nl> mmm a / src / csharp / Grpc . Core . Tests / NUnitVersionTest . cs <nl> ppp b / src / csharp / Grpc . Core . Tests / NUnitVersionTest . cs <nl> public void Cleanup ( ) <nl> Console . Error . WriteLine ( " You are using and old version of NUnit that doesn ' t support async tests and skips them instead . " + <nl> " This test has failed to indicate that . " ) ; <nl> Console . Error . Flush ( ) ; <nl> - Environment . Exit ( 1 ) ; <nl> + throw new Exception ( " NUnitVersionTest has failed . " ) ; <nl> } <nl> } <nl> <nl> mmm a / src / csharp / Grpc . Core . Tests / SanityTest . cs <nl> ppp b / src / csharp / Grpc . Core . Tests / SanityTest . cs <nl> namespace Grpc . Core . Tests <nl> { <nl> public class SanityTest <nl> { <nl> + / / TODO : make sanity test work for CoreCLR as well <nl> + # if ! NETSTANDARD1_5 <nl> / / / < summary > <nl> / / / Because we depend on a native library , sometimes when things go wrong , the <nl> / / / entire NUnit test process crashes . To be able to track down problems better , <nl> private List < Assembly > GetTestAssemblies ( ) <nl> } <nl> return result ; <nl> } <nl> + # endif <nl> } <nl> } <nl> new file mode 100644 <nl> index 00000000000 . . 3ad081df39e <nl> mmm / dev / null <nl> ppp b / src / csharp / Grpc . Core . Tests / project . json <nl> <nl> + { <nl> + " buildOptions " : { <nl> + " emitEntryPoint " : true <nl> + } , <nl> + " configurations " : { <nl> + " Debug " : { <nl> + " buildOptions " : { <nl> + " copyToOutput " : { <nl> + " mappings " : { <nl> + " nativelibs / windows_x64 / grpc_csharp_ext . dll " : " . . / . . / . . / vsprojects / x64 / Debug / grpc_csharp_ext . dll " , <nl> + " nativelibs / windows_x86 / grpc_csharp_ext . dll " : " . . / . . / . . / vsprojects / Debug / grpc_csharp_ext . dll " , <nl> + " nativelibs / linux_x64 / libgrpc_csharp_ext . so " : " . . / . . / . . / libs / dbg / libgrpc_csharp_ext . so " , <nl> + " nativelibs / macosx_x64 / libgrpc_csharp_ext . dylib " : " . . / . . / . . / libs / dbg / libgrpc_csharp_ext . dylib " <nl> + } <nl> + } <nl> + } <nl> + } , <nl> + " Release " : { <nl> + " buildOptions " : { <nl> + " copyToOutput " : { <nl> + " mappings " : { <nl> + " nativelibs / windows_x64 / grpc_csharp_ext . dll " : " . . / . . / . . / vsprojects / x64 / Release / grpc_csharp_ext . dll " , <nl> + " nativelibs / windows_x86 / grpc_csharp_ext . dll " : " . . / . . / . . / vsprojects / Release / grpc_csharp_ext . dll " , <nl> + " nativelibs / linux_x64 / libgrpc_csharp_ext . so " : " . . / . . / . . / libs / opt / libgrpc_csharp_ext . so " , <nl> + " nativelibs / macosx_x64 / libgrpc_csharp_ext . dylib " : " . . / . . / . . / libs / opt / libgrpc_csharp_ext . dylib " <nl> + } <nl> + } <nl> + } <nl> + } <nl> + } , <nl> + " runtimes " : { <nl> + " win7 - x64 " : { } , <nl> + " debian . 8 - x64 " : { } , <nl> + " osx . 10 . 11 - x64 " : { } <nl> + } , <nl> + <nl> + " dependencies " : { <nl> + " Grpc . Core " : { <nl> + " target " : " project " <nl> + } , <nl> + " Newtonsoft . Json " : " 8 . 0 . 3 " , <nl> + " NUnit " : " 3 . 2 . 0 " , <nl> + " NUnitLite " : " 3 . 2 . 0 - * " <nl> + } , <nl> + " frameworks " : { <nl> + " net45 " : { } , <nl> + " netstandard1 . 5 " : { <nl> + " imports " : [ <nl> + " portable - net45 " <nl> + ] , <nl> + " dependencies " : { <nl> + " NETStandard . Library " : " 1 . 5 . 0 - rc2 - 24027 " <nl> + } <nl> + } <nl> + } , <nl> + } <nl> mmm a / src / csharp / Grpc . 
Core / Grpc . Core . csproj <nl> ppp b / src / csharp / Grpc . Core / Grpc . Core . csproj <nl> <nl> < / ItemGroup > <nl> < ItemGroup > <nl> < None Include = " Grpc . Core . nuspec " / > <nl> + < None Include = " Grpc . Core . project . json " / > <nl> < None Include = " packages . config " / > <nl> < / ItemGroup > <nl> < Import Project = " NativeDeps . targets " / > <nl> <nl> < ItemGroup / > <nl> < ItemGroup > <nl> < EmbeddedResource Include = " . . \ . . \ . . \ etc \ roots . pem " > <nl> - < Link > Resources \ roots . pem < / Link > <nl> + < Link > roots . pem < / Link > <nl> < / EmbeddedResource > <nl> < / ItemGroup > <nl> < / Project > <nl> \ No newline at end of file <nl> mmm a / src / csharp / Grpc . Core / Grpc . Core . nuspec <nl> ppp b / src / csharp / Grpc . Core / Grpc . Core . nuspec <nl> <nl> < file src = " bin / ReleaseSigned / Grpc . Core . xml " target = " lib / net45 " / > <nl> < file src = " * * \ * . cs " target = " src " / > <nl> < file src = " Grpc . Core . targets " target = " \ build \ net45 \ Grpc . Core . targets " / > <nl> - < file src = " windows_x86 / grpc_csharp_ext . dll " target = " / build / native / bin / windows_x86 / grpc_csharp_ext . dll " / > <nl> - < file src = " windows_x64 / grpc_csharp_ext . dll " target = " / build / native / bin / windows_x64 / grpc_csharp_ext . dll " / > <nl> - < file src = " linux_x86 / libgrpc_csharp_ext . so " target = " / build / native / bin / linux_x86 / libgrpc_csharp_ext . so " / > <nl> - < file src = " linux_x64 / libgrpc_csharp_ext . so " target = " / build / native / bin / linux_x64 / libgrpc_csharp_ext . so " / > <nl> - < file src = " macosx_x86 / libgrpc_csharp_ext . dylib " target = " / build / native / bin / macosx_x86 / libgrpc_csharp_ext . dylib " / > <nl> - < file src = " macosx_x64 / libgrpc_csharp_ext . dylib " target = " / build / native / bin / macosx_x64 / libgrpc_csharp_ext . dylib " / > <nl> + < file src = " . . / nativelibs / windows_x86 / grpc_csharp_ext . dll " target = " / build / native / bin / windows_x86 / grpc_csharp_ext . dll " / > <nl> + < file src = " . . / nativelibs / windows_x64 / grpc_csharp_ext . dll " target = " / build / native / bin / windows_x64 / grpc_csharp_ext . dll " / > <nl> + < file src = " . . / nativelibs / linux_x86 / libgrpc_csharp_ext . so " target = " / build / native / bin / linux_x86 / libgrpc_csharp_ext . so " / > <nl> + < file src = " . . / nativelibs / linux_x64 / libgrpc_csharp_ext . so " target = " / build / native / bin / linux_x64 / libgrpc_csharp_ext . so " / > <nl> + < file src = " . . / nativelibs / macosx_x86 / libgrpc_csharp_ext . dylib " target = " / build / native / bin / macosx_x86 / libgrpc_csharp_ext . dylib " / > <nl> + < file src = " . . / nativelibs / macosx_x64 / libgrpc_csharp_ext . dylib " target = " / build / native / bin / macosx_x64 / libgrpc_csharp_ext . dylib " / > <nl> < / files > <nl> < / package > <nl> new file mode 100644 <nl> index 00000000000 . . c2f5bcb1637 <nl> mmm / dev / null <nl> ppp b / src / csharp / Grpc . Core / Grpc . Core . project . json <nl> <nl> + { <nl> + " frameworks " : { <nl> + " net45 " : { } <nl> + } , <nl> + " runtimes " : { <nl> + " win " : { } <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 137236ffdb6 <nl> mmm / dev / null <nl> ppp b / src / csharp / Grpc . Core / Grpc . Core . xproj <nl> <nl> + < ? xml version = " 1 . 0 " encoding = " utf - 8 " ? > <nl> + < Project ToolsVersion = " 14 . 0 " DefaultTargets = " Build " xmlns = " http : / / schemas . microsoft . 
com / developer / msbuild / 2003 " > <nl> + < PropertyGroup > <nl> + < VisualStudioVersion Condition = " ' $ ( VisualStudioVersion ) ' = = ' ' " > 14 . 0 < / VisualStudioVersion > <nl> + < VSToolsPath Condition = " ' $ ( VSToolsPath ) ' = = ' ' " > $ ( MSBuildExtensionsPath32 ) \ Microsoft \ VisualStudio \ v $ ( VisualStudioVersion ) < / VSToolsPath > <nl> + < / PropertyGroup > <nl> + < Import Project = " $ ( VSToolsPath ) \ DNX \ Microsoft . DNX . Props " Condition = " ' $ ( VSToolsPath ) ' ! = ' ' " / > <nl> + < PropertyGroup Label = " Globals " > <nl> + < ProjectGuid > dc9908b6 - f291 - 4fc8 - a46d - 2ea2551790ec < / ProjectGuid > <nl> + < RootNamespace > Grpc . Core < / RootNamespace > <nl> + < BaseIntermediateOutputPath Condition = " ' $ ( BaseIntermediateOutputPath ) ' = = ' ' " > . . \ artifacts \ obj \ $ ( MSBuildProjectName ) < / BaseIntermediateOutputPath > <nl> + < OutputPath Condition = " ' $ ( OutputPath ) ' = = ' ' " > . \ bin \ < / OutputPath > <nl> + < / PropertyGroup > <nl> + < PropertyGroup > <nl> + < SchemaVersion > 2 . 0 < / SchemaVersion > <nl> + < / PropertyGroup > <nl> + < Import Project = " $ ( VSToolsPath ) \ DNX \ Microsoft . DNX . targets " Condition = " ' $ ( VSToolsPath ) ' ! = ' ' " / > <nl> + < / Project > <nl> \ No newline at end of file <nl> mmm a / src / csharp / Grpc . Core / GrpcEnvironment . cs <nl> ppp b / src / csharp / Grpc . Core / GrpcEnvironment . cs <nl> internal static async Task ReleaseAsync ( ) <nl> <nl> if ( instanceToShutdown ! = null ) <nl> { <nl> - await instanceToShutdown . ShutdownAsync ( ) ; <nl> + await instanceToShutdown . ShutdownAsync ( ) . ConfigureAwait ( false ) ; <nl> } <nl> } <nl> <nl> public static void Register ( ) <nl> { <nl> if ( ! hooksRegistered ) <nl> { <nl> + / / TODO ( jtattermusch ) : register shutdownhooks for CoreCLR as well <nl> + # if ! NETSTANDARD1_5 <nl> + <nl> AppDomain . CurrentDomain . ProcessExit + = ShutdownHookHandler ; <nl> AppDomain . CurrentDomain . DomainUnload + = ShutdownHookHandler ; <nl> + # endif <nl> } <nl> hooksRegistered = true ; <nl> } <nl> mmm a / src / csharp / Grpc . Core / Internal / DefaultSslRootsOverride . cs <nl> ppp b / src / csharp / Grpc . Core / Internal / DefaultSslRootsOverride . cs <nl> namespace Grpc . Core . Internal <nl> / / / < / summary > <nl> internal static class DefaultSslRootsOverride <nl> { <nl> - const string RootsPemResourceName = " Grpc . Core . Resources . roots . pem " ; <nl> + const string RootsPemResourceName = " Grpc . Core . roots . pem " ; <nl> static object staticLock = new object ( ) ; <nl> <nl> / / / < summary > <nl> mmm a / src / csharp / Grpc . Core / Internal / NativeExtension . cs <nl> ppp b / src / csharp / Grpc . Core / Internal / NativeExtension . cs <nl> private static UnmanagedLibrary Load ( ) <nl> private static string GetAssemblyPath ( ) <nl> { <nl> var assembly = typeof ( NativeExtension ) . GetTypeInfo ( ) . Assembly ; <nl> - # if DOTNET5_4 <nl> - / / Assembly . EscapedCodeBase does not exit under CoreCLR , but assemblies imported from a nuget package <nl> + # if NETSTANDARD1_5 <nl> + / / Assembly . EscapedCodeBase does not exist under CoreCLR , but assemblies imported from a nuget package <nl> / / don ' t seem to be shadowed by DNX - based projects at all . <nl> return assembly . Location ; <nl> # else <nl> private static string GetAssemblyPath ( ) <nl> # endif <nl> } <nl> <nl> - # if ! DOTNET5_4 <nl> + # if ! NETSTANDARD1_5 <nl> private static bool IsFileUri ( string uri ) <nl> { <nl> return uri . ToLowerInvariant ( ) . StartsWith ( Uri . 
UriSchemeFile ) ; <nl> mmm a / src / csharp / Grpc . Core / Internal / PlatformApis . cs <nl> ppp b / src / csharp / Grpc . Core / Internal / PlatformApis . cs <nl> internal static class PlatformApis <nl> <nl> static PlatformApis ( ) <nl> { <nl> - # if DNXCORE50 <nl> + # if NETSTANDARD1_5 <nl> isLinux = RuntimeInformation . IsOSPlatform ( OSPlatform . Linux ) ; <nl> isMacOSX = RuntimeInformation . IsOSPlatform ( OSPlatform . OSX ) ; <nl> isWindows = RuntimeInformation . IsOSPlatform ( OSPlatform . Windows ) ; <nl> mmm a / src / csharp / Grpc . Core / Internal / SafeHandleZeroIsInvalid . cs <nl> ppp b / src / csharp / Grpc . Core / Internal / SafeHandleZeroIsInvalid . cs <nl> namespace Grpc . Core . Internal <nl> / / / < summary > <nl> / / / Safe handle to wrap native objects . <nl> / / / < / summary > <nl> - internal abstract class SafeHandleZeroIsInvalid : SafeHandle <nl> + internal abstract class SafeHandleZeroIsInvalid : System . Runtime . InteropServices . SafeHandle <nl> { <nl> public SafeHandleZeroIsInvalid ( ) : base ( IntPtr . Zero , true ) <nl> { <nl> mmm a / src / csharp / Grpc . Core / Metadata . cs <nl> ppp b / src / csharp / Grpc . Core / Metadata . cs <nl> public sealed class Metadata : IList < Metadata . Entry > <nl> / / / < / summary > <nl> public static readonly Metadata Empty = new Metadata ( ) . Freeze ( ) ; <nl> <nl> + / / / < summary > <nl> + / / / To be used in initial metadata to request specific compression algorithm <nl> + / / / for given call . Direct selection of compression algorithms is an internal <nl> + / / / feature and is not part of public API . <nl> + / / / < / summary > <nl> + internal const string CompressionRequestAlgorithmMetadataKey = " grpc - internal - encoding - request " ; <nl> + <nl> readonly List < Entry > entries ; <nl> bool readOnly ; <nl> <nl> new file mode 100644 <nl> index 00000000000 . . 7253107e04a <nl> mmm / dev / null <nl> ppp b / src / csharp / Grpc . Core / project . json <nl> <nl> + { <nl> + " version " : " 0 . 15 . 0 - dev " , <nl> + " title " : " gRPC C # Core " , <nl> + " authors " : [ " Google Inc . " ] , <nl> + " copyright " : " Copyright 2015 , Google Inc . " , <nl> + " packOptions " : { <nl> + " summary " : " Core C # implementation of gRPC - an RPC library and framework " , <nl> + " description " : " Core C # implementation of gRPC - an RPC library and framework . See project site for more info . " , <nl> + " owners " : [ " grpc - packages " ] , <nl> + " licenseUrl " : " https : / / github . com / grpc / grpc / blob / master / LICENSE " , <nl> + " projectUrl " : " https : / / github . com / grpc / grpc " , <nl> + " requireLicenseAcceptance " : false , <nl> + " tags " : [ " gRPC RPC Protocol HTTP / 2 " ] , <nl> + " files " : { <nl> + " build / net45 / " : " Grpc . Core . targets " , <nl> + " build / native / bin / windows_x86 / " : " . . / nativelibs / windows_x86 / grpc_csharp_ext . dll " , <nl> + " build / native / bin / windows_x64 / " : " . . / nativelibs / windows_x64 / grpc_csharp_ext . dll " , <nl> + " build / native / bin / linux_x86 / " : " . . / nativelibs / linux_x86 / libgrpc_csharp_ext . so " , <nl> + " build / native / bin / linux_x64 / " : " . . / nativelibs / linux_x64 / libgrpc_csharp_ext . so " , <nl> + " build / native / bin / macosx_x86 / " : " . . / nativelibs / macosx_x86 / libgrpc_csharp_ext . dylib " , <nl> + " build / native / bin / macosx_x64 / " : " . . / nativelibs / macosx_x64 / libgrpc_csharp_ext . dylib " <nl> + } <nl> + } , <nl> + " buildOptions " : { <nl> + " embed " : [ " . . / . . / . . 
/ etc / roots . pem " ] <nl> + } , <nl> + " dependencies " : { <nl> + " Ix - Async " : " 1 . 2 . 5 " <nl> + } , <nl> + " frameworks " : { <nl> + " net45 " : { } , <nl> + " netstandard1 . 5 " : { <nl> + " imports " : [ <nl> + " portable - net45 " <nl> + ] , <nl> + " dependencies " : { <nl> + " NETStandard . Library " : " 1 . 5 . 0 - rc2 - 24027 " , <nl> + " System . Threading . Thread " : " 4 . 0 . 0 - rc2 - 24027 " <nl> + } <nl> + } <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 98b3cd54abb <nl> mmm / dev / null <nl> ppp b / src / csharp / Grpc . Dotnet . sln <nl> <nl> + <nl> + Microsoft Visual Studio Solution File , Format Version 12 . 00 <nl> + # Visual Studio 14 <nl> + VisualStudioVersion = 14 . 0 . 25123 . 0 <nl> + MinimumVisualStudioVersion = 10 . 0 . 40219 . 1 <nl> + Project ( " { 8BB2217D - 0F2D - 49D1 - 97BC - 3654ED321F3B } " ) = " Grpc . Core " , " Grpc . Core \ Grpc . Core . xproj " , " { DC9908B6 - F291 - 4FC8 - A46D - 2EA2551790EC } " <nl> + EndProject <nl> + Project ( " { 8BB2217D - 0F2D - 49D1 - 97BC - 3654ED321F3B } " ) = " Grpc . Auth " , " Grpc . Auth \ Grpc . Auth . xproj " , " { C82631ED - 06D1 - 4458 - 87BC - 8257D12307A8 } " <nl> + EndProject <nl> + Project ( " { 8BB2217D - 0F2D - 49D1 - 97BC - 3654ED321F3B } " ) = " Grpc . Core . Tests " , " Grpc . Core . Tests \ Grpc . Core . Tests . xproj " , " { 759E23B2 - FC04 - 4695 - 902D - B073CDED3599 } " <nl> + EndProject <nl> + Project ( " { 8BB2217D - 0F2D - 49D1 - 97BC - 3654ED321F3B } " ) = " Grpc . Examples " , " Grpc . Examples \ Grpc . Examples . xproj " , " { C77B792D - FC78 - 4CE2 - 9522 - B40B0803C636 } " <nl> + EndProject <nl> + Project ( " { 8BB2217D - 0F2D - 49D1 - 97BC - 3654ED321F3B } " ) = " Grpc . Examples . MathClient " , " Grpc . Examples . MathClient \ Grpc . Examples . MathClient . xproj " , " { FD48DECA - 1622 - 4173 - B1D9 - 2101CF5E7C5F } " <nl> + EndProject <nl> + Project ( " { 8BB2217D - 0F2D - 49D1 - 97BC - 3654ED321F3B } " ) = " Grpc . Examples . MathServer " , " Grpc . Examples . MathServer \ Grpc . Examples . MathServer . xproj " , " { 58579368 - 5372 - 4E67 - ACD6 - 9B59CB9FA698 } " <nl> + EndProject <nl> + Project ( " { 8BB2217D - 0F2D - 49D1 - 97BC - 3654ED321F3B } " ) = " Grpc . Examples . Tests " , " Grpc . Examples . Tests \ Grpc . Examples . Tests . xproj " , " { C61714A6 - F633 - 44FB - 97F4 - C91F425C1D15 } " <nl> + EndProject <nl> + Project ( " { 8BB2217D - 0F2D - 49D1 - 97BC - 3654ED321F3B } " ) = " Grpc . HealthCheck " , " Grpc . HealthCheck \ Grpc . HealthCheck . xproj " , " { 3BE4AD0B - 2BF0 - 4D68 - B625 - F6018EF0DCFA } " <nl> + EndProject <nl> + Project ( " { 8BB2217D - 0F2D - 49D1 - 97BC - 3654ED321F3B } " ) = " Grpc . HealthCheck . Tests " , " Grpc . HealthCheck . Tests \ Grpc . HealthCheck . Tests . xproj " , " { 43DAFAC6 - 5343 - 4621 - 960E - A8A977EA3F0B } " <nl> + EndProject <nl> + Project ( " { 8BB2217D - 0F2D - 49D1 - 97BC - 3654ED321F3B } " ) = " Grpc . IntegrationTesting " , " Grpc . IntegrationTesting \ Grpc . IntegrationTesting . xproj " , " { 20354386 - 3E71 - 4046 - A269 - 3BC2A06F3EC8 } " <nl> + EndProject <nl> + Project ( " { 8BB2217D - 0F2D - 49D1 - 97BC - 3654ED321F3B } " ) = " Grpc . IntegrationTesting . Client " , " Grpc . IntegrationTesting . Client \ Grpc . IntegrationTesting . Client . xproj " , " { 48EA5BBE - 70E2 - 4198 - 869D - D7E59C45F30D } " <nl> + EndProject <nl> + Project ( " { 8BB2217D - 0F2D - 49D1 - 97BC - 3654ED321F3B } " ) = " Grpc . IntegrationTesting . QpsWorker " , " Grpc . IntegrationTesting . QpsWorker \ Grpc . 
IntegrationTesting . QpsWorker . xproj " , " { 661B70D7 - F56A - 46E0 - 9B81 - 6227B591B5E7 } " <nl> + EndProject <nl> + Project ( " { 8BB2217D - 0F2D - 49D1 - 97BC - 3654ED321F3B } " ) = " Grpc . IntegrationTesting . Server " , " Grpc . IntegrationTesting . Server \ Grpc . IntegrationTesting . Server . xproj " , " { 881F7AD1 - A84E - 47A2 - 9402 - 115C63C4031E } " <nl> + EndProject <nl> + Project ( " { 8BB2217D - 0F2D - 49D1 - 97BC - 3654ED321F3B } " ) = " Grpc . IntegrationTesting . StressClient " , " Grpc . IntegrationTesting . StressClient \ Grpc . IntegrationTesting . StressClient . xproj " , " { 0EBC910B - 8867 - 4D3E - 8686 - 91F34183D839 } " <nl> + EndProject <nl> + Global <nl> + GlobalSection ( SolutionConfigurationPlatforms ) = preSolution <nl> + Debug | Any CPU = Debug | Any CPU <nl> + Release | Any CPU = Release | Any CPU <nl> + EndGlobalSection <nl> + GlobalSection ( ProjectConfigurationPlatforms ) = postSolution <nl> + { DC9908B6 - F291 - 4FC8 - A46D - 2EA2551790EC } . Debug | Any CPU . ActiveCfg = Debug | Any CPU <nl> + { DC9908B6 - F291 - 4FC8 - A46D - 2EA2551790EC } . Debug | Any CPU . Build . 0 = Debug | Any CPU <nl> + { DC9908B6 - F291 - 4FC8 - A46D - 2EA2551790EC } . Release | Any CPU . ActiveCfg = Release | Any CPU <nl> + { DC9908B6 - F291 - 4FC8 - A46D - 2EA2551790EC } . Release | Any CPU . Build . 0 = Release | Any CPU <nl> + { C82631ED - 06D1 - 4458 - 87BC - 8257D12307A8 } . Debug | Any CPU . ActiveCfg = Debug | Any CPU <nl> + { C82631ED - 06D1 - 4458 - 87BC - 8257D12307A8 } . Debug | Any CPU . Build . 0 = Debug | Any CPU <nl> + { C82631ED - 06D1 - 4458 - 87BC - 8257D12307A8 } . Release | Any CPU . ActiveCfg = Release | Any CPU <nl> + { C82631ED - 06D1 - 4458 - 87BC - 8257D12307A8 } . Release | Any CPU . Build . 0 = Release | Any CPU <nl> + { 759E23B2 - FC04 - 4695 - 902D - B073CDED3599 } . Debug | Any CPU . ActiveCfg = Debug | Any CPU <nl> + { 759E23B2 - FC04 - 4695 - 902D - B073CDED3599 } . Debug | Any CPU . Build . 0 = Debug | Any CPU <nl> + { 759E23B2 - FC04 - 4695 - 902D - B073CDED3599 } . Release | Any CPU . ActiveCfg = Release | Any CPU <nl> + { 759E23B2 - FC04 - 4695 - 902D - B073CDED3599 } . Release | Any CPU . Build . 0 = Release | Any CPU <nl> + { C77B792D - FC78 - 4CE2 - 9522 - B40B0803C636 } . Debug | Any CPU . ActiveCfg = Debug | Any CPU <nl> + { C77B792D - FC78 - 4CE2 - 9522 - B40B0803C636 } . Debug | Any CPU . Build . 0 = Debug | Any CPU <nl> + { C77B792D - FC78 - 4CE2 - 9522 - B40B0803C636 } . Release | Any CPU . ActiveCfg = Release | Any CPU <nl> + { C77B792D - FC78 - 4CE2 - 9522 - B40B0803C636 } . Release | Any CPU . Build . 0 = Release | Any CPU <nl> + { FD48DECA - 1622 - 4173 - B1D9 - 2101CF5E7C5F } . Debug | Any CPU . ActiveCfg = Debug | Any CPU <nl> + { FD48DECA - 1622 - 4173 - B1D9 - 2101CF5E7C5F } . Debug | Any CPU . Build . 0 = Debug | Any CPU <nl> + { FD48DECA - 1622 - 4173 - B1D9 - 2101CF5E7C5F } . Release | Any CPU . ActiveCfg = Release | Any CPU <nl> + { FD48DECA - 1622 - 4173 - B1D9 - 2101CF5E7C5F } . Release | Any CPU . Build . 0 = Release | Any CPU <nl> + { 58579368 - 5372 - 4E67 - ACD6 - 9B59CB9FA698 } . Debug | Any CPU . ActiveCfg = Debug | Any CPU <nl> + { 58579368 - 5372 - 4E67 - ACD6 - 9B59CB9FA698 } . Debug | Any CPU . Build . 0 = Debug | Any CPU <nl> + { 58579368 - 5372 - 4E67 - ACD6 - 9B59CB9FA698 } . Release | Any CPU . ActiveCfg = Release | Any CPU <nl> + { 58579368 - 5372 - 4E67 - ACD6 - 9B59CB9FA698 } . Release | Any CPU . Build . 0 = Release | Any CPU <nl> + { C61714A6 - F633 - 44FB - 97F4 - C91F425C1D15 } . 
Debug | Any CPU . ActiveCfg = Debug | Any CPU <nl> + { C61714A6 - F633 - 44FB - 97F4 - C91F425C1D15 } . Debug | Any CPU . Build . 0 = Debug | Any CPU <nl> + { C61714A6 - F633 - 44FB - 97F4 - C91F425C1D15 } . Release | Any CPU . ActiveCfg = Release | Any CPU <nl> + { C61714A6 - F633 - 44FB - 97F4 - C91F425C1D15 } . Release | Any CPU . Build . 0 = Release | Any CPU <nl> + { 3BE4AD0B - 2BF0 - 4D68 - B625 - F6018EF0DCFA } . Debug | Any CPU . ActiveCfg = Debug | Any CPU <nl> + { 3BE4AD0B - 2BF0 - 4D68 - B625 - F6018EF0DCFA } . Debug | Any CPU . Build . 0 = Debug | Any CPU <nl> + { 3BE4AD0B - 2BF0 - 4D68 - B625 - F6018EF0DCFA } . Release | Any CPU . ActiveCfg = Release | Any CPU <nl> + { 3BE4AD0B - 2BF0 - 4D68 - B625 - F6018EF0DCFA } . Release | Any CPU . Build . 0 = Release | Any CPU <nl> + { 43DAFAC6 - 5343 - 4621 - 960E - A8A977EA3F0B } . Debug | Any CPU . ActiveCfg = Debug | Any CPU <nl> + { 43DAFAC6 - 5343 - 4621 - 960E - A8A977EA3F0B } . Debug | Any CPU . Build . 0 = Debug | Any CPU <nl> + { 43DAFAC6 - 5343 - 4621 - 960E - A8A977EA3F0B } . Release | Any CPU . ActiveCfg = Release | Any CPU <nl> + { 43DAFAC6 - 5343 - 4621 - 960E - A8A977EA3F0B } . Release | Any CPU . Build . 0 = Release | Any CPU <nl> + { 20354386 - 3E71 - 4046 - A269 - 3BC2A06F3EC8 } . Debug | Any CPU . ActiveCfg = Debug | Any CPU <nl> + { 20354386 - 3E71 - 4046 - A269 - 3BC2A06F3EC8 } . Debug | Any CPU . Build . 0 = Debug | Any CPU <nl> + { 20354386 - 3E71 - 4046 - A269 - 3BC2A06F3EC8 } . Release | Any CPU . ActiveCfg = Release | Any CPU <nl> + { 20354386 - 3E71 - 4046 - A269 - 3BC2A06F3EC8 } . Release | Any CPU . Build . 0 = Release | Any CPU <nl> + { 48EA5BBE - 70E2 - 4198 - 869D - D7E59C45F30D } . Debug | Any CPU . ActiveCfg = Debug | Any CPU <nl> + { 48EA5BBE - 70E2 - 4198 - 869D - D7E59C45F30D } . Debug | Any CPU . Build . 0 = Debug | Any CPU <nl> + { 48EA5BBE - 70E2 - 4198 - 869D - D7E59C45F30D } . Release | Any CPU . ActiveCfg = Release | Any CPU <nl> + { 48EA5BBE - 70E2 - 4198 - 869D - D7E59C45F30D } . Release | Any CPU . Build . 0 = Release | Any CPU <nl> + { 661B70D7 - F56A - 46E0 - 9B81 - 6227B591B5E7 } . Debug | Any CPU . ActiveCfg = Debug | Any CPU <nl> + { 661B70D7 - F56A - 46E0 - 9B81 - 6227B591B5E7 } . Debug | Any CPU . Build . 0 = Debug | Any CPU <nl> + { 661B70D7 - F56A - 46E0 - 9B81 - 6227B591B5E7 } . Release | Any CPU . ActiveCfg = Release | Any CPU <nl> + { 661B70D7 - F56A - 46E0 - 9B81 - 6227B591B5E7 } . Release | Any CPU . Build . 0 = Release | Any CPU <nl> + { 881F7AD1 - A84E - 47A2 - 9402 - 115C63C4031E } . Debug | Any CPU . ActiveCfg = Debug | Any CPU <nl> + { 881F7AD1 - A84E - 47A2 - 9402 - 115C63C4031E } . Debug | Any CPU . Build . 0 = Debug | Any CPU <nl> + { 881F7AD1 - A84E - 47A2 - 9402 - 115C63C4031E } . Release | Any CPU . ActiveCfg = Release | Any CPU <nl> + { 881F7AD1 - A84E - 47A2 - 9402 - 115C63C4031E } . Release | Any CPU . Build . 0 = Release | Any CPU <nl> + { 0EBC910B - 8867 - 4D3E - 8686 - 91F34183D839 } . Debug | Any CPU . ActiveCfg = Debug | Any CPU <nl> + { 0EBC910B - 8867 - 4D3E - 8686 - 91F34183D839 } . Debug | Any CPU . Build . 0 = Debug | Any CPU <nl> + { 0EBC910B - 8867 - 4D3E - 8686 - 91F34183D839 } . Release | Any CPU . ActiveCfg = Release | Any CPU <nl> + { 0EBC910B - 8867 - 4D3E - 8686 - 91F34183D839 } . Release | Any CPU . Build . 0 = Release | Any CPU <nl> + EndGlobalSection <nl> + GlobalSection ( SolutionProperties ) = preSolution <nl> + HideSolutionNode = FALSE <nl> + EndGlobalSection <nl> + EndGlobal <nl> mmm a / src / csharp / Grpc . Examples . MathClient / Grpc . 
Examples . MathClient . csproj <nl> ppp b / src / csharp / Grpc . Examples . MathClient / Grpc . Examples . MathClient . csproj <nl> <nl> < Name > Grpc . Examples < / Name > <nl> < / ProjectReference > <nl> < / ItemGroup > <nl> + < ItemGroup > <nl> + < None Include = " Grpc . Examples . MathClient . project . json " / > <nl> + < / ItemGroup > <nl> < / Project > <nl> \ No newline at end of file <nl> new file mode 100644 <nl> index 00000000000 . . c2f5bcb1637 <nl> mmm / dev / null <nl> ppp b / src / csharp / Grpc . Examples . MathClient / Grpc . Examples . MathClient . project . json <nl> <nl> + { <nl> + " frameworks " : { <nl> + " net45 " : { } <nl> + } , <nl> + " runtimes " : { <nl> + " win " : { } <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 4655bd43774 <nl> mmm / dev / null <nl> ppp b / src / csharp / Grpc . Examples . MathClient / Grpc . Examples . MathClient . xproj <nl> <nl> + < ? xml version = " 1 . 0 " encoding = " utf - 8 " ? > <nl> + < Project ToolsVersion = " 14 . 0 . 25123 " DefaultTargets = " Build " xmlns = " http : / / schemas . microsoft . com / developer / msbuild / 2003 " > <nl> + < PropertyGroup > <nl> + < VisualStudioVersion Condition = " ' $ ( VisualStudioVersion ) ' = = ' ' " > 14 . 0 . 25123 < / VisualStudioVersion > <nl> + < VSToolsPath Condition = " ' $ ( VSToolsPath ) ' = = ' ' " > $ ( MSBuildExtensionsPath32 ) \ Microsoft \ VisualStudio \ v $ ( VisualStudioVersion ) < / VSToolsPath > <nl> + < / PropertyGroup > <nl> + < Import Project = " $ ( VSToolsPath ) \ DNX \ Microsoft . DNX . Props " Condition = " ' $ ( VSToolsPath ) ' ! = ' ' " / > <nl> + < PropertyGroup Label = " Globals " > <nl> + < ProjectGuid > fd48deca - 1622 - 4173 - b1d9 - 2101cf5e7c5f < / ProjectGuid > <nl> + < RootNamespace > Grpc . Examples . MathClient < / RootNamespace > <nl> + < BaseIntermediateOutputPath Condition = " ' $ ( BaseIntermediateOutputPath ) ' = = ' ' " > . . \ artifacts \ obj \ $ ( MSBuildProjectName ) < / BaseIntermediateOutputPath > <nl> + < OutputPath Condition = " ' $ ( OutputPath ) ' = = ' ' " > . \ bin \ < / OutputPath > <nl> + < / PropertyGroup > <nl> + < PropertyGroup > <nl> + < SchemaVersion > 2 . 0 < / SchemaVersion > <nl> + < / PropertyGroup > <nl> + < Import Project = " $ ( VSToolsPath ) \ DNX \ Microsoft . DNX . targets " Condition = " ' $ ( VSToolsPath ) ' ! = ' ' " / > <nl> + < / Project > <nl> \ No newline at end of file <nl> new file mode 100644 <nl> index 00000000000 . . b254f15af87 <nl> mmm / dev / null <nl> ppp b / src / csharp / Grpc . Examples . MathClient / project . json <nl> <nl> + { <nl> + " buildOptions " : { <nl> + " emitEntryPoint " : true <nl> + } , <nl> + " configurations " : { <nl> + " Debug " : { <nl> + " buildOptions " : { <nl> + " copyToOutput " : { <nl> + " mappings " : { <nl> + " nativelibs / windows_x64 / grpc_csharp_ext . dll " : " . . / . . / . . / vsprojects / x64 / Debug / grpc_csharp_ext . dll " , <nl> + " nativelibs / windows_x86 / grpc_csharp_ext . dll " : " . . / . . / . . / vsprojects / Debug / grpc_csharp_ext . dll " , <nl> + " nativelibs / linux_x64 / libgrpc_csharp_ext . so " : " . . / . . / . . / libs / dbg / libgrpc_csharp_ext . so " , <nl> + " nativelibs / macosx_x64 / libgrpc_csharp_ext . dylib " : " . . / . . / . . / libs / dbg / libgrpc_csharp_ext . dylib " <nl> + } <nl> + } <nl> + } <nl> + } , <nl> + " Release " : { <nl> + " buildOptions " : { <nl> + " copyToOutput " : { <nl> + " mappings " : { <nl> + " nativelibs / windows_x64 / grpc_csharp_ext . dll " : " . . / . . / . . 
/ vsprojects / x64 / Release / grpc_csharp_ext . dll " , <nl> + " nativelibs / windows_x86 / grpc_csharp_ext . dll " : " . . / . . / . . / vsprojects / Release / grpc_csharp_ext . dll " , <nl> + " nativelibs / linux_x64 / libgrpc_csharp_ext . so " : " . . / . . / . . / libs / opt / libgrpc_csharp_ext . so " , <nl> + " nativelibs / macosx_x64 / libgrpc_csharp_ext . dylib " : " . . / . . / . . / libs / opt / libgrpc_csharp_ext . dylib " <nl> + } <nl> + } <nl> + } <nl> + } <nl> + } , <nl> + " runtimes " : { <nl> + " win7 - x64 " : { } , <nl> + " debian . 8 - x64 " : { } , <nl> + " osx . 10 . 11 - x64 " : { } <nl> + } , <nl> + <nl> + " dependencies " : { <nl> + " Grpc . Examples " : { <nl> + " target " : " project " <nl> + } <nl> + } , <nl> + " frameworks " : { <nl> + " net45 " : { } , <nl> + " netstandard1 . 5 " : { <nl> + " imports " : [ <nl> + " portable - net45 " <nl> + ] , <nl> + " dependencies " : { <nl> + " NETStandard . Library " : " 1 . 5 . 0 - rc2 - 24027 " <nl> + } <nl> + } <nl> + } <nl> + } <nl> mmm a / src / csharp / Grpc . Examples . MathServer / Grpc . Examples . MathServer . csproj <nl> ppp b / src / csharp / Grpc . Examples . MathServer / Grpc . Examples . MathServer . csproj <nl> <nl> < Name > Grpc . Examples < / Name > <nl> < / ProjectReference > <nl> < / ItemGroup > <nl> + < ItemGroup > <nl> + < None Include = " Grpc . Examples . MathServer . project . json " / > <nl> + < / ItemGroup > <nl> < / Project > <nl> \ No newline at end of file <nl> new file mode 100644 <nl> index 00000000000 . . c2f5bcb1637 <nl> mmm / dev / null <nl> ppp b / src / csharp / Grpc . Examples . MathServer / Grpc . Examples . MathServer . project . json <nl> <nl> + { <nl> + " frameworks " : { <nl> + " net45 " : { } <nl> + } , <nl> + " runtimes " : { <nl> + " win " : { } <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 38a449e8f29 <nl> mmm / dev / null <nl> ppp b / src / csharp / Grpc . Examples . MathServer / Grpc . Examples . MathServer . xproj <nl> <nl> + < ? xml version = " 1 . 0 " encoding = " utf - 8 " ? > <nl> + < Project ToolsVersion = " 14 . 0 . 25123 " DefaultTargets = " Build " xmlns = " http : / / schemas . microsoft . com / developer / msbuild / 2003 " > <nl> + < PropertyGroup > <nl> + < VisualStudioVersion Condition = " ' $ ( VisualStudioVersion ) ' = = ' ' " > 14 . 0 . 25123 < / VisualStudioVersion > <nl> + < VSToolsPath Condition = " ' $ ( VSToolsPath ) ' = = ' ' " > $ ( MSBuildExtensionsPath32 ) \ Microsoft \ VisualStudio \ v $ ( VisualStudioVersion ) < / VSToolsPath > <nl> + < / PropertyGroup > <nl> + < Import Project = " $ ( VSToolsPath ) \ DNX \ Microsoft . DNX . Props " Condition = " ' $ ( VSToolsPath ) ' ! = ' ' " / > <nl> + < PropertyGroup Label = " Globals " > <nl> + < ProjectGuid > 58579368 - 5372 - 4e67 - acd6 - 9b59cb9fa698 < / ProjectGuid > <nl> + < RootNamespace > Grpc . Examples . MathServer < / RootNamespace > <nl> + < BaseIntermediateOutputPath Condition = " ' $ ( BaseIntermediateOutputPath ) ' = = ' ' " > . . \ artifacts \ obj \ $ ( MSBuildProjectName ) < / BaseIntermediateOutputPath > <nl> + < OutputPath Condition = " ' $ ( OutputPath ) ' = = ' ' " > . \ bin \ < / OutputPath > <nl> + < / PropertyGroup > <nl> + < PropertyGroup > <nl> + < SchemaVersion > 2 . 0 < / SchemaVersion > <nl> + < / PropertyGroup > <nl> + < Import Project = " $ ( VSToolsPath ) \ DNX \ Microsoft . DNX . targets " Condition = " ' $ ( VSToolsPath ) ' ! = ' ' " / > <nl> + < / Project > <nl> \ No newline at end of file <nl> new file mode 100644 <nl> index 00000000000 . . 
b254f15af87 <nl> mmm / dev / null <nl> ppp b / src / csharp / Grpc . Examples . MathServer / project . json <nl> <nl> + { <nl> + " buildOptions " : { <nl> + " emitEntryPoint " : true <nl> + } , <nl> + " configurations " : { <nl> + " Debug " : { <nl> + " buildOptions " : { <nl> + " copyToOutput " : { <nl> + " mappings " : { <nl> + " nativelibs / windows_x64 / grpc_csharp_ext . dll " : " . . / . . / . . / vsprojects / x64 / Debug / grpc_csharp_ext . dll " , <nl> + " nativelibs / windows_x86 / grpc_csharp_ext . dll " : " . . / . . / . . / vsprojects / Debug / grpc_csharp_ext . dll " , <nl> + " nativelibs / linux_x64 / libgrpc_csharp_ext . so " : " . . / . . / . . / libs / dbg / libgrpc_csharp_ext . so " , <nl> + " nativelibs / macosx_x64 / libgrpc_csharp_ext . dylib " : " . . / . . / . . / libs / dbg / libgrpc_csharp_ext . dylib " <nl> + } <nl> + } <nl> + } <nl> + } , <nl> + " Release " : { <nl> + " buildOptions " : { <nl> + " copyToOutput " : { <nl> + " mappings " : { <nl> + " nativelibs / windows_x64 / grpc_csharp_ext . dll " : " . . / . . / . . / vsprojects / x64 / Release / grpc_csharp_ext . dll " , <nl> + " nativelibs / windows_x86 / grpc_csharp_ext . dll " : " . . / . . / . . / vsprojects / Release / grpc_csharp_ext . dll " , <nl> + " nativelibs / linux_x64 / libgrpc_csharp_ext . so " : " . . / . . / . . / libs / opt / libgrpc_csharp_ext . so " , <nl> + " nativelibs / macosx_x64 / libgrpc_csharp_ext . dylib " : " . . / . . / . . / libs / opt / libgrpc_csharp_ext . dylib " <nl> + } <nl> + } <nl> + } <nl> + } <nl> + } , <nl> + " runtimes " : { <nl> + " win7 - x64 " : { } , <nl> + " debian . 8 - x64 " : { } , <nl> + " osx . 10 . 11 - x64 " : { } <nl> + } , <nl> + <nl> + " dependencies " : { <nl> + " Grpc . Examples " : { <nl> + " target " : " project " <nl> + } <nl> + } , <nl> + " frameworks " : { <nl> + " net45 " : { } , <nl> + " netstandard1 . 5 " : { <nl> + " imports " : [ <nl> + " portable - net45 " <nl> + ] , <nl> + " dependencies " : { <nl> + " NETStandard . Library " : " 1 . 5 . 0 - rc2 - 24027 " <nl> + } <nl> + } <nl> + } <nl> + } <nl> mmm a / src / csharp / Grpc . Examples . Tests / Grpc . Examples . Tests . csproj <nl> ppp b / src / csharp / Grpc . Examples . Tests / Grpc . Examples . Tests . csproj <nl> <nl> < / ProjectReference > <nl> < / ItemGroup > <nl> < ItemGroup > <nl> + < None Include = " Grpc . Examples . Tests . project . json " / > <nl> < None Include = " packages . config " / > <nl> < / ItemGroup > <nl> < ItemGroup > <nl> new file mode 100644 <nl> index 00000000000 . . c2f5bcb1637 <nl> mmm / dev / null <nl> ppp b / src / csharp / Grpc . Examples . Tests / Grpc . Examples . Tests . project . json <nl> <nl> + { <nl> + " frameworks " : { <nl> + " net45 " : { } <nl> + } , <nl> + " runtimes " : { <nl> + " win " : { } <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 9cecd18b2e4 <nl> mmm / dev / null <nl> ppp b / src / csharp / Grpc . Examples . Tests / Grpc . Examples . Tests . xproj <nl> <nl> + < ? xml version = " 1 . 0 " encoding = " utf - 8 " ? > <nl> + < Project ToolsVersion = " 14 . 0 . 25123 " DefaultTargets = " Build " xmlns = " http : / / schemas . microsoft . com / developer / msbuild / 2003 " > <nl> + < PropertyGroup > <nl> + < VisualStudioVersion Condition = " ' $ ( VisualStudioVersion ) ' = = ' ' " > 14 . 0 . 
25123 < / VisualStudioVersion > <nl> + < VSToolsPath Condition = " ' $ ( VSToolsPath ) ' = = ' ' " > $ ( MSBuildExtensionsPath32 ) \ Microsoft \ VisualStudio \ v $ ( VisualStudioVersion ) < / VSToolsPath > <nl> + < / PropertyGroup > <nl> + < Import Project = " $ ( VSToolsPath ) \ DNX \ Microsoft . DNX . Props " Condition = " ' $ ( VSToolsPath ) ' ! = ' ' " / > <nl> + < PropertyGroup Label = " Globals " > <nl> + < ProjectGuid > c61714a6 - f633 - 44fb - 97f4 - c91f425c1d15 < / ProjectGuid > <nl> + < RootNamespace > Grpc . Examples . Tests < / RootNamespace > <nl> + < BaseIntermediateOutputPath Condition = " ' $ ( BaseIntermediateOutputPath ) ' = = ' ' " > . . \ artifacts \ obj \ $ ( MSBuildProjectName ) < / BaseIntermediateOutputPath > <nl> + < OutputPath Condition = " ' $ ( OutputPath ) ' = = ' ' " > . \ bin \ < / OutputPath > <nl> + < / PropertyGroup > <nl> + < PropertyGroup > <nl> + < SchemaVersion > 2 . 0 < / SchemaVersion > <nl> + < / PropertyGroup > <nl> + < Import Project = " $ ( VSToolsPath ) \ DNX \ Microsoft . DNX . targets " Condition = " ' $ ( VSToolsPath ) ' ! = ' ' " / > <nl> + < / Project > <nl> \ No newline at end of file <nl> mmm a / src / csharp / Grpc . Examples . Tests / MathClientServerTests . cs <nl> ppp b / src / csharp / Grpc . Examples . Tests / MathClientServerTests . cs <nl> public async Task Fib ( ) <nl> { <nl> var responses = await call . ResponseStream . ToListAsync ( ) ; <nl> CollectionAssert . AreEqual ( new List < long > { 1 , 1 , 2 , 3 , 5 , 8 } , <nl> - responses . ConvertAll ( ( n ) = > n . Num_ ) ) ; <nl> + responses . Select ( ( n ) = > n . Num_ ) ) ; <nl> } <nl> } <nl> <nl> public async Task Sum ( ) <nl> { <nl> using ( var call = client . Sum ( ) ) <nl> { <nl> - var numbers = new List < long > { 10 , 20 , 30 } . ConvertAll ( n = > new Num { Num_ = n } ) ; <nl> + var numbers = new List < long > { 10 , 20 , 30 } . Select ( n = > new Num { Num_ = n } ) ; <nl> <nl> await call . RequestStream . WriteAllAsync ( numbers ) ; <nl> var result = await call . ResponseAsync ; <nl> public async Task DivMany ( ) <nl> await call . RequestStream . WriteAllAsync ( divArgsList ) ; <nl> var result = await call . ResponseStream . ToListAsync ( ) ; <nl> <nl> - CollectionAssert . AreEqual ( new long [ ] { 3 , 4 , 3 } , result . ConvertAll ( ( divReply ) = > divReply . Quotient ) ) ; <nl> - CollectionAssert . AreEqual ( new long [ ] { 1 , 16 , 1 } , result . ConvertAll ( ( divReply ) = > divReply . Remainder ) ) ; <nl> + CollectionAssert . AreEqual ( new long [ ] { 3 , 4 , 3 } , result . Select ( ( divReply ) = > divReply . Quotient ) ) ; <nl> + CollectionAssert . AreEqual ( new long [ ] { 1 , 16 , 1 } , result . Select ( ( divReply ) = > divReply . Remainder ) ) ; <nl> } <nl> } <nl> } <nl> mmm a / src / csharp / Grpc . Examples . Tests / NUnitMain . cs <nl> ppp b / src / csharp / Grpc . Examples . Tests / NUnitMain . cs <nl> public static int Main ( string [ ] args ) <nl> { <nl> / / Make logger immune to NUnit capturing stdout and stderr to workaround https : / / github . com / nunit / nunit / issues / 1406 . <nl> GrpcEnvironment . SetLogger ( new TextWriterLogger ( Console . Error ) ) ; <nl> - # if DOTNET5_4 <nl> + # if NETSTANDARD1_5 <nl> return new AutoRun ( typeof ( NUnitMain ) . GetTypeInfo ( ) . Assembly ) . Execute ( args , new ExtendedTextWrapper ( Console . Out ) , Console . In ) ; <nl> # else <nl> return new AutoRun ( ) . Execute ( args ) ; <nl> new file mode 100644 <nl> index 00000000000 . . d2779e814f9 <nl> mmm / dev / null <nl> ppp b / src / csharp / Grpc . 
Examples . Tests / project . json <nl> <nl> + { <nl> + " buildOptions " : { <nl> + " emitEntryPoint " : true <nl> + } , <nl> + " configurations " : { <nl> + " Debug " : { <nl> + " buildOptions " : { <nl> + " copyToOutput " : { <nl> + " mappings " : { <nl> + " nativelibs / windows_x64 / grpc_csharp_ext . dll " : " . . / . . / . . / vsprojects / x64 / Debug / grpc_csharp_ext . dll " , <nl> + " nativelibs / windows_x86 / grpc_csharp_ext . dll " : " . . / . . / . . / vsprojects / Debug / grpc_csharp_ext . dll " , <nl> + " nativelibs / linux_x64 / libgrpc_csharp_ext . so " : " . . / . . / . . / libs / dbg / libgrpc_csharp_ext . so " , <nl> + " nativelibs / macosx_x64 / libgrpc_csharp_ext . dylib " : " . . / . . / . . / libs / dbg / libgrpc_csharp_ext . dylib " <nl> + } <nl> + } <nl> + } <nl> + } , <nl> + " Release " : { <nl> + " buildOptions " : { <nl> + " copyToOutput " : { <nl> + " mappings " : { <nl> + " nativelibs / windows_x64 / grpc_csharp_ext . dll " : " . . / . . / . . / vsprojects / x64 / Release / grpc_csharp_ext . dll " , <nl> + " nativelibs / windows_x86 / grpc_csharp_ext . dll " : " . . / . . / . . / vsprojects / Release / grpc_csharp_ext . dll " , <nl> + " nativelibs / linux_x64 / libgrpc_csharp_ext . so " : " . . / . . / . . / libs / opt / libgrpc_csharp_ext . so " , <nl> + " nativelibs / macosx_x64 / libgrpc_csharp_ext . dylib " : " . . / . . / . . / libs / opt / libgrpc_csharp_ext . dylib " <nl> + } <nl> + } <nl> + } <nl> + } <nl> + } , <nl> + " runtimes " : { <nl> + " win7 - x64 " : { } , <nl> + " debian . 8 - x64 " : { } , <nl> + " osx . 10 . 11 - x64 " : { } <nl> + } , <nl> + <nl> + " dependencies " : { <nl> + " Grpc . Examples " : { <nl> + " target " : " project " <nl> + } , <nl> + " NUnit " : " 3 . 2 . 0 " , <nl> + " NUnitLite " : " 3 . 2 . 0 - * " <nl> + } , <nl> + " frameworks " : { <nl> + " net45 " : { } , <nl> + " netstandard1 . 5 " : { <nl> + " imports " : [ <nl> + " portable - net45 " <nl> + ] , <nl> + " dependencies " : { <nl> + " NETStandard . Library " : " 1 . 5 . 0 - rc2 - 24027 " <nl> + } <nl> + } <nl> + } <nl> + } <nl> mmm a / src / csharp / Grpc . Examples / Grpc . Examples . csproj <nl> ppp b / src / csharp / Grpc . Examples / Grpc . Examples . csproj <nl> <nl> < / ProjectReference > <nl> < / ItemGroup > <nl> < ItemGroup > <nl> + < None Include = " Grpc . Examples . project . json " / > <nl> < None Include = " packages . config " / > <nl> < / ItemGroup > <nl> < / Project > <nl> \ No newline at end of file <nl> new file mode 100644 <nl> index 00000000000 . . c2f5bcb1637 <nl> mmm / dev / null <nl> ppp b / src / csharp / Grpc . Examples / Grpc . Examples . project . json <nl> <nl> + { <nl> + " frameworks " : { <nl> + " net45 " : { } <nl> + } , <nl> + " runtimes " : { <nl> + " win " : { } <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . d1d7e6d9816 <nl> mmm / dev / null <nl> ppp b / src / csharp / Grpc . Examples / Grpc . Examples . xproj <nl> <nl> + < ? xml version = " 1 . 0 " encoding = " utf - 8 " ? > <nl> + < Project ToolsVersion = " 14 . 0 . 25123 " DefaultTargets = " Build " xmlns = " http : / / schemas . microsoft . com / developer / msbuild / 2003 " > <nl> + < PropertyGroup > <nl> + < VisualStudioVersion Condition = " ' $ ( VisualStudioVersion ) ' = = ' ' " > 14 . 0 . 
25123 < / VisualStudioVersion > <nl> + < VSToolsPath Condition = " ' $ ( VSToolsPath ) ' = = ' ' " > $ ( MSBuildExtensionsPath32 ) \ Microsoft \ VisualStudio \ v $ ( VisualStudioVersion ) < / VSToolsPath > <nl> + < / PropertyGroup > <nl> + < Import Project = " $ ( VSToolsPath ) \ DNX \ Microsoft . DNX . Props " Condition = " ' $ ( VSToolsPath ) ' ! = ' ' " / > <nl> + < PropertyGroup Label = " Globals " > <nl> + < ProjectGuid > c77b792d - fc78 - 4ce2 - 9522 - b40b0803c636 < / ProjectGuid > <nl> + < RootNamespace > Grpc . Examples < / RootNamespace > <nl> + < BaseIntermediateOutputPath Condition = " ' $ ( BaseIntermediateOutputPath ) ' = = ' ' " > . . \ artifacts \ obj \ $ ( MSBuildProjectName ) < / BaseIntermediateOutputPath > <nl> + < OutputPath Condition = " ' $ ( OutputPath ) ' = = ' ' " > . \ bin \ < / OutputPath > <nl> + < / PropertyGroup > <nl> + < PropertyGroup > <nl> + < SchemaVersion > 2 . 0 < / SchemaVersion > <nl> + < / PropertyGroup > <nl> + < Import Project = " $ ( VSToolsPath ) \ DNX \ Microsoft . DNX . targets " Condition = " ' $ ( VSToolsPath ) ' ! = ' ' " / > <nl> + < / Project > <nl> \ No newline at end of file <nl> new file mode 100644 <nl> index 00000000000 . . 7d3f4dcbb1e <nl> mmm / dev / null <nl> ppp b / src / csharp / Grpc . Examples / project . json <nl> <nl> + { <nl> + " buildOptions " : { <nl> + } , <nl> + <nl> + " dependencies " : { <nl> + " Grpc . Core " : { <nl> + " target " : " project " <nl> + } , <nl> + " Google . Protobuf " : " 3 . 0 . 0 - beta3 " <nl> + } , <nl> + " frameworks " : { <nl> + " net45 " : { <nl> + " frameworkAssemblies " : { <nl> + " System . Runtime " : " " , <nl> + " System . IO " : " " <nl> + } <nl> + } , <nl> + " netstandard1 . 5 " : { <nl> + " imports " : [ <nl> + " portable - net45 " <nl> + ] , <nl> + " dependencies " : { <nl> + " NETStandard . Library " : " 1 . 5 . 0 - rc2 - 24027 " <nl> + } <nl> + } <nl> + } <nl> + } <nl> mmm a / src / csharp / Grpc . HealthCheck . Tests / Grpc . HealthCheck . Tests . csproj <nl> ppp b / src / csharp / Grpc . HealthCheck . Tests / Grpc . HealthCheck . Tests . csproj <nl> <nl> < / ProjectReference > <nl> < / ItemGroup > <nl> < ItemGroup > <nl> + < None Include = " Grpc . HealthCheck . Tests . project . json " / > <nl> < None Include = " packages . config " / > <nl> < / ItemGroup > <nl> < ItemGroup > <nl> new file mode 100644 <nl> index 00000000000 . . c2f5bcb1637 <nl> mmm / dev / null <nl> ppp b / src / csharp / Grpc . HealthCheck . Tests / Grpc . HealthCheck . Tests . project . json <nl> <nl> + { <nl> + " frameworks " : { <nl> + " net45 " : { } <nl> + } , <nl> + " runtimes " : { <nl> + " win " : { } <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 724c5b2a160 <nl> mmm / dev / null <nl> ppp b / src / csharp / Grpc . HealthCheck . Tests / Grpc . HealthCheck . Tests . xproj <nl> <nl> + < ? xml version = " 1 . 0 " encoding = " utf - 8 " ? > <nl> + < Project ToolsVersion = " 14 . 0 . 25123 " DefaultTargets = " Build " xmlns = " http : / / schemas . microsoft . com / developer / msbuild / 2003 " > <nl> + < PropertyGroup > <nl> + < VisualStudioVersion Condition = " ' $ ( VisualStudioVersion ) ' = = ' ' " > 14 . 0 . 25123 < / VisualStudioVersion > <nl> + < VSToolsPath Condition = " ' $ ( VSToolsPath ) ' = = ' ' " > $ ( MSBuildExtensionsPath32 ) \ Microsoft \ VisualStudio \ v $ ( VisualStudioVersion ) < / VSToolsPath > <nl> + < / PropertyGroup > <nl> + < Import Project = " $ ( VSToolsPath ) \ DNX \ Microsoft . DNX . Props " Condition = " ' $ ( VSToolsPath ) ' ! 
= ' ' " / > <nl> + < PropertyGroup Label = " Globals " > <nl> + < ProjectGuid > 43dafac6 - 5343 - 4621 - 960e - a8a977ea3f0b < / ProjectGuid > <nl> + < RootNamespace > Grpc . HealthCheck . Tests < / RootNamespace > <nl> + < BaseIntermediateOutputPath Condition = " ' $ ( BaseIntermediateOutputPath ) ' = = ' ' " > . . \ artifacts \ obj \ $ ( MSBuildProjectName ) < / BaseIntermediateOutputPath > <nl> + < OutputPath Condition = " ' $ ( OutputPath ) ' = = ' ' " > . \ bin \ < / OutputPath > <nl> + < / PropertyGroup > <nl> + < PropertyGroup > <nl> + < SchemaVersion > 2 . 0 < / SchemaVersion > <nl> + < / PropertyGroup > <nl> + < Import Project = " $ ( VSToolsPath ) \ DNX \ Microsoft . DNX . targets " Condition = " ' $ ( VSToolsPath ) ' ! = ' ' " / > <nl> + < / Project > <nl> \ No newline at end of file <nl> mmm a / src / csharp / Grpc . HealthCheck . Tests / NUnitMain . cs <nl> ppp b / src / csharp / Grpc . HealthCheck . Tests / NUnitMain . cs <nl> public static int Main ( string [ ] args ) <nl> { <nl> / / Make logger immune to NUnit capturing stdout and stderr to workaround https : / / github . com / nunit / nunit / issues / 1406 . <nl> GrpcEnvironment . SetLogger ( new TextWriterLogger ( Console . Error ) ) ; <nl> - # if DOTNET5_4 <nl> + # if NETSTANDARD1_5 <nl> return new AutoRun ( typeof ( NUnitMain ) . GetTypeInfo ( ) . Assembly ) . Execute ( args , new ExtendedTextWrapper ( Console . Out ) , Console . In ) ; <nl> # else <nl> return new AutoRun ( ) . Execute ( args ) ; <nl> new file mode 100644 <nl> index 00000000000 . . 74599bd4b9e <nl> mmm / dev / null <nl> ppp b / src / csharp / Grpc . HealthCheck . Tests / project . json <nl> <nl> + { <nl> + " buildOptions " : { <nl> + " emitEntryPoint " : true <nl> + } , <nl> + " configurations " : { <nl> + " Debug " : { <nl> + " buildOptions " : { <nl> + " copyToOutput " : { <nl> + " mappings " : { <nl> + " nativelibs / windows_x64 / grpc_csharp_ext . dll " : " . . / . . / . . / vsprojects / x64 / Debug / grpc_csharp_ext . dll " , <nl> + " nativelibs / windows_x86 / grpc_csharp_ext . dll " : " . . / . . / . . / vsprojects / Debug / grpc_csharp_ext . dll " , <nl> + " nativelibs / linux_x64 / libgrpc_csharp_ext . so " : " . . / . . / . . / libs / dbg / libgrpc_csharp_ext . so " , <nl> + " nativelibs / macosx_x64 / libgrpc_csharp_ext . dylib " : " . . / . . / . . / libs / dbg / libgrpc_csharp_ext . dylib " <nl> + } <nl> + } <nl> + } <nl> + } , <nl> + " Release " : { <nl> + " buildOptions " : { <nl> + " copyToOutput " : { <nl> + " mappings " : { <nl> + " nativelibs / windows_x64 / grpc_csharp_ext . dll " : " . . / . . / . . / vsprojects / x64 / Release / grpc_csharp_ext . dll " , <nl> + " nativelibs / windows_x86 / grpc_csharp_ext . dll " : " . . / . . / . . / vsprojects / Release / grpc_csharp_ext . dll " , <nl> + " nativelibs / linux_x64 / libgrpc_csharp_ext . so " : " . . / . . / . . / libs / opt / libgrpc_csharp_ext . so " , <nl> + " nativelibs / macosx_x64 / libgrpc_csharp_ext . dylib " : " . . / . . / . . / libs / opt / libgrpc_csharp_ext . dylib " <nl> + } <nl> + } <nl> + } <nl> + } <nl> + } , <nl> + " runtimes " : { <nl> + " win7 - x64 " : { } , <nl> + " debian . 8 - x64 " : { } , <nl> + " osx . 10 . 11 - x64 " : { } <nl> + } , <nl> + <nl> + " dependencies " : { <nl> + " Grpc . HealthCheck " : { <nl> + " target " : " project " <nl> + } , <nl> + " NUnit " : " 3 . 2 . 0 " , <nl> + " NUnitLite " : " 3 . 2 . 0 - * " <nl> + } , <nl> + " frameworks " : { <nl> + " net45 " : { } , <nl> + " netstandard1 . 
5 " : { <nl> + " imports " : [ <nl> + " portable - net45 " <nl> + ] , <nl> + " dependencies " : { <nl> + " NETStandard . Library " : " 1 . 5 . 0 - rc2 - 24027 " <nl> + } <nl> + } <nl> + } <nl> + } <nl> mmm a / src / csharp / Grpc . HealthCheck / Grpc . HealthCheck . csproj <nl> ppp b / src / csharp / Grpc . HealthCheck / Grpc . HealthCheck . csproj <nl> <nl> < / ItemGroup > <nl> < ItemGroup > <nl> < None Include = " Grpc . HealthCheck . nuspec " / > <nl> + < None Include = " Grpc . HealthCheck . project . json " / > <nl> < None Include = " packages . config " / > <nl> < / ItemGroup > <nl> < ItemGroup > <nl> new file mode 100644 <nl> index 00000000000 . . c2f5bcb1637 <nl> mmm / dev / null <nl> ppp b / src / csharp / Grpc . HealthCheck / Grpc . HealthCheck . project . json <nl> <nl> + { <nl> + " frameworks " : { <nl> + " net45 " : { } <nl> + } , <nl> + " runtimes " : { <nl> + " win " : { } <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 5806a7af979 <nl> mmm / dev / null <nl> ppp b / src / csharp / Grpc . HealthCheck / Grpc . HealthCheck . xproj <nl> <nl> + < ? xml version = " 1 . 0 " encoding = " utf - 8 " ? > <nl> + < Project ToolsVersion = " 14 . 0 . 25123 " DefaultTargets = " Build " xmlns = " http : / / schemas . microsoft . com / developer / msbuild / 2003 " > <nl> + < PropertyGroup > <nl> + < VisualStudioVersion Condition = " ' $ ( VisualStudioVersion ) ' = = ' ' " > 14 . 0 . 25123 < / VisualStudioVersion > <nl> + < VSToolsPath Condition = " ' $ ( VSToolsPath ) ' = = ' ' " > $ ( MSBuildExtensionsPath32 ) \ Microsoft \ VisualStudio \ v $ ( VisualStudioVersion ) < / VSToolsPath > <nl> + < / PropertyGroup > <nl> + < Import Project = " $ ( VSToolsPath ) \ DNX \ Microsoft . DNX . Props " Condition = " ' $ ( VSToolsPath ) ' ! = ' ' " / > <nl> + < PropertyGroup Label = " Globals " > <nl> + < ProjectGuid > 3be4ad0b - 2bf0 - 4d68 - b625 - f6018ef0dcfa < / ProjectGuid > <nl> + < RootNamespace > Grpc . HealthCheck < / RootNamespace > <nl> + < BaseIntermediateOutputPath Condition = " ' $ ( BaseIntermediateOutputPath ) ' = = ' ' " > . . \ artifacts \ obj \ $ ( MSBuildProjectName ) < / BaseIntermediateOutputPath > <nl> + < OutputPath Condition = " ' $ ( OutputPath ) ' = = ' ' " > . \ bin \ < / OutputPath > <nl> + < / PropertyGroup > <nl> + < PropertyGroup > <nl> + < SchemaVersion > 2 . 0 < / SchemaVersion > <nl> + < / PropertyGroup > <nl> + < Import Project = " $ ( VSToolsPath ) \ DNX \ Microsoft . DNX . targets " Condition = " ' $ ( VSToolsPath ) ' ! = ' ' " / > <nl> + < / Project > <nl> \ No newline at end of file <nl> new file mode 100644 <nl> index 00000000000 . . eb57608957a <nl> mmm / dev / null <nl> ppp b / src / csharp / Grpc . HealthCheck / project . json <nl> <nl> + { <nl> + " version " : " 0 . 15 . 0 - dev " , <nl> + " title " : " gRPC C # Healthchecking " , <nl> + " authors " : [ " Google Inc . " ] , <nl> + " copyright " : " Copyright 2015 , Google Inc . " , <nl> + " packOptions " : { <nl> + " summary " : " Implementation of gRPC health service " , <nl> + " description " : " Example implementation of grpc . health . v1 service that can be used for health - checking . " , <nl> + " owners " : [ " grpc - packages " ] , <nl> + " licenseUrl " : " https : / / github . com / grpc / grpc / blob / master / LICENSE " , <nl> + " projectUrl " : " https : / / github . com / grpc / grpc " , <nl> + " requireLicenseAcceptance " : false , <nl> + " tags " : [ " gRPC health check " ] <nl> + } , <nl> + " dependencies " : { <nl> + " Grpc . Core " : " 0 . 15 . 
0 - dev " , <nl> + " Google . Protobuf " : " 3 . 0 . 0 - beta3 " <nl> + } , <nl> + " frameworks " : { <nl> + " net45 " : { <nl> + " frameworkAssemblies " : { <nl> + " System . Runtime " : " " , <nl> + " System . IO " : " " <nl> + } <nl> + } , <nl> + " netstandard1 . 5 " : { <nl> + " imports " : [ <nl> + " portable - net45 " <nl> + ] , <nl> + " dependencies " : { <nl> + " NETStandard . Library " : " 1 . 5 . 0 - rc2 - 24027 " <nl> + } <nl> + } <nl> + } <nl> + } <nl> mmm a / src / csharp / Grpc . IntegrationTesting . Client / Grpc . IntegrationTesting . Client . csproj <nl> ppp b / src / csharp / Grpc . IntegrationTesting . Client / Grpc . IntegrationTesting . Client . csproj <nl> <nl> < / ProjectReference > <nl> < / ItemGroup > <nl> < ItemGroup > <nl> + < None Include = " Grpc . IntegrationTesting . Client . project . json " / > <nl> < None Include = " packages . config " / > <nl> < / ItemGroup > <nl> < / Project > <nl> \ No newline at end of file <nl> new file mode 100644 <nl> index 00000000000 . . c2f5bcb1637 <nl> mmm / dev / null <nl> ppp b / src / csharp / Grpc . IntegrationTesting . Client / Grpc . IntegrationTesting . Client . project . json <nl> <nl> + { <nl> + " frameworks " : { <nl> + " net45 " : { } <nl> + } , <nl> + " runtimes " : { <nl> + " win " : { } <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 7f456cfaef1 <nl> mmm / dev / null <nl> ppp b / src / csharp / Grpc . IntegrationTesting . Client / Grpc . IntegrationTesting . Client . xproj <nl> <nl> + < ? xml version = " 1 . 0 " encoding = " utf - 8 " ? > <nl> + < Project ToolsVersion = " 14 . 0 . 25123 " DefaultTargets = " Build " xmlns = " http : / / schemas . microsoft . com / developer / msbuild / 2003 " > <nl> + < PropertyGroup > <nl> + < VisualStudioVersion Condition = " ' $ ( VisualStudioVersion ) ' = = ' ' " > 14 . 0 . 25123 < / VisualStudioVersion > <nl> + < VSToolsPath Condition = " ' $ ( VSToolsPath ) ' = = ' ' " > $ ( MSBuildExtensionsPath32 ) \ Microsoft \ VisualStudio \ v $ ( VisualStudioVersion ) < / VSToolsPath > <nl> + < / PropertyGroup > <nl> + < Import Project = " $ ( VSToolsPath ) \ DNX \ Microsoft . DNX . Props " Condition = " ' $ ( VSToolsPath ) ' ! = ' ' " / > <nl> + < PropertyGroup Label = " Globals " > <nl> + < ProjectGuid > 48ea5bbe - 70e2 - 4198 - 869d - d7e59c45f30d < / ProjectGuid > <nl> + < RootNamespace > Grpc . IntegrationTesting . Client < / RootNamespace > <nl> + < BaseIntermediateOutputPath Condition = " ' $ ( BaseIntermediateOutputPath ) ' = = ' ' " > . . \ artifacts \ obj \ $ ( MSBuildProjectName ) < / BaseIntermediateOutputPath > <nl> + < OutputPath Condition = " ' $ ( OutputPath ) ' = = ' ' " > . \ bin \ < / OutputPath > <nl> + < / PropertyGroup > <nl> + < PropertyGroup > <nl> + < SchemaVersion > 2 . 0 < / SchemaVersion > <nl> + < / PropertyGroup > <nl> + < Import Project = " $ ( VSToolsPath ) \ DNX \ Microsoft . DNX . targets " Condition = " ' $ ( VSToolsPath ) ' ! = ' ' " / > <nl> + < / Project > <nl> \ No newline at end of file <nl> new file mode 100644 <nl> index 00000000000 . . e5ba04d7173 <nl> mmm / dev / null <nl> ppp b / src / csharp / Grpc . IntegrationTesting . Client / project . json <nl> <nl> + { <nl> + " buildOptions " : { <nl> + " emitEntryPoint " : true <nl> + } , <nl> + " configurations " : { <nl> + " Debug " : { <nl> + " buildOptions " : { <nl> + " copyToOutput " : { <nl> + " include " : " data / * " , <nl> + " mappings " : { <nl> + " nativelibs / windows_x64 / grpc_csharp_ext . dll " : " . . / . . / . . / vsprojects / x64 / Debug / grpc_csharp_ext . 
dll " , <nl> + " nativelibs / windows_x86 / grpc_csharp_ext . dll " : " . . / . . / . . / vsprojects / Debug / grpc_csharp_ext . dll " , <nl> + " nativelibs / linux_x64 / libgrpc_csharp_ext . so " : " . . / . . / . . / libs / dbg / libgrpc_csharp_ext . so " , <nl> + " nativelibs / macosx_x64 / libgrpc_csharp_ext . dylib " : " . . / . . / . . / libs / dbg / libgrpc_csharp_ext . dylib " <nl> + } <nl> + } <nl> + } <nl> + } , <nl> + " Release " : { <nl> + " buildOptions " : { <nl> + " copyToOutput " : { <nl> + " include " : " data / * " , <nl> + " mappings " : { <nl> + " nativelibs / windows_x64 / grpc_csharp_ext . dll " : " . . / . . / . . / vsprojects / x64 / Release / grpc_csharp_ext . dll " , <nl> + " nativelibs / windows_x86 / grpc_csharp_ext . dll " : " . . / . . / . . / vsprojects / Release / grpc_csharp_ext . dll " , <nl> + " nativelibs / linux_x64 / libgrpc_csharp_ext . so " : " . . / . . / . . / libs / opt / libgrpc_csharp_ext . so " , <nl> + " nativelibs / macosx_x64 / libgrpc_csharp_ext . dylib " : " . . / . . / . . / libs / opt / libgrpc_csharp_ext . dylib " <nl> + } <nl> + } <nl> + } <nl> + } <nl> + } , <nl> + " runtimes " : { <nl> + " win7 - x64 " : { } , <nl> + " debian . 8 - x64 " : { } , <nl> + " osx . 10 . 11 - x64 " : { } <nl> + } , <nl> + <nl> + " dependencies " : { <nl> + " Grpc . IntegrationTesting " : { <nl> + " target " : " project " <nl> + } <nl> + } , <nl> + " frameworks " : { <nl> + " net45 " : { } , <nl> + " netstandard1 . 5 " : { <nl> + " imports " : [ <nl> + " portable - net45 " , <nl> + " net45 " <nl> + ] , <nl> + " dependencies " : { <nl> + " NETStandard . Library " : " 1 . 5 . 0 - rc2 - 24027 " <nl> + } <nl> + } <nl> + } <nl> + } <nl> mmm a / src / csharp / Grpc . IntegrationTesting . QpsWorker / Grpc . IntegrationTesting . QpsWorker . csproj <nl> ppp b / src / csharp / Grpc . IntegrationTesting . QpsWorker / Grpc . IntegrationTesting . QpsWorker . csproj <nl> <nl> < / ItemGroup > <nl> < ItemGroup > <nl> < None Include = " app . config " / > <nl> + < None Include = " Grpc . IntegrationTesting . QpsWorker . project . json " / > <nl> < / ItemGroup > <nl> < / Project > <nl> \ No newline at end of file <nl> new file mode 100644 <nl> index 00000000000 . . c2f5bcb1637 <nl> mmm / dev / null <nl> ppp b / src / csharp / Grpc . IntegrationTesting . QpsWorker / Grpc . IntegrationTesting . QpsWorker . project . json <nl> <nl> + { <nl> + " frameworks " : { <nl> + " net45 " : { } <nl> + } , <nl> + " runtimes " : { <nl> + " win " : { } <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 15bec443d6c <nl> mmm / dev / null <nl> ppp b / src / csharp / Grpc . IntegrationTesting . QpsWorker / Grpc . IntegrationTesting . QpsWorker . xproj <nl> <nl> + < ? xml version = " 1 . 0 " encoding = " utf - 8 " ? > <nl> + < Project ToolsVersion = " 14 . 0 . 25123 " DefaultTargets = " Build " xmlns = " http : / / schemas . microsoft . com / developer / msbuild / 2003 " > <nl> + < PropertyGroup > <nl> + < VisualStudioVersion Condition = " ' $ ( VisualStudioVersion ) ' = = ' ' " > 14 . 0 . 25123 < / VisualStudioVersion > <nl> + < VSToolsPath Condition = " ' $ ( VSToolsPath ) ' = = ' ' " > $ ( MSBuildExtensionsPath32 ) \ Microsoft \ VisualStudio \ v $ ( VisualStudioVersion ) < / VSToolsPath > <nl> + < / PropertyGroup > <nl> + < Import Project = " $ ( VSToolsPath ) \ DNX \ Microsoft . DNX . Props " Condition = " ' $ ( VSToolsPath ) ' ! 
= ' ' " / > <nl> + < PropertyGroup Label = " Globals " > <nl> + < ProjectGuid > 661b70d7 - f56a - 46e0 - 9b81 - 6227b591b5e7 < / ProjectGuid > <nl> + < RootNamespace > Grpc . IntegrationTesting . QpsWorker < / RootNamespace > <nl> + < BaseIntermediateOutputPath Condition = " ' $ ( BaseIntermediateOutputPath ) ' = = ' ' " > . . \ artifacts \ obj \ $ ( MSBuildProjectName ) < / BaseIntermediateOutputPath > <nl> + < OutputPath Condition = " ' $ ( OutputPath ) ' = = ' ' " > . \ bin \ < / OutputPath > <nl> + < / PropertyGroup > <nl> + < PropertyGroup > <nl> + < SchemaVersion > 2 . 0 < / SchemaVersion > <nl> + < / PropertyGroup > <nl> + < Import Project = " $ ( VSToolsPath ) \ DNX \ Microsoft . DNX . targets " Condition = " ' $ ( VSToolsPath ) ' ! = ' ' " / > <nl> + < / Project > <nl> \ No newline at end of file <nl> new file mode 100644 <nl> index 00000000000 . . e5ba04d7173 <nl> mmm / dev / null <nl> ppp b / src / csharp / Grpc . IntegrationTesting . QpsWorker / project . json <nl> <nl> + { <nl> + " buildOptions " : { <nl> + " emitEntryPoint " : true <nl> + } , <nl> + " configurations " : { <nl> + " Debug " : { <nl> + " buildOptions " : { <nl> + " copyToOutput " : { <nl> + " include " : " data / * " , <nl> + " mappings " : { <nl> + " nativelibs / windows_x64 / grpc_csharp_ext . dll " : " . . / . . / . . / vsprojects / x64 / Debug / grpc_csharp_ext . dll " , <nl> + " nativelibs / windows_x86 / grpc_csharp_ext . dll " : " . . / . . / . . / vsprojects / Debug / grpc_csharp_ext . dll " , <nl> + " nativelibs / linux_x64 / libgrpc_csharp_ext . so " : " . . / . . / . . / libs / dbg / libgrpc_csharp_ext . so " , <nl> + " nativelibs / macosx_x64 / libgrpc_csharp_ext . dylib " : " . . / . . / . . / libs / dbg / libgrpc_csharp_ext . dylib " <nl> + } <nl> + } <nl> + } <nl> + } , <nl> + " Release " : { <nl> + " buildOptions " : { <nl> + " copyToOutput " : { <nl> + " include " : " data / * " , <nl> + " mappings " : { <nl> + " nativelibs / windows_x64 / grpc_csharp_ext . dll " : " . . / . . / . . / vsprojects / x64 / Release / grpc_csharp_ext . dll " , <nl> + " nativelibs / windows_x86 / grpc_csharp_ext . dll " : " . . / . . / . . / vsprojects / Release / grpc_csharp_ext . dll " , <nl> + " nativelibs / linux_x64 / libgrpc_csharp_ext . so " : " . . / . . / . . / libs / opt / libgrpc_csharp_ext . so " , <nl> + " nativelibs / macosx_x64 / libgrpc_csharp_ext . dylib " : " . . / . . / . . / libs / opt / libgrpc_csharp_ext . dylib " <nl> + } <nl> + } <nl> + } <nl> + } <nl> + } , <nl> + " runtimes " : { <nl> + " win7 - x64 " : { } , <nl> + " debian . 8 - x64 " : { } , <nl> + " osx . 10 . 11 - x64 " : { } <nl> + } , <nl> + <nl> + " dependencies " : { <nl> + " Grpc . IntegrationTesting " : { <nl> + " target " : " project " <nl> + } <nl> + } , <nl> + " frameworks " : { <nl> + " net45 " : { } , <nl> + " netstandard1 . 5 " : { <nl> + " imports " : [ <nl> + " portable - net45 " , <nl> + " net45 " <nl> + ] , <nl> + " dependencies " : { <nl> + " NETStandard . Library " : " 1 . 5 . 0 - rc2 - 24027 " <nl> + } <nl> + } <nl> + } <nl> + } <nl> mmm a / src / csharp / Grpc . IntegrationTesting . Server / Grpc . IntegrationTesting . Server . csproj <nl> ppp b / src / csharp / Grpc . IntegrationTesting . Server / Grpc . IntegrationTesting . Server . csproj <nl> <nl> < / ProjectReference > <nl> < / ItemGroup > <nl> < ItemGroup > <nl> + < None Include = " Grpc . IntegrationTesting . Server . project . json " / > <nl> < None Include = " packages . 
config " / > <nl> < / ItemGroup > <nl> < / Project > <nl> \ No newline at end of file <nl> new file mode 100644 <nl> index 00000000000 . . c2f5bcb1637 <nl> mmm / dev / null <nl> ppp b / src / csharp / Grpc . IntegrationTesting . Server / Grpc . IntegrationTesting . Server . project . json <nl> <nl> + { <nl> + " frameworks " : { <nl> + " net45 " : { } <nl> + } , <nl> + " runtimes " : { <nl> + " win " : { } <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 689eb0b8425 <nl> mmm / dev / null <nl> ppp b / src / csharp / Grpc . IntegrationTesting . Server / Grpc . IntegrationTesting . Server . xproj <nl> <nl> + < ? xml version = " 1 . 0 " encoding = " utf - 8 " ? > <nl> + < Project ToolsVersion = " 14 . 0 . 25123 " DefaultTargets = " Build " xmlns = " http : / / schemas . microsoft . com / developer / msbuild / 2003 " > <nl> + < PropertyGroup > <nl> + < VisualStudioVersion Condition = " ' $ ( VisualStudioVersion ) ' = = ' ' " > 14 . 0 . 25123 < / VisualStudioVersion > <nl> + < VSToolsPath Condition = " ' $ ( VSToolsPath ) ' = = ' ' " > $ ( MSBuildExtensionsPath32 ) \ Microsoft \ VisualStudio \ v $ ( VisualStudioVersion ) < / VSToolsPath > <nl> + < / PropertyGroup > <nl> + < Import Project = " $ ( VSToolsPath ) \ DNX \ Microsoft . DNX . Props " Condition = " ' $ ( VSToolsPath ) ' ! = ' ' " / > <nl> + < PropertyGroup Label = " Globals " > <nl> + < ProjectGuid > 881f7ad1 - a84e - 47a2 - 9402 - 115c63c4031e < / ProjectGuid > <nl> + < RootNamespace > Grpc . IntegrationTesting . Server < / RootNamespace > <nl> + < BaseIntermediateOutputPath Condition = " ' $ ( BaseIntermediateOutputPath ) ' = = ' ' " > . . \ artifacts \ obj \ $ ( MSBuildProjectName ) < / BaseIntermediateOutputPath > <nl> + < OutputPath Condition = " ' $ ( OutputPath ) ' = = ' ' " > . \ bin \ < / OutputPath > <nl> + < / PropertyGroup > <nl> + < PropertyGroup > <nl> + < SchemaVersion > 2 . 0 < / SchemaVersion > <nl> + < / PropertyGroup > <nl> + < Import Project = " $ ( VSToolsPath ) \ DNX \ Microsoft . DNX . targets " Condition = " ' $ ( VSToolsPath ) ' ! = ' ' " / > <nl> + < / Project > <nl> \ No newline at end of file <nl> new file mode 100644 <nl> index 00000000000 . . e5ba04d7173 <nl> mmm / dev / null <nl> ppp b / src / csharp / Grpc . IntegrationTesting . Server / project . json <nl> <nl> + { <nl> + " buildOptions " : { <nl> + " emitEntryPoint " : true <nl> + } , <nl> + " configurations " : { <nl> + " Debug " : { <nl> + " buildOptions " : { <nl> + " copyToOutput " : { <nl> + " include " : " data / * " , <nl> + " mappings " : { <nl> + " nativelibs / windows_x64 / grpc_csharp_ext . dll " : " . . / . . / . . / vsprojects / x64 / Debug / grpc_csharp_ext . dll " , <nl> + " nativelibs / windows_x86 / grpc_csharp_ext . dll " : " . . / . . / . . / vsprojects / Debug / grpc_csharp_ext . dll " , <nl> + " nativelibs / linux_x64 / libgrpc_csharp_ext . so " : " . . / . . / . . / libs / dbg / libgrpc_csharp_ext . so " , <nl> + " nativelibs / macosx_x64 / libgrpc_csharp_ext . dylib " : " . . / . . / . . / libs / dbg / libgrpc_csharp_ext . dylib " <nl> + } <nl> + } <nl> + } <nl> + } , <nl> + " Release " : { <nl> + " buildOptions " : { <nl> + " copyToOutput " : { <nl> + " include " : " data / * " , <nl> + " mappings " : { <nl> + " nativelibs / windows_x64 / grpc_csharp_ext . dll " : " . . / . . / . . / vsprojects / x64 / Release / grpc_csharp_ext . dll " , <nl> + " nativelibs / windows_x86 / grpc_csharp_ext . dll " : " . . / . . / . . / vsprojects / Release / grpc_csharp_ext . 
dll " , <nl> + " nativelibs / linux_x64 / libgrpc_csharp_ext . so " : " . . / . . / . . / libs / opt / libgrpc_csharp_ext . so " , <nl> + " nativelibs / macosx_x64 / libgrpc_csharp_ext . dylib " : " . . / . . / . . / libs / opt / libgrpc_csharp_ext . dylib " <nl> + } <nl> + } <nl> + } <nl> + } <nl> + } , <nl> + " runtimes " : { <nl> + " win7 - x64 " : { } , <nl> + " debian . 8 - x64 " : { } , <nl> + " osx . 10 . 11 - x64 " : { } <nl> + } , <nl> + <nl> + " dependencies " : { <nl> + " Grpc . IntegrationTesting " : { <nl> + " target " : " project " <nl> + } <nl> + } , <nl> + " frameworks " : { <nl> + " net45 " : { } , <nl> + " netstandard1 . 5 " : { <nl> + " imports " : [ <nl> + " portable - net45 " , <nl> + " net45 " <nl> + ] , <nl> + " dependencies " : { <nl> + " NETStandard . Library " : " 1 . 5 . 0 - rc2 - 24027 " <nl> + } <nl> + } <nl> + } <nl> + } <nl> mmm a / src / csharp / Grpc . IntegrationTesting . StressClient / Grpc . IntegrationTesting . StressClient . csproj <nl> ppp b / src / csharp / Grpc . IntegrationTesting . StressClient / Grpc . IntegrationTesting . StressClient . csproj <nl> <nl> - < ? xml version = " 1 . 0 " encoding = " utf - 8 " ? > <nl> + < ? xml version = " 1 . 0 " encoding = " utf - 8 " ? > <nl> < Project DefaultTargets = " Build " ToolsVersion = " 4 . 0 " xmlns = " http : / / schemas . microsoft . com / developer / msbuild / 2003 " > <nl> < PropertyGroup > <nl> < Configuration Condition = " ' $ ( Configuration ) ' = = ' ' " > Debug < / Configuration > <nl> <nl> < Name > Grpc . IntegrationTesting < / Name > <nl> < / ProjectReference > <nl> < / ItemGroup > <nl> + < ItemGroup > <nl> + < None Include = " Grpc . IntegrationTesting . StressClient . project . json " / > <nl> + < / ItemGroup > <nl> < / Project > <nl> \ No newline at end of file <nl> new file mode 100644 <nl> index 00000000000 . . c2f5bcb1637 <nl> mmm / dev / null <nl> ppp b / src / csharp / Grpc . IntegrationTesting . StressClient / Grpc . IntegrationTesting . StressClient . project . json <nl> <nl> + { <nl> + " frameworks " : { <nl> + " net45 " : { } <nl> + } , <nl> + " runtimes " : { <nl> + " win " : { } <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 2f4fdcbb470 <nl> mmm / dev / null <nl> ppp b / src / csharp / Grpc . IntegrationTesting . StressClient / Grpc . IntegrationTesting . StressClient . xproj <nl> <nl> + < ? xml version = " 1 . 0 " encoding = " utf - 8 " ? > <nl> + < Project ToolsVersion = " 14 . 0 . 25123 " DefaultTargets = " Build " xmlns = " http : / / schemas . microsoft . com / developer / msbuild / 2003 " > <nl> + < PropertyGroup > <nl> + < VisualStudioVersion Condition = " ' $ ( VisualStudioVersion ) ' = = ' ' " > 14 . 0 . 25123 < / VisualStudioVersion > <nl> + < VSToolsPath Condition = " ' $ ( VSToolsPath ) ' = = ' ' " > $ ( MSBuildExtensionsPath32 ) \ Microsoft \ VisualStudio \ v $ ( VisualStudioVersion ) < / VSToolsPath > <nl> + < / PropertyGroup > <nl> + < Import Project = " $ ( VSToolsPath ) \ DotNet \ Microsoft . DotNet . Props " Condition = " ' $ ( VSToolsPath ) ' ! = ' ' " / > <nl> + < PropertyGroup Label = " Globals " > <nl> + < ProjectGuid > 0ebc910b - 8867 - 4d3e - 8686 - 91f34183d839 < / ProjectGuid > <nl> + < RootNamespace > Grpc . IntegrationTesting . StressClient < / RootNamespace > <nl> + < BaseIntermediateOutputPath Condition = " ' $ ( BaseIntermediateOutputPath ) ' = = ' ' " > . \ obj < / BaseIntermediateOutputPath > <nl> + < OutputPath Condition = " ' $ ( OutputPath ) ' = = ' ' " > . 
\ bin \ < / OutputPath > <nl> + < / PropertyGroup > <nl> + <nl> + < PropertyGroup > <nl> + < SchemaVersion > 2 . 0 < / SchemaVersion > <nl> + < / PropertyGroup > <nl> + < Import Project = " $ ( VSToolsPath ) \ DotNet \ Microsoft . DotNet . targets " Condition = " ' $ ( VSToolsPath ) ' ! = ' ' " / > <nl> + < / Project > <nl> \ No newline at end of file <nl> new file mode 100644 <nl> index 00000000000 . . e5ba04d7173 <nl> mmm / dev / null <nl> ppp b / src / csharp / Grpc . IntegrationTesting . StressClient / project . json <nl> <nl> + { <nl> + " buildOptions " : { <nl> + " emitEntryPoint " : true <nl> + } , <nl> + " configurations " : { <nl> + " Debug " : { <nl> + " buildOptions " : { <nl> + " copyToOutput " : { <nl> + " include " : " data / * " , <nl> + " mappings " : { <nl> + " nativelibs / windows_x64 / grpc_csharp_ext . dll " : " . . / . . / . . / vsprojects / x64 / Debug / grpc_csharp_ext . dll " , <nl> + " nativelibs / windows_x86 / grpc_csharp_ext . dll " : " . . / . . / . . / vsprojects / Debug / grpc_csharp_ext . dll " , <nl> + " nativelibs / linux_x64 / libgrpc_csharp_ext . so " : " . . / . . / . . / libs / dbg / libgrpc_csharp_ext . so " , <nl> + " nativelibs / macosx_x64 / libgrpc_csharp_ext . dylib " : " . . / . . / . . / libs / dbg / libgrpc_csharp_ext . dylib " <nl> + } <nl> + } <nl> + } <nl> + } , <nl> + " Release " : { <nl> + " buildOptions " : { <nl> + " copyToOutput " : { <nl> + " include " : " data / * " , <nl> + " mappings " : { <nl> + " nativelibs / windows_x64 / grpc_csharp_ext . dll " : " . . / . . / . . / vsprojects / x64 / Release / grpc_csharp_ext . dll " , <nl> + " nativelibs / windows_x86 / grpc_csharp_ext . dll " : " . . / . . / . . / vsprojects / Release / grpc_csharp_ext . dll " , <nl> + " nativelibs / linux_x64 / libgrpc_csharp_ext . so " : " . . / . . / . . / libs / opt / libgrpc_csharp_ext . so " , <nl> + " nativelibs / macosx_x64 / libgrpc_csharp_ext . dylib " : " . . / . . / . . / libs / opt / libgrpc_csharp_ext . dylib " <nl> + } <nl> + } <nl> + } <nl> + } <nl> + } , <nl> + " runtimes " : { <nl> + " win7 - x64 " : { } , <nl> + " debian . 8 - x64 " : { } , <nl> + " osx . 10 . 11 - x64 " : { } <nl> + } , <nl> + <nl> + " dependencies " : { <nl> + " Grpc . IntegrationTesting " : { <nl> + " target " : " project " <nl> + } <nl> + } , <nl> + " frameworks " : { <nl> + " net45 " : { } , <nl> + " netstandard1 . 5 " : { <nl> + " imports " : [ <nl> + " portable - net45 " , <nl> + " net45 " <nl> + ] , <nl> + " dependencies " : { <nl> + " NETStandard . Library " : " 1 . 5 . 0 - rc2 - 24027 " <nl> + } <nl> + } <nl> + } <nl> + } <nl> mmm a / src / csharp / Grpc . IntegrationTesting / GeneratedClientTest . cs <nl> ppp b / src / csharp / Grpc . IntegrationTesting / GeneratedClientTest . cs <nl> <nl> using Grpc . Core ; <nl> using Grpc . Core . Utils ; <nl> using Grpc . Testing ; <nl> - using Moq ; <nl> using NUnit . Framework ; <nl> <nl> namespace Grpc . IntegrationTesting <nl> public class GeneratedClientTest <nl> { <nl> TestService . TestServiceClient unimplementedClient = new UnimplementedTestServiceClient ( ) ; <nl> <nl> + / / TODO : replace Moq by some mocking library with CoreCLR support . <nl> + # if ! NETSTANDARD1_5 <nl> [ Test ] <nl> public void ExpandedParamOverloadCanBeMocked ( ) <nl> { <nl> var expected = new SimpleResponse ( ) ; <nl> <nl> - var mockClient = new Mock < TestService . TestServiceClient > ( ) ; <nl> + var mockClient = new Moq . Mock < TestService . 
TestServiceClient > ( ) ; <nl> / / mocking is relatively clumsy because one needs to specify value for all the optional params . <nl> - mockClient . Setup ( m = > m . UnaryCall ( It . IsAny < SimpleRequest > ( ) , null , null , CancellationToken . None ) ) . Returns ( expected ) ; <nl> + mockClient . Setup ( m = > m . UnaryCall ( Moq . It . IsAny < SimpleRequest > ( ) , null , null , CancellationToken . None ) ) . Returns ( expected ) ; <nl> <nl> Assert . AreSame ( expected , mockClient . Object . UnaryCall ( new SimpleRequest ( ) ) ) ; <nl> } <nl> public void CallOptionsOverloadCanBeMocked ( ) <nl> { <nl> var expected = new SimpleResponse ( ) ; <nl> <nl> - var mockClient = new Mock < TestService . TestServiceClient > ( ) ; <nl> - mockClient . Setup ( m = > m . UnaryCall ( It . IsAny < SimpleRequest > ( ) , It . IsAny < CallOptions > ( ) ) ) . Returns ( expected ) ; <nl> + var mockClient = new Moq . Mock < TestService . TestServiceClient > ( ) ; <nl> + mockClient . Setup ( m = > m . UnaryCall ( Moq . It . IsAny < SimpleRequest > ( ) , Moq . It . IsAny < CallOptions > ( ) ) ) . Returns ( expected ) ; <nl> <nl> Assert . AreSame ( expected , mockClient . Object . UnaryCall ( new SimpleRequest ( ) , new CallOptions ( ) ) ) ; <nl> } <nl> + # endif <nl> <nl> [ Test ] <nl> public void DefaultMethodStubThrows_UnaryCall ( ) <nl> mmm a / src / csharp / Grpc . IntegrationTesting / GeneratedServiceBaseTest . cs <nl> ppp b / src / csharp / Grpc . IntegrationTesting / GeneratedServiceBaseTest . cs <nl> <nl> using Grpc . Core ; <nl> using Grpc . Core . Utils ; <nl> using Grpc . Testing ; <nl> - using Moq ; <nl> using NUnit . Framework ; <nl> <nl> namespace Grpc . IntegrationTesting <nl> mmm a / src / csharp / Grpc . IntegrationTesting / Grpc . IntegrationTesting . csproj <nl> ppp b / src / csharp / Grpc . IntegrationTesting / Grpc . IntegrationTesting . csproj <nl> <nl> < / ProjectReference > <nl> < / ItemGroup > <nl> < ItemGroup > <nl> + < None Include = " Grpc . IntegrationTesting . project . json " / > <nl> < None Include = " packages . config " > <nl> < SubType > Designer < / SubType > <nl> < / None > <nl> new file mode 100644 <nl> index 00000000000 . . c2f5bcb1637 <nl> mmm / dev / null <nl> ppp b / src / csharp / Grpc . IntegrationTesting / Grpc . IntegrationTesting . project . json <nl> <nl> + { <nl> + " frameworks " : { <nl> + " net45 " : { } <nl> + } , <nl> + " runtimes " : { <nl> + " win " : { } <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 357300ecb9b <nl> mmm / dev / null <nl> ppp b / src / csharp / Grpc . IntegrationTesting / Grpc . IntegrationTesting . xproj <nl> <nl> + < ? xml version = " 1 . 0 " encoding = " utf - 8 " ? > <nl> + < Project ToolsVersion = " 14 . 0 . 25123 " DefaultTargets = " Build " xmlns = " http : / / schemas . microsoft . com / developer / msbuild / 2003 " > <nl> + < PropertyGroup > <nl> + < VisualStudioVersion Condition = " ' $ ( VisualStudioVersion ) ' = = ' ' " > 14 . 0 . 25123 < / VisualStudioVersion > <nl> + < VSToolsPath Condition = " ' $ ( VSToolsPath ) ' = = ' ' " > $ ( MSBuildExtensionsPath32 ) \ Microsoft \ VisualStudio \ v $ ( VisualStudioVersion ) < / VSToolsPath > <nl> + < / PropertyGroup > <nl> + < Import Project = " $ ( VSToolsPath ) \ DNX \ Microsoft . DNX . Props " Condition = " ' $ ( VSToolsPath ) ' ! = ' ' " / > <nl> + < PropertyGroup Label = " Globals " > <nl> + < ProjectGuid > 20354386 - 3e71 - 4046 - a269 - 3bc2a06f3ec8 < / ProjectGuid > <nl> + < RootNamespace > Grpc . 
IntegrationTesting < / RootNamespace > <nl> + < BaseIntermediateOutputPath Condition = " ' $ ( BaseIntermediateOutputPath ) ' = = ' ' " > . . \ artifacts \ obj \ $ ( MSBuildProjectName ) < / BaseIntermediateOutputPath > <nl> + < OutputPath Condition = " ' $ ( OutputPath ) ' = = ' ' " > . \ bin \ < / OutputPath > <nl> + < / PropertyGroup > <nl> + < PropertyGroup > <nl> + < SchemaVersion > 2 . 0 < / SchemaVersion > <nl> + < / PropertyGroup > <nl> + < Import Project = " $ ( VSToolsPath ) \ DNX \ Microsoft . DNX . targets " Condition = " ' $ ( VSToolsPath ) ' ! = ' ' " / > <nl> + < / Project > <nl> \ No newline at end of file <nl> mmm a / src / csharp / Grpc . IntegrationTesting / InteropClient . cs <nl> ppp b / src / csharp / Grpc . IntegrationTesting / InteropClient . cs <nl> private async Task < ChannelCredentials > CreateCredentialsAsync ( ) <nl> <nl> if ( options . TestCase = = " jwt_token_creds " ) <nl> { <nl> + # if ! NETSTANDARD1_5 <nl> var googleCredential = await GoogleCredential . GetApplicationDefaultAsync ( ) ; <nl> Assert . IsTrue ( googleCredential . IsCreateScopedRequired ) ; <nl> credentials = ChannelCredentials . Create ( credentials , googleCredential . ToCallCredentials ( ) ) ; <nl> + # else <nl> + / / TODO ( jtattermusch ) : implement this <nl> + throw new NotImplementedException ( " Not supported on CoreCLR yet " ) ; <nl> + # endif <nl> } <nl> <nl> if ( options . TestCase = = " compute_engine_creds " ) <nl> { <nl> + # if ! NETSTANDARD1_5 <nl> var googleCredential = await GoogleCredential . GetApplicationDefaultAsync ( ) ; <nl> Assert . IsFalse ( googleCredential . IsCreateScopedRequired ) ; <nl> credentials = ChannelCredentials . Create ( credentials , googleCredential . ToCallCredentials ( ) ) ; <nl> + # else <nl> + / / TODO ( jtattermusch ) : implement this <nl> + throw new NotImplementedException ( " Not supported on CoreCLR yet " ) ; <nl> + # endif <nl> } <nl> return credentials ; <nl> } <nl> private async Task RunTestCaseAsync ( Channel channel , ClientOptions options ) <nl> case " unimplemented_method " : <nl> RunUnimplementedMethod ( new UnimplementedService . UnimplementedServiceClient ( channel ) ) ; <nl> break ; <nl> + case " client_compressed_unary " : <nl> + RunClientCompressedUnary ( client ) ; <nl> + break ; <nl> + case " client_compressed_streaming " : <nl> + await RunClientCompressedStreamingAsync ( client ) ; <nl> + break ; <nl> default : <nl> throw new ArgumentException ( " Unknown test case " + options . TestCase ) ; <nl> } <nl> public static void RunLargeUnary ( TestService . TestServiceClient client ) <nl> Console . WriteLine ( " running large_unary " ) ; <nl> var request = new SimpleRequest <nl> { <nl> - ResponseType = PayloadType . Compressable , <nl> ResponseSize = 314159 , <nl> Payload = CreateZerosPayload ( 271828 ) <nl> } ; <nl> var response = client . UnaryCall ( request ) ; <nl> <nl> - Assert . AreEqual ( PayloadType . Compressable , response . Payload . Type ) ; <nl> Assert . AreEqual ( 314159 , response . Payload . Body . Length ) ; <nl> Console . WriteLine ( " Passed ! " ) ; <nl> } <nl> public static async Task RunClientStreamingAsync ( TestService . TestServiceClient c <nl> { <nl> Console . WriteLine ( " running client_streaming " ) ; <nl> <nl> - var bodySizes = new List < int > { 27182 , 8 , 1828 , 45904 } . ConvertAll ( ( size ) = > new StreamingInputCallRequest { Payload = CreateZerosPayload ( size ) } ) ; <nl> + var bodySizes = new List < int > { 27182 , 8 , 1828 , 45904 } . 
Select ( ( size ) = > new StreamingInputCallRequest { Payload = CreateZerosPayload ( size ) } ) ; <nl> <nl> using ( var call = client . StreamingInputCall ( ) ) <nl> { <nl> public static async Task RunServerStreamingAsync ( TestService . TestServiceClient c <nl> <nl> var request = new StreamingOutputCallRequest <nl> { <nl> - ResponseType = PayloadType . Compressable , <nl> - ResponseParameters = { bodySizes . ConvertAll ( ( size ) = > new ResponseParameters { Size = size } ) } <nl> + ResponseParameters = { bodySizes . Select ( ( size ) = > new ResponseParameters { Size = size } ) } <nl> } ; <nl> <nl> using ( var call = client . StreamingOutputCall ( request ) ) <nl> { <nl> var responseList = await call . ResponseStream . ToListAsync ( ) ; <nl> - foreach ( var res in responseList ) <nl> - { <nl> - Assert . AreEqual ( PayloadType . Compressable , res . Payload . Type ) ; <nl> - } <nl> - CollectionAssert . AreEqual ( bodySizes , responseList . ConvertAll ( ( item ) = > item . Payload . Body . Length ) ) ; <nl> + CollectionAssert . AreEqual ( bodySizes , responseList . Select ( ( item ) = > item . Payload . Body . Length ) ) ; <nl> } <nl> Console . WriteLine ( " Passed ! " ) ; <nl> } <nl> public static async Task RunPingPongAsync ( TestService . TestServiceClient client ) <nl> { <nl> await call . RequestStream . WriteAsync ( new StreamingOutputCallRequest <nl> { <nl> - ResponseType = PayloadType . Compressable , <nl> ResponseParameters = { new ResponseParameters { Size = 31415 } } , <nl> Payload = CreateZerosPayload ( 27182 ) <nl> } ) ; <nl> <nl> Assert . IsTrue ( await call . ResponseStream . MoveNext ( ) ) ; <nl> - Assert . AreEqual ( PayloadType . Compressable , call . ResponseStream . Current . Payload . Type ) ; <nl> Assert . AreEqual ( 31415 , call . ResponseStream . Current . Payload . Body . Length ) ; <nl> <nl> await call . RequestStream . WriteAsync ( new StreamingOutputCallRequest <nl> { <nl> - ResponseType = PayloadType . Compressable , <nl> ResponseParameters = { new ResponseParameters { Size = 9 } } , <nl> Payload = CreateZerosPayload ( 8 ) <nl> } ) ; <nl> <nl> Assert . IsTrue ( await call . ResponseStream . MoveNext ( ) ) ; <nl> - Assert . AreEqual ( PayloadType . Compressable , call . ResponseStream . Current . Payload . Type ) ; <nl> Assert . AreEqual ( 9 , call . ResponseStream . Current . Payload . Body . Length ) ; <nl> <nl> await call . RequestStream . WriteAsync ( new StreamingOutputCallRequest <nl> { <nl> - ResponseType = PayloadType . Compressable , <nl> ResponseParameters = { new ResponseParameters { Size = 2653 } } , <nl> Payload = CreateZerosPayload ( 1828 ) <nl> } ) ; <nl> <nl> Assert . IsTrue ( await call . ResponseStream . MoveNext ( ) ) ; <nl> - Assert . AreEqual ( PayloadType . Compressable , call . ResponseStream . Current . Payload . Type ) ; <nl> Assert . AreEqual ( 2653 , call . ResponseStream . Current . Payload . Body . Length ) ; <nl> <nl> await call . RequestStream . WriteAsync ( new StreamingOutputCallRequest <nl> { <nl> - ResponseType = PayloadType . Compressable , <nl> ResponseParameters = { new ResponseParameters { Size = 58979 } } , <nl> Payload = CreateZerosPayload ( 45904 ) <nl> } ) ; <nl> <nl> Assert . IsTrue ( await call . ResponseStream . MoveNext ( ) ) ; <nl> - Assert . AreEqual ( PayloadType . Compressable , call . ResponseStream . Current . Payload . Type ) ; <nl> Assert . AreEqual ( 58979 , call . ResponseStream . Current . Payload . Body . Length ) ; <nl> <nl> await call . RequestStream . 
CompleteAsync ( ) ; <nl> public static void RunComputeEngineCreds ( TestService . TestServiceClient client , s <nl> <nl> var request = new SimpleRequest <nl> { <nl> - ResponseType = PayloadType . Compressable , <nl> ResponseSize = 314159 , <nl> Payload = CreateZerosPayload ( 271828 ) , <nl> FillUsername = true , <nl> public static void RunComputeEngineCreds ( TestService . TestServiceClient client , s <nl> / / not setting credentials here because they were set on channel already <nl> var response = client . UnaryCall ( request ) ; <nl> <nl> - Assert . AreEqual ( PayloadType . Compressable , response . Payload . Type ) ; <nl> Assert . AreEqual ( 314159 , response . Payload . Body . Length ) ; <nl> Assert . False ( string . IsNullOrEmpty ( response . OauthScope ) ) ; <nl> Assert . True ( oauthScope . Contains ( response . OauthScope ) ) ; <nl> public static void RunJwtTokenCreds ( TestService . TestServiceClient client ) <nl> <nl> var request = new SimpleRequest <nl> { <nl> - ResponseType = PayloadType . Compressable , <nl> ResponseSize = 314159 , <nl> Payload = CreateZerosPayload ( 271828 ) , <nl> FillUsername = true , <nl> public static void RunJwtTokenCreds ( TestService . TestServiceClient client ) <nl> / / not setting credentials here because they were set on channel already <nl> var response = client . UnaryCall ( request ) ; <nl> <nl> - Assert . AreEqual ( PayloadType . Compressable , response . Payload . Type ) ; <nl> Assert . AreEqual ( 314159 , response . Payload . Body . Length ) ; <nl> Assert . AreEqual ( GetEmailFromServiceAccountFile ( ) , response . Username ) ; <nl> Console . WriteLine ( " Passed ! " ) ; <nl> public static void RunJwtTokenCreds ( TestService . TestServiceClient client ) <nl> <nl> public static async Task RunOAuth2AuthTokenAsync ( TestService . TestServiceClient client , string oauthScope ) <nl> { <nl> + # if ! NETSTANDARD1_5 <nl> Console . WriteLine ( " running oauth2_auth_token " ) ; <nl> ITokenAccess credential = ( await GoogleCredential . GetApplicationDefaultAsync ( ) ) . CreateScoped ( new [ ] { oauthScope } ) ; <nl> string oauth2Token = await credential . GetAccessTokenForRequestAsync ( ) ; <nl> public static async Task RunOAuth2AuthTokenAsync ( TestService . TestServiceClient c <nl> Assert . True ( oauthScope . Contains ( response . OauthScope ) ) ; <nl> Assert . AreEqual ( GetEmailFromServiceAccountFile ( ) , response . Username ) ; <nl> Console . WriteLine ( " Passed ! " ) ; <nl> + # else <nl> + / / TODO ( jtattermusch ) : implement this <nl> + throw new NotImplementedException ( " Not supported on CoreCLR yet " ) ; <nl> + # endif <nl> } <nl> <nl> public static async Task RunPerRpcCredsAsync ( TestService . TestServiceClient client , string oauthScope ) <nl> { <nl> + # if ! NETSTANDARD1_5 <nl> Console . WriteLine ( " running per_rpc_creds " ) ; <nl> ITokenAccess googleCredential = await GoogleCredential . GetApplicationDefaultAsync ( ) ; <nl> <nl> public static async Task RunPerRpcCredsAsync ( TestService . TestServiceClient clien <nl> <nl> Assert . AreEqual ( GetEmailFromServiceAccountFile ( ) , response . Username ) ; <nl> Console . WriteLine ( " Passed ! " ) ; <nl> + # else <nl> + / / TODO ( jtattermusch ) : implement this <nl> + throw new NotImplementedException ( " Not supported on CoreCLR yet " ) ; <nl> + # endif <nl> } <nl> <nl> public static async Task RunCancelAfterBeginAsync ( TestService . TestServiceClient client ) <nl> public static async Task RunCancelAfterFirstResponseAsync ( TestService . TestServic <nl> { <nl> await call . 
RequestStream . WriteAsync ( new StreamingOutputCallRequest <nl> { <nl> - ResponseType = PayloadType . Compressable , <nl> ResponseParameters = { new ResponseParameters { Size = 31415 } } , <nl> Payload = CreateZerosPayload ( 27182 ) <nl> } ) ; <nl> <nl> Assert . IsTrue ( await call . ResponseStream . MoveNext ( ) ) ; <nl> - Assert . AreEqual ( PayloadType . Compressable , call . ResponseStream . Current . Payload . Type ) ; <nl> Assert . AreEqual ( 31415 , call . ResponseStream . Current . Payload . Body . Length ) ; <nl> <nl> cts . Cancel ( ) ; <nl> public static async Task RunCustomMetadataAsync ( TestService . TestServiceClient cl <nl> / / step 1 : test unary call <nl> var request = new SimpleRequest <nl> { <nl> - ResponseType = PayloadType . Compressable , <nl> ResponseSize = 314159 , <nl> Payload = CreateZerosPayload ( 271828 ) <nl> } ; <nl> public static async Task RunCustomMetadataAsync ( TestService . TestServiceClient cl <nl> / / step 2 : test full duplex call <nl> var request = new StreamingOutputCallRequest <nl> { <nl> - ResponseType = PayloadType . Compressable , <nl> ResponseParameters = { new ResponseParameters { Size = 31415 } } , <nl> Payload = CreateZerosPayload ( 27182 ) <nl> } ; <nl> public static void RunUnimplementedMethod ( UnimplementedService . UnimplementedServ <nl> Console . WriteLine ( " Passed ! " ) ; <nl> } <nl> <nl> + public static void RunClientCompressedUnary ( TestService . TestServiceClient client ) <nl> + { <nl> + Console . WriteLine ( " running client_compressed_unary " ) ; <nl> + var probeRequest = new SimpleRequest <nl> + { <nl> + ExpectCompressed = new BoolValue <nl> + { <nl> + Value = true / / lie about compression <nl> + } , <nl> + ResponseSize = 314159 , <nl> + Payload = CreateZerosPayload ( 271828 ) <nl> + } ; <nl> + var e = Assert . Throws < RpcException > ( ( ) = > client . UnaryCall ( probeRequest , CreateClientCompressionMetadata ( false ) ) ) ; <nl> + Assert . AreEqual ( StatusCode . InvalidArgument , e . Status . StatusCode ) ; <nl> + <nl> + var compressedRequest = new SimpleRequest <nl> + { <nl> + ExpectCompressed = new BoolValue <nl> + { <nl> + Value = true <nl> + } , <nl> + ResponseSize = 314159 , <nl> + Payload = CreateZerosPayload ( 271828 ) <nl> + } ; <nl> + var response1 = client . UnaryCall ( compressedRequest , CreateClientCompressionMetadata ( true ) ) ; <nl> + Assert . AreEqual ( 314159 , response1 . Payload . Body . Length ) ; <nl> + <nl> + var uncompressedRequest = new SimpleRequest <nl> + { <nl> + ExpectCompressed = new BoolValue <nl> + { <nl> + Value = false <nl> + } , <nl> + ResponseSize = 314159 , <nl> + Payload = CreateZerosPayload ( 271828 ) <nl> + } ; <nl> + var response2 = client . UnaryCall ( uncompressedRequest , CreateClientCompressionMetadata ( false ) ) ; <nl> + Assert . AreEqual ( 314159 , response2 . Payload . Body . Length ) ; <nl> + <nl> + Console . WriteLine ( " Passed ! " ) ; <nl> + } <nl> + <nl> + public static async Task RunClientCompressedStreamingAsync ( TestService . TestServiceClient client ) <nl> + { <nl> + Console . WriteLine ( " running client_compressed_streaming " ) ; <nl> + try <nl> + { <nl> + var probeCall = client . StreamingInputCall ( CreateClientCompressionMetadata ( false ) ) ; <nl> + await probeCall . RequestStream . WriteAsync ( new StreamingInputCallRequest <nl> + { <nl> + ExpectCompressed = new BoolValue <nl> + { <nl> + Value = true <nl> + } , <nl> + Payload = CreateZerosPayload ( 27182 ) <nl> + } ) ; <nl> + <nl> + / / cannot use Assert . ThrowsAsync because it uses Task . 
Wait and would deadlock . <nl> + await probeCall ; <nl> + Assert . Fail ( ) ; <nl> + } <nl> + catch ( RpcException e ) <nl> + { <nl> + Assert . AreEqual ( StatusCode . InvalidArgument , e . Status . StatusCode ) ; <nl> + } <nl> + <nl> + var call = client . StreamingInputCall ( CreateClientCompressionMetadata ( true ) ) ; <nl> + await call . RequestStream . WriteAsync ( new StreamingInputCallRequest <nl> + { <nl> + ExpectCompressed = new BoolValue <nl> + { <nl> + Value = true <nl> + } , <nl> + Payload = CreateZerosPayload ( 27182 ) <nl> + } ) ; <nl> + <nl> + call . RequestStream . WriteOptions = new WriteOptions ( WriteFlags . NoCompress ) ; <nl> + await call . RequestStream . WriteAsync ( new StreamingInputCallRequest <nl> + { <nl> + ExpectCompressed = new BoolValue <nl> + { <nl> + Value = false <nl> + } , <nl> + Payload = CreateZerosPayload ( 45904 ) <nl> + } ) ; <nl> + await call . RequestStream . CompleteAsync ( ) ; <nl> + <nl> + var response = await call . ResponseAsync ; <nl> + Assert . AreEqual ( 73086 , response . AggregatedPayloadSize ) ; <nl> + <nl> + Console . WriteLine ( " Passed ! " ) ; <nl> + } <nl> + <nl> private static Payload CreateZerosPayload ( int size ) <nl> { <nl> return new Payload { Body = ByteString . CopyFrom ( new byte [ size ] ) } ; <nl> } <nl> <nl> + private static Metadata CreateClientCompressionMetadata ( bool compressed ) <nl> + { <nl> + var algorithmName = compressed ? " gzip " : " identity " ; <nl> + return new Metadata <nl> + { <nl> + { new Metadata . Entry ( Metadata . CompressionRequestAlgorithmMetadataKey , algorithmName ) } <nl> + } ; <nl> + } <nl> + <nl> / / extracts the client_email field from service account file used for auth test cases <nl> private static string GetEmailFromServiceAccountFile ( ) <nl> { <nl> + # if ! NETSTANDARD1_5 <nl> string keyFile = Environment . GetEnvironmentVariable ( " GOOGLE_APPLICATION_CREDENTIALS " ) ; <nl> Assert . IsNotNull ( keyFile ) ; <nl> - <nl> var jobject = JObject . Parse ( File . ReadAllText ( keyFile ) ) ; <nl> string email = jobject . GetValue ( " client_email " ) . Value < string > ( ) ; <nl> Assert . IsTrue ( email . Length > 0 ) ; / / spec requires nonempty client email . <nl> return email ; <nl> + # else <nl> + / / TODO ( jtattermusch ) : implement this <nl> + throw new NotImplementedException ( " Not supported on CoreCLR yet " ) ; <nl> + # endif <nl> } <nl> <nl> private static Metadata CreateTestMetadata ( ) <nl> mmm a / src / csharp / Grpc . IntegrationTesting / Messages . cs <nl> ppp b / src / csharp / Grpc . IntegrationTesting / Messages . cs <nl> public static partial class MessagesReflection { <nl> byte [ ] descriptorData = global : : System . Convert . FromBase64String ( <nl> string . 
Concat ( <nl> " CiVzcmMvcHJvdG8vZ3JwYy90ZXN0aW5nL21lc3NhZ2VzLnByb3RvEgxncnBj " , <nl> - " LnRlc3RpbmciQAoHUGF5bG9hZBInCgR0eXBlGAEgASgOMhkuZ3JwYy50ZXN0 " , <nl> - " aW5nLlBheWxvYWRUeXBlEgwKBGJvZHkYAiABKAwiKwoKRWNob1N0YXR1cxIM " , <nl> - " CgRjb2RlGAEgASgFEg8KB21lc3NhZ2UYAiABKAkioQIKDVNpbXBsZVJlcXVl " , <nl> - " c3QSMAoNcmVzcG9uc2VfdHlwZRgBIAEoDjIZLmdycGMudGVzdGluZy5QYXls " , <nl> - " b2FkVHlwZRIVCg1yZXNwb25zZV9zaXplGAIgASgFEiYKB3BheWxvYWQYAyAB " , <nl> - " KAsyFS5ncnBjLnRlc3RpbmcuUGF5bG9hZBIVCg1maWxsX3VzZXJuYW1lGAQg " , <nl> - " ASgIEhgKEGZpbGxfb2F1dGhfc2NvcGUYBSABKAgSOwoUcmVzcG9uc2VfY29t " , <nl> - " cHJlc3Npb24YBiABKA4yHS5ncnBjLnRlc3RpbmcuQ29tcHJlc3Npb25UeXBl " , <nl> - " EjEKD3Jlc3BvbnNlX3N0YXR1cxgHIAEoCzIYLmdycGMudGVzdGluZy5FY2hv " , <nl> - " U3RhdHVzIl8KDlNpbXBsZVJlc3BvbnNlEiYKB3BheWxvYWQYASABKAsyFS5n " , <nl> - " cnBjLnRlc3RpbmcuUGF5bG9hZBIQCgh1c2VybmFtZRgCIAEoCRITCgtvYXV0 " , <nl> - " aF9zY29wZRgDIAEoCSJDChlTdHJlYW1pbmdJbnB1dENhbGxSZXF1ZXN0EiYK " , <nl> - " B3BheWxvYWQYASABKAsyFS5ncnBjLnRlc3RpbmcuUGF5bG9hZCI9ChpTdHJl " , <nl> - " YW1pbmdJbnB1dENhbGxSZXNwb25zZRIfChdhZ2dyZWdhdGVkX3BheWxvYWRf " , <nl> - " c2l6ZRgBIAEoBSI3ChJSZXNwb25zZVBhcmFtZXRlcnMSDAoEc2l6ZRgBIAEo " , <nl> - " BRITCgtpbnRlcnZhbF91cxgCIAEoBSKlAgoaU3RyZWFtaW5nT3V0cHV0Q2Fs " , <nl> - " bFJlcXVlc3QSMAoNcmVzcG9uc2VfdHlwZRgBIAEoDjIZLmdycGMudGVzdGlu " , <nl> - " Zy5QYXlsb2FkVHlwZRI9ChNyZXNwb25zZV9wYXJhbWV0ZXJzGAIgAygLMiAu " , <nl> - " Z3JwYy50ZXN0aW5nLlJlc3BvbnNlUGFyYW1ldGVycxImCgdwYXlsb2FkGAMg " , <nl> - " ASgLMhUuZ3JwYy50ZXN0aW5nLlBheWxvYWQSOwoUcmVzcG9uc2VfY29tcHJl " , <nl> - " c3Npb24YBiABKA4yHS5ncnBjLnRlc3RpbmcuQ29tcHJlc3Npb25UeXBlEjEK " , <nl> - " D3Jlc3BvbnNlX3N0YXR1cxgHIAEoCzIYLmdycGMudGVzdGluZy5FY2hvU3Rh " , <nl> - " dHVzIkUKG1N0cmVhbWluZ091dHB1dENhbGxSZXNwb25zZRImCgdwYXlsb2Fk " , <nl> - " GAEgASgLMhUuZ3JwYy50ZXN0aW5nLlBheWxvYWQiMwoPUmVjb25uZWN0UGFy " , <nl> - " YW1zEiAKGG1heF9yZWNvbm5lY3RfYmFja29mZl9tcxgBIAEoBSIzCg1SZWNv " , <nl> - " bm5lY3RJbmZvEg4KBnBhc3NlZBgBIAEoCBISCgpiYWNrb2ZmX21zGAIgAygF " , <nl> - " Kj8KC1BheWxvYWRUeXBlEhAKDENPTVBSRVNTQUJMRRAAEhIKDlVOQ09NUFJF " , <nl> - " U1NBQkxFEAESCgoGUkFORE9NEAIqMgoPQ29tcHJlc3Npb25UeXBlEggKBE5P " , <nl> - " TkUQABIICgRHWklQEAESCwoHREVGTEFURRACYgZwcm90bzM = " ) ) ; <nl> + " LnRlc3RpbmciGgoJQm9vbFZhbHVlEg0KBXZhbHVlGAEgASgIIkAKB1BheWxv " , <nl> + " YWQSJwoEdHlwZRgBIAEoDjIZLmdycGMudGVzdGluZy5QYXlsb2FkVHlwZRIM " , <nl> + " CgRib2R5GAIgASgMIisKCkVjaG9TdGF0dXMSDAoEY29kZRgBIAEoBRIPCgdt " , <nl> + " ZXNzYWdlGAIgASgJIs4CCg1TaW1wbGVSZXF1ZXN0EjAKDXJlc3BvbnNlX3R5 " , <nl> + " cGUYASABKA4yGS5ncnBjLnRlc3RpbmcuUGF5bG9hZFR5cGUSFQoNcmVzcG9u " , <nl> + " c2Vfc2l6ZRgCIAEoBRImCgdwYXlsb2FkGAMgASgLMhUuZ3JwYy50ZXN0aW5n " , <nl> + " LlBheWxvYWQSFQoNZmlsbF91c2VybmFtZRgEIAEoCBIYChBmaWxsX29hdXRo " , <nl> + " X3Njb3BlGAUgASgIEjQKE3Jlc3BvbnNlX2NvbXByZXNzZWQYBiABKAsyFy5n " , <nl> + " cnBjLnRlc3RpbmcuQm9vbFZhbHVlEjEKD3Jlc3BvbnNlX3N0YXR1cxgHIAEo " , <nl> + " CzIYLmdycGMudGVzdGluZy5FY2hvU3RhdHVzEjIKEWV4cGVjdF9jb21wcmVz " , <nl> + " c2VkGAggASgLMhcuZ3JwYy50ZXN0aW5nLkJvb2xWYWx1ZSJfCg5TaW1wbGVS " , <nl> + " ZXNwb25zZRImCgdwYXlsb2FkGAEgASgLMhUuZ3JwYy50ZXN0aW5nLlBheWxv " , <nl> + " YWQSEAoIdXNlcm5hbWUYAiABKAkSEwoLb2F1dGhfc2NvcGUYAyABKAkidwoZ " , <nl> + " U3RyZWFtaW5nSW5wdXRDYWxsUmVxdWVzdBImCgdwYXlsb2FkGAEgASgLMhUu " , <nl> + " Z3JwYy50ZXN0aW5nLlBheWxvYWQSMgoRZXhwZWN0X2NvbXByZXNzZWQYAiAB " , <nl> + " KAsyFy5ncnBjLnRlc3RpbmcuQm9vbFZhbHVlIj0KGlN0cmVhbWluZ0lucHV0 " , <nl> + " Q2FsbFJlc3BvbnNlEh8KF2FnZ3JlZ2F0ZWRfcGF5bG9hZF9zaXplGAEgASgF " , <nl> + " ImQKElJlc3BvbnNlUGFyYW1ldGVycxIMCgRzaXplGAEgASgFEhMKC2ludGVy " , 
<nl> + " dmFsX3VzGAIgASgFEisKCmNvbXByZXNzZWQYAyABKAsyFy5ncnBjLnRlc3Rp " , <nl> + " bmcuQm9vbFZhbHVlIugBChpTdHJlYW1pbmdPdXRwdXRDYWxsUmVxdWVzdBIw " , <nl> + " Cg1yZXNwb25zZV90eXBlGAEgASgOMhkuZ3JwYy50ZXN0aW5nLlBheWxvYWRU " , <nl> + " eXBlEj0KE3Jlc3BvbnNlX3BhcmFtZXRlcnMYAiADKAsyIC5ncnBjLnRlc3Rp " , <nl> + " bmcuUmVzcG9uc2VQYXJhbWV0ZXJzEiYKB3BheWxvYWQYAyABKAsyFS5ncnBj " , <nl> + " LnRlc3RpbmcuUGF5bG9hZBIxCg9yZXNwb25zZV9zdGF0dXMYByABKAsyGC5n " , <nl> + " cnBjLnRlc3RpbmcuRWNob1N0YXR1cyJFChtTdHJlYW1pbmdPdXRwdXRDYWxs " , <nl> + " UmVzcG9uc2USJgoHcGF5bG9hZBgBIAEoCzIVLmdycGMudGVzdGluZy5QYXls " , <nl> + " b2FkIjMKD1JlY29ubmVjdFBhcmFtcxIgChhtYXhfcmVjb25uZWN0X2JhY2tv " , <nl> + " ZmZfbXMYASABKAUiMwoNUmVjb25uZWN0SW5mbxIOCgZwYXNzZWQYASABKAgS " , <nl> + " EgoKYmFja29mZl9tcxgCIAMoBSofCgtQYXlsb2FkVHlwZRIQCgxDT01QUkVT " , <nl> + " U0FCTEUQAGIGcHJvdG8z " ) ) ; <nl> descriptor = pbr : : FileDescriptor . FromGeneratedCode ( descriptorData , <nl> new pbr : : FileDescriptor [ ] { } , <nl> - new pbr : : GeneratedClrTypeInfo ( new [ ] { typeof ( global : : Grpc . Testing . PayloadType ) , typeof ( global : : Grpc . Testing . CompressionType ) , } , new pbr : : GeneratedClrTypeInfo [ ] { <nl> + new pbr : : GeneratedClrTypeInfo ( new [ ] { typeof ( global : : Grpc . Testing . PayloadType ) , } , new pbr : : GeneratedClrTypeInfo [ ] { <nl> + new pbr : : GeneratedClrTypeInfo ( typeof ( global : : Grpc . Testing . BoolValue ) , global : : Grpc . Testing . BoolValue . Parser , new [ ] { " Value " } , null , null , null ) , <nl> new pbr : : GeneratedClrTypeInfo ( typeof ( global : : Grpc . Testing . Payload ) , global : : Grpc . Testing . Payload . Parser , new [ ] { " Type " , " Body " } , null , null , null ) , <nl> new pbr : : GeneratedClrTypeInfo ( typeof ( global : : Grpc . Testing . EchoStatus ) , global : : Grpc . Testing . EchoStatus . Parser , new [ ] { " Code " , " Message " } , null , null , null ) , <nl> - new pbr : : GeneratedClrTypeInfo ( typeof ( global : : Grpc . Testing . SimpleRequest ) , global : : Grpc . Testing . SimpleRequest . Parser , new [ ] { " ResponseType " , " ResponseSize " , " Payload " , " FillUsername " , " FillOauthScope " , " ResponseCompression " , " ResponseStatus " } , null , null , null ) , <nl> + new pbr : : GeneratedClrTypeInfo ( typeof ( global : : Grpc . Testing . SimpleRequest ) , global : : Grpc . Testing . SimpleRequest . Parser , new [ ] { " ResponseType " , " ResponseSize " , " Payload " , " FillUsername " , " FillOauthScope " , " ResponseCompressed " , " ResponseStatus " , " ExpectCompressed " } , null , null , null ) , <nl> new pbr : : GeneratedClrTypeInfo ( typeof ( global : : Grpc . Testing . SimpleResponse ) , global : : Grpc . Testing . SimpleResponse . Parser , new [ ] { " Payload " , " Username " , " OauthScope " } , null , null , null ) , <nl> - new pbr : : GeneratedClrTypeInfo ( typeof ( global : : Grpc . Testing . StreamingInputCallRequest ) , global : : Grpc . Testing . StreamingInputCallRequest . Parser , new [ ] { " Payload " } , null , null , null ) , <nl> + new pbr : : GeneratedClrTypeInfo ( typeof ( global : : Grpc . Testing . StreamingInputCallRequest ) , global : : Grpc . Testing . StreamingInputCallRequest . Parser , new [ ] { " Payload " , " ExpectCompressed " } , null , null , null ) , <nl> new pbr : : GeneratedClrTypeInfo ( typeof ( global : : Grpc . Testing . StreamingInputCallResponse ) , global : : Grpc . Testing . StreamingInputCallResponse . 
Parser , new [ ] { " AggregatedPayloadSize " } , null , null , null ) , <nl> - new pbr : : GeneratedClrTypeInfo ( typeof ( global : : Grpc . Testing . ResponseParameters ) , global : : Grpc . Testing . ResponseParameters . Parser , new [ ] { " Size " , " IntervalUs " } , null , null , null ) , <nl> - new pbr : : GeneratedClrTypeInfo ( typeof ( global : : Grpc . Testing . StreamingOutputCallRequest ) , global : : Grpc . Testing . StreamingOutputCallRequest . Parser , new [ ] { " ResponseType " , " ResponseParameters " , " Payload " , " ResponseCompression " , " ResponseStatus " } , null , null , null ) , <nl> + new pbr : : GeneratedClrTypeInfo ( typeof ( global : : Grpc . Testing . ResponseParameters ) , global : : Grpc . Testing . ResponseParameters . Parser , new [ ] { " Size " , " IntervalUs " , " Compressed " } , null , null , null ) , <nl> + new pbr : : GeneratedClrTypeInfo ( typeof ( global : : Grpc . Testing . StreamingOutputCallRequest ) , global : : Grpc . Testing . StreamingOutputCallRequest . Parser , new [ ] { " ResponseType " , " ResponseParameters " , " Payload " , " ResponseStatus " } , null , null , null ) , <nl> new pbr : : GeneratedClrTypeInfo ( typeof ( global : : Grpc . Testing . StreamingOutputCallResponse ) , global : : Grpc . Testing . StreamingOutputCallResponse . Parser , new [ ] { " Payload " } , null , null , null ) , <nl> new pbr : : GeneratedClrTypeInfo ( typeof ( global : : Grpc . Testing . ReconnectParams ) , global : : Grpc . Testing . ReconnectParams . Parser , new [ ] { " MaxReconnectBackoffMs " } , null , null , null ) , <nl> new pbr : : GeneratedClrTypeInfo ( typeof ( global : : Grpc . Testing . ReconnectInfo ) , global : : Grpc . Testing . ReconnectInfo . Parser , new [ ] { " Passed " , " BackoffMs " } , null , null , null ) <nl> public static partial class MessagesReflection { <nl> } <nl> # region Enums <nl> / / / < summary > <nl> + / / / DEPRECATED , don ' t use . To be removed shortly . <nl> / / / The type of payload that should be returned . <nl> / / / < / summary > <nl> public enum PayloadType { <nl> public enum PayloadType { <nl> / / / Compressable text format . <nl> / / / < / summary > <nl> [ pbr : : OriginalName ( " COMPRESSABLE " ) ] Compressable = 0 , <nl> - / / / < summary > <nl> - / / / Uncompressable binary format . <nl> - / / / < / summary > <nl> - [ pbr : : OriginalName ( " UNCOMPRESSABLE " ) ] Uncompressable = 1 , <nl> - / / / < summary > <nl> - / / / Randomly chosen from all other formats defined in this enum . <nl> - / / / < / summary > <nl> - [ pbr : : OriginalName ( " RANDOM " ) ] Random = 2 , <nl> } <nl> <nl> + # endregion <nl> + <nl> + # region Messages <nl> / / / < summary > <nl> - / / / Compression algorithms <nl> + / / / TODO ( dgq ) : Go back to using well - known types once <nl> + / / / https : / / github . com / grpc / grpc / issues / 6980 has been fixed . <nl> + / / / import " google / protobuf / wrappers . proto " ; <nl> / / / < / summary > <nl> - public enum CompressionType { <nl> + [ global : : System . Diagnostics . DebuggerNonUserCodeAttribute ( ) ] <nl> + public sealed partial class BoolValue : pb : : IMessage < BoolValue > { <nl> + private static readonly pb : : MessageParser < BoolValue > _parser = new pb : : MessageParser < BoolValue > ( ( ) = > new BoolValue ( ) ) ; <nl> + public static pb : : MessageParser < BoolValue > Parser { get { return _parser ; } } <nl> + <nl> + public static pbr : : MessageDescriptor Descriptor { <nl> + get { return global : : Grpc . Testing . MessagesReflection . Descriptor . 
MessageTypes [ 0 ] ; } <nl> + } <nl> + <nl> + pbr : : MessageDescriptor pb : : IMessage . Descriptor { <nl> + get { return Descriptor ; } <nl> + } <nl> + <nl> + public BoolValue ( ) { <nl> + OnConstruction ( ) ; <nl> + } <nl> + <nl> + partial void OnConstruction ( ) ; <nl> + <nl> + public BoolValue ( BoolValue other ) : this ( ) { <nl> + value_ = other . value_ ; <nl> + } <nl> + <nl> + public BoolValue Clone ( ) { <nl> + return new BoolValue ( this ) ; <nl> + } <nl> + <nl> + / / / < summary > Field number for the " value " field . < / summary > <nl> + public const int ValueFieldNumber = 1 ; <nl> + private bool value_ ; <nl> / / / < summary > <nl> - / / / No compression <nl> + / / / The bool value . <nl> / / / < / summary > <nl> - [ pbr : : OriginalName ( " NONE " ) ] None = 0 , <nl> - [ pbr : : OriginalName ( " GZIP " ) ] Gzip = 1 , <nl> - [ pbr : : OriginalName ( " DEFLATE " ) ] Deflate = 2 , <nl> - } <nl> + public bool Value { <nl> + get { return value_ ; } <nl> + set { <nl> + value_ = value ; <nl> + } <nl> + } <nl> <nl> - # endregion <nl> + public override bool Equals ( object other ) { <nl> + return Equals ( other as BoolValue ) ; <nl> + } <nl> + <nl> + public bool Equals ( BoolValue other ) { <nl> + if ( ReferenceEquals ( other , null ) ) { <nl> + return false ; <nl> + } <nl> + if ( ReferenceEquals ( other , this ) ) { <nl> + return true ; <nl> + } <nl> + if ( Value ! = other . Value ) return false ; <nl> + return true ; <nl> + } <nl> + <nl> + public override int GetHashCode ( ) { <nl> + int hash = 1 ; <nl> + if ( Value ! = false ) hash ^ = Value . GetHashCode ( ) ; <nl> + return hash ; <nl> + } <nl> + <nl> + public override string ToString ( ) { <nl> + return pb : : JsonFormatter . ToDiagnosticString ( this ) ; <nl> + } <nl> + <nl> + public void WriteTo ( pb : : CodedOutputStream output ) { <nl> + if ( Value ! = false ) { <nl> + output . WriteRawTag ( 8 ) ; <nl> + output . WriteBool ( Value ) ; <nl> + } <nl> + } <nl> + <nl> + public int CalculateSize ( ) { <nl> + int size = 0 ; <nl> + if ( Value ! = false ) { <nl> + size + = 1 + 1 ; <nl> + } <nl> + return size ; <nl> + } <nl> + <nl> + public void MergeFrom ( BoolValue other ) { <nl> + if ( other = = null ) { <nl> + return ; <nl> + } <nl> + if ( other . Value ! = false ) { <nl> + Value = other . Value ; <nl> + } <nl> + } <nl> + <nl> + public void MergeFrom ( pb : : CodedInputStream input ) { <nl> + uint tag ; <nl> + while ( ( tag = input . ReadTag ( ) ) ! = 0 ) { <nl> + switch ( tag ) { <nl> + default : <nl> + input . SkipLastField ( ) ; <nl> + break ; <nl> + case 8 : { <nl> + Value = input . ReadBool ( ) ; <nl> + break ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + <nl> + } <nl> <nl> - # region Messages <nl> / / / < summary > <nl> / / / A block of data , to simply increase gRPC message size . <nl> / / / < / summary > <nl> public sealed partial class Payload : pb : : IMessage < Payload > { <nl> public static pb : : MessageParser < Payload > Parser { get { return _parser ; } } <nl> <nl> public static pbr : : MessageDescriptor Descriptor { <nl> - get { return global : : Grpc . Testing . MessagesReflection . Descriptor . MessageTypes [ 0 ] ; } <nl> + get { return global : : Grpc . Testing . MessagesReflection . Descriptor . MessageTypes [ 1 ] ; } <nl> } <nl> <nl> pbr : : MessageDescriptor pb : : IMessage . Descriptor { <nl> public sealed partial class Payload : pb : : IMessage < Payload > { <nl> public const int TypeFieldNumber = 1 ; <nl> private global : : Grpc . Testing . 
PayloadType type_ = 0 ; <nl> / / / < summary > <nl> + / / / DEPRECATED , don ' t use . To be removed shortly . <nl> / / / The type of data in body . <nl> / / / < / summary > <nl> public global : : Grpc . Testing . PayloadType Type { <nl> public sealed partial class EchoStatus : pb : : IMessage < EchoStatus > { <nl> public static pb : : MessageParser < EchoStatus > Parser { get { return _parser ; } } <nl> <nl> public static pbr : : MessageDescriptor Descriptor { <nl> - get { return global : : Grpc . Testing . MessagesReflection . Descriptor . MessageTypes [ 1 ] ; } <nl> + get { return global : : Grpc . Testing . MessagesReflection . Descriptor . MessageTypes [ 2 ] ; } <nl> } <nl> <nl> pbr : : MessageDescriptor pb : : IMessage . Descriptor { <nl> public sealed partial class SimpleRequest : pb : : IMessage < SimpleRequest > { <nl> public static pb : : MessageParser < SimpleRequest > Parser { get { return _parser ; } } <nl> <nl> public static pbr : : MessageDescriptor Descriptor { <nl> - get { return global : : Grpc . Testing . MessagesReflection . Descriptor . MessageTypes [ 2 ] ; } <nl> + get { return global : : Grpc . Testing . MessagesReflection . Descriptor . MessageTypes [ 3 ] ; } <nl> } <nl> <nl> pbr : : MessageDescriptor pb : : IMessage . Descriptor { <nl> public sealed partial class SimpleRequest : pb : : IMessage < SimpleRequest > { <nl> Payload = other . payload_ ! = null ? other . Payload . Clone ( ) : null ; <nl> fillUsername_ = other . fillUsername_ ; <nl> fillOauthScope_ = other . fillOauthScope_ ; <nl> - responseCompression_ = other . responseCompression_ ; <nl> + ResponseCompressed = other . responseCompressed_ ! = null ? other . ResponseCompressed . Clone ( ) : null ; <nl> ResponseStatus = other . responseStatus_ ! = null ? other . ResponseStatus . Clone ( ) : null ; <nl> + ExpectCompressed = other . expectCompressed_ ! = null ? other . ExpectCompressed . Clone ( ) : null ; <nl> } <nl> <nl> public SimpleRequest Clone ( ) { <nl> public sealed partial class SimpleRequest : pb : : IMessage < SimpleRequest > { <nl> public const int ResponseTypeFieldNumber = 1 ; <nl> private global : : Grpc . Testing . PayloadType responseType_ = 0 ; <nl> / / / < summary > <nl> + / / / DEPRECATED , don ' t use . To be removed shortly . <nl> / / / Desired payload type in the response from the server . <nl> / / / If response_type is RANDOM , server randomly chooses one from other formats . <nl> / / / < / summary > <nl> public sealed partial class SimpleRequest : pb : : IMessage < SimpleRequest > { <nl> private int responseSize_ ; <nl> / / / < summary > <nl> / / / Desired payload size in the response from the server . <nl> - / / / If response_type is COMPRESSABLE , this denotes the size before compression . <nl> / / / < / summary > <nl> public int ResponseSize { <nl> get { return responseSize_ ; } <nl> public sealed partial class SimpleRequest : pb : : IMessage < SimpleRequest > { <nl> } <nl> } <nl> <nl> - / / / < summary > Field number for the " response_compression " field . < / summary > <nl> - public const int ResponseCompressionFieldNumber = 6 ; <nl> - private global : : Grpc . Testing . CompressionType responseCompression_ = 0 ; <nl> + / / / < summary > Field number for the " response_compressed " field . < / summary > <nl> + public const int ResponseCompressedFieldNumber = 6 ; <nl> + private global : : Grpc . Testing . 
BoolValue responseCompressed_ ; <nl> / / / < summary > <nl> - / / / Compression algorithm to be used by the server for the response ( stream ) <nl> + / / / Whether to request the server to compress the response . This field is <nl> + / / / " nullable " in order to interoperate seamlessly with clients not able to <nl> + / / / implement the full compression tests by introspecting the call to verify <nl> + / / / the response ' s compression status . <nl> / / / < / summary > <nl> - public global : : Grpc . Testing . CompressionType ResponseCompression { <nl> - get { return responseCompression_ ; } <nl> + public global : : Grpc . Testing . BoolValue ResponseCompressed { <nl> + get { return responseCompressed_ ; } <nl> set { <nl> - responseCompression_ = value ; <nl> + responseCompressed_ = value ; <nl> } <nl> } <nl> <nl> public sealed partial class SimpleRequest : pb : : IMessage < SimpleRequest > { <nl> } <nl> } <nl> <nl> + / / / < summary > Field number for the " expect_compressed " field . < / summary > <nl> + public const int ExpectCompressedFieldNumber = 8 ; <nl> + private global : : Grpc . Testing . BoolValue expectCompressed_ ; <nl> + / / / < summary > <nl> + / / / Whether the server should expect this request to be compressed . <nl> + / / / < / summary > <nl> + public global : : Grpc . Testing . BoolValue ExpectCompressed { <nl> + get { return expectCompressed_ ; } <nl> + set { <nl> + expectCompressed_ = value ; <nl> + } <nl> + } <nl> + <nl> public override bool Equals ( object other ) { <nl> return Equals ( other as SimpleRequest ) ; <nl> } <nl> public sealed partial class SimpleRequest : pb : : IMessage < SimpleRequest > { <nl> if ( ! object . Equals ( Payload , other . Payload ) ) return false ; <nl> if ( FillUsername ! = other . FillUsername ) return false ; <nl> if ( FillOauthScope ! = other . FillOauthScope ) return false ; <nl> - if ( ResponseCompression ! = other . ResponseCompression ) return false ; <nl> + if ( ! object . Equals ( ResponseCompressed , other . ResponseCompressed ) ) return false ; <nl> if ( ! object . Equals ( ResponseStatus , other . ResponseStatus ) ) return false ; <nl> + if ( ! object . Equals ( ExpectCompressed , other . ExpectCompressed ) ) return false ; <nl> return true ; <nl> } <nl> <nl> public sealed partial class SimpleRequest : pb : : IMessage < SimpleRequest > { <nl> if ( payload_ ! = null ) hash ^ = Payload . GetHashCode ( ) ; <nl> if ( FillUsername ! = false ) hash ^ = FillUsername . GetHashCode ( ) ; <nl> if ( FillOauthScope ! = false ) hash ^ = FillOauthScope . GetHashCode ( ) ; <nl> - if ( ResponseCompression ! = 0 ) hash ^ = ResponseCompression . GetHashCode ( ) ; <nl> + if ( responseCompressed_ ! = null ) hash ^ = ResponseCompressed . GetHashCode ( ) ; <nl> if ( responseStatus_ ! = null ) hash ^ = ResponseStatus . GetHashCode ( ) ; <nl> + if ( expectCompressed_ ! = null ) hash ^ = ExpectCompressed . GetHashCode ( ) ; <nl> return hash ; <nl> } <nl> <nl> public sealed partial class SimpleRequest : pb : : IMessage < SimpleRequest > { <nl> output . WriteRawTag ( 40 ) ; <nl> output . WriteBool ( FillOauthScope ) ; <nl> } <nl> - if ( ResponseCompression ! = 0 ) { <nl> - output . WriteRawTag ( 48 ) ; <nl> - output . WriteEnum ( ( int ) ResponseCompression ) ; <nl> + if ( responseCompressed_ ! = null ) { <nl> + output . WriteRawTag ( 50 ) ; <nl> + output . WriteMessage ( ResponseCompressed ) ; <nl> } <nl> if ( responseStatus_ ! = null ) { <nl> output . WriteRawTag ( 58 ) ; <nl> output . 
WriteMessage ( ResponseStatus ) ; <nl> } <nl> + if ( expectCompressed_ ! = null ) { <nl> + output . WriteRawTag ( 66 ) ; <nl> + output . WriteMessage ( ExpectCompressed ) ; <nl> + } <nl> } <nl> <nl> public int CalculateSize ( ) { <nl> public sealed partial class SimpleRequest : pb : : IMessage < SimpleRequest > { <nl> if ( FillOauthScope ! = false ) { <nl> size + = 1 + 1 ; <nl> } <nl> - if ( ResponseCompression ! = 0 ) { <nl> - size + = 1 + pb : : CodedOutputStream . ComputeEnumSize ( ( int ) ResponseCompression ) ; <nl> + if ( responseCompressed_ ! = null ) { <nl> + size + = 1 + pb : : CodedOutputStream . ComputeMessageSize ( ResponseCompressed ) ; <nl> } <nl> if ( responseStatus_ ! = null ) { <nl> size + = 1 + pb : : CodedOutputStream . ComputeMessageSize ( ResponseStatus ) ; <nl> } <nl> + if ( expectCompressed_ ! = null ) { <nl> + size + = 1 + pb : : CodedOutputStream . ComputeMessageSize ( ExpectCompressed ) ; <nl> + } <nl> return size ; <nl> } <nl> <nl> public sealed partial class SimpleRequest : pb : : IMessage < SimpleRequest > { <nl> if ( other . FillOauthScope ! = false ) { <nl> FillOauthScope = other . FillOauthScope ; <nl> } <nl> - if ( other . ResponseCompression ! = 0 ) { <nl> - ResponseCompression = other . ResponseCompression ; <nl> + if ( other . responseCompressed_ ! = null ) { <nl> + if ( responseCompressed_ = = null ) { <nl> + responseCompressed_ = new global : : Grpc . Testing . BoolValue ( ) ; <nl> + } <nl> + ResponseCompressed . MergeFrom ( other . ResponseCompressed ) ; <nl> } <nl> if ( other . responseStatus_ ! = null ) { <nl> if ( responseStatus_ = = null ) { <nl> public sealed partial class SimpleRequest : pb : : IMessage < SimpleRequest > { <nl> } <nl> ResponseStatus . MergeFrom ( other . ResponseStatus ) ; <nl> } <nl> + if ( other . expectCompressed_ ! = null ) { <nl> + if ( expectCompressed_ = = null ) { <nl> + expectCompressed_ = new global : : Grpc . Testing . BoolValue ( ) ; <nl> + } <nl> + ExpectCompressed . MergeFrom ( other . ExpectCompressed ) ; <nl> + } <nl> } <nl> <nl> public void MergeFrom ( pb : : CodedInputStream input ) { <nl> public sealed partial class SimpleRequest : pb : : IMessage < SimpleRequest > { <nl> FillOauthScope = input . ReadBool ( ) ; <nl> break ; <nl> } <nl> - case 48 : { <nl> - responseCompression_ = ( global : : Grpc . Testing . CompressionType ) input . ReadEnum ( ) ; <nl> + case 50 : { <nl> + if ( responseCompressed_ = = null ) { <nl> + responseCompressed_ = new global : : Grpc . Testing . BoolValue ( ) ; <nl> + } <nl> + input . ReadMessage ( responseCompressed_ ) ; <nl> break ; <nl> } <nl> case 58 : { <nl> public sealed partial class SimpleRequest : pb : : IMessage < SimpleRequest > { <nl> input . ReadMessage ( responseStatus_ ) ; <nl> break ; <nl> } <nl> + case 66 : { <nl> + if ( expectCompressed_ = = null ) { <nl> + expectCompressed_ = new global : : Grpc . Testing . BoolValue ( ) ; <nl> + } <nl> + input . ReadMessage ( expectCompressed_ ) ; <nl> + break ; <nl> + } <nl> } <nl> } <nl> } <nl> public sealed partial class SimpleResponse : pb : : IMessage < SimpleResponse > { <nl> public static pb : : MessageParser < SimpleResponse > Parser { get { return _parser ; } } <nl> <nl> public static pbr : : MessageDescriptor Descriptor { <nl> - get { return global : : Grpc . Testing . MessagesReflection . Descriptor . MessageTypes [ 3 ] ; } <nl> + get { return global : : Grpc . Testing . MessagesReflection . Descriptor . MessageTypes [ 4 ] ; } <nl> } <nl> <nl> pbr : : MessageDescriptor pb : : IMessage . 
Descriptor { <nl> public sealed partial class StreamingInputCallRequest : pb : : IMessage < StreamingIn <nl> public static pb : : MessageParser < StreamingInputCallRequest > Parser { get { return _parser ; } } <nl> <nl> public static pbr : : MessageDescriptor Descriptor { <nl> - get { return global : : Grpc . Testing . MessagesReflection . Descriptor . MessageTypes [ 4 ] ; } <nl> + get { return global : : Grpc . Testing . MessagesReflection . Descriptor . MessageTypes [ 5 ] ; } <nl> } <nl> <nl> pbr : : MessageDescriptor pb : : IMessage . Descriptor { <nl> public sealed partial class StreamingInputCallRequest : pb : : IMessage < StreamingIn <nl> <nl> public StreamingInputCallRequest ( StreamingInputCallRequest other ) : this ( ) { <nl> Payload = other . payload_ ! = null ? other . Payload . Clone ( ) : null ; <nl> + ExpectCompressed = other . expectCompressed_ ! = null ? other . ExpectCompressed . Clone ( ) : null ; <nl> } <nl> <nl> public StreamingInputCallRequest Clone ( ) { <nl> public sealed partial class StreamingInputCallRequest : pb : : IMessage < StreamingIn <nl> } <nl> } <nl> <nl> + / / / < summary > Field number for the " expect_compressed " field . < / summary > <nl> + public const int ExpectCompressedFieldNumber = 2 ; <nl> + private global : : Grpc . Testing . BoolValue expectCompressed_ ; <nl> + / / / < summary > <nl> + / / / Whether the server should expect this request to be compressed . This field <nl> + / / / is " nullable " in order to interoperate seamlessly with servers not able to <nl> + / / / implement the full compression tests by introspecting the call to verify <nl> + / / / the request ' s compression status . <nl> + / / / < / summary > <nl> + public global : : Grpc . Testing . BoolValue ExpectCompressed { <nl> + get { return expectCompressed_ ; } <nl> + set { <nl> + expectCompressed_ = value ; <nl> + } <nl> + } <nl> + <nl> public override bool Equals ( object other ) { <nl> return Equals ( other as StreamingInputCallRequest ) ; <nl> } <nl> public sealed partial class StreamingInputCallRequest : pb : : IMessage < StreamingIn <nl> return true ; <nl> } <nl> if ( ! object . Equals ( Payload , other . Payload ) ) return false ; <nl> + if ( ! object . Equals ( ExpectCompressed , other . ExpectCompressed ) ) return false ; <nl> return true ; <nl> } <nl> <nl> public override int GetHashCode ( ) { <nl> int hash = 1 ; <nl> if ( payload_ ! = null ) hash ^ = Payload . GetHashCode ( ) ; <nl> + if ( expectCompressed_ ! = null ) hash ^ = ExpectCompressed . GetHashCode ( ) ; <nl> return hash ; <nl> } <nl> <nl> public sealed partial class StreamingInputCallRequest : pb : : IMessage < StreamingIn <nl> output . WriteRawTag ( 10 ) ; <nl> output . WriteMessage ( Payload ) ; <nl> } <nl> + if ( expectCompressed_ ! = null ) { <nl> + output . WriteRawTag ( 18 ) ; <nl> + output . WriteMessage ( ExpectCompressed ) ; <nl> + } <nl> } <nl> <nl> public int CalculateSize ( ) { <nl> public sealed partial class StreamingInputCallRequest : pb : : IMessage < StreamingIn <nl> if ( payload_ ! = null ) { <nl> size + = 1 + pb : : CodedOutputStream . ComputeMessageSize ( Payload ) ; <nl> } <nl> + if ( expectCompressed_ ! = null ) { <nl> + size + = 1 + pb : : CodedOutputStream . ComputeMessageSize ( ExpectCompressed ) ; <nl> + } <nl> return size ; <nl> } <nl> <nl> public sealed partial class StreamingInputCallRequest : pb : : IMessage < StreamingIn <nl> } <nl> Payload . MergeFrom ( other . Payload ) ; <nl> } <nl> + if ( other . expectCompressed_ ! 
= null ) { <nl> + if ( expectCompressed_ = = null ) { <nl> + expectCompressed_ = new global : : Grpc . Testing . BoolValue ( ) ; <nl> + } <nl> + ExpectCompressed . MergeFrom ( other . ExpectCompressed ) ; <nl> + } <nl> } <nl> <nl> public void MergeFrom ( pb : : CodedInputStream input ) { <nl> public sealed partial class StreamingInputCallRequest : pb : : IMessage < StreamingIn <nl> input . ReadMessage ( payload_ ) ; <nl> break ; <nl> } <nl> + case 18 : { <nl> + if ( expectCompressed_ = = null ) { <nl> + expectCompressed_ = new global : : Grpc . Testing . BoolValue ( ) ; <nl> + } <nl> + input . ReadMessage ( expectCompressed_ ) ; <nl> + break ; <nl> + } <nl> } <nl> } <nl> } <nl> public sealed partial class StreamingInputCallResponse : pb : : IMessage < StreamingI <nl> public static pb : : MessageParser < StreamingInputCallResponse > Parser { get { return _parser ; } } <nl> <nl> public static pbr : : MessageDescriptor Descriptor { <nl> - get { return global : : Grpc . Testing . MessagesReflection . Descriptor . MessageTypes [ 5 ] ; } <nl> + get { return global : : Grpc . Testing . MessagesReflection . Descriptor . MessageTypes [ 6 ] ; } <nl> } <nl> <nl> pbr : : MessageDescriptor pb : : IMessage . Descriptor { <nl> public sealed partial class ResponseParameters : pb : : IMessage < ResponseParameters <nl> public static pb : : MessageParser < ResponseParameters > Parser { get { return _parser ; } } <nl> <nl> public static pbr : : MessageDescriptor Descriptor { <nl> - get { return global : : Grpc . Testing . MessagesReflection . Descriptor . MessageTypes [ 6 ] ; } <nl> + get { return global : : Grpc . Testing . MessagesReflection . Descriptor . MessageTypes [ 7 ] ; } <nl> } <nl> <nl> pbr : : MessageDescriptor pb : : IMessage . Descriptor { <nl> public sealed partial class ResponseParameters : pb : : IMessage < ResponseParameters <nl> public ResponseParameters ( ResponseParameters other ) : this ( ) { <nl> size_ = other . size_ ; <nl> intervalUs_ = other . intervalUs_ ; <nl> + Compressed = other . compressed_ ! = null ? other . Compressed . Clone ( ) : null ; <nl> } <nl> <nl> public ResponseParameters Clone ( ) { <nl> public sealed partial class ResponseParameters : pb : : IMessage < ResponseParameters <nl> private int size_ ; <nl> / / / < summary > <nl> / / / Desired payload sizes in responses from the server . <nl> - / / / If response_type is COMPRESSABLE , this denotes the size before compression . <nl> / / / < / summary > <nl> public int Size { <nl> get { return size_ ; } <nl> public sealed partial class ResponseParameters : pb : : IMessage < ResponseParameters <nl> } <nl> } <nl> <nl> + / / / < summary > Field number for the " compressed " field . < / summary > <nl> + public const int CompressedFieldNumber = 3 ; <nl> + private global : : Grpc . Testing . BoolValue compressed_ ; <nl> + / / / < summary > <nl> + / / / Whether to request the server to compress the response . This field is <nl> + / / / " nullable " in order to interoperate seamlessly with clients not able to <nl> + / / / implement the full compression tests by introspecting the call to verify <nl> + / / / the response ' s compression status . <nl> + / / / < / summary > <nl> + public global : : Grpc . Testing . 
BoolValue Compressed { <nl> + get { return compressed_ ; } <nl> + set { <nl> + compressed_ = value ; <nl> + } <nl> + } <nl> + <nl> public override bool Equals ( object other ) { <nl> return Equals ( other as ResponseParameters ) ; <nl> } <nl> public sealed partial class ResponseParameters : pb : : IMessage < ResponseParameters <nl> } <nl> if ( Size ! = other . Size ) return false ; <nl> if ( IntervalUs ! = other . IntervalUs ) return false ; <nl> + if ( ! object . Equals ( Compressed , other . Compressed ) ) return false ; <nl> return true ; <nl> } <nl> <nl> public sealed partial class ResponseParameters : pb : : IMessage < ResponseParameters <nl> int hash = 1 ; <nl> if ( Size ! = 0 ) hash ^ = Size . GetHashCode ( ) ; <nl> if ( IntervalUs ! = 0 ) hash ^ = IntervalUs . GetHashCode ( ) ; <nl> + if ( compressed_ ! = null ) hash ^ = Compressed . GetHashCode ( ) ; <nl> return hash ; <nl> } <nl> <nl> public sealed partial class ResponseParameters : pb : : IMessage < ResponseParameters <nl> output . WriteRawTag ( 16 ) ; <nl> output . WriteInt32 ( IntervalUs ) ; <nl> } <nl> + if ( compressed_ ! = null ) { <nl> + output . WriteRawTag ( 26 ) ; <nl> + output . WriteMessage ( Compressed ) ; <nl> + } <nl> } <nl> <nl> public int CalculateSize ( ) { <nl> public sealed partial class ResponseParameters : pb : : IMessage < ResponseParameters <nl> if ( IntervalUs ! = 0 ) { <nl> size + = 1 + pb : : CodedOutputStream . ComputeInt32Size ( IntervalUs ) ; <nl> } <nl> + if ( compressed_ ! = null ) { <nl> + size + = 1 + pb : : CodedOutputStream . ComputeMessageSize ( Compressed ) ; <nl> + } <nl> return size ; <nl> } <nl> <nl> public sealed partial class ResponseParameters : pb : : IMessage < ResponseParameters <nl> if ( other . IntervalUs ! = 0 ) { <nl> IntervalUs = other . IntervalUs ; <nl> } <nl> + if ( other . compressed_ ! = null ) { <nl> + if ( compressed_ = = null ) { <nl> + compressed_ = new global : : Grpc . Testing . BoolValue ( ) ; <nl> + } <nl> + Compressed . MergeFrom ( other . Compressed ) ; <nl> + } <nl> } <nl> <nl> public void MergeFrom ( pb : : CodedInputStream input ) { <nl> public sealed partial class ResponseParameters : pb : : IMessage < ResponseParameters <nl> IntervalUs = input . ReadInt32 ( ) ; <nl> break ; <nl> } <nl> + case 26 : { <nl> + if ( compressed_ = = null ) { <nl> + compressed_ = new global : : Grpc . Testing . BoolValue ( ) ; <nl> + } <nl> + input . ReadMessage ( compressed_ ) ; <nl> + break ; <nl> + } <nl> } <nl> } <nl> } <nl> public sealed partial class StreamingOutputCallRequest : pb : : IMessage < StreamingO <nl> public static pb : : MessageParser < StreamingOutputCallRequest > Parser { get { return _parser ; } } <nl> <nl> public static pbr : : MessageDescriptor Descriptor { <nl> - get { return global : : Grpc . Testing . MessagesReflection . Descriptor . MessageTypes [ 7 ] ; } <nl> + get { return global : : Grpc . Testing . MessagesReflection . Descriptor . MessageTypes [ 8 ] ; } <nl> } <nl> <nl> pbr : : MessageDescriptor pb : : IMessage . Descriptor { <nl> public sealed partial class StreamingOutputCallRequest : pb : : IMessage < StreamingO <nl> responseType_ = other . responseType_ ; <nl> responseParameters_ = other . responseParameters_ . Clone ( ) ; <nl> Payload = other . payload_ ! = null ? other . Payload . Clone ( ) : null ; <nl> - responseCompression_ = other . responseCompression_ ; <nl> ResponseStatus = other . responseStatus_ ! = null ? other . ResponseStatus . 
Clone ( ) : null ; <nl> } <nl> <nl> public sealed partial class StreamingOutputCallRequest : pb : : IMessage < StreamingO <nl> public const int ResponseTypeFieldNumber = 1 ; <nl> private global : : Grpc . Testing . PayloadType responseType_ = 0 ; <nl> / / / < summary > <nl> + / / / DEPRECATED , don ' t use . To be removed shortly . <nl> / / / Desired payload type in the response from the server . <nl> / / / If response_type is RANDOM , the payload from each response in the stream <nl> / / / might be of different types . This is to simulate a mixed type of payload <nl> public sealed partial class StreamingOutputCallRequest : pb : : IMessage < StreamingO <nl> } <nl> } <nl> <nl> - / / / < summary > Field number for the " response_compression " field . < / summary > <nl> - public const int ResponseCompressionFieldNumber = 6 ; <nl> - private global : : Grpc . Testing . CompressionType responseCompression_ = 0 ; <nl> - / / / < summary > <nl> - / / / Compression algorithm to be used by the server for the response ( stream ) <nl> - / / / < / summary > <nl> - public global : : Grpc . Testing . CompressionType ResponseCompression { <nl> - get { return responseCompression_ ; } <nl> - set { <nl> - responseCompression_ = value ; <nl> - } <nl> - } <nl> - <nl> / / / < summary > Field number for the " response_status " field . < / summary > <nl> public const int ResponseStatusFieldNumber = 7 ; <nl> private global : : Grpc . Testing . EchoStatus responseStatus_ ; <nl> public sealed partial class StreamingOutputCallRequest : pb : : IMessage < StreamingO <nl> if ( ResponseType ! = other . ResponseType ) return false ; <nl> if ( ! responseParameters_ . Equals ( other . responseParameters_ ) ) return false ; <nl> if ( ! object . Equals ( Payload , other . Payload ) ) return false ; <nl> - if ( ResponseCompression ! = other . ResponseCompression ) return false ; <nl> if ( ! object . Equals ( ResponseStatus , other . ResponseStatus ) ) return false ; <nl> return true ; <nl> } <nl> public sealed partial class StreamingOutputCallRequest : pb : : IMessage < StreamingO <nl> if ( ResponseType ! = 0 ) hash ^ = ResponseType . GetHashCode ( ) ; <nl> hash ^ = responseParameters_ . GetHashCode ( ) ; <nl> if ( payload_ ! = null ) hash ^ = Payload . GetHashCode ( ) ; <nl> - if ( ResponseCompression ! = 0 ) hash ^ = ResponseCompression . GetHashCode ( ) ; <nl> if ( responseStatus_ ! = null ) hash ^ = ResponseStatus . GetHashCode ( ) ; <nl> return hash ; <nl> } <nl> public sealed partial class StreamingOutputCallRequest : pb : : IMessage < StreamingO <nl> output . WriteRawTag ( 26 ) ; <nl> output . WriteMessage ( Payload ) ; <nl> } <nl> - if ( ResponseCompression ! = 0 ) { <nl> - output . WriteRawTag ( 48 ) ; <nl> - output . WriteEnum ( ( int ) ResponseCompression ) ; <nl> - } <nl> if ( responseStatus_ ! = null ) { <nl> output . WriteRawTag ( 58 ) ; <nl> output . WriteMessage ( ResponseStatus ) ; <nl> public sealed partial class StreamingOutputCallRequest : pb : : IMessage < StreamingO <nl> if ( payload_ ! = null ) { <nl> size + = 1 + pb : : CodedOutputStream . ComputeMessageSize ( Payload ) ; <nl> } <nl> - if ( ResponseCompression ! = 0 ) { <nl> - size + = 1 + pb : : CodedOutputStream . ComputeEnumSize ( ( int ) ResponseCompression ) ; <nl> - } <nl> if ( responseStatus_ ! = null ) { <nl> size + = 1 + pb : : CodedOutputStream . ComputeMessageSize ( ResponseStatus ) ; <nl> } <nl> public sealed partial class StreamingOutputCallRequest : pb : : IMessage < StreamingO <nl> } <nl> Payload . MergeFrom ( other . 
Payload ) ; <nl> } <nl> - if ( other . ResponseCompression ! = 0 ) { <nl> - ResponseCompression = other . ResponseCompression ; <nl> - } <nl> if ( other . responseStatus_ ! = null ) { <nl> if ( responseStatus_ = = null ) { <nl> responseStatus_ = new global : : Grpc . Testing . EchoStatus ( ) ; <nl> public sealed partial class StreamingOutputCallRequest : pb : : IMessage < StreamingO <nl> input . ReadMessage ( payload_ ) ; <nl> break ; <nl> } <nl> - case 48 : { <nl> - responseCompression_ = ( global : : Grpc . Testing . CompressionType ) input . ReadEnum ( ) ; <nl> - break ; <nl> - } <nl> case 58 : { <nl> if ( responseStatus_ = = null ) { <nl> responseStatus_ = new global : : Grpc . Testing . EchoStatus ( ) ; <nl> public sealed partial class StreamingOutputCallResponse : pb : : IMessage < Streaming <nl> public static pb : : MessageParser < StreamingOutputCallResponse > Parser { get { return _parser ; } } <nl> <nl> public static pbr : : MessageDescriptor Descriptor { <nl> - get { return global : : Grpc . Testing . MessagesReflection . Descriptor . MessageTypes [ 8 ] ; } <nl> + get { return global : : Grpc . Testing . MessagesReflection . Descriptor . MessageTypes [ 9 ] ; } <nl> } <nl> <nl> pbr : : MessageDescriptor pb : : IMessage . Descriptor { <nl> public sealed partial class ReconnectParams : pb : : IMessage < ReconnectParams > { <nl> public static pb : : MessageParser < ReconnectParams > Parser { get { return _parser ; } } <nl> <nl> public static pbr : : MessageDescriptor Descriptor { <nl> - get { return global : : Grpc . Testing . MessagesReflection . Descriptor . MessageTypes [ 9 ] ; } <nl> + get { return global : : Grpc . Testing . MessagesReflection . Descriptor . MessageTypes [ 10 ] ; } <nl> } <nl> <nl> pbr : : MessageDescriptor pb : : IMessage . Descriptor { <nl> public sealed partial class ReconnectInfo : pb : : IMessage < ReconnectInfo > { <nl> public static pb : : MessageParser < ReconnectInfo > Parser { get { return _parser ; } } <nl> <nl> public static pbr : : MessageDescriptor Descriptor { <nl> - get { return global : : Grpc . Testing . MessagesReflection . Descriptor . MessageTypes [ 10 ] ; } <nl> + get { return global : : Grpc . Testing . MessagesReflection . Descriptor . MessageTypes [ 11 ] ; } <nl> } <nl> <nl> pbr : : MessageDescriptor pb : : IMessage . Descriptor { <nl> mmm a / src / csharp / Grpc . IntegrationTesting / MetadataCredentialsTest . cs <nl> ppp b / src / csharp / Grpc . IntegrationTesting / MetadataCredentialsTest . cs <nl> <nl> using Grpc . Core ; <nl> using Grpc . Core . Utils ; <nl> using Grpc . Testing ; <nl> - using Moq ; <nl> using NUnit . Framework ; <nl> <nl> namespace Grpc . IntegrationTesting <nl> public class MetadataCredentialsTest <nl> Channel channel ; <nl> TestService . TestServiceClient client ; <nl> List < ChannelOption > options ; <nl> - Mock < TestService . TestServiceBase > serviceMock ; <nl> AsyncAuthInterceptor asyncAuthInterceptor ; <nl> <nl> [ SetUp ] <nl> public void Init ( ) <nl> { <nl> - serviceMock = new Mock < TestService . TestServiceBase > ( ) ; <nl> - serviceMock . Setup ( m = > m . UnaryCall ( It . IsAny < SimpleRequest > ( ) , It . IsAny < ServerCallContext > ( ) ) ) <nl> - . Returns ( new Func < SimpleRequest , ServerCallContext , Task < SimpleResponse > > ( UnaryCallHandler ) ) ; <nl> - <nl> server = new Server <nl> { <nl> - Services = { TestService . BindService ( serviceMock . Object ) } , <nl> + Services = { TestService . BindService ( new FakeTestService ( ) ) } , <nl> Ports = { { Host , ServerPort . 
PickUnused , TestCredentials . CreateSslServerCredentials ( ) } } <nl> } ; <nl> server . Start ( ) ; <nl> public void MetadataCredentials ( ) <nl> channel = new Channel ( Host , server . Ports . Single ( ) . BoundPort , channelCredentials , options ) ; <nl> client = TestService . NewClient ( channel ) ; <nl> <nl> - client . UnaryCall ( new SimpleRequest { } ) ; <nl> + client . UnaryCall ( new SimpleRequest { } ) ; <nl> } <nl> <nl> [ Test ] <nl> public void MetadataCredentials_PerCall ( ) <nl> client . UnaryCall ( new SimpleRequest { } , new CallOptions ( credentials : callCredentials ) ) ; <nl> } <nl> <nl> - private Task < SimpleResponse > UnaryCallHandler ( SimpleRequest request , ServerCallContext context ) <nl> + private class FakeTestService : TestService . TestServiceBase <nl> { <nl> - var authToken = context . RequestHeaders . First ( ( entry ) = > entry . Key = = " authorization " ) . Value ; <nl> - Assert . AreEqual ( " SECRET_TOKEN " , authToken ) ; <nl> - return Task . FromResult ( new SimpleResponse ( ) ) ; <nl> + public override Task < SimpleResponse > UnaryCall ( SimpleRequest request , ServerCallContext context ) <nl> + { <nl> + var authToken = context . RequestHeaders . First ( ( entry ) = > entry . Key = = " authorization " ) . Value ; <nl> + Assert . AreEqual ( " SECRET_TOKEN " , authToken ) ; <nl> + return Task . FromResult ( new SimpleResponse ( ) ) ; <nl> + } <nl> } <nl> } <nl> } <nl> mmm a / src / csharp / Grpc . IntegrationTesting / NUnitMain . cs <nl> ppp b / src / csharp / Grpc . IntegrationTesting / NUnitMain . cs <nl> public static int Main ( string [ ] args ) <nl> { <nl> / / Make logger immune to NUnit capturing stdout and stderr to workaround https : / / github . com / nunit / nunit / issues / 1406 . <nl> GrpcEnvironment . SetLogger ( new TextWriterLogger ( Console . Error ) ) ; <nl> - # if DOTNET5_4 <nl> + # if NETSTANDARD1_5 <nl> return new AutoRun ( typeof ( NUnitMain ) . GetTypeInfo ( ) . Assembly ) . Execute ( args , new ExtendedTextWrapper ( Console . Out ) , Console . In ) ; <nl> # else <nl> return new AutoRun ( ) . Execute ( args ) ; <nl> mmm a / src / csharp / Grpc . IntegrationTesting / TestCredentials . cs <nl> ppp b / src / csharp / Grpc . IntegrationTesting / TestCredentials . cs <nl> public static SslServerCredentials CreateSslServerCredentials ( ) <nl> <nl> private static string GetPath ( string relativePath ) <nl> { <nl> - var assemblyDir = Path . GetDirectoryName ( Assembly . GetExecutingAssembly ( ) . Location ) ; <nl> + var assemblyDir = Path . GetDirectoryName ( typeof ( TestCredentials ) . GetTypeInfo ( ) . Assembly . Location ) ; <nl> return Path . Combine ( assemblyDir , relativePath ) ; <nl> } <nl> } <nl> new file mode 100644 <nl> index 00000000000 . . 3493ab0c228 <nl> mmm / dev / null <nl> ppp b / src / csharp / Grpc . IntegrationTesting / project . json <nl> <nl> + { <nl> + " buildOptions " : { <nl> + " emitEntryPoint " : true <nl> + } , <nl> + " configurations " : { <nl> + " Debug " : { <nl> + " buildOptions " : { <nl> + " copyToOutput " : { <nl> + " include " : " data / * " , <nl> + " mappings " : { <nl> + " nativelibs / windows_x64 / grpc_csharp_ext . dll " : " . . / . . / . . / vsprojects / x64 / Debug / grpc_csharp_ext . dll " , <nl> + " nativelibs / windows_x86 / grpc_csharp_ext . dll " : " . . / . . / . . / vsprojects / Debug / grpc_csharp_ext . dll " , <nl> + " nativelibs / linux_x64 / libgrpc_csharp_ext . so " : " . . / . . / . . / libs / dbg / libgrpc_csharp_ext . so " , <nl> + " nativelibs / macosx_x64 / libgrpc_csharp_ext . 
dylib " : " . . / . . / . . / libs / dbg / libgrpc_csharp_ext . dylib " <nl> + } <nl> + } <nl> + } <nl> + } , <nl> + " Release " : { <nl> + " buildOptions " : { <nl> + " copyToOutput " : { <nl> + " include " : " data / * " , <nl> + " mappings " : { <nl> + " nativelibs / windows_x64 / grpc_csharp_ext . dll " : " . . / . . / . . / vsprojects / x64 / Release / grpc_csharp_ext . dll " , <nl> + " nativelibs / windows_x86 / grpc_csharp_ext . dll " : " . . / . . / . . / vsprojects / Release / grpc_csharp_ext . dll " , <nl> + " nativelibs / linux_x64 / libgrpc_csharp_ext . so " : " . . / . . / . . / libs / opt / libgrpc_csharp_ext . so " , <nl> + " nativelibs / macosx_x64 / libgrpc_csharp_ext . dylib " : " . . / . . / . . / libs / opt / libgrpc_csharp_ext . dylib " <nl> + } <nl> + } <nl> + } <nl> + } <nl> + } , <nl> + " runtimes " : { <nl> + " win7 - x64 " : { } , <nl> + " debian . 8 - x64 " : { } , <nl> + " osx . 10 . 11 - x64 " : { } <nl> + } , <nl> + <nl> + " dependencies " : { <nl> + " Grpc . Auth " : { <nl> + " target " : " project " <nl> + } , <nl> + " Grpc . Core " : { <nl> + " target " : " project " <nl> + } , <nl> + " Google . Protobuf " : " 3 . 0 . 0 - beta3 " , <nl> + " CommandLineParser " : " 1 . 9 . 71 " , <nl> + " NUnit " : " 3 . 2 . 0 " , <nl> + " NUnitLite " : " 3 . 2 . 0 - * " <nl> + } , <nl> + " frameworks " : { <nl> + " net45 " : { <nl> + " dependencies " : { <nl> + " Moq " : " 4 . 2 . 1510 . 2205 " <nl> + } , <nl> + " frameworkAssemblies " : { <nl> + " System . Runtime " : " " , <nl> + " System . IO " : " " <nl> + } <nl> + } , <nl> + " netstandard1 . 5 " : { <nl> + " imports " : [ <nl> + " portable - net45 " , <nl> + " net45 " <nl> + ] , <nl> + " dependencies " : { <nl> + " NETStandard . Library " : " 1 . 5 . 0 - rc2 - 24027 " , <nl> + " System . Linq . Expressions " : " 4 . 0 . 11 - rc2 - 24027 " <nl> + } <nl> + } <nl> + } <nl> + } <nl> mmm a / src / csharp / README . md <nl> ppp b / src / csharp / README . md <nl> PREREQUISITES <nl> HOW TO USE <nl> mmmmmmmmmmmm - - <nl> <nl> - * * Windows * * <nl> + * * Windows , Linux , Mac OS X * * <nl> <nl> - - Open Visual Studio and start a new project / solution . <nl> + - Open Visual Studio / MonoDevelop / Xamarin Studio and start a new project / solution . <nl> <nl> - Add NuGet package ` Grpc ` as a dependency ( Project options - > Manage NuGet Packages ) . <nl> - That will also pull all the transitive dependencies ( including the gRPC native library that <nl> - gRPC C # is using internally ) . <nl> - <nl> - * * Linux ( Debian ) * * <nl> - <nl> - - Open MonoDevelop and start a new project / solution . <nl> - <nl> - - Add NuGet package ` Grpc ` as a dependency ( Project - > Add NuGet packages ) . <nl> - That will also pull all the transitive dependencies ( including the gRPC native library that <nl> - gRPC C # is using internally ) . <nl> - <nl> - - NOTE : gRPC C # doesn ' t have a good story yet for shipping precompiled Linux version of Protocol Buffers compiler ( _protoc_ ) and the gRPC _protoc_ plugin . You can install them using [ gRPC Linuxbrew instructions ] [ ] . <nl> <nl> - * * Mac OS X * * <nl> - <nl> - - Open Xamarin Studio and start a new project / solution . <nl> - <nl> - - Add NuGet package ` Grpc ` as a dependency ( Project - > Add NuGet packages ) . <nl> - That will also pull all the transitive dependencies ( including the gRPC native library that <nl> - gRPC C # is using internally ) . 
<nl> - <nl> - - NOTE : gRPC C # doesn ' t have a good story yet for shipping precompiled Mac OS X version of Protocol Buffers compiler ( _protoc_ ) and the gRPC _protoc_ plugin . You can install them using [ gRPC Homebrew instructions ] [ ] . <nl> + - To be able to generate code from Protocol Buffer ( ` . proto ` ) file definitions , add NuGet package ` Grpc . Tools ` that contains Protocol Buffers compiler ( _protoc_ ) and the gRPC _protoc_ plugin . <nl> <nl> BUILD FROM SOURCE <nl> mmmmmmmmmmmmmmm - - <nl> If you are a user of gRPC C # , go to Usage section above . <nl> - Open ` src \ csharp \ Grpc . sln ` ( path is relative to gRPC repository root ) <nl> using Visual Studio <nl> <nl> - * * Linux * * <nl> + * * Linux and Mac OS X * * <nl> <nl> - The grpc_csharp_ext native library needs to be built so you can build the gRPC C # solution : <nl> - ` ` ` sh <nl> - # from the gRPC repository root <nl> - $ make CONFIG = dbg grpc_csharp_ext <nl> - ` ` ` <nl> - <nl> - - Use MonoDevelop to open the solution Grpc . sln <nl> - <nl> - * * Mac OS X * * <nl> - <nl> - - The grpc_csharp_ext native library needs to be built so you can build the gRPC C # solution . <nl> - <nl> ` ` ` sh <nl> # from the gRPC repository root <nl> $ tools / run_tests / run_tests . py - c dbg - l csharp - - build_only <nl> ` ` ` <nl> <nl> - - Use Xamarin Studio to open the solution Grpc . sln <nl> + - Use MonoDevelop / Xamarin Studio to open the solution Grpc . sln <nl> <nl> RUNNING TESTS <nl> mmmmmmmmmmmm - <nl> different languages . <nl> tools / run_tests / run_tests . py - l csharp <nl> ` ` ` <nl> <nl> + ON . NET CORE SUPPORT <nl> + mmmmmmmmmmmmmmmmmm <nl> + <nl> + We are committed to providing full support for [ . NET Core ] ( https : / / dotnet . github . io / ) in the near future , <nl> + but currently , the support for . NET Core is experimental / work - in - progress . <nl> + <nl> DOCUMENTATION <nl> mmmmmmmmmmmm - <nl> - - the gRPC C # reference documentation is available online at [ grpc . io ] [ ] <nl> - - [ Helloworld example ] [ ] <nl> + - [ API Reference ] [ ] <nl> + - [ Helloworld Example ] [ ] <nl> + - [ RouteGuide Tutorial ] [ ] <nl> <nl> CONTENTS <nl> mmmmmm - - <nl> CONTENTS <nl> - ext : <nl> The extension library that wraps C API to be more digestible by C # . <nl> - Grpc . Auth : <nl> - gRPC OAuth2 support . <nl> + gRPC OAuth2 / JWT support . <nl> - Grpc . Core : <nl> The main gRPC C # library . <nl> - Grpc . Examples : <nl> API examples for math . proto <nl> - Grpc . Examples . MathClient : <nl> - An example client that sends some requests to math server . <nl> + An example client that sends requests to math server . <nl> - Grpc . Examples . MathServer : <nl> - An example client that sends some requests to math server . <nl> + An example server that implements a simple math service . <nl> - Grpc . IntegrationTesting : <nl> Cross - language gRPC implementation testing ( interop testing ) . <nl> <nl> Internally , gRPC C # uses a native library written in C ( gRPC C core ) and invokes <nl> <nl> Prior to version 0 . 13 , installing ` grpc_csharp_ext ` was required to make gRPC work on Linux and MacOS . Starting with version 0 . 13 , we have improved the packaging story significantly and precompiled versions of the native library for all supported platforms are now shipped with the NuGet package . Just installing the ` Grpc ` NuGet package should be the only step needed to use gRPC C # , regardless of your platform ( Windows , Linux or Mac ) and the bitness ( 32 or 64bit ) .
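To make the NuGet workflow described in this README change concrete, here is a minimal C# sketch of calling a generated service once the `Grpc` and `Grpc.Tools` packages are installed. It assumes the `Greeter` service from the Helloworld example referenced above; the generated names (`Greeter.NewClient`, `HelloRequest`, `HelloReply`) follow that example's output of this era and will differ for other `.proto` files.

```csharp
using System;
using Grpc.Core;
using Helloworld;  // assumed namespace of the code generated from helloworld.proto via Grpc.Tools

class GreeterClientSketch
{
    static void Main()
    {
        // The Grpc NuGet package ships the precompiled grpc_csharp_ext native library,
        // so no separate native build step is needed on Windows, Linux or Mac.
        var channel = new Channel("127.0.0.1:50051", ChannelCredentials.Insecure);
        var client = Greeter.NewClient(channel);

        // Simple unary call against a locally running greeter server (address is illustrative).
        HelloReply reply = client.SayHello(new HelloRequest { Name = "world" });
        Console.WriteLine("Greeting: " + reply.Message);

        channel.ShutdownAsync().Wait();
    }
}
```

The `Grpc.IntegrationTesting` changes in this same commit use the identical pattern (`TestService.NewClient(channel)`): a `Channel` plus a generated client factory is all that the installed packages require.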
<nl> <nl> - [ gRPC Linuxbrew instructions ] : https : / / github . com / grpc / homebrew - grpc # quick - install - linux <nl> - [ gRPC Homebrew instructions ] : https : / / github . com / grpc / homebrew - grpc # quick - install - linux <nl> - [ homebrew ] : http : / / brew . sh <nl> - [ gRPC install script ] : https : / / raw . githubusercontent . com / grpc / homebrew - grpc / master / scripts / install <nl> - [ grpc . io ] : http : / / www . grpc . io / docs / installation / csharp . html <nl> - [ Debian jessie - backports ] : http : / / backports . debian . org / Instructions / <nl> - [ Helloworld example ] : . . / . . / examples / csharp / helloworld <nl> + [ API Reference ] : http : / / www . grpc . io / grpc / csharp / <nl> + [ Helloworld Example ] : . . / . . / examples / csharp / helloworld <nl> + [ RouteGuide Tutorial ] : http : / / www . grpc . io / docs / tutorials / basic / csharp . html <nl> mmm a / src / csharp / build_packages . bat <nl> ppp b / src / csharp / build_packages . bat <nl> set NUGET = C : \ nuget \ nuget . exe <nl> <nl> @ rem Collect the artifacts built by the previous build step if running on Jenkins <nl> @ rem TODO ( jtattermusch ) : is there a better way to do this ? <nl> - xcopy / Y / I . . \ . . \ architecture = x86 , language = csharp , platform = windows \ artifacts \ * Grpc . Core \ windows_x86 \ <nl> - xcopy / Y / I . . \ . . \ architecture = x64 , language = csharp , platform = windows \ artifacts \ * Grpc . Core \ windows_x64 \ <nl> - xcopy / Y / I . . \ . . \ architecture = x86 , language = csharp , platform = linux \ artifacts \ * Grpc . Core \ linux_x86 \ <nl> - xcopy / Y / I . . \ . . \ architecture = x64 , language = csharp , platform = linux \ artifacts \ * Grpc . Core \ linux_x64 \ <nl> - xcopy / Y / I . . \ . . \ architecture = x86 , language = csharp , platform = macos \ artifacts \ * Grpc . Core \ macosx_x86 \ <nl> - xcopy / Y / I . . \ . . \ architecture = x64 , language = csharp , platform = macos \ artifacts \ * Grpc . Core \ macosx_x64 \ <nl> + xcopy / Y / I . . \ . . \ architecture = x86 , language = csharp , platform = windows \ artifacts \ * nativelibs \ windows_x86 \ <nl> + xcopy / Y / I . . \ . . \ architecture = x64 , language = csharp , platform = windows \ artifacts \ * nativelibs \ windows_x64 \ <nl> + xcopy / Y / I . . \ . . \ architecture = x86 , language = csharp , platform = linux \ artifacts \ * nativelibs \ linux_x86 \ <nl> + xcopy / Y / I . . \ . . \ architecture = x64 , language = csharp , platform = linux \ artifacts \ * nativelibs \ linux_x64 \ <nl> + xcopy / Y / I . . \ . . \ architecture = x86 , language = csharp , platform = macos \ artifacts \ * nativelibs \ macosx_x86 \ <nl> + xcopy / Y / I . . \ . . \ architecture = x64 , language = csharp , platform = macos \ artifacts \ * nativelibs \ macosx_x64 \ <nl> <nl> @ rem Collect protoc artifacts built by the previous build step <nl> xcopy / Y / I . . \ . . \ architecture = x86 , language = protoc , platform = windows \ artifacts \ * protoc_plugins \ windows_x86 \ <nl> mmm a / src / csharp / ext / grpc_csharp_ext . c <nl> ppp b / src / csharp / ext / grpc_csharp_ext . c <nl> grpcsharp_batch_context_recv_initial_metadata ( <nl> <nl> GPR_EXPORT intptr_t GPR_CALLTYPE grpcsharp_batch_context_recv_message_length ( <nl> const grpcsharp_batch_context * ctx ) { <nl> + grpc_byte_buffer_reader reader ; <nl> if ( ! 
ctx - > recv_message ) { <nl> return - 1 ; <nl> } <nl> - return ( intptr_t ) grpc_byte_buffer_length ( ctx - > recv_message ) ; <nl> + grpc_byte_buffer_reader_init ( & reader , ctx - > recv_message ) ; <nl> + return ( intptr_t ) grpc_byte_buffer_length ( reader . buffer_out ) ; <nl> } <nl> <nl> / * <nl> deleted file mode 100644 <nl> index cc688e2bc71 . . 00000000000 <nl> mmm a / src / csharp / grpc . native . csharp / grpc . native . csharp . nuspec <nl> ppp / dev / null <nl> <nl> - < ? xml version = " 1 . 0 " encoding = " utf - 8 " ? > <nl> - < package > <nl> - < metadata > <nl> - < id > grpc . native . csharp < / id > <nl> - < version > $ version $ < / version > <nl> - < authors > Google Inc . < / authors > <nl> - < owners > grpc - packages < / owners > <nl> - < licenseUrl > https : / / github . com / grpc / grpc / blob / master / LICENSE < / licenseUrl > <nl> - < projectUrl > http : / / github . com / grpc / grpc < / projectUrl > <nl> - < requireLicenseAcceptance > false < / requireLicenseAcceptance > <nl> - < description > Native extension needed by gRPC C # library . This is not the package you are looking for , it is only meant to be used as a dependency . < / description > <nl> - < releaseNotes > Release of gRPC C core $ version $ libraries . < / releaseNotes > <nl> - < copyright > Copyright 2015 < / copyright > <nl> - < title > gRPC C # Native Extension < / title > <nl> - < summary > Native library required by gRPC C # < / summary > <nl> - < tags > gRPC native < / tags > <nl> - < / metadata > <nl> - < files > <nl> - < file src = " grpc . native . csharp . targets " target = " \ build \ portable - net45 + netcore45 + wpa81 + wp8 \ grpc . native . csharp . targets " / > <nl> - < file src = " windows_x86 / grpc_csharp_ext . dll " target = " / build / native / bin / windows_x86 / grpc_csharp_ext . dll " / > <nl> - < file src = " windows_x64 / grpc_csharp_ext . dll " target = " / build / native / bin / windows_x64 / grpc_csharp_ext . dll " / > <nl> - < file src = " linux_x86 / libgrpc_csharp_ext . so " target = " / build / native / bin / linux_x86 / libgrpc_csharp_ext . so " / > <nl> - < file src = " linux_x64 / libgrpc_csharp_ext . so " target = " / build / native / bin / linux_x64 / libgrpc_csharp_ext . so " / > <nl> - < file src = " macosx_x86 / libgrpc_csharp_ext . dylib " target = " / build / native / bin / macosx_x86 / libgrpc_csharp_ext . dylib " / > <nl> - < file src = " macosx_x64 / libgrpc_csharp_ext . dylib " target = " / build / native / bin / macosx_x64 / libgrpc_csharp_ext . dylib " / > <nl> - < / files > <nl> - < / package > <nl> mmm a / src / node / interop / interop_server . js <nl> ppp b / src / node / interop / interop_server . js <nl> var testProto = grpc . load ( { <nl> var ECHO_INITIAL_KEY = ' x - grpc - test - echo - initial ' ; <nl> var ECHO_TRAILING_KEY = ' x - grpc - test - echo - trailing - bin ' ; <nl> <nl> - var incompressible_data = fs . readFileSync ( <nl> - __dirname + ' / . . / . . / . . / test / cpp / interop / rnd . dat ' ) ; <nl> - <nl> / * * <nl> * Create a buffer filled with size zeroes <nl> * @ param { number } size The length of the buffer <nl> function getEchoTrailer ( call ) { <nl> } <nl> <nl> function getPayload ( payload_type , size ) { <nl> - if ( payload_type = = = ' RANDOM ' ) { <nl> - payload_type = [ ' COMPRESSABLE ' , <nl> - ' UNCOMPRESSABLE ' ] [ Math . random ( ) < 0 . 5 ? 
0 : 1 ] ; <nl> - } <nl> - var body ; <nl> - switch ( payload_type ) { <nl> - case ' COMPRESSABLE ' : body = zeroBuffer ( size ) ; break ; <nl> - case ' UNCOMPRESSABLE ' : incompressible_data . slice ( size ) ; break ; <nl> - } <nl> + var body = zeroBuffer ( size ) ; <nl> return { type : payload_type , body : body } ; <nl> } <nl> <nl> mmm a / src / objective - c / GRPCClient / GRPCCall + ChannelCredentials . h <nl> ppp b / src / objective - c / GRPCClient / GRPCCall + ChannelCredentials . h <nl> <nl> * / <nl> + ( BOOL ) setTLSPEMRootCerts : ( nullable NSString * ) pemRootCert <nl> forHost : ( nonnull NSString * ) host <nl> - error : ( NSError * * ) errorPtr ; <nl> + error : ( NSError * _Nullable * _Nullable ) errorPtr ; <nl> / * * <nl> * Configures @ c host with TLS / SSL Client Credentials and optionally trusted root Certificate <nl> * Authorities . If @ c pemRootCerts is nil , the default CA Certificates bundled with gRPC will be <nl> <nl> withPrivateKey : ( nullable NSString * ) pemPrivateKey <nl> withCertChain : ( nullable NSString * ) pemCertChain <nl> forHost : ( nonnull NSString * ) host <nl> - error : ( NSError * * ) errorPtr ; <nl> + error : ( NSError * _Nullable * _Nullable ) errorPtr ; <nl> <nl> @ end <nl> mmm a / src / objective - c / ProtoRPC / ProtoMethod . h <nl> ppp b / src / objective - c / ProtoRPC / ProtoMethod . h <nl> __attribute__ ( ( deprecated ( " Please use GRPCProtoMethod . " ) ) ) <nl> * This subclass is empty now . Eventually we ' ll remove ProtoMethod class <nl> * to avoid potential naming conflict <nl> * / <nl> + # pragma clang diagnostic push <nl> + # pragma clang diagnostic ignored " - Wdeprecated - declarations " <nl> @ interface GRPCProtoMethod : ProtoMethod <nl> + # pragma clang diagnostic pop <nl> <nl> @ end <nl> mmm a / src / objective - c / ProtoRPC / ProtoRPC . h <nl> ppp b / src / objective - c / ProtoRPC / ProtoRPC . h <nl> __attribute__ ( ( deprecated ( " Please use GRPCProtoCall . " ) ) ) <nl> * This subclass is empty now . Eventually we ' ll remove ProtoRPC class <nl> * to avoid potential naming conflict <nl> * / <nl> + # pragma clang diagnostic push <nl> + # pragma clang diagnostic ignored " - Wdeprecated - declarations " <nl> @ interface GRPCProtoCall : ProtoRPC <nl> + # pragma clang diagnostic pop <nl> <nl> @ end <nl> mmm a / src / objective - c / ProtoRPC / ProtoService . h <nl> ppp b / src / objective - c / ProtoRPC / ProtoService . h <nl> __attribute__ ( ( deprecated ( " Please use GRPCProtoService . " ) ) ) <nl> * This subclass is empty now . Eventually we ' ll remove ProtoService class <nl> * to avoid potential naming conflict <nl> * / <nl> + # pragma clang diagnostic push <nl> + # pragma clang diagnostic ignored " - Wdeprecated - declarations " <nl> @ interface GRPCProtoService : ProtoService <nl> + # pragma clang diagnostic pop <nl> <nl> @ end <nl> mmm a / src / objective - c / ProtoRPC / ProtoService . m <nl> ppp b / src / objective - c / ProtoRPC / ProtoService . 
m <nl> - ( ProtoRPC * ) RPCToMethod : ( NSString * ) method <nl> requestsWriter : ( GRXWriter * ) requestsWriter <nl> responseClass : ( Class ) responseClass <nl> responsesWriteable : ( id < GRXWriteable > ) responsesWriteable { <nl> - ProtoMethod * methodName = [ [ ProtoMethod alloc ] initWithPackage : _packageName <nl> - service : _serviceName <nl> - method : method ] ; <nl> + GRPCProtoMethod * methodName = [ [ GRPCProtoMethod alloc ] initWithPackage : _packageName <nl> + service : _serviceName <nl> + method : method ] ; <nl> return [ [ ProtoRPC alloc ] initWithHost : _host <nl> method : methodName <nl> requestsWriter : requestsWriter <nl> mmm a / src / objective - c / examples / RemoteTestClient / RemoteTest . podspec <nl> ppp b / src / objective - c / examples / RemoteTestClient / RemoteTest . podspec <nl> Pod : : Spec . new do | s | <nl> s . name = " RemoteTest " <nl> s . version = " 0 . 0 . 1 " <nl> s . license = " New BSD " <nl> + s . authors = { ' gRPC contributors ' = > ' grpc - io @ googlegroups . com ' } <nl> + s . homepage = " http : / / www . grpc . io / " <nl> + s . summary = " RemoteTest example " <nl> + s . source = { : git = > ' https : / / github . com / grpc / grpc . git ' } <nl> <nl> s . ios . deployment_target = ' 7 . 1 ' <nl> s . osx . deployment_target = ' 10 . 9 ' <nl> mmm a / src / objective - c / examples / Sample / Sample . xcodeproj / project . pbxproj <nl> ppp b / src / objective - c / examples / Sample / Sample . xcodeproj / project . pbxproj <nl> <nl> objects = { <nl> <nl> / * Begin PBXBuildFile section * / <nl> + 426A5020E0E158A101BCA1D9 / * libPods - Sample . a in Frameworks * / = { isa = PBXBuildFile ; fileRef = C20055928615A6F8434E26B4 / * libPods - Sample . a * / ; } ; <nl> 6369A2701A9322E20015FC5C / * main . m in Sources * / = { isa = PBXBuildFile ; fileRef = 6369A26F1A9322E20015FC5C / * main . m * / ; } ; <nl> 6369A2731A9322E20015FC5C / * AppDelegate . m in Sources * / = { isa = PBXBuildFile ; fileRef = 6369A2721A9322E20015FC5C / * AppDelegate . m * / ; } ; <nl> 6369A2761A9322E20015FC5C / * ViewController . m in Sources * / = { isa = PBXBuildFile ; fileRef = 6369A2751A9322E20015FC5C / * ViewController . m * / ; } ; <nl> 6369A2791A9322E20015FC5C / * Main . storyboard in Resources * / = { isa = PBXBuildFile ; fileRef = 6369A2771A9322E20015FC5C / * Main . storyboard * / ; } ; <nl> 6369A27B1A9322E20015FC5C / * Images . xcassets in Resources * / = { isa = PBXBuildFile ; fileRef = 6369A27A1A9322E20015FC5C / * Images . xcassets * / ; } ; <nl> - FC81FE63CA655031F3524EC0 / * libPods . a in Frameworks * / = { isa = PBXBuildFile ; fileRef = 2DC7B7C4C0410F43B9621631 / * libPods . a * / ; } ; <nl> / * End PBXBuildFile section * / <nl> <nl> / * Begin PBXFileReference section * / <nl> - 2DC7B7C4C0410F43B9621631 / * libPods . a * / = { isa = PBXFileReference ; explicitFileType = archive . ar ; includeInIndex = 0 ; path = libPods . a ; sourceTree = BUILT_PRODUCTS_DIR ; } ; <nl> + 5A8C9F4B28733B249DE4AB6D / * Pods - Sample . release . xcconfig * / = { isa = PBXFileReference ; includeInIndex = 1 ; lastKnownFileType = text . xcconfig ; name = " Pods - Sample . release . xcconfig " ; path = " Pods / Target Support Files / Pods - Sample / Pods - Sample . release . xcconfig " ; sourceTree = " < group > " ; } ; <nl> 6369A26A1A9322E20015FC5C / * Sample . app * / = { isa = PBXFileReference ; explicitFileType = wrapper . application ; includeInIndex = 0 ; path = Sample . app ; sourceTree = BUILT_PRODUCTS_DIR ; } ; <nl> 6369A26E1A9322E20015FC5C / * Info . 
plist * / = { isa = PBXFileReference ; lastKnownFileType = text . plist . xml ; path = Info . plist ; sourceTree = " < group > " ; } ; <nl> 6369A26F1A9322E20015FC5C / * main . m * / = { isa = PBXFileReference ; lastKnownFileType = sourcecode . c . objc ; path = main . m ; sourceTree = " < group > " ; } ; <nl> <nl> 6369A2751A9322E20015FC5C / * ViewController . m * / = { isa = PBXFileReference ; lastKnownFileType = sourcecode . c . objc ; path = ViewController . m ; sourceTree = " < group > " ; } ; <nl> 6369A2781A9322E20015FC5C / * Base * / = { isa = PBXFileReference ; lastKnownFileType = file . storyboard ; name = Base ; path = Base . lproj / Main . storyboard ; sourceTree = " < group > " ; } ; <nl> 6369A27A1A9322E20015FC5C / * Images . xcassets * / = { isa = PBXFileReference ; lastKnownFileType = folder . assetcatalog ; path = Images . xcassets ; sourceTree = " < group > " ; } ; <nl> - AC29DD6FCDF962F519FEBB0D / * Pods . debug . xcconfig * / = { isa = PBXFileReference ; includeInIndex = 1 ; lastKnownFileType = text . xcconfig ; name = Pods . debug . xcconfig ; path = " Pods / Target Support Files / Pods / Pods . debug . xcconfig " ; sourceTree = " < group > " ; } ; <nl> - C68330F8D451CC6ACEABA09F / * Pods . release . xcconfig * / = { isa = PBXFileReference ; includeInIndex = 1 ; lastKnownFileType = text . xcconfig ; name = Pods . release . xcconfig ; path = " Pods / Target Support Files / Pods / Pods . release . xcconfig " ; sourceTree = " < group > " ; } ; <nl> + C20055928615A6F8434E26B4 / * libPods - Sample . a * / = { isa = PBXFileReference ; explicitFileType = archive . ar ; includeInIndex = 0 ; path = " libPods - Sample . a " ; sourceTree = BUILT_PRODUCTS_DIR ; } ; <nl> + E3C01DF315C4E7433BCEC6E6 / * Pods - Sample . debug . xcconfig * / = { isa = PBXFileReference ; includeInIndex = 1 ; lastKnownFileType = text . xcconfig ; name = " Pods - Sample . debug . xcconfig " ; path = " Pods / Target Support Files / Pods - Sample / Pods - Sample . debug . xcconfig " ; sourceTree = " < group > " ; } ; <nl> / * End PBXFileReference section * / <nl> <nl> / * Begin PBXFrameworksBuildPhase section * / <nl> <nl> isa = PBXFrameworksBuildPhase ; <nl> buildActionMask = 2147483647 ; <nl> files = ( <nl> - FC81FE63CA655031F3524EC0 / * libPods . a in Frameworks * / , <nl> + 426A5020E0E158A101BCA1D9 / * libPods - Sample . a in Frameworks * / , <nl> ) ; <nl> runOnlyForDeploymentPostprocessing = 0 ; <nl> } ; <nl> <nl> AB3331C9AE6488E61B2B094E / * Pods * / = { <nl> isa = PBXGroup ; <nl> children = ( <nl> - AC29DD6FCDF962F519FEBB0D / * Pods . debug . xcconfig * / , <nl> - C68330F8D451CC6ACEABA09F / * Pods . release . xcconfig * / , <nl> + E3C01DF315C4E7433BCEC6E6 / * Pods - Sample . debug . xcconfig * / , <nl> + 5A8C9F4B28733B249DE4AB6D / * Pods - Sample . release . xcconfig * / , <nl> ) ; <nl> name = Pods ; <nl> sourceTree = " < group > " ; <nl> <nl> C4C2C5219053E079C9EFB930 / * Frameworks * / = { <nl> isa = PBXGroup ; <nl> children = ( <nl> - 2DC7B7C4C0410F43B9621631 / * libPods . a * / , <nl> + C20055928615A6F8434E26B4 / * libPods - Sample . a * / , <nl> ) ; <nl> name = Frameworks ; <nl> sourceTree = " < group > " ; <nl> <nl> isa = PBXNativeTarget ; <nl> buildConfigurationList = 6369A28D1A9322E20015FC5C / * Build configuration list for PBXNativeTarget " Sample " * / ; <nl> buildPhases = ( <nl> - 41F7486D8F66994B0BFB84AF / * Check Pods Manifest . lock * / , <nl> + 41F7486D8F66994B0BFB84AF / * [ CP ] Check Pods Manifest . 
lock * / , <nl> 6369A2661A9322E20015FC5C / * Sources * / , <nl> 6369A2671A9322E20015FC5C / * Frameworks * / , <nl> 6369A2681A9322E20015FC5C / * Resources * / , <nl> - 04554623324BE4A838846086 / * Copy Pods Resources * / , <nl> + 04554623324BE4A838846086 / * [ CP ] Copy Pods Resources * / , <nl> + C7FAD018D05AB5F0B0FE81E2 / * [ CP ] Embed Pods Frameworks * / , <nl> ) ; <nl> buildRules = ( <nl> ) ; <nl> <nl> / * End PBXResourcesBuildPhase section * / <nl> <nl> / * Begin PBXShellScriptBuildPhase section * / <nl> - 04554623324BE4A838846086 / * Copy Pods Resources * / = { <nl> + 04554623324BE4A838846086 / * [ CP ] Copy Pods Resources * / = { <nl> isa = PBXShellScriptBuildPhase ; <nl> buildActionMask = 2147483647 ; <nl> files = ( <nl> ) ; <nl> inputPaths = ( <nl> ) ; <nl> - name = " Copy Pods Resources " ; <nl> + name = " [ CP ] Copy Pods Resources " ; <nl> outputPaths = ( <nl> ) ; <nl> runOnlyForDeploymentPostprocessing = 0 ; <nl> shellPath = / bin / sh ; <nl> - shellScript = " \ " $ { SRCROOT } / Pods / Target Support Files / Pods / Pods - resources . sh \ " \ n " ; <nl> + shellScript = " \ " $ { SRCROOT } / Pods / Target Support Files / Pods - Sample / Pods - Sample - resources . sh \ " \ n " ; <nl> showEnvVarsInLog = 0 ; <nl> } ; <nl> - 41F7486D8F66994B0BFB84AF / * Check Pods Manifest . lock * / = { <nl> + 41F7486D8F66994B0BFB84AF / * [ CP ] Check Pods Manifest . lock * / = { <nl> isa = PBXShellScriptBuildPhase ; <nl> buildActionMask = 2147483647 ; <nl> files = ( <nl> ) ; <nl> inputPaths = ( <nl> ) ; <nl> - name = " Check Pods Manifest . lock " ; <nl> + name = " [ CP ] Check Pods Manifest . lock " ; <nl> outputPaths = ( <nl> ) ; <nl> runOnlyForDeploymentPostprocessing = 0 ; <nl> <nl> shellScript = " diff \ " $ { PODS_ROOT } / . . / Podfile . lock \ " \ " $ { PODS_ROOT } / Manifest . lock \ " > / dev / null \ nif [ [ $ ? ! = 0 ] ] ; then \ n cat < < EOM \ nerror : The sandbox is not in sync with the Podfile . lock . Run ' pod install ' or update your CocoaPods installation . \ nEOM \ n exit 1 \ nfi \ n " ; <nl> showEnvVarsInLog = 0 ; <nl> } ; <nl> + C7FAD018D05AB5F0B0FE81E2 / * [ CP ] Embed Pods Frameworks * / = { <nl> + isa = PBXShellScriptBuildPhase ; <nl> + buildActionMask = 2147483647 ; <nl> + files = ( <nl> + ) ; <nl> + inputPaths = ( <nl> + ) ; <nl> + name = " [ CP ] Embed Pods Frameworks " ; <nl> + outputPaths = ( <nl> + ) ; <nl> + runOnlyForDeploymentPostprocessing = 0 ; <nl> + shellPath = / bin / sh ; <nl> + shellScript = " \ " $ { SRCROOT } / Pods / Target Support Files / Pods - Sample / Pods - Sample - frameworks . sh \ " \ n " ; <nl> + showEnvVarsInLog = 0 ; <nl> + } ; <nl> / * End PBXShellScriptBuildPhase section * / <nl> <nl> / * Begin PBXSourcesBuildPhase section * / <nl> <nl> } ; <nl> 6369A28E1A9322E20015FC5C / * Debug * / = { <nl> isa = XCBuildConfiguration ; <nl> - baseConfigurationReference = AC29DD6FCDF962F519FEBB0D / * Pods . debug . xcconfig * / ; <nl> + baseConfigurationReference = E3C01DF315C4E7433BCEC6E6 / * Pods - Sample . debug . xcconfig * / ; <nl> buildSettings = { <nl> ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon ; <nl> INFOPLIST_FILE = Sample / Info . plist ; <nl> <nl> } ; <nl> 6369A28F1A9322E20015FC5C / * Release * / = { <nl> isa = XCBuildConfiguration ; <nl> - baseConfigurationReference = C68330F8D451CC6ACEABA09F / * Pods . release . xcconfig * / ; <nl> + baseConfigurationReference = 5A8C9F4B28733B249DE4AB6D / * Pods - Sample . release . 
xcconfig * / ; <nl> buildSettings = { <nl> ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon ; <nl> INFOPLIST_FILE = Sample / Info . plist ; <nl> mmm a / src / objective - c / examples / SwiftSample / SwiftSample . xcodeproj / project . pbxproj <nl> ppp b / src / objective - c / examples / SwiftSample / SwiftSample . xcodeproj / project . pbxproj <nl> <nl> objects = { <nl> <nl> / * Begin PBXBuildFile section * / <nl> - 253D3A297105CA46DA960A11 / * libPods . a in Frameworks * / = { isa = PBXBuildFile ; fileRef = DC58ACA18DCCB1553531B885 / * libPods . a * / ; } ; <nl> 633BFFC81B950B210007E424 / * AppDelegate . swift in Sources * / = { isa = PBXBuildFile ; fileRef = 633BFFC71B950B210007E424 / * AppDelegate . swift * / ; } ; <nl> 633BFFCA1B950B210007E424 / * ViewController . swift in Sources * / = { isa = PBXBuildFile ; fileRef = 633BFFC91B950B210007E424 / * ViewController . swift * / ; } ; <nl> 633BFFCD1B950B210007E424 / * Main . storyboard in Resources * / = { isa = PBXBuildFile ; fileRef = 633BFFCB1B950B210007E424 / * Main . storyboard * / ; } ; <nl> 633BFFCF1B950B210007E424 / * Images . xcassets in Resources * / = { isa = PBXBuildFile ; fileRef = 633BFFCE1B950B210007E424 / * Images . xcassets * / ; } ; <nl> + 92EDB1408A1E1E7DDAB25D9C / * libPods - SwiftSample . a in Frameworks * / = { isa = PBXBuildFile ; fileRef = 69BB5C6CA3C1F97E007AC527 / * libPods - SwiftSample . a * / ; } ; <nl> / * End PBXBuildFile section * / <nl> <nl> / * Begin PBXFileReference section * / <nl> - 12C7B447AA80E624D93B5C54 / * Pods . debug . xcconfig * / = { isa = PBXFileReference ; includeInIndex = 1 ; lastKnownFileType = text . xcconfig ; name = Pods . debug . xcconfig ; path = " Pods / Target Support Files / Pods / Pods . debug . xcconfig " ; sourceTree = " < group > " ; } ; <nl> 633BFFC21B950B210007E424 / * SwiftSample . app * / = { isa = PBXFileReference ; explicitFileType = wrapper . application ; includeInIndex = 0 ; path = SwiftSample . app ; sourceTree = BUILT_PRODUCTS_DIR ; } ; <nl> 633BFFC61B950B210007E424 / * Info . plist * / = { isa = PBXFileReference ; lastKnownFileType = text . plist . xml ; path = Info . plist ; sourceTree = " < group > " ; } ; <nl> 633BFFC71B950B210007E424 / * AppDelegate . swift * / = { isa = PBXFileReference ; lastKnownFileType = sourcecode . swift ; path = AppDelegate . swift ; sourceTree = " < group > " ; } ; <nl> <nl> 633BFFCC1B950B210007E424 / * Base * / = { isa = PBXFileReference ; lastKnownFileType = file . storyboard ; name = Base ; path = Base . lproj / Main . storyboard ; sourceTree = " < group > " ; } ; <nl> 633BFFCE1B950B210007E424 / * Images . xcassets * / = { isa = PBXFileReference ; lastKnownFileType = folder . assetcatalog ; path = Images . xcassets ; sourceTree = " < group > " ; } ; <nl> 6367AD231B951655007FD3A4 / * Bridging - Header . h * / = { isa = PBXFileReference ; fileEncoding = 4 ; lastKnownFileType = sourcecode . c . h ; path = " Bridging - Header . h " ; sourceTree = " < group > " ; } ; <nl> - C335CBC4C160E0D9EDEE646B / * Pods . release . xcconfig * / = { isa = PBXFileReference ; includeInIndex = 1 ; lastKnownFileType = text . xcconfig ; name = Pods . release . xcconfig ; path = " Pods / Target Support Files / Pods / Pods . release . xcconfig " ; sourceTree = " < group > " ; } ; <nl> - DC58ACA18DCCB1553531B885 / * libPods . a * / = { isa = PBXFileReference ; explicitFileType = archive . ar ; includeInIndex = 0 ; path = libPods . a ; sourceTree = BUILT_PRODUCTS_DIR ; } ; <nl> + 69BB5C6CA3C1F97E007AC527 / * libPods - SwiftSample . 
a * / = { isa = PBXFileReference ; explicitFileType = archive . ar ; includeInIndex = 0 ; path = " libPods - SwiftSample . a " ; sourceTree = BUILT_PRODUCTS_DIR ; } ; <nl> + A7E614A494D89D01BB395761 / * Pods - SwiftSample . debug . xcconfig * / = { isa = PBXFileReference ; includeInIndex = 1 ; lastKnownFileType = text . xcconfig ; name = " Pods - SwiftSample . debug . xcconfig " ; path = " Pods / Target Support Files / Pods - SwiftSample / Pods - SwiftSample . debug . xcconfig " ; sourceTree = " < group > " ; } ; <nl> + C314E3E246AF23AC29B38FCF / * Pods - SwiftSample . release . xcconfig * / = { isa = PBXFileReference ; includeInIndex = 1 ; lastKnownFileType = text . xcconfig ; name = " Pods - SwiftSample . release . xcconfig " ; path = " Pods / Target Support Files / Pods - SwiftSample / Pods - SwiftSample . release . xcconfig " ; sourceTree = " < group > " ; } ; <nl> / * End PBXFileReference section * / <nl> <nl> / * Begin PBXFrameworksBuildPhase section * / <nl> <nl> isa = PBXFrameworksBuildPhase ; <nl> buildActionMask = 2147483647 ; <nl> files = ( <nl> - 253D3A297105CA46DA960A11 / * libPods . a in Frameworks * / , <nl> + 92EDB1408A1E1E7DDAB25D9C / * libPods - SwiftSample . a in Frameworks * / , <nl> ) ; <nl> runOnlyForDeploymentPostprocessing = 0 ; <nl> } ; <nl> <nl> 31F283C976AE97586C17CCD9 / * Pods * / = { <nl> isa = PBXGroup ; <nl> children = ( <nl> - 12C7B447AA80E624D93B5C54 / * Pods . debug . xcconfig * / , <nl> - C335CBC4C160E0D9EDEE646B / * Pods . release . xcconfig * / , <nl> + A7E614A494D89D01BB395761 / * Pods - SwiftSample . debug . xcconfig * / , <nl> + C314E3E246AF23AC29B38FCF / * Pods - SwiftSample . release . xcconfig * / , <nl> ) ; <nl> name = Pods ; <nl> sourceTree = " < group > " ; <nl> <nl> 9D63A7F6423989BA306810CA / * Frameworks * / = { <nl> isa = PBXGroup ; <nl> children = ( <nl> - DC58ACA18DCCB1553531B885 / * libPods . a * / , <nl> + 69BB5C6CA3C1F97E007AC527 / * libPods - SwiftSample . a * / , <nl> ) ; <nl> name = Frameworks ; <nl> sourceTree = " < group > " ; <nl> <nl> isa = PBXNativeTarget ; <nl> buildConfigurationList = 633BFFE11B950B210007E424 / * Build configuration list for PBXNativeTarget " SwiftSample " * / ; <nl> buildPhases = ( <nl> - 6BEEB33CA2705D7D2F2210E6 / * Check Pods Manifest . lock * / , <nl> + 6BEEB33CA2705D7D2F2210E6 / * [ CP ] Check Pods Manifest . lock * / , <nl> 633BFFBE1B950B210007E424 / * Sources * / , <nl> 633BFFBF1B950B210007E424 / * Frameworks * / , <nl> 633BFFC01B950B210007E424 / * Resources * / , <nl> - AC2F6F9AB1C090BB0BEE6E4D / * Copy Pods Resources * / , <nl> - A1738A987353B0BF2C64F0F7 / * Embed Pods Frameworks * / , <nl> + AC2F6F9AB1C090BB0BEE6E4D / * [ CP ] Copy Pods Resources * / , <nl> + A1738A987353B0BF2C64F0F7 / * [ CP ] Embed Pods Frameworks * / , <nl> ) ; <nl> buildRules = ( <nl> ) ; <nl> <nl> / * End PBXResourcesBuildPhase section * / <nl> <nl> / * Begin PBXShellScriptBuildPhase section * / <nl> - 6BEEB33CA2705D7D2F2210E6 / * Check Pods Manifest . lock * / = { <nl> + 6BEEB33CA2705D7D2F2210E6 / * [ CP ] Check Pods Manifest . lock * / = { <nl> isa = PBXShellScriptBuildPhase ; <nl> buildActionMask = 2147483647 ; <nl> files = ( <nl> ) ; <nl> inputPaths = ( <nl> ) ; <nl> - name = " Check Pods Manifest . lock " ; <nl> + name = " [ CP ] Check Pods Manifest . lock " ; <nl> outputPaths = ( <nl> ) ; <nl> runOnlyForDeploymentPostprocessing = 0 ; <nl> <nl> shellScript = " diff \ " $ { PODS_ROOT } / . . / Podfile . lock \ " \ " $ { PODS_ROOT } / Manifest . lock \ " > / dev / null \ nif [ [ $ ? ! 
= 0 ] ] ; then \ n cat < < EOM \ nerror : The sandbox is not in sync with the Podfile . lock . Run ' pod install ' or update your CocoaPods installation . \ nEOM \ n exit 1 \ nfi \ n " ; <nl> showEnvVarsInLog = 0 ; <nl> } ; <nl> - A1738A987353B0BF2C64F0F7 / * Embed Pods Frameworks * / = { <nl> + A1738A987353B0BF2C64F0F7 / * [ CP ] Embed Pods Frameworks * / = { <nl> isa = PBXShellScriptBuildPhase ; <nl> buildActionMask = 2147483647 ; <nl> files = ( <nl> ) ; <nl> inputPaths = ( <nl> ) ; <nl> - name = " Embed Pods Frameworks " ; <nl> + name = " [ CP ] Embed Pods Frameworks " ; <nl> outputPaths = ( <nl> ) ; <nl> runOnlyForDeploymentPostprocessing = 0 ; <nl> shellPath = / bin / sh ; <nl> - shellScript = " \ " $ { SRCROOT } / Pods / Target Support Files / Pods / Pods - frameworks . sh \ " \ n " ; <nl> + shellScript = " \ " $ { SRCROOT } / Pods / Target Support Files / Pods - SwiftSample / Pods - SwiftSample - frameworks . sh \ " \ n " ; <nl> showEnvVarsInLog = 0 ; <nl> } ; <nl> - AC2F6F9AB1C090BB0BEE6E4D / * Copy Pods Resources * / = { <nl> + AC2F6F9AB1C090BB0BEE6E4D / * [ CP ] Copy Pods Resources * / = { <nl> isa = PBXShellScriptBuildPhase ; <nl> buildActionMask = 2147483647 ; <nl> files = ( <nl> ) ; <nl> inputPaths = ( <nl> ) ; <nl> - name = " Copy Pods Resources " ; <nl> + name = " [ CP ] Copy Pods Resources " ; <nl> outputPaths = ( <nl> ) ; <nl> runOnlyForDeploymentPostprocessing = 0 ; <nl> shellPath = / bin / sh ; <nl> - shellScript = " \ " $ { SRCROOT } / Pods / Target Support Files / Pods / Pods - resources . sh \ " \ n " ; <nl> + shellScript = " \ " $ { SRCROOT } / Pods / Target Support Files / Pods - SwiftSample / Pods - SwiftSample - resources . sh \ " \ n " ; <nl> showEnvVarsInLog = 0 ; <nl> } ; <nl> / * End PBXShellScriptBuildPhase section * / <nl> <nl> } ; <nl> 633BFFE21B950B210007E424 / * Debug * / = { <nl> isa = XCBuildConfiguration ; <nl> - baseConfigurationReference = 12C7B447AA80E624D93B5C54 / * Pods . debug . xcconfig * / ; <nl> + baseConfigurationReference = A7E614A494D89D01BB395761 / * Pods - SwiftSample . debug . xcconfig * / ; <nl> buildSettings = { <nl> ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon ; <nl> INFOPLIST_FILE = Info . plist ; <nl> <nl> } ; <nl> 633BFFE31B950B210007E424 / * Release * / = { <nl> isa = XCBuildConfiguration ; <nl> - baseConfigurationReference = C335CBC4C160E0D9EDEE646B / * Pods . release . xcconfig * / ; <nl> + baseConfigurationReference = C314E3E246AF23AC29B38FCF / * Pods - SwiftSample . release . xcconfig * / ; <nl> buildSettings = { <nl> ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon ; <nl> INFOPLIST_FILE = Info . plist ; <nl> mmm a / src / objective - c / tests / GRPCClientTests . m <nl> ppp b / src / objective - c / tests / GRPCClientTests . m <nl> <nl> static NSString * const kService = @ " TestService " ; <nl> static NSString * const kRemoteSSLHost = @ " grpc - test . sandbox . googleapis . com " ; <nl> <nl> - static ProtoMethod * kInexistentMethod ; <nl> - static ProtoMethod * kEmptyCallMethod ; <nl> - static ProtoMethod * kUnaryCallMethod ; <nl> + static GRPCProtoMethod * kInexistentMethod ; <nl> + static GRPCProtoMethod * kEmptyCallMethod ; <nl> + static GRPCProtoMethod * kUnaryCallMethod ; <nl> <nl> / * * Observer class for testing that responseMetadata is KVO - compliant * / <nl> @ interface PassthroughObserver : NSObject <nl> - ( void ) setUp { <nl> [ GRPCCall useInsecureConnectionsForHost : kHostAddress ] ; <nl> <nl> / / This method isn ' t implemented by the remote server . 
<nl> - kInexistentMethod = [ [ ProtoMethod alloc ] initWithPackage : kPackage <nl> - service : kService <nl> - method : @ " Inexistent " ] ; <nl> - kEmptyCallMethod = [ [ ProtoMethod alloc ] initWithPackage : kPackage <nl> - service : kService <nl> - method : @ " EmptyCall " ] ; <nl> - kUnaryCallMethod = [ [ ProtoMethod alloc ] initWithPackage : kPackage <nl> - service : kService <nl> - method : @ " UnaryCall " ] ; <nl> + kInexistentMethod = [ [ GRPCProtoMethod alloc ] initWithPackage : kPackage <nl> + service : kService <nl> + method : @ " Inexistent " ] ; <nl> + kEmptyCallMethod = [ [ GRPCProtoMethod alloc ] initWithPackage : kPackage <nl> + service : kService <nl> + method : @ " EmptyCall " ] ; <nl> + kUnaryCallMethod = [ [ GRPCProtoMethod alloc ] initWithPackage : kPackage <nl> + service : kService <nl> + method : @ " UnaryCall " ] ; <nl> } <nl> <nl> - ( void ) testConnectionToRemoteServer { <nl> - ( void ) testExceptions { <nl> <nl> / / Try to set parameters to nil for GRPCCall . This should cause an exception <nl> @ try { <nl> - GRPCCall * call = [ [ GRPCCall alloc ] initWithHost : nil <nl> - path : nil <nl> - requestsWriter : nil ] ; <nl> + ( void ) [ [ GRPCCall alloc ] initWithHost : nil <nl> + path : nil <nl> + requestsWriter : nil ] ; <nl> XCTFail ( @ " Did not receive an exception when parameters are nil " ) ; <nl> } @ catch ( NSException * theException ) { <nl> NSLog ( @ " Received exception as expected : % @ " , theException . name ) ; <nl> - ( void ) testExceptions { <nl> GRXWriter * requestsWriter = [ GRXWriter emptyWriter ] ; <nl> [ requestsWriter finishWithError : nil ] ; <nl> @ try { <nl> - GRPCCall * call = [ [ GRPCCall alloc ] initWithHost : kHostAddress <nl> - path : kUnaryCallMethod . HTTPPath <nl> - requestsWriter : requestsWriter ] ; <nl> + ( void ) [ [ GRPCCall alloc ] initWithHost : kHostAddress <nl> + path : kUnaryCallMethod . HTTPPath <nl> + requestsWriter : requestsWriter ] ; <nl> XCTFail ( @ " Did not receive an exception when GRXWriter has incorrect state . " ) ; <nl> } @ catch ( NSException * theException ) { <nl> NSLog ( @ " Received exception as expected : % @ " , theException . name ) ; <nl> mmm a / src / objective - c / tests / InteropTests . m <nl> ppp b / src / objective - c / tests / InteropTests . m <nl> + ( instancetype ) messageWithPayloadSize : ( NSNumber * ) payloadSize <nl> requestedResponseSize : ( NSNumber * ) responseSize { <nl> RMTStreamingOutputCallRequest * request = [ self message ] ; <nl> RMTResponseParameters * parameters = [ RMTResponseParameters message ] ; <nl> - parameters . size = responseSize . integerValue ; <nl> + parameters . size = ( int ) responseSize . integerValue ; <nl> [ request . responseParametersArray addObject : parameters ] ; <nl> request . payload . body = [ NSMutableData dataWithLength : payloadSize . unsignedIntegerValue ] ; <nl> return request ; <nl> + ( instancetype ) messageWithPayloadSize : ( NSNumber * ) payloadSize { <nl> <nl> # pragma mark Tests <nl> <nl> + # ifdef GRPC_COMPILE_WITH_CRONET <nl> static cronet_engine * cronetEngine = NULL ; <nl> + # endif <nl> <nl> @ implementation InteropTests { <nl> RMTTestService * _service ; <nl> - ( void ) testServerStreamingRPC { <nl> RMTStreamingOutputCallRequest * request = [ RMTStreamingOutputCallRequest message ] ; <nl> for ( NSNumber * size in expectedSizes ) { <nl> RMTResponseParameters * parameters = [ RMTResponseParameters message ] ; <nl> - parameters . size = [ size integerValue ] ; <nl> + parameters . size = ( int ) [ size integerValue ] ; <nl> [ request . 
responseParametersArray addObject : parameters ] ; <nl> } <nl> <nl> - ( void ) testCancelAfterBeginRPC { <nl> / / A buffered pipe to which we never write any value acts as a writer that just hangs . <nl> GRXBufferedPipe * requestsBuffer = [ [ GRXBufferedPipe alloc ] init ] ; <nl> <nl> - ProtoRPC * call = [ _service RPCToStreamingInputCallWithRequestsWriter : requestsBuffer <nl> - handler : ^ ( RMTStreamingInputCallResponse * response , <nl> - NSError * error ) { <nl> + GRPCProtoCall * call = [ _service RPCToStreamingInputCallWithRequestsWriter : requestsBuffer <nl> + handler : ^ ( RMTStreamingInputCallResponse * response , <nl> + NSError * error ) { <nl> XCTAssertEqual ( error . code , GRPC_STATUS_CANCELLED ) ; <nl> [ expectation fulfill ] ; <nl> } ] ; <nl> - ( void ) testCancelAfterFirstResponseRPC { <nl> <nl> [ requestsBuffer writeValue : request ] ; <nl> <nl> - __block ProtoRPC * call = <nl> + __block GRPCProtoCall * call = <nl> [ _service RPCToFullDuplexCallWithRequestsWriter : requestsBuffer <nl> eventHandler : ^ ( BOOL done , <nl> RMTStreamingOutputCallResponse * response , <nl> mmm a / src / objective - c / tests / Podfile <nl> ppp b / src / objective - c / tests / Podfile <nl> platform : ios , ' 8 . 0 ' <nl> install ! ' cocoapods ' , : deterministic_uuids = > false <nl> <nl> def shared_pods <nl> - pod ' Protobuf ' , : path = > " . . / . . / . . / third_party / protobuf " <nl> - pod ' BoringSSL ' , : podspec = > " . . " <nl> - pod ' CronetFramework ' , : podspec = > " . . " <nl> - pod ' gRPC ' , : path = > " . . / . . / . . " <nl> - pod ' RemoteTest ' , : path = > " RemoteTestClient " <nl> + pod ' Protobuf ' , : path = > " . . / . . / . . / third_party / protobuf " , : inhibit_warnings = > true <nl> + pod ' BoringSSL ' , : podspec = > " . . " , : inhibit_warnings = > true <nl> + pod ' CronetFramework ' , : podspec = > " . . " <nl> + pod ' gRPC ' , : path = > " . . / . . / . . " <nl> + pod ' RemoteTest ' , : path = > " RemoteTestClient " <nl> end <nl> <nl> target ' Tests ' do <nl> end <nl> target ' InteropTestsLocalCleartext ' do <nl> shared_pods <nl> end <nl> + <nl> + post_install do | installer | <nl> + installer . pods_project . targets . each do | target | <nl> + target . build_configurations . each do | config | <nl> + config . build_settings [ ' GCC_TREAT_WARNINGS_AS_ERRORS ' ] = ' YES ' <nl> + end <nl> + if target . name = = ' gRPC ' <nl> + target . build_configurations . each do | config | <nl> + # TODO ( zyc ) Remove this setting after the issue is resolved <nl> + # GPR_UNREACHABLE_CODE causes " Control may reach end of non - void <nl> + # function " warning <nl> + config . build_settings [ ' GCC_WARN_ABOUT_RETURN_TYPE ' ] = ' NO ' <nl> + end <nl> + end <nl> + end <nl> + end <nl> mmm a / src / objective - c / tests / Tests . xcodeproj / project . pbxproj <nl> ppp b / src / objective - c / tests / Tests . xcodeproj / project . 
pbxproj <nl> <nl> " $ ( inherited ) " , <nl> ) ; <nl> GCC_SYMBOLS_PRIVATE_EXTERN = NO ; <nl> + GCC_TREAT_WARNINGS_AS_ERRORS = YES ; <nl> GCC_WARN_64_TO_32_BIT_CONVERSION = YES ; <nl> GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR ; <nl> GCC_WARN_UNDECLARED_SELECTOR = YES ; <nl> <nl> ENABLE_STRICT_OBJC_MSGSEND = YES ; <nl> GCC_C_LANGUAGE_STANDARD = gnu99 ; <nl> GCC_NO_COMMON_BLOCKS = YES ; <nl> + GCC_TREAT_WARNINGS_AS_ERRORS = YES ; <nl> GCC_WARN_64_TO_32_BIT_CONVERSION = YES ; <nl> GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR ; <nl> GCC_WARN_UNDECLARED_SELECTOR = YES ; <nl> <nl> isa = XCBuildConfiguration ; <nl> baseConfigurationReference = 060EF32D7EC0DF67ED617507 / * Pods - Tests . debug . xcconfig * / ; <nl> buildSettings = { <nl> + GCC_TREAT_WARNINGS_AS_ERRORS = YES ; <nl> PRODUCT_NAME = " $ ( TARGET_NAME ) " ; <nl> SKIP_INSTALL = YES ; <nl> } ; <nl> <nl> isa = XCBuildConfiguration ; <nl> baseConfigurationReference = E6733B838B28453434B556E2 / * Pods - Tests . release . xcconfig * / ; <nl> buildSettings = { <nl> + GCC_TREAT_WARNINGS_AS_ERRORS = YES ; <nl> PRODUCT_NAME = " $ ( TARGET_NAME ) " ; <nl> SKIP_INSTALL = YES ; <nl> } ; <nl> mmm a / src / proto / grpc / binary_log / v1alpha / log . proto <nl> ppp b / src / proto / grpc / binary_log / v1alpha / log . proto <nl> <nl> <nl> syntax = " proto3 " ; <nl> <nl> - import " google / protobuf / timestamp . proto " <nl> + import " google / protobuf / timestamp . proto " ; <nl> <nl> package grpc . binary_log . v1alpha ; <nl> <nl> enum Direction { <nl> - SERVER_SEND ; <nl> - SERVER_RECV ; <nl> - CLIENT_SEND ; <nl> - CLIENT_RECV ; <nl> + SERVER_SEND = 0 ; <nl> + SERVER_RECV = 1 ; <nl> + CLIENT_SEND = 2 ; <nl> + CLIENT_RECV = 3 ; <nl> } <nl> <nl> message KeyValuePair { <nl> - string key ; <nl> - string value ; <nl> + string key = 1 ; <nl> + string value = 2 ; <nl> } <nl> <nl> / / Any sort of metadata that may be sent in either direction during a call <nl> mmm a / src / proto / grpc / testing / messages . proto <nl> ppp b / src / proto / grpc / testing / messages . proto <nl> syntax = " proto3 " ; <nl> <nl> package grpc . testing ; <nl> <nl> + / / TODO ( dgq ) : Go back to using well - known types once <nl> + / / https : / / github . com / grpc / grpc / issues / 6980 has been fixed . <nl> + / / import " google / protobuf / wrappers . proto " ; <nl> + message BoolValue { <nl> + / / The bool value . <nl> + bool value = 1 ; <nl> + } <nl> + <nl> + / / DEPRECATED , don ' t use . To be removed shortly . <nl> / / The type of payload that should be returned . <nl> enum PayloadType { <nl> / / Compressable text format . <nl> COMPRESSABLE = 0 ; <nl> - <nl> - / / Uncompressable binary format . <nl> - UNCOMPRESSABLE = 1 ; <nl> } <nl> <nl> / / A block of data , to simply increase gRPC message size . <nl> message Payload { <nl> + / / DEPRECATED , don ' t use . To be removed shortly . <nl> / / The type of data in body . <nl> PayloadType type = 1 ; <nl> / / Primary contents of payload . <nl> message EchoStatus { <nl> <nl> / / Unary request . <nl> message SimpleRequest { <nl> + / / DEPRECATED , don ' t use . To be removed shortly . <nl> / / Desired payload type in the response from the server . <nl> / / If response_type is RANDOM , server randomly chooses one from other formats . <nl> PayloadType response_type = 1 ; <nl> <nl> / / Desired payload size in the response from the server . <nl> - / / If response_type is COMPRESSABLE , this denotes the size before compression . <nl> int32 response_size = 2 ; <nl> <nl> / / Optional input payload sent along with the request . 
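The `BoolValue` wrapper introduced above, together with the `response_compressed` and `expect_compressed` fields added in the hunks that follow, lets a caller distinguish "no expectation" (field unset) from an explicit `false`, which the old plain bools could not express. As a rough illustration only, a C# interop client might populate the regenerated messages as sketched below; the `Grpc.Testing` namespace and PascalCase property names are assumptions based on standard protobuf C# code generation, not code taken from this change.

```csharp
using Google.Protobuf;
using Grpc.Testing;  // assumed namespace for types regenerated from messages.proto

static class CompressionRequestSketch
{
    // Build a request that asks the server for a compressed response while declaring
    // that the request body itself is sent uncompressed. Leaving either BoolValue
    // field null would signal "no expectation about compression".
    public static SimpleRequest Build()
    {
        return new SimpleRequest
        {
            ResponseSize = 314159,
            Payload = new Payload { Body = ByteString.CopyFrom(new byte[271828]) },
            ResponseCompressed = new BoolValue { Value = true },
            ExpectCompressed = new BoolValue { Value = false }
        };
    }
}
```

The same "nullable bool" convention is applied below to `StreamingInputCallRequest.expect_compressed` and `ResponseParameters.compressed` for the streaming interop cases.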
<nl> message SimpleRequest { <nl> / / Whether SimpleResponse should include OAuth scope . <nl> bool fill_oauth_scope = 5 ; <nl> <nl> - / / Whether to request the server to compress the response . <nl> - bool request_compressed_response = 6 ; <nl> + / / Whether to request the server to compress the response . This field is <nl> + / / " nullable " in order to interoperate seamlessly with clients not able to <nl> + / / implement the full compression tests by introspecting the call to verify <nl> + / / the response ' s compression status . <nl> + BoolValue response_compressed = 6 ; <nl> <nl> / / Whether server should return a given status <nl> EchoStatus response_status = 7 ; <nl> + <nl> + / / Whether the server should expect this request to be compressed . <nl> + BoolValue expect_compressed = 8 ; <nl> } <nl> <nl> / / Unary response , as configured by the request . <nl> message StreamingInputCallRequest { <nl> / / Optional input payload sent along with the request . <nl> Payload payload = 1 ; <nl> <nl> + / / Whether the server should expect this request to be compressed . This field <nl> + / / is " nullable " in order to interoperate seamlessly with servers not able to <nl> + / / implement the full compression tests by introspecting the call to verify <nl> + / / the request ' s compression status . <nl> + BoolValue expect_compressed = 2 ; <nl> + <nl> / / Not expecting any payload from the response . <nl> } <nl> <nl> message StreamingInputCallResponse { <nl> / / Configuration for a particular response . <nl> message ResponseParameters { <nl> / / Desired payload sizes in responses from the server . <nl> - / / If response_type is COMPRESSABLE , this denotes the size before compression . <nl> int32 size = 1 ; <nl> <nl> / / Desired interval between consecutive responses in the response stream in <nl> / / microseconds . <nl> int32 interval_us = 2 ; <nl> + <nl> + / / Whether to request the server to compress the response . This field is <nl> + / / " nullable " in order to interoperate seamlessly with clients not able to <nl> + / / implement the full compression tests by introspecting the call to verify <nl> + / / the response ' s compression status . <nl> + BoolValue compressed = 3 ; <nl> } <nl> <nl> / / Server - streaming request . <nl> message StreamingOutputCallRequest { <nl> + / / DEPRECATED , don ' t use . To be removed shortly . <nl> / / Desired payload type in the response from the server . <nl> / / If response_type is RANDOM , the payload from each response in the stream <nl> / / might be of different types . This is to simulate a mixed type of payload <nl> message StreamingOutputCallRequest { <nl> / / Optional input payload sent along with the request . <nl> Payload payload = 3 ; <nl> <nl> - / / Whether to request the server to compress the response . <nl> - bool request_compressed_response = 6 ; <nl> - <nl> / / Whether server should return a given status <nl> EchoStatus response_status = 7 ; <nl> } <nl> mmm a / src / python / grpcio / commands . py <nl> ppp b / src / python / grpcio / commands . py <nl> def run ( self ) : <nl> ' - - plugin = protoc - gen - python - grpc = { } ' . format ( <nl> self . grpc_python_plugin_command ) , <nl> ' - I { } ' . format ( GRPC_STEM ) , <nl> + ' - I . ' , <nl> + ' - I { } / third_party / protobuf / src ' . format ( GRPC_STEM ) , <nl> ' - - python_out = { } ' . format ( PROTO_GEN_STEM ) , <nl> ' - - python - grpc_out = { } ' . format ( PROTO_GEN_STEM ) , <nl> ] + [ path ] <nl> mmm a / src / python / grpcio / tests / tests . 
json <nl> ppp b / src / python / grpcio / tests / tests . json <nl> <nl> " _implementations_test . ChannelCredentialsTest " , <nl> " _insecure_interop_test . InsecureInteropTest " , <nl> " _logging_pool_test . LoggingPoolTest " , <nl> + " _metadata_code_details_test . MetadataCodeDetailsTest " , <nl> " _metadata_test . MetadataTest " , <nl> " _not_found_test . NotFoundTest " , <nl> " _python_plugin_test . PythonPluginTest " , <nl> new file mode 100644 <nl> index 00000000000 . . dd74268cbf1 <nl> mmm / dev / null <nl> ppp b / src / python / grpcio / tests / unit / _metadata_code_details_test . py <nl> <nl> + # Copyright 2016 , Google Inc . <nl> + # All rights reserved . <nl> + # <nl> + # Redistribution and use in source and binary forms , with or without <nl> + # modification , are permitted provided that the following conditions are <nl> + # met : <nl> + # <nl> + # * Redistributions of source code must retain the above copyright <nl> + # notice , this list of conditions and the following disclaimer . <nl> + # * Redistributions in binary form must reproduce the above <nl> + # copyright notice , this list of conditions and the following disclaimer <nl> + # in the documentation and / or other materials provided with the <nl> + # distribution . <nl> + # * Neither the name of Google Inc . nor the names of its <nl> + # contributors may be used to endorse or promote products derived from <nl> + # this software without specific prior written permission . <nl> + # <nl> + # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS <nl> + # " AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT <nl> + # LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR <nl> + # A PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT <nl> + # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL , <nl> + # SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT <nl> + # LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , <nl> + # DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY <nl> + # THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT <nl> + # ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> + # OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> + <nl> + " " " Tests application - provided metadata , status code , and details . " " " <nl> + <nl> + import threading <nl> + import unittest <nl> + <nl> + import grpc <nl> + from grpc . framework . foundation import logging_pool <nl> + <nl> + from tests . unit import test_common <nl> + from tests . unit . framework . common import test_constants <nl> + from tests . unit . framework . common import test_control <nl> + <nl> + _SERIALIZED_REQUEST = b ' \ x46 \ x47 \ x48 ' <nl> + _SERIALIZED_RESPONSE = b ' \ x49 \ x50 \ x51 ' <nl> + <nl> + _REQUEST_SERIALIZER = lambda unused_request : _SERIALIZED_REQUEST <nl> + _REQUEST_DESERIALIZER = lambda unused_serialized_request : object ( ) <nl> + _RESPONSE_SERIALIZER = lambda unused_response : _SERIALIZED_RESPONSE <nl> + _RESPONSE_DESERIALIZER = lambda unused_serialized_resopnse : object ( ) <nl> + <nl> + _SERVICE = b ' test . 
TestService ' <nl> + _UNARY_UNARY = b ' UnaryUnary ' <nl> + _UNARY_STREAM = b ' UnaryStream ' <nl> + _STREAM_UNARY = b ' StreamUnary ' <nl> + _STREAM_STREAM = b ' StreamStream ' <nl> + <nl> + _CLIENT_METADATA = ( <nl> + ( b ' client - md - key ' , b ' client - md - key ' ) , <nl> + ( b ' client - md - key - bin ' , b ' \ x00 \ x01 ' ) <nl> + ) <nl> + <nl> + _SERVER_INITIAL_METADATA = ( <nl> + ( b ' server - initial - md - key ' , b ' server - initial - md - value ' ) , <nl> + ( b ' server - initial - md - key - bin ' , b ' \ x00 \ x02 ' ) <nl> + ) <nl> + <nl> + _SERVER_TRAILING_METADATA = ( <nl> + ( b ' server - trailing - md - key ' , b ' server - trailing - md - value ' ) , <nl> + ( b ' server - trailing - md - key - bin ' , b ' \ x00 \ x03 ' ) <nl> + ) <nl> + <nl> + _NON_OK_CODE = grpc . StatusCode . NOT_FOUND <nl> + _DETAILS = b ' Test details ! ' <nl> + <nl> + <nl> + class _Servicer ( object ) : <nl> + <nl> + def __init__ ( self ) : <nl> + self . _lock = threading . Lock ( ) <nl> + self . _code = None <nl> + self . _details = None <nl> + self . _exception = False <nl> + self . _return_none = False <nl> + self . _received_client_metadata = None <nl> + <nl> + def unary_unary ( self , request , context ) : <nl> + with self . _lock : <nl> + self . _received_client_metadata = context . invocation_metadata ( ) <nl> + context . send_initial_metadata ( _SERVER_INITIAL_METADATA ) <nl> + context . set_trailing_metadata ( _SERVER_TRAILING_METADATA ) <nl> + if self . _code is not None : <nl> + context . set_code ( self . _code ) <nl> + if self . _details is not None : <nl> + context . set_details ( self . _details ) <nl> + if self . _exception : <nl> + raise test_control . Defect ( ) <nl> + else : <nl> + return None if self . _return_none else object ( ) <nl> + <nl> + def unary_stream ( self , request , context ) : <nl> + with self . _lock : <nl> + self . _received_client_metadata = context . invocation_metadata ( ) <nl> + context . send_initial_metadata ( _SERVER_INITIAL_METADATA ) <nl> + context . set_trailing_metadata ( _SERVER_TRAILING_METADATA ) <nl> + if self . _code is not None : <nl> + context . set_code ( self . _code ) <nl> + if self . _details is not None : <nl> + context . set_details ( self . _details ) <nl> + for _ in range ( test_constants . STREAM_LENGTH / / 2 ) : <nl> + yield _SERIALIZED_RESPONSE <nl> + if self . _exception : <nl> + raise test_control . Defect ( ) <nl> + <nl> + def stream_unary ( self , request_iterator , context ) : <nl> + with self . _lock : <nl> + self . _received_client_metadata = context . invocation_metadata ( ) <nl> + context . send_initial_metadata ( _SERVER_INITIAL_METADATA ) <nl> + context . set_trailing_metadata ( _SERVER_TRAILING_METADATA ) <nl> + if self . _code is not None : <nl> + context . set_code ( self . _code ) <nl> + if self . _details is not None : <nl> + context . set_details ( self . _details ) <nl> + # TODO ( https : / / github . com / grpc / grpc / issues / 6891 ) : just ignore the <nl> + # request iterator . <nl> + for ignored_request in request_iterator : <nl> + pass <nl> + if self . _exception : <nl> + raise test_control . Defect ( ) <nl> + else : <nl> + return None if self . _return_none else _SERIALIZED_RESPONSE <nl> + <nl> + def stream_stream ( self , request_iterator , context ) : <nl> + with self . _lock : <nl> + self . _received_client_metadata = context . invocation_metadata ( ) <nl> + context . send_initial_metadata ( _SERVER_INITIAL_METADATA ) <nl> + context . 
set_trailing_metadata ( _SERVER_TRAILING_METADATA ) <nl> + if self . _code is not None : <nl> + context . set_code ( self . _code ) <nl> + if self . _details is not None : <nl> + context . set_details ( self . _details ) <nl> + # TODO ( https : / / github . com / grpc / grpc / issues / 6891 ) : just ignore the <nl> + # request iterator . <nl> + for ignored_request in request_iterator : <nl> + pass <nl> + for _ in range ( test_constants . STREAM_LENGTH / / 3 ) : <nl> + yield object ( ) <nl> + if self . _exception : <nl> + raise test_control . Defect ( ) <nl> + <nl> + def set_code ( self , code ) : <nl> + with self . _lock : <nl> + self . _code = code <nl> + <nl> + def set_details ( self , details ) : <nl> + with self . _lock : <nl> + self . _details = details <nl> + <nl> + def set_exception ( self ) : <nl> + with self . _lock : <nl> + self . _exception = True <nl> + <nl> + def set_return_none ( self ) : <nl> + with self . _lock : <nl> + self . _return_none = True <nl> + <nl> + def received_client_metadata ( self ) : <nl> + with self . _lock : <nl> + return self . _received_client_metadata <nl> + <nl> + <nl> + def _generic_handler ( servicer ) : <nl> + method_handlers = { <nl> + _UNARY_UNARY : grpc . unary_unary_rpc_method_handler ( <nl> + servicer . unary_unary , request_deserializer = _REQUEST_DESERIALIZER , <nl> + response_serializer = _RESPONSE_SERIALIZER ) , <nl> + _UNARY_STREAM : grpc . unary_stream_rpc_method_handler ( <nl> + servicer . unary_stream ) , <nl> + _STREAM_UNARY : grpc . stream_unary_rpc_method_handler ( <nl> + servicer . stream_unary ) , <nl> + _STREAM_STREAM : grpc . stream_stream_rpc_method_handler ( <nl> + servicer . stream_stream , request_deserializer = _REQUEST_DESERIALIZER , <nl> + response_serializer = _RESPONSE_SERIALIZER ) , <nl> + } <nl> + return grpc . method_handlers_generic_handler ( _SERVICE , method_handlers ) <nl> + <nl> + <nl> + class MetadataCodeDetailsTest ( unittest . TestCase ) : <nl> + <nl> + def setUp ( self ) : <nl> + self . _servicer = _Servicer ( ) <nl> + self . _server_pool = logging_pool . pool ( test_constants . THREAD_CONCURRENCY ) <nl> + self . _server = grpc . server ( <nl> + ( _generic_handler ( self . _servicer ) , ) , self . _server_pool ) <nl> + port = self . _server . add_insecure_port ( ' [ : : ] : 0 ' ) <nl> + self . _server . start ( ) <nl> + <nl> + channel = grpc . insecure_channel ( ' localhost : { } ' . format ( port ) ) <nl> + self . _unary_unary = channel . unary_unary ( <nl> + b ' / ' . join ( ( b ' ' , _SERVICE , _UNARY_UNARY , ) ) , <nl> + request_serializer = _REQUEST_SERIALIZER , <nl> + response_deserializer = _RESPONSE_DESERIALIZER , ) <nl> + self . _unary_stream = channel . unary_stream ( <nl> + b ' / ' . join ( ( b ' ' , _SERVICE , _UNARY_STREAM , ) ) , ) <nl> + self . _stream_unary = channel . stream_unary ( <nl> + b ' / ' . join ( ( b ' ' , _SERVICE , _STREAM_UNARY , ) ) , ) <nl> + self . _stream_stream = channel . stream_stream ( <nl> + b ' / ' . join ( ( b ' ' , _SERVICE , _STREAM_STREAM , ) ) , <nl> + request_serializer = _REQUEST_SERIALIZER , <nl> + response_deserializer = _RESPONSE_DESERIALIZER , ) <nl> + <nl> + <nl> + def testSuccessfulUnaryUnary ( self ) : <nl> + self . _servicer . set_details ( _DETAILS ) <nl> + <nl> + unused_response , call = self . _unary_unary . with_call ( <nl> + object ( ) , metadata = _CLIENT_METADATA ) <nl> + <nl> + self . assertTrue ( <nl> + test_common . metadata_transmitted ( <nl> + _CLIENT_METADATA , self . _servicer . received_client_metadata ( ) ) ) <nl> + self . 
assertTrue ( <nl> + test_common . metadata_transmitted ( <nl> + _SERVER_INITIAL_METADATA , call . initial_metadata ( ) ) ) <nl> + self . assertTrue ( <nl> + test_common . metadata_transmitted ( <nl> + _SERVER_TRAILING_METADATA , call . trailing_metadata ( ) ) ) <nl> + self . assertIs ( grpc . StatusCode . OK , call . code ( ) ) <nl> + self . assertEqual ( _DETAILS , call . details ( ) ) <nl> + <nl> + def testSuccessfulUnaryStream ( self ) : <nl> + self . _servicer . set_details ( _DETAILS ) <nl> + <nl> + call = self . _unary_stream ( _SERIALIZED_REQUEST , metadata = _CLIENT_METADATA ) <nl> + received_initial_metadata = call . initial_metadata ( ) <nl> + for _ in call : <nl> + pass <nl> + <nl> + self . assertTrue ( <nl> + test_common . metadata_transmitted ( <nl> + _CLIENT_METADATA , self . _servicer . received_client_metadata ( ) ) ) <nl> + self . assertTrue ( <nl> + test_common . metadata_transmitted ( <nl> + _SERVER_INITIAL_METADATA , received_initial_metadata ) ) <nl> + self . assertTrue ( <nl> + test_common . metadata_transmitted ( <nl> + _SERVER_TRAILING_METADATA , call . trailing_metadata ( ) ) ) <nl> + self . assertIs ( grpc . StatusCode . OK , call . code ( ) ) <nl> + self . assertEqual ( _DETAILS , call . details ( ) ) <nl> + <nl> + def testSuccessfulStreamUnary ( self ) : <nl> + self . _servicer . set_details ( _DETAILS ) <nl> + <nl> + unused_response , call = self . _stream_unary . with_call ( <nl> + iter ( [ _SERIALIZED_REQUEST ] * test_constants . STREAM_LENGTH ) , <nl> + metadata = _CLIENT_METADATA ) <nl> + <nl> + self . assertTrue ( <nl> + test_common . metadata_transmitted ( <nl> + _CLIENT_METADATA , self . _servicer . received_client_metadata ( ) ) ) <nl> + self . assertTrue ( <nl> + test_common . metadata_transmitted ( <nl> + _SERVER_INITIAL_METADATA , call . initial_metadata ( ) ) ) <nl> + self . assertTrue ( <nl> + test_common . metadata_transmitted ( <nl> + _SERVER_TRAILING_METADATA , call . trailing_metadata ( ) ) ) <nl> + self . assertIs ( grpc . StatusCode . OK , call . code ( ) ) <nl> + self . assertEqual ( _DETAILS , call . details ( ) ) <nl> + <nl> + def testSuccessfulStreamStream ( self ) : <nl> + self . _servicer . set_details ( _DETAILS ) <nl> + <nl> + call = self . _stream_stream ( <nl> + iter ( [ object ( ) ] * test_constants . STREAM_LENGTH ) , <nl> + metadata = _CLIENT_METADATA ) <nl> + received_initial_metadata = call . initial_metadata ( ) <nl> + for _ in call : <nl> + pass <nl> + <nl> + self . assertTrue ( <nl> + test_common . metadata_transmitted ( <nl> + _CLIENT_METADATA , self . _servicer . received_client_metadata ( ) ) ) <nl> + self . assertTrue ( <nl> + test_common . metadata_transmitted ( <nl> + _SERVER_INITIAL_METADATA , received_initial_metadata ) ) <nl> + self . assertTrue ( <nl> + test_common . metadata_transmitted ( <nl> + _SERVER_TRAILING_METADATA , call . trailing_metadata ( ) ) ) <nl> + self . assertIs ( grpc . StatusCode . OK , call . code ( ) ) <nl> + self . assertEqual ( _DETAILS , call . details ( ) ) <nl> + <nl> + def testCustomCodeUnaryUnary ( self ) : <nl> + self . _servicer . set_code ( _NON_OK_CODE ) <nl> + self . _servicer . set_details ( _DETAILS ) <nl> + <nl> + with self . assertRaises ( grpc . RpcError ) as exception_context : <nl> + self . _unary_unary . with_call ( object ( ) , metadata = _CLIENT_METADATA ) <nl> + <nl> + self . assertTrue ( <nl> + test_common . metadata_transmitted ( <nl> + _CLIENT_METADATA , self . _servicer . received_client_metadata ( ) ) ) <nl> + self . assertTrue ( <nl> + test_common . 
metadata_transmitted ( <nl> + _SERVER_INITIAL_METADATA , <nl> + exception_context . exception . initial_metadata ( ) ) ) <nl> + self . assertTrue ( <nl> + test_common . metadata_transmitted ( <nl> + _SERVER_TRAILING_METADATA , <nl> + exception_context . exception . trailing_metadata ( ) ) ) <nl> + self . assertIs ( _NON_OK_CODE , exception_context . exception . code ( ) ) <nl> + self . assertEqual ( _DETAILS , exception_context . exception . details ( ) ) <nl> + <nl> + def testCustomCodeUnaryStream ( self ) : <nl> + self . _servicer . set_code ( _NON_OK_CODE ) <nl> + self . _servicer . set_details ( _DETAILS ) <nl> + <nl> + call = self . _unary_stream ( _SERIALIZED_REQUEST , metadata = _CLIENT_METADATA ) <nl> + received_initial_metadata = call . initial_metadata ( ) <nl> + with self . assertRaises ( grpc . RpcError ) : <nl> + for _ in call : <nl> + pass <nl> + <nl> + self . assertTrue ( <nl> + test_common . metadata_transmitted ( <nl> + _CLIENT_METADATA , self . _servicer . received_client_metadata ( ) ) ) <nl> + self . assertTrue ( <nl> + test_common . metadata_transmitted ( <nl> + _SERVER_INITIAL_METADATA , received_initial_metadata ) ) <nl> + self . assertTrue ( <nl> + test_common . metadata_transmitted ( <nl> + _SERVER_TRAILING_METADATA , call . trailing_metadata ( ) ) ) <nl> + self . assertIs ( _NON_OK_CODE , call . code ( ) ) <nl> + self . assertEqual ( _DETAILS , call . details ( ) ) <nl> + <nl> + def testCustomCodeStreamUnary ( self ) : <nl> + self . _servicer . set_code ( _NON_OK_CODE ) <nl> + self . _servicer . set_details ( _DETAILS ) <nl> + <nl> + with self . assertRaises ( grpc . RpcError ) as exception_context : <nl> + self . _stream_unary . with_call ( <nl> + iter ( [ _SERIALIZED_REQUEST ] * test_constants . STREAM_LENGTH ) , <nl> + metadata = _CLIENT_METADATA ) <nl> + <nl> + self . assertTrue ( <nl> + test_common . metadata_transmitted ( <nl> + _CLIENT_METADATA , self . _servicer . received_client_metadata ( ) ) ) <nl> + self . assertTrue ( <nl> + test_common . metadata_transmitted ( <nl> + _SERVER_INITIAL_METADATA , <nl> + exception_context . exception . initial_metadata ( ) ) ) <nl> + self . assertTrue ( <nl> + test_common . metadata_transmitted ( <nl> + _SERVER_TRAILING_METADATA , <nl> + exception_context . exception . trailing_metadata ( ) ) ) <nl> + self . assertIs ( _NON_OK_CODE , exception_context . exception . code ( ) ) <nl> + self . assertEqual ( _DETAILS , exception_context . exception . details ( ) ) <nl> + <nl> + def testCustomCodeStreamStream ( self ) : <nl> + self . _servicer . set_code ( _NON_OK_CODE ) <nl> + self . _servicer . set_details ( _DETAILS ) <nl> + <nl> + call = self . _stream_stream ( <nl> + iter ( [ object ( ) ] * test_constants . STREAM_LENGTH ) , <nl> + metadata = _CLIENT_METADATA ) <nl> + received_initial_metadata = call . initial_metadata ( ) <nl> + with self . assertRaises ( grpc . RpcError ) as exception_context : <nl> + for _ in call : <nl> + pass <nl> + <nl> + self . assertTrue ( <nl> + test_common . metadata_transmitted ( <nl> + _CLIENT_METADATA , self . _servicer . received_client_metadata ( ) ) ) <nl> + self . assertTrue ( <nl> + test_common . metadata_transmitted ( <nl> + _SERVER_INITIAL_METADATA , received_initial_metadata ) ) <nl> + self . assertTrue ( <nl> + test_common . metadata_transmitted ( <nl> + _SERVER_TRAILING_METADATA , <nl> + exception_context . exception . trailing_metadata ( ) ) ) <nl> + self . assertIs ( _NON_OK_CODE , exception_context . exception . code ( ) ) <nl> + self . 
assertEqual ( _DETAILS , exception_context . exception . details ( ) ) <nl> + <nl> + def testCustomCodeExceptionUnaryUnary ( self ) : <nl> + self . _servicer . set_code ( _NON_OK_CODE ) <nl> + self . _servicer . set_details ( _DETAILS ) <nl> + self . _servicer . set_exception ( ) <nl> + <nl> + with self . assertRaises ( grpc . RpcError ) as exception_context : <nl> + self . _unary_unary . with_call ( object ( ) , metadata = _CLIENT_METADATA ) <nl> + <nl> + self . assertTrue ( <nl> + test_common . metadata_transmitted ( <nl> + _CLIENT_METADATA , self . _servicer . received_client_metadata ( ) ) ) <nl> + self . assertTrue ( <nl> + test_common . metadata_transmitted ( <nl> + _SERVER_INITIAL_METADATA , <nl> + exception_context . exception . initial_metadata ( ) ) ) <nl> + self . assertTrue ( <nl> + test_common . metadata_transmitted ( <nl> + _SERVER_TRAILING_METADATA , <nl> + exception_context . exception . trailing_metadata ( ) ) ) <nl> + self . assertIs ( _NON_OK_CODE , exception_context . exception . code ( ) ) <nl> + self . assertEqual ( _DETAILS , exception_context . exception . details ( ) ) <nl> + <nl> + def testCustomCodeExceptionUnaryStream ( self ) : <nl> + self . _servicer . set_code ( _NON_OK_CODE ) <nl> + self . _servicer . set_details ( _DETAILS ) <nl> + self . _servicer . set_exception ( ) <nl> + <nl> + call = self . _unary_stream ( _SERIALIZED_REQUEST , metadata = _CLIENT_METADATA ) <nl> + received_initial_metadata = call . initial_metadata ( ) <nl> + with self . assertRaises ( grpc . RpcError ) : <nl> + for _ in call : <nl> + pass <nl> + <nl> + self . assertTrue ( <nl> + test_common . metadata_transmitted ( <nl> + _CLIENT_METADATA , self . _servicer . received_client_metadata ( ) ) ) <nl> + self . assertTrue ( <nl> + test_common . metadata_transmitted ( <nl> + _SERVER_INITIAL_METADATA , received_initial_metadata ) ) <nl> + self . assertTrue ( <nl> + test_common . metadata_transmitted ( <nl> + _SERVER_TRAILING_METADATA , call . trailing_metadata ( ) ) ) <nl> + self . assertIs ( _NON_OK_CODE , call . code ( ) ) <nl> + self . assertEqual ( _DETAILS , call . details ( ) ) <nl> + <nl> + def testCustomCodeExceptionStreamUnary ( self ) : <nl> + self . _servicer . set_code ( _NON_OK_CODE ) <nl> + self . _servicer . set_details ( _DETAILS ) <nl> + self . _servicer . set_exception ( ) <nl> + <nl> + with self . assertRaises ( grpc . RpcError ) as exception_context : <nl> + self . _stream_unary . with_call ( <nl> + iter ( [ _SERIALIZED_REQUEST ] * test_constants . STREAM_LENGTH ) , <nl> + metadata = _CLIENT_METADATA ) <nl> + <nl> + self . assertTrue ( <nl> + test_common . metadata_transmitted ( <nl> + _CLIENT_METADATA , self . _servicer . received_client_metadata ( ) ) ) <nl> + self . assertTrue ( <nl> + test_common . metadata_transmitted ( <nl> + _SERVER_INITIAL_METADATA , <nl> + exception_context . exception . initial_metadata ( ) ) ) <nl> + self . assertTrue ( <nl> + test_common . metadata_transmitted ( <nl> + _SERVER_TRAILING_METADATA , <nl> + exception_context . exception . trailing_metadata ( ) ) ) <nl> + self . assertIs ( _NON_OK_CODE , exception_context . exception . code ( ) ) <nl> + self . assertEqual ( _DETAILS , exception_context . exception . details ( ) ) <nl> + <nl> + def testCustomCodeExceptionStreamStream ( self ) : <nl> + self . _servicer . set_code ( _NON_OK_CODE ) <nl> + self . _servicer . set_details ( _DETAILS ) <nl> + self . _servicer . set_exception ( ) <nl> + <nl> + call = self . _stream_stream ( <nl> + iter ( [ object ( ) ] * test_constants . 
STREAM_LENGTH ) , <nl> + metadata = _CLIENT_METADATA ) <nl> + received_initial_metadata = call . initial_metadata ( ) <nl> + with self . assertRaises ( grpc . RpcError ) : <nl> + for _ in call : <nl> + pass <nl> + <nl> + self . assertTrue ( <nl> + test_common . metadata_transmitted ( <nl> + _CLIENT_METADATA , self . _servicer . received_client_metadata ( ) ) ) <nl> + self . assertTrue ( <nl> + test_common . metadata_transmitted ( <nl> + _SERVER_INITIAL_METADATA , received_initial_metadata ) ) <nl> + self . assertTrue ( <nl> + test_common . metadata_transmitted ( <nl> + _SERVER_TRAILING_METADATA , call . trailing_metadata ( ) ) ) <nl> + self . assertIs ( _NON_OK_CODE , call . code ( ) ) <nl> + self . assertEqual ( _DETAILS , call . details ( ) ) <nl> + <nl> + def testCustomCodeReturnNoneUnaryUnary ( self ) : <nl> + self . _servicer . set_code ( _NON_OK_CODE ) <nl> + self . _servicer . set_details ( _DETAILS ) <nl> + self . _servicer . set_return_none ( ) <nl> + <nl> + with self . assertRaises ( grpc . RpcError ) as exception_context : <nl> + self . _unary_unary . with_call ( object ( ) , metadata = _CLIENT_METADATA ) <nl> + <nl> + self . assertTrue ( <nl> + test_common . metadata_transmitted ( <nl> + _CLIENT_METADATA , self . _servicer . received_client_metadata ( ) ) ) <nl> + self . assertTrue ( <nl> + test_common . metadata_transmitted ( <nl> + _SERVER_INITIAL_METADATA , <nl> + exception_context . exception . initial_metadata ( ) ) ) <nl> + self . assertTrue ( <nl> + test_common . metadata_transmitted ( <nl> + _SERVER_TRAILING_METADATA , <nl> + exception_context . exception . trailing_metadata ( ) ) ) <nl> + self . assertIs ( _NON_OK_CODE , exception_context . exception . code ( ) ) <nl> + self . assertEqual ( _DETAILS , exception_context . exception . details ( ) ) <nl> + <nl> + def testCustomCodeReturnNoneStreamUnary ( self ) : <nl> + self . _servicer . set_code ( _NON_OK_CODE ) <nl> + self . _servicer . set_details ( _DETAILS ) <nl> + self . _servicer . set_return_none ( ) <nl> + <nl> + with self . assertRaises ( grpc . RpcError ) as exception_context : <nl> + self . _stream_unary . with_call ( <nl> + iter ( [ _SERIALIZED_REQUEST ] * test_constants . STREAM_LENGTH ) , <nl> + metadata = _CLIENT_METADATA ) <nl> + <nl> + self . assertTrue ( <nl> + test_common . metadata_transmitted ( <nl> + _CLIENT_METADATA , self . _servicer . received_client_metadata ( ) ) ) <nl> + self . assertTrue ( <nl> + test_common . metadata_transmitted ( <nl> + _SERVER_INITIAL_METADATA , <nl> + exception_context . exception . initial_metadata ( ) ) ) <nl> + self . assertTrue ( <nl> + test_common . metadata_transmitted ( <nl> + _SERVER_TRAILING_METADATA , <nl> + exception_context . exception . trailing_metadata ( ) ) ) <nl> + self . assertIs ( _NON_OK_CODE , exception_context . exception . code ( ) ) <nl> + self . assertEqual ( _DETAILS , exception_context . exception . details ( ) ) <nl> + <nl> + <nl> + if __name__ = = ' __main__ ' : <nl> + unittest . main ( verbosity = 2 ) <nl> new file mode 100644 <nl> index 00000000000 . . 90ad0eb0891 <nl> mmm / dev / null <nl> ppp b / templates / src / csharp / Grpc . Auth / project . json . template <nl> <nl> + % YAML 1 . 2 <nl> + mmm | <nl> + { <nl> + " version " : " $ { settings . csharp_version } " , <nl> + " title " : " gRPC C # Auth " , <nl> + " authors " : [ " Google Inc . " ] , <nl> + " copyright " : " Copyright 2015 , Google Inc . 
" , <nl> + " packOptions " : { <nl> + " summary " : " Auth library for C # implementation of gRPC - an RPC library and framework " , <nl> + " description " : " Auth library for C # implementation of gRPC - an RPC library and framework . See project site for more info . " , <nl> + " owners " : [ " grpc - packages " ] , <nl> + " licenseUrl " : " https : / / github . com / grpc / grpc / blob / master / LICENSE " , <nl> + " projectUrl " : " https : / / github . com / grpc / grpc " , <nl> + " requireLicenseAcceptance " : false , <nl> + " tags " : [ " gRPC RPC Protocol HTTP / 2 Auth OAuth2 " ] , <nl> + } , <nl> + " dependencies " : { <nl> + " Grpc . Core " : " $ { settings . csharp_version } " , <nl> + " Google . Apis . Auth " : " 1 . 11 . 1 " <nl> + } , <nl> + " frameworks " : { <nl> + " net45 " : { } , <nl> + " netstandard1 . 5 " : { <nl> + " imports " : [ <nl> + " net45 " <nl> + ] , <nl> + " dependencies " : { <nl> + " Microsoft . NETCore . Portable . Compatibility " : " 1 . 0 . 1 - rc2 - 24027 " , <nl> + " NETStandard . Library " : " 1 . 5 . 0 - rc2 - 24027 " , <nl> + " System . Threading . Tasks " : " 4 . 0 . 11 - rc2 - 24027 " <nl> + } <nl> + } <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . bc9fa3e63a9 <nl> mmm / dev / null <nl> ppp b / templates / src / csharp / Grpc . Core . Tests / project . json . template <nl> <nl> + % YAML 1 . 2 <nl> + mmm | <nl> + { <nl> + < % include file = " . . / build_options . include " args = " executable = True " / > <nl> + " dependencies " : { <nl> + " Grpc . Core " : { <nl> + " target " : " project " <nl> + } , <nl> + " Newtonsoft . Json " : " 8 . 0 . 3 " , <nl> + " NUnit " : " 3 . 2 . 0 " , <nl> + " NUnitLite " : " 3 . 2 . 0 - * " <nl> + } , <nl> + " frameworks " : { <nl> + " net45 " : { } , <nl> + " netstandard1 . 5 " : { <nl> + " imports " : [ <nl> + " portable - net45 " <nl> + ] , <nl> + " dependencies " : { <nl> + " NETStandard . Library " : " 1 . 5 . 0 - rc2 - 24027 " <nl> + } <nl> + } <nl> + } , <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 6f9197f572f <nl> mmm / dev / null <nl> ppp b / templates / src / csharp / Grpc . Core / project . json . template <nl> <nl> + % YAML 1 . 2 <nl> + mmm | <nl> + { <nl> + " version " : " $ { settings . csharp_version } " , <nl> + " title " : " gRPC C # Core " , <nl> + " authors " : [ " Google Inc . " ] , <nl> + " copyright " : " Copyright 2015 , Google Inc . " , <nl> + " packOptions " : { <nl> + " summary " : " Core C # implementation of gRPC - an RPC library and framework " , <nl> + " description " : " Core C # implementation of gRPC - an RPC library and framework . See project site for more info . " , <nl> + " owners " : [ " grpc - packages " ] , <nl> + " licenseUrl " : " https : / / github . com / grpc / grpc / blob / master / LICENSE " , <nl> + " projectUrl " : " https : / / github . com / grpc / grpc " , <nl> + " requireLicenseAcceptance " : false , <nl> + " tags " : [ " gRPC RPC Protocol HTTP / 2 " ] , <nl> + " files " : { <nl> + " build / net45 / " : " Grpc . Core . targets " , <nl> + " build / native / bin / windows_x86 / " : " . . / nativelibs / windows_x86 / grpc_csharp_ext . dll " , <nl> + " build / native / bin / windows_x64 / " : " . . / nativelibs / windows_x64 / grpc_csharp_ext . dll " , <nl> + " build / native / bin / linux_x86 / " : " . . / nativelibs / linux_x86 / libgrpc_csharp_ext . so " , <nl> + " build / native / bin / linux_x64 / " : " . . / nativelibs / linux_x64 / libgrpc_csharp_ext . so " , <nl> + " build / native / bin / macosx_x86 / " : " . . 
/ nativelibs / macosx_x86 / libgrpc_csharp_ext . dylib " , <nl> + " build / native / bin / macosx_x64 / " : " . . / nativelibs / macosx_x64 / libgrpc_csharp_ext . dylib " <nl> + } <nl> + } , <nl> + " buildOptions " : { <nl> + " embed " : [ " . . / . . / . . / etc / roots . pem " ] <nl> + } , <nl> + " dependencies " : { <nl> + " Ix - Async " : " 1 . 2 . 5 " <nl> + } , <nl> + " frameworks " : { <nl> + " net45 " : { } , <nl> + " netstandard1 . 5 " : { <nl> + " imports " : [ <nl> + " portable - net45 " <nl> + ] , <nl> + " dependencies " : { <nl> + " NETStandard . Library " : " 1 . 5 . 0 - rc2 - 24027 " , <nl> + " System . Threading . Thread " : " 4 . 0 . 0 - rc2 - 24027 " <nl> + } <nl> + } <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . fba401c3a47 <nl> mmm / dev / null <nl> ppp b / templates / src / csharp / Grpc . Examples . MathClient / project . json . template <nl> <nl> + % YAML 1 . 2 <nl> + mmm | <nl> + { <nl> + < % include file = " . . / build_options . include " args = " executable = True " / > <nl> + " dependencies " : { <nl> + " Grpc . Examples " : { <nl> + " target " : " project " <nl> + } <nl> + } , <nl> + " frameworks " : { <nl> + " net45 " : { } , <nl> + " netstandard1 . 5 " : { <nl> + " imports " : [ <nl> + " portable - net45 " <nl> + ] , <nl> + " dependencies " : { <nl> + " NETStandard . Library " : " 1 . 5 . 0 - rc2 - 24027 " <nl> + } <nl> + } <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . fba401c3a47 <nl> mmm / dev / null <nl> ppp b / templates / src / csharp / Grpc . Examples . MathServer / project . json . template <nl> <nl> + % YAML 1 . 2 <nl> + mmm | <nl> + { <nl> + < % include file = " . . / build_options . include " args = " executable = True " / > <nl> + " dependencies " : { <nl> + " Grpc . Examples " : { <nl> + " target " : " project " <nl> + } <nl> + } , <nl> + " frameworks " : { <nl> + " net45 " : { } , <nl> + " netstandard1 . 5 " : { <nl> + " imports " : [ <nl> + " portable - net45 " <nl> + ] , <nl> + " dependencies " : { <nl> + " NETStandard . Library " : " 1 . 5 . 0 - rc2 - 24027 " <nl> + } <nl> + } <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 21765f0565c <nl> mmm / dev / null <nl> ppp b / templates / src / csharp / Grpc . Examples . Tests / project . json . template <nl> <nl> + % YAML 1 . 2 <nl> + mmm | <nl> + { <nl> + < % include file = " . . / build_options . include " args = " executable = True " / > <nl> + " dependencies " : { <nl> + " Grpc . Examples " : { <nl> + " target " : " project " <nl> + } , <nl> + " NUnit " : " 3 . 2 . 0 " , <nl> + " NUnitLite " : " 3 . 2 . 0 - * " <nl> + } , <nl> + " frameworks " : { <nl> + " net45 " : { } , <nl> + " netstandard1 . 5 " : { <nl> + " imports " : [ <nl> + " portable - net45 " <nl> + ] , <nl> + " dependencies " : { <nl> + " NETStandard . Library " : " 1 . 5 . 0 - rc2 - 24027 " <nl> + } <nl> + } <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 715fc087256 <nl> mmm / dev / null <nl> ppp b / templates / src / csharp / Grpc . Examples / project . json . template <nl> <nl> + % YAML 1 . 2 <nl> + mmm | <nl> + { <nl> + < % include file = " . . / build_options . include " args = " executable = False " / > <nl> + " dependencies " : { <nl> + " Grpc . Core " : { <nl> + " target " : " project " <nl> + } , <nl> + " Google . Protobuf " : " 3 . 0 . 0 - beta3 " <nl> + } , <nl> + " frameworks " : { <nl> + " net45 " : { <nl> + " frameworkAssemblies " : { <nl> + " System . Runtime " : " " , <nl> + " System . 
IO " : " " <nl> + } <nl> + } , <nl> + " netstandard1 . 5 " : { <nl> + " imports " : [ <nl> + " portable - net45 " <nl> + ] , <nl> + " dependencies " : { <nl> + " NETStandard . Library " : " 1 . 5 . 0 - rc2 - 24027 " <nl> + } <nl> + } <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 79e67226cb4 <nl> mmm / dev / null <nl> ppp b / templates / src / csharp / Grpc . HealthCheck . Tests / project . json . template <nl> <nl> + % YAML 1 . 2 <nl> + mmm | <nl> + { <nl> + < % include file = " . . / build_options . include " args = " executable = True " / > <nl> + " dependencies " : { <nl> + " Grpc . HealthCheck " : { <nl> + " target " : " project " <nl> + } , <nl> + " NUnit " : " 3 . 2 . 0 " , <nl> + " NUnitLite " : " 3 . 2 . 0 - * " <nl> + } , <nl> + " frameworks " : { <nl> + " net45 " : { } , <nl> + " netstandard1 . 5 " : { <nl> + " imports " : [ <nl> + " portable - net45 " <nl> + ] , <nl> + " dependencies " : { <nl> + " NETStandard . Library " : " 1 . 5 . 0 - rc2 - 24027 " <nl> + } <nl> + } <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 59073af7eec <nl> mmm / dev / null <nl> ppp b / templates / src / csharp / Grpc . HealthCheck / project . json . template <nl> <nl> + % YAML 1 . 2 <nl> + mmm | <nl> + { <nl> + " version " : " $ { settings . csharp_version } " , <nl> + " title " : " gRPC C # Healthchecking " , <nl> + " authors " : [ " Google Inc . " ] , <nl> + " copyright " : " Copyright 2015 , Google Inc . " , <nl> + " packOptions " : { <nl> + " summary " : " Implementation of gRPC health service " , <nl> + " description " : " Example implementation of grpc . health . v1 service that can be used for health - checking . " , <nl> + " owners " : [ " grpc - packages " ] , <nl> + " licenseUrl " : " https : / / github . com / grpc / grpc / blob / master / LICENSE " , <nl> + " projectUrl " : " https : / / github . com / grpc / grpc " , <nl> + " requireLicenseAcceptance " : false , <nl> + " tags " : [ " gRPC health check " ] <nl> + } , <nl> + " dependencies " : { <nl> + " Grpc . Core " : " $ { settings . csharp_version } " , <nl> + " Google . Protobuf " : " 3 . 0 . 0 - beta3 " <nl> + } , <nl> + " frameworks " : { <nl> + " net45 " : { <nl> + " frameworkAssemblies " : { <nl> + " System . Runtime " : " " , <nl> + " System . IO " : " " <nl> + } <nl> + } , <nl> + " netstandard1 . 5 " : { <nl> + " imports " : [ <nl> + " portable - net45 " <nl> + ] , <nl> + " dependencies " : { <nl> + " NETStandard . Library " : " 1 . 5 . 0 - rc2 - 24027 " <nl> + } <nl> + } <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 10ed5493477 <nl> mmm / dev / null <nl> ppp b / templates / src / csharp / Grpc . IntegrationTesting . Client / project . json . template <nl> <nl> + % YAML 1 . 2 <nl> + mmm | <nl> + { <nl> + < % include file = " . . / build_options . include " args = " executable = True , includeData = True " / > <nl> + " dependencies " : { <nl> + " Grpc . IntegrationTesting " : { <nl> + " target " : " project " <nl> + } <nl> + } , <nl> + " frameworks " : { <nl> + " net45 " : { } , <nl> + " netstandard1 . 5 " : { <nl> + " imports " : [ <nl> + " portable - net45 " , <nl> + " net45 " <nl> + ] , <nl> + " dependencies " : { <nl> + " NETStandard . Library " : " 1 . 5 . 0 - rc2 - 24027 " <nl> + } <nl> + } <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 10ed5493477 <nl> mmm / dev / null <nl> ppp b / templates / src / csharp / Grpc . IntegrationTesting . QpsWorker / project . json . template <nl> <nl> + % YAML 1 . 
2 <nl> + mmm | <nl> + { <nl> + < % include file = " . . / build_options . include " args = " executable = True , includeData = True " / > <nl> + " dependencies " : { <nl> + " Grpc . IntegrationTesting " : { <nl> + " target " : " project " <nl> + } <nl> + } , <nl> + " frameworks " : { <nl> + " net45 " : { } , <nl> + " netstandard1 . 5 " : { <nl> + " imports " : [ <nl> + " portable - net45 " , <nl> + " net45 " <nl> + ] , <nl> + " dependencies " : { <nl> + " NETStandard . Library " : " 1 . 5 . 0 - rc2 - 24027 " <nl> + } <nl> + } <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 10ed5493477 <nl> mmm / dev / null <nl> ppp b / templates / src / csharp / Grpc . IntegrationTesting . Server / project . json . template <nl> <nl> + % YAML 1 . 2 <nl> + mmm | <nl> + { <nl> + < % include file = " . . / build_options . include " args = " executable = True , includeData = True " / > <nl> + " dependencies " : { <nl> + " Grpc . IntegrationTesting " : { <nl> + " target " : " project " <nl> + } <nl> + } , <nl> + " frameworks " : { <nl> + " net45 " : { } , <nl> + " netstandard1 . 5 " : { <nl> + " imports " : [ <nl> + " portable - net45 " , <nl> + " net45 " <nl> + ] , <nl> + " dependencies " : { <nl> + " NETStandard . Library " : " 1 . 5 . 0 - rc2 - 24027 " <nl> + } <nl> + } <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 10ed5493477 <nl> mmm / dev / null <nl> ppp b / templates / src / csharp / Grpc . IntegrationTesting . StressClient / project . json . template <nl> <nl> + % YAML 1 . 2 <nl> + mmm | <nl> + { <nl> + < % include file = " . . / build_options . include " args = " executable = True , includeData = True " / > <nl> + " dependencies " : { <nl> + " Grpc . IntegrationTesting " : { <nl> + " target " : " project " <nl> + } <nl> + } , <nl> + " frameworks " : { <nl> + " net45 " : { } , <nl> + " netstandard1 . 5 " : { <nl> + " imports " : [ <nl> + " portable - net45 " , <nl> + " net45 " <nl> + ] , <nl> + " dependencies " : { <nl> + " NETStandard . Library " : " 1 . 5 . 0 - rc2 - 24027 " <nl> + } <nl> + } <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 31815114857 <nl> mmm / dev / null <nl> ppp b / templates / src / csharp / Grpc . IntegrationTesting / project . json . template <nl> <nl> + % YAML 1 . 2 <nl> + mmm | <nl> + { <nl> + < % include file = " . . / build_options . include " args = " executable = True , includeData = True " / > <nl> + " dependencies " : { <nl> + " Grpc . Auth " : { <nl> + " target " : " project " <nl> + } , <nl> + " Grpc . Core " : { <nl> + " target " : " project " <nl> + } , <nl> + " Google . Protobuf " : " 3 . 0 . 0 - beta3 " , <nl> + " CommandLineParser " : " 1 . 9 . 71 " , <nl> + " NUnit " : " 3 . 2 . 0 " , <nl> + " NUnitLite " : " 3 . 2 . 0 - * " <nl> + } , <nl> + " frameworks " : { <nl> + " net45 " : { <nl> + " dependencies " : { <nl> + " Moq " : " 4 . 2 . 1510 . 2205 " <nl> + } , <nl> + " frameworkAssemblies " : { <nl> + " System . Runtime " : " " , <nl> + " System . IO " : " " <nl> + } <nl> + } , <nl> + " netstandard1 . 5 " : { <nl> + " imports " : [ <nl> + " portable - net45 " , <nl> + " net45 " <nl> + ] , <nl> + " dependencies " : { <nl> + " NETStandard . Library " : " 1 . 5 . 0 - rc2 - 24027 " , <nl> + " System . Linq . Expressions " : " 4 . 0 . 11 - rc2 - 24027 " <nl> + } <nl> + } <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 468d281618c <nl> mmm / dev / null <nl> ppp b / templates / src / csharp / build_options . 
include <nl> <nl> + < % page args = " executable = False , includeData = False " / > \ <nl> + " buildOptions " : { <nl> + % if executable : <nl> + " emitEntryPoint " : true <nl> + % endif <nl> + } , <nl> + % if executable : <nl> + " configurations " : { <nl> + " Debug " : { <nl> + " buildOptions " : { <nl> + " copyToOutput " : { <nl> + % if includeData : <nl> + " include " : " data / * " , <nl> + % endif <nl> + " mappings " : { <nl> + " nativelibs / windows_x64 / grpc_csharp_ext . dll " : " . . / . . / . . / vsprojects / x64 / Debug / grpc_csharp_ext . dll " , <nl> + " nativelibs / windows_x86 / grpc_csharp_ext . dll " : " . . / . . / . . / vsprojects / Debug / grpc_csharp_ext . dll " , <nl> + " nativelibs / linux_x64 / libgrpc_csharp_ext . so " : " . . / . . / . . / libs / dbg / libgrpc_csharp_ext . so " , <nl> + " nativelibs / macosx_x64 / libgrpc_csharp_ext . dylib " : " . . / . . / . . / libs / dbg / libgrpc_csharp_ext . dylib " <nl> + } <nl> + } <nl> + } <nl> + } , <nl> + " Release " : { <nl> + " buildOptions " : { <nl> + " copyToOutput " : { <nl> + % if includeData : <nl> + " include " : " data / * " , <nl> + % endif <nl> + " mappings " : { <nl> + " nativelibs / windows_x64 / grpc_csharp_ext . dll " : " . . / . . / . . / vsprojects / x64 / Release / grpc_csharp_ext . dll " , <nl> + " nativelibs / windows_x86 / grpc_csharp_ext . dll " : " . . / . . / . . / vsprojects / Release / grpc_csharp_ext . dll " , <nl> + " nativelibs / linux_x64 / libgrpc_csharp_ext . so " : " . . / . . / . . / libs / opt / libgrpc_csharp_ext . so " , <nl> + " nativelibs / macosx_x64 / libgrpc_csharp_ext . dylib " : " . . / . . / . . / libs / opt / libgrpc_csharp_ext . dylib " <nl> + } <nl> + } <nl> + } <nl> + } <nl> + } , <nl> + " runtimes " : { <nl> + " win7 - x64 " : { } , <nl> + " debian . 8 - x64 " : { } , <nl> + " osx . 10 . 11 - x64 " : { } <nl> + } , <nl> + % endif <nl> \ No newline at end of file <nl> mmm a / templates / src / csharp / build_packages . bat . template <nl> ppp b / templates / src / csharp / build_packages . bat . template <nl> <nl> <nl> @ rem Collect the artifacts built by the previous build step if running on Jenkins <nl> @ rem TODO ( jtattermusch ) : is there a better way to do this ? <nl> - xcopy / Y / I . . \ . . \ architecture = x86 , language = csharp , platform = windows \ artifacts \ * Grpc . Core \ windows_x86 $ { " \ \ " } <nl> - xcopy / Y / I . . \ . . \ architecture = x64 , language = csharp , platform = windows \ artifacts \ * Grpc . Core \ windows_x64 $ { " \ \ " } <nl> - xcopy / Y / I . . \ . . \ architecture = x86 , language = csharp , platform = linux \ artifacts \ * Grpc . Core \ linux_x86 $ { " \ \ " } <nl> - xcopy / Y / I . . \ . . \ architecture = x64 , language = csharp , platform = linux \ artifacts \ * Grpc . Core \ linux_x64 $ { " \ \ " } <nl> - xcopy / Y / I . . \ . . \ architecture = x86 , language = csharp , platform = macos \ artifacts \ * Grpc . Core \ macosx_x86 $ { " \ \ " } <nl> - xcopy / Y / I . . \ . . \ architecture = x64 , language = csharp , platform = macos \ artifacts \ * Grpc . Core \ macosx_x64 $ { " \ \ " } <nl> + xcopy / Y / I . . \ . . \ architecture = x86 , language = csharp , platform = windows \ artifacts \ * nativelibs \ windows_x86 $ { " \ \ " } <nl> + xcopy / Y / I . . \ . . \ architecture = x64 , language = csharp , platform = windows \ artifacts \ * nativelibs \ windows_x64 $ { " \ \ " } <nl> + xcopy / Y / I . . \ . . 
\ architecture = x86 , language = csharp , platform = linux \ artifacts \ * nativelibs \ linux_x86 $ { " \ \ " } <nl> + xcopy / Y / I . . \ . . \ architecture = x64 , language = csharp , platform = linux \ artifacts \ * nativelibs \ linux_x64 $ { " \ \ " } <nl> + xcopy / Y / I . . \ . . \ architecture = x86 , language = csharp , platform = macos \ artifacts \ * nativelibs \ macosx_x86 $ { " \ \ " } <nl> + xcopy / Y / I . . \ . . \ architecture = x64 , language = csharp , platform = macos \ artifacts \ * nativelibs \ macosx_x64 $ { " \ \ " } <nl> <nl> @ rem Collect protoc artifacts built by the previous build step <nl> xcopy / Y / I . . \ . . \ architecture = x86 , language = protoc , platform = windows \ artifacts \ * protoc_plugins \ windows_x86 $ { " \ \ " } <nl> new file mode 100644 <nl> index 00000000000 . . 35782d6665f <nl> mmm / dev / null <nl> ppp b / templates / tools / dockerfile / test / csharp_coreclr_x64 / Dockerfile . template <nl> <nl> + % YAML 1 . 2 <nl> + mmm | <nl> + # Copyright 2015 , Google Inc . <nl> + # All rights reserved . <nl> + # <nl> + # Redistribution and use in source and binary forms , with or without <nl> + # modification , are permitted provided that the following conditions are <nl> + # met : <nl> + # <nl> + # * Redistributions of source code must retain the above copyright <nl> + # notice , this list of conditions and the following disclaimer . <nl> + # * Redistributions in binary form must reproduce the above <nl> + # copyright notice , this list of conditions and the following disclaimer <nl> + # in the documentation and / or other materials provided with the <nl> + # distribution . <nl> + # * Neither the name of Google Inc . nor the names of its <nl> + # contributors may be used to endorse or promote products derived from <nl> + # this software without specific prior written permission . <nl> + # <nl> + # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS <nl> + # " AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT <nl> + # LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR <nl> + # A PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT <nl> + # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL , <nl> + # SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT <nl> + # LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , <nl> + # DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY <nl> + # THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT <nl> + # ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> + # OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> + <nl> + FROM microsoft / dotnet : 1 . 0 . 0 - preview1 <nl> + <nl> + < % include file = " . . / . . / apt_get_basic . include " / > <nl> + < % include file = " . . / . . / run_tests_addons . include " / > <nl> + # Define the default command . <nl> + CMD [ " bash " ] <nl> + <nl> mmm a / test / cpp / interop / client . cc <nl> ppp b / test / cpp / interop / client . cc <nl> <nl> # include < grpc + + / client_context . h > <nl> # include < grpc / grpc . h > <nl> # include < grpc / support / log . h > <nl> + # include < grpc / support / useful . h > <nl> <nl> + # include " src / core / lib / support / string . h " <nl> # include " test / cpp / interop / client_helper . h " <nl> # include " test / cpp / interop / interop_client . h " <nl> # include " test / cpp / util / test_config . 
h " <nl> DEFINE_string ( server_host , " 127 . 0 . 0 . 1 " , " Server host to connect to " ) ; <nl> DEFINE_string ( server_host_override , " foo . test . google . fr " , <nl> " Override the server host which is sent in HTTP header " ) ; <nl> DEFINE_string ( test_case , " large_unary " , <nl> - " Configure different test cases . Valid options are : " <nl> - " empty_unary : empty ( zero bytes ) request and response ; " <nl> - " large_unary : single request and ( large ) response ; " <nl> - " large_compressed_unary : single request and compressed ( large ) " <nl> - " response ; " <nl> - " client_streaming : request streaming with single response ; " <nl> - " server_streaming : single request with response streaming ; " <nl> + " Configure different test cases . Valid options are : \ n \ n " <nl> + " all : all test cases ; \ n " <nl> + " cancel_after_begin : cancel stream after starting it ; \ n " <nl> + " cancel_after_first_response : cancel on first response ; \ n " <nl> + " client_compressed_streaming : compressed request streaming with " <nl> + " client_compressed_unary : single compressed request ; \ n " <nl> + " client_streaming : request streaming with single response ; \ n " <nl> + " compute_engine_creds : large_unary with compute engine auth ; \ n " <nl> + " custom_metadata : server will echo custom metadata ; \ n " <nl> + " empty_stream : bi - di stream with no request / response ; \ n " <nl> + " empty_unary : empty ( zero bytes ) request and response ; \ n " <nl> + " half_duplex : half - duplex streaming ; \ n " <nl> + " jwt_token_creds : large_unary with JWT token auth ; \ n " <nl> + " large_unary : single request and ( large ) response ; \ n " <nl> + " oauth2_auth_token : raw oauth2 access token auth ; \ n " <nl> + " per_rpc_creds : raw oauth2 access token on a single rpc ; \ n " <nl> + " ping_pong : full - duplex streaming ; \ n " <nl> + " response streaming ; \ n " <nl> " server_compressed_streaming : single request with compressed " <nl> - " response streaming ; " <nl> - " slow_consumer : single request with response ; " <nl> - " streaming with slow client consumer ; " <nl> - " half_duplex : half - duplex streaming ; " <nl> - " ping_pong : full - duplex streaming ; " <nl> - " cancel_after_begin : cancel stream after starting it ; " <nl> - " cancel_after_first_response : cancel on first response ; " <nl> - " timeout_on_sleeping_server : deadline exceeds on stream ; " <nl> - " empty_stream : bi - di stream with no request / response ; " <nl> - " compute_engine_creds : large_unary with compute engine auth ; " <nl> - " jwt_token_creds : large_unary with JWT token auth ; " <nl> - " oauth2_auth_token : raw oauth2 access token auth ; " <nl> - " per_rpc_creds : raw oauth2 access token on a single rpc ; " <nl> - " status_code_and_message : verify status code & message ; " <nl> - " custom_metadata : server will echo custom metadata ; " <nl> - " all : all of above . " ) ; <nl> + " server_compressed_unary : single compressed response ; \ n " <nl> + " server_streaming : single request with response streaming ; \ n " <nl> + " slow_consumer : single request with response streaming with " <nl> + " slow client consumer ; \ n " <nl> + " status_code_and_message : verify status code & message ; \ n " <nl> + " timeout_on_sleeping_server : deadline exceeds on stream ; \ n " ) ; <nl> DEFINE_string ( default_service_account , " " , <nl> " Email of GCE default service account " ) ; <nl> DEFINE_string ( service_account_key_file , " " , <nl> int main ( int argc , char * * argv ) { <nl> client . 
DoEmpty ( ) ; <nl> } else if ( FLAGS_test_case = = " large_unary " ) { <nl> client . DoLargeUnary ( ) ; <nl> - } else if ( FLAGS_test_case = = " large_compressed_unary " ) { <nl> - client . DoLargeCompressedUnary ( ) ; <nl> + } else if ( FLAGS_test_case = = " server_compressed_unary " ) { <nl> + client . DoServerCompressedUnary ( ) ; <nl> + } else if ( FLAGS_test_case = = " client_compressed_unary " ) { <nl> + client . DoClientCompressedUnary ( ) ; <nl> } else if ( FLAGS_test_case = = " client_streaming " ) { <nl> client . DoRequestStreaming ( ) ; <nl> } else if ( FLAGS_test_case = = " server_streaming " ) { <nl> client . DoResponseStreaming ( ) ; <nl> } else if ( FLAGS_test_case = = " server_compressed_streaming " ) { <nl> - client . DoResponseCompressedStreaming ( ) ; <nl> + client . DoServerCompressedStreaming ( ) ; <nl> + } else if ( FLAGS_test_case = = " client_compressed_streaming " ) { <nl> + client . DoClientCompressedStreaming ( ) ; <nl> } else if ( FLAGS_test_case = = " slow_consumer " ) { <nl> client . DoResponseStreamingWithSlowConsumer ( ) ; <nl> } else if ( FLAGS_test_case = = " half_duplex " ) { <nl> int main ( int argc , char * * argv ) { <nl> } else if ( FLAGS_test_case = = " all " ) { <nl> client . DoEmpty ( ) ; <nl> client . DoLargeUnary ( ) ; <nl> + client . DoClientCompressedUnary ( ) ; <nl> + client . DoServerCompressedUnary ( ) ; <nl> client . DoRequestStreaming ( ) ; <nl> client . DoResponseStreaming ( ) ; <nl> - client . DoResponseCompressedStreaming ( ) ; <nl> + client . DoClientCompressedStreaming ( ) ; <nl> + client . DoServerCompressedStreaming ( ) ; <nl> client . DoHalfDuplex ( ) ; <nl> client . DoPingPong ( ) ; <nl> client . DoCancelAfterBegin ( ) ; <nl> int main ( int argc , char * * argv ) { <nl> } <nl> / / compute_engine_creds only runs in GCE . <nl> } else { <nl> - gpr_log ( <nl> - GPR_ERROR , <nl> - " Unsupported test case % s . Valid options are all | empty_unary | " <nl> - " large_unary | large_compressed_unary | client_streaming | server_streaming | " <nl> - " server_compressed_streaming | half_duplex | ping_pong | cancel_after_begin | " <nl> - " cancel_after_first_response | timeout_on_sleeping_server | empty_stream | " <nl> - " compute_engine_creds | jwt_token_creds | oauth2_auth_token | per_rpc_creds | " <nl> - " status_code_and_message | custom_metadata " , <nl> - FLAGS_test_case . c_str ( ) ) ; <nl> + const char * testcases [ ] = { " all " , <nl> + " cancel_after_begin " , <nl> + " cancel_after_first_response " , <nl> + " client_compressed_streaming " , <nl> + " client_compressed_unary " , <nl> + " client_streaming " , <nl> + " compute_engine_creds " , <nl> + " custom_metadata " , <nl> + " empty_stream " , <nl> + " empty_unary " , <nl> + " half_duplex " , <nl> + " jwt_token_creds " , <nl> + " large_unary " , <nl> + " oauth2_auth_token " , <nl> + " oauth2_auth_token " , <nl> + " per_rpc_creds " , <nl> + " per_rpc_creds " , <nl> + " ping_pong " , <nl> + " server_compressed_streaming " , <nl> + " server_compressed_unary " , <nl> + " server_streaming " , <nl> + " status_code_and_message " , <nl> + " timeout_on_sleeping_server " } ; <nl> + char * joined_testcases = <nl> + gpr_strjoin_sep ( testcases , GPR_ARRAY_SIZE ( testcases ) , " \ n " , NULL ) ; <nl> + <nl> + gpr_log ( GPR_ERROR , " Unsupported test case % s . Valid options are \ n % s " , <nl> + FLAGS_test_case . c_str ( ) , joined_testcases ) ; <nl> + gpr_free ( joined_testcases ) ; <nl> ret = 1 ; <nl> } <nl> <nl> mmm a / test / cpp / interop / interop_client . 
cc <nl> ppp b / test / cpp / interop / interop_client . cc <nl> <nl> / * <nl> * <nl> - * Copyright 2015 , Google Inc . <nl> + * Copyright 2015 - 2016 , Google Inc . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> namespace testing { <nl> namespace { <nl> / / The same value is defined by the Java client . <nl> const std : : vector < int > request_stream_sizes = { 27182 , 8 , 1828 , 45904 } ; <nl> - const std : : vector < int > response_stream_sizes = { 31415 , 59 , 2653 , 58979 } ; <nl> + const std : : vector < int > response_stream_sizes = { 31415 , 9 , 2653 , 58979 } ; <nl> const int kNumResponseMessages = 2000 ; <nl> const int kResponseMessageSize = 1030 ; <nl> const int kReceiveDelayMilliSeconds = 20 ; <nl> const int kLargeResponseSize = 314159 ; <nl> void NoopChecks ( const InteropClientContextInspector & inspector , <nl> const SimpleRequest * request , const SimpleResponse * response ) { } <nl> <nl> - void CompressionChecks ( const InteropClientContextInspector & inspector , <nl> - const SimpleRequest * request , <nl> - const SimpleResponse * response ) { <nl> + void UnaryCompressionChecks ( const InteropClientContextInspector & inspector , <nl> + const SimpleRequest * request , <nl> + const SimpleResponse * response ) { <nl> const grpc_compression_algorithm received_compression = <nl> inspector . GetCallCompressionAlgorithm ( ) ; <nl> - if ( request - > request_compressed_response ( ) & & <nl> - received_compression = = GRPC_COMPRESS_NONE ) { <nl> - if ( request - > request_compressed_response ( ) & & <nl> - received_compression = = GRPC_COMPRESS_NONE ) { <nl> + if ( request - > response_compressed ( ) . value ( ) ) { <nl> + if ( received_compression = = GRPC_COMPRESS_NONE ) { <nl> / / Requested some compression , got NONE . This is an error . <nl> gpr_log ( GPR_ERROR , <nl> " Failure : Requested compression but got uncompressed response " <nl> " from server . " ) ; <nl> abort ( ) ; <nl> } <nl> - } <nl> - if ( ! request - > request_compressed_response ( ) ) { <nl> - GPR_ASSERT ( ! ( inspector . GetMessageFlags ( ) & GRPC_WRITE_INTERNAL_COMPRESS ) ) ; <nl> - } else if ( request - > response_type ( ) = = PayloadType : : COMPRESSABLE ) { <nl> - / / requested compression and compressable response = > results should always <nl> - / / be compressed . <nl> GPR_ASSERT ( inspector . GetMessageFlags ( ) & GRPC_WRITE_INTERNAL_COMPRESS ) ; <nl> + } else { <nl> + / / Didn ' t request compression - > make sure the response is uncompressed <nl> + GPR_ASSERT ( ! ( inspector . GetMessageFlags ( ) & GRPC_WRITE_INTERNAL_COMPRESS ) ) ; <nl> } <nl> } <nl> } / / namespace <nl> bool InteropClient : : PerformLargeUnary ( SimpleRequest * request , <nl> CheckerFn custom_checks_fn ) { <nl> ClientContext context ; <nl> InteropClientContextInspector inspector ( context ) ; <nl> - / / If the request doesn ' t already specify the response type , default to <nl> - / / COMPRESSABLE . <nl> request - > set_response_size ( kLargeResponseSize ) ; <nl> grpc : : string payload ( kLargeRequestSize , ' \ 0 ' ) ; <nl> request - > mutable_payload ( ) - > set_body ( payload . c_str ( ) , kLargeRequestSize ) ; <nl> + if ( request - > has_expect_compressed ( ) ) { <nl> + if ( request - > expect_compressed ( ) . value ( ) ) { <nl> + context . set_compression_algorithm ( GRPC_COMPRESS_GZIP ) ; <nl> + } else { <nl> + context . set_compression_algorithm ( GRPC_COMPRESS_NONE ) ; <nl> + } <nl> + } <nl> <nl> Status s = serviceStub_ . 
Get ( ) - > UnaryCall ( & context , * request , response ) ; <nl> if ( ! AssertStatusOk ( s ) ) { <nl> bool InteropClient : : PerformLargeUnary ( SimpleRequest * request , <nl> custom_checks_fn ( inspector , request , response ) ; <nl> <nl> / / Payload related checks . <nl> - GPR_ASSERT ( response - > payload ( ) . type ( ) = = request - > response_type ( ) ) ; <nl> - switch ( response - > payload ( ) . type ( ) ) { <nl> - case PayloadType : : COMPRESSABLE : <nl> - GPR_ASSERT ( response - > payload ( ) . body ( ) = = <nl> - grpc : : string ( kLargeResponseSize , ' \ 0 ' ) ) ; <nl> - break ; <nl> - case PayloadType : : UNCOMPRESSABLE : { <nl> - / / We don ' t really check anything : We can ' t assert that the payload is <nl> - / / uncompressed because it ' s the server ' s prerogative to decide on that , <nl> - / / and different implementations decide differently ( ie , Java always <nl> - / / compresses when requested to do so , whereas C core throws away the <nl> - / / compressed payload if the output is larger than the input ) . <nl> - / / In addition , we don ' t compare the actual random bytes received because <nl> - / / asserting that data is sent / received properly isn ' t the purpose of this <nl> - / / test . Moreover , different implementations are also free to use <nl> - / / different sets of random bytes . <nl> - } break ; <nl> - default : <nl> - GPR_ASSERT ( false ) ; <nl> - } <nl> - <nl> + GPR_ASSERT ( response - > payload ( ) . body ( ) = = <nl> + grpc : : string ( kLargeResponseSize , ' \ 0 ' ) ) ; <nl> return true ; <nl> } <nl> <nl> bool InteropClient : : DoComputeEngineCreds ( <nl> SimpleResponse response ; <nl> request . set_fill_username ( true ) ; <nl> request . set_fill_oauth_scope ( true ) ; <nl> - request . set_response_type ( PayloadType : : COMPRESSABLE ) ; <nl> <nl> if ( ! PerformLargeUnary ( & request , & response ) ) { <nl> return false ; <nl> bool InteropClient : : DoJwtTokenCreds ( const grpc : : string & username ) { <nl> SimpleRequest request ; <nl> SimpleResponse response ; <nl> request . set_fill_username ( true ) ; <nl> - request . set_response_type ( PayloadType : : COMPRESSABLE ) ; <nl> <nl> if ( ! PerformLargeUnary ( & request , & response ) ) { <nl> return false ; <nl> bool InteropClient : : DoLargeUnary ( ) { <nl> gpr_log ( GPR_DEBUG , " Sending a large unary rpc . . . " ) ; <nl> SimpleRequest request ; <nl> SimpleResponse response ; <nl> - request . set_response_type ( PayloadType : : COMPRESSABLE ) ; <nl> if ( ! PerformLargeUnary ( & request , & response ) ) { <nl> return false ; <nl> } <nl> bool InteropClient : : DoLargeUnary ( ) { <nl> return true ; <nl> } <nl> <nl> - bool InteropClient : : DoLargeCompressedUnary ( ) { <nl> - const bool request_compression [ ] = { false , true } ; <nl> - const PayloadType payload_types [ ] = { COMPRESSABLE , UNCOMPRESSABLE } ; <nl> - for ( size_t i = 0 ; i < GPR_ARRAY_SIZE ( payload_types ) ; i + + ) { <nl> - for ( size_t j = 0 ; j < GPR_ARRAY_SIZE ( request_compression ) ; j + + ) { <nl> - char * log_suffix ; <nl> - gpr_asprintf ( & log_suffix , " ( compression = % s ; payload = % s ) " , <nl> - request_compression [ j ] ? " true " : " false " , <nl> - PayloadType_Name ( payload_types [ i ] ) . c_str ( ) ) ; <nl> - <nl> - gpr_log ( GPR_DEBUG , " Sending a large compressed unary rpc % s . " , <nl> - log_suffix ) ; <nl> - SimpleRequest request ; <nl> - SimpleResponse response ; <nl> - request . set_response_type ( payload_types [ i ] ) ; <nl> - request . 
set_request_compressed_response ( request_compression [ j ] ) ; <nl> - <nl> - if ( ! PerformLargeUnary ( & request , & response , CompressionChecks ) ) { <nl> - gpr_log ( GPR_ERROR , " Large compressed unary failed % s " , log_suffix ) ; <nl> - gpr_free ( log_suffix ) ; <nl> - return false ; <nl> - } <nl> - <nl> - gpr_log ( GPR_DEBUG , " Large compressed unary done % s . " , log_suffix ) ; <nl> + bool InteropClient : : DoClientCompressedUnary ( ) { <nl> + / / Probing for compression - checks support . <nl> + ClientContext probe_context ; <nl> + SimpleRequest probe_req ; <nl> + SimpleResponse probe_res ; <nl> + <nl> + probe_context . set_compression_algorithm ( GRPC_COMPRESS_NONE ) ; <nl> + probe_req . mutable_expect_compressed ( ) - > set_value ( true ) ; / / lies ! <nl> + <nl> + probe_req . set_response_size ( kLargeResponseSize ) ; <nl> + probe_req . mutable_payload ( ) - > set_body ( grpc : : string ( kLargeRequestSize , ' \ 0 ' ) ) ; <nl> + <nl> + gpr_log ( GPR_DEBUG , " Sending probe for compressed unary request . " ) ; <nl> + const Status s = <nl> + serviceStub_ . Get ( ) - > UnaryCall ( & probe_context , probe_req , & probe_res ) ; <nl> + if ( s . error_code ( ) ! = grpc : : StatusCode : : INVALID_ARGUMENT ) { <nl> + / / The server isn ' t able to evaluate incoming compression , making the rest <nl> + / / of this test moot . <nl> + gpr_log ( GPR_DEBUG , " Compressed unary request probe failed " ) ; <nl> + return false ; <nl> + } <nl> + gpr_log ( GPR_DEBUG , " Compressed unary request probe succeeded . Proceeding . " ) ; <nl> + <nl> + const std : : vector < bool > compressions = { true , false } ; <nl> + for ( size_t i = 0 ; i < compressions . size ( ) ; i + + ) { <nl> + char * log_suffix ; <nl> + gpr_asprintf ( & log_suffix , " ( compression = % s ) " , <nl> + compressions [ i ] ? " true " : " false " ) ; <nl> + <nl> + gpr_log ( GPR_DEBUG , " Sending compressed unary request % s . " , log_suffix ) ; <nl> + SimpleRequest request ; <nl> + SimpleResponse response ; <nl> + request . mutable_expect_compressed ( ) - > set_value ( compressions [ i ] ) ; <nl> + if ( ! PerformLargeUnary ( & request , & response , UnaryCompressionChecks ) ) { <nl> + gpr_log ( GPR_ERROR , " Compressed unary request failed % s " , log_suffix ) ; <nl> + gpr_free ( log_suffix ) ; <nl> + return false ; <nl> + } <nl> + <nl> + gpr_log ( GPR_DEBUG , " Compressed unary request failed % s " , log_suffix ) ; <nl> + gpr_free ( log_suffix ) ; <nl> + } <nl> + <nl> + return true ; <nl> + } <nl> + <nl> + bool InteropClient : : DoServerCompressedUnary ( ) { <nl> + const std : : vector < bool > compressions = { true , false } ; <nl> + for ( size_t i = 0 ; i < compressions . size ( ) ; i + + ) { <nl> + char * log_suffix ; <nl> + gpr_asprintf ( & log_suffix , " ( compression = % s ) " , <nl> + compressions [ i ] ? " true " : " false " ) ; <nl> + <nl> + gpr_log ( GPR_DEBUG , " Sending unary request for compressed response % s . " , <nl> + log_suffix ) ; <nl> + SimpleRequest request ; <nl> + SimpleResponse response ; <nl> + request . mutable_response_compressed ( ) - > set_value ( compressions [ i ] ) ; <nl> + <nl> + if ( ! 
PerformLargeUnary ( & request , & response , UnaryCompressionChecks ) ) { <nl> + gpr_log ( GPR_ERROR , " Request for compressed unary failed % s " , log_suffix ) ; <nl> gpr_free ( log_suffix ) ; <nl> + return false ; <nl> } <nl> + <nl> + gpr_log ( GPR_DEBUG , " Request for compressed unary failed % s " , log_suffix ) ; <nl> + gpr_free ( log_suffix ) ; <nl> } <nl> <nl> return true ; <nl> bool InteropClient : : DoRequestStreaming ( ) { <nl> serviceStub_ . Get ( ) - > StreamingInputCall ( & context , & response ) ) ; <nl> <nl> int aggregated_payload_size = 0 ; <nl> - for ( unsigned int i = 0 ; i < request_stream_sizes . size ( ) ; + + i ) { <nl> + for ( size_t i = 0 ; i < request_stream_sizes . size ( ) ; + + i ) { <nl> Payload * payload = request . mutable_payload ( ) ; <nl> payload - > set_body ( grpc : : string ( request_stream_sizes [ i ] , ' \ 0 ' ) ) ; <nl> if ( ! stream - > Write ( request ) ) { <nl> bool InteropClient : : DoRequestStreaming ( ) { <nl> } <nl> aggregated_payload_size + = request_stream_sizes [ i ] ; <nl> } <nl> - stream - > WritesDone ( ) ; <nl> + GPR_ASSERT ( stream - > WritesDone ( ) ) ; <nl> <nl> Status s = stream - > Finish ( ) ; <nl> if ( ! AssertStatusOk ( s ) ) { <nl> bool InteropClient : : DoResponseStreaming ( ) { <nl> return true ; <nl> } <nl> <nl> - bool InteropClient : : DoResponseCompressedStreaming ( ) { <nl> - const bool request_compression [ ] = { false , true } ; <nl> - const PayloadType payload_types [ ] = { COMPRESSABLE , UNCOMPRESSABLE } ; <nl> - for ( size_t i = 0 ; i < GPR_ARRAY_SIZE ( payload_types ) ; i + + ) { <nl> - for ( size_t j = 0 ; j < GPR_ARRAY_SIZE ( request_compression ) ; j + + ) { <nl> - ClientContext context ; <nl> - InteropClientContextInspector inspector ( context ) ; <nl> - StreamingOutputCallRequest request ; <nl> - <nl> - char * log_suffix ; <nl> - gpr_asprintf ( & log_suffix , " ( compression = % s ; payload = % s ) " , <nl> - request_compression [ j ] ? " true " : " false " , <nl> - PayloadType_Name ( payload_types [ i ] ) . c_str ( ) ) ; <nl> - <nl> - gpr_log ( GPR_DEBUG , " Receiving response streaming rpc % s . " , log_suffix ) ; <nl> - <nl> - request . set_response_type ( payload_types [ i ] ) ; <nl> - request . set_request_compressed_response ( request_compression [ j ] ) ; <nl> - <nl> - for ( size_t k = 0 ; k < response_stream_sizes . size ( ) ; + + k ) { <nl> - ResponseParameters * response_parameter = <nl> - request . add_response_parameters ( ) ; <nl> - response_parameter - > set_size ( response_stream_sizes [ k ] ) ; <nl> - } <nl> - StreamingOutputCallResponse response ; <nl> - <nl> - std : : unique_ptr < ClientReader < StreamingOutputCallResponse > > stream ( <nl> - serviceStub_ . Get ( ) - > StreamingOutputCall ( & context , request ) ) ; <nl> - <nl> - size_t k = 0 ; <nl> - while ( stream - > Read ( & response ) ) { <nl> - / / Payload related checks . <nl> - GPR_ASSERT ( response . payload ( ) . type ( ) = = request . response_type ( ) ) ; <nl> - switch ( response . payload ( ) . type ( ) ) { <nl> - case PayloadType : : COMPRESSABLE : <nl> - GPR_ASSERT ( response . payload ( ) . body ( ) = = <nl> - grpc : : string ( response_stream_sizes [ k ] , ' \ 0 ' ) ) ; <nl> - break ; <nl> - case PayloadType : : UNCOMPRESSABLE : <nl> - break ; <nl> - default : <nl> - GPR_ASSERT ( false ) ; <nl> - } <nl> - <nl> - / / Compression related checks . <nl> - if ( request . request_compressed_response ( ) ) { <nl> - GPR_ASSERT ( inspector . GetCallCompressionAlgorithm ( ) > <nl> - GRPC_COMPRESS_NONE ) ; <nl> - if ( request . 
response_type ( ) = = PayloadType : : COMPRESSABLE ) { <nl> - / / requested compression and compressable response = > results should <nl> - / / always be compressed . <nl> - GPR_ASSERT ( inspector . GetMessageFlags ( ) & <nl> - GRPC_WRITE_INTERNAL_COMPRESS ) ; <nl> - } <nl> - } else { <nl> - / / requested * no * compression . <nl> - GPR_ASSERT ( <nl> - ! ( inspector . GetMessageFlags ( ) & GRPC_WRITE_INTERNAL_COMPRESS ) ) ; <nl> - } <nl> - <nl> - + + k ; <nl> - } <nl> - <nl> - gpr_log ( GPR_DEBUG , " Response streaming done % s . " , log_suffix ) ; <nl> - gpr_free ( log_suffix ) ; <nl> + bool InteropClient : : DoClientCompressedStreaming ( ) { <nl> + / / Probing for compression - checks support . <nl> + ClientContext probe_context ; <nl> + StreamingInputCallRequest probe_req ; <nl> + StreamingInputCallResponse probe_res ; <nl> + <nl> + probe_context . set_compression_algorithm ( GRPC_COMPRESS_NONE ) ; <nl> + probe_req . mutable_expect_compressed ( ) - > set_value ( true ) ; / / lies ! <nl> + probe_req . mutable_payload ( ) - > set_body ( grpc : : string ( 27182 , ' \ 0 ' ) ) ; <nl> + <nl> + gpr_log ( GPR_DEBUG , " Sending probe for compressed streaming request . " ) ; <nl> + <nl> + std : : unique_ptr < ClientWriter < StreamingInputCallRequest > > probe_stream ( <nl> + serviceStub_ . Get ( ) - > StreamingInputCall ( & probe_context , & probe_res ) ) ; <nl> + <nl> + if ( ! probe_stream - > Write ( probe_req ) ) { <nl> + gpr_log ( GPR_ERROR , " % s ( ) : stream - > Write ( ) failed " , __func__ ) ; <nl> + return TransientFailureOrAbort ( ) ; <nl> + } <nl> + Status s = probe_stream - > Finish ( ) ; <nl> + if ( s . error_code ( ) ! = grpc : : StatusCode : : INVALID_ARGUMENT ) { <nl> + / / The server isn ' t able to evaluate incoming compression , making the rest <nl> + / / of this test moot . <nl> + gpr_log ( GPR_DEBUG , " Compressed streaming request probe failed " ) ; <nl> + return false ; <nl> + } <nl> + gpr_log ( GPR_DEBUG , <nl> + " Compressed streaming request probe succeeded . Proceeding . " ) ; <nl> + <nl> + ClientContext context ; <nl> + StreamingInputCallRequest request ; <nl> + StreamingInputCallResponse response ; <nl> + <nl> + context . set_compression_algorithm ( GRPC_COMPRESS_GZIP ) ; <nl> + std : : unique_ptr < ClientWriter < StreamingInputCallRequest > > stream ( <nl> + serviceStub_ . Get ( ) - > StreamingInputCall ( & context , & response ) ) ; <nl> + <nl> + request . mutable_payload ( ) - > set_body ( grpc : : string ( 27182 , ' \ 0 ' ) ) ; <nl> + request . mutable_expect_compressed ( ) - > set_value ( true ) ; <nl> + gpr_log ( GPR_DEBUG , " Sending streaming request with compression enabled " ) ; <nl> + if ( ! stream - > Write ( request ) ) { <nl> + gpr_log ( GPR_ERROR , " % s ( ) : stream - > Write ( ) failed " , __func__ ) ; <nl> + return TransientFailureOrAbort ( ) ; <nl> + } <nl> + <nl> + WriteOptions wopts ; <nl> + wopts . set_no_compression ( ) ; <nl> + request . mutable_payload ( ) - > set_body ( grpc : : string ( 45904 , ' \ 0 ' ) ) ; <nl> + request . mutable_expect_compressed ( ) - > set_value ( false ) ; <nl> + gpr_log ( GPR_DEBUG , " Sending streaming request with compression disabled " ) ; <nl> + if ( ! stream - > Write ( request , wopts ) ) { <nl> + gpr_log ( GPR_ERROR , " % s ( ) : stream - > Write ( ) failed " , __func__ ) ; <nl> + return TransientFailureOrAbort ( ) ; <nl> + } <nl> + GPR_ASSERT ( stream - > WritesDone ( ) ) ; <nl> + <nl> + s = stream - > Finish ( ) ; <nl> + if ( ! 
AssertStatusOk ( s ) ) { <nl> + return false ; <nl> + } <nl> + <nl> + return true ; <nl> + } <nl> <nl> - if ( k < response_stream_sizes . size ( ) ) { <nl> - / / stream - > Read ( ) failed before reading all the expected messages . This <nl> - / / is most likely due to a connection failure . <nl> - gpr_log ( GPR_ERROR , <nl> - " DoResponseCompressedStreaming ( ) : Responses read ( k = % " PRIuPTR <nl> - " ) is " <nl> - " less than the expected messages ( i . e " <nl> - " response_stream_sizes . size ( ) ( % " PRIuPTR " ) ) . ( i = % " PRIuPTR <nl> - " , j = % " PRIuPTR " ) " , <nl> - k , response_stream_sizes . size ( ) , i , j ) ; <nl> - return TransientFailureOrAbort ( ) ; <nl> - } <nl> - <nl> - Status s = stream - > Finish ( ) ; <nl> - if ( ! AssertStatusOk ( s ) ) { <nl> - return false ; <nl> - } <nl> + bool InteropClient : : DoServerCompressedStreaming ( ) { <nl> + const std : : vector < bool > compressions = { true , false } ; <nl> + const std : : vector < int > sizes = { 31415 , 92653 } ; <nl> + <nl> + ClientContext context ; <nl> + InteropClientContextInspector inspector ( context ) ; <nl> + StreamingOutputCallRequest request ; <nl> + <nl> + GPR_ASSERT ( compressions . size ( ) = = sizes . size ( ) ) ; <nl> + for ( size_t i = 0 ; i < sizes . size ( ) ; i + + ) { <nl> + char * log_suffix ; <nl> + gpr_asprintf ( & log_suffix , " ( compression = % s ; size = % d ) " , <nl> + compressions [ i ] ? " true " : " false " , sizes [ i ] ) ; <nl> + <nl> + gpr_log ( GPR_DEBUG , " Sending request streaming rpc % s . " , log_suffix ) ; <nl> + gpr_free ( log_suffix ) ; <nl> + <nl> + ResponseParameters * const response_parameter = <nl> + request . add_response_parameters ( ) ; <nl> + response_parameter - > mutable_compressed ( ) - > set_value ( compressions [ i ] ) ; <nl> + response_parameter - > set_size ( sizes [ i ] ) ; <nl> + } <nl> + std : : unique_ptr < ClientReader < StreamingOutputCallResponse > > stream ( <nl> + serviceStub_ . Get ( ) - > StreamingOutputCall ( & context , request ) ) ; <nl> + <nl> + size_t k = 0 ; <nl> + StreamingOutputCallResponse response ; <nl> + while ( stream - > Read ( & response ) ) { <nl> + / / Payload size checks . <nl> + GPR_ASSERT ( response . payload ( ) . body ( ) = = <nl> + grpc : : string ( request . response_parameters ( k ) . size ( ) , ' \ 0 ' ) ) ; <nl> + <nl> + / / Compression checks . <nl> + GPR_ASSERT ( request . response_parameters ( k ) . has_compressed ( ) ) ; <nl> + if ( request . response_parameters ( k ) . compressed ( ) . value ( ) ) { <nl> + GPR_ASSERT ( inspector . GetCallCompressionAlgorithm ( ) > GRPC_COMPRESS_NONE ) ; <nl> + GPR_ASSERT ( inspector . GetMessageFlags ( ) & GRPC_WRITE_INTERNAL_COMPRESS ) ; <nl> + } else { <nl> + / / requested * no * compression . <nl> + GPR_ASSERT ( ! ( inspector . GetMessageFlags ( ) & GRPC_WRITE_INTERNAL_COMPRESS ) ) ; <nl> } <nl> + + + k ; <nl> + } <nl> + <nl> + if ( k < sizes . size ( ) ) { <nl> + / / stream - > Read ( ) failed before reading all the expected messages . This <nl> + / / is most likely due to a connection failure . <nl> + gpr_log ( GPR_ERROR , " % s ( ) : Responses read ( k = % " PRIuPTR <nl> + " ) is " <nl> + " less than the expected messages ( i . e " <nl> + " response_stream_sizes . size ( ) ( % " PRIuPTR " ) ) . " , <nl> + __func__ , k , response_stream_sizes . size ( ) ) ; <nl> + return TransientFailureOrAbort ( ) ; <nl> } <nl> <nl> + Status s = stream - > Finish ( ) ; <nl> + if ( ! 
AssertStatusOk ( s ) ) { <nl> + return false ; <nl> + } <nl> return true ; <nl> } <nl> <nl> bool InteropClient : : DoPingPong ( ) { <nl> stream ( serviceStub_ . Get ( ) - > FullDuplexCall ( & context ) ) ; <nl> <nl> StreamingOutputCallRequest request ; <nl> - request . set_response_type ( PayloadType : : COMPRESSABLE ) ; <nl> ResponseParameters * response_parameter = request . add_response_parameters ( ) ; <nl> Payload * payload = request . mutable_payload ( ) ; <nl> StreamingOutputCallResponse response ; <nl> bool InteropClient : : DoCancelAfterFirstResponse ( ) { <nl> stream ( serviceStub_ . Get ( ) - > FullDuplexCall ( & context ) ) ; <nl> <nl> StreamingOutputCallRequest request ; <nl> - request . set_response_type ( PayloadType : : COMPRESSABLE ) ; <nl> ResponseParameters * response_parameter = request . add_response_parameters ( ) ; <nl> response_parameter - > set_size ( 31415 ) ; <nl> request . mutable_payload ( ) - > set_body ( grpc : : string ( 27182 , ' \ 0 ' ) ) ; <nl> bool InteropClient : : DoCustomMetadata ( ) { <nl> stream ( serviceStub_ . Get ( ) - > FullDuplexCall ( & context ) ) ; <nl> <nl> StreamingOutputCallRequest request ; <nl> - request . set_response_type ( PayloadType : : COMPRESSABLE ) ; <nl> ResponseParameters * response_parameter = request . add_response_parameters ( ) ; <nl> response_parameter - > set_size ( kLargeResponseSize ) ; <nl> grpc : : string payload ( kLargeRequestSize , ' \ 0 ' ) ; <nl> mmm a / test / cpp / interop / interop_client . h <nl> ppp b / test / cpp / interop / interop_client . h <nl> class InteropClient { <nl> <nl> bool DoEmpty ( ) ; <nl> bool DoLargeUnary ( ) ; <nl> - bool DoLargeCompressedUnary ( ) ; <nl> + bool DoServerCompressedUnary ( ) ; <nl> + bool DoClientCompressedUnary ( ) ; <nl> bool DoPingPong ( ) ; <nl> bool DoHalfDuplex ( ) ; <nl> bool DoRequestStreaming ( ) ; <nl> bool DoResponseStreaming ( ) ; <nl> - bool DoResponseCompressedStreaming ( ) ; <nl> + bool DoServerCompressedStreaming ( ) ; <nl> + bool DoClientCompressedStreaming ( ) ; <nl> bool DoResponseStreamingWithSlowConsumer ( ) ; <nl> bool DoCancelAfterBegin ( ) ; <nl> bool DoCancelAfterFirstResponse ( ) ; <nl> similarity index 72 % <nl> rename from test / cpp / interop / server_main . cc <nl> rename to test / cpp / interop / interop_server . cc <nl> mmm a / test / cpp / interop / server_main . cc <nl> ppp b / test / cpp / interop / interop_server . cc <nl> <nl> / * <nl> * <nl> - * Copyright 2015 , Google Inc . <nl> + * Copyright 2015 - 2016 , Google Inc . <nl> * All rights reserved . <nl> * <nl> * Redistribution and use in source and binary forms , with or without <nl> <nl> # include < grpc / support / log . h > <nl> # include < grpc / support / useful . h > <nl> <nl> + # include " src / core / lib / transport / byte_stream . h " <nl> # include " src / proto / grpc / testing / empty . grpc . pb . h " <nl> # include " src / proto / grpc / testing / messages . grpc . pb . h " <nl> # include " src / proto / grpc / testing / test . grpc . pb . 
h " <nl> using grpc : : ServerCredentials ; <nl> using grpc : : ServerReader ; <nl> using grpc : : ServerReaderWriter ; <nl> using grpc : : ServerWriter ; <nl> + using grpc : : WriteOptions ; <nl> using grpc : : SslServerCredentialsOptions ; <nl> using grpc : : testing : : InteropServerContextInspector ; <nl> using grpc : : testing : : Payload ; <nl> - using grpc : : testing : : PayloadType ; <nl> using grpc : : testing : : SimpleRequest ; <nl> using grpc : : testing : : SimpleResponse ; <nl> using grpc : : testing : : StreamingInputCallRequest ; <nl> using grpc : : testing : : TestService ; <nl> using grpc : : Status ; <nl> <nl> static bool got_sigint = false ; <nl> - static const char * kRandomFile = " test / cpp / interop / rnd . dat " ; <nl> <nl> const char kEchoInitialMetadataKey [ ] = " x - grpc - test - echo - initial " ; <nl> const char kEchoTrailingBinMetadataKey [ ] = " x - grpc - test - echo - trailing - bin " ; <nl> void MaybeEchoMetadata ( ServerContext * context ) { <nl> } <nl> } <nl> <nl> - bool SetPayload ( PayloadType response_type , int size , Payload * payload ) { <nl> - payload - > set_type ( response_type ) ; <nl> - switch ( response_type ) { <nl> - case PayloadType : : COMPRESSABLE : { <nl> - std : : unique_ptr < char [ ] > body ( new char [ size ] ( ) ) ; <nl> - payload - > set_body ( body . get ( ) , size ) ; <nl> - } break ; <nl> - case PayloadType : : UNCOMPRESSABLE : { <nl> - std : : unique_ptr < char [ ] > body ( new char [ size ] ( ) ) ; <nl> - std : : ifstream rnd_file ( kRandomFile ) ; <nl> - GPR_ASSERT ( rnd_file . good ( ) ) ; <nl> - rnd_file . read ( body . get ( ) , size ) ; <nl> - GPR_ASSERT ( ! rnd_file . eof ( ) ) ; / / Requested more rnd bytes than available <nl> - payload - > set_body ( body . get ( ) , size ) ; <nl> - } break ; <nl> - default : <nl> - GPR_ASSERT ( false ) ; <nl> - } <nl> + bool SetPayload ( int size , Payload * payload ) { <nl> + std : : unique_ptr < char [ ] > body ( new char [ size ] ( ) ) ; <nl> + payload - > set_body ( body . get ( ) , size ) ; <nl> return true ; <nl> } <nl> <nl> - template < typename RequestType > <nl> - void SetResponseCompression ( ServerContext * context , <nl> - const RequestType & request ) { <nl> - if ( request . request_compressed_response ( ) ) { <nl> - / / Any level would do , let ' s go for HIGH because we are overachievers . <nl> - context - > set_compression_level ( GRPC_COMPRESS_LEVEL_HIGH ) ; <nl> + bool CheckExpectedCompression ( const ServerContext & context , <nl> + const bool compression_expected ) { <nl> + const InteropServerContextInspector inspector ( context ) ; <nl> + const grpc_compression_algorithm received_compression = <nl> + inspector . GetCallCompressionAlgorithm ( ) ; <nl> + <nl> + if ( compression_expected ) { <nl> + if ( received_compression = = GRPC_COMPRESS_NONE ) { <nl> + / / Expected some compression , got NONE . This is an error . <nl> + gpr_log ( GPR_ERROR , <nl> + " Expected compression but got uncompressed request from client . " ) ; <nl> + return false ; <nl> + } <nl> + if ( ! ( inspector . GetMessageFlags ( ) & GRPC_WRITE_INTERNAL_COMPRESS ) ) { <nl> + gpr_log ( GPR_ERROR , <nl> + " Failure : Requested compression in a compressable request , but " <nl> + " compression bit in message flags not set . " ) ; <nl> + return false ; <nl> + } <nl> + } else { <nl> + / / Didn ' t expect compression - > make sure the request is uncompressed <nl> + if ( inspector . 
GetMessageFlags ( ) & GRPC_WRITE_INTERNAL_COMPRESS ) { <nl> + gpr_log ( GPR_ERROR , <nl> + " Failure : Didn ' t requested compression , but compression bit in " <nl> + " message flags set . " ) ; <nl> + return false ; <nl> + } <nl> } <nl> + return true ; <nl> } <nl> <nl> class TestServiceImpl : public TestService : : Service { <nl> class TestServiceImpl : public TestService : : Service { <nl> Status UnaryCall ( ServerContext * context , const SimpleRequest * request , <nl> SimpleResponse * response ) { <nl> MaybeEchoMetadata ( context ) ; <nl> - SetResponseCompression ( context , * request ) ; <nl> + if ( request - > has_response_compressed ( ) ) { <nl> + const bool compression_requested = request - > response_compressed ( ) . value ( ) ; <nl> + gpr_log ( GPR_DEBUG , " Request for compression ( % s ) present for % s " , <nl> + compression_requested ? " enabled " : " disabled " , __func__ ) ; <nl> + if ( compression_requested ) { <nl> + / / Any level would do , let ' s go for HIGH because we are overachievers . <nl> + context - > set_compression_level ( GRPC_COMPRESS_LEVEL_HIGH ) ; <nl> + } else { <nl> + context - > set_compression_level ( GRPC_COMPRESS_LEVEL_NONE ) ; <nl> + } <nl> + } <nl> + if ( ! CheckExpectedCompression ( * context , <nl> + request - > expect_compressed ( ) . value ( ) ) ) { <nl> + return Status ( grpc : : StatusCode : : INVALID_ARGUMENT , <nl> + " Compressed request expectation not met . " ) ; <nl> + } <nl> if ( request - > response_size ( ) > 0 ) { <nl> - if ( ! SetPayload ( request - > response_type ( ) , request - > response_size ( ) , <nl> - response - > mutable_payload ( ) ) ) { <nl> - return Status ( grpc : : StatusCode : : INTERNAL , " Error creating payload . " ) ; <nl> + if ( ! SetPayload ( request - > response_size ( ) , response - > mutable_payload ( ) ) ) { <nl> + return Status ( grpc : : StatusCode : : INVALID_ARGUMENT , <nl> + " Error creating payload . " ) ; <nl> } <nl> } <nl> <nl> class TestServiceImpl : public TestService : : Service { <nl> Status StreamingOutputCall ( <nl> ServerContext * context , const StreamingOutputCallRequest * request , <nl> ServerWriter < StreamingOutputCallResponse > * writer ) { <nl> - SetResponseCompression ( context , * request ) ; <nl> StreamingOutputCallResponse response ; <nl> bool write_success = true ; <nl> for ( int i = 0 ; write_success & & i < request - > response_parameters_size ( ) ; <nl> i + + ) { <nl> - if ( ! SetPayload ( request - > response_type ( ) , <nl> - request - > response_parameters ( i ) . size ( ) , <nl> + if ( ! SetPayload ( request - > response_parameters ( i ) . size ( ) , <nl> response . mutable_payload ( ) ) ) { <nl> - return Status ( grpc : : StatusCode : : INTERNAL , " Error creating payload . " ) ; <nl> + return Status ( grpc : : StatusCode : : INVALID_ARGUMENT , <nl> + " Error creating payload . " ) ; <nl> + } <nl> + WriteOptions wopts ; <nl> + if ( request - > response_parameters ( i ) . has_compressed ( ) ) { <nl> + / / Compress by default . Disabled on a per - message basis . <nl> + context - > set_compression_level ( GRPC_COMPRESS_LEVEL_HIGH ) ; <nl> + const bool compression_requested = <nl> + request - > response_parameters ( i ) . compressed ( ) . value ( ) ; <nl> + gpr_log ( GPR_DEBUG , " Request for compression ( % s ) present for % s " , <nl> + compression_requested ? " enabled " : " disabled " , __func__ ) ; <nl> + if ( ! compression_requested ) { <nl> + wopts . set_no_compression ( ) ; <nl> + } / / else , compression is already enabled via the context . 
<nl> } <nl> int time_us ; <nl> if ( ( time_us = request - > response_parameters ( i ) . interval_us ( ) ) > 0 ) { <nl> class TestServiceImpl : public TestService : : Service { <nl> gpr_time_from_micros ( time_us , GPR_TIMESPAN ) ) ; <nl> gpr_sleep_until ( sleep_time ) ; <nl> } <nl> - write_success = writer - > Write ( response ) ; <nl> + write_success = writer - > Write ( response , wopts ) ; <nl> } <nl> if ( write_success ) { <nl> return Status : : OK ; <nl> class TestServiceImpl : public TestService : : Service { <nl> StreamingInputCallRequest request ; <nl> int aggregated_payload_size = 0 ; <nl> while ( reader - > Read ( & request ) ) { <nl> + if ( ! CheckExpectedCompression ( * context , <nl> + request . expect_compressed ( ) . value ( ) ) ) { <nl> + return Status ( grpc : : StatusCode : : INVALID_ARGUMENT , <nl> + " Compressed request expectation not met . " ) ; <nl> + } <nl> if ( request . has_payload ( ) ) { <nl> aggregated_payload_size + = request . payload ( ) . body ( ) . size ( ) ; <nl> } <nl> class TestServiceImpl : public TestService : : Service { <nl> StreamingOutputCallResponse response ; <nl> bool write_success = true ; <nl> while ( write_success & & stream - > Read ( & request ) ) { <nl> - SetResponseCompression ( context , request ) ; <nl> if ( request . response_parameters_size ( ) ! = 0 ) { <nl> response . mutable_payload ( ) - > set_type ( request . payload ( ) . type ( ) ) ; <nl> response . mutable_payload ( ) - > set_body ( <nl> deleted file mode 100644 <nl> index 8c7f38f9e0e . . 00000000000 <nl> Binary files a / test / cpp / interop / rnd . dat and / dev / null differ <nl> mmm a / test / cpp / interop / server_helper . cc <nl> ppp b / test / cpp / interop / server_helper . cc <nl> uint32_t InteropServerContextInspector : : GetEncodingsAcceptedByClient ( ) const { <nl> return grpc_call_test_only_get_encodings_accepted_by_peer ( context_ . call_ ) ; <nl> } <nl> <nl> + uint32_t InteropServerContextInspector : : GetMessageFlags ( ) const { <nl> + return grpc_call_test_only_get_message_flags ( context_ . call_ ) ; <nl> + } <nl> + <nl> std : : shared_ptr < const AuthContext > <nl> InteropServerContextInspector : : GetAuthContext ( ) const { <nl> return context_ . auth_context ( ) ; <nl> mmm a / test / cpp / interop / server_helper . h <nl> ppp b / test / cpp / interop / server_helper . h <nl> class InteropServerContextInspector { <nl> bool IsCancelled ( ) const ; <nl> grpc_compression_algorithm GetCallCompressionAlgorithm ( ) const ; <nl> uint32_t GetEncodingsAcceptedByClient ( ) const ; <nl> + uint32_t GetMessageFlags ( ) const ; <nl> <nl> private : <nl> const : : grpc : : ServerContext & context_ ; <nl> mmm a / test / cpp / interop / stress_interop_client . cc <nl> ppp b / test / cpp / interop / stress_interop_client . 
cc <nl> bool StressTestInteropClient : : RunTest ( TestCaseType test_case ) { <nl> is_success = interop_client_ - > DoLargeUnary ( ) ; <nl> break ; <nl> } <nl> - case LARGE_COMPRESSED_UNARY : { <nl> - is_success = interop_client_ - > DoLargeCompressedUnary ( ) ; <nl> + case CLIENT_COMPRESSED_UNARY : { <nl> + is_success = interop_client_ - > DoClientCompressedUnary ( ) ; <nl> + break ; <nl> + } <nl> + case CLIENT_COMPRESSED_STREAMING : { <nl> + is_success = interop_client_ - > DoClientCompressedStreaming ( ) ; <nl> break ; <nl> } <nl> case CLIENT_STREAMING : { <nl> bool StressTestInteropClient : : RunTest ( TestCaseType test_case ) { <nl> is_success = interop_client_ - > DoResponseStreaming ( ) ; <nl> break ; <nl> } <nl> + case SERVER_COMPRESSED_UNARY : { <nl> + is_success = interop_client_ - > DoServerCompressedUnary ( ) ; <nl> + break ; <nl> + } <nl> case SERVER_COMPRESSED_STREAMING : { <nl> - is_success = interop_client_ - > DoResponseCompressedStreaming ( ) ; <nl> + is_success = interop_client_ - > DoServerCompressedStreaming ( ) ; <nl> break ; <nl> } <nl> case SLOW_CONSUMER : { <nl> mmm a / test / cpp / interop / stress_interop_client . h <nl> ppp b / test / cpp / interop / stress_interop_client . h <nl> using std : : vector ; <nl> <nl> enum TestCaseType { <nl> UNKNOWN_TEST = - 1 , <nl> - EMPTY_UNARY = 0 , <nl> - LARGE_UNARY = 1 , <nl> - LARGE_COMPRESSED_UNARY = 2 , <nl> - CLIENT_STREAMING = 3 , <nl> - SERVER_STREAMING = 4 , <nl> - SERVER_COMPRESSED_STREAMING = 5 , <nl> - SLOW_CONSUMER = 6 , <nl> - HALF_DUPLEX = 7 , <nl> - PING_PONG = 8 , <nl> - CANCEL_AFTER_BEGIN = 9 , <nl> - CANCEL_AFTER_FIRST_RESPONSE = 10 , <nl> - TIMEOUT_ON_SLEEPING_SERVER = 11 , <nl> - EMPTY_STREAM = 12 , <nl> - STATUS_CODE_AND_MESSAGE = 13 , <nl> - CUSTOM_METADATA = 14 <nl> + EMPTY_UNARY , <nl> + LARGE_UNARY , <nl> + CLIENT_COMPRESSED_UNARY , <nl> + CLIENT_COMPRESSED_STREAMING , <nl> + CLIENT_STREAMING , <nl> + SERVER_STREAMING , <nl> + SERVER_COMPRESSED_UNARY , <nl> + SERVER_COMPRESSED_STREAMING , <nl> + SLOW_CONSUMER , <nl> + HALF_DUPLEX , <nl> + PING_PONG , <nl> + CANCEL_AFTER_BEGIN , <nl> + CANCEL_AFTER_FIRST_RESPONSE , <nl> + TIMEOUT_ON_SLEEPING_SERVER , <nl> + EMPTY_STREAM , <nl> + STATUS_CODE_AND_MESSAGE , <nl> + CUSTOM_METADATA <nl> } ; <nl> <nl> const vector < pair < TestCaseType , grpc : : string > > kTestCaseList = { <nl> { EMPTY_UNARY , " empty_unary " } , <nl> { LARGE_UNARY , " large_unary " } , <nl> - { LARGE_COMPRESSED_UNARY , " large_compressed_unary " } , <nl> + { CLIENT_COMPRESSED_UNARY , " client_compressed_unary " } , <nl> + { CLIENT_COMPRESSED_STREAMING , " client_compressed_streaming " } , <nl> { CLIENT_STREAMING , " client_streaming " } , <nl> { SERVER_STREAMING , " server_streaming " } , <nl> + { SERVER_COMPRESSED_UNARY , " server_compressed_unary " } , <nl> { SERVER_COMPRESSED_STREAMING , " server_compressed_streaming " } , <nl> { SLOW_CONSUMER , " slow_consumer " } , <nl> { HALF_DUPLEX , " half_duplex " } , <nl> new file mode 100644 <nl> index 00000000000 . . 9dfc040d736 <nl> mmm / dev / null <nl> ppp b / tools / dockerfile / test / csharp_coreclr_x64 / Dockerfile <nl> <nl> + # Copyright 2015 , Google Inc . <nl> + # All rights reserved . <nl> + # <nl> + # Redistribution and use in source and binary forms , with or without <nl> + # modification , are permitted provided that the following conditions are <nl> + # met : <nl> + # <nl> + # * Redistributions of source code must retain the above copyright <nl> + # notice , this list of conditions and the following disclaimer . 
<nl> + # * Redistributions in binary form must reproduce the above <nl> + # copyright notice , this list of conditions and the following disclaimer <nl> + # in the documentation and / or other materials provided with the <nl> + # distribution . <nl> + # * Neither the name of Google Inc . nor the names of its <nl> + # contributors may be used to endorse or promote products derived from <nl> + # this software without specific prior written permission . <nl> + # <nl> + # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS <nl> + # " AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT <nl> + # LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR <nl> + # A PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT <nl> + # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL , <nl> + # SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT <nl> + # LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , <nl> + # DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY <nl> + # THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT <nl> + # ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> + # OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> + <nl> + FROM microsoft / dotnet : 1 . 0 . 0 - preview1 <nl> + <nl> + # Install Git and basic packages . <nl> + RUN apt - get update & & apt - get install - y \ <nl> + autoconf \ <nl> + autotools - dev \ <nl> + build - essential \ <nl> + bzip2 \ <nl> + ccache \ <nl> + curl \ <nl> + gcc \ <nl> + gcc - multilib \ <nl> + git \ <nl> + golang \ <nl> + gyp \ <nl> + lcov \ <nl> + libc6 \ <nl> + libc6 - dbg \ <nl> + libc6 - dev \ <nl> + libgtest - dev \ <nl> + libtool \ <nl> + make \ <nl> + perl \ <nl> + strace \ <nl> + python - dev \ <nl> + python - setuptools \ <nl> + python - yaml \ <nl> + telnet \ <nl> + unzip \ <nl> + wget \ <nl> + zip & & apt - get clean <nl> + <nl> + # = = = = = = = = = = = = = = = = <nl> + # Build profiling <nl> + RUN apt - get update & & apt - get install - y time & & apt - get clean <nl> + <nl> + # Prepare ccache <nl> + RUN ln - s / usr / bin / ccache / usr / local / bin / gcc <nl> + RUN ln - s / usr / bin / ccache / usr / local / bin / g + + <nl> + RUN ln - s / usr / bin / ccache / usr / local / bin / cc <nl> + RUN ln - s / usr / bin / ccache / usr / local / bin / c + + <nl> + RUN ln - s / usr / bin / ccache / usr / local / bin / clang <nl> + RUN ln - s / usr / bin / ccache / usr / local / bin / clang + + <nl> + <nl> + # = = = = = = = = = = = = = = = = = = = = = = <nl> + # Zookeeper dependencies <nl> + # TODO ( jtattermusch ) : is zookeeper still needed ? <nl> + RUN apt - get install - y libzookeeper - mt - dev <nl> + <nl> + RUN mkdir / var / local / jenkins <nl> + <nl> + # Define the default command . <nl> + CMD [ " bash " ] <nl> mmm a / tools / run_tests / build_artifact_python . bat <nl> ppp b / tools / run_tests / build_artifact_python . bat <nl> set NUGET = C : \ nuget \ nuget . exe <nl> <nl> mkdir src \ python \ grpcio \ grpc \ _cython \ _windows <nl> <nl> + @ rem TODO ( atash ) : maybe we could avoid the grpc_c . ( 32 | 64 ) . python shim below if <nl> + @ rem this used the right python build ? <nl> copy / Y vsprojects \ Release \ grpc_dll . dll src \ python \ grpcio \ grpc \ _cython \ _windows \ grpc_c . 32 . python | | goto : error <nl> copy / Y vsprojects \ x64 \ Release \ grpc_dll . 
dll src \ python \ grpcio \ grpc \ _cython \ _windows \ grpc_c . 64 . python | | goto : error <nl> <nl> - <nl> set PATH = C : \ % 1 ; C : \ % 1 \ scripts ; C : \ msys64 \ mingw % 2 \ bin ; % PATH % <nl> <nl> pip install - - upgrade six <nl> pip install - rrequirements . txt <nl> set GRPC_PYTHON_USE_CUSTOM_BDIST = 0 <nl> set GRPC_PYTHON_BUILD_WITH_CYTHON = 1 <nl> <nl> - @ rem TODO ( atash ) : maybe we could avoid the grpc_c . ( 32 | 64 ) . python shim above if <nl> - @ rem this used the right python build ? <nl> - python setup . py bdist_wheel <nl> - <nl> - @ rem Build gRPC Python tools <nl> - @ rem <nl> @ rem Because this is windows and * everything seems to hate Windows * we have to <nl> @ rem set all of these flags ourselves because Python won ' t help us ( see the <nl> @ rem setup . py of the grpcio_tools project ) . <nl> set GRPC_PYTHON_CFLAGS = - fno - wrapv - frtti - std = c + + 11 <nl> python - c " from distutils . cygwinccompiler import get_msvcr ; print ( get_msvcr ( ) [ 0 ] ) " > temp . txt <nl> set / p PYTHON_MSVCR = < temp . txt <nl> set GRPC_PYTHON_LDFLAGS = - static - libgcc - static - libstdc + + - mcrtdll = % PYTHON_MSVCR % - static - lpthread <nl> + <nl> + <nl> + @ rem Build gRPC <nl> + if % 2 = = 32 ( <nl> + python setup . py build_ext - c mingw32 <nl> + ) else ( <nl> + python setup . py build_ext - c mingw32 - DMS_WIN64 <nl> + ) <nl> + python setup . py bdist_wheel <nl> + <nl> + <nl> + @ rem Build gRPC Python tools <nl> python tools \ distrib \ python \ make_grpcio_tools . py <nl> if % 2 = = 32 ( <nl> python tools \ distrib \ python \ grpcio_tools \ setup . py build_ext - c mingw32 <nl> new file mode 100644 <nl> index 00000000000 . . cead6d0e02c <nl> mmm / dev / null <nl> ppp b / tools / run_tests / build_csharp_coreclr . bat <nl> <nl> + @ rem Copyright 2016 , Google Inc . <nl> + @ rem All rights reserved . <nl> + @ rem <nl> + @ rem Redistribution and use in source and binary forms , with or without <nl> + @ rem modification , are permitted provided that the following conditions are <nl> + @ rem met : <nl> + @ rem <nl> + @ rem * Redistributions of source code must retain the above copyright <nl> + @ rem notice , this list of conditions and the following disclaimer . <nl> + @ rem * Redistributions in binary form must reproduce the above <nl> + @ rem copyright notice , this list of conditions and the following disclaimer <nl> + @ rem in the documentation and / or other materials provided with the <nl> + @ rem distribution . <nl> + @ rem * Neither the name of Google Inc . nor the names of its <nl> + @ rem contributors may be used to endorse or promote products derived from <nl> + @ rem this software without specific prior written permission . <nl> + @ rem <nl> + @ rem THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS <nl> + @ rem " AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT <nl> + @ rem LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR <nl> + @ rem A PARTICULAR PURPOSE ARE DISCLAIMED . 
IN NO EVENT SHALL THE COPYRIGHT <nl> + @ rem OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL , <nl> + @ rem SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT <nl> + @ rem LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , <nl> + @ rem DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY <nl> + @ rem THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT <nl> + @ rem ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> + @ rem OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> + <nl> + setlocal <nl> + <nl> + cd / d % ~ dp0 \ . . \ . . \ src \ csharp <nl> + <nl> + dotnet restore . | | goto : error <nl> + <nl> + dotnet build - f netstandard1 . 5 - - configuration % MSBUILD_CONFIG % " * * / project . json " | | goto : error <nl> + <nl> + endlocal <nl> + <nl> + goto : EOF <nl> + <nl> + : error <nl> + echo Failed ! <nl> + exit / b % errorlevel % <nl> new file mode 100755 <nl> index 00000000000 . . 68c19cb6c9d <nl> mmm / dev / null <nl> ppp b / tools / run_tests / build_csharp_coreclr . sh <nl> <nl> + # ! / bin / bash <nl> + # Copyright 2015 , Google Inc . <nl> + # All rights reserved . <nl> + # <nl> + # Redistribution and use in source and binary forms , with or without <nl> + # modification , are permitted provided that the following conditions are <nl> + # met : <nl> + # <nl> + # * Redistributions of source code must retain the above copyright <nl> + # notice , this list of conditions and the following disclaimer . <nl> + # * Redistributions in binary form must reproduce the above <nl> + # copyright notice , this list of conditions and the following disclaimer <nl> + # in the documentation and / or other materials provided with the <nl> + # distribution . <nl> + # * Neither the name of Google Inc . nor the names of its <nl> + # contributors may be used to endorse or promote products derived from <nl> + # this software without specific prior written permission . <nl> + # <nl> + # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS <nl> + # " AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT <nl> + # LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR <nl> + # A PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT <nl> + # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL , <nl> + # SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT <nl> + # LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , <nl> + # DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY <nl> + # THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT <nl> + # ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> + # OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> + <nl> + set - ex <nl> + <nl> + cd $ ( dirname $ 0 ) / . . / . . / src / csharp <nl> + <nl> + # TODO ( jtattermusch ) : introduce caching <nl> + dotnet restore . <nl> + <nl> + dotnet build - f netstandard1 . 5 - - configuration $ MSBUILD_CONFIG ' * * / project . json ' <nl> + <nl> + # Grpc . IntegrationTesting doesn ' t get built by the previous command for some reason . <nl> + # TODO ( jtattermusch ) : get rid of the hack <nl> + dotnet build - f netstandard1 . 5 - - configuration $ MSBUILD_CONFIG Grpc . IntegrationTesting / project . json <nl> mmm a / tools / run_tests / run_interop_tests . 
py <nl> ppp b / tools / run_tests / run_interop_tests . py <nl> <nl> <nl> _DEFAULT_SERVER_PORT = 8080 <nl> <nl> - _SKIP_COMPRESSION = [ ' large_compressed_unary ' , <nl> - ' server_compressed_streaming ' ] <nl> + _SKIP_CLIENT_COMPRESSION = [ ' client_compressed_unary ' , <nl> + ' client_compressed_streaming ' ] <nl> + <nl> + _SKIP_SERVER_COMPRESSION = [ ' server_compressed_unary ' , <nl> + ' server_compressed_streaming ' ] <nl> + <nl> + _SKIP_COMPRESSION = _SKIP_CLIENT_COMPRESSION + _SKIP_SERVER_COMPRESSION <nl> <nl> _SKIP_ADVANCED = [ ' custom_metadata ' , ' status_code_and_message ' , <nl> ' unimplemented_method ' ] <nl> def global_env ( self ) : <nl> return { } <nl> <nl> def unimplemented_test_cases ( self ) : <nl> - return _SKIP_COMPRESSION <nl> + return _SKIP_SERVER_COMPRESSION <nl> <nl> def unimplemented_test_cases_server ( self ) : <nl> return _SKIP_COMPRESSION <nl> def __str__ ( self ) : <nl> ' cancel_after_begin ' , ' cancel_after_first_response ' , <nl> ' timeout_on_sleeping_server ' , ' custom_metadata ' , <nl> ' status_code_and_message ' , ' unimplemented_method ' , <nl> - ' large_compressed_unary ' , ' server_compressed_streaming ' ] <nl> + ' client_compressed_unary ' , ' server_compressed_unary ' , <nl> + ' client_compressed_streaming ' , ' server_compressed_streaming ' ] <nl> <nl> _AUTH_TEST_CASES = [ ' compute_engine_creds ' , ' jwt_token_creds ' , <nl> ' oauth2_auth_token ' , ' per_rpc_creds ' ] <nl> mmm a / tools / run_tests / run_tests . py <nl> ppp b / tools / run_tests / run_tests . py <nl> def configure ( self , config , args ) : <nl> if self . platform = = ' windows ' : <nl> # Explicitly choosing between x86 and x64 arch doesn ' t work yet <nl> _check_arch ( self . args . arch , [ ' default ' ] ) <nl> + # CoreCLR use 64bit runtime by default . <nl> + arch_option = ' x64 ' if self . args . compiler = = ' coreclr ' else self . args . arch <nl> self . _make_options = [ _windows_toolset_option ( self . args . compiler ) , <nl> - _windows_arch_option ( self . args . arch ) ] <nl> + _windows_arch_option ( arch_option ) ] <nl> else : <nl> - _check_compiler ( self . args . compiler , [ ' default ' ] ) <nl> + _check_compiler ( self . args . compiler , [ ' default ' , ' coreclr ' ] ) <nl> + if self . platform = = ' linux ' and self . args . compiler = = ' coreclr ' : <nl> + self . _docker_distro = ' coreclr ' <nl> + else : <nl> + self . _docker_distro = ' jessie ' <nl> + <nl> if self . platform = = ' mac ' : <nl> - # On Mac , official distribution of mono is 32bit . <nl> # TODO ( jtattermusch ) : EMBED_ZLIB = true currently breaks the mac build <nl> - self . _make_options = [ ' EMBED_OPENSSL = true ' , <nl> - ' CFLAGS = - m32 ' , ' LDFLAGS = - m32 ' ] <nl> + self . _make_options = [ ' EMBED_OPENSSL = true ' ] <nl> + if self . args . compiler ! = ' coreclr ' : <nl> + # On Mac , official distribution of mono is 32bit . <nl> + self . _make_options + = [ ' CFLAGS = - m32 ' , ' LDFLAGS = - m32 ' ] <nl> else : <nl> self . _make_options = [ ' EMBED_OPENSSL = true ' , ' EMBED_ZLIB = true ' ] <nl> <nl> def test_specs ( self ) : <nl> tests_by_assembly = json . load ( f ) <nl> <nl> msbuild_config = _MSBUILD_CONFIG [ self . config . build_config ] <nl> - nunit_args = [ ' - - labels = All ' , <nl> - ' - - noresult ' , <nl> - ' - - workers = 1 ' ] <nl> - if self . platform = = ' windows ' : <nl> + nunit_args = [ ' - - labels = All ' ] <nl> + assembly_subdir = ' bin / % s ' % msbuild_config <nl> + assembly_extension = ' . exe ' <nl> + <nl> + if self . args . 
compiler = = ' coreclr ' : <nl> + if self . platform = = ' linux ' : <nl> + assembly_subdir + = ' / netstandard1 . 5 / debian . 8 - x64 ' <nl> + assembly_extension = ' ' <nl> + if self . platform = = ' mac ' : <nl> + assembly_subdir + = ' / netstandard1 . 5 / osx . 10 . 11 - x64 ' <nl> + assembly_extension = ' ' <nl> + else : <nl> + assembly_subdir + = ' / netstandard1 . 5 / win7 - x64 ' <nl> runtime_cmd = [ ] <nl> else : <nl> - runtime_cmd = [ ' mono ' ] <nl> + nunit_args + = [ ' - - noresult ' , ' - - workers = 1 ' ] <nl> + if self . platform = = ' windows ' : <nl> + runtime_cmd = [ ] <nl> + else : <nl> + runtime_cmd = [ ' mono ' ] <nl> <nl> specs = [ ] <nl> for assembly in tests_by_assembly . iterkeys ( ) : <nl> - assembly_file = ' src / csharp / % s / bin / % s / % s . exe ' % ( assembly , msbuild_config , assembly ) <nl> + assembly_file = ' src / csharp / % s / % s / % s % s ' % ( assembly , <nl> + assembly_subdir , <nl> + assembly , <nl> + assembly_extension ) <nl> if self . config . build_config ! = ' gcov ' or self . platform ! = ' windows ' : <nl> # normally , run each test as a separate process <nl> for test in tests_by_assembly [ assembly ] : <nl> def make_options ( self ) : <nl> return self . _make_options ; <nl> <nl> def build_steps ( self ) : <nl> - if self . platform = = ' windows ' : <nl> - return [ [ _windows_build_bat ( self . args . compiler ) , <nl> - ' src / csharp / Grpc . sln ' , <nl> - ' / p : Configuration = % s ' % _MSBUILD_CONFIG [ self . config . build_config ] ] ] <nl> + if self . args . compiler = = ' coreclr ' : <nl> + if self . platform = = ' windows ' : <nl> + return [ [ ' tools \ \ run_tests \ \ build_csharp_coreclr . bat ' ] ] <nl> + else : <nl> + return [ [ ' tools / run_tests / build_csharp_coreclr . sh ' ] ] <nl> else : <nl> - return [ [ ' tools / run_tests / build_csharp . sh ' ] ] <nl> + if self . platform = = ' windows ' : <nl> + return [ [ _windows_build_bat ( self . args . compiler ) , <nl> + ' src / csharp / Grpc . sln ' , <nl> + ' / p : Configuration = % s ' % _MSBUILD_CONFIG [ self . config . build_config ] ] ] <nl> + else : <nl> + return [ [ ' tools / run_tests / build_csharp . sh ' ] ] <nl> <nl> def post_tests_steps ( self ) : <nl> if self . platform = = ' windows ' : <nl> def makefile_name ( self ) : <nl> return ' Makefile ' <nl> <nl> def dockerfile_dir ( self ) : <nl> - return ' tools / dockerfile / test / csharp_jessie_ % s ' % _docker_arch_suffix ( self . args . arch ) <nl> + return ' tools / dockerfile / test / csharp_ % s_ % s ' % ( self . _docker_distro , <nl> + _docker_arch_suffix ( self . args . arch ) ) <nl> <nl> def __str__ ( self ) : <nl> return ' csharp ' <nl> def _check_arch_option ( arch ) : <nl> <nl> def _windows_build_bat ( compiler ) : <nl> " " " Returns name of build . bat for selected compiler . " " " <nl> - if compiler = = ' default ' or compiler = = ' vs2013 ' : <nl> + # For CoreCLR , fall back to the default compiler for C core <nl> + if compiler = = ' default ' or compiler = = ' vs2013 ' or compiler = = ' coreclr ' : <nl> return ' vsprojects \ \ build_vs2013 . bat ' <nl> elif compiler = = ' vs2015 ' : <nl> return ' vsprojects \ \ build_vs2015 . bat ' <nl> def _windows_build_bat ( compiler ) : <nl> <nl> def _windows_toolset_option ( compiler ) : <nl> " " " Returns msbuild PlatformToolset for selected compiler . 
" " " <nl> - if compiler = = ' default ' or compiler = = ' vs2013 ' : <nl> + # For CoreCLR , fall back to the default compiler for C core <nl> + if compiler = = ' default ' or compiler = = ' vs2013 ' or compiler = = ' coreclr ' : <nl> return ' / p : PlatformToolset = v120 ' <nl> elif compiler = = ' vs2015 ' : <nl> return ' / p : PlatformToolset = v140 ' <nl> def runs_per_test_type ( arg_str ) : <nl> ' clang3 . 4 ' , ' clang3 . 5 ' , ' clang3 . 6 ' , ' clang3 . 7 ' , <nl> ' vs2010 ' , ' vs2013 ' , ' vs2015 ' , <nl> ' python2 . 7 ' , ' python3 . 4 ' , <nl> - ' node0 . 12 ' , ' node4 ' , ' node5 ' ] , <nl> + ' node0 . 12 ' , ' node4 ' , ' node5 ' , <nl> + ' coreclr ' ] , <nl> default = ' default ' , <nl> help = ' Selects compiler to use . Allowed values depend on the platform and language . ' ) <nl> argp . add_argument ( ' - - build_only ' , <nl> mmm a / tools / run_tests / sources_and_headers . json <nl> ppp b / tools / run_tests / sources_and_headers . json <nl> <nl> " language " : " c + + " , <nl> " name " : " interop_server_main " , <nl> " src " : [ <nl> - " test / cpp / interop / server_main . cc " <nl> + " test / cpp / interop / interop_server . cc " <nl> ] , <nl> " third_party " : false , <nl> " type " : " lib " <nl> mmm a / vsprojects / vcxproj / interop_server_main / interop_server_main . vcxproj <nl> ppp b / vsprojects / vcxproj / interop_server_main / interop_server_main . vcxproj <nl> <nl> < / ClCompile > <nl> < ClInclude Include = " $ ( SolutionDir ) \ . . \ src \ proto \ grpc \ testing \ test . grpc . pb . h " > <nl> < / ClInclude > <nl> - < ClCompile Include = " $ ( SolutionDir ) \ . . \ test \ cpp \ interop \ server_main . cc " > <nl> + < ClCompile Include = " $ ( SolutionDir ) \ . . \ test \ cpp \ interop \ interop_server . cc " > <nl> < / ClCompile > <nl> < / ItemGroup > <nl> < ItemGroup > <nl> mmm a / vsprojects / vcxproj / interop_server_main / interop_server_main . vcxproj . filters <nl> ppp b / vsprojects / vcxproj / interop_server_main / interop_server_main . vcxproj . filters <nl> <nl> < ClCompile Include = " $ ( SolutionDir ) \ . . \ src \ proto \ grpc \ testing \ test . proto " > <nl> < Filter > src \ proto \ grpc \ testing < / Filter > <nl> < / ClCompile > <nl> - < ClCompile Include = " $ ( SolutionDir ) \ . . \ test \ cpp \ interop \ server_main . cc " > <nl> + < ClCompile Include = " $ ( SolutionDir ) \ . . \ test \ cpp \ interop \ interop_server . cc " > <nl> < Filter > test \ cpp \ interop < / Filter > <nl> < / ClCompile > <nl> < / ItemGroup > <nl> | Merge remote - tracking branch ' upstream / master ' into fixes | grpc/grpc | 17c5da2bfcb8d1197c2f050568df270077afa252 | 2016-06-23T17:44:03Z |
mmm a / src / clustering / immediate_consistency / query / master_access . cc <nl> ppp b / src / clustering / immediate_consistency / query / master_access . cc <nl> void master_access_t : : read ( <nl> mailbox_t < void ( boost : : variant < read_response_t , std : : string > ) > <nl> result_or_failure_mailbox ( <nl> mailbox_manager , <nl> - std : : bind ( & promise_t < boost : : variant < read_response_t , <nl> - std : : string > > : : pulse , <nl> + std : : bind ( & promise_t < boost : : variant < read_response_t , std : : string > > : : pulse , <nl> & result_or_failure , ph : : _1 ) ) ; <nl> <nl> wait_interruptible ( token , interruptor ) ; <nl> mmm a / src / rpc / connectivity / cluster . cc <nl> ppp b / src / rpc / connectivity / cluster . cc <nl> void connectivity_cluster_t : : send_message ( peer_id_t dest , send_message_write_cal <nl> / / We could be on any thread here ! Oh no ! <nl> std : : vector < char > buffer_data ; <nl> buffer . swap ( & buffer_data ) ; <nl> - vector_read_stream_t read_stream ( std : : move ( buffer_data ) ) ; <nl> - current_run - > message_handler - > on_message ( me , & read_stream ) ; <nl> + current_run - > message_handler - > on_local_message ( me , std : : move ( buffer_data ) ) ; <nl> } else { <nl> guarantee ( dest ! = me ) ; <nl> on_thread_t threader ( conn_structure - > conn - > home_thread ( ) ) ; <nl> new file mode 100644 <nl> index 00000000000 . . e3a26c34743 <nl> mmm / dev / null <nl> ppp b / src / rpc / connectivity / messages . cc <nl> <nl> + / / Copyright 2010 - 2014 RethinkDB , all rights reserved . <nl> + # include " rpc / connectivity / messages . hpp " <nl> + <nl> + # include < functional > <nl> + <nl> + # include " arch / runtime / coroutines . hpp " <nl> + # include " containers / archive / vector_stream . hpp " <nl> + # include " rpc / connectivity / cluster . hpp " <nl> + <nl> + <nl> + void message_handler_t : : on_local_message ( peer_id_t source_peer , <nl> + std : : vector < char > & & data ) { <nl> + vector_read_stream_t read_stream ( std : : move ( data ) ) ; <nl> + on_message ( source_peer , & read_stream ) ; <nl> + } <nl> mmm a / src / rpc / connectivity / messages . hpp <nl> ppp b / src / rpc / connectivity / messages . hpp <nl> <nl> # ifndef RPC_CONNECTIVITY_MESSAGES_HPP_ <nl> # define RPC_CONNECTIVITY_MESSAGES_HPP_ <nl> <nl> + # include < vector > <nl> + <nl> class connectivity_service_t ; <nl> class peer_id_t ; <nl> + class read_stream_t ; <nl> class write_stream_t ; <nl> <nl> - # include " containers / archive / string_stream . hpp " <nl> - <nl> namespace boost { <nl> template < class > class function ; <nl> } <nl> class message_service_t { <nl> class message_handler_t { <nl> public : <nl> virtual void on_message ( peer_id_t source_peer , read_stream_t * ) = 0 ; <nl> + <nl> + / / Default implementation . Overwrite to optimize for the local case . <nl> + virtual void on_local_message ( peer_id_t source_peer , std : : vector < char > & & data ) ; <nl> protected : <nl> virtual ~ message_handler_t ( ) { } <nl> } ; <nl> mmm a / src / rpc / mailbox / mailbox . cc <nl> ppp b / src / rpc / mailbox / mailbox . 
cc <nl> raw_mailbox_t * mailbox_manager_t : : mailbox_table_t : : find_mailbox ( raw_mailbox_t : : i <nl> } <nl> } <nl> <nl> - void mailbox_manager_t : : on_message ( peer_id_t source_peer , read_stream_t * stream ) { <nl> - int32_t dest_thread ; <nl> + / / Helper function for on_local_message and on_message <nl> + void read_mailbox_header ( read_stream_t * stream , <nl> + uint64_t * data_length_out , <nl> + int32_t * dest_thread_out , <nl> + raw_mailbox_t : : id_t * dest_mailbox_id_out ) { <nl> uint64_t data_length = 0 ; <nl> + int32_t dest_thread ; <nl> raw_mailbox_t : : id_t dest_mailbox_id ; <nl> { <nl> archive_result_t res = deserialize ( stream , & data_length ) ; <nl> void mailbox_manager_t : : on_message ( peer_id_t source_peer , read_stream_t * stream ) <nl> if ( bad ( res ) ) { throw fake_archive_exc_t ( ) ; } <nl> } <nl> <nl> - / / Read the data from the read stream , so it can be deallocated before we continue <nl> - / / in a coroutine <nl> + * dest_mailbox_id_out = dest_mailbox_id ; <nl> + * data_length_out = data_length ; <nl> + * dest_thread_out = dest_thread ; <nl> + } <nl> + <nl> + void mailbox_manager_t : : on_local_message ( peer_id_t source_peer , std : : vector < char > & & data ) { <nl> + vector_read_stream_t stream ( std : : move ( data ) ) ; <nl> + <nl> + uint64_t data_length ; <nl> + int32_t dest_thread ; <nl> + raw_mailbox_t : : id_t dest_mailbox_id ; <nl> + read_mailbox_header ( & stream , & data_length , & dest_thread , & dest_mailbox_id ) ; <nl> + if ( dest_thread = = raw_mailbox_t : : address_t : : ANY_THREAD ) { <nl> + / / TODO : this will just run the callback on the current thread , maybe do <nl> + / / some load balancing , instead <nl> + dest_thread = get_thread_id ( ) . threadnum ; <nl> + } <nl> + <nl> std : : vector < char > stream_data ; <nl> int64_t stream_data_offset = 0 ; <nl> - / / Special case for ` vector_read_stream_t ` s to avoid copying . <nl> - / / ` connectivity_cluster_t ` gives us a ` vector_read_stream_t ` if the message is <nl> - / / delivered locally . <nl> - vector_read_stream_t * vector_stream = dynamic_cast < vector_read_stream_t * > ( stream ) ; <nl> - if ( vector_stream ! = NULL ) { <nl> - / / Avoid copying the data <nl> - vector_stream - > swap ( & stream_data , & stream_data_offset ) ; <nl> - if ( stream_data . size ( ) - static_cast < uint64_t > ( stream_data_offset ) ! = data_length ) { <nl> - / / Either we got a vector_read_stream_t that contained more data <nl> - / / than just ours ( which shouldn ' t happen ) , or we got a wrong data_length <nl> - / / from the network . <nl> - throw fake_archive_exc_t ( ) ; <nl> - } <nl> - } else { <nl> - stream_data . resize ( data_length ) ; <nl> - int64_t bytes_read = force_read ( stream , stream_data . data ( ) , data_length ) ; <nl> - if ( bytes_read ! = static_cast < int64_t > ( data_length ) ) { <nl> - throw fake_archive_exc_t ( ) ; <nl> - } <nl> + <nl> + stream . swap ( & stream_data , & stream_data_offset ) ; <nl> + if ( stream_data . size ( ) - static_cast < uint64_t > ( stream_data_offset ) ! = data_length ) { <nl> + / / Either we got a vector_read_stream_t that contained more data <nl> + / / than just ours ( which shouldn ' t happen ) , or we got a wrong data_length <nl> + / / from the network . <nl> + throw fake_archive_exc_t ( ) ; <nl> } <nl> <nl> + / / We use ` spawn_now_dangerously ( ) ` to avoid having to heap - allocate ` stream_data ` . 
<nl> + / / Instead we pass in a pointer to our local automatically allocated object <nl> + / / and ` mailbox_read_coroutine ( ) ` moves the data out of it before it yields . <nl> + coro_t : : spawn_now_dangerously ( std : : bind ( & mailbox_manager_t : : mailbox_read_coroutine , <nl> + this , source_peer , <nl> + threadnum_t ( dest_thread ) , dest_mailbox_id , <nl> + & stream_data , stream_data_offset , <nl> + FORCE_YIELD ) ) ; <nl> + } <nl> + <nl> + void mailbox_manager_t : : on_message ( peer_id_t source_peer , read_stream_t * stream ) { <nl> + uint64_t data_length ; <nl> + int32_t dest_thread ; <nl> + raw_mailbox_t : : id_t dest_mailbox_id ; <nl> + read_mailbox_header ( stream , & data_length , & dest_thread , & dest_mailbox_id ) ; <nl> if ( dest_thread = = raw_mailbox_t : : address_t : : ANY_THREAD ) { <nl> - / / TODO : this will just run the callback on the current thread , maybe do some load balancing , instead <nl> dest_thread = get_thread_id ( ) . threadnum ; <nl> } <nl> <nl> + / / Read the data from the read stream , so it can be deallocated before we continue <nl> + / / in a coroutine <nl> + std : : vector < char > stream_data ; <nl> + stream_data . resize ( data_length ) ; <nl> + int64_t bytes_read = force_read ( stream , stream_data . data ( ) , data_length ) ; <nl> + if ( bytes_read ! = static_cast < int64_t > ( data_length ) ) { <nl> + throw fake_archive_exc_t ( ) ; <nl> + } <nl> + <nl> / / We use ` spawn_now_dangerously ( ) ` to avoid having to heap - allocate ` stream_data ` . <nl> / / Instead we pass in a pointer to our local automatically allocated object <nl> / / and ` mailbox_read_coroutine ( ) ` moves the data out of it before it yields . <nl> coro_t : : spawn_now_dangerously ( std : : bind ( & mailbox_manager_t : : mailbox_read_coroutine , <nl> - this , source_peer , threadnum_t ( dest_thread ) , <nl> - dest_mailbox_id , & stream_data , <nl> - stream_data_offset ) ) ; <nl> + this , source_peer , <nl> + threadnum_t ( dest_thread ) , dest_mailbox_id , <nl> + & stream_data , 0 , MAYBE_YIELD ) ) ; <nl> } <nl> <nl> void mailbox_manager_t : : mailbox_read_coroutine ( peer_id_t source_peer , <nl> threadnum_t dest_thread , <nl> raw_mailbox_t : : id_t dest_mailbox_id , <nl> std : : vector < char > * stream_data , <nl> - int64_t stream_data_offset ) { <nl> + int64_t stream_data_offset , <nl> + force_yield_t force_yield ) { <nl> <nl> / / Construct a new stream to use <nl> vector_read_stream_t stream ( std : : move ( * stream_data ) , stream_data_offset ) ; <nl> void mailbox_manager_t : : mailbox_read_coroutine ( peer_id_t source_peer , <nl> bool archive_exception = false ; <nl> { <nl> on_thread_t rethreader ( dest_thread ) ; <nl> + if ( force_yield = = FORCE_YIELD & & rethreader . home_thread ( ) = = get_thread_id ( ) ) { <nl> + / / Yield to avoid problems with reentrancy in case of local <nl> + / / delivery . <nl> + coro_t : : yield ( ) ; <nl> + } <nl> <nl> try { <nl> raw_mailbox_t * mbox = mailbox_tables . get ( ) - > find_mailbox ( dest_mailbox_id ) ; <nl> mmm a / src / rpc / mailbox / mailbox . hpp <nl> ppp b / src / rpc / mailbox / mailbox . 
hpp <nl> <nl> # ifndef RPC_MAILBOX_MAILBOX_HPP_ <nl> # define RPC_MAILBOX_MAILBOX_HPP_ <nl> <nl> - # include < functional > <nl> # include < map > <nl> # include < string > <nl> # include < vector > <nl> class mailbox_manager_t : public message_handler_t { <nl> mailbox_write_callback_t * callback ) ; <nl> <nl> void on_message ( peer_id_t source_peer , read_stream_t * stream ) ; <nl> + void on_local_message ( peer_id_t source_peer , std : : vector < char > & & data ) ; <nl> <nl> + enum force_yield_t { FORCE_YIELD , MAYBE_YIELD } ; <nl> void mailbox_read_coroutine ( peer_id_t source_peer , threadnum_t dest_thread , <nl> raw_mailbox_t : : id_t dest_mailbox_id , <nl> std : : vector < char > * stream_data , <nl> - int64_t stream_data_offset ) ; <nl> + int64_t stream_data_offset , <nl> + force_yield_t force_yield ) ; <nl> } ; <nl> <nl> # endif / * RPC_MAILBOX_MAILBOX_HPP_ * / <nl> | Refactor local cluster message handling . | rethinkdb/rethinkdb | b41a92a1e7379efa7ef722f58f7a6db63710a2c6 | 2014-04-22T01:52:06Z |
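The commit above adds a zero-copy fast path for cluster messages delivered to the local node: message_handler_t grows a virtual on_local_message() whose default implementation just wraps the buffer in a vector_read_stream_t and forwards to on_message(), while mailbox_manager_t overrides it to keep the std::vector and to force a coroutine yield so local delivery cannot re-enter the caller. The following is a cut-down sketch of that shape with stand-in types (read_stream, mailbox_like_handler); it is not the RethinkDB code and leaves out the mailbox header parsing, thread hopping and yield policy.

// Standalone sketch of the default-plus-override pattern (stub types only).
#include <cstdio>
#include <utility>
#include <vector>

struct read_stream {                 // stand-in for vector_read_stream_t
  std::vector<char> buf;
  explicit read_stream(std::vector<char>&& b) : buf(std::move(b)) {}
};

class message_handler {
 public:
  virtual ~message_handler() = default;
  virtual void on_message(int source, read_stream* s) = 0;
  // Default: no special local path, fall back to the stream interface.
  virtual void on_local_message(int source, std::vector<char>&& data) {
    read_stream s(std::move(data));
    on_message(source, &s);
  }
};

class mailbox_like_handler : public message_handler {
 public:
  void on_message(int source, read_stream* s) override {
    std::printf("remote delivery from %d, %zu bytes via stream\n",
                source, s->buf.size());
  }
  // Local fast path: take ownership of the buffer directly, no extra copy.
  void on_local_message(int source, std::vector<char>&& data) override {
    std::printf("local delivery from %d, %zu bytes moved in\n",
                source, data.size());
  }
};

int main() {
  mailbox_like_handler h;
  std::vector<char> local_payload(64, '\0');
  h.on_local_message(0, std::move(local_payload));  // hits the override
  std::vector<char> remote_payload(32, '\0');
  read_stream s(std::move(remote_payload));
  h.on_message(1, &s);                               // normal network path
  return 0;
}

The design point is that the override changes only the local path; remote peers still arrive through the stream-based on_message(), so the wire protocol is untouched.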
mmm a / cocos / scripting / js - bindings / auto / api / jsb_cocos2dx_ui_auto_api . js <nl> ppp b / cocos / scripting / js - bindings / auto / api / jsb_cocos2dx_ui_auto_api . js <nl> jumpToBottomRight : function ( <nl> { <nl> } , <nl> <nl> + / * * <nl> + * @ method setTouchTotalTimeThreshold <nl> + * @ param { float } arg0 <nl> + * / <nl> + setTouchTotalTimeThreshold : function ( <nl> + float <nl> + ) <nl> + { <nl> + } , <nl> + <nl> + / * * <nl> + * @ method getTouchTotalTimeThreshold <nl> + * @ return { float } <nl> + * / <nl> + getTouchTotalTimeThreshold : function ( <nl> + ) <nl> + { <nl> + return 0 ; <nl> + } , <nl> + <nl> / * * <nl> * @ method getScrollBarPositionFromCornerForHorizontal <nl> * @ return { vec2_object } <nl> mmm a / cocos / scripting / js - bindings / auto / jsb_cocos2dx_ui_auto . cpp <nl> ppp b / cocos / scripting / js - bindings / auto / jsb_cocos2dx_ui_auto . cpp <nl> bool js_cocos2dx_ui_ScrollView_jumpToBottomRight ( JSContext * cx , uint32_t argc , j <nl> JS_ReportError ( cx , " js_cocos2dx_ui_ScrollView_jumpToBottomRight : wrong number of arguments : % d , was expecting % d " , argc , 0 ) ; <nl> return false ; <nl> } <nl> + bool js_cocos2dx_ui_ScrollView_setTouchTotalTimeThreshold ( JSContext * cx , uint32_t argc , jsval * vp ) <nl> + { <nl> + JS : : CallArgs args = JS : : CallArgsFromVp ( argc , vp ) ; <nl> + bool ok = true ; <nl> + JS : : RootedObject obj ( cx , args . thisv ( ) . toObjectOrNull ( ) ) ; <nl> + js_proxy_t * proxy = jsb_get_js_proxy ( obj ) ; <nl> + cocos2d : : ui : : ScrollView * cobj = ( cocos2d : : ui : : ScrollView * ) ( proxy ? proxy - > ptr : NULL ) ; <nl> + JSB_PRECONDITION2 ( cobj , cx , false , " js_cocos2dx_ui_ScrollView_setTouchTotalTimeThreshold : Invalid Native Object " ) ; <nl> + if ( argc = = 1 ) { <nl> + double arg0 = 0 ; <nl> + ok & = JS : : ToNumber ( cx , args . get ( 0 ) , & arg0 ) & & ! isnan ( arg0 ) ; <nl> + JSB_PRECONDITION2 ( ok , cx , false , " js_cocos2dx_ui_ScrollView_setTouchTotalTimeThreshold : Error processing arguments " ) ; <nl> + cobj - > setTouchTotalTimeThreshold ( arg0 ) ; <nl> + args . rval ( ) . setUndefined ( ) ; <nl> + return true ; <nl> + } <nl> + <nl> + JS_ReportError ( cx , " js_cocos2dx_ui_ScrollView_setTouchTotalTimeThreshold : wrong number of arguments : % d , was expecting % d " , argc , 1 ) ; <nl> + return false ; <nl> + } <nl> + bool js_cocos2dx_ui_ScrollView_getTouchTotalTimeThreshold ( JSContext * cx , uint32_t argc , jsval * vp ) <nl> + { <nl> + JS : : CallArgs args = JS : : CallArgsFromVp ( argc , vp ) ; <nl> + JS : : RootedObject obj ( cx , args . thisv ( ) . toObjectOrNull ( ) ) ; <nl> + js_proxy_t * proxy = jsb_get_js_proxy ( obj ) ; <nl> + cocos2d : : ui : : ScrollView * cobj = ( cocos2d : : ui : : ScrollView * ) ( proxy ? proxy - > ptr : NULL ) ; <nl> + JSB_PRECONDITION2 ( cobj , cx , false , " js_cocos2dx_ui_ScrollView_getTouchTotalTimeThreshold : Invalid Native Object " ) ; <nl> + if ( argc = = 0 ) { <nl> + double ret = cobj - > getTouchTotalTimeThreshold ( ) ; <nl> + jsval jsret = JSVAL_NULL ; <nl> + jsret = DOUBLE_TO_JSVAL ( ret ) ; <nl> + args . rval ( ) . 
set ( jsret ) ; <nl> + return true ; <nl> + } <nl> + <nl> + JS_ReportError ( cx , " js_cocos2dx_ui_ScrollView_getTouchTotalTimeThreshold : wrong number of arguments : % d , was expecting % d " , argc , 0 ) ; <nl> + return false ; <nl> + } <nl> bool js_cocos2dx_ui_ScrollView_getScrollBarPositionFromCornerForHorizontal ( JSContext * cx , uint32_t argc , jsval * vp ) <nl> { <nl> JS : : CallArgs args = JS : : CallArgsFromVp ( argc , vp ) ; <nl> void js_register_cocos2dx_ui_ScrollView ( JSContext * cx , JS : : HandleObject global ) <nl> JS_FN ( " jumpToTopLeft " , js_cocos2dx_ui_ScrollView_jumpToTopLeft , 0 , JSPROP_PERMANENT | JSPROP_ENUMERATE ) , <nl> JS_FN ( " jumpToPercentHorizontal " , js_cocos2dx_ui_ScrollView_jumpToPercentHorizontal , 1 , JSPROP_PERMANENT | JSPROP_ENUMERATE ) , <nl> JS_FN ( " jumpToBottomRight " , js_cocos2dx_ui_ScrollView_jumpToBottomRight , 0 , JSPROP_PERMANENT | JSPROP_ENUMERATE ) , <nl> + JS_FN ( " setTouchTotalTimeThreshold " , js_cocos2dx_ui_ScrollView_setTouchTotalTimeThreshold , 1 , JSPROP_PERMANENT | JSPROP_ENUMERATE ) , <nl> + JS_FN ( " getTouchTotalTimeThreshold " , js_cocos2dx_ui_ScrollView_getTouchTotalTimeThreshold , 0 , JSPROP_PERMANENT | JSPROP_ENUMERATE ) , <nl> JS_FN ( " getScrollBarPositionFromCornerForHorizontal " , js_cocos2dx_ui_ScrollView_getScrollBarPositionFromCornerForHorizontal , 0 , JSPROP_PERMANENT | JSPROP_ENUMERATE ) , <nl> JS_FN ( " setScrollBarWidth " , js_cocos2dx_ui_ScrollView_setScrollBarWidth , 1 , JSPROP_PERMANENT | JSPROP_ENUMERATE ) , <nl> JS_FN ( " setBounceEnabled " , js_cocos2dx_ui_ScrollView_setBounceEnabled , 1 , JSPROP_PERMANENT | JSPROP_ENUMERATE ) , <nl> mmm a / cocos / scripting / js - bindings / auto / jsb_cocos2dx_ui_auto . hpp <nl> ppp b / cocos / scripting / js - bindings / auto / jsb_cocos2dx_ui_auto . hpp <nl> bool js_cocos2dx_ui_ScrollView_getScrollBarColor ( JSContext * cx , uint32_t argc , j <nl> bool js_cocos2dx_ui_ScrollView_jumpToTopLeft ( JSContext * cx , uint32_t argc , jsval * vp ) ; <nl> bool js_cocos2dx_ui_ScrollView_jumpToPercentHorizontal ( JSContext * cx , uint32_t argc , jsval * vp ) ; <nl> bool js_cocos2dx_ui_ScrollView_jumpToBottomRight ( JSContext * cx , uint32_t argc , jsval * vp ) ; <nl> + bool js_cocos2dx_ui_ScrollView_setTouchTotalTimeThreshold ( JSContext * cx , uint32_t argc , jsval * vp ) ; <nl> + bool js_cocos2dx_ui_ScrollView_getTouchTotalTimeThreshold ( JSContext * cx , uint32_t argc , jsval * vp ) ; <nl> bool js_cocos2dx_ui_ScrollView_getScrollBarPositionFromCornerForHorizontal ( JSContext * cx , uint32_t argc , jsval * vp ) ; <nl> bool js_cocos2dx_ui_ScrollView_setScrollBarWidth ( JSContext * cx , uint32_t argc , jsval * vp ) ; <nl> bool js_cocos2dx_ui_ScrollView_setBounceEnabled ( JSContext * cx , uint32_t argc , jsval * vp ) ; <nl> mmm a / cocos / scripting / lua - bindings / auto / api / ScrollView . lua <nl> ppp b / cocos / scripting / lua - bindings / auto / api / ScrollView . lua <nl> <nl> - - @ param self <nl> - - @ return ScrollView # ScrollView self ( return value : ccui . ScrollView ) <nl> <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + - - brief Set the touch total time threshold < br > <nl> + - - param the touch total time threshold <nl> + - - @ function [ parent = # ScrollView ] setTouchTotalTimeThreshold <nl> + - - @ param self <nl> + - - @ param # float touchTotalTimeThreshold <nl> + - - @ return ScrollView # ScrollView self ( return value : ccui . 
ScrollView ) <nl> + <nl> + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + - - brief Get the touch total time threshold < br > <nl> + - - return the touch total time threshold <nl> + - - @ function [ parent = # ScrollView ] getTouchTotalTimeThreshold <nl> + - - @ param self <nl> + - - @ return float # float ret ( return value : float ) <nl> + <nl> mmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - - brief Get the horizontal scroll bar ' s position from right - top corner . < br > <nl> - - return positionFromCorner <nl> mmm a / cocos / scripting / lua - bindings / auto / lua_cocos2dx_ui_auto . cpp <nl> ppp b / cocos / scripting / lua - bindings / auto / lua_cocos2dx_ui_auto . cpp <nl> int lua_cocos2dx_ui_ScrollView_jumpToBottomRight ( lua_State * tolua_S ) <nl> <nl> return 0 ; <nl> } <nl> + int lua_cocos2dx_ui_ScrollView_setTouchTotalTimeThreshold ( lua_State * tolua_S ) <nl> + { <nl> + int argc = 0 ; <nl> + cocos2d : : ui : : ScrollView * cobj = nullptr ; <nl> + bool ok = true ; <nl> + <nl> + # if COCOS2D_DEBUG > = 1 <nl> + tolua_Error tolua_err ; <nl> + # endif <nl> + <nl> + <nl> + # if COCOS2D_DEBUG > = 1 <nl> + if ( ! tolua_isusertype ( tolua_S , 1 , " ccui . ScrollView " , 0 , & tolua_err ) ) goto tolua_lerror ; <nl> + # endif <nl> + <nl> + cobj = ( cocos2d : : ui : : ScrollView * ) tolua_tousertype ( tolua_S , 1 , 0 ) ; <nl> + <nl> + # if COCOS2D_DEBUG > = 1 <nl> + if ( ! cobj ) <nl> + { <nl> + tolua_error ( tolua_S , " invalid ' cobj ' in function ' lua_cocos2dx_ui_ScrollView_setTouchTotalTimeThreshold ' " , nullptr ) ; <nl> + return 0 ; <nl> + } <nl> + # endif <nl> + <nl> + argc = lua_gettop ( tolua_S ) - 1 ; <nl> + if ( argc = = 1 ) <nl> + { <nl> + double arg0 ; <nl> + <nl> + ok & = luaval_to_number ( tolua_S , 2 , & arg0 , " ccui . ScrollView : setTouchTotalTimeThreshold " ) ; <nl> + if ( ! ok ) <nl> + { <nl> + tolua_error ( tolua_S , " invalid arguments in function ' lua_cocos2dx_ui_ScrollView_setTouchTotalTimeThreshold ' " , nullptr ) ; <nl> + return 0 ; <nl> + } <nl> + cobj - > setTouchTotalTimeThreshold ( arg0 ) ; <nl> + lua_settop ( tolua_S , 1 ) ; <nl> + return 1 ; <nl> + } <nl> + luaL_error ( tolua_S , " % s has wrong number of arguments : % d , was expecting % d \ n " , " ccui . ScrollView : setTouchTotalTimeThreshold " , argc , 1 ) ; <nl> + return 0 ; <nl> + <nl> + # if COCOS2D_DEBUG > = 1 <nl> + tolua_lerror : <nl> + tolua_error ( tolua_S , " # ferror in function ' lua_cocos2dx_ui_ScrollView_setTouchTotalTimeThreshold ' . " , & tolua_err ) ; <nl> + # endif <nl> + <nl> + return 0 ; <nl> + } <nl> + int lua_cocos2dx_ui_ScrollView_getTouchTotalTimeThreshold ( lua_State * tolua_S ) <nl> + { <nl> + int argc = 0 ; <nl> + cocos2d : : ui : : ScrollView * cobj = nullptr ; <nl> + bool ok = true ; <nl> + <nl> + # if COCOS2D_DEBUG > = 1 <nl> + tolua_Error tolua_err ; <nl> + # endif <nl> + <nl> + <nl> + # if COCOS2D_DEBUG > = 1 <nl> + if ( ! tolua_isusertype ( tolua_S , 1 , " ccui . ScrollView " , 0 , & tolua_err ) ) goto tolua_lerror ; <nl> + # endif <nl> + <nl> + cobj = ( cocos2d : : ui : : ScrollView * ) tolua_tousertype ( tolua_S , 1 , 0 ) ; <nl> + <nl> + # if COCOS2D_DEBUG > = 1 <nl> + if ( ! cobj ) <nl> + { <nl> + tolua_error ( tolua_S , " invalid ' cobj ' in function ' lua_cocos2dx_ui_ScrollView_getTouchTotalTimeThreshold ' " , nullptr ) ; <nl> + return 0 ; <nl> + } <nl> + # endif <nl> + <nl> + argc = lua_gettop ( tolua_S ) - 1 ; <nl> + if ( argc = = 0 ) <nl> + { <nl> + if ( ! 
ok ) <nl> + { <nl> + tolua_error ( tolua_S , " invalid arguments in function ' lua_cocos2dx_ui_ScrollView_getTouchTotalTimeThreshold ' " , nullptr ) ; <nl> + return 0 ; <nl> + } <nl> + double ret = cobj - > getTouchTotalTimeThreshold ( ) ; <nl> + tolua_pushnumber ( tolua_S , ( lua_Number ) ret ) ; <nl> + return 1 ; <nl> + } <nl> + luaL_error ( tolua_S , " % s has wrong number of arguments : % d , was expecting % d \ n " , " ccui . ScrollView : getTouchTotalTimeThreshold " , argc , 0 ) ; <nl> + return 0 ; <nl> + <nl> + # if COCOS2D_DEBUG > = 1 <nl> + tolua_lerror : <nl> + tolua_error ( tolua_S , " # ferror in function ' lua_cocos2dx_ui_ScrollView_getTouchTotalTimeThreshold ' . " , & tolua_err ) ; <nl> + # endif <nl> + <nl> + return 0 ; <nl> + } <nl> int lua_cocos2dx_ui_ScrollView_getScrollBarPositionFromCornerForHorizontal ( lua_State * tolua_S ) <nl> { <nl> int argc = 0 ; <nl> int lua_register_cocos2dx_ui_ScrollView ( lua_State * tolua_S ) <nl> tolua_function ( tolua_S , " jumpToTopLeft " , lua_cocos2dx_ui_ScrollView_jumpToTopLeft ) ; <nl> tolua_function ( tolua_S , " jumpToPercentHorizontal " , lua_cocos2dx_ui_ScrollView_jumpToPercentHorizontal ) ; <nl> tolua_function ( tolua_S , " jumpToBottomRight " , lua_cocos2dx_ui_ScrollView_jumpToBottomRight ) ; <nl> + tolua_function ( tolua_S , " setTouchTotalTimeThreshold " , lua_cocos2dx_ui_ScrollView_setTouchTotalTimeThreshold ) ; <nl> + tolua_function ( tolua_S , " getTouchTotalTimeThreshold " , lua_cocos2dx_ui_ScrollView_getTouchTotalTimeThreshold ) ; <nl> tolua_function ( tolua_S , " getScrollBarPositionFromCornerForHorizontal " , lua_cocos2dx_ui_ScrollView_getScrollBarPositionFromCornerForHorizontal ) ; <nl> tolua_function ( tolua_S , " setScrollBarWidth " , lua_cocos2dx_ui_ScrollView_setScrollBarWidth ) ; <nl> tolua_function ( tolua_S , " setBounceEnabled " , lua_cocos2dx_ui_ScrollView_setBounceEnabled ) ; <nl> mmm a / cocos / scripting / lua - bindings / auto / lua_cocos2dx_ui_auto . hpp <nl> ppp b / cocos / scripting / lua - bindings / auto / lua_cocos2dx_ui_auto . hpp <nl> int register_all_cocos2dx_ui ( lua_State * tolua_S ) ; <nl> <nl> <nl> <nl> + <nl> + <nl> <nl> <nl> <nl> | Merge pull request from CocosRobot / update_lua_bindings_1453343722 | cocos2d/cocos2d-x | 2fbf4c1539d17c9568b971487ca4833749873187 | 2016-01-21T02:41:00Z |
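Editor's note on the cocos2d-x row above: it is an auto-generated bindings update that exposes a ScrollView touch-time threshold to script, marshalling a single float through both the SpiderMonkey (JSB) path and the tolua path into a plain native setter/getter pair. The C++ sketch below is only an illustration of the kind of native API those generated bindings assume; the class name ScrollViewSketch, the clamping, and the 0.5f default are hypothetical and are not taken from the cocos2d-x source.

// Illustrative sketch only: assumed shape of the native API wrapped by the
// generated JS/Lua bindings above. ScrollViewSketch and the default value
// are placeholders, not copied from cocos2d-x.
#include <algorithm>

class ScrollViewSketch {
public:
    // Assumed semantics: the total duration of a touch (in seconds) compared
    // against this threshold when deciding how the gesture is handled.
    void setTouchTotalTimeThreshold(float seconds) {
        _touchTotalTimeThreshold = std::max(0.0f, seconds); // reject negative values
    }
    float getTouchTotalTimeThreshold() const {
        return _touchTotalTimeThreshold;
    }

private:
    float _touchTotalTimeThreshold = 0.5f; // assumed default
};

// From script, the names registered by the bindings in the diff are called as:
//   scrollView:setTouchTotalTimeThreshold(0.25)   -- Lua, on ccui.ScrollView
//   scrollView.setTouchTotalTimeThreshold(0.25);  // JS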
mmm a / Telegram / SourceFiles / apiwrap . cpp <nl> ppp b / Telegram / SourceFiles / apiwrap . cpp <nl> Copyright ( c ) 2014 - 2017 John Preston , https : / / desktop . telegram . org <nl> # include " mainwidget . h " <nl> # include " historywidget . h " <nl> # include " localstorage . h " <nl> + # include " auth_session . h " <nl> # include " boxes / confirmbox . h " <nl> # include " window / themes / window_theme . h " <nl> <nl> void ApiWrap : : gotSelfParticipant ( ChannelData * channel , const MTPchannels_Channel <nl> return ; <nl> } <nl> <nl> - const auto & p ( result . c_channels_channelParticipant ( ) ) ; <nl> + auto & p = result . c_channels_channelParticipant ( ) ; <nl> App : : feedUsers ( p . vusers ) ; <nl> <nl> switch ( p . vparticipant . type ( ) ) { <nl> case mtpc_channelParticipantSelf : { <nl> - const auto & d ( p . vparticipant . c_channelParticipantSelf ( ) ) ; <nl> + auto & d = p . vparticipant . c_channelParticipantSelf ( ) ; <nl> channel - > inviter = d . vinviter_id . v ; <nl> channel - > inviteDate = date ( d . vdate ) ; <nl> } break ; <nl> case mtpc_channelParticipantCreator : { <nl> - const auto & d ( p . vparticipant . c_channelParticipantCreator ( ) ) ; <nl> - channel - > inviter = MTP : : authedId ( ) ; <nl> + auto & d = p . vparticipant . c_channelParticipantCreator ( ) ; <nl> + channel - > inviter = AuthSession : : CurrentUserId ( ) ; <nl> channel - > inviteDate = date ( MTP_int ( channel - > date ) ) ; <nl> } break ; <nl> case mtpc_channelParticipantModerator : { <nl> - const auto & d ( p . vparticipant . c_channelParticipantModerator ( ) ) ; <nl> + auto & d = p . vparticipant . c_channelParticipantModerator ( ) ; <nl> channel - > inviter = d . vinviter_id . v ; <nl> channel - > inviteDate = date ( d . vdate ) ; <nl> } break ; <nl> case mtpc_channelParticipantEditor : { <nl> - const auto & d ( p . vparticipant . c_channelParticipantEditor ( ) ) ; <nl> + auto & d = p . vparticipant . c_channelParticipantEditor ( ) ; <nl> channel - > inviter = d . vinviter_id . v ; <nl> channel - > inviteDate = date ( d . vdate ) ; <nl> } break ; <nl> void ApiWrap : : gotStickerSet ( uint64 setId , const MTPmessages_StickerSet & result ) <nl> if ( auto emoji = Ui : : Emoji : : Find ( qs ( pack . vemoticon ) ) ) { <nl> emoji = emoji - > original ( ) ; <nl> auto & stickers = pack . vdocuments . c_vector ( ) . v ; <nl> - <nl> + <nl> StickerPack p ; <nl> p . reserve ( stickers . size ( ) ) ; <nl> for ( auto j = 0 , c = stickers . size ( ) ; j ! = c ; + + j ) { <nl> mmm a / Telegram / SourceFiles / app . cpp <nl> ppp b / Telegram / SourceFiles / app . cpp <nl> Copyright ( c ) 2014 - 2017 John Preston , https : / / desktop . telegram . org <nl> # include " apiwrap . h " <nl> # include " numbers . h " <nl> # include " observer_peer . h " <nl> + # include " auth_session . h " <nl> # include " window / themes / window_theme . h " <nl> # include " window / notifications_manager . h " <nl> # include " platform / platform_notifications_manager . h " <nl> namespace { <nl> w - > notifyClearFast ( ) ; <nl> w - > setupIntro ( ) ; <nl> } <nl> - MTP : : setAuthedId ( 0 ) ; <nl> + AppClass : : Instance ( ) . authSessionDestroy ( ) ; <nl> Local : : reset ( ) ; <nl> Window : : Theme : : Background ( ) - > reset ( ) ; <nl> <nl> namespace { <nl> bool showPhoneChanged = ! isServiceUser ( data - > id ) & & ! d . is_self ( ) & & ( ( showPhone & & data - > contact ) | | ( ! showPhone & & ! data - > contact ) ) ; <nl> if ( minimal ) { <nl> showPhoneChanged = false ; <nl> - showPhone = ! 
isServiceUser ( data - > id ) & & ( data - > id ! = peerFromUser ( MTP : : authedId ( ) ) ) & & ! data - > contact ; <nl> + showPhone = ! isServiceUser ( data - > id ) & & ( data - > id ! = AuthSession : : CurrentUserPeerId ( ) ) & & ! data - > contact ; <nl> } <nl> <nl> / / see also Local : : readPeer <nl> namespace { <nl> update . flags | = UpdateFlag : : UserOnlineChanged ; <nl> } <nl> <nl> - if ( data - > contact < 0 & & ! data - > phone ( ) . isEmpty ( ) & & peerToUser ( data - > id ) ! = MTP : : authedId ( ) ) { <nl> + if ( data - > contact < 0 & & ! data - > phone ( ) . isEmpty ( ) & & data - > id ! = AuthSession : : CurrentUserPeerId ( ) ) { <nl> data - > contact = 0 ; <nl> } <nl> if ( App : : main ( ) ) { <nl> namespace { <nl> UserData * user = App : : userLoaded ( uid ) ; <nl> if ( user ) { <nl> chat - > participants [ user ] = pversion ; <nl> - if ( inviter = = MTP : : authedId ( ) ) { <nl> + if ( inviter = = AuthSession : : CurrentUserId ( ) ) { <nl> chat - > invitedByMe . insert ( user ) ; <nl> } <nl> if ( i - > type ( ) = = mtpc_chatParticipantAdmin ) { <nl> namespace { <nl> chat - > botStatus = 0 ; <nl> } else if ( chat - > participants . find ( user ) = = chat - > participants . end ( ) ) { <nl> chat - > participants [ user ] = ( chat - > participants . isEmpty ( ) ? 1 : chat - > participants . begin ( ) . value ( ) ) ; <nl> - if ( d . vinviter_id . v = = MTP : : authedId ( ) ) { <nl> + if ( d . vinviter_id . v = = AuthSession : : CurrentUserId ( ) ) { <nl> chat - > invitedByMe . insert ( user ) ; <nl> } else { <nl> chat - > invitedByMe . remove ( user ) ; <nl> namespace { <nl> <nl> bool checkEntitiesAndViewsUpdate ( const MTPDmessage & m ) { <nl> auto peerId = peerFromMTP ( m . vto_id ) ; <nl> - if ( m . has_from_id ( ) & & peerToUser ( peerId ) = = MTP : : authedId ( ) ) { <nl> + if ( m . has_from_id ( ) & & peerId = = AuthSession : : CurrentUserPeerId ( ) ) { <nl> peerId = peerFromUser ( m . vfrom_id ) ; <nl> } <nl> if ( auto existing = App : : histItemById ( peerToChannel ( peerId ) , m . vid . v ) ) { <nl> namespace { <nl> template < typename TMTPDclass > <nl> void updateEditedMessage ( const TMTPDclass & m ) { <nl> auto peerId = peerFromMTP ( m . vto_id ) ; <nl> - if ( m . has_from_id ( ) & & peerToUser ( peerId ) = = MTP : : authedId ( ) ) { <nl> + if ( m . has_from_id ( ) & & peerId = = AuthSession : : CurrentUserPeerId ( ) ) { <nl> peerId = peerFromUser ( m . vfrom_id ) ; <nl> } <nl> if ( auto existing = App : : histItemById ( peerToChannel ( peerId ) , m . vid . v ) ) { <nl> namespace { <nl> break ; <nl> } <nl> if ( user - > contact < 1 ) { <nl> - if ( user - > contact < 0 & & ! user - > phone ( ) . isEmpty ( ) & & peerToUser ( user - > id ) ! = MTP : : authedId ( ) ) { <nl> + if ( user - > contact < 0 & & ! user - > phone ( ) . isEmpty ( ) & & user - > id ! = AuthSession : : CurrentUserPeerId ( ) ) { <nl> user - > contact = 0 ; <nl> } <nl> } <nl> namespace { <nl> return App : : gameSet ( game . vid . v , convert , game . vaccess_hash . v , qs ( game . vshort_name ) , qs ( game . vtitle ) , qs ( game . vdescription ) , App : : feedPhoto ( game . vphoto ) , game . has_document ( ) ? App : : feedDocument ( game . vdocument ) : nullptr ) ; <nl> } <nl> <nl> - UserData * curUser ( ) { <nl> - return user ( MTP : : authedId ( ) ) ; <nl> - } <nl> - <nl> PeerData * peer ( const PeerId & id , PeerData : : LoadedStatus restriction ) { <nl> if ( ! id ) return nullptr ; <nl> <nl> mmm a / Telegram / SourceFiles / application . 
cpp <nl> ppp b / Telegram / SourceFiles / application . cpp <nl> Copyright ( c ) 2014 - 2017 John Preston , https : / / desktop . telegram . org <nl> # include " history / history_location_manager . h " <nl> # include " core / task_queue . h " <nl> # include " mtproto / dc_options . h " <nl> + # include " auth_session . h " <nl> <nl> namespace { <nl> <nl> AppClass : : AppClass ( ) : QObject ( ) { <nl> if ( state = = Local : : ReadMapPassNeeded ) { <nl> _window - > setupPasscode ( ) ; <nl> } else { <nl> - if ( MTP : : authedId ( ) ) { <nl> + if ( AuthSession : : Current ( ) ) { <nl> _window - > setupMain ( ) ; <nl> } else { <nl> _window - > setupIntro ( ) ; <nl> bool AppClass : : peerPhotoFail ( PeerId peer , const RPCError & error ) { <nl> } <nl> <nl> void AppClass : : peerClearPhoto ( PeerId id ) { <nl> - if ( MTP : : authedId ( ) & & peerToUser ( id ) = = MTP : : authedId ( ) ) { <nl> + if ( ! AuthSession : : Current ( ) ) return ; <nl> + <nl> + if ( id = = AuthSession : : CurrentUserPeerId ( ) ) { <nl> MTP : : send ( MTPphotos_UpdateProfilePhoto ( MTP_inputPhotoEmpty ( ) ) , rpcDone ( & AppClass : : selfPhotoCleared ) , rpcFail ( & AppClass : : peerPhotoFail , id ) ) ; <nl> } else if ( peerIsChat ( id ) ) { <nl> MTP : : send ( MTPmessages_EditChatPhoto ( peerToBareMTPInt ( id ) , MTP_inputChatPhotoEmpty ( ) ) , rpcDone ( & AppClass : : chatPhotoCleared , id ) , rpcFail ( & AppClass : : peerPhotoFail , id ) ) ; <nl> } else if ( peerIsChannel ( id ) ) { <nl> - if ( ChannelData * channel = App : : channelLoaded ( id ) ) { <nl> + if ( auto channel = App : : channelLoaded ( id ) ) { <nl> MTP : : send ( MTPchannels_EditPhoto ( channel - > inputChannel , MTP_inputChatPhotoEmpty ( ) ) , rpcDone ( & AppClass : : chatPhotoCleared , id ) , rpcFail ( & AppClass : : peerPhotoFail , id ) ) ; <nl> } <nl> } <nl> void AppClass : : killDownloadSessions ( ) { <nl> } <nl> <nl> void AppClass : : photoUpdated ( const FullMsgId & msgId , bool silent , const MTPInputFile & file ) { <nl> - if ( ! App : : self ( ) ) return ; <nl> + if ( ! AuthSession : : Current ( ) ) return ; <nl> <nl> auto i = photoUpdates . find ( msgId ) ; <nl> if ( i ! = photoUpdates . end ( ) ) { <nl> auto id = i . value ( ) ; <nl> - if ( MTP : : authedId ( ) & & peerToUser ( id ) = = MTP : : authedId ( ) ) { <nl> + if ( id = = AuthSession : : CurrentUserPeerId ( ) ) { <nl> MTP : : send ( MTPphotos_UploadProfilePhoto ( file ) , rpcDone ( & AppClass : : selfPhotoDone ) , rpcFail ( & AppClass : : peerPhotoFail , id ) ) ; <nl> } else if ( peerIsChat ( id ) ) { <nl> auto history = App : : history ( id ) ; <nl> void AppClass : : onSwitchTestMode ( ) { <nl> App : : restart ( ) ; <nl> } <nl> <nl> + void AppClass : : authSessionCreate ( UserId userId ) { <nl> + _authSession = std : : make_unique < AuthSession > ( userId ) ; <nl> + } <nl> + <nl> + void AppClass : : authSessionDestroy ( ) { <nl> + _authSession . reset ( ) ; <nl> + } <nl> + <nl> FileUploader * AppClass : : uploader ( ) { <nl> if ( ! _uploader & & ! App : : quitting ( ) ) _uploader = new FileUploader ( ) ; <nl> return _uploader ; <nl> mmm a / Telegram / SourceFiles / application . h <nl> ppp b / Telegram / SourceFiles / application . 
h <nl> namespace MTP { <nl> class DcOptions ; <nl> } / / namespace MTP <nl> <nl> + class AuthSession ; <nl> + <nl> class UpdateChecker ; <nl> class Application : public QApplication { <nl> Q_OBJECT <nl> class AppClass : public QObject , public RPCSender , private base : : Subscriber { <nl> <nl> public : <nl> AppClass ( ) ; <nl> + AppClass ( const AppClass & other ) = delete ; <nl> + AppClass & operator = ( const AppClass & other ) = delete ; <nl> <nl> void joinThreads ( ) ; <nl> ~ AppClass ( ) ; <nl> class AppClass : public QObject , public RPCSender , private base : : Subscriber { <nl> t_assert ( result ! = nullptr ) ; <nl> return * result ; <nl> } <nl> + <nl> MTP : : DcOptions * dcOptions ( ) { <nl> return _dcOptions . get ( ) ; <nl> } <nl> + AuthSession * authSession ( ) { <nl> + return _authSession . get ( ) ; <nl> + } <nl> + void authSessionCreate ( UserId userId ) ; <nl> + void authSessionDestroy ( ) ; <nl> <nl> FileUploader * uploader ( ) ; <nl> void uploadProfilePhoto ( const QImage & tosend , const PeerId & peerId ) ; <nl> public slots : <nl> Translator * _translator = nullptr ; <nl> <nl> std : : unique_ptr < MTP : : DcOptions > _dcOptions ; <nl> + std : : unique_ptr < AuthSession > _authSession ; <nl> <nl> } ; <nl> new file mode 100644 <nl> index 00000000000 . . f4966b06237 <nl> mmm / dev / null <nl> ppp b / Telegram / SourceFiles / auth_session . cpp <nl> <nl> + / * <nl> + This file is part of Telegram Desktop , <nl> + the official desktop version of Telegram messaging app , see https : / / telegram . org <nl> + <nl> + Telegram Desktop is free software : you can redistribute it and / or modify <nl> + it under the terms of the GNU General Public License as published by <nl> + the Free Software Foundation , either version 3 of the License , or <nl> + ( at your option ) any later version . <nl> + <nl> + It is distributed in the hope that it will be useful , <nl> + but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + GNU General Public License for more details . <nl> + <nl> + In addition , as a special exception , the copyright holders give permission <nl> + to link the code of portions of this program with the OpenSSL library . <nl> + <nl> + Full license : https : / / github . com / telegramdesktop / tdesktop / blob / master / LICENSE <nl> + Copyright ( c ) 2014 - 2017 John Preston , https : / / desktop . telegram . org <nl> + * / <nl> + # include " stdafx . h " <nl> + # include " auth_session . h " <nl> + <nl> + # include " application . h " <nl> + <nl> + AuthSession : : AuthSession ( UserId userId ) : _userId ( userId ) { <nl> + t_assert ( _userId ! = 0 ) ; <nl> + } <nl> + <nl> + AuthSession * AuthSession : : Current ( ) { <nl> + return AppClass : : Instance ( ) . authSession ( ) ; <nl> + } <nl> + <nl> + UserData * AuthSession : : CurrentUser ( ) { <nl> + if ( auto userId = CurrentUserId ( ) ) { <nl> + return App : : user ( userId ) ; <nl> + } <nl> + return nullptr ; <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 8ac526c0c9e <nl> mmm / dev / null <nl> ppp b / Telegram / SourceFiles / auth_session . h <nl> <nl> + / * <nl> + This file is part of Telegram Desktop , <nl> + the official desktop version of Telegram messaging app , see https : / / telegram . 
org <nl> + <nl> + Telegram Desktop is free software : you can redistribute it and / or modify <nl> + it under the terms of the GNU General Public License as published by <nl> + the Free Software Foundation , either version 3 of the License , or <nl> + ( at your option ) any later version . <nl> + <nl> + It is distributed in the hope that it will be useful , <nl> + but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + GNU General Public License for more details . <nl> + <nl> + In addition , as a special exception , the copyright holders give permission <nl> + to link the code of portions of this program with the OpenSSL library . <nl> + <nl> + Full license : https : / / github . com / telegramdesktop / tdesktop / blob / master / LICENSE <nl> + Copyright ( c ) 2014 - 2017 John Preston , https : / / desktop . telegram . org <nl> + * / <nl> + # pragma once <nl> + <nl> + class AuthSession { <nl> + public : <nl> + AuthSession ( UserId userId ) ; <nl> + <nl> + AuthSession ( const AuthSession & other ) = delete ; <nl> + AuthSession & operator = ( const AuthSession & other ) = delete ; <nl> + <nl> + static AuthSession * Current ( ) ; <nl> + static UserId CurrentUserId ( ) { <nl> + auto current = Current ( ) ; <nl> + return current ? current - > userId ( ) : 0 ; <nl> + } <nl> + static PeerId CurrentUserPeerId ( ) { <nl> + auto userId = CurrentUserId ( ) ; <nl> + return userId ? peerFromUser ( userId ) : 0 ; <nl> + } <nl> + static UserData * CurrentUser ( ) ; <nl> + <nl> + UserId userId ( ) const { <nl> + return _userId ; <nl> + } <nl> + <nl> + private : <nl> + UserId _userId = 0 ; <nl> + <nl> + } ; <nl> mmm a / Telegram / SourceFiles / boxes / contactsbox . cpp <nl> ppp b / Telegram / SourceFiles / boxes / contactsbox . cpp <nl> Copyright ( c ) 2014 - 2017 John Preston , https : / / desktop . telegram . org <nl> # include " window / themes / window_theme . h " <nl> # include " observer_peer . h " <nl> # include " apiwrap . h " <nl> + # include " auth_session . h " <nl> <nl> QString PeerFloodErrorText ( PeerFloodType type ) { <nl> auto link = textcmdLink ( CreateInternalLinkHttps ( qsl ( " spambot " ) ) , lang ( lng_cant_more_info ) ) ; <nl> ContactsBox : : Inner : : ContactData * ContactsBox : : Inner : : contactData ( Dialogs : : Row * r <nl> data - > disabledChecked = _chat - > participants . contains ( peer - > asUser ( ) ) ; <nl> } <nl> } else if ( _creating = = CreatingGroupGroup ) { <nl> - data - > disabledChecked = ( peerToUser ( peer - > id ) = = MTP : : authedId ( ) ) ; <nl> + data - > disabledChecked = ( peer - > id = = AuthSession : : CurrentUserPeerId ( ) ) ; <nl> } else if ( _channel ) { <nl> - data - > disabledChecked = ( peerToUser ( peer - > id ) = = MTP : : authedId ( ) ) | | _already . contains ( peer - > asUser ( ) ) ; <nl> + data - > disabledChecked = ( peer - > id = = AuthSession : : CurrentUserPeerId ( ) ) | | _already . contains ( peer - > asUser ( ) ) ; <nl> } <nl> } <nl> if ( usingMultiSelect ( ) & & _checkedContacts . contains ( peer ) ) { <nl> mmm a / Telegram / SourceFiles / boxes / sharebox . cpp <nl> ppp b / Telegram / SourceFiles / boxes / sharebox . cpp <nl> Copyright ( c ) 2014 - 2017 John Preston , https : / / desktop . telegram . org <nl> # include " ui / widgets / scroll_area . h " <nl> # include " window / themes / window_theme . h " <nl> # include " boxes / contactsbox . h " <nl> + # include " auth_session . 
h " <nl> <nl> ShareBox : : ShareBox ( QWidget * , CopyCallback & & copyCallback , SubmitCallback & & submitCallback , FilterCallback & & filterCallback ) <nl> : _copyCallback ( std : : move ( copyCallback ) ) <nl> QString appendShareGameScoreUrl ( const QString & url , const FullMsgId & fullId ) { <nl> auto channel = fullId . channel ? App : : channelLoaded ( fullId . channel ) : static_cast < ChannelData * > ( nullptr ) ; <nl> auto channelAccessHash = channel ? channel - > access : 0ULL ; <nl> auto channelAccessHashInts = reinterpret_cast < int32 * > ( & channelAccessHash ) ; <nl> - shareHashDataInts [ 0 ] = MTP : : authedId ( ) ; <nl> + shareHashDataInts [ 0 ] = AuthSession : : CurrentUserId ( ) ; <nl> shareHashDataInts [ 1 ] = fullId . channel ; <nl> shareHashDataInts [ 2 ] = fullId . msg ; <nl> shareHashDataInts [ 3 ] = channelAccessHashInts [ 0 ] ; <nl> void shareGameScoreByHash ( const QString & hash ) { <nl> } <nl> <nl> auto hashDataInts = reinterpret_cast < int32 * > ( hashData . data ( ) ) ; <nl> - if ( hashDataInts [ 0 ] ! = MTP : : authedId ( ) ) { <nl> + if ( hashDataInts [ 0 ] ! = AuthSession : : CurrentUserId ( ) ) { <nl> Ui : : show ( Box < InformBox > ( lang ( lng_share_wrong_user ) ) ) ; <nl> return ; <nl> } <nl> mmm a / Telegram / SourceFiles / dialogswidget . cpp <nl> ppp b / Telegram / SourceFiles / dialogswidget . cpp <nl> Copyright ( c ) 2014 - 2017 John Preston , https : / / desktop . telegram . org <nl> # include " window / themes / window_theme . h " <nl> # include " autoupdater . h " <nl> # include " observer_peer . h " <nl> + # include " auth_session . h " <nl> <nl> namespace { <nl> <nl> void DialogsInner : : contactsReceived ( const QVector < MTPContact > & result ) { <nl> if ( contact . type ( ) ! = mtpc_contact ) continue ; <nl> <nl> auto userId = contact . c_contact ( ) . vuser_id . v ; <nl> - if ( userId = = MTP : : authedId ( ) & & App : : self ( ) ) { <nl> + if ( userId = = AuthSession : : CurrentUserId ( ) & & App : : self ( ) ) { <nl> if ( App : : self ( ) - > contact < 1 ) { <nl> App : : self ( ) - > contact = 1 ; <nl> Notify : : userIsContactChanged ( App : : self ( ) ) ; <nl> mmm a / Telegram / SourceFiles / history . cpp <nl> ppp b / Telegram / SourceFiles / history . cpp <nl> Copyright ( c ) 2014 - 2017 John Preston , https : / / desktop . telegram . org <nl> # include " localstorage . h " <nl> # include " window / top_bar_widget . h " <nl> # include " observer_peer . h " <nl> + # include " auth_session . h " <nl> <nl> namespace { <nl> <nl> HistoryJoined * ChannelHistory : : insertJoinedMessage ( bool unread ) { <nl> if ( ! inviter ) return nullptr ; <nl> <nl> MTPDmessage : : Flags flags = 0 ; <nl> - if ( peerToUser ( inviter - > id ) = = MTP : : authedId ( ) ) { <nl> + if ( inviter - > id = = AuthSession : : CurrentUserPeerId ( ) ) { <nl> unread = false ; <nl> / / } else if ( unread ) { <nl> / / flags | = MTPDmessage : : Flag : : f_unread ; <nl> mmm a / Telegram / SourceFiles / history / history_item . cpp <nl> ppp b / Telegram / SourceFiles / history / history_item . cpp <nl> Copyright ( c ) 2014 - 2017 John Preston , https : / / desktop . telegram . org <nl> # include " styles / style_history . h " <nl> # include " ui / effects / ripple_animation . h " <nl> # include " fileuploader . h " <nl> + # include " auth_session . 
h " <nl> <nl> namespace { <nl> <nl> void HistoryItem : : setId ( MsgId newId ) { <nl> } <nl> <nl> bool HistoryItem : : canEdit ( const QDateTime & cur ) const { <nl> - auto messageToMyself = ( peerToUser ( _history - > peer - > id ) = = MTP : : authedId ( ) ) ; <nl> + auto messageToMyself = ( _history - > peer - > id = = AuthSession : : CurrentUserPeerId ( ) ) ; <nl> auto messageTooOld = messageToMyself ? false : ( date . secsTo ( cur ) > = Global : : EditTimeLimit ( ) ) ; <nl> if ( id < 0 | | messageTooOld ) return false ; <nl> <nl> bool HistoryItem : : canEdit ( const QDateTime & cur ) const { <nl> } <nl> <nl> bool HistoryItem : : canDeleteForEveryone ( const QDateTime & cur ) const { <nl> - auto messageToMyself = ( peerToUser ( _history - > peer - > id ) = = MTP : : authedId ( ) ) ; <nl> + auto messageToMyself = ( _history - > peer - > id = = AuthSession : : CurrentUserPeerId ( ) ) ; <nl> auto messageTooOld = messageToMyself ? false : ( date . secsTo ( cur ) > = Global : : EditTimeLimit ( ) ) ; <nl> if ( id < 0 | | messageToMyself | | messageTooOld ) return false ; <nl> if ( history ( ) - > peer - > isChannel ( ) ) return false ; <nl> mmm a / Telegram / SourceFiles / history / history_message . cpp <nl> ppp b / Telegram / SourceFiles / history / history_message . cpp <nl> Copyright ( c ) 2014 - 2017 John Preston , https : / / desktop . telegram . org <nl> # include " history / history_location_manager . h " <nl> # include " history / history_service_layout . h " <nl> # include " history / history_media_types . h " <nl> + # include " auth_session . h " <nl> # include " styles / style_dialogs . h " <nl> # include " styles / style_widgets . h " <nl> # include " styles / style_history . h " <nl> void HistoryService : : setMessageByAction ( const MTPmessageAction & action ) { <nl> auto & v = d . vusers . c_vector ( ) . v ; <nl> bool foundSelf = false ; <nl> for ( int i = 0 , l = v . size ( ) ; i < l ; + + i ) { <nl> - if ( v . at ( i ) . v = = MTP : : authedId ( ) ) { <nl> + if ( v . at ( i ) . v = = AuthSession : : CurrentUserId ( ) ) { <nl> foundSelf = true ; <nl> break ; <nl> } <nl> HistoryJoined : : HistoryJoined ( History * history , const QDateTime & inviteDate , User <nl> : HistoryService ( history , clientMsgId ( ) , inviteDate , QString ( ) , flags ) { <nl> Links links ; <nl> auto text = ( [ history , inviter , & links ] ( ) { <nl> - if ( peerToUser ( inviter - > id ) = = MTP : : authedId ( ) ) { <nl> + if ( inviter - > id = = AuthSession : : CurrentUserPeerId ( ) ) { <nl> return lang ( history - > isMegagroup ( ) ? lng_action_you_joined_group : lng_action_you_joined ) ; <nl> } <nl> links . push_back ( peerOpenClickHandler ( inviter ) ) ; <nl> mmm a / Telegram / SourceFiles / historywidget . cpp <nl> ppp b / Telegram / SourceFiles / historywidget . cpp <nl> Copyright ( c ) 2014 - 2017 John Preston , https : / / desktop . telegram . org <nl> # include " core / qthelp_regex . h " <nl> # include " ui / widgets / popup_menu . h " <nl> # include " platform / platform_file_dialog . h " <nl> + # include " auth_session . h " <nl> <nl> namespace { <nl> <nl> QString mimeTagFromTag ( const QString & tagId ) { <nl> if ( tagId . 
startsWith ( qstr ( " mention : / / " ) ) ) { <nl> - return tagId + ' : ' + QString : : number ( MTP : : authedId ( ) ) ; <nl> + return tagId + ' : ' + QString : : number ( AuthSession : : CurrentUserId ( ) ) ; <nl> } <nl> return tagId ; <nl> } <nl> class FieldTagMimeProcessor : public Ui : : FlatTextarea : : TagMimeProcessor { <nl> QString tagFromMimeTag ( const QString & mimeTag ) override { <nl> if ( mimeTag . startsWith ( qstr ( " mention : / / " ) ) ) { <nl> auto match = QRegularExpression ( " : ( \ \ d + ) $ " ) . match ( mimeTag ) ; <nl> - if ( ! match . hasMatch ( ) | | match . capturedRef ( 1 ) . toInt ( ) ! = MTP : : authedId ( ) ) { <nl> + if ( ! match . hasMatch ( ) | | match . capturedRef ( 1 ) . toInt ( ) ! = AuthSession : : CurrentUserId ( ) ) { <nl> return QString ( ) ; <nl> } <nl> return mimeTag . mid ( 0 , mimeTag . size ( ) - match . capturedLength ( ) ) ; <nl> bool HistoryWidget : : contentOverlapped ( const QRect & globalRect ) { <nl> } <nl> <nl> void HistoryWidget : : updateReportSpamStatus ( ) { <nl> - if ( ! _peer | | ( _peer - > isUser ( ) & & ( peerToUser ( _peer - > id ) = = MTP : : authedId ( ) | | isNotificationsUser ( _peer - > id ) | | isServiceUser ( _peer - > id ) | | _peer - > asUser ( ) - > botInfo ) ) ) { <nl> + if ( ! _peer | | ( _peer - > isUser ( ) & & ( _peer - > id = = AuthSession : : CurrentUserPeerId ( ) | | isNotificationsUser ( _peer - > id ) | | isServiceUser ( _peer - > id ) | | _peer - > asUser ( ) - > botInfo ) ) ) { <nl> _reportSpamStatus = dbiprsHidden ; <nl> return ; <nl> } else if ( ! _firstLoadRequest & & _history - > isEmpty ( ) ) { <nl> void HistoryWidget : : shareContact ( const PeerId & peer , const QString & phone , const <nl> if ( silentPost ) { <nl> sendFlags | = MTPmessages_SendMedia : : Flag : : f_silent ; <nl> } <nl> - history - > addNewMessage ( MTP_message ( MTP_flags ( flags ) , MTP_int ( newId . msg ) , MTP_int ( showFromName ? MTP : : authedId ( ) : 0 ) , peerToMTP ( peer ) , MTPnullFwdHeader , MTPint ( ) , MTP_int ( replyToId ( ) ) , MTP_int ( unixtime ( ) ) , MTP_string ( " " ) , MTP_messageMediaContact ( MTP_string ( phone ) , MTP_string ( fname ) , MTP_string ( lname ) , MTP_int ( userId ) ) , MTPnullMarkup , MTPnullEntities , MTP_int ( 1 ) , MTPint ( ) ) , NewMessageUnread ) ; <nl> + auto messageFromId = showFromName ? AuthSession : : CurrentUserId ( ) : 0 ; <nl> + history - > addNewMessage ( MTP_message ( MTP_flags ( flags ) , MTP_int ( newId . msg ) , MTP_int ( messageFromId ) , peerToMTP ( peer ) , MTPnullFwdHeader , MTPint ( ) , MTP_int ( replyToId ( ) ) , MTP_int ( unixtime ( ) ) , MTP_string ( " " ) , MTP_messageMediaContact ( MTP_string ( phone ) , MTP_string ( fname ) , MTP_string ( lname ) , MTP_int ( userId ) ) , MTPnullMarkup , MTPnullEntities , MTP_int ( 1 ) , MTPint ( ) ) , NewMessageUnread ) ; <nl> history - > sendRequestId = MTP : : send ( MTPmessages_SendMedia ( MTP_flags ( sendFlags ) , p - > input , MTP_int ( replyTo ) , MTP_inputMediaContact ( MTP_string ( phone ) , MTP_string ( fname ) , MTP_string ( lname ) ) , MTP_long ( randomId ) , MTPnullMarkup ) , App : : main ( ) - > rpcDone ( & MainWidget : : sentUpdatesReceived ) , App : : main ( ) - > rpcFail ( & MainWidget : : sendMessageFail ) , 0 , 0 , history - > sendRequestId ) ; <nl> <nl> App : : historyRegRandom ( randomId , newId ) ; <nl> void HistoryWidget : : sendFileConfirmed ( const FileLoadResultPtr & file ) { <nl> if ( silentPost ) { <nl> flags | = MTPDmessage : : Flag : : f_silent ; <nl> } <nl> + auto messageFromId = showFromName ? 
AuthSession : : CurrentUserId ( ) : 0 ; <nl> if ( file - > type = = SendMediaType : : Photo ) { <nl> - h - > addNewMessage ( MTP_message ( MTP_flags ( flags ) , MTP_int ( newId . msg ) , MTP_int ( showFromName ? MTP : : authedId ( ) : 0 ) , peerToMTP ( file - > to . peer ) , MTPnullFwdHeader , MTPint ( ) , MTP_int ( file - > to . replyTo ) , MTP_int ( unixtime ( ) ) , MTP_string ( " " ) , MTP_messageMediaPhoto ( file - > photo , MTP_string ( file - > caption ) ) , MTPnullMarkup , MTPnullEntities , MTP_int ( 1 ) , MTPint ( ) ) , NewMessageUnread ) ; <nl> + h - > addNewMessage ( MTP_message ( MTP_flags ( flags ) , MTP_int ( newId . msg ) , MTP_int ( messageFromId ) , peerToMTP ( file - > to . peer ) , MTPnullFwdHeader , MTPint ( ) , MTP_int ( file - > to . replyTo ) , MTP_int ( unixtime ( ) ) , MTP_string ( " " ) , MTP_messageMediaPhoto ( file - > photo , MTP_string ( file - > caption ) ) , MTPnullMarkup , MTPnullEntities , MTP_int ( 1 ) , MTPint ( ) ) , NewMessageUnread ) ; <nl> } else if ( file - > type = = SendMediaType : : File ) { <nl> - h - > addNewMessage ( MTP_message ( MTP_flags ( flags ) , MTP_int ( newId . msg ) , MTP_int ( showFromName ? MTP : : authedId ( ) : 0 ) , peerToMTP ( file - > to . peer ) , MTPnullFwdHeader , MTPint ( ) , MTP_int ( file - > to . replyTo ) , MTP_int ( unixtime ( ) ) , MTP_string ( " " ) , MTP_messageMediaDocument ( file - > document , MTP_string ( file - > caption ) ) , MTPnullMarkup , MTPnullEntities , MTP_int ( 1 ) , MTPint ( ) ) , NewMessageUnread ) ; <nl> + h - > addNewMessage ( MTP_message ( MTP_flags ( flags ) , MTP_int ( newId . msg ) , MTP_int ( messageFromId ) , peerToMTP ( file - > to . peer ) , MTPnullFwdHeader , MTPint ( ) , MTP_int ( file - > to . replyTo ) , MTP_int ( unixtime ( ) ) , MTP_string ( " " ) , MTP_messageMediaDocument ( file - > document , MTP_string ( file - > caption ) ) , MTPnullMarkup , MTPnullEntities , MTP_int ( 1 ) , MTPint ( ) ) , NewMessageUnread ) ; <nl> } else if ( file - > type = = SendMediaType : : Audio ) { <nl> if ( ! h - > peer - > isChannel ( ) ) { <nl> flags | = MTPDmessage : : Flag : : f_media_unread ; <nl> } <nl> - h - > addNewMessage ( MTP_message ( MTP_flags ( flags ) , MTP_int ( newId . msg ) , MTP_int ( showFromName ? MTP : : authedId ( ) : 0 ) , peerToMTP ( file - > to . peer ) , MTPnullFwdHeader , MTPint ( ) , MTP_int ( file - > to . replyTo ) , MTP_int ( unixtime ( ) ) , MTP_string ( " " ) , MTP_messageMediaDocument ( file - > document , MTP_string ( file - > caption ) ) , MTPnullMarkup , MTPnullEntities , MTP_int ( 1 ) , MTPint ( ) ) , NewMessageUnread ) ; <nl> + h - > addNewMessage ( MTP_message ( MTP_flags ( flags ) , MTP_int ( newId . msg ) , MTP_int ( messageFromId ) , peerToMTP ( file - > to . peer ) , MTPnullFwdHeader , MTPint ( ) , MTP_int ( file - > to . replyTo ) , MTP_int ( unixtime ( ) ) , MTP_string ( " " ) , MTP_messageMediaDocument ( file - > document , MTP_string ( file - > caption ) ) , MTPnullMarkup , MTPnullEntities , MTP_int ( 1 ) , MTPint ( ) ) , NewMessageUnread ) ; <nl> } <nl> <nl> if ( _peer & & file - > to . peer = = _peer - > id ) { <nl> void HistoryWidget : : sendFileConfirmed ( const FileLoadResultPtr & file ) { <nl> } <nl> <nl> void HistoryWidget : : onPhotoUploaded ( const FullMsgId & newId , bool silent , const MTPInputFile & file ) { <nl> - if ( ! MTP : : authedId ( ) ) return ; <nl> - HistoryItem * item = App : : histItemById ( newId ) ; <nl> - if ( item ) { <nl> + if ( ! 
AuthSession : : Current ( ) ) return ; <nl> + <nl> + if ( auto item = App : : histItemById ( newId ) ) { <nl> uint64 randomId = rand_value < uint64 > ( ) ; <nl> App : : historyRegRandom ( randomId , newId ) ; <nl> History * hist = item - > history ( ) ; <nl> namespace { <nl> } <nl> <nl> void HistoryWidget : : onDocumentUploaded ( const FullMsgId & newId , bool silent , const MTPInputFile & file ) { <nl> - if ( ! MTP : : authedId ( ) ) return ; <nl> - HistoryMessage * item = dynamic_cast < HistoryMessage * > ( App : : histItemById ( newId ) ) ; <nl> - if ( item ) { <nl> - DocumentData * document = item - > getMedia ( ) ? item - > getMedia ( ) - > getDocument ( ) : 0 ; <nl> - if ( document ) { <nl> + if ( ! AuthSession : : Current ( ) ) return ; <nl> + <nl> + if ( auto item = dynamic_cast < HistoryMessage * > ( App : : histItemById ( newId ) ) ) { <nl> + auto media = item - > getMedia ( ) ; <nl> + if ( auto document = media ? media - > getDocument ( ) : nullptr ) { <nl> uint64 randomId = rand_value < uint64 > ( ) ; <nl> App : : historyRegRandom ( randomId , newId ) ; <nl> History * hist = item - > history ( ) ; <nl> void HistoryWidget : : onDocumentUploaded ( const FullMsgId & newId , bool silent , cons <nl> } <nl> <nl> void HistoryWidget : : onThumbDocumentUploaded ( const FullMsgId & newId , bool silent , const MTPInputFile & file , const MTPInputFile & thumb ) { <nl> - if ( ! MTP : : authedId ( ) ) return ; <nl> - HistoryMessage * item = dynamic_cast < HistoryMessage * > ( App : : histItemById ( newId ) ) ; <nl> - if ( item ) { <nl> - DocumentData * document = item - > getMedia ( ) ? item - > getMedia ( ) - > getDocument ( ) : 0 ; <nl> - if ( document ) { <nl> + if ( ! AuthSession : : Current ( ) ) return ; <nl> + <nl> + if ( auto item = dynamic_cast < HistoryMessage * > ( App : : histItemById ( newId ) ) ) { <nl> + auto media = item - > getMedia ( ) ; <nl> + if ( auto document = media ? media - > getDocument ( ) : nullptr ) { <nl> uint64 randomId = rand_value < uint64 > ( ) ; <nl> App : : historyRegRandom ( randomId , newId ) ; <nl> History * hist = item - > history ( ) ; <nl> void HistoryWidget : : onThumbDocumentUploaded ( const FullMsgId & newId , bool silent , <nl> if ( silentPost ) { <nl> sendFlags | = MTPmessages_SendMedia : : Flag : : f_silent ; <nl> } <nl> - auto caption = item - > getMedia ( ) ? item - > getMedia ( ) - > getCaption ( ) : TextWithEntities ( ) ; <nl> + auto caption = media ? media - > getCaption ( ) : TextWithEntities ( ) ; <nl> MTPDinputMediaUploadedThumbDocument : : Flags mediaFlags = 0 ; <nl> auto media = MTP_inputMediaUploadedThumbDocument ( MTP_flags ( mediaFlags ) , file , thumb , MTP_string ( document - > mime ) , _composeDocumentAttributes ( document ) , MTP_string ( caption . text ) , MTPVector < MTPInputDocument > ( ) ) ; <nl> hist - > sendRequestId = MTP : : send ( MTPmessages_SendMedia ( MTP_flags ( sendFlags ) , item - > history ( ) - > peer - > input , MTP_int ( replyTo ) , media , MTP_long ( randomId ) , MTPnullMarkup ) , App : : main ( ) - > rpcDone ( & MainWidget : : sentUpdatesReceived ) , App : : main ( ) - > rpcFail ( & MainWidget : : sendMessageFail ) , 0 , 0 , hist - > sendRequestId ) ; <nl> void HistoryWidget : : onThumbDocumentUploaded ( const FullMsgId & newId , bool silent , <nl> } <nl> <nl> void HistoryWidget : : onPhotoProgress ( const FullMsgId & newId ) { <nl> - if ( ! 
MTP : : authedId ( ) ) return ; <nl> - if ( HistoryItem * item = App : : histItemById ( newId ) ) { <nl> - PhotoData * photo = ( item - > getMedia ( ) & & item - > getMedia ( ) - > type ( ) = = MediaTypePhoto ) ? static_cast < HistoryPhoto * > ( item - > getMedia ( ) ) - > photo ( ) : 0 ; <nl> + if ( ! AuthSession : : Current ( ) ) return ; <nl> + <nl> + if ( auto item = App : : histItemById ( newId ) ) { <nl> + auto photo = ( item - > getMedia ( ) & & item - > getMedia ( ) - > type ( ) = = MediaTypePhoto ) ? static_cast < HistoryPhoto * > ( item - > getMedia ( ) ) - > photo ( ) : nullptr ; <nl> if ( ! item - > isPost ( ) ) { <nl> updateSendAction ( item - > history ( ) , SendAction : : Type : : UploadPhoto , 0 ) ; <nl> } <nl> void HistoryWidget : : onPhotoProgress ( const FullMsgId & newId ) { <nl> } <nl> <nl> void HistoryWidget : : onDocumentProgress ( const FullMsgId & newId ) { <nl> - if ( ! MTP : : authedId ( ) ) return ; <nl> - if ( HistoryItem * item = App : : histItemById ( newId ) ) { <nl> - HistoryMedia * media = item - > getMedia ( ) ; <nl> - DocumentData * doc = media ? media - > getDocument ( ) : 0 ; <nl> + if ( ! AuthSession : : Current ( ) ) return ; <nl> + <nl> + if ( auto item = App : : histItemById ( newId ) ) { <nl> + auto media = item - > getMedia ( ) ; <nl> + auto document = media ? media - > getDocument ( ) : nullptr ; <nl> if ( ! item - > isPost ( ) ) { <nl> - updateSendAction ( item - > history ( ) , ( doc & & doc - > voice ( ) ) ? SendAction : : Type : : UploadVoice : SendAction : : Type : : UploadFile , doc ? doc - > uploadOffset : 0 ) ; <nl> + updateSendAction ( item - > history ( ) , ( document & & document - > voice ( ) ) ? SendAction : : Type : : UploadVoice : SendAction : : Type : : UploadFile , document ? document - > uploadOffset : 0 ) ; <nl> } <nl> Ui : : repaintHistoryItem ( item ) ; <nl> } <nl> } <nl> <nl> void HistoryWidget : : onPhotoFailed ( const FullMsgId & newId ) { <nl> - if ( ! MTP : : authedId ( ) ) return ; <nl> + if ( ! AuthSession : : Current ( ) ) return ; <nl> + <nl> HistoryItem * item = App : : histItemById ( newId ) ; <nl> if ( item ) { <nl> if ( ! item - > isPost ( ) ) { <nl> void HistoryWidget : : onPhotoFailed ( const FullMsgId & newId ) { <nl> } <nl> <nl> void HistoryWidget : : onDocumentFailed ( const FullMsgId & newId ) { <nl> - if ( ! MTP : : authedId ( ) ) return ; <nl> - HistoryItem * item = App : : histItemById ( newId ) ; <nl> - if ( item ) { <nl> - HistoryMedia * media = item - > getMedia ( ) ; <nl> - DocumentData * doc = media ? media - > getDocument ( ) : 0 ; <nl> + if ( ! AuthSession : : Current ( ) ) return ; <nl> + <nl> + if ( auto item = App : : histItemById ( newId ) ) { <nl> + auto media = item - > getMedia ( ) ; <nl> + auto document = media ? media - > getDocument ( ) : nullptr ; <nl> if ( ! item - > isPost ( ) ) { <nl> - updateSendAction ( item - > history ( ) , ( doc & & doc - > voice ( ) ) ? SendAction : : Type : : UploadVoice : SendAction : : Type : : UploadFile , - 1 ) ; <nl> + updateSendAction ( item - > history ( ) , ( document & & document - > voice ( ) ) ? SendAction : : Type : : UploadVoice : SendAction : : Type : : UploadFile , - 1 ) ; <nl> } <nl> Ui : : repaintHistoryItem ( item ) ; <nl> } <nl> void HistoryWidget : : onInlineResultSend ( InlineBots : : Result * result , UserData * bot <nl> flags | = MTPDmessage : : Flag : : f_via_bot_id ; <nl> } <nl> <nl> - UserId messageFromId = showFromName ? MTP : : authedId ( ) : 0 ; <nl> + auto messageFromId = showFromName ? 
AuthSession : : CurrentUserId ( ) : 0 ; <nl> MTPint messageDate = MTP_int ( unixtime ( ) ) ; <nl> UserId messageViaBotId = bot ? peerToUser ( bot - > id ) : 0 ; <nl> MsgId messageId = newId . msg ; <nl> bool HistoryWidget : : sendExistingDocument ( DocumentData * doc , const QString & capti <nl> if ( silentPost ) { <nl> sendFlags | = MTPmessages_SendMedia : : Flag : : f_silent ; <nl> } <nl> - _history - > addNewDocument ( newId . msg , flags , 0 , replyToId ( ) , date ( MTP_int ( unixtime ( ) ) ) , showFromName ? MTP : : authedId ( ) : 0 , doc , caption , MTPnullMarkup ) ; <nl> + auto messageFromId = showFromName ? AuthSession : : CurrentUserId ( ) : 0 ; <nl> + _history - > addNewDocument ( newId . msg , flags , 0 , replyToId ( ) , date ( MTP_int ( unixtime ( ) ) ) , messageFromId , doc , caption , MTPnullMarkup ) ; <nl> <nl> _history - > sendRequestId = MTP : : send ( MTPmessages_SendMedia ( MTP_flags ( sendFlags ) , _peer - > input , MTP_int ( replyToId ( ) ) , MTP_inputMediaDocument ( mtpInput , MTP_string ( caption ) ) , MTP_long ( randomId ) , MTPnullMarkup ) , App : : main ( ) - > rpcDone ( & MainWidget : : sentUpdatesReceived ) , App : : main ( ) - > rpcFail ( & MainWidget : : sendMessageFail ) , 0 , 0 , _history - > sendRequestId ) ; <nl> App : : main ( ) - > finishForwarding ( _history , _silent - > checked ( ) ) ; <nl> void HistoryWidget : : sendExistingPhoto ( PhotoData * photo , const QString & caption ) <nl> if ( silentPost ) { <nl> sendFlags | = MTPmessages_SendMedia : : Flag : : f_silent ; <nl> } <nl> - _history - > addNewPhoto ( newId . msg , flags , 0 , replyToId ( ) , date ( MTP_int ( unixtime ( ) ) ) , showFromName ? MTP : : authedId ( ) : 0 , photo , caption , MTPnullMarkup ) ; <nl> + auto messageFromId = showFromName ? AuthSession : : CurrentUserId ( ) : 0 ; <nl> + _history - > addNewPhoto ( newId . msg , flags , 0 , replyToId ( ) , date ( MTP_int ( unixtime ( ) ) ) , messageFromId , photo , caption , MTPnullMarkup ) ; <nl> <nl> _history - > sendRequestId = MTP : : send ( MTPmessages_SendMedia ( MTP_flags ( sendFlags ) , _peer - > input , MTP_int ( replyToId ( ) ) , MTP_inputMediaPhoto ( MTP_inputPhoto ( MTP_long ( photo - > id ) , MTP_long ( photo - > access ) ) , MTP_string ( caption ) ) , MTP_long ( randomId ) , MTPnullMarkup ) , App : : main ( ) - > rpcDone ( & MainWidget : : sentUpdatesReceived ) , App : : main ( ) - > rpcFail ( & MainWidget : : sendMessageFail ) , 0 , 0 , _history - > sendRequestId ) ; <nl> App : : main ( ) - > finishForwarding ( _history , _silent - > checked ( ) ) ; <nl> mmm a / Telegram / SourceFiles / intro / introwidget . cpp <nl> ppp b / Telegram / SourceFiles / intro / introwidget . cpp <nl> Copyright ( c ) 2014 - 2017 John Preston , https : / / desktop . telegram . org <nl> # include " styles / style_intro . h " <nl> # include " styles / style_window . h " <nl> # include " window / themes / window_theme . h " <nl> + # include " auth_session . h " <nl> <nl> namespace Intro { <nl> <nl> void Widget : : Step : : finish ( const MTPUser & user , QImage photo ) { <nl> <nl> / / " this " is already deleted here by creating the main widget . <nl> if ( ! photo . isNull ( ) ) { <nl> - App : : app ( ) - > uploadProfilePhoto ( photo , MTP : : authedId ( ) ) ; <nl> + App : : app ( ) - > uploadProfilePhoto ( photo , AuthSession : : CurrentUserId ( ) ) ; <nl> } <nl> } <nl> <nl> mmm a / Telegram / SourceFiles / localstorage . cpp <nl> ppp b / Telegram / SourceFiles / localstorage . cpp <nl> Copyright ( c ) 2014 - 2017 John Preston , https : / / desktop . 
telegram . org <nl> # include " mtproto / dc_options . h " <nl> # include " application . h " <nl> # include " apiwrap . h " <nl> + # include " auth_session . h " <nl> <nl> namespace Local { <nl> namespace { <nl> bool _readSetting ( quint32 blockId , QDataStream & stream , int version , ReadSetting <nl> <nl> case dbiUser : { <nl> quint32 dcId ; <nl> - qint32 uid ; <nl> - stream > > uid > > dcId ; <nl> + qint32 userId ; <nl> + stream > > userId > > dcId ; <nl> if ( ! _checkStreamStatus ( stream ) ) return false ; <nl> <nl> - DEBUG_LOG ( ( " MTP Info : user found , dc % 1 , uid % 2 " ) . arg ( dcId ) . arg ( uid ) ) ; <nl> - MTP : : configure ( dcId , uid ) ; <nl> + DEBUG_LOG ( ( " MTP Info : user found , dc % 1 , uid % 2 " ) . arg ( dcId ) . arg ( userId ) ) ; <nl> + MTP : : configure ( dcId ) ; <nl> + <nl> + if ( userId ) { <nl> + AppClass : : Instance ( ) . authSessionCreate ( UserId ( userId ) ) ; <nl> + } <nl> } break ; <nl> <nl> case dbiKey : { <nl> void _writeMtpData ( ) { <nl> size + = keys . size ( ) * ( sizeof ( quint32 ) + sizeof ( quint32 ) + MTP : : AuthKey : : kSize ) ; <nl> <nl> EncryptedDescriptor data ( size ) ; <nl> - data . stream < < quint32 ( dbiUser ) < < qint32 ( MTP : : authedId ( ) ) < < quint32 ( MTP : : maindc ( ) ) ; <nl> + data . stream < < quint32 ( dbiUser ) < < qint32 ( AuthSession : : CurrentUserId ( ) ) < < quint32 ( MTP : : maindc ( ) ) ; <nl> for_const ( auto & key , keys ) { <nl> data . stream < < quint32 ( dbiKey ) < < quint32 ( key - > getDC ( ) ) ; <nl> key - > write ( data . stream ) ; <nl> PeerData * _readPeer ( FileReadDescriptor & from , int32 fileVersion = 0 ) { <nl> } <nl> from . stream > > onlineTill > > contact > > botInfoVersion ; <nl> <nl> - bool showPhone = ! isServiceUser ( user - > id ) & & ( peerToUser ( user - > id ) ! = MTP : : authedId ( ) ) & & ( contact < = 0 ) ; <nl> + bool showPhone = ! isServiceUser ( user - > id ) & & ( user - > id ! = AuthSession : : CurrentUserPeerId ( ) ) & & ( contact < = 0 ) ; <nl> QString pname = ( showPhone & & ! phone . isEmpty ( ) ) ? App : : formatPhone ( phone ) : QString ( ) ; <nl> <nl> if ( ! wasLoaded ) { <nl> PeerData * _readPeer ( FileReadDescriptor & from , int32 fileVersion = 0 ) { <nl> user - > botInfo - > inlinePlaceholder = inlinePlaceholder ; <nl> } <nl> <nl> - if ( peerToUser ( user - > id ) = = MTP : : authedId ( ) ) { <nl> + if ( user - > id = = AuthSession : : CurrentUserPeerId ( ) ) { <nl> user - > input = MTP_inputPeerSelf ( ) ; <nl> user - > inputUser = MTP_inputUserSelf ( ) ; <nl> } else { <nl> mmm a / Telegram / SourceFiles / mainwidget . cpp <nl> ppp b / Telegram / SourceFiles / mainwidget . cpp <nl> Copyright ( c ) 2014 - 2017 John Preston , https : / / desktop . telegram . org <nl> # include " window / player_wrap_widget . h " <nl> # include " styles / style_boxes . h " <nl> # include " mtproto / dc_options . h " <nl> + # include " auth_session . h " <nl> <nl> StackItemSection : : StackItemSection ( std : : unique_ptr < Window : : SectionMemento > & & memento ) : StackItem ( nullptr ) <nl> , _memento ( std : : move ( memento ) ) { <nl> void MainWidget : : finishForwarding ( History * history , bool silent ) { <nl> uint64 randomId = rand_value < uint64 > ( ) ; <nl> if ( genClientSideMessage ) { <nl> FullMsgId newId ( peerToChannel ( history - > peer - > id ) , clientMsgId ( ) ) ; <nl> - HistoryMessage * msg = static_cast < HistoryMessage * > ( _toForward . cbegin ( ) . value ( ) ) ; <nl> - history - > addNewForwarded ( newId . 
msg , flags , date ( MTP_int ( unixtime ( ) ) ) , showFromName ? MTP : : authedId ( ) : 0 , msg ) ; <nl> + auto msg = static_cast < HistoryMessage * > ( _toForward . cbegin ( ) . value ( ) ) ; <nl> + auto messageFromId = showFromName ? AuthSession : : CurrentUserId ( ) : 0 ; <nl> + history - > addNewForwarded ( newId . msg , flags , date ( MTP_int ( unixtime ( ) ) ) , messageFromId , msg ) ; <nl> App : : historyRegRandom ( randomId , newId ) ; <nl> } <nl> if ( forwardFrom ! = i . value ( ) - > history ( ) - > peer ) { <nl> void MainWidget : : deleteHistoryPart ( DeleteHistoryRequest request , const MTPmessag <nl> } <nl> <nl> int32 offset = d . voffset . v ; <nl> - if ( ! MTP : : authedId ( ) ) return ; <nl> + if ( ! AuthSession : : Current ( ) ) return ; <nl> if ( offset < = 0 ) { <nl> cRefReportSpamStatuses ( ) . remove ( peer - > id ) ; <nl> Local : : writeReportSpamStatuses ( ) ; <nl> void MainWidget : : deleteAllFromUserPart ( DeleteAllFromUserParams params , const MTP <nl> } <nl> <nl> int32 offset = d . voffset . v ; <nl> - if ( ! MTP : : authedId ( ) ) return ; <nl> + if ( ! AuthSession : : Current ( ) ) return ; <nl> if ( offset > 0 ) { <nl> MTP : : send ( MTPchannels_DeleteUserHistory ( params . channel - > inputChannel , params . from - > inputUser ) , rpcDone ( & MainWidget : : deleteAllFromUserPart , params ) ) ; <nl> } else if ( History * h = App : : historyLoaded ( params . channel ) ) { <nl> void MainWidget : : sendMessage ( const MessageToSend & message ) { <nl> sendFlags | = MTPmessages_SendMessage : : Flag : : f_clear_draft ; <nl> history - > clearCloudDraft ( ) ; <nl> } <nl> - lastMessage = history - > addNewMessage ( MTP_message ( MTP_flags ( flags ) , MTP_int ( newId . msg ) , MTP_int ( showFromName ? MTP : : authedId ( ) : 0 ) , peerToMTP ( history - > peer - > id ) , MTPnullFwdHeader , MTPint ( ) , MTP_int ( replyTo ) , MTP_int ( unixtime ( ) ) , msgText , media , MTPnullMarkup , localEntities , MTP_int ( 1 ) , MTPint ( ) ) , NewMessageUnread ) ; <nl> + auto messageFromId = showFromName ? AuthSession : : CurrentUserId ( ) : 0 ; <nl> + lastMessage = history - > addNewMessage ( MTP_message ( MTP_flags ( flags ) , MTP_int ( newId . msg ) , MTP_int ( messageFromId ) , peerToMTP ( history - > peer - > id ) , MTPnullFwdHeader , MTPint ( ) , MTP_int ( replyTo ) , MTP_int ( unixtime ( ) ) , msgText , media , MTPnullMarkup , localEntities , MTP_int ( 1 ) , MTPint ( ) ) , NewMessageUnread ) ; <nl> history - > sendRequestId = MTP : : send ( MTPmessages_SendMessage ( MTP_flags ( sendFlags ) , history - > peer - > input , MTP_int ( replyTo ) , msgText , MTP_long ( randomId ) , MTPnullMarkup , sentEntities ) , rpcDone ( & MainWidget : : sentUpdatesReceived , randomId ) , rpcFail ( & MainWidget : : sendMessageFail ) , 0 , 0 , history - > sendRequestId ) ; <nl> } <nl> <nl> void MainWidget : : overviewLoaded ( History * history , const MTPmessages_Messages & re <nl> } <nl> <nl> void MainWidget : : sendReadRequest ( PeerData * peer , MsgId upTo ) { <nl> - if ( ! MTP : : authedId ( ) ) return ; <nl> + if ( ! AuthSession : : Current ( ) ) return ; <nl> if ( peer - > isChannel ( ) ) { <nl> _readRequests . 
insert ( peer , qMakePair ( MTP : : send ( MTPchannels_ReadHistory ( peer - > asChannel ( ) - > inputChannel , MTP_int ( upTo ) ) , rpcDone ( & MainWidget : : channelReadDone , peer ) , rpcFail ( & MainWidget : : readRequestFail , peer ) ) , upTo ) ) ; <nl> } else { <nl> void MainWidget : : serviceNotification ( const TextWithEntities & message , const MTPM <nl> HistoryItem * item = nullptr ; <nl> while ( textSplit ( sendingText , sendingEntities , leftText , leftEntities , MaxMessageSize ) ) { <nl> MTPVector < MTPMessageEntity > localEntities = linksToMTP ( sendingEntities ) ; <nl> - item = App : : histories ( ) . addNewMessage ( MTP_message ( MTP_flags ( flags ) , MTP_int ( clientMsgId ( ) ) , MTP_int ( ServiceUserId ) , MTP_peerUser ( MTP_int ( MTP : : authedId ( ) ) ) , MTPnullFwdHeader , MTPint ( ) , MTPint ( ) , MTP_int ( date ) , MTP_string ( sendingText ) , media , MTPnullMarkup , localEntities , MTPint ( ) , MTPint ( ) ) , NewMessageUnread ) ; <nl> + item = App : : histories ( ) . addNewMessage ( MTP_message ( MTP_flags ( flags ) , MTP_int ( clientMsgId ( ) ) , MTP_int ( ServiceUserId ) , MTP_peerUser ( MTP_int ( AuthSession : : CurrentUserId ( ) ) ) , MTPnullFwdHeader , MTPint ( ) , MTPint ( ) , MTP_int ( date ) , MTP_string ( sendingText ) , media , MTPnullMarkup , localEntities , MTPint ( ) , MTPint ( ) ) , NewMessageUnread ) ; <nl> } <nl> if ( item ) { <nl> _history - > peerMessagesUpdated ( item - > history ( ) - > peer - > id ) ; <nl> void MainWidget : : fillPeerMenu ( PeerData * peer , base : : lambda < QAction * ( const QStrin <nl> } <nl> <nl> void MainWidget : : onViewsIncrement ( ) { <nl> - if ( ! App : : main ( ) | | ! MTP : : authedId ( ) ) return ; <nl> + if ( ! App : : main ( ) | | ! AuthSession : : Current ( ) ) return ; <nl> <nl> for ( ViewsIncrement : : iterator i = _viewsToIncrement . begin ( ) ; i ! = _viewsToIncrement . cend ( ) ; ) { <nl> if ( _viewsIncrementRequests . contains ( i . key ( ) ) ) { <nl> bool MainWidget : : failDifference ( const RPCError & error ) { <nl> } <nl> <nl> void MainWidget : : onGetDifferenceTimeByPts ( ) { <nl> - if ( ! MTP : : authedId ( ) ) return ; <nl> + if ( ! AuthSession : : Current ( ) ) return ; <nl> <nl> auto now = getms ( true ) , wait = 0LL ; <nl> if ( _getDifferenceTimeByPts ) { <nl> void MainWidget : : onGetDifferenceTimeByPts ( ) { <nl> } <nl> <nl> void MainWidget : : onGetDifferenceTimeAfterFail ( ) { <nl> - if ( ! MTP : : authedId ( ) ) return ; <nl> + if ( ! AuthSession : : Current ( ) ) return ; <nl> <nl> auto now = getms ( true ) , wait = 0LL ; <nl> if ( _getDifferenceTimeAfterFail ) { <nl> void MainWidget : : mtpPing ( ) { <nl> <nl> void MainWidget : : start ( const MTPUser & user ) { <nl> int32 uid = user . c_user ( ) . vid . v ; <nl> - if ( MTP : : authedId ( ) ! = uid ) { <nl> - MTP : : setAuthedId ( uid ) ; <nl> + if ( ! uid ) { <nl> + LOG ( ( " MTP Error : incorrect user received " ) ) ; <nl> + App : : logOut ( ) ; <nl> + return ; <nl> + } <nl> + if ( AuthSession : : CurrentUserId ( ) ! = uid ) { <nl> + AppClass : : Instance ( ) . authSessionCreate ( uid ) ; <nl> Local : : writeMtpData ( ) ; <nl> } <nl> <nl> void MainWidget : : checkIdleFinish ( ) { <nl> } <nl> <nl> void MainWidget : : updateReceived ( const mtpPrime * from , const mtpPrime * end ) { <nl> - if ( end < = from | | ! MTP : : authedId ( ) ) return ; <nl> + if ( end < = from | | ! 
AuthSession : : Current ( ) ) return ; <nl> <nl> App : : wnd ( ) - > checkAutoLock ( ) ; <nl> <nl> void MainWidget : : feedUpdates ( const MTPUpdates & updates , uint64 randomId ) { <nl> <nl> / / update before applying skipped <nl> MTPDmessage : : Flags flags = mtpCastFlags ( d . vflags . v ) | MTPDmessage : : Flag : : f_from_id ; <nl> - auto item = App : : histories ( ) . addNewMessage ( MTP_message ( MTP_flags ( flags ) , d . vid , d . is_out ( ) ? MTP_int ( MTP : : authedId ( ) ) : d . vuser_id , MTP_peerUser ( d . is_out ( ) ? d . vuser_id : MTP_int ( MTP : : authedId ( ) ) ) , d . vfwd_from , d . vvia_bot_id , d . vreply_to_msg_id , d . vdate , d . vmessage , MTP_messageMediaEmpty ( ) , MTPnullMarkup , d . has_entities ( ) ? d . ventities : MTPnullEntities , MTPint ( ) , MTPint ( ) ) , NewMessageUnread ) ; <nl> + auto item = App : : histories ( ) . addNewMessage ( MTP_message ( MTP_flags ( flags ) , d . vid , d . is_out ( ) ? MTP_int ( AuthSession : : CurrentUserId ( ) ) : d . vuser_id , MTP_peerUser ( d . is_out ( ) ? d . vuser_id : MTP_int ( AuthSession : : CurrentUserId ( ) ) ) , d . vfwd_from , d . vvia_bot_id , d . vreply_to_msg_id , d . vdate , d . vmessage , MTP_messageMediaEmpty ( ) , MTPnullMarkup , d . has_entities ( ) ? d . ventities : MTPnullEntities , MTPint ( ) , MTPint ( ) ) , NewMessageUnread ) ; <nl> if ( item ) { <nl> _history - > peerMessagesUpdated ( item - > history ( ) - > peer - > id ) ; <nl> } <nl> void MainWidget : : feedUpdates ( const MTPUpdates & updates , uint64 randomId ) { <nl> } <nl> <nl> void MainWidget : : feedUpdate ( const MTPUpdate & update ) { <nl> - if ( ! MTP : : authedId ( ) ) return ; <nl> + if ( ! AuthSession : : Current ( ) ) return ; <nl> <nl> switch ( update . type ( ) ) { <nl> case mtpc_updateNewMessage : { <nl> void MainWidget : : feedUpdate ( const MTPUpdate & update ) { <nl> } else if ( auto channel = App : : channelLoaded ( d . vchat_id . v ) ) { <nl> history = App : : historyLoaded ( channel - > id ) ; <nl> } <nl> - auto user = ( d . vuser_id . v = = MTP : : authedId ( ) ) ? nullptr : App : : userLoaded ( d . vuser_id . v ) ; <nl> + auto user = ( d . vuser_id . v = = AuthSession : : CurrentUserId ( ) ) ? nullptr : App : : userLoaded ( d . vuser_id . v ) ; <nl> if ( history & & user ) { <nl> auto when = requestingDifference ( ) ? 0 : unixtime ( ) ; <nl> App : : histories ( ) . regSendAction ( history , user , d . vaction , when ) ; <nl> void MainWidget : : feedUpdate ( const MTPUpdate & update ) { <nl> App : : markPeerUpdated ( user ) ; <nl> Notify : : peerUpdatedDelayed ( user , Notify : : PeerUpdate : : Flag : : UserOnlineChanged ) ; <nl> } <nl> - if ( d . vuser_id . v = = MTP : : authedId ( ) ) { <nl> + if ( d . vuser_id . v = = AuthSession : : CurrentUserId ( ) ) { <nl> if ( d . vstatus . type ( ) = = mtpc_userStatusOffline | | d . vstatus . type ( ) = = mtpc_userStatusEmpty ) { <nl> updateOnline ( true ) ; <nl> if ( d . vstatus . type ( ) = = mtpc_userStatusOffline ) { <nl> mmm a / Telegram / SourceFiles / mainwindow . cpp <nl> ppp b / Telegram / SourceFiles / mainwindow . cpp <nl> Copyright ( c ) 2014 - 2017 John Preston , https : / / desktop . telegram . org <nl> # include " window / themes / window_theme_warning . h " <nl> # include " window / window_main_menu . h " <nl> # include " core / task_queue . h " <nl> + # include " auth_session . 
h " <nl> <nl> ConnectingWidget : : ConnectingWidget ( QWidget * parent , const QString & text , const QString & reconnect ) : TWidget ( parent ) <nl> , _reconnect ( this , QString ( ) ) { <nl> bool MainWindow : : doWeReadServerHistory ( ) { <nl> } <nl> <nl> void MainWindow : : checkHistoryActivation ( ) { <nl> - if ( _main & & MTP : : authedId ( ) & & doWeReadServerHistory ( ) ) { <nl> + if ( _main & & AuthSession : : Current ( ) & & doWeReadServerHistory ( ) ) { <nl> _main - > markActiveHistoryAsRead ( ) ; <nl> } <nl> } <nl> void MainWindow : : closeEvent ( QCloseEvent * e ) { <nl> App : : quit ( ) ; <nl> } else { <nl> e - > ignore ( ) ; <nl> - if ( ! MTP : : authedId ( ) | | ! Ui : : hideWindowNoQuit ( ) ) { <nl> + if ( ! AuthSession : : Current ( ) | | ! Ui : : hideWindowNoQuit ( ) ) { <nl> App : : quit ( ) ; <nl> } <nl> } <nl> mmm a / Telegram / SourceFiles / mtproto / dc_options . cpp <nl> ppp b / Telegram / SourceFiles / mtproto / dc_options . cpp <nl> void DcOptions : : constructFromSerialized ( const QByteArray & serialized ) { <nl> auto readonly = serialized ; <nl> QBuffer buffer ( & readonly ) ; <nl> if ( ! buffer . open ( QIODevice : : ReadOnly ) ) { <nl> - LOG ( ( " MTP Error : Can ' t open data for DcOptions : : setFromSerialized ( ) " ) ) ; <nl> + LOG ( ( " MTP Error : Can ' t open data for DcOptions : : constructFromSerialized ( ) " ) ) ; <nl> return ; <nl> } <nl> QDataStream stream ( & buffer ) ; <nl> qint32 count = 0 ; <nl> stream > > count ; <nl> if ( stream . status ( ) ! = QDataStream : : Ok ) { <nl> - LOG ( ( " MTP Error : Bad data for DcOptions : : setFromSerialized ( ) " ) ) ; <nl> + LOG ( ( " MTP Error : Bad data for DcOptions : : constructFromSerialized ( ) " ) ) ; <nl> return ; <nl> } <nl> <nl> void DcOptions : : constructFromSerialized ( const QByteArray & serialized ) { <nl> stream . readRawData ( & ip [ 0 ] , ipSize ) ; <nl> <nl> if ( stream . status ( ) ! = QDataStream : : Ok ) { <nl> - LOG ( ( " MTP Error : Bad data inside DcOptions : : setFromSerialized ( ) " ) ) ; <nl> + LOG ( ( " MTP Error : Bad data inside DcOptions : : constructFromSerialized ( ) " ) ) ; <nl> return ; <nl> } <nl> <nl> mmm a / Telegram / SourceFiles / mtproto / dcenter . cpp <nl> ppp b / Telegram / SourceFiles / mtproto / dcenter . cpp <nl> DcenterMap gDCs ; <nl> bool configLoadedOnce = false ; <nl> bool mainDCChanged = false ; <nl> int32 _mainDC = 2 ; <nl> - int32 userId = 0 ; <nl> <nl> typedef QMap < int32 , AuthKeyPtr > _KeysMapForWrite ; <nl> _KeysMapForWrite _keysMapForWrite ; <nl> constexpr auto kEnumerateDcTimeout = 8000 ; / / 8 seconds timeout for help_getConf <nl> <nl> } / / namespace <nl> <nl> - int32 authed ( ) { <nl> - return userId ; <nl> - } <nl> - <nl> - void authed ( int32 uid ) { <nl> - if ( userId ! = uid ) { <nl> - userId = uid ; <nl> - } <nl> - } <nl> - <nl> DcenterMap & DCMap ( ) { <nl> return gDCs ; <nl> } <nl> mmm a / Telegram / SourceFiles / mtproto / dcenter . h <nl> ppp b / Telegram / SourceFiles / mtproto / dcenter . h <nl> int32 mainDC ( ) ; <nl> void logoutOtherDCs ( ) ; <nl> void setDC ( int32 dc , bool firstOnly = false ) ; <nl> <nl> - int32 authed ( ) ; <nl> - void authed ( int32 uid ) ; <nl> - <nl> AuthKeysMap getAuthKeys ( ) ; <nl> void setAuthKey ( int32 dc , AuthKeyPtr key ) ; <nl> <nl> mmm a / Telegram / SourceFiles / mtproto / facade . cpp <nl> ppp b / Telegram / SourceFiles / mtproto / facade . cpp <nl> Full license : https : / / github . 
com / telegramdesktop / tdesktop / blob / master / LICENSE <nl> Copyright ( c ) 2014 - 2017 John Preston , https : / / desktop . telegram . org <nl> * / <nl> # include " stdafx . h " <nl> - <nl> # include " mtproto / facade . h " <nl> <nl> # include " localstorage . h " <nl> + # include " auth_session . h " <nl> <nl> namespace MTP { <nl> <nl> namespace { <nl> MTPSessionResetHandler sessionResetHandler = 0 ; <nl> internal : : GlobalSlotCarrier * _globalSlotCarrier = 0 ; <nl> <nl> + bool hasAuthorization ( ) { <nl> + return ( AuthSession : : Current ( ) ! = nullptr ) ; <nl> + } <nl> + <nl> void importDone ( const MTPauth_Authorization & result , mtpRequestId req ) { <nl> QMutexLocker locker1 ( & requestByDCLock ) ; <nl> <nl> namespace { <nl> if ( i = = requestsByDC . end ( ) ) { <nl> LOG ( ( " MTP Error : auth import request not found in requestsByDC , requestId : % 1 " ) . arg ( req ) ) ; <nl> RPCError error ( internal : : rpcClientError ( " AUTH_IMPORT_FAIL " , QString ( " did not find import request in requestsByDC , request % 1 " ) . arg ( req ) ) ) ; <nl> - if ( globalHandler . onFail & & authedId ( ) ) ( * globalHandler . onFail ) ( req , error ) ; / / auth failed in main dc <nl> + if ( globalHandler . onFail & & hasAuthorization ( ) ) ( * globalHandler . onFail ) ( req , error ) ; / / auth failed in main dc <nl> return ; <nl> } <nl> DcId newdc = bareDcId ( i . value ( ) ) ; <nl> namespace { <nl> bool importFail ( const RPCError & error , mtpRequestId req ) { <nl> if ( isDefaultHandledError ( error ) ) return false ; <nl> <nl> - if ( globalHandler . onFail & & authedId ( ) ) ( * globalHandler . onFail ) ( req , error ) ; / / auth import failed <nl> + if ( globalHandler . onFail & & hasAuthorization ( ) ) ( * globalHandler . onFail ) ( req , error ) ; / / auth import failed <nl> return true ; <nl> } <nl> <nl> namespace { <nl> if ( i = = authExportRequests . cend ( ) ) { <nl> LOG ( ( " MTP Error : auth export request target dcWithShift not found , requestId : % 1 " ) . arg ( req ) ) ; <nl> RPCError error ( internal : : rpcClientError ( " AUTH_IMPORT_FAIL " , QString ( " did not find target dcWithShift , request % 1 " ) . arg ( req ) ) ) ; <nl> - if ( globalHandler . onFail & & authedId ( ) ) ( * globalHandler . onFail ) ( req , error ) ; / / auth failed in main dc <nl> + if ( globalHandler . onFail & & hasAuthorization ( ) ) ( * globalHandler . onFail ) ( req , error ) ; / / auth failed in main dc <nl> return ; <nl> } <nl> <nl> namespace { <nl> if ( i ! = authExportRequests . cend ( ) ) { <nl> authWaiters [ bareDcId ( i . value ( ) ) ] . clear ( ) ; <nl> } <nl> - if ( globalHandler . onFail & & authedId ( ) ) ( * globalHandler . onFail ) ( req , error ) ; / / auth failed in main dc <nl> + if ( globalHandler . onFail & & hasAuthorization ( ) ) ( * globalHandler . onFail ) ( req , error ) ; / / auth failed in main dc <nl> return true ; <nl> } <nl> <nl> namespace { <nl> <nl> DEBUG_LOG ( ( " MTP Info : changing request % 1 from dcWithShift % 2 to dc % 3 " ) . arg ( requestId ) . arg ( dcWithShift ) . arg ( newdcWithShift ) ) ; <nl> if ( dcWithShift < 0 ) { / / newdc shift = 0 <nl> - if ( false & & authedId ( ) & & ! authExportRequests . contains ( requestId ) ) { / / migrate not supported at this moment <nl> + if ( false & & hasAuthorization ( ) & & ! authExportRequests . contains ( requestId ) ) { / / migrate not supported at this moment <nl> DEBUG_LOG ( ( " MTP Info : importing auth to dc % 1 " ) . 
arg ( newdcWithShift ) ) ; <nl> DCAuthWaiters & waiters ( authWaiters [ newdcWithShift ] ) ; <nl> if ( ! waiters . size ( ) ) { <nl> namespace { <nl> } <nl> } <nl> int32 newdc = bareDcId ( qAbs ( dcWithShift ) ) ; <nl> - if ( ! newdc | | newdc = = internal : : mainDC ( ) | | ! authedId ( ) ) { <nl> + if ( ! newdc | | newdc = = internal : : mainDC ( ) | | ! hasAuthorization ( ) ) { <nl> if ( ! badGuestDC & & globalHandler . onFail ) ( * globalHandler . onFail ) ( requestId , error ) ; / / auth failed in main dc <nl> return false ; <nl> } <nl> void restart ( int32 dcMask ) { <nl> } <nl> } <nl> <nl> - void configure ( int32 dc , int32 user ) { <nl> + void configure ( int32 dc ) { <nl> if ( _started ) return ; <nl> internal : : setDC ( dc ) ; <nl> - internal : : authed ( user ) ; <nl> } <nl> <nl> void setdc ( int32 dc , bool fromZeroOnly ) { <nl> void finish ( ) { <nl> _started = false ; <nl> } <nl> <nl> - void setAuthedId ( int32 uid ) { <nl> - internal : : authed ( uid ) ; <nl> - } <nl> - <nl> - int32 authedId ( ) { <nl> - return internal : : authed ( ) ; <nl> - } <nl> - <nl> void logoutKeys ( RPCDoneHandlerPtr onDone , RPCFailHandlerPtr onFail ) { <nl> mtpRequestId req = MTP : : send ( MTPauth_LogOut ( ) , onDone , onFail ) ; <nl> internal : : logoutOtherDCs ( ) ; <nl> mmm a / Telegram / SourceFiles / mtproto / facade . h <nl> ppp b / Telegram / SourceFiles / mtproto / facade . h <nl> class PauseHolder { <nl> <nl> } ; <nl> <nl> - void configure ( int32 dc , int32 user ) ; <nl> + void configure ( int32 dc ) ; <nl> <nl> void setdc ( int32 dc , bool fromZeroOnly = false ) ; <nl> int32 maindc ( ) ; <nl> int32 state ( mtpRequestId req ) ; / / < 0 means waiting for such count of ms <nl> <nl> void finish ( ) ; <nl> <nl> - void setAuthedId ( int32 uid ) ; <nl> - int32 authedId ( ) ; <nl> void logoutKeys ( RPCDoneHandlerPtr onDone , RPCFailHandlerPtr onFail ) ; <nl> <nl> void setGlobalDoneHandler ( RPCDoneHandlerPtr handler ) ; <nl> mmm a / Telegram / SourceFiles / passcodewidget . cpp <nl> ppp b / Telegram / SourceFiles / passcodewidget . cpp <nl> Copyright ( c ) 2014 - 2017 John Preston , https : / / desktop . telegram . org <nl> # include " ui / widgets / input_fields . h " <nl> # include " styles / style_boxes . h " <nl> # include " window / window_slide_animation . h " <nl> + # include " auth_session . h " <nl> <nl> PasscodeWidget : : PasscodeWidget ( QWidget * parent ) : TWidget ( parent ) <nl> , _passcode ( this , st : : passcodeInput , lang ( lng_passcode_ph ) ) <nl> void PasscodeWidget : : onSubmit ( ) { <nl> cSetPasscodeBadTries ( 0 ) ; <nl> <nl> MTP : : start ( ) ; <nl> - if ( MTP : : authedId ( ) ) { <nl> + if ( AuthSession : : Current ( ) ) { <nl> App : : wnd ( ) - > setupMain ( ) ; <nl> } else { <nl> App : : wnd ( ) - > setupIntro ( ) ; <nl> mmm a / Telegram / SourceFiles / profile / profile_block_actions . cpp <nl> ppp b / Telegram / SourceFiles / profile / profile_block_actions . cpp <nl> Copyright ( c ) 2014 - 2017 John Preston , https : / / desktop . telegram . org <nl> # include " mainwidget . h " <nl> # include " observer_peer . h " <nl> # include " apiwrap . h " <nl> + # include " auth_session . h " <nl> # include " lang . h " <nl> <nl> namespace Profile { <nl> void ActionsWidget : : refreshVisibility ( ) { <nl> <nl> QString ActionsWidget : : getBlockButtonText ( ) const { <nl> auto user = peer ( ) - > asUser ( ) ; <nl> - if ( ! user | | ( user - > id = = peerFromUser ( MTP : : authedId ( ) ) ) ) return QString ( ) ; <nl> + if ( ! 
user | | ( user - > id = = AuthSession : : CurrentUserPeerId ( ) ) ) return QString ( ) ; <nl> if ( user - > blockStatus ( ) = = UserData : : BlockStatus : : Unknown ) return QString ( ) ; <nl> <nl> if ( user - > isBlocked ( ) ) { <nl> mmm a / Telegram / SourceFiles / profile / profile_block_group_members . cpp <nl> ppp b / Telegram / SourceFiles / profile / profile_block_group_members . cpp <nl> Copyright ( c ) 2014 - 2017 John Preston , https : / / desktop . telegram . org <nl> # include " mainwidget . h " <nl> # include " apiwrap . h " <nl> # include " observer_peer . h " <nl> + # include " auth_session . h " <nl> # include " lang . h " <nl> <nl> namespace Profile { <nl> void GroupMembersWidget : : setItemFlags ( Item * item , ChatData * chat ) { <nl> auto isCreator = ( peerFromUser ( chat - > creator ) = = item - > peer - > id ) ; <nl> auto isAdmin = chat - > admins . contains ( user ) ; <nl> item - > hasAdminStar = isCreator | | isAdmin ; <nl> - if ( item - > peer - > id = = peerFromUser ( MTP : : authedId ( ) ) ) { <nl> + if ( item - > peer - > id = = AuthSession : : CurrentUserPeerId ( ) ) { <nl> item - > hasRemoveLink = false ; <nl> } else if ( chat - > amCreator ( ) | | ( chat - > amAdmin ( ) & & ! item - > hasAdminStar ) ) { <nl> item - > hasRemoveLink = true ; <nl> bool GroupMembersWidget : : addUsersToEnd ( ChannelData * megagroup ) { <nl> } <nl> <nl> void GroupMembersWidget : : setItemFlags ( Item * item , ChannelData * megagroup ) { <nl> - auto amCreatorOrAdmin = ( peerToUser ( item - > peer - > id ) = = MTP : : authedId ( ) ) & & ( megagroup - > amCreator ( ) | | megagroup - > amEditor ( ) ) ; <nl> + auto amCreatorOrAdmin = ( item - > peer - > id = = AuthSession : : CurrentUserPeerId ( ) ) & & ( megagroup - > amCreator ( ) | | megagroup - > amEditor ( ) ) ; <nl> auto isAdmin = megagroup - > mgInfo - > lastAdmins . contains ( getMember ( item ) - > user ( ) ) ; <nl> item - > hasAdminStar = amCreatorOrAdmin | | isAdmin ; <nl> if ( item - > peer - > isSelf ( ) ) { <nl> mmm a / Telegram / SourceFiles / structs . cpp <nl> ppp b / Telegram / SourceFiles / structs . cpp <nl> Copyright ( c ) 2014 - 2017 John Preston , https : / / desktop . telegram . org <nl> # include " history / history_media_types . h " <nl> # include " styles / style_history . h " <nl> # include " window / themes / window_theme . h " <nl> + # include " auth_session . h " <nl> <nl> namespace { <nl> <nl> int peerColorIndex ( const PeerId & peer ) { <nl> - auto myId = MTP : : authedId ( ) ; <nl> + auto myId = AuthSession : : CurrentUserId ( ) ; <nl> auto peerId = peerToBareInt ( peer ) ; <nl> auto both = ( QByteArray : : number ( peerId ) + QByteArray : : number ( myId ) ) . mid ( 0 , 15 ) ; <nl> uchar md5 [ 16 ] ; <nl> mmm a / Telegram / SourceFiles / structs . h <nl> ppp b / Telegram / SourceFiles / structs . h <nl> inline MTPpeer peerToMTP ( const PeerId & id ) { <nl> return MTP_peerUser ( MTP_int ( 0 ) ) ; <nl> } <nl> inline PeerId peerFromMessage ( const MTPmessage & msg ) { <nl> - PeerId from_id = 0 , to_id = 0 ; <nl> + auto compute = [ ] ( auto & message ) { <nl> + auto from_id = message . has_from_id ( ) ? peerFromUser ( message . vfrom_id ) : 0 ; <nl> + auto to_id = peerFromMTP ( message . vto_id ) ; <nl> + auto out = message . is_out ( ) ; <nl> + return ( out | | ! peerIsUser ( to_id ) ) ? to_id : from_id ; <nl> + } ; <nl> switch ( msg . type ( ) ) { <nl> - case mtpc_message : <nl> - from_id = msg . c_message ( ) . has_from_id ( ) ? peerFromUser ( msg . c_message ( ) . 
vfrom_id ) : 0 ; <nl> - to_id = peerFromMTP ( msg . c_message ( ) . vto_id ) ; <nl> - break ; <nl> - case mtpc_messageService : <nl> - from_id = msg . c_messageService ( ) . has_from_id ( ) ? peerFromUser ( msg . c_messageService ( ) . vfrom_id ) : 0 ; <nl> - to_id = peerFromMTP ( msg . c_messageService ( ) . vto_id ) ; <nl> - break ; <nl> - } <nl> - return ( from_id & & peerToUser ( to_id ) = = MTP : : authedId ( ) ) ? from_id : to_id ; <nl> + case mtpc_message : return compute ( msg . c_message ( ) ) ; <nl> + case mtpc_messageService : return compute ( msg . c_messageService ( ) ) ; <nl> + } <nl> + return 0 ; <nl> } <nl> inline MTPDmessage : : Flags flagsFromMessage ( const MTPmessage & msg ) { <nl> switch ( msg . type ( ) ) { <nl> mmm a / Telegram / SourceFiles / ui / text / text_entity . cpp <nl> ppp b / Telegram / SourceFiles / ui / text / text_entity . cpp <nl> Copyright ( c ) 2014 - 2017 John Preston , https : / / desktop . telegram . org <nl> # include " stdafx . h " <nl> # include " ui / text / text_entity . h " <nl> <nl> + # include " auth_session . h " <nl> + <nl> namespace { <nl> <nl> const QRegularExpression _reDomain ( QString : : fromUtf8 ( " ( ? < ! [ \ \ w \ \ $ \ \ - \ \ _ % = \ \ . ] ) ( ? : ( [ a - zA - Z ] + ) : / / ) ? ( ( ? : [ A - Za - z " " \ xd0 \ x90 - \ xd0 \ xaf " " \ xd0 \ xb0 - \ xd1 \ x8f " " \ xd1 \ x91 \ xd0 \ x81 " " 0 - 9 \ \ - \ \ _ ] + \ \ . ) { 1 , 10 } ( [ A - Za - z " " \ xd1 \ x80 \ xd1 \ x84 " " \ \ - \ \ d ] { 2 , 22 } ) ( \ \ : \ \ d + ) ? ) " ) , QRegularExpression : : UseUnicodePropertiesOption ) ; <nl> EntitiesInText entitiesFromMTP ( const QVector < MTPMessageEntity > & entities ) { <nl> const auto & d ( entity . c_inputMessageEntityMentionName ( ) ) ; <nl> auto data = ( [ & d ] ( ) - > QString { <nl> if ( d . vuser_id . type ( ) = = mtpc_inputUserSelf ) { <nl> - return QString : : number ( MTP : : authedId ( ) ) ; <nl> + return QString : : number ( AuthSession : : CurrentUserId ( ) ) ; <nl> } else if ( d . vuser_id . type ( ) = = mtpc_inputUser ) { <nl> const auto & user ( d . vuser_id . c_inputUser ( ) ) ; <nl> return QString : : number ( user . vuser_id . v ) + ' . ' + QString : : number ( user . vaccess_hash . v ) ; <nl> MTPVector < MTPMessageEntity > linksToMTP ( const EntitiesInText & links , bool sending <nl> UserId userId = 0 ; <nl> uint64 accessHash = 0 ; <nl> if ( mentionNameToFields ( data , & userId , & accessHash ) ) { <nl> - if ( userId = = MTP : : authedId ( ) ) { <nl> + if ( userId = = AuthSession : : CurrentUserId ( ) ) { <nl> return MTP_inputUserSelf ( ) ; <nl> } <nl> return MTP_inputUser ( MTP_int ( userId ) , MTP_long ( accessHash ) ) ; <nl> mmm a / Telegram / gyp / Telegram . gyp <nl> ppp b / Telegram / gyp / Telegram . gyp <nl> <nl> ' < ( src_loc ) / app . h ' , <nl> ' < ( src_loc ) / application . cpp ' , <nl> ' < ( src_loc ) / application . h ' , <nl> + ' < ( src_loc ) / auth_session . cpp ' , <nl> + ' < ( src_loc ) / auth_session . h ' , <nl> ' < ( src_loc ) / autoupdater . cpp ' , <nl> ' < ( src_loc ) / autoupdater . h ' , <nl> ' < ( src_loc ) / config . h ' , <nl> | Moved MTP : : authedId ( ) to AuthSession : : Current ( ) . | telegramdesktop/tdesktop | 63c61637f81bb53c383343f9ad3e2a9cc9423968 | 2017-02-25T16:48:19Z |
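The tdesktop change above replaces checks against a raw authed user id (MTP::authedId()) with a nullable session object (AuthSession::Current() / CurrentUserId()). The following is a minimal standalone sketch of that pattern, not the real tdesktop implementation: the Create/Destroy helpers and the class internals are assumptions added for illustration, only the Current()/CurrentUserId() call shape follows the diff.

// Minimal sketch (not the real tdesktop code): a nullable session singleton
// replacing a bare "authed user id" global, mirroring the refactor above.
#include <cstdint>
#include <iostream>
#include <memory>

class AuthSession {
public:
    explicit AuthSession(int32_t userId) : _userId(userId) {}

    // Returns nullptr when no one is logged in, so callers can write
    // "if (!AuthSession::Current()) return;" instead of testing a raw id.
    static AuthSession* Current() { return _current.get(); }
    static int32_t CurrentUserId() { return _current ? _current->_userId : 0; }

    // Hypothetical helpers standing in for session creation/teardown.
    static void Create(int32_t userId) { _current = std::make_unique<AuthSession>(userId); }
    static void Destroy() { _current.reset(); }

private:
    int32_t _userId;
    static std::unique_ptr<AuthSession> _current;
};

std::unique_ptr<AuthSession> AuthSession::_current;

int main() {
    // Logged out: guards bail out early, as in the updated call sites above.
    if (!AuthSession::Current()) std::cout << "no session\n";

    AuthSession::Create(12345);                          // cf. authSessionCreate(uid)
    std::cout << AuthSession::CurrentUserId() << "\n";   // prints 12345

    AuthSession::Destroy();
    std::cout << (AuthSession::Current() ? "still here" : "logged out") << "\n";
    return 0;
}

The design point the diff relies on is that "is someone logged in" becomes a pointer null-check rather than a sentinel value of zero, which is why call sites change from `!MTP::authedId()` to `!AuthSession::Current()`.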
mmm a / src / core / hle / kernel / process_capability . cpp <nl> ppp b / src / core / hle / kernel / process_capability . cpp <nl> void ProcessCapabilities : : Clear ( ) { <nl> } <nl> <nl> ResultCode ProcessCapabilities : : HandlePriorityCoreNumFlags ( u32 flags ) { <nl> - / / TODO : Implement <nl> + if ( priority_mask ! = 0 | | core_mask ! = 0 ) { <nl> + return ERR_INVALID_CAPABILITY_DESCRIPTOR ; <nl> + } <nl> + <nl> + const u32 core_num_min = ( flags > > 16 ) & 0xFF ; <nl> + const u32 core_num_max = ( flags > > 24 ) & 0xFF ; <nl> + if ( core_num_min > core_num_max ) { <nl> + return ERR_INVALID_COMBINATION ; <nl> + } <nl> + <nl> + const u32 priority_min = ( flags > > 10 ) & 0x3F ; <nl> + const u32 priority_max = ( flags > > 4 ) & 0x3F ; <nl> + if ( priority_min > priority_max ) { <nl> + return ERR_INVALID_COMBINATION ; <nl> + } <nl> + <nl> + / / The switch only has 4 usable cores . <nl> + if ( core_num_max > = 4 ) { <nl> + return ERR_INVALID_PROCESSOR_ID ; <nl> + } <nl> + <nl> + const auto make_mask = [ ] ( u64 min , u64 max ) { <nl> + const u64 range = max - min + 1 ; <nl> + const u64 mask = ( 1ULL < < range ) - 1 ; <nl> + <nl> + return mask < < min ; <nl> + } ; <nl> + <nl> + core_mask = make_mask ( core_num_min , core_num_max ) ; <nl> + priority_mask = make_mask ( priority_min , priority_max ) ; <nl> return RESULT_SUCCESS ; <nl> } <nl> <nl> mmm a / src / core / hle / kernel / process_capability . h <nl> ppp b / src / core / hle / kernel / process_capability . h <nl> class ProcessCapabilities { <nl> / / / <nl> void InitializeForMetadatalessProcess ( ) ; <nl> <nl> + / / / Gets the allowable core mask <nl> + u64 GetCoreMask ( ) const { <nl> + return core_mask ; <nl> + } <nl> + <nl> + / / / Gets the allowable priority mask <nl> + u64 GetPriorityMask ( ) const { <nl> + return priority_mask ; <nl> + } <nl> + <nl> private : <nl> / / / Attempts to parse a given sequence of capability descriptors . <nl> / / / <nl> | kernel / process_capability : Handle the priority mask and core mask flags | yuzu-emu/yuzu | 27caf7120444d1c34e1c2e322ab97ba9f5275b28 | 2018-12-21T12:05:34Z |
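The HandlePriorityCoreNumFlags hunk above unpacks min/max core and priority indices from the capability descriptor word and turns each pair into a contiguous bitmask. Below is a self-contained sketch of just that mask arithmetic; the bit positions follow the hunk, while the example flag value and the asserts are made up for illustration and the surrounding kernel types and error codes are omitted.

// Standalone sketch of the mask construction used in HandlePriorityCoreNumFlags
// above: unpack min/max fields from the flag word, then build contiguous bitmasks.
#include <cassert>
#include <cstdint>
#include <cstdio>

// Builds a mask with bits [min, max] set, e.g. make_mask(0, 3) == 0b1111.
static uint64_t make_mask(uint64_t min, uint64_t max) {
    const uint64_t range = max - min + 1;
    const uint64_t mask = (1ULL << range) - 1;
    return mask << min;
}

int main() {
    // Example descriptor: cores 0..3, priorities 44..63 (field layout as in the hunk).
    const uint32_t flags = (3u << 24) | (0u << 16) | (44u << 10) | (63u << 4);

    const uint32_t core_num_min = (flags >> 16) & 0xFF;
    const uint32_t core_num_max = (flags >> 24) & 0xFF;
    const uint32_t priority_min = (flags >> 10) & 0x3F;
    const uint32_t priority_max = (flags >> 4) & 0x3F;

    const uint64_t core_mask = make_mask(core_num_min, core_num_max);
    const uint64_t priority_mask = make_mask(priority_min, priority_max);

    assert(core_mask == 0xF);   // cores 0..3 -> 0b1111
    std::printf("core_mask=0x%llx priority_mask=0x%llx\n",
                (unsigned long long)core_mask,
                (unsigned long long)priority_mask);
    return 0;
}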
mmm a / src / interfaces / wallet . cpp <nl> ppp b / src / interfaces / wallet . cpp <nl> class WalletImpl : public Wallet <nl> { <nl> return m_wallet . ChangeWalletPassphrase ( old_wallet_passphrase , new_wallet_passphrase ) ; <nl> } <nl> + void abortRescan ( ) override { m_wallet . AbortRescan ( ) ; } <nl> bool backupWallet ( const std : : string & filename ) override { return m_wallet . BackupWallet ( filename ) ; } <nl> std : : string getWalletName ( ) override { return m_wallet . GetName ( ) ; } <nl> bool getKeyFromPool ( bool internal , CPubKey & pub_key ) override <nl> mmm a / src / interfaces / wallet . h <nl> ppp b / src / interfaces / wallet . h <nl> class Wallet <nl> virtual bool changeWalletPassphrase ( const SecureString & old_wallet_passphrase , <nl> const SecureString & new_wallet_passphrase ) = 0 ; <nl> <nl> + / / ! Abort a rescan . <nl> + virtual void abortRescan ( ) = 0 ; <nl> + <nl> / / ! Back up wallet . <nl> virtual bool backupWallet ( const std : : string & filename ) = 0 ; <nl> <nl> mmm a / src / qt / walletview . cpp <nl> ppp b / src / qt / walletview . cpp <nl> void WalletView : : showProgress ( const QString & title , int nProgress ) <nl> progressDialog = new QProgressDialog ( title , " " , 0 , 100 ) ; <nl> progressDialog - > setWindowModality ( Qt : : ApplicationModal ) ; <nl> progressDialog - > setMinimumDuration ( 0 ) ; <nl> - progressDialog - > setCancelButton ( 0 ) ; <nl> progressDialog - > setAutoClose ( false ) ; <nl> progressDialog - > setValue ( 0 ) ; <nl> + progressDialog - > setCancelButtonText ( tr ( " Cancel " ) ) ; <nl> } <nl> else if ( nProgress = = 100 ) <nl> { <nl> void WalletView : : showProgress ( const QString & title , int nProgress ) <nl> progressDialog - > deleteLater ( ) ; <nl> } <nl> } <nl> - else if ( progressDialog ) <nl> - progressDialog - > setValue ( nProgress ) ; <nl> + else if ( progressDialog ) { <nl> + if ( progressDialog - > wasCanceled ( ) ) { <nl> + getWalletModel ( ) - > wallet ( ) . abortRescan ( ) ; <nl> + } else { <nl> + progressDialog - > setValue ( nProgress ) ; <nl> + } <nl> + } <nl> } <nl> <nl> void WalletView : : requestedSyncWarningInfo ( ) <nl> mmm a / src / wallet / rpcdump . cpp <nl> ppp b / src / wallet / rpcdump . cpp <nl> UniValue importwallet ( const JSONRPCRequest & request ) <nl> int64_t nFilesize = std : : max ( ( int64_t ) 1 , ( int64_t ) file . tellg ( ) ) ; <nl> file . seekg ( 0 , file . beg ) ; <nl> <nl> - pwallet - > ShowProgress ( _ ( " Importing . . . " ) , 0 ) ; / / show progress dialog in GUI <nl> + / / Use uiInterface . ShowProgress instead of pwallet . ShowProgress because pwallet . ShowProgress has a cancel button tied to AbortRescan which <nl> + / / we don ' t want for this progress bar shoing the import progress . uiInterface . ShowProgress does not have a cancel button . <nl> + uiInterface . ShowProgress ( _ ( " Importing . . . " ) , 0 , false ) ; / / show progress dialog in GUI <nl> while ( file . good ( ) ) { <nl> - pwallet - > ShowProgress ( " " , std : : max ( 1 , std : : min ( 99 , ( int ) ( ( ( double ) file . tellg ( ) / ( double ) nFilesize ) * 100 ) ) ) ) ; <nl> + uiInterface . ShowProgress ( " " , std : : max ( 1 , std : : min ( 99 , ( int ) ( ( ( double ) file . tellg ( ) / ( double ) nFilesize ) * 100 ) ) ) , false ) ; <nl> std : : string line ; <nl> std : : getline ( file , line ) ; <nl> if ( line . empty ( ) | | line [ 0 ] = = ' # ' ) <nl> UniValue importwallet ( const JSONRPCRequest & request ) <nl> } <nl> } <nl> file . 
close ( ) ; <nl> - pwallet - > ShowProgress ( " " , 100 ) ; / / hide progress dialog in GUI <nl> + uiInterface . ShowProgress ( " " , 100 , false ) ; / / hide progress dialog in GUI <nl> pwallet - > UpdateTimeFirstKey ( nTimeBegin ) ; <nl> } <nl> pwallet - > RescanFromTime ( nTimeBegin , reserver , false / * update * / ) ; <nl> | Add cancel button to rescan progress dialog | bitcoin/bitcoin | 69b01e6f8b3aca781da7f049909ad75de73a804a | 2018-04-12T21:00:30Z |
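The walletview.cpp hunk above makes the progress dialog's cancel button call abortRescan() on the wallet interface, which ultimately sets an abort flag the rescan loop polls. The sketch below shows the same shape in plain C++ with no Qt or Bitcoin Core dependencies; the Rescanner/ScanBlocks names are invented for illustration.

// Plain-C++ sketch of the cancel pattern wired up above: the UI's progress
// callback can request an abort, and the long-running scan loop polls the
// flag and stops early. Names here are illustrative, not Bitcoin Core's.
#include <atomic>
#include <cstdio>
#include <functional>

class Rescanner {
public:
    void AbortRescan() { m_abort = true; }   // analogous to aborting a wallet rescan

    // Scans `total` blocks, reporting progress; returns how many were scanned.
    int ScanBlocks(int total, const std::function<void(int)>& show_progress) {
        int scanned = 0;
        for (; scanned < total; ++scanned) {
            if (m_abort) break;              // honor a pending cancel request
            show_progress(100 * scanned / total);
        }
        return scanned;
    }

private:
    std::atomic<bool> m_abort{false};
};

int main() {
    Rescanner wallet;

    // Stand-in for the GUI: once progress passes 40%, the user hits Cancel.
    auto progress_ui = [&](int percent) {
        if (percent >= 40) wallet.AbortRescan();
    };

    const int scanned = wallet.ScanBlocks(1000, progress_ui);
    std::printf("stopped after %d of 1000 blocks\n", scanned);
    return 0;
}

The import path in rpcdump.cpp keeps the non-cancellable progress call precisely because its progress bar should not expose the rescan abort, which is the distinction the comment in the hunk above spells out.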
mmm a / docs / api / menu - item . md <nl> ppp b / docs / api / menu - item . md <nl> The ` role ` property can have following values : <nl> * ` zoomin ` - Zoom in the focused page by 10 % <nl> * ` zoomout ` - Zoom out the focused page by 10 % <nl> <nl> + * ` menuEdit ` - Whole default " Edit " menu ( Undo , Copy , etc . ) <nl> + * ` menuWindow ` - Whole default " Window " menu ( Minimize , Close , etc . ) <nl> + <nl> On macOS ` role ` can also have following additional values : <nl> <nl> * ` about ` - Map to the ` orderFrontStandardAboutPanel ` action <nl> mmm a / lib / browser / api / menu - item - roles . js <nl> ppp b / lib / browser / api / menu - item - roles . js <nl> const roles = { <nl> webContents . setZoomLevel ( zoomLevel - 0 . 5 ) <nl> } ) <nl> } <nl> + } , <nl> + / / submenu Edit ( should fit both Mac & Windows ) <nl> + menuEdit : { <nl> + label : ' Edit ' , <nl> + submenu : [ <nl> + { <nl> + role : ' undo ' <nl> + } , <nl> + { <nl> + role : ' redo ' <nl> + } , <nl> + { <nl> + type : ' separator ' <nl> + } , <nl> + { <nl> + role : ' cut ' <nl> + } , <nl> + { <nl> + role : ' copy ' <nl> + } , <nl> + { <nl> + role : ' paste ' <nl> + } , <nl> + <nl> + process . platform = = = ' darwin ' ? <nl> + { <nl> + role : ' pasteandmatchstyle ' <nl> + } : { } , <nl> + <nl> + { <nl> + role : ' delete ' <nl> + } , <nl> + <nl> + process . platform = = = ' win32 ' ? <nl> + { <nl> + type : ' separator ' <nl> + } : { } , <nl> + <nl> + { <nl> + role : ' selectall ' <nl> + } <nl> + ] <nl> + } , <nl> + <nl> + / / submenu Window should be used for Mac only <nl> + menuWindow : { <nl> + label : ' Window ' , <nl> + submenu : [ <nl> + { <nl> + role : ' minimize ' <nl> + } , <nl> + { <nl> + role : ' close ' <nl> + } , <nl> + <nl> + process . platform = = = ' darwin ' ? <nl> + { <nl> + type : ' separator ' <nl> + } : { } , <nl> + <nl> + process . platform = = = ' darwin ' ? <nl> + { <nl> + label : ' Bring All to Front ' , <nl> + role : ' front ' <nl> + } : { } <nl> + <nl> + ] <nl> } <nl> } <nl> <nl> exports . getDefaultAccelerator = ( role ) = > { <nl> if ( roles . hasOwnProperty ( role ) ) return roles [ role ] . accelerator <nl> } <nl> <nl> + exports . getDefaultSubmenu = ( role ) = > { <nl> + if ( roles . hasOwnProperty ( role ) ) { <nl> + submenu = roles [ role ] . submenu <nl> + <nl> + / / remove empty objects from within the submenu <nl> + if ( Array . isArray ( submenu ) ) <nl> + submenu = submenu . filter ( function ( n ) { <nl> + return n . constructor ! = = Object | | Object . keys ( n ) . length > 0 <nl> + } ) <nl> + <nl> + return submenu <nl> + } <nl> + } <nl> + <nl> exports . execute = ( role , focusedWindow , focusedWebContents ) = > { <nl> if ( ! canExecuteRole ( role ) ) return false <nl> <nl> mmm a / lib / browser / api / menu - item . js <nl> ppp b / lib / browser / api / menu - item . js <nl> const MenuItem = function ( options ) { <nl> for ( let key in options ) { <nl> if ( ! ( key in this ) ) this [ key ] = options [ key ] <nl> } <nl> - <nl> + this . submenu = this . submenu | | roles . getDefaultSubmenu ( this . role ) <nl> if ( this . submenu ! = null & & this . submenu . constructor ! = = Menu ) { <nl> this . submenu = Menu . buildFromTemplate ( this . submenu ) <nl> } <nl> | added default menu items for ' Edit ' and ' Window ' | electron/electron | 8aba64025038e79f043793b29871e77dd88bc118 | 2017-03-09T15:01:33Z |
mmm a / src / network / Timer . c <nl> ppp b / src / network / Timer . c <nl> static swTimer_node * swTimer_add ( swTimer * timer , int _msec , int interval , void * <nl> <nl> int swTimer_del ( swTimer * timer , swTimer_node * tnode ) <nl> { <nl> - / / current timer , cannot remove here . <nl> - if ( tnode - > id = = SwooleG . timer . _current_id ) <nl> - { <nl> - / / To avoid repeat delete <nl> - if ( 0 = = tnode - > remove ) <nl> - { <nl> - tnode - > remove = 1 ; <nl> - return SW_TRUE ; <nl> - } <nl> - else <nl> - { <nl> - return SW_FALSE ; <nl> - } <nl> - } <nl> / / remove from min - heap <nl> swHeap_remove ( timer - > heap , tnode - > heap_node ) ; <nl> if ( tnode - > heap_node ) <nl> | remove redundant code . | swoole/swoole-src | ed74247dd01b0e09e4bbc34a990b4c52cd966dfc | 2017-06-13T05:36:46Z |
Binary files a / data / skins / default / sheet . png and b / data / skins / default / sheet . png differ <nl> mmm a / data / skins / default / skin . xml <nl> ppp b / data / skins / default / skin . xml <nl> <nl> < part id = " icon_black " x = " 48 " y = " 256 " w = " 16 " h = " 16 " / > <nl> < part id = " icon_white " x = " 64 " y = " 256 " w = " 16 " h = " 16 " / > <nl> < part id = " icon_transparent " x = " 80 " y = " 256 " w = " 16 " h = " 16 " / > <nl> + < part id = " color_wheel_indicator " x = " 48 " y = " 192 " w = " 4 " h = " 4 " / > <nl> < / parts > <nl> <nl> < stylesheet > <nl> mmm a / src / app / ui / color_bar . cpp <nl> ppp b / src / app / ui / color_bar . cpp <nl> void ColorBar : : setColorSelector ( ColorSelector selector ) <nl> if ( ! m_wheel ) { <nl> m_wheel = new ColorWheel ; <nl> m_wheel - > setExpansive ( true ) ; <nl> + m_wheel - > selectColor ( m_fgColor . getColor ( ) ) ; <nl> m_wheel - > ColorChange . connect ( & ColorBar : : onPickSpectrum , this ) ; <nl> m_selectorPlaceholder . addChild ( m_wheel ) ; <nl> } <nl> void ColorBar : : onColorButtonChange ( const app : : Color & color ) <nl> / / palette view fg / bg indicators . <nl> m_paletteView . invalidate ( ) ; <nl> } <nl> + <nl> + if ( m_wheel & & m_wheel - > isVisible ( ) ) <nl> + m_wheel - > selectColor ( color ) ; <nl> } <nl> <nl> void ColorBar : : onPickSpectrum ( const app : : Color & color , ui : : MouseButtons buttons ) <nl> mmm a / src / app / ui / color_wheel . cpp <nl> ppp b / src / app / ui / color_wheel . cpp <nl> <nl> # include " app / ui / status_bar . h " <nl> # include " base / bind . h " <nl> # include " base / pi . h " <nl> + # include " she / surface . h " <nl> # include " ui / graphics . h " <nl> # include " ui / menu . h " <nl> # include " ui / message . h " <nl> app : : Color ColorWheel : : pickColor ( const gfx : : Point & pos ) const <nl> int v = ( pos . y - ( m_wheelBounds . y + m_wheelBounds . h / 2 ) ) ; <nl> double d = std : : sqrt ( u * u + v * v ) ; <nl> <nl> - if ( d < m_wheelRadius ) { <nl> + if ( d < m_wheelRadius + 2 * guiscale ( ) ) { <nl> double a = std : : atan2 ( - v , u ) ; <nl> <nl> int hue = ( int ( 180 . 0 * a / PI ) <nl> app : : Color ColorWheel : : pickColor ( const gfx : : Point & pos ) const <nl> } <nl> hue % = 360 ; / / To leave hue in [ 0 , 360 ) range <nl> <nl> - int sat = int ( 120 . 0 * d / m_wheelRadius ) ; <nl> + int sat ; <nl> if ( m_discrete ) { <nl> + sat = int ( 120 . 0 * d / m_wheelRadius ) ; <nl> sat / = 20 ; <nl> sat * = 20 ; <nl> } <nl> + else { <nl> + sat = int ( 100 . 0 * d / m_wheelRadius ) ; <nl> + } <nl> <nl> return app : : Color : : fromHsv ( <nl> MID ( 0 , hue , 360 ) , <nl> app : : Color ColorWheel : : pickColor ( const gfx : : Point & pos ) const <nl> } <nl> } <nl> <nl> + void ColorWheel : : selectColor ( const app : : Color & color ) <nl> + { <nl> + m_mainColor = color ; <nl> + invalidate ( ) ; <nl> + } <nl> + <nl> void ColorWheel : : setDiscrete ( bool state ) <nl> { <nl> m_discrete = state ; <nl> void ColorWheel : : onPaint ( ui : : PaintEvent & ev ) <nl> g - > putPixel ( color , x , y ) ; <nl> } <nl> } <nl> + <nl> + if ( m_mainColor . getAlpha ( ) > 0 ) { <nl> + int hue = m_mainColor . getHue ( ) - 30 ; <nl> + int sat = m_mainColor . getSaturation ( ) ; <nl> + gfx : : Point pos = <nl> + m_wheelBounds . getCenter ( ) + <nl> + gfx : : Point ( int ( + std : : cos ( PI * hue / 180 ) * double ( m_wheelRadius ) * sat / 100 . 0 ) , <nl> + int ( - std : : sin ( PI * hue / 180 ) * double ( m_wheelRadius ) * sat / 100 . 
0 ) ) ; <nl> + <nl> + she : : Surface * icon = theme - > parts . colorWheelIndicator ( ) - > getBitmap ( 0 ) ; <nl> + g - > drawRgbaSurface ( icon , <nl> + pos . x - icon - > width ( ) / 2 , <nl> + pos . y - icon - > height ( ) / 2 ) ; <nl> + } <nl> } <nl> <nl> bool ColorWheel : : onProcessMessage ( ui : : Message * msg ) <nl> mmm a / src / app / ui / color_wheel . h <nl> ppp b / src / app / ui / color_wheel . h <nl> namespace app { <nl> ~ ColorWheel ( ) ; <nl> <nl> app : : Color pickColor ( const gfx : : Point & pos ) const ; <nl> + void selectColor ( const app : : Color & color ) ; <nl> <nl> bool isDiscrete ( ) const { return m_discrete ; } <nl> void setDiscrete ( bool state ) ; <nl> namespace app { <nl> int m_wheelRadius ; <nl> bool m_discrete ; <nl> ui : : ButtonBase m_options ; <nl> + app : : Color m_mainColor ; <nl> } ; <nl> <nl> } / / namespace app <nl> | Show current color indicator in the color wheel | aseprite/aseprite | b0877df0cb528f59f8004fecc17f661e28dc25d4 | 2015-08-21T15:34:06Z |
mmm a / src / banman . cpp <nl> ppp b / src / banman . cpp <nl> BanMan : : BanMan ( fs : : path ban_file , CClientUIInterface * client_interface , int64_t <nl> SweepBanned ( ) ; / / sweep out unused entries <nl> <nl> LogPrint ( BCLog : : NET , " Loaded % d banned node ips / subnets from banlist . dat % dms \ n " , <nl> - banmap . size ( ) , GetTimeMillis ( ) - n_start ) ; <nl> + m_banned . size ( ) , GetTimeMillis ( ) - n_start ) ; <nl> } else { <nl> LogPrintf ( " Invalid or missing banlist . dat ; recreating \ n " ) ; <nl> SetBannedSetDirty ( true ) ; / / force write <nl> | Merge : banlist : log post - swept banlist size at startup | bitcoin/bitcoin | e3b31255c5ad8841023231ce843a27789d996ff6 | 2020-07-09T12:29:31Z |
mmm a / src / core / hle / kernel / thread . cpp <nl> ppp b / src / core / hle / kernel / thread . cpp <nl> void ChangeReadyState ( Thread * t , bool ready ) { <nl> } <nl> } <nl> <nl> - / / / Verify that a thread has not been released from waiting <nl> - static bool VerifyWait ( const Thread * thread , WaitType type , Handle wait_handle ) { <nl> - _dbg_assert_ ( Kernel , thread ! = nullptr ) ; <nl> - return ( type = = thread - > wait_type ) & & ( wait_handle = = thread - > wait_handle ) & & ( thread - > IsWaiting ( ) ) ; <nl> + / / / Check if a thread is blocking on a specified wait type <nl> + static bool CheckWaitType ( const Thread * thread , WaitType type ) { <nl> + return ( type = = thread - > wait_type ) & & ( thread - > IsWaiting ( ) ) ; <nl> } <nl> <nl> - / / / Verify that a thread has not been released from waiting ( with wait address ) <nl> - static bool VerifyWait ( const Thread * thread , WaitType type , Handle wait_handle , VAddr wait_address ) { <nl> - _dbg_assert_ ( Kernel , thread ! = nullptr ) ; <nl> - return VerifyWait ( thread , type , wait_handle ) & & ( wait_address = = thread - > wait_address ) ; <nl> + / / / Check if a thread is blocking on a specified wait type with a specified handle <nl> + static bool CheckWaitType ( const Thread * thread , WaitType type , Handle wait_handle ) { <nl> + return CheckWaitType ( thread , type ) & & ( wait_handle = = thread - > wait_handle ) ; <nl> + } <nl> + <nl> + / / / Check if a thread is blocking on a specified wait type with a specified handle and address <nl> + static bool CheckWaitType ( const Thread * thread , WaitType type , Handle wait_handle , VAddr wait_address ) { <nl> + return CheckWaitType ( thread , type , wait_handle ) & & ( wait_address = = thread - > wait_address ) ; <nl> } <nl> <nl> / / / Stops the current thread <nl> ResultCode StopThread ( Handle handle , const char * reason ) { <nl> thread - > status = THREADSTATUS_DORMANT ; <nl> for ( Handle waiting_handle : thread - > waiting_threads ) { <nl> Thread * waiting_thread = g_object_pool . Get < Thread > ( waiting_handle ) ; <nl> - if ( VerifyWait ( waiting_thread , WAITTYPE_THREADEND , handle ) ) { <nl> + <nl> + if ( CheckWaitType ( waiting_thread , WAITTYPE_THREADEND , handle ) ) <nl> ResumeThreadFromWait ( waiting_handle ) ; <nl> - } <nl> } <nl> thread - > waiting_threads . clear ( ) ; <nl> <nl> Handle ArbitrateHighestPriorityThread ( u32 arbiter , u32 address ) { <nl> for ( Handle handle : thread_queue ) { <nl> Thread * thread = g_object_pool . Get < Thread > ( handle ) ; <nl> <nl> - if ( ! VerifyWait ( thread , WAITTYPE_ARB , arbiter , address ) ) <nl> + if ( ! CheckWaitType ( thread , WAITTYPE_ARB , arbiter , address ) ) <nl> continue ; <nl> <nl> if ( thread = = nullptr ) <nl> void ArbitrateAllThreads ( u32 arbiter , u32 address ) { <nl> for ( Handle handle : thread_queue ) { <nl> Thread * thread = g_object_pool . Get < Thread > ( handle ) ; <nl> <nl> - if ( VerifyWait ( thread , WAITTYPE_ARB , arbiter , address ) ) <nl> + if ( CheckWaitType ( thread , WAITTYPE_ARB , arbiter , address ) ) <nl> ResumeThreadFromWait ( handle ) ; <nl> } <nl> } <nl> void ResumeThreadFromWait ( Handle handle ) { <nl> Thread * thread = Kernel : : g_object_pool . Get < Thread > ( handle ) ; <nl> if ( thread ) { <nl> thread - > status & = ~ THREADSTATUS_WAIT ; <nl> + thread - > wait_handle = 0 ; <nl> + thread - > wait_type = WAITTYPE_NONE ; <nl> if ( ! 
( thread - > status & ( THREADSTATUS_WAITSUSPEND | THREADSTATUS_DORMANT | THREADSTATUS_DEAD ) ) ) { <nl> ChangeReadyState ( thread , true ) ; <nl> } <nl> void Reschedule ( ) { <nl> Thread * prev = GetCurrentThread ( ) ; <nl> Thread * next = NextThread ( ) ; <nl> HLE : : g_reschedule = false ; <nl> - if ( next > 0 ) { <nl> - LOG_TRACE ( Kernel , " context switch 0x % 08X - > 0x % 08X " , prev - > GetHandle ( ) , next - > GetHandle ( ) ) ; <nl> <nl> + if ( next ! = nullptr ) { <nl> + LOG_TRACE ( Kernel , " context switch 0x % 08X - > 0x % 08X " , prev - > GetHandle ( ) , next - > GetHandle ( ) ) ; <nl> SwitchContext ( next ) ; <nl> + } else { <nl> + LOG_TRACE ( Kernel , " cannot context switch from 0x % 08X , no higher priority thread ! " , prev - > GetHandle ( ) ) ; <nl> <nl> - / / Hack - There is no mechanism yet to waken the primary thread if it has been put to sleep <nl> - / / by a simulated VBLANK thread switch . So , we ' ll just immediately set it to " ready " again . <nl> - / / This results in the current thread yielding on a VBLANK once , and then it will be <nl> - / / immediately placed back in the queue for execution . <nl> - if ( prev - > wait_type = = WAITTYPE_VBLANK ) { <nl> - ResumeThreadFromWait ( prev - > GetHandle ( ) ) ; <nl> + for ( Handle handle : thread_queue ) { <nl> + Thread * thread = g_object_pool . Get < Thread > ( handle ) ; <nl> + LOG_TRACE ( Kernel , " \ thandle = 0x % 08X prio = 0x % 02X , status = 0x % 08X wait_type = 0x % 08X wait_handle = 0x % 08X " , <nl> + thread - > GetHandle ( ) , thread - > current_priority , thread - > status , thread - > wait_type , thread - > wait_handle ) ; <nl> } <nl> } <nl> + <nl> + / / TODO ( bunnei ) : Hack - There is no timing mechanism yet to wake up a thread if it has been put <nl> + / / to sleep . So , we ' ll just immediately set it to " ready " again after an attempted context <nl> + / / switch has occurred . This results in the current thread yielding on a sleep once , and then it <nl> + / / will immediately be placed back in the queue for execution . <nl> + <nl> + if ( CheckWaitType ( prev , WAITTYPE_SLEEP ) ) <nl> + ResumeThreadFromWait ( prev - > GetHandle ( ) ) ; <nl> } <nl> <nl> ResultCode GetThreadId ( u32 * thread_id , Handle handle ) { <nl> mmm a / src / core / hle / kernel / thread . h <nl> ppp b / src / core / hle / kernel / thread . h <nl> enum WaitType { <nl> WAITTYPE_SEMA , <nl> WAITTYPE_EVENT , <nl> WAITTYPE_THREADEND , <nl> - WAITTYPE_VBLANK , <nl> WAITTYPE_MUTEX , <nl> WAITTYPE_SYNCH , <nl> WAITTYPE_ARB , <nl> mmm a / src / core / hle / svc . cpp <nl> ppp b / src / core / hle / svc . cpp <nl> static Result ClearEvent ( Handle evt ) { <nl> static void SleepThread ( s64 nanoseconds ) { <nl> LOG_TRACE ( Kernel_SVC , " called nanoseconds = % lld " , nanoseconds ) ; <nl> <nl> - / / Check for next thread to schedule <nl> + / / Sleep current thread and check for next thread to schedule <nl> + Kernel : : WaitCurrentThread ( WAITTYPE_SLEEP ) ; <nl> HLE : : Reschedule ( __func__ ) ; <nl> } <nl> <nl> | Thread : Wait current thread on svc_SleepThread | yuzu-emu/yuzu | 4fcdbed9f661a37772db915904a852850037d84a | 2014-12-21T04:20:19Z |
mmm a / plugins / producer_plugin / producer_plugin . cpp <nl> ppp b / plugins / producer_plugin / producer_plugin . cpp <nl> void producer_plugin_impl : : produce_block ( ) { <nl> <nl> void producer_plugin : : log_failed_transaction ( const transaction_id_type & trx_id , const char * reason ) const { <nl> fc_dlog ( _trx_failed_trace_log , " [ TRX_TRACE ] Speculative execution is REJECTING tx : $ { txid } : $ { why } " , <nl> - ( " trxid " , trx_id ) ( " reason " , reason ) ) ; <nl> + ( " txid " , trx_id ) ( " why " , reason ) ) ; <nl> } <nl> <nl> } / / namespace eosio <nl> | Merge pull request from nsjames / patch - 1 | EOSIO/eos | 2046a51fc849b1b58ed2125a97c9ae1e75440655 | 2020-11-03T13:56:33Z |
mmm a / third - party <nl> ppp b / third - party <nl> @ @ - 1 + 1 @ @ <nl> - Subproject commit 20e77d319386e62743c08d45a9fba173d1d7a9b5 <nl> + Subproject commit 26dc09fb221a06179bd28376996fa55a1e848157 <nl> | update hhvm - third - party ( boost ) | facebook/hhvm | 44b58b3834ce95b6b02faca55d6a1f78b0187736 | 2019-04-11T16:13:42Z |
new file mode 100644 <nl> index 00000000000 . . 09f3101cf4b <nl> mmm / dev / null <nl> ppp b / tests / python / test_local_atomics . py <nl> <nl> + import taichi as ti <nl> + <nl> + @ ti . all_archs <nl> + def test_explicit_local_atomics ( ) : <nl> + A = ti . var ( ti . f32 , shape = ( ) ) <nl> + <nl> + @ ti . kernel <nl> + def func ( ) : <nl> + a = 0 <nl> + for i in range ( 10 ) : <nl> + ti . atomic_add ( a , i ) <nl> + A [ None ] = a <nl> + <nl> + func ( ) <nl> + assert A [ None ] = = 45 <nl> + <nl> + @ ti . all_archs <nl> + def test_implicit_local_atomics ( ) : <nl> + A = ti . var ( ti . f32 , shape = ( ) ) <nl> + <nl> + @ ti . kernel <nl> + def func ( ) : <nl> + a = 0 <nl> + for i in range ( 10 ) : <nl> + a + = i <nl> + A [ None ] = a <nl> + <nl> + func ( ) <nl> + assert A [ None ] = = 45 <nl> + <nl> | . | taichi-dev/taichi | c08baa5af0c230975fc38101e8b025d63e0e210b | 2019-12-06T09:45:53Z |
mmm a / caffe2 / core / operator . h <nl> ppp b / caffe2 / core / operator . h <nl> class CAFFE2_API OperatorBase : public Observable < OperatorBase > { <nl> DCHECK_LT ( 0U , newstyle_inputs_ . size ( ) ) ; <nl> IValue ival ; <nl> if ( newstyle_inputs_ [ 0 ] . isTensorList ( ) ) { <nl> - / / if the first input is a tensor list , we get input tensors by indexing into that list . <nl> - / / currently , this means that only tensors from that list are accessible as inputs . <nl> - / / any hypothetical input tensors that come after the list are not accessible . <nl> + / / if the first input is a tensor list , we get input tensors by indexing <nl> + / / into that list . currently , this means that only tensors from that list <nl> + / / are accessible as inputs . any hypothetical input tensors that come <nl> + / / after the list are not accessible . <nl> auto tensorList = newstyle_inputs_ [ 0 ] . toTensorVector ( ) ; <nl> DCHECK_LT ( ( size_t ) idx , tensorList . size ( ) ) ; <nl> ival = tensorList [ idx ] ; <nl> } else { <nl> - / / if the first input is not a tensor list , we get input tensors by indexing into the inputs . <nl> + / / if the first input is not a tensor list , we get input tensors by <nl> + / / indexing into the inputs . <nl> DCHECK_LT ( ( size_t ) idx , newstyle_inputs_ . size ( ) ) ; <nl> ival = newstyle_inputs_ [ idx ] ; <nl> } <nl> class CAFFE2_API OperatorBase : public Observable < OperatorBase > { <nl> / / also update the tensor in the hack <nl> output_tensors_ [ idx ] = std : : move ( tensor ) ; <nl> # else <nl> - CAFFE_THROW ( " Non - legacy operators are not legal in xplat / caffe2 " ) ; <nl> + CAFFE_THROW ( " Non - legacy operators are not legal in xplat / caffe2 " ) ; <nl> # endif <nl> } else { <nl> / / update the tensor in the workspace <nl> class CAFFE2_API OperatorBase : public Observable < OperatorBase > { <nl> CAFFE_ENFORCE ( <nl> isLegacyOperator ( ) , <nl> " OutputTensorAlias ( idx , src ) not ( yet ) supported for operators exported to c10 . " ) ; <nl> - return BlobSetTensor ( OutputBlob ( idx ) , <nl> - src . Alias ( ) ) ; <nl> + return BlobSetTensor ( OutputBlob ( idx ) , src . Alias ( ) ) ; <nl> } <nl> <nl> - <nl> template < typename T > <nl> inline T * Output ( int idx , T * allocated ) { <nl> CAFFE_ENFORCE ( <nl> inline vector < int16_t > OperatorBase : : GetVectorFromIValueList < int16_t > ( <nl> / / member variables for the class constructors . <nl> / / This is a workaround for CUDA9 . 2 and GCC7 <nl> # if defined ( CUDART_VERSION ) & & CUDART_VERSION > = 9020 & & __GNUC__ > = 7 <nl> - # define OP_SINGLE_ARG ( type , name , variable , default ) \ <nl> + # define OP_SINGLE_ARG ( type , name , variable , default ) \ <nl> variable ( this - > template GetSingleArgument < type > ( name , ( default ) ) ) <nl> # else <nl> - # define OP_SINGLE_ARG ( type , name , variable , default ) \ <nl> + # define OP_SINGLE_ARG ( type , name , variable , default ) \ <nl> variable ( OperatorBase : : GetSingleArgument < type > ( name , ( default ) ) ) <nl> # endif <nl> <nl> inline vector < int16_t > OperatorBase : : GetVectorFromIValueList < int16_t > ( <nl> / / you can now do <nl> / / auto & weight = Input ( WEIGHT ) ; <nl> / / to make it more clear . <nl> - # define INPUT_TAGS ( first_input , . . . ) \ <nl> + # define INPUT_TAGS ( first_input , . . . ) \ <nl> enum _InputTags { first_input = 0 , __VA_ARGS__ } <nl> - # define OUTPUT_TAGS ( first_input , . . . ) \ <nl> + # define OUTPUT_TAGS ( first_input , . . . 
) \ <nl> enum _OutputTags { first_input = 0 , __VA_ARGS__ } <nl> <nl> - <nl> template < typename T > <nl> inline vector < T > OperatorBase : : GetRepeatedArgument ( <nl> const string & name , <nl> class Operator : public OperatorBase { <nl> <nl> # define USE_OPERATOR_CONTEXT_FUNCTIONS USE_OPERATOR_FUNCTIONS ( Context ) <nl> <nl> - # define USE_SIMPLE_CTOR_DTOR ( name ) \ <nl> - template < class . . . Args > explicit name ( Args & & . . . args ) \ <nl> - : Operator < Context > ( std : : forward < Args > ( args ) . . . ) { } \ <nl> + # define USE_SIMPLE_CTOR_DTOR ( name ) \ <nl> + template < class . . . Args > \ <nl> + explicit name ( Args & & . . . args ) \ <nl> + : Operator < Context > ( std : : forward < Args > ( args ) . . . ) { } \ <nl> virtual ~ name ( ) noexcept { } <nl> <nl> / / Helpers to implement runtime op polymorphism . Often it ' s convenient to make <nl> C10_DECLARE_REGISTRY ( <nl> # define REGISTER_HIP_OPERATOR_WITH_ENGINE ( name , engine , . . . ) \ <nl> C10_REGISTER_CLASS ( HIPOperatorRegistry , name # # _ENGINE_ # # engine , __VA_ARGS__ ) <nl> <nl> - # define REGISTER_MIOPEN_OPERATOR ( name , . . . ) \ <nl> + # define REGISTER_MIOPEN_OPERATOR ( name , . . . ) \ <nl> REGISTER_HIP_OPERATOR_WITH_ENGINE ( name , MIOPEN , __VA_ARGS__ ) \ <nl> - REGISTER_HIP_OPERATOR_WITH_ENGINE ( name , CUDNN , __VA_ARGS__ ) / / Make CUDNN an alias of MIOPEN for HIP ops <nl> + REGISTER_HIP_OPERATOR_WITH_ENGINE ( \ <nl> + name , CUDNN , __VA_ARGS__ ) / / Make CUDNN an alias of MIOPEN for HIP ops <nl> <nl> / / StaticLinkingProtector is a helper class that ensures that the Caffe2 <nl> / / library is linked correctly with whole archives ( in the case of static <nl> struct StaticLinkingProtector { <nl> / / If Caffe2 is properly linked with whole archive , there should be more <nl> / / than zero registered ops . <nl> if ( registered_ops = = 0 ) { <nl> - LOG ( FATAL ) < < <nl> - " You might have made a build error : the Caffe2 library does not seem " <nl> - " to be linked with whole - static library option . To do so , use " <nl> - " - Wl , - force_load ( clang ) or - Wl , - - whole - archive ( gcc ) to link the " <nl> - " Caffe2 library . " ; <nl> + LOG ( FATAL ) <nl> + < < " You might have made a build error : the Caffe2 library does not seem " <nl> + " to be linked with whole - static library option . To do so , use " <nl> + " - Wl , - force_load ( clang ) or - Wl , - - whole - archive ( gcc ) to link the " <nl> + " Caffe2 library . 
" ; <nl> } <nl> } <nl> } ; <nl> using PerOpEnginePrefType = <nl> CaffeMap < DeviceType , CaffeMap < std : : string , EnginePrefType > > ; <nl> / / { device_type - > EnginePrefType } <nl> using GlobalEnginePrefType = CaffeMap < DeviceType , EnginePrefType > ; <nl> - CAFFE2_API void SetPerOpEnginePref ( const PerOpEnginePrefType & per_op_engine_pref ) ; <nl> - CAFFE2_API void SetGlobalEnginePref ( const GlobalEnginePrefType & global_engine_pref ) ; <nl> + CAFFE2_API void SetPerOpEnginePref ( <nl> + const PerOpEnginePrefType & per_op_engine_pref ) ; <nl> + CAFFE2_API void SetGlobalEnginePref ( <nl> + const GlobalEnginePrefType & global_engine_pref ) ; <nl> CAFFE2_API void SetEnginePref ( <nl> const PerOpEnginePrefType & per_op_engine_pref , <nl> const GlobalEnginePrefType & global_engine_pref ) ; <nl> CAFFE2_API TensorShapes InferBlobShapesAndTypesFromMap ( <nl> const CaffeMap < std : : string , TensorProto_DataType > & blob_types , <nl> const vector < NetDef * > & nets ) ; <nl> <nl> - CAFFE2_API std : : map < string , std : : pair < DeviceOption , DeviceOption > > ValidateTensorDevices ( <nl> - OperatorBase & op , <nl> - const OperatorDef & op_def ) ; <nl> + CAFFE2_API std : : map < string , std : : pair < DeviceOption , DeviceOption > > <nl> + ValidateTensorDevices ( OperatorBase & op , const OperatorDef & op_def ) ; <nl> <nl> / / Get a set of registered operator names <nl> CAFFE2_API std : : set < std : : string > GetRegisteredOperators ( ) ; <nl> <nl> / / Operator logging capabilities <nl> - CAFFE2_API void SetOperatorLogger ( std : : function < void ( const OperatorDef & ) > tracer ) ; <nl> + CAFFE2_API void SetOperatorLogger ( <nl> + std : : function < void ( const OperatorDef & ) > tracer ) ; <nl> std : : function < void ( const OperatorDef & ) > GetOperatorLogger ( ) ; <nl> <nl> # ifndef C10_MOBILE <nl> inline unique_ptr < ExternalTensorFunctionsBase > CreateExternalTensorFunctions ( <nl> } <nl> # endif / / C10_MOBILE <nl> <nl> - } / / namespace caffe2 <nl> - <nl> + } / / namespace caffe2 <nl> <nl> - # endif / / CAFFE2_CORE_OPERATOR_H_ <nl> + # endif / / CAFFE2_CORE_OPERATOR_H_ <nl> mmm a / caffe2 / operators / cross_entropy_op . cc <nl> ppp b / caffe2 / operators / cross_entropy_op . cc <nl> bool LabelCrossEntropyGradientOp < float , CPUContext > : : RunOnDevice ( ) { <nl> float * dXdata = dX - > template mutable_data < float > ( ) ; <nl> for ( int i = 0 ; i < N ; + + i ) { <nl> dXdata [ i * D + labelData [ i ] ] = <nl> - - dYdata [ i ] / std : : max ( Xdata [ i * D + labelData [ i ] ] , kLOG_THRESHOLD ( ) ) ; <nl> + - dYdata [ i ] / std : : max ( Xdata [ i * D + labelData [ i ] ] , kLOG_THRESHOLD ( ) ) ; <nl> } <nl> return true ; <nl> } <nl> bool CrossEntropyGradientOp < float , CPUContext > : : RunOnDevice ( ) { <nl> return true ; <nl> } <nl> <nl> - REGISTER_CPU_OPERATOR ( LabelCrossEntropy , <nl> - LabelCrossEntropyOp < float , CPUContext > ) ; <nl> - REGISTER_CPU_OPERATOR ( LabelCrossEntropyGradient , <nl> - LabelCrossEntropyGradientOp < float , CPUContext > ) ; <nl> + REGISTER_CPU_OPERATOR ( <nl> + LabelCrossEntropy , <nl> + LabelCrossEntropyOp < float , CPUContext > ) ; <nl> + REGISTER_CPU_OPERATOR ( <nl> + LabelCrossEntropyGradient , <nl> + LabelCrossEntropyGradientOp < float , CPUContext > ) ; <nl> <nl> OPERATOR_SCHEMA ( LabelCrossEntropy ) <nl> . NumInputs ( 2 ) <nl> print ( " Y : \ n " , workspace . FetchBlob ( " Y " ) ) <nl> <nl> <nl> ) DOC " ) <nl> - . Input ( <nl> - 0 , <nl> - " X " , <nl> - " Input tensor which is almost always the result of a softmax operation . 
$ X $ is a 2D array of size $ NxD $ , where $ N $ is the batch size and $ D $ is the number of classes . " ) <nl> - . Input ( <nl> - 1 , <nl> - " label " , <nl> - " Blob containing the labels used to compare the input . $ label $ is a length $ N $ list of integers , where each element is the integer label for the $ n $ th element of the batch . " ) <nl> - . Output ( <nl> - 0 , <nl> - " Y " , <nl> - " Output blob from the cross entropy computation . $ Y $ is 1D length $ N $ tensor . " ) ; <nl> - OPERATOR_SCHEMA ( LabelCrossEntropyGradient ) <nl> - . NumInputs ( 3 ) <nl> - . NumOutputs ( 1 ) ; <nl> + . Input ( <nl> + 0 , <nl> + " X " , <nl> + " Input tensor which is almost always the result of a softmax operation . $ X $ is a 2D array of size $ NxD $ , where $ N $ is the batch size and $ D $ is the number of classes . " ) <nl> + . Input ( <nl> + 1 , <nl> + " label " , <nl> + " Blob containing the labels used to compare the input . $ label $ is a length $ N $ list of integers , where each element is the integer label for the $ n $ th element of the batch . " ) <nl> + . Output ( <nl> + 0 , <nl> + " Y " , <nl> + " Output blob from the cross entropy computation . $ Y $ is 1D length $ N $ tensor . " ) ; <nl> + OPERATOR_SCHEMA ( LabelCrossEntropyGradient ) . NumInputs ( 3 ) . NumOutputs ( 1 ) ; <nl> <nl> class GetLabelCrossEntropyGradient : public GradientMakerBase { <nl> using GradientMakerBase : : GradientMakerBase ; <nl> vector < OperatorDef > GetGradientDefs ( ) override { <nl> return SingleGradientDef ( <nl> - " LabelCrossEntropyGradient " , " " , <nl> + " LabelCrossEntropyGradient " , <nl> + " " , <nl> vector < string > { I ( 0 ) , I ( 1 ) , GO ( 0 ) } , <nl> vector < string > { GI ( 0 ) } ) ; <nl> } <nl> } ; <nl> REGISTER_GRADIENT ( LabelCrossEntropy , GetLabelCrossEntropyGradient ) ; <nl> <nl> - REGISTER_CPU_OPERATOR ( MakeTwoClass , <nl> - MakeTwoClassOp < float , CPUContext > ) ; <nl> - REGISTER_CPU_OPERATOR ( MakeTwoClassGradient , <nl> - MakeTwoClassGradientOp < float , CPUContext > ) ; <nl> + REGISTER_CPU_OPERATOR ( MakeTwoClass , MakeTwoClassOp < float , CPUContext > ) ; <nl> + REGISTER_CPU_OPERATOR ( <nl> + MakeTwoClassGradient , <nl> + MakeTwoClassGradientOp < float , CPUContext > ) ; <nl> <nl> REGISTER_CPU_OPERATOR ( <nl> SigmoidCrossEntropyWithLogits , <nl> REGISTER_CPU_OPERATOR ( <nl> OPERATOR_SCHEMA ( MakeTwoClass ) <nl> . NumInputs ( 1 ) <nl> . NumOutputs ( 1 ) <nl> - . TensorInferenceFunction ( <nl> - [ ] ( const OperatorDef & / * unused * / , const vector < TensorShape > & in ) { <nl> - vector < TensorShape > out ( 1 ) ; <nl> - out [ 0 ] . add_dims ( in [ 0 ] . dims ( 0 ) ) ; <nl> - out [ 0 ] . add_dims ( 2 ) ; <nl> - return out ; <nl> - } ) <nl> + . TensorInferenceFunction ( [ ] ( const OperatorDef & / * unused * / , <nl> + const vector < TensorShape > & in ) { <nl> + vector < TensorShape > out ( 1 ) ; <nl> + out [ 0 ] . add_dims ( in [ 0 ] . dims ( 0 ) ) ; <nl> + out [ 0 ] . add_dims ( 2 ) ; <nl> + return out ; <nl> + } ) <nl> . SetDoc ( R " DOC ( <nl> Given a vector of probabilities , this operator transforms this into a 2 - column <nl> matrix with complimentary probabilities for binary classification . In explicit <nl> Given a vector of probabilities , this operator transforms this into a 2 - column <nl> " 2 - column matrix with complimentary probabilities of X for " <nl> " binary classification " ) ; <nl> <nl> - OPERATOR_SCHEMA ( MakeTwoClassGradient ) <nl> - . NumInputs ( 1 ) <nl> - . NumOutputs ( 1 ) ; <nl> + OPERATOR_SCHEMA ( MakeTwoClassGradient ) . 
NumInputs ( 1 ) . NumOutputs ( 1 ) ; <nl> <nl> OPERATOR_SCHEMA ( SigmoidCrossEntropyWithLogits ) <nl> . Arg ( " log_D_trick " , R " DOC ( <nl> REGISTER_GRADIENT ( <nl> WeightedSigmoidCrossEntropyWithLogits , <nl> GetWeightedSigmoidCrossEntropyWithLogitsGradient ) ; <nl> <nl> - REGISTER_CPU_OPERATOR ( CrossEntropy , <nl> - CrossEntropyOp < float , CPUContext > ) ; <nl> - REGISTER_CPU_OPERATOR ( CrossEntropyGradient , <nl> - CrossEntropyGradientOp < float , CPUContext > ) ; <nl> + REGISTER_CPU_OPERATOR ( CrossEntropy , CrossEntropyOp < float , CPUContext > ) ; <nl> + REGISTER_CPU_OPERATOR ( <nl> + CrossEntropyGradient , <nl> + CrossEntropyGradientOp < float , CPUContext > ) ; <nl> <nl> OPERATOR_SCHEMA ( CrossEntropy ) <nl> . NumInputs ( 2 ) <nl> print ( " Y : \ n " , workspace . FetchBlob ( " Y " ) ) <nl> 0 , <nl> " Y " , <nl> " Output blob from the cross entropy computation . $ Y $ is 1D length $ N $ tensor . " ) ; <nl> - OPERATOR_SCHEMA ( CrossEntropyGradient ) <nl> - . NumInputs ( 3 ) <nl> - . NumOutputs ( 1 ) ; <nl> + OPERATOR_SCHEMA ( CrossEntropyGradient ) . NumInputs ( 3 ) . NumOutputs ( 1 ) ; <nl> <nl> class GetCrossEntropyGradient : public GradientMakerBase { <nl> using GradientMakerBase : : GradientMakerBase ; <nl> vector < OperatorDef > GetGradientDefs ( ) override { <nl> return SingleGradientDef ( <nl> - " CrossEntropyGradient " , " " , <nl> + " CrossEntropyGradient " , <nl> + " " , <nl> vector < string > { I ( 0 ) , I ( 1 ) , GO ( 0 ) } , <nl> vector < string > { GI ( 0 ) } ) ; <nl> } <nl> } ; <nl> REGISTER_GRADIENT ( CrossEntropy , GetCrossEntropyGradient ) ; <nl> <nl> - } / / namespace caffe2 <nl> + } / / namespace caffe2 <nl> mmm a / caffe2 / operators / fully_connected_op . h <nl> ppp b / caffe2 / operators / fully_connected_op . h <nl> class FullyConnectedGradientOp : public Operator < Context > { <nl> & context_ , <nl> math_type ) ; <nl> if ( ! bias_multiplier_ . has_value ( ) ) { <nl> - bias_multiplier_ = caffe2 : : empty ( { M } , at : : dtype < T_B > ( ) . device ( Context : : GetDeviceType ( ) ) ) ; <nl> + bias_multiplier_ = <nl> + caffe2 : : empty ( { M } , at : : dtype < T_B > ( ) . device ( Context : : GetDeviceType ( ) ) ) ; <nl> math : : Set < T_B , Context > ( <nl> M , <nl> convert : : To < float , T_B > ( 1 ) , <nl> mmm a / torch / nn / functional . py <nl> ppp b / torch / nn / functional . py <nl> def glu ( input , dim = - 1 ) : <nl> if type ( input ) is not Tensor and has_torch_function ( ( input , ) ) : <nl> return handle_torch_function ( glu , ( input , ) , input , dim = dim ) <nl> if input . dim ( ) = = 0 : <nl> - raise RuntimeError ( " glu does not suppport scalars because halving size must be even " ) <nl> + raise RuntimeError ( " glu does not support scalars because halving size must be even " ) <nl> return torch . _C . _nn . glu ( input , dim ) <nl> <nl> <nl> | [ Format ] format a few files ( ) | pytorch/pytorch | cc5befc461ba15f0e5d54f3b0e9fc2c72422772f | 2020-04-17T21:30:01Z |
new file mode 100644 <nl> index 0000000000 . . 6deec4bcef <nl> mmm / dev / null <nl> ppp b / change / @ office - iss - react - native - win32 - 2020 - 10 - 05 - 13 - 03 - 34 - noinstallrex . json <nl> <nl> + { <nl> + " type " : " none " , <nl> + " comment " : " Most people do not need rex , do not install it for everyone using the RNW repo " , <nl> + " packageName " : " @ office - iss / react - native - win32 " , <nl> + " email " : " 30809111 + acoates - ms @ users . noreply . github . com " , <nl> + " dependentChangeType " : " none " , <nl> + " date " : " 2020 - 10 - 05T20 : 03 : 34 . 627Z " <nl> + } <nl> mmm a / packages / react - native - win32 / package . json <nl> ppp b / packages / react - native - win32 / package . json <nl> <nl> " lint : fix " : " just - scripts lint : fix " , <nl> " lint " : " just - scripts lint " , <nl> " api " : " just - scripts api " , <nl> - " run - win32 - dev - web " : " rex - win32 - - bundle RNTester / js / RNTesterApp - - component RNTesterApp - - basePath . / dist / win32 / dev - - useDevMain - - useWebDebugger " , <nl> - " run - win32 - devmain " : " rex - win32 - - bundle RNTester / js / RNTesterApp - - component RNTesterApp - - basePath . / dist / win32 / dev - - useDevMain " , <nl> - " run - win32 " : " rex - win32 - - bundle RNTester / js / RNTesterApp - - component RNTesterApp - - basePath . / dist / win32 / dev " , <nl> + " run - win32 - dev - web " : " npx @ office - iss / rex - win32 @ 0 . 62 . 7 - tenantreactnativewin - 13222 - - bundle RNTester / js / RNTesterApp - - component RNTesterApp - - basePath . / dist / win32 / dev - - useDevMain - - useWebDebugger " , <nl> + " run - win32 - devmain " : " npx @ office - iss / rex - win32 @ 0 . 62 . 7 - tenantreactnativewin - 13222 - - bundle RNTester / js / RNTesterApp - - component RNTesterApp - - basePath . / dist / win32 / dev - - useDevMain " , <nl> + " run - win32 " : " npx @ office - iss / rex - win32 @ 0 . 62 . 7 - tenantreactnativewin - 13222 - - bundle RNTester / js / RNTesterApp - - component RNTesterApp - - basePath . / dist / win32 / dev " , <nl> " start " : " react - native start " , <nl> " validate - overrides " : " react - native - platform - override validate " <nl> } , <nl> <nl> " ws " : " ^ 6 . 1 . 4 " <nl> } , <nl> " devDependencies " : { <nl> - " @ office - iss / rex - win32 " : " 0 . 62 . 7 - tenantreactnativewin - 13222 " , <nl> " @ rnw - scripts / eslint - config " : " 0 . 1 . 4 " , <nl> " @ types / es6 - collections " : " ^ 0 . 5 . 29 " , <nl> " @ types / es6 - promise " : " 0 . 0 . 32 " , <nl> mmm a / yarn . lock <nl> ppp b / yarn . lock <nl> <nl> universal - user - agent " ^ 3 . 0 . 0 " <nl> url - template " ^ 2 . 0 . 8 " <nl> <nl> - " @ office - iss / rex - win32 @ 0 . 62 . 7 - tenantreactnativewin - 13222 " : <nl> - version " 0 . 62 . 7 - tenantreactnativewin - 13222 " <nl> - resolved " https : / / registry . yarnpkg . com / @ office - iss / rex - win32 / - / rex - win32 - 0 . 62 . 7 - tenantreactnativewin - 13222 . tgz # 32bf141552588e7e71d87a8276aa67f4ebdb0017 " <nl> - integrity sha512 - D15B9H9e96C9aovfuC3V9A4Wy1fwLf8R5nEDcpn3nObdePT1Zwm02 / yvrND6Jpmf3bcxHPikKcCwF6d1TQI31A = = <nl> - dependencies : <nl> - command - line - args " ^ 5 . 0 . 2 " <nl> - command - line - usage " ^ 5 . 0 . 5 " <nl> - <nl> " @ react - native - community / cli - debugger - ui @ ^ 4 . 9 . 0 " : <nl> version " 4 . 9 . 0 " <nl> resolved " https : / / registry . yarnpkg . com / @ react - native - community / cli - debugger - ui / - / cli - debugger - ui - 4 . 9 . 0 . 
tgz # 4177764ba69243c97aa26829d59d9501acb2bd71 " <nl> arr - union @ ^ 3 . 1 . 0 : <nl> resolved " https : / / registry . yarnpkg . com / arr - union / - / arr - union - 3 . 1 . 0 . tgz # e39b09aea9def866a8f206e288af63919bae39c4 " <nl> integrity sha1 - 45sJrqne + Gao8gbiiK9jkZuuOcQ = <nl> <nl> - array - back @ ^ 2 . 0 . 0 : <nl> - version " 2 . 0 . 0 " <nl> - resolved " https : / / registry . yarnpkg . com / array - back / - / array - back - 2 . 0 . 0 . tgz # 6877471d51ecc9c9bfa6136fb6c7d5fe69748022 " <nl> - integrity sha512 - eJv4pLLufP3g5kcZry0j6WXpIbzYw9GUB4mVJZno9wfwiBxbizTnHCw3VJb07cBihbFX48Y7oSrW9y + gt4glyw = = <nl> - dependencies : <nl> - typical " ^ 2 . 6 . 1 " <nl> - <nl> - array - back @ ^ 3 . 0 . 1 : <nl> - version " 3 . 1 . 0 " <nl> - resolved " https : / / registry . yarnpkg . com / array - back / - / array - back - 3 . 1 . 0 . tgz # b8859d7a508871c9a7b2cf42f99428f65e96bfb0 " <nl> - integrity sha512 - TkuxA4UCOvxuDK6NZYXCalszEzj + TLszyASooky + i742l9TqsOdYCMJJupxRic61hwquNtppB3hgcuq9SVSH1Q = = <nl> - <nl> array - differ @ ^ 2 . 0 . 3 : <nl> version " 2 . 1 . 0 " <nl> resolved " https : / / registry . yarnpkg . com / array - differ / - / array - differ - 2 . 1 . 0 . tgz # 4b9c1c3f14b906757082925769e8ab904f4801b1 " <nl> command - exists @ ^ 1 . 2 . 8 : <nl> resolved " https : / / registry . yarnpkg . com / command - exists / - / command - exists - 1 . 2 . 8 . tgz # 715acefdd1223b9c9b37110a149c6392c2852291 " <nl> integrity sha512 - PM54PkseWbiiD / mMsbvW351 / u + dafwTJ0ye2qB60G1aGQP9j3xK2gmMDc + R34L3nDtx4qMCitXT75mkbkGJDLw = = <nl> <nl> - command - line - args @ ^ 5 . 0 . 2 : <nl> - version " 5 . 1 . 1 " <nl> - resolved " https : / / registry . yarnpkg . com / command - line - args / - / command - line - args - 5 . 1 . 1 . tgz # 88e793e5bb3ceb30754a86863f0401ac92fd369a " <nl> - integrity sha512 - hL / eG8lrll1Qy1ezvkant + trihbGnaKaeEjj6Scyr3DN + RC7iQ5Rz84IeLERfAWDGo0HBSNAakczwgCilDXnWg = = <nl> - dependencies : <nl> - array - back " ^ 3 . 0 . 1 " <nl> - find - replace " ^ 3 . 0 . 0 " <nl> - lodash . camelcase " ^ 4 . 3 . 0 " <nl> - typical " ^ 4 . 0 . 0 " <nl> - <nl> - command - line - usage @ ^ 5 . 0 . 5 : <nl> - version " 5 . 0 . 5 " <nl> - resolved " https : / / registry . yarnpkg . com / command - line - usage / - / command - line - usage - 5 . 0 . 5 . tgz # 5f25933ffe6dedd983c635d38a21d7e623fda357 " <nl> - integrity sha512 - d8NrGylA5oCXSbGoKz05FkehDAzSmIm4K03S5VDh4d5lZAtTWfc3D1RuETtuQCn8129nYfJfDdF7P / lwcz1BlA = = <nl> - dependencies : <nl> - array - back " ^ 2 . 0 . 0 " <nl> - chalk " ^ 2 . 4 . 1 " <nl> - table - layout " ^ 0 . 4 . 3 " <nl> - typical " ^ 2 . 6 . 1 " <nl> - <nl> commander @ ^ 2 . 19 . 0 , commander @ ^ 2 . 7 . 1 , commander @ ~ 2 . 20 . 3 : <nl> version " 2 . 20 . 3 " <nl> resolved " https : / / registry . yarnpkg . com / commander / - / commander - 2 . 20 . 3 . tgz # fd485e84c03eb4881c20722ba48035e8531aeb33 " <nl> deep - eql @ ^ 3 . 0 . 1 : <nl> dependencies : <nl> type - detect " ^ 4 . 0 . 0 " <nl> <nl> - deep - extend @ ^ 0 . 6 . 0 , deep - extend @ ~ 0 . 6 . 0 : <nl> + deep - extend @ ^ 0 . 6 . 0 : <nl> version " 0 . 6 . 0 " <nl> resolved " https : / / registry . yarnpkg . com / deep - extend / - / deep - extend - 0 . 6 . 0 . tgz # c4fa7c95404a17a9c3e8ca7e1537312b736330ac " <nl> integrity sha512 - LOHxIOaPYdHlJRtCQfDIVZtfw / ufM8 + rVj649RIHzcm / vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA = = <nl> find - cache - dir @ ^ 2 . 0 . 0 : <nl> make - dir " ^ 2 . 0 . 0 " <nl> pkg - dir " ^ 3 . 0 . 0 " <nl> <nl> - find - replace @ ^ 3 . 0 . 
0 : <nl> - version " 3 . 0 . 0 " <nl> - resolved " https : / / registry . yarnpkg . com / find - replace / - / find - replace - 3 . 0 . 0 . tgz # 3e7e23d3b05167a76f770c9fbd5258b0def68c38 " <nl> - integrity sha512 - 6Tb2myMioCAgv5kfvP5 / PkZZ / ntTpVK39fHY7WkWBgvbeE + VHd / tZuZ4mrC + bxh4cfOZeYKVPaJIZtZXV7GNCQ = = <nl> - dependencies : <nl> - array - back " ^ 3 . 0 . 1 " <nl> - <nl> find - root @ ^ 1 . 1 . 0 : <nl> version " 1 . 1 . 0 " <nl> resolved " https : / / registry . yarnpkg . com / find - root / - / find - root - 1 . 1 . 0 . tgz # abcfc8ba76f708c42a97b3d685b7e9450bfb9ce4 " <nl> lodash . _reinterpolate @ ^ 3 . 0 . 0 : <nl> resolved " https : / / registry . yarnpkg . com / lodash . _reinterpolate / - / lodash . _reinterpolate - 3 . 0 . 0 . tgz # 0ccf2d89166af03b3663c796538b75ac6e114d9d " <nl> integrity sha1 - DM8tiRZq8Ds2Y8eWU4t1rG4RTZ0 = <nl> <nl> - lodash . camelcase @ ^ 4 . 3 . 0 : <nl> - version " 4 . 3 . 0 " <nl> - resolved " https : / / registry . yarnpkg . com / lodash . camelcase / - / lodash . camelcase - 4 . 3 . 0 . tgz # b28aa6288a2b9fc651035c7711f65ab6190331a6 " <nl> - integrity sha1 - soqmKIorn8ZRA1x3EfZathkDMaY = <nl> - <nl> lodash . clonedeep @ ^ 4 . 5 . 0 : <nl> version " 4 . 5 . 0 " <nl> resolved " https : / / registry . yarnpkg . com / lodash . clonedeep / - / lodash . clonedeep - 4 . 5 . 0 . tgz # e23f3f9c4f8fbdde872529c1071857a086e5ccef " <nl> lodash . merge @ ^ 4 . 6 . 1 : <nl> resolved " https : / / registry . yarnpkg . com / lodash . merge / - / lodash . merge - 4 . 6 . 2 . tgz # 558aa53b43b661e1925a0afdfa36a9a1085fe57a " <nl> integrity sha512 - 0KpjqXRVvrYyCsX1swR / XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4 + KlKj8YS0ZUCtRT / YUuhyYDujIQ = = <nl> <nl> - lodash . padend @ ^ 4 . 6 . 1 : <nl> - version " 4 . 6 . 1 " <nl> - resolved " https : / / registry . yarnpkg . com / lodash . padend / - / lodash . padend - 4 . 6 . 1 . tgz # 53ccba047d06e158d311f45da625f4e49e6f166e " <nl> - integrity sha1 - U8y6BH0G4VjTEfRdpiX05J5vFm4 = <nl> - <nl> lodash . pickby @ ^ 4 . 6 . 0 : <nl> version " 4 . 6 . 0 " <nl> resolved " https : / / registry . yarnpkg . com / lodash . pickby / - / lodash . pickby - 4 . 6 . 0 . tgz # 7dea21d8c18d7703a27c704c15d3b84a67e33aff " <nl> redeyed @ ~ 2 . 1 . 0 : <nl> dependencies : <nl> esprima " ~ 4 . 0 . 0 " <nl> <nl> - reduce - flatten @ ^ 1 . 0 . 1 : <nl> - version " 1 . 0 . 1 " <nl> - resolved " https : / / registry . yarnpkg . com / reduce - flatten / - / reduce - flatten - 1 . 0 . 1 . tgz # 258c78efd153ddf93cb561237f61184f3696e327 " <nl> - integrity sha1 - JYx479FT3fk8tWEjf2EYTzaW4yc = <nl> - <nl> regenerate - unicode - properties @ ^ 8 . 2 . 0 : <nl> version " 8 . 2 . 0 " <nl> resolved " https : / / registry . yarnpkg . com / regenerate - unicode - properties / - / regenerate - unicode - properties - 8 . 2 . 0 . tgz # e5de7111d655e7ba60c057dbe9ff37c87e65cdec " <nl> symbol - tree @ ^ 3 . 2 . 4 : <nl> resolved " https : / / registry . yarnpkg . com / symbol - tree / - / symbol - tree - 3 . 2 . 4 . tgz # 430637d248ba77e078883951fb9aa0eed7c63fa2 " <nl> integrity sha512 - 9QNk5KwDF + Bvz + PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3 + XndOsUb + AQ9QhfzfCT2O + CNWT5Tw = = <nl> <nl> - table - layout @ ^ 0 . 4 . 3 : <nl> - version " 0 . 4 . 5 " <nl> - resolved " https : / / registry . yarnpkg . com / table - layout / - / table - layout - 0 . 4 . 5 . 
tgz # d906de6a25fa09c0c90d1d08ecd833ecedcb7378 " <nl> - integrity sha512 - zTvf0mcggrGeTe / 2jJ6ECkJHAQPIYEwDoqsiqBjI24mvRmQbInK5jq33fyypaCBxX08hMkfmdOqj6haT33EqWw = = <nl> - dependencies : <nl> - array - back " ^ 2 . 0 . 0 " <nl> - deep - extend " ~ 0 . 6 . 0 " <nl> - lodash . padend " ^ 4 . 6 . 1 " <nl> - typical " ^ 2 . 6 . 1 " <nl> - wordwrapjs " ^ 3 . 0 . 0 " <nl> - <nl> table @ ^ 5 . 2 . 3 : <nl> version " 5 . 4 . 6 " <nl> resolved " https : / / registry . yarnpkg . com / table / - / table - 5 . 4 . 6 . tgz # 1292d19500ce3f86053b05f0e8e7e4a3bb21079e " <nl> typescript @ ^ 3 . 5 . 3 , typescript @ ^ 3 . 8 . 3 , typescript @ ~ 3 . 9 . 7 : <nl> resolved " https : / / registry . yarnpkg . com / typescript / - / typescript - 3 . 9 . 7 . tgz # 98d600a5ebdc38f40cb277522f12dc800e9e25fa " <nl> integrity sha512 - BLbiRkiBzAwsjut4x / dsibSTB6yWpwT5qWmC2OfuCg3GgVQCSgMs4vEctYPhsaGtd0AeuuHMkjZ2h2WG8MSzRw = = <nl> <nl> - typical @ ^ 2 . 6 . 1 : <nl> - version " 2 . 6 . 1 " <nl> - resolved " https : / / registry . yarnpkg . com / typical / - / typical - 2 . 6 . 1 . tgz # 5c080e5d661cbbe38259d2e70a3c7253e873881d " <nl> - integrity sha1 - XAgOXWYcu + OCWdLnCjxyU + hziB0 = <nl> - <nl> - typical @ ^ 4 . 0 . 0 : <nl> - version " 4 . 0 . 0 " <nl> - resolved " https : / / registry . yarnpkg . com / typical / - / typical - 4 . 0 . 0 . tgz # cbeaff3b9d7ae1e2bbfaf5a4e6f11eccfde94fc4 " <nl> - integrity sha512 - VAH4IvQ7BDFYglMd7BPRDfLgxZZX4O4TFcRDA6EN5X7erNJJq + McIEp8np9aVtxrCJ6qx4GTYVfOWNjcqwZgRw = = <nl> - <nl> ua - parser - js @ ^ 0 . 7 . 18 : <nl> version " 0 . 7 . 20 " <nl> resolved " https : / / registry . yarnpkg . com / ua - parser - js / - / ua - parser - js - 0 . 7 . 20 . tgz # 7527178b82f6a62a0f243d1f94fd30e3e3c21098 " <nl> wordwrap @ ^ 1 . 0 . 0 : <nl> resolved " https : / / registry . yarnpkg . com / wordwrap / - / wordwrap - 1 . 0 . 0 . tgz # 27584810891456a4171c8d0226441ade90cbcaeb " <nl> integrity sha1 - J1hIEIkUVqQXHI0CJkQa3pDLyus = <nl> <nl> - wordwrapjs @ ^ 3 . 0 . 0 : <nl> - version " 3 . 0 . 0 " <nl> - resolved " https : / / registry . yarnpkg . com / wordwrapjs / - / wordwrapjs - 3 . 0 . 0 . tgz # c94c372894cadc6feb1a66bff64e1d9af92c5d1e " <nl> - integrity sha512 - mO8XtqyPvykVCsrwj5MlOVWvSnCdT + C + QVbm6blradR7JExAhbkZ7hZ9A + 9NUtwzSqrlUo9a67ws0EiILrvRpw = = <nl> - dependencies : <nl> - reduce - flatten " ^ 1 . 0 . 1 " <nl> - typical " ^ 2 . 6 . 1 " <nl> - <nl> wrap - ansi @ ^ 2 . 0 . 0 : <nl> version " 2 . 1 . 0 " <nl> resolved " https : / / registry . yarnpkg . com / wrap - ansi / - / wrap - ansi - 2 . 1 . 0 . tgz # d8fc3d284dd05794fe84973caecdd1cf824fdd85 " <nl> | Most people do not need rex , do not install it for everyone using the RNW repo ( ) | microsoft/react-native-windows | ccf67e7a36e760e5907660aa1393d7ab7f692bbc | 2020-10-07T18:28:02Z |
mmm a / src / video_core / shader / shader_ir . cpp <nl> ppp b / src / video_core / shader / shader_ir . cpp <nl> void ShaderIR : : SetInternalFlagsFromInteger ( NodeBlock & bb , Node value , bool sets_ <nl> Iterop ( bb , value ) ; <nl> break ; <nl> case 2 : / / Genral Purpose Node <nl> - if ( const auto gpr = std : : get_if < GprNode > ( value . get ( ) ) ) { <nl> + if ( const auto * gpr = std : : get_if < GprNode > ( value . get ( ) ) ) { <nl> LOG_DEBUG ( HW_GPU , " GprNode : index = { } " , gpr - > GetIndex ( ) ) ; <nl> Node zerop = Operation ( OperationCode : : LogicalIEqual , std : : move ( value ) , <nl> Immediate ( gpr - > GetIndex ( ) ) ) ; <nl> | Forgot to apply suggestion here as well | yuzu-emu/yuzu | 24c1bb3842324c681f2a718b7a004c7c6afe52a8 | 2020-09-25T03:58:51Z |
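The yuzu change above is a small style fix: the pointer returned by std::get_if is declared as const auto* rather than const auto. Below is a minimal, self-contained sketch of the same pattern; GprNode, ImmediateNode, and Node are invented stand-ins for the shader IR types, not the real yuzu classes.

#include <cstdio>
#include <variant>

// Invented stand-ins for the shader IR node types; the real yuzu classes differ.
struct GprNode {
    explicit GprNode(int index) : index_(index) {}
    int GetIndex() const { return index_; }
private:
    int index_;
};
struct ImmediateNode { int value; };

using Node = std::variant<GprNode, ImmediateNode>;

int main() {
    Node value{GprNode{2}};
    // std::get_if returns a pointer (nullptr when the variant holds another type).
    // Spelling the binding as const auto* makes that pointer-ness explicit, which
    // is the review suggestion applied in the commit above.
    if (const auto* gpr = std::get_if<GprNode>(&value)) {
        std::printf("GprNode: index=%d\n", gpr->GetIndex());
    }
    return 0;
}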
mmm a / include / swift / SIL / AbstractionPattern . h <nl> ppp b / include / swift / SIL / AbstractionPattern . h <nl> class AbstractionPattern { <nl> / / / The based abstraction pattern must be either opaque or based on <nl> / / / a Clang or Swift type . That is , it cannot be a tuple or an ObjC <nl> / / / method type . <nl> - static AbstractionPattern getOptional ( AbstractionPattern objectPattern , <nl> - OptionalTypeKind optionalKind ) ; <nl> + static AbstractionPattern getOptional ( AbstractionPattern objectPattern ) ; <nl> <nl> / / / Does this abstraction pattern have something that can be used as a key ? <nl> bool hasCachingKey ( ) const { <nl> mmm a / lib / SIL / AbstractionPattern . cpp <nl> ppp b / lib / SIL / AbstractionPattern . cpp <nl> AbstractionPattern : : getCurriedCFunctionAsMethod ( CanType origType , <nl> } <nl> <nl> AbstractionPattern <nl> - AbstractionPattern : : getOptional ( AbstractionPattern object , <nl> - OptionalTypeKind optionalKind ) { <nl> + AbstractionPattern : : getOptional ( AbstractionPattern object ) { <nl> switch ( object . getKind ( ) ) { <nl> case Kind : : Invalid : <nl> llvm_unreachable ( " querying invalid abstraction pattern ! " ) ; <nl> AbstractionPattern : : getOptional ( AbstractionPattern object , <nl> return AbstractionPattern : : getOpaque ( ) ; <nl> case Kind : : ClangType : <nl> return AbstractionPattern ( object . getGenericSignature ( ) , <nl> - OptionalType : : get ( optionalKind , object . getType ( ) ) <nl> + OptionalType : : get ( object . getType ( ) ) <nl> - > getCanonicalType ( ) , <nl> object . getClangType ( ) ) ; <nl> case Kind : : Type : <nl> return AbstractionPattern ( object . getGenericSignature ( ) , <nl> - OptionalType : : get ( optionalKind , object . getType ( ) ) <nl> + OptionalType : : get ( object . getType ( ) ) <nl> - > getCanonicalType ( ) ) ; <nl> case Kind : : Discard : <nl> return AbstractionPattern : : getDiscard ( object . getGenericSignature ( ) , <nl> - OptionalType : : get ( optionalKind , object . getType ( ) ) <nl> + OptionalType : : get ( object . getType ( ) ) <nl> - > getCanonicalType ( ) ) ; <nl> } <nl> llvm_unreachable ( " bad kind " ) ; <nl> mmm a / lib / SIL / SILFunctionType . cpp <nl> ppp b / lib / SIL / SILFunctionType . cpp <nl> static std : : pair < AbstractionPattern , CanType > updateResultTypeForForeignError ( <nl> substFormalResultType = <nl> OptionalType : : get ( substFormalResultType ) - > getCanonicalType ( ) ; <nl> origResultType = <nl> - AbstractionPattern : : getOptional ( origResultType , OTK_Optional ) ; <nl> + AbstractionPattern : : getOptional ( origResultType ) ; <nl> return { origResultType , substFormalResultType } ; <nl> <nl> / / These conventions don ' t require changes to the formal error type . <nl> mmm a / lib / Sema / DerivedConformanceCodingKey . cpp <nl> ppp b / lib / Sema / DerivedConformanceCodingKey . cpp <nl> ValueDecl * DerivedConformance : : deriveCodingKey ( TypeChecker & tc , <nl> } else if ( name = = C . Id_intValue ) { <nl> / / Synthesize ` var intValue : Int ? { get } ` <nl> auto intType = C . 
getIntDecl ( ) - > getDeclaredType ( ) ; <nl> - auto optionalIntType = OptionalType : : get ( OTK_Optional , intType ) ; <nl> + auto optionalIntType = OptionalType : : get ( intType ) ; <nl> <nl> auto synth = [ rawType , intType ] ( AbstractFunctionDecl * getterDecl ) { <nl> if ( rawType & & rawType - > isEqual ( intType ) ) { <nl> | Merge pull request from rudkx / remove - some - uses - of - two - param - form - of - optional - type - get | apple/swift | 3e16929e18c3d489f05628b0ecd4a08d933e4c3c | 2018-01-09T16:58:34Z |
mmm a / stdlib / core / Dictionary . swift <nl> ppp b / stdlib / core / Dictionary . swift <nl> struct Dictionary < KeyType : Hashable , ValueType > : Collection , <nl> } <nl> <nl> @ public func = = < KeyType : Equatable , ValueType : Equatable > ( <nl> - lhs : Dictionary < KeyType , ValueType > , <nl> - rhs : Dictionary < KeyType , ValueType > <nl> + lhs : [ KeyType : ValueType ] , <nl> + rhs : [ KeyType : ValueType ] <nl> ) - > Bool { <nl> switch ( lhs . _variantStorage , rhs . _variantStorage ) { <nl> case ( . Native ( let lhsNativeOwner ) , . Native ( let rhsNativeOwner ) ) : <nl> struct Dictionary < KeyType : Hashable , ValueType > : Collection , <nl> } <nl> <nl> @ public func ! = < KeyType : Equatable , ValueType : Equatable > ( <nl> - lhs : Dictionary < KeyType , ValueType > , <nl> - rhs : Dictionary < KeyType , ValueType > <nl> + lhs : [ KeyType : ValueType ] , <nl> + rhs : [ KeyType : ValueType ] <nl> ) - > Bool { <nl> return ! ( lhs = = rhs ) <nl> } <nl> mmm a / stdlib / core / Reflection . swift <nl> ppp b / stdlib / core / Reflection . swift <nl> func _getSummary < T > ( out : UnsafePointer < String > , <nl> inout targetStream : TargetStream <nl> ) - > T { <nl> var maxItemCounter = maxItems <nl> - var visitedItems = Dictionary < ObjectIdentifier , Int > ( ) <nl> + var visitedItems = [ ObjectIdentifier : Int ] ( ) <nl> _dumpWithMirror ( reflect ( x ) , name , indent , maxDepth , <nl> & maxItemCounter , & visitedItems , & targetStream ) <nl> return x <nl> func _getSummary < T > ( out : UnsafePointer < String > , <nl> func _dumpWithMirror < TargetStream : OutputStream > ( <nl> mirror : Mirror , name : String ? , indent : Int , maxDepth : Int , <nl> inout maxItemCounter : Int , <nl> - inout visitedItems : Dictionary < ObjectIdentifier , Int > , <nl> + inout visitedItems : [ ObjectIdentifier : Int ] , <nl> inout targetStream : TargetStream <nl> ) { <nl> if maxItemCounter < = 0 { return } <nl> mmm a / stdlib / objc / Foundation / Foundation . swift <nl> ppp b / stdlib / objc / Foundation / Foundation . swift <nl> extension NSDictionary : DictionaryLiteralConvertible { <nl> / / / The entry point for bridging ` NSDictionary ` to ` Dictionary ` . <nl> @ public func _convertNSDictionaryToDictionary < K : NSObject , V : AnyObject > ( <nl> d : NSDictionary <nl> - ) - > Dictionary < K , V > { <nl> - return Dictionary < K , V > ( _cocoaDictionary : reinterpretCast ( d ) ) <nl> + ) - > [ K : V ] { <nl> + return [ K : V ] ( _cocoaDictionary : reinterpretCast ( d ) ) <nl> } <nl> <nl> / / / The entry point for bridging ` Dictionary ` to ` NSDictionary ` . <nl> @ public func _convertDictionaryToNSDictionary < KeyType , ValueType > ( <nl> - d : Dictionary < KeyType , ValueType > <nl> + d : [ KeyType : ValueType ] <nl> ) - > NSDictionary { <nl> switch d . _variantStorage { <nl> case . Native ( let nativeOwner ) : <nl> extension Dictionary : _ConditionallyBridgedToObjectiveC { <nl> @ public static func bridgeFromObjectiveCConditional ( <nl> x : NSDictionary <nl> ) - > Dictionary ? { <nl> - let anyDict = x as Dictionary < NSObject , AnyObject > <nl> + let anyDict = x as [ NSObject : AnyObject ] <nl> if isBridgedVerbatimToObjectiveC ( KeyType . self ) & & <nl> isBridgedVerbatimToObjectiveC ( ValueType . self ) { <nl> return Swift . 
_dictionaryDownCastConditional ( anyDict ) <nl> extension Dictionary : _ConditionallyBridgedToObjectiveC { <nl> <nl> extension NSDictionary { <nl> @ conversion @ public <nl> - func __conversion ( ) - > Dictionary < NSObject , AnyObject > { <nl> + func __conversion ( ) - > [ NSObject : AnyObject ] { <nl> return _convertNSDictionaryToDictionary ( reinterpretCast ( self ) ) <nl> } <nl> } <nl> extension Dictionary { <nl> <nl> extension NSDictionary : Reflectable { <nl> @ public func getMirror ( ) - > Mirror { <nl> - let dict : Dictionary < NSObject , AnyObject > = _convertNSDictionaryToDictionary ( self ) <nl> + let dict : [ NSObject : AnyObject ] = _convertNSDictionaryToDictionary ( self ) <nl> return reflect ( dict ) <nl> } <nl> } <nl> mmm a / stdlib / objc / Foundation / NSStringAPI . swift <nl> ppp b / stdlib / objc / Foundation / NSStringAPI . swift <nl> extension String { <nl> / / / Returns a dictionary object initialized with the keys and <nl> / / / values found in the ` String ` . <nl> @ public <nl> - func propertyListFromStringsFileFormat ( ) - > Dictionary < String , String > { <nl> - return _ns . propertyListFromStringsFileFormat ( ) as Dictionary < String , String > <nl> + func propertyListFromStringsFileFormat ( ) - > [ String : String ] { <nl> + return _ns . propertyListFromStringsFileFormat ( ) as [ String : String ] <nl> } <nl> <nl> / / - ( NSRange ) rangeOfCharacterFromSet : ( NSCharacterSet * ) aSet <nl> | Use dictionary type sugar in the standard library . | apple/swift | bea1d3d9b38a2e1f58075c9614b51a0c3e7f8ca9 | 2014-06-26T22:26:58Z |
mmm a / test / posix - mock - test . cc <nl> ppp b / test / posix - mock - test . cc <nl> <nl> # define _CRT_SECURE_NO_WARNINGS <nl> <nl> # include " posix - mock . h " <nl> - # include < cppformat / posix . cc > <nl> + # include " cppformat / posix . cc " <nl> <nl> # include < errno . h > <nl> # include < fcntl . h > <nl> | Use quotes for local includes | fmtlib/fmt | 220bb764e501deff534636dfa2b022dac6d1b9dd | 2016-02-04T16:08:33Z |
mmm a / CMakeLists . txt <nl> ppp b / CMakeLists . txt <nl> include_directories ( <nl> $ { CMAKE_CURRENT_SOURCE_DIR } / external / glfw3 / include / $ { PLATFORM_FOLDER } <nl> $ { CMAKE_CURRENT_SOURCE_DIR } / external / freetype2 / include / $ { PLATFORM_FOLDER } <nl> $ { CMAKE_CURRENT_SOURCE_DIR } / external / websockets / include / $ { PLATFORM_FOLDER } <nl> + $ { CMAKE_CURRENT_SOURCE_DIR } / external / xxhash <nl> ) <nl> <nl> if ( WIN32 ) <nl> add_subdirectory ( external / unzip ) <nl> # tinyxml2 library <nl> add_subdirectory ( external / tinyxml2 ) <nl> <nl> + # xxhas library <nl> + add_subdirectory ( external / xxhash ) <nl> + <nl> # audio <nl> add_subdirectory ( cocos / audio ) <nl> <nl> new file mode 100644 <nl> index 000000000000 . . 18f6cada0af6 <nl> mmm / dev / null <nl> ppp b / external / xxhash / CMakeLists . txt <nl> <nl> + set ( XXHASH <nl> + xxhash . c <nl> + ) <nl> + <nl> + add_library ( xxhash STATIC <nl> + $ { XXHASH } <nl> + ) <nl> + <nl> + set_target_properties ( xxhash <nl> + PROPERTIES <nl> + ARCHIVE_OUTPUT_DIRECTORY " $ { CMAKE_BINARY_DIR } / lib " <nl> + LIBRARY_OUTPUT_DIRECTORY " $ { CMAKE_BINARY_DIR } / lib " <nl> + ) <nl> + <nl> | add xxhash on linux | cocos2d/cocos2d-x | 66440ac33505f74e36c5276fadc6afdc53c065b8 | 2014-03-31T07:43:35Z |
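The CMake addition above only builds xxhash as a static library and puts external/xxhash on the include path. A hedged usage sketch follows, assuming xxhash.h is reachable on that include path and the resulting xxhash library is linked; the printed checksum is just a demo, not something cocos2d-x does verbatim.

#include <cstdio>
#include <cstring>

#include "xxhash.h"  // from external/xxhash; link against the static library built above

int main() {
    const char data[] = "cocos2d-x";
    const unsigned seed = 0;
    // XXH32 computes a fast non-cryptographic 32-bit hash of an arbitrary buffer.
    const unsigned hash = XXH32(data, std::strlen(data), seed);
    std::printf("XXH32(\"%s\") = 0x%08x\n", data, hash);
    return 0;
}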
mmm a / xbmc / osx / ios / IOSKeyboardView . mm <nl> ppp b / xbmc / osx / ios / IOSKeyboardView . mm <nl> - ( void ) keyboardWillShow : ( NSNotification * ) notification { <nl> NSDictionary * info = [ notification userInfo ] ; <nl> CGRect kbRect = [ [ info objectForKey : UIKeyboardFrameEndUserInfoKey ] CGRectValue ] ; <nl> # if ! __IPHONE_8_0 <nl> - if ( GetIOSVersion ( ) > = 8 . 0 ) <nl> + if ( CDarwinUtils : : GetIOSVersion ( ) > = 8 . 0 ) <nl> kbRect = [ self convertRect : kbRect fromView : nil ] ; <nl> # endif <nl> LOG ( @ " keyboardWillShow : keyboard frame : % @ " , NSStringFromCGRect ( kbRect ) ) ; <nl> | [ ios ] - fix compile ( stupid backport error - yeah i will continue using jenkins i promise . . . ) | xbmc/xbmc | 16af3121388e97aec7154e63314f488288474937 | 2014-10-26T21:24:30Z |
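The one-line iOS fix above qualifies the call with its owning class, CDarwinUtils::GetIOSVersion(), instead of the free function that no longer exists after the backport. A schematic reproduction with an invented method body (the real helper reads the runtime system version):

#include <cstdio>

// Invented body for the sketch; the real CDarwinUtils queries the OS.
class CDarwinUtils {
public:
    static float GetIOSVersion() { return 8.0f; }
};

int main() {
    // The fix simply qualifies the call with its owning class, matching the
    // keyboard-rect conversion guard in IOSKeyboardView.mm.
    if (CDarwinUtils::GetIOSVersion() >= 8.0f) {
        std::puts("convert keyboard rect from window coordinates");
    }
    return 0;
}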
mmm a / atom / browser / common_web_contents_delegate . cc <nl> ppp b / atom / browser / common_web_contents_delegate . cc <nl> std : : set < std : : string > GetAddedFileSystemPaths ( <nl> return result ; <nl> } <nl> <nl> + bool IsDevToolsFileSystemAdded ( <nl> + content : : WebContents * web_contents , <nl> + const std : : string & file_system_path ) { <nl> + auto file_system_paths = GetAddedFileSystemPaths ( web_contents ) ; <nl> + return file_system_paths . find ( file_system_path ) ! = file_system_paths . end ( ) ; <nl> + } <nl> + <nl> content : : SecurityStyle SecurityLevelToSecurityStyle ( <nl> SecurityStateModel : : SecurityLevel security_level ) { <nl> switch ( security_level ) { <nl> content : : SecurityStyle SecurityLevelToSecurityStyle ( <nl> <nl> CommonWebContentsDelegate : : CommonWebContentsDelegate ( ) <nl> : html_fullscreen_ ( false ) , <nl> - native_fullscreen_ ( false ) { <nl> + native_fullscreen_ ( false ) , <nl> + devtools_file_system_indexer_ ( new DevToolsFileSystemIndexer ) { <nl> } <nl> <nl> CommonWebContentsDelegate : : ~ CommonWebContentsDelegate ( ) { <nl> void CommonWebContentsDelegate : : DevToolsAddFileSystem ( <nl> <nl> std : : string file_system_id = RegisterFileSystem ( GetDevToolsWebContents ( ) , <nl> path ) ; <nl> - auto file_system_paths = GetAddedFileSystemPaths ( GetDevToolsWebContents ( ) ) ; <nl> - if ( file_system_paths . find ( path . AsUTF8Unsafe ( ) ) ! = file_system_paths . end ( ) ) <nl> + if ( IsDevToolsFileSystemAdded ( GetDevToolsWebContents ( ) , path . AsUTF8Unsafe ( ) ) ) <nl> return ; <nl> <nl> FileSystem file_system = CreateFileSystemStruct ( GetDevToolsWebContents ( ) , <nl> void CommonWebContentsDelegate : : DevToolsRemoveFileSystem ( <nl> nullptr , nullptr ) ; <nl> } <nl> <nl> + void CommonWebContentsDelegate : : DevToolsIndexPath ( <nl> + int request_id , <nl> + const std : : string & file_system_path ) { <nl> + if ( ! IsDevToolsFileSystemAdded ( GetDevToolsWebContents ( ) , file_system_path ) ) { <nl> + OnDevToolsIndexingDone ( request_id , file_system_path ) ; <nl> + return ; <nl> + } <nl> + if ( devtools_indexing_jobs_ . count ( request_id ) ! = 0 ) <nl> + return ; <nl> + devtools_indexing_jobs_ [ request_id ] = <nl> + scoped_refptr < DevToolsFileSystemIndexer : : FileSystemIndexingJob > ( <nl> + devtools_file_system_indexer_ - > IndexPath ( <nl> + file_system_path , <nl> + base : : Bind ( <nl> + & CommonWebContentsDelegate : : OnDevToolsIndexingWorkCalculated , <nl> + base : : Unretained ( this ) , <nl> + request_id , <nl> + file_system_path ) , <nl> + base : : Bind ( & CommonWebContentsDelegate : : OnDevToolsIndexingWorked , <nl> + base : : Unretained ( this ) , <nl> + request_id , <nl> + file_system_path ) , <nl> + base : : Bind ( & CommonWebContentsDelegate : : OnDevToolsIndexingDone , <nl> + base : : Unretained ( this ) , <nl> + request_id , <nl> + file_system_path ) ) ) ; <nl> + } <nl> + <nl> + void CommonWebContentsDelegate : : DevToolsStopIndexing ( int request_id ) { <nl> + auto it = devtools_indexing_jobs_ . find ( request_id ) ; <nl> + if ( it = = devtools_indexing_jobs_ . end ( ) ) <nl> + return ; <nl> + it - > second - > Stop ( ) ; <nl> + devtools_indexing_jobs_ . erase ( it ) ; <nl> + } <nl> + <nl> + void CommonWebContentsDelegate : : DevToolsSearchInPath ( <nl> + int request_id , <nl> + const std : : string & file_system_path , <nl> + const std : : string & query ) { <nl> + if ( ! 
IsDevToolsFileSystemAdded ( GetDevToolsWebContents ( ) , file_system_path ) ) { <nl> + OnDevToolsSearchCompleted ( request_id , <nl> + file_system_path , <nl> + std : : vector < std : : string > ( ) ) ; <nl> + return ; <nl> + } <nl> + devtools_file_system_indexer_ - > SearchInPath ( <nl> + file_system_path , <nl> + query , <nl> + base : : Bind ( & CommonWebContentsDelegate : : OnDevToolsSearchCompleted , <nl> + base : : Unretained ( this ) , <nl> + request_id , <nl> + file_system_path ) ) ; <nl> + } <nl> + <nl> void CommonWebContentsDelegate : : OnDevToolsSaveToFile ( <nl> const std : : string & url ) { <nl> / / Notify DevTools . <nl> void CommonWebContentsDelegate : : OnDevToolsAppendToFile ( <nl> " DevToolsAPI . appendedToURL " , & url_value , nullptr , nullptr ) ; <nl> } <nl> <nl> + void CommonWebContentsDelegate : : OnDevToolsIndexingWorkCalculated ( <nl> + int request_id , <nl> + const std : : string & file_system_path , <nl> + int total_work ) { <nl> + base : : FundamentalValue request_id_value ( request_id ) ; <nl> + base : : StringValue file_system_path_value ( file_system_path ) ; <nl> + base : : FundamentalValue total_work_value ( total_work ) ; <nl> + web_contents_ - > CallClientFunction ( " DevToolsAPI . indexingTotalWorkCalculated " , <nl> + & request_id_value , <nl> + & file_system_path_value , <nl> + & total_work_value ) ; <nl> + } <nl> + <nl> + void CommonWebContentsDelegate : : OnDevToolsIndexingWorked ( <nl> + int request_id , <nl> + const std : : string & file_system_path , <nl> + int worked ) { <nl> + base : : FundamentalValue request_id_value ( request_id ) ; <nl> + base : : StringValue file_system_path_value ( file_system_path ) ; <nl> + base : : FundamentalValue worked_value ( worked ) ; <nl> + web_contents_ - > CallClientFunction ( " DevToolsAPI . indexingWorked " , <nl> + & request_id_value , <nl> + & file_system_path_value , <nl> + & worked_value ) ; <nl> + } <nl> + <nl> + void CommonWebContentsDelegate : : OnDevToolsIndexingDone ( <nl> + int request_id , <nl> + const std : : string & file_system_path ) { <nl> + devtools_indexing_jobs_ . erase ( request_id ) ; <nl> + base : : FundamentalValue request_id_value ( request_id ) ; <nl> + base : : StringValue file_system_path_value ( file_system_path ) ; <nl> + web_contents_ - > CallClientFunction ( " DevToolsAPI . indexingDone " , <nl> + & request_id_value , <nl> + & file_system_path_value , <nl> + nullptr ) ; <nl> + } <nl> + <nl> + void CommonWebContentsDelegate : : OnDevToolsSearchCompleted ( <nl> + int request_id , <nl> + const std : : string & file_system_path , <nl> + const std : : vector < std : : string > & file_paths ) { <nl> + base : : ListValue file_paths_value ; <nl> + for ( std : : vector < std : : string > : : const_iterator it ( file_paths . begin ( ) ) ; <nl> + it ! = file_paths . end ( ) ; + + it ) { <nl> + file_paths_value . AppendString ( * it ) ; <nl> + } <nl> + base : : FundamentalValue request_id_value ( request_id ) ; <nl> + base : : StringValue file_system_path_value ( file_system_path ) ; <nl> + web_contents_ - > CallClientFunction ( " DevToolsAPI . searchCompleted " , <nl> + & request_id_value , <nl> + & file_system_path_value , <nl> + & file_paths_value ) ; <nl> + } <nl> + <nl> # if defined ( TOOLKIT_VIEWS ) <nl> gfx : : ImageSkia CommonWebContentsDelegate : : GetDevToolsWindowIcon ( ) { <nl> if ( ! owner_window ( ) ) <nl> mmm a / atom / browser / common_web_contents_delegate . h <nl> ppp b / atom / browser / common_web_contents_delegate . 
h <nl> <nl> # include " brightray / browser / inspectable_web_contents_impl . h " <nl> # include " brightray / browser / inspectable_web_contents_delegate . h " <nl> # include " brightray / browser / inspectable_web_contents_view_delegate . h " <nl> + # include " brightray / browser / devtools_file_system_indexer . h " <nl> # include " content / public / browser / web_contents_delegate . h " <nl> <nl> + using brightray : : DevToolsFileSystemIndexer ; <nl> + <nl> namespace atom { <nl> <nl> class AtomJavaScriptDialogManager ; <nl> class CommonWebContentsDelegate <nl> void DevToolsAddFileSystem ( const base : : FilePath & path ) override ; <nl> void DevToolsRemoveFileSystem ( <nl> const base : : FilePath & file_system_path ) override ; <nl> + void DevToolsIndexPath ( int request_id , <nl> + const std : : string & file_system_path ) override ; <nl> + void DevToolsStopIndexing ( int request_id ) override ; <nl> + void DevToolsSearchInPath ( int request_id , <nl> + const std : : string & file_system_path , <nl> + const std : : string & query ) override ; <nl> <nl> / / brightray : : InspectableWebContentsViewDelegate : <nl> # if defined ( TOOLKIT_VIEWS ) <nl> class CommonWebContentsDelegate <nl> / / Callback for when DevToolsAppendToFile has completed . <nl> void OnDevToolsAppendToFile ( const std : : string & url ) ; <nl> <nl> + / / <nl> + void OnDevToolsIndexingWorkCalculated ( int request_id , <nl> + const std : : string & file_system_path , <nl> + int total_work ) ; <nl> + void OnDevToolsIndexingWorked ( int request_id , <nl> + const std : : string & file_system_path , <nl> + int worked ) ; <nl> + void OnDevToolsIndexingDone ( int request_id , <nl> + const std : : string & file_system_path ) ; <nl> + void OnDevToolsSearchCompleted ( int request_id , <nl> + const std : : string & file_system_path , <nl> + const std : : vector < std : : string > & file_paths ) ; <nl> + <nl> / / Set fullscreen mode triggered by html api . <nl> void SetHtmlApiFullscreen ( bool enter_fullscreen ) ; <nl> <nl> class CommonWebContentsDelegate <nl> <nl> scoped_ptr < WebDialogHelper > web_dialog_helper_ ; <nl> scoped_ptr < AtomJavaScriptDialogManager > dialog_manager_ ; <nl> + scoped_refptr < DevToolsFileSystemIndexer > devtools_file_system_indexer_ ; <nl> <nl> / / The stored InspectableWebContents object . <nl> / / Notice that web_contents_ must be placed after dialog_manager_ , so we can <nl> class CommonWebContentsDelegate <nl> typedef std : : map < std : : string , base : : FilePath > PathsMap ; <nl> PathsMap saved_files_ ; <nl> <nl> + / / Map id to index job , used for file system indexing requests from devtools . <nl> + typedef std : : map < <nl> + int , <nl> + scoped_refptr < DevToolsFileSystemIndexer : : FileSystemIndexingJob > > <nl> + DevToolsIndexingJobsMap ; <nl> + DevToolsIndexingJobsMap devtools_indexing_jobs_ ; <nl> + <nl> DISALLOW_COPY_AND_ASSIGN ( CommonWebContentsDelegate ) ; <nl> } ; <nl> <nl> | devtools : handle file system indexing requests | electron/electron | 53ac79cb19850005f64c2f42a42e8f62bb9b393f | 2016-05-06T23:02:54Z |
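The electron patch above wires three progress callbacks (total work calculated, worked, done) from brightray's DevToolsFileSystemIndexer back to the DevTools frontend and keeps a request-id-to-job map so DevToolsStopIndexing can cancel a job. The sketch below reproduces only that control flow: ToyIndexer and ToyDelegate are invented names, std::function stands in for the base::Bind machinery used in the real patch, and the toy indexer runs synchronously, unlike the real one.

#include <cstdio>
#include <functional>
#include <map>
#include <memory>
#include <string>

// Toy stand-in for DevToolsFileSystemIndexer: it "indexes" a path and reports
// progress through the same three callbacks the patch registers.
class ToyIndexer {
public:
    struct Job {
        void Stop() { stopped = true; }
        bool stopped = false;
    };

    std::shared_ptr<Job> IndexPath(const std::string& path,
                                   const std::function<void(int)>& total_cb,
                                   const std::function<void(int)>& worked_cb,
                                   const std::function<void()>& done_cb) {
        (void)path;
        auto job = std::make_shared<Job>();
        const int total = 3;  // pretend the path contains three files
        total_cb(total);
        for (int i = 0; i < total && !job->stopped; ++i)
            worked_cb(1);
        done_cb();
        return job;
    }
};

// Mirrors the delegate's bookkeeping: at most one outstanding job per request id.
// The real delegate also erases the map entry again in its "indexing done" callback.
class ToyDelegate {
public:
    void DevToolsIndexPath(int request_id, const std::string& path) {
        if (jobs_.count(request_id) != 0)
            return;  // an indexing job for this request is already running
        jobs_[request_id] = indexer_.IndexPath(
            path,
            [request_id](int total) { std::printf("req %d: total work %d\n", request_id, total); },
            [request_id](int n) { std::printf("req %d: worked %d\n", request_id, n); },
            [request_id]() { std::printf("req %d: indexing done\n", request_id); });
    }

    void DevToolsStopIndexing(int request_id) {
        auto it = jobs_.find(request_id);
        if (it == jobs_.end())
            return;
        it->second->Stop();
        jobs_.erase(it);
    }

private:
    ToyIndexer indexer_;
    std::map<int, std::shared_ptr<ToyIndexer::Job>> jobs_;
};

int main() {
    ToyDelegate delegate;
    delegate.DevToolsIndexPath(1, "/tmp/project");
    delegate.DevToolsStopIndexing(1);  // harmless here: the toy job already finished
    return 0;
}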
mmm a / lib / Sema / CSGen . cpp <nl> ppp b / lib / Sema / CSGen . cpp <nl> namespace { <nl> / / / type information with fresh type variables . <nl> / / / <nl> / / / \ param pattern The pattern . <nl> - Type getTypeForPattern ( Pattern * pattern , bool forFunctionParam , <nl> - ConstraintLocatorBuilder locator ) { <nl> + Type getTypeForPattern ( Pattern * pattern , ConstraintLocatorBuilder locator ) { <nl> switch ( pattern - > getKind ( ) ) { <nl> case PatternKind : : Paren : <nl> / / Parentheses don ' t affect the type . <nl> return getTypeForPattern ( cast < ParenPattern > ( pattern ) - > getSubPattern ( ) , <nl> - forFunctionParam , locator ) ; <nl> + locator ) ; <nl> case PatternKind : : Var : <nl> / / Var doesn ' t affect the type . <nl> return getTypeForPattern ( cast < VarPattern > ( pattern ) - > getSubPattern ( ) , <nl> - forFunctionParam , locator ) ; <nl> + locator ) ; <nl> case PatternKind : : Any : <nl> / / For a pattern of unknown type , create a new type variable . <nl> return CS . createTypeVariable ( CS . getConstraintLocator ( locator ) , <nl> namespace { <nl> <nl> / / For weak variables , use Optional < T > . <nl> if ( auto * OA = var - > getAttrs ( ) . getAttribute < OwnershipAttr > ( ) ) <nl> - if ( ! forFunctionParam & & OA - > get ( ) = = Ownership : : Weak ) { <nl> + if ( OA - > get ( ) = = Ownership : : Weak ) { <nl> ty = CS . getTypeChecker ( ) . getOptionalType ( var - > getLoc ( ) , ty ) ; <nl> if ( ! ty ) return Type ( ) ; <nl> } <nl> <nl> - / / We want to set the variable ' s type here when type - checking <nl> - / / a function ' s parameter clauses because we ' re going to <nl> - / / type - check the entire function body within the context of <nl> - / / the constraint system . In contrast , when type - checking a <nl> - / / variable binding , we really don ' t want to set the <nl> - / / variable ' s type because it can easily escape the constraint <nl> - / / system and become a dangling type reference . <nl> - if ( forFunctionParam ) <nl> - var - > overwriteType ( ty ) ; <nl> return ty ; <nl> } <nl> <nl> namespace { <nl> tupleTypeElts . reserve ( tuplePat - > getNumElements ( ) ) ; <nl> for ( unsigned i = 0 , e = tuplePat - > getNumElements ( ) ; i ! = e ; + + i ) { <nl> auto & tupleElt = tuplePat - > getElement ( i ) ; <nl> - Type eltTy = getTypeForPattern ( tupleElt . getPattern ( ) , forFunctionParam , <nl> + Type eltTy = getTypeForPattern ( tupleElt . getPattern ( ) , <nl> locator . withPathElement ( <nl> LocatorPathElt : : getTupleElement ( i ) ) ) ; <nl> tupleTypeElts . push_back ( TupleTypeElt ( eltTy , tupleElt . getLabel ( ) ) ) ; <nl> Expr * ConstraintSystem : : generateConstraintsShallow ( Expr * expr ) { <nl> Type ConstraintSystem : : generateConstraints ( Pattern * pattern , <nl> ConstraintLocatorBuilder locator ) { <nl> ConstraintGenerator cg ( * this ) ; <nl> - return cg . getTypeForPattern ( pattern , / * forFunctionParam * / false , locator ) ; <nl> + return cg . getTypeForPattern ( pattern , locator ) ; <nl> } <nl> <nl> void ConstraintSystem : : optimizeConstraints ( Expr * e ) { <nl> | remove the ' forFunctionParam ' from getTypeForPattern since it is always false now , NFC . | apple/swift | 84b3a2ecc152fd67621828d8724e97836791d205 | 2016-01-01T05:03:19Z |
new file mode 100644 <nl> index 00000000000 . . 894922c6c6b <nl> mmm / dev / null <nl> ppp b / 3rdParty / velocypack / include / velocypack / Basics . h <nl> <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief Library to build up VPack documents . <nl> + / / / <nl> + / / / DISCLAIMER <nl> + / / / <nl> + / / / Copyright 2015 ArangoDB GmbH , Cologne , Germany <nl> + / / / <nl> + / / / Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + / / / you may not use this file except in compliance with the License . <nl> + / / / You may obtain a copy of the License at <nl> + / / / <nl> + / / / http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + / / / <nl> + / / / Unless required by applicable law or agreed to in writing , software <nl> + / / / distributed under the License is distributed on an " AS IS " BASIS , <nl> + / / / WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + / / / See the License for the specific language governing permissions and <nl> + / / / limitations under the License . <nl> + / / / <nl> + / / / Copyright holder is ArangoDB GmbH , Cologne , Germany <nl> + / / / <nl> + / / / @ author Max Neunhoeffer <nl> + / / / @ author Jan Steemann <nl> + / / / @ author Copyright 2015 , ArangoDB GmbH , Cologne , Germany <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + # ifndef VELOCYPACK_BASICS_H <nl> + # define VELOCYPACK_BASICS_H 1 <nl> + <nl> + # include < cstring > <nl> + # include < new > <nl> + <nl> + namespace arangodb { <nl> + namespace velocypack { <nl> + <nl> + / / classes from Basics . h are for internal use only and are not exposed here <nl> + <nl> + / / prevent copying <nl> + class NonCopyable { <nl> + public : <nl> + NonCopyable ( ) = default ; <nl> + ~ NonCopyable ( ) = default ; <nl> + private : <nl> + NonCopyable ( NonCopyable const & ) = delete ; <nl> + NonCopyable & operator = ( NonCopyable const & ) = delete ; <nl> + } ; <nl> + <nl> + <nl> + # ifdef _WIN32 <nl> + / / turn off warnings about unimplemented exception specifications <nl> + # pragma warning ( push ) <nl> + # pragma warning ( disable : 4290 ) <nl> + # endif <nl> + <nl> + / / prevent heap allocation <nl> + struct NonHeapAllocatable { <nl> + void * operator new ( std : : size_t ) throw ( std : : bad_alloc ) = delete ; <nl> + void operator delete ( void * ) throw ( ) = delete ; <nl> + void * operator new [ ] ( std : : size_t ) throw ( std : : bad_alloc ) = delete ; <nl> + void operator delete [ ] ( void * ) throw ( ) = delete ; <nl> + } ; <nl> + <nl> + # ifdef _WIN32 <nl> + # pragma warning ( pop ) <nl> + # endif <nl> + <nl> + } / / namespace arangodb : : velocypack <nl> + } / / namespace arangodb <nl> + <nl> + # endif <nl> mmm a / 3rdParty / velocypack / include / velocypack / Builder . h <nl> ppp b / 3rdParty / velocypack / include / velocypack / Builder . h <nl> <nl> <nl> # include " velocypack / velocypack - common . h " <nl> # include " velocypack / AttributeTranslator . h " <nl> + # include " velocypack / Basics . h " <nl> # include " velocypack / Buffer . h " <nl> # include " velocypack / Exception . h " <nl> # include " velocypack / Options . 
h " <nl> struct BuilderNonDeleter { <nl> } <nl> } ; <nl> <nl> - / / convenience class scope guard for building objects <nl> struct BuilderContainer { <nl> BuilderContainer ( Builder * builder ) : builder ( builder ) { } <nl> <nl> struct BuilderContainer { <nl> Builder * builder ; <nl> } ; <nl> <nl> - struct ObjectBuilder final : public BuilderContainer , public NoHeapAllocation { <nl> + / / convenience class scope guard for building objects <nl> + struct ObjectBuilder final : public BuilderContainer , private NonHeapAllocatable , NonCopyable { <nl> ObjectBuilder ( Builder * builder , bool allowUnindexed = false ) : BuilderContainer ( builder ) { <nl> builder - > openObject ( allowUnindexed ) ; <nl> } <nl> struct ObjectBuilder final : public BuilderContainer , public NoHeapAllocation { <nl> } ; <nl> <nl> / / convenience class scope guard for building arrays <nl> - struct ArrayBuilder final : public BuilderContainer , public NoHeapAllocation { <nl> + struct ArrayBuilder final : public BuilderContainer , private NonHeapAllocatable , NonCopyable { <nl> ArrayBuilder ( Builder * builder , bool allowUnindexed = false ) : BuilderContainer ( builder ) { <nl> builder - > openArray ( allowUnindexed ) ; <nl> } <nl> mmm a / 3rdParty / velocypack / include / velocypack / velocypack - aliases . h <nl> ppp b / 3rdParty / velocypack / include / velocypack / velocypack - aliases . h <nl> using VPackValueLength = arangodb : : velocypack : : ValueLength ; <nl> / / conditional typedefs , only used when the respective headers are already <nl> / / included <nl> <nl> + / / note : <nl> + / / classes from Basics . h are for internal use only and are not exposed here <nl> + <nl> # ifdef VELOCYPACK_ITERATOR_H <nl> # ifndef VELOCYPACK_ALIAS_ITERATOR <nl> # define VELOCYPACK_ALIAS_ITERATOR <nl> using VPackArrayBuilder = arangodb : : velocypack : : ArrayBuilder ; <nl> # ifndef VELOCYPACK_ALIAS_BUFFER <nl> # define VELOCYPACK_ALIAS_BUFFER <nl> using VPackCharBuffer = arangodb : : velocypack : : CharBuffer ; <nl> + template < typename T > using VPackBuffer = arangodb : : velocypack : : Buffer < T > ; <nl> # endif <nl> # endif <nl> <nl> mmm a / 3rdParty / velocypack / include / velocypack / velocypack - common . h <nl> ppp b / 3rdParty / velocypack / include / velocypack / velocypack - common . h <nl> <nl> # include < cstdint > <nl> / / for size_t : <nl> # include < cstring > <nl> - # include < new > <nl> <nl> / / debug mode <nl> # ifdef VELOCYPACK_DEBUG <nl> static inline void storeUInt64 ( uint8_t * start , uint64_t value ) throw ( ) { <nl> } while ( start < end ) ; <nl> } <nl> <nl> - # ifdef _WIN32 <nl> - / / turn off warnings about unimplemented exception specifications <nl> - # pragma warning ( push ) <nl> - # pragma warning ( disable : 4290 ) <nl> - # endif <nl> - <nl> - struct NoHeapAllocation { <nl> - void * operator new ( std : : size_t ) throw ( std : : bad_alloc ) = delete ; <nl> - void operator delete ( void * ) throw ( ) = delete ; <nl> - void * operator new [ ] ( std : : size_t ) throw ( std : : bad_alloc ) = delete ; <nl> - void operator delete [ ] ( void * ) throw ( ) = delete ; <nl> - } ; <nl> - <nl> - # ifdef _WIN32 <nl> - # pragma warning ( pop ) <nl> - # endif <nl> - <nl> } / / namespace arangodb : : velocypack <nl> } / / namespace arangodb <nl> <nl> | updated vpack library | arangodb/arangodb | 83ee4ffdf58cf32028f05d800b8559303950e408 | 2015-12-14T12:18:13Z |
mmm a / ios / sdk / WeexSDK / Sources / Component / WXSliderComponent . m <nl> ppp b / ios / sdk / WeexSDK / Sources / Component / WXSliderComponent . m <nl> @ interface WXSliderComponent ( ) < WXSliderViewDelegate > <nl> @ property ( nonatomic , assign ) BOOL autoPlay ; <nl> @ property ( nonatomic , assign ) NSUInteger interval ; <nl> @ property ( nonatomic , assign ) NSInteger index ; <nl> - @ property ( nonatomic , assign ) CGFloat lastoffsetXRatio ; <nl> + @ property ( nonatomic , assign ) CGFloat lastOffsetXRatio ; <nl> @ property ( nonatomic , assign ) CGFloat offsetXAccuracy ; <nl> @ property ( nonatomic , assign ) BOOL sliderChangeEvent ; <nl> @ property ( nonatomic , assign ) BOOL sliderScrollEvent ; <nl> - ( instancetype ) initWithRef : ( NSString * ) ref type : ( NSString * ) type styles : ( NSDict <nl> _sliderScrollEvent = NO ; <nl> _interval = 3000 ; <nl> _childrenView = [ NSMutableArray new ] ; <nl> - _lastoffsetXRatio = 0 ; <nl> + _lastOffsetXRatio = 0 ; <nl> <nl> if ( attributes [ @ " autoPlay " ] ) { <nl> _autoPlay = [ attributes [ @ " autoPlay " ] boolValue ] ; <nl> - ( void ) sliderView : ( WXSliderView * ) sliderView sliderViewDidScroll : ( UIScrollView <nl> CGFloat width = scrollView . frame . size . width ; <nl> CGFloat XDeviation = scrollView . frame . origin . x - ( scrollView . contentOffset . x - width ) ; <nl> CGFloat offsetXRatio = ( XDeviation / width ) ; <nl> - if ( ABS ( offsetXRatio - _lastoffsetXRatio ) > = _offsetXAccuracy ) { <nl> - _lastoffsetXRatio = offsetXRatio ; <nl> + if ( ABS ( offsetXRatio - _lastOffsetXRatio ) > = _offsetXAccuracy ) { <nl> + _lastOffsetXRatio = offsetXRatio ; <nl> [ self fireEvent : @ " scroll " params : @ { @ " offsetXRatio " : [ NSNumber numberWithFloat : offsetXRatio ] } domChanges : nil ] ; <nl> } <nl> } <nl> | * [ ios ] fix : Change the lastoffsetXRatio to lastOffsetXRatio | apache/incubator-weex | de8f24648b40e66300cd409d99493f45e3bf834e | 2016-12-19T07:22:22Z |
mmm a / src / compiler / js - typed - lowering . cc <nl> ppp b / src / compiler / js - typed - lowering . cc <nl> JSTypedLowering : : JSTypedLowering ( Editor * editor , <nl> dependencies_ ( dependencies ) , <nl> flags_ ( flags ) , <nl> jsgraph_ ( jsgraph ) , <nl> - the_hole_type_ ( <nl> - Type : : HeapConstant ( factory ( ) - > the_hole_value ( ) , graph ( ) - > zone ( ) ) ) , <nl> type_cache_ ( TypeCache : : Get ( ) ) { <nl> for ( size_t k = 0 ; k < arraysize ( shifted_int32_ranges_ ) ; + + k ) { <nl> double min = kMinInt / ( 1 < < k ) ; <nl> Reduction JSTypedLowering : : ReduceJSStrictEqual ( Node * node , bool invert ) { <nl> Reduction const reduction = ReduceJSEqualTypeOf ( node , invert ) ; <nl> if ( reduction . Changed ( ) ) return reduction ; <nl> <nl> - if ( r . OneInputIs ( the_hole_type_ ) ) { <nl> - return r . ChangeToPureOperator ( simplified ( ) - > ReferenceEqual ( ) , invert ) ; <nl> - } <nl> - if ( r . OneInputIs ( Type : : Undefined ( ) ) ) { <nl> - return r . ChangeToPureOperator ( simplified ( ) - > ReferenceEqual ( ) , invert ) ; <nl> - } <nl> - if ( r . OneInputIs ( Type : : Null ( ) ) ) { <nl> - return r . ChangeToPureOperator ( simplified ( ) - > ReferenceEqual ( ) , invert ) ; <nl> - } <nl> - if ( r . OneInputIs ( Type : : Boolean ( ) ) ) { <nl> - return r . ChangeToPureOperator ( simplified ( ) - > ReferenceEqual ( ) , invert ) ; <nl> - } <nl> - if ( r . OneInputIs ( Type : : Object ( ) ) ) { <nl> - return r . ChangeToPureOperator ( simplified ( ) - > ReferenceEqual ( ) , invert ) ; <nl> - } <nl> - if ( r . OneInputIs ( Type : : Receiver ( ) ) ) { <nl> + if ( r . BothInputsAre ( Type : : Unique ( ) ) ) { <nl> return r . ChangeToPureOperator ( simplified ( ) - > ReferenceEqual ( ) , invert ) ; <nl> } <nl> - if ( r . BothInputsAre ( Type : : Unique ( ) ) ) { <nl> + if ( r . OneInputIs ( Type : : NonStringUniqueOrHole ( ) ) ) { <nl> return r . ChangeToPureOperator ( simplified ( ) - > ReferenceEqual ( ) , invert ) ; <nl> } <nl> if ( r . IsInternalizedStringCompareOperation ( ) ) { <nl> mmm a / src / compiler / js - typed - lowering . h <nl> ppp b / src / compiler / js - typed - lowering . h <nl> class V8_EXPORT_PRIVATE JSTypedLowering final <nl> Flags flags_ ; <nl> JSGraph * jsgraph_ ; <nl> Type * shifted_int32_ranges_ [ 4 ] ; <nl> - Type * const the_hole_type_ ; <nl> TypeCache const & type_cache_ ; <nl> } ; <nl> <nl> mmm a / src / compiler / types . h <nl> ppp b / src / compiler / types . h <nl> namespace compiler { <nl> V ( StringOrReceiver , kString | kReceiver ) \ <nl> V ( Unique , kBoolean | kUniqueName | kNull | kUndefined | \ <nl> kReceiver ) \ <nl> + V ( NonStringUniqueOrHole , kBoolean | kHole | kNull | kReceiver | \ <nl> + kSymbol | kUndefined ) \ <nl> V ( Internal , kHole | kExternalPointer | kOtherInternal ) \ <nl> V ( NonInternal , kPrimitive | kReceiver ) \ <nl> V ( NonNumber , kUnique | kString | kInternal ) \ <nl> | [ turbofan ] Optimize strict equality with unique input . | v8/v8 | b36b8395e81fa2f7709c27acec93e8ce26d5bc3a | 2017-01-09T06:40:23Z |
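The v8 change above folds six separate OneInputIs checks (hole, undefined, null, boolean, object, receiver) into one containment test against the new NonStringUniqueOrHole bitset, since any value of that type can be compared by reference equality. A stripped-down model of how such bitset types compose; the bit assignments and the Is helper are invented for the illustration and are far simpler than V8's real Type machinery.

#include <cstdint>
#include <cstdio>

// Each leaf type gets one bit; composite types are unions of bits.
enum TypeBits : uint32_t {
    kBoolean   = 1u << 0,
    kNull      = 1u << 1,
    kUndefined = 1u << 2,
    kSymbol    = 1u << 3,
    kReceiver  = 1u << 4,
    kHole      = 1u << 5,
    kString    = 1u << 6,
    kNumber    = 1u << 7,
};

constexpr uint32_t kNonStringUniqueOrHole =
    kBoolean | kHole | kNull | kReceiver | kSymbol | kUndefined;

// "type Is composite" <=> every bit of the type is contained in the composite set.
constexpr bool Is(uint32_t type, uint32_t composite) {
    return (type & ~composite) == 0;
}

int main() {
    std::printf("Null Is NonStringUniqueOrHole:           %d\n", Is(kNull, kNonStringUniqueOrHole));
    std::printf("Null|Undefined Is NonStringUniqueOrHole: %d\n",
                Is(kNull | kUndefined, kNonStringUniqueOrHole));
    std::printf("String Is NonStringUniqueOrHole:         %d\n", Is(kString, kNonStringUniqueOrHole));
    return 0;
}

With a representation like this, the lowering only needs a single subset check instead of one branch per constituent type, which is what the diff above achieves.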
mmm a / dlib / test / type_safe_union . cpp <nl> ppp b / dlib / test / type_safe_union . cpp <nl> namespace <nl> <nl> <nl> <nl> - f_val = 4 . 345 ; <nl> + f_val = 4 . 345f ; <nl> a . get < float > ( ) = f_val ; <nl> <nl> DLIB_CASSERT ( a . is_empty ( ) = = false , " " ) ; <nl> mmm a / dlib / type_safe_union / type_safe_union_kernel . h <nl> ppp b / dlib / type_safe_union / type_safe_union_kernel . h <nl> namespace dlib <nl> <nl> private : <nl> <nl> - <nl> - template < typename A , typename B > <nl> - struct max <nl> - { <nl> - const static size_t value = tmax < sizeof ( A ) , sizeof ( B ) > : : value ; <nl> - } ; <nl> - <nl> - template < typename A , typename B , typename C > <nl> - struct max < max < A , B > , C > <nl> - { <nl> - const static size_t value = tmax < max < A , B > : : value , sizeof ( C ) > : : value ; <nl> - } ; <nl> - <nl> - const static size_t max_size = max < max < max < max < max < max < max < max < max < T1 , T2 > , T3 > , T4 > , T5 > , T6 > , T7 > , T8 > , T9 > , T10 > : : value ; <nl> + const static size_t max_size = tmax < tmax < tmax < tmax < tmax < tmax < tmax < tmax < tmax < sizeof ( T1 ) , <nl> + sizeof ( T2 ) > : : value , <nl> + sizeof ( T3 ) > : : value , <nl> + sizeof ( T4 ) > : : value , <nl> + sizeof ( T5 ) > : : value , <nl> + sizeof ( T6 ) > : : value , <nl> + sizeof ( T7 ) > : : value , <nl> + sizeof ( T8 ) > : : value , <nl> + sizeof ( T9 ) > : : value , <nl> + sizeof ( T10 ) > : : value ; <nl> <nl> union mem_block <nl> { <nl> | Changed code to avoid a bug in visual studio 7 . 1 | davisking/dlib | 6d04dc96185b38f0d4ee82a56d4fb62574831dbd | 2009-01-17T01:32:30Z |
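The dlib workaround above replaces a recursive max template (which Visual Studio 7.1 mis-handled) with a manually unrolled chain of tmax<sizeof(...)> instantiations to size the union's raw storage. In current C++ the same largest-member-size computation can be written directly; the following is a sketch, not dlib code.

#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <string>

// Largest sizeof among a pack of types: what type_safe_union needs in order
// to size its storage block.
template <typename... Ts>
constexpr std::size_t max_size = std::max({sizeof(Ts)...});

int main() {
    constexpr std::size_t n = max_size<char, double, std::string>;
    static_assert(n >= sizeof(std::string), "storage must fit the largest member");
    std::printf("max member size: %zu\n", n);
    return 0;
}

dlib's type_safe_union predates std::variant, which is the usual way to get a type-safe union in modern code.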
mmm a / modules / core / include / opencv2 / core / hal / intrin_avx . hpp <nl> ppp b / modules / core / include / opencv2 / core / hal / intrin_avx . hpp <nl> OPENCV_HAL_IMPL_AVX_EXTRACT ( v_float32x8 ) <nl> OPENCV_HAL_IMPL_AVX_EXTRACT ( v_float64x4 ) <nl> <nl> <nl> - / * * Reinterpret * * / <nl> - / / its up there with load and store operations <nl> - <nl> - / * de & interleave * / <nl> - # define OPENCV_HAL_IMPL_AVX_INTERLEAVE_2CH ( _Tpvec , _Tp , suffix ) \ <nl> - inline void v_load_deinterleave ( const _Tp * ptr , _Tpvec & a , _Tpvec & b ) \ <nl> - { return v256_load_deinterleave_ # # suffix ( ptr , a , b ) ; } \ <nl> - inline void v_store_interleave ( _Tp * ptr , const _Tpvec & a , const _Tpvec & b ) \ <nl> - { return v256_store_interleave_2ch ( ptr , a , b ) ; } <nl> + / / / / / / / / / / / / / / / / / / / / / load deinterleave / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - # define OPENCV_HAL_IMPL_AVX_INTERLEAVE_3CH ( _Tpvec , _Tp , suffix ) \ <nl> - inline void v_load_deinterleave \ <nl> - ( const _Tp * ptr , _Tpvec & a , _Tpvec & b , _Tpvec & c ) \ <nl> - { return v256_load_deinterleave_ # # suffix ( ptr , a , b , c ) ; } \ <nl> - inline void v_store_interleave \ <nl> - ( _Tp * ptr , const _Tpvec & a , const _Tpvec & b , const _Tpvec & c ) \ <nl> - { return v256_store_interleave_ # # suffix ( ptr , a , b , c ) ; } <nl> + inline void v_load_deinterleave ( const uchar * ptr , v_uint8x32 & a , v_uint8x32 & b ) <nl> + { <nl> + __m256i ab0 = _mm256_loadu_si256 ( ( const __m256i * ) ptr ) ; <nl> + __m256i ab1 = _mm256_loadu_si256 ( ( const __m256i * ) ( ptr + 32 ) ) ; <nl> <nl> - # define OPENCV_HAL_IMPL_AVX_INTERLEAVE_4CH ( _Tpvec , _Tp , suffix ) \ <nl> - inline void v_load_deinterleave \ <nl> - ( const _Tp * ptr , _Tpvec & a , _Tpvec & b , _Tpvec & c , _Tpvec & d ) \ <nl> - { return v256_load_deinterleave_ # # suffix ( ptr , a , b , c , d ) ; } \ <nl> - inline void v_store_interleave \ <nl> - ( _Tp * ptr , const _Tpvec & a , const _Tpvec & b , const _Tpvec & c , const _Tpvec & d ) \ <nl> - { return v256_store_interleave_ # # suffix ( ptr , a , b , c , d ) ; } <nl> + static const __m256i sh = _mm256_setr_epi8 ( 0 , 2 , 4 , 6 , 8 , 10 , 12 , 14 , 1 , 3 , 5 , 7 , 9 , 11 , 13 , 15 , <nl> + 0 , 2 , 4 , 6 , 8 , 10 , 12 , 14 , 1 , 3 , 5 , 7 , 9 , 11 , 13 , 15 ) ; <nl> + __m256i p0 = _mm256_shuffle_epi8 ( ab0 , sh ) ; <nl> + __m256i p1 = _mm256_shuffle_epi8 ( ab1 , sh ) ; <nl> + __m256i pl = _mm256_permute2x128_si256 ( p0 , p1 , 0 + 2 * 16 ) ; <nl> + __m256i ph = _mm256_permute2x128_si256 ( p0 , p1 , 1 + 3 * 16 ) ; <nl> + __m256i a0 = _mm256_unpacklo_epi64 ( pl , ph ) ; <nl> + __m256i b0 = _mm256_unpackhi_epi64 ( pl , ph ) ; <nl> + a = v_uint8x32 ( a0 ) ; <nl> + b = v_uint8x32 ( b0 ) ; <nl> + } <nl> + <nl> + inline void v_load_deinterleave ( const ushort * ptr , v_uint16x16 & a , v_uint16x16 & b ) <nl> + { <nl> + __m256i ab0 = _mm256_loadu_si256 ( ( const __m256i * ) ptr ) ; <nl> + __m256i ab1 = _mm256_loadu_si256 ( ( const __m256i * ) ( ptr + 16 ) ) ; <nl> + <nl> + static const __m256i sh = _mm256_setr_epi8 ( 0 , 1 , 4 , 5 , 8 , 9 , 12 , 13 , 2 , 3 , 6 , 7 , 10 , 11 , 14 , 15 , <nl> + 0 , 1 , 4 , 5 , 8 , 9 , 12 , 13 , 2 , 3 , 6 , 7 , 10 , 11 , 14 , 15 ) ; <nl> + __m256i p0 = _mm256_shuffle_epi8 ( ab0 , sh ) ; <nl> + __m256i p1 = _mm256_shuffle_epi8 ( ab1 , sh ) ; <nl> + __m256i pl = _mm256_permute2x128_si256 ( p0 , p1 , 0 + 2 * 16 ) ; <nl> + __m256i ph = _mm256_permute2x128_si256 ( p0 , p1 , 1 + 3 * 16 ) ; <nl> + __m256i a0 = _mm256_unpacklo_epi64 ( pl , ph ) ; <nl> 
+ __m256i b0 = _mm256_unpackhi_epi64 ( pl , ph ) ; <nl> + a = v_uint16x16 ( a0 ) ; <nl> + b = v_uint16x16 ( b0 ) ; <nl> + } <nl> + <nl> + inline void v_load_deinterleave ( const unsigned * ptr , v_uint32x8 & a , v_uint32x8 & b ) <nl> + { <nl> + __m256i ab0 = _mm256_loadu_si256 ( ( const __m256i * ) ptr ) ; <nl> + __m256i ab1 = _mm256_loadu_si256 ( ( const __m256i * ) ( ptr + 8 ) ) ; <nl> + <nl> + const int sh = 0 + 2 * 4 + 1 * 16 + 3 * 64 ; <nl> + __m256i p0 = _mm256_shuffle_epi32 ( ab0 , sh ) ; <nl> + __m256i p1 = _mm256_shuffle_epi32 ( ab1 , sh ) ; <nl> + __m256i pl = _mm256_permute2x128_si256 ( p0 , p1 , 0 + 2 * 16 ) ; <nl> + __m256i ph = _mm256_permute2x128_si256 ( p0 , p1 , 1 + 3 * 16 ) ; <nl> + __m256i a0 = _mm256_unpacklo_epi64 ( pl , ph ) ; <nl> + __m256i b0 = _mm256_unpackhi_epi64 ( pl , ph ) ; <nl> + a = v_uint32x8 ( a0 ) ; <nl> + b = v_uint32x8 ( b0 ) ; <nl> + } <nl> <nl> - # define OPENCV_HAL_IMPL_AVX_INTERLEAVE_3n4CH ( _Tpvec , _Tp , suffix ) \ <nl> - OPENCV_HAL_IMPL_AVX_INTERLEAVE_3CH ( _Tpvec , _Tp , suffix ) \ <nl> - OPENCV_HAL_IMPL_AVX_INTERLEAVE_4CH ( _Tpvec , _Tp , suffix ) <nl> + inline void v_load_deinterleave ( const uint64 * ptr , v_uint64x4 & a , v_uint64x4 & b ) <nl> + { <nl> + __m256i ab0 = _mm256_loadu_si256 ( ( const __m256i * ) ptr ) ; <nl> + __m256i ab1 = _mm256_loadu_si256 ( ( const __m256i * ) ( ptr + 4 ) ) ; <nl> <nl> - # define OPENCV_HAL_IMPL_AVX_INTERLEAVE_ACH ( _Tpvec , _Tp , suffix ) \ <nl> - OPENCV_HAL_IMPL_AVX_INTERLEAVE_2CH ( _Tpvec , _Tp , suffix ) \ <nl> - OPENCV_HAL_IMPL_AVX_INTERLEAVE_3n4CH ( _Tpvec , _Tp , suffix ) <nl> + __m256i pl = _mm256_permute2x128_si256 ( ab0 , ab1 , 0 + 2 * 16 ) ; <nl> + __m256i ph = _mm256_permute2x128_si256 ( ab0 , ab1 , 1 + 3 * 16 ) ; <nl> + __m256i a0 = _mm256_unpacklo_epi64 ( pl , ph ) ; <nl> + __m256i b0 = _mm256_unpackhi_epi64 ( pl , ph ) ; <nl> + a = v_uint64x4 ( a0 ) ; <nl> + b = v_uint64x4 ( b0 ) ; <nl> + } <nl> <nl> - / * * * * * * / <nl> - / / <nl> - template < typename _Tp , typename _Tpvec > <nl> - inline void v256_store_interleave_2ch ( _Tp * ptr , const _Tpvec & a , const _Tpvec & b ) <nl> + inline void v_load_deinterleave ( const uchar * ptr , v_uint8x32 & b , v_uint8x32 & g , v_uint8x32 & r ) <nl> { <nl> - _Tpvec ab0 , ab1 ; <nl> - v_zip ( a , b , ab0 , ab1 ) ; <nl> - v_store ( ptr , ab0 ) ; <nl> - v_store ( ptr + _Tpvec : : nlanes , ab1 ) ; <nl> - } <nl> + __m256i bgr0 = _mm256_loadu_si256 ( ( const __m256i * ) ptr ) ; <nl> + __m256i bgr1 = _mm256_loadu_si256 ( ( const __m256i * ) ( ptr + 32 ) ) ; <nl> + __m256i bgr2 = _mm256_loadu_si256 ( ( const __m256i * ) ( ptr + 64 ) ) ; <nl> <nl> - template < typename _Tp , typename _Tpvec > <nl> - inline void v256_load_deinterleave_l4 ( const _Tp * ptr , _Tpvec & a , _Tpvec & b ) <nl> - { <nl> - _Tpvec ab0 = v256_load ( ptr ) ; <nl> - _Tpvec ab1 = v256_load ( ptr + _Tpvec : : nlanes ) ; <nl> - _Tpvec ab00 , ab11 ; <nl> - v_recombine ( ab0 , ab1 , ab00 , ab11 ) ; <nl> - v256_zip ( ab00 , ab11 , a , b ) ; <nl> - } <nl> + __m256i s02_low = _mm256_permute2x128_si256 ( bgr0 , bgr2 , 0 + 2 * 16 ) ; <nl> + __m256i s02_high = _mm256_permute2x128_si256 ( bgr0 , bgr2 , 1 + 3 * 16 ) ; <nl> <nl> - / / / <nl> - template < typename _Tp , typename _Tpvec > <nl> - inline void v256_load_deinterleave_l4 ( const _Tp * ptr , _Tpvec & a , _Tpvec & b , _Tpvec & c ) <nl> - { <nl> - _Tpvec abc0 = v256_load ( ptr ) ; <nl> - _Tpvec abc1 = v256_load ( ptr + _Tpvec : : nlanes ) ; <nl> - _Tpvec abc2 = v256_load ( ptr + _Tpvec : : nlanes * 2 ) ; <nl> + static const __m256i m0 = 
_mm256_setr_epi8 ( 0 , 0 , - 1 , 0 , 0 , - 1 , 0 , 0 , - 1 , 0 , 0 , - 1 , 0 , 0 , - 1 , 0 , <nl> + 0 , - 1 , 0 , 0 , - 1 , 0 , 0 , - 1 , 0 , 0 , - 1 , 0 , 0 , - 1 , 0 , 0 ) ; <nl> + static const __m256i m1 = _mm256_setr_epi8 ( 0 , - 1 , 0 , 0 , - 1 , 0 , 0 , - 1 , 0 , 0 , - 1 , 0 , 0 , - 1 , 0 , 0 , <nl> + - 1 , 0 , 0 , - 1 , 0 , 0 , - 1 , 0 , 0 , - 1 , 0 , 0 , - 1 , 0 , 0 , - 1 ) ; <nl> <nl> - _Tpvec ab0 = v256_combine_diagonal ( abc0 , abc1 ) ; <nl> - _Tpvec bc1 = v256_combine_diagonal ( abc1 , abc2 ) ; <nl> - _Tpvec ac1 = v256_reverse_64 ( v256_combine_diagonal ( abc2 , abc0 ) ) ; <nl> + __m256i b0 = _mm256_blendv_epi8 ( _mm256_blendv_epi8 ( s02_low , s02_high , m0 ) , bgr1 , m1 ) ; <nl> + __m256i g0 = _mm256_blendv_epi8 ( _mm256_blendv_epi8 ( s02_high , s02_low , m1 ) , bgr1 , m0 ) ; <nl> + __m256i r0 = _mm256_blendv_epi8 ( _mm256_blendv_epi8 ( bgr1 , s02_low , m0 ) , s02_high , m1 ) ; <nl> <nl> - a = v256_unpacklo ( ab0 , ac1 ) ; <nl> - c = v256_unpackhi ( ac1 , bc1 ) ; <nl> - b = v256_alignr_64 ( bc1 , ab0 ) ; <nl> - } <nl> + static const __m256i <nl> + sh_b = _mm256_setr_epi8 ( 0 , 3 , 6 , 9 , 12 , 15 , 2 , 5 , 8 , 11 , 14 , 1 , 4 , 7 , 10 , 13 , <nl> + 0 , 3 , 6 , 9 , 12 , 15 , 2 , 5 , 8 , 11 , 14 , 1 , 4 , 7 , 10 , 13 ) , <nl> + sh_g = _mm256_setr_epi8 ( 1 , 4 , 7 , 10 , 13 , 0 , 3 , 6 , 9 , 12 , 15 , 2 , 5 , 8 , 11 , 14 , <nl> + 1 , 4 , 7 , 10 , 13 , 0 , 3 , 6 , 9 , 12 , 15 , 2 , 5 , 8 , 11 , 14 ) , <nl> + sh_r = _mm256_setr_epi8 ( 2 , 5 , 8 , 11 , 14 , 1 , 4 , 7 , 10 , 13 , 0 , 3 , 6 , 9 , 12 , 15 , <nl> + 2 , 5 , 8 , 11 , 14 , 1 , 4 , 7 , 10 , 13 , 0 , 3 , 6 , 9 , 12 , 15 ) ; <nl> + b0 = _mm256_shuffle_epi8 ( b0 , sh_b ) ; <nl> + g0 = _mm256_shuffle_epi8 ( g0 , sh_g ) ; <nl> + r0 = _mm256_shuffle_epi8 ( r0 , sh_r ) ; <nl> <nl> + b = v_uint8x32 ( b0 ) ; <nl> + g = v_uint8x32 ( g0 ) ; <nl> + r = v_uint8x32 ( r0 ) ; <nl> + } <nl> <nl> - template < typename _Tp , typename _Tpvec > <nl> - inline void v256_store_interleave_l4 ( _Tp * ptr , const _Tpvec & a , const _Tpvec & b , const _Tpvec & c ) <nl> - { <nl> - _Tpvec ab0 = v256_unpacklo ( a , b ) ; <nl> - _Tpvec bc1 = v256_unpackhi ( b , c ) ; <nl> - _Tpvec ca10 = v256_swap_halves ( v256_blend < 0xa > ( c , a ) ) ; <nl> + inline void v_load_deinterleave ( const ushort * ptr , v_uint16x16 & b , v_uint16x16 & g , v_uint16x16 & r ) <nl> + { <nl> + __m256i bgr0 = _mm256_loadu_si256 ( ( const __m256i * ) ptr ) ; <nl> + __m256i bgr1 = _mm256_loadu_si256 ( ( const __m256i * ) ( ptr + 16 ) ) ; <nl> + __m256i bgr2 = _mm256_loadu_si256 ( ( const __m256i * ) ( ptr + 32 ) ) ; <nl> + <nl> + __m256i s02_low = _mm256_permute2x128_si256 ( bgr0 , bgr2 , 0 + 2 * 16 ) ; <nl> + __m256i s02_high = _mm256_permute2x128_si256 ( bgr0 , bgr2 , 1 + 3 * 16 ) ; <nl> + <nl> + static const __m256i m0 = _mm256_setr_epi8 ( 0 , 0 , - 1 , - 1 , 0 , 0 , 0 , 0 , - 1 , - 1 , 0 , 0 , 0 , 0 , - 1 , - 1 , <nl> + 0 , 0 , 0 , 0 , - 1 , - 1 , 0 , 0 , 0 , 0 , - 1 , - 1 , 0 , 0 , 0 , 0 ) ; <nl> + static const __m256i m1 = _mm256_setr_epi8 ( 0 , 0 , 0 , 0 , - 1 , - 1 , 0 , 0 , 0 , 0 , - 1 , - 1 , 0 , 0 , 0 , 0 , <nl> + - 1 , - 1 , 0 , 0 , 0 , 0 , - 1 , - 1 , 0 , 0 , 0 , 0 , - 1 , - 1 , 0 , 0 ) ; <nl> + __m256i b0 = _mm256_blendv_epi8 ( _mm256_blendv_epi8 ( s02_low , s02_high , m0 ) , bgr1 , m1 ) ; <nl> + __m256i g0 = _mm256_blendv_epi8 ( _mm256_blendv_epi8 ( bgr1 , s02_low , m0 ) , s02_high , m1 ) ; <nl> + __m256i r0 = _mm256_blendv_epi8 ( _mm256_blendv_epi8 ( s02_high , s02_low , m1 ) , bgr1 , m0 ) ; <nl> + static const __m256i sh_b = _mm256_setr_epi8 ( 0 , 1 , 6 , 7 , 12 , 13 
, 2 , 3 , 8 , 9 , 14 , 15 , 4 , 5 , 10 , 11 , <nl> + 0 , 1 , 6 , 7 , 12 , 13 , 2 , 3 , 8 , 9 , 14 , 15 , 4 , 5 , 10 , 11 ) ; <nl> + static const __m256i sh_g = _mm256_setr_epi8 ( 2 , 3 , 8 , 9 , 14 , 15 , 4 , 5 , 10 , 11 , 0 , 1 , 6 , 7 , 12 , 13 , <nl> + 2 , 3 , 8 , 9 , 14 , 15 , 4 , 5 , 10 , 11 , 0 , 1 , 6 , 7 , 12 , 13 ) ; <nl> + static const __m256i sh_r = _mm256_setr_epi8 ( 4 , 5 , 10 , 11 , 0 , 1 , 6 , 7 , 12 , 13 , 2 , 3 , 8 , 9 , 14 , 15 , <nl> + 4 , 5 , 10 , 11 , 0 , 1 , 6 , 7 , 12 , 13 , 2 , 3 , 8 , 9 , 14 , 15 ) ; <nl> + b0 = _mm256_shuffle_epi8 ( b0 , sh_b ) ; <nl> + g0 = _mm256_shuffle_epi8 ( g0 , sh_g ) ; <nl> + r0 = _mm256_shuffle_epi8 ( r0 , sh_r ) ; <nl> + <nl> + b = v_uint16x16 ( b0 ) ; <nl> + g = v_uint16x16 ( g0 ) ; <nl> + r = v_uint16x16 ( r0 ) ; <nl> + } <nl> + <nl> + inline void v_load_deinterleave ( const unsigned * ptr , v_uint32x8 & b , v_uint32x8 & g , v_uint32x8 & r ) <nl> + { <nl> + __m256i bgr0 = _mm256_loadu_si256 ( ( const __m256i * ) ptr ) ; <nl> + __m256i bgr1 = _mm256_loadu_si256 ( ( const __m256i * ) ( ptr + 8 ) ) ; <nl> + __m256i bgr2 = _mm256_loadu_si256 ( ( const __m256i * ) ( ptr + 16 ) ) ; <nl> + <nl> + __m256i s02_low = _mm256_permute2x128_si256 ( bgr0 , bgr2 , 0 + 2 * 16 ) ; <nl> + __m256i s02_high = _mm256_permute2x128_si256 ( bgr0 , bgr2 , 1 + 3 * 16 ) ; <nl> + <nl> + __m256i b0 = _mm256_blend_epi32 ( _mm256_blend_epi32 ( s02_low , s02_high , 0x24 ) , bgr1 , 0x92 ) ; <nl> + __m256i g0 = _mm256_blend_epi32 ( _mm256_blend_epi32 ( s02_high , s02_low , 0x92 ) , bgr1 , 0x24 ) ; <nl> + __m256i r0 = _mm256_blend_epi32 ( _mm256_blend_epi32 ( bgr1 , s02_low , 0x24 ) , s02_high , 0x92 ) ; <nl> + <nl> + b0 = _mm256_shuffle_epi32 ( b0 , 0x6c ) ; <nl> + g0 = _mm256_shuffle_epi32 ( g0 , 0xb1 ) ; <nl> + r0 = _mm256_shuffle_epi32 ( r0 , 0xc6 ) ; <nl> + <nl> + b = v_uint32x8 ( b0 ) ; <nl> + g = v_uint32x8 ( g0 ) ; <nl> + r = v_uint32x8 ( r0 ) ; <nl> + } <nl> + <nl> + inline void v_load_deinterleave ( const uint64 * ptr , v_uint64x4 & b , v_uint64x4 & g , v_uint64x4 & r ) <nl> + { <nl> + __m256i bgr0 = _mm256_loadu_si256 ( ( const __m256i * ) ptr ) ; <nl> + __m256i bgr1 = _mm256_loadu_si256 ( ( const __m256i * ) ( ptr + 4 ) ) ; <nl> + __m256i bgr2 = _mm256_loadu_si256 ( ( const __m256i * ) ( ptr + 8 ) ) ; <nl> + <nl> + __m256i s01 = _mm256_blend_epi32 ( bgr0 , bgr1 , 0xf0 ) ; <nl> + __m256i s12 = _mm256_blend_epi32 ( bgr1 , bgr2 , 0xf0 ) ; <nl> + __m256i s20r = _mm256_permute4x64_epi64 ( _mm256_blend_epi32 ( bgr2 , bgr0 , 0xf0 ) , 0x1b ) ; <nl> + __m256i b0 = _mm256_unpacklo_epi64 ( s01 , s20r ) ; <nl> + __m256i g0 = _mm256_alignr_epi8 ( s12 , s01 , 8 ) ; <nl> + __m256i r0 = _mm256_unpackhi_epi64 ( s20r , s12 ) ; <nl> + <nl> + b = v_uint64x4 ( b0 ) ; <nl> + g = v_uint64x4 ( g0 ) ; <nl> + r = v_uint64x4 ( r0 ) ; <nl> + } <nl> + <nl> + inline void v_load_deinterleave ( const uchar * ptr , v_uint8x32 & b , v_uint8x32 & g , v_uint8x32 & r , v_uint8x32 & a ) <nl> + { <nl> + __m256i bgr0 = _mm256_loadu_si256 ( ( const __m256i * ) ptr ) ; <nl> + __m256i bgr1 = _mm256_loadu_si256 ( ( const __m256i * ) ( ptr + 32 ) ) ; <nl> + __m256i bgr2 = _mm256_loadu_si256 ( ( const __m256i * ) ( ptr + 64 ) ) ; <nl> + __m256i bgr3 = _mm256_loadu_si256 ( ( const __m256i * ) ( ptr + 96 ) ) ; <nl> + static const __m256i sh = _mm256_setr_epi8 ( 0 , 4 , 8 , 12 , 1 , 5 , 9 , 13 , 2 , 6 , 10 , 14 , 3 , 7 , 11 , 15 , <nl> + 0 , 4 , 8 , 12 , 1 , 5 , 9 , 13 , 2 , 6 , 10 , 14 , 3 , 7 , 11 , 15 ) ; <nl> + <nl> + __m256i p0 = _mm256_shuffle_epi8 ( bgr0 , sh ) ; <nl> + __m256i p1 = 
_mm256_shuffle_epi8 ( bgr1 , sh ) ; <nl> + __m256i p2 = _mm256_shuffle_epi8 ( bgr2 , sh ) ; <nl> + __m256i p3 = _mm256_shuffle_epi8 ( bgr3 , sh ) ; <nl> + <nl> + __m256i p01l = _mm256_unpacklo_epi32 ( p0 , p1 ) ; <nl> + __m256i p01h = _mm256_unpackhi_epi32 ( p0 , p1 ) ; <nl> + __m256i p23l = _mm256_unpacklo_epi32 ( p2 , p3 ) ; <nl> + __m256i p23h = _mm256_unpackhi_epi32 ( p2 , p3 ) ; <nl> + <nl> + __m256i pll = _mm256_permute2x128_si256 ( p01l , p23l , 0 + 2 * 16 ) ; <nl> + __m256i plh = _mm256_permute2x128_si256 ( p01l , p23l , 1 + 3 * 16 ) ; <nl> + __m256i phl = _mm256_permute2x128_si256 ( p01h , p23h , 0 + 2 * 16 ) ; <nl> + __m256i phh = _mm256_permute2x128_si256 ( p01h , p23h , 1 + 3 * 16 ) ; <nl> + <nl> + __m256i b0 = _mm256_unpacklo_epi32 ( pll , plh ) ; <nl> + __m256i g0 = _mm256_unpackhi_epi32 ( pll , plh ) ; <nl> + __m256i r0 = _mm256_unpacklo_epi32 ( phl , phh ) ; <nl> + __m256i a0 = _mm256_unpackhi_epi32 ( phl , phh ) ; <nl> <nl> - v_store ( ptr , v256_combine_diagonal ( ab0 , ca10 ) ) ; <nl> - v_store ( ptr + _Tpvec : : nlanes , v256_combine_diagonal ( bc1 , ab0 ) ) ; <nl> - v_store ( ptr + _Tpvec : : nlanes * 2 , v256_combine_diagonal ( ca10 , bc1 ) ) ; <nl> + b = v_uint8x32 ( b0 ) ; <nl> + g = v_uint8x32 ( g0 ) ; <nl> + r = v_uint8x32 ( r0 ) ; <nl> + a = v_uint8x32 ( a0 ) ; <nl> } <nl> <nl> - / / / / <nl> - template < typename _Tp , typename _Tpvec > <nl> - inline void v256_load_deinterleave_l4 ( const _Tp * ptr , _Tpvec & a , _Tpvec & b , _Tpvec & c , _Tpvec & d ) <nl> + inline void v_load_deinterleave ( const ushort * ptr , v_uint16x16 & b , v_uint16x16 & g , v_uint16x16 & r , v_uint16x16 & a ) <nl> { <nl> - _Tpvec abcd0 = v256_load ( ptr ) ; <nl> - _Tpvec abcd1 = v256_load ( ptr + _Tpvec : : nlanes ) ; <nl> - _Tpvec abcd2 = v256_load ( ptr + _Tpvec : : nlanes * 2 ) ; <nl> - _Tpvec abcd3 = v256_load ( ptr + _Tpvec : : nlanes * 3 ) ; <nl> + __m256i bgr0 = _mm256_loadu_si256 ( ( const __m256i * ) ptr ) ; <nl> + __m256i bgr1 = _mm256_loadu_si256 ( ( const __m256i * ) ( ptr + 16 ) ) ; <nl> + __m256i bgr2 = _mm256_loadu_si256 ( ( const __m256i * ) ( ptr + 32 ) ) ; <nl> + __m256i bgr3 = _mm256_loadu_si256 ( ( const __m256i * ) ( ptr + 48 ) ) ; <nl> + static const __m256i sh = _mm256_setr_epi8 ( 0 , 1 , 8 , 9 , 2 , 3 , 10 , 11 , 4 , 5 , 12 , 13 , 6 , 7 , 14 , 15 , <nl> + 0 , 1 , 8 , 9 , 2 , 3 , 10 , 11 , 4 , 5 , 12 , 13 , 6 , 7 , 14 , 15 ) ; <nl> + __m256i p0 = _mm256_shuffle_epi8 ( bgr0 , sh ) ; <nl> + __m256i p1 = _mm256_shuffle_epi8 ( bgr1 , sh ) ; <nl> + __m256i p2 = _mm256_shuffle_epi8 ( bgr2 , sh ) ; <nl> + __m256i p3 = _mm256_shuffle_epi8 ( bgr3 , sh ) ; <nl> + <nl> + __m256i p01l = _mm256_unpacklo_epi32 ( p0 , p1 ) ; <nl> + __m256i p01h = _mm256_unpackhi_epi32 ( p0 , p1 ) ; <nl> + __m256i p23l = _mm256_unpacklo_epi32 ( p2 , p3 ) ; <nl> + __m256i p23h = _mm256_unpackhi_epi32 ( p2 , p3 ) ; <nl> <nl> - _Tpvec cd0ab0 = v256_alignr_128 ( abcd0 , abcd2 ) ; <nl> - _Tpvec cd1ab1 = v256_alignr_128 ( abcd1 , abcd3 ) ; <nl> + __m256i pll = _mm256_permute2x128_si256 ( p01l , p23l , 0 + 2 * 16 ) ; <nl> + __m256i plh = _mm256_permute2x128_si256 ( p01l , p23l , 1 + 3 * 16 ) ; <nl> + __m256i phl = _mm256_permute2x128_si256 ( p01h , p23h , 0 + 2 * 16 ) ; <nl> + __m256i phh = _mm256_permute2x128_si256 ( p01h , p23h , 1 + 3 * 16 ) ; <nl> <nl> - _Tpvec ab0 = v256_combine_diagonal ( abcd0 , cd0ab0 ) ; <nl> - _Tpvec ab1 = v256_combine_diagonal ( abcd1 , cd1ab1 ) ; <nl> - _Tpvec cd0 = v256_combine_diagonal ( cd0ab0 , abcd2 ) ; <nl> - _Tpvec cd1 = v256_combine_diagonal ( cd1ab1 , abcd3 ) ; <nl> + 
__m256i b0 = _mm256_unpacklo_epi32 ( pll , plh ) ; <nl> + __m256i g0 = _mm256_unpackhi_epi32 ( pll , plh ) ; <nl> + __m256i r0 = _mm256_unpacklo_epi32 ( phl , phh ) ; <nl> + __m256i a0 = _mm256_unpackhi_epi32 ( phl , phh ) ; <nl> <nl> - v256_zip ( ab0 , ab1 , a , b ) ; <nl> - v256_zip ( cd0 , cd1 , c , d ) ; <nl> + b = v_uint16x16 ( b0 ) ; <nl> + g = v_uint16x16 ( g0 ) ; <nl> + r = v_uint16x16 ( r0 ) ; <nl> + a = v_uint16x16 ( a0 ) ; <nl> } <nl> <nl> - template < typename _Tp , typename _Tpvec > <nl> - inline void v256_store_interleave_l4 ( _Tp * ptr , const _Tpvec & a , const _Tpvec & b , const _Tpvec & c , const _Tpvec & d ) <nl> + inline void v_load_deinterleave ( const unsigned * ptr , v_uint32x8 & b , v_uint32x8 & g , v_uint32x8 & r , v_uint32x8 & a ) <nl> { <nl> - _Tpvec ab0 , ab1 , cd0 , cd1 ; <nl> - v256_zip ( a , b , ab0 , ab1 ) ; <nl> - v256_zip ( c , d , cd0 , cd1 ) ; <nl> - <nl> - _Tpvec ab0cd0 = v256_alignr_128 ( ab0 , cd0 ) ; <nl> - _Tpvec ab1cd1 = v256_alignr_128 ( ab1 , cd1 ) ; <nl> - <nl> - v_store ( ptr , v256_combine_diagonal ( ab0 , ab0cd0 ) ) ; <nl> - v_store ( ptr + _Tpvec : : nlanes , v256_combine_diagonal ( ab1 , ab1cd1 ) ) ; <nl> - v_store ( ptr + _Tpvec : : nlanes * 2 , v256_combine_diagonal ( ab0cd0 , cd0 ) ) ; <nl> - v_store ( ptr + _Tpvec : : nlanes * 3 , v256_combine_diagonal ( ab1cd1 , cd1 ) ) ; <nl> - } <nl> + __m256i p0 = _mm256_loadu_si256 ( ( const __m256i * ) ptr ) ; <nl> + __m256i p1 = _mm256_loadu_si256 ( ( const __m256i * ) ( ptr + 8 ) ) ; <nl> + __m256i p2 = _mm256_loadu_si256 ( ( const __m256i * ) ( ptr + 16 ) ) ; <nl> + __m256i p3 = _mm256_loadu_si256 ( ( const __m256i * ) ( ptr + 24 ) ) ; <nl> <nl> - OPENCV_HAL_IMPL_AVX_INTERLEAVE_ACH ( v_uint64x4 , uint64 , l4 ) <nl> - OPENCV_HAL_IMPL_AVX_INTERLEAVE_ACH ( v_int64x4 , int64 , l4 ) <nl> - OPENCV_HAL_IMPL_AVX_INTERLEAVE_ACH ( v_float64x4 , double , l4 ) <nl> + __m256i p01l = _mm256_unpacklo_epi32 ( p0 , p1 ) ; <nl> + __m256i p01h = _mm256_unpackhi_epi32 ( p0 , p1 ) ; <nl> + __m256i p23l = _mm256_unpacklo_epi32 ( p2 , p3 ) ; <nl> + __m256i p23h = _mm256_unpackhi_epi32 ( p2 , p3 ) ; <nl> <nl> - / * * * * * * * * * * / <nl> - / / <nl> - inline void v256_load_deinterleave_l8 ( const float * ptr , v_float32x8 & a , v_float32x8 & b ) <nl> - { <nl> - v_float32x8 ab0 = v256_load ( ptr ) ; <nl> - v_float32x8 ab1 = v256_load ( ptr + 8 ) ; <nl> + __m256i pll = _mm256_permute2x128_si256 ( p01l , p23l , 0 + 2 * 16 ) ; <nl> + __m256i plh = _mm256_permute2x128_si256 ( p01l , p23l , 1 + 3 * 16 ) ; <nl> + __m256i phl = _mm256_permute2x128_si256 ( p01h , p23h , 0 + 2 * 16 ) ; <nl> + __m256i phh = _mm256_permute2x128_si256 ( p01h , p23h , 1 + 3 * 16 ) ; <nl> <nl> - v_float32x8 ab0ab2 , ab1ab3 ; <nl> - v_recombine ( ab0 , ab1 , ab0ab2 , ab1ab3 ) ; <nl> + __m256i b0 = _mm256_unpacklo_epi32 ( pll , plh ) ; <nl> + __m256i g0 = _mm256_unpackhi_epi32 ( pll , plh ) ; <nl> + __m256i r0 = _mm256_unpacklo_epi32 ( phl , phh ) ; <nl> + __m256i a0 = _mm256_unpackhi_epi32 ( phl , phh ) ; <nl> <nl> - a . val = _mm256_shuffle_ps ( ab0ab2 . val , ab1ab3 . val , _MM_SHUFFLE ( 2 , 0 , 2 , 0 ) ) ; <nl> - b . val = _mm256_shuffle_ps ( ab0ab2 . val , ab1ab3 . 
val , _MM_SHUFFLE ( 3 , 1 , 3 , 1 ) ) ; <nl> + b = v_uint32x8 ( b0 ) ; <nl> + g = v_uint32x8 ( g0 ) ; <nl> + r = v_uint32x8 ( r0 ) ; <nl> + a = v_uint32x8 ( a0 ) ; <nl> } <nl> <nl> - template < typename _Tp , typename _Tpvec > <nl> - inline void v256_load_deinterleave_l8 ( const _Tp * ptr , _Tpvec & a , _Tpvec & b ) <nl> + inline void v_load_deinterleave ( const uint64 * ptr , v_uint64x4 & b , v_uint64x4 & g , v_uint64x4 & r , v_uint64x4 & a ) <nl> { <nl> - v_float32x8 fa , fb ; <nl> - v256_load_deinterleave_l8 ( ( float * ) ptr , fa , fb ) ; <nl> - a . val = v_reinterpret_as_u32 ( fa ) . val ; <nl> - b . val = v_reinterpret_as_u32 ( fb ) . val ; <nl> - } <nl> - / / / <nl> - template < typename _Tp , typename _Tpvec > <nl> - inline void v256_store_interleave_l8 ( _Tp * ptr , const _Tpvec & a , const _Tpvec & b , const _Tpvec & c ) <nl> - { <nl> - _Tpvec ab0 , ab1 , bc0 , bc1 ; <nl> - v256_zip ( a , b , ab0 , ab1 ) ; <nl> - v256_zip ( b , c , bc0 , bc1 ) ; <nl> + __m256i bgra0 = _mm256_loadu_si256 ( ( const __m256i * ) ptr ) ; <nl> + __m256i bgra1 = _mm256_loadu_si256 ( ( const __m256i * ) ( ptr + 4 ) ) ; <nl> + __m256i bgra2 = _mm256_loadu_si256 ( ( const __m256i * ) ( ptr + 8 ) ) ; <nl> + __m256i bgra3 = _mm256_loadu_si256 ( ( const __m256i * ) ( ptr + 12 ) ) ; <nl> <nl> - _Tpvec cazg = v256_blend < 0xaa > ( c , a ) ; <nl> - _Tpvec abc0abc1 ( _mm256_unpacklo_epi64 ( ab0 . val , cazg . val ) ) ; <nl> - _Tpvec abc1abc2 ( _mm256_unpackhi_epi64 ( cazg . val , bc1 . val ) ) ; <nl> - _Tpvec abc2abc0 = v256_reverse_64 ( v256_blend < 0xcc > ( ab1 , bc0 ) ) ; <nl> + __m256i l02 = _mm256_permute2x128_si256 ( bgra0 , bgra2 , 0 + 2 * 16 ) ; <nl> + __m256i h02 = _mm256_permute2x128_si256 ( bgra0 , bgra2 , 1 + 3 * 16 ) ; <nl> + __m256i l13 = _mm256_permute2x128_si256 ( bgra1 , bgra3 , 0 + 2 * 16 ) ; <nl> + __m256i h13 = _mm256_permute2x128_si256 ( bgra1 , bgra3 , 1 + 3 * 16 ) ; <nl> <nl> - _Tpvec abc0 = v256_combine_diagonal ( abc0abc1 , abc2abc0 ) ; <nl> - _Tpvec abc1 = v256_combine_diagonal ( abc1abc2 , abc0abc1 ) ; <nl> - _Tpvec abc2 = v256_combine_diagonal ( abc2abc0 , abc1abc2 ) ; <nl> + __m256i b0 = _mm256_unpacklo_epi64 ( l02 , l13 ) ; <nl> + __m256i g0 = _mm256_unpackhi_epi64 ( l02 , l13 ) ; <nl> + __m256i r0 = _mm256_unpacklo_epi64 ( h02 , h13 ) ; <nl> + __m256i a0 = _mm256_unpackhi_epi64 ( h02 , h13 ) ; <nl> <nl> - v_store ( ptr , abc0 ) ; <nl> - v_store ( ptr + _Tpvec : : nlanes , abc1 ) ; <nl> - v_store ( ptr + _Tpvec : : nlanes * 2 , abc2 ) ; <nl> + b = v_uint64x4 ( b0 ) ; <nl> + g = v_uint64x4 ( g0 ) ; <nl> + r = v_uint64x4 ( r0 ) ; <nl> + a = v_uint64x4 ( a0 ) ; <nl> } <nl> <nl> - inline void v256_store_interleave_l8 ( float * ptr , const v_float32x8 & a , const v_float32x8 & b , const v_float32x8 & c ) <nl> - { <nl> - v_float32x8 ab0 , ab1 , bc0 , bc1 ; <nl> - v256_zip ( a , b , ab0 , ab1 ) ; <nl> - v256_zip ( b , c , bc0 , bc1 ) ; <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / store interleave / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - v_float32x8 cazg = v256_blend < 0xaa > ( c , a ) ; <nl> - v_float32x8 abc0abc1 ( _mm256_shuffle_ps ( ab0 . val , cazg . val , _MM_SHUFFLE ( 1 , 0 , 1 , 0 ) ) ) ; <nl> - v_float32x8 abc1abc2 ( _mm256_shuffle_ps ( cazg . val , bc1 . val , _MM_SHUFFLE ( 3 , 2 , 3 , 2 ) ) ) ; <nl> - <nl> - v_float32x8 abc0abc2 ( _mm256_shuffle_ps ( bc0 . val , ab1 . 
val , _MM_SHUFFLE ( 1 , 0 , 3 , 2 ) ) ) ; <nl> - v_float32x8 abc2abc0 = v256_swap_halves ( abc0abc2 ) ; <nl> + inline void v_store_interleave ( uchar * ptr , const v_uint8x32 & x , const v_uint8x32 & y ) <nl> + { <nl> + __m256i xy_l = _mm256_unpacklo_epi8 ( x . val , y . val ) ; <nl> + __m256i xy_h = _mm256_unpackhi_epi8 ( x . val , y . val ) ; <nl> <nl> - v_float32x8 abc0 = v256_combine_diagonal ( abc0abc1 , abc2abc0 ) ; <nl> - v_float32x8 abc1 = v256_combine_diagonal ( abc1abc2 , abc0abc1 ) ; <nl> - v_float32x8 abc2 = v256_combine_diagonal ( abc2abc0 , abc1abc2 ) ; <nl> + __m256i xy0 = _mm256_permute2x128_si256 ( xy_l , xy_h , 0 + 2 * 16 ) ; <nl> + __m256i xy1 = _mm256_permute2x128_si256 ( xy_l , xy_h , 1 + 3 * 16 ) ; <nl> <nl> - v_store ( ptr , abc0 ) ; <nl> - v_store ( ptr + 8 , abc1 ) ; <nl> - v_store ( ptr + 16 , abc2 ) ; <nl> + _mm256_storeu_si256 ( ( __m256i * ) ptr , xy0 ) ; <nl> + _mm256_storeu_si256 ( ( __m256i * ) ( ptr + 32 ) , xy1 ) ; <nl> } <nl> <nl> - template < typename _Tp , typename _Tpvec > <nl> - inline void v256_load_deinterleave_l8 ( const _Tp * ptr , _Tpvec & a , _Tpvec & b , _Tpvec & c ) <nl> + inline void v_store_interleave ( ushort * ptr , const v_uint16x16 & x , const v_uint16x16 & y ) <nl> { <nl> - _Tpvec abc02 = v256_load ( ptr ) ; <nl> - _Tpvec abc1 = v256_load ( ptr + _Tpvec : : nlanes ) ; <nl> - _Tpvec abc20 = v256_load ( ptr + _Tpvec : : nlanes * 2 ) ; <nl> + __m256i xy_l = _mm256_unpacklo_epi16 ( x . val , y . val ) ; <nl> + __m256i xy_h = _mm256_unpackhi_epi16 ( x . val , y . val ) ; <nl> <nl> - _Tpvec abc2 = v256_alignr_128 ( abc02 , abc20 ) ; <nl> - _Tpvec abc0 = v256_combine_diagonal ( abc02 , abc20 ) ; <nl> + __m256i xy0 = _mm256_permute2x128_si256 ( xy_l , xy_h , 0 + 2 * 16 ) ; <nl> + __m256i xy1 = _mm256_permute2x128_si256 ( xy_l , xy_h , 1 + 3 * 16 ) ; <nl> <nl> - a = v256_blend < 0x92 > ( abc0 , abc1 ) ; <nl> - a = v256_blend < 0x44 > ( a , abc2 ) ; <nl> + _mm256_storeu_si256 ( ( __m256i * ) ptr , xy0 ) ; <nl> + _mm256_storeu_si256 ( ( __m256i * ) ( ptr + 16 ) , xy1 ) ; <nl> + } <nl> <nl> - b = v256_blend < 0x24 > ( abc0 , abc1 ) ; <nl> - b = v256_blend < 0x99 > ( b , abc2 ) ; <nl> + inline void v_store_interleave ( unsigned * ptr , const v_uint32x8 & x , const v_uint32x8 & y ) <nl> + { <nl> + __m256i xy_l = _mm256_unpacklo_epi32 ( x . val , y . val ) ; <nl> + __m256i xy_h = _mm256_unpackhi_epi32 ( x . val , y . 
val ) ; <nl> <nl> - c = v256_blend < 0x49 > ( abc0 , abc1 ) ; <nl> - c = v256_blend < 0x22 > ( c , abc2 ) ; <nl> + __m256i xy0 = _mm256_permute2x128_si256 ( xy_l , xy_h , 0 + 2 * 16 ) ; <nl> + __m256i xy1 = _mm256_permute2x128_si256 ( xy_l , xy_h , 1 + 3 * 16 ) ; <nl> <nl> - a = v256_shuffle < _MM_SHUFFLE ( 1 , 2 , 3 , 0 ) > ( a ) ; <nl> - b = v256_shuffle < _MM_SHUFFLE ( 2 , 3 , 0 , 1 ) > ( b ) ; <nl> - c = v256_shuffle < _MM_SHUFFLE ( 3 , 0 , 1 , 2 ) > ( c ) ; <nl> - } <nl> - / / / / / <nl> - template < typename _Tp , typename _Tpvec > <nl> - inline void v256_load_deinterleave_l8 ( const _Tp * ptr , _Tpvec & a , _Tpvec & b , _Tpvec & c , _Tpvec & d ) <nl> - { <nl> - _Tpvec ab0 , ab1 , cd0 , cd1 ; <nl> - v256_load_deinterleave_l4 ( ptr , ab0 , cd0 , ab1 , cd1 ) ; <nl> - v256_zip ( ab0 , ab1 , a , b ) ; <nl> - v256_zip ( cd0 , cd1 , c , d ) ; <nl> + _mm256_storeu_si256 ( ( __m256i * ) ptr , xy0 ) ; <nl> + _mm256_storeu_si256 ( ( __m256i * ) ( ptr + 8 ) , xy1 ) ; <nl> } <nl> <nl> - template < typename _Tp , typename _Tpvec > <nl> - inline void v256_store_interleave_l8 ( _Tp * ptr , const _Tpvec & a , const _Tpvec & b , const _Tpvec & c , const _Tpvec & d ) <nl> + inline void v_store_interleave ( uint64 * ptr , const v_uint64x4 & x , const v_uint64x4 & y ) <nl> { <nl> - _Tpvec ac0 , ac1 , bd0 , bd1 ; <nl> - v256_zip ( a , c , ac0 , ac1 ) ; <nl> - v256_zip ( b , d , bd0 , bd1 ) ; <nl> - <nl> - _Tpvec abcd0 , abcd1 , abcd2 , abcd3 ; <nl> - v256_zip ( ac0 , bd0 , abcd0 , abcd1 ) ; <nl> - v256_zip ( ac1 , bd1 , abcd2 , abcd3 ) ; <nl> + __m256i xy_l = _mm256_unpacklo_epi64 ( x . val , y . val ) ; <nl> + __m256i xy_h = _mm256_unpackhi_epi64 ( x . val , y . val ) ; <nl> <nl> - _Tpvec abcd01 , abcd23 , abcd45 , abcd67 ; <nl> - v_recombine ( abcd0 , abcd1 , abcd01 , abcd45 ) ; <nl> - v_recombine ( abcd2 , abcd3 , abcd23 , abcd67 ) ; <nl> + __m256i xy0 = _mm256_permute2x128_si256 ( xy_l , xy_h , 0 + 2 * 16 ) ; <nl> + __m256i xy1 = _mm256_permute2x128_si256 ( xy_l , xy_h , 1 + 3 * 16 ) ; <nl> <nl> - v_store ( ptr , abcd01 ) ; <nl> - v_store ( ptr + _Tpvec : : nlanes , abcd23 ) ; <nl> - v_store ( ptr + _Tpvec : : nlanes * 2 , abcd45 ) ; <nl> - v_store ( ptr + _Tpvec : : nlanes * 3 , abcd67 ) ; <nl> + _mm256_storeu_si256 ( ( __m256i * ) ptr , xy0 ) ; <nl> + _mm256_storeu_si256 ( ( __m256i * ) ( ptr + 4 ) , xy1 ) ; <nl> } <nl> <nl> - OPENCV_HAL_IMPL_AVX_INTERLEAVE_ACH ( v_uint32x8 , unsigned , l8 ) <nl> - OPENCV_HAL_IMPL_AVX_INTERLEAVE_ACH ( v_int32x8 , int , l8 ) <nl> - OPENCV_HAL_IMPL_AVX_INTERLEAVE_ACH ( v_float32x8 , float , l8 ) <nl> - <nl> - / * * * * * * * * * * * * * * * * * * / <nl> - / / <nl> - template < typename _Tp , typename _Tpvec > <nl> - inline void v256_load_deinterleave_l16 ( const _Tp * ptr , _Tpvec & a , _Tpvec & b ) <nl> + inline void v_store_interleave ( uchar * ptr , const v_uint8x32 & b , const v_uint8x32 & g , const v_uint8x32 & r ) <nl> { <nl> - const __m256i sep = _mm256_setr_epi8 ( <nl> - 0 , 1 , 4 , 5 , 8 , 9 , 12 , 13 , 2 , 3 , 6 , 7 , 10 , 11 , 14 , 15 , <nl> - 0 , 1 , 4 , 5 , 8 , 9 , 12 , 13 , 2 , 3 , 6 , 7 , 10 , 11 , 14 , 15 <nl> - ) ; <nl> + static const __m256i sh_b = _mm256_setr_epi8 ( <nl> + 0 , 11 , 6 , 1 , 12 , 7 , 2 , 13 , 8 , 3 , 14 , 9 , 4 , 15 , 10 , 5 , <nl> + 0 , 11 , 6 , 1 , 12 , 7 , 2 , 13 , 8 , 3 , 14 , 9 , 4 , 15 , 10 , 5 ) ; <nl> + static const __m256i sh_g = _mm256_setr_epi8 ( <nl> + 5 , 0 , 11 , 6 , 1 , 12 , 7 , 2 , 13 , 8 , 3 , 14 , 9 , 4 , 15 , 10 , <nl> + 5 , 0 , 11 , 6 , 1 , 12 , 7 , 2 , 13 , 8 , 3 , 14 , 9 , 4 , 15 , 10 ) ; <nl> + static const 
__m256i sh_r = _mm256_setr_epi8 ( <nl> + 10 , 5 , 0 , 11 , 6 , 1 , 12 , 7 , 2 , 13 , 8 , 3 , 14 , 9 , 4 , 15 , <nl> + 10 , 5 , 0 , 11 , 6 , 1 , 12 , 7 , 2 , 13 , 8 , 3 , 14 , 9 , 4 , 15 ) ; <nl> <nl> - _Tpvec ab0 , ab1 ; <nl> - v_recombine ( v256_load ( ptr ) , v256_load ( ptr + _Tpvec : : nlanes ) , ab0 , ab1 ) ; <nl> + __m256i b0 = _mm256_shuffle_epi8 ( b . val , sh_b ) ; <nl> + __m256i g0 = _mm256_shuffle_epi8 ( g . val , sh_g ) ; <nl> + __m256i r0 = _mm256_shuffle_epi8 ( r . val , sh_r ) ; <nl> <nl> - __m256i a0b0 = _mm256_shuffle_epi8 ( ab0 . val , sep ) ; <nl> - __m256i a1b1 = _mm256_shuffle_epi8 ( ab1 . val , sep ) ; <nl> - <nl> - a . val = _mm256_unpacklo_epi64 ( a0b0 , a1b1 ) ; <nl> - b . val = _mm256_unpackhi_epi64 ( a0b0 , a1b1 ) ; <nl> - } <nl> - / / / <nl> - template < typename _Tp , typename _Tpvec > <nl> - inline void v256_store_interleave_l16 ( _Tp * ptr , const _Tpvec & a , const _Tpvec & b , const _Tpvec & c ) <nl> - { <nl> - v_uint32x8 ab0 = v_reinterpret_as_u32 ( v256_unpacklo ( a , b ) ) ; <nl> - v_uint32x8 ab1 = v_reinterpret_as_u32 ( v256_unpackhi ( a , b ) ) ; <nl> - v_uint32x8 bc0 = v_reinterpret_as_u32 ( v256_unpacklo ( b , c ) ) ; <nl> - v_uint32x8 bc1 = v_reinterpret_as_u32 ( v256_unpackhi ( b , c ) ) ; <nl> + static const __m256i m0 = _mm256_setr_epi8 ( 0 , - 1 , 0 , 0 , - 1 , 0 , 0 , - 1 , 0 , 0 , - 1 , 0 , 0 , - 1 , 0 , 0 , <nl> + 0 , - 1 , 0 , 0 , - 1 , 0 , 0 , - 1 , 0 , 0 , - 1 , 0 , 0 , - 1 , 0 , 0 ) ; <nl> + static const __m256i m1 = _mm256_setr_epi8 ( 0 , 0 , - 1 , 0 , 0 , - 1 , 0 , 0 , - 1 , 0 , 0 , - 1 , 0 , 0 , - 1 , 0 , <nl> + 0 , 0 , - 1 , 0 , 0 , - 1 , 0 , 0 , - 1 , 0 , 0 , - 1 , 0 , 0 , - 1 , 0 ) ; <nl> <nl> - v_uint32x8 cazg = v_reinterpret_as_u32 ( v256_blend < 0xaa > ( c , a ) ) ; <nl> - cazg = v256_shuffle < _MM_SHUFFLE ( 2 , 1 , 0 , 3 ) > ( cazg ) ; <nl> + __m256i p0 = _mm256_blendv_epi8 ( _mm256_blendv_epi8 ( b0 , g0 , m0 ) , r0 , m1 ) ; <nl> + __m256i p1 = _mm256_blendv_epi8 ( _mm256_blendv_epi8 ( g0 , r0 , m0 ) , b0 , m1 ) ; <nl> + __m256i p2 = _mm256_blendv_epi8 ( _mm256_blendv_epi8 ( r0 , b0 , m0 ) , g0 , m1 ) ; <nl> <nl> - v_uint32x8 ac1ab1 = v256_blend < 0xaa > ( ab1 , bc1 ) ; <nl> - ac1ab1 = v256_shuffle < _MM_SHUFFLE ( 2 , 1 , 0 , 3 ) > ( ac1ab1 ) ; <nl> + __m256i bgr0 = _mm256_permute2x128_si256 ( p0 , p1 , 0 + 2 * 16 ) ; <nl> + __m256i bgr1 = _mm256_permute2x128_si256 ( p2 , p0 , 0 + 3 * 16 ) ; <nl> + __m256i bgr2 = _mm256_permute2x128_si256 ( p1 , p2 , 1 + 3 * 16 ) ; <nl> <nl> - v_uint32x8 abc001 = v256_blend < 0xaa > ( ab0 , cazg ) ; <nl> - v_uint32x8 cabc0 = v256_blend < 0xaa > ( cazg , bc0 ) ; <nl> + _mm256_storeu_si256 ( ( __m256i * ) ptr , bgr0 ) ; <nl> + _mm256_storeu_si256 ( ( __m256i * ) ( ptr + 32 ) , bgr1 ) ; <nl> + _mm256_storeu_si256 ( ( __m256i * ) ( ptr + 64 ) , bgr2 ) ; <nl> + } <nl> <nl> - v_uint32x8 cabc1 = v256_unpacklo ( cabc0 , ac1ab1 ) ; <nl> - v_uint32x8 bcab0 = v256_unpackhi ( cabc1 , abc001 ) ; <nl> + inline void v_store_interleave ( ushort * ptr , const v_uint16x16 & b , const v_uint16x16 & g , const v_uint16x16 & r ) <nl> + { <nl> + static const __m256i sh_b = _mm256_setr_epi8 ( <nl> + 0 , 1 , 6 , 7 , 12 , 13 , 2 , 3 , 8 , 9 , 14 , 15 , 4 , 5 , 10 , 11 , <nl> + 0 , 1 , 6 , 7 , 12 , 13 , 2 , 3 , 8 , 9 , 14 , 15 , 4 , 5 , 10 , 11 ) ; <nl> + static const __m256i sh_g = _mm256_setr_epi8 ( <nl> + 10 , 11 , 0 , 1 , 6 , 7 , 12 , 13 , 2 , 3 , 8 , 9 , 14 , 15 , 4 , 5 , <nl> + 10 , 11 , 0 , 1 , 6 , 7 , 12 , 13 , 2 , 3 , 8 , 9 , 14 , 15 , 4 , 5 ) ; <nl> + static const __m256i sh_r = _mm256_setr_epi8 ( <nl> + 4 , 5 , 
10 , 11 , 0 , 1 , 6 , 7 , 12 , 13 , 2 , 3 , 8 , 9 , 14 , 15 , <nl> + 4 , 5 , 10 , 11 , 0 , 1 , 6 , 7 , 12 , 13 , 2 , 3 , 8 , 9 , 14 , 15 ) ; <nl> <nl> - v_uint64x4 abc01 = v256_unpacklo ( v_reinterpret_as_u64 ( abc001 ) , v_reinterpret_as_u64 ( bcab0 ) ) ; <nl> - v_uint64x4 abc21 = v256_unpackhi ( v_reinterpret_as_u64 ( cabc0 ) , v_reinterpret_as_u64 ( bcab0 ) ) ; <nl> - abc21 = v256_swap_halves ( abc21 ) ; <nl> - v_uint64x4 abc12 = v_reinterpret_as_u64 ( v256_alignr_64 ( cabc1 , ac1ab1 ) ) ; <nl> + __m256i b0 = _mm256_shuffle_epi8 ( b . val , sh_b ) ; <nl> + __m256i g0 = _mm256_shuffle_epi8 ( g . val , sh_g ) ; <nl> + __m256i r0 = _mm256_shuffle_epi8 ( r . val , sh_r ) ; <nl> <nl> - v_uint64x4 abc0 = v256_combine_diagonal ( abc01 , abc21 ) ; <nl> - v_uint64x4 abc1 = v256_combine_diagonal ( abc12 , abc01 ) ; <nl> - v_uint64x4 abc2 = v256_combine_diagonal ( abc21 , abc12 ) ; <nl> + static const __m256i m0 = _mm256_setr_epi8 ( 0 , 0 , - 1 , - 1 , 0 , 0 , 0 , 0 , - 1 , - 1 , 0 , 0 , 0 , 0 , - 1 , - 1 , <nl> + 0 , 0 , 0 , 0 , - 1 , - 1 , 0 , 0 , 0 , 0 , - 1 , - 1 , 0 , 0 , 0 , 0 ) ; <nl> + static const __m256i m1 = _mm256_setr_epi8 ( 0 , 0 , 0 , 0 , - 1 , - 1 , 0 , 0 , 0 , 0 , - 1 , - 1 , 0 , 0 , 0 , 0 , <nl> + - 1 , - 1 , 0 , 0 , 0 , 0 , - 1 , - 1 , 0 , 0 , 0 , 0 , - 1 , - 1 , 0 , 0 ) ; <nl> <nl> - v_store ( ptr , _Tpvec ( abc0 . val ) ) ; <nl> - v_store ( ptr + _Tpvec : : nlanes , _Tpvec ( abc1 . val ) ) ; <nl> - v_store ( ptr + _Tpvec : : nlanes * 2 , _Tpvec ( abc2 . val ) ) ; <nl> - } <nl> - / / todo : <nl> - template < typename _Tp , typename _Tpvec > <nl> - inline void v256_load_deinterleave_l16 ( const _Tp * , _Tpvec & , _Tpvec & , _Tpvec & ) <nl> - { } <nl> - / / / / <nl> - template < typename _Tp , typename _Tpvec > <nl> - inline void v256_load_deinterleave_l16 ( const _Tp * ptr , _Tpvec & a , _Tpvec & b , _Tpvec & c , _Tpvec & d ) <nl> - { <nl> - _Tpvec ab0 , ab1 , cd0 , cd1 ; <nl> - v256_load_deinterleave_l8 ( ptr , ab0 , cd0 , ab1 , cd1 ) ; <nl> - v256_zip ( ab0 , ab1 , a , b ) ; <nl> - v256_zip ( cd0 , cd1 , c , d ) ; <nl> - } <nl> + __m256i p0 = _mm256_blendv_epi8 ( _mm256_blendv_epi8 ( b0 , g0 , m0 ) , r0 , m1 ) ; <nl> + __m256i p1 = _mm256_blendv_epi8 ( _mm256_blendv_epi8 ( g0 , r0 , m0 ) , b0 , m1 ) ; <nl> + __m256i p2 = _mm256_blendv_epi8 ( _mm256_blendv_epi8 ( r0 , b0 , m0 ) , g0 , m1 ) ; <nl> <nl> - template < typename _Tp , typename _Tpvec > <nl> - inline void v256_store_interleave_l16 ( _Tp * ptr , const _Tpvec & a , const _Tpvec & b , const _Tpvec & c , const _Tpvec & d ) <nl> - { v256_store_interleave_l8 ( ptr , a , b , c , d ) ; } <nl> + __m256i bgr0 = _mm256_permute2x128_si256 ( p0 , p2 , 0 + 2 * 16 ) ; <nl> + / / __m256i bgr1 = p1 ; <nl> + __m256i bgr2 = _mm256_permute2x128_si256 ( p0 , p2 , 1 + 3 * 16 ) ; <nl> <nl> - OPENCV_HAL_IMPL_AVX_INTERLEAVE_ACH ( v_uint16x16 , ushort , l16 ) <nl> - OPENCV_HAL_IMPL_AVX_INTERLEAVE_ACH ( v_int16x16 , short , l16 ) <nl> + _mm256_storeu_si256 ( ( __m256i * ) ptr , bgr0 ) ; <nl> + _mm256_storeu_si256 ( ( __m256i * ) ( ptr + 16 ) , p1 ) ; <nl> + _mm256_storeu_si256 ( ( __m256i * ) ( ptr + 32 ) , bgr2 ) ; <nl> + } <nl> <nl> - / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> - / / <nl> - template < typename _Tp , typename _Tpvec > <nl> - inline void v256_load_deinterleave_l32 ( const _Tp * ptr , _Tpvec & a , _Tpvec & b ) <nl> + inline void v_store_interleave ( unsigned * ptr , const v_uint32x8 & b , const v_uint32x8 & g , const v_uint32x8 & r ) <nl> { <nl> - const __m256i sep = _mm256_setr_epi8 ( <nl> - 0 , 
2 , 4 , 6 , 8 , 10 , 12 , 14 , 1 , 3 , 5 , 7 , 9 , 11 , 13 , 15 , <nl> - 0 , 2 , 4 , 6 , 8 , 10 , 12 , 14 , 1 , 3 , 5 , 7 , 9 , 11 , 13 , 15 <nl> - ) ; <nl> + __m256i b0 = _mm256_shuffle_epi32 ( b . val , 0x6c ) ; <nl> + __m256i g0 = _mm256_shuffle_epi32 ( g . val , 0xb1 ) ; <nl> + __m256i r0 = _mm256_shuffle_epi32 ( r . val , 0xc6 ) ; <nl> <nl> - _Tpvec ab0 , ab1 ; <nl> - v_recombine ( v256_load ( ptr ) , v256_load ( ptr + _Tpvec : : nlanes ) , ab0 , ab1 ) ; <nl> + __m256i p0 = _mm256_blend_epi32 ( _mm256_blend_epi32 ( b0 , g0 , 0x92 ) , r0 , 0x24 ) ; <nl> + __m256i p1 = _mm256_blend_epi32 ( _mm256_blend_epi32 ( g0 , r0 , 0x92 ) , b0 , 0x24 ) ; <nl> + __m256i p2 = _mm256_blend_epi32 ( _mm256_blend_epi32 ( r0 , b0 , 0x92 ) , g0 , 0x24 ) ; <nl> <nl> - __m256i a0b0 = _mm256_shuffle_epi8 ( ab0 . val , sep ) ; <nl> - __m256i a1b1 = _mm256_shuffle_epi8 ( ab1 . val , sep ) ; <nl> + __m256i bgr0 = _mm256_permute2x128_si256 ( p0 , p1 , 0 + 2 * 16 ) ; <nl> + / / __m256i bgr1 = p2 ; <nl> + __m256i bgr2 = _mm256_permute2x128_si256 ( p0 , p1 , 1 + 3 * 16 ) ; <nl> <nl> - a . val = _mm256_unpacklo_epi64 ( a0b0 , a1b1 ) ; <nl> - b . val = _mm256_unpackhi_epi64 ( a0b0 , a1b1 ) ; <nl> + _mm256_storeu_si256 ( ( __m256i * ) ptr , bgr0 ) ; <nl> + _mm256_storeu_si256 ( ( __m256i * ) ( ptr + 8 ) , p2 ) ; <nl> + _mm256_storeu_si256 ( ( __m256i * ) ( ptr + 16 ) , bgr2 ) ; <nl> } <nl> <nl> - / / / todo <nl> - template < typename _Tp , typename _Tpvec > <nl> - inline void v256_store_interleave_l32 ( _Tp * , const _Tpvec & , const _Tpvec & , const _Tpvec & ) <nl> - { } <nl> - template < typename _Tp , typename _Tpvec > <nl> - inline void v256_load_deinterleave_l32 ( const _Tp * , _Tpvec & , _Tpvec & , _Tpvec & ) <nl> - { } <nl> - / / / / <nl> - template < typename _Tp , typename _Tpvec > <nl> - inline void v256_load_deinterleave_l32 ( const _Tp * ptr , _Tpvec & a , _Tpvec & b , _Tpvec & c , _Tpvec & d ) <nl> + inline void v_store_interleave ( uint64 * ptr , const v_uint64x4 & b , const v_uint64x4 & g , const v_uint64x4 & r ) <nl> { <nl> - const __m256i sep = _mm256_setr_epi8 ( <nl> - 0 , 4 , 8 , 12 , 1 , 5 , 9 , 13 , 2 , 6 , 10 , 14 , 3 , 7 , 11 , 15 , <nl> - 0 , 4 , 8 , 12 , 1 , 5 , 9 , 13 , 2 , 6 , 10 , 14 , 3 , 7 , 11 , 15 <nl> - ) ; <nl> - <nl> - _Tpvec abcd0 , abcd1 , abcd2 , abcd3 ; <nl> - v_recombine ( v256_load ( ptr ) , v256_load ( ptr + _Tpvec : : nlanes * 2 ) , abcd0 , abcd1 ) ; <nl> - v_recombine ( v256_load ( ptr + _Tpvec : : nlanes ) , v256_load ( ptr + _Tpvec : : nlanes * 3 ) , abcd2 , abcd3 ) ; <nl> + __m256i s01 = _mm256_unpacklo_epi64 ( b . val , g . val ) ; <nl> + __m256i s12 = _mm256_unpackhi_epi64 ( g . val , r . val ) ; <nl> + __m256i s20 = _mm256_blend_epi32 ( r . val , b . val , 0xcc ) ; <nl> <nl> - __m256i ab0cd0 = _mm256_shuffle_epi8 ( abcd0 . val , sep ) ; <nl> - __m256i ab1cd1 = _mm256_shuffle_epi8 ( abcd1 . val , sep ) ; <nl> - __m256i ab2cd2 = _mm256_shuffle_epi8 ( abcd2 . val , sep ) ; <nl> - __m256i ab3cd3 = _mm256_shuffle_epi8 ( abcd3 . val , sep ) ; <nl> + __m256i bgr0 = _mm256_permute2x128_si256 ( s01 , s20 , 0 + 2 * 16 ) ; <nl> + __m256i bgr1 = _mm256_blend_epi32 ( s01 , s12 , 0x0f ) ; <nl> + __m256i bgr2 = _mm256_permute2x128_si256 ( s20 , s12 , 1 + 3 * 16 ) ; <nl> <nl> - __m256i ab0 = _mm256_unpacklo_epi32 ( ab0cd0 , ab1cd1 ) ; <nl> - __m256i ab1 = _mm256_unpacklo_epi32 ( ab2cd2 , ab3cd3 ) ; <nl> - __m256i cd0 = _mm256_unpackhi_epi32 ( ab0cd0 , ab1cd1 ) ; <nl> - __m256i cd1 = _mm256_unpackhi_epi32 ( ab2cd2 , ab3cd3 ) ; <nl> - <nl> - a . 
val = _mm256_unpacklo_epi64 ( ab0 , ab1 ) ; <nl> - b . val = _mm256_unpackhi_epi64 ( ab0 , ab1 ) ; <nl> - c . val = _mm256_unpacklo_epi64 ( cd0 , cd1 ) ; <nl> - d . val = _mm256_unpackhi_epi64 ( cd0 , cd1 ) ; <nl> - } <nl> - <nl> - template < typename _Tp , typename _Tpvec > <nl> - inline void v256_store_interleave_l32 ( _Tp * ptr , const _Tpvec & a , const _Tpvec & b , const _Tpvec & c , const _Tpvec & d ) <nl> - { v256_store_interleave_l8 ( ptr , a , b , c , d ) ; } <nl> + _mm256_storeu_si256 ( ( __m256i * ) ptr , bgr0 ) ; <nl> + _mm256_storeu_si256 ( ( __m256i * ) ( ptr + 4 ) , bgr1 ) ; <nl> + _mm256_storeu_si256 ( ( __m256i * ) ( ptr + 8 ) , bgr2 ) ; <nl> + } <nl> <nl> - OPENCV_HAL_IMPL_AVX_INTERLEAVE_ACH ( v_uint8x32 , uchar , l32 ) <nl> - OPENCV_HAL_IMPL_AVX_INTERLEAVE_ACH ( v_int8x32 , schar , l32 ) <nl> + inline void v_store_interleave ( uchar * ptr , const v_uint8x32 & b , const v_uint8x32 & g , const v_uint8x32 & r , const v_uint8x32 & a ) <nl> + { <nl> + __m256i bg0 = _mm256_unpacklo_epi8 ( b . val , g . val ) ; <nl> + __m256i bg1 = _mm256_unpackhi_epi8 ( b . val , g . val ) ; <nl> + __m256i ra0 = _mm256_unpacklo_epi8 ( r . val , a . val ) ; <nl> + __m256i ra1 = _mm256_unpackhi_epi8 ( r . val , a . val ) ; <nl> + <nl> + __m256i bgra0_ = _mm256_unpacklo_epi16 ( bg0 , ra0 ) ; <nl> + __m256i bgra1_ = _mm256_unpackhi_epi16 ( bg0 , ra0 ) ; <nl> + __m256i bgra2_ = _mm256_unpacklo_epi16 ( bg1 , ra1 ) ; <nl> + __m256i bgra3_ = _mm256_unpackhi_epi16 ( bg1 , ra1 ) ; <nl> + <nl> + __m256i bgra0 = _mm256_permute2x128_si256 ( bgra0_ , bgra1_ , 0 + 2 * 16 ) ; <nl> + __m256i bgra2 = _mm256_permute2x128_si256 ( bgra0_ , bgra1_ , 1 + 3 * 16 ) ; <nl> + __m256i bgra1 = _mm256_permute2x128_si256 ( bgra2_ , bgra3_ , 0 + 2 * 16 ) ; <nl> + __m256i bgra3 = _mm256_permute2x128_si256 ( bgra2_ , bgra3_ , 1 + 3 * 16 ) ; <nl> + <nl> + _mm256_storeu_si256 ( ( __m256i * ) ptr , bgra0 ) ; <nl> + _mm256_storeu_si256 ( ( __m256i * ) ( ptr + 32 ) , bgra1 ) ; <nl> + _mm256_storeu_si256 ( ( __m256i * ) ( ptr + 64 ) , bgra2 ) ; <nl> + _mm256_storeu_si256 ( ( __m256i * ) ( ptr + 96 ) , bgra3 ) ; <nl> + } <nl> + <nl> + inline void v_store_interleave ( ushort * ptr , const v_uint16x16 & b , const v_uint16x16 & g , <nl> + const v_uint16x16 & r , const v_uint16x16 & a ) <nl> + { <nl> + __m256i bg0 = _mm256_unpacklo_epi16 ( b . val , g . val ) ; <nl> + __m256i bg1 = _mm256_unpackhi_epi16 ( b . val , g . val ) ; <nl> + __m256i ra0 = _mm256_unpacklo_epi16 ( r . val , a . val ) ; <nl> + __m256i ra1 = _mm256_unpackhi_epi16 ( r . val , a . 
val ) ; <nl> + <nl> + __m256i bgra0_ = _mm256_unpacklo_epi32 ( bg0 , ra0 ) ; <nl> + __m256i bgra1_ = _mm256_unpackhi_epi32 ( bg0 , ra0 ) ; <nl> + __m256i bgra2_ = _mm256_unpacklo_epi32 ( bg1 , ra1 ) ; <nl> + __m256i bgra3_ = _mm256_unpackhi_epi32 ( bg1 , ra1 ) ; <nl> + <nl> + __m256i bgra0 = _mm256_permute2x128_si256 ( bgra0_ , bgra1_ , 0 + 2 * 16 ) ; <nl> + __m256i bgra2 = _mm256_permute2x128_si256 ( bgra0_ , bgra1_ , 1 + 3 * 16 ) ; <nl> + __m256i bgra1 = _mm256_permute2x128_si256 ( bgra2_ , bgra3_ , 0 + 2 * 16 ) ; <nl> + __m256i bgra3 = _mm256_permute2x128_si256 ( bgra2_ , bgra3_ , 1 + 3 * 16 ) ; <nl> + <nl> + _mm256_storeu_si256 ( ( __m256i * ) ptr , bgra0 ) ; <nl> + _mm256_storeu_si256 ( ( __m256i * ) ( ptr + 16 ) , bgra1 ) ; <nl> + _mm256_storeu_si256 ( ( __m256i * ) ( ptr + 32 ) , bgra2 ) ; <nl> + _mm256_storeu_si256 ( ( __m256i * ) ( ptr + 48 ) , bgra3 ) ; <nl> + } <nl> + <nl> + inline void v_store_interleave ( unsigned * ptr , const v_uint32x8 & b , const v_uint32x8 & g , <nl> + const v_uint32x8 & r , const v_uint32x8 & a ) <nl> + { <nl> + __m256i bg0 = _mm256_unpacklo_epi32 ( b . val , g . val ) ; <nl> + __m256i bg1 = _mm256_unpackhi_epi32 ( b . val , g . val ) ; <nl> + __m256i ra0 = _mm256_unpacklo_epi32 ( r . val , a . val ) ; <nl> + __m256i ra1 = _mm256_unpackhi_epi32 ( r . val , a . val ) ; <nl> + <nl> + __m256i bgra0_ = _mm256_unpacklo_epi64 ( bg0 , ra0 ) ; <nl> + __m256i bgra1_ = _mm256_unpackhi_epi64 ( bg0 , ra0 ) ; <nl> + __m256i bgra2_ = _mm256_unpacklo_epi64 ( bg1 , ra1 ) ; <nl> + __m256i bgra3_ = _mm256_unpackhi_epi64 ( bg1 , ra1 ) ; <nl> + <nl> + __m256i bgra0 = _mm256_permute2x128_si256 ( bgra0_ , bgra1_ , 0 + 2 * 16 ) ; <nl> + __m256i bgra2 = _mm256_permute2x128_si256 ( bgra0_ , bgra1_ , 1 + 3 * 16 ) ; <nl> + __m256i bgra1 = _mm256_permute2x128_si256 ( bgra2_ , bgra3_ , 0 + 2 * 16 ) ; <nl> + __m256i bgra3 = _mm256_permute2x128_si256 ( bgra2_ , bgra3_ , 1 + 3 * 16 ) ; <nl> + <nl> + _mm256_storeu_si256 ( ( __m256i * ) ptr , bgra0 ) ; <nl> + _mm256_storeu_si256 ( ( __m256i * ) ( ptr + 8 ) , bgra1 ) ; <nl> + _mm256_storeu_si256 ( ( __m256i * ) ( ptr + 16 ) , bgra2 ) ; <nl> + _mm256_storeu_si256 ( ( __m256i * ) ( ptr + 24 ) , bgra3 ) ; <nl> + } <nl> + <nl> + inline void v_store_interleave ( uint64 * ptr , const v_uint64x4 & b , const v_uint64x4 & g , <nl> + const v_uint64x4 & r , const v_uint64x4 & a ) <nl> + { <nl> + __m256i bg0 = _mm256_unpacklo_epi64 ( b . val , g . val ) ; <nl> + __m256i bg1 = _mm256_unpackhi_epi64 ( b . val , g . val ) ; <nl> + __m256i ra0 = _mm256_unpacklo_epi64 ( r . val , a . val ) ; <nl> + __m256i ra1 = _mm256_unpackhi_epi64 ( r . val , a . 
val ) ; <nl> + <nl> + __m256i bgra0 = _mm256_permute2x128_si256 ( bg0 , ra0 , 0 + 2 * 16 ) ; <nl> + __m256i bgra1 = _mm256_permute2x128_si256 ( bg1 , ra1 , 0 + 2 * 16 ) ; <nl> + __m256i bgra2 = _mm256_permute2x128_si256 ( bg0 , ra0 , 1 + 3 * 16 ) ; <nl> + __m256i bgra3 = _mm256_permute2x128_si256 ( bg1 , ra1 , 1 + 3 * 16 ) ; <nl> + <nl> + _mm256_storeu_si256 ( ( __m256i * ) ptr , bgra0 ) ; <nl> + _mm256_storeu_si256 ( ( __m256i * ) ( ptr + 4 ) , bgra1 ) ; <nl> + _mm256_storeu_si256 ( ( __m256i * ) ( ptr + 8 ) , bgra2 ) ; <nl> + _mm256_storeu_si256 ( ( __m256i * ) ( ptr + 12 ) , bgra3 ) ; <nl> + } <nl> + <nl> + # define OPENCV_HAL_IMPL_AVX_LOADSTORE_INTERLEAVE ( _Tpvec0 , _Tp0 , suffix0 , _Tpvec1 , _Tp1 , suffix1 ) \ <nl> + inline void v_load_deinterleave ( const _Tp0 * ptr , _Tpvec0 & a0 , _Tpvec0 & b0 ) \ <nl> + { \ <nl> + _Tpvec1 a1 , b1 ; \ <nl> + v_load_deinterleave ( ( const _Tp1 * ) ptr , a1 , b1 ) ; \ <nl> + a0 = v_reinterpret_as_ # # suffix0 ( a1 ) ; \ <nl> + b0 = v_reinterpret_as_ # # suffix0 ( b1 ) ; \ <nl> + } \ <nl> + inline void v_load_deinterleave ( const _Tp0 * ptr , _Tpvec0 & a0 , _Tpvec0 & b0 , _Tpvec0 & c0 ) \ <nl> + { \ <nl> + _Tpvec1 a1 , b1 , c1 ; \ <nl> + v_load_deinterleave ( ( const _Tp1 * ) ptr , a1 , b1 , c1 ) ; \ <nl> + a0 = v_reinterpret_as_ # # suffix0 ( a1 ) ; \ <nl> + b0 = v_reinterpret_as_ # # suffix0 ( b1 ) ; \ <nl> + c0 = v_reinterpret_as_ # # suffix0 ( c1 ) ; \ <nl> + } \ <nl> + inline void v_load_deinterleave ( const _Tp0 * ptr , _Tpvec0 & a0 , _Tpvec0 & b0 , _Tpvec0 & c0 , _Tpvec0 & d0 ) \ <nl> + { \ <nl> + _Tpvec1 a1 , b1 , c1 , d1 ; \ <nl> + v_load_deinterleave ( ( const _Tp1 * ) ptr , a1 , b1 , c1 , d1 ) ; \ <nl> + a0 = v_reinterpret_as_ # # suffix0 ( a1 ) ; \ <nl> + b0 = v_reinterpret_as_ # # suffix0 ( b1 ) ; \ <nl> + c0 = v_reinterpret_as_ # # suffix0 ( c1 ) ; \ <nl> + d0 = v_reinterpret_as_ # # suffix0 ( d1 ) ; \ <nl> + } \ <nl> + inline void v_store_interleave ( _Tp0 * ptr , const _Tpvec0 & a0 , const _Tpvec0 & b0 ) \ <nl> + { \ <nl> + _Tpvec1 a1 = v_reinterpret_as_ # # suffix1 ( a0 ) ; \ <nl> + _Tpvec1 b1 = v_reinterpret_as_ # # suffix1 ( b0 ) ; \ <nl> + v_store_interleave ( ( _Tp1 * ) ptr , a1 , b1 ) ; \ <nl> + } \ <nl> + inline void v_store_interleave ( _Tp0 * ptr , const _Tpvec0 & a0 , const _Tpvec0 & b0 , const _Tpvec0 & c0 ) \ <nl> + { \ <nl> + _Tpvec1 a1 = v_reinterpret_as_ # # suffix1 ( a0 ) ; \ <nl> + _Tpvec1 b1 = v_reinterpret_as_ # # suffix1 ( b0 ) ; \ <nl> + _Tpvec1 c1 = v_reinterpret_as_ # # suffix1 ( c0 ) ; \ <nl> + v_store_interleave ( ( _Tp1 * ) ptr , a1 , b1 , c1 ) ; \ <nl> + } \ <nl> + inline void v_store_interleave ( _Tp0 * ptr , const _Tpvec0 & a0 , const _Tpvec0 & b0 , \ <nl> + const _Tpvec0 & c0 , const _Tpvec0 & d0 ) \ <nl> + { \ <nl> + _Tpvec1 a1 = v_reinterpret_as_ # # suffix1 ( a0 ) ; \ <nl> + _Tpvec1 b1 = v_reinterpret_as_ # # suffix1 ( b0 ) ; \ <nl> + _Tpvec1 c1 = v_reinterpret_as_ # # suffix1 ( c0 ) ; \ <nl> + _Tpvec1 d1 = v_reinterpret_as_ # # suffix1 ( d0 ) ; \ <nl> + v_store_interleave ( ( _Tp1 * ) ptr , a1 , b1 , c1 , d1 ) ; \ <nl> + } <nl> + <nl> + OPENCV_HAL_IMPL_AVX_LOADSTORE_INTERLEAVE ( v_int8x32 , schar , s8 , v_uint8x32 , uchar , u8 ) <nl> + OPENCV_HAL_IMPL_AVX_LOADSTORE_INTERLEAVE ( v_int16x16 , short , s16 , v_uint16x16 , ushort , u16 ) <nl> + OPENCV_HAL_IMPL_AVX_LOADSTORE_INTERLEAVE ( v_int32x8 , int , s32 , v_uint32x8 , unsigned , u32 ) <nl> + OPENCV_HAL_IMPL_AVX_LOADSTORE_INTERLEAVE ( v_float32x8 , float , f32 , v_uint32x8 , unsigned , u32 ) <nl> + OPENCV_HAL_IMPL_AVX_LOADSTORE_INTERLEAVE ( v_int64x4 , 
int64 , s64 , v_uint64x4 , uint64 , u64 ) <nl> + OPENCV_HAL_IMPL_AVX_LOADSTORE_INTERLEAVE ( v_float64x4 , double , f64 , v_uint64x4 , uint64 , u64 ) <nl> <nl> inline void v256_cleanup ( ) { _mm256_zeroupper ( ) ; } <nl> <nl> mmm a / modules / core / include / opencv2 / core / hal / intrin_neon . hpp <nl> ppp b / modules / core / include / opencv2 / core / hal / intrin_neon . hpp <nl> inline void v_store_interleave ( _Tp * ptr , const v_ # # _Tpvec & a , const v_ # # _Tpvec & <nl> vst4q_ # # suffix ( ptr , v ) ; \ <nl> } <nl> <nl> + # define OPENCV_HAL_IMPL_NEON_INTERLEAVED_INT64 ( tp , suffix ) \ <nl> + inline void v_load_deinterleave ( const tp * ptr , v_ # # tp # # x2 & a , v_ # # tp # # x2 & b ) \ <nl> + { \ <nl> + tp # # x1_t a0 = vld1_ # # suffix ( ptr ) ; \ <nl> + tp # # x1_t b0 = vld1_ # # suffix ( ptr + 1 ) ; \ <nl> + tp # # x1_t a1 = vld1_ # # suffix ( ptr + 2 ) ; \ <nl> + tp # # x1_t b1 = vld1_ # # suffix ( ptr + 3 ) ; \ <nl> + a = v_ # # tp # # x2 ( vcombine_ # # suffix ( a0 , a1 ) ) ; \ <nl> + b = v_ # # tp # # x2 ( vcombine_ # # suffix ( b0 , b1 ) ) ; \ <nl> + } \ <nl> + \ <nl> + inline void v_load_deinterleave ( const tp * ptr , v_ # # tp # # x2 & a , \ <nl> + v_ # # tp # # x2 & b , v_ # # tp # # x2 & c ) \ <nl> + { \ <nl> + tp # # x1_t a0 = vld1_ # # suffix ( ptr ) ; \ <nl> + tp # # x1_t b0 = vld1_ # # suffix ( ptr + 1 ) ; \ <nl> + tp # # x1_t c0 = vld1_ # # suffix ( ptr + 2 ) ; \ <nl> + tp # # x1_t a1 = vld1_ # # suffix ( ptr + 3 ) ; \ <nl> + tp # # x1_t b1 = vld1_ # # suffix ( ptr + 4 ) ; \ <nl> + tp # # x1_t c1 = vld1_ # # suffix ( ptr + 5 ) ; \ <nl> + a = v_ # # tp # # x2 ( vcombine_ # # suffix ( a0 , a1 ) ) ; \ <nl> + b = v_ # # tp # # x2 ( vcombine_ # # suffix ( b0 , b1 ) ) ; \ <nl> + c = v_ # # tp # # x2 ( vcombine_ # # suffix ( c0 , c1 ) ) ; \ <nl> + } \ <nl> + \ <nl> + inline void v_load_deinterleave ( const tp * ptr , v_ # # tp # # x2 & a , v_ # # tp # # x2 & b , \ <nl> + v_ # # tp # # x2 & c , v_ # # tp # # x2 & d ) \ <nl> + { \ <nl> + tp # # x1_t a0 = vld1_ # # suffix ( ptr ) ; \ <nl> + tp # # x1_t b0 = vld1_ # # suffix ( ptr + 1 ) ; \ <nl> + tp # # x1_t c0 = vld1_ # # suffix ( ptr + 2 ) ; \ <nl> + tp # # x1_t d0 = vld1_ # # suffix ( ptr + 3 ) ; \ <nl> + tp # # x1_t a1 = vld1_ # # suffix ( ptr + 4 ) ; \ <nl> + tp # # x1_t b1 = vld1_ # # suffix ( ptr + 5 ) ; \ <nl> + tp # # x1_t c1 = vld1_ # # suffix ( ptr + 6 ) ; \ <nl> + tp # # x1_t d1 = vld1_ # # suffix ( ptr + 7 ) ; \ <nl> + a = v_ # # tp # # x2 ( vcombine_ # # suffix ( a0 , a1 ) ) ; \ <nl> + b = v_ # # tp # # x2 ( vcombine_ # # suffix ( b0 , b1 ) ) ; \ <nl> + c = v_ # # tp # # x2 ( vcombine_ # # suffix ( c0 , c1 ) ) ; \ <nl> + d = v_ # # tp # # x2 ( vcombine_ # # suffix ( d0 , d1 ) ) ; \ <nl> + } \ <nl> + \ <nl> + inline void v_store_interleave ( tp * ptr , const v_ # # tp # # x2 & a , const v_ # # tp # # x2 & b ) \ <nl> + { \ <nl> + vst1_ # # suffix ( ptr , vget_low_ # # suffix ( a . val ) ) ; \ <nl> + vst1_ # # suffix ( ptr + 1 , vget_low_ # # suffix ( b . val ) ) ; \ <nl> + vst1_ # # suffix ( ptr + 2 , vget_high_ # # suffix ( a . val ) ) ; \ <nl> + vst1_ # # suffix ( ptr + 3 , vget_high_ # # suffix ( b . val ) ) ; \ <nl> + } \ <nl> + \ <nl> + inline void v_store_interleave ( tp * ptr , const v_ # # tp # # x2 & a , \ <nl> + const v_ # # tp # # x2 & b , const v_ # # tp # # x2 & c ) \ <nl> + { \ <nl> + vst1_ # # suffix ( ptr , vget_low_ # # suffix ( a . val ) ) ; \ <nl> + vst1_ # # suffix ( ptr + 1 , vget_low_ # # suffix ( b . val ) ) ; \ <nl> + vst1_ # # suffix ( ptr + 2 , vget_low_ # # suffix ( c . 
val ) ) ; \ <nl> + vst1_ # # suffix ( ptr + 3 , vget_high_ # # suffix ( a . val ) ) ; \ <nl> + vst1_ # # suffix ( ptr + 4 , vget_high_ # # suffix ( b . val ) ) ; \ <nl> + vst1_ # # suffix ( ptr + 5 , vget_high_ # # suffix ( c . val ) ) ; \ <nl> + } \ <nl> + \ <nl> + inline void v_store_interleave ( tp * ptr , const v_ # # tp # # x2 & a , const v_ # # tp # # x2 & b , \ <nl> + const v_ # # tp # # x2 & c , const v_ # # tp # # x2 & d ) \ <nl> + { \ <nl> + vst1_ # # suffix ( ptr , vget_low_ # # suffix ( a . val ) ) ; \ <nl> + vst1_ # # suffix ( ptr + 1 , vget_low_ # # suffix ( b . val ) ) ; \ <nl> + vst1_ # # suffix ( ptr + 2 , vget_low_ # # suffix ( c . val ) ) ; \ <nl> + vst1_ # # suffix ( ptr + 3 , vget_low_ # # suffix ( d . val ) ) ; \ <nl> + vst1_ # # suffix ( ptr + 4 , vget_high_ # # suffix ( a . val ) ) ; \ <nl> + vst1_ # # suffix ( ptr + 5 , vget_high_ # # suffix ( b . val ) ) ; \ <nl> + vst1_ # # suffix ( ptr + 6 , vget_high_ # # suffix ( c . val ) ) ; \ <nl> + vst1_ # # suffix ( ptr + 7 , vget_high_ # # suffix ( d . val ) ) ; \ <nl> + } <nl> + <nl> OPENCV_HAL_IMPL_NEON_INTERLEAVED ( uint8x16 , uchar , u8 ) <nl> OPENCV_HAL_IMPL_NEON_INTERLEAVED ( int8x16 , schar , s8 ) <nl> OPENCV_HAL_IMPL_NEON_INTERLEAVED ( uint16x8 , ushort , u16 ) <nl> OPENCV_HAL_IMPL_NEON_INTERLEAVED ( float32x4 , float , f32 ) <nl> OPENCV_HAL_IMPL_NEON_INTERLEAVED ( float64x2 , double , f64 ) <nl> # endif <nl> <nl> + OPENCV_HAL_IMPL_NEON_INTERLEAVED_INT64 ( int64 , s64 ) <nl> + OPENCV_HAL_IMPL_NEON_INTERLEAVED_INT64 ( uint64 , u64 ) <nl> + <nl> inline v_float32x4 v_cvt_f32 ( const v_int32x4 & a ) <nl> { <nl> return v_float32x4 ( vcvtq_f32_s32 ( a . val ) ) ; <nl> mmm a / modules / core / include / opencv2 / core / hal / intrin_sse . hpp <nl> ppp b / modules / core / include / opencv2 / core / hal / intrin_sse . hpp <nl> namespace cv <nl> <nl> CV_CPU_OPTIMIZATION_HAL_NAMESPACE_BEGIN <nl> <nl> - struct v_uint8x16 ; <nl> - struct v_int8x16 ; <nl> - struct v_uint16x8 ; <nl> - struct v_int16x8 ; <nl> - struct v_uint32x4 ; <nl> - struct v_int32x4 ; <nl> - struct v_float32x4 ; <nl> - struct v_uint64x2 ; <nl> - struct v_int64x2 ; <nl> - struct v_float64x2 ; <nl> - <nl> struct v_uint8x16 <nl> { <nl> typedef uchar lane_type ; <nl> OPENCV_HAL_IMPL_SSE_TRANSPOSE4x4 ( v_uint32x4 , epi32 , OPENCV_HAL_NOP , OPENCV_HAL_N <nl> OPENCV_HAL_IMPL_SSE_TRANSPOSE4x4 ( v_int32x4 , epi32 , OPENCV_HAL_NOP , OPENCV_HAL_NOP ) <nl> OPENCV_HAL_IMPL_SSE_TRANSPOSE4x4 ( v_float32x4 , ps , _mm_castps_si128 , _mm_castsi128_ps ) <nl> <nl> - / / adopted from sse_utils . 
hpp <nl> + / / load deinterleave <nl> inline void v_load_deinterleave ( const uchar * ptr , v_uint8x16 & a , v_uint8x16 & b ) <nl> { <nl> __m128i t00 = _mm_loadu_si128 ( ( const __m128i * ) ptr ) ; <nl> inline void v_load_deinterleave ( const uchar * ptr , v_uint8x16 & a , v_uint8x16 & b ) <nl> <nl> inline void v_load_deinterleave ( const uchar * ptr , v_uint8x16 & a , v_uint8x16 & b , v_uint8x16 & c ) <nl> { <nl> - # if CV_SSSE3 <nl> + # if CV_SSE4_1 <nl> + static const __m128i m0 = _mm_setr_epi8 ( 0 , 0 , - 1 , 0 , 0 , - 1 , 0 , 0 , - 1 , 0 , 0 , - 1 , 0 , 0 , - 1 , 0 ) ; <nl> + static const __m128i m1 = _mm_setr_epi8 ( 0 , - 1 , 0 , 0 , - 1 , 0 , 0 , - 1 , 0 , 0 , - 1 , 0 , 0 , - 1 , 0 , 0 ) ; <nl> + __m128i s0 = _mm_loadu_si128 ( ( const __m128i * ) ptr ) ; <nl> + __m128i s1 = _mm_loadu_si128 ( ( const __m128i * ) ( ptr + 16 ) ) ; <nl> + __m128i s2 = _mm_loadu_si128 ( ( const __m128i * ) ( ptr + 32 ) ) ; <nl> + __m128i a0 = _mm_blendv_epi8 ( _mm_blendv_epi8 ( s0 , s1 , m0 ) , s2 , m1 ) ; <nl> + __m128i b0 = _mm_blendv_epi8 ( _mm_blendv_epi8 ( s1 , s2 , m0 ) , s0 , m1 ) ; <nl> + __m128i c0 = _mm_blendv_epi8 ( _mm_blendv_epi8 ( s2 , s0 , m0 ) , s1 , m1 ) ; <nl> + static const __m128i sh_b = _mm_setr_epi8 ( 0 , 3 , 6 , 9 , 12 , 15 , 2 , 5 , 8 , 11 , 14 , 1 , 4 , 7 , 10 , 13 ) ; <nl> + static const __m128i sh_g = _mm_setr_epi8 ( 1 , 4 , 7 , 10 , 13 , 0 , 3 , 6 , 9 , 12 , 15 , 2 , 5 , 8 , 11 , 14 ) ; <nl> + static const __m128i sh_r = _mm_setr_epi8 ( 2 , 5 , 8 , 11 , 14 , 1 , 4 , 7 , 10 , 13 , 0 , 3 , 6 , 9 , 12 , 15 ) ; <nl> + a0 = _mm_shuffle_epi8 ( a0 , sh_b ) ; <nl> + b0 = _mm_shuffle_epi8 ( b0 , sh_g ) ; <nl> + c0 = _mm_shuffle_epi8 ( c0 , sh_r ) ; <nl> + a . val = a0 ; <nl> + b . val = b0 ; <nl> + c . val = c0 ; <nl> + # elif CV_SSSE3 <nl> static const __m128i m0 = _mm_setr_epi8 ( 0 , 3 , 6 , 9 , 12 , 15 , 1 , 4 , 7 , 10 , 13 , 2 , 5 , 8 , 11 , 14 ) ; <nl> static const __m128i m1 = _mm_alignr_epi8 ( m0 , m0 , 11 ) ; <nl> static const __m128i m2 = _mm_alignr_epi8 ( m0 , m0 , 6 ) ; <nl> inline void v_load_deinterleave ( const uchar * ptr , v_uint8x16 & a , v_uint8x16 & b , <nl> d . val = _mm_unpackhi_epi8 ( v2 , v3 ) ; <nl> } <nl> <nl> + inline void v_load_deinterleave ( const ushort * ptr , v_uint16x8 & a , v_uint16x8 & b ) <nl> + { <nl> + __m128i v0 = _mm_loadu_si128 ( ( __m128i * ) ( ptr ) ) ; / / a0 b0 a1 b1 a2 b2 a3 b3 <nl> + __m128i v1 = _mm_loadu_si128 ( ( __m128i * ) ( ptr + 8 ) ) ; / / a4 b4 a5 b5 a6 b6 a7 b7 <nl> + <nl> + __m128i v2 = _mm_unpacklo_epi16 ( v0 , v1 ) ; / / a0 a4 b0 b4 a1 a5 b1 b5 <nl> + __m128i v3 = _mm_unpackhi_epi16 ( v0 , v1 ) ; / / a2 a6 b2 b6 a3 a7 b3 b7 <nl> + __m128i v4 = _mm_unpacklo_epi16 ( v2 , v3 ) ; / / a0 a2 a4 a6 b0 b2 b4 b6 <nl> + __m128i v5 = _mm_unpackhi_epi16 ( v2 , v3 ) ; / / a1 a3 a5 a7 b1 b3 b5 b7 <nl> + <nl> + a . val = _mm_unpacklo_epi16 ( v4 , v5 ) ; / / a0 a1 a2 a3 a4 a5 a6 a7 <nl> + b . 
val = _mm_unpackhi_epi16 ( v4 , v5 ) ; / / b0 b1 ab b3 b4 b5 b6 b7 <nl> + } <nl> + <nl> inline void v_load_deinterleave ( const ushort * ptr , v_uint16x8 & a , v_uint16x8 & b , v_uint16x8 & c ) <nl> { <nl> + # if CV_SSE4_1 <nl> + __m128i v0 = _mm_loadu_si128 ( ( __m128i * ) ( ptr ) ) ; <nl> + __m128i v1 = _mm_loadu_si128 ( ( __m128i * ) ( ptr + 8 ) ) ; <nl> + __m128i v2 = _mm_loadu_si128 ( ( __m128i * ) ( ptr + 16 ) ) ; <nl> + __m128i a0 = _mm_blend_epi16 ( _mm_blend_epi16 ( v0 , v1 , 0x92 ) , v2 , 0x24 ) ; <nl> + __m128i b0 = _mm_blend_epi16 ( _mm_blend_epi16 ( v2 , v0 , 0x92 ) , v1 , 0x24 ) ; <nl> + __m128i c0 = _mm_blend_epi16 ( _mm_blend_epi16 ( v1 , v2 , 0x92 ) , v0 , 0x24 ) ; <nl> + <nl> + static const __m128i sh_a = _mm_setr_epi8 ( 0 , 1 , 6 , 7 , 12 , 13 , 2 , 3 , 8 , 9 , 14 , 15 , 4 , 5 , 10 , 11 ) ; <nl> + static const __m128i sh_b = _mm_setr_epi8 ( 2 , 3 , 8 , 9 , 14 , 15 , 4 , 5 , 10 , 11 , 0 , 1 , 6 , 7 , 12 , 13 ) ; <nl> + static const __m128i sh_c = _mm_setr_epi8 ( 4 , 5 , 10 , 11 , 0 , 1 , 6 , 7 , 12 , 13 , 2 , 3 , 8 , 9 , 14 , 15 ) ; <nl> + a0 = _mm_shuffle_epi8 ( a0 , sh_a ) ; <nl> + b0 = _mm_shuffle_epi8 ( b0 , sh_b ) ; <nl> + c0 = _mm_shuffle_epi8 ( c0 , sh_c ) ; <nl> + <nl> + a . val = a0 ; <nl> + b . val = b0 ; <nl> + c . val = c0 ; <nl> + # else <nl> __m128i t00 = _mm_loadu_si128 ( ( const __m128i * ) ptr ) ; <nl> __m128i t01 = _mm_loadu_si128 ( ( const __m128i * ) ( ptr + 8 ) ) ; <nl> __m128i t02 = _mm_loadu_si128 ( ( const __m128i * ) ( ptr + 16 ) ) ; <nl> inline void v_load_deinterleave ( const ushort * ptr , v_uint16x8 & a , v_uint16x8 & b , <nl> a . val = _mm_unpacklo_epi16 ( t20 , _mm_unpackhi_epi64 ( t21 , t21 ) ) ; <nl> b . val = _mm_unpacklo_epi16 ( _mm_unpackhi_epi64 ( t20 , t20 ) , t22 ) ; <nl> c . val = _mm_unpacklo_epi16 ( t21 , _mm_unpackhi_epi64 ( t22 , t22 ) ) ; <nl> + # endif <nl> } <nl> <nl> inline void v_load_deinterleave ( const ushort * ptr , v_uint16x8 & a , v_uint16x8 & b , v_uint16x8 & c , v_uint16x8 & d ) <nl> inline void v_load_deinterleave ( const ushort * ptr , v_uint16x8 & a , v_uint16x8 & b , <nl> d . val = _mm_unpackhi_epi16 ( u2 , u3 ) ; <nl> } <nl> <nl> + inline void v_load_deinterleave ( const unsigned * ptr , v_uint32x4 & a , v_uint32x4 & b ) <nl> + { <nl> + __m128i v0 = _mm_loadu_si128 ( ( __m128i * ) ( ptr ) ) ; / / a0 b0 a1 b1 <nl> + __m128i v1 = _mm_loadu_si128 ( ( __m128i * ) ( ptr + 4 ) ) ; / / a2 b2 a3 b3 <nl> + <nl> + __m128i v2 = _mm_unpacklo_epi32 ( v0 , v1 ) ; / / a0 a2 b0 b2 <nl> + __m128i v3 = _mm_unpackhi_epi32 ( v0 , v1 ) ; / / a1 a3 b1 b3 <nl> + <nl> + a . val = _mm_unpacklo_epi32 ( v2 , v3 ) ; / / a0 a1 a2 a3 <nl> + b . 
val = _mm_unpackhi_epi32 ( v2 , v3 ) ; / / b0 b1 ab b3 <nl> + } <nl> + <nl> inline void v_load_deinterleave ( const unsigned * ptr , v_uint32x4 & a , v_uint32x4 & b , v_uint32x4 & c ) <nl> { <nl> __m128i t00 = _mm_loadu_si128 ( ( const __m128i * ) ptr ) ; <nl> inline void v_load_deinterleave ( const unsigned * ptr , v_uint32x4 & a , v_uint32x4 & <nl> <nl> inline void v_load_deinterleave ( const unsigned * ptr , v_uint32x4 & a , v_uint32x4 & b , v_uint32x4 & c , v_uint32x4 & d ) <nl> { <nl> - v_uint32x4 u0 ( _mm_loadu_si128 ( ( const __m128i * ) ptr ) ) ; / / a0 b0 c0 d0 <nl> - v_uint32x4 u1 ( _mm_loadu_si128 ( ( const __m128i * ) ( ptr + 4 ) ) ) ; / / a1 b1 c1 d1 <nl> - v_uint32x4 u2 ( _mm_loadu_si128 ( ( const __m128i * ) ( ptr + 8 ) ) ) ; / / a2 b2 c2 d2 <nl> - v_uint32x4 u3 ( _mm_loadu_si128 ( ( const __m128i * ) ( ptr + 12 ) ) ) ; / / a3 b3 c3 d3 <nl> + v_uint32x4 s0 ( _mm_loadu_si128 ( ( const __m128i * ) ptr ) ) ; / / a0 b0 c0 d0 <nl> + v_uint32x4 s1 ( _mm_loadu_si128 ( ( const __m128i * ) ( ptr + 4 ) ) ) ; / / a1 b1 c1 d1 <nl> + v_uint32x4 s2 ( _mm_loadu_si128 ( ( const __m128i * ) ( ptr + 8 ) ) ) ; / / a2 b2 c2 d2 <nl> + v_uint32x4 s3 ( _mm_loadu_si128 ( ( const __m128i * ) ( ptr + 12 ) ) ) ; / / a3 b3 c3 d3 <nl> <nl> - v_transpose4x4 ( u0 , u1 , u2 , u3 , a , b , c , d ) ; <nl> + v_transpose4x4 ( s0 , s1 , s2 , s3 , a , b , c , d ) ; <nl> + } <nl> + <nl> + inline void v_load_deinterleave ( const float * ptr , v_float32x4 & a , v_float32x4 & b ) <nl> + { <nl> + const int mask_lo = _MM_SHUFFLE ( 2 , 0 , 2 , 0 ) , mask_hi = _MM_SHUFFLE ( 3 , 1 , 3 , 1 ) ; <nl> + <nl> + __m128 u0 = _mm_loadu_ps ( ptr ) ; / / a0 b0 a1 b1 <nl> + __m128 u1 = _mm_loadu_ps ( ( ptr + 4 ) ) ; / / a2 b2 a3 b3 <nl> + <nl> + a . val = _mm_shuffle_ps ( u0 , u1 , mask_lo ) ; / / a0 a1 a2 a3 <nl> + b . val = _mm_shuffle_ps ( u0 , u1 , mask_hi ) ; / / b0 b1 ab b3 <nl> } <nl> <nl> inline void v_load_deinterleave ( const float * ptr , v_float32x4 & a , v_float32x4 & b , v_float32x4 & c ) <nl> inline void v_load_deinterleave ( const float * ptr , v_float32x4 & a , v_float32x4 & b <nl> d . 
val = _mm_unpackhi_ps ( t02hi , t13hi ) ; <nl> } <nl> <nl> - inline void v_load_deinterleave ( const uint64 * ptr , v_uint64x2 & a , v_uint64x2 & b , v_uint64x2 & c ) <nl> + inline void v_load_deinterleave ( const uint64 * ptr , v_uint64x2 & a , v_uint64x2 & b ) <nl> { <nl> __m128i t0 = _mm_loadu_si128 ( ( const __m128i * ) ptr ) ; <nl> __m128i t1 = _mm_loadu_si128 ( ( const __m128i * ) ( ptr + 2 ) ) ; <nl> - __m128i t2 = _mm_loadu_si128 ( ( const __m128i * ) ( ptr + 4 ) ) ; <nl> <nl> - a = v_uint64x2 ( _mm_unpacklo_epi64 ( t0 , _mm_unpackhi_epi64 ( t1 , t1 ) ) ) ; <nl> - b = v_uint64x2 ( _mm_unpacklo_epi64 ( _mm_unpackhi_epi64 ( t0 , t0 ) , t2 ) ) ; <nl> - c = v_uint64x2 ( _mm_unpacklo_epi64 ( t1 , _mm_unpackhi_epi64 ( t2 , t2 ) ) ) ; <nl> - } <nl> - <nl> - inline void v_load_deinterleave ( const int64 * ptr , v_int64x2 & a , v_int64x2 & b , v_int64x2 & c ) <nl> - { <nl> - v_uint64x2 t0 , t1 , t2 ; <nl> - v_load_deinterleave ( ( const uint64 * ) ptr , t0 , t1 , t2 ) ; <nl> - a = v_reinterpret_as_s64 ( t0 ) ; <nl> - b = v_reinterpret_as_s64 ( t1 ) ; <nl> - c = v_reinterpret_as_s64 ( t2 ) ; <nl> - } <nl> - <nl> - inline void v_load_deinterleave ( const double * ptr , v_float64x2 & a , v_float64x2 & b , v_float64x2 & c ) <nl> - { <nl> - v_uint64x2 t0 , t1 , t2 ; <nl> - v_load_deinterleave ( ( const uint64 * ) ptr , t0 , t1 , t2 ) ; <nl> - a = v_reinterpret_as_f64 ( t0 ) ; <nl> - b = v_reinterpret_as_f64 ( t1 ) ; <nl> - c = v_reinterpret_as_f64 ( t2 ) ; <nl> + a = v_uint64x2 ( _mm_unpacklo_epi64 ( t0 , t1 ) ) ; <nl> + b = v_uint64x2 ( _mm_unpackhi_epi64 ( t0 , t1 ) ) ; <nl> } <nl> <nl> - / / 2 - channel <nl> - inline void v_load_deinterleave ( const float * ptr , v_float32x4 & a , v_float32x4 & b ) <nl> + inline void v_load_deinterleave ( const uint64 * ptr , v_uint64x2 & a , v_uint64x2 & b , v_uint64x2 & c ) <nl> { <nl> - const int mask_lo = _MM_SHUFFLE ( 2 , 0 , 2 , 0 ) , mask_hi = _MM_SHUFFLE ( 3 , 1 , 3 , 1 ) ; <nl> + __m128i t0 = _mm_loadu_si128 ( ( const __m128i * ) ptr ) ; / / a0 , b0 <nl> + __m128i t1 = _mm_loadu_si128 ( ( const __m128i * ) ( ptr + 2 ) ) ; / / c0 , a1 <nl> + __m128i t2 = _mm_loadu_si128 ( ( const __m128i * ) ( ptr + 4 ) ) ; / / b1 , c1 <nl> <nl> - __m128 u0 = _mm_loadu_ps ( ptr ) ; / / a0 b0 a1 b1 <nl> - __m128 u1 = _mm_loadu_ps ( ( ptr + 4 ) ) ; / / a2 b2 a3 b3 <nl> + t1 = _mm_shuffle_epi32 ( t1 , 0x4e ) ; / / a1 , c0 <nl> <nl> - a . val = _mm_shuffle_ps ( u0 , u1 , mask_lo ) ; / / a0 a1 a2 a3 <nl> - b . 
val = _mm_shuffle_ps ( u0 , u1 , mask_hi ) ; / / b0 b1 ab b3 <nl> + a = v_uint64x2 ( _mm_unpacklo_epi64 ( t0 , t1 ) ) ; <nl> + b = v_uint64x2 ( _mm_unpacklo_epi64 ( _mm_unpackhi_epi64 ( t0 , t0 ) , t2 ) ) ; <nl> + c = v_uint64x2 ( _mm_unpackhi_epi64 ( t1 , t2 ) ) ; <nl> } <nl> <nl> - inline void v_load_deinterleave ( const short * ptr , v_int16x8 & a , v_int16x8 & b ) <nl> + inline void v_load_deinterleave ( const uint64 * ptr , v_uint64x2 & a , <nl> + v_uint64x2 & b , v_uint64x2 & c , v_uint64x2 & d ) <nl> { <nl> - __m128i v0 = _mm_loadu_si128 ( ( __m128i * ) ( ptr ) ) ; / / a0 b0 a1 b1 a2 b2 a3 b3 <nl> - __m128i v1 = _mm_loadu_si128 ( ( __m128i * ) ( ptr + 8 ) ) ; / / a4 b4 a5 b5 a6 b6 a7 b7 <nl> - <nl> - __m128i v2 = _mm_unpacklo_epi16 ( v0 , v1 ) ; / / a0 a4 b0 b4 a1 a5 b1 b5 <nl> - __m128i v3 = _mm_unpackhi_epi16 ( v0 , v1 ) ; / / a2 a6 b2 b6 a3 a7 b3 b7 <nl> - __m128i v4 = _mm_unpacklo_epi16 ( v2 , v3 ) ; / / a0 a2 a4 a6 b0 b2 b4 b6 <nl> - __m128i v5 = _mm_unpackhi_epi16 ( v2 , v3 ) ; / / a1 a3 a5 a7 b1 b3 b5 b7 <nl> + __m128i t0 = _mm_loadu_si128 ( ( const __m128i * ) ptr ) ; / / a0 b0 <nl> + __m128i t1 = _mm_loadu_si128 ( ( const __m128i * ) ( ptr + 2 ) ) ; / / c0 d0 <nl> + __m128i t2 = _mm_loadu_si128 ( ( const __m128i * ) ( ptr + 4 ) ) ; / / a1 b1 <nl> + __m128i t3 = _mm_loadu_si128 ( ( const __m128i * ) ( ptr + 6 ) ) ; / / c1 d1 <nl> <nl> - a . val = _mm_unpacklo_epi16 ( v4 , v5 ) ; / / a0 a1 a2 a3 a4 a5 a6 a7 <nl> - b . val = _mm_unpackhi_epi16 ( v4 , v5 ) ; / / b0 b1 ab b3 b4 b5 b6 b7 <nl> + a = v_uint64x2 ( _mm_unpacklo_epi64 ( t0 , t2 ) ) ; <nl> + b = v_uint64x2 ( _mm_unpackhi_epi64 ( t0 , t2 ) ) ; <nl> + c = v_uint64x2 ( _mm_unpacklo_epi64 ( t1 , t3 ) ) ; <nl> + d = v_uint64x2 ( _mm_unpackhi_epi64 ( t1 , t3 ) ) ; <nl> } <nl> <nl> - inline void v_load_deinterleave ( const ushort * ptr , v_uint16x8 & a , v_uint16x8 & b ) <nl> - { <nl> - v_int16x8 sa , sb ; <nl> - v_load_deinterleave ( ( const short * ) ptr , sa , sb ) ; <nl> - a = v_reinterpret_as_u16 ( sa ) ; <nl> - b = v_reinterpret_as_u16 ( sb ) ; <nl> - } <nl> - <nl> - inline void v_store_interleave ( short * ptr , const v_int16x8 & a , const v_int16x8 & b ) <nl> - { <nl> - __m128i t0 , t1 ; <nl> - t0 = _mm_unpacklo_epi16 ( a . val , b . val ) ; <nl> - t1 = _mm_unpackhi_epi16 ( a . val , b . val ) ; <nl> - _mm_storeu_si128 ( ( __m128i * ) ( ptr ) , t0 ) ; <nl> - _mm_storeu_si128 ( ( __m128i * ) ( ptr + 8 ) , t1 ) ; <nl> - } <nl> + / / store interleave <nl> <nl> inline void v_store_interleave ( uchar * ptr , const v_uint8x16 & a , const v_uint8x16 & b ) <nl> { <nl> inline void v_store_interleave ( uchar * ptr , const v_uint8x16 & a , const v_uint8x1 <nl> inline void v_store_interleave ( uchar * ptr , const v_uint8x16 & a , const v_uint8x16 & b , <nl> const v_uint8x16 & c ) <nl> { <nl> - # if CV_SSSE3 <nl> + # if CV_SSE4_1 <nl> + static const __m128i sh_a = _mm_setr_epi8 ( 0 , 11 , 6 , 1 , 12 , 7 , 2 , 13 , 8 , 3 , 14 , 9 , 4 , 15 , 10 , 5 ) ; <nl> + static const __m128i sh_b = _mm_setr_epi8 ( 5 , 0 , 11 , 6 , 1 , 12 , 7 , 2 , 13 , 8 , 3 , 14 , 9 , 4 , 15 , 10 ) ; <nl> + static const __m128i sh_c = _mm_setr_epi8 ( 10 , 5 , 0 , 11 , 6 , 1 , 12 , 7 , 2 , 13 , 8 , 3 , 14 , 9 , 4 , 15 ) ; <nl> + __m128i a0 = _mm_shuffle_epi8 ( a . val , sh_a ) ; <nl> + __m128i b0 = _mm_shuffle_epi8 ( b . val , sh_b ) ; <nl> + __m128i c0 = _mm_shuffle_epi8 ( c . 
val , sh_c ) ; <nl> + <nl> + static const __m128i m0 = _mm_setr_epi8 ( 0 , 0 , - 1 , 0 , 0 , - 1 , 0 , 0 , - 1 , 0 , 0 , - 1 , 0 , 0 , - 1 , 0 ) ; <nl> + static const __m128i m1 = _mm_setr_epi8 ( 0 , - 1 , 0 , 0 , - 1 , 0 , 0 , - 1 , 0 , 0 , - 1 , 0 , 0 , - 1 , 0 , 0 ) ; <nl> + __m128i v0 = _mm_blendv_epi8 ( _mm_blendv_epi8 ( a0 , b0 , m1 ) , c0 , m0 ) ; <nl> + __m128i v1 = _mm_blendv_epi8 ( _mm_blendv_epi8 ( b0 , c0 , m1 ) , a0 , m0 ) ; <nl> + __m128i v2 = _mm_blendv_epi8 ( _mm_blendv_epi8 ( c0 , a0 , m1 ) , b0 , m0 ) ; <nl> + <nl> + _mm_storeu_si128 ( ( __m128i * ) ( ptr ) , v0 ) ; <nl> + _mm_storeu_si128 ( ( __m128i * ) ( ptr + 16 ) , v1 ) ; <nl> + _mm_storeu_si128 ( ( __m128i * ) ( ptr + 32 ) , v2 ) ; <nl> + # elif CV_SSSE3 <nl> static const __m128i m0 = _mm_setr_epi8 ( 0 , 6 , 11 , 1 , 7 , 12 , 2 , 8 , 13 , 3 , 9 , 14 , 4 , 10 , 15 , 5 ) ; <nl> static const __m128i m1 = _mm_setr_epi8 ( 5 , 11 , 0 , 6 , 12 , 1 , 7 , 13 , 2 , 8 , 14 , 3 , 9 , 15 , 4 , 10 ) ; <nl> static const __m128i m2 = _mm_setr_epi8 ( 10 , 0 , 5 , 11 , 1 , 6 , 12 , 2 , 7 , 13 , 3 , 8 , 14 , 4 , 9 , 15 ) ; <nl> inline void v_store_interleave ( uchar * ptr , const v_uint8x16 & a , const v_uint8x1 <nl> _mm_storeu_si128 ( ( __m128i * ) ( ptr + 48 ) , v3 ) ; <nl> } <nl> <nl> + inline void v_store_interleave ( ushort * ptr , const v_uint16x8 & a , const v_uint16x8 & b ) <nl> + { <nl> + __m128i t0 , t1 ; <nl> + t0 = _mm_unpacklo_epi16 ( a . val , b . val ) ; <nl> + t1 = _mm_unpackhi_epi16 ( a . val , b . val ) ; <nl> + _mm_storeu_si128 ( ( __m128i * ) ( ptr ) , t0 ) ; <nl> + _mm_storeu_si128 ( ( __m128i * ) ( ptr + 8 ) , t1 ) ; <nl> + } <nl> + <nl> inline void v_store_interleave ( ushort * ptr , const v_uint16x8 & a , <nl> const v_uint16x8 & b , <nl> const v_uint16x8 & c ) <nl> { <nl> + # if CV_SSE4_1 <nl> + static const __m128i sh_a = _mm_setr_epi8 ( 0 , 1 , 6 , 7 , 12 , 13 , 2 , 3 , 8 , 9 , 14 , 15 , 4 , 5 , 10 , 11 ) ; <nl> + static const __m128i sh_b = _mm_setr_epi8 ( 10 , 11 , 0 , 1 , 6 , 7 , 12 , 13 , 2 , 3 , 8 , 9 , 14 , 15 , 4 , 5 ) ; <nl> + static const __m128i sh_c = _mm_setr_epi8 ( 4 , 5 , 10 , 11 , 0 , 1 , 6 , 7 , 12 , 13 , 2 , 3 , 8 , 9 , 14 , 15 ) ; <nl> + __m128i a0 = _mm_shuffle_epi8 ( a . val , sh_a ) ; <nl> + __m128i b0 = _mm_shuffle_epi8 ( b . val , sh_b ) ; <nl> + __m128i c0 = _mm_shuffle_epi8 ( c . val , sh_c ) ; <nl> + <nl> + __m128i v0 = _mm_blend_epi16 ( _mm_blend_epi16 ( a0 , b0 , 0x92 ) , c0 , 0x24 ) ; <nl> + __m128i v1 = _mm_blend_epi16 ( _mm_blend_epi16 ( c0 , a0 , 0x92 ) , b0 , 0x24 ) ; <nl> + __m128i v2 = _mm_blend_epi16 ( _mm_blend_epi16 ( b0 , c0 , 0x92 ) , a0 , 0x24 ) ; <nl> + <nl> + _mm_storeu_si128 ( ( __m128i * ) ptr , v0 ) ; <nl> + _mm_storeu_si128 ( ( __m128i * ) ( ptr + 8 ) , v1 ) ; <nl> + _mm_storeu_si128 ( ( __m128i * ) ( ptr + 16 ) , v2 ) ; <nl> + # else <nl> __m128i z = _mm_setzero_si128 ( ) ; <nl> __m128i ab0 = _mm_unpacklo_epi16 ( a . val , b . val ) ; <nl> __m128i ab1 = _mm_unpackhi_epi16 ( a . val , b . 
val ) ; <nl> inline void v_store_interleave ( ushort * ptr , const v_uint16x8 & a , <nl> _mm_storeu_si128 ( ( __m128i * ) ( ptr ) , v0 ) ; <nl> _mm_storeu_si128 ( ( __m128i * ) ( ptr + 8 ) , v1 ) ; <nl> _mm_storeu_si128 ( ( __m128i * ) ( ptr + 16 ) , v2 ) ; <nl> + # endif <nl> } <nl> <nl> inline void v_store_interleave ( ushort * ptr , const v_uint16x8 & a , const v_uint16x8 & b , <nl> inline void v_store_interleave ( ushort * ptr , const v_uint16x8 & a , const v_uint16 <nl> _mm_storeu_si128 ( ( __m128i * ) ( ptr + 24 ) , v3 ) ; <nl> } <nl> <nl> + inline void v_store_interleave ( unsigned * ptr , const v_uint32x4 & a , const v_uint32x4 & b ) <nl> + { <nl> + __m128i t0 = _mm_unpacklo_epi32 ( a . val , b . val ) ; <nl> + __m128i t1 = _mm_unpackhi_epi32 ( a . val , b . val ) ; <nl> + <nl> + _mm_storeu_si128 ( ( __m128i * ) ptr , t0 ) ; <nl> + _mm_storeu_si128 ( ( __m128i * ) ( ptr + 4 ) , t1 ) ; <nl> + } <nl> + <nl> inline void v_store_interleave ( unsigned * ptr , const v_uint32x4 & a , const v_uint32x4 & b , <nl> const v_uint32x4 & c ) <nl> { <nl> inline void v_store_interleave ( float * ptr , const v_float32x4 & a , const v_float32 <nl> _mm_storeu_ps ( ptr + 12 , v3 ) ; <nl> } <nl> <nl> + inline void v_store_interleave ( uint64 * ptr , const v_uint64x2 & a , const v_uint64x2 & b ) <nl> + { <nl> + __m128i t0 = _mm_unpacklo_epi64 ( a . val , b . val ) ; <nl> + __m128i t1 = _mm_unpackhi_epi64 ( a . val , b . val ) ; <nl> + <nl> + _mm_storeu_si128 ( ( __m128i * ) ptr , t0 ) ; <nl> + _mm_storeu_si128 ( ( __m128i * ) ( ptr + 2 ) , t1 ) ; <nl> + } <nl> + <nl> inline void v_store_interleave ( uint64 * ptr , const v_uint64x2 & a , const v_uint64x2 & b , const v_uint64x2 & c ) <nl> { <nl> __m128i t0 = _mm_unpacklo_epi64 ( a . val , b . val ) ; <nl> inline void v_store_interleave ( uint64 * ptr , const v_uint64x2 & a , const v_uint64x <nl> _mm_storeu_si128 ( ( __m128i * ) ( ptr + 4 ) , t2 ) ; <nl> } <nl> <nl> - inline void v_store_interleave ( int64 * ptr , const v_int64x2 & a , const v_int64x2 & b , const v_int64x2 & c ) <nl> + inline void v_store_interleave ( uint64 * ptr , const v_uint64x2 & a , const v_uint64x2 & b , const v_uint64x2 & c , const v_uint64x2 & d ) <nl> { <nl> - v_store_interleave ( ( uint64 * ) ptr , v_reinterpret_as_u64 ( a ) , v_reinterpret_as_u64 ( b ) , v_reinterpret_as_u64 ( c ) ) ; <nl> - } <nl> + __m128i t0 = _mm_unpacklo_epi64 ( a . val , b . val ) ; <nl> + __m128i t1 = _mm_unpacklo_epi64 ( c . val , d . val ) ; <nl> + __m128i t2 = _mm_unpackhi_epi64 ( a . val , b . val ) ; <nl> + __m128i t3 = _mm_unpackhi_epi64 ( c . val , d . 
val ) ; <nl> <nl> - inline void v_store_interleave ( double * ptr , const v_float64x2 & a , const v_float64x2 & b , const v_float64x2 & c ) <nl> - { <nl> - v_store_interleave ( ( uint64 * ) ptr , v_reinterpret_as_u64 ( a ) , v_reinterpret_as_u64 ( b ) , v_reinterpret_as_u64 ( c ) ) ; <nl> + _mm_storeu_si128 ( ( __m128i * ) ptr , t0 ) ; <nl> + _mm_storeu_si128 ( ( __m128i * ) ( ptr + 2 ) , t1 ) ; <nl> + _mm_storeu_si128 ( ( __m128i * ) ( ptr + 4 ) , t2 ) ; <nl> + _mm_storeu_si128 ( ( __m128i * ) ( ptr + 6 ) , t3 ) ; <nl> } <nl> <nl> - # define OPENCV_HAL_IMPL_SSE_LOADSTORE_INTERLEAVE ( _Tpvec , _Tp , suffix , _Tpuvec , _Tpu , usuffix ) \ <nl> - inline void v_load_deinterleave ( const _Tp * ptr , _Tpvec & a0 , \ <nl> - _Tpvec & b0 , _Tpvec & c0 ) \ <nl> + # define OPENCV_HAL_IMPL_SSE_LOADSTORE_INTERLEAVE ( _Tpvec0 , _Tp0 , suffix0 , _Tpvec1 , _Tp1 , suffix1 ) \ <nl> + inline void v_load_deinterleave ( const _Tp0 * ptr , _Tpvec0 & a0 , _Tpvec0 & b0 ) \ <nl> + { \ <nl> + _Tpvec1 a1 , b1 ; \ <nl> + v_load_deinterleave ( ( const _Tp1 * ) ptr , a1 , b1 ) ; \ <nl> + a0 = v_reinterpret_as_ # # suffix0 ( a1 ) ; \ <nl> + b0 = v_reinterpret_as_ # # suffix0 ( b1 ) ; \ <nl> + } \ <nl> + inline void v_load_deinterleave ( const _Tp0 * ptr , _Tpvec0 & a0 , _Tpvec0 & b0 , _Tpvec0 & c0 ) \ <nl> + { \ <nl> + _Tpvec1 a1 , b1 , c1 ; \ <nl> + v_load_deinterleave ( ( const _Tp1 * ) ptr , a1 , b1 , c1 ) ; \ <nl> + a0 = v_reinterpret_as_ # # suffix0 ( a1 ) ; \ <nl> + b0 = v_reinterpret_as_ # # suffix0 ( b1 ) ; \ <nl> + c0 = v_reinterpret_as_ # # suffix0 ( c1 ) ; \ <nl> + } \ <nl> + inline void v_load_deinterleave ( const _Tp0 * ptr , _Tpvec0 & a0 , _Tpvec0 & b0 , _Tpvec0 & c0 , _Tpvec0 & d0 ) \ <nl> { \ <nl> - _Tpuvec a1 , b1 , c1 ; \ <nl> - v_load_deinterleave ( ( const _Tpu * ) ptr , a1 , b1 , c1 ) ; \ <nl> - a0 = v_reinterpret_as_ # # suffix ( a1 ) ; \ <nl> - b0 = v_reinterpret_as_ # # suffix ( b1 ) ; \ <nl> - c0 = v_reinterpret_as_ # # suffix ( c1 ) ; \ <nl> + _Tpvec1 a1 , b1 , c1 , d1 ; \ <nl> + v_load_deinterleave ( ( const _Tp1 * ) ptr , a1 , b1 , c1 , d1 ) ; \ <nl> + a0 = v_reinterpret_as_ # # suffix0 ( a1 ) ; \ <nl> + b0 = v_reinterpret_as_ # # suffix0 ( b1 ) ; \ <nl> + c0 = v_reinterpret_as_ # # suffix0 ( c1 ) ; \ <nl> + d0 = v_reinterpret_as_ # # suffix0 ( d1 ) ; \ <nl> } \ <nl> - inline void v_load_deinterleave ( const _Tp * ptr , _Tpvec & a0 , \ <nl> - _Tpvec & b0 , _Tpvec & c0 , _Tpvec & d0 ) \ <nl> + inline void v_store_interleave ( _Tp0 * ptr , const _Tpvec0 & a0 , const _Tpvec0 & b0 ) \ <nl> { \ <nl> - _Tpuvec a1 , b1 , c1 , d1 ; \ <nl> - v_load_deinterleave ( ( const _Tpu * ) ptr , a1 , b1 , c1 , d1 ) ; \ <nl> - a0 = v_reinterpret_as_ # # suffix ( a1 ) ; \ <nl> - b0 = v_reinterpret_as_ # # suffix ( b1 ) ; \ <nl> - c0 = v_reinterpret_as_ # # suffix ( c1 ) ; \ <nl> - d0 = v_reinterpret_as_ # # suffix ( d1 ) ; \ <nl> + _Tpvec1 a1 = v_reinterpret_as_ # # suffix1 ( a0 ) ; \ <nl> + _Tpvec1 b1 = v_reinterpret_as_ # # suffix1 ( b0 ) ; \ <nl> + v_store_interleave ( ( _Tp1 * ) ptr , a1 , b1 ) ; \ <nl> } \ <nl> - inline void v_store_interleave ( _Tp * ptr , const _Tpvec & a0 , \ <nl> - const _Tpvec & b0 , const _Tpvec & c0 ) \ <nl> + inline void v_store_interleave ( _Tp0 * ptr , const _Tpvec0 & a0 , const _Tpvec0 & b0 , const _Tpvec0 & c0 ) \ <nl> { \ <nl> - _Tpuvec a1 = v_reinterpret_as_ # # usuffix ( a0 ) ; \ <nl> - _Tpuvec b1 = v_reinterpret_as_ # # usuffix ( b0 ) ; \ <nl> - _Tpuvec c1 = v_reinterpret_as_ # # usuffix ( c0 ) ; \ <nl> - v_store_interleave ( ( _Tpu * ) ptr , a1 , b1 , c1 ) ; \ <nl> + _Tpvec1 
a1 = v_reinterpret_as_ # # suffix1 ( a0 ) ; \ <nl> + _Tpvec1 b1 = v_reinterpret_as_ # # suffix1 ( b0 ) ; \ <nl> + _Tpvec1 c1 = v_reinterpret_as_ # # suffix1 ( c0 ) ; \ <nl> + v_store_interleave ( ( _Tp1 * ) ptr , a1 , b1 , c1 ) ; \ <nl> } \ <nl> - inline void v_store_interleave ( _Tp * ptr , const _Tpvec & a0 , const _Tpvec & b0 , \ <nl> - const _Tpvec & c0 , const _Tpvec & d0 ) \ <nl> + inline void v_store_interleave ( _Tp0 * ptr , const _Tpvec0 & a0 , const _Tpvec0 & b0 , \ <nl> + const _Tpvec0 & c0 , const _Tpvec0 & d0 ) \ <nl> { \ <nl> - _Tpuvec a1 = v_reinterpret_as_ # # usuffix ( a0 ) ; \ <nl> - _Tpuvec b1 = v_reinterpret_as_ # # usuffix ( b0 ) ; \ <nl> - _Tpuvec c1 = v_reinterpret_as_ # # usuffix ( c0 ) ; \ <nl> - _Tpuvec d1 = v_reinterpret_as_ # # usuffix ( d0 ) ; \ <nl> - v_store_interleave ( ( _Tpu * ) ptr , a1 , b1 , c1 , d1 ) ; \ <nl> + _Tpvec1 a1 = v_reinterpret_as_ # # suffix1 ( a0 ) ; \ <nl> + _Tpvec1 b1 = v_reinterpret_as_ # # suffix1 ( b0 ) ; \ <nl> + _Tpvec1 c1 = v_reinterpret_as_ # # suffix1 ( c0 ) ; \ <nl> + _Tpvec1 d1 = v_reinterpret_as_ # # suffix1 ( d0 ) ; \ <nl> + v_store_interleave ( ( _Tp1 * ) ptr , a1 , b1 , c1 , d1 ) ; \ <nl> } <nl> <nl> OPENCV_HAL_IMPL_SSE_LOADSTORE_INTERLEAVE ( v_int8x16 , schar , s8 , v_uint8x16 , uchar , u8 ) <nl> OPENCV_HAL_IMPL_SSE_LOADSTORE_INTERLEAVE ( v_int16x8 , short , s16 , v_uint16x8 , ushort , u16 ) <nl> OPENCV_HAL_IMPL_SSE_LOADSTORE_INTERLEAVE ( v_int32x4 , int , s32 , v_uint32x4 , unsigned , u32 ) <nl> - / / OPENCV_HAL_IMPL_SSE_LOADSTORE_INTERLEAVE ( v_float32x4 , float , f32 , v_uint32x4 , unsigned , u32 ) <nl> + OPENCV_HAL_IMPL_SSE_LOADSTORE_INTERLEAVE ( v_int64x2 , int64 , s64 , v_uint64x2 , uint64 , u64 ) <nl> + OPENCV_HAL_IMPL_SSE_LOADSTORE_INTERLEAVE ( v_float64x2 , double , f64 , v_uint64x2 , uint64 , u64 ) <nl> <nl> inline v_float32x4 v_cvt_f32 ( const v_int32x4 & a ) <nl> { <nl> mmm a / modules / core / include / opencv2 / core / hal / intrin_vsx . hpp <nl> ppp b / modules / core / include / opencv2 / core / hal / intrin_vsx . hpp <nl> OPENCV_HAL_IMPL_VSX_INTERLEAVE ( uint , v_uint32x4 ) <nl> OPENCV_HAL_IMPL_VSX_INTERLEAVE ( int , v_int32x4 ) <nl> OPENCV_HAL_IMPL_VSX_INTERLEAVE ( float , v_float32x4 ) <nl> OPENCV_HAL_IMPL_VSX_INTERLEAVE ( double , v_float64x2 ) <nl> + OPENCV_HAL_IMPL_VSX_INTERLEAVE ( int64 , v_int64x2 ) <nl> + OPENCV_HAL_IMPL_VSX_INTERLEAVE ( uint64 , v_uint64x2 ) <nl> <nl> / * Expand * / <nl> # define OPENCV_HAL_IMPL_VSX_EXPAND ( _Tpvec , _Tpwvec , _Tp , fl , fh ) \ <nl> mmm a / modules / core / src / merge . cpp <nl> ppp b / modules / core / src / merge . cpp <nl> <nl> <nl> namespace cv { namespace hal { <nl> <nl> - # if CV_NEON <nl> - template < typename T > struct VMerge2 ; <nl> - template < typename T > struct VMerge3 ; <nl> - template < typename T > struct VMerge4 ; <nl> - <nl> - # define MERGE2_KERNEL_TEMPLATE ( name , data_type , reg_type , load_func , store_func ) \ <nl> - template < > \ <nl> - struct name < data_type > { \ <nl> - void operator ( ) ( const data_type * src0 , const data_type * src1 , \ <nl> - data_type * dst ) { \ <nl> - reg_type r ; \ <nl> - r . val [ 0 ] = load_func ( src0 ) ; \ <nl> - r . 
val [ 1 ] = load_func ( src1 ) ; \ <nl> - store_func ( dst , r ) ; \ <nl> - } \ <nl> - } <nl> + # if CV_SIMD <nl> + template < typename T , typename VecT > static void <nl> + vecmerge_ ( const T * * src , T * dst , int len , int cn ) <nl> + { <nl> + int i ; <nl> + const T * src0 = src [ 0 ] ; <nl> + const T * src1 = src [ 1 ] ; <nl> <nl> - # define MERGE3_KERNEL_TEMPLATE ( name , data_type , reg_type , load_func , store_func ) \ <nl> - template < > \ <nl> - struct name < data_type > { \ <nl> - void operator ( ) ( const data_type * src0 , const data_type * src1 , \ <nl> - const data_type * src2 , data_type * dst ) { \ <nl> - reg_type r ; \ <nl> - r . val [ 0 ] = load_func ( src0 ) ; \ <nl> - r . val [ 1 ] = load_func ( src1 ) ; \ <nl> - r . val [ 2 ] = load_func ( src2 ) ; \ <nl> - store_func ( dst , r ) ; \ <nl> - } \ <nl> + const int VECSZ = VecT : : nlanes ; <nl> + if ( cn = = 2 ) <nl> + { <nl> + for ( i = 0 ; i < len ; i + = VECSZ ) <nl> + { <nl> + i = std : : min ( len - VECSZ , i ) ; <nl> + VecT a = vx_load ( src0 + i ) , b = vx_load ( src1 + i ) ; <nl> + v_store_interleave ( dst + i * cn , a , b ) ; <nl> + } <nl> } <nl> - <nl> - # define MERGE4_KERNEL_TEMPLATE ( name , data_type , reg_type , load_func , store_func ) \ <nl> - template < > \ <nl> - struct name < data_type > { \ <nl> - void operator ( ) ( const data_type * src0 , const data_type * src1 , \ <nl> - const data_type * src2 , const data_type * src3 , \ <nl> - data_type * dst ) { \ <nl> - reg_type r ; \ <nl> - r . val [ 0 ] = load_func ( src0 ) ; \ <nl> - r . val [ 1 ] = load_func ( src1 ) ; \ <nl> - r . val [ 2 ] = load_func ( src2 ) ; \ <nl> - r . val [ 3 ] = load_func ( src3 ) ; \ <nl> - store_func ( dst , r ) ; \ <nl> - } \ <nl> + else if ( cn = = 3 ) <nl> + { <nl> + const T * src2 = src [ 2 ] ; <nl> + for ( i = 0 ; i < len ; i + = VECSZ ) <nl> + { <nl> + i = std : : min ( len - VECSZ , i ) ; <nl> + VecT a = vx_load ( src0 + i ) , b = vx_load ( src1 + i ) , c = vx_load ( src2 + i ) ; <nl> + v_store_interleave ( dst + i * cn , a , b , c ) ; <nl> + } <nl> } <nl> - <nl> - MERGE2_KERNEL_TEMPLATE ( VMerge2 , uchar , uint8x16x2_t , vld1q_u8 , vst2q_u8 ) ; <nl> - MERGE2_KERNEL_TEMPLATE ( VMerge2 , ushort , uint16x8x2_t , vld1q_u16 , vst2q_u16 ) ; <nl> - MERGE2_KERNEL_TEMPLATE ( VMerge2 , int , int32x4x2_t , vld1q_s32 , vst2q_s32 ) ; <nl> - MERGE2_KERNEL_TEMPLATE ( VMerge2 , int64 , int64x1x2_t , vld1_s64 , vst2_s64 ) ; <nl> - <nl> - MERGE3_KERNEL_TEMPLATE ( VMerge3 , uchar , uint8x16x3_t , vld1q_u8 , vst3q_u8 ) ; <nl> - MERGE3_KERNEL_TEMPLATE ( VMerge3 , ushort , uint16x8x3_t , vld1q_u16 , vst3q_u16 ) ; <nl> - MERGE3_KERNEL_TEMPLATE ( VMerge3 , int , int32x4x3_t , vld1q_s32 , vst3q_s32 ) ; <nl> - MERGE3_KERNEL_TEMPLATE ( VMerge3 , int64 , int64x1x3_t , vld1_s64 , vst3_s64 ) ; <nl> - <nl> - MERGE4_KERNEL_TEMPLATE ( VMerge4 , uchar , uint8x16x4_t , vld1q_u8 , vst4q_u8 ) ; <nl> - MERGE4_KERNEL_TEMPLATE ( VMerge4 , ushort , uint16x8x4_t , vld1q_u16 , vst4q_u16 ) ; <nl> - MERGE4_KERNEL_TEMPLATE ( VMerge4 , int , int32x4x4_t , vld1q_s32 , vst4q_s32 ) ; <nl> - MERGE4_KERNEL_TEMPLATE ( VMerge4 , int64 , int64x1x4_t , vld1_s64 , vst4_s64 ) ; <nl> - <nl> - # elif CV_SSE2 <nl> - <nl> - template < typename T > <nl> - struct VMerge2 <nl> - { <nl> - VMerge2 ( ) : support ( false ) { } <nl> - void operator ( ) ( const T * , const T * , T * ) const { } <nl> - <nl> - bool support ; <nl> - } ; <nl> - <nl> - template < typename T > <nl> - struct VMerge3 <nl> - { <nl> - VMerge3 ( ) : support ( false ) { } <nl> - void operator ( ) ( const T * , 
const T * , const T * , T * ) const { } <nl> - <nl> - bool support ; <nl> - } ; <nl> - <nl> - template < typename T > <nl> - struct VMerge4 <nl> - { <nl> - VMerge4 ( ) : support ( false ) { } <nl> - void operator ( ) ( const T * , const T * , const T * , const T * , T * ) const { } <nl> - <nl> - bool support ; <nl> - } ; <nl> - <nl> - # define MERGE2_KERNEL_TEMPLATE ( data_type , reg_type , cast_type , _mm_interleave , flavor , se ) \ <nl> - template < > \ <nl> - struct VMerge2 < data_type > \ <nl> - { \ <nl> - enum \ <nl> - { \ <nl> - ELEMS_IN_VEC = 16 / sizeof ( data_type ) \ <nl> - } ; \ <nl> - \ <nl> - VMerge2 ( ) \ <nl> - { \ <nl> - support = checkHardwareSupport ( se ) ; \ <nl> - } \ <nl> - \ <nl> - void operator ( ) ( const data_type * src0 , const data_type * src1 , \ <nl> - data_type * dst ) const \ <nl> - { \ <nl> - reg_type v_src0 = _mm_loadu_ # # flavor ( ( const cast_type * ) ( src0 ) ) ; \ <nl> - reg_type v_src1 = _mm_loadu_ # # flavor ( ( const cast_type * ) ( src0 + ELEMS_IN_VEC ) ) ; \ <nl> - reg_type v_src2 = _mm_loadu_ # # flavor ( ( const cast_type * ) ( src1 ) ) ; \ <nl> - reg_type v_src3 = _mm_loadu_ # # flavor ( ( const cast_type * ) ( src1 + ELEMS_IN_VEC ) ) ; \ <nl> - \ <nl> - _mm_interleave ( v_src0 , v_src1 , v_src2 , v_src3 ) ; \ <nl> - \ <nl> - _mm_storeu_ # # flavor ( ( cast_type * ) ( dst ) , v_src0 ) ; \ <nl> - _mm_storeu_ # # flavor ( ( cast_type * ) ( dst + ELEMS_IN_VEC ) , v_src1 ) ; \ <nl> - _mm_storeu_ # # flavor ( ( cast_type * ) ( dst + ELEMS_IN_VEC * 2 ) , v_src2 ) ; \ <nl> - _mm_storeu_ # # flavor ( ( cast_type * ) ( dst + ELEMS_IN_VEC * 3 ) , v_src3 ) ; \ <nl> - } \ <nl> - \ <nl> - bool support ; \ <nl> - } <nl> - <nl> - # define MERGE3_KERNEL_TEMPLATE ( data_type , reg_type , cast_type , _mm_interleave , flavor , se ) \ <nl> - template < > \ <nl> - struct VMerge3 < data_type > \ <nl> - { \ <nl> - enum \ <nl> - { \ <nl> - ELEMS_IN_VEC = 16 / sizeof ( data_type ) \ <nl> - } ; \ <nl> - \ <nl> - VMerge3 ( ) \ <nl> - { \ <nl> - support = checkHardwareSupport ( se ) ; \ <nl> - } \ <nl> - \ <nl> - void operator ( ) ( const data_type * src0 , const data_type * src1 , const data_type * src2 , \ <nl> - data_type * dst ) const \ <nl> - { \ <nl> - reg_type v_src0 = _mm_loadu_ # # flavor ( ( const cast_type * ) ( src0 ) ) ; \ <nl> - reg_type v_src1 = _mm_loadu_ # # flavor ( ( const cast_type * ) ( src0 + ELEMS_IN_VEC ) ) ; \ <nl> - reg_type v_src2 = _mm_loadu_ # # flavor ( ( const cast_type * ) ( src1 ) ) ; \ <nl> - reg_type v_src3 = _mm_loadu_ # # flavor ( ( const cast_type * ) ( src1 + ELEMS_IN_VEC ) ) ; \ <nl> - reg_type v_src4 = _mm_loadu_ # # flavor ( ( const cast_type * ) ( src2 ) ) ; \ <nl> - reg_type v_src5 = _mm_loadu_ # # flavor ( ( const cast_type * ) ( src2 + ELEMS_IN_VEC ) ) ; \ <nl> - \ <nl> - _mm_interleave ( v_src0 , v_src1 , v_src2 , \ <nl> - v_src3 , v_src4 , v_src5 ) ; \ <nl> - \ <nl> - _mm_storeu_ # # flavor ( ( cast_type * ) ( dst ) , v_src0 ) ; \ <nl> - _mm_storeu_ # # flavor ( ( cast_type * ) ( dst + ELEMS_IN_VEC ) , v_src1 ) ; \ <nl> - _mm_storeu_ # # flavor ( ( cast_type * ) ( dst + ELEMS_IN_VEC * 2 ) , v_src2 ) ; \ <nl> - _mm_storeu_ # # flavor ( ( cast_type * ) ( dst + ELEMS_IN_VEC * 3 ) , v_src3 ) ; \ <nl> - _mm_storeu_ # # flavor ( ( cast_type * ) ( dst + ELEMS_IN_VEC * 4 ) , v_src4 ) ; \ <nl> - _mm_storeu_ # # flavor ( ( cast_type * ) ( dst + ELEMS_IN_VEC * 5 ) , v_src5 ) ; \ <nl> - } \ <nl> - \ <nl> - bool support ; \ <nl> - } <nl> - <nl> - # define MERGE4_KERNEL_TEMPLATE ( data_type , reg_type , cast_type , _mm_interleave , 
flavor , se ) \ <nl> - template < > \ <nl> - struct VMerge4 < data_type > \ <nl> - { \ <nl> - enum \ <nl> - { \ <nl> - ELEMS_IN_VEC = 16 / sizeof ( data_type ) \ <nl> - } ; \ <nl> - \ <nl> - VMerge4 ( ) \ <nl> - { \ <nl> - support = checkHardwareSupport ( se ) ; \ <nl> - } \ <nl> - \ <nl> - void operator ( ) ( const data_type * src0 , const data_type * src1 , \ <nl> - const data_type * src2 , const data_type * src3 , \ <nl> - data_type * dst ) const \ <nl> - { \ <nl> - reg_type v_src0 = _mm_loadu_ # # flavor ( ( const cast_type * ) ( src0 ) ) ; \ <nl> - reg_type v_src1 = _mm_loadu_ # # flavor ( ( const cast_type * ) ( src0 + ELEMS_IN_VEC ) ) ; \ <nl> - reg_type v_src2 = _mm_loadu_ # # flavor ( ( const cast_type * ) ( src1 ) ) ; \ <nl> - reg_type v_src3 = _mm_loadu_ # # flavor ( ( const cast_type * ) ( src1 + ELEMS_IN_VEC ) ) ; \ <nl> - reg_type v_src4 = _mm_loadu_ # # flavor ( ( const cast_type * ) ( src2 ) ) ; \ <nl> - reg_type v_src5 = _mm_loadu_ # # flavor ( ( const cast_type * ) ( src2 + ELEMS_IN_VEC ) ) ; \ <nl> - reg_type v_src6 = _mm_loadu_ # # flavor ( ( const cast_type * ) ( src3 ) ) ; \ <nl> - reg_type v_src7 = _mm_loadu_ # # flavor ( ( const cast_type * ) ( src3 + ELEMS_IN_VEC ) ) ; \ <nl> - \ <nl> - _mm_interleave ( v_src0 , v_src1 , v_src2 , v_src3 , \ <nl> - v_src4 , v_src5 , v_src6 , v_src7 ) ; \ <nl> - \ <nl> - _mm_storeu_ # # flavor ( ( cast_type * ) ( dst ) , v_src0 ) ; \ <nl> - _mm_storeu_ # # flavor ( ( cast_type * ) ( dst + ELEMS_IN_VEC ) , v_src1 ) ; \ <nl> - _mm_storeu_ # # flavor ( ( cast_type * ) ( dst + ELEMS_IN_VEC * 2 ) , v_src2 ) ; \ <nl> - _mm_storeu_ # # flavor ( ( cast_type * ) ( dst + ELEMS_IN_VEC * 3 ) , v_src3 ) ; \ <nl> - _mm_storeu_ # # flavor ( ( cast_type * ) ( dst + ELEMS_IN_VEC * 4 ) , v_src4 ) ; \ <nl> - _mm_storeu_ # # flavor ( ( cast_type * ) ( dst + ELEMS_IN_VEC * 5 ) , v_src5 ) ; \ <nl> - _mm_storeu_ # # flavor ( ( cast_type * ) ( dst + ELEMS_IN_VEC * 6 ) , v_src6 ) ; \ <nl> - _mm_storeu_ # # flavor ( ( cast_type * ) ( dst + ELEMS_IN_VEC * 7 ) , v_src7 ) ; \ <nl> - } \ <nl> - \ <nl> - bool support ; \ <nl> + else <nl> + { <nl> + CV_Assert ( cn = = 4 ) ; <nl> + const T * src2 = src [ 2 ] ; <nl> + const T * src3 = src [ 3 ] ; <nl> + for ( i = 0 ; i < len ; i + = VECSZ ) <nl> + { <nl> + i = std : : min ( len - VECSZ , i ) ; <nl> + VecT a = vx_load ( src0 + i ) , b = vx_load ( src1 + i ) ; <nl> + VecT c = vx_load ( src2 + i ) , d = vx_load ( src3 + i ) ; <nl> + v_store_interleave ( dst + i * cn , a , b , c , d ) ; <nl> + } <nl> + } <nl> + vx_cleanup ( ) ; <nl> } <nl> - <nl> - MERGE2_KERNEL_TEMPLATE ( uchar , __m128i , __m128i , _mm_interleave_epi8 , si128 , CV_CPU_SSE2 ) ; <nl> - MERGE3_KERNEL_TEMPLATE ( uchar , __m128i , __m128i , _mm_interleave_epi8 , si128 , CV_CPU_SSE2 ) ; <nl> - MERGE4_KERNEL_TEMPLATE ( uchar , __m128i , __m128i , _mm_interleave_epi8 , si128 , CV_CPU_SSE2 ) ; <nl> - <nl> - # if CV_SSE4_1 <nl> - MERGE2_KERNEL_TEMPLATE ( ushort , __m128i , __m128i , _mm_interleave_epi16 , si128 , CV_CPU_SSE4_1 ) ; <nl> - MERGE3_KERNEL_TEMPLATE ( ushort , __m128i , __m128i , _mm_interleave_epi16 , si128 , CV_CPU_SSE4_1 ) ; <nl> - MERGE4_KERNEL_TEMPLATE ( ushort , __m128i , __m128i , _mm_interleave_epi16 , si128 , CV_CPU_SSE4_1 ) ; <nl> - # endif <nl> - <nl> - MERGE2_KERNEL_TEMPLATE ( int , __m128 , float , _mm_interleave_ps , ps , CV_CPU_SSE2 ) ; <nl> - MERGE3_KERNEL_TEMPLATE ( int , __m128 , float , _mm_interleave_ps , ps , CV_CPU_SSE2 ) ; <nl> - MERGE4_KERNEL_TEMPLATE ( int , __m128 , float , _mm_interleave_ps , ps , CV_CPU_SSE2 ) ; <nl> - <nl> 
# endif <nl> <nl> template < typename T > static void <nl> merge_ ( const T * * src , T * dst , int len , int cn ) <nl> { <nl> const T * src0 = src [ 0 ] , * src1 = src [ 1 ] ; <nl> i = j = 0 ; <nl> - # if CV_NEON <nl> - if ( cn = = 2 ) <nl> - { <nl> - int inc_i = ( sizeof ( T ) = = 8 ) ? 1 : 16 / sizeof ( T ) ; <nl> - int inc_j = 2 * inc_i ; <nl> - <nl> - VMerge2 < T > vmerge ; <nl> - for ( ; i < len - inc_i ; i + = inc_i , j + = inc_j ) <nl> - vmerge ( src0 + i , src1 + i , dst + j ) ; <nl> - } <nl> - # elif CV_SSE2 <nl> - if ( cn = = 2 ) <nl> - { <nl> - int inc_i = 32 / sizeof ( T ) ; <nl> - int inc_j = 2 * inc_i ; <nl> - <nl> - VMerge2 < T > vmerge ; <nl> - if ( vmerge . support ) <nl> - for ( ; i < len - inc_i ; i + = inc_i , j + = inc_j ) <nl> - vmerge ( src0 + i , src1 + i , dst + j ) ; <nl> - } <nl> - # endif <nl> for ( ; i < len ; i + + , j + = cn ) <nl> { <nl> dst [ j ] = src0 [ i ] ; <nl> merge_ ( const T * * src , T * dst , int len , int cn ) <nl> { <nl> const T * src0 = src [ 0 ] , * src1 = src [ 1 ] , * src2 = src [ 2 ] ; <nl> i = j = 0 ; <nl> - # if CV_NEON <nl> - if ( cn = = 3 ) <nl> - { <nl> - int inc_i = ( sizeof ( T ) = = 8 ) ? 1 : 16 / sizeof ( T ) ; <nl> - int inc_j = 3 * inc_i ; <nl> - <nl> - VMerge3 < T > vmerge ; <nl> - for ( ; i < len - inc_i ; i + = inc_i , j + = inc_j ) <nl> - vmerge ( src0 + i , src1 + i , src2 + i , dst + j ) ; <nl> - } <nl> - # elif CV_SSE2 <nl> - if ( cn = = 3 ) <nl> - { <nl> - int inc_i = 32 / sizeof ( T ) ; <nl> - int inc_j = 3 * inc_i ; <nl> - <nl> - VMerge3 < T > vmerge ; <nl> - if ( vmerge . support ) <nl> - for ( ; i < len - inc_i ; i + = inc_i , j + = inc_j ) <nl> - vmerge ( src0 + i , src1 + i , src2 + i , dst + j ) ; <nl> - } <nl> - # endif <nl> for ( ; i < len ; i + + , j + = cn ) <nl> { <nl> dst [ j ] = src0 [ i ] ; <nl> merge_ ( const T * * src , T * dst , int len , int cn ) <nl> { <nl> const T * src0 = src [ 0 ] , * src1 = src [ 1 ] , * src2 = src [ 2 ] , * src3 = src [ 3 ] ; <nl> i = j = 0 ; <nl> - # if CV_NEON <nl> - if ( cn = = 4 ) <nl> - { <nl> - int inc_i = ( sizeof ( T ) = = 8 ) ? 1 : 16 / sizeof ( T ) ; <nl> - int inc_j = 4 * inc_i ; <nl> - <nl> - VMerge4 < T > vmerge ; <nl> - for ( ; i < len - inc_i ; i + = inc_i , j + = inc_j ) <nl> - vmerge ( src0 + i , src1 + i , src2 + i , src3 + i , dst + j ) ; <nl> - } <nl> - # elif CV_SSE2 <nl> - if ( cn = = 4 ) <nl> - { <nl> - int inc_i = 32 / sizeof ( T ) ; <nl> - int inc_j = 4 * inc_i ; <nl> - <nl> - VMerge4 < T > vmerge ; <nl> - if ( vmerge . 
support ) <nl> - for ( ; i < len - inc_i ; i + = inc_i , j + = inc_j ) <nl> - vmerge ( src0 + i , src1 + i , src2 + i , src3 + i , dst + j ) ; <nl> - } <nl> - # endif <nl> for ( ; i < len ; i + + , j + = cn ) <nl> { <nl> dst [ j ] = src0 [ i ] ; dst [ j + 1 ] = src1 [ i ] ; <nl> merge_ ( const T * * src , T * dst , int len , int cn ) <nl> } <nl> } <nl> <nl> - <nl> void merge8u ( const uchar * * src , uchar * dst , int len , int cn ) <nl> { <nl> CALL_HAL ( merge8u , cv_hal_merge8u , src , dst , len , cn ) <nl> - merge_ ( src , dst , len , cn ) ; <nl> + # if CV_SIMD <nl> + if ( len > = v_uint8 : : nlanes & & 2 < = cn & & cn < = 4 ) <nl> + vecmerge_ < uchar , v_uint8 > ( src , dst , len , cn ) ; <nl> + else <nl> + # endif <nl> + merge_ ( src , dst , len , cn ) ; <nl> } <nl> <nl> void merge16u ( const ushort * * src , ushort * dst , int len , int cn ) <nl> { <nl> CALL_HAL ( merge16u , cv_hal_merge16u , src , dst , len , cn ) <nl> - merge_ ( src , dst , len , cn ) ; <nl> + # if CV_SIMD <nl> + if ( len > = v_uint16 : : nlanes & & 2 < = cn & & cn < = 4 ) <nl> + vecmerge_ < ushort , v_uint16 > ( src , dst , len , cn ) ; <nl> + else <nl> + # endif <nl> + merge_ ( src , dst , len , cn ) ; <nl> } <nl> <nl> void merge32s ( const int * * src , int * dst , int len , int cn ) <nl> { <nl> CALL_HAL ( merge32s , cv_hal_merge32s , src , dst , len , cn ) <nl> - merge_ ( src , dst , len , cn ) ; <nl> + # if CV_SIMD <nl> + if ( len > = v_int32 : : nlanes & & 2 < = cn & & cn < = 4 ) <nl> + vecmerge_ < int , v_int32 > ( src , dst , len , cn ) ; <nl> + else <nl> + # endif <nl> + merge_ ( src , dst , len , cn ) ; <nl> } <nl> <nl> void merge64s ( const int64 * * src , int64 * dst , int len , int cn ) <nl> { <nl> CALL_HAL ( merge64s , cv_hal_merge64s , src , dst , len , cn ) <nl> - merge_ ( src , dst , len , cn ) ; <nl> + # if CV_SIMD <nl> + if ( len > = v_int64 : : nlanes & & 2 < = cn & & cn < = 4 ) <nl> + vecmerge_ < int64 , v_int64 > ( src , dst , len , cn ) ; <nl> + else <nl> + # endif <nl> + merge_ ( src , dst , len , cn ) ; <nl> } <nl> <nl> } } / / cv : : hal : : <nl> mmm a / modules / core / src / split . cpp <nl> ppp b / modules / core / src / split . cpp <nl> <nl> <nl> namespace cv { namespace hal { <nl> <nl> - # if CV_NEON <nl> - template < typename T > struct VSplit2 ; <nl> - template < typename T > struct VSplit3 ; <nl> - template < typename T > struct VSplit4 ; <nl> - <nl> - # define SPLIT2_KERNEL_TEMPLATE ( name , data_type , reg_type , load_func , store_func ) \ <nl> - template < > \ <nl> - struct name < data_type > \ <nl> - { \ <nl> - void operator ( ) ( const data_type * src , data_type * dst0 , \ <nl> - data_type * dst1 ) const \ <nl> - { \ <nl> - reg_type r = load_func ( src ) ; \ <nl> - store_func ( dst0 , r . val [ 0 ] ) ; \ <nl> - store_func ( dst1 , r . val [ 1 ] ) ; \ <nl> - } \ <nl> - } <nl> + # if CV_SIMD <nl> + template < typename T , typename VecT > static void <nl> + vecsplit_ ( const T * src , T * * dst , int len , int cn ) <nl> + { <nl> + int i ; <nl> + T * dst0 = dst [ 0 ] ; <nl> + T * dst1 = dst [ 1 ] ; <nl> <nl> - # define SPLIT3_KERNEL_TEMPLATE ( name , data_type , reg_type , load_func , store_func ) \ <nl> - template < > \ <nl> - struct name < data_type > \ <nl> - { \ <nl> - void operator ( ) ( const data_type * src , data_type * dst0 , data_type * dst1 , \ <nl> - data_type * dst2 ) const \ <nl> - { \ <nl> - reg_type r = load_func ( src ) ; \ <nl> - store_func ( dst0 , r . val [ 0 ] ) ; \ <nl> - store_func ( dst1 , r . val [ 1 ] ) ; \ <nl> - store_func ( dst2 , r . 
val [ 2 ] ) ; \ <nl> - } \ <nl> + const int VECSZ = VecT : : nlanes ; <nl> + if ( cn = = 2 ) <nl> + { <nl> + for ( i = 0 ; i < len ; i + = VECSZ ) <nl> + { <nl> + i = std : : min ( len - VECSZ , i ) ; <nl> + VecT a , b ; <nl> + v_load_deinterleave ( src + i * cn , a , b ) ; <nl> + v_store ( dst0 + i , a ) ; <nl> + v_store ( dst1 + i , b ) ; <nl> + } <nl> } <nl> - <nl> - # define SPLIT4_KERNEL_TEMPLATE ( name , data_type , reg_type , load_func , store_func ) \ <nl> - template < > \ <nl> - struct name < data_type > \ <nl> - { \ <nl> - void operator ( ) ( const data_type * src , data_type * dst0 , data_type * dst1 , \ <nl> - data_type * dst2 , data_type * dst3 ) const \ <nl> - { \ <nl> - reg_type r = load_func ( src ) ; \ <nl> - store_func ( dst0 , r . val [ 0 ] ) ; \ <nl> - store_func ( dst1 , r . val [ 1 ] ) ; \ <nl> - store_func ( dst2 , r . val [ 2 ] ) ; \ <nl> - store_func ( dst3 , r . val [ 3 ] ) ; \ <nl> - } \ <nl> + else if ( cn = = 3 ) <nl> + { <nl> + T * dst2 = dst [ 2 ] ; <nl> + for ( i = 0 ; i < len ; i + = VECSZ ) <nl> + { <nl> + i = std : : min ( len - VECSZ , i ) ; <nl> + VecT a , b , c ; <nl> + v_load_deinterleave ( src + i * cn , a , b , c ) ; <nl> + v_store ( dst0 + i , a ) ; <nl> + v_store ( dst1 + i , b ) ; <nl> + v_store ( dst2 + i , c ) ; <nl> + } <nl> } <nl> - <nl> - SPLIT2_KERNEL_TEMPLATE ( VSplit2 , uchar , uint8x16x2_t , vld2q_u8 , vst1q_u8 ) ; <nl> - SPLIT2_KERNEL_TEMPLATE ( VSplit2 , ushort , uint16x8x2_t , vld2q_u16 , vst1q_u16 ) ; <nl> - SPLIT2_KERNEL_TEMPLATE ( VSplit2 , int , int32x4x2_t , vld2q_s32 , vst1q_s32 ) ; <nl> - SPLIT2_KERNEL_TEMPLATE ( VSplit2 , int64 , int64x1x2_t , vld2_s64 , vst1_s64 ) ; <nl> - <nl> - SPLIT3_KERNEL_TEMPLATE ( VSplit3 , uchar , uint8x16x3_t , vld3q_u8 , vst1q_u8 ) ; <nl> - SPLIT3_KERNEL_TEMPLATE ( VSplit3 , ushort , uint16x8x3_t , vld3q_u16 , vst1q_u16 ) ; <nl> - SPLIT3_KERNEL_TEMPLATE ( VSplit3 , int , int32x4x3_t , vld3q_s32 , vst1q_s32 ) ; <nl> - SPLIT3_KERNEL_TEMPLATE ( VSplit3 , int64 , int64x1x3_t , vld3_s64 , vst1_s64 ) ; <nl> - <nl> - SPLIT4_KERNEL_TEMPLATE ( VSplit4 , uchar , uint8x16x4_t , vld4q_u8 , vst1q_u8 ) ; <nl> - SPLIT4_KERNEL_TEMPLATE ( VSplit4 , ushort , uint16x8x4_t , vld4q_u16 , vst1q_u16 ) ; <nl> - SPLIT4_KERNEL_TEMPLATE ( VSplit4 , int , int32x4x4_t , vld4q_s32 , vst1q_s32 ) ; <nl> - SPLIT4_KERNEL_TEMPLATE ( VSplit4 , int64 , int64x1x4_t , vld4_s64 , vst1_s64 ) ; <nl> - <nl> - # elif CV_SSE2 <nl> - <nl> - template < typename T > <nl> - struct VSplit2 <nl> - { <nl> - VSplit2 ( ) : support ( false ) { } <nl> - void operator ( ) ( const T * , T * , T * ) const { } <nl> - <nl> - bool support ; <nl> - } ; <nl> - <nl> - template < typename T > <nl> - struct VSplit3 <nl> - { <nl> - VSplit3 ( ) : support ( false ) { } <nl> - void operator ( ) ( const T * , T * , T * , T * ) const { } <nl> - <nl> - bool support ; <nl> - } ; <nl> - <nl> - template < typename T > <nl> - struct VSplit4 <nl> - { <nl> - VSplit4 ( ) : support ( false ) { } <nl> - void operator ( ) ( const T * , T * , T * , T * , T * ) const { } <nl> - <nl> - bool support ; <nl> - } ; <nl> - <nl> - # define SPLIT2_KERNEL_TEMPLATE ( data_type , reg_type , cast_type , _mm_deinterleave , flavor ) \ <nl> - template < > \ <nl> - struct VSplit2 < data_type > \ <nl> - { \ <nl> - enum \ <nl> - { \ <nl> - ELEMS_IN_VEC = 16 / sizeof ( data_type ) \ <nl> - } ; \ <nl> - \ <nl> - VSplit2 ( ) \ <nl> - { \ <nl> - support = checkHardwareSupport ( CV_CPU_SSE2 ) ; \ <nl> - } \ <nl> - \ <nl> - void operator ( ) ( const data_type * src , \ <nl> - data_type * dst0 , data_type * 
dst1 ) const \ <nl> - { \ <nl> - reg_type v_src0 = _mm_loadu_ # # flavor ( ( cast_type const * ) ( src ) ) ; \ <nl> - reg_type v_src1 = _mm_loadu_ # # flavor ( ( cast_type const * ) ( src + ELEMS_IN_VEC ) ) ; \ <nl> - reg_type v_src2 = _mm_loadu_ # # flavor ( ( cast_type const * ) ( src + ELEMS_IN_VEC * 2 ) ) ; \ <nl> - reg_type v_src3 = _mm_loadu_ # # flavor ( ( cast_type const * ) ( src + ELEMS_IN_VEC * 3 ) ) ; \ <nl> - \ <nl> - _mm_deinterleave ( v_src0 , v_src1 , v_src2 , v_src3 ) ; \ <nl> - \ <nl> - _mm_storeu_ # # flavor ( ( cast_type * ) ( dst0 ) , v_src0 ) ; \ <nl> - _mm_storeu_ # # flavor ( ( cast_type * ) ( dst0 + ELEMS_IN_VEC ) , v_src1 ) ; \ <nl> - _mm_storeu_ # # flavor ( ( cast_type * ) ( dst1 ) , v_src2 ) ; \ <nl> - _mm_storeu_ # # flavor ( ( cast_type * ) ( dst1 + ELEMS_IN_VEC ) , v_src3 ) ; \ <nl> - } \ <nl> - \ <nl> - bool support ; \ <nl> - } <nl> - <nl> - # define SPLIT3_KERNEL_TEMPLATE ( data_type , reg_type , cast_type , _mm_deinterleave , flavor ) \ <nl> - template < > \ <nl> - struct VSplit3 < data_type > \ <nl> - { \ <nl> - enum \ <nl> - { \ <nl> - ELEMS_IN_VEC = 16 / sizeof ( data_type ) \ <nl> - } ; \ <nl> - \ <nl> - VSplit3 ( ) \ <nl> - { \ <nl> - support = checkHardwareSupport ( CV_CPU_SSE2 ) ; \ <nl> - } \ <nl> - \ <nl> - void operator ( ) ( const data_type * src , \ <nl> - data_type * dst0 , data_type * dst1 , data_type * dst2 ) const \ <nl> - { \ <nl> - reg_type v_src0 = _mm_loadu_ # # flavor ( ( cast_type const * ) ( src ) ) ; \ <nl> - reg_type v_src1 = _mm_loadu_ # # flavor ( ( cast_type const * ) ( src + ELEMS_IN_VEC ) ) ; \ <nl> - reg_type v_src2 = _mm_loadu_ # # flavor ( ( cast_type const * ) ( src + ELEMS_IN_VEC * 2 ) ) ; \ <nl> - reg_type v_src3 = _mm_loadu_ # # flavor ( ( cast_type const * ) ( src + ELEMS_IN_VEC * 3 ) ) ; \ <nl> - reg_type v_src4 = _mm_loadu_ # # flavor ( ( cast_type const * ) ( src + ELEMS_IN_VEC * 4 ) ) ; \ <nl> - reg_type v_src5 = _mm_loadu_ # # flavor ( ( cast_type const * ) ( src + ELEMS_IN_VEC * 5 ) ) ; \ <nl> - \ <nl> - _mm_deinterleave ( v_src0 , v_src1 , v_src2 , \ <nl> - v_src3 , v_src4 , v_src5 ) ; \ <nl> - \ <nl> - _mm_storeu_ # # flavor ( ( cast_type * ) ( dst0 ) , v_src0 ) ; \ <nl> - _mm_storeu_ # # flavor ( ( cast_type * ) ( dst0 + ELEMS_IN_VEC ) , v_src1 ) ; \ <nl> - _mm_storeu_ # # flavor ( ( cast_type * ) ( dst1 ) , v_src2 ) ; \ <nl> - _mm_storeu_ # # flavor ( ( cast_type * ) ( dst1 + ELEMS_IN_VEC ) , v_src3 ) ; \ <nl> - _mm_storeu_ # # flavor ( ( cast_type * ) ( dst2 ) , v_src4 ) ; \ <nl> - _mm_storeu_ # # flavor ( ( cast_type * ) ( dst2 + ELEMS_IN_VEC ) , v_src5 ) ; \ <nl> - } \ <nl> - \ <nl> - bool support ; \ <nl> - } <nl> - <nl> - # define SPLIT4_KERNEL_TEMPLATE ( data_type , reg_type , cast_type , _mm_deinterleave , flavor ) \ <nl> - template < > \ <nl> - struct VSplit4 < data_type > \ <nl> - { \ <nl> - enum \ <nl> - { \ <nl> - ELEMS_IN_VEC = 16 / sizeof ( data_type ) \ <nl> - } ; \ <nl> - \ <nl> - VSplit4 ( ) \ <nl> - { \ <nl> - support = checkHardwareSupport ( CV_CPU_SSE2 ) ; \ <nl> - } \ <nl> - \ <nl> - void operator ( ) ( const data_type * src , data_type * dst0 , data_type * dst1 , \ <nl> - data_type * dst2 , data_type * dst3 ) const \ <nl> - { \ <nl> - reg_type v_src0 = _mm_loadu_ # # flavor ( ( cast_type const * ) ( src ) ) ; \ <nl> - reg_type v_src1 = _mm_loadu_ # # flavor ( ( cast_type const * ) ( src + ELEMS_IN_VEC ) ) ; \ <nl> - reg_type v_src2 = _mm_loadu_ # # flavor ( ( cast_type const * ) ( src + ELEMS_IN_VEC * 2 ) ) ; \ <nl> - reg_type v_src3 = _mm_loadu_ # # flavor ( ( cast_type const * ) ( src 
+ ELEMS_IN_VEC * 3 ) ) ; \ <nl> - reg_type v_src4 = _mm_loadu_ # # flavor ( ( cast_type const * ) ( src + ELEMS_IN_VEC * 4 ) ) ; \ <nl> - reg_type v_src5 = _mm_loadu_ # # flavor ( ( cast_type const * ) ( src + ELEMS_IN_VEC * 5 ) ) ; \ <nl> - reg_type v_src6 = _mm_loadu_ # # flavor ( ( cast_type const * ) ( src + ELEMS_IN_VEC * 6 ) ) ; \ <nl> - reg_type v_src7 = _mm_loadu_ # # flavor ( ( cast_type const * ) ( src + ELEMS_IN_VEC * 7 ) ) ; \ <nl> - \ <nl> - _mm_deinterleave ( v_src0 , v_src1 , v_src2 , v_src3 , \ <nl> - v_src4 , v_src5 , v_src6 , v_src7 ) ; \ <nl> - \ <nl> - _mm_storeu_ # # flavor ( ( cast_type * ) ( dst0 ) , v_src0 ) ; \ <nl> - _mm_storeu_ # # flavor ( ( cast_type * ) ( dst0 + ELEMS_IN_VEC ) , v_src1 ) ; \ <nl> - _mm_storeu_ # # flavor ( ( cast_type * ) ( dst1 ) , v_src2 ) ; \ <nl> - _mm_storeu_ # # flavor ( ( cast_type * ) ( dst1 + ELEMS_IN_VEC ) , v_src3 ) ; \ <nl> - _mm_storeu_ # # flavor ( ( cast_type * ) ( dst2 ) , v_src4 ) ; \ <nl> - _mm_storeu_ # # flavor ( ( cast_type * ) ( dst2 + ELEMS_IN_VEC ) , v_src5 ) ; \ <nl> - _mm_storeu_ # # flavor ( ( cast_type * ) ( dst3 ) , v_src6 ) ; \ <nl> - _mm_storeu_ # # flavor ( ( cast_type * ) ( dst3 + ELEMS_IN_VEC ) , v_src7 ) ; \ <nl> - } \ <nl> - \ <nl> - bool support ; \ <nl> + else <nl> + { <nl> + CV_Assert ( cn = = 4 ) ; <nl> + T * dst2 = dst [ 2 ] ; <nl> + T * dst3 = dst [ 3 ] ; <nl> + for ( i = 0 ; i < len ; i + = VECSZ ) <nl> + { <nl> + i = std : : min ( len - VECSZ , i ) ; <nl> + VecT a , b , c , d ; <nl> + v_load_deinterleave ( src + i * cn , a , b , c , d ) ; <nl> + v_store ( dst0 + i , a ) ; <nl> + v_store ( dst1 + i , b ) ; <nl> + v_store ( dst2 + i , c ) ; <nl> + v_store ( dst3 + i , d ) ; <nl> + } <nl> + } <nl> + vx_cleanup ( ) ; <nl> } <nl> - <nl> - SPLIT2_KERNEL_TEMPLATE ( uchar , __m128i , __m128i , _mm_deinterleave_epi8 , si128 ) ; <nl> - SPLIT2_KERNEL_TEMPLATE ( ushort , __m128i , __m128i , _mm_deinterleave_epi16 , si128 ) ; <nl> - SPLIT2_KERNEL_TEMPLATE ( int , __m128 , float , _mm_deinterleave_ps , ps ) ; <nl> - <nl> - SPLIT3_KERNEL_TEMPLATE ( uchar , __m128i , __m128i , _mm_deinterleave_epi8 , si128 ) ; <nl> - SPLIT3_KERNEL_TEMPLATE ( ushort , __m128i , __m128i , _mm_deinterleave_epi16 , si128 ) ; <nl> - SPLIT3_KERNEL_TEMPLATE ( int , __m128 , float , _mm_deinterleave_ps , ps ) ; <nl> - <nl> - SPLIT4_KERNEL_TEMPLATE ( uchar , __m128i , __m128i , _mm_deinterleave_epi8 , si128 ) ; <nl> - SPLIT4_KERNEL_TEMPLATE ( ushort , __m128i , __m128i , _mm_deinterleave_epi16 , si128 ) ; <nl> - SPLIT4_KERNEL_TEMPLATE ( int , __m128 , float , _mm_deinterleave_ps , ps ) ; <nl> - <nl> # endif <nl> <nl> template < typename T > static void <nl> split_ ( const T * src , T * * dst , int len , int cn ) <nl> T * dst0 = dst [ 0 ] , * dst1 = dst [ 1 ] ; <nl> i = j = 0 ; <nl> <nl> - # if CV_NEON <nl> - if ( cn = = 2 ) <nl> - { <nl> - int inc_i = ( sizeof ( T ) = = 8 ) ? 1 : 16 / sizeof ( T ) ; <nl> - int inc_j = 2 * inc_i ; <nl> - <nl> - VSplit2 < T > vsplit ; <nl> - for ( ; i < len - inc_i ; i + = inc_i , j + = inc_j ) <nl> - vsplit ( src + j , dst0 + i , dst1 + i ) ; <nl> - } <nl> - # elif CV_SSE2 <nl> - if ( cn = = 2 ) <nl> - { <nl> - int inc_i = 32 / sizeof ( T ) ; <nl> - int inc_j = 2 * inc_i ; <nl> - <nl> - VSplit2 < T > vsplit ; <nl> - if ( vsplit . 
support ) <nl> - { <nl> - for ( ; i < = len - inc_i ; i + = inc_i , j + = inc_j ) <nl> - vsplit ( src + j , dst0 + i , dst1 + i ) ; <nl> - } <nl> - } <nl> - # endif <nl> for ( ; i < len ; i + + , j + = cn ) <nl> { <nl> dst0 [ i ] = src [ j ] ; <nl> split_ ( const T * src , T * * dst , int len , int cn ) <nl> T * dst0 = dst [ 0 ] , * dst1 = dst [ 1 ] , * dst2 = dst [ 2 ] ; <nl> i = j = 0 ; <nl> <nl> - # if CV_NEON <nl> - if ( cn = = 3 ) <nl> - { <nl> - int inc_i = ( sizeof ( T ) = = 8 ) ? 1 : 16 / sizeof ( T ) ; <nl> - int inc_j = 3 * inc_i ; <nl> - <nl> - VSplit3 < T > vsplit ; <nl> - for ( ; i < = len - inc_i ; i + = inc_i , j + = inc_j ) <nl> - vsplit ( src + j , dst0 + i , dst1 + i , dst2 + i ) ; <nl> - } <nl> - # elif CV_SSE2 <nl> - if ( cn = = 3 ) <nl> - { <nl> - int inc_i = 32 / sizeof ( T ) ; <nl> - int inc_j = 3 * inc_i ; <nl> - <nl> - VSplit3 < T > vsplit ; <nl> - <nl> - if ( vsplit . support ) <nl> - { <nl> - for ( ; i < = len - inc_i ; i + = inc_i , j + = inc_j ) <nl> - vsplit ( src + j , dst0 + i , dst1 + i , dst2 + i ) ; <nl> - } <nl> - } <nl> - # endif <nl> for ( ; i < len ; i + + , j + = cn ) <nl> { <nl> dst0 [ i ] = src [ j ] ; <nl> split_ ( const T * src , T * * dst , int len , int cn ) <nl> T * dst0 = dst [ 0 ] , * dst1 = dst [ 1 ] , * dst2 = dst [ 2 ] , * dst3 = dst [ 3 ] ; <nl> i = j = 0 ; <nl> <nl> - # if CV_NEON <nl> - if ( cn = = 4 ) <nl> - { <nl> - int inc_i = ( sizeof ( T ) = = 8 ) ? 1 : 16 / sizeof ( T ) ; <nl> - int inc_j = 4 * inc_i ; <nl> - <nl> - VSplit4 < T > vsplit ; <nl> - for ( ; i < = len - inc_i ; i + = inc_i , j + = inc_j ) <nl> - vsplit ( src + j , dst0 + i , dst1 + i , dst2 + i , dst3 + i ) ; <nl> - } <nl> - # elif CV_SSE2 <nl> - if ( cn = = 4 ) <nl> - { <nl> - int inc_i = 32 / sizeof ( T ) ; <nl> - int inc_j = 4 * inc_i ; <nl> - <nl> - VSplit4 < T > vsplit ; <nl> - if ( vsplit . 
support ) <nl> - { <nl> - for ( ; i < = len - inc_i ; i + = inc_i , j + = inc_j ) <nl> - vsplit ( src + j , dst0 + i , dst1 + i , dst2 + i , dst3 + i ) ; <nl> - } <nl> - } <nl> - # endif <nl> for ( ; i < len ; i + + , j + = cn ) <nl> { <nl> dst0 [ i ] = src [ j ] ; dst1 [ i ] = src [ j + 1 ] ; <nl> split_ ( const T * src , T * * dst , int len , int cn ) <nl> void split8u ( const uchar * src , uchar * * dst , int len , int cn ) <nl> { <nl> CALL_HAL ( split8u , cv_hal_split8u , src , dst , len , cn ) <nl> - split_ ( src , dst , len , cn ) ; <nl> + <nl> + # if CV_SIMD <nl> + if ( len > = v_uint8 : : nlanes & & 2 < = cn & & cn < = 4 ) <nl> + vecsplit_ < uchar , v_uint8 > ( src , dst , len , cn ) ; <nl> + else <nl> + # endif <nl> + split_ ( src , dst , len , cn ) ; <nl> } <nl> <nl> void split16u ( const ushort * src , ushort * * dst , int len , int cn ) <nl> { <nl> CALL_HAL ( split16u , cv_hal_split16u , src , dst , len , cn ) <nl> - split_ ( src , dst , len , cn ) ; <nl> + # if CV_SIMD <nl> + if ( len > = v_uint16 : : nlanes & & 2 < = cn & & cn < = 4 ) <nl> + vecsplit_ < ushort , v_uint16 > ( src , dst , len , cn ) ; <nl> + else <nl> + # endif <nl> + split_ ( src , dst , len , cn ) ; <nl> } <nl> <nl> void split32s ( const int * src , int * * dst , int len , int cn ) <nl> { <nl> CALL_HAL ( split32s , cv_hal_split32s , src , dst , len , cn ) <nl> - split_ ( src , dst , len , cn ) ; <nl> + # if CV_SIMD <nl> + if ( len > = v_uint32 : : nlanes & & 2 < = cn & & cn < = 4 ) <nl> + vecsplit_ < int , v_int32 > ( src , dst , len , cn ) ; <nl> + else <nl> + # endif <nl> + split_ ( src , dst , len , cn ) ; <nl> } <nl> <nl> void split64s ( const int64 * src , int64 * * dst , int len , int cn ) <nl> { <nl> CALL_HAL ( split64s , cv_hal_split64s , src , dst , len , cn ) <nl> - split_ ( src , dst , len , cn ) ; <nl> + # if CV_SIMD <nl> + if ( len > = v_int64 : : nlanes & & 2 < = cn & & cn < = 4 ) <nl> + vecsplit_ < int64 , v_int64 > ( src , dst , len , cn ) ; <nl> + else <nl> + # endif <nl> + split_ ( src , dst , len , cn ) ; <nl> } <nl> <nl> } } / / cv : : hal : : <nl> | converted split ( ) & merge ( ) to wide univ intrinsics ( ) | opencv/opencv | 9c7040802cf3001ffee551d563640ee6ab2af1dd | 2018-07-24T14:27:56Z |
mmm a / src / core / ext / filters / client_channel / resolver / xds / xds_resolver . cc <nl> ppp b / src / core / ext / filters / client_channel / resolver / xds / xds_resolver . cc <nl> void XdsResolver : : GenerateResult ( ) { <nl> void XdsResolver : : MaybeRemoveUnusedClusters ( ) { <nl> bool update_needed = false ; <nl> for ( auto it = cluster_state_map_ . begin ( ) ; it ! = cluster_state_map_ . end ( ) ; ) { <nl> - if ( it - > second - > RefIfNonZero ( ) ) { <nl> - it - > second - > Unref ( ) ; <nl> + RefCountedPtr < ClusterState > cluster_state = it - > second - > RefIfNonZero ( ) ; <nl> + if ( cluster_state ! = nullptr ) { <nl> + + it ; <nl> } else { <nl> update_needed = true ; <nl> mmm a / src / core / lib / channel / channelz_registry . cc <nl> ppp b / src / core / lib / channel / channelz_registry . cc <nl> RefCountedPtr < BaseNode > ChannelzRegistry : : InternalGet ( intptr_t uuid ) { <nl> / / Found node . Return only if its refcount is not zero ( i . e . , when we <nl> / / know that there is no other thread about to destroy it ) . <nl> BaseNode * node = it - > second ; <nl> - if ( ! node - > RefIfNonZero ( ) ) return nullptr ; <nl> - return RefCountedPtr < BaseNode > ( node ) ; <nl> + return node - > RefIfNonZero ( ) ; <nl> } <nl> <nl> std : : string ChannelzRegistry : : InternalGetTopChannels ( <nl> std : : string ChannelzRegistry : : InternalGetTopChannels ( <nl> for ( auto it = node_map_ . lower_bound ( start_channel_id ) ; <nl> it ! = node_map_ . end ( ) ; + + it ) { <nl> BaseNode * node = it - > second ; <nl> + RefCountedPtr < BaseNode > node_ref ; <nl> if ( node - > type ( ) = = BaseNode : : EntityType : : kTopLevelChannel & & <nl> - node - > RefIfNonZero ( ) ) { <nl> + ( node_ref = node - > RefIfNonZero ( ) ) ! = nullptr ) { <nl> / / Check if we are over pagination limit to determine if we need to set <nl> / / the " end " element . If we don ' t go through this block , we know that <nl> / / when the loop terminates , we have < = to kPaginationLimit . <nl> std : : string ChannelzRegistry : : InternalGetTopChannels ( <nl> / / refcount , we need to decrease it , but we can ' t unref while <nl> / / holding the lock , because this may lead to a deadlock . <nl> if ( top_level_channels . size ( ) = = kPaginationLimit ) { <nl> - node_after_pagination_limit . reset ( node ) ; <nl> + node_after_pagination_limit = std : : move ( node_ref ) ; <nl> break ; <nl> } <nl> - top_level_channels . emplace_back ( node ) ; <nl> + top_level_channels . emplace_back ( std : : move ( node_ref ) ) ; <nl> } <nl> } <nl> } <nl> std : : string ChannelzRegistry : : InternalGetServers ( intptr_t start_server_id ) { <nl> for ( auto it = node_map_ . lower_bound ( start_server_id ) ; <nl> it ! = node_map_ . end ( ) ; + + it ) { <nl> BaseNode * node = it - > second ; <nl> + RefCountedPtr < BaseNode > node_ref ; <nl> if ( node - > type ( ) = = BaseNode : : EntityType : : kServer & & <nl> - node - > RefIfNonZero ( ) ) { <nl> + ( node_ref = node - > RefIfNonZero ( ) ) ! = nullptr ) { <nl> / / Check if we are over pagination limit to determine if we need to set <nl> / / the " end " element . If we don ' t go through this block , we know that <nl> / / when the loop terminates , we have < = to kPaginationLimit . <nl> std : : string ChannelzRegistry : : InternalGetServers ( intptr_t start_server_id ) { <nl> / / refcount , we need to decrease it , but we can ' t unref while <nl> / / holding the lock , because this may lead to a deadlock . <nl> if ( servers . 
size ( ) = = kPaginationLimit ) { <nl> - node_after_pagination_limit . reset ( node ) ; <nl> + node_after_pagination_limit = std : : move ( node_ref ) ; <nl> break ; <nl> } <nl> - servers . emplace_back ( node ) ; <nl> + servers . emplace_back ( std : : move ( node_ref ) ) ; <nl> } <nl> } <nl> } <nl> void ChannelzRegistry : : InternalLogAllEntities ( ) { <nl> { <nl> MutexLock lock ( & mu_ ) ; <nl> for ( auto & p : node_map_ ) { <nl> - BaseNode * node = p . second ; <nl> - if ( node - > RefIfNonZero ( ) ) { <nl> - nodes . emplace_back ( node ) ; <nl> + RefCountedPtr < BaseNode > node = p . second - > RefIfNonZero ( ) ; <nl> + if ( node ! = nullptr ) { <nl> + nodes . emplace_back ( std : : move ( node ) ) ; <nl> } <nl> } <nl> } <nl> mmm a / src / core / lib / gprpp / ref_counted . h <nl> ppp b / src / core / lib / gprpp / ref_counted . h <nl> class Delete < T , false > { <nl> / / must be tracked in a registry but the object ' s entry in the registry <nl> / / cannot be removed from the object ' s dtor due to synchronization issues . <nl> / / In this case , the registry can be cleaned up later by identifying <nl> - / / entries for which RefIfNonZero ( ) returns false . <nl> + / / entries for which RefIfNonZero ( ) returns null . <nl> / / <nl> / / This will commonly be used by CRTP ( curiously - recurring template pattern ) <nl> / / e . g . , class MyClass : public RefCounted < MyClass > <nl> class RefCounted : public Impl { <nl> } <nl> } <nl> <nl> - bool RefIfNonZero ( ) { return refs_ . RefIfNonZero ( ) ; } <nl> - bool RefIfNonZero ( const DebugLocation & location , const char * reason ) { <nl> - return refs_ . RefIfNonZero ( location , reason ) ; <nl> + RefCountedPtr < Child > RefIfNonZero ( ) GRPC_MUST_USE_RESULT { <nl> + return RefCountedPtr < Child > ( refs_ . RefIfNonZero ( ) ? static_cast < Child * > ( this ) <nl> + : nullptr ) ; <nl> + } <nl> + RefCountedPtr < Child > RefIfNonZero ( const DebugLocation & location , <nl> + const char * reason ) GRPC_MUST_USE_RESULT { <nl> + return RefCountedPtr < Child > ( refs_ . RefIfNonZero ( location , reason ) <nl> + ? static_cast < Child * > ( this ) <nl> + : nullptr ) ; <nl> } <nl> <nl> / / Not copyable nor movable . <nl> mmm a / test / core / gprpp / ref_counted_test . cc <nl> ppp b / test / core / gprpp / ref_counted_test . cc <nl> TEST ( RefCounted , ExtraRef ) { <nl> <nl> class Value : public RefCounted < Value , PolymorphicRefCount , false > { <nl> public : <nl> - Value ( int value , std : : set < Value * > * registry ) : value_ ( value ) { <nl> - registry - > insert ( this ) ; <nl> + Value ( int value , std : : set < std : : unique_ptr < Value > > * registry ) : value_ ( value ) { <nl> + registry - > emplace ( this ) ; <nl> } <nl> <nl> int value ( ) const { return value_ ; } <nl> class Value : public RefCounted < Value , PolymorphicRefCount , false > { <nl> int value_ ; <nl> } ; <nl> <nl> - void GarbageCollectRegistry ( std : : set < Value * > * registry ) { <nl> + void GarbageCollectRegistry ( std : : set < std : : unique_ptr < Value > > * registry ) { <nl> for ( auto it = registry - > begin ( ) ; it ! = registry - > end ( ) ; ) { <nl> - Value * v = * it ; <nl> + RefCountedPtr < Value > v = ( * it ) - > RefIfNonZero ( ) ; <nl> / / Check if the object has any refs remaining . <nl> - if ( v - > RefIfNonZero ( ) ) { <nl> + if ( v ! = nullptr ) { <nl> / / It has refs remaining , so we do not delete it . <nl> - v - > Unref ( ) ; / / Remove the ref we just added . 
<nl> + + it ; <nl> } else { <nl> - / / No refs remaining , so delete it and remove from registry . <nl> - delete v ; <nl> + / / No refs remaining , so remove it from the registry . <nl> it = registry - > erase ( it ) ; <nl> } <nl> } <nl> } <nl> <nl> TEST ( RefCounted , NoDeleteUponUnref ) { <nl> - std : : set < Value * > registry ; <nl> + std : : set < std : : unique_ptr < Value > > registry ; <nl> / / Add two objects to the registry . <nl> auto v1 = MakeRefCounted < Value > ( 1 , & registry ) ; <nl> auto v2 = MakeRefCounted < Value > ( 2 , & registry ) ; <nl> - EXPECT_THAT ( registry , : : testing : : UnorderedElementsAre ( <nl> - : : testing : : Property ( & Value : : value , 1 ) , <nl> - : : testing : : Property ( & Value : : value , 2 ) ) ) ; <nl> + EXPECT_THAT ( registry , <nl> + : : testing : : UnorderedElementsAre ( <nl> + : : testing : : Pointee ( : : testing : : Property ( & Value : : value , 1 ) ) , <nl> + : : testing : : Pointee ( : : testing : : Property ( & Value : : value , 2 ) ) ) ) ; <nl> / / Running garbage collection should not delete anything , since both <nl> / / entries still have refs . <nl> GarbageCollectRegistry ( & registry ) ; <nl> - EXPECT_THAT ( registry , : : testing : : UnorderedElementsAre ( <nl> - : : testing : : Property ( & Value : : value , 1 ) , <nl> - : : testing : : Property ( & Value : : value , 2 ) ) ) ; <nl> + EXPECT_THAT ( registry , <nl> + : : testing : : UnorderedElementsAre ( <nl> + : : testing : : Pointee ( : : testing : : Property ( & Value : : value , 1 ) ) , <nl> + : : testing : : Pointee ( : : testing : : Property ( & Value : : value , 2 ) ) ) ) ; <nl> / / Unref v2 and run GC to remove it . <nl> v2 . reset ( ) ; <nl> GarbageCollectRegistry ( & registry ) ; <nl> - EXPECT_THAT ( registry , : : testing : : UnorderedElementsAre ( <nl> - : : testing : : Property ( & Value : : value , 1 ) ) ) ; <nl> + EXPECT_THAT ( registry , : : testing : : UnorderedElementsAre ( : : testing : : Pointee ( <nl> + : : testing : : Property ( & Value : : value , 1 ) ) ) ) ; <nl> / / Now unref v1 and run GC again . <nl> v1 . reset ( ) ; <nl> GarbageCollectRegistry ( & registry ) ; <nl> | Merge pull request from markdroth / ref_if_non_zero_api | grpc/grpc | 419fc6394c41cda199fd9036e98797da9227a39a | 2020-09-29T16:36:22Z |
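The gRPC record above changes `RefIfNonZero()` to return a `RefCountedPtr` instead of a bool. A caller-side sketch of the new contract follows; `TryAcquire` is a hypothetical helper, not part of the patch, and it assumes the `grpc_core` declarations shown in the diff.

```cpp
// Hypothetical helper built on the declarations in the diff: RefIfNonZero()
// now returns a RefCountedPtr<BaseNode> that is null when the refcount had
// already hit zero, so the old "bool check plus manual Unref()" pairing
// disappears and GRPC_MUST_USE_RESULT stops callers from dropping the ref
// they just took.
#include "src/core/lib/channel/channelz.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"

namespace grpc_core {
namespace channelz {

RefCountedPtr<BaseNode> TryAcquire(BaseNode* node) {
  RefCountedPtr<BaseNode> ref = node->RefIfNonZero();
  if (ref == nullptr) {
    // Another thread already released the last ref; the node is being torn
    // down and must not be handed out.
    return nullptr;
  }
  return ref;  // Ownership travels with the smart pointer; no manual Unref().
}

}  // namespace channelz
}  // namespace grpc_core
```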
mmm a / modules / photo / src / calibrate . cpp <nl> ppp b / modules / photo / src / calibrate . cpp <nl> class CalibrateDebevecImpl : public CalibrateDebevec <nl> <nl> for ( int i = 0 , x = step_x / 2 ; i < x_points ; i + + , x + = step_x ) { <nl> for ( int j = 0 , y = step_y / 2 ; j < y_points ; j + + , y + = step_y ) { <nl> - sample_points . push_back ( Point ( x , y ) ) ; <nl> + if ( 0 < = x & & x < images [ 0 ] . cols & & <nl> + 0 < = y & & y < images [ 0 ] . rows ) <nl> + sample_points . push_back ( Point ( x , y ) ) ; <nl> } <nl> } <nl> } <nl> | added extra check in CalibrateDebevec to make sure the points are within the image : | opencv/opencv | 3c769edea0c51f7a74693f1ac747dfe9bcb7670e | 2015-05-14T16:54:48Z |
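A standalone sketch of the guarded grid sampling this CalibrateDebevec fix introduces; `sampleGrid` is a hypothetical helper with the same structure, not an OpenCV API, and the step sizes are taken as parameters the way the surrounding code already computes them.

```cpp
// Hypothetical helper with the same shape as the fixed loop: grid coordinates
// are only accepted when they fall inside the image, so over-shooting steps
// can no longer produce out-of-range sample points.
#include <opencv2/core.hpp>
#include <vector>

static std::vector<cv::Point> sampleGrid(const cv::Mat& img, int x_points, int y_points,
                                         int step_x, int step_y)
{
    std::vector<cv::Point> pts;
    for (int i = 0, x = step_x / 2; i < x_points; i++, x += step_x)
        for (int j = 0, y = step_y / 2; j < y_points; j++, y += step_y)
            if (0 <= x && x < img.cols && 0 <= y && y < img.rows)
                pts.push_back(cv::Point(x, y));
    return pts;
}
```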
mmm a / modules / gpu / src / arithm . cpp <nl> ppp b / modules / gpu / src / arithm . cpp <nl> Scalar cv : : gpu : : sum ( const GpuMat & src ) <nl> { <nl> CV_Assert ( src . type ( ) = = CV_8UC1 | | src . type ( ) = = CV_8UC4 ) ; <nl> <nl> - Scalar res ; <nl> + <nl> <nl> <nl> NppiSize sz ; <nl> Scalar cv : : gpu : : sum ( const GpuMat & src ) <nl> { <nl> nppiReductionGetBufferHostSize_8u_C1R ( sz , & bufsz ) ; <nl> GpuMat buf ( 1 , bufsz , CV_32S ) ; <nl> - nppSafeCall ( nppiSum_8u_C1R ( src . ptr < Npp8u > ( ) , src . step , sz , buf . ptr < Npp32s > ( ) , res . val ) ) ; <nl> + <nl> + Scalar res ; <nl> + nppSafeCall ( nppiSum_8u_C1R ( src . ptr < Npp8u > ( ) , src . step , sz , buf . ptr < Npp32s > ( ) , res . val ) ) ; <nl> + return res ; <nl> } <nl> else <nl> { <nl> nppiReductionGetBufferHostSize_8u_C4R ( sz , & bufsz ) ; <nl> GpuMat buf ( 1 , bufsz , CV_32S ) ; <nl> + <nl> + Scalar res ; <nl> nppSafeCall ( nppiSum_8u_C4R ( src . ptr < Npp8u > ( ) , src . step , sz , buf . ptr < Npp32s > ( ) , res . val ) ) ; <nl> + return res ; <nl> } <nl> <nl> - return res ; <nl> + <nl> } <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / modules / gpu / src / error . cpp <nl> ppp b / modules / gpu / src / error . cpp <nl> namespace cv <nl> const string & msg = ( idx ! = error_num ) ? npp_errors [ idx ] . str : string ( " Unknown error code " ) ; <nl> <nl> std : : stringstream interpreter ; <nl> - interpreter < < " < " < < err < < " > " < < msg ; <nl> + interpreter < < msg < < " [ Code = " < < err < < " ] " ; <nl> <nl> return interpreter . str ( ) ; <nl> } <nl> mmm a / modules / gpu / src / filtering_npp . cpp <nl> ppp b / modules / gpu / src / filtering_npp . cpp <nl> namespace <nl> <nl> NppiSize sz ; <nl> sz . width = src . cols ; <nl> - sz . height = dst . rows ; <nl> + sz . height = src . rows ; <nl> <nl> NppiSize mask_sz ; <nl> mask_sz . width = kernel . cols ; <nl> mmm a / tests / gpu / src / arithm . cpp <nl> ppp b / tests / gpu / src / arithm . cpp <nl> CV_GpuNppImageCompareTest CV_GpuNppImageCompare_test ; <nl> CV_GpuNppImageMeanStdDevTest CV_GpuNppImageMeanStdDev_test ; <nl> CV_GpuNppImageNormTest CV_GpuNppImageNorm_test ; <nl> CV_GpuNppImageFlipTest CV_GpuNppImageFlip_test ; <nl> - / / CV_GpuNppImageSumTest CV_GpuNppImageSum_test ; <nl> + CV_GpuNppImageSumTest CV_GpuNppImageSum_test ; <nl> CV_GpuNppImageMinNaxTest CV_GpuNppImageMinNax_test ; <nl> CV_GpuNppImageLUTTest CV_GpuNppImageLUT_test ; <nl> \ No newline at end of file <nl> mmm a / tests / gpu / src / gputest_main . cpp <nl> ppp b / tests / gpu / src / gputest_main . cpp <nl> <nl> <nl> CvTS test_system ; <nl> <nl> + const char * blacklist [ ] = <nl> + { <nl> + " GPU - NppImageSum " , <nl> + 0 <nl> + } ; <nl> + <nl> int main ( int argc , char * * argv ) <nl> { <nl> - return test_system . run ( argc , argv ) ; <nl> + return test_system . run ( argc , argv , blacklist ) ; <nl> } <nl> <nl> / * End of file . * / <nl> | added black list for gpu tests | opencv/opencv | 1b8c00000c2409e5c4f3eb50361b5be0e6c51a1e | 2010-09-27T06:57:25Z |
mmm a / core / os / dir_access . cpp <nl> ppp b / core / os / dir_access . cpp <nl> String DirAccess : : get_full_path ( const String & p_path , AccessType p_access ) { <nl> return full ; <nl> } <nl> <nl> - Error DirAccess : : copy ( String p_from , String p_to ) { <nl> + Error DirAccess : : copy ( String p_from , String p_to , int chmod_flags ) { <nl> <nl> / / printf ( " copy % s - > % s \ n " , p_from . ascii ( ) . get_data ( ) , p_to . ascii ( ) . get_data ( ) ) ; <nl> Error err ; <nl> Error DirAccess : : copy ( String p_from , String p_to ) { <nl> fdst - > store_8 ( fsrc - > get_8 ( ) ) ; <nl> } <nl> <nl> + if ( err = = OK & & chmod_flags ! = - 1 ) { <nl> + fdst - > close ( ) ; <nl> + err = fdst - > _chmod ( p_to , chmod_flags ) ; <nl> + } <nl> + <nl> memdelete ( fsrc ) ; <nl> memdelete ( fdst ) ; <nl> <nl> mmm a / core / os / dir_access . h <nl> ppp b / core / os / dir_access . h <nl> class DirAccess { <nl> static bool exists ( String p_dir ) ; <nl> virtual size_t get_space_left ( ) = 0 ; <nl> <nl> - virtual Error copy ( String p_from , String p_to ) ; <nl> + virtual Error copy ( String p_from , String p_to , int chmod_flags = - 1 ) ; <nl> virtual Error rename ( String p_from , String p_to ) = 0 ; <nl> virtual Error remove ( String p_name ) = 0 ; <nl> <nl> mmm a / core / os / file_access . h <nl> ppp b / core / os / file_access . h <nl> class FileAccess { <nl> <nl> virtual Error reopen ( const String & p_path , int p_mode_flags ) ; / / / < does not change the AccessType <nl> <nl> + virtual Error _chmod ( const String & p_path , int p_mod ) { } <nl> + <nl> static FileAccess * create ( AccessType p_access ) ; / / / Create a file access ( for the current platform ) this is the only portable way of accessing files . <nl> static FileAccess * create_for_path ( const String & p_path ) ; <nl> static FileAccess * open ( const String & p_path , int p_mode_flags , Error * r_error = NULL ) ; / / / Create a file access ( for the current platform ) this is the only portable way of accessing files . <nl> mmm a / drivers / unix / file_access_unix . cpp <nl> ppp b / drivers / unix / file_access_unix . cpp <nl> uint64_t FileAccessUnix : : _get_modified_time ( const String & p_file ) { <nl> } ; <nl> } <nl> <nl> + Error FileAccessUnix : : _chmod ( const String & p_path , int p_mod ) { <nl> + int err = chmod ( p_path . utf8 ( ) . get_data ( ) , p_mod ) ; <nl> + if ( ! err ) { <nl> + return OK ; <nl> + } <nl> + <nl> + return FAILED ; <nl> + } <nl> + <nl> FileAccess * FileAccessUnix : : create_libc ( ) { <nl> <nl> return memnew ( FileAccessUnix ) ; <nl> mmm a / drivers / unix / file_access_unix . h <nl> ppp b / drivers / unix / file_access_unix . h <nl> class FileAccessUnix : public FileAccess { <nl> <nl> virtual uint64_t _get_modified_time ( const String & p_file ) ; <nl> <nl> + virtual Error _chmod ( const String & p_path , int p_mod ) ; <nl> + <nl> FileAccessUnix ( ) ; <nl> virtual ~ FileAccessUnix ( ) ; <nl> } ; <nl> mmm a / editor / editor_export . cpp <nl> ppp b / editor / editor_export . cpp <nl> Error EditorExportPlatformPC : : export_project ( const Ref < EditorExportPreset > & p_pr <nl> } <nl> <nl> DirAccess * da = DirAccess : : create ( DirAccess : : ACCESS_FILESYSTEM ) ; <nl> - da - > copy ( template_path , p_path ) ; <nl> + Error err = da - > copy ( template_path , p_path , get_chmod_flags ( ) ) ; <nl> memdelete ( da ) ; <nl> <nl> + if ( err ! = OK ) { <nl> + return err ; <nl> + } <nl> + <nl> String pck_path = p_path . get_basename ( ) + " . 
pck " ; <nl> <nl> return save_pack ( p_preset , pck_path ) ; <nl> void EditorExportPlatformPC : : get_platform_features ( List < String > * r_features ) { <nl> } <nl> } <nl> <nl> + int EditorExportPlatformPC : : get_chmod_flags ( ) const { <nl> + <nl> + return chmod_flags ; <nl> + } <nl> + <nl> + void EditorExportPlatformPC : : set_chmod_flags ( int p_flags ) { <nl> + <nl> + chmod_flags = p_flags ; <nl> + } <nl> + <nl> EditorExportPlatformPC : : EditorExportPlatformPC ( ) { <nl> + <nl> + chmod_flags = - 1 ; <nl> } <nl> mmm a / editor / editor_export . h <nl> ppp b / editor / editor_export . h <nl> class EditorExportPlatformPC : public EditorExportPlatform { <nl> Set < String > extra_features ; <nl> <nl> bool use64 ; <nl> + int chmod_flags ; <nl> <nl> public : <nl> virtual void get_preset_features ( const Ref < EditorExportPreset > & p_preset , List < String > * r_features ) ; <nl> class EditorExportPlatformPC : public EditorExportPlatform { <nl> void add_platform_feature ( const String & p_feature ) ; <nl> virtual void get_platform_features ( List < String > * r_features ) ; <nl> <nl> + int get_chmod_flags ( ) const ; <nl> + void set_chmod_flags ( int p_flags ) ; <nl> + <nl> EditorExportPlatformPC ( ) ; <nl> } ; <nl> <nl> mmm a / platform / x11 / export / export . cpp <nl> ppp b / platform / x11 / export / export . cpp <nl> void register_x11_exporter ( ) { <nl> platform - > set_release_64 ( " linux_x11_64_release " ) ; <nl> platform - > set_debug_64 ( " linux_x11_64_debug " ) ; <nl> platform - > set_os_name ( " X11 " ) ; <nl> + platform - > set_chmod_flags ( 0755 ) ; <nl> <nl> EditorExport : : get_singleton ( ) - > add_export_platform ( platform ) ; <nl> } <nl> | Merge pull request from marcelofg55 / fix_x11_export | godotengine/godot | 73b8e5acab7e389d7ce8cfb48d533ba74cd86133 | 2017-09-17T20:40:54Z |
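The merged change above copies the export template with chmod flags so the exported Linux binary comes out executable (0755). As a hedged aside, the Unix backend ultimately relies on POSIX chmod(); the sketch below shows just that call with an invented file path, and is not Godot's FileAccessUnix code.

```cpp
// Minimal POSIX-only sketch of what FileAccessUnix::_chmod boils down to: a
// thin wrapper over chmod(). The path is hypothetical; Godot passes the freshly
// copied export template and 0755 so the binary is executable after export.
#include <sys/stat.h>
#include <cerrno>
#include <cstdio>
#include <cstring>

bool make_executable(const char* path, mode_t mode = 0755) {
    if (chmod(path, mode) != 0) {
        std::fprintf(stderr, "chmod(%s) failed: %s\n", path, std::strerror(errno));
        return false;
    }
    return true;
}

int main() {
    // Would mark an exported game binary as executable, mirroring
    // platform->set_chmod_flags(0755) in the X11 exporter above.
    make_executable("./my_exported_game.x86_64");
    return 0;
}
```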
mmm a / . circleci / config . yml <nl> ppp b / . circleci / config . yml <nl> orbs : <nl> executors : <nl> windows - 2xlarge : <nl> machine : <nl> - image : ' windows - server - 2019 - vs2019 : 201908 - 06 ' <nl> + image : ' windows - server - 2019 - vs2019 : stable ' <nl> resource_class : windows . 2xlarge <nl> shell : bash . exe <nl> <nl> mmm a / CMakeLists . txt <nl> ppp b / CMakeLists . txt <nl> set ( SOURCES <nl> monitoring / thread_status_util . cc <nl> monitoring / thread_status_util_debug . cc <nl> options / cf_options . cc <nl> + options / configurable . cc <nl> options / db_options . cc <nl> options / options . cc <nl> options / options_helper . cc <nl> set ( SOURCES <nl> table / sst_file_dumper . cc <nl> table / sst_file_reader . cc <nl> table / sst_file_writer . cc <nl> + table / table_factory . cc <nl> table / table_properties . cc <nl> table / two_level_iterator . cc <nl> test_util / sync_point . cc <nl> if ( WITH_TESTS ) <nl> monitoring / statistics_test . cc <nl> monitoring / stats_dump_scheduler_test . cc <nl> monitoring / stats_history_test . cc <nl> + options / configurable_test . cc <nl> options / options_settable_test . cc <nl> options / options_test . cc <nl> table / block_based / block_based_filter_block_test . cc <nl> mmm a / HISTORY . md <nl> ppp b / HISTORY . md <nl> <nl> * ` DB : : GetDbSessionId ( std : : string & session_id ) ` is added . ` session_id ` stores a unique identifier that gets reset every time the DB is opened . This DB session ID should be unique among all open DB instances on all hosts , and should be unique among re - openings of the same or other DBs . This identifier is recorded in the LOG file on the line starting with " DB Session ID : " . <nl> * ` DB : : OpenForReadOnly ( ) ` now returns ` Status : : NotFound ` when the specified DB directory does not exist . Previously the error returned depended on the underlying ` Env ` . This change is available in all 6 . 11 releases as well . <nl> * A parameter ` verify_with_checksum ` is added to ` BackupEngine : : VerifyBackup ` , which is false by default . If it is ture , ` BackupEngine : : VerifyBackup ` verifies checksums and file sizes of backup files . Pass ` false ` for ` verify_with_checksum ` to maintain the previous behavior and performance of ` BackupEngine : : VerifyBackup ` , by only verifying sizes of backup files . <nl> - <nl> - <nl> + * Methods to configure serialize , and compare - - such as TableFactory - - are exposed directly through the Configurable base class ( from which these objects inherity ) . This change will allow for better and more thorough configuration management and retrieval in the future <nl> # # # Behavior Changes <nl> * Best - efforts recovery ignores CURRENT file completely . If CURRENT file is missing during recovery , best - efforts recovery still proceeds with MANIFEST file ( s ) . <nl> * In best - efforts recovery , an error that is not Corruption or IOError : : kNotFound or IOError : : kPathNotFound will be overwritten silently . Fix this by checking all non - ok cases and return early . <nl> <nl> * Added auto resume function to automatically recover the DB from background Retryable IO Error . When retryable IOError happens during flush and WAL write , the error is mapped to Hard Error and DB will be in read mode . When retryable IO Error happens during compaction , the error will be mapped to Soft Error . DB is still in write / read mode . 
Autoresume function will create a thread for a DB to call DB - > ResumeImpl ( ) to try to recover from Retryable IO Error during flush and WAL write . Compaction will be rescheduled by itself if retryable IO Error happens . Auto resume may also cause other Retryable IO Error during the recovery , so the recovery will fail . Retrying the auto resume may solve the issue , so we use max_bgerror_resume_count to decide how many resume cycles will be tried in total . If it is < = 0 , auto resume retryable IO Error is disabled . Default is INT_MAX , which will lead to an infinite auto resume . bgerror_resume_retry_interval decides the time interval between two auto resumes . <nl> * Option ` max_subcompactions ` can be set dynamically using DB : : SetDBOptions ( ) . <nl> * Added experimental ColumnFamilyOptions : : sst_partitioner_factory to determine the partitioning of sst files . This helps compaction to split the files on interesting boundaries ( key prefixes ) to make propagation of sst files less write amplifying ( covering the whole key space ) . <nl> + * Methods to configure , serialize , and compare - - such as TableFactory - - are exposed directly through the Configurable base class ( from which these objects inherit ) . This change will allow for better and more thorough configuration management and retrieval in the future . The options for a Configurable object can be set via the ConfigureFromMap , ConfigureFromString , or ConfigureOption method . The serialized version of the options of an object can be retrieved via the GetOptionString , ToString , or GetOption methods . The list of options supported by an object can be obtained via the GetOptionNames method . The " raw " object ( such as the BlockBasedTableOptions ) for an option may be retrieved via the GetOptions method . Configurable options can be compared via the AreEquivalent method . The settings within a Configurable object may be validated via the ValidateOptions method . The object may be initialized ( at which point only mutable options may be updated ) via the PrepareOptions method . <nl> + <nl> # # # Performance Improvements <nl> * Eliminate key copies for internal comparisons while accessing ingested block - based tables . <nl> * Reduce key comparisons during random access in all block - based tables . <nl> * BackupEngine avoids unnecessary repeated checksum computation for backing up a table file to the ` shared_checksum ` directory when using ` kOptionalChecksumAndDbSessionId ` , except on SST files generated before this version of RocksDB , which fall back on using ` kChecksumAndFileSize ` . <nl> <nl> + # # # General Improvements <nl> + * The settings of the DBOptions and ColumnFamilyOptions are now managed by Configurable objects ( see New Features ) . The same convenience methods to configure these options still exist but the backend implementation has been unified under a common implementation . <nl> + <nl> # # 6 . 11 ( 6 / 12 / 2020 ) <nl> # # # Bug Fixes <nl> * Fix consistency checking error swallowing in some cases when options . force_consistency_checks = true . 
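The release note above lists the new Configurable entry points (ConfigureFromString, GetOptionString, GetOptions, and friends). The sketch below is a hedged usage example of that surface applied to a block-based table factory, assuming a RocksDB build in which this interface has landed (it is introduced by the diff in this row); the option string and the asserted values are illustrative only.

```cpp
// Hedged usage sketch of the Configurable-style API described in the release
// note above: configure a BlockBasedTable factory from a string, serialize the
// options back out, and fetch the underlying options struct.
#include <cassert>
#include <iostream>
#include <memory>
#include <string>

#include "rocksdb/convenience.h"  // ConfigOptions
#include "rocksdb/table.h"        // TableFactory, BlockBasedTableOptions

int main() {
  rocksdb::ConfigOptions config_options;
  std::shared_ptr<rocksdb::TableFactory> factory(
      rocksdb::NewBlockBasedTableFactory());

  // Set options via ConfigureFromString, the inverse of GetOptionString.
  rocksdb::Status s =
      factory->ConfigureFromString(config_options, "block_size=64K");
  assert(s.ok());

  // Serialize the configured options back to a string.
  std::string serialized;
  s = factory->GetOptionString(config_options, &serialized);
  assert(s.ok());
  std::cout << serialized << "\n";

  // Retrieve the "raw" options object, as the note describes for GetOptions.
  auto* bbto = factory->GetOptions<rocksdb::BlockBasedTableOptions>();
  assert(bbto != nullptr && bbto->block_size == 64 * 1024);
  return 0;
}
```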
<nl> mmm a / Makefile <nl> ppp b / Makefile <nl> PARALLEL_TEST = \ <nl> ifeq ( $ ( USE_FOLLY_DISTRIBUTED_MUTEX ) , 1 ) <nl> TESTS + = folly_synchronization_distributed_mutex_test <nl> PARALLEL_TEST + = folly_synchronization_distributed_mutex_test <nl> + TESTS_PASSING_ASC = folly_synchronization_distributed_mutex_test <nl> endif <nl> <nl> # options_settable_test doesn ' t pass with UBSAN as we use hack in the test <nl> ifdef ASSERT_STATUS_CHECKED <nl> merger_test \ <nl> mock_env_test \ <nl> object_registry_test \ <nl> + configurable_test \ <nl> options_settable_test \ <nl> options_test \ <nl> random_test \ <nl> thread_list_test : $ ( OBJ_DIR ) / util / thread_list_test . o $ ( TEST_LIBRARY ) $ ( LIBRARY ) <nl> compact_files_test : $ ( OBJ_DIR ) / db / compact_files_test . o $ ( TEST_LIBRARY ) $ ( LIBRARY ) <nl> $ ( AM_LINK ) <nl> <nl> + configurable_test : options / configurable_test . o $ ( TEST_LIBRARY ) $ ( LIBRARY ) <nl> + $ ( AM_LINK ) <nl> + <nl> options_test : $ ( OBJ_DIR ) / options / options_test . o $ ( TEST_LIBRARY ) $ ( LIBRARY ) <nl> $ ( AM_LINK ) <nl> <nl> mmm a / TARGETS <nl> ppp b / TARGETS <nl> cpp_library ( <nl> " monitoring / thread_status_util . cc " , <nl> " monitoring / thread_status_util_debug . cc " , <nl> " options / cf_options . cc " , <nl> + " options / configurable . cc " , <nl> " options / db_options . cc " , <nl> " options / options . cc " , <nl> " options / options_helper . cc " , <nl> cpp_library ( <nl> " table / sst_file_dumper . cc " , <nl> " table / sst_file_reader . cc " , <nl> " table / sst_file_writer . cc " , <nl> + " table / table_factory . cc " , <nl> " table / table_properties . cc " , <nl> " table / two_level_iterator . cc " , <nl> " test_util / sync_point . cc " , <nl> ROCKS_TESTS = [ <nl> [ ] , <nl> [ ] , <nl> ] , <nl> + [ <nl> + " configurable_test " , <nl> + " options / configurable_test . cc " , <nl> + " serial " , <nl> + [ ] , <nl> + [ ] , <nl> + ] , <nl> [ <nl> " corruption_test " , <nl> " db / corruption_test . cc " , <nl> mmm a / cache / cache . cc <nl> ppp b / cache / cache . cc <nl> <nl> # include " rocksdb / cache . h " <nl> <nl> # include " cache / lru_cache . h " <nl> - # include " options / options_helper . h " <nl> + # include " rocksdb / utilities / options_type . h " <nl> # include " util / string_util . 
h " <nl> <nl> namespace ROCKSDB_NAMESPACE { <nl> static std : : unordered_map < std : : string , OptionTypeInfo > <nl> lru_cache_options_type_info = { <nl> { " capacity " , <nl> { offsetof ( struct LRUCacheOptions , capacity ) , OptionType : : kSizeT , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kMutable , <nl> - offsetof ( struct LRUCacheOptions , capacity ) } } , <nl> + OptionVerificationType : : kNormal , OptionTypeFlags : : kMutable } } , <nl> { " num_shard_bits " , <nl> { offsetof ( struct LRUCacheOptions , num_shard_bits ) , OptionType : : kInt , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kMutable , <nl> - offsetof ( struct LRUCacheOptions , num_shard_bits ) } } , <nl> + OptionVerificationType : : kNormal , OptionTypeFlags : : kMutable } } , <nl> { " strict_capacity_limit " , <nl> { offsetof ( struct LRUCacheOptions , strict_capacity_limit ) , <nl> OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kMutable , <nl> - offsetof ( struct LRUCacheOptions , strict_capacity_limit ) } } , <nl> + OptionTypeFlags : : kMutable } } , <nl> { " high_pri_pool_ratio " , <nl> { offsetof ( struct LRUCacheOptions , high_pri_pool_ratio ) , <nl> OptionType : : kDouble , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kMutable , <nl> - offsetof ( struct LRUCacheOptions , high_pri_pool_ratio ) } } } ; <nl> + OptionTypeFlags : : kMutable } } , <nl> + } ; <nl> # endif / / ROCKSDB_LITE <nl> <nl> Status Cache : : CreateFromString ( const ConfigOptions & config_options , <nl> mmm a / db / column_family . cc <nl> ppp b / db / column_family . cc <nl> <nl> # include " monitoring / thread_status_util . h " <nl> # include " options / options_helper . h " <nl> # include " port / port . h " <nl> - # include " table / block_based / block_based_table_factory . h " <nl> + # include " rocksdb / table . h " <nl> # include " table / merging_iterator . h " <nl> # include " util / autovector . h " <nl> # include " util / cast_util . h " <nl> ColumnFamilyOptions SanitizeOptions ( const ImmutableDBOptions & db_options , <nl> result . max_compaction_bytes = result . target_file_size_base * 25 ; <nl> } <nl> <nl> - bool is_block_based_table = <nl> - ( result . table_factory - > Name ( ) = = BlockBasedTableFactory : : kName ) ; <nl> + bool is_block_based_table = ( result . table_factory - > IsInstanceOf ( <nl> + TableFactory : : kBlockBasedTableName ( ) ) ) ; <nl> <nl> const uint64_t kAdjustedTtl = 30 * 24 * 60 * 60 ; <nl> if ( result . ttl = = kDefaultTtl ) { <nl> Status ColumnFamilyData : : ValidateOptions ( <nl> } <nl> <nl> if ( cf_options . ttl > 0 & & cf_options . ttl ! = kDefaultTtl ) { <nl> - if ( cf_options . table_factory - > Name ( ) ! = BlockBasedTableFactory : : kName ) { <nl> + if ( ! cf_options . table_factory - > IsInstanceOf ( <nl> + TableFactory : : kBlockBasedTableName ( ) ) ) { <nl> return Status : : NotSupported ( <nl> " TTL is only supported in Block - Based Table format . " ) ; <nl> } <nl> Status ColumnFamilyData : : ValidateOptions ( <nl> <nl> if ( cf_options . periodic_compaction_seconds > 0 & & <nl> cf_options . periodic_compaction_seconds ! = kDefaultPeriodicCompSecs ) { <nl> - if ( cf_options . table_factory - > Name ( ) ! = BlockBasedTableFactory : : kName ) { <nl> + if ( ! cf_options . table_factory - > IsInstanceOf ( <nl> + TableFactory : : kBlockBasedTableName ( ) ) ) { <nl> return Status : : NotSupported ( <nl> " Periodic Compaction is only supported in " <nl> " Block - Based Table format . 
" ) ; <nl> mmm a / db / db_basic_test . cc <nl> ppp b / db / db_basic_test . cc <nl> TEST_P ( DBBasicTestDeadline , PointLookupDeadline ) { <nl> SetTimeElapseOnlySleepOnReopen ( & options ) ; <nl> Reopen ( options ) ; <nl> <nl> - if ( options . table_factory & & <nl> - ! strcmp ( options . table_factory - > Name ( ) , <nl> - BlockBasedTableFactory : : kName . c_str ( ) ) ) { <nl> - BlockBasedTableFactory * bbtf = <nl> - static_cast < BlockBasedTableFactory * > ( options . table_factory . get ( ) ) ; <nl> - block_cache = bbtf - > table_options ( ) . block_cache . get ( ) ; <nl> + if ( options . table_factory ) { <nl> + block_cache = options . table_factory - > GetOptions < Cache > ( <nl> + TableFactory : : kBlockCacheOpts ( ) ) ; <nl> } <nl> <nl> Random rnd ( 301 ) ; <nl> TEST_P ( DBBasicTestDeadline , IteratorDeadline ) { <nl> SetTimeElapseOnlySleepOnReopen ( & options ) ; <nl> Reopen ( options ) ; <nl> <nl> - if ( options . table_factory & & <nl> - ! strcmp ( options . table_factory - > Name ( ) , <nl> - BlockBasedTableFactory : : kName . c_str ( ) ) ) { <nl> - BlockBasedTableFactory * bbtf = <nl> - static_cast < BlockBasedTableFactory * > ( options . table_factory . get ( ) ) ; <nl> - block_cache = bbtf - > table_options ( ) . block_cache . get ( ) ; <nl> + if ( options . table_factory ) { <nl> + block_cache = options . table_factory - > GetOptions < Cache > ( <nl> + TableFactory : : kBlockCacheOpts ( ) ) ; <nl> } <nl> <nl> Random rnd ( 301 ) ; <nl> mmm a / db / db_block_cache_test . cc <nl> ppp b / db / db_block_cache_test . cc <nl> class DBBlockCacheTest : public DBTestBase { <nl> options . avoid_flush_during_recovery = false ; <nl> / / options . compression = kNoCompression ; <nl> options . statistics = ROCKSDB_NAMESPACE : : CreateDBStatistics ( ) ; <nl> - options . table_factory . reset ( new BlockBasedTableFactory ( table_options ) ) ; <nl> + options . table_factory . reset ( NewBlockBasedTableFactory ( table_options ) ) ; <nl> return options ; <nl> } <nl> <nl> TEST_F ( DBBlockCacheTest , IteratorBlockCacheUsage ) { <nl> <nl> std : : shared_ptr < Cache > cache = NewLRUCache ( 0 , 0 , false ) ; <nl> table_options . block_cache = cache ; <nl> - options . table_factory . reset ( new BlockBasedTableFactory ( table_options ) ) ; <nl> + options . table_factory . reset ( NewBlockBasedTableFactory ( table_options ) ) ; <nl> Reopen ( options ) ; <nl> RecordCacheCounters ( options ) ; <nl> <nl> TEST_F ( DBBlockCacheTest , TestWithoutCompressedBlockCache ) { <nl> <nl> std : : shared_ptr < Cache > cache = NewLRUCache ( 0 , 0 , false ) ; <nl> table_options . block_cache = cache ; <nl> - options . table_factory . reset ( new BlockBasedTableFactory ( table_options ) ) ; <nl> + options . table_factory . reset ( NewBlockBasedTableFactory ( table_options ) ) ; <nl> Reopen ( options ) ; <nl> RecordCacheCounters ( options ) ; <nl> <nl> TEST_F ( DBBlockCacheTest , TestWithCompressedBlockCache ) { <nl> std : : shared_ptr < Cache > compressed_cache = NewLRUCache ( 1 < < 25 , 0 , false ) ; <nl> table_options . block_cache = cache ; <nl> table_options . block_cache_compressed = compressed_cache ; <nl> - options . table_factory . reset ( new BlockBasedTableFactory ( table_options ) ) ; <nl> + options . table_factory . reset ( NewBlockBasedTableFactory ( table_options ) ) ; <nl> Reopen ( options ) ; <nl> RecordCacheCounters ( options ) ; <nl> <nl> TEST_F ( DBBlockCacheTest , IndexAndFilterBlocksOfNewTableAddedToCache ) { <nl> BlockBasedTableOptions table_options ; <nl> table_options . 
cache_index_and_filter_blocks = true ; <nl> table_options . filter_policy . reset ( NewBloomFilterPolicy ( 20 ) ) ; <nl> - options . table_factory . reset ( new BlockBasedTableFactory ( table_options ) ) ; <nl> + options . table_factory . reset ( NewBlockBasedTableFactory ( table_options ) ) ; <nl> CreateAndReopenWithCF ( { " pikachu " } , options ) ; <nl> <nl> ASSERT_OK ( Put ( 1 , " key " , " val " ) ) ; <nl> TEST_F ( DBBlockCacheTest , FillCacheAndIterateDB ) { <nl> <nl> std : : shared_ptr < Cache > cache = NewLRUCache ( 10 , 0 , true ) ; <nl> table_options . block_cache = cache ; <nl> - options . table_factory . reset ( new BlockBasedTableFactory ( table_options ) ) ; <nl> + options . table_factory . reset ( NewBlockBasedTableFactory ( table_options ) ) ; <nl> Reopen ( options ) ; <nl> ASSERT_OK ( Put ( " key1 " , " val1 " ) ) ; <nl> ASSERT_OK ( Put ( " key2 " , " val2 " ) ) ; <nl> TEST_F ( DBBlockCacheTest , IndexAndFilterBlocksStats ) { <nl> std : : shared_ptr < Cache > cache = NewLRUCache ( co ) ; <nl> table_options . block_cache = cache ; <nl> table_options . filter_policy . reset ( NewBloomFilterPolicy ( 20 , true ) ) ; <nl> - options . table_factory . reset ( new BlockBasedTableFactory ( table_options ) ) ; <nl> + options . table_factory . reset ( NewBlockBasedTableFactory ( table_options ) ) ; <nl> CreateAndReopenWithCF ( { " pikachu " } , options ) ; <nl> <nl> ASSERT_OK ( Put ( 1 , " longer_key " , " val " ) ) ; <nl> TEST_F ( DBBlockCacheTest , IndexAndFilterBlocksCachePriority ) { <nl> table_options . filter_policy . reset ( NewBloomFilterPolicy ( 20 ) ) ; <nl> table_options . cache_index_and_filter_blocks_with_high_priority = <nl> priority = = Cache : : Priority : : HIGH ? true : false ; <nl> - options . table_factory . reset ( new BlockBasedTableFactory ( table_options ) ) ; <nl> + options . table_factory . reset ( NewBlockBasedTableFactory ( table_options ) ) ; <nl> DestroyAndReopen ( options ) ; <nl> <nl> MockCache : : high_pri_insert_count = 0 ; <nl> TEST_F ( DBBlockCacheTest , AddRedundantStats ) { <nl> table_options . cache_index_and_filter_blocks = true ; <nl> table_options . block_cache = cache ; <nl> table_options . filter_policy . reset ( NewBloomFilterPolicy ( 50 ) ) ; <nl> - options . table_factory . reset ( new BlockBasedTableFactory ( table_options ) ) ; <nl> + options . table_factory . reset ( NewBlockBasedTableFactory ( table_options ) ) ; <nl> DestroyAndReopen ( options ) ; <nl> <nl> / / Create a new table . <nl> TEST_F ( DBBlockCacheTest , ParanoidFileChecks ) { <nl> BlockBasedTableOptions table_options ; <nl> table_options . cache_index_and_filter_blocks = false ; <nl> table_options . filter_policy . reset ( NewBloomFilterPolicy ( 20 ) ) ; <nl> - options . table_factory . reset ( new BlockBasedTableFactory ( table_options ) ) ; <nl> + options . table_factory . reset ( NewBlockBasedTableFactory ( table_options ) ) ; <nl> CreateAndReopenWithCF ( { " pikachu " } , options ) ; <nl> <nl> ASSERT_OK ( Put ( 1 , " 1_key " , " val " ) ) ; <nl> TEST_F ( DBBlockCacheTest , CacheCompressionDict ) { <nl> BlockBasedTableOptions table_options ; <nl> table_options . cache_index_and_filter_blocks = true ; <nl> table_options . block_cache . reset ( new MockCache ( ) ) ; <nl> - options . table_factory . reset ( new BlockBasedTableFactory ( table_options ) ) ; <nl> + options . table_factory . 
reset ( NewBlockBasedTableFactory ( table_options ) ) ; <nl> DestroyAndReopen ( options ) ; <nl> <nl> RecordCacheCountersForCompressionDict ( options ) ; <nl> mmm a / db / db_bloom_filter_test . cc <nl> ppp b / db / db_bloom_filter_test . cc <nl> TEST_P ( DBBloomFilterTestDefFormatVersion , KeyMayExist ) { <nl> options_override . partition_filters = partition_filters_ ; <nl> options_override . metadata_block_size = 32 ; <nl> Options options = CurrentOptions ( options_override ) ; <nl> - if ( partition_filters_ & & <nl> - static_cast < BlockBasedTableOptions * > ( <nl> - options . table_factory - > GetOptions ( ) ) <nl> - - > index_type ! = BlockBasedTableOptions : : kTwoLevelIndexSearch ) { <nl> - / / In the current implementation partitioned filters depend on partitioned <nl> - / / indexes <nl> - continue ; <nl> + if ( partition_filters_ ) { <nl> + auto * table_options = <nl> + options . table_factory - > GetOptions < BlockBasedTableOptions > ( ) ; <nl> + if ( table_options ! = nullptr & & <nl> + table_options - > index_type ! = <nl> + BlockBasedTableOptions : : kTwoLevelIndexSearch ) { <nl> + / / In the current implementation partitioned filters depend on <nl> + / / partitioned indexes <nl> + continue ; <nl> + } <nl> } <nl> options . statistics = ROCKSDB_NAMESPACE : : CreateDBStatistics ( ) ; <nl> CreateAndReopenWithCF ( { " pikachu " } , options ) ; <nl> mmm a / db / db_impl / db_impl_open . cc <nl> ppp b / db / db_impl / db_impl_open . cc <nl> <nl> # include " monitoring / persistent_stats_history . h " <nl> # include " monitoring / stats_dump_scheduler . h " <nl> # include " options / options_helper . h " <nl> + # include " rocksdb / table . h " <nl> # include " rocksdb / wal_filter . h " <nl> - # include " table / block_based / block_based_table_factory . h " <nl> # include " test_util / sync_point . h " <nl> # include " util / rate_limiter . h " <nl> <nl> DBOptions SanitizeOptions ( const std : : string & dbname , const DBOptions & src ) { <nl> } <nl> <nl> namespace { <nl> - Status SanitizeOptionsByTable ( <nl> + Status ValidateOptionsByTable ( <nl> const DBOptions & db_opts , <nl> const std : : vector < ColumnFamilyDescriptor > & column_families ) { <nl> Status s ; <nl> for ( auto cf : column_families ) { <nl> - s = cf . options . table_factory - > SanitizeOptions ( db_opts , cf . options ) ; <nl> + s = ValidateOptions ( db_opts , cf . options ) ; <nl> if ( ! s . ok ( ) ) { <nl> return s ; <nl> } <nl> Status DBImpl : : Open ( const DBOptions & db_options , const std : : string & dbname , <nl> const std : : vector < ColumnFamilyDescriptor > & column_families , <nl> std : : vector < ColumnFamilyHandle * > * handles , DB * * dbptr , <nl> const bool seq_per_batch , const bool batch_per_txn ) { <nl> - Status s = SanitizeOptionsByTable ( db_options , column_families ) ; <nl> + Status s = ValidateOptionsByTable ( db_options , column_families ) ; <nl> if ( ! s . ok ( ) ) { <nl> return s ; <nl> } <nl> mmm a / db / db_iterator_test . cc <nl> ppp b / db / db_iterator_test . cc <nl> TEST_P ( DBIteratorTest , ReadAhead ) { <nl> BlockBasedTableOptions table_options ; <nl> table_options . block_size = 1024 ; <nl> table_options . no_block_cache = true ; <nl> - options . table_factory . reset ( new BlockBasedTableFactory ( table_options ) ) ; <nl> + options . table_factory . reset ( NewBlockBasedTableFactory ( table_options ) ) ; <nl> Reopen ( options ) ; <nl> <nl> std : : string value ( 1024 , ' a ' ) ; <nl> mmm a / db / db_options_test . cc <nl> ppp b / db / db_options_test . 
cc <nl> class DBOptionsTest : public DBTestBase { <nl> std : : unordered_map < std : : string , std : : string > GetMutableDBOptionsMap ( <nl> const DBOptions & options ) { <nl> std : : string options_str ; <nl> + std : : unordered_map < std : : string , std : : string > mutable_map ; <nl> ConfigOptions config_options ; <nl> config_options . delimiter = " ; " ; <nl> - GetStringFromDBOptions ( config_options , options , & options_str ) ; <nl> - std : : unordered_map < std : : string , std : : string > options_map ; <nl> - StringToMap ( options_str , & options_map ) ; <nl> - std : : unordered_map < std : : string , std : : string > mutable_map ; <nl> - for ( const auto & opt : db_options_type_info ) { <nl> - if ( opt . second . IsMutable ( ) & & opt . second . ShouldSerialize ( ) ) { <nl> - mutable_map [ opt . first ] = options_map [ opt . first ] ; <nl> - } <nl> - } <nl> + <nl> + GetStringFromMutableDBOptions ( config_options , MutableDBOptions ( options ) , <nl> + & options_str ) ; <nl> + StringToMap ( options_str , & mutable_map ) ; <nl> return mutable_map ; <nl> } <nl> <nl> class DBOptionsTest : public DBTestBase { <nl> ConfigOptions config_options ; <nl> config_options . delimiter = " ; " ; <nl> <nl> - GetStringFromColumnFamilyOptions ( config_options , options , & options_str ) ; <nl> - std : : unordered_map < std : : string , std : : string > options_map ; <nl> - StringToMap ( options_str , & options_map ) ; <nl> std : : unordered_map < std : : string , std : : string > mutable_map ; <nl> - for ( const auto & opt : cf_options_type_info ) { <nl> - if ( opt . second . IsMutable ( ) & & opt . second . ShouldSerialize ( ) ) { <nl> - mutable_map [ opt . first ] = options_map [ opt . first ] ; <nl> - } <nl> - } <nl> + GetStringFromMutableCFOptions ( config_options , MutableCFOptions ( options ) , <nl> + & options_str ) ; <nl> + StringToMap ( options_str , & mutable_map ) ; <nl> return mutable_map ; <nl> } <nl> <nl> mmm a / db / db_test . cc <nl> ppp b / db / db_test . cc <nl> <nl> # include " rocksdb / utilities / checkpoint . h " <nl> # include " rocksdb / utilities / optimistic_transaction_db . h " <nl> # include " rocksdb / utilities / write_batch_with_index . h " <nl> - # include " table / block_based / block_based_table_factory . h " <nl> # include " table / mock_table . h " <nl> - # include " table / plain / plain_table_factory . h " <nl> # include " table / scoped_arena_iterator . h " <nl> # include " test_util / sync_point . h " <nl> # include " test_util / testharness . h " <nl> TEST_F ( DBTest , TableOptionsSanitizeTest ) { <nl> DestroyAndReopen ( options ) ; <nl> ASSERT_EQ ( db_ - > GetOptions ( ) . allow_mmap_reads , false ) ; <nl> <nl> - options . table_factory . reset ( new PlainTableFactory ( ) ) ; <nl> + options . table_factory . reset ( NewPlainTableFactory ( ) ) ; <nl> options . prefix_extractor . reset ( NewNoopTransform ( ) ) ; <nl> Destroy ( options ) ; <nl> ASSERT_TRUE ( ! TryReopen ( options ) . IsNotSupported ( ) ) ; <nl> mmm a / db / db_test2 . cc <nl> ppp b / db / db_test2 . cc <nl> TEST_F ( DBTest2 , OptimizeForSmallDB ) { <nl> options . OptimizeForSmallDb ( ) ; <nl> <nl> / / Find the cache object <nl> - ASSERT_EQ ( std : : string ( BlockBasedTableFactory : : kName ) , <nl> - std : : string ( options . table_factory - > Name ( ) ) ) ; <nl> - BlockBasedTableOptions * table_options = <nl> - reinterpret_cast < BlockBasedTableOptions * > ( <nl> - options . table_factory - > GetOptions ( ) ) ; <nl> + ASSERT_TRUE ( options . 
table_factory - > IsInstanceOf ( <nl> + TableFactory : : kBlockBasedTableName ( ) ) ) ; <nl> + auto table_options = <nl> + options . table_factory - > GetOptions < BlockBasedTableOptions > ( ) ; <nl> + <nl> ASSERT_TRUE ( table_options ! = nullptr ) ; <nl> std : : shared_ptr < Cache > cache = table_options - > block_cache ; <nl> <nl> mmm a / db / db_test_util . h <nl> ppp b / db / db_test_util . h <nl> <nl> # include " rocksdb / statistics . h " <nl> # include " rocksdb / table . h " <nl> # include " rocksdb / utilities / checkpoint . h " <nl> - # include " table / block_based / block_based_table_factory . h " <nl> # include " table / mock_table . h " <nl> - # include " table / plain / plain_table_factory . h " <nl> # include " table / scoped_arena_iterator . h " <nl> # include " test_util / mock_time_env . h " <nl> # include " test_util / sync_point . h " <nl> mmm a / db / flush_job . cc <nl> ppp b / db / flush_job . cc <nl> <nl> # include " rocksdb / statistics . h " <nl> # include " rocksdb / status . h " <nl> # include " rocksdb / table . h " <nl> - # include " table / block_based / block . h " <nl> - # include " table / block_based / block_based_table_factory . h " <nl> # include " table / merging_iterator . h " <nl> # include " table / table_builder . h " <nl> # include " table / two_level_iterator . h " <nl> mmm a / db / internal_stats . cc <nl> ppp b / db / internal_stats . cc <nl> <nl> <nl> # include " db / column_family . h " <nl> # include " db / db_impl / db_impl . h " <nl> - # include " table / block_based / block_based_table_factory . h " <nl> + # include " rocksdb / table . h " <nl> # include " util / string_util . h " <nl> <nl> namespace ROCKSDB_NAMESPACE { <nl> bool InternalStats : : HandleBlockCacheStat ( Cache * * block_cache ) { <nl> assert ( block_cache ! = nullptr ) ; <nl> auto * table_factory = cfd_ - > ioptions ( ) - > table_factory ; <nl> assert ( table_factory ! = nullptr ) ; <nl> - if ( BlockBasedTableFactory : : kName ! = table_factory - > Name ( ) ) { <nl> - return false ; <nl> - } <nl> - auto * table_options = <nl> - reinterpret_cast < BlockBasedTableOptions * > ( table_factory - > GetOptions ( ) ) ; <nl> - if ( table_options = = nullptr ) { <nl> - return false ; <nl> - } <nl> - * block_cache = table_options - > block_cache . get ( ) ; <nl> - if ( table_options - > no_block_cache | | * block_cache = = nullptr ) { <nl> - return false ; <nl> - } <nl> - return true ; <nl> + * block_cache = <nl> + table_factory - > GetOptions < Cache > ( TableFactory : : kBlockCacheOpts ( ) ) ; <nl> + return * block_cache ! = nullptr ; <nl> } <nl> <nl> bool InternalStats : : HandleBlockCacheCapacity ( uint64_t * value , DBImpl * / * db * / , <nl> mmm a / db / listener_test . cc <nl> ppp b / db / listener_test . cc <nl> <nl> # include " rocksdb / slice_transform . h " <nl> # include " rocksdb / table . h " <nl> # include " rocksdb / table_properties . h " <nl> - # include " table / block_based / block_based_table_factory . h " <nl> - # include " table / plain / plain_table_factory . h " <nl> # include " test_util / sync_point . h " <nl> # include " test_util / testharness . h " <nl> # include " test_util / testutil . h " <nl> mmm a / db / table_properties_collector_test . cc <nl> ppp b / db / table_properties_collector_test . cc <nl> <nl> / / COPYING file in the root directory ) and Apache 2 . 0 License <nl> / / ( found in the LICENSE . Apache file in the root directory ) . <nl> <nl> + # include " db / table_properties_collector . 
h " <nl> + <nl> # include < map > <nl> # include < memory > <nl> # include < string > <nl> <nl> <nl> # include " db / db_impl / db_impl . h " <nl> # include " db / dbformat . h " <nl> - # include " db / table_properties_collector . h " <nl> # include " env / composite_env_wrapper . h " <nl> # include " file / sequence_file_reader . h " <nl> # include " file / writable_file_writer . h " <nl> # include " options / cf_options . h " <nl> + # include " rocksdb / flush_block_policy . h " <nl> # include " rocksdb / table . h " <nl> # include " table / block_based / block_based_table_factory . h " <nl> # include " table / meta_blocks . h " <nl> mmm a / examples / options_file_example . cc <nl> ppp b / examples / options_file_example . cc <nl> int main ( ) { <nl> <nl> / / Initialize pointer options for each column family <nl> for ( size_t i = 0 ; i < loaded_cf_descs . size ( ) ; + + i ) { <nl> - auto * loaded_bbt_opt = reinterpret_cast < BlockBasedTableOptions * > ( <nl> - loaded_cf_descs [ 0 ] . options . table_factory - > GetOptions ( ) ) ; <nl> + auto * loaded_bbt_opt = <nl> + loaded_cf_descs [ 0 ] <nl> + . options . table_factory - > GetOptions < BlockBasedTableOptions > ( ) ; <nl> / / Expect the same as BlockBasedTableOptions will be loaded form file . <nl> assert ( loaded_bbt_opt - > block_size = = bbt_opts . block_size ) ; <nl> / / However , block_cache needs to be manually initialized as documented <nl> new file mode 100644 <nl> index 0000000000 . . f4bfbf5328 <nl> mmm / dev / null <nl> ppp b / include / rocksdb / configurable . h <nl> <nl> + / / Copyright ( c ) 2011 - present , Facebook , Inc . All rights reserved . <nl> + / / This source code is licensed under both the GPLv2 ( found in the <nl> + / / COPYING file in the root directory ) and Apache 2 . 0 License <nl> + / / ( found in the LICENSE . Apache file in the root directory ) . <nl> + / / Copyright ( c ) 2011 The LevelDB Authors . All rights reserved . <nl> + / / Use of this source code is governed by a BSD - style license that can be <nl> + / / found in the LICENSE file . See the AUTHORS file for names of contributors . <nl> + <nl> + # pragma once <nl> + <nl> + # include < string > <nl> + # include < unordered_map > <nl> + # include < unordered_set > <nl> + # include < vector > <nl> + <nl> + # include " rocksdb / rocksdb_namespace . h " <nl> + # include " rocksdb / status . h " <nl> + <nl> + namespace ROCKSDB_NAMESPACE { <nl> + class Logger ; <nl> + class ObjectRegistry ; <nl> + class OptionTypeInfo ; <nl> + struct ColumnFamilyOptions ; <nl> + struct ConfigOptions ; <nl> + struct DBOptions ; <nl> + <nl> + / / Configurable is a base class used by the rocksdb that describes a <nl> + / / standard way of configuring objects . A Configurable object can : <nl> + / / - > Populate itself given : <nl> + / / - One or more " name / value " pair strings <nl> + / / - A string repesenting the set of name = value properties <nl> + / / - A map of name / value properties . <nl> + / / - > Convert itself into its string representation <nl> + / / - > Dump itself to a Logger <nl> + / / - > Compare itself to another Configurable object to see if the two objects <nl> + / / have equivalent options settings <nl> + / / <nl> + / / If a derived class calls RegisterOptions to register ( by name ) how its <nl> + / / options objects are to be processed , this functionality can typically be <nl> + / / handled by this class without additional overrides . 
Otherwise , the derived <nl> + / / class will need to implement the methods for handling the corresponding <nl> + / / functionality . <nl> + class Configurable { <nl> + protected : <nl> + friend class ConfigurableHelper ; <nl> + struct RegisteredOptions { <nl> + / / The name of the options being registered <nl> + std : : string name ; <nl> + / / Pointer to the object being registered <nl> + void * opt_ptr ; <nl> + # ifndef ROCKSDB_LITE <nl> + / / The map of options being registered <nl> + const std : : unordered_map < std : : string , OptionTypeInfo > * type_map ; <nl> + # endif <nl> + } ; <nl> + <nl> + public : <nl> + Configurable ( ) : prepared_ ( false ) { } <nl> + virtual ~ Configurable ( ) { } <nl> + <nl> + / / Returns the raw pointer of the named options that is used by this <nl> + / / object , or nullptr if this function is not supported . <nl> + / / Since the return value is a raw pointer , the object owns the <nl> + / / pointer and the caller should not delete the pointer . <nl> + / / <nl> + / / Note that changing the underlying options while the object <nl> + / / is currently used by any open DB is undefined behavior . <nl> + / / Developers should use DB : : SetOption ( ) instead to dynamically change <nl> + / / options while the DB is open . <nl> + template < typename T > <nl> + const T * GetOptions ( ) const { <nl> + return GetOptions < T > ( T : : kName ( ) ) ; <nl> + } <nl> + template < typename T > <nl> + T * GetOptions ( ) { <nl> + return GetOptions < T > ( T : : kName ( ) ) ; <nl> + } <nl> + template < typename T > <nl> + const T * GetOptions ( const std : : string & name ) const { <nl> + return reinterpret_cast < const T * > ( GetOptionsPtr ( name ) ) ; <nl> + } <nl> + template < typename T > <nl> + T * GetOptions ( const std : : string & name ) { <nl> + return reinterpret_cast < T * > ( const_cast < void * > ( GetOptionsPtr ( name ) ) ) ; <nl> + } <nl> + <nl> + / / Configures the options for this class based on the input parameters . <nl> + / / On successful completion , the object is updated with the settings from <nl> + / / the opt_map . <nl> + / / If this method fails , an attempt is made to revert the object to original <nl> + / / state . Note that the revert may not be the original state but may be an <nl> + / / equivalent . For example , if the object contains an option that is a <nl> + / / shared_ptr , the shared_ptr may not be the original one but a copy ( e . g . not <nl> + / / the Cache object that was passed in , but a Cache object of the same size ) . <nl> + / / <nl> + / / The acceptable values of the name / value pairs are documented with the <nl> + / / specific class / instance . <nl> + / / <nl> + / / @ param config_options Controls how the arguments are processed . <nl> + / / @ param opt_map Name / value pairs of the options to update <nl> + / / @ param unused If specified , this value will return the name / value <nl> + / / pairs from opt_map that were NotFound for this object . <nl> + / / @ return OK If all values in the map were successfully updated <nl> + / / If invoke_prepare_options is true , OK also implies <nl> + / / PrepareOptions ran successfully . <nl> + / / @ return NotFound If any of the names in the opt_map were not valid <nl> + / / for this object . If unused is specified , it will contain the <nl> + / / collection of NotFound names . <nl> + / / @ return NotSupported If any of the names are valid but the object does <nl> + / / not know how to convert the value . 
This can happen if , for example , <nl> + / / there is some nested Configurable that cannot be created . <nl> + / / @ return InvalidArgument If any of the values cannot be successfully <nl> + / / parsed . This can also be returned if PrepareOptions encounters an <nl> + / / error . <nl> + / / @ see ConfigOptions for a description of the controls . <nl> + Status ConfigureFromMap ( <nl> + const ConfigOptions & config_options , <nl> + const std : : unordered_map < std : : string , std : : string > & opt_map ) ; <nl> + Status ConfigureFromMap ( <nl> + const ConfigOptions & config_options , <nl> + const std : : unordered_map < std : : string , std : : string > & opt_map , <nl> + std : : unordered_map < std : : string , std : : string > * unused ) ; <nl> + <nl> + # ifndef ROCKSDB_LITE <nl> + / / Updates the named option to the input value , returning OK if successful . <nl> + / / Note that ConfigureOption does not cause PrepareOptions to be invoked . <nl> + / / @ param config_options Controls how the name / value is processed . <nl> + / / @ param name The name of the option to update <nl> + / / @ param value The value to set for the named option <nl> + / / @ return OK If the named field was successfully updated to value . <nl> + / / @ return NotFound If the name is not valid for this object . <nl> + / / @ return NotSupported If the name is valid but the object does <nl> + / / not know how to convert the value . This can happen if , for example , <nl> + / / there is some nested Configurable that cannot be created . <nl> + / / @ return InvalidArgument If the value cannot be successfully parsed . <nl> + Status ConfigureOption ( const ConfigOptions & config_options , <nl> + const std : : string & name , const std : : string & value ) ; <nl> + # endif / / ROCKSDB_LITE <nl> + <nl> + / / Configures the options for this class based on the input parameters . <nl> + / / On successful completion , the object is updated with the settings from <nl> + / / the opt_map . If this method fails , an attempt is made to revert the <nl> + / / object to original state . Note that the revert may not be the original <nl> + / / state but may be an equivalent . <nl> + / / @ see ConfigureFromMap for more details <nl> + / / @ param config_options Controls how the arguments are processed . <nl> + / / @ param opt_str string containing the values to update . <nl> + / / @ param unused If specified , this value will return the name / value <nl> + / / pairs from opt_map that were NotFound for this object . <nl> + / / @ return OK If all specified values were successfully updated <nl> + / / If invoke_prepare_options is true , OK also implies <nl> + / / PrepareOptions ran successfully . <nl> + / / @ return NotFound If any of the names were not valid for this object . <nl> + / / If unused is specified , it will contain the collection of NotFound <nl> + / / names . <nl> + / / @ return NotSupported If any of the names are valid but the object does <nl> + / / not know how to convert the value . This can happen if , for example , <nl> + / / there is some nested Configurable that cannot be created . <nl> + / / @ return InvalidArgument If any of the values cannot be successfully <nl> + / / parsed . This can also be returned if PrepareOptions encounters an <nl> + / / error . <nl> + Status ConfigureFromString ( const ConfigOptions & config_options , <nl> + const std : : string & opts ) ; <nl> + <nl> + / / Fills in result with the serialized options for this object . <nl> + / / This is the inverse of ConfigureFromString . 
<nl> + / / @ param config_options Controls how serialization happens . <nl> + / / @ param result The string representation of this object . <nl> + / / @ return OK If the options for this object wer successfully serialized . <nl> + / / @ return InvalidArgument If one or more of the options could not be <nl> + / / serialized . <nl> + Status GetOptionString ( const ConfigOptions & config_options , <nl> + std : : string * result ) const ; <nl> + # ifndef ROCKSDB_LITE <nl> + / / Returns the serialized options for this object . <nl> + / / This method is similar to GetOptionString with no errors . <nl> + / / @ param config_options Controls how serialization happens . <nl> + / / @ param prefix A string to prepend to every option . <nl> + / / @ return The serialized representation of the options for this object <nl> + std : : string ToString ( const ConfigOptions & config_options ) const { <nl> + return ToString ( config_options , " " ) ; <nl> + } <nl> + std : : string ToString ( const ConfigOptions & config_options , <nl> + const std : : string & prefix ) const ; <nl> + <nl> + / / Returns the list of option names associated with this configurable <nl> + / / @ param config_options Controls how the names are returned <nl> + / / @ param result The set of option names for this object . Note that <nl> + / / options that are deprecated or aliases are not returned . <nl> + / / @ return OK on success . <nl> + Status GetOptionNames ( const ConfigOptions & config_options , <nl> + std : : unordered_set < std : : string > * result ) const ; <nl> + <nl> + / / Returns the value of the option associated with the input name <nl> + / / This method is the functional inverse of ConfigureOption <nl> + / / @ param config_options Controls how the value is returned <nl> + / / @ param name The name of the option to return a value for . <nl> + / / @ param value The returned value associated with the named option . <nl> + / / @ return OK If the named field was successfully updated to value . <nl> + / / @ return NotFound If the name is not valid for this object . <nl> + / / @ param InvalidArgument If the name is valid for this object but <nl> + / / its value cannot be serialized . <nl> + virtual Status GetOption ( const ConfigOptions & config_options , <nl> + const std : : string & name , std : : string * value ) const ; <nl> + # endif / / ROCKSDB_LITE <nl> + <nl> + / / Checks to see if this Configurable is equivalent to other . <nl> + / / This method assumes that the two objects are of the same class . <nl> + / / @ param config_options Controls how the options are compared . <nl> + / / @ param other The other object to compare to . <nl> + / / @ param mismatch If the objects do not match , this parameter contains <nl> + / / the name of the option that triggered the match failure . <nl> + / / @ param True if the objects match , false otherwise . <nl> + virtual bool AreEquivalent ( const ConfigOptions & config_options , <nl> + const Configurable * other , <nl> + std : : string * name ) const ; <nl> + <nl> + / / Returns a pretty - printed , human - readable version of the options . <nl> + / / This method is typically used to dump the options to a log file . <nl> + / / Classes should override this method <nl> + virtual std : : string GetPrintableOptions ( ) const { return " " ; } <nl> + <nl> + / / Validates that the settings are valid / consistent and performs any object <nl> + / / initialization required by this object . 
This method may be called as part <nl> + / / of Configure ( if invoke_prepare_options is set ) , or may be invoked <nl> + / / separately . <nl> + / / <nl> + / / Once an object has been prepared , non - mutable options can no longer be <nl> + / / updated . <nl> + / / <nl> + / / Classes must override this method to provide any implementation - specific <nl> + / / initialization , such as opening log files or setting up cache parameters . <nl> + / / Implementations should be idempotent ( e . g . don ' t re - open the log file or <nl> + / / reconfigure the cache ) , as there is the potential this method can be called <nl> + / / more than once . <nl> + / / <nl> + / / By default , this method will also prepare all nested ( Inner and <nl> + / / OptionType : : kConfigurable ) objects . <nl> + / / <nl> + / / @ param config_options Controls how the object is prepared . Also contains <nl> + / / a Logger and Env that can be used to initialize this object . <nl> + / / @ return OK If the object was successfully initialized . <nl> + / / @ return InvalidArgument If this object could not be successfull <nl> + / / initialized . <nl> + virtual Status PrepareOptions ( const ConfigOptions & config_options ) ; <nl> + <nl> + / / Checks to see if the settings are valid for this object . <nl> + / / This method checks to see if the input DBOptions and ColumnFamilyOptions <nl> + / / are valid for the settings of this object . For example , an Env might not <nl> + / / support certain mmap modes or a TableFactory might require certain <nl> + / / settings . <nl> + / / <nl> + / / By default , this method will also validate all nested ( Inner and <nl> + / / OptionType : : kConfigurable ) objects . <nl> + / / <nl> + / / @ param db_opts The DBOptions to validate <nl> + / / @ param cf_opts The ColumnFamilyOptions to validate <nl> + / / @ return OK if the options are valid <nl> + / / @ return InvalidArgument If the arguments are not valid for the options <nl> + / / of the current object . <nl> + virtual Status ValidateOptions ( const DBOptions & db_opts , <nl> + const ColumnFamilyOptions & cf_opts ) const ; <nl> + <nl> + / / Returns true if this object has been initialized via PrepareOptions , false <nl> + / / otherwise . Once an object has been prepared , only mutable options may be <nl> + / / changed . <nl> + virtual bool IsPrepared ( ) const { return prepared_ ; } <nl> + <nl> + protected : <nl> + / / True once the object is prepared . Once the object is prepared , only <nl> + / / mutable options can be configured . <nl> + bool prepared_ ; <nl> + / / If this class is a wrapper ( has - a ) , this method should be <nl> + / / over - written to return the inner configurable ( like an EnvWrapper ) . <nl> + / / This method should NOT recurse , but should instead return the <nl> + / / direct Inner object . <nl> + virtual Configurable * Inner ( ) const { return nullptr ; } <nl> + <nl> + / / Returns the raw pointer for the associated named option . <nl> + / / The name is typically the name of an option registered via the <nl> + / / Classes may override this method to provide further specialization ( such as <nl> + / / returning a sub - option ) <nl> + / / <nl> + / / The default implemntation looks at the registered options . If the <nl> + / / input name matches that of a registered option , the pointer registered <nl> + / / with that name is returned . <nl> + / / e . g , , RegisterOptions ( " X " , & my_ptr , . . . 
) ; GetOptionsPtr ( " X " ) returns <nl> + / / " my_ptr " <nl> + virtual const void * GetOptionsPtr ( const std : : string & name ) const ; <nl> + <nl> + / / Method for allowing options to be configured outside of the normal <nl> + / / registered options framework . Classes may override this method if they <nl> + / / wish to support non - standard options implementations ( such as configuring <nl> + / / themselves from constant or simple " : " - separated strings . <nl> + / / <nl> + / / The default implementation does nothing and returns OK <nl> + virtual Status ParseStringOptions ( const ConfigOptions & config_options , <nl> + const std : : string & opts_str ) ; <nl> + <nl> + / / Internal method to configure an object from a map of name - value options . <nl> + / / This method uses the input config_options to drive the configuration of <nl> + / / the options in opt_map . Any option name that cannot be found from the <nl> + / / input set will be returned in " unused " . <nl> + / / <nl> + / / Classes may override this method to extend the functionality if required . <nl> + / / @ param config_options Controls how the options are configured and errors <nl> + / / handled . <nl> + / / @ param opts_map The set of options to configure <nl> + / / @ param unused Any options from opt_map that were not configured . <nl> + / / @ returns a Status based on the rules outlined in ConfigureFromMap <nl> + virtual Status ConfigureOptions ( <nl> + const ConfigOptions & config_options , <nl> + const std : : unordered_map < std : : string , std : : string > & opts_map , <nl> + std : : unordered_map < std : : string , std : : string > * unused ) ; <nl> + <nl> + # ifndef ROCKSDB_LITE <nl> + / / Method that configures a the specific opt_name from opt_value . <nl> + / / By default , this method calls opt_info . ParseOption with the <nl> + / / input parameters . <nl> + / / Classes may override this method to extend the functionality , or <nl> + / / change the returned Status . <nl> + virtual Status ParseOption ( const ConfigOptions & config_options , <nl> + const OptionTypeInfo & opt_info , <nl> + const std : : string & opt_name , <nl> + const std : : string & opt_value , void * opt_ptr ) ; <nl> + <nl> + / / Internal method to see if the single option name / info matches for this and <nl> + / / that Classes may override this value to change its behavior . <nl> + / / @ param config_options Controls how the options are being matched <nl> + / / @ param opt_info The OptionTypeInfo registered for this option name <nl> + / / that controls what field is matched ( offset ) and how ( type ) . <nl> + / / @ param name The name associated with this opt_info . <nl> + / / @ param this_ptr The base pointer to compare to . This is the object <nl> + / / registered for <nl> + / / for this OptionTypeInfo . <nl> + / / @ param that_ptr The other pointer to compare to . This is the object <nl> + / / registered for <nl> + / / for this OptionTypeInfo . <nl> + / / @ param bad_name If the match fails , the name of the option that failed to <nl> + / / match . <nl> + virtual bool OptionsAreEqual ( const ConfigOptions & config_options , <nl> + const OptionTypeInfo & opt_info , <nl> + const std : : string & name , <nl> + const void * const this_ptr , <nl> + const void * const that_ptr , <nl> + std : : string * bad_name ) const ; <nl> + # endif <nl> + # ifndef ROCKSDB_LITE <nl> + / / Internal method to serialize options ( ToString ) <nl> + / / Classes may override this value to change its behavior . 
<nl> + virtual std : : string SerializeOptions ( const ConfigOptions & config_options , <nl> + const std : : string & header ) const ; <nl> + # endif / / ROCKSDB_LITE <nl> + <nl> + / / Given a name ( e . g . rocksdb . my . type . opt ) , returns the short name ( opt ) <nl> + virtual std : : string GetOptionName ( const std : : string & long_name ) const ; <nl> + <nl> + private : <nl> + / / Contains the collection of options ( name , opt_ptr , opt_map ) associated with <nl> + / / this object . This collection is typically set in the constructor of the <nl> + / / Configurable option via <nl> + std : : vector < RegisteredOptions > options_ ; <nl> + } ; <nl> + } / / namespace ROCKSDB_NAMESPACE <nl> mmm a / include / rocksdb / convenience . h <nl> ppp b / include / rocksdb / convenience . h <nl> struct ConfigOptions { <nl> / / When true , any unused options will be ignored and OK will be returned <nl> bool ignore_unknown_options = false ; <nl> <nl> + / / When true , any unsupported options will be ignored and OK will be returned <nl> + bool ignore_unsupported_options = true ; <nl> + <nl> / / If the strings are escaped ( old - style ? ) <nl> bool input_strings_escaped = true ; <nl> <nl> + / / Whether or not to invoke PrepareOptions after configure is called . <nl> + bool invoke_prepare_options = true ; <nl> + <nl> / / The separator between options when converting to a string <nl> std : : string delimiter = " ; " ; <nl> <nl> mmm a / include / rocksdb / table . h <nl> ppp b / include / rocksdb / table . h <nl> <nl> # include < string > <nl> # include < unordered_map > <nl> <nl> - # include " rocksdb / cache . h " <nl> + # include " rocksdb / configurable . h " <nl> # include " rocksdb / env . h " <nl> - # include " rocksdb / iterator . h " <nl> # include " rocksdb / options . h " <nl> # include " rocksdb / status . h " <nl> <nl> namespace ROCKSDB_NAMESPACE { <nl> <nl> / / - - Block - based Table <nl> + class Cache ; <nl> class FilterPolicy ; <nl> class FlushBlockPolicyFactory ; <nl> class PersistentCache ; <nl> enum ChecksumType : char { <nl> <nl> / / For advanced user only <nl> struct BlockBasedTableOptions { <nl> + static const char * kName ( ) { return " BlockTableOptions " ; } ; <nl> / / @ flush_block_policy_factory creates the instances of flush block policy . <nl> / / which provides a configurable way to determine when to flush a block in <nl> / / the block based tables . If not set , table builder will use the default <nl> struct PlainTablePropertyNames { <nl> const uint32_t kPlainTableVariableLength = 0 ; <nl> <nl> struct PlainTableOptions { <nl> + static const char * kName ( ) { return " PlainTableOptions " ; } ; <nl> / / @ user_key_len : plain table has optimization for fix - sized keys , which can <nl> / / be specified via user_key_len . Alternatively , you can pass <nl> / / ` kPlainTableVariableLength ` if your keys have variable <nl> struct CuckooTablePropertyNames { <nl> } ; <nl> <nl> struct CuckooTableOptions { <nl> + static const char * kName ( ) { return " CuckooTableOptions " ; } ; <nl> + <nl> / / Determines the utilization of hash tables . Smaller values <nl> / / result in larger hash tables with fewer collisions . <nl> double hash_table_ratio = 0 . 9 ; <nl> extern TableFactory * NewCuckooTableFactory ( <nl> class RandomAccessFileReader ; <nl> <nl> / / A base class for table factories . 
<nl> - class TableFactory { <nl> + class TableFactory : public Configurable { <nl> public : <nl> - virtual ~ TableFactory ( ) { } <nl> + virtual ~ TableFactory ( ) override { } <nl> + <nl> + static const char * kBlockCacheOpts ( ) { return " BlockCache " ; } ; <nl> + static const char * kBlockBasedTableName ( ) { return " BlockBasedTable " ; } ; <nl> + static const char * kPlainTableName ( ) { return " PlainTable " ; } <nl> + static const char * kCuckooTableName ( ) { return " CuckooTable " ; } ; <nl> + <nl> + / / Creates and configures a new TableFactory from the input options and id . <nl> + static Status CreateFromString ( const ConfigOptions & config_options , <nl> + const std : : string & id , <nl> + std : : shared_ptr < TableFactory > * factory ) ; <nl> <nl> / / The type of the table . <nl> / / <nl> class TableFactory { <nl> / / by any clients of this package . <nl> virtual const char * Name ( ) const = 0 ; <nl> <nl> + / / Returns true if the class is an instance of the input name . <nl> + / / This is typically determined by if the input name matches the <nl> + / / name of this object . <nl> + virtual bool IsInstanceOf ( const std : : string & name ) const { <nl> + return name = = Name ( ) ; <nl> + } <nl> + <nl> / / Returns a Table object table that can fetch data from file specified <nl> / / in parameter file . It ' s the caller ' s responsibility to make sure <nl> / / file is in the correct format . <nl> class TableFactory { <nl> const TableBuilderOptions & table_builder_options , <nl> uint32_t column_family_id , WritableFileWriter * file ) const = 0 ; <nl> <nl> - / / Sanitizes the specified DB Options and ColumnFamilyOptions . <nl> - / / <nl> - / / If the function cannot find a way to sanitize the input DB Options , <nl> - / / a non - ok Status will be returned . <nl> - virtual Status SanitizeOptions ( const DBOptions & db_opts , <nl> - const ColumnFamilyOptions & cf_opts ) const = 0 ; <nl> - <nl> - / / Return a string that contains printable format of table configurations . <nl> - / / RocksDB prints configurations at DB Open ( ) . <nl> - virtual std : : string GetPrintableTableOptions ( ) const = 0 ; <nl> - <nl> - virtual Status GetOptionString ( const ConfigOptions & / * config_options * / , <nl> - std : : string * / * opt_string * / ) const { <nl> - return Status : : NotSupported ( <nl> - " The table factory doesn ' t implement GetOptionString ( ) . " ) ; <nl> - } <nl> - <nl> - / / Returns the raw pointer of the table options that is used by this <nl> - / / TableFactory , or nullptr if this function is not supported . <nl> - / / Since the return value is a raw pointer , the TableFactory owns the <nl> - / / pointer and the caller should not delete the pointer . <nl> - / / <nl> - / / In certain case , it is desirable to alter the underlying options when the <nl> - / / TableFactory is not used by any open DB by casting the returned pointer <nl> - / / to the right class . For instance , if BlockBasedTableFactory is used , <nl> - / / then the pointer can be casted to BlockBasedTableOptions . <nl> - / / <nl> - / / Note that changing the underlying TableFactory options while the <nl> - / / TableFactory is currently used by any open DB is undefined behavior . <nl> - / / Developers should use DB : : SetOption ( ) instead to dynamically change <nl> - / / options while the DB is open . 
<nl> - virtual void * GetOptions ( ) { return nullptr ; } <nl> - <nl> / / Return is delete range supported <nl> virtual bool IsDeleteRangeSupported ( ) const { return false ; } <nl> } ; <nl> mmm a / include / rocksdb / utilities / object_registry . h <nl> ppp b / include / rocksdb / utilities / object_registry . h <nl> class ObjectRegistry { <nl> <nl> / / Creates a new unique T using the input factory functions . <nl> / / Returns OK if a new unique T was successfully created <nl> - / / Returns NotFound if the type / target could not be created <nl> + / / Returns NotSupported if the type / target could not be created <nl> / / Returns InvalidArgument if the factory return an unguarded object <nl> / / ( meaning it cannot be managed by a unique ptr ) <nl> template < typename T > <nl> class ObjectRegistry { <nl> std : : string errmsg ; <nl> T * ptr = NewObject ( target , result , & errmsg ) ; <nl> if ( ptr = = nullptr ) { <nl> - return Status : : NotFound ( errmsg , target ) ; <nl> + return Status : : NotSupported ( errmsg , target ) ; <nl> } else if ( * result ) { <nl> return Status : : OK ( ) ; <nl> } else { <nl> class ObjectRegistry { <nl> <nl> / / Creates a new shared T using the input factory functions . <nl> / / Returns OK if a new shared T was successfully created <nl> - / / Returns NotFound if the type / target could not be created <nl> + / / Returns NotSupported if the type / target could not be created <nl> / / Returns InvalidArgument if the factory return an unguarded object <nl> / / ( meaning it cannot be managed by a shared ptr ) <nl> template < typename T > <nl> class ObjectRegistry { <nl> std : : unique_ptr < T > guard ; <nl> T * ptr = NewObject ( target , & guard , & errmsg ) ; <nl> if ( ptr = = nullptr ) { <nl> - return Status : : NotFound ( errmsg , target ) ; <nl> + return Status : : NotSupported ( errmsg , target ) ; <nl> } else if ( guard ) { <nl> result - > reset ( guard . release ( ) ) ; <nl> return Status : : OK ( ) ; <nl> class ObjectRegistry { <nl> <nl> / / Creates a new static T using the input factory functions . <nl> / / Returns OK if a new static T was successfully created <nl> - / / Returns NotFound if the type / target could not be created <nl> + / / Returns NotSupported if the type / target could not be created <nl> / / Returns InvalidArgument if the factory return a guarded object <nl> / / ( meaning it is managed by a unique ptr ) <nl> template < typename T > <nl> class ObjectRegistry { <nl> std : : unique_ptr < T > guard ; <nl> T * ptr = NewObject ( target , & guard , & errmsg ) ; <nl> if ( ptr = = nullptr ) { <nl> - return Status : : NotFound ( errmsg , target ) ; <nl> + return Status : : NotSupported ( errmsg , target ) ; <nl> } else if ( guard . get ( ) ) { <nl> return Status : : InvalidArgument ( std : : string ( " Cannot make a static " ) + <nl> T : : Type ( ) + " from a guarded one " , <nl> similarity index 85 % <nl> rename from options / options_type . h <nl> rename to include / rocksdb / utilities / options_type . h <nl> mmm a / options / options_type . h <nl> ppp b / include / rocksdb / utilities / options_type . h <nl> <nl> namespace ROCKSDB_NAMESPACE { <nl> class OptionTypeInfo ; <nl> <nl> + / / The underlying " class / type " of the option . <nl> + / / This enum is used to determine how the option should <nl> + / / be converted to / from strings and compared . 
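A brief, hypothetical caller showing the effect of the NotFound-to-NotSupported change in ObjectRegistry above: code that wants to tolerate unregistered ids can now key off Status::IsNotSupported(), which lines up with the new ConfigOptions::ignore_unsupported_options flag.

#include <memory>
#include <string>

#include "rocksdb/merge_operator.h"
#include "rocksdb/status.h"
#include "rocksdb/utilities/object_registry.h"

namespace ROCKSDB_NAMESPACE {

// Hypothetical helper: skip ids that no registered factory recognizes.
Status LoadMergeOperatorOrSkip(const std::string& id,
                               std::shared_ptr<MergeOperator>* result) {
  Status s =
      ObjectRegistry::NewInstance()->NewSharedObject<MergeOperator>(id, result);
  if (s.IsNotSupported()) {
    result->reset();  // previously this case surfaced as NotFound
    return Status::OK();
  }
  return s;
}

}  // namespace ROCKSDB_NAMESPACE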
<nl> enum class OptionType { <nl> kBoolean , <nl> kInt , <nl> enum class OptionType { <nl> kCompactionPri , <nl> kSliceTransform , <nl> kCompressionType , <nl> - kTableFactory , <nl> kComparator , <nl> kCompactionFilter , <nl> kCompactionFilterFactory , <nl> enum class OptionType { <nl> kEnum , <nl> kStruct , <nl> kVector , <nl> + kConfigurable , <nl> kUnknown , <nl> } ; <nl> <nl> enum class OptionVerificationType { <nl> / / independently <nl> } ; <nl> <nl> + / / A set of modifier flags used to alter how an option is evaluated or <nl> + / / processed . These flags can be combined together ( e . g . kMutable | kShared ) . <nl> + / / The kCompare flags can be used to control if / when options are compared . <nl> + / / If kCompareNever is set , two related options would never be compared ( always <nl> + / / equal ) . If kCompareExact is set , the options will only be compared if the <nl> + / / sanity mode <nl> + / / is exact . <nl> + / / kMutable means the option can be changed after it is prepared <nl> + / / kShared means the option is contained in a std : : shared_ptr <nl> + / / kUnique means the option is contained in a std : : unique_ptr <nl> + / / kRawPointer means the option is a raw pointer value . <nl> + / / kAllowNull means that an option is allowed to be null for verification <nl> + / / purposes . <nl> + / / kDontSerialize means this option should not be serialized and included in <nl> + / / the string representation . <nl> + / / kDontPrepare means do not call PrepareOptions for this pointer value . <nl> enum class OptionTypeFlags : uint32_t { <nl> kNone = 0x00 , / / No flags <nl> kCompareDefault = 0x0 , <nl> enum class OptionTypeFlags : uint32_t { <nl> kCompareExact = ConfigOptions : : kSanityLevelExactMatch , <nl> <nl> kMutable = 0x0100 , / / Option is mutable <nl> + kRawPointer = 0x0200 , / / The option is stored as a raw pointer <nl> + kShared = 0x0400 , / / The option is stored as a shared_ptr <nl> + kUnique = 0x0800 , / / The option is stored as a unique_ptr <nl> + kAllowNull = 0x1000 , / / The option can be null <nl> kDontSerialize = 0x2000 , / / Don ' t serialize the option <nl> + kDontPrepare = 0x4000 , / / Don ' t prepare or sanitize this option <nl> } ; <nl> <nl> inline OptionTypeFlags operator | ( const OptionTypeFlags & a , <nl> using EqualsFunc = std : : function < bool ( <nl> / / option type , and offset .
<nl> class OptionTypeInfo { <nl> public : <nl> - int offset_ ; <nl> - int mutable_offset_ ; <nl> - <nl> / / A simple " normal " , non - mutable Type " type " at offset <nl> OptionTypeInfo ( int offset , OptionType type ) <nl> : offset_ ( offset ) , <nl> - mutable_offset_ ( 0 ) , <nl> parse_func_ ( nullptr ) , <nl> serialize_func_ ( nullptr ) , <nl> equals_func_ ( nullptr ) , <nl> class OptionTypeInfo { <nl> verification_ ( OptionVerificationType : : kNormal ) , <nl> flags_ ( OptionTypeFlags : : kNone ) { } <nl> <nl> - / / A simple " normal " , mutable Type " type " at offset <nl> - OptionTypeInfo ( int offset , OptionType type , int mutable_offset ) <nl> - : offset_ ( offset ) , <nl> - mutable_offset_ ( mutable_offset ) , <nl> - parse_func_ ( nullptr ) , <nl> - serialize_func_ ( nullptr ) , <nl> - equals_func_ ( nullptr ) , <nl> - type_ ( type ) , <nl> - verification_ ( OptionVerificationType : : kNormal ) , <nl> - flags_ ( OptionTypeFlags : : kMutable ) { } <nl> - <nl> OptionTypeInfo ( int offset , OptionType type , <nl> - OptionVerificationType verification , OptionTypeFlags flags , <nl> - int mutable_offset ) <nl> + OptionVerificationType verification , OptionTypeFlags flags ) <nl> : offset_ ( offset ) , <nl> - mutable_offset_ ( mutable_offset ) , <nl> parse_func_ ( nullptr ) , <nl> serialize_func_ ( nullptr ) , <nl> equals_func_ ( nullptr ) , <nl> class OptionTypeInfo { <nl> <nl> OptionTypeInfo ( int offset , OptionType type , <nl> OptionVerificationType verification , OptionTypeFlags flags , <nl> - int mutable_offset , const ParseFunc & parse_func ) <nl> + const ParseFunc & parse_func ) <nl> : offset_ ( offset ) , <nl> - mutable_offset_ ( mutable_offset ) , <nl> parse_func_ ( parse_func ) , <nl> serialize_func_ ( nullptr ) , <nl> equals_func_ ( nullptr ) , <nl> class OptionTypeInfo { <nl> <nl> OptionTypeInfo ( int offset , OptionType type , <nl> OptionVerificationType verification , OptionTypeFlags flags , <nl> - int mutable_offset , const ParseFunc & parse_func , <nl> + const ParseFunc & parse_func , <nl> const SerializeFunc & serialize_func , <nl> const EqualsFunc & equals_func ) <nl> : offset_ ( offset ) , <nl> - mutable_offset_ ( mutable_offset ) , <nl> parse_func_ ( parse_func ) , <nl> serialize_func_ ( serialize_func ) , <nl> equals_func_ ( equals_func ) , <nl> class OptionTypeInfo { <nl> int offset , const std : : unordered_map < std : : string , T > * const map ) { <nl> return OptionTypeInfo ( <nl> offset , OptionType : : kEnum , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 , <nl> + OptionTypeFlags : : kNone , <nl> / / Uses the map argument to convert the input string into <nl> / / its corresponding enum value . If value is found in the map , <nl> / / addr is updated to the corresponding map entry . 
<nl> class OptionTypeInfo { <nl> static OptionTypeInfo Struct ( <nl> const std : : string & struct_name , <nl> const std : : unordered_map < std : : string , OptionTypeInfo > * struct_map , <nl> - int offset , OptionVerificationType verification , OptionTypeFlags flags , <nl> - int mutable_offset ) { <nl> + int offset , OptionVerificationType verification , OptionTypeFlags flags ) { <nl> return OptionTypeInfo ( <nl> - offset , OptionType : : kStruct , verification , flags , mutable_offset , <nl> + offset , OptionType : : kStruct , verification , flags , <nl> / / Parses the struct and updates the fields at addr <nl> [ struct_name , struct_map ] ( const ConfigOptions & opts , <nl> const std : : string & name , <nl> class OptionTypeInfo { <nl> const std : : string & struct_name , <nl> const std : : unordered_map < std : : string , OptionTypeInfo > * struct_map , <nl> int offset , OptionVerificationType verification , OptionTypeFlags flags , <nl> - int mutable_offset , const ParseFunc & parse_func ) { <nl> + const ParseFunc & parse_func ) { <nl> return OptionTypeInfo ( <nl> - offset , OptionType : : kStruct , verification , flags , mutable_offset , <nl> - parse_func , <nl> + offset , OptionType : : kStruct , verification , flags , parse_func , <nl> [ struct_name , struct_map ] ( const ConfigOptions & opts , <nl> const std : : string & name , const char * addr , <nl> std : : string * value ) { <nl> class OptionTypeInfo { <nl> template < typename T > <nl> static OptionTypeInfo Vector ( int _offset , <nl> OptionVerificationType _verification , <nl> - OptionTypeFlags _flags , int _mutable_offset , <nl> + OptionTypeFlags _flags , <nl> const OptionTypeInfo & elem_info , <nl> char separator = ' : ' ) { <nl> return OptionTypeInfo ( <nl> - _offset , OptionType : : kVector , _verification , _flags , _mutable_offset , <nl> + _offset , OptionType : : kVector , _verification , _flags , <nl> [ elem_info , separator ] ( const ConfigOptions & opts , <nl> const std : : string & name , <nl> const std : : string & value , char * addr ) { <nl> class OptionTypeInfo { <nl> } <nl> } <nl> <nl> + / / Returns true if the option is allowed to be null . <nl> + / / Options can be null if the verification type is allow from null <nl> + / / or if the flags specify allow null . <nl> + bool CanBeNull ( ) const { <nl> + return ( IsEnabled ( OptionTypeFlags : : kAllowNull ) | | <nl> + IsEnabled ( OptionVerificationType : : kByNameAllowFromNull ) ) ; <nl> + } <nl> + <nl> + bool IsSharedPtr ( ) const { return IsEnabled ( OptionTypeFlags : : kShared ) ; } <nl> + <nl> + bool IsUniquePtr ( ) const { return IsEnabled ( OptionTypeFlags : : kUnique ) ; } <nl> + <nl> + bool IsRawPtr ( ) const { return IsEnabled ( OptionTypeFlags : : kRawPointer ) ; } <nl> + <nl> bool IsByName ( ) const { <nl> return ( verification_ = = OptionVerificationType : : kByName | | <nl> verification_ = = OptionVerificationType : : kByNameAllowNull | | <nl> class OptionTypeInfo { <nl> <nl> bool IsStruct ( ) const { return ( type_ = = OptionType : : kStruct ) ; } <nl> <nl> + bool IsConfigurable ( ) const { return ( type_ = = OptionType : : kConfigurable ) ; } <nl> + <nl> + / / Returns the underlying pointer for the type at base_addr <nl> + / / The value returned is the underlying " raw " pointer , offset from base . 
<nl> + template < typename T > <nl> + const T * AsRawPointer ( const void * const base_addr ) const { <nl> + if ( base_addr = = nullptr ) { <nl> + return nullptr ; <nl> + } <nl> + const auto opt_addr = reinterpret_cast < const char * > ( base_addr ) + offset_ ; <nl> + if ( IsUniquePtr ( ) ) { <nl> + const std : : unique_ptr < T > * ptr = <nl> + reinterpret_cast < const std : : unique_ptr < T > * > ( opt_addr ) ; <nl> + return ptr - > get ( ) ; <nl> + } else if ( IsSharedPtr ( ) ) { <nl> + const std : : shared_ptr < T > * ptr = <nl> + reinterpret_cast < const std : : shared_ptr < T > * > ( opt_addr ) ; <nl> + return ptr - > get ( ) ; <nl> + } else if ( IsRawPtr ( ) ) { <nl> + const T * const * ptr = reinterpret_cast < const T * const * > ( opt_addr ) ; <nl> + return * ptr ; <nl> + } else { <nl> + return reinterpret_cast < const T * > ( opt_addr ) ; <nl> + } <nl> + } <nl> + <nl> + / / Returns the underlying pointer for the type at base_addr <nl> + / / The value returned is the underlying " raw " pointer , offset from base . <nl> + template < typename T > <nl> + T * AsRawPointer ( void * base_addr ) const { <nl> + if ( base_addr = = nullptr ) { <nl> + return nullptr ; <nl> + } <nl> + auto opt_addr = reinterpret_cast < char * > ( base_addr ) + offset_ ; <nl> + if ( IsUniquePtr ( ) ) { <nl> + std : : unique_ptr < T > * ptr = reinterpret_cast < std : : unique_ptr < T > * > ( opt_addr ) ; <nl> + return ptr - > get ( ) ; <nl> + } else if ( IsSharedPtr ( ) ) { <nl> + std : : shared_ptr < T > * ptr = reinterpret_cast < std : : shared_ptr < T > * > ( opt_addr ) ; <nl> + return ptr - > get ( ) ; <nl> + } else if ( IsRawPtr ( ) ) { <nl> + T * * ptr = reinterpret_cast < T * * > ( opt_addr ) ; <nl> + return * ptr ; <nl> + } else { <nl> + return reinterpret_cast < T * > ( opt_addr ) ; <nl> + } <nl> + } <nl> + <nl> / / Parses the option in " opt_value " according to the rules of this class <nl> - / / and updates the value at " opt_addr " . <nl> + / / and updates the value at " opt_ptr " . <nl> / / On success , Status : : OK ( ) is returned . On failure : <nl> / / NotFound means the opt_name is not valid for this option <nl> / / NotSupported means we do not know how to parse the value for this option <nl> / / InvalidArgument means the opt_value is not valid for this option . <nl> Status Parse ( const ConfigOptions & config_options , const std : : string & opt_name , <nl> - const std : : string & opt_value , char * opt_addr ) const ; <nl> + const std : : string & opt_value , void * const opt_ptr ) const ; <nl> <nl> / / Serializes the option in " opt_addr " according to the rules of this class <nl> / / into the value at " opt_value " . <nl> Status Serialize ( const ConfigOptions & config_options , <nl> - const std : : string & opt_name , const char * opt_addr , <nl> + const std : : string & opt_name , const void * const opt_ptr , <nl> std : : string * opt_value ) const ; <nl> <nl> / / Compares the " addr1 " and " addr2 " values according to the rules of this <nl> / / class and returns true if they match . On a failed match , mismatch is the <nl> / / name of the option that failed to match . <nl> bool AreEqual ( const ConfigOptions & config_options , <nl> - const std : : string & opt_name , const char * addr1 , <nl> - const char * addr2 , std : : string * mismatch ) const ; <nl> + const std : : string & opt_name , const void * const addr1 , <nl> + const void * const addr2 , std : : string * mismatch ) const ; <nl> <nl> / / Used to override the match rules for " ByName " options . 
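To make the pointer-mode handling above concrete, the sketch below (hypothetical struct and names, not code from this patch) registers a std::shared_ptr member with kShared so that AsRawPointer resolves the inner pointer rather than treating the field bytes as the object itself.

#include <cstddef>
#include <memory>

#include "rocksdb/table.h"
#include "rocksdb/utilities/options_type.h"

namespace ROCKSDB_NAMESPACE {

// Hypothetical options struct with a shared_ptr member.
struct SampleOptions {
  std::shared_ptr<TableFactory> factory;
};

// kShared tells OptionTypeInfo that the field at this offset is a
// std::shared_ptr, so AsRawPointer() dereferences it.
static const OptionTypeInfo kFactoryInfo(
    offsetof(SampleOptions, factory), OptionType::kConfigurable,
    OptionVerificationType::kByName,
    OptionTypeFlags::kShared | OptionTypeFlags::kAllowNull);

const TableFactory* ResolveFactory(const SampleOptions& opts) {
  return kFactoryInfo.AsRawPointer<TableFactory>(&opts);
}

}  // namespace ROCKSDB_NAMESPACE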
<nl> bool AreEqualByName ( const ConfigOptions & config_options , <nl> - const std : : string & opt_name , const char * this_offset , <nl> - const char * that_offset ) const ; <nl> + const std : : string & opt_name , const void * const this_ptr , <nl> + const void * const that_ptr ) const ; <nl> bool AreEqualByName ( const ConfigOptions & config_options , <nl> - const std : : string & opt_name , const char * this_ptr , <nl> + const std : : string & opt_name , const void * const this_ptr , <nl> const std : : string & that_value ) const ; <nl> <nl> / / Parses the input value according to the map for the struct at opt_addr <nl> class OptionTypeInfo { <nl> size_t * end , std : : string * token ) ; <nl> <nl> private : <nl> + int offset_ ; <nl> + <nl> / / The optional function to convert a string to its representation <nl> ParseFunc parse_func_ ; <nl> <nl> mmm a / options / cf_options . cc <nl> ppp b / options / cf_options . cc <nl> <nl> # include < limits > <nl> # include < string > <nl> <nl> + # include " options / configurable_helper . h " <nl> # include " options / db_options . h " <nl> # include " options / options_helper . h " <nl> + # include " options / options_parser . h " <nl> # include " port / port . h " <nl> # include " rocksdb / concurrent_task_limiter . h " <nl> + # include " rocksdb / configurable . h " <nl> # include " rocksdb / convenience . h " <nl> # include " rocksdb / env . h " <nl> # include " rocksdb / file_system . h " <nl> # include " rocksdb / merge_operator . h " <nl> # include " rocksdb / options . h " <nl> + # include " rocksdb / table . h " <nl> # include " rocksdb / utilities / object_registry . h " <nl> - # include " table / block_based / block_based_table_factory . h " <nl> - # include " table / plain / plain_table_factory . h " <nl> + # include " rocksdb / utilities / options_type . h " <nl> # include " util / cast_util . h " <nl> <nl> namespace ROCKSDB_NAMESPACE { <nl> namespace ROCKSDB_NAMESPACE { <nl> / / http : / / en . cppreference . com / w / cpp / concept / StandardLayoutType <nl> / / https : / / gist . github . com / graphitemaster / 494f21190bb2c63c5516 <nl> # ifndef ROCKSDB_LITE <nl> - ColumnFamilyOptions OptionsHelper : : dummy_cf_options ; <nl> + static ColumnFamilyOptions dummy_cf_options ; <nl> template < typename T1 > <nl> int offset_of ( T1 ColumnFamilyOptions : : * member ) { <nl> - return int ( size_t ( & ( OptionsHelper : : dummy_cf_options . * member ) ) - <nl> - size_t ( & OptionsHelper : : dummy_cf_options ) ) ; <nl> + return int ( size_t ( & ( dummy_cf_options . * member ) ) - size_t ( & dummy_cf_options ) ) ; <nl> } <nl> template < typename T1 > <nl> int offset_of ( T1 AdvancedColumnFamilyOptions : : * member ) { <nl> - return int ( size_t ( & ( OptionsHelper : : dummy_cf_options . * member ) ) - <nl> - size_t ( & OptionsHelper : : dummy_cf_options ) ) ; <nl> + return int ( size_t ( & ( dummy_cf_options . 
* member ) ) - size_t ( & dummy_cf_options ) ) ; <nl> } <nl> <nl> static Status ParseCompressionOptions ( const std : : string & value , <nl> static std : : unordered_map < std : : string , OptionTypeInfo > <nl> compression_options_type_info = { <nl> { " window_bits " , <nl> { offsetof ( struct CompressionOptions , window_bits ) , OptionType : : kInt , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kMutable , <nl> - offsetof ( struct CompressionOptions , window_bits ) } } , <nl> + OptionVerificationType : : kNormal , OptionTypeFlags : : kMutable } } , <nl> { " level " , <nl> { offsetof ( struct CompressionOptions , level ) , OptionType : : kInt , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kMutable , <nl> - offsetof ( struct CompressionOptions , level ) } } , <nl> + OptionVerificationType : : kNormal , OptionTypeFlags : : kMutable } } , <nl> { " strategy " , <nl> { offsetof ( struct CompressionOptions , strategy ) , OptionType : : kInt , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kMutable , <nl> - offsetof ( struct CompressionOptions , strategy ) } } , <nl> + OptionVerificationType : : kNormal , OptionTypeFlags : : kMutable } } , <nl> { " max_dict_bytes " , <nl> { offsetof ( struct CompressionOptions , max_dict_bytes ) , OptionType : : kInt , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kMutable , <nl> - offsetof ( struct CompressionOptions , max_dict_bytes ) } } , <nl> + OptionVerificationType : : kNormal , OptionTypeFlags : : kMutable } } , <nl> { " zstd_max_train_bytes " , <nl> { offsetof ( struct CompressionOptions , zstd_max_train_bytes ) , <nl> OptionType : : kUInt32T , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kMutable , <nl> - offsetof ( struct CompressionOptions , zstd_max_train_bytes ) } } , <nl> + OptionTypeFlags : : kMutable } } , <nl> { " parallel_threads " , <nl> { offsetof ( struct CompressionOptions , parallel_threads ) , <nl> OptionType : : kUInt32T , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kMutable , <nl> - offsetof ( struct CompressionOptions , parallel_threads ) } } , <nl> + OptionTypeFlags : : kMutable } } , <nl> { " enabled " , <nl> { offsetof ( struct CompressionOptions , enabled ) , OptionType : : kBoolean , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kMutable , <nl> - offsetof ( struct CompressionOptions , enabled ) } } , <nl> + OptionVerificationType : : kNormal , OptionTypeFlags : : kMutable } } , <nl> } ; <nl> <nl> static std : : unordered_map < std : : string , OptionTypeInfo > <nl> static std : : unordered_map < std : : string , OptionTypeInfo > <nl> { " max_table_files_size " , <nl> { offsetof ( struct CompactionOptionsFIFO , max_table_files_size ) , <nl> OptionType : : kUInt64T , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kMutable , <nl> - offsetof ( struct CompactionOptionsFIFO , max_table_files_size ) } } , <nl> + OptionTypeFlags : : kMutable } } , <nl> { " ttl " , <nl> { 0 , OptionType : : kUInt64T , OptionVerificationType : : kDeprecated , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " allow_compaction " , <nl> { offsetof ( struct CompactionOptionsFIFO , allow_compaction ) , <nl> OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kMutable , <nl> - offsetof ( struct CompactionOptionsFIFO , allow_compaction ) } } } ; <nl> + OptionTypeFlags : : kMutable } } , <nl> + } ; <nl> <nl> static std : : unordered_map < std 
: : string , OptionTypeInfo > <nl> universal_compaction_options_type_info = { <nl> { " size_ratio " , <nl> { offsetof ( class CompactionOptionsUniversal , size_ratio ) , <nl> OptionType : : kUInt , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kMutable , <nl> - offsetof ( class CompactionOptionsUniversal , size_ratio ) } } , <nl> + OptionTypeFlags : : kMutable } } , <nl> { " min_merge_width " , <nl> { offsetof ( class CompactionOptionsUniversal , min_merge_width ) , <nl> OptionType : : kUInt , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kMutable , <nl> - offsetof ( class CompactionOptionsUniversal , min_merge_width ) } } , <nl> + OptionTypeFlags : : kMutable } } , <nl> { " max_merge_width " , <nl> { offsetof ( class CompactionOptionsUniversal , max_merge_width ) , <nl> OptionType : : kUInt , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kMutable , <nl> - offsetof ( class CompactionOptionsUniversal , max_merge_width ) } } , <nl> + OptionTypeFlags : : kMutable } } , <nl> { " max_size_amplification_percent " , <nl> { offsetof ( class CompactionOptionsUniversal , <nl> max_size_amplification_percent ) , <nl> OptionType : : kUInt , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kMutable , <nl> - offsetof ( class CompactionOptionsUniversal , <nl> - max_size_amplification_percent ) } } , <nl> + OptionTypeFlags : : kMutable } } , <nl> { " compression_size_percent " , <nl> { offsetof ( class CompactionOptionsUniversal , compression_size_percent ) , <nl> OptionType : : kInt , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kMutable , <nl> - offsetof ( class CompactionOptionsUniversal , <nl> - compression_size_percent ) } } , <nl> + OptionTypeFlags : : kMutable } } , <nl> { " stop_style " , <nl> { offsetof ( class CompactionOptionsUniversal , stop_style ) , <nl> OptionType : : kCompactionStopStyle , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kMutable , <nl> - offsetof ( class CompactionOptionsUniversal , stop_style ) } } , <nl> + OptionTypeFlags : : kMutable } } , <nl> { " allow_trivial_move " , <nl> { offsetof ( class CompactionOptionsUniversal , allow_trivial_move ) , <nl> OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kMutable , <nl> - offsetof ( class CompactionOptionsUniversal , allow_trivial_move ) } } } ; <nl> + OptionTypeFlags : : kMutable } } } ; <nl> <nl> - std : : unordered_map < std : : string , OptionTypeInfo > <nl> - OptionsHelper : : cf_options_type_info = { <nl> - / * not yet supported <nl> - CompressionOptions compression_opts ; <nl> - TablePropertiesCollectorFactories table_properties_collector_factories ; <nl> - typedef std : : vector < std : : shared_ptr < TablePropertiesCollectorFactory > > <nl> - TablePropertiesCollectorFactories ; <nl> - UpdateStatus ( * inplace_callback ) ( char * existing_value , <nl> - uint34_t * existing_value_size , <nl> - Slice delta_value , <nl> - std : : string * merged_value ) ; <nl> - std : : vector < DbPath > cf_paths ; <nl> - * / <nl> + static std : : unordered_map < std : : string , OptionTypeInfo > <nl> + cf_mutable_options_type_info = { <nl> { " report_bg_io_stats " , <nl> - { offset_of ( & ColumnFamilyOptions : : report_bg_io_stats ) , <nl> + { offsetof ( struct MutableCFOptions , report_bg_io_stats ) , <nl> OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kMutable , <nl> - offsetof ( struct MutableCFOptions , report_bg_io_stats ) } } , <nl> - { " 
compaction_measure_io_stats " , <nl> - { 0 , OptionType : : kBoolean , OptionVerificationType : : kDeprecated , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kMutable } } , <nl> { " disable_auto_compactions " , <nl> - { offset_of ( & ColumnFamilyOptions : : disable_auto_compactions ) , <nl> + { offsetof ( struct MutableCFOptions , disable_auto_compactions ) , <nl> OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kMutable , <nl> - offsetof ( struct MutableCFOptions , disable_auto_compactions ) } } , <nl> + OptionTypeFlags : : kMutable } } , <nl> { " filter_deletes " , <nl> { 0 , OptionType : : kBoolean , OptionVerificationType : : kDeprecated , <nl> - OptionTypeFlags : : kMutable , 0 } } , <nl> - { " inplace_update_support " , <nl> - { offset_of ( & ColumnFamilyOptions : : inplace_update_support ) , <nl> - OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> - { " level_compaction_dynamic_level_bytes " , <nl> - { offset_of ( & ColumnFamilyOptions : : level_compaction_dynamic_level_bytes ) , <nl> - OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> - { " optimize_filters_for_hits " , <nl> - { offset_of ( & ColumnFamilyOptions : : optimize_filters_for_hits ) , <nl> - OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kMutable } } , <nl> { " paranoid_file_checks " , <nl> - { offset_of ( & ColumnFamilyOptions : : paranoid_file_checks ) , <nl> - OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kMutable , <nl> - offsetof ( struct MutableCFOptions , paranoid_file_checks ) } } , <nl> - { " force_consistency_checks " , <nl> - { offset_of ( & ColumnFamilyOptions : : force_consistency_checks ) , <nl> + { offsetof ( struct MutableCFOptions , paranoid_file_checks ) , <nl> OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> - { " purge_redundant_kvs_while_flush " , <nl> - { offset_of ( & ColumnFamilyOptions : : purge_redundant_kvs_while_flush ) , <nl> - OptionType : : kBoolean , OptionVerificationType : : kDeprecated , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kMutable } } , <nl> { " verify_checksums_in_compaction " , <nl> { 0 , OptionType : : kBoolean , OptionVerificationType : : kDeprecated , <nl> - OptionTypeFlags : : kMutable , 0 } } , <nl> + OptionTypeFlags : : kMutable } } , <nl> { " soft_pending_compaction_bytes_limit " , <nl> - { offset_of ( & ColumnFamilyOptions : : soft_pending_compaction_bytes_limit ) , <nl> + { offsetof ( struct MutableCFOptions , <nl> + soft_pending_compaction_bytes_limit ) , <nl> OptionType : : kUInt64T , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kMutable , <nl> - offsetof ( struct MutableCFOptions , <nl> - soft_pending_compaction_bytes_limit ) } } , <nl> + OptionTypeFlags : : kMutable } } , <nl> { " hard_pending_compaction_bytes_limit " , <nl> - { offset_of ( & ColumnFamilyOptions : : hard_pending_compaction_bytes_limit ) , <nl> + { offsetof ( struct MutableCFOptions , <nl> + hard_pending_compaction_bytes_limit ) , <nl> OptionType : : kUInt64T , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kMutable , <nl> - offsetof ( struct MutableCFOptions , <nl> - hard_pending_compaction_bytes_limit ) } } , <nl> + OptionTypeFlags : : kMutable } } , <nl> { " 
hard_rate_limit " , <nl> { 0 , OptionType : : kDouble , OptionVerificationType : : kDeprecated , <nl> - OptionTypeFlags : : kMutable , 0 } } , <nl> + OptionTypeFlags : : kMutable } } , <nl> { " soft_rate_limit " , <nl> { 0 , OptionType : : kDouble , OptionVerificationType : : kDeprecated , <nl> - OptionTypeFlags : : kMutable , 0 } } , <nl> + OptionTypeFlags : : kMutable } } , <nl> { " max_compaction_bytes " , <nl> - { offset_of ( & ColumnFamilyOptions : : max_compaction_bytes ) , <nl> + { offsetof ( struct MutableCFOptions , max_compaction_bytes ) , <nl> OptionType : : kUInt64T , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kMutable , <nl> - offsetof ( struct MutableCFOptions , max_compaction_bytes ) } } , <nl> + OptionTypeFlags : : kMutable } } , <nl> { " expanded_compaction_factor " , <nl> { 0 , OptionType : : kInt , OptionVerificationType : : kDeprecated , <nl> - OptionTypeFlags : : kMutable , 0 } } , <nl> + OptionTypeFlags : : kMutable } } , <nl> { " level0_file_num_compaction_trigger " , <nl> - { offset_of ( & ColumnFamilyOptions : : level0_file_num_compaction_trigger ) , <nl> + { offsetof ( struct MutableCFOptions , level0_file_num_compaction_trigger ) , <nl> OptionType : : kInt , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kMutable , <nl> - offsetof ( struct MutableCFOptions , <nl> - level0_file_num_compaction_trigger ) } } , <nl> + OptionTypeFlags : : kMutable } } , <nl> { " level0_slowdown_writes_trigger " , <nl> - { offset_of ( & ColumnFamilyOptions : : level0_slowdown_writes_trigger ) , <nl> + { offsetof ( struct MutableCFOptions , level0_slowdown_writes_trigger ) , <nl> OptionType : : kInt , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kMutable , <nl> - offsetof ( struct MutableCFOptions , level0_slowdown_writes_trigger ) } } , <nl> + OptionTypeFlags : : kMutable } } , <nl> { " level0_stop_writes_trigger " , <nl> - { offset_of ( & ColumnFamilyOptions : : level0_stop_writes_trigger ) , <nl> + { offsetof ( struct MutableCFOptions , level0_stop_writes_trigger ) , <nl> OptionType : : kInt , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kMutable , <nl> - offsetof ( struct MutableCFOptions , level0_stop_writes_trigger ) } } , <nl> + OptionTypeFlags : : kMutable } } , <nl> { " max_grandparent_overlap_factor " , <nl> { 0 , OptionType : : kInt , OptionVerificationType : : kDeprecated , <nl> - OptionTypeFlags : : kMutable , 0 } } , <nl> - { " max_mem_compaction_level " , <nl> - { 0 , OptionType : : kInt , OptionVerificationType : : kDeprecated , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kMutable } } , <nl> { " max_write_buffer_number " , <nl> - { offset_of ( & ColumnFamilyOptions : : max_write_buffer_number ) , <nl> + { offsetof ( struct MutableCFOptions , max_write_buffer_number ) , <nl> OptionType : : kInt , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kMutable , <nl> - offsetof ( struct MutableCFOptions , max_write_buffer_number ) } } , <nl> - { " max_write_buffer_number_to_maintain " , <nl> - { offset_of ( & ColumnFamilyOptions : : max_write_buffer_number_to_maintain ) , <nl> - OptionType : : kInt , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> - { " max_write_buffer_size_to_maintain " , <nl> - { offset_of ( & ColumnFamilyOptions : : max_write_buffer_size_to_maintain ) , <nl> - OptionType : : kInt64T , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> - { " min_write_buffer_number_to_merge 
" , <nl> - { offset_of ( & ColumnFamilyOptions : : min_write_buffer_number_to_merge ) , <nl> - OptionType : : kInt , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> - { " num_levels " , <nl> - { offset_of ( & ColumnFamilyOptions : : num_levels ) , OptionType : : kInt , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kMutable } } , <nl> { " source_compaction_factor " , <nl> { 0 , OptionType : : kInt , OptionVerificationType : : kDeprecated , <nl> - OptionTypeFlags : : kMutable , 0 } } , <nl> + OptionTypeFlags : : kMutable } } , <nl> { " target_file_size_multiplier " , <nl> - { offset_of ( & ColumnFamilyOptions : : target_file_size_multiplier ) , <nl> + { offsetof ( struct MutableCFOptions , target_file_size_multiplier ) , <nl> OptionType : : kInt , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kMutable , <nl> - offsetof ( struct MutableCFOptions , target_file_size_multiplier ) } } , <nl> + OptionTypeFlags : : kMutable } } , <nl> { " arena_block_size " , <nl> - { offset_of ( & ColumnFamilyOptions : : arena_block_size ) , OptionType : : kSizeT , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kMutable , <nl> - offsetof ( struct MutableCFOptions , arena_block_size ) } } , <nl> + { offsetof ( struct MutableCFOptions , arena_block_size ) , <nl> + OptionType : : kSizeT , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kMutable } } , <nl> { " inplace_update_num_locks " , <nl> - { offset_of ( & ColumnFamilyOptions : : inplace_update_num_locks ) , <nl> + { offsetof ( struct MutableCFOptions , inplace_update_num_locks ) , <nl> OptionType : : kSizeT , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kMutable , <nl> - offsetof ( struct MutableCFOptions , inplace_update_num_locks ) } } , <nl> + OptionTypeFlags : : kMutable } } , <nl> { " max_successive_merges " , <nl> - { offset_of ( & ColumnFamilyOptions : : max_successive_merges ) , <nl> + { offsetof ( struct MutableCFOptions , max_successive_merges ) , <nl> OptionType : : kSizeT , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kMutable , <nl> - offsetof ( struct MutableCFOptions , max_successive_merges ) } } , <nl> + OptionTypeFlags : : kMutable } } , <nl> { " memtable_huge_page_size " , <nl> - { offset_of ( & ColumnFamilyOptions : : memtable_huge_page_size ) , <nl> + { offsetof ( struct MutableCFOptions , memtable_huge_page_size ) , <nl> OptionType : : kSizeT , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kMutable , <nl> - offsetof ( struct MutableCFOptions , memtable_huge_page_size ) } } , <nl> + OptionTypeFlags : : kMutable } } , <nl> { " memtable_prefix_bloom_huge_page_tlb_size " , <nl> { 0 , OptionType : : kSizeT , OptionVerificationType : : kDeprecated , <nl> - OptionTypeFlags : : kMutable , 0 } } , <nl> + OptionTypeFlags : : kMutable } } , <nl> { " write_buffer_size " , <nl> - { offset_of ( & ColumnFamilyOptions : : write_buffer_size ) , <nl> + { offsetof ( struct MutableCFOptions , write_buffer_size ) , <nl> OptionType : : kSizeT , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kMutable , <nl> - offsetof ( struct MutableCFOptions , write_buffer_size ) } } , <nl> - { " bloom_locality " , <nl> - { offset_of ( & ColumnFamilyOptions : : bloom_locality ) , OptionType : : kUInt32T , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kMutable } } , <nl> { " memtable_prefix_bloom_bits 
" , <nl> { 0 , OptionType : : kUInt32T , OptionVerificationType : : kDeprecated , <nl> - OptionTypeFlags : : kMutable , 0 } } , <nl> + OptionTypeFlags : : kMutable } } , <nl> { " memtable_prefix_bloom_size_ratio " , <nl> - { offset_of ( & ColumnFamilyOptions : : memtable_prefix_bloom_size_ratio ) , <nl> + { offsetof ( struct MutableCFOptions , memtable_prefix_bloom_size_ratio ) , <nl> OptionType : : kDouble , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kMutable , <nl> - offsetof ( struct MutableCFOptions , memtable_prefix_bloom_size_ratio ) } } , <nl> + OptionTypeFlags : : kMutable } } , <nl> { " memtable_prefix_bloom_probes " , <nl> { 0 , OptionType : : kUInt32T , OptionVerificationType : : kDeprecated , <nl> - OptionTypeFlags : : kMutable , 0 } } , <nl> + OptionTypeFlags : : kMutable } } , <nl> { " memtable_whole_key_filtering " , <nl> - { offset_of ( & ColumnFamilyOptions : : memtable_whole_key_filtering ) , <nl> + { offsetof ( struct MutableCFOptions , memtable_whole_key_filtering ) , <nl> OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kMutable , <nl> - offsetof ( struct MutableCFOptions , memtable_whole_key_filtering ) } } , <nl> + OptionTypeFlags : : kMutable } } , <nl> { " min_partial_merge_operands " , <nl> { 0 , OptionType : : kUInt32T , OptionVerificationType : : kDeprecated , <nl> - OptionTypeFlags : : kMutable , 0 } } , <nl> + OptionTypeFlags : : kMutable } } , <nl> { " max_bytes_for_level_base " , <nl> - { offset_of ( & ColumnFamilyOptions : : max_bytes_for_level_base ) , <nl> + { offsetof ( struct MutableCFOptions , max_bytes_for_level_base ) , <nl> OptionType : : kUInt64T , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kMutable , <nl> - offsetof ( struct MutableCFOptions , max_bytes_for_level_base ) } } , <nl> + OptionTypeFlags : : kMutable } } , <nl> { " snap_refresh_nanos " , <nl> { 0 , OptionType : : kUInt64T , OptionVerificationType : : kDeprecated , <nl> - OptionTypeFlags : : kMutable , 0 } } , <nl> + OptionTypeFlags : : kMutable } } , <nl> { " max_bytes_for_level_multiplier " , <nl> - { offset_of ( & ColumnFamilyOptions : : max_bytes_for_level_multiplier ) , <nl> + { offsetof ( struct MutableCFOptions , max_bytes_for_level_multiplier ) , <nl> OptionType : : kDouble , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kMutable , <nl> - offsetof ( struct MutableCFOptions , max_bytes_for_level_multiplier ) } } , <nl> + OptionTypeFlags : : kMutable } } , <nl> { " max_bytes_for_level_multiplier_additional " , <nl> OptionTypeInfo : : Vector < int > ( <nl> - offset_of ( & ColumnFamilyOptions : : <nl> - max_bytes_for_level_multiplier_additional ) , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kMutable , <nl> offsetof ( struct MutableCFOptions , <nl> max_bytes_for_level_multiplier_additional ) , <nl> - { 0 , OptionType : : kInt , 0 } ) } , <nl> + OptionVerificationType : : kNormal , OptionTypeFlags : : kMutable , <nl> + { 0 , OptionType : : kInt } ) } , <nl> { " max_sequential_skip_in_iterations " , <nl> - { offset_of ( & ColumnFamilyOptions : : max_sequential_skip_in_iterations ) , <nl> + { offsetof ( struct MutableCFOptions , max_sequential_skip_in_iterations ) , <nl> OptionType : : kUInt64T , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kMutable , <nl> - offsetof ( struct MutableCFOptions , <nl> - max_sequential_skip_in_iterations ) } } , <nl> + OptionTypeFlags : : kMutable } } , <nl> { " target_file_size_base " , <nl> - { offset_of ( & 
ColumnFamilyOptions : : target_file_size_base ) , <nl> + { offsetof ( struct MutableCFOptions , target_file_size_base ) , <nl> OptionType : : kUInt64T , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kMutable , <nl> - offsetof ( struct MutableCFOptions , target_file_size_base ) } } , <nl> - { " rate_limit_delay_max_milliseconds " , <nl> - { 0 , OptionType : : kUInt , OptionVerificationType : : kDeprecated , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kMutable } } , <nl> { " compression " , <nl> - { offset_of ( & ColumnFamilyOptions : : compression ) , <nl> + { offsetof ( struct MutableCFOptions , compression ) , <nl> + OptionType : : kCompressionType , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kMutable } } , <nl> + { " prefix_extractor " , <nl> + { offsetof ( struct MutableCFOptions , prefix_extractor ) , <nl> + OptionType : : kSliceTransform , OptionVerificationType : : kByNameAllowNull , <nl> + OptionTypeFlags : : kMutable } } , <nl> + { " compaction_options_fifo " , <nl> + OptionTypeInfo : : Struct ( <nl> + " compaction_options_fifo " , & fifo_compaction_options_type_info , <nl> + offsetof ( struct MutableCFOptions , compaction_options_fifo ) , <nl> + OptionVerificationType : : kNormal , OptionTypeFlags : : kMutable , <nl> + [ ] ( const ConfigOptions & opts , const std : : string & name , <nl> + const std : : string & value , char * addr ) { <nl> + / / This is to handle backward compatibility , where <nl> + / / compaction_options_fifo could be assigned a single scalar <nl> + / / value , say , like " 23 " , which would be assigned to <nl> + / / max_table_files_size . <nl> + if ( name = = " compaction_options_fifo " & & <nl> + value . find ( " = " ) = = std : : string : : npos ) { <nl> + / / Old format . Parse just a single uint64_t value . 
<nl> + auto options = reinterpret_cast < CompactionOptionsFIFO * > ( addr ) ; <nl> + options - > max_table_files_size = ParseUint64 ( value ) ; <nl> + return Status : : OK ( ) ; <nl> + } else { <nl> + return OptionTypeInfo : : ParseStruct ( <nl> + opts , " compaction_options_fifo " , <nl> + & fifo_compaction_options_type_info , name , value , addr ) ; <nl> + } <nl> + } ) } , <nl> + { " compaction_options_universal " , <nl> + OptionTypeInfo : : Struct ( <nl> + " compaction_options_universal " , <nl> + & universal_compaction_options_type_info , <nl> + offsetof ( struct MutableCFOptions , compaction_options_universal ) , <nl> + OptionVerificationType : : kNormal , OptionTypeFlags : : kMutable ) } , <nl> + { " ttl " , <nl> + { offsetof ( struct MutableCFOptions , ttl ) , OptionType : : kUInt64T , <nl> + OptionVerificationType : : kNormal , OptionTypeFlags : : kMutable } } , <nl> + { " periodic_compaction_seconds " , <nl> + { offsetof ( struct MutableCFOptions , periodic_compaction_seconds ) , <nl> + OptionType : : kUInt64T , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kMutable } } , <nl> + { " enable_blob_files " , <nl> + { offsetof ( struct MutableCFOptions , enable_blob_files ) , <nl> + OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kMutable } } , <nl> + { " min_blob_size " , <nl> + { offsetof ( struct MutableCFOptions , min_blob_size ) , <nl> + OptionType : : kUInt64T , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kMutable } } , <nl> + { " blob_file_size " , <nl> + { offsetof ( struct MutableCFOptions , blob_file_size ) , <nl> + OptionType : : kUInt64T , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kMutable } } , <nl> + { " blob_compression_type " , <nl> + { offsetof ( struct MutableCFOptions , blob_compression_type ) , <nl> OptionType : : kCompressionType , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kMutable , <nl> - offsetof ( struct MutableCFOptions , compression ) } } , <nl> + OptionTypeFlags : : kMutable } } , <nl> + { " sample_for_compression " , <nl> + { offsetof ( struct MutableCFOptions , sample_for_compression ) , <nl> + OptionType : : kUInt64T , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kMutable } } , <nl> + { " bottommost_compression " , <nl> + { offsetof ( struct MutableCFOptions , bottommost_compression ) , <nl> + OptionType : : kCompressionType , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kMutable } } , <nl> + { kOptNameCompOpts , <nl> + OptionTypeInfo : : Struct ( <nl> + kOptNameCompOpts , & compression_options_type_info , <nl> + offsetof ( struct MutableCFOptions , compression_opts ) , <nl> + OptionVerificationType : : kNormal , <nl> + ( OptionTypeFlags : : kMutable | OptionTypeFlags : : kCompareNever ) , <nl> + [ ] ( const ConfigOptions & opts , const std : : string & name , <nl> + const std : : string & value , char * addr ) { <nl> + / / This is to handle backward compatibility , where <nl> + / / compression_options was a " : " separated list . <nl> + if ( name = = kOptNameCompOpts & & <nl> + value . 
find ( " = " ) = = std : : string : : npos ) { <nl> + auto * compression = <nl> + reinterpret_cast < CompressionOptions * > ( addr ) ; <nl> + return ParseCompressionOptions ( value , name , * compression ) ; <nl> + } else { <nl> + return OptionTypeInfo : : ParseStruct ( <nl> + opts , kOptNameCompOpts , & compression_options_type_info , <nl> + name , value , addr ) ; <nl> + } <nl> + } ) } , <nl> + { kOptNameBMCompOpts , <nl> + OptionTypeInfo : : Struct ( <nl> + kOptNameBMCompOpts , & compression_options_type_info , <nl> + offsetof ( struct MutableCFOptions , bottommost_compression_opts ) , <nl> + OptionVerificationType : : kNormal , <nl> + ( OptionTypeFlags : : kMutable | OptionTypeFlags : : kCompareNever ) , <nl> + [ ] ( const ConfigOptions & opts , const std : : string & name , <nl> + const std : : string & value , char * addr ) { <nl> + / / This is to handle backward compatibility , where <nl> + / / compression_options was a " : " separated list . <nl> + if ( name = = kOptNameBMCompOpts & & <nl> + value . find ( " = " ) = = std : : string : : npos ) { <nl> + auto * compression = <nl> + reinterpret_cast < CompressionOptions * > ( addr ) ; <nl> + return ParseCompressionOptions ( value , name , * compression ) ; <nl> + } else { <nl> + return OptionTypeInfo : : ParseStruct ( <nl> + opts , kOptNameBMCompOpts , & compression_options_type_info , <nl> + name , value , addr ) ; <nl> + } <nl> + } ) } , <nl> + / / End special case properties <nl> + } ; <nl> + <nl> + static std : : unordered_map < std : : string , OptionTypeInfo > <nl> + cf_immutable_options_type_info = { <nl> + / * not yet supported <nl> + CompressionOptions compression_opts ; <nl> + TablePropertiesCollectorFactories table_properties_collector_factories ; <nl> + typedef std : : vector < std : : shared_ptr < TablePropertiesCollectorFactory > > <nl> + TablePropertiesCollectorFactories ; <nl> + UpdateStatus ( * inplace_callback ) ( char * existing_value , <nl> + uint34_t * existing_value_size , <nl> + Slice delta_value , <nl> + std : : string * merged_value ) ; <nl> + std : : vector < DbPath > cf_paths ; <nl> + * / <nl> + { " compaction_measure_io_stats " , <nl> + { 0 , OptionType : : kBoolean , OptionVerificationType : : kDeprecated , <nl> + OptionTypeFlags : : kNone } } , <nl> + { " inplace_update_support " , <nl> + { offset_of ( & ColumnFamilyOptions : : inplace_update_support ) , <nl> + OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kNone } } , <nl> + { " level_compaction_dynamic_level_bytes " , <nl> + { offset_of ( & ColumnFamilyOptions : : level_compaction_dynamic_level_bytes ) , <nl> + OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kNone } } , <nl> + { " optimize_filters_for_hits " , <nl> + { offset_of ( & ColumnFamilyOptions : : optimize_filters_for_hits ) , <nl> + OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kNone } } , <nl> + { " force_consistency_checks " , <nl> + { offset_of ( & ColumnFamilyOptions : : force_consistency_checks ) , <nl> + OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kNone } } , <nl> + { " purge_redundant_kvs_while_flush " , <nl> + { offset_of ( & ColumnFamilyOptions : : purge_redundant_kvs_while_flush ) , <nl> + OptionType : : kBoolean , OptionVerificationType : : kDeprecated , <nl> + OptionTypeFlags : : kNone } } , <nl> + { " max_mem_compaction_level " , <nl> + { 0 , OptionType : : kInt , OptionVerificationType : : kDeprecated , <nl> 
+ OptionTypeFlags : : kNone } } , <nl> + { " max_write_buffer_number_to_maintain " , <nl> + { offset_of ( & ColumnFamilyOptions : : max_write_buffer_number_to_maintain ) , <nl> + OptionType : : kInt , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kNone , 0 } } , <nl> + { " max_write_buffer_size_to_maintain " , <nl> + { offset_of ( & ColumnFamilyOptions : : max_write_buffer_size_to_maintain ) , <nl> + OptionType : : kInt64T , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kNone } } , <nl> + { " min_write_buffer_number_to_merge " , <nl> + { offset_of ( & ColumnFamilyOptions : : min_write_buffer_number_to_merge ) , <nl> + OptionType : : kInt , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kNone , 0 } } , <nl> + { " num_levels " , <nl> + { offset_of ( & ColumnFamilyOptions : : num_levels ) , OptionType : : kInt , <nl> + OptionVerificationType : : kNormal , OptionTypeFlags : : kNone } } , <nl> + { " bloom_locality " , <nl> + { offset_of ( & ColumnFamilyOptions : : bloom_locality ) , OptionType : : kUInt32T , <nl> + OptionVerificationType : : kNormal , OptionTypeFlags : : kNone } } , <nl> + { " rate_limit_delay_max_milliseconds " , <nl> + { 0 , OptionType : : kUInt , OptionVerificationType : : kDeprecated , <nl> + OptionTypeFlags : : kNone } } , <nl> { " compression_per_level " , <nl> OptionTypeInfo : : Vector < CompressionType > ( <nl> offset_of ( & ColumnFamilyOptions : : compression_per_level ) , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kNone , 0 , <nl> + OptionVerificationType : : kNormal , OptionTypeFlags : : kNone , <nl> { 0 , OptionType : : kCompressionType } ) } , <nl> - { " bottommost_compression " , <nl> - { offset_of ( & ColumnFamilyOptions : : bottommost_compression ) , <nl> - OptionType : : kCompressionType , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kMutable , <nl> - offsetof ( struct MutableCFOptions , bottommost_compression ) } } , <nl> { " comparator " , <nl> { offset_of ( & ColumnFamilyOptions : : comparator ) , OptionType : : kComparator , <nl> - OptionVerificationType : : kByName , OptionTypeFlags : : kCompareLoose , 0 , <nl> + OptionVerificationType : : kByName , OptionTypeFlags : : kCompareLoose , <nl> / / Parses the string and sets the corresponding comparator <nl> [ ] ( const ConfigOptions & / * opts * / , const std : : string & / * name * / , <nl> const std : : string & value , char * addr ) { <nl> std : : unordered_map < std : : string , OptionTypeInfo > <nl> } <nl> return Status : : OK ( ) ; <nl> } } } , <nl> - { " prefix_extractor " , <nl> - { offset_of ( & ColumnFamilyOptions : : prefix_extractor ) , <nl> - OptionType : : kSliceTransform , OptionVerificationType : : kByNameAllowNull , <nl> - OptionTypeFlags : : kMutable , <nl> - offsetof ( struct MutableCFOptions , prefix_extractor ) } } , <nl> { " memtable_insert_with_hint_prefix_extractor " , <nl> { offset_of ( <nl> & ColumnFamilyOptions : : memtable_insert_with_hint_prefix_extractor ) , <nl> OptionType : : kSliceTransform , OptionVerificationType : : kByNameAllowNull , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " memtable_factory " , <nl> { offset_of ( & ColumnFamilyOptions : : memtable_factory ) , <nl> OptionType : : kMemTableRepFactory , OptionVerificationType : : kByName , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " memtable " , <nl> { offset_of ( & ColumnFamilyOptions : : memtable_factory ) , <nl> OptionType : : 
kMemTableRepFactory , OptionVerificationType : : kAlias , <nl> - OptionTypeFlags : : kNone , 0 , <nl> + OptionTypeFlags : : kNone , <nl> / / Parses the value string and updates the memtable_factory <nl> [ ] ( const ConfigOptions & / * opts * / , const std : : string & / * name * / , <nl> const std : : string & value , char * addr ) { <nl> std : : unordered_map < std : : string , OptionTypeInfo > <nl> } } } , <nl> { " table_factory " , <nl> { offset_of ( & ColumnFamilyOptions : : table_factory ) , <nl> - OptionType : : kTableFactory , OptionVerificationType : : kByName , <nl> - OptionTypeFlags : : kCompareLoose , 0 } } , <nl> + OptionType : : kConfigurable , OptionVerificationType : : kByName , <nl> + ( OptionTypeFlags : : kShared | OptionTypeFlags : : kCompareLoose | <nl> + OptionTypeFlags : : kDontPrepare ) , <nl> + / / Creates a new TableFactory based on value <nl> + [ ] ( const ConfigOptions & opts , const std : : string & / * name * / , <nl> + const std : : string & value , char * addr ) { <nl> + auto table_factory = <nl> + reinterpret_cast < std : : shared_ptr < TableFactory > * > ( addr ) ; <nl> + return TableFactory : : CreateFromString ( opts , value , table_factory ) ; <nl> + } , <nl> + / / Converts the TableFactory into its string representation <nl> + [ ] ( const ConfigOptions & / * opts * / , const std : : string & / * name * / , <nl> + const char * addr , std : : string * value ) { <nl> + const auto * table_factory = <nl> + reinterpret_cast < const std : : shared_ptr < TableFactory > * > ( addr ) ; <nl> + * value = table_factory - > get ( ) ? table_factory - > get ( ) - > Name ( ) <nl> + : kNullptrString ; <nl> + return Status : : OK ( ) ; <nl> + } , <nl> + / * No equals function for table factories * / nullptr } } , <nl> { " block_based_table_factory " , <nl> { offset_of ( & ColumnFamilyOptions : : table_factory ) , <nl> - OptionType : : kTableFactory , OptionVerificationType : : kAlias , <nl> - OptionTypeFlags : : kCompareLoose , 0 , <nl> + OptionType : : kConfigurable , OptionVerificationType : : kAlias , <nl> + OptionTypeFlags : : kShared | OptionTypeFlags : : kCompareLoose , <nl> / / Parses the input value and creates a BlockBasedTableFactory <nl> - [ ] ( const ConfigOptions & / * opts * / , const std : : string & / * name * / , <nl> + [ ] ( const ConfigOptions & opts , const std : : string & name , <nl> const std : : string & value , char * addr ) { <nl> - / / Nested options <nl> - auto old_table_factory = <nl> + BlockBasedTableOptions * old_opts = nullptr ; <nl> + auto table_factory = <nl> reinterpret_cast < std : : shared_ptr < TableFactory > * > ( addr ) ; <nl> - BlockBasedTableOptions table_opts , base_opts ; <nl> - BlockBasedTableFactory * block_based_table_factory = <nl> - static_cast_with_check < BlockBasedTableFactory > ( <nl> - old_table_factory - > get ( ) ) ; <nl> - if ( block_based_table_factory ! = nullptr ) { <nl> - base_opts = block_based_table_factory - > table_options ( ) ; <nl> + if ( table_factory - > get ( ) ! = nullptr ) { <nl> + old_opts = <nl> + table_factory - > get ( ) - > GetOptions < BlockBasedTableOptions > ( ) ; <nl> } <nl> - Status s = GetBlockBasedTableOptionsFromString ( base_opts , value , <nl> - & table_opts ) ; <nl> - if ( s . ok ( ) ) { <nl> - old_table_factory - > reset ( NewBlockBasedTableFactory ( table_opts ) ) ; <nl> + if ( name = = " block_based_table_factory " ) { <nl> + std : : unique_ptr < TableFactory > new_factory ; <nl> + if ( old_opts ! = nullptr ) { <nl> + new_factory . 
reset ( NewBlockBasedTableFactory ( * old_opts ) ) ; <nl> + } else { <nl> + new_factory . reset ( NewBlockBasedTableFactory ( ) ) ; <nl> + } <nl> + Status s = new_factory - > ConfigureFromString ( opts , value ) ; <nl> + if ( s . ok ( ) ) { <nl> + table_factory - > reset ( new_factory . release ( ) ) ; <nl> + } <nl> + return s ; <nl> + } else if ( old_opts ! = nullptr ) { <nl> + return table_factory - > get ( ) - > ConfigureOption ( opts , name , value ) ; <nl> + } else { <nl> + return Status : : NotFound ( " Mismatched table option : " , name ) ; <nl> } <nl> - return s ; <nl> } } } , <nl> { " plain_table_factory " , <nl> { offset_of ( & ColumnFamilyOptions : : table_factory ) , <nl> - OptionType : : kTableFactory , OptionVerificationType : : kAlias , <nl> - OptionTypeFlags : : kCompareLoose , 0 , <nl> + OptionType : : kConfigurable , OptionVerificationType : : kAlias , <nl> + OptionTypeFlags : : kShared | OptionTypeFlags : : kCompareLoose , <nl> / / Parses the input value and creates a PlainTableFactory <nl> - [ ] ( const ConfigOptions & / * opts * / , const std : : string & / * name * / , <nl> + [ ] ( const ConfigOptions & opts , const std : : string & name , <nl> const std : : string & value , char * addr ) { <nl> - / / Nested options <nl> - auto old_table_factory = <nl> + PlainTableOptions * old_opts = nullptr ; <nl> + auto table_factory = <nl> reinterpret_cast < std : : shared_ptr < TableFactory > * > ( addr ) ; <nl> - PlainTableOptions table_opts , base_opts ; <nl> - PlainTableFactory * plain_table_factory = <nl> - static_cast_with_check < PlainTableFactory > ( <nl> - old_table_factory - > get ( ) ) ; <nl> - if ( plain_table_factory ! = nullptr ) { <nl> - base_opts = plain_table_factory - > table_options ( ) ; <nl> + if ( table_factory - > get ( ) ! = nullptr ) { <nl> + old_opts = table_factory - > get ( ) - > GetOptions < PlainTableOptions > ( ) ; <nl> } <nl> - Status s = <nl> - GetPlainTableOptionsFromString ( base_opts , value , & table_opts ) ; <nl> - if ( s . ok ( ) ) { <nl> - old_table_factory - > reset ( NewPlainTableFactory ( table_opts ) ) ; <nl> + if ( name = = " plain_table_factory " ) { <nl> + std : : unique_ptr < TableFactory > new_factory ; <nl> + if ( old_opts ! = nullptr ) { <nl> + new_factory . reset ( NewPlainTableFactory ( * old_opts ) ) ; <nl> + } else { <nl> + new_factory . reset ( NewPlainTableFactory ( ) ) ; <nl> + } <nl> + Status s = new_factory - > ConfigureFromString ( opts , value ) ; <nl> + if ( s . ok ( ) ) { <nl> + table_factory - > reset ( new_factory . release ( ) ) ; <nl> + } <nl> + return s ; <nl> + } else if ( old_opts ! 
= nullptr ) { <nl> + return table_factory - > get ( ) - > ConfigureOption ( opts , name , value ) ; <nl> + } else { <nl> + return Status : : NotFound ( " Mismatched table option : " , name ) ; <nl> } <nl> - return s ; <nl> } } } , <nl> { " compaction_filter " , <nl> { offset_of ( & ColumnFamilyOptions : : compaction_filter ) , <nl> OptionType : : kCompactionFilter , OptionVerificationType : : kByName , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " compaction_filter_factory " , <nl> { offset_of ( & ColumnFamilyOptions : : compaction_filter_factory ) , <nl> OptionType : : kCompactionFilterFactory , OptionVerificationType : : kByName , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " merge_operator " , <nl> { offset_of ( & ColumnFamilyOptions : : merge_operator ) , <nl> OptionType : : kMergeOperator , <nl> OptionVerificationType : : kByNameAllowFromNull , <nl> - OptionTypeFlags : : kCompareLoose , 0 , <nl> + OptionTypeFlags : : kCompareLoose , <nl> / / Parses the input value as a MergeOperator , updating the value <nl> [ ] ( const ConfigOptions & / * opts * / , const std : : string & / * name * / , <nl> const std : : string & value , char * addr ) { <nl> auto mop = reinterpret_cast < std : : shared_ptr < MergeOperator > * > ( addr ) ; <nl> - ObjectRegistry : : NewInstance ( ) <nl> - - > NewSharedObject < MergeOperator > ( value , mop ) <nl> - . PermitUncheckedError ( ) ; <nl> + Status status = <nl> + ObjectRegistry : : NewInstance ( ) - > NewSharedObject < MergeOperator > ( <nl> + value , mop ) ; <nl> + / / Only support static comparator for now . <nl> + if ( status . ok ( ) ) { <nl> + return status ; <nl> + } <nl> return Status : : OK ( ) ; <nl> } } } , <nl> { " compaction_style " , <nl> { offset_of ( & ColumnFamilyOptions : : compaction_style ) , <nl> OptionType : : kCompactionStyle , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " compaction_pri " , <nl> { offset_of ( & ColumnFamilyOptions : : compaction_pri ) , <nl> OptionType : : kCompactionPri , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> - { " compaction_options_fifo " , <nl> - OptionTypeInfo : : Struct ( <nl> - " compaction_options_fifo " , & fifo_compaction_options_type_info , <nl> - offset_of ( & ColumnFamilyOptions : : compaction_options_fifo ) , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kMutable , <nl> - offsetof ( struct MutableCFOptions , compaction_options_fifo ) , <nl> - [ ] ( const ConfigOptions & opts , const std : : string & name , <nl> - const std : : string & value , char * addr ) { <nl> - / / This is to handle backward compatibility , where <nl> - / / compaction_options_fifo could be assigned a single scalar <nl> - / / value , say , like " 23 " , which would be assigned to <nl> - / / max_table_files_size . <nl> - if ( name = = " compaction_options_fifo " & & <nl> - value . find ( " = " ) = = std : : string : : npos ) { <nl> - / / Old format . Parse just a single uint64_t value . 
<nl> - auto options = reinterpret_cast < CompactionOptionsFIFO * > ( addr ) ; <nl> - options - > max_table_files_size = ParseUint64 ( value ) ; <nl> - return Status : : OK ( ) ; <nl> - } else { <nl> - return OptionTypeInfo : : ParseStruct ( <nl> - opts , " compaction_options_fifo " , <nl> - & fifo_compaction_options_type_info , name , value , addr ) ; <nl> - } <nl> - } ) } , <nl> - { " compaction_options_universal " , <nl> - OptionTypeInfo : : Struct ( <nl> - " compaction_options_universal " , <nl> - & universal_compaction_options_type_info , <nl> - offset_of ( & ColumnFamilyOptions : : compaction_options_universal ) , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kMutable , <nl> - offsetof ( struct MutableCFOptions , compaction_options_universal ) ) } , <nl> - { " ttl " , <nl> - { offset_of ( & ColumnFamilyOptions : : ttl ) , OptionType : : kUInt64T , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kMutable , <nl> - offsetof ( struct MutableCFOptions , ttl ) } } , <nl> - { " periodic_compaction_seconds " , <nl> - { offset_of ( & ColumnFamilyOptions : : periodic_compaction_seconds ) , <nl> - OptionType : : kUInt64T , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kMutable , <nl> - offsetof ( struct MutableCFOptions , periodic_compaction_seconds ) } } , <nl> - { " sample_for_compression " , <nl> - { offset_of ( & ColumnFamilyOptions : : sample_for_compression ) , <nl> - OptionType : : kUInt64T , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kMutable , <nl> - offsetof ( struct MutableCFOptions , sample_for_compression ) } } , <nl> - { " enable_blob_files " , <nl> - { offset_of ( & ColumnFamilyOptions : : enable_blob_files ) , <nl> - OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kMutable , <nl> - offsetof ( struct MutableCFOptions , enable_blob_files ) } } , <nl> - { " min_blob_size " , <nl> - { offset_of ( & ColumnFamilyOptions : : min_blob_size ) , OptionType : : kUInt64T , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kMutable , <nl> - offsetof ( struct MutableCFOptions , min_blob_size ) } } , <nl> - { " blob_file_size " , <nl> - { offset_of ( & ColumnFamilyOptions : : blob_file_size ) , OptionType : : kUInt64T , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kMutable , <nl> - offsetof ( struct MutableCFOptions , blob_file_size ) } } , <nl> - { " blob_compression_type " , <nl> - { offset_of ( & ColumnFamilyOptions : : blob_compression_type ) , <nl> - OptionType : : kCompressionType , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kMutable , <nl> - offsetof ( struct MutableCFOptions , blob_compression_type ) } } , <nl> - / / The following properties were handled as special cases in ParseOption <nl> - / / This means that the properties could be read from the options file <nl> - / / but never written to the file or compared to each other . 
<nl> - { kOptNameCompOpts , <nl> - OptionTypeInfo : : Struct ( <nl> - kOptNameCompOpts , & compression_options_type_info , <nl> - offset_of ( & ColumnFamilyOptions : : compression_opts ) , <nl> - OptionVerificationType : : kNormal , <nl> - ( OptionTypeFlags : : kMutable | OptionTypeFlags : : kCompareNever ) , <nl> - offsetof ( struct MutableCFOptions , compression_opts ) , <nl> - [ ] ( const ConfigOptions & opts , const std : : string & name , <nl> - const std : : string & value , char * addr ) { <nl> - / / This is to handle backward compatibility , where <nl> - / / compression_options was a " : " separated list . <nl> - if ( name = = kOptNameCompOpts & & <nl> - value . find ( " = " ) = = std : : string : : npos ) { <nl> - auto * compression = <nl> - reinterpret_cast < CompressionOptions * > ( addr ) ; <nl> - return ParseCompressionOptions ( value , name , * compression ) ; <nl> - } else { <nl> - return OptionTypeInfo : : ParseStruct ( <nl> - opts , kOptNameCompOpts , & compression_options_type_info , <nl> - name , value , addr ) ; <nl> - } <nl> - } ) } , <nl> - { kOptNameBMCompOpts , <nl> - OptionTypeInfo : : Struct ( <nl> - kOptNameBMCompOpts , & compression_options_type_info , <nl> - offset_of ( & ColumnFamilyOptions : : bottommost_compression_opts ) , <nl> - OptionVerificationType : : kNormal , <nl> - ( OptionTypeFlags : : kMutable | OptionTypeFlags : : kCompareNever ) , <nl> - offsetof ( struct MutableCFOptions , bottommost_compression_opts ) , <nl> - [ ] ( const ConfigOptions & opts , const std : : string & name , <nl> - const std : : string & value , char * addr ) { <nl> - / / This is to handle backward compatibility , where <nl> - / / compression_options was a " : " separated list . <nl> - if ( name = = kOptNameBMCompOpts & & <nl> - value . find ( " = " ) = = std : : string : : npos ) { <nl> - auto * compression = <nl> - reinterpret_cast < CompressionOptions * > ( addr ) ; <nl> - return ParseCompressionOptions ( value , name , * compression ) ; <nl> - } else { <nl> - return OptionTypeInfo : : ParseStruct ( <nl> - opts , kOptNameBMCompOpts , & compression_options_type_info , <nl> - name , value , addr ) ; <nl> - } <nl> - } ) } , <nl> - / / End special case properties <nl> + OptionTypeFlags : : kNone } } , <nl> } ; <nl> <nl> - Status ParseColumnFamilyOption ( const ConfigOptions & config_options , <nl> - const std : : string & name , <nl> - const std : : string & org_value , <nl> - ColumnFamilyOptions * new_options ) { <nl> - const std : : string & value = config_options . input_strings_escaped <nl> - ? UnescapeOptionString ( org_value ) <nl> - : org_value ; <nl> - try { <nl> - std : : string elem ; <nl> - const auto opt_info = <nl> - OptionTypeInfo : : Find ( name , cf_options_type_info , & elem ) ; <nl> - if ( opt_info ! 
= nullptr ) { <nl> - return opt_info - > Parse ( <nl> - config_options , elem , value , <nl> - reinterpret_cast < char * > ( new_options ) + opt_info - > offset_ ) ; <nl> + const std : : string OptionsHelper : : kCFOptionsName = " ColumnFamilyOptions " ; <nl> + <nl> + class ConfigurableMutableCFOptions : public Configurable { <nl> + public : <nl> + ConfigurableMutableCFOptions ( const MutableCFOptions & mcf ) { <nl> + mutable_ = mcf ; <nl> + ConfigurableHelper : : RegisterOptions ( * this , & mutable_ , <nl> + & cf_mutable_options_type_info ) ; <nl> + } <nl> + <nl> + protected : <nl> + MutableCFOptions mutable_ ; <nl> + } ; <nl> + <nl> + class ConfigurableCFOptions : public ConfigurableMutableCFOptions { <nl> + public : <nl> + ConfigurableCFOptions ( const ColumnFamilyOptions & opts , <nl> + const std : : unordered_map < std : : string , std : : string > * map ) <nl> + : ConfigurableMutableCFOptions ( MutableCFOptions ( opts ) ) , <nl> + immutable_ ( opts ) , <nl> + cf_options_ ( opts ) , <nl> + opt_map_ ( map ) { <nl> + ConfigurableHelper : : RegisterOptions ( * this , OptionsHelper : : kCFOptionsName , <nl> + & immutable_ , <nl> + & cf_immutable_options_type_info ) ; <nl> + } <nl> + <nl> + protected : <nl> + Status ConfigureOptions ( <nl> + const ConfigOptions & config_options , <nl> + const std : : unordered_map < std : : string , std : : string > & opts_map , <nl> + std : : unordered_map < std : : string , std : : string > * unused ) override { <nl> + Status s = ConfigurableHelper : : ConfigureOptions ( config_options , * this , <nl> + opts_map , unused ) ; <nl> + if ( s . ok ( ) ) { <nl> + cf_options_ = BuildColumnFamilyOptions ( immutable_ , mutable_ ) ; <nl> + s = PrepareOptions ( config_options ) ; <nl> + } <nl> + return s ; <nl> + } <nl> + <nl> + virtual const void * GetOptionsPtr ( const std : : string & name ) const override { <nl> + if ( name = = OptionsHelper : : kCFOptionsName ) { <nl> + return & cf_options_ ; <nl> } else { <nl> - return Status : : InvalidArgument ( <nl> - " Unable to parse the specified CF option " + name ) ; <nl> + return ConfigurableMutableCFOptions : : GetOptionsPtr ( name ) ; <nl> } <nl> - } catch ( const std : : exception & ) { <nl> - return Status : : InvalidArgument ( " unable to parse the specified option " + <nl> - name ) ; <nl> } <nl> + <nl> + bool OptionsAreEqual ( const ConfigOptions & config_options , <nl> + const OptionTypeInfo & opt_info , <nl> + const std : : string & opt_name , const void * const this_ptr , <nl> + const void * const that_ptr , <nl> + std : : string * mismatch ) const override { <nl> + bool equals = opt_info . AreEqual ( config_options , opt_name , this_ptr , <nl> + that_ptr , mismatch ) ; <nl> + if ( ! equals & & opt_info . IsByName ( ) ) { <nl> + if ( opt_map_ = = nullptr ) { <nl> + equals = true ; <nl> + } else { <nl> + const auto & iter = opt_map_ - > find ( opt_name ) ; <nl> + if ( iter = = opt_map_ - > end ( ) ) { <nl> + equals = true ; <nl> + } else { <nl> + equals = opt_info . AreEqualByName ( config_options , opt_name , this_ptr , <nl> + iter - > second ) ; <nl> + } <nl> + } <nl> + if ( equals ) { / / False alarm , clear mismatch <nl> + * mismatch = " " ; <nl> + } <nl> + } <nl> + if ( equals & & opt_info . IsConfigurable ( ) & & opt_map_ ! = nullptr ) { <nl> + const auto * this_config = opt_info . 
AsRawPointer < Configurable > ( this_ptr ) ; <nl> + if ( this_config = = nullptr ) { <nl> + const auto & iter = opt_map_ - > find ( opt_name ) ; <nl> + / / If the name exists in the map and is not empty / null , <nl> + / / then the this_config should be set . <nl> + if ( iter ! = opt_map_ - > end ( ) & & ! iter - > second . empty ( ) & & <nl> + iter - > second ! = kNullptrString ) { <nl> + * mismatch = opt_name ; <nl> + equals = false ; <nl> + } <nl> + } <nl> + } <nl> + return equals ; <nl> + } <nl> + <nl> + private : <nl> + ColumnFamilyOptions immutable_ ; <nl> + ColumnFamilyOptions cf_options_ ; <nl> + const std : : unordered_map < std : : string , std : : string > * opt_map_ ; <nl> + } ; <nl> + <nl> + std : : unique_ptr < Configurable > CFOptionsAsConfigurable ( <nl> + const MutableCFOptions & opts ) { <nl> + std : : unique_ptr < Configurable > ptr ( new ConfigurableMutableCFOptions ( opts ) ) ; <nl> + return ptr ; <nl> + } <nl> + std : : unique_ptr < Configurable > CFOptionsAsConfigurable ( <nl> + const ColumnFamilyOptions & opts , <nl> + const std : : unordered_map < std : : string , std : : string > * opt_map ) { <nl> + std : : unique_ptr < Configurable > ptr ( new ConfigurableCFOptions ( opts , opt_map ) ) ; <nl> + return ptr ; <nl> } <nl> # endif / / ROCKSDB_LITE <nl> <nl> mmm a / options / cf_options . h <nl> ppp b / options / cf_options . h <nl> namespace ROCKSDB_NAMESPACE { <nl> / / of DB . Raw pointers defined in this struct do not have ownership to the data <nl> / / they point to . Options contains std : : shared_ptr to these data . <nl> struct ImmutableCFOptions { <nl> + static const char * kName ( ) { return " ImmutableCFOptions " ; } <nl> explicit ImmutableCFOptions ( const Options & options ) ; <nl> <nl> ImmutableCFOptions ( const ImmutableDBOptions & db_options , <nl> struct ImmutableCFOptions { <nl> } ; <nl> <nl> struct MutableCFOptions { <nl> + static const char * kName ( ) { return " MutableCFOptions " ; } <nl> explicit MutableCFOptions ( const ColumnFamilyOptions & options ) <nl> : write_buffer_size ( options . write_buffer_size ) , <nl> max_write_buffer_number ( options . max_write_buffer_number ) , <nl> new file mode 100644 <nl> index 0000000000 . . 8c11b0b0ed <nl> mmm / dev / null <nl> ppp b / options / configurable . cc <nl> <nl> + / / Copyright ( c ) 2011 - present , Facebook , Inc . All rights reserved . <nl> + / / This source code is licensed under both the GPLv2 ( found in the <nl> + / / COPYING file in the root directory ) and Apache 2 . 0 License <nl> + / / ( found in the LICENSE . Apache file in the root directory ) . <nl> + <nl> + # include " rocksdb / configurable . h " <nl> + <nl> + # include " logging / logging . h " <nl> + # include " options / configurable_helper . h " <nl> + # include " options / options_helper . h " <nl> + # include " rocksdb / status . h " <nl> + # include " rocksdb / utilities / object_registry . h " <nl> + # include " rocksdb / utilities / options_type . h " <nl> + # include " util / coding . h " <nl> + # include " util / string_util . h " <nl> + <nl> + namespace ROCKSDB_NAMESPACE { <nl> + <nl> + void ConfigurableHelper : : RegisterOptions ( <nl> + Configurable & configurable , const std : : string & name , void * opt_ptr , <nl> + const std : : unordered_map < std : : string , OptionTypeInfo > * type_map ) { <nl> + Configurable : : RegisteredOptions opts ; <nl> + opts . name = name ; <nl> + # ifndef ROCKSDB_LITE <nl> + opts . 
type_map = type_map ; <nl> + # else <nl> + ( void ) type_map ; <nl> + # endif / / ROCKSDB_LITE <nl> + opts . opt_ptr = opt_ptr ; <nl> + configurable . options_ . emplace_back ( opts ) ; <nl> + } <nl> + <nl> + / / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + / / <nl> + / / Methods for Initializing and Validating Configurable Objects <nl> + / / <nl> + / / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + <nl> + Status Configurable : : PrepareOptions ( const ConfigOptions & opts ) { <nl> + Status status = Status : : OK ( ) ; <nl> + # ifndef ROCKSDB_LITE <nl> + for ( auto opt_iter : options_ ) { <nl> + for ( auto map_iter : * ( opt_iter . type_map ) ) { <nl> + auto & opt_info = map_iter . second ; <nl> + if ( ! opt_info . IsDeprecated ( ) & & ! opt_info . IsAlias ( ) & & <nl> + opt_info . IsConfigurable ( ) ) { <nl> + if ( ! opt_info . IsEnabled ( OptionTypeFlags : : kDontPrepare ) ) { <nl> + Configurable * config = <nl> + opt_info . AsRawPointer < Configurable > ( opt_iter . opt_ptr ) ; <nl> + if ( config ! = nullptr ) { <nl> + status = config - > PrepareOptions ( opts ) ; <nl> + if ( ! status . ok ( ) ) { <nl> + return status ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + } <nl> + } <nl> + # endif / / ROCKSDB_LITE <nl> + if ( status . ok ( ) ) { <nl> + auto inner = Inner ( ) ; <nl> + if ( inner ! = nullptr ) { <nl> + status = inner - > PrepareOptions ( opts ) ; <nl> + } <nl> + } <nl> + if ( status . ok ( ) ) { <nl> + prepared_ = true ; <nl> + } <nl> + return status ; <nl> + } <nl> + <nl> + Status Configurable : : ValidateOptions ( const DBOptions & db_opts , <nl> + const ColumnFamilyOptions & cf_opts ) const { <nl> + Status status ; <nl> + # ifndef ROCKSDB_LITE <nl> + for ( auto opt_iter : options_ ) { <nl> + for ( auto map_iter : * ( opt_iter . type_map ) ) { <nl> + auto & opt_info = map_iter . second ; <nl> + if ( ! opt_info . IsDeprecated ( ) & & ! opt_info . IsAlias ( ) ) { <nl> + if ( opt_info . IsConfigurable ( ) ) { <nl> + const Configurable * config = <nl> + opt_info . AsRawPointer < Configurable > ( opt_iter . opt_ptr ) ; <nl> + if ( config ! = nullptr ) { <nl> + status = config - > ValidateOptions ( db_opts , cf_opts ) ; <nl> + } else if ( ! opt_info . CanBeNull ( ) ) { <nl> + status = <nl> + Status : : NotFound ( " Missing configurable object " , map_iter . first ) ; <nl> + } <nl> + if ( ! status . ok ( ) ) { <nl> + return status ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + } <nl> + # endif / / ROCKSDB_LITE <nl> + if ( status . ok ( ) ) { <nl> + const auto inner = Inner ( ) ; <nl> + if ( inner ! = nullptr ) { <nl> + status = inner - > ValidateOptions ( db_opts , cf_opts ) ; <nl> + } <nl> + } <nl> + return status ; <nl> + } <nl> + <nl> + / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> + / * * / <nl> + / * Methods for Retrieving Options from Configurables * / <nl> + / * * / <nl> + / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> + <nl> + const void * Configurable : : GetOptionsPtr ( const std : : string & name ) const { <nl> + for ( auto o : options_ ) { <nl> + if ( o . name = = name ) { <nl> + return o . 
opt_ptr ; <nl> + } <nl> + } <nl> + auto inner = Inner ( ) ; <nl> + if ( inner ! = nullptr ) { <nl> + return inner - > GetOptionsPtr ( name ) ; <nl> + } else { <nl> + return nullptr ; <nl> + } <nl> + } <nl> + <nl> + std : : string Configurable : : GetOptionName ( const std : : string & opt_name ) const { <nl> + return opt_name ; <nl> + } <nl> + <nl> + # ifndef ROCKSDB_LITE <nl> + const OptionTypeInfo * ConfigurableHelper : : FindOption ( <nl> + const std : : vector < Configurable : : RegisteredOptions > & options , <nl> + const std : : string & short_name , std : : string * opt_name , void * * opt_ptr ) { <nl> + for ( auto iter : options ) { <nl> + const auto opt_info = <nl> + OptionTypeInfo : : Find ( short_name , * ( iter . type_map ) , opt_name ) ; <nl> + if ( opt_info ! = nullptr ) { <nl> + * opt_ptr = iter . opt_ptr ; <nl> + return opt_info ; <nl> + } <nl> + } <nl> + return nullptr ; <nl> + } <nl> + # endif / / ROCKSDB_LITE <nl> + <nl> + / / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + / / <nl> + / / Methods for Configuring Options from Strings / Name - Value Pairs / Maps <nl> + / / <nl> + / / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + <nl> + Status Configurable : : ConfigureFromMap ( <nl> + const ConfigOptions & config_options , <nl> + const std : : unordered_map < std : : string , std : : string > & opts_map ) { <nl> + Status s = ConfigureFromMap ( config_options , opts_map , nullptr ) ; <nl> + return s ; <nl> + } <nl> + <nl> + Status Configurable : : ConfigureFromMap ( <nl> + const ConfigOptions & config_options , <nl> + const std : : unordered_map < std : : string , std : : string > & opts_map , <nl> + std : : unordered_map < std : : string , std : : string > * unused ) { <nl> + return ConfigureOptions ( config_options , opts_map , unused ) ; <nl> + } <nl> + <nl> + Status Configurable : : ConfigureOptions ( <nl> + const ConfigOptions & config_options , <nl> + const std : : unordered_map < std : : string , std : : string > & opts_map , <nl> + std : : unordered_map < std : : string , std : : string > * unused ) { <nl> + std : : string curr_opts ; <nl> + # ifndef ROCKSDB_LITE <nl> + if ( ! config_options . ignore_unknown_options ) { <nl> + / / If we are not ignoring unused , get the defaults in case we need to reset <nl> + GetOptionString ( config_options , & curr_opts ) . PermitUncheckedError ( ) ; <nl> + } <nl> + # endif / / ROCKSDB_LITE <nl> + Status s = ConfigurableHelper : : ConfigureOptions ( config_options , * this , <nl> + opts_map , unused ) ; <nl> + if ( config_options . invoke_prepare_options & & s . ok ( ) ) { <nl> + s = PrepareOptions ( config_options ) ; <nl> + } <nl> + # ifndef ROCKSDB_LITE <nl> + if ( ! s . ok ( ) & & ! curr_opts . empty ( ) ) { <nl> + ConfigOptions reset = config_options ; <nl> + reset . ignore_unknown_options = true ; <nl> + reset . invoke_prepare_options = true ; <nl> + / / There are some options to reset from this current error <nl> + ConfigureFromString ( reset , curr_opts ) . 
PermitUncheckedError ( ) ; <nl> + } <nl> + # endif / / ROCKSDB_LITE <nl> + return s ; <nl> + } <nl> + <nl> + Status Configurable : : ParseStringOptions ( const ConfigOptions & / * config_options * / , <nl> + const std : : string & / * opts_str * / ) { <nl> + return Status : : OK ( ) ; <nl> + } <nl> + <nl> + Status Configurable : : ConfigureFromString ( const ConfigOptions & config_options , <nl> + const std : : string & opts_str ) { <nl> + Status s ; <nl> + if ( ! opts_str . empty ( ) ) { <nl> + # ifndef ROCKSDB_LITE <nl> + if ( opts_str . find ( ' ; ' ) ! = std : : string : : npos | | <nl> + opts_str . find ( ' = ' ) ! = std : : string : : npos ) { <nl> + std : : unordered_map < std : : string , std : : string > opt_map ; <nl> + s = StringToMap ( opts_str , & opt_map ) ; <nl> + if ( s . ok ( ) ) { <nl> + s = ConfigureFromMap ( config_options , opt_map , nullptr ) ; <nl> + } <nl> + } else { <nl> + # endif / / ROCKSDB_LITE <nl> + s = ParseStringOptions ( config_options , opts_str ) ; <nl> + if ( s . ok ( ) & & config_options . invoke_prepare_options ) { <nl> + s = PrepareOptions ( config_options ) ; <nl> + } <nl> + # ifndef ROCKSDB_LITE <nl> + } <nl> + # endif / / ROCKSDB_LITE <nl> + } else if ( config_options . invoke_prepare_options ) { <nl> + s = PrepareOptions ( config_options ) ; <nl> + } else { <nl> + s = Status : : OK ( ) ; <nl> + } <nl> + return s ; <nl> + } <nl> + <nl> + # ifndef ROCKSDB_LITE <nl> + / * * <nl> + * Sets the value of the named property to the input value , returning OK on <nl> + * succcess . <nl> + * / <nl> + Status Configurable : : ConfigureOption ( const ConfigOptions & config_options , <nl> + const std : : string & name , <nl> + const std : : string & value ) { <nl> + const std : : string & opt_name = GetOptionName ( name ) ; <nl> + return ConfigurableHelper : : ConfigureSingleOption ( config_options , * this , <nl> + opt_name , value ) ; <nl> + } <nl> + <nl> + / * * <nl> + * Looks for the named option amongst the options for this type and sets <nl> + * the value for it to be the input value . <nl> + * If the name was found , found_option will be set to true and the resulting <nl> + * status should be returned . <nl> + * / <nl> + <nl> + Status Configurable : : ParseOption ( const ConfigOptions & config_options , <nl> + const OptionTypeInfo & opt_info , <nl> + const std : : string & opt_name , <nl> + const std : : string & opt_value , void * opt_ptr ) { <nl> + if ( opt_info . IsMutable ( ) | | opt_info . IsConfigurable ( ) ) { <nl> + return opt_info . Parse ( config_options , opt_name , opt_value , opt_ptr ) ; <nl> + } else if ( prepared_ ) { <nl> + return Status : : InvalidArgument ( " Option not changeable : " + opt_name ) ; <nl> + } else { <nl> + return opt_info . Parse ( config_options , opt_name , opt_value , opt_ptr ) ; <nl> + } <nl> + } <nl> + <nl> + # endif / / ROCKSDB_LITE <nl> + <nl> + Status ConfigurableHelper : : ConfigureOptions ( <nl> + const ConfigOptions & config_options , Configurable & configurable , <nl> + const std : : unordered_map < std : : string , std : : string > & opts_map , <nl> + std : : unordered_map < std : : string , std : : string > * unused ) { <nl> + std : : unordered_map < std : : string , std : : string > remaining = opts_map ; <nl> + Status s = Status : : OK ( ) ; <nl> + if ( ! opts_map . empty ( ) ) { <nl> + # ifndef ROCKSDB_LITE <nl> + for ( const auto & iter : configurable . options_ ) { <nl> + s = ConfigureSomeOptions ( config_options , configurable , * ( iter . type_map ) , <nl> + & remaining , iter . 
opt_ptr ) ; <nl> + if ( remaining . empty ( ) ) { / / Are there more options left ? <nl> + break ; <nl> + } else if ( ! s . ok ( ) ) { <nl> + break ; <nl> + } <nl> + } <nl> + # else <nl> + ( void ) configurable ; <nl> + if ( ! config_options . ignore_unknown_options ) { <nl> + s = Status : : NotSupported ( " ConfigureFromMap not supported in LITE mode " ) ; <nl> + } <nl> + # endif / / ROCKSDB_LITE <nl> + } <nl> + if ( unused ! = nullptr & & ! remaining . empty ( ) ) { <nl> + unused - > insert ( remaining . begin ( ) , remaining . end ( ) ) ; <nl> + } <nl> + if ( config_options . ignore_unknown_options ) { <nl> + s = Status : : OK ( ) ; <nl> + } else if ( s . ok ( ) & & unused = = nullptr & & ! remaining . empty ( ) ) { <nl> + s = Status : : NotFound ( " Could not find option : " , remaining . begin ( ) - > first ) ; <nl> + } <nl> + return s ; <nl> + } <nl> + <nl> + # ifndef ROCKSDB_LITE <nl> + / * * <nl> + * Updates the object with the named - value property values , returning OK on <nl> + * succcess . Any properties that were found are removed from the options list ; <nl> + * upon return only options that were not found in this opt_map remain . <nl> + <nl> + * Returns : <nl> + * - OK if ignore_unknown_options is set <nl> + * - InvalidArgument , if any option was invalid <nl> + * - NotSupported , if any option is unsupported and ignore_unsupported_options <nl> + is OFF <nl> + * - OK , if no option was invalid or not supported ( or ignored ) <nl> + * / <nl> + Status ConfigurableHelper : : ConfigureSomeOptions ( <nl> + const ConfigOptions & config_options , Configurable & configurable , <nl> + const std : : unordered_map < std : : string , OptionTypeInfo > & type_map , <nl> + std : : unordered_map < std : : string , std : : string > * options , void * opt_ptr ) { <nl> + Status result = Status : : OK ( ) ; / / The last non - OK result ( if any ) <nl> + Status notsup = Status : : OK ( ) ; / / The last NotSupported result ( if any ) <nl> + std : : string elem_name ; <nl> + int found = 1 ; <nl> + std : : unordered_set < std : : string > unsupported ; <nl> + / / While there are unused properties and we processed at least one , <nl> + / / go through the remaining unused properties and attempt to configure them . <nl> + while ( found > 0 & & ! options - > empty ( ) ) { <nl> + found = 0 ; <nl> + notsup = Status : : OK ( ) ; <nl> + for ( auto it = options - > begin ( ) ; it ! = options - > end ( ) ; ) { <nl> + const std : : string & opt_name = configurable . GetOptionName ( it - > first ) ; <nl> + const std : : string & opt_value = it - > second ; <nl> + const auto opt_info = <nl> + OptionTypeInfo : : Find ( opt_name , type_map , & elem_name ) ; <nl> + if ( opt_info = = nullptr ) { / / Did not find the option . Skip it <nl> + + + it ; <nl> + } else { <nl> + Status s = ConfigureOption ( config_options , configurable , * opt_info , <nl> + opt_name , elem_name , opt_value , opt_ptr ) ; <nl> + if ( s . IsNotFound ( ) ) { <nl> + + + it ; <nl> + } else if ( s . IsNotSupported ( ) ) { <nl> + notsup = s ; <nl> + unsupported . insert ( it - > first ) ; <nl> + + + it ; / / Skip it for now <nl> + } else { <nl> + found + + ; <nl> + it = options - > erase ( it ) ; <nl> + if ( ! s . ok ( ) ) { <nl> + result = s ; <nl> + } <nl> + } <nl> + } <nl> + } / / End for all remaining options <nl> + } / / End while found one or options remain <nl> + <nl> + / / Now that we have been through the list , remove any unsupported <nl> + for ( auto u : unsupported ) { <nl> + auto it = options - > find ( u ) ; <nl> + if ( it ! 
= options - > end ( ) ) { <nl> + options - > erase ( it ) ; <nl> + } <nl> + } <nl> + if ( config_options . ignore_unknown_options ) { <nl> + if ( ! result . ok ( ) ) result . PermitUncheckedError ( ) ; <nl> + if ( ! notsup . ok ( ) ) notsup . PermitUncheckedError ( ) ; <nl> + return Status : : OK ( ) ; <nl> + } else if ( ! result . ok ( ) ) { <nl> + if ( ! notsup . ok ( ) ) notsup . PermitUncheckedError ( ) ; <nl> + return result ; <nl> + } else if ( config_options . ignore_unsupported_options ) { <nl> + if ( ! notsup . ok ( ) ) notsup . PermitUncheckedError ( ) ; <nl> + return Status : : OK ( ) ; <nl> + } else { <nl> + return notsup ; <nl> + } <nl> + } <nl> + <nl> + Status ConfigurableHelper : : ConfigureSingleOption ( <nl> + const ConfigOptions & config_options , Configurable & configurable , <nl> + const std : : string & name , const std : : string & value ) { <nl> + std : : string opt_name ; <nl> + void * opt_ptr = nullptr ; <nl> + const auto opt_info = <nl> + FindOption ( configurable . options_ , name , & opt_name , & opt_ptr ) ; <nl> + if ( opt_info = = nullptr ) { <nl> + return Status : : NotFound ( " Could not find option : " , name ) ; <nl> + } else { <nl> + return ConfigureOption ( config_options , configurable , * opt_info , name , <nl> + opt_name , value , opt_ptr ) ; <nl> + } <nl> + } <nl> + <nl> + Status ConfigurableHelper : : ConfigureOption ( <nl> + const ConfigOptions & config_options , Configurable & configurable , <nl> + const OptionTypeInfo & opt_info , const std : : string & opt_name , <nl> + const std : : string & name , const std : : string & value , void * opt_ptr ) { <nl> + if ( opt_name = = name ) { <nl> + return configurable . ParseOption ( config_options , opt_info , opt_name , value , <nl> + opt_ptr ) ; <nl> + } else if ( opt_info . IsStruct ( ) | | opt_info . IsConfigurable ( ) ) { <nl> + return configurable . ParseOption ( config_options , opt_info , name , value , <nl> + opt_ptr ) ; <nl> + } else { <nl> + return Status : : NotFound ( " Could not find option : " , name ) ; <nl> + } <nl> + } <nl> + # endif / / ROCKSDB_LITE <nl> + <nl> + / / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + / / <nl> + / / Methods for Converting Options into strings <nl> + / / <nl> + / / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + <nl> + Status Configurable : : GetOptionString ( const ConfigOptions & config_options , <nl> + std : : string * result ) const { <nl> + assert ( result ) ; <nl> + result - > clear ( ) ; <nl> + # ifndef ROCKSDB_LITE <nl> + return ConfigurableHelper : : SerializeOptions ( config_options , * this , " " , <nl> + result ) ; <nl> + # else <nl> + ( void ) config_options ; <nl> + return Status : : NotSupported ( " GetOptionString not supported in LITE mode " ) ; <nl> + # endif / / ROCKSDB_LITE <nl> + } <nl> + <nl> + # ifndef ROCKSDB_LITE <nl> + std : : string Configurable : : ToString ( const ConfigOptions & config_options , <nl> + const std : : string & prefix ) const { <nl> + std : : string result = SerializeOptions ( config_options , prefix ) ; <nl> + if ( result . empty ( ) | | result . 
find ( ' = ' ) = = std : : string : : npos ) { <nl> + return result ; <nl> + } else { <nl> + return " { " + result + " } " ; <nl> + } <nl> + } <nl> + <nl> + std : : string Configurable : : SerializeOptions ( const ConfigOptions & config_options , <nl> + const std : : string & header ) const { <nl> + std : : string result ; <nl> + Status s = ConfigurableHelper : : SerializeOptions ( config_options , * this , header , <nl> + & result ) ; <nl> + assert ( s . ok ( ) ) ; <nl> + return result ; <nl> + } <nl> + <nl> + Status Configurable : : GetOption ( const ConfigOptions & config_options , <nl> + const std : : string & name , <nl> + std : : string * value ) const { <nl> + return ConfigurableHelper : : GetOption ( config_options , * this , <nl> + GetOptionName ( name ) , value ) ; <nl> + } <nl> + <nl> + Status ConfigurableHelper : : GetOption ( const ConfigOptions & config_options , <nl> + const Configurable & configurable , <nl> + const std : : string & short_name , <nl> + std : : string * value ) { <nl> + / / Look for option directly <nl> + assert ( value ) ; <nl> + value - > clear ( ) ; <nl> + <nl> + std : : string opt_name ; <nl> + void * opt_ptr = nullptr ; <nl> + const auto opt_info = <nl> + FindOption ( configurable . options_ , short_name , & opt_name , & opt_ptr ) ; <nl> + if ( opt_info ! = nullptr ) { <nl> + ConfigOptions embedded = config_options ; <nl> + embedded . delimiter = " ; " ; <nl> + if ( short_name = = opt_name ) { <nl> + return opt_info - > Serialize ( embedded , opt_name , opt_ptr , value ) ; <nl> + } else if ( opt_info - > IsStruct ( ) ) { <nl> + return opt_info - > Serialize ( embedded , opt_name , opt_ptr , value ) ; <nl> + } else if ( opt_info - > IsConfigurable ( ) ) { <nl> + auto const * config = opt_info - > AsRawPointer < Configurable > ( opt_ptr ) ; <nl> + if ( config ! = nullptr ) { <nl> + return config - > GetOption ( embedded , opt_name , value ) ; <nl> + } <nl> + } <nl> + } <nl> + return Status : : NotFound ( " Cannot find option : " , short_name ) ; <nl> + } <nl> + <nl> + Status ConfigurableHelper : : SerializeOptions ( const ConfigOptions & config_options , <nl> + const Configurable & configurable , <nl> + const std : : string & prefix , <nl> + std : : string * result ) { <nl> + assert ( result ) ; <nl> + for ( auto const & opt_iter : configurable . options_ ) { <nl> + for ( const auto & map_iter : * ( opt_iter . type_map ) ) { <nl> + const auto & opt_name = map_iter . first ; <nl> + const auto & opt_info = map_iter . second ; <nl> + if ( opt_info . ShouldSerialize ( ) ) { <nl> + std : : string value ; <nl> + Status s = opt_info . Serialize ( config_options , prefix + opt_name , <nl> + opt_iter . opt_ptr , & value ) ; <nl> + if ( ! s . ok ( ) ) { <nl> + return s ; <nl> + } else if ( ! value . empty ( ) ) { <nl> + / / < prefix > < opt_name > = < value > < delimiter > <nl> + result - > append ( prefix + opt_name + " = " + value + <nl> + config_options . 
delimiter ) ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + return Status : : OK ( ) ; <nl> + } <nl> + # endif / / ROCKSDB_LITE <nl> + <nl> + / / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + / / <nl> + / / Methods for listing the options from Configurables <nl> + / / <nl> + / / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + # ifndef ROCKSDB_LITE <nl> + Status Configurable : : GetOptionNames ( <nl> + const ConfigOptions & config_options , <nl> + std : : unordered_set < std : : string > * result ) const { <nl> + assert ( result ) ; <nl> + return ConfigurableHelper : : ListOptions ( config_options , * this , " " , result ) ; <nl> + } <nl> + <nl> + Status ConfigurableHelper : : ListOptions ( <nl> + const ConfigOptions & / * config_options * / , const Configurable & configurable , <nl> + const std : : string & prefix , std : : unordered_set < std : : string > * result ) { <nl> + Status status ; <nl> + for ( auto const & opt_iter : configurable . options_ ) { <nl> + for ( const auto & map_iter : * ( opt_iter . type_map ) ) { <nl> + const auto & opt_name = map_iter . first ; <nl> + const auto & opt_info = map_iter . second ; <nl> + / / If the option is no longer used in rocksdb and marked as deprecated , <nl> + / / we skip it in the serialization . <nl> + if ( ! opt_info . IsDeprecated ( ) & & ! opt_info . IsAlias ( ) ) { <nl> + result - > emplace ( prefix + opt_name ) ; <nl> + } <nl> + } <nl> + } <nl> + return status ; <nl> + } <nl> + # endif / / ROCKSDB_LITE <nl> + <nl> + / / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + / / <nl> + / / Methods for Comparing Configurables <nl> + / / <nl> + / / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + <nl> + bool Configurable : : AreEquivalent ( const ConfigOptions & config_options , <nl> + const Configurable * other , <nl> + std : : string * name ) const { <nl> + assert ( name ) ; <nl> + name - > clear ( ) ; <nl> + if ( this = = other | | config_options . IsCheckDisabled ( ) ) { <nl> + return true ; <nl> + } else if ( other ! = nullptr ) { <nl> + # ifndef ROCKSDB_LITE <nl> + return ConfigurableHelper : : AreEquivalent ( config_options , * this , * other , <nl> + name ) ; <nl> + # else <nl> + return true ; <nl> + # endif / / ROCKSDB_LITE <nl> + } else { <nl> + return false ; <nl> + } <nl> + } <nl> + <nl> + # ifndef ROCKSDB_LITE <nl> + bool Configurable : : OptionsAreEqual ( const ConfigOptions & config_options , <nl> + const OptionTypeInfo & opt_info , <nl> + const std : : string & opt_name , <nl> + const void * const this_ptr , <nl> + const void * const that_ptr , <nl> + std : : string * mismatch ) const { <nl> + if ( opt_info . AreEqual ( config_options , opt_name , this_ptr , that_ptr , <nl> + mismatch ) ) { <nl> + return true ; <nl> + } else if ( opt_info . 
AreEqualByName ( config_options , opt_name , this_ptr , <nl> + that_ptr ) ) { <nl> + * mismatch = " " ; <nl> + return true ; <nl> + } else { <nl> + return false ; <nl> + } <nl> + } <nl> + <nl> + bool ConfigurableHelper : : AreEquivalent ( const ConfigOptions & config_options , <nl> + const Configurable & this_one , <nl> + const Configurable & that_one , <nl> + std : : string * mismatch ) { <nl> + assert ( mismatch ! = nullptr ) ; <nl> + for ( auto const & o : this_one . options_ ) { <nl> + const auto this_offset = this_one . GetOptionsPtr ( o . name ) ; <nl> + const auto that_offset = that_one . GetOptionsPtr ( o . name ) ; <nl> + if ( this_offset ! = that_offset ) { <nl> + if ( this_offset = = nullptr | | that_offset = = nullptr ) { <nl> + return false ; <nl> + } else { <nl> + for ( const auto & map_iter : * ( o . type_map ) ) { <nl> + if ( config_options . IsCheckEnabled ( map_iter . second . GetSanityLevel ( ) ) & & <nl> + ! this_one . OptionsAreEqual ( config_options , map_iter . second , <nl> + map_iter . first , this_offset , <nl> + that_offset , mismatch ) ) { <nl> + return false ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + } <nl> + return true ; <nl> + } <nl> + # endif / / ROCKSDB_LITE <nl> + } / / namespace ROCKSDB_NAMESPACE <nl> new file mode 100644 <nl> index 0000000000 . . 6a2454727e <nl> mmm / dev / null <nl> ppp b / options / configurable_helper . h <nl> <nl> + / / Copyright ( c ) 2011 - present , Facebook , Inc . All rights reserved . <nl> + / / This source code is licensed under both the GPLv2 ( found in the <nl> + / / COPYING file in the root directory ) and Apache 2 . 0 License <nl> + / / ( found in the LICENSE . Apache file in the root directory ) . <nl> + <nl> + # pragma once <nl> + <nl> + # include < map > <nl> + # include < stdexcept > <nl> + # include < string > <nl> + # include < vector > <nl> + <nl> + # include " rocksdb / configurable . h " <nl> + # include " rocksdb / convenience . h " <nl> + <nl> + namespace ROCKSDB_NAMESPACE { <nl> + / / Helper class defining static methods for supporting the Configurable <nl> + / / class . The purpose of this class is to keep the Configurable class <nl> + / / as tight as possible and provide methods for doing the actual work <nl> + / / of configuring the objects . <nl> + class ConfigurableHelper { <nl> + public : <nl> + / / Registers the input name with the options and associated map . <nl> + / / When classes register their options in this manner , most of the <nl> + / / functionality ( excluding unknown options and validate / prepare ) is <nl> + / / implemented by the base class . <nl> + / / <nl> + / / This method should be called in the class constructor to register the <nl> + / / option set for this object . For example , to register the options <nl> + / / associated with the BlockBasedTableFactory , the constructor calls this <nl> + / / method passing in : <nl> + / / - the name of the options ( " BlockBasedTableOptions " ) ; <nl> + / / - the options object ( the BlockBasedTableOptions object for this object ; <nl> + / / - the options type map for the BlockBasedTableOptions . <nl> + / / This registration allows the Configurable class to process the option <nl> + / / values associated with the BlockBasedTableOptions without further code in <nl> + / / the derived class . <nl> + / / <nl> + / / @ param name The name of this set of options ( @ see GetOptionsPtr ) <nl> + / / @ param opt_ptr Pointer to the options to associate with this name <nl> + / / @ param opt_map Options map that controls how this option is configured . 
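For illustration, here is a minimal sketch of how a derived class might use this registration hook; the MyPluginOptions struct, its fields, and the my_plugin_type_info map are hypothetical names invented for the example, while Configurable, ConfigurableHelper::RegisterOptions, OptionTypeInfo, and ConfigureFromString are the pieces introduced by this patch:

#include <cstddef>
#include <unordered_map>
#include "options/configurable_helper.h"
#include "rocksdb/configurable.h"
#include "rocksdb/convenience.h"
#include "rocksdb/utilities/options_type.h"

namespace ROCKSDB_NAMESPACE {
// Hypothetical option struct; kName() lets the templated RegisterOptions
// overload derive the registration name.
struct MyPluginOptions {
  static const char* kName() { return "MyPluginOptions"; }
  int window = 0;
  bool verbose = false;
};

// Maps option names to offset/type so the base class can parse, serialize,
// and compare them without per-option code in the derived class.
static std::unordered_map<std::string, OptionTypeInfo> my_plugin_type_info = {
    {"window",
     {offsetof(struct MyPluginOptions, window), OptionType::kInt,
      OptionVerificationType::kNormal, OptionTypeFlags::kNone}},
    {"verbose",
     {offsetof(struct MyPluginOptions, verbose), OptionType::kBoolean,
      OptionVerificationType::kNormal, OptionTypeFlags::kNone}},
};

class MyPlugin : public Configurable {
 public:
  MyPlugin() {
    // Register the option set in the constructor, as described above.
    ConfigurableHelper::RegisterOptions(*this, &options_,
                                        &my_plugin_type_info);
  }

 private:
  MyPluginOptions options_;
};
}  // namespace ROCKSDB_NAMESPACE

Once registered, the generic machinery can drive the settings, e.g. plugin.ConfigureFromString(config_options, "window=10;verbose=true") to parse values or plugin.GetOptionString(config_options, &serialized) to write them back out, with no option-specific code in MyPlugin itself.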
<nl> + template < typename T > <nl> + static void RegisterOptions ( <nl> + Configurable & configurable , T * opt_ptr , <nl> + const std : : unordered_map < std : : string , OptionTypeInfo > * opt_map ) { <nl> + RegisterOptions ( configurable , T : : kName ( ) , opt_ptr , opt_map ) ; <nl> + } <nl> + static void RegisterOptions ( <nl> + Configurable & configurable , const std : : string & name , void * opt_ptr , <nl> + const std : : unordered_map < std : : string , OptionTypeInfo > * opt_map ) ; <nl> + <nl> + / / Configures the input Configurable object based on the parameters . <nl> + / / On successful completion , the Configurable is updated with the settings <nl> + / / from the opt_map . <nl> + / / <nl> + / / The acceptable values of the name / value pairs are documented with the <nl> + / / specific class / instance . <nl> + / / <nl> + / / @ param config_options Controls how the arguments are processed . <nl> + / / @ param opt_map Name / value pairs of the options to update <nl> + / / @ param unused If specified , this value will return the name / value <nl> + / / pairs from opt_map that were NotFound for this object . <nl> + / / @ return OK If all values in the map were successfully updated <nl> + / / @ return NotFound If any of the names in the opt_map were not valid <nl> + / / for this object . If unused is specified , it will contain the <nl> + / / collection of NotFound entries <nl> + / / @ return NotSupported If any of the names are valid but the object does <nl> + / / not know how to convert the value . This can happen if , for example , <nl> + / / there is some nested Configurable that cannot be created . <nl> + / / @ return InvalidArgument If any of the values cannot be successfully <nl> + / / parsed . This can also be returned if PrepareOptions encounters an <nl> + / / error . <nl> + static Status ConfigureOptions ( <nl> + const ConfigOptions & config_options , Configurable & configurable , <nl> + const std : : unordered_map < std : : string , std : : string > & options , <nl> + std : : unordered_map < std : : string , std : : string > * unused ) ; <nl> + <nl> + # ifndef ROCKSDB_LITE <nl> + / / Internal method to configure a set of options for this object . <nl> + / / Classes may override this value to change its behavior . <nl> + / / @ param config_options Controls how the options are being configured <nl> + / / @ param type_name The name that was registered for this set of options <nl> + / / @ param type_map The map of options for this name <nl> + / / @ param opt_ptr Pointer to the object being configured for this option set . <nl> + / / @ param options The option name / values being updated . On return , any <nl> + / / option that was found is removed from the list . <nl> + / / @ return OK If all of the options were successfully updated . <nl> + / / @ return InvalidArgument If an option was found but the value could not <nl> + / / be updated . <nl> + / / @ return NotFound If an option name was not found in type_map <nl> + / / @ return NotSupported If the option was found but no rule for converting <nl> + / / the value could be found . <nl> + static Status ConfigureSomeOptions ( <nl> + const ConfigOptions & config_options , Configurable & configurable , <nl> + const std : : unordered_map < std : : string , OptionTypeInfo > & type_map , <nl> + std : : unordered_map < std : : string , std : : string > * options , void * opt_ptr ) ; <nl> + <nl> + / / Configures a single option in the input Configurable . 
<nl> + / / This method will look through the set of option names for this <nl> + / / Configurable searching for one with the input name . If such an option <nl> + / / is found , it will be configured via the input value . <nl> + / / <nl> + / / @ param config_options Controls how the option is being configured <nl> + / / @ param configurable The object to configure <nl> + / / @ param name For options with sub - options ( like Structs or <nl> + / / Configurables ) , <nl> + / / this value may be the name of the sub - field of the option being <nl> + / / updated . For example , if the option is <nl> + / / " compaction_options_fifo . allow_compaction " , then field name would be <nl> + / / " allow_compaction " . For most options , field_name and opt_name will be <nl> + / / equivalent . <nl> + / / @ param value The new value for this option . <nl> + / / @ return See ConfigureOptions for the possible return values <nl> + static Status ConfigureSingleOption ( const ConfigOptions & config_options , <nl> + Configurable & configurable , <nl> + const std : : string & name , <nl> + const std : : string & value ) ; <nl> + <nl> + / / Configures the option referenced by opt_info for this configurable <nl> + / / This method configures the option based on opt_info for the input <nl> + / / configurable . <nl> + / / @ param config_options Controls how the option is being configured <nl> + / / @ param configurable The object to configure <nl> + / / @ param opt_name The full option name <nl> + / / @ param name For options with sub - options ( like Structs or <nl> + / / Configurables ) , <nl> + / / this value may be the name of the sub - field of the option being <nl> + / / updated . For example , if the option is <nl> + / / " compaction_options_fifo . allow_compaction " , then field name would be <nl> + / / " allow_compaction " . For most options , field_name and opt_name will be <nl> + / / equivalent . <nl> + / / @ param value The new value for this option . <nl> + / / @ return See ConfigureOptions for the possible return values <nl> + static Status ConfigureOption ( const ConfigOptions & config_options , <nl> + Configurable & configurable , <nl> + const OptionTypeInfo & opt_info , <nl> + const std : : string & opt_name , <nl> + const std : : string & name , <nl> + const std : : string & value , void * opt_ptr ) ; <nl> + <nl> + / / Returns the value of the option associated with the input name <nl> + / / This method is the functional inverse of ConfigureOption <nl> + / / @ param config_options Controls how the value is returned <nl> + / / @ param configurable The object from which to get the option . <nl> + / / @ param name The name of the option to return a value for . <nl> + / / @ param value The returned value associated with the named option . <nl> + / / Note that value will be only the serialized version <nl> + / / of the option and not " name = value " <nl> + / / @ return OK If the value of the named option was successfully serialized . <nl> + / / @ return NotFound If the name is not valid for this object . <nl> + / / @ return InvalidArgument If the name is valid for this object but <nl> + / / its value cannot be serialized . <nl> + static Status GetOption ( const ConfigOptions & config_options , <nl> + const Configurable & configurable , <nl> + const std : : string & name , std : : string * value ) ; <nl> + <nl> + / / Serializes the input Configurable into the output result . <nl> + / / This is the inverse of ConfigureOptions <nl> + / / @ param config_options Controls how serialization happens . 
<nl> + / / @ param configurable The object to serialize <nl> + / / @ param prefix A prefix to add to each option as it is serialized . <nl> + / / @ param result The string representation of the configurable . <nl> + / / @ return OK If the options for this object were successfully serialized . <nl> + / / @ return InvalidArgument If one or more of the options could not be <nl> + / / serialized . <nl> + static Status SerializeOptions ( const ConfigOptions & config_options , <nl> + const Configurable & configurable , <nl> + const std : : string & prefix , <nl> + std : : string * result ) ; <nl> + <nl> + / / Internal method to list the option names for this object . <nl> + / / Classes may override this value to change its behavior . <nl> + / / @ see ListOptions for more details <nl> + static Status ListOptions ( const ConfigOptions & config_options , <nl> + const Configurable & configurable , <nl> + const std : : string & prefix , <nl> + std : : unordered_set < std : : string > * result ) ; <nl> + <nl> + / / Checks to see if the two configurables are equivalent to one another . <nl> + / / This method assumes that the two objects are of the same class . <nl> + / / @ param config_options Controls how the options are compared . <nl> + / / @ param this_one The object to compare to . <nl> + / / @ param that_one The other object being compared . <nl> + / / @ param mismatch If the objects do not match , this parameter contains <nl> + / / the name of the option that triggered the match failure . <nl> + / / @ return True if the objects match , false otherwise . <nl> + static bool AreEquivalent ( const ConfigOptions & config_options , <nl> + const Configurable & this_one , <nl> + const Configurable & that_one , <nl> + std : : string * mismatch ) ; <nl> + <nl> + private : <nl> + / / Looks for the option specified by name in the RegisteredOptions . <nl> + / / This method traverses the types in the input options vector . If an entry <nl> + / / matching name is found , that entry , opt_name , and pointer are returned . <nl> + / / @ param options The vector of options to search through <nl> + / / @ param name The name of the option to search for in the OptionType map <nl> + / / @ param opt_name If the name was found , this value is set to the option name <nl> + / / associated with the input name / type . <nl> + / / @ param opt_ptr If the name was found , this value is set to the option <nl> + / / pointer <nl> + / / in the RegisteredOptions vector associated with this entry <nl> + / / @ return A pointer to the OptionTypeInfo from the options if found , <nl> + / / nullptr if the name was not found in the input options <nl> + static const OptionTypeInfo * FindOption ( <nl> + const std : : vector < Configurable : : RegisteredOptions > & options , <nl> + const std : : string & name , std : : string * opt_name , void * * opt_ptr ) ; <nl> + # endif / / ROCKSDB_LITE <nl> + } ; <nl> + <nl> + } / / namespace ROCKSDB_NAMESPACE <nl> new file mode 100644 <nl> index 0000000000 . . b37fa90668 <nl> mmm / dev / null <nl> ppp b / options / configurable_test . cc <nl> <nl> + / / Copyright ( c ) 2011 - present , Facebook , Inc . All rights reserved . <nl> + / / This source code is licensed under both the GPLv2 ( found in the <nl> + / / COPYING file in the root directory ) and Apache 2 . 0 License <nl> + / / ( found in the LICENSE . Apache file in the root directory ) . <nl> + / / <nl> + / / Copyright ( c ) 2011 The LevelDB Authors . All rights reserved . 
<nl> + / / Use of this source code is governed by a BSD - style license that can be <nl> + / / found in the LICENSE file . See the AUTHORS file for names of contributors . <nl> + <nl> + # include " options / configurable_test . h " <nl> + <nl> + # include < cctype > <nl> + # include < cinttypes > <nl> + # include < cstring > <nl> + # include < unordered_map > <nl> + <nl> + # include " options / configurable_helper . h " <nl> + # include " options / options_helper . h " <nl> + # include " options / options_parser . h " <nl> + # include " rocksdb / configurable . h " <nl> + # include " test_util / testharness . h " <nl> + # include " test_util / testutil . h " <nl> + <nl> + # ifndef GFLAGS <nl> + bool FLAGS_enable_print = false ; <nl> + # else <nl> + # include " util / gflags_compat . h " <nl> + using GFLAGS_NAMESPACE : : ParseCommandLineFlags ; <nl> + DEFINE_bool ( enable_print , false , " Print options generated to console . " ) ; <nl> + # endif / / GFLAGS <nl> + <nl> + namespace ROCKSDB_NAMESPACE { <nl> + namespace test { <nl> + class StringLogger : public Logger { <nl> + public : <nl> + using Logger : : Logv ; <nl> + void Logv ( const char * format , va_list ap ) override { <nl> + char buffer [ 1000 ] ; <nl> + vsnprintf ( buffer , sizeof ( buffer ) , format , ap ) ; <nl> + string_ . append ( buffer ) ; <nl> + } <nl> + const std : : string & str ( ) const { return string_ ; } <nl> + void clear ( ) { string_ . clear ( ) ; } <nl> + <nl> + private : <nl> + std : : string string_ ; <nl> + } ; <nl> + <nl> + class SimpleConfigurable : public TestConfigurable < Configurable > { <nl> + public : <nl> + static SimpleConfigurable * Create ( <nl> + const std : : string & name = " simple " , <nl> + int mode = TestConfigMode : : kDefaultMode , <nl> + const std : : unordered_map < std : : string , OptionTypeInfo > * map = <nl> + & simple_option_info ) { <nl> + return new SimpleConfigurable ( name , mode , map ) ; <nl> + } <nl> + <nl> + SimpleConfigurable ( const std : : string & name , int mode , <nl> + const std : : unordered_map < std : : string , OptionTypeInfo > * <nl> + map = & simple_option_info ) <nl> + : TestConfigurable ( name , mode , map ) { <nl> + if ( ( mode & TestConfigMode : : kUniqueMode ) ! = 0 ) { <nl> + unique_ . reset ( SimpleConfigurable : : Create ( " Unique " + name_ ) ) ; <nl> + ConfigurableHelper : : RegisterOptions ( * this , name_ + " Unique " , & unique_ , <nl> + & unique_option_info ) ; <nl> + } <nl> + if ( ( mode & TestConfigMode : : kSharedMode ) ! = 0 ) { <nl> + shared_ . reset ( SimpleConfigurable : : Create ( " Shared " + name_ ) ) ; <nl> + ConfigurableHelper : : RegisterOptions ( * this , name_ + " Shared " , & shared_ , <nl> + & shared_option_info ) ; <nl> + } <nl> + if ( ( mode & TestConfigMode : : kRawPtrMode ) ! 
= 0 ) { <nl> + pointer_ = SimpleConfigurable : : Create ( " Pointer " + name_ ) ; <nl> + ConfigurableHelper : : RegisterOptions ( * this , name_ + " Pointer " , & pointer_ , <nl> + & pointer_option_info ) ; <nl> + } <nl> + } <nl> + <nl> + } ; / / End class SimpleConfigurable <nl> + <nl> + static std : : unordered_map < std : : string , OptionTypeInfo > wrapped_option_info = { <nl> + # ifndef ROCKSDB_LITE <nl> + { " inner " , <nl> + { 0 , OptionType : : kConfigurable , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kShared } } , <nl> + # endif / / ROCKSDB_LITE <nl> + } ; <nl> + class WrappedConfigurable : public SimpleConfigurable { <nl> + public : <nl> + WrappedConfigurable ( const std : : string & name , unsigned char mode , <nl> + const std : : shared_ptr < Configurable > & t ) <nl> + : SimpleConfigurable ( name , mode , & simple_option_info ) , inner_ ( t ) { <nl> + ConfigurableHelper : : RegisterOptions ( * this , " WrappedOptions " , & inner_ , <nl> + & wrapped_option_info ) ; <nl> + } <nl> + <nl> + protected : <nl> + Configurable * Inner ( ) const override { return inner_ . get ( ) ; } <nl> + <nl> + private : <nl> + std : : shared_ptr < Configurable > inner_ ; <nl> + } ; <nl> + <nl> + using ConfigTestFactoryFunc = std : : function < Configurable * ( ) > ; <nl> + <nl> + class ConfigurableTest : public testing : : Test { <nl> + public : <nl> + ConfigurableTest ( ) { config_options_ . invoke_prepare_options = false ; } <nl> + <nl> + ConfigOptions config_options_ ; <nl> + } ; <nl> + <nl> + class ConfigurableParamTest <nl> + : public ConfigurableTest , <nl> + virtual public : : testing : : WithParamInterface < <nl> + std : : pair < std : : string , ConfigTestFactoryFunc > > { <nl> + public : <nl> + ConfigurableParamTest ( ) { <nl> + configuration_ = GetParam ( ) . first ; <nl> + factory_ = GetParam ( ) . second ; <nl> + object_ . 
reset ( factory_ ( ) ) ; <nl> + } <nl> + void TestConfigureOptions ( const ConfigOptions & opts ) ; <nl> + ConfigTestFactoryFunc factory_ ; <nl> + std : : string configuration_ ; <nl> + std : : unique_ptr < Configurable > object_ ; <nl> + } ; <nl> + <nl> + TEST_F ( ConfigurableTest , GetOptionsPtrTest ) { <nl> + std : : string opt_str ; <nl> + std : : unique_ptr < Configurable > configurable ( SimpleConfigurable : : Create ( ) ) ; <nl> + ASSERT_NE ( configurable - > GetOptions < TestOptions > ( " simple " ) , nullptr ) ; <nl> + ASSERT_EQ ( configurable - > GetOptions < TestOptions > ( " bad - opt " ) , nullptr ) ; <nl> + } <nl> + <nl> + TEST_F ( ConfigurableTest , ConfigureFromMapTest ) { <nl> + std : : unique_ptr < Configurable > configurable ( SimpleConfigurable : : Create ( ) ) ; <nl> + auto * opts = configurable - > GetOptions < TestOptions > ( " simple " ) ; <nl> + ASSERT_OK ( configurable - > ConfigureFromMap ( config_options_ , { } ) ) ; <nl> + ASSERT_NE ( opts , nullptr ) ; <nl> + # ifndef ROCKSDB_LITE <nl> + std : : unordered_map < std : : string , std : : string > options_map = { <nl> + { " int " , " 1 " } , { " bool " , " true " } , { " string " , " string " } } ; <nl> + ASSERT_OK ( configurable - > ConfigureFromMap ( config_options_ , options_map ) ) ; <nl> + ASSERT_EQ ( opts - > i , 1 ) ; <nl> + ASSERT_EQ ( opts - > b , true ) ; <nl> + ASSERT_EQ ( opts - > s , " string " ) ; <nl> + # endif <nl> + } <nl> + <nl> + TEST_F ( ConfigurableTest , ConfigureFromStringTest ) { <nl> + std : : unique_ptr < Configurable > configurable ( SimpleConfigurable : : Create ( ) ) ; <nl> + auto * opts = configurable - > GetOptions < TestOptions > ( " simple " ) ; <nl> + ASSERT_OK ( configurable - > ConfigureFromString ( config_options_ , " " ) ) ; <nl> + ASSERT_NE ( opts , nullptr ) ; <nl> + # ifndef ROCKSDB_LITE / / GetOptionsFromMap is not supported in ROCKSDB_LITE <nl> + ASSERT_OK ( configurable - > ConfigureFromString ( config_options_ , <nl> + " int = 1 ; bool = true ; string = s " ) ) ; <nl> + ASSERT_EQ ( opts - > i , 1 ) ; <nl> + ASSERT_EQ ( opts - > b , true ) ; <nl> + ASSERT_EQ ( opts - > s , " s " ) ; <nl> + # endif <nl> + } <nl> + <nl> + # ifndef ROCKSDB_LITE / / GetOptionsFromMap is not supported in ROCKSDB_LITE <nl> + TEST_F ( ConfigurableTest , ConfigureIgnoreTest ) { <nl> + std : : unique_ptr < Configurable > configurable ( SimpleConfigurable : : Create ( ) ) ; <nl> + std : : unordered_map < std : : string , std : : string > options_map = { { " unused " , " u " } } ; <nl> + ConfigOptions ignore = config_options_ ; <nl> + ignore . ignore_unknown_options = true ; <nl> + ASSERT_NOK ( configurable - > ConfigureFromMap ( config_options_ , options_map ) ) ; <nl> + ASSERT_OK ( configurable - > ConfigureFromMap ( ignore , options_map ) ) ; <nl> + ASSERT_NOK ( configurable - > ConfigureFromString ( config_options_ , " unused = u " ) ) ; <nl> + ASSERT_OK ( configurable - > ConfigureFromString ( ignore , " unused = u " ) ) ; <nl> + } <nl> + <nl> + TEST_F ( ConfigurableTest , ConfigureNestedOptionsTest ) { <nl> + std : : unique_ptr < Configurable > base , copy ; <nl> + std : : string opt_str ; <nl> + std : : string mismatch ; <nl> + <nl> + base . reset ( SimpleConfigurable : : Create ( " simple " , TestConfigMode : : kAllOptMode ) ) ; <nl> + copy . 
reset ( SimpleConfigurable : : Create ( " simple " , TestConfigMode : : kAllOptMode ) ) ; <nl> + ASSERT_OK ( base - > ConfigureFromString ( config_options_ , <nl> + " shared = { int = 10 ; string = 10 } ; " <nl> + " unique = { int = 20 ; string = 20 } ; " <nl> + " pointer = { int = 30 ; string = 30 } ; " ) ) ; <nl> + ASSERT_OK ( base - > GetOptionString ( config_options_ , & opt_str ) ) ; <nl> + ASSERT_OK ( copy - > ConfigureFromString ( config_options_ , opt_str ) ) ; <nl> + ASSERT_TRUE ( base - > AreEquivalent ( config_options_ , copy . get ( ) , & mismatch ) ) ; <nl> + } <nl> + <nl> + TEST_F ( ConfigurableTest , GetOptionsTest ) { <nl> + std : : unique_ptr < Configurable > simple ; <nl> + <nl> + simple . reset ( <nl> + SimpleConfigurable : : Create ( " simple " , TestConfigMode : : kAllOptMode ) ) ; <nl> + int i = 11 ; <nl> + for ( auto opt : { " " , " shared . " , " unique . " , " pointer . " } ) { <nl> + std : : string value ; <nl> + std : : string expected = ToString ( i ) ; <nl> + std : : string opt_name = opt ; <nl> + ASSERT_OK ( <nl> + simple - > ConfigureOption ( config_options_ , opt_name + " int " , expected ) ) ; <nl> + ASSERT_OK ( simple - > GetOption ( config_options_ , opt_name + " int " , & value ) ) ; <nl> + ASSERT_EQ ( expected , value ) ; <nl> + ASSERT_OK ( simple - > ConfigureOption ( config_options_ , opt_name + " string " , <nl> + expected ) ) ; <nl> + ASSERT_OK ( simple - > GetOption ( config_options_ , opt_name + " string " , & value ) ) ; <nl> + ASSERT_EQ ( expected , value ) ; <nl> + <nl> + ASSERT_NOK ( <nl> + simple - > ConfigureOption ( config_options_ , opt_name + " bad " , expected ) ) ; <nl> + ASSERT_NOK ( simple - > GetOption ( config_options_ , " bad option " , & value ) ) ; <nl> + ASSERT_TRUE ( value . empty ( ) ) ; <nl> + i + = 11 ; <nl> + } <nl> + } <nl> + <nl> + TEST_F ( ConfigurableTest , ConfigureBadOptionsTest ) { <nl> + std : : unique_ptr < Configurable > configurable ( SimpleConfigurable : : Create ( ) ) ; <nl> + auto * opts = configurable - > GetOptions < TestOptions > ( " simple " ) ; <nl> + ASSERT_NE ( opts , nullptr ) ; <nl> + ASSERT_OK ( configurable - > ConfigureOption ( config_options_ , " int " , " 42 " ) ) ; <nl> + ASSERT_EQ ( opts - > i , 42 ) ; <nl> + ASSERT_NOK ( configurable - > ConfigureOption ( config_options_ , " int " , " fred " ) ) ; <nl> + ASSERT_NOK ( configurable - > ConfigureOption ( config_options_ , " bool " , " fred " ) ) ; <nl> + ASSERT_NOK ( <nl> + configurable - > ConfigureFromString ( config_options_ , " int = 33 ; unused = u " ) ) ; <nl> + ASSERT_EQ ( opts - > i , 42 ) ; <nl> + } <nl> + <nl> + TEST_F ( ConfigurableTest , InvalidOptionTest ) { <nl> + std : : unique_ptr < Configurable > configurable ( SimpleConfigurable : : Create ( ) ) ; <nl> + std : : unordered_map < std : : string , std : : string > options_map = { <nl> + { " bad - option " , " bad " } } ; <nl> + ASSERT_NOK ( configurable - > ConfigureFromMap ( config_options_ , options_map ) ) ; <nl> + ASSERT_NOK ( <nl> + configurable - > ConfigureFromString ( config_options_ , " bad - option = bad " ) ) ; <nl> + ASSERT_NOK ( <nl> + configurable - > ConfigureOption ( config_options_ , " bad - option " , " bad " ) ) ; <nl> + } <nl> + <nl> + static std : : unordered_map < std : : string , OptionTypeInfo > validated_option_info = { <nl> + # ifndef ROCKSDB_LITE <nl> + { " validated " , <nl> + { 0 , OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kNone } } , <nl> + # endif / / ROCKSDB_LITE <nl> + } ; <nl> + static std : : 
unordered_map < std : : string , OptionTypeInfo > prepared_option_info = { <nl> + # ifndef ROCKSDB_LITE <nl> + { " prepared " , <nl> + { 0 , OptionType : : kInt , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kMutable } } , <nl> + # endif / / ROCKSDB_LITE <nl> + } ; <nl> + static std : : unordered_map < std : : string , OptionTypeInfo > <nl> + dont_prepare_option_info = { <nl> + # ifndef ROCKSDB_LITE <nl> + { " unique " , <nl> + { 0 , OptionType : : kConfigurable , OptionVerificationType : : kNormal , <nl> + ( OptionTypeFlags : : kUnique | OptionTypeFlags : : kDontPrepare ) } } , <nl> + <nl> + # endif / / ROCKSDB_LITE <nl> + } ; <nl> + <nl> + class ValidatedConfigurable : public SimpleConfigurable { <nl> + public : <nl> + ValidatedConfigurable ( const std : : string & name , unsigned char mode , <nl> + bool dont_prepare = false ) <nl> + : SimpleConfigurable ( name , TestConfigMode : : kDefaultMode ) , <nl> + validated ( false ) , <nl> + prepared ( 0 ) { <nl> + ConfigurableHelper : : RegisterOptions ( * this , " Validated " , & validated , <nl> + & validated_option_info ) ; <nl> + ConfigurableHelper : : RegisterOptions ( * this , " Prepared " , & prepared , <nl> + & prepared_option_info ) ; <nl> + if ( ( mode & TestConfigMode : : kUniqueMode ) ! = 0 ) { <nl> + unique_ . reset ( new ValidatedConfigurable ( <nl> + " Unique " + name_ , TestConfigMode : : kDefaultMode , false ) ) ; <nl> + if ( dont_prepare ) { <nl> + ConfigurableHelper : : RegisterOptions ( * this , name_ + " Unique " , & unique_ , <nl> + & dont_prepare_option_info ) ; <nl> + } else { <nl> + ConfigurableHelper : : RegisterOptions ( * this , name_ + " Unique " , & unique_ , <nl> + & unique_option_info ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + Status PrepareOptions ( const ConfigOptions & config_options ) override { <nl> + if ( + + prepared < = 0 ) { <nl> + return Status : : InvalidArgument ( " Cannot prepare option " ) ; <nl> + } else { <nl> + return SimpleConfigurable : : PrepareOptions ( config_options ) ; <nl> + } <nl> + } <nl> + <nl> + Status ValidateOptions ( const DBOptions & db_opts , <nl> + const ColumnFamilyOptions & cf_opts ) const override { <nl> + if ( ! 
validated ) { <nl> + return Status : : InvalidArgument ( " Not Validated " ) ; <nl> + } else { <nl> + return SimpleConfigurable : : ValidateOptions ( db_opts , cf_opts ) ; <nl> + } <nl> + } <nl> + <nl> + private : <nl> + bool validated ; <nl> + int prepared ; <nl> + } ; <nl> + <nl> + TEST_F ( ConfigurableTest , ValidateOptionsTest ) { <nl> + std : : unique_ptr < Configurable > configurable ( <nl> + new ValidatedConfigurable ( " validated " , TestConfigMode : : kDefaultMode ) ) ; <nl> + ColumnFamilyOptions cf_opts ; <nl> + DBOptions db_opts ; <nl> + ASSERT_OK ( <nl> + configurable - > ConfigureOption ( config_options_ , " validated " , " false " ) ) ; <nl> + ASSERT_NOK ( configurable - > ValidateOptions ( db_opts , cf_opts ) ) ; <nl> + ASSERT_OK ( <nl> + configurable - > ConfigureOption ( config_options_ , " validated " , " true " ) ) ; <nl> + ASSERT_OK ( configurable - > ValidateOptions ( db_opts , cf_opts ) ) ; <nl> + } <nl> + <nl> + TEST_F ( ConfigurableTest , PrepareOptionsTest ) { <nl> + std : : unique_ptr < Configurable > c ( <nl> + new ValidatedConfigurable ( " Simple " , TestConfigMode : : kUniqueMode , false ) ) ; <nl> + auto cp = c - > GetOptions < int > ( " Prepared " ) ; <nl> + auto u = c - > GetOptions < std : : unique_ptr < Configurable > > ( " SimpleUnique " ) ; <nl> + auto up = u - > get ( ) - > GetOptions < int > ( " Prepared " ) ; <nl> + config_options_ . invoke_prepare_options = false ; <nl> + <nl> + ASSERT_NE ( cp , nullptr ) ; <nl> + ASSERT_NE ( up , nullptr ) ; <nl> + ASSERT_EQ ( * cp , 0 ) ; <nl> + ASSERT_EQ ( * up , 0 ) ; <nl> + ASSERT_OK ( c - > ConfigureFromMap ( config_options_ , { } ) ) ; <nl> + ASSERT_EQ ( * cp , 0 ) ; <nl> + ASSERT_EQ ( * up , 0 ) ; <nl> + config_options_ . invoke_prepare_options = true ; <nl> + ASSERT_OK ( c - > ConfigureFromMap ( config_options_ , { } ) ) ; <nl> + ASSERT_EQ ( * cp , 1 ) ; <nl> + ASSERT_EQ ( * up , 1 ) ; <nl> + ASSERT_OK ( c - > ConfigureFromString ( config_options_ , " prepared = 0 " ) ) ; <nl> + ASSERT_EQ ( * up , 2 ) ; <nl> + ASSERT_EQ ( * cp , 1 ) ; <nl> + <nl> + ASSERT_NOK ( c - > ConfigureFromString ( config_options_ , " prepared = - 2 " ) ) ; <nl> + <nl> + c . reset ( <nl> + new ValidatedConfigurable ( " Simple " , TestConfigMode : : kUniqueMode , true ) ) ; <nl> + cp = c - > GetOptions < int > ( " Prepared " ) ; <nl> + u = c - > GetOptions < std : : unique_ptr < Configurable > > ( " SimpleUnique " ) ; <nl> + up = u - > get ( ) - > GetOptions < int > ( " Prepared " ) ; <nl> + <nl> + ASSERT_OK ( c - > ConfigureFromString ( config_options_ , " prepared = 0 " ) ) ; <nl> + ASSERT_EQ ( * cp , 1 ) ; <nl> + ASSERT_EQ ( * up , 0 ) ; <nl> + } <nl> + <nl> + TEST_F ( ConfigurableTest , DeprecatedOptionsTest ) { <nl> + static std : : unordered_map < std : : string , OptionTypeInfo > <nl> + deprecated_option_info = { <nl> + { " deprecated " , <nl> + { offsetof ( struct TestOptions , b ) , OptionType : : kBoolean , <nl> + OptionVerificationType : : kDeprecated , OptionTypeFlags : : kNone } } } ; <nl> + std : : unique_ptr < Configurable > orig ; <nl> + orig . 
reset ( SimpleConfigurable : : Create ( " simple " , TestConfigMode : : kDefaultMode , <nl> + & deprecated_option_info ) ) ; <nl> + auto * opts = orig - > GetOptions < TestOptions > ( " simple " ) ; <nl> + ASSERT_NE ( opts , nullptr ) ; <nl> + opts - > d = true ; <nl> + ASSERT_OK ( orig - > ConfigureOption ( config_options_ , " deprecated " , " false " ) ) ; <nl> + ASSERT_TRUE ( opts - > d ) ; <nl> + ASSERT_OK ( orig - > ConfigureFromString ( config_options_ , " deprecated = false " ) ) ; <nl> + ASSERT_TRUE ( opts - > d ) ; <nl> + } <nl> + <nl> + TEST_F ( ConfigurableTest , AliasOptionsTest ) { <nl> + static std : : unordered_map < std : : string , OptionTypeInfo > alias_option_info = { <nl> + { " bool " , <nl> + { offsetof ( struct TestOptions , b ) , OptionType : : kBoolean , <nl> + OptionVerificationType : : kNormal , OptionTypeFlags : : kNone } } , <nl> + { " alias " , <nl> + { offsetof ( struct TestOptions , b ) , OptionType : : kBoolean , <nl> + OptionVerificationType : : kAlias , OptionTypeFlags : : kNone , 0 } } } ; <nl> + std : : unique_ptr < Configurable > orig ; <nl> + orig . reset ( SimpleConfigurable : : Create ( " simple " , TestConfigMode : : kDefaultMode , <nl> + & alias_option_info ) ) ; <nl> + auto * opts = orig - > GetOptions < TestOptions > ( " simple " ) ; <nl> + ASSERT_NE ( opts , nullptr ) ; <nl> + ASSERT_OK ( orig - > ConfigureOption ( config_options_ , " bool " , " false " ) ) ; <nl> + ASSERT_FALSE ( opts - > b ) ; <nl> + ASSERT_OK ( orig - > ConfigureOption ( config_options_ , " alias " , " true " ) ) ; <nl> + ASSERT_TRUE ( opts - > b ) ; <nl> + std : : string opts_str ; <nl> + ASSERT_OK ( orig - > GetOptionString ( config_options_ , & opts_str ) ) ; <nl> + ASSERT_EQ ( opts_str . find ( " alias " ) , std : : string : : npos ) ; <nl> + <nl> + ASSERT_OK ( orig - > ConfigureOption ( config_options_ , " bool " , " false " ) ) ; <nl> + ASSERT_FALSE ( opts - > b ) ; <nl> + ASSERT_OK ( orig - > GetOption ( config_options_ , " alias " , & opts_str ) ) ; <nl> + ASSERT_EQ ( opts_str , " false " ) ; <nl> + } <nl> + <nl> + TEST_F ( ConfigurableTest , NestedUniqueConfigTest ) { <nl> + std : : unique_ptr < Configurable > simple ; <nl> + simple . reset ( <nl> + SimpleConfigurable : : Create ( " Outer " , TestConfigMode : : kAllOptMode ) ) ; <nl> + const auto outer = simple - > GetOptions < TestOptions > ( " Outer " ) ; <nl> + const auto unique = <nl> + simple - > GetOptions < std : : unique_ptr < Configurable > > ( " OuterUnique " ) ; <nl> + ASSERT_NE ( outer , nullptr ) ; <nl> + ASSERT_NE ( unique , nullptr ) ; <nl> + ASSERT_OK ( <nl> + simple - > ConfigureFromString ( config_options_ , " int = 24 ; string = outer " ) ) ; <nl> + ASSERT_OK ( simple - > ConfigureFromString ( config_options_ , <nl> + " unique = { int = 42 ; string = nested } " ) ) ; <nl> + const auto inner = unique - > get ( ) - > GetOptions < TestOptions > ( " UniqueOuter " ) ; <nl> + ASSERT_NE ( inner , nullptr ) ; <nl> + ASSERT_EQ ( outer - > i , 24 ) ; <nl> + ASSERT_EQ ( outer - > s , " outer " ) ; <nl> + ASSERT_EQ ( inner - > i , 42 ) ; <nl> + ASSERT_EQ ( inner - > s , " nested " ) ; <nl> + } <nl> + <nl> + TEST_F ( ConfigurableTest , NestedSharedConfigTest ) { <nl> + std : : unique_ptr < Configurable > simple ; <nl> + simple . 
reset ( SimpleConfigurable : : Create ( <nl> + " Outer " , TestConfigMode : : kDefaultMode | TestConfigMode : : kSharedMode ) ) ; <nl> + ASSERT_OK ( <nl> + simple - > ConfigureFromString ( config_options_ , " int = 24 ; string = outer " ) ) ; <nl> + ASSERT_OK ( simple - > ConfigureFromString ( config_options_ , <nl> + " shared = { int = 42 ; string = nested } " ) ) ; <nl> + const auto outer = simple - > GetOptions < TestOptions > ( " Outer " ) ; <nl> + const auto shared = <nl> + simple - > GetOptions < std : : shared_ptr < Configurable > > ( " OuterShared " ) ; <nl> + ASSERT_NE ( outer , nullptr ) ; <nl> + ASSERT_NE ( shared , nullptr ) ; <nl> + const auto inner = shared - > get ( ) - > GetOptions < TestOptions > ( " SharedOuter " ) ; <nl> + ASSERT_NE ( inner , nullptr ) ; <nl> + ASSERT_EQ ( outer - > i , 24 ) ; <nl> + ASSERT_EQ ( outer - > s , " outer " ) ; <nl> + ASSERT_EQ ( inner - > i , 42 ) ; <nl> + ASSERT_EQ ( inner - > s , " nested " ) ; <nl> + } <nl> + <nl> + TEST_F ( ConfigurableTest , NestedRawConfigTest ) { <nl> + std : : unique_ptr < Configurable > simple ; <nl> + simple . reset ( SimpleConfigurable : : Create ( <nl> + " Outer " , TestConfigMode : : kDefaultMode | TestConfigMode : : kRawPtrMode ) ) ; <nl> + ASSERT_OK ( <nl> + simple - > ConfigureFromString ( config_options_ , " int = 24 ; string = outer " ) ) ; <nl> + ASSERT_OK ( simple - > ConfigureFromString ( config_options_ , <nl> + " pointer = { int = 42 ; string = nested } " ) ) ; <nl> + const auto outer = simple - > GetOptions < TestOptions > ( " Outer " ) ; <nl> + const auto pointer = simple - > GetOptions < Configurable * > ( " OuterPointer " ) ; <nl> + ASSERT_NE ( outer , nullptr ) ; <nl> + ASSERT_NE ( pointer , nullptr ) ; <nl> + const auto inner = ( * pointer ) - > GetOptions < TestOptions > ( " PointerOuter " ) ; <nl> + ASSERT_NE ( inner , nullptr ) ; <nl> + ASSERT_EQ ( outer - > i , 24 ) ; <nl> + ASSERT_EQ ( outer - > s , " outer " ) ; <nl> + ASSERT_EQ ( inner - > i , 42 ) ; <nl> + ASSERT_EQ ( inner - > s , " nested " ) ; <nl> + } <nl> + <nl> + TEST_F ( ConfigurableTest , MatchesTest ) { <nl> + std : : string mismatch ; <nl> + std : : unique_ptr < Configurable > base , copy ; <nl> + base . reset ( SimpleConfigurable : : Create ( <nl> + " simple " , TestConfigMode : : kDefaultMode | TestConfigMode : : kNestedMode ) ) ; <nl> + copy . reset ( SimpleConfigurable : : Create ( <nl> + " simple " , TestConfigMode : : kDefaultMode | TestConfigMode : : kNestedMode ) ) ; <nl> + ASSERT_OK ( base - > ConfigureFromString ( <nl> + config_options_ , <nl> + " int = 11 ; string = outer ; unique = { int = 22 ; string = u } ; shared = { int = 33 ; string = s } " ) ) ; <nl> + ASSERT_OK ( copy - > ConfigureFromString ( <nl> + config_options_ , <nl> + " int = 11 ; string = outer ; unique = { int = 22 ; string = u } ; shared = { int = 33 ; string = s } " ) ) ; <nl> + ASSERT_TRUE ( base - > AreEquivalent ( config_options_ , copy . get ( ) , & mismatch ) ) ; <nl> + ASSERT_OK ( base - > ConfigureOption ( config_options_ , " shared " , " int = 44 " ) ) ; <nl> + ASSERT_FALSE ( base - > AreEquivalent ( config_options_ , copy . get ( ) , & mismatch ) ) ; <nl> + ASSERT_EQ ( mismatch , " shared . 
int " ) ; <nl> + std : : string c1value , c2value ; <nl> + ASSERT_OK ( base - > GetOption ( config_options_ , mismatch , & c1value ) ) ; <nl> + ASSERT_OK ( copy - > GetOption ( config_options_ , mismatch , & c2value ) ) ; <nl> + ASSERT_NE ( c1value , c2value ) ; <nl> + } <nl> + <nl> + static Configurable * SimpleStructFactory ( ) { <nl> + static std : : unordered_map < std : : string , OptionTypeInfo > struct_option_info = { <nl> + # ifndef ROCKSDB_LITE <nl> + { " struct " , OptionTypeInfo : : Struct ( " struct " , & simple_option_info , 0 , <nl> + OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kMutable ) } , <nl> + # endif / / ROCKSDB_LITE <nl> + } ; <nl> + return SimpleConfigurable : : Create ( <nl> + " simple - struct " , TestConfigMode : : kDefaultMode , & struct_option_info ) ; <nl> + } <nl> + <nl> + TEST_F ( ConfigurableTest , ConfigureStructTest ) { <nl> + std : : unique_ptr < Configurable > base ( SimpleStructFactory ( ) ) ; <nl> + std : : unique_ptr < Configurable > copy ( SimpleStructFactory ( ) ) ; <nl> + std : : string opt_str , value ; <nl> + std : : string mismatch ; <nl> + std : : unordered_set < std : : string > names ; <nl> + <nl> + ASSERT_OK ( <nl> + base - > ConfigureFromString ( config_options_ , " struct = { int = 10 ; string = 10 } " ) ) ; <nl> + ASSERT_OK ( base - > GetOptionString ( config_options_ , & opt_str ) ) ; <nl> + ASSERT_OK ( copy - > ConfigureFromString ( config_options_ , opt_str ) ) ; <nl> + ASSERT_TRUE ( base - > AreEquivalent ( config_options_ , copy . get ( ) , & mismatch ) ) ; <nl> + ASSERT_OK ( base - > GetOptionNames ( config_options_ , & names ) ) ; <nl> + ASSERT_EQ ( names . size ( ) , 1 ) ; <nl> + ASSERT_EQ ( * ( names . begin ( ) ) , " struct " ) ; <nl> + ASSERT_OK ( <nl> + base - > ConfigureFromString ( config_options_ , " struct = { int = 20 ; string = 20 } " ) ) ; <nl> + ASSERT_OK ( base - > GetOption ( config_options_ , " struct " , & value ) ) ; <nl> + ASSERT_OK ( copy - > ConfigureOption ( config_options_ , " struct " , value ) ) ; <nl> + ASSERT_TRUE ( base - > AreEquivalent ( config_options_ , copy . get ( ) , & mismatch ) ) ; <nl> + <nl> + ASSERT_NOK ( base - > ConfigureFromString ( config_options_ , <nl> + " struct = { int = 10 ; string = 10 ; bad = 11 } " ) ) ; <nl> + ASSERT_OK ( base - > ConfigureOption ( config_options_ , " struct . int " , " 42 " ) ) ; <nl> + ASSERT_NOK ( base - > ConfigureOption ( config_options_ , " struct . bad " , " 42 " ) ) ; <nl> + ASSERT_NOK ( base - > GetOption ( config_options_ , " struct . bad " , & value ) ) ; <nl> + ASSERT_OK ( base - > GetOption ( config_options_ , " struct . int " , & value ) ) ; <nl> + ASSERT_EQ ( value , " 42 " ) ; <nl> + } <nl> + <nl> + TEST_F ( ConfigurableTest , ConfigurableEnumTest ) { <nl> + std : : unique_ptr < Configurable > base , copy ; <nl> + base . reset ( SimpleConfigurable : : Create ( " e " , TestConfigMode : : kEnumMode ) ) ; <nl> + copy . reset ( SimpleConfigurable : : Create ( " e " , TestConfigMode : : kEnumMode ) ) ; <nl> + <nl> + std : : string opts_str ; <nl> + std : : string mismatch ; <nl> + <nl> + ASSERT_OK ( base - > ConfigureFromString ( config_options_ , " enum = B " ) ) ; <nl> + ASSERT_FALSE ( base - > AreEquivalent ( config_options_ , copy . get ( ) , & mismatch ) ) ; <nl> + ASSERT_OK ( base - > GetOptionString ( config_options_ , & opts_str ) ) ; <nl> + ASSERT_OK ( copy - > ConfigureFromString ( config_options_ , opts_str ) ) ; <nl> + ASSERT_TRUE ( base - > AreEquivalent ( config_options_ , copy . 
get ( ) , & mismatch ) ) ; <nl> + ASSERT_NOK ( base - > ConfigureOption ( config_options_ , " enum " , " bad " ) ) ; <nl> + ASSERT_NOK ( base - > ConfigureOption ( config_options_ , " unknown " , " bad " ) ) ; <nl> + } <nl> + <nl> + # ifndef ROCKSDB_LITE <nl> + static std : : unordered_map < std : : string , OptionTypeInfo > noserialize_option_info = <nl> + { <nl> + { " int " , <nl> + { offsetof ( struct TestOptions , i ) , OptionType : : kInt , <nl> + OptionVerificationType : : kNormal , OptionTypeFlags : : kDontSerialize } } , <nl> + } ; <nl> + <nl> + TEST_F ( ConfigurableTest , TestNoSerialize ) { <nl> + std : : unique_ptr < Configurable > base ; <nl> + base . reset ( SimpleConfigurable : : Create ( " c " , TestConfigMode : : kDefaultMode , <nl> + & noserialize_option_info ) ) ; <nl> + std : : string opts_str , value ; <nl> + ASSERT_OK ( base - > ConfigureFromString ( config_options_ , " int = 10 " ) ) ; <nl> + ASSERT_OK ( base - > GetOptionString ( config_options_ , & opts_str ) ) ; <nl> + ASSERT_EQ ( opts_str , " " ) ; <nl> + ASSERT_NOK ( base - > GetOption ( config_options_ , " int " , & value ) ) ; <nl> + } <nl> + <nl> + TEST_F ( ConfigurableTest , TestNoCompare ) { <nl> + std : : unordered_map < std : : string , OptionTypeInfo > nocomp_option_info = { <nl> + { " int " , <nl> + { offsetof ( struct TestOptions , i ) , OptionType : : kInt , <nl> + OptionVerificationType : : kNormal , OptionTypeFlags : : kCompareNever } } , <nl> + } ; <nl> + std : : unordered_map < std : : string , OptionTypeInfo > normal_option_info = { <nl> + { " int " , <nl> + { offsetof ( struct TestOptions , i ) , OptionType : : kInt , <nl> + OptionVerificationType : : kNormal , OptionTypeFlags : : kNone } } , <nl> + } ; <nl> + <nl> + std : : unique_ptr < Configurable > base , copy ; <nl> + base . reset ( SimpleConfigurable : : Create ( " c " , TestConfigMode : : kDefaultMode , <nl> + & nocomp_option_info ) ) ; <nl> + copy . reset ( SimpleConfigurable : : Create ( " c " , TestConfigMode : : kDefaultMode , <nl> + & normal_option_info ) ) ; <nl> + ASSERT_OK ( base - > ConfigureFromString ( config_options_ , " int = 10 " ) ) ; <nl> + ASSERT_OK ( copy - > ConfigureFromString ( config_options_ , " int = 20 " ) ) ; <nl> + std : : string bvalue , cvalue , mismatch ; <nl> + ASSERT_OK ( base - > GetOption ( config_options_ , " int " , & bvalue ) ) ; <nl> + ASSERT_OK ( copy - > GetOption ( config_options_ , " int " , & cvalue ) ) ; <nl> + ASSERT_EQ ( bvalue , " 10 " ) ; <nl> + ASSERT_EQ ( cvalue , " 20 " ) ; <nl> + ASSERT_TRUE ( base - > AreEquivalent ( config_options_ , copy . get ( ) , & mismatch ) ) ; <nl> + ASSERT_FALSE ( copy - > AreEquivalent ( config_options_ , base . get ( ) , & mismatch ) ) ; <nl> + } <nl> + # endif <nl> + <nl> + void ConfigurableParamTest : : TestConfigureOptions ( <nl> + const ConfigOptions & config_options ) { <nl> + std : : unique_ptr < Configurable > base , copy ; <nl> + std : : unordered_set < std : : string > names ; <nl> + std : : string opt_str , mismatch ; <nl> + <nl> + base . reset ( factory_ ( ) ) ; <nl> + copy . reset ( factory_ ( ) ) ; <nl> + <nl> + ASSERT_OK ( base - > ConfigureFromString ( config_options , configuration_ ) ) ; <nl> + ASSERT_OK ( base - > GetOptionString ( config_options , & opt_str ) ) ; <nl> + ASSERT_OK ( copy - > ConfigureFromString ( config_options , opt_str ) ) ; <nl> + ASSERT_OK ( copy - > GetOptionString ( config_options , & opt_str ) ) ; <nl> + ASSERT_TRUE ( base - > AreEquivalent ( config_options , copy . get ( ) , & mismatch ) ) ; <nl> + <nl> + copy . 
reset ( factory_ ( ) ) ; <nl> + ASSERT_OK ( base - > GetOptionNames ( config_options , & names ) ) ; <nl> + std : : unordered_map < std : : string , std : : string > unused ; <nl> + bool found_one = false ; <nl> + for ( auto name : names ) { <nl> + std : : string value ; <nl> + Status s = base - > GetOption ( config_options , name , & value ) ; <nl> + if ( s . ok ( ) ) { <nl> + s = copy - > ConfigureOption ( config_options , name , value ) ; <nl> + if ( s . ok ( ) | | s . IsNotSupported ( ) ) { <nl> + found_one = true ; <nl> + } else { <nl> + unused [ name ] = value ; <nl> + } <nl> + } else { <nl> + ASSERT_TRUE ( s . IsNotSupported ( ) ) ; <nl> + } <nl> + } <nl> + ASSERT_TRUE ( found_one | | names . empty ( ) ) ; <nl> + while ( found_one & & ! unused . empty ( ) ) { <nl> + found_one = false ; <nl> + for ( auto iter = unused . begin ( ) ; iter ! = unused . end ( ) ; ) { <nl> + if ( copy - > ConfigureOption ( config_options , iter - > first , iter - > second ) <nl> + . ok ( ) ) { <nl> + found_one = true ; <nl> + iter = unused . erase ( iter ) ; <nl> + } else { <nl> + + + iter ; <nl> + } <nl> + } <nl> + } <nl> + ASSERT_EQ ( 0 , unused . size ( ) ) ; <nl> + ASSERT_TRUE ( base - > AreEquivalent ( config_options , copy . get ( ) , & mismatch ) ) ; <nl> + } <nl> + <nl> + TEST_P ( ConfigurableParamTest , GetDefaultOptionsTest ) { <nl> + TestConfigureOptions ( config_options_ ) ; <nl> + } <nl> + <nl> + TEST_P ( ConfigurableParamTest , ConfigureFromPropsTest ) { <nl> + std : : string opt_str , mismatch ; <nl> + std : : unordered_set < std : : string > names ; <nl> + std : : unique_ptr < Configurable > copy ( factory_ ( ) ) ; <nl> + <nl> + ASSERT_OK ( object_ - > ConfigureFromString ( config_options_ , configuration_ ) ) ; <nl> + config_options_ . delimiter = " \ n " ; <nl> + ASSERT_OK ( object_ - > GetOptionString ( config_options_ , & opt_str ) ) ; <nl> + std : : istringstream iss ( opt_str ) ; <nl> + std : : unordered_map < std : : string , std : : string > copy_map ; <nl> + std : : string line ; <nl> + for ( int line_num = 0 ; std : : getline ( iss , line ) ; line_num + + ) { <nl> + std : : string name ; <nl> + std : : string value ; <nl> + ASSERT_OK ( <nl> + RocksDBOptionsParser : : ParseStatement ( & name , & value , line , line_num ) ) ; <nl> + copy_map [ name ] = value ; <nl> + } <nl> + ASSERT_OK ( copy - > ConfigureFromMap ( config_options_ , copy_map ) ) ; <nl> + ASSERT_TRUE ( object_ - > AreEquivalent ( config_options_ , copy . 
get ( ) , & mismatch ) ) ; <nl> + } <nl> + <nl> + static Configurable * SimpleFactory ( ) { <nl> + return SimpleConfigurable : : Create ( " simple " ) ; <nl> + } <nl> + <nl> + static Configurable * UniqueFactory ( ) { <nl> + return SimpleConfigurable : : Create ( <nl> + " simple " , TestConfigMode : : kSimpleMode | TestConfigMode : : kUniqueMode ) ; <nl> + } <nl> + static Configurable * SharedFactory ( ) { <nl> + return SimpleConfigurable : : Create ( <nl> + " simple " , TestConfigMode : : kSimpleMode | TestConfigMode : : kSharedMode ) ; <nl> + } <nl> + <nl> + static Configurable * NestedFactory ( ) { <nl> + return SimpleConfigurable : : Create ( <nl> + " simple " , TestConfigMode : : kSimpleMode | TestConfigMode : : kNestedMode ) ; <nl> + } <nl> + <nl> + static Configurable * MutableFactory ( ) { <nl> + return SimpleConfigurable : : Create ( " simple " , TestConfigMode : : kMutableMode | <nl> + TestConfigMode : : kSimpleMode | <nl> + TestConfigMode : : kNestedMode ) ; <nl> + } <nl> + <nl> + static Configurable * ThreeWrappedFactory ( ) { <nl> + std : : shared_ptr < Configurable > child ; <nl> + child . reset ( <nl> + SimpleConfigurable : : Create ( " child " , TestConfigMode : : kDefaultMode ) ) ; <nl> + std : : shared_ptr < Configurable > parent ; <nl> + parent . reset ( <nl> + new WrappedConfigurable ( " parent " , TestConfigMode : : kDefaultMode , child ) ) ; <nl> + return new WrappedConfigurable ( " master " , TestConfigMode : : kDefaultMode , <nl> + parent ) ; <nl> + } <nl> + <nl> + static Configurable * ThreeDeepFactory ( ) { <nl> + Configurable * simple = SimpleConfigurable : : Create ( <nl> + " Simple " , TestConfigMode : : kUniqueMode | TestConfigMode : : kDefaultMode ) ; <nl> + auto * unique = <nl> + simple - > GetOptions < std : : unique_ptr < Configurable > > ( " SimpleUnique " ) ; <nl> + unique - > reset ( SimpleConfigurable : : Create ( <nl> + " Child " , TestConfigMode : : kUniqueMode | TestConfigMode : : kDefaultMode ) ) ; <nl> + unique = <nl> + unique - > get ( ) - > GetOptions < std : : unique_ptr < Configurable > > ( " ChildUnique " ) ; <nl> + unique - > reset ( <nl> + SimpleConfigurable : : Create ( " Child " , TestConfigMode : : kDefaultMode ) ) ; <nl> + return simple ; <nl> + } <nl> + <nl> + static Configurable * DBOptionsFactory ( ) { <nl> + auto config = DBOptionsAsConfigurable ( DBOptions ( ) ) ; <nl> + return config . release ( ) ; <nl> + } <nl> + <nl> + static Configurable * CFOptionsFactory ( ) { <nl> + auto config = CFOptionsAsConfigurable ( ColumnFamilyOptions ( ) ) ; <nl> + return config . 
release ( ) ; <nl> + } <nl> + <nl> + static Configurable * BlockBasedFactory ( ) { return NewBlockBasedTableFactory ( ) ; } <nl> + <nl> + INSTANTIATE_TEST_CASE_P ( <nl> + ParamTest , ConfigurableParamTest , <nl> + testing : : Values ( <nl> + std : : pair < std : : string , ConfigTestFactoryFunc > ( <nl> + " int = 42 ; bool = true ; string = s " , SimpleFactory ) , <nl> + std : : pair < std : : string , ConfigTestFactoryFunc > ( <nl> + " int = 42 ; unique = { int = 33 ; string = unique } " , MutableFactory ) , <nl> + std : : pair < std : : string , ConfigTestFactoryFunc > ( <nl> + " struct = { int = 33 ; bool = true ; string = s ; } " , SimpleStructFactory ) , <nl> + std : : pair < std : : string , ConfigTestFactoryFunc > ( <nl> + " int = 33 ; bool = true ; string = outer ; " <nl> + " shared = { int = 42 ; string = shared } " , <nl> + SharedFactory ) , <nl> + std : : pair < std : : string , ConfigTestFactoryFunc > ( <nl> + " int = 33 ; bool = true ; string = outer ; " <nl> + " unique = { int = 42 ; string = unique } " , <nl> + UniqueFactory ) , <nl> + std : : pair < std : : string , ConfigTestFactoryFunc > ( <nl> + " int = 11 ; bool = true ; string = outer ; " <nl> + " pointer = { int = 22 ; string = pointer } ; " <nl> + " unique = { int = 33 ; string = unique } ; " <nl> + " shared = { int = 44 ; string = shared } " , <nl> + NestedFactory ) , <nl> + std : : pair < std : : string , ConfigTestFactoryFunc > ( <nl> + " int = 11 ; bool = true ; string = outer ; " <nl> + " inner = { int = 22 ; string = parent ; " <nl> + " inner = { int = 33 ; string = child } } ; " , <nl> + ThreeWrappedFactory ) , <nl> + std : : pair < std : : string , ConfigTestFactoryFunc > ( <nl> + " int = 11 ; bool = true ; string = outer ; " <nl> + " unique = { int = 22 ; string = inner ; " <nl> + " unique = { int = 33 ; string = unique } } ; " , <nl> + ThreeDeepFactory ) , <nl> + std : : pair < std : : string , ConfigTestFactoryFunc > ( " max_background_jobs = 100 ; " <nl> + " max_open_files = 200 ; " , <nl> + DBOptionsFactory ) , <nl> + std : : pair < std : : string , ConfigTestFactoryFunc > ( <nl> + " table_factory = BlockBasedTable ; " <nl> + " disable_auto_compactions = true ; " , <nl> + CFOptionsFactory ) , <nl> + std : : pair < std : : string , ConfigTestFactoryFunc > ( " block_size = 1024 ; " <nl> + " no_block_cache = true ; " , <nl> + BlockBasedFactory ) ) ) ; <nl> + # endif / / ROCKSDB_LITE <nl> + <nl> + } / / namespace test <nl> + } / / namespace ROCKSDB_NAMESPACE <nl> + int main ( int argc , char * * argv ) { <nl> + : : testing : : InitGoogleTest ( & argc , argv ) ; <nl> + # ifdef GFLAGS <nl> + ParseCommandLineFlags ( & argc , & argv , true ) ; <nl> + # endif / / GFLAGS <nl> + return RUN_ALL_TESTS ( ) ; <nl> + } <nl> new file mode 100644 <nl> index 0000000000 . . 52c3599f66 <nl> mmm / dev / null <nl> ppp b / options / configurable_test . h <nl> <nl> + / / Copyright ( c ) 2011 - present , Facebook , Inc . All rights reserved . <nl> + / / This source code is licensed under both the GPLv2 ( found in the <nl> + / / COPYING file in the root directory ) and Apache 2 . 0 License <nl> + / / ( found in the LICENSE . Apache file in the root directory ) . <nl> + / / <nl> + / / Copyright ( c ) 2011 The LevelDB Authors . All rights reserved . <nl> + / / Use of this source code is governed by a BSD - style license that can be <nl> + / / found in the LICENSE file . See the AUTHORS file for names of contributors . 
<nl> + <nl> + # pragma once <nl> + # include < algorithm > <nl> + # include < memory > <nl> + # include < unordered_map > <nl> + <nl> + # include " options / configurable_helper . h " <nl> + # include " rocksdb / configurable . h " <nl> + # include " rocksdb / utilities / options_type . h " <nl> + <nl> + namespace ROCKSDB_NAMESPACE { <nl> + struct ColumnFamilyOptions ; <nl> + struct DBOptions ; <nl> + <nl> + namespace test { <nl> + enum TestEnum { kTestA , kTestB } ; <nl> + <nl> + static const std : : unordered_map < std : : string , int > test_enum_map = { <nl> + { " A " , TestEnum : : kTestA } , <nl> + { " B " , TestEnum : : kTestB } , <nl> + } ; <nl> + <nl> + struct TestOptions { <nl> + int i = 0 ; <nl> + bool b = false ; <nl> + bool d = true ; <nl> + TestEnum e = TestEnum : : kTestA ; <nl> + std : : string s = " " ; <nl> + std : : string u = " " ; <nl> + } ; <nl> + <nl> + static std : : unordered_map < std : : string , OptionTypeInfo > simple_option_info = { <nl> + # ifndef ROCKSDB_LITE <nl> + { " int " , <nl> + { offsetof ( struct TestOptions , i ) , OptionType : : kInt , <nl> + OptionVerificationType : : kNormal , OptionTypeFlags : : kMutable } } , <nl> + { " bool " , <nl> + { offsetof ( struct TestOptions , b ) , OptionType : : kBoolean , <nl> + OptionVerificationType : : kNormal , OptionTypeFlags : : kNone } } , <nl> + { " string " , <nl> + { offsetof ( struct TestOptions , s ) , OptionType : : kString , <nl> + OptionVerificationType : : kNormal , OptionTypeFlags : : kNone } } , <nl> + # endif / / ROCKSDB_LITE <nl> + } ; <nl> + <nl> + static std : : unordered_map < std : : string , OptionTypeInfo > enum_option_info = { <nl> + # ifndef ROCKSDB_LITE <nl> + { " enum " , <nl> + OptionTypeInfo : : Enum ( offsetof ( struct TestOptions , e ) , & test_enum_map ) } <nl> + # endif <nl> + } ; <nl> + <nl> + static std : : unordered_map < std : : string , OptionTypeInfo > unique_option_info = { <nl> + # ifndef ROCKSDB_LITE <nl> + { " unique " , <nl> + { 0 , OptionType : : kConfigurable , OptionVerificationType : : kNormal , <nl> + ( OptionTypeFlags : : kUnique | OptionTypeFlags : : kMutable ) } } , <nl> + # endif / / ROCKSDB_LITE <nl> + } ; <nl> + <nl> + static std : : unordered_map < std : : string , OptionTypeInfo > shared_option_info = { <nl> + # ifndef ROCKSDB_LITE <nl> + { " shared " , <nl> + { 0 , OptionType : : kConfigurable , OptionVerificationType : : kNormal , <nl> + ( OptionTypeFlags : : kShared ) } } , <nl> + # endif / / ROCKSDB_LITE <nl> + } ; <nl> + static std : : unordered_map < std : : string , OptionTypeInfo > pointer_option_info = { <nl> + # ifndef ROCKSDB_LITE <nl> + { " pointer " , <nl> + { 0 , OptionType : : kConfigurable , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kRawPointer } } , <nl> + # endif / / ROCKSDB_LITE <nl> + } ; <nl> + <nl> + enum TestConfigMode { <nl> + kEmptyMode = 0x0 , / / Don ' t register anything <nl> + kMutableMode = 0x01 , / / Configuration is mutable <nl> + kSimpleMode = 0x02 , / / Use the simple options <nl> + kEnumMode = 0x04 , / / Use the enum options <nl> + kDefaultMode = kSimpleMode , / / Use no inner nested configurations <nl> + kSharedMode = 0x10 , / / Use shared configuration <nl> + kUniqueMode = 0x20 , / / Use unique configuration <nl> + kRawPtrMode = 0x40 , / / Use pointer configuration <nl> + kNestedMode = ( kSharedMode | kUniqueMode | kRawPtrMode ) , <nl> + kAllOptMode = ( kNestedMode | kEnumMode | kSimpleMode ) , <nl> + } ; <nl> + <nl> + template < typename T > <nl> + class TestConfigurable : public Configurable { 
<nl> + protected : <nl> + std : : string name_ ; <nl> + std : : string prefix_ ; <nl> + TestOptions options_ ; <nl> + <nl> + public : <nl> + std : : unique_ptr < T > unique_ ; <nl> + std : : shared_ptr < T > shared_ ; <nl> + T * pointer_ ; <nl> + <nl> + TestConfigurable ( const std : : string & name , int mode , <nl> + const std : : unordered_map < std : : string , OptionTypeInfo > * map = <nl> + & simple_option_info ) <nl> + : name_ ( name ) , pointer_ ( nullptr ) { <nl> + prefix_ = " test . " + name + " . " ; <nl> + if ( ( mode & TestConfigMode : : kSimpleMode ) ! = 0 ) { <nl> + ConfigurableHelper : : RegisterOptions ( * this , name_ , & options_ , map ) ; <nl> + } <nl> + if ( ( mode & TestConfigMode : : kEnumMode ) ! = 0 ) { <nl> + ConfigurableHelper : : RegisterOptions ( * this , name_ + " Enum " , & options_ , <nl> + & enum_option_info ) ; <nl> + } <nl> + } <nl> + <nl> + ~ TestConfigurable ( ) override { delete pointer_ ; } <nl> + } ; <nl> + <nl> + } / / namespace test <nl> + } / / namespace ROCKSDB_NAMESPACE <nl> mmm a / options / db_options . cc <nl> ppp b / options / db_options . cc <nl> <nl> # include < cinttypes > <nl> <nl> # include " logging / logging . h " <nl> + # include " options / configurable_helper . h " <nl> # include " options / options_helper . h " <nl> + # include " options / options_parser . h " <nl> # include " port / port . h " <nl> - # include " rocksdb / cache . h " <nl> + # include " rocksdb / configurable . h " <nl> # include " rocksdb / env . h " <nl> # include " rocksdb / file_system . h " <nl> # include " rocksdb / rate_limiter . h " <nl> # include " rocksdb / sst_file_manager . h " <nl> + # include " rocksdb / utilities / options_type . h " <nl> # include " rocksdb / wal_filter . h " <nl> + # include " util / string_util . 
h " <nl> <nl> namespace ROCKSDB_NAMESPACE { <nl> # ifndef ROCKSDB_LITE <nl> static std : : unordered_map < std : : string , InfoLogLevel > info_log_level_string_map = <nl> { " FATAL_LEVEL " , InfoLogLevel : : FATAL_LEVEL } , <nl> { " HEADER_LEVEL " , InfoLogLevel : : HEADER_LEVEL } } ; <nl> <nl> - std : : unordered_map < std : : string , OptionTypeInfo > <nl> - OptionsHelper : : db_options_type_info = { <nl> + static std : : unordered_map < std : : string , OptionTypeInfo > <nl> + db_mutable_options_type_info = { <nl> + { " allow_os_buffer " , <nl> + { 0 , OptionType : : kBoolean , OptionVerificationType : : kDeprecated , <nl> + OptionTypeFlags : : kMutable } } , <nl> + { " max_background_jobs " , <nl> + { offsetof ( struct MutableDBOptions , max_background_jobs ) , <nl> + OptionType : : kInt , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kMutable } } , <nl> + { " max_background_compactions " , <nl> + { offsetof ( struct MutableDBOptions , max_background_compactions ) , <nl> + OptionType : : kInt , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kMutable } } , <nl> + { " base_background_compactions " , <nl> + { offsetof ( struct MutableDBOptions , base_background_compactions ) , <nl> + OptionType : : kInt , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kMutable } } , <nl> + { " max_subcompactions " , <nl> + { offsetof ( struct MutableDBOptions , max_subcompactions ) , <nl> + OptionType : : kUInt32T , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kMutable } } , <nl> + { " avoid_flush_during_shutdown " , <nl> + { offsetof ( struct MutableDBOptions , avoid_flush_during_shutdown ) , <nl> + OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kMutable } } , <nl> + { " writable_file_max_buffer_size " , <nl> + { offsetof ( struct MutableDBOptions , writable_file_max_buffer_size ) , <nl> + OptionType : : kSizeT , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kMutable } } , <nl> + { " delayed_write_rate " , <nl> + { offsetof ( struct MutableDBOptions , delayed_write_rate ) , <nl> + OptionType : : kUInt64T , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kMutable } } , <nl> + { " max_total_wal_size " , <nl> + { offsetof ( struct MutableDBOptions , max_total_wal_size ) , <nl> + OptionType : : kUInt64T , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kMutable } } , <nl> + { " delete_obsolete_files_period_micros " , <nl> + { offsetof ( struct MutableDBOptions , <nl> + delete_obsolete_files_period_micros ) , <nl> + OptionType : : kUInt64T , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kMutable } } , <nl> + { " stats_dump_period_sec " , <nl> + { offsetof ( struct MutableDBOptions , stats_dump_period_sec ) , <nl> + OptionType : : kUInt , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kMutable } } , <nl> + { " stats_persist_period_sec " , <nl> + { offsetof ( struct MutableDBOptions , stats_persist_period_sec ) , <nl> + OptionType : : kUInt , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kMutable } } , <nl> + { " stats_history_buffer_size " , <nl> + { offsetof ( struct MutableDBOptions , stats_history_buffer_size ) , <nl> + OptionType : : kSizeT , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kMutable } } , <nl> + { " max_open_files " , <nl> + { offsetof ( struct MutableDBOptions , max_open_files ) , OptionType : : kInt , <nl> + OptionVerificationType : : kNormal , 
OptionTypeFlags : : kMutable } } , <nl> + { " bytes_per_sync " , <nl> + { offsetof ( struct MutableDBOptions , bytes_per_sync ) , <nl> + OptionType : : kUInt64T , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kMutable } } , <nl> + { " wal_bytes_per_sync " , <nl> + { offsetof ( struct MutableDBOptions , wal_bytes_per_sync ) , <nl> + OptionType : : kUInt64T , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kMutable } } , <nl> + { " strict_bytes_per_sync " , <nl> + { offsetof ( struct MutableDBOptions , strict_bytes_per_sync ) , <nl> + OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kMutable } } , <nl> + { " compaction_readahead_size " , <nl> + { offsetof ( struct MutableDBOptions , compaction_readahead_size ) , <nl> + OptionType : : kSizeT , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kMutable } } , <nl> + { " max_background_flushes " , <nl> + { offsetof ( struct MutableDBOptions , max_background_flushes ) , <nl> + OptionType : : kInt , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kMutable } } , <nl> + } ; <nl> + <nl> + static std : : unordered_map < std : : string , OptionTypeInfo > <nl> + db_immutable_options_type_info = { <nl> / * <nl> / / not yet supported <nl> std : : shared_ptr < Cache > row_cache ; <nl> std : : unordered_map < std : : string , OptionTypeInfo > <nl> std : : vector < std : : shared_ptr < EventListener > > listeners ; <nl> * / <nl> { " advise_random_on_open " , <nl> - { offsetof ( struct DBOptions , advise_random_on_open ) , <nl> + { offsetof ( struct ImmutableDBOptions , advise_random_on_open ) , <nl> OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " allow_mmap_reads " , <nl> - { offsetof ( struct DBOptions , allow_mmap_reads ) , OptionType : : kBoolean , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kNone , 0 } } , <nl> + { offsetof ( struct ImmutableDBOptions , allow_mmap_reads ) , <nl> + OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kNone } } , <nl> { " allow_fallocate " , <nl> - { offsetof ( struct DBOptions , allow_fallocate ) , OptionType : : kBoolean , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kNone , 0 } } , <nl> + { offsetof ( struct ImmutableDBOptions , allow_fallocate ) , <nl> + OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kNone } } , <nl> { " allow_mmap_writes " , <nl> - { offsetof ( struct DBOptions , allow_mmap_writes ) , OptionType : : kBoolean , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kNone , 0 } } , <nl> + { offsetof ( struct ImmutableDBOptions , allow_mmap_writes ) , <nl> + OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kNone } } , <nl> { " use_direct_reads " , <nl> - { offsetof ( struct DBOptions , use_direct_reads ) , OptionType : : kBoolean , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kNone , 0 } } , <nl> + { offsetof ( struct ImmutableDBOptions , use_direct_reads ) , <nl> + OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kNone } } , <nl> { " use_direct_writes " , <nl> { 0 , OptionType : : kBoolean , OptionVerificationType : : kDeprecated , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " use_direct_io_for_flush_and_compaction " , <nl> - { 
offsetof ( struct DBOptions , use_direct_io_for_flush_and_compaction ) , <nl> + { offsetof ( struct ImmutableDBOptions , <nl> + use_direct_io_for_flush_and_compaction ) , <nl> OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " allow_2pc " , <nl> - { offsetof ( struct DBOptions , allow_2pc ) , OptionType : : kBoolean , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kNone , 0 } } , <nl> - { " allow_os_buffer " , <nl> - { 0 , OptionType : : kBoolean , OptionVerificationType : : kDeprecated , <nl> - OptionTypeFlags : : kMutable , 0 } } , <nl> + { offsetof ( struct ImmutableDBOptions , allow_2pc ) , OptionType : : kBoolean , <nl> + OptionVerificationType : : kNormal , OptionTypeFlags : : kNone } } , <nl> { " create_if_missing " , <nl> - { offsetof ( struct DBOptions , create_if_missing ) , OptionType : : kBoolean , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kNone , 0 } } , <nl> + { offsetof ( struct ImmutableDBOptions , create_if_missing ) , <nl> + OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kNone } } , <nl> { " create_missing_column_families " , <nl> - { offsetof ( struct DBOptions , create_missing_column_families ) , <nl> + { offsetof ( struct ImmutableDBOptions , create_missing_column_families ) , <nl> OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " disableDataSync " , <nl> { 0 , OptionType : : kBoolean , OptionVerificationType : : kDeprecated , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " disable_data_sync " , / / for compatibility <nl> { 0 , OptionType : : kBoolean , OptionVerificationType : : kDeprecated , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " enable_thread_tracking " , <nl> - { offsetof ( struct DBOptions , enable_thread_tracking ) , <nl> + { offsetof ( struct ImmutableDBOptions , enable_thread_tracking ) , <nl> OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " error_if_exists " , <nl> - { offsetof ( struct DBOptions , error_if_exists ) , OptionType : : kBoolean , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kNone , 0 } } , <nl> + { offsetof ( struct ImmutableDBOptions , error_if_exists ) , <nl> + OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kNone } } , <nl> { " is_fd_close_on_exec " , <nl> - { offsetof ( struct DBOptions , is_fd_close_on_exec ) , OptionType : : kBoolean , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kNone , 0 } } , <nl> + { offsetof ( struct ImmutableDBOptions , is_fd_close_on_exec ) , <nl> + OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kNone } } , <nl> { " paranoid_checks " , <nl> - { offsetof ( struct DBOptions , paranoid_checks ) , OptionType : : kBoolean , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kNone , 0 } } , <nl> - { " skip_log_error_on_recovery " , <nl> - { offsetof ( struct DBOptions , skip_log_error_on_recovery ) , <nl> + { offsetof ( struct ImmutableDBOptions , paranoid_checks ) , <nl> OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kNone } 
} , <nl> + { " skip_log_error_on_recovery " , <nl> + { 0 , OptionType : : kBoolean , OptionVerificationType : : kDeprecated , <nl> + OptionTypeFlags : : kNone } } , <nl> { " skip_stats_update_on_db_open " , <nl> - { offsetof ( struct DBOptions , skip_stats_update_on_db_open ) , <nl> + { offsetof ( struct ImmutableDBOptions , skip_stats_update_on_db_open ) , <nl> OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " skip_checking_sst_file_sizes_on_db_open " , <nl> - { offsetof ( struct DBOptions , skip_checking_sst_file_sizes_on_db_open ) , <nl> + { offsetof ( struct ImmutableDBOptions , <nl> + skip_checking_sst_file_sizes_on_db_open ) , <nl> OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " new_table_reader_for_compaction_inputs " , <nl> - { offsetof ( struct DBOptions , new_table_reader_for_compaction_inputs ) , <nl> + { offsetof ( struct ImmutableDBOptions , <nl> + new_table_reader_for_compaction_inputs ) , <nl> OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> - { " compaction_readahead_size " , <nl> - { offsetof ( struct DBOptions , compaction_readahead_size ) , <nl> - OptionType : : kSizeT , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kMutable , <nl> - offsetof ( struct MutableDBOptions , compaction_readahead_size ) } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " random_access_max_buffer_size " , <nl> - { offsetof ( struct DBOptions , random_access_max_buffer_size ) , <nl> + { offsetof ( struct ImmutableDBOptions , random_access_max_buffer_size ) , <nl> OptionType : : kSizeT , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " use_adaptive_mutex " , <nl> - { offsetof ( struct DBOptions , use_adaptive_mutex ) , OptionType : : kBoolean , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kNone , 0 } } , <nl> + { offsetof ( struct ImmutableDBOptions , use_adaptive_mutex ) , <nl> + OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kNone } } , <nl> { " use_fsync " , <nl> - { offsetof ( struct DBOptions , use_fsync ) , OptionType : : kBoolean , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kNone , 0 } } , <nl> - { " max_background_jobs " , <nl> - { offsetof ( struct DBOptions , max_background_jobs ) , OptionType : : kInt , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kMutable , <nl> - offsetof ( struct MutableDBOptions , max_background_jobs ) } } , <nl> - { " max_background_compactions " , <nl> - { offsetof ( struct DBOptions , max_background_compactions ) , <nl> - OptionType : : kInt , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kMutable , <nl> - offsetof ( struct MutableDBOptions , max_background_compactions ) } } , <nl> - { " max_subcompactions " , <nl> - { offsetof ( struct DBOptions , max_subcompactions ) , OptionType : : kUInt32T , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kMutable , <nl> - offsetof ( struct MutableDBOptions , max_subcompactions ) } } , <nl> - { " base_background_compactions " , <nl> - { offsetof ( struct DBOptions , base_background_compactions ) , <nl> - OptionType : : kInt , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kMutable , <nl> - offsetof ( struct 
MutableDBOptions , base_background_compactions ) } } , <nl> - { " max_background_flushes " , <nl> - { offsetof ( struct DBOptions , max_background_flushes ) , OptionType : : kInt , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kMutable , <nl> - offsetof ( struct MutableDBOptions , max_background_flushes ) } } , <nl> + { offsetof ( struct ImmutableDBOptions , use_fsync ) , OptionType : : kBoolean , <nl> + OptionVerificationType : : kNormal , OptionTypeFlags : : kNone } } , <nl> { " max_file_opening_threads " , <nl> - { offsetof ( struct DBOptions , max_file_opening_threads ) , <nl> + { offsetof ( struct ImmutableDBOptions , max_file_opening_threads ) , <nl> OptionType : : kInt , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> - { " max_open_files " , <nl> - { offsetof ( struct DBOptions , max_open_files ) , OptionType : : kInt , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kMutable , <nl> - offsetof ( struct MutableDBOptions , max_open_files ) } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " table_cache_numshardbits " , <nl> - { offsetof ( struct DBOptions , table_cache_numshardbits ) , <nl> + { offsetof ( struct ImmutableDBOptions , table_cache_numshardbits ) , <nl> OptionType : : kInt , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " db_write_buffer_size " , <nl> - { offsetof ( struct DBOptions , db_write_buffer_size ) , OptionType : : kSizeT , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kNone , 0 } } , <nl> + { offsetof ( struct ImmutableDBOptions , db_write_buffer_size ) , <nl> + OptionType : : kSizeT , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kNone } } , <nl> { " keep_log_file_num " , <nl> - { offsetof ( struct DBOptions , keep_log_file_num ) , OptionType : : kSizeT , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kNone , 0 } } , <nl> + { offsetof ( struct ImmutableDBOptions , keep_log_file_num ) , <nl> + OptionType : : kSizeT , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kNone } } , <nl> { " recycle_log_file_num " , <nl> - { offsetof ( struct DBOptions , recycle_log_file_num ) , OptionType : : kSizeT , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kNone , 0 } } , <nl> + { offsetof ( struct ImmutableDBOptions , recycle_log_file_num ) , <nl> + OptionType : : kSizeT , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kNone } } , <nl> { " log_file_time_to_roll " , <nl> - { offsetof ( struct DBOptions , log_file_time_to_roll ) , OptionType : : kSizeT , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kNone , 0 } } , <nl> + { offsetof ( struct ImmutableDBOptions , log_file_time_to_roll ) , <nl> + OptionType : : kSizeT , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kNone } } , <nl> { " manifest_preallocation_size " , <nl> - { offsetof ( struct DBOptions , manifest_preallocation_size ) , <nl> + { offsetof ( struct ImmutableDBOptions , manifest_preallocation_size ) , <nl> OptionType : : kSizeT , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " max_log_file_size " , <nl> - { offsetof ( struct DBOptions , max_log_file_size ) , OptionType : : kSizeT , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kNone , 0 } } , <nl> + { offsetof ( struct ImmutableDBOptions , max_log_file_size ) , <nl> + OptionType : : 
kSizeT , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kNone } } , <nl> { " db_log_dir " , <nl> - { offsetof ( struct DBOptions , db_log_dir ) , OptionType : : kString , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kNone , 0 } } , <nl> + { offsetof ( struct ImmutableDBOptions , db_log_dir ) , OptionType : : kString , <nl> + OptionVerificationType : : kNormal , OptionTypeFlags : : kNone } } , <nl> { " wal_dir " , <nl> - { offsetof ( struct DBOptions , wal_dir ) , OptionType : : kString , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kNone , 0 } } , <nl> + { offsetof ( struct ImmutableDBOptions , wal_dir ) , OptionType : : kString , <nl> + OptionVerificationType : : kNormal , OptionTypeFlags : : kNone } } , <nl> { " WAL_size_limit_MB " , <nl> - { offsetof ( struct DBOptions , WAL_size_limit_MB ) , OptionType : : kUInt64T , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kNone , 0 } } , <nl> + { offsetof ( struct ImmutableDBOptions , wal_size_limit_mb ) , <nl> + OptionType : : kUInt64T , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kNone } } , <nl> { " WAL_ttl_seconds " , <nl> - { offsetof ( struct DBOptions , WAL_ttl_seconds ) , OptionType : : kUInt64T , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kNone , 0 } } , <nl> - { " bytes_per_sync " , <nl> - { offsetof ( struct DBOptions , bytes_per_sync ) , OptionType : : kUInt64T , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kMutable , <nl> - offsetof ( struct MutableDBOptions , bytes_per_sync ) } } , <nl> - { " delayed_write_rate " , <nl> - { offsetof ( struct DBOptions , delayed_write_rate ) , OptionType : : kUInt64T , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kMutable , <nl> - offsetof ( struct MutableDBOptions , delayed_write_rate ) } } , <nl> - { " delete_obsolete_files_period_micros " , <nl> - { offsetof ( struct DBOptions , delete_obsolete_files_period_micros ) , <nl> + { offsetof ( struct ImmutableDBOptions , wal_ttl_seconds ) , <nl> OptionType : : kUInt64T , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kMutable , <nl> - offsetof ( struct MutableDBOptions , <nl> - delete_obsolete_files_period_micros ) } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " max_manifest_file_size " , <nl> - { offsetof ( struct DBOptions , max_manifest_file_size ) , <nl> + { offsetof ( struct ImmutableDBOptions , max_manifest_file_size ) , <nl> OptionType : : kUInt64T , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> - { " max_total_wal_size " , <nl> - { offsetof ( struct DBOptions , max_total_wal_size ) , OptionType : : kUInt64T , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kMutable , <nl> - offsetof ( struct MutableDBOptions , max_total_wal_size ) } } , <nl> - { " wal_bytes_per_sync " , <nl> - { offsetof ( struct DBOptions , wal_bytes_per_sync ) , OptionType : : kUInt64T , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kMutable , <nl> - offsetof ( struct MutableDBOptions , wal_bytes_per_sync ) } } , <nl> - { " strict_bytes_per_sync " , <nl> - { offsetof ( struct DBOptions , strict_bytes_per_sync ) , <nl> - OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kMutable , <nl> - offsetof ( struct MutableDBOptions , strict_bytes_per_sync ) } } , <nl> - { " stats_dump_period_sec " , <nl> - { offsetof ( struct DBOptions , stats_dump_period_sec ) , OptionType : : kUInt , <nl> - 
OptionVerificationType : : kNormal , OptionTypeFlags : : kMutable , <nl> - offsetof ( struct MutableDBOptions , stats_dump_period_sec ) } } , <nl> - { " stats_persist_period_sec " , <nl> - { offsetof ( struct DBOptions , stats_persist_period_sec ) , <nl> - OptionType : : kUInt , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kMutable , <nl> - offsetof ( struct MutableDBOptions , stats_persist_period_sec ) } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " persist_stats_to_disk " , <nl> - { offsetof ( struct DBOptions , persist_stats_to_disk ) , <nl> + { offsetof ( struct ImmutableDBOptions , persist_stats_to_disk ) , <nl> OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , <nl> - offsetof ( struct ImmutableDBOptions , persist_stats_to_disk ) } } , <nl> - { " stats_history_buffer_size " , <nl> - { offsetof ( struct DBOptions , stats_history_buffer_size ) , <nl> - OptionType : : kSizeT , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kMutable , <nl> - offsetof ( struct MutableDBOptions , stats_history_buffer_size ) } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " fail_if_options_file_error " , <nl> - { offsetof ( struct DBOptions , fail_if_options_file_error ) , <nl> + { offsetof ( struct ImmutableDBOptions , fail_if_options_file_error ) , <nl> OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " enable_pipelined_write " , <nl> - { offsetof ( struct DBOptions , enable_pipelined_write ) , <nl> + { offsetof ( struct ImmutableDBOptions , enable_pipelined_write ) , <nl> OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " unordered_write " , <nl> - { offsetof ( struct DBOptions , unordered_write ) , OptionType : : kBoolean , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kNone , 0 } } , <nl> + { offsetof ( struct ImmutableDBOptions , unordered_write ) , <nl> + OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kNone } } , <nl> { " allow_concurrent_memtable_write " , <nl> - { offsetof ( struct DBOptions , allow_concurrent_memtable_write ) , <nl> + { offsetof ( struct ImmutableDBOptions , allow_concurrent_memtable_write ) , <nl> OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> - { " wal_recovery_mode " , OptionTypeInfo : : Enum < WALRecoveryMode > ( <nl> - offsetof ( struct DBOptions , wal_recovery_mode ) , <nl> - & wal_recovery_mode_string_map ) } , <nl> + OptionTypeFlags : : kNone } } , <nl> + { " wal_recovery_mode " , <nl> + OptionTypeInfo : : Enum < WALRecoveryMode > ( <nl> + offsetof ( struct ImmutableDBOptions , wal_recovery_mode ) , <nl> + & wal_recovery_mode_string_map ) } , <nl> { " enable_write_thread_adaptive_yield " , <nl> - { offsetof ( struct DBOptions , enable_write_thread_adaptive_yield ) , <nl> + { offsetof ( struct ImmutableDBOptions , <nl> + enable_write_thread_adaptive_yield ) , <nl> OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " write_thread_slow_yield_usec " , <nl> - { offsetof ( struct DBOptions , write_thread_slow_yield_usec ) , <nl> + { offsetof ( struct ImmutableDBOptions , write_thread_slow_yield_usec ) , <nl> OptionType : : kUInt64T , OptionVerificationType : : 
kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " max_write_batch_group_size_bytes " , <nl> - { offsetof ( struct DBOptions , max_write_batch_group_size_bytes ) , <nl> + { offsetof ( struct ImmutableDBOptions , max_write_batch_group_size_bytes ) , <nl> OptionType : : kUInt64T , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " write_thread_max_yield_usec " , <nl> - { offsetof ( struct DBOptions , write_thread_max_yield_usec ) , <nl> + { offsetof ( struct ImmutableDBOptions , write_thread_max_yield_usec ) , <nl> OptionType : : kUInt64T , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " access_hint_on_compaction_start " , <nl> OptionTypeInfo : : Enum < DBOptions : : AccessHint > ( <nl> - offsetof ( struct DBOptions , access_hint_on_compaction_start ) , <nl> + offsetof ( struct ImmutableDBOptions , <nl> + access_hint_on_compaction_start ) , <nl> & access_hint_string_map ) } , <nl> - { " info_log_level " , OptionTypeInfo : : Enum < InfoLogLevel > ( <nl> - offsetof ( struct DBOptions , info_log_level ) , <nl> - & info_log_level_string_map ) } , <nl> + { " info_log_level " , <nl> + OptionTypeInfo : : Enum < InfoLogLevel > ( <nl> + offsetof ( struct ImmutableDBOptions , info_log_level ) , <nl> + & info_log_level_string_map ) } , <nl> { " dump_malloc_stats " , <nl> - { offsetof ( struct DBOptions , dump_malloc_stats ) , OptionType : : kBoolean , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kNone , 0 } } , <nl> - { " avoid_flush_during_recovery " , <nl> - { offsetof ( struct DBOptions , avoid_flush_during_recovery ) , <nl> + { offsetof ( struct ImmutableDBOptions , dump_malloc_stats ) , <nl> OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> - { " avoid_flush_during_shutdown " , <nl> - { offsetof ( struct DBOptions , avoid_flush_during_shutdown ) , <nl> + OptionTypeFlags : : kNone } } , <nl> + { " avoid_flush_during_recovery " , <nl> + { offsetof ( struct ImmutableDBOptions , avoid_flush_during_recovery ) , <nl> OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kMutable , <nl> - offsetof ( struct MutableDBOptions , avoid_flush_during_shutdown ) } } , <nl> - { " writable_file_max_buffer_size " , <nl> - { offsetof ( struct DBOptions , writable_file_max_buffer_size ) , <nl> - OptionType : : kSizeT , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kMutable , <nl> - offsetof ( struct MutableDBOptions , writable_file_max_buffer_size ) } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " allow_ingest_behind " , <nl> - { offsetof ( struct DBOptions , allow_ingest_behind ) , OptionType : : kBoolean , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kNone , <nl> - offsetof ( struct ImmutableDBOptions , allow_ingest_behind ) } } , <nl> + { offsetof ( struct ImmutableDBOptions , allow_ingest_behind ) , <nl> + OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kNone } } , <nl> { " preserve_deletes " , <nl> - { offsetof ( struct DBOptions , preserve_deletes ) , OptionType : : kBoolean , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kNone , <nl> - offsetof ( struct ImmutableDBOptions , preserve_deletes ) } } , <nl> + { offsetof ( struct ImmutableDBOptions , preserve_deletes ) , <nl> + OptionType : : 
kBoolean , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kNone } } , <nl> { " concurrent_prepare " , / / Deprecated by two_write_queues <nl> { 0 , OptionType : : kBoolean , OptionVerificationType : : kDeprecated , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " two_write_queues " , <nl> - { offsetof ( struct DBOptions , two_write_queues ) , OptionType : : kBoolean , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kNone , <nl> - offsetof ( struct ImmutableDBOptions , two_write_queues ) } } , <nl> + { offsetof ( struct ImmutableDBOptions , two_write_queues ) , <nl> + OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kNone } } , <nl> { " manual_wal_flush " , <nl> - { offsetof ( struct DBOptions , manual_wal_flush ) , OptionType : : kBoolean , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kNone , <nl> - offsetof ( struct ImmutableDBOptions , manual_wal_flush ) } } , <nl> + { offsetof ( struct ImmutableDBOptions , manual_wal_flush ) , <nl> + OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kNone } } , <nl> { " seq_per_batch " , <nl> { 0 , OptionType : : kBoolean , OptionVerificationType : : kDeprecated , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " atomic_flush " , <nl> - { offsetof ( struct DBOptions , atomic_flush ) , OptionType : : kBoolean , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kNone , <nl> - offsetof ( struct ImmutableDBOptions , atomic_flush ) } } , <nl> + { offsetof ( struct ImmutableDBOptions , atomic_flush ) , <nl> + OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kNone } } , <nl> { " avoid_unnecessary_blocking_io " , <nl> - { offsetof ( struct DBOptions , avoid_unnecessary_blocking_io ) , <nl> + { offsetof ( struct ImmutableDBOptions , avoid_unnecessary_blocking_io ) , <nl> OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , <nl> - offsetof ( struct ImmutableDBOptions , avoid_unnecessary_blocking_io ) } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " write_dbid_to_manifest " , <nl> - { offsetof ( struct DBOptions , write_dbid_to_manifest ) , <nl> + { offsetof ( struct ImmutableDBOptions , write_dbid_to_manifest ) , <nl> OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " log_readahead_size " , <nl> - { offsetof ( struct DBOptions , log_readahead_size ) , OptionType : : kSizeT , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kNone , 0 } } , <nl> + { offsetof ( struct ImmutableDBOptions , log_readahead_size ) , <nl> + OptionType : : kSizeT , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kNone } } , <nl> { " best_efforts_recovery " , <nl> - { offsetof ( struct DBOptions , best_efforts_recovery ) , <nl> + { offsetof ( struct ImmutableDBOptions , best_efforts_recovery ) , <nl> OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " max_bgerror_resume_count " , <nl> - { offsetof ( struct DBOptions , max_bgerror_resume_count ) , <nl> + { offsetof ( struct ImmutableDBOptions , max_bgerror_resume_count ) , <nl> OptionType : : kInt , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + 
OptionTypeFlags : : kNone } } , <nl> { " bgerror_resume_retry_interval " , <nl> - { offsetof ( struct DBOptions , bgerror_resume_retry_interval ) , <nl> + { offsetof ( struct ImmutableDBOptions , bgerror_resume_retry_interval ) , <nl> OptionType : : kUInt64T , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kNone } } , <nl> / / The following properties were handled as special cases in ParseOption <nl> / / This means that the properties could be read from the options file <nl> / / but never written to the file or compared to each other . <nl> { " rate_limiter_bytes_per_sec " , <nl> - { offsetof ( struct DBOptions , rate_limiter ) , OptionType : : kUnknown , <nl> - OptionVerificationType : : kNormal , <nl> - ( OptionTypeFlags : : kDontSerialize | OptionTypeFlags : : kCompareNever ) , 0 , <nl> + { offsetof ( struct ImmutableDBOptions , rate_limiter ) , <nl> + OptionType : : kUnknown , OptionVerificationType : : kNormal , <nl> + ( OptionTypeFlags : : kDontSerialize | OptionTypeFlags : : kCompareNever ) , <nl> / / Parse the input value as a RateLimiter <nl> [ ] ( const ConfigOptions & / * opts * / , const std : : string & / * name * / , <nl> const std : : string & value , char * addr ) { <nl> std : : unordered_map < std : : string , OptionTypeInfo > <nl> return Status : : OK ( ) ; <nl> } } } , <nl> { " env " , <nl> - { offsetof ( struct DBOptions , env ) , OptionType : : kUnknown , <nl> + { offsetof ( struct ImmutableDBOptions , env ) , OptionType : : kUnknown , <nl> OptionVerificationType : : kNormal , <nl> - ( OptionTypeFlags : : kDontSerialize | OptionTypeFlags : : kCompareNever ) , 0 , <nl> + ( OptionTypeFlags : : kDontSerialize | OptionTypeFlags : : kCompareNever ) , <nl> / / Parse the input value as an Env <nl> [ ] ( const ConfigOptions & / * opts * / , const std : : string & / * name * / , <nl> const std : : string & value , char * addr ) { <nl> std : : unordered_map < std : : string , OptionTypeInfo > <nl> return s ; <nl> } } } , <nl> } ; <nl> + <nl> + const std : : string OptionsHelper : : kDBOptionsName = " DBOptions " ; <nl> + <nl> + class MutableDBConfigurable : public Configurable { <nl> + public : <nl> + MutableDBConfigurable ( const MutableDBOptions & mdb ) { <nl> + mutable_ = mdb ; <nl> + ConfigurableHelper : : RegisterOptions ( * this , & mutable_ , <nl> + & db_mutable_options_type_info ) ; <nl> + } <nl> + <nl> + protected : <nl> + MutableDBOptions mutable_ ; <nl> + } ; <nl> + <nl> + class DBOptionsConfigurable : public MutableDBConfigurable { <nl> + public : <nl> + DBOptionsConfigurable ( const DBOptions & opts ) <nl> + : MutableDBConfigurable ( MutableDBOptions ( opts ) ) , db_options_ ( opts ) { <nl> + / / The ImmutableDBOptions currently requires the env to be non - null . Make <nl> + / / sure it is <nl> + if ( opts . env ! = nullptr ) { <nl> + immutable_ = ImmutableDBOptions ( opts ) ; <nl> + } else { <nl> + DBOptions copy = opts ; <nl> + copy . 
env = Env : : Default ( ) ; <nl> + immutable_ = ImmutableDBOptions ( copy ) ; <nl> + } <nl> + ConfigurableHelper : : RegisterOptions ( * this , & immutable_ , <nl> + & db_immutable_options_type_info ) ; <nl> + } <nl> + <nl> + protected : <nl> + Status ConfigureOptions ( <nl> + const ConfigOptions & config_options , <nl> + const std : : unordered_map < std : : string , std : : string > & opts_map , <nl> + std : : unordered_map < std : : string , std : : string > * unused ) override { <nl> + Status s = ConfigurableHelper : : ConfigureOptions ( config_options , * this , <nl> + opts_map , unused ) ; <nl> + if ( s . ok ( ) ) { <nl> + db_options_ = BuildDBOptions ( immutable_ , mutable_ ) ; <nl> + s = PrepareOptions ( config_options ) ; <nl> + } <nl> + return s ; <nl> + } <nl> + <nl> + const void * GetOptionsPtr ( const std : : string & name ) const override { <nl> + if ( name = = OptionsHelper : : kDBOptionsName ) { <nl> + return & db_options_ ; <nl> + } else { <nl> + return MutableDBConfigurable : : GetOptionsPtr ( name ) ; <nl> + } <nl> + } <nl> + <nl> + private : <nl> + ImmutableDBOptions immutable_ ; <nl> + DBOptions db_options_ ; <nl> + } ; <nl> + <nl> + std : : unique_ptr < Configurable > DBOptionsAsConfigurable ( <nl> + const MutableDBOptions & opts ) { <nl> + std : : unique_ptr < Configurable > ptr ( new MutableDBConfigurable ( opts ) ) ; <nl> + return ptr ; <nl> + } <nl> + std : : unique_ptr < Configurable > DBOptionsAsConfigurable ( const DBOptions & opts ) { <nl> + std : : unique_ptr < Configurable > ptr ( new DBOptionsConfigurable ( opts ) ) ; <nl> + return ptr ; <nl> + } <nl> # endif / / ROCKSDB_LITE <nl> <nl> ImmutableDBOptions : : ImmutableDBOptions ( ) : ImmutableDBOptions ( Options ( ) ) { } <nl> mmm a / options / db_options . h <nl> ppp b / options / db_options . h <nl> <nl> namespace ROCKSDB_NAMESPACE { <nl> <nl> struct ImmutableDBOptions { <nl> + static const char * kName ( ) { return " ImmutableDBOptions " ; } <nl> ImmutableDBOptions ( ) ; <nl> explicit ImmutableDBOptions ( const DBOptions & options ) ; <nl> <nl> struct ImmutableDBOptions { <nl> } ; <nl> <nl> struct MutableDBOptions { <nl> + static const char * kName ( ) { return " MutableDBOptions " ; } <nl> MutableDBOptions ( ) ; <nl> explicit MutableDBOptions ( const MutableDBOptions & options ) = default ; <nl> explicit MutableDBOptions ( const DBOptions & options ) ; <nl> mmm a / options / options . cc <nl> ppp b / options / options . cc <nl> void ColumnFamilyOptions : : Dump ( Logger * log ) const { <nl> ROCKS_LOG_HEADER ( log , " Options . table_factory : % s " , <nl> table_factory - > Name ( ) ) ; <nl> ROCKS_LOG_HEADER ( log , " table_factory options : % s " , <nl> - table_factory - > GetPrintableTableOptions ( ) . c_str ( ) ) ; <nl> + table_factory - > GetPrintableOptions ( ) . c_str ( ) ) ; <nl> ROCKS_LOG_HEADER ( log , " Options . write_buffer_size : % " ROCKSDB_PRIszt , <nl> write_buffer_size ) ; <nl> ROCKS_LOG_HEADER ( log , " Options . max_write_buffer_number : % d " , <nl> mmm a / options / options_helper . cc <nl> ppp b / options / options_helper . cc <nl> <nl> # include < unordered_set > <nl> # include < vector > <nl> <nl> - # include " options / options_type . h " <nl> + # include " options / cf_options . h " <nl> + # include " options / db_options . h " <nl> # include " rocksdb / cache . h " <nl> # include " rocksdb / compaction_filter . h " <nl> # include " rocksdb / convenience . h " <nl> # include " rocksdb / filter_policy . h " <nl> + # include " rocksdb / flush_block_policy . 
h " <nl> # include " rocksdb / memtablerep . h " <nl> # include " rocksdb / merge_operator . h " <nl> # include " rocksdb / options . h " <nl> <nl> # include " rocksdb / slice_transform . h " <nl> # include " rocksdb / table . h " <nl> # include " rocksdb / utilities / object_registry . h " <nl> - # include " table / block_based / block_based_table_factory . h " <nl> - # include " table / plain / plain_table_factory . h " <nl> + # include " rocksdb / utilities / options_type . h " <nl> # include " util / string_util . h " <nl> <nl> namespace ROCKSDB_NAMESPACE { <nl> + Status ValidateOptions ( const DBOptions & db_opts , <nl> + const ColumnFamilyOptions & cf_opts ) { <nl> + Status s ; <nl> + # ifndef ROCKSDB_LITE <nl> + auto db_cfg = DBOptionsAsConfigurable ( db_opts ) ; <nl> + auto cf_cfg = CFOptionsAsConfigurable ( cf_opts ) ; <nl> + s = db_cfg - > ValidateOptions ( db_opts , cf_opts ) ; <nl> + if ( s . ok ( ) ) s = cf_cfg - > ValidateOptions ( db_opts , cf_opts ) ; <nl> + # else <nl> + s = cf_opts . table_factory - > ValidateOptions ( db_opts , cf_opts ) ; <nl> + # endif <nl> + return s ; <nl> + } <nl> <nl> DBOptions BuildDBOptions ( const ImmutableDBOptions & immutable_db_options , <nl> const MutableDBOptions & mutable_db_options ) { <nl> bool ParseSliceTransform ( <nl> return false ; <nl> } <nl> <nl> - bool ParseOptionHelper ( char * opt_address , const OptionType & opt_type , <nl> - const std : : string & value ) { <nl> + static bool ParseOptionHelper ( char * opt_address , const OptionType & opt_type , <nl> + const std : : string & value ) { <nl> switch ( opt_type ) { <nl> case OptionType : : kBoolean : <nl> * reinterpret_cast < bool * > ( opt_address ) = ParseBoolean ( " " , value ) ; <nl> bool SerializeSingleOptionHelper ( const char * opt_address , <nl> : kNullptrString ; <nl> break ; <nl> } <nl> - case OptionType : : kTableFactory : { <nl> - const auto * table_factory_ptr = <nl> - reinterpret_cast < const std : : shared_ptr < const TableFactory > * > ( <nl> - opt_address ) ; <nl> - * value = table_factory_ptr - > get ( ) ? table_factory_ptr - > get ( ) - > Name ( ) <nl> - : kNullptrString ; <nl> - break ; <nl> - } <nl> case OptionType : : kComparator : { <nl> / / it ' s a const pointer of const Comparator * <nl> const auto * ptr = reinterpret_cast < const Comparator * const * > ( opt_address ) ; <nl> bool SerializeSingleOptionHelper ( const char * opt_address , <nl> return true ; <nl> } <nl> <nl> + template < typename T > <nl> + Status ConfigureFromMap ( <nl> + const ConfigOptions & config_options , <nl> + const std : : unordered_map < std : : string , std : : string > & opt_map , <nl> + const std : : string & option_name , Configurable * config , T * new_opts ) { <nl> + Status s = config - > ConfigureFromMap ( config_options , opt_map ) ; <nl> + if ( s . ok ( ) ) { <nl> + * new_opts = * ( config - > GetOptions < T > ( option_name ) ) ; <nl> + } <nl> + return s ; <nl> + } <nl> + <nl> Status GetMutableOptionsFromStrings ( <nl> const MutableCFOptions & base_options , <nl> const std : : unordered_map < std : : string , std : : string > & options_map , <nl> - Logger * info_log , MutableCFOptions * new_options ) { <nl> + Logger * / * info_log * / , MutableCFOptions * new_options ) { <nl> assert ( new_options ) ; <nl> * new_options = base_options ; <nl> ConfigOptions config_options ; <nl> - for ( const auto & o : options_map ) { <nl> - std : : string elem ; <nl> - const auto opt_info = <nl> - OptionTypeInfo : : Find ( o . 
first , cf_options_type_info , & elem ) ; <nl> - if ( opt_info = = nullptr ) { <nl> - return Status : : InvalidArgument ( " Unrecognized option : " + o . first ) ; <nl> - } else if ( ! opt_info - > IsMutable ( ) ) { <nl> - return Status : : InvalidArgument ( " Option not changeable : " + o . first ) ; <nl> - } else if ( opt_info - > IsDeprecated ( ) ) { <nl> - / / log warning when user tries to set a deprecated option but don ' t fail <nl> - / / the call for compatibility . <nl> - ROCKS_LOG_WARN ( info_log , " % s is a deprecated option and cannot be set " , <nl> - o . first . c_str ( ) ) ; <nl> - } else { <nl> - Status s = opt_info - > Parse ( <nl> - config_options , elem , o . second , <nl> - reinterpret_cast < char * > ( new_options ) + opt_info - > mutable_offset_ ) ; <nl> - if ( ! s . ok ( ) ) { <nl> - return s ; <nl> - } <nl> - } <nl> - } <nl> - return Status : : OK ( ) ; <nl> + const auto config = CFOptionsAsConfigurable ( base_options ) ; <nl> + return ConfigureFromMap < MutableCFOptions > ( config_options , options_map , <nl> + MutableCFOptions : : kName ( ) , <nl> + config . get ( ) , new_options ) ; <nl> } <nl> <nl> Status GetMutableDBOptionsFromStrings ( <nl> Status GetMutableDBOptionsFromStrings ( <nl> * new_options = base_options ; <nl> ConfigOptions config_options ; <nl> <nl> - for ( const auto & o : options_map ) { <nl> - try { <nl> - std : : string elem ; <nl> - const auto opt_info = <nl> - OptionTypeInfo : : Find ( o . first , db_options_type_info , & elem ) ; <nl> - if ( opt_info = = nullptr ) { <nl> - return Status : : InvalidArgument ( " Unrecognized option : " + o . first ) ; <nl> - } else if ( ! opt_info - > IsMutable ( ) ) { <nl> - return Status : : InvalidArgument ( " Option not changeable : " + o . first ) ; <nl> - } else { <nl> - Status s = opt_info - > Parse ( <nl> - config_options , elem , o . second , <nl> - reinterpret_cast < char * > ( new_options ) + opt_info - > mutable_offset_ ) ; <nl> - if ( ! s . ok ( ) ) { <nl> - return s ; <nl> - } <nl> - } <nl> - } catch ( std : : exception & e ) { <nl> - return Status : : InvalidArgument ( " Error parsing " + o . first + " : " + <nl> - std : : string ( e . what ( ) ) ) ; <nl> - } <nl> - } <nl> - return Status : : OK ( ) ; <nl> + auto config = DBOptionsAsConfigurable ( base_options ) ; <nl> + return ConfigureFromMap < MutableDBOptions > ( config_options , options_map , <nl> + MutableDBOptions : : kName ( ) , <nl> + config . get ( ) , new_options ) ; <nl> } <nl> <nl> Status StringToMap ( const std : : string & opts_str , <nl> Status StringToMap ( const std : : string & opts_str , <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> - Status GetStringFromStruct ( <nl> - const ConfigOptions & config_options , const void * const opt_ptr , <nl> - const std : : unordered_map < std : : string , OptionTypeInfo > & type_info , <nl> - std : : string * opt_string ) { <nl> - assert ( opt_string ) ; <nl> - opt_string - > clear ( ) ; <nl> - for ( const auto & iter : type_info ) { <nl> - const auto & opt_info = iter . second ; <nl> - / / If the option is no longer used in rocksdb and marked as deprecated , <nl> - / / we skip it in the serialization . <nl> - if ( opt_info . ShouldSerialize ( ) ) { <nl> - const char * opt_addr = <nl> - reinterpret_cast < const char * > ( opt_ptr ) + opt_info . offset_ ; <nl> - std : : string value ; <nl> - Status s = <nl> - opt_info . Serialize ( config_options , iter . first , opt_addr , & value ) ; <nl> - if ( s . ok ( ) ) { <nl> - opt_string - > append ( iter . first + " = " + value + config_options . 
delimiter ) ; <nl> - } else { <nl> - return s ; <nl> - } <nl> - } <nl> - } <nl> - return Status : : OK ( ) ; <nl> + Status GetStringFromMutableDBOptions ( const ConfigOptions & config_options , <nl> + const MutableDBOptions & mutable_opts , <nl> + std : : string * opt_string ) { <nl> + auto config = DBOptionsAsConfigurable ( mutable_opts ) ; <nl> + return config - > GetOptionString ( config_options , opt_string ) ; <nl> } <nl> <nl> Status GetStringFromDBOptions ( std : : string * opt_string , <nl> Status GetStringFromDBOptions ( std : : string * opt_string , <nl> return GetStringFromDBOptions ( config_options , db_options , opt_string ) ; <nl> } <nl> <nl> - Status GetStringFromDBOptions ( const ConfigOptions & cfg_options , <nl> + Status GetStringFromDBOptions ( const ConfigOptions & config_options , <nl> const DBOptions & db_options , <nl> std : : string * opt_string ) { <nl> - return GetStringFromStruct ( cfg_options , & db_options , db_options_type_info , <nl> - opt_string ) ; <nl> + assert ( opt_string ) ; <nl> + opt_string - > clear ( ) ; <nl> + auto config = DBOptionsAsConfigurable ( db_options ) ; <nl> + return config - > GetOptionString ( config_options , opt_string ) ; <nl> + } <nl> + <nl> + Status GetStringFromMutableCFOptions ( const ConfigOptions & config_options , <nl> + const MutableCFOptions & mutable_opts , <nl> + std : : string * opt_string ) { <nl> + assert ( opt_string ) ; <nl> + opt_string - > clear ( ) ; <nl> + const auto config = CFOptionsAsConfigurable ( mutable_opts ) ; <nl> + return config - > GetOptionString ( config_options , opt_string ) ; <nl> } <nl> <nl> Status GetStringFromColumnFamilyOptions ( std : : string * opt_string , <nl> Status GetStringFromColumnFamilyOptions ( std : : string * opt_string , <nl> Status GetStringFromColumnFamilyOptions ( const ConfigOptions & config_options , <nl> const ColumnFamilyOptions & cf_options , <nl> std : : string * opt_string ) { <nl> - return GetStringFromStruct ( config_options , & cf_options , cf_options_type_info , <nl> - opt_string ) ; <nl> + const auto config = CFOptionsAsConfigurable ( cf_options ) ; <nl> + return config - > GetOptionString ( config_options , opt_string ) ; <nl> } <nl> <nl> Status GetStringFromCompressionType ( std : : string * compression_str , <nl> Status GetStringFromCompressionType ( std : : string * compression_str , <nl> } <nl> } <nl> <nl> - static Status ParseDBOption ( const ConfigOptions & config_options , <nl> - const std : : string & name , <nl> - const std : : string & org_value , <nl> - DBOptions * new_options ) { <nl> - const std : : string & value = config_options . input_strings_escaped <nl> - ? 
UnescapeOptionString ( org_value ) <nl> - : org_value ; <nl> - std : : string elem ; <nl> - const auto opt_info = OptionTypeInfo : : Find ( name , db_options_type_info , & elem ) ; <nl> - if ( opt_info = = nullptr ) { <nl> - return Status : : InvalidArgument ( " Unrecognized option DBOptions : " , name ) ; <nl> - } else { <nl> - return opt_info - > Parse ( <nl> - config_options , elem , value , <nl> - reinterpret_cast < char * > ( new_options ) + opt_info - > offset_ ) ; <nl> - } <nl> - } <nl> - <nl> Status GetColumnFamilyOptionsFromMap ( <nl> const ColumnFamilyOptions & base_options , <nl> const std : : unordered_map < std : : string , std : : string > & opts_map , <nl> Status GetColumnFamilyOptionsFromMap ( <nl> const std : : unordered_map < std : : string , std : : string > & opts_map , <nl> ColumnFamilyOptions * new_options ) { <nl> assert ( new_options ) ; <nl> + <nl> * new_options = base_options ; <nl> - for ( const auto & o : opts_map ) { <nl> - auto s = <nl> - ParseColumnFamilyOption ( config_options , o . first , o . second , new_options ) ; <nl> - if ( ! s . ok ( ) ) { <nl> - if ( s . IsNotSupported ( ) ) { <nl> - continue ; <nl> - } else if ( s . IsInvalidArgument ( ) & & <nl> - config_options . ignore_unknown_options ) { <nl> - continue ; <nl> - } else { <nl> - / / Restore " new_options " to the default " base_options " . <nl> - * new_options = base_options ; <nl> - return s ; <nl> - } <nl> - } <nl> - } <nl> - return Status : : OK ( ) ; <nl> + <nl> + const auto config = CFOptionsAsConfigurable ( base_options ) ; <nl> + return ConfigureFromMap < ColumnFamilyOptions > ( config_options , opts_map , <nl> + OptionsHelper : : kCFOptionsName , <nl> + config . get ( ) , new_options ) ; <nl> } <nl> <nl> Status GetColumnFamilyOptionsFromString ( <nl> Status GetDBOptionsFromMap ( <nl> const ConfigOptions & config_options , const DBOptions & base_options , <nl> const std : : unordered_map < std : : string , std : : string > & opts_map , <nl> DBOptions * new_options ) { <nl> - return GetDBOptionsFromMapInternal ( config_options , base_options , opts_map , <nl> - new_options , nullptr ) ; <nl> - } <nl> - <nl> - Status GetDBOptionsFromMapInternal ( <nl> - const ConfigOptions & config_options , const DBOptions & base_options , <nl> - const std : : unordered_map < std : : string , std : : string > & opts_map , <nl> - DBOptions * new_options , <nl> - std : : vector < std : : string > * unsupported_options_names ) { <nl> assert ( new_options ) ; <nl> * new_options = base_options ; <nl> - if ( unsupported_options_names ) { <nl> - unsupported_options_names - > clear ( ) ; <nl> - } <nl> - for ( const auto & o : opts_map ) { <nl> - auto s = ParseDBOption ( config_options , o . first , o . second , new_options ) ; <nl> - if ( ! s . ok ( ) ) { <nl> - if ( s . IsNotSupported ( ) ) { <nl> - / / If the deserialization of the specified option is not supported <nl> - / / and an output vector of unsupported_options is provided , then <nl> - / / we log the name of the unsupported option and proceed . <nl> - if ( unsupported_options_names ! = nullptr ) { <nl> - unsupported_options_names - > push_back ( o . first ) ; <nl> - } <nl> - / / Note that we still return Status : : OK in such case to maintain <nl> - / / the backward compatibility in the old public API defined in <nl> - / / rocksdb / convenience . h <nl> - } else if ( s . IsInvalidArgument ( ) & & <nl> - config_options . ignore_unknown_options ) { <nl> - continue ; <nl> - } else { <nl> - / / Restore " new_options " to the default " base_options " . 
<nl> - * new_options = base_options ; <nl> - return s ; <nl> - } <nl> - } <nl> - } <nl> - return Status : : OK ( ) ; <nl> + auto config = DBOptionsAsConfigurable ( base_options ) ; <nl> + return ConfigureFromMap < DBOptions > ( config_options , opts_map , <nl> + OptionsHelper : : kDBOptionsName , <nl> + config . get ( ) , new_options ) ; <nl> } <nl> <nl> Status GetDBOptionsFromString ( const DBOptions & base_options , <nl> Status GetOptionsFromString ( const Options & base_options , <nl> Status GetOptionsFromString ( const ConfigOptions & config_options , <nl> const Options & base_options , <nl> const std : : string & opts_str , Options * new_options ) { <nl> + ColumnFamilyOptions new_cf_options ; <nl> + std : : unordered_map < std : : string , std : : string > unused_opts ; <nl> std : : unordered_map < std : : string , std : : string > opts_map ; <nl> + <nl> + * new_options = base_options ; <nl> Status s = StringToMap ( opts_str , & opts_map ) ; <nl> if ( ! s . ok ( ) ) { <nl> return s ; <nl> } <nl> - DBOptions new_db_options ( base_options ) ; <nl> - ColumnFamilyOptions new_cf_options ( base_options ) ; <nl> - for ( const auto & o : opts_map ) { <nl> - if ( ParseDBOption ( config_options , o . first , o . second , & new_db_options ) <nl> - . ok ( ) ) { <nl> - } else if ( ParseColumnFamilyOption ( config_options , o . first , o . second , <nl> - & new_cf_options ) <nl> - . ok ( ) ) { <nl> + auto config = DBOptionsAsConfigurable ( base_options ) ; <nl> + s = config - > ConfigureFromMap ( config_options , opts_map , & unused_opts ) ; <nl> + <nl> + if ( s . ok ( ) ) { <nl> + DBOptions * new_db_options = <nl> + config - > GetOptions < DBOptions > ( OptionsHelper : : kDBOptionsName ) ; <nl> + if ( ! unused_opts . empty ( ) ) { <nl> + s = GetColumnFamilyOptionsFromMap ( config_options , base_options , <nl> + unused_opts , & new_cf_options ) ; <nl> + if ( s . ok ( ) ) { <nl> + * new_options = Options ( * new_db_options , new_cf_options ) ; <nl> + } <nl> } else { <nl> - return Status : : InvalidArgument ( " Can ' t parse option " + o . first ) ; <nl> - } <nl> - } <nl> - * new_options = Options ( new_db_options , new_cf_options ) ; <nl> - return Status : : OK ( ) ; <nl> - } <nl> - <nl> - Status GetTableFactoryFromMap ( <nl> - const std : : string & factory_name , <nl> - const std : : unordered_map < std : : string , std : : string > & opt_map , <nl> - std : : shared_ptr < TableFactory > * table_factory , bool ignore_unknown_options ) { <nl> - ConfigOptions <nl> - config_options ; / / Use default for escaped ( true ) and check ( exact ) <nl> - config_options . ignore_unknown_options = ignore_unknown_options ; <nl> - return GetTableFactoryFromMap ( config_options , factory_name , opt_map , <nl> - table_factory ) ; <nl> - } <nl> - <nl> - Status GetTableFactoryFromMap ( <nl> - const ConfigOptions & config_options , const std : : string & factory_name , <nl> - const std : : unordered_map < std : : string , std : : string > & opt_map , <nl> - std : : shared_ptr < TableFactory > * table_factory ) { <nl> - Status s ; <nl> - if ( factory_name = = BlockBasedTableFactory : : kName ) { <nl> - BlockBasedTableOptions bbt_opt ; <nl> - s = GetBlockBasedTableOptionsFromMap ( <nl> - config_options , BlockBasedTableOptions ( ) , opt_map , & bbt_opt ) ; <nl> - if ( ! s . 
ok ( ) ) { <nl> - return s ; <nl> - } <nl> - table_factory - > reset ( new BlockBasedTableFactory ( bbt_opt ) ) ; <nl> - return s ; <nl> - } else if ( factory_name = = PlainTableFactory : : kName ) { <nl> - PlainTableOptions pt_opt ; <nl> - s = GetPlainTableOptionsFromMap ( config_options , PlainTableOptions ( ) , <nl> - opt_map , & pt_opt ) ; <nl> - if ( ! s . ok ( ) ) { <nl> - return s ; <nl> + * new_options = Options ( * new_db_options , base_options ) ; <nl> } <nl> - table_factory - > reset ( new PlainTableFactory ( pt_opt ) ) ; <nl> - return s ; <nl> } <nl> - / / Return OK for not supported table factories as TableFactory <nl> - / / Deserialization is optional . <nl> - table_factory - > reset ( ) ; <nl> return s ; <nl> } <nl> <nl> Status OptionTypeInfo : : NextToken ( const std : : string & opts , char delimiter , <nl> <nl> Status OptionTypeInfo : : Parse ( const ConfigOptions & config_options , <nl> const std : : string & opt_name , <nl> - const std : : string & opt_value , <nl> - char * opt_addr ) const { <nl> + const std : : string & value , void * opt_ptr ) const { <nl> if ( IsDeprecated ( ) ) { <nl> return Status : : OK ( ) ; <nl> } <nl> try { <nl> + char * opt_addr = reinterpret_cast < char * > ( opt_ptr ) + offset_ ; <nl> + const std : : string & opt_value = config_options . input_strings_escaped <nl> + ? UnescapeOptionString ( value ) <nl> + : value ; <nl> + <nl> if ( opt_addr = = nullptr ) { <nl> return Status : : NotFound ( " Could not find option " , opt_name ) ; <nl> } else if ( parse_func_ ! = nullptr ) { <nl> - return parse_func_ ( config_options , opt_name , opt_value , opt_addr ) ; <nl> + ConfigOptions copy = config_options ; <nl> + copy . invoke_prepare_options = false ; <nl> + return parse_func_ ( copy , opt_name , opt_value , opt_addr ) ; <nl> } else if ( ParseOptionHelper ( opt_addr , type_ , opt_value ) ) { <nl> return Status : : OK ( ) ; <nl> + } else if ( IsConfigurable ( ) ) { <nl> + / / The option is < config > . < name > <nl> + Configurable * config = AsRawPointer < Configurable > ( opt_ptr ) ; <nl> + if ( opt_value . empty ( ) ) { <nl> + return Status : : OK ( ) ; <nl> + } else if ( config = = nullptr ) { <nl> + return Status : : NotFound ( " Could not find configurable : " , opt_name ) ; <nl> + } else { <nl> + ConfigOptions copy = config_options ; <nl> + copy . ignore_unknown_options = false ; <nl> + copy . invoke_prepare_options = false ; <nl> + if ( opt_value . find ( " = " ) ! = std : : string : : npos ) { <nl> + return config - > ConfigureFromString ( copy , opt_value ) ; <nl> + } else { <nl> + return config - > ConfigureOption ( copy , opt_name , opt_value ) ; <nl> + } <nl> + } <nl> } else if ( IsByName ( ) ) { <nl> return Status : : NotSupported ( " Deserializing the option " + opt_name + <nl> " is not supported " ) ; <nl> Status OptionTypeInfo : : ParseStruct ( <nl> } <nl> const auto iter = struct_map - > find ( map_iter . first ) ; <nl> if ( iter ! = struct_map - > end ( ) ) { <nl> - status = <nl> - iter - > second . Parse ( config_options , map_iter . first , map_iter . second , <nl> - opt_addr + iter - > second . offset_ ) ; <nl> + status = iter - > second . Parse ( config_options , map_iter . first , <nl> + map_iter . second , opt_addr ) ; <nl> } else { <nl> status = Status : : InvalidArgument ( " Unrecognized option " , <nl> struct_name + " . " + map_iter . first ) ; <nl> Status OptionTypeInfo : : ParseStruct ( <nl> const auto opt_info = <nl> Find ( opt_name . substr ( struct_name . 
size ( ) + 1 ) , * struct_map , & elem_name ) ; <nl> if ( opt_info ! = nullptr ) { <nl> - status = opt_info - > Parse ( config_options , elem_name , opt_value , <nl> - opt_addr + opt_info - > offset_ ) ; <nl> + status = opt_info - > Parse ( config_options , elem_name , opt_value , opt_addr ) ; <nl> } else { <nl> status = Status : : InvalidArgument ( " Unrecognized option " , opt_name ) ; <nl> } <nl> Status OptionTypeInfo : : ParseStruct ( <nl> std : : string elem_name ; <nl> const auto opt_info = Find ( opt_name , * struct_map , & elem_name ) ; <nl> if ( opt_info ! = nullptr ) { <nl> - status = opt_info - > Parse ( config_options , elem_name , opt_value , <nl> - opt_addr + opt_info - > offset_ ) ; <nl> + status = opt_info - > Parse ( config_options , elem_name , opt_value , opt_addr ) ; <nl> } else { <nl> status = Status : : InvalidArgument ( " Unrecognized option " , <nl> struct_name + " . " + opt_name ) ; <nl> Status OptionTypeInfo : : ParseStruct ( <nl> <nl> Status OptionTypeInfo : : Serialize ( const ConfigOptions & config_options , <nl> const std : : string & opt_name , <nl> - const char * opt_addr , <nl> + const void * const opt_ptr , <nl> std : : string * opt_value ) const { <nl> / / If the option is no longer used in rocksdb and marked as deprecated , <nl> / / we skip it in the serialization . <nl> - if ( opt_addr ! = nullptr & & ShouldSerialize ( ) ) { <nl> - if ( serialize_func_ ! = nullptr ) { <nl> - return serialize_func_ ( config_options , opt_name , opt_addr , opt_value ) ; <nl> - } else if ( ! SerializeSingleOptionHelper ( opt_addr , type_ , opt_value ) ) { <nl> - return Status : : InvalidArgument ( " Cannot serialize option " , opt_name ) ; <nl> + const char * opt_addr = reinterpret_cast < const char * > ( opt_ptr ) + offset_ ; <nl> + if ( opt_addr = = nullptr | | IsDeprecated ( ) ) { <nl> + return Status : : OK ( ) ; <nl> + } else if ( IsEnabled ( OptionTypeFlags : : kDontSerialize ) ) { <nl> + return Status : : NotSupported ( " Cannot serialize option : " , opt_name ) ; <nl> + } else if ( serialize_func_ ! = nullptr ) { <nl> + return serialize_func_ ( config_options , opt_name , opt_addr , opt_value ) ; <nl> + } else if ( SerializeSingleOptionHelper ( opt_addr , type_ , opt_value ) ) { <nl> + return Status : : OK ( ) ; <nl> + } else if ( IsConfigurable ( ) ) { <nl> + const Configurable * config = AsRawPointer < Configurable > ( opt_ptr ) ; <nl> + if ( config ! = nullptr ) { <nl> + ConfigOptions embedded = config_options ; <nl> + embedded . delimiter = " ; " ; <nl> + * opt_value = config - > ToString ( embedded ) ; <nl> } <nl> + return Status : : OK ( ) ; <nl> + } else { <nl> + return Status : : InvalidArgument ( " Cannot serialize option : " , opt_name ) ; <nl> } <nl> - return Status : : OK ( ) ; <nl> } <nl> <nl> Status OptionTypeInfo : : SerializeStruct ( <nl> Status OptionTypeInfo : : SerializeStruct ( <nl> std : : string single ; <nl> const auto & opt_info = iter . second ; <nl> if ( opt_info . ShouldSerialize ( ) ) { <nl> - status = opt_info . Serialize ( embedded , iter . first , <nl> - opt_addr + opt_info . offset_ , & single ) ; <nl> + status = opt_info . Serialize ( embedded , iter . first , opt_addr , & single ) ; <nl> if ( ! status . ok ( ) ) { <nl> return status ; <nl> } else { <nl> Status OptionTypeInfo : : SerializeStruct ( <nl> const auto opt_info = <nl> Find ( opt_name . substr ( struct_name . size ( ) + 1 ) , * struct_map , & elem_name ) ; <nl> if ( opt_info ! 
= nullptr ) { <nl> - status = opt_info - > Serialize ( config_options , elem_name , <nl> - opt_addr + opt_info - > offset_ , value ) ; <nl> + status = opt_info - > Serialize ( config_options , elem_name , opt_addr , value ) ; <nl> } else { <nl> status = Status : : InvalidArgument ( " Unrecognized option " , opt_name ) ; <nl> } <nl> Status OptionTypeInfo : : SerializeStruct ( <nl> status = Status : : InvalidArgument ( " Unrecognized option " , opt_name ) ; <nl> } else if ( opt_info - > ShouldSerialize ( ) ) { <nl> status = opt_info - > Serialize ( config_options , opt_name + " . " + elem_name , <nl> - opt_addr + opt_info - > offset_ , value ) ; <nl> + opt_addr , value ) ; <nl> } <nl> } <nl> return status ; <nl> static bool AreOptionsEqual ( OptionType type , const char * this_offset , <nl> <nl> bool OptionTypeInfo : : AreEqual ( const ConfigOptions & config_options , <nl> const std : : string & opt_name , <nl> - const char * this_addr , const char * that_addr , <nl> + const void * const this_ptr , <nl> + const void * const that_ptr , <nl> std : : string * mismatch ) const { <nl> - if ( ! config_options . IsCheckEnabled ( GetSanityLevel ( ) ) ) { <nl> + auto level = GetSanityLevel ( ) ; <nl> + if ( ! config_options . IsCheckEnabled ( level ) ) { <nl> return true ; / / If the sanity level is not being checked , skip it <nl> } <nl> + const auto this_addr = reinterpret_cast < const char * > ( this_ptr ) + offset_ ; <nl> + const auto that_addr = reinterpret_cast < const char * > ( that_ptr ) + offset_ ; <nl> if ( this_addr = = nullptr | | that_addr = = nullptr ) { <nl> if ( this_addr = = that_addr ) { <nl> return true ; <nl> bool OptionTypeInfo : : AreEqual ( const ConfigOptions & config_options , <nl> } <nl> } else if ( AreOptionsEqual ( type_ , this_addr , that_addr ) ) { <nl> return true ; <nl> + } else if ( IsConfigurable ( ) ) { <nl> + const auto * this_config = AsRawPointer < Configurable > ( this_ptr ) ; <nl> + const auto * that_config = AsRawPointer < Configurable > ( that_ptr ) ; <nl> + if ( this_config = = that_config ) { <nl> + return true ; <nl> + } else if ( this_config ! = nullptr & & that_config ! = nullptr ) { <nl> + std : : string bad_name ; <nl> + bool matches ; <nl> + if ( level < config_options . sanity_level ) { <nl> + ConfigOptions copy = config_options ; <nl> + copy . sanity_level = level ; <nl> + matches = this_config - > AreEquivalent ( copy , that_config , & bad_name ) ; <nl> + } else { <nl> + matches = <nl> + this_config - > AreEquivalent ( config_options , that_config , & bad_name ) ; <nl> + } <nl> + if ( ! matches ) { <nl> + * mismatch = opt_name + " . " + bad_name ; <nl> + } <nl> + return matches ; <nl> + } <nl> } <nl> if ( mismatch - > empty ( ) ) { <nl> * mismatch = opt_name ; <nl> bool OptionTypeInfo : : StructsAreEqual ( <nl> for ( const auto & iter : * struct_map ) { <nl> const auto & opt_info = iter . second ; <nl> <nl> - matches = opt_info . AreEqual ( config_options , iter . first , <nl> - this_addr + opt_info . offset_ , <nl> - that_addr + opt_info . offset_ , & result ) ; <nl> + matches = opt_info . AreEqual ( config_options , iter . first , this_addr , <nl> + that_addr , & result ) ; <nl> if ( ! matches ) { <nl> * mismatch = struct_name + " . " + result ; <nl> return false ; <nl> bool OptionTypeInfo : : StructsAreEqual ( <nl> if ( opt_info = = nullptr ) { <nl> * mismatch = opt_name ; <nl> matches = false ; <nl> - } else if ( ! 
opt_info - > AreEqual ( config_options , elem_name , <nl> - this_addr + opt_info - > offset_ , <nl> - that_addr + opt_info - > offset_ , & result ) ) { <nl> + } else if ( ! opt_info - > AreEqual ( config_options , elem_name , this_addr , <nl> + that_addr , & result ) ) { <nl> matches = false ; <nl> * mismatch = struct_name + " . " + result ; <nl> } <nl> bool OptionTypeInfo : : StructsAreEqual ( <nl> if ( opt_info = = nullptr ) { <nl> * mismatch = struct_name + " . " + opt_name ; <nl> matches = false ; <nl> - } else if ( ! opt_info - > AreEqual ( config_options , elem_name , <nl> - this_addr + opt_info - > offset_ , <nl> - that_addr + opt_info - > offset_ , & result ) ) { <nl> + } else if ( ! opt_info - > AreEqual ( config_options , elem_name , this_addr , <nl> + that_addr , & result ) ) { <nl> matches = false ; <nl> * mismatch = struct_name + " . " + result ; <nl> } <nl> bool OptionTypeInfo : : StructsAreEqual ( <nl> return matches ; <nl> } <nl> <nl> + bool MatchesOptionsTypeFromMap ( <nl> + const ConfigOptions & config_options , <nl> + const std : : unordered_map < std : : string , OptionTypeInfo > & type_map , <nl> + const void * const this_ptr , const void * const that_ptr , <nl> + std : : string * mismatch ) { <nl> + for ( auto & pair : type_map ) { <nl> + / / We skip checking deprecated variables as they might <nl> + / / contain random values since they might not be initialized <nl> + if ( config_options . IsCheckEnabled ( pair . second . GetSanityLevel ( ) ) ) { <nl> + if ( ! pair . second . AreEqual ( config_options , pair . first , this_ptr , that_ptr , <nl> + mismatch ) & & <nl> + ! pair . second . AreEqualByName ( config_options , pair . first , this_ptr , <nl> + that_ptr ) ) { <nl> + return false ; <nl> + } <nl> + } <nl> + } <nl> + return true ; <nl> + } <nl> + <nl> bool OptionTypeInfo : : AreEqualByName ( const ConfigOptions & config_options , <nl> const std : : string & opt_name , <nl> - const char * this_addr , <nl> - const char * that_addr ) const { <nl> + const void * const this_ptr , <nl> + const void * const that_ptr ) const { <nl> if ( IsByName ( ) ) { <nl> std : : string that_value ; <nl> - if ( Serialize ( config_options , opt_name , that_addr , & that_value ) . ok ( ) ) { <nl> - return AreEqualByName ( config_options , opt_name , this_addr , that_value ) ; <nl> + if ( Serialize ( config_options , opt_name , that_ptr , & that_value ) . ok ( ) ) { <nl> + return AreEqualByName ( config_options , opt_name , this_ptr , that_value ) ; <nl> } <nl> } <nl> return false ; <nl> bool OptionTypeInfo : : AreEqualByName ( const ConfigOptions & config_options , <nl> <nl> bool OptionTypeInfo : : AreEqualByName ( const ConfigOptions & config_options , <nl> const std : : string & opt_name , <nl> - const char * opt_addr , <nl> + const void * const opt_ptr , <nl> const std : : string & that_value ) const { <nl> std : : string this_value ; <nl> if ( ! IsByName ( ) ) { <nl> return false ; <nl> - } else if ( ! Serialize ( config_options , opt_name , opt_addr , & this_value ) . ok ( ) ) { <nl> + } else if ( ! Serialize ( config_options , opt_name , opt_ptr , & this_value ) . ok ( ) ) { <nl> return false ; <nl> } else if ( IsEnabled ( OptionVerificationType : : kByNameAllowFromNull ) ) { <nl> if ( that_value = = kNullptrString ) { <nl> const OptionTypeInfo * OptionTypeInfo : : Find ( <nl> auto siter = <nl> opt_map . find ( opt_name . substr ( 0 , idx ) ) ; / / Look for the short name <nl> if ( siter ! = opt_map . end ( ) ) { / / We found the short name <nl> - if ( siter - > second . 
IsStruct ( ) ) { / / If the object is a struct <nl> + if ( siter - > second . IsStruct ( ) | | / / If the object is a struct <nl> + siter - > second . IsConfigurable ( ) ) { / / or a Configurable <nl> * elem_name = opt_name . substr ( idx + 1 ) ; / / Return the rest <nl> return & ( siter - > second ) ; / / Return the contents of the iterator <nl> } <nl> mmm a / options / options_helper . h <nl> ppp b / options / options_helper . h <nl> <nl> # include < string > <nl> # include < vector > <nl> <nl> - # include " options / cf_options . h " <nl> - # include " options / db_options . h " <nl> - # include " options / options_type . h " <nl> # include " rocksdb / options . h " <nl> # include " rocksdb / status . h " <nl> # include " rocksdb / table . h " <nl> - # include " rocksdb / universal_compaction . h " <nl> <nl> namespace ROCKSDB_NAMESPACE { <nl> + struct ColumnFamilyOptions ; <nl> struct ConfigOptions ; <nl> + struct DBOptions ; <nl> + struct ImmutableDBOptions ; <nl> + struct MutableDBOptions ; <nl> + struct MutableCFOptions ; <nl> + struct Options ; <nl> <nl> std : : vector < CompressionType > GetSupportedCompressions ( ) ; <nl> <nl> + / / Checks that the combination of DBOptions and ColumnFamilyOptions are valid <nl> + Status ValidateOptions ( const DBOptions & db_opts , <nl> + const ColumnFamilyOptions & cf_opts ) ; <nl> + <nl> DBOptions BuildDBOptions ( const ImmutableDBOptions & immutable_db_options , <nl> const MutableDBOptions & mutable_db_options ) ; <nl> <nl> ColumnFamilyOptions BuildColumnFamilyOptions ( <nl> const MutableCFOptions & mutable_cf_options ) ; <nl> <nl> # ifndef ROCKSDB_LITE <nl> - Status GetStringFromStruct ( <nl> - const ConfigOptions & config_options , const void * const opt_ptr , <nl> - const std : : unordered_map < std : : string , OptionTypeInfo > & type_info , <nl> - std : : string * opt_string ) ; <nl> - <nl> - Status ParseColumnFamilyOption ( const ConfigOptions & config_options , <nl> - const std : : string & name , <nl> - const std : : string & org_value , <nl> - ColumnFamilyOptions * new_options ) ; <nl> + std : : unique_ptr < Configurable > DBOptionsAsConfigurable ( <nl> + const MutableDBOptions & opts ) ; <nl> + std : : unique_ptr < Configurable > DBOptionsAsConfigurable ( const DBOptions & opts ) ; <nl> + std : : unique_ptr < Configurable > CFOptionsAsConfigurable ( <nl> + const MutableCFOptions & opts ) ; <nl> + std : : unique_ptr < Configurable > CFOptionsAsConfigurable ( <nl> + const ColumnFamilyOptions & opts , <nl> + const std : : unordered_map < std : : string , std : : string > * opt_map = nullptr ) ; <nl> + <nl> + Status GetStringFromMutableCFOptions ( const ConfigOptions & config_options , <nl> + const MutableCFOptions & mutable_opts , <nl> + std : : string * opt_string ) ; <nl> + <nl> + Status GetStringFromMutableDBOptions ( const ConfigOptions & config_options , <nl> + const MutableDBOptions & mutable_opts , <nl> + std : : string * opt_string ) ; <nl> <nl> Status GetMutableOptionsFromStrings ( <nl> const MutableCFOptions & base_options , <nl> Status GetMutableDBOptionsFromStrings ( <nl> const std : : unordered_map < std : : string , std : : string > & options_map , <nl> MutableDBOptions * new_options ) ; <nl> <nl> - Status GetTableFactoryFromMap ( <nl> - const std : : string & factory_name , <nl> - const std : : unordered_map < std : : string , std : : string > & opt_map , <nl> - std : : shared_ptr < TableFactory > * table_factory , <nl> - bool ignore_unknown_options = false ) ; <nl> - <nl> - Status GetTableFactoryFromMap ( <nl> - const 
ConfigOptions & config_options , const std : : string & factory_name , <nl> - const std : : unordered_map < std : : string , std : : string > & opt_map , <nl> - std : : shared_ptr < TableFactory > * table_factory ) ; <nl> - <nl> - / / A helper function that converts " opt_address " to a std : : string <nl> - / / based on the specified OptionType . <nl> - bool SerializeSingleOptionHelper ( const char * opt_address , <nl> - const OptionType opt_type , std : : string * value ) ; <nl> - <nl> - / / In addition to its public version defined in rocksdb / convenience . h , <nl> - / / this further takes an optional output vector " unsupported_options_names " , <nl> - / / which stores the name of all the unsupported options specified in " opts_map " . <nl> - Status GetDBOptionsFromMapInternal ( <nl> - const ConfigOptions & config_options , const DBOptions & base_options , <nl> - const std : : unordered_map < std : : string , std : : string > & opts_map , <nl> - DBOptions * new_options , <nl> - std : : vector < std : : string > * unsupported_options_names = nullptr ) ; <nl> - <nl> bool ParseSliceTransform ( <nl> const std : : string & value , <nl> std : : shared_ptr < const SliceTransform > * slice_transform ) ; <nl> bool ParseSliceTransform ( <nl> extern Status StringToMap ( <nl> const std : : string & opts_str , <nl> std : : unordered_map < std : : string , std : : string > * opts_map ) ; <nl> - <nl> - extern bool ParseOptionHelper ( char * opt_address , const OptionType & opt_type , <nl> - const std : : string & value ) ; <nl> # endif / / ! ROCKSDB_LITE <nl> <nl> struct OptionsHelper { <nl> + static const std : : string kCFOptionsName / * = " ColumnFamilyOptions " * / ; <nl> + static const std : : string kDBOptionsName / * = " DBOptions " * / ; <nl> static std : : map < CompactionStyle , std : : string > compaction_style_to_string ; <nl> static std : : map < CompactionPri , std : : string > compaction_pri_to_string ; <nl> static std : : map < CompactionStopStyle , std : : string > <nl> struct OptionsHelper { <nl> static std : : unordered_map < std : : string , CompressionType > <nl> compression_type_string_map ; <nl> # ifndef ROCKSDB_LITE <nl> - static std : : unordered_map < std : : string , OptionTypeInfo > cf_options_type_info ; <nl> static std : : unordered_map < std : : string , CompactionStopStyle > <nl> compaction_stop_style_string_map ; <nl> - static std : : unordered_map < std : : string , OptionTypeInfo > db_options_type_info ; <nl> static std : : unordered_map < std : : string , EncodingType > encoding_type_string_map ; <nl> static std : : unordered_map < std : : string , CompactionStyle > <nl> compaction_style_string_map ; <nl> static std : : unordered_map < std : : string , CompactionPri > <nl> compaction_pri_string_map ; <nl> - static ColumnFamilyOptions dummy_cf_options ; <nl> # endif / / ! 
ROCKSDB_LITE <nl> } ; <nl> <nl> static auto & compaction_stop_style_to_string = <nl> OptionsHelper : : compaction_stop_style_to_string ; <nl> static auto & checksum_type_string_map = OptionsHelper : : checksum_type_string_map ; <nl> # ifndef ROCKSDB_LITE <nl> - static auto & cf_options_type_info = OptionsHelper : : cf_options_type_info ; <nl> static auto & compaction_stop_style_string_map = <nl> OptionsHelper : : compaction_stop_style_string_map ; <nl> - static auto & db_options_type_info = OptionsHelper : : db_options_type_info ; <nl> static auto & compression_type_string_map = <nl> OptionsHelper : : compression_type_string_map ; <nl> static auto & encoding_type_string_map = OptionsHelper : : encoding_type_string_map ; <nl> mmm a / options / options_parser . cc <nl> ppp b / options / options_parser . cc <nl> <nl> <nl> # include " file / read_write_util . h " <nl> # include " file / writable_file_writer . h " <nl> + # include " options / cf_options . h " <nl> + # include " options / db_options . h " <nl> # include " options / options_helper . h " <nl> # include " port / port . h " <nl> # include " rocksdb / convenience . h " <nl> # include " rocksdb / db . h " <nl> - # include " table / block_based / block_based_table_factory . h " <nl> + # include " rocksdb / utilities / options_type . h " <nl> # include " test_util / sync_point . h " <nl> # include " util / cast_util . h " <nl> # include " util / string_util . h " <nl> Status PersistRocksDBOptions ( const DBOptions & db_opt , <nl> ConfigOptions <nl> config_options ; / / Use default for escaped ( true ) and check ( exact ) <nl> config_options . delimiter = " \ n " ; <nl> + / / Do not invoke PrepareOptions when we are doing validation . <nl> + config_options . invoke_prepare_options = false ; <nl> / / If a readahead size was set in the input options , use it <nl> if ( db_opt . log_readahead_size > 0 ) { <nl> config_options . file_readahead_size = db_opt . log_readahead_size ; <nl> Status RocksDBOptionsParser : : EndSection ( <nl> section_arg ) ; <nl> } <nl> / / Ignore error as table factory deserialization is optional <nl> - s = GetTableFactoryFromMap ( <nl> + s = TableFactory : : CreateFromString ( <nl> config_options , <nl> section_title . substr ( <nl> opt_section_titles [ kOptionSectionTableOptions ] . size ( ) ) , <nl> - opt_map , & ( cf_opt - > table_factory ) ) ; <nl> - if ( ! s . ok ( ) ) { <nl> - return s ; <nl> + & ( cf_opt - > table_factory ) ) ; <nl> + if ( s . ok ( ) ) { <nl> + return cf_opt - > table_factory - > ConfigureFromMap ( config_options , opt_map ) ; <nl> + } else { <nl> + / / Return OK for not supported table factories as TableFactory <nl> + / / Deserialization is optional . <nl> + cf_opt - > table_factory . reset ( ) ; <nl> + return Status : : OK ( ) ; <nl> } <nl> } else if ( section = = kOptionSectionVersion ) { <nl> for ( const auto & pair : opt_map ) { <nl> std : : string RocksDBOptionsParser : : TrimAndRemoveComment ( const std : : string & line , <nl> } <nl> <nl> Status RocksDBOptionsParser : : VerifyRocksDBOptionsFromFile ( <nl> - const ConfigOptions & config_options , const DBOptions & db_opt , <nl> + const ConfigOptions & config_options_in , const DBOptions & db_opt , <nl> const std : : vector < std : : string > & cf_names , <nl> const std : : vector < ColumnFamilyOptions > & cf_opts , <nl> const std : : string & file_name , FileSystem * fs ) { <nl> RocksDBOptionsParser parser ; <nl> + ConfigOptions config_options = config_options_in ; <nl> + config_options . 
invoke_prepare_options = <nl> + false ; / / No need to do a prepare for verify <nl> Status s = parser . Parse ( config_options , file_name , fs ) ; <nl> if ( ! s . ok ( ) ) { <nl> return s ; <nl> Status RocksDBOptionsParser : : VerifyDBOptions ( <nl> const ConfigOptions & config_options , const DBOptions & base_opt , <nl> const DBOptions & file_opt , <nl> const std : : unordered_map < std : : string , std : : string > * / * opt_map * / ) { <nl> - for ( const auto & pair : db_options_type_info ) { <nl> - const auto & opt_info = pair . second ; <nl> - if ( config_options . IsCheckEnabled ( opt_info . GetSanityLevel ( ) ) ) { <nl> - const char * base_addr = <nl> - reinterpret_cast < const char * > ( & base_opt ) + opt_info . offset_ ; <nl> - const char * file_addr = <nl> - reinterpret_cast < const char * > ( & file_opt ) + opt_info . offset_ ; <nl> - std : : string mismatch ; <nl> - if ( ! opt_info . AreEqual ( config_options , pair . first , base_addr , file_addr , <nl> - & mismatch ) & & <nl> - ! opt_info . AreEqualByName ( config_options , pair . first , base_addr , <nl> - file_addr ) ) { <nl> - const size_t kBufferSize = 2048 ; <nl> - char buffer [ kBufferSize ] ; <nl> - std : : string base_value ; <nl> - std : : string file_value ; <nl> - int offset = <nl> - snprintf ( buffer , sizeof ( buffer ) , <nl> - " [ RocksDBOptionsParser ] : " <nl> - " failed the verification on ColumnFamilyOptions : : % s " , <nl> - pair . first . c_str ( ) ) ; <nl> - Status s = opt_info . Serialize ( config_options , pair . first , base_addr , <nl> - & base_value ) ; <nl> - if ( s . ok ( ) ) { <nl> - s = opt_info . Serialize ( config_options , pair . first , file_addr , <nl> - & file_value ) ; <nl> - } <nl> - snprintf ( buffer , sizeof ( buffer ) , <nl> - " [ RocksDBOptionsParser ] : " <nl> - " failed the verification on DBOptions : : % s mmm " <nl> - " The specified one is % s while the persisted one is % s . \ n " , <nl> - pair . first . c_str ( ) , base_value . c_str ( ) , file_value . c_str ( ) ) ; <nl> - assert ( offset > = 0 ) ; <nl> - assert ( static_cast < size_t > ( offset ) < sizeof ( buffer ) ) ; <nl> - if ( s . ok ( ) ) { <nl> - snprintf ( <nl> - buffer + offset , sizeof ( buffer ) - static_cast < size_t > ( offset ) , <nl> - " mmm The specified one is % s while the persisted one is % s . \ n " , <nl> - base_value . c_str ( ) , file_value . c_str ( ) ) ; <nl> - } else { <nl> - snprintf ( buffer + offset , <nl> - sizeof ( buffer ) - static_cast < size_t > ( offset ) , <nl> - " mmm Unable to re - serialize an option : % s . \ n " , <nl> - s . ToString ( ) . c_str ( ) ) ; <nl> - } <nl> - return Status : : InvalidArgument ( Slice ( buffer , strlen ( buffer ) ) ) ; <nl> - } <nl> + auto base_config = DBOptionsAsConfigurable ( base_opt ) ; <nl> + auto file_config = DBOptionsAsConfigurable ( file_opt ) ; <nl> + std : : string mismatch ; <nl> + if ( ! base_config - > AreEquivalent ( config_options , file_config . get ( ) , <nl> + & mismatch ) ) { <nl> + const size_t kBufferSize = 2048 ; <nl> + char buffer [ kBufferSize ] ; <nl> + std : : string base_value ; <nl> + std : : string file_value ; <nl> + int offset = snprintf ( buffer , sizeof ( buffer ) , <nl> + " [ RocksDBOptionsParser ] : " <nl> + " failed the verification on DBOptions : : % s - - " , <nl> + mismatch . c_str ( ) ) ; <nl> + Status s = base_config - > GetOption ( config_options , mismatch , & base_value ) ; <nl> + if ( s . 
ok ( ) ) { <nl> + s = file_config - > GetOption ( config_options , mismatch , & file_value ) ; <nl> + } <nl> + assert ( offset > = 0 ) ; <nl> + assert ( static_cast < size_t > ( offset ) < sizeof ( buffer ) ) ; <nl> + if ( s . ok ( ) ) { <nl> + snprintf ( buffer + offset , sizeof ( buffer ) - static_cast < size_t > ( offset ) , <nl> + " - - The specified one is % s while the persisted one is % s . \ n " , <nl> + base_value . c_str ( ) , file_value . c_str ( ) ) ; <nl> + } else { <nl> + snprintf ( buffer + offset , sizeof ( buffer ) - static_cast < size_t > ( offset ) , <nl> + " - - Unable to re - serialize an option : % s . \ n " , <nl> + s . ToString ( ) . c_str ( ) ) ; <nl> } <nl> + return Status : : InvalidArgument ( Slice ( buffer , strlen ( buffer ) ) ) ; <nl> } <nl> return Status : : OK ( ) ; <nl> } <nl> Status RocksDBOptionsParser : : VerifyCFOptions ( <nl> const ConfigOptions & config_options , const ColumnFamilyOptions & base_opt , <nl> const ColumnFamilyOptions & file_opt , <nl> const std : : unordered_map < std : : string , std : : string > * opt_map ) { <nl> - for ( const auto & pair : cf_options_type_info ) { <nl> - const auto & opt_info = pair . second ; <nl> - <nl> - if ( config_options . IsCheckEnabled ( opt_info . GetSanityLevel ( ) ) ) { <nl> - std : : string mismatch ; <nl> - const char * base_addr = <nl> - reinterpret_cast < const char * > ( & base_opt ) + opt_info . offset_ ; <nl> - const char * file_addr = <nl> - reinterpret_cast < const char * > ( & file_opt ) + opt_info . offset_ ; <nl> - bool matches = opt_info . AreEqual ( config_options , pair . first , base_addr , <nl> - file_addr , & mismatch ) ; <nl> - if ( ! matches & & opt_info . IsByName ( ) ) { <nl> - if ( opt_map = = nullptr ) { <nl> - matches = true ; <nl> - } else { <nl> - auto iter = opt_map - > find ( pair . first ) ; <nl> - if ( iter = = opt_map - > end ( ) ) { <nl> - matches = true ; <nl> - } else { <nl> - matches = opt_info . AreEqualByName ( config_options , pair . first , <nl> - base_addr , iter - > second ) ; <nl> - } <nl> - } <nl> - } <nl> - if ( ! matches ) { <nl> - / / The options do not match <nl> - const size_t kBufferSize = 2048 ; <nl> - char buffer [ kBufferSize ] ; <nl> - std : : string base_value ; <nl> - std : : string file_value ; <nl> - Status s = opt_info . Serialize ( config_options , pair . first , base_addr , <nl> - & base_value ) ; <nl> - if ( s . ok ( ) ) { <nl> - s = opt_info . Serialize ( config_options , pair . first , file_addr , <nl> - & file_value ) ; <nl> - } <nl> - int offset = <nl> - snprintf ( buffer , sizeof ( buffer ) , <nl> - " [ RocksDBOptionsParser ] : " <nl> - " failed the verification on ColumnFamilyOptions : : % s " , <nl> - pair . first . c_str ( ) ) ; <nl> - assert ( offset > = 0 ) ; <nl> - assert ( static_cast < size_t > ( offset ) < sizeof ( buffer ) ) ; <nl> - if ( s . ok ( ) ) { <nl> - snprintf ( <nl> - buffer + offset , sizeof ( buffer ) - static_cast < size_t > ( offset ) , <nl> - " mmm The specified one is % s while the persisted one is % s . \ n " , <nl> - base_value . c_str ( ) , file_value . c_str ( ) ) ; <nl> - } else { <nl> - snprintf ( buffer + offset , <nl> - sizeof ( buffer ) - static_cast < size_t > ( offset ) , <nl> - " mmm Unable to re - serialize an option : % s . \ n " , <nl> - s . ToString ( ) . c_str ( ) ) ; <nl> - } <nl> - return Status : : InvalidArgument ( Slice ( buffer , sizeof ( buffer ) ) ) ; <nl> - } / / if ( ! 
matches ) <nl> - } / / CheckSanityLevel <nl> - } / / For each option <nl> + auto base_config = CFOptionsAsConfigurable ( base_opt , opt_map ) ; <nl> + auto file_config = CFOptionsAsConfigurable ( file_opt , opt_map ) ; <nl> + std : : string mismatch ; <nl> + if ( ! base_config - > AreEquivalent ( config_options , file_config . get ( ) , <nl> + & mismatch ) ) { <nl> + std : : string base_value ; <nl> + std : : string file_value ; <nl> + / / The options do not match <nl> + const size_t kBufferSize = 2048 ; <nl> + char buffer [ kBufferSize ] ; <nl> + Status s = base_config - > GetOption ( config_options , mismatch , & base_value ) ; <nl> + if ( s . ok ( ) ) { <nl> + s = file_config - > GetOption ( config_options , mismatch , & file_value ) ; <nl> + } <nl> + int offset = snprintf ( buffer , sizeof ( buffer ) , <nl> + " [ RocksDBOptionsParser ] : " <nl> + " failed the verification on ColumnFamilyOptions : : % s " , <nl> + mismatch . c_str ( ) ) ; <nl> + assert ( offset > = 0 ) ; <nl> + assert ( static_cast < size_t > ( offset ) < sizeof ( buffer ) ) ; <nl> + if ( s . ok ( ) ) { <nl> + snprintf ( buffer + offset , sizeof ( buffer ) - static_cast < size_t > ( offset ) , <nl> + " mmm The specified one is % s while the persisted one is % s . \ n " , <nl> + base_value . c_str ( ) , file_value . c_str ( ) ) ; <nl> + } else { <nl> + snprintf ( buffer + offset , sizeof ( buffer ) - static_cast < size_t > ( offset ) , <nl> + " mmm Unable to re - serialize an option : % s . \ n " , <nl> + s . ToString ( ) . c_str ( ) ) ; <nl> + } <nl> + return Status : : InvalidArgument ( Slice ( buffer , sizeof ( buffer ) ) ) ; <nl> + } / / For each option <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> Status RocksDBOptionsParser : : VerifyTableFactory ( <nl> const ConfigOptions & config_options , const TableFactory * base_tf , <nl> const TableFactory * file_tf ) { <nl> + std : : string mismatch ; <nl> if ( base_tf & & file_tf ) { <nl> if ( config_options . sanity_level > ConfigOptions : : kSanityLevelNone & & <nl> std : : string ( base_tf - > Name ( ) ) ! = std : : string ( file_tf - > Name ( ) ) ) { <nl> return Status : : Corruption ( <nl> " [ RocksDBOptionsParser ] : " <nl> " failed the verification on TableFactory - > Name ( ) " ) ; <nl> + } else if ( ! base_tf - > AreEquivalent ( config_options , file_tf , & mismatch ) ) { <nl> + return Status : : Corruption ( std : : string ( " [ RocksDBOptionsParser ] : " <nl> + " failed the verification on " ) + <nl> + base_tf - > Name ( ) + " : : " , <nl> + mismatch ) ; <nl> } <nl> - if ( base_tf - > Name ( ) = = BlockBasedTableFactory : : kName ) { <nl> - return VerifyBlockBasedTableFactory ( <nl> - config_options , <nl> - static_cast_with_check < const BlockBasedTableFactory , <nl> - const TableFactory > ( base_tf ) , <nl> - static_cast_with_check < const BlockBasedTableFactory , <nl> - const TableFactory > ( file_tf ) ) ; <nl> - } <nl> - / / TODO ( yhchiang ) : add checks for other table factory types <nl> } else { <nl> / / TODO ( yhchiang ) : further support sanity check here <nl> } <nl> mmm a / options / options_parser . h <nl> ppp b / options / options_parser . 
h <nl> class RocksDBOptionsParser { <nl> <nl> static Status ExtraParserCheck ( const RocksDBOptionsParser & input_parser ) ; <nl> <nl> + static Status ParseStatement ( std : : string * name , std : : string * value , <nl> + const std : : string & line , const int line_num ) ; <nl> + <nl> protected : <nl> bool IsSection ( const std : : string & line ) ; <nl> Status ParseSection ( OptionSection * section , std : : string * title , <nl> class RocksDBOptionsParser { <nl> Status CheckSection ( const OptionSection section , <nl> const std : : string & section_arg , const int line_num ) ; <nl> <nl> - Status ParseStatement ( std : : string * name , std : : string * value , <nl> - const std : : string & line , const int line_num ) ; <nl> - <nl> Status EndSection ( <nl> const ConfigOptions & config_options , const OptionSection section , <nl> const std : : string & title , const std : : string & section_arg , <nl> class RocksDBOptionsParser { <nl> <nl> Status ValidityCheck ( ) ; <nl> <nl> - Status InvalidArgument ( const int line_num , const std : : string & message ) ; <nl> + static Status InvalidArgument ( const int line_num , const std : : string & message ) ; <nl> <nl> Status ParseVersionNumber ( const std : : string & ver_name , <nl> const std : : string & ver_string , const int max_count , <nl> mmm a / options / options_settable_test . cc <nl> ppp b / options / options_settable_test . cc <nl> <nl> <nl> # include < cstring > <nl> <nl> + # include " options / cf_options . h " <nl> + # include " options / db_options . h " <nl> # include " options / options_helper . h " <nl> # include " rocksdb / convenience . h " <nl> # include " test_util / testharness . h " <nl> TEST_F ( OptionsSettableTest , ColumnFamilyOptionsAllFieldsSettable ) { <nl> options - > compaction_options_universal = CompactionOptionsUniversal ( ) ; <nl> options - > hard_rate_limit = 0 ; <nl> options - > soft_rate_limit = 0 ; <nl> + options - > num_levels = 42 ; / / Initialize options for MutableCF <nl> options - > purge_redundant_kvs_while_flush = false ; <nl> options - > max_mem_compaction_level = 0 ; <nl> options - > compaction_filter = nullptr ; <nl> mmm a / options / options_test . cc <nl> ppp b / options / options_test . cc <nl> <nl> # include " rocksdb / memtablerep . h " <nl> # include " rocksdb / utilities / leveldb_options . h " <nl> # include " rocksdb / utilities / object_registry . h " <nl> + # include " rocksdb / utilities / options_type . h " <nl> # include " table / block_based / filter_policy_internal . h " <nl> # include " test_util / testharness . h " <nl> # include " test_util / testutil . h " <nl> TEST_F ( OptionsTest , GetBlockBasedTableOptionsFromString ) { <nl> ASSERT_OK ( GetBlockBasedTableOptionsFromString ( <nl> config_options , table_opt , <nl> " cache_index_and_filter_blocks = 1 ; index_type = kHashSearch ; " <nl> - " checksum = kxxHash ; hash_index_allow_collision = 1 ; no_block_cache = 1 ; " <nl> + " checksum = kxxHash ; hash_index_allow_collision = 1 ; " <nl> " block_cache = 1M ; block_cache_compressed = 1k ; block_size = 1024 ; " <nl> " block_size_deviation = 8 ; block_restart_interval = 4 ; " <nl> " format_version = 5 ; whole_key_filtering = 1 ; " <nl> TEST_F ( OptionsTest , GetBlockBasedTableOptionsFromString ) { <nl> ASSERT_EQ ( new_opt . index_type , BlockBasedTableOptions : : kHashSearch ) ; <nl> ASSERT_EQ ( new_opt . checksum , ChecksumType : : kxxHash ) ; <nl> ASSERT_TRUE ( new_opt . hash_index_allow_collision ) ; <nl> - ASSERT_TRUE ( new_opt . no_block_cache ) ; <nl> ASSERT_TRUE ( new_opt . 
block_cache ! = nullptr ) ; <nl> ASSERT_EQ ( new_opt . block_cache - > GetCapacity ( ) , 1024UL * 1024UL ) ; <nl> ASSERT_TRUE ( new_opt . block_cache_compressed ! = nullptr ) ; <nl> TEST_F ( OptionsTest , GetOptionsFromStringTest ) { <nl> ASSERT_EQ ( new_options . bottommost_compression_opts . enabled , false ) ; <nl> ASSERT_EQ ( new_options . write_buffer_size , 10U ) ; <nl> ASSERT_EQ ( new_options . max_write_buffer_number , 16 ) ; <nl> - BlockBasedTableOptions new_block_based_table_options = <nl> - dynamic_cast < BlockBasedTableFactory * > ( new_options . table_factory . get ( ) ) <nl> - - > table_options ( ) ; <nl> - ASSERT_EQ ( new_block_based_table_options . block_cache - > GetCapacity ( ) , 1U < < 20 ) ; <nl> - ASSERT_EQ ( new_block_based_table_options . block_size , 4U ) ; <nl> + const auto new_bbto = <nl> + new_options . table_factory - > GetOptions < BlockBasedTableOptions > ( ) ; <nl> + ASSERT_NE ( new_bbto , nullptr ) ; <nl> + ASSERT_EQ ( new_bbto - > block_cache - > GetCapacity ( ) , 1U < < 20 ) ; <nl> + ASSERT_EQ ( new_bbto - > block_size , 4U ) ; <nl> / / don ' t overwrite block based table options <nl> - ASSERT_TRUE ( new_block_based_table_options . cache_index_and_filter_blocks ) ; <nl> + ASSERT_TRUE ( new_bbto - > cache_index_and_filter_blocks ) ; <nl> <nl> ASSERT_EQ ( new_options . create_if_missing , true ) ; <nl> ASSERT_EQ ( new_options . max_open_files , 1 ) ; <nl> TEST_F ( OptionsTest , GetOptionsFromStringTest ) { <nl> ASSERT_OK ( Env : : LoadEnv ( kCustomEnvName , & newEnv ) ) ; <nl> ASSERT_EQ ( newEnv , new_options . env ) ; <nl> <nl> - / / Test the old interfaxe <nl> + config_options . ignore_unknown_options = false ; <nl> + / / Test a bad value for a DBOption returns a failure <nl> + base_options . dump_malloc_stats = false ; <nl> + base_options . write_buffer_size = 1024 ; <nl> + Options bad_options = new_options ; <nl> + ASSERT_NOK ( GetOptionsFromString ( config_options , base_options , <nl> + " create_if_missing = XX ; dump_malloc_stats = true " , <nl> + & bad_options ) ) ; <nl> + ASSERT_EQ ( bad_options . dump_malloc_stats , false ) ; <nl> + <nl> + bad_options = new_options ; <nl> + ASSERT_NOK ( GetOptionsFromString ( config_options , base_options , <nl> + " write_buffer_size = XX ; dump_malloc_stats = true " , <nl> + & bad_options ) ) ; <nl> + ASSERT_EQ ( bad_options . dump_malloc_stats , false ) ; <nl> + <nl> + / / Test a bad value for a TableFactory Option returns a failure <nl> + bad_options = new_options ; <nl> + ASSERT_NOK ( GetOptionsFromString ( config_options , base_options , <nl> + " write_buffer_size = 16 ; dump_malloc_stats = true " <nl> + " block_based_table_factory = { block_size = XX ; } ; " , <nl> + & bad_options ) ) ; <nl> + ASSERT_EQ ( bad_options . dump_malloc_stats , false ) ; <nl> + ASSERT_EQ ( bad_options . write_buffer_size , 1024 ) ; <nl> + <nl> + config_options . ignore_unknown_options = true ; <nl> + ASSERT_OK ( GetOptionsFromString ( config_options , base_options , <nl> + " create_if_missing = XX ; dump_malloc_stats = true ; " <nl> + " write_buffer_size = XX ; " <nl> + " block_based_table_factory = { block_size = XX ; } ; " , <nl> + & bad_options ) ) ; <nl> + ASSERT_EQ ( bad_options . create_if_missing , base_options . create_if_missing ) ; <nl> + ASSERT_EQ ( bad_options . dump_malloc_stats , true ) ; <nl> + ASSERT_EQ ( bad_options . write_buffer_size , base_options . 
write_buffer_size ) ; <nl> + <nl> + / / Test the old interface <nl> ASSERT_OK ( GetOptionsFromString ( <nl> base_options , <nl> " write_buffer_size = 22 ; max_write_buffer_number = 33 ; max_open_files = 44 ; " , <nl> TEST_F ( OptionsTest , ColumnFamilyOptionsSerialization ) { <nl> } <nl> } <nl> <nl> + TEST_F ( OptionsTest , CheckBlockBasedTableOptions ) { <nl> + ColumnFamilyOptions cf_opts ; <nl> + DBOptions db_opts ; <nl> + ConfigOptions config_opts ; <nl> + <nl> + ASSERT_OK ( GetColumnFamilyOptionsFromString ( <nl> + config_opts , cf_opts , " prefix_extractor = capped : 8 " , & cf_opts ) ) ; <nl> + ASSERT_OK ( TableFactory : : CreateFromString ( config_opts , " BlockBasedTable " , <nl> + & cf_opts . table_factory ) ) ; <nl> + ASSERT_NE ( cf_opts . table_factory . get ( ) , nullptr ) ; <nl> + ASSERT_TRUE ( cf_opts . table_factory - > IsInstanceOf ( <nl> + TableFactory : : kBlockBasedTableName ( ) ) ) ; <nl> + auto bbto = cf_opts . table_factory - > GetOptions < BlockBasedTableOptions > ( ) ; <nl> + ASSERT_OK ( cf_opts . table_factory - > ConfigureFromString ( <nl> + config_opts , <nl> + " block_cache = { capacity = 1M ; num_shard_bits = 4 ; } ; " <nl> + " block_size_deviation = 101 ; " <nl> + " block_restart_interval = 0 ; " <nl> + " index_block_restart_interval = 5 ; " <nl> + " partition_filters = true ; " <nl> + " index_type = kHashSearch ; " <nl> + " no_block_cache = 1 ; " ) ) ; <nl> + ASSERT_NE ( bbto , nullptr ) ; <nl> + ASSERT_EQ ( bbto - > block_cache . get ( ) , nullptr ) ; <nl> + ASSERT_EQ ( bbto - > block_size_deviation , 0 ) ; <nl> + ASSERT_EQ ( bbto - > block_restart_interval , 1 ) ; <nl> + ASSERT_EQ ( bbto - > index_block_restart_interval , 1 ) ; <nl> + ASSERT_FALSE ( bbto - > partition_filters ) ; <nl> + ASSERT_OK ( TableFactory : : CreateFromString ( config_opts , " BlockBasedTable " , <nl> + & cf_opts . table_factory ) ) ; <nl> + bbto = cf_opts . table_factory - > GetOptions < BlockBasedTableOptions > ( ) ; <nl> + <nl> + ASSERT_OK ( cf_opts . table_factory - > ConfigureFromString ( config_opts , <nl> + " no_block_cache = 0 ; " ) ) ; <nl> + ASSERT_NE ( bbto - > block_cache . get ( ) , nullptr ) ; <nl> + ASSERT_OK ( cf_opts . table_factory - > ValidateOptions ( db_opts , cf_opts ) ) ; <nl> + } <nl> + <nl> + TEST_F ( OptionsTest , MutableTableOptions ) { <nl> + ConfigOptions config_options ; <nl> + std : : shared_ptr < TableFactory > bbtf ; <nl> + bbtf . reset ( NewBlockBasedTableFactory ( ) ) ; <nl> + auto bbto = bbtf - > GetOptions < BlockBasedTableOptions > ( ) ; <nl> + ASSERT_NE ( bbto , nullptr ) ; <nl> + ASSERT_FALSE ( bbtf - > IsPrepared ( ) ) ; <nl> + ASSERT_OK ( bbtf - > ConfigureOption ( config_options , " block_align " , " true " ) ) ; <nl> + ASSERT_OK ( bbtf - > ConfigureOption ( config_options , " block_size " , " 1024 " ) ) ; <nl> + ASSERT_EQ ( bbto - > block_align , true ) ; <nl> + ASSERT_EQ ( bbto - > block_size , 1024 ) ; <nl> + ASSERT_OK ( bbtf - > PrepareOptions ( config_options ) ) ; <nl> + ASSERT_TRUE ( bbtf - > IsPrepared ( ) ) ; <nl> + ASSERT_OK ( bbtf - > ConfigureOption ( config_options , " block_size " , " 1024 " ) ) ; <nl> + ASSERT_EQ ( bbto - > block_align , true ) ; <nl> + ASSERT_NOK ( bbtf - > ConfigureOption ( config_options , " block_align " , " false " ) ) ; <nl> + ASSERT_OK ( bbtf - > ConfigureOption ( config_options , " block_size " , " 2048 " ) ) ; <nl> + ASSERT_EQ ( bbto - > block_align , true ) ; <nl> + ASSERT_EQ ( bbto - > block_size , 2048 ) ; <nl> + <nl> + ColumnFamilyOptions cf_opts ; <nl> + cf_opts . 
table_factory = bbtf ; <nl> + ASSERT_NOK ( GetColumnFamilyOptionsFromString ( <nl> + config_options , cf_opts , " block_based_table_factory . block_align = false " , <nl> + & cf_opts ) ) ; <nl> + ASSERT_OK ( GetColumnFamilyOptionsFromString ( <nl> + config_options , cf_opts , " block_based_table_factory . block_size = 8192 " , <nl> + & cf_opts ) ) ; <nl> + ASSERT_EQ ( bbto - > block_align , true ) ; <nl> + ASSERT_EQ ( bbto - > block_size , 8192 ) ; <nl> + } <nl> + <nl> # endif / / ! ROCKSDB_LITE <nl> <nl> Status StringToMap ( <nl> TEST_F ( OptionsTest , ConvertOptionsTest ) { <nl> ASSERT_EQ ( converted_opt . max_open_files , leveldb_opt . max_open_files ) ; <nl> ASSERT_EQ ( converted_opt . compression , leveldb_opt . compression ) ; <nl> <nl> - std : : shared_ptr < TableFactory > tb_guard = converted_opt . table_factory ; <nl> - BlockBasedTableFactory * table_factory = <nl> - dynamic_cast < BlockBasedTableFactory * > ( converted_opt . table_factory . get ( ) ) ; <nl> - <nl> - ASSERT_TRUE ( table_factory ! = nullptr ) ; <nl> + std : : shared_ptr < TableFactory > table_factory = converted_opt . table_factory ; <nl> + const auto table_opt = table_factory - > GetOptions < BlockBasedTableOptions > ( ) ; <nl> + ASSERT_NE ( table_opt , nullptr ) ; <nl> <nl> - const BlockBasedTableOptions table_opt = table_factory - > table_options ( ) ; <nl> - <nl> - ASSERT_EQ ( table_opt . block_cache - > GetCapacity ( ) , 8UL < < 20 ) ; <nl> - ASSERT_EQ ( table_opt . block_size , leveldb_opt . block_size ) ; <nl> - ASSERT_EQ ( table_opt . block_restart_interval , <nl> + ASSERT_EQ ( table_opt - > block_cache - > GetCapacity ( ) , 8UL < < 20 ) ; <nl> + ASSERT_EQ ( table_opt - > block_size , leveldb_opt . block_size ) ; <nl> + ASSERT_EQ ( table_opt - > block_restart_interval , <nl> leveldb_opt . block_restart_interval ) ; <nl> - ASSERT_EQ ( table_opt . filter_policy . get ( ) , leveldb_opt . filter_policy ) ; <nl> + ASSERT_EQ ( table_opt - > filter_policy . get ( ) , leveldb_opt . filter_policy ) ; <nl> } <nl> <nl> # ifndef ROCKSDB_LITE <nl> TEST_F ( OptionsOldApiTest , GetOptionsFromStringTest ) { <nl> ASSERT_EQ ( new_options . bottommost_compression_opts . enabled , false ) ; <nl> ASSERT_EQ ( new_options . write_buffer_size , 10U ) ; <nl> ASSERT_EQ ( new_options . max_write_buffer_number , 16 ) ; <nl> - BlockBasedTableOptions new_block_based_table_options = <nl> - dynamic_cast < BlockBasedTableFactory * > ( new_options . table_factory . get ( ) ) <nl> - - > table_options ( ) ; <nl> - ASSERT_EQ ( new_block_based_table_options . block_cache - > GetCapacity ( ) , 1U < < 20 ) ; <nl> - ASSERT_EQ ( new_block_based_table_options . block_size , 4U ) ; <nl> + <nl> + auto new_block_based_table_options = <nl> + new_options . table_factory - > GetOptions < BlockBasedTableOptions > ( ) ; <nl> + ASSERT_NE ( new_block_based_table_options , nullptr ) ; <nl> + ASSERT_EQ ( new_block_based_table_options - > block_cache - > GetCapacity ( ) , <nl> + 1U < < 20 ) ; <nl> + ASSERT_EQ ( new_block_based_table_options - > block_size , 4U ) ; <nl> / / don ' t overwrite block based table options <nl> - ASSERT_TRUE ( new_block_based_table_options . cache_index_and_filter_blocks ) ; <nl> + ASSERT_TRUE ( new_block_based_table_options - > cache_index_and_filter_blocks ) ; <nl> <nl> ASSERT_EQ ( new_options . create_if_missing , true ) ; <nl> ASSERT_EQ ( new_options . 
max_open_files , 1 ) ; <nl> TEST_F ( OptionsParserTest , DumpAndParse ) { <nl> <nl> / / Make sure block - based table factory options was deserialized correctly <nl> std : : shared_ptr < TableFactory > ttf = ( * parser . cf_opts ( ) ) [ 4 ] . table_factory ; <nl> - ASSERT_EQ ( BlockBasedTableFactory : : kName , std : : string ( ttf - > Name ( ) ) ) ; <nl> - const BlockBasedTableOptions & parsed_bbto = <nl> - static_cast < BlockBasedTableFactory * > ( ttf . get ( ) ) - > table_options ( ) ; <nl> - ASSERT_EQ ( special_bbto . block_size , parsed_bbto . block_size ) ; <nl> + ASSERT_EQ ( TableFactory : : kBlockBasedTableName ( ) , std : : string ( ttf - > Name ( ) ) ) ; <nl> + const auto parsed_bbto = ttf - > GetOptions < BlockBasedTableOptions > ( ) ; <nl> + ASSERT_NE ( parsed_bbto , nullptr ) ; <nl> + ASSERT_EQ ( special_bbto . block_size , parsed_bbto - > block_size ) ; <nl> ASSERT_EQ ( special_bbto . cache_index_and_filter_blocks , <nl> - parsed_bbto . cache_index_and_filter_blocks ) ; <nl> + parsed_bbto - > cache_index_and_filter_blocks ) ; <nl> <nl> ASSERT_OK ( RocksDBOptionsParser : : VerifyRocksDBOptionsFromFile ( <nl> config_options , base_db_opt , cf_names , base_cf_opts , kOptionsFileName , <nl> static void TestAndCompareOption ( const ConfigOptions & config_options , <nl> const std : : string & opt_name , void * base_ptr , <nl> void * comp_ptr ) { <nl> std : : string result , mismatch ; <nl> - char * base_addr = reinterpret_cast < char * > ( base_ptr ) + opt_info . offset_ ; <nl> - char * comp_addr = reinterpret_cast < char * > ( comp_ptr ) + opt_info . offset_ ; <nl> - ASSERT_OK ( opt_info . Serialize ( config_options , opt_name , base_addr , & result ) ) ; <nl> - ASSERT_OK ( opt_info . Parse ( config_options , opt_name , result , comp_addr ) ) ; <nl> - ASSERT_TRUE ( opt_info . AreEqual ( config_options , opt_name , base_addr , comp_addr , <nl> + ASSERT_OK ( opt_info . Serialize ( config_options , opt_name , base_ptr , & result ) ) ; <nl> + ASSERT_OK ( opt_info . Parse ( config_options , opt_name , result , comp_ptr ) ) ; <nl> + ASSERT_TRUE ( opt_info . AreEqual ( config_options , opt_name , base_ptr , comp_ptr , <nl> & mismatch ) ) ; <nl> } <nl> <nl> static void TestAndCompareOption ( const ConfigOptions & config_options , <nl> const std : : string & opt_name , <nl> const std : : string & opt_value , void * base_ptr , <nl> void * comp_ptr ) { <nl> - char * base_addr = reinterpret_cast < char * > ( base_ptr ) + opt_info . offset_ ; <nl> - ASSERT_OK ( opt_info . Parse ( config_options , opt_name , opt_value , base_addr ) ) ; <nl> + ASSERT_OK ( opt_info . Parse ( config_options , opt_name , opt_value , base_ptr ) ) ; <nl> TestAndCompareOption ( config_options , opt_info , opt_name , base_ptr , comp_ptr ) ; <nl> } <nl> <nl> void TestOptInfo ( const ConfigOptions & config_options , OptionType opt_type , <nl> T * base , T * comp ) { <nl> std : : string result ; <nl> OptionTypeInfo opt_info ( 0 , opt_type ) ; <nl> - char * base_addr = reinterpret_cast < char * > ( base ) ; <nl> - char * comp_addr = reinterpret_cast < char * > ( comp ) ; <nl> - ASSERT_FALSE ( <nl> - opt_info . AreEqual ( config_options , " base " , base_addr , comp_addr , & result ) ) ; <nl> + ASSERT_FALSE ( opt_info . 
AreEqual ( config_options , " base " , base , comp , & result ) ) ; <nl> ASSERT_EQ ( result , " base " ) ; <nl> ASSERT_NE ( * base , * comp ) ; <nl> - TestAndCompareOption ( config_options , opt_info , " base " , base_addr , comp_addr ) ; <nl> + TestAndCompareOption ( config_options , opt_info , " base " , base , comp ) ; <nl> ASSERT_EQ ( * base , * comp ) ; <nl> } <nl> <nl> TEST_F ( OptionTypeInfoTest , TestInvalidArgs ) { <nl> double d ; <nl> <nl> ASSERT_NOK ( OptionTypeInfo ( 0 , OptionType : : kBoolean ) <nl> - . Parse ( config_options , " b " , " x " , reinterpret_cast < char * > ( & b ) ) ) ; <nl> - ASSERT_NOK ( OptionTypeInfo ( 0 , OptionType : : kInt ) <nl> - . Parse ( config_options , " b " , " x " , reinterpret_cast < char * > ( & i ) ) ) ; <nl> - ASSERT_NOK ( <nl> - OptionTypeInfo ( 0 , OptionType : : kInt32T ) <nl> - . Parse ( config_options , " b " , " x " , reinterpret_cast < char * > ( & i32 ) ) ) ; <nl> + . Parse ( config_options , " b " , " x " , & b ) ) ; <nl> ASSERT_NOK ( <nl> - OptionTypeInfo ( 0 , OptionType : : kInt64T ) <nl> - . Parse ( config_options , " b " , " x " , reinterpret_cast < char * > ( & i64 ) ) ) ; <nl> - ASSERT_NOK ( OptionTypeInfo ( 0 , OptionType : : kUInt ) <nl> - . Parse ( config_options , " b " , " x " , reinterpret_cast < char * > ( & u ) ) ) ; <nl> + OptionTypeInfo ( 0 , OptionType : : kInt ) . Parse ( config_options , " b " , " x " , & i ) ) ; <nl> + ASSERT_NOK ( OptionTypeInfo ( 0 , OptionType : : kInt32T ) <nl> + . Parse ( config_options , " b " , " x " , & i32 ) ) ; <nl> + ASSERT_NOK ( OptionTypeInfo ( 0 , OptionType : : kInt64T ) <nl> + . Parse ( config_options , " b " , " x " , & i64 ) ) ; <nl> ASSERT_NOK ( <nl> - OptionTypeInfo ( 0 , OptionType : : kUInt32T ) <nl> - . Parse ( config_options , " b " , " x " , reinterpret_cast < char * > ( & u32 ) ) ) ; <nl> - ASSERT_NOK ( <nl> - OptionTypeInfo ( 0 , OptionType : : kUInt64T ) <nl> - . Parse ( config_options , " b " , " x " , reinterpret_cast < char * > ( & u64 ) ) ) ; <nl> - ASSERT_NOK ( <nl> - OptionTypeInfo ( 0 , OptionType : : kSizeT ) <nl> - . Parse ( config_options , " b " , " x " , reinterpret_cast < char * > ( & sz ) ) ) ; <nl> + OptionTypeInfo ( 0 , OptionType : : kUInt ) . Parse ( config_options , " b " , " x " , & u ) ) ; <nl> + ASSERT_NOK ( OptionTypeInfo ( 0 , OptionType : : kUInt32T ) <nl> + . Parse ( config_options , " b " , " x " , & u32 ) ) ; <nl> + ASSERT_NOK ( OptionTypeInfo ( 0 , OptionType : : kUInt64T ) <nl> + . Parse ( config_options , " b " , " x " , & u64 ) ) ; <nl> + ASSERT_NOK ( OptionTypeInfo ( 0 , OptionType : : kSizeT ) <nl> + . Parse ( config_options , " b " , " x " , & sz ) ) ; <nl> ASSERT_NOK ( OptionTypeInfo ( 0 , OptionType : : kDouble ) <nl> - . Parse ( config_options , " b " , " x " , reinterpret_cast < char * > ( & d ) ) ) ; <nl> + . Parse ( config_options , " b " , " x " , & d ) ) ; <nl> <nl> / / Don ' t know how to convert Unknowns to anything else <nl> ASSERT_NOK ( OptionTypeInfo ( 0 , OptionType : : kUnknown ) <nl> - . Parse ( config_options , " b " , " x " , reinterpret_cast < char * > ( & d ) ) ) ; <nl> + . 
Parse ( config_options , " b " , " x " , & d ) ) ; <nl> <nl> / / Verify that if the parse function throws an exception , it is also trapped <nl> OptionTypeInfo func_info ( 0 , OptionType : : kUnknown , <nl> OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 , <nl> + OptionTypeFlags : : kNone , <nl> [ ] ( const ConfigOptions & , const std : : string & , <nl> const std : : string & value , char * addr ) { <nl> auto ptr = reinterpret_cast < int * > ( addr ) ; <nl> * ptr = ParseInt ( value ) ; <nl> return Status : : OK ( ) ; <nl> } ) ; <nl> - ASSERT_OK ( <nl> - func_info . Parse ( config_options , " b " , " 1 " , reinterpret_cast < char * > ( & i ) ) ) ; <nl> - ASSERT_NOK ( <nl> - func_info . Parse ( config_options , " b " , " x " , reinterpret_cast < char * > ( & i ) ) ) ; <nl> + ASSERT_OK ( func_info . Parse ( config_options , " b " , " 1 " , & i ) ) ; <nl> + ASSERT_NOK ( func_info . Parse ( config_options , " b " , " x " , & i ) ) ; <nl> } <nl> <nl> TEST_F ( OptionTypeInfoTest , TestParseFunc ) { <nl> OptionTypeInfo opt_info ( <nl> 0 , OptionType : : kUnknown , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 , <nl> + OptionTypeFlags : : kNone , <nl> [ ] ( const ConfigOptions & / * opts * / , const std : : string & name , <nl> const std : : string & value , char * addr ) { <nl> auto ptr = reinterpret_cast < std : : string * > ( addr ) ; <nl> TEST_F ( OptionTypeInfoTest , TestParseFunc ) { <nl> } ) ; <nl> ConfigOptions config_options ; <nl> std : : string base ; <nl> - ASSERT_OK ( opt_info . Parse ( config_options , " World " , " Hello " , <nl> - reinterpret_cast < char * > ( & base ) ) ) ; <nl> + ASSERT_OK ( opt_info . Parse ( config_options , " World " , " Hello " , & base ) ) ; <nl> ASSERT_EQ ( base , " Hello World " ) ; <nl> - ASSERT_NOK ( opt_info . Parse ( config_options , " Oops " , " Hello " , <nl> - reinterpret_cast < char * > ( & base ) ) ) ; <nl> + ASSERT_NOK ( opt_info . Parse ( config_options , " Oops " , " Hello " , & base ) ) ; <nl> } <nl> <nl> TEST_F ( OptionTypeInfoTest , TestSerializeFunc ) { <nl> OptionTypeInfo opt_info ( <nl> 0 , OptionType : : kString , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 , nullptr , <nl> + OptionTypeFlags : : kNone , nullptr , <nl> [ ] ( const ConfigOptions & / * opts * / , const std : : string & name , <nl> const char * / * addr * / , std : : string * value ) { <nl> if ( name = = " Oops " ) { <nl> TEST_F ( OptionTypeInfoTest , TestSerializeFunc ) { <nl> ConfigOptions config_options ; <nl> std : : string base ; <nl> std : : string value ; <nl> - ASSERT_OK ( opt_info . Serialize ( config_options , " Hello " , <nl> - reinterpret_cast < char * > ( & base ) , & value ) ) ; <nl> + ASSERT_OK ( opt_info . Serialize ( config_options , " Hello " , & base , & value ) ) ; <nl> ASSERT_EQ ( value , " Hello " ) ; <nl> - ASSERT_NOK ( opt_info . Serialize ( config_options , " Oops " , <nl> - reinterpret_cast < char * > ( & base ) , & value ) ) ; <nl> + ASSERT_NOK ( opt_info . 
Serialize ( config_options , " Oops " , & base , & value ) ) ; <nl> } <nl> <nl> TEST_F ( OptionTypeInfoTest , TestEqualsFunc ) { <nl> OptionTypeInfo opt_info ( <nl> 0 , OptionType : : kInt , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 , nullptr , nullptr , <nl> + OptionTypeFlags : : kNone , nullptr , nullptr , <nl> [ ] ( const ConfigOptions & / * opts * / , const std : : string & name , <nl> const char * addr1 , const char * addr2 , std : : string * mismatch ) { <nl> auto i1 = * ( reinterpret_cast < const int * > ( addr1 ) ) ; <nl> TEST_F ( OptionTypeInfoTest , TestEqualsFunc ) { <nl> int int1 = 100 ; <nl> int int2 = 200 ; <nl> std : : string mismatch ; <nl> - ASSERT_TRUE ( opt_info . AreEqual ( <nl> - config_options , " LT " , reinterpret_cast < const char * > ( & int1 ) , <nl> - reinterpret_cast < const char * > ( & int2 ) , & mismatch ) ) ; <nl> + ASSERT_TRUE ( opt_info . AreEqual ( config_options , " LT " , & int1 , & int2 , & mismatch ) ) ; <nl> ASSERT_EQ ( mismatch , " " ) ; <nl> - ASSERT_FALSE ( opt_info . AreEqual ( config_options , " GT " , <nl> - reinterpret_cast < char * > ( & int1 ) , <nl> - reinterpret_cast < char * > ( & int2 ) , & mismatch ) ) ; <nl> + ASSERT_FALSE ( <nl> + opt_info . AreEqual ( config_options , " GT " , & int1 , & int2 , & mismatch ) ) ; <nl> ASSERT_EQ ( mismatch , " GT " ) ; <nl> - ASSERT_FALSE ( opt_info . AreEqual ( config_options , " NO " , <nl> - reinterpret_cast < char * > ( & int1 ) , <nl> - reinterpret_cast < char * > ( & int2 ) , & mismatch ) ) ; <nl> + ASSERT_FALSE ( <nl> + opt_info . AreEqual ( config_options , " NO " , & int1 , & int2 , & mismatch ) ) ; <nl> ASSERT_EQ ( mismatch , " NO ? ? ? " ) ; <nl> } <nl> <nl> TEST_F ( OptionTypeInfoTest , TestOptionFlags ) { <nl> OptionTypeInfo opt_none ( 0 , OptionType : : kString , <nl> OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kDontSerialize , 0 ) ; <nl> + OptionTypeFlags : : kDontSerialize ) ; <nl> OptionTypeInfo opt_never ( 0 , OptionType : : kString , <nl> OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kCompareNever , 0 ) ; <nl> + OptionTypeFlags : : kCompareNever ) ; <nl> OptionTypeInfo opt_alias ( 0 , OptionType : : kString , <nl> OptionVerificationType : : kAlias , <nl> - OptionTypeFlags : : kNone , 0 ) ; <nl> + OptionTypeFlags : : kNone ) ; <nl> OptionTypeInfo opt_deprecated ( 0 , OptionType : : kString , <nl> OptionVerificationType : : kDeprecated , <nl> - OptionTypeFlags : : kNone , 0 ) ; <nl> + OptionTypeFlags : : kNone ) ; <nl> ConfigOptions config_options ; <nl> + std : : string opts_str ; <nl> std : : string base = " base " ; <nl> std : : string comp = " comp " ; <nl> <nl> - / / If marked string none , the serialization returns okay but does nothing <nl> - ASSERT_OK ( opt_none . Serialize ( config_options , " None " , <nl> - reinterpret_cast < char * > ( & base ) , & base ) ) ; <nl> + / / If marked string none , the serialization returns not supported <nl> + ASSERT_NOK ( opt_none . Serialize ( config_options , " None " , & base , & opts_str ) ) ; <nl> / / If marked never compare , they match even when they do not <nl> - ASSERT_TRUE ( opt_never . AreEqual ( config_options , " Never " , <nl> - reinterpret_cast < char * > ( & base ) , <nl> - reinterpret_cast < char * > ( & comp ) , & base ) ) ; <nl> - ASSERT_FALSE ( opt_none . AreEqual ( config_options , " Never " , <nl> - reinterpret_cast < char * > ( & base ) , <nl> - reinterpret_cast < char * > ( & comp ) , & base ) ) ; <nl> + ASSERT_TRUE ( opt_never . 
AreEqual ( config_options , " Never " , & base , & comp , & base ) ) ; <nl> + ASSERT_FALSE ( opt_none . AreEqual ( config_options , " Never " , & base , & comp , & base ) ) ; <nl> <nl> / / An alias can change the value via parse , but does nothing on serialize on <nl> / / match <nl> std : : string result ; <nl> ASSERT_OK ( opt_alias . Parse ( config_options , " Alias " , " Alias " , <nl> reinterpret_cast < char * > ( & base ) ) ) ; <nl> - ASSERT_OK ( opt_alias . Serialize ( config_options , " Alias " , <nl> - reinterpret_cast < char * > ( & base ) , & result ) ) ; <nl> - ASSERT_TRUE ( opt_alias . AreEqual ( config_options , " Alias " , <nl> - reinterpret_cast < char * > ( & base ) , <nl> - reinterpret_cast < char * > ( & comp ) , & result ) ) ; <nl> + ASSERT_OK ( opt_alias . Serialize ( config_options , " Alias " , & base , & result ) ) ; <nl> + ASSERT_TRUE ( <nl> + opt_alias . AreEqual ( config_options , " Alias " , & base , & comp , & result ) ) ; <nl> ASSERT_EQ ( base , " Alias " ) ; <nl> ASSERT_NE ( base , comp ) ; <nl> <nl> / / Deprecated options do nothing on any of the commands <nl> - ASSERT_OK ( opt_deprecated . Parse ( config_options , " Alias " , " Deprecated " , <nl> - reinterpret_cast < char * > ( & base ) ) ) ; <nl> - ASSERT_OK ( opt_deprecated . Serialize ( config_options , " Alias " , <nl> - reinterpret_cast < char * > ( & base ) , & result ) ) ; <nl> - ASSERT_TRUE ( opt_deprecated . AreEqual ( config_options , " Alias " , <nl> - reinterpret_cast < char * > ( & base ) , <nl> - reinterpret_cast < char * > ( & comp ) , & result ) ) ; <nl> + ASSERT_OK ( opt_deprecated . Parse ( config_options , " Alias " , " Deprecated " , & base ) ) ; <nl> + ASSERT_OK ( opt_deprecated . Serialize ( config_options , " Alias " , & base , & result ) ) ; <nl> + ASSERT_TRUE ( <nl> + opt_deprecated . AreEqual ( config_options , " Alias " , & base , & comp , & result ) ) ; <nl> ASSERT_EQ ( base , " Alias " ) ; <nl> ASSERT_NE ( base , comp ) ; <nl> } <nl> TEST_F ( OptionTypeInfoTest , TestCustomEnum ) { <nl> <nl> e2 = TestEnum : : kA ; <nl> <nl> - ASSERT_OK ( <nl> - opt_info . Parse ( config_options , " " , " B " , reinterpret_cast < char * > ( & e1 ) ) ) ; <nl> - ASSERT_OK ( opt_info . Serialize ( config_options , " " , reinterpret_cast < char * > ( & e1 ) , <nl> - & result ) ) ; <nl> + ASSERT_OK ( opt_info . Parse ( config_options , " " , " B " , & e1 ) ) ; <nl> + ASSERT_OK ( opt_info . Serialize ( config_options , " " , & e1 , & result ) ) ; <nl> ASSERT_EQ ( e1 , TestEnum : : kB ) ; <nl> ASSERT_EQ ( result , " B " ) ; <nl> <nl> - ASSERT_FALSE ( opt_info . AreEqual ( config_options , " Enum " , <nl> - reinterpret_cast < char * > ( & e1 ) , <nl> - reinterpret_cast < char * > ( & e2 ) , & mismatch ) ) ; <nl> + ASSERT_FALSE ( opt_info . AreEqual ( config_options , " Enum " , & e1 , & e2 , & mismatch ) ) ; <nl> ASSERT_EQ ( mismatch , " Enum " ) ; <nl> <nl> - TestAndCompareOption ( config_options , opt_info , " " , " C " , <nl> - reinterpret_cast < char * > ( & e1 ) , <nl> - reinterpret_cast < char * > ( & e2 ) ) ; <nl> + TestAndCompareOption ( config_options , opt_info , " " , " C " , & e1 , & e2 ) ; <nl> ASSERT_EQ ( e2 , TestEnum : : kC ) ; <nl> <nl> - ASSERT_NOK ( <nl> - opt_info . Parse ( config_options , " " , " D " , reinterpret_cast < char * > ( & e1 ) ) ) ; <nl> + ASSERT_NOK ( opt_info . 
Parse ( config_options , " " , " D " , & e1 ) ) ; <nl> ASSERT_EQ ( e1 , TestEnum : : kC ) ; <nl> } <nl> <nl> TEST_F ( OptionTypeInfoTest , TestStruct ) { <nl> } ; <nl> OptionTypeInfo basic_info = OptionTypeInfo : : Struct ( <nl> " b " , & basic_type_map , 0 , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kMutable , 0 ) ; <nl> + OptionTypeFlags : : kMutable ) ; <nl> <nl> std : : unordered_map < std : : string , OptionTypeInfo > extended_type_map = { <nl> { " j " , { offsetof ( struct Extended , j ) , OptionType : : kInt } } , <nl> { " b " , OptionTypeInfo : : Struct ( <nl> " b " , & basic_type_map , offsetof ( struct Extended , b ) , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kNone , 0 ) } , <nl> + OptionVerificationType : : kNormal , OptionTypeFlags : : kNone ) } , <nl> { " m " , OptionTypeInfo : : Struct ( <nl> " m " , & basic_type_map , offsetof ( struct Extended , b ) , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kMutable , <nl> - offsetof ( struct Extended , b ) ) } , <nl> + OptionVerificationType : : kNormal , OptionTypeFlags : : kMutable ) } , <nl> } ; <nl> OptionTypeInfo extended_info = OptionTypeInfo : : Struct ( <nl> " e " , & extended_type_map , 0 , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kMutable , 0 ) ; <nl> + OptionTypeFlags : : kMutable ) ; <nl> Extended e1 , e2 ; <nl> ConfigOptions config_options ; <nl> std : : string mismatch ; <nl> TEST_F ( OptionTypeInfoTest , TestStruct ) { <nl> ASSERT_EQ ( e1 . b . i , 55 ) ; <nl> <nl> e1 . b . i = 0 ; <nl> - auto e1bc = reinterpret_cast < char * > ( & e1 . b ) ; <nl> - auto e2bc = reinterpret_cast < char * > ( & e2 . b ) ; <nl> <nl> - ASSERT_FALSE ( basic_info . AreEqual ( config_options , " b " , e1bc , e2bc , & mismatch ) ) ; <nl> + ASSERT_FALSE ( <nl> + basic_info . AreEqual ( config_options , " b " , & e1 . b , & e2 . b , & mismatch ) ) ; <nl> ASSERT_EQ ( mismatch , " b . i " ) ; <nl> mismatch . clear ( ) ; <nl> ASSERT_FALSE ( <nl> - basic_info . AreEqual ( config_options , " b . i " , e1bc , e2bc , & mismatch ) ) ; <nl> + basic_info . AreEqual ( config_options , " b . i " , & e1 . b , & e2 . b , & mismatch ) ) ; <nl> ASSERT_EQ ( mismatch , " b . i " ) ; <nl> mismatch . clear ( ) ; <nl> - ASSERT_FALSE ( basic_info . AreEqual ( config_options , " i " , e1bc , e2bc , & mismatch ) ) ; <nl> + ASSERT_FALSE ( <nl> + basic_info . AreEqual ( config_options , " i " , & e1 . b , & e2 . b , & mismatch ) ) ; <nl> ASSERT_EQ ( mismatch , " b . i " ) ; <nl> mismatch . clear ( ) ; <nl> <nl> e1 = e2 ; <nl> - ASSERT_NOK ( basic_info . Parse ( config_options , " b " , " { i = 33 ; s = 33 ; j = 44 } " , e1bc ) ) ; <nl> - ASSERT_TRUE ( <nl> - basic_info . AreEqual ( config_options , " b . i " , e1bc , e2bc , & mismatch ) ) ; <nl> - ASSERT_NOK ( basic_info . Parse ( config_options , " b . j " , " 44 " , e1bc ) ) ; <nl> - ASSERT_TRUE ( <nl> - basic_info . AreEqual ( config_options , " b . i " , e1bc , e2bc , & mismatch ) ) ; <nl> - ASSERT_NOK ( basic_info . Parse ( config_options , " j " , " 44 " , e1bc ) ) ; <nl> - ASSERT_TRUE ( <nl> - basic_info . AreEqual ( config_options , " b . i " , e1bc , e2bc , & mismatch ) ) ; <nl> + ASSERT_NOK ( basic_info . Parse ( config_options , " b " , " { i = 33 ; s = 33 ; j = 44 } " , & e1 . b ) ) ; <nl> + ASSERT_NOK ( basic_info . Parse ( config_options , " b . j " , " 44 " , & e1 . b ) ) ; <nl> + ASSERT_NOK ( basic_info . Parse ( config_options , " j " , " 44 " , & e1 . 
b ) ) ; <nl> <nl> TestAndCompareOption ( config_options , extended_info , " e " , <nl> " b = { i = 55 ; s = 55 } ; j = 22 ; " , & e1 , & e2 ) ; <nl> TEST_F ( OptionTypeInfoTest , TestStruct ) { <nl> <nl> TEST_F ( OptionTypeInfoTest , TestVectorType ) { <nl> OptionTypeInfo vec_info = OptionTypeInfo : : Vector < std : : string > ( <nl> - 0 , OptionVerificationType : : kNormal , OptionTypeFlags : : kNone , 0 , <nl> + 0 , OptionVerificationType : : kNormal , OptionTypeFlags : : kNone , <nl> { 0 , OptionType : : kString } ) ; <nl> std : : vector < std : : string > vec1 , vec2 ; <nl> std : : string mismatch ; <nl> TEST_F ( OptionTypeInfoTest , TestVectorType ) { <nl> ASSERT_EQ ( vec1 [ 2 ] , " c " ) ; <nl> ASSERT_EQ ( vec1 [ 3 ] , " d " ) ; <nl> vec1 [ 3 ] = " e " ; <nl> - ASSERT_FALSE ( vec_info . AreEqual ( config_options , " v " , <nl> - reinterpret_cast < char * > ( & vec1 ) , <nl> - reinterpret_cast < char * > ( & vec2 ) , & mismatch ) ) ; <nl> + ASSERT_FALSE ( vec_info . AreEqual ( config_options , " v " , & vec1 , & vec2 , & mismatch ) ) ; <nl> ASSERT_EQ ( mismatch , " v " ) ; <nl> <nl> / / Test vectors with inner brackets <nl> TEST_F ( OptionTypeInfoTest , TestVectorType ) { <nl> ASSERT_EQ ( vec1 [ 3 ] , " d " ) ; <nl> <nl> OptionTypeInfo bar_info = OptionTypeInfo : : Vector < std : : string > ( <nl> - 0 , OptionVerificationType : : kNormal , OptionTypeFlags : : kNone , 0 , <nl> + 0 , OptionVerificationType : : kNormal , OptionTypeFlags : : kNone , <nl> { 0 , OptionType : : kString } , ' | ' ) ; <nl> TestAndCompareOption ( config_options , vec_info , " v " , " x | y | z " , & vec1 , & vec2 ) ; <nl> / / Test vectors with inner vector <nl> mmm a / src . mk <nl> ppp b / src . mk <nl> LIB_SOURCES = \ <nl> monitoring / thread_status_util . cc \ <nl> monitoring / thread_status_util_debug . cc \ <nl> options / cf_options . cc \ <nl> + options / configurable . cc \ <nl> options / db_options . cc \ <nl> options / options . cc \ <nl> options / options_helper . cc \ <nl> LIB_SOURCES = \ <nl> table / sst_file_dumper . cc \ <nl> table / sst_file_reader . cc \ <nl> table / sst_file_writer . cc \ <nl> + table / table_factory . cc \ <nl> table / table_properties . cc \ <nl> table / two_level_iterator . cc \ <nl> test_util / sync_point . cc \ <nl> TEST_MAIN_SOURCES = \ <nl> monitoring / statistics_test . cc \ <nl> monitoring / stats_dump_scheduler_test . cc \ <nl> monitoring / stats_history_test . cc \ <nl> + options / configurable_test . cc \ <nl> options / options_settable_test . cc \ <nl> options / options_test . cc \ <nl> table / block_based / block_based_filter_block_test . cc \ <nl> mmm a / table / adaptive / adaptive_table_factory . cc <nl> ppp b / table / adaptive / adaptive_table_factory . cc <nl> TableBuilder * AdaptiveTableFactory : : NewTableBuilder ( <nl> column_family_id , file ) ; <nl> } <nl> <nl> - std : : string AdaptiveTableFactory : : GetPrintableTableOptions ( ) const { <nl> + std : : string AdaptiveTableFactory : : GetPrintableOptions ( ) const { <nl> std : : string ret ; <nl> ret . reserve ( 20000 ) ; <nl> const int kBufferSize = 200 ; <nl> std : : string AdaptiveTableFactory : : GetPrintableTableOptions ( ) const { <nl> snprintf ( buffer , kBufferSize , " write factory ( % s ) options : \ n % s \ n " , <nl> ( table_factory_to_write_ - > Name ( ) ? table_factory_to_write_ - > Name ( ) <nl> : " " ) , <nl> - table_factory_to_write_ - > GetPrintableTableOptions ( ) . c_str ( ) ) ; <nl> + table_factory_to_write_ - > GetPrintableOptions ( ) . c_str ( ) ) ; <nl> ret . 
append ( buffer ) ; <nl> } <nl> if ( plain_table_factory_ ) { <nl> snprintf ( buffer , kBufferSize , " % s options : \ n % s \ n " , <nl> plain_table_factory_ - > Name ( ) ? plain_table_factory_ - > Name ( ) : " " , <nl> - plain_table_factory_ - > GetPrintableTableOptions ( ) . c_str ( ) ) ; <nl> + plain_table_factory_ - > GetPrintableOptions ( ) . c_str ( ) ) ; <nl> ret . append ( buffer ) ; <nl> } <nl> if ( block_based_table_factory_ ) { <nl> std : : string AdaptiveTableFactory : : GetPrintableTableOptions ( ) const { <nl> buffer , kBufferSize , " % s options : \ n % s \ n " , <nl> ( block_based_table_factory_ - > Name ( ) ? block_based_table_factory_ - > Name ( ) <nl> : " " ) , <nl> - block_based_table_factory_ - > GetPrintableTableOptions ( ) . c_str ( ) ) ; <nl> + block_based_table_factory_ - > GetPrintableOptions ( ) . c_str ( ) ) ; <nl> ret . append ( buffer ) ; <nl> } <nl> if ( cuckoo_table_factory_ ) { <nl> snprintf ( buffer , kBufferSize , " % s options : \ n % s \ n " , <nl> cuckoo_table_factory_ - > Name ( ) ? cuckoo_table_factory_ - > Name ( ) : " " , <nl> - cuckoo_table_factory_ - > GetPrintableTableOptions ( ) . c_str ( ) ) ; <nl> + cuckoo_table_factory_ - > GetPrintableOptions ( ) . c_str ( ) ) ; <nl> ret . append ( buffer ) ; <nl> } <nl> return ret ; <nl> mmm a / table / adaptive / adaptive_table_factory . h <nl> ppp b / table / adaptive / adaptive_table_factory . h <nl> class AdaptiveTableFactory : public TableFactory { <nl> const TableBuilderOptions & table_builder_options , <nl> uint32_t column_family_id , WritableFileWriter * file ) const override ; <nl> <nl> - / / Sanitizes the specified DB Options . <nl> - Status SanitizeOptions ( <nl> - const DBOptions & / * db_opts * / , <nl> - const ColumnFamilyOptions & / * cf_opts * / ) const override { <nl> - return Status : : OK ( ) ; <nl> - } <nl> - <nl> - std : : string GetPrintableTableOptions ( ) const override ; <nl> + std : : string GetPrintableOptions ( ) const override ; <nl> <nl> private : <nl> std : : shared_ptr < TableFactory > table_factory_to_write_ ; <nl> mmm a / table / block_based / block_based_table_factory . cc <nl> ppp b / table / block_based / block_based_table_factory . cc <nl> <nl> # include < memory > <nl> # include < string > <nl> <nl> - # include " options / options_helper . h " <nl> - # include " options / options_parser . h " <nl> + # include " options / configurable_helper . h " <nl> # include " port / port . h " <nl> # include " rocksdb / cache . h " <nl> # include " rocksdb / convenience . h " <nl> # include " rocksdb / flush_block_policy . h " <nl> + # include " rocksdb / utilities / options_type . h " <nl> # include " table / block_based / block_based_table_builder . h " <nl> # include " table / block_based / block_based_table_reader . h " <nl> # include " table / format . 
h " <nl> static std : : unordered_map < std : : string , <nl> { " kShortenSeparatorsAndSuccessor " , <nl> BlockBasedTableOptions : : IndexShorteningMode : : <nl> kShortenSeparatorsAndSuccessor } } ; <nl> + # endif / / ROCKSDB_LITE <nl> <nl> static std : : unordered_map < std : : string , OptionTypeInfo > <nl> block_based_table_type_info = { <nl> + # ifndef ROCKSDB_LITE <nl> / * currently not supported <nl> std : : shared_ptr < Cache > block_cache = nullptr ; <nl> std : : shared_ptr < Cache > block_cache_compressed = nullptr ; <nl> static std : : unordered_map < std : : string , OptionTypeInfo > <nl> { " flush_block_policy_factory " , <nl> { offsetof ( struct BlockBasedTableOptions , flush_block_policy_factory ) , <nl> OptionType : : kFlushBlockPolicyFactory , OptionVerificationType : : kByName , <nl> - OptionTypeFlags : : kCompareNever , 0 } } , <nl> + OptionTypeFlags : : kCompareNever } } , <nl> { " cache_index_and_filter_blocks " , <nl> { offsetof ( struct BlockBasedTableOptions , <nl> cache_index_and_filter_blocks ) , <nl> OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " cache_index_and_filter_blocks_with_high_priority " , <nl> { offsetof ( struct BlockBasedTableOptions , <nl> cache_index_and_filter_blocks_with_high_priority ) , <nl> OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " pin_l0_filter_and_index_blocks_in_cache " , <nl> { offsetof ( struct BlockBasedTableOptions , <nl> pin_l0_filter_and_index_blocks_in_cache ) , <nl> OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " index_type " , OptionTypeInfo : : Enum < BlockBasedTableOptions : : IndexType > ( <nl> offsetof ( struct BlockBasedTableOptions , index_type ) , <nl> & block_base_table_index_type_string_map ) } , <nl> { " hash_index_allow_collision " , <nl> { offsetof ( struct BlockBasedTableOptions , hash_index_allow_collision ) , <nl> OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " data_block_index_type " , <nl> OptionTypeInfo : : Enum < BlockBasedTableOptions : : DataBlockIndexType > ( <nl> offsetof ( struct BlockBasedTableOptions , data_block_index_type ) , <nl> static std : : unordered_map < std : : string , OptionTypeInfo > <nl> { offsetof ( struct BlockBasedTableOptions , <nl> data_block_hash_table_util_ratio ) , <nl> OptionType : : kDouble , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " checksum " , <nl> { offsetof ( struct BlockBasedTableOptions , checksum ) , <nl> OptionType : : kChecksumType , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " no_block_cache " , <nl> { offsetof ( struct BlockBasedTableOptions , no_block_cache ) , <nl> OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " block_size " , <nl> { offsetof ( struct BlockBasedTableOptions , block_size ) , <nl> OptionType : : kSizeT , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kMutable } } , <nl> { " block_size_deviation " , <nl> { 
offsetof ( struct BlockBasedTableOptions , block_size_deviation ) , <nl> OptionType : : kInt , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " block_restart_interval " , <nl> { offsetof ( struct BlockBasedTableOptions , block_restart_interval ) , <nl> OptionType : : kInt , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kMutable } } , <nl> { " index_block_restart_interval " , <nl> { offsetof ( struct BlockBasedTableOptions , index_block_restart_interval ) , <nl> OptionType : : kInt , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " index_per_partition " , <nl> { 0 , OptionType : : kUInt64T , OptionVerificationType : : kDeprecated , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " metadata_block_size " , <nl> { offsetof ( struct BlockBasedTableOptions , metadata_block_size ) , <nl> OptionType : : kUInt64T , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " partition_filters " , <nl> { offsetof ( struct BlockBasedTableOptions , partition_filters ) , <nl> OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " optimize_filters_for_memory " , <nl> { offsetof ( struct BlockBasedTableOptions , optimize_filters_for_memory ) , <nl> OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " filter_policy " , <nl> { offsetof ( struct BlockBasedTableOptions , filter_policy ) , <nl> OptionType : : kUnknown , OptionVerificationType : : kByNameAllowFromNull , <nl> - OptionTypeFlags : : kNone , 0 , <nl> + OptionTypeFlags : : kNone , <nl> / / Parses the Filter policy <nl> [ ] ( const ConfigOptions & opts , const std : : string & , <nl> const std : : string & value , char * addr ) { <nl> static std : : unordered_map < std : : string , OptionTypeInfo > <nl> { " whole_key_filtering " , <nl> { offsetof ( struct BlockBasedTableOptions , whole_key_filtering ) , <nl> OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " skip_table_builder_flush " , <nl> { 0 , OptionType : : kBoolean , OptionVerificationType : : kDeprecated , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " format_version " , <nl> { offsetof ( struct BlockBasedTableOptions , format_version ) , <nl> OptionType : : kUInt32T , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " verify_compression " , <nl> { offsetof ( struct BlockBasedTableOptions , verify_compression ) , <nl> OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " read_amp_bytes_per_bit " , <nl> { offsetof ( struct BlockBasedTableOptions , read_amp_bytes_per_bit ) , <nl> OptionType : : kSizeT , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " enable_index_compression " , <nl> { offsetof ( struct BlockBasedTableOptions , enable_index_compression ) , <nl> OptionType : : 
kBoolean , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " block_align " , <nl> { offsetof ( struct BlockBasedTableOptions , block_align ) , <nl> OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " pin_top_level_index_and_filter " , <nl> { offsetof ( struct BlockBasedTableOptions , <nl> pin_top_level_index_and_filter ) , <nl> OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " block_cache " , <nl> { offsetof ( struct BlockBasedTableOptions , block_cache ) , <nl> OptionType : : kUnknown , OptionVerificationType : : kNormal , <nl> - ( OptionTypeFlags : : kCompareNever | OptionTypeFlags : : kDontSerialize ) , 0 , <nl> + ( OptionTypeFlags : : kCompareNever | OptionTypeFlags : : kDontSerialize ) , <nl> / / Parses the input vsalue as a Cache <nl> [ ] ( const ConfigOptions & opts , const std : : string & , <nl> const std : : string & value , char * addr ) { <nl> static std : : unordered_map < std : : string , OptionTypeInfo > <nl> { " block_cache_compressed " , <nl> { offsetof ( struct BlockBasedTableOptions , block_cache_compressed ) , <nl> OptionType : : kUnknown , OptionVerificationType : : kNormal , <nl> - ( OptionTypeFlags : : kCompareNever | OptionTypeFlags : : kDontSerialize ) , 0 , <nl> + ( OptionTypeFlags : : kCompareNever | OptionTypeFlags : : kDontSerialize ) , <nl> / / Parses the input vsalue as a Cache <nl> [ ] ( const ConfigOptions & opts , const std : : string & , <nl> const std : : string & value , char * addr ) { <nl> auto * cache = reinterpret_cast < std : : shared_ptr < Cache > * > ( addr ) ; <nl> return Cache : : CreateFromString ( opts , value , cache ) ; <nl> } } } , <nl> - } ; <nl> # endif / / ROCKSDB_LITE <nl> + } ; <nl> <nl> / / TODO ( myabandeh ) : We should return an error instead of silently changing the <nl> / / options <nl> BlockBasedTableFactory : : BlockBasedTableFactory ( <nl> const BlockBasedTableOptions & _table_options ) <nl> : table_options_ ( _table_options ) { <nl> + InitializeOptions ( ) ; <nl> + ConfigurableHelper : : RegisterOptions ( * this , & table_options_ , <nl> + & block_based_table_type_info ) ; <nl> + } <nl> + <nl> + void BlockBasedTableFactory : : InitializeOptions ( ) { <nl> if ( table_options_ . flush_block_policy_factory = = nullptr ) { <nl> table_options_ . flush_block_policy_factory . reset ( <nl> new FlushBlockBySizePolicyFactory ( ) ) ; <nl> BlockBasedTableFactory : : BlockBasedTableFactory ( <nl> } <nl> } <nl> <nl> + Status BlockBasedTableFactory : : PrepareOptions ( const ConfigOptions & opts ) { <nl> + InitializeOptions ( ) ; <nl> + return TableFactory : : PrepareOptions ( opts ) ; <nl> + } <nl> + <nl> Status BlockBasedTableFactory : : NewTableReader ( <nl> const ReadOptions & ro , const TableReaderOptions & table_reader_options , <nl> std : : unique_ptr < RandomAccessFileReader > & & file , uint64_t file_size , <nl> TableBuilder * BlockBasedTableFactory : : NewTableBuilder ( <nl> return table_builder ; <nl> } <nl> <nl> - Status BlockBasedTableFactory : : SanitizeOptions ( <nl> + Status BlockBasedTableFactory : : ValidateOptions ( <nl> const DBOptions & db_opts , const ColumnFamilyOptions & cf_opts ) const { <nl> if ( table_options_ . index_type = = BlockBasedTableOptions : : kHashSearch & & <nl> cf_opts . 
prefix_extractor = = nullptr ) { <nl> Status BlockBasedTableFactory : : SanitizeOptions ( <nl> " max_successive_merges larger than 0 is currently inconsistent with " <nl> " unordered_write " ) ; <nl> } <nl> - return Status : : OK ( ) ; <nl> + return TableFactory : : ValidateOptions ( db_opts , cf_opts ) ; <nl> } <nl> <nl> - std : : string BlockBasedTableFactory : : GetPrintableTableOptions ( ) const { <nl> + std : : string BlockBasedTableFactory : : GetPrintableOptions ( ) const { <nl> std : : string ret ; <nl> ret . reserve ( 20000 ) ; <nl> const int kBufferSize = 200 ; <nl> std : : string BlockBasedTableFactory : : GetPrintableTableOptions ( ) const { <nl> return ret ; <nl> } <nl> <nl> - # ifndef ROCKSDB_LITE <nl> - Status BlockBasedTableFactory : : GetOptionString ( <nl> - const ConfigOptions & config_options , std : : string * opt_string ) const { <nl> - assert ( opt_string ) ; <nl> - opt_string - > clear ( ) ; <nl> - return GetStringFromStruct ( config_options , & table_options_ , <nl> - block_based_table_type_info , opt_string ) ; <nl> - } <nl> - # else <nl> - Status BlockBasedTableFactory : : GetOptionString ( <nl> - const ConfigOptions & / * opts * / , std : : string * / * opt_string * / ) const { <nl> - return Status : : OK ( ) ; <nl> - } <nl> - # endif / / ! ROCKSDB_LITE <nl> - <nl> - const BlockBasedTableOptions & BlockBasedTableFactory : : table_options ( ) const { <nl> - return table_options_ ; <nl> + const void * BlockBasedTableFactory : : GetOptionsPtr ( <nl> + const std : : string & name ) const { <nl> + if ( name = = kBlockCacheOpts ( ) ) { <nl> + if ( table_options_ . no_block_cache ) { <nl> + return nullptr ; <nl> + } else { <nl> + return table_options_ . block_cache . get ( ) ; <nl> + } <nl> + } else { <nl> + return TableFactory : : GetOptionsPtr ( name ) ; <nl> + } <nl> } <nl> <nl> # ifndef ROCKSDB_LITE <nl> - namespace { <nl> - std : : string ParseBlockBasedTableOption ( const ConfigOptions & config_options , <nl> - const std : : string & name , <nl> - const std : : string & org_value , <nl> - BlockBasedTableOptions * new_options ) { <nl> - const std : : string & value = config_options . input_strings_escaped <nl> - ? UnescapeOptionString ( org_value ) <nl> - : org_value ; <nl> - const auto iter = block_based_table_type_info . find ( name ) ; <nl> - if ( iter = = block_based_table_type_info . end ( ) ) { <nl> - if ( config_options . ignore_unknown_options ) { <nl> - return " " ; <nl> - } else { <nl> - return " Unrecognized option " ; <nl> + / / Take a default BlockBasedTableOptions " table_options " in addition to a <nl> + / / map " opts_map " of option name to option value to construct the new <nl> + / / BlockBasedTableOptions " new_table_options " . <nl> + / / <nl> + / / Below are the instructions of how to config some non - primitive - typed <nl> + / / options in BlockBasedTableOptions : <nl> + / / <nl> + / / * filter_policy : <nl> + / / We currently only support the following FilterPolicy in the convenience <nl> + / / functions : <nl> + / / - BloomFilter : use " bloomfilter : [ bits_per_key ] : [ use_block_based_builder ] " <nl> + / / to specify BloomFilter . The above string is equivalent to calling <nl> + / / NewBloomFilterPolicy ( bits_per_key , use_block_based_builder ) . <nl> + / / [ Example ] : <nl> + / / - Pass { " filter_policy " , " bloomfilter : 4 : true " } in <nl> + / / GetBlockBasedTableOptionsFromMap to use a BloomFilter with 4 - bits <nl> + / / per key and use_block_based_builder enabled . 
<nl> + / / <nl> + / / * block_cache / block_cache_compressed : <nl> + / / We currently only support LRU cache in the GetOptions API . The LRU <nl> + / / cache can be set by directly specifying its size . <nl> + / / [ Example ] : <nl> + / / - Passing { " block_cache " , " 1M " } in GetBlockBasedTableOptionsFromMap is <nl> + / / equivalent to setting block_cache using NewLRUCache ( 1024 * 1024 ) . <nl> + / / <nl> + / / @ param table_options the default options of the output " new_table_options " . <nl> + / / @ param opts_map an option name to value map for specifying how <nl> + / / " new_table_options " should be set . <nl> + / / @ param new_table_options the resulting options based on " table_options " <nl> + / / with the change specified in " opts_map " . <nl> + / / @ param input_strings_escaped when set to true , each escaped characters <nl> + / / prefixed by ' \ ' in the values of the opts_map will be further converted <nl> + / / back to the raw string before assigning to the associated options . <nl> + / / @ param ignore_unknown_options when set to true , unknown options are ignored <nl> + / / instead of resulting in an unknown - option error . <nl> + / / @ return Status : : OK ( ) on success . Otherwise , a non - ok status indicating <nl> + / / error will be returned , and " new_table_options " will be set to <nl> + / / " table_options " . <nl> + Status BlockBasedTableFactory : : ParseOption ( const ConfigOptions & config_options , <nl> + const OptionTypeInfo & opt_info , <nl> + const std : : string & opt_name , <nl> + const std : : string & opt_value , <nl> + void * opt_ptr ) { <nl> + Status status = TableFactory : : ParseOption ( config_options , opt_info , opt_name , <nl> + opt_value , opt_ptr ) ; <nl> + if ( config_options . input_strings_escaped & & ! status . ok ( ) ) { / / Got an error <nl> + / / ! input_strings_escaped indicates the old API , where everything is <nl> + / / parsable . <nl> + if ( opt_info . IsByName ( ) ) { <nl> + status = Status : : OK ( ) ; <nl> } <nl> } <nl> - const auto & opt_info = iter - > second ; <nl> - Status s = <nl> - opt_info . Parse ( config_options , iter - > first , value , <nl> - reinterpret_cast < char * > ( new_options ) + opt_info . offset_ ) ; <nl> - if ( s . ok ( ) ) { <nl> - return " " ; <nl> - } else { <nl> - return s . ToString ( ) ; <nl> - } <nl> + return status ; <nl> } <nl> - } / / namespace <nl> <nl> Status GetBlockBasedTableOptionsFromString ( <nl> const BlockBasedTableOptions & table_options , const std : : string & opts_str , <nl> Status GetBlockBasedTableOptionsFromString ( <nl> ConfigOptions config_options ; <nl> config_options . input_strings_escaped = false ; <nl> config_options . ignore_unknown_options = false ; <nl> + config_options . invoke_prepare_options = false ; <nl> return GetBlockBasedTableOptionsFromString ( config_options , table_options , <nl> opts_str , new_table_options ) ; <nl> } <nl> Status GetBlockBasedTableOptionsFromMap ( <nl> ConfigOptions config_options ; <nl> config_options . input_strings_escaped = input_strings_escaped ; <nl> config_options . ignore_unknown_options = ignore_unknown_options ; <nl> + config_options . 
invoke_prepare_options = false ; <nl> <nl> return GetBlockBasedTableOptionsFromMap ( config_options , table_options , <nl> opts_map , new_table_options ) ; <nl> Status GetBlockBasedTableOptionsFromMap ( <nl> const std : : unordered_map < std : : string , std : : string > & opts_map , <nl> BlockBasedTableOptions * new_table_options ) { <nl> assert ( new_table_options ) ; <nl> - * new_table_options = table_options ; <nl> - for ( const auto & o : opts_map ) { <nl> - auto error_message = ParseBlockBasedTableOption ( <nl> - config_options , o . first , o . second , new_table_options ) ; <nl> - if ( error_message ! = " " ) { <nl> - const auto iter = block_based_table_type_info . find ( o . first ) ; <nl> - if ( iter = = block_based_table_type_info . end ( ) | | <nl> - ! config_options <nl> - . input_strings_escaped | | / / ! input_strings_escaped indicates <nl> - / / the old API , where everything is <nl> - / / parsable . <nl> - ( ! iter - > second . IsByName ( ) & & ! iter - > second . IsDeprecated ( ) ) ) { <nl> - / / Restore " new_options " to the default " base_options " . <nl> - * new_table_options = table_options ; <nl> - return Status : : InvalidArgument ( " Can ' t parse BlockBasedTableOptions : " , <nl> - o . first + " " + error_message ) ; <nl> - } <nl> - } <nl> - } <nl> - return Status : : OK ( ) ; <nl> - } <nl> - <nl> - Status VerifyBlockBasedTableFactory ( const ConfigOptions & config_options , <nl> - const BlockBasedTableFactory * base_tf , <nl> - const BlockBasedTableFactory * file_tf ) { <nl> - if ( ( base_tf ! = nullptr ) ! = ( file_tf ! = nullptr ) & & <nl> - config_options . sanity_level > ConfigOptions : : kSanityLevelNone ) { <nl> - return Status : : Corruption ( <nl> - " [ RocksDBOptionsParser ] : Inconsistent TableFactory class type " ) ; <nl> - } <nl> - if ( base_tf = = nullptr ) { <nl> - return Status : : OK ( ) ; <nl> - } <nl> - assert ( file_tf ! = nullptr ) ; <nl> - <nl> - const auto & base_opt = base_tf - > table_options ( ) ; <nl> - const auto & file_opt = file_tf - > table_options ( ) ; <nl> - <nl> - std : : string mismatch ; <nl> - for ( auto & pair : block_based_table_type_info ) { <nl> - / / We skip checking deprecated variables as they might <nl> - / / contain random values since they might not be initialized <nl> - if ( config_options . IsCheckEnabled ( pair . second . GetSanityLevel ( ) ) ) { <nl> - const char * base_addr = <nl> - reinterpret_cast < const char * > ( & base_opt ) + pair . second . offset_ ; <nl> - const char * file_addr = <nl> - reinterpret_cast < const char * > ( & file_opt ) + pair . second . offset_ ; <nl> - <nl> - if ( ! pair . second . AreEqual ( config_options , pair . first , base_addr , <nl> - file_addr , & mismatch ) & & <nl> - ! pair . second . AreEqualByName ( config_options , pair . first , base_addr , <nl> - file_addr ) ) { <nl> - return Status : : Corruption ( <nl> - " [ RocksDBOptionsParser ] : " <nl> - " failed the verification on BlockBasedTableOptions : : " , <nl> - pair . first ) ; <nl> - } <nl> - } <nl> + BlockBasedTableFactory bbtf ( table_options ) ; <nl> + Status s = bbtf . ConfigureFromMap ( config_options , opts_map ) ; <nl> + if ( s . ok ( ) ) { <nl> + * new_table_options = * ( bbtf . GetOptions < BlockBasedTableOptions > ( ) ) ; <nl> + } else { <nl> + * new_table_options = table_options ; <nl> } <nl> - return Status : : OK ( ) ; <nl> + return s ; <nl> } <nl> # endif / / ! 
ROCKSDB_LITE <nl> <nl> TableFactory * NewBlockBasedTableFactory ( <nl> return new BlockBasedTableFactory ( _table_options ) ; <nl> } <nl> <nl> - const std : : string BlockBasedTableFactory : : kName = " BlockBasedTable " ; <nl> const std : : string BlockBasedTablePropertyNames : : kIndexType = <nl> " rocksdb . block . based . table . index . type " ; <nl> const std : : string BlockBasedTablePropertyNames : : kWholeKeyFiltering = <nl> mmm a / table / block_based / block_based_table_factory . h <nl> ppp b / table / block_based / block_based_table_factory . h <nl> class BlockBasedTableFactory : public TableFactory { <nl> <nl> ~ BlockBasedTableFactory ( ) { } <nl> <nl> - const char * Name ( ) const override { return kName . c_str ( ) ; } <nl> + const char * Name ( ) const override { return kBlockBasedTableName ( ) ; } <nl> <nl> using TableFactory : : NewTableReader ; <nl> Status NewTableReader ( <nl> class BlockBasedTableFactory : public TableFactory { <nl> const TableBuilderOptions & table_builder_options , <nl> uint32_t column_family_id , WritableFileWriter * file ) const override ; <nl> <nl> - / / Sanitizes the specified DB Options . <nl> - Status SanitizeOptions ( const DBOptions & db_opts , <nl> + / / Valdates the specified DB Options . <nl> + Status ValidateOptions ( const DBOptions & db_opts , <nl> const ColumnFamilyOptions & cf_opts ) const override ; <nl> + Status PrepareOptions ( const ConfigOptions & opts ) override ; <nl> <nl> - std : : string GetPrintableTableOptions ( ) const override ; <nl> - <nl> - Status GetOptionString ( const ConfigOptions & config_options , <nl> - std : : string * opt_string ) const override ; <nl> - <nl> - const BlockBasedTableOptions & table_options ( ) const ; <nl> - <nl> - void * GetOptions ( ) override { return & table_options_ ; } <nl> + std : : string GetPrintableOptions ( ) const override ; <nl> <nl> bool IsDeleteRangeSupported ( ) const override { return true ; } <nl> <nl> TailPrefetchStats * tail_prefetch_stats ( ) { return & tail_prefetch_stats_ ; } <nl> <nl> - static const std : : string kName ; <nl> + protected : <nl> + const void * GetOptionsPtr ( const std : : string & name ) const override ; <nl> + # ifndef ROCKSDB_LITE <nl> + Status ParseOption ( const ConfigOptions & config_options , <nl> + const OptionTypeInfo & opt_info , <nl> + const std : : string & opt_name , const std : : string & opt_value , <nl> + void * opt_ptr ) override ; <nl> + # endif <nl> + void InitializeOptions ( ) ; <nl> <nl> private : <nl> BlockBasedTableOptions table_options_ ; <nl> extern const std : : string kHashIndexPrefixesBlock ; <nl> extern const std : : string kHashIndexPrefixesMetadataBlock ; <nl> extern const std : : string kPropTrue ; <nl> extern const std : : string kPropFalse ; <nl> - <nl> - # ifndef ROCKSDB_LITE <nl> - extern Status VerifyBlockBasedTableFactory ( <nl> - const ConfigOptions & config_options , const BlockBasedTableFactory * base_tf , <nl> - const BlockBasedTableFactory * file_tf ) ; <nl> - # endif / / ! ROCKSDB_LITE <nl> } / / namespace ROCKSDB_NAMESPACE <nl> mmm a / table / block_based / block_based_table_reader_test . cc <nl> ppp b / table / block_based / block_based_table_reader_test . 
cc <nl> class BlockBasedTableReaderTest <nl> <nl> std : : unique_ptr < TableReader > table_reader ; <nl> ReadOptions ro ; <nl> - ASSERT_OK ( BlockBasedTable : : Open ( ro , ioptions , EnvOptions ( ) , <nl> - table_factory_ - > table_options ( ) , comparator , <nl> - std : : move ( file ) , file_size , & table_reader ) ) ; <nl> + const auto * table_options = <nl> + table_factory_ - > GetOptions < BlockBasedTableOptions > ( ) ; <nl> + ASSERT_NE ( table_options , nullptr ) ; <nl> + ASSERT_OK ( BlockBasedTable : : Open ( ro , ioptions , EnvOptions ( ) , * table_options , <nl> + comparator , std : : move ( file ) , file_size , <nl> + & table_reader ) ) ; <nl> <nl> table - > reset ( reinterpret_cast < BlockBasedTable * > ( table_reader . release ( ) ) ) ; <nl> } <nl> mmm a / table / block_based / filter_policy . cc <nl> ppp b / table / block_based / filter_policy . cc <nl> Status FilterPolicy : : CreateFromString ( <nl> NewBloomFilterPolicy ( bits_per_key , use_block_based_builder ) ) ; <nl> } <nl> } else { <nl> - return Status : : InvalidArgument ( " Invalid filter policy name " , value ) ; <nl> + return Status : : NotFound ( " Invalid filter policy name " , value ) ; <nl> # else <nl> } else { <nl> return Status : : NotSupported ( " Cannot load filter policy in LITE mode " , <nl> mmm a / table / block_based / reader_common . h <nl> ppp b / table / block_based / reader_common . h <nl> <nl> / / found in the LICENSE file . See the AUTHORS file for names of contributors . <nl> # pragma once <nl> <nl> + # include " rocksdb / cache . h " <nl> # include " rocksdb / table . h " <nl> <nl> namespace ROCKSDB_NAMESPACE { <nl> mmm a / table / block_fetcher_test . cc <nl> ppp b / table / block_fetcher_test . cc <nl> class BlockFetcherTest : public testing : : Test { <nl> <nl> std : : unique_ptr < TableReader > table_reader ; <nl> ReadOptions ro ; <nl> - ASSERT_OK ( BlockBasedTable : : Open ( ro , ioptions , EnvOptions ( ) , <nl> - table_factory_ . table_options ( ) , comparator , <nl> - std : : move ( file ) , file_size , & table_reader ) ) ; <nl> + const auto * table_options = <nl> + table_factory_ . GetOptions < BlockBasedTableOptions > ( ) ; <nl> + ASSERT_NE ( table_options , nullptr ) ; <nl> + ASSERT_OK ( BlockBasedTable : : Open ( ro , ioptions , EnvOptions ( ) , * table_options , <nl> + comparator , std : : move ( file ) , file_size , <nl> + & table_reader ) ) ; <nl> <nl> table - > reset ( reinterpret_cast < BlockBasedTable * > ( table_reader . release ( ) ) ) ; <nl> } <nl> mmm a / table / cuckoo / cuckoo_table_factory . cc <nl> ppp b / table / cuckoo / cuckoo_table_factory . cc <nl> <nl> # include " table / cuckoo / cuckoo_table_factory . h " <nl> <nl> # include " db / dbformat . h " <nl> + # include " options / configurable_helper . h " <nl> + # include " rocksdb / utilities / options_type . h " <nl> # include " table / cuckoo / cuckoo_table_builder . h " <nl> # include " table / cuckoo / cuckoo_table_reader . h " <nl> <nl> TableBuilder * CuckooTableFactory : : NewTableBuilder ( <nl> table_builder_options . db_id , table_builder_options . db_session_id ) ; <nl> } <nl> <nl> - std : : string CuckooTableFactory : : GetPrintableTableOptions ( ) const { <nl> + std : : string CuckooTableFactory : : GetPrintableOptions ( ) const { <nl> std : : string ret ; <nl> ret . 
reserve ( 2000 ) ; <nl> const int kBufferSize = 200 ; <nl> std : : string CuckooTableFactory : : GetPrintableTableOptions ( ) const { <nl> return ret ; <nl> } <nl> <nl> + static std : : unordered_map < std : : string , OptionTypeInfo > cuckoo_table_type_info = <nl> + { <nl> + # ifndef ROCKSDB_LITE <nl> + { " hash_table_ratio " , <nl> + { offsetof ( struct CuckooTableOptions , hash_table_ratio ) , <nl> + OptionType : : kDouble , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kNone } } , <nl> + { " max_search_depth " , <nl> + { offsetof ( struct CuckooTableOptions , max_search_depth ) , <nl> + OptionType : : kUInt32T , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kNone } } , <nl> + { " cuckoo_block_size " , <nl> + { offsetof ( struct CuckooTableOptions , cuckoo_block_size ) , <nl> + OptionType : : kUInt32T , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kNone } } , <nl> + { " identity_as_first_hash " , <nl> + { offsetof ( struct CuckooTableOptions , identity_as_first_hash ) , <nl> + OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kNone } } , <nl> + { " use_module_hash " , <nl> + { offsetof ( struct CuckooTableOptions , use_module_hash ) , <nl> + OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kNone } } , <nl> + # endif / / ROCKSDB_LITE <nl> + } ; <nl> + <nl> + CuckooTableFactory : : CuckooTableFactory ( const CuckooTableOptions & table_options ) <nl> + : table_options_ ( table_options ) { <nl> + ConfigurableHelper : : RegisterOptions ( * this , & table_options_ , <nl> + & cuckoo_table_type_info ) ; <nl> + } <nl> + <nl> TableFactory * NewCuckooTableFactory ( const CuckooTableOptions & table_options ) { <nl> return new CuckooTableFactory ( table_options ) ; <nl> } <nl> mmm a / table / cuckoo / cuckoo_table_factory . h <nl> ppp b / table / cuckoo / cuckoo_table_factory . h <nl> static inline uint64_t CuckooHash ( <nl> / / - Does not support prefix bloom filters . <nl> class CuckooTableFactory : public TableFactory { <nl> public : <nl> - explicit CuckooTableFactory ( const CuckooTableOptions & table_options ) <nl> - : table_options_ ( table_options ) { } <nl> + explicit CuckooTableFactory ( <nl> + const CuckooTableOptions & table_option = CuckooTableOptions ( ) ) ; <nl> ~ CuckooTableFactory ( ) { } <nl> <nl> - const char * Name ( ) const override { return " CuckooTable " ; } <nl> + const char * Name ( ) const override { return kCuckooTableName ( ) ; } <nl> <nl> using TableFactory : : NewTableReader ; <nl> Status NewTableReader ( <nl> class CuckooTableFactory : public TableFactory { <nl> const TableBuilderOptions & table_builder_options , <nl> uint32_t column_family_id , WritableFileWriter * file ) const override ; <nl> <nl> - / / Sanitizes the specified DB Options . <nl> - Status SanitizeOptions ( <nl> - const DBOptions & / * db_opts * / , <nl> - const ColumnFamilyOptions & / * cf_opts * / ) const override { <nl> - return Status : : OK ( ) ; <nl> - } <nl> - <nl> - std : : string GetPrintableTableOptions ( ) const override ; <nl> - <nl> - void * GetOptions ( ) override { return & table_options_ ; } <nl> - <nl> - Status GetOptionString ( const ConfigOptions & / * config_options * / , <nl> - std : : string * / * opt_string * / ) const override { <nl> - return Status : : OK ( ) ; <nl> - } <nl> + std : : string GetPrintableOptions ( ) const override ; <nl> <nl> private : <nl> CuckooTableOptions table_options_ ; <nl> mmm a / table / mock_table . 
h <nl> ppp b / table / mock_table . h <nl> class MockTableFactory : public TableFactory { <nl> Status CreateMockTable ( Env * env , const std : : string & fname , <nl> stl_wrappers : : KVMap file_contents ) ; <nl> <nl> - virtual Status SanitizeOptions ( <nl> - const DBOptions & / * db_opts * / , <nl> - const ColumnFamilyOptions & / * cf_opts * / ) const override { <nl> - return Status : : OK ( ) ; <nl> - } <nl> - <nl> - virtual std : : string GetPrintableTableOptions ( ) const override { <nl> + virtual std : : string GetPrintableOptions ( ) const override { <nl> return std : : string ( ) ; <nl> } <nl> <nl> mmm a / table / plain / plain_table_factory . cc <nl> ppp b / table / plain / plain_table_factory . cc <nl> <nl> # include < memory > <nl> <nl> # include " db / dbformat . h " <nl> - # include " options / options_helper . h " <nl> + # include " options / configurable_helper . h " <nl> # include " port / port . h " <nl> # include " rocksdb / convenience . h " <nl> + # include " rocksdb / utilities / options_type . h " <nl> # include " table / plain / plain_table_builder . h " <nl> # include " table / plain / plain_table_reader . h " <nl> # include " util / string_util . h " <nl> namespace ROCKSDB_NAMESPACE { <nl> static std : : unordered_map < std : : string , OptionTypeInfo > plain_table_type_info = { <nl> { " user_key_len " , <nl> { offsetof ( struct PlainTableOptions , user_key_len ) , OptionType : : kUInt32T , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kNone , 0 } } , <nl> + OptionVerificationType : : kNormal , OptionTypeFlags : : kNone } } , <nl> { " bloom_bits_per_key " , <nl> { offsetof ( struct PlainTableOptions , bloom_bits_per_key ) , OptionType : : kInt , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kNone , 0 } } , <nl> + OptionVerificationType : : kNormal , OptionTypeFlags : : kNone } } , <nl> { " hash_table_ratio " , <nl> { offsetof ( struct PlainTableOptions , hash_table_ratio ) , OptionType : : kDouble , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kNone , 0 } } , <nl> + OptionVerificationType : : kNormal , OptionTypeFlags : : kNone } } , <nl> { " index_sparseness " , <nl> { offsetof ( struct PlainTableOptions , index_sparseness ) , OptionType : : kSizeT , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kNone , 0 } } , <nl> + OptionVerificationType : : kNormal , OptionTypeFlags : : kNone } } , <nl> { " huge_page_tlb_size " , <nl> { offsetof ( struct PlainTableOptions , huge_page_tlb_size ) , <nl> OptionType : : kSizeT , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionTypeFlags : : kNone } } , <nl> { " encoding_type " , <nl> { offsetof ( struct PlainTableOptions , encoding_type ) , <nl> - OptionType : : kEncodingType , OptionVerificationType : : kByName , <nl> - OptionTypeFlags : : kNone , 0 } } , <nl> + OptionType : : kEncodingType , OptionVerificationType : : kNormal , <nl> + OptionTypeFlags : : kNone } } , <nl> { " full_scan_mode " , <nl> { offsetof ( struct PlainTableOptions , full_scan_mode ) , OptionType : : kBoolean , <nl> - OptionVerificationType : : kNormal , OptionTypeFlags : : kNone , 0 } } , <nl> + OptionVerificationType : : kNormal , OptionTypeFlags : : kNone } } , <nl> { " store_index_in_file " , <nl> { offsetof ( struct PlainTableOptions , store_index_in_file ) , <nl> OptionType : : kBoolean , OptionVerificationType : : kNormal , <nl> - OptionTypeFlags : : kNone , 0 } } } ; <nl> + OptionTypeFlags : : kNone } } , <nl> + } ; <nl> + <nl> + 
PlainTableFactory : : PlainTableFactory ( const PlainTableOptions & options ) <nl> + : table_options_ ( options ) { <nl> + ConfigurableHelper : : RegisterOptions ( * this , & table_options_ , <nl> + & plain_table_type_info ) ; <nl> + } <nl> <nl> Status PlainTableFactory : : NewTableReader ( <nl> const ReadOptions & / * ro * / , const TableReaderOptions & table_reader_options , <nl> TableBuilder * PlainTableFactory : : NewTableBuilder ( <nl> table_builder_options . db_session_id ) ; <nl> } <nl> <nl> - std : : string PlainTableFactory : : GetPrintableTableOptions ( ) const { <nl> + std : : string PlainTableFactory : : GetPrintableOptions ( ) const { <nl> std : : string ret ; <nl> ret . reserve ( 20000 ) ; <nl> const int kBufferSize = 200 ; <nl> std : : string PlainTableFactory : : GetPrintableTableOptions ( ) const { <nl> return ret ; <nl> } <nl> <nl> - const PlainTableOptions & PlainTableFactory : : table_options ( ) const { <nl> - return table_options_ ; <nl> - } <nl> - <nl> Status GetPlainTableOptionsFromString ( const PlainTableOptions & table_options , <nl> const std : : string & opts_str , <nl> PlainTableOptions * new_table_options ) { <nl> ConfigOptions config_options ; <nl> config_options . input_strings_escaped = false ; <nl> config_options . ignore_unknown_options = false ; <nl> + config_options . invoke_prepare_options = false ; <nl> return GetPlainTableOptionsFromString ( config_options , table_options , opts_str , <nl> new_table_options ) ; <nl> } <nl> Status GetMemTableRepFactoryFromString ( <nl> <nl> MemTableRepFactory * mem_factory = nullptr ; <nl> <nl> - if ( opts_list [ 0 ] = = " skip_list " ) { <nl> + if ( opts_list [ 0 ] = = " skip_list " | | opts_list [ 0 ] = = " SkipListFactory " ) { <nl> / / Expecting format <nl> / / skip_list : < lookahead > <nl> if ( 2 = = len ) { <nl> Status GetMemTableRepFactoryFromString ( <nl> } else if ( 1 = = len ) { <nl> mem_factory = new SkipListFactory ( ) ; <nl> } <nl> - } else if ( opts_list [ 0 ] = = " prefix_hash " ) { <nl> + } else if ( opts_list [ 0 ] = = " prefix_hash " | | <nl> + opts_list [ 0 ] = = " HashSkipListRepFactory " ) { <nl> / / Expecting format <nl> / / prfix_hash : < hash_bucket_count > <nl> if ( 2 = = len ) { <nl> Status GetMemTableRepFactoryFromString ( <nl> } else if ( 1 = = len ) { <nl> mem_factory = NewHashSkipListRepFactory ( ) ; <nl> } <nl> - } else if ( opts_list [ 0 ] = = " hash_linkedlist " ) { <nl> + } else if ( opts_list [ 0 ] = = " hash_linkedlist " | | <nl> + opts_list [ 0 ] = = " HashLinkListRepFactory " ) { <nl> / / Expecting format <nl> / / hash_linkedlist : < hash_bucket_count > <nl> if ( 2 = = len ) { <nl> Status GetMemTableRepFactoryFromString ( <nl> } else if ( 1 = = len ) { <nl> mem_factory = NewHashLinkListRepFactory ( ) ; <nl> } <nl> - } else if ( opts_list [ 0 ] = = " vector " ) { <nl> + } else if ( opts_list [ 0 ] = = " vector " | | opts_list [ 0 ] = = " VectorRepFactory " ) { <nl> / / Expecting format <nl> / / vector : < count > <nl> if ( 2 = = len ) { <nl> Status GetMemTableRepFactoryFromString ( <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> - std : : string ParsePlainTableOptions ( const ConfigOptions & config_options , <nl> - const std : : string & name , <nl> - const std : : string & org_value , <nl> - PlainTableOptions * new_options ) { <nl> - const std : : string & value = config_options . input_strings_escaped <nl> - ? UnescapeOptionString ( org_value ) <nl> - : org_value ; <nl> - const auto iter = plain_table_type_info . find ( name ) ; <nl> - if ( iter = = plain_table_type_info . 
end ( ) ) { <nl> - if ( config_options . ignore_unknown_options ) { <nl> - return " " ; <nl> - } else { <nl> - return " Unrecognized option " ; <nl> - } <nl> - } <nl> - const auto & opt_info = iter - > second ; <nl> - Status s = <nl> - opt_info . Parse ( config_options , name , value , <nl> - reinterpret_cast < char * > ( new_options ) + opt_info . offset_ ) ; <nl> - if ( s . ok ( ) ) { <nl> - return " " ; <nl> - } else { <nl> - return s . ToString ( ) ; <nl> - } <nl> - } <nl> - <nl> Status GetPlainTableOptionsFromMap ( <nl> const PlainTableOptions & table_options , <nl> const std : : unordered_map < std : : string , std : : string > & opts_map , <nl> Status GetPlainTableOptionsFromMap ( <nl> const std : : unordered_map < std : : string , std : : string > & opts_map , <nl> PlainTableOptions * new_table_options ) { <nl> assert ( new_table_options ) ; <nl> - * new_table_options = table_options ; <nl> - for ( const auto & o : opts_map ) { <nl> - auto error_message = ParsePlainTableOptions ( config_options , o . first , <nl> - o . second , new_table_options ) ; <nl> - if ( error_message ! = " " ) { <nl> - const auto iter = plain_table_type_info . find ( o . first ) ; <nl> - if ( iter = = plain_table_type_info . end ( ) | | <nl> - ! config_options <nl> - . input_strings_escaped | | / / ! input_strings_escaped indicates <nl> - / / the old API , where everything is <nl> - / / parsable . <nl> - ( ! iter - > second . IsByName ( ) & & ! iter - > second . IsDeprecated ( ) ) ) { <nl> - / / Restore " new_options " to the default " base_options " . <nl> - * new_table_options = table_options ; <nl> - return Status : : InvalidArgument ( " Can ' t parse PlainTableOptions : " , <nl> - o . first + " " + error_message ) ; <nl> - } <nl> - } <nl> + PlainTableFactory ptf ( table_options ) ; <nl> + Status s = ptf . ConfigureFromMap ( config_options , opts_map ) ; <nl> + if ( s . ok ( ) ) { <nl> + * new_table_options = * ( ptf . GetOptions < PlainTableOptions > ( ) ) ; <nl> + } else { <nl> + / / Restore " new_options " to the default " base_options " . <nl> + * new_table_options = table_options ; <nl> } <nl> - return Status : : OK ( ) ; <nl> + return s ; <nl> } <nl> <nl> extern TableFactory * NewPlainTableFactory ( const PlainTableOptions & options ) { <nl> return new PlainTableFactory ( options ) ; <nl> } <nl> <nl> - const std : : string PlainTableFactory : : kName = " PlainTable " ; <nl> const std : : string PlainTablePropertyNames : : kEncodingType = <nl> " rocksdb . plain . table . encoding . type " ; <nl> <nl> mmm a / table / plain / plain_table_factory . h <nl> ppp b / table / plain / plain_table_factory . h <nl> <nl> # include < string > <nl> # include < stdint . h > <nl> <nl> - # include " options / options_helper . h " <nl> - # include " rocksdb / options . h " <nl> # include " rocksdb / table . h " <nl> <nl> namespace ROCKSDB_NAMESPACE { <nl> class PlainTableFactory : public TableFactory { <nl> / / page TLB and the page size if allocating from there . See comments of <nl> / / Arena : : AllocateAligned ( ) for details . <nl> explicit PlainTableFactory ( <nl> - const PlainTableOptions & _table_options = PlainTableOptions ( ) ) <nl> - : table_options_ ( _table_options ) { } <nl> + const PlainTableOptions & _table_options = PlainTableOptions ( ) ) ; <nl> <nl> - const char * Name ( ) const override { return kName . 
c_str ( ) ; } <nl> + const char * Name ( ) const override { return kPlainTableName ( ) ; } <nl> using TableFactory : : NewTableReader ; <nl> Status NewTableReader ( const ReadOptions & ro , <nl> const TableReaderOptions & table_reader_options , <nl> class PlainTableFactory : public TableFactory { <nl> const TableBuilderOptions & table_builder_options , <nl> uint32_t column_family_id , WritableFileWriter * file ) const override ; <nl> <nl> - std : : string GetPrintableTableOptions ( ) const override ; <nl> - <nl> - const PlainTableOptions & table_options ( ) const ; <nl> - <nl> + std : : string GetPrintableOptions ( ) const override ; <nl> static const char kValueTypeSeqId0 = char ( ~ 0 ) ; <nl> <nl> - / / Sanitizes the specified DB Options . <nl> - Status SanitizeOptions ( <nl> - const DBOptions & / * db_opts * / , <nl> - const ColumnFamilyOptions & / * cf_opts * / ) const override { <nl> - return Status : : OK ( ) ; <nl> - } <nl> - <nl> - void * GetOptions ( ) override { return & table_options_ ; } <nl> - <nl> - Status GetOptionString ( const ConfigOptions & / * config_options * / , <nl> - std : : string * / * opt_string * / ) const override { <nl> - return Status : : OK ( ) ; <nl> - } <nl> - <nl> - static const std : : string kName ; <nl> - <nl> private : <nl> PlainTableOptions table_options_ ; <nl> } ; <nl> mmm a / table / sst_file_dumper . cc <nl> ppp b / table / sst_file_dumper . cc <nl> Status SstFileDumper : : NewTableReader ( <nl> <nl> / / We need to turn off pre - fetching of index and filter nodes for <nl> / / BlockBasedTable <nl> - if ( BlockBasedTableFactory : : kName = = options_ . table_factory - > Name ( ) ) { <nl> + if ( options_ . table_factory - > IsInstanceOf ( <nl> + TableFactory : : kBlockBasedTableName ( ) ) ) { <nl> return options_ . table_factory - > NewTableReader ( t_opt , std : : move ( file_ ) , <nl> file_size , & table_reader_ , <nl> / * enable_prefetch = * / false ) ; <nl> new file mode 100644 <nl> index 0000000000 . . ebf5cfaf7d <nl> mmm / dev / null <nl> ppp b / table / table_factory . cc <nl> <nl> + / / Copyright ( c ) Facebook , Inc . and its affiliates . All Rights Reserved . <nl> + / / Copyright ( c ) 2011 The LevelDB Authors . All rights reserved . <nl> + / / Use of this source code is governed by a BSD - style license that can be <nl> + / / found in the LICENSE file . See the AUTHORS file for names of contributors . <nl> + <nl> + # include " rocksdb / convenience . h " <nl> + # include " rocksdb / table . h " <nl> + # include " table / block_based / block_based_table_factory . h " <nl> + # include " table / cuckoo / cuckoo_table_factory . h " <nl> + # include " table / plain / plain_table_factory . h " <nl> + <nl> + namespace ROCKSDB_NAMESPACE { <nl> + <nl> + Status TableFactory : : CreateFromString ( const ConfigOptions & config_options_in , <nl> + const std : : string & id , <nl> + std : : shared_ptr < TableFactory > * factory ) { <nl> + Status status ; <nl> + std : : string name = id ; <nl> + <nl> + std : : string existing_opts ; <nl> + <nl> + ConfigOptions config_options = config_options_in ; <nl> + if ( factory - > get ( ) ! = nullptr & & name = = factory - > get ( ) - > Name ( ) ) { <nl> + config_options . delimiter = " ; " ; <nl> + <nl> + status = factory - > get ( ) - > GetOptionString ( config_options , & existing_opts ) ; <nl> + if ( ! status . 
ok ( ) ) { <nl> + return status ; <nl> + } <nl> + } <nl> + if ( name = = TableFactory : : kBlockBasedTableName ( ) ) { <nl> + factory - > reset ( new BlockBasedTableFactory ( ) ) ; <nl> + # ifndef ROCKSDB_LITE <nl> + } else if ( name = = TableFactory : : kPlainTableName ( ) ) { <nl> + factory - > reset ( new PlainTableFactory ( ) ) ; <nl> + } else if ( name = = TableFactory : : kCuckooTableName ( ) ) { <nl> + factory - > reset ( new CuckooTableFactory ( ) ) ; <nl> + # endif / / ROCKSDB_LITE <nl> + } else { <nl> + return Status : : NotSupported ( " Could not load table factory : " , name ) ; <nl> + } <nl> + if ( ! existing_opts . empty ( ) ) { <nl> + config_options . invoke_prepare_options = false ; <nl> + status = factory - > get ( ) - > ConfigureFromString ( config_options , existing_opts ) ; <nl> + } <nl> + return status ; <nl> + } <nl> + <nl> + } / / namespace ROCKSDB_NAMESPACE <nl> mmm a / table / table_test . cc <nl> ppp b / table / table_test . cc <nl> void ValidateBlockSizeDeviation ( int value , int expected ) { <nl> BlockBasedTableFactory * factory = new BlockBasedTableFactory ( table_options ) ; <nl> <nl> const BlockBasedTableOptions * normalized_table_options = <nl> - ( const BlockBasedTableOptions * ) factory - > GetOptions ( ) ; <nl> + factory - > GetOptions < BlockBasedTableOptions > ( ) ; <nl> ASSERT_EQ ( normalized_table_options - > block_size_deviation , expected ) ; <nl> <nl> delete factory ; <nl> void ValidateBlockRestartInterval ( int value , int expected ) { <nl> BlockBasedTableFactory * factory = new BlockBasedTableFactory ( table_options ) ; <nl> <nl> const BlockBasedTableOptions * normalized_table_options = <nl> - ( const BlockBasedTableOptions * ) factory - > GetOptions ( ) ; <nl> + factory - > GetOptions < BlockBasedTableOptions > ( ) ; <nl> ASSERT_EQ ( normalized_table_options - > block_restart_interval , expected ) ; <nl> <nl> delete factory ; <nl> mmm a / test_util / testutil . cc <nl> ppp b / test_util / testutil . cc <nl> <nl> # include " file / sequence_file_reader . h " <nl> # include " file / writable_file_writer . h " <nl> # include " port / port . h " <nl> + # include " rocksdb / convenience . h " <nl> # include " test_util / sync_point . h " <nl> # include " util / random . h " <nl> <nl> mmm a / tools / db_bench_tool . cc <nl> ppp b / tools / db_bench_tool . cc <nl> class Benchmark { <nl> options . compression_opts . parallel_threads = <nl> FLAGS_compression_parallel_threads ; <nl> / / If this is a block based table , set some related options <nl> - if ( options . table_factory - > Name ( ) = = BlockBasedTableFactory : : kName & & <nl> - options . table_factory - > GetOptions ( ) ! = nullptr ) { <nl> - BlockBasedTableOptions * table_options = <nl> - reinterpret_cast < BlockBasedTableOptions * > ( <nl> - options . table_factory - > GetOptions ( ) ) ; <nl> + auto table_options = <nl> + options . table_factory - > GetOptions < BlockBasedTableOptions > ( ) ; <nl> + if ( table_options ! = nullptr ) { <nl> if ( FLAGS_cache_size ) { <nl> table_options - > block_cache = cache_ ; <nl> } <nl> mmm a / tools / trace_analyzer_tool . cc <nl> ppp b / tools / trace_analyzer_tool . cc <nl> <nl> # include " rocksdb / utilities / ldb_cmd . h " <nl> # include " rocksdb / write_batch . h " <nl> # include " table / meta_blocks . h " <nl> - # include " table / plain / plain_table_factory . h " <nl> # include " table / table_reader . h " <nl> # include " tools / trace_analyzer_tool . h " <nl> # include " trace_replay / trace_replay . h " <nl> mmm a / util / timer_test . 
cc <nl> ppp b / util / timer_test . cc <nl> TEST_F ( TimerTest , AddAfterStartTest ) { <nl> } <nl> <nl> TEST_F ( TimerTest , CancelRunningTask ) { <nl> + static constexpr char kTestFuncName [ ] = " test_func " ; <nl> const int kRepeatUs = 1 * kUsPerSec ; <nl> - constexpr char kTestFuncName [ ] = " test_func " ; <nl> Timer timer ( mock_env_ . get ( ) ) ; <nl> ASSERT_TRUE ( timer . Start ( ) ) ; <nl> int * value = new int ; <nl> mmm a / utilities / memory / memory_test . cc <nl> ppp b / utilities / memory / memory_test . cc <nl> class MemoryTest : public testing : : Test { <nl> void GetCachePointersFromTableFactory ( <nl> const TableFactory * factory , <nl> std : : unordered_set < const Cache * > * cache_set ) { <nl> - const BlockBasedTableFactory * bbtf = <nl> - dynamic_cast < const BlockBasedTableFactory * > ( factory ) ; <nl> - if ( bbtf ! = nullptr ) { <nl> - const auto bbt_opts = bbtf - > table_options ( ) ; <nl> - cache_set - > insert ( bbt_opts . block_cache . get ( ) ) ; <nl> - cache_set - > insert ( bbt_opts . block_cache_compressed . get ( ) ) ; <nl> + const auto bbto = factory - > GetOptions < BlockBasedTableOptions > ( ) ; <nl> + if ( bbto ! = nullptr ) { <nl> + cache_set - > insert ( bbto - > block_cache . get ( ) ) ; <nl> + cache_set - > insert ( bbto - > block_cache_compressed . get ( ) ) ; <nl> } <nl> } <nl> <nl> mmm a / utilities / options / options_util . cc <nl> ppp b / utilities / options / options_util . cc <nl> Status LoadOptionsFromFile ( const ConfigOptions & config_options , <nl> cf_descs - > push_back ( { cf_names [ i ] , cf_opts [ i ] } ) ; <nl> if ( cache ! = nullptr ) { <nl> TableFactory * tf = cf_opts [ i ] . table_factory . get ( ) ; <nl> - if ( tf ! = nullptr & & tf - > GetOptions ( ) ! = nullptr & & <nl> - tf - > Name ( ) = = BlockBasedTableFactory : : kName ) { <nl> - auto * loaded_bbt_opt = <nl> - reinterpret_cast < BlockBasedTableOptions * > ( tf - > GetOptions ( ) ) ; <nl> - loaded_bbt_opt - > block_cache = * cache ; <nl> + if ( tf ! = nullptr ) { <nl> + auto * opts = tf - > GetOptions < BlockBasedTableOptions > ( ) ; <nl> + if ( opts ! = nullptr ) { <nl> + opts - > block_cache = * cache ; <nl> + } <nl> } <nl> } <nl> } <nl> mmm a / utilities / options / options_util_test . cc <nl> ppp b / utilities / options / options_util_test . cc <nl> class OptionsUtilTest : public testing : : Test { <nl> Random rnd_ ; <nl> } ; <nl> <nl> - bool IsBlockBasedTableFactory ( TableFactory * tf ) { <nl> - return tf - > Name ( ) = = BlockBasedTableFactory : : kName ; <nl> - } <nl> - <nl> TEST_F ( OptionsUtilTest , SaveAndLoad ) { <nl> const size_t kCFCount = 5 ; <nl> <nl> TEST_F ( OptionsUtilTest , SaveAndLoad ) { <nl> ASSERT_EQ ( cf_names [ i ] , loaded_cf_descs [ i ] . name ) ; <nl> ASSERT_OK ( RocksDBOptionsParser : : VerifyCFOptions ( <nl> exact , cf_opts [ i ] , loaded_cf_descs [ i ] . options ) ) ; <nl> - if ( IsBlockBasedTableFactory ( cf_opts [ i ] . table_factory . get ( ) ) ) { <nl> - ASSERT_OK ( RocksDBOptionsParser : : VerifyTableFactory ( <nl> - exact , cf_opts [ i ] . table_factory . get ( ) , <nl> - loaded_cf_descs [ i ] . options . table_factory . get ( ) ) ) ; <nl> - } <nl> + ASSERT_OK ( RocksDBOptionsParser : : VerifyTableFactory ( <nl> + exact , cf_opts [ i ] . table_factory . get ( ) , <nl> + loaded_cf_descs [ i ] . options . table_factory . get ( ) ) ) ; <nl> test : : RandomInitCFOptions ( & cf_opts [ i ] , db_opt , & rnd_ ) ; <nl> ASSERT_NOK ( RocksDBOptionsParser : : VerifyCFOptions ( <nl> exact , cf_opts [ i ] , loaded_cf_descs [ i ] . 
options ) ) ; <nl> TEST_F ( OptionsUtilTest , SaveAndLoadWithCacheCheck ) { <nl> ASSERT_OK ( LoadOptionsFromFile ( config_options , kFileName , & loaded_db_opt , <nl> & loaded_cf_descs , & cache ) ) ; <nl> for ( size_t i = 0 ; i < loaded_cf_descs . size ( ) ; i + + ) { <nl> - if ( IsBlockBasedTableFactory ( cf_opts [ i ] . table_factory . get ( ) ) ) { <nl> - auto * loaded_bbt_opt = reinterpret_cast < BlockBasedTableOptions * > ( <nl> - loaded_cf_descs [ i ] . options . table_factory - > GetOptions ( ) ) ; <nl> - / / Expect the same cache will be loaded <nl> - if ( loaded_bbt_opt ! = nullptr ) { <nl> - ASSERT_EQ ( loaded_bbt_opt - > block_cache . get ( ) , cache . get ( ) ) ; <nl> - } <nl> + auto * loaded_bbt_opt = <nl> + loaded_cf_descs [ i ] <nl> + . options . table_factory - > GetOptions < BlockBasedTableOptions > ( ) ; <nl> + / / Expect the same cache will be loaded <nl> + if ( loaded_bbt_opt ! = nullptr ) { <nl> + ASSERT_EQ ( loaded_bbt_opt - > block_cache . get ( ) , cache . get ( ) ) ; <nl> } <nl> } <nl> <nl> TEST_F ( OptionsUtilTest , SaveAndLoadWithCacheCheck ) { <nl> ASSERT_OK ( LoadOptionsFromFile ( kFileName , env_ . get ( ) , & loaded_db_opt , <nl> & loaded_cf_descs , false , & cache ) ) ; <nl> for ( size_t i = 0 ; i < loaded_cf_descs . size ( ) ; i + + ) { <nl> - if ( IsBlockBasedTableFactory ( cf_opts [ i ] . table_factory . get ( ) ) ) { <nl> - auto * loaded_bbt_opt = reinterpret_cast < BlockBasedTableOptions * > ( <nl> - loaded_cf_descs [ i ] . options . table_factory - > GetOptions ( ) ) ; <nl> - / / Expect the same cache will be loaded <nl> - if ( loaded_bbt_opt ! = nullptr ) { <nl> - ASSERT_EQ ( loaded_bbt_opt - > block_cache . get ( ) , cache . get ( ) ) ; <nl> - } <nl> + auto * loaded_bbt_opt = <nl> + loaded_cf_descs [ i ] <nl> + . options . table_factory - > GetOptions < BlockBasedTableOptions > ( ) ; <nl> + / / Expect the same cache will be loaded <nl> + if ( loaded_bbt_opt ! = nullptr ) { <nl> + ASSERT_EQ ( loaded_bbt_opt - > block_cache . get ( ) , cache . get ( ) ) ; <nl> } <nl> } <nl> } <nl> class DummyTableFactory : public TableFactory { <nl> return nullptr ; <nl> } <nl> <nl> - Status SanitizeOptions ( <nl> + Status ValidateOptions ( <nl> const DBOptions & / * db_opts * / , <nl> const ColumnFamilyOptions & / * cf_opts * / ) const override { <nl> return Status : : NotSupported ( ) ; <nl> } <nl> <nl> - std : : string GetPrintableTableOptions ( ) const override { return " " ; } <nl> - <nl> - Status GetOptionString ( const ConfigOptions & / * opts * / , <nl> - std : : string * / * opt_string * / ) const override { <nl> - return Status : : OK ( ) ; <nl> - } <nl> + std : : string GetPrintableOptions ( ) const override { return " " ; } <nl> } ; <nl> <nl> class DummyMergeOperator : public MergeOperator { <nl> mmm a / utilities / simulator_cache / sim_cache_test . cc <nl> ppp b / utilities / simulator_cache / sim_cache_test . cc <nl> class SimCacheTest : public DBTestBase { <nl> options . create_if_missing = true ; <nl> / / options . compression = kNoCompression ; <nl> options . statistics = ROCKSDB_NAMESPACE : : CreateDBStatistics ( ) ; <nl> - options . table_factory . reset ( new BlockBasedTableFactory ( table_options ) ) ; <nl> + options . table_factory . reset ( NewBlockBasedTableFactory ( table_options ) ) ; <nl> return options ; <nl> } <nl> <nl> TEST_F ( SimCacheTest , SimCache ) { <nl> co . metadata_charge_policy = kDontChargeCacheMetadata ; <nl> std : : shared_ptr < SimCache > simCache = NewSimCache ( NewLRUCache ( co ) , 20000 , 0 ) ; <nl> table_options . 
block_cache = simCache ; <nl> - options . table_factory . reset ( new BlockBasedTableFactory ( table_options ) ) ; <nl> + options . table_factory . reset ( NewBlockBasedTableFactory ( table_options ) ) ; <nl> Reopen ( options ) ; <nl> RecordCacheCounters ( options ) ; <nl> <nl> TEST_F ( SimCacheTest , SimCacheLogging ) { <nl> co . metadata_charge_policy = kDontChargeCacheMetadata ; <nl> std : : shared_ptr < SimCache > sim_cache = NewSimCache ( NewLRUCache ( co ) , 20000 , 0 ) ; <nl> table_options . block_cache = sim_cache ; <nl> - options . table_factory . reset ( new BlockBasedTableFactory ( table_options ) ) ; <nl> + options . table_factory . reset ( NewBlockBasedTableFactory ( table_options ) ) ; <nl> Reopen ( options ) ; <nl> <nl> int num_block_entries = 20 ; <nl> | Bring the Configurable options together ( ) | facebook/rocksdb | 7d472accdca996d7d83ae8ce78ad17f799696926 | 2020-09-15T00:01:01Z |
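The RocksDB change in the row above swaps the untyped accessors (the void* GetOptions(), table_options(), and kName string comparisons) for a templated GetOptions<BlockBasedTableOptions>() lookup on the table factory that returns nullptr when the factory holds a different options type. A minimal caller-side sketch of that pattern follows, assuming only the public headers and the post-commit TableFactory API visible in the diff; the option values, includes, and main() wrapper are illustrative and not taken from the commit.

// Illustrative sketch (not from the commit): retrieving BlockBasedTableOptions
// through the templated accessor introduced by the diff above, instead of
// reinterpret_cast-ing the void* returned by the old GetOptions().
#include <cassert>

#include "rocksdb/options.h"  // rocksdb::Options
#include "rocksdb/table.h"    // BlockBasedTableOptions, NewBlockBasedTableFactory

int main() {
  rocksdb::BlockBasedTableOptions table_options;
  table_options.block_size = 16 * 1024;  // value chosen only for the example

  rocksdb::Options options;
  options.table_factory.reset(
      rocksdb::NewBlockBasedTableFactory(table_options));

  // Typed lookup: returns nullptr if the factory does not actually hold
  // BlockBasedTableOptions, so callers no longer compare factory names.
  auto* bbto =
      options.table_factory->GetOptions<rocksdb::BlockBasedTableOptions>();
  if (bbto != nullptr) {
    assert(bbto->block_size == 16 * 1024);
    // The returned pointer can be mutated in place, as db_bench does in the
    // diff when it assigns the shared block cache.
    bbto->cache_index_and_filter_blocks = true;
  }
  return 0;
}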
mmm a / wrapper / xgboost_R . cpp <nl> ppp b / wrapper / xgboost_R . cpp <nl> extern " C " { <nl> } <nl> } <nl> SEXP XGDMatrixGetInfo_R ( SEXP handle , SEXP field ) { <nl> - size_t olen ; <nl> + uint64_t olen ; <nl> const float * res = XGDMatrixGetFloatInfo ( R_ExternalPtrAddr ( handle ) , <nl> CHAR ( asChar ( field ) ) , & olen ) ; <nl> SEXP ret = PROTECT ( allocVector ( REALSXP , olen ) ) ; <nl> extern " C " { <nl> & vec_dmats [ 0 ] , & vec_sptr [ 0 ] , len ) ) ; <nl> } <nl> SEXP XGBoosterPredict_R ( SEXP handle , SEXP dmat , SEXP output_margin ) { <nl> - size_t olen ; <nl> + uint64_t olen ; <nl> const float * res = XGBoosterPredict ( R_ExternalPtrAddr ( handle ) , <nl> R_ExternalPtrAddr ( dmat ) , <nl> asInteger ( output_margin ) , <nl> extern " C " { <nl> XGBoosterSaveModel ( R_ExternalPtrAddr ( handle ) , CHAR ( asChar ( fname ) ) ) ; <nl> } <nl> void XGBoosterDumpModel_R ( SEXP handle , SEXP fname , SEXP fmap ) { <nl> - size_t olen ; <nl> + uint64_t olen ; <nl> const char * * res = XGBoosterDumpModel ( R_ExternalPtrAddr ( handle ) , <nl> CHAR ( asChar ( fmap ) ) , <nl> & olen ) ; <nl> mmm a / wrapper / xgboost_wrapper . cpp <nl> ppp b / wrapper / xgboost_wrapper . cpp <nl> class Booster : public learner : : BoostLearner < FMatrixS > { <nl> this - > init_model = false ; <nl> this - > SetCacheData ( mats ) ; <nl> } <nl> - const float * Pred ( const DataMatrix & dmat , int output_margin , size_t * len ) { <nl> + const float * Pred ( const DataMatrix & dmat , int output_margin , uint64_t * len ) { <nl> this - > CheckInitModel ( ) ; <nl> this - > Predict ( dmat , output_margin , & this - > preds_ ) ; <nl> * len = this - > preds_ . size ( ) ; <nl> return & this - > preds_ [ 0 ] ; <nl> } <nl> inline void BoostOneIter ( const DataMatrix & train , <nl> - float * grad , float * hess , size_t len ) { <nl> + float * grad , float * hess , uint64_t len ) { <nl> this - > gpair_ . resize ( len ) ; <nl> const unsigned ndata = static_cast < unsigned > ( len ) ; <nl> # pragma omp parallel for schedule ( static ) <nl> class Booster : public learner : : BoostLearner < FMatrixS > { <nl> learner : : BoostLearner < FMatrixS > : : LoadModel ( fname ) ; <nl> this - > init_model = true ; <nl> } <nl> - inline const char * * GetModelDump ( const utils : : FeatMap & fmap , bool with_stats , size_t * len ) { <nl> + inline const char * * GetModelDump ( const utils : : FeatMap & fmap , bool with_stats , uint64_t * len ) { <nl> model_dump = this - > DumpModel ( fmap , with_stats ) ; <nl> model_dump_cptr . resize ( model_dump . size ( ) ) ; <nl> for ( size_t i = 0 ; i < model_dump . size ( ) ; + + i ) { <nl> extern " C " { <nl> void * XGDMatrixCreateFromFile ( const char * fname , int silent ) { <nl> return LoadDataMatrix ( fname , silent , false ) ; <nl> } <nl> - void * XGDMatrixCreateFromCSR ( const size_t * indptr , <nl> + void * XGDMatrixCreateFromCSR ( const uint64_t * indptr , <nl> const unsigned * indices , <nl> const float * data , <nl> - size_t nindptr , <nl> - size_t nelem ) { <nl> + uint64_t nindptr , <nl> + uint64_t nelem ) { <nl> DMatrixSimple * p_mat = new DMatrixSimple ( ) ; <nl> DMatrixSimple & mat = * p_mat ; <nl> mat . row_ptr_ . resize ( nindptr ) ; <nl> - memcpy ( & mat . row_ptr_ [ 0 ] , indptr , sizeof ( size_t ) * nindptr ) ; <nl> + for ( uint64_t i = 0 ; i < nindptr ; + + i ) { <nl> + mat . row_ptr_ [ i ] = static_cast < size_t > ( indptr [ i ] ) ; <nl> + } <nl> mat . row_data_ . 
resize ( nelem ) ; <nl> - for ( size_t i = 0 ; i < nelem ; + + i ) { <nl> + for ( uint64_t i = 0 ; i < nelem ; + + i ) { <nl> mat . row_data_ [ i ] = SparseBatch : : Entry ( indices [ i ] , data [ i ] ) ; <nl> mat . info . info . num_col = std : : max ( mat . info . info . num_col , <nl> - static_cast < size_t > ( indices [ i ] + 1 ) ) ; <nl> + static_cast < uint64_t > ( indices [ i ] + 1 ) ) ; <nl> } <nl> mat . info . info . num_row = nindptr - 1 ; <nl> return p_mat ; <nl> } <nl> void * XGDMatrixCreateFromMat ( const float * data , <nl> - size_t nrow , <nl> - size_t ncol , <nl> + uint64_t nrow , <nl> + uint64_t ncol , <nl> float missing ) { <nl> DMatrixSimple * p_mat = new DMatrixSimple ( ) ; <nl> DMatrixSimple & mat = * p_mat ; <nl> mat . info . info . num_row = nrow ; <nl> mat . info . info . num_col = ncol ; <nl> - for ( size_t i = 0 ; i < nrow ; + + i , data + = ncol ) { <nl> - size_t nelem = 0 ; <nl> - for ( size_t j = 0 ; j < ncol ; + + j ) { <nl> + for ( uint64_t i = 0 ; i < nrow ; + + i , data + = ncol ) { <nl> + uint64_t nelem = 0 ; <nl> + for ( uint64_t j = 0 ; j < ncol ; + + j ) { <nl> if ( data [ j ] ! = missing ) { <nl> mat . row_data_ . push_back ( SparseBatch : : Entry ( j , data [ j ] ) ) ; <nl> + + nelem ; <nl> extern " C " { <nl> } <nl> void * XGDMatrixSliceDMatrix ( void * handle , <nl> const int * idxset , <nl> - size_t len ) { <nl> + uint64_t len ) { <nl> DMatrixSimple tmp ; <nl> DataMatrix & dsrc = * static_cast < DataMatrix * > ( handle ) ; <nl> if ( dsrc . magic ! = DMatrixSimple : : kMagic ) { <nl> extern " C " { <nl> iter - > BeforeFirst ( ) ; <nl> utils : : Assert ( iter - > Next ( ) , " slice " ) ; <nl> const SparseBatch & batch = iter - > Value ( ) ; <nl> - for ( size_t i = 0 ; i < len ; + + i ) { <nl> + for ( uint64_t i = 0 ; i < len ; + + i ) { <nl> const int ridx = idxset [ i ] ; <nl> SparseBatch : : Inst inst = batch [ ridx ] ; <nl> - utils : : Check ( static_cast < size_t > ( ridx ) < batch . size , " slice index exceed number of rows " ) ; <nl> + utils : : Check ( static_cast < uint64_t > ( ridx ) < batch . size , " slice index exceed number of rows " ) ; <nl> ret . row_data_ . resize ( ret . row_data_ . size ( ) + inst . length ) ; <nl> memcpy ( & ret . row_data_ [ ret . row_ptr_ . back ( ) ] , inst . data , <nl> sizeof ( SparseBatch : : Entry ) * inst . length ) ; <nl> extern " C " { <nl> void XGDMatrixSaveBinary ( void * handle , const char * fname , int silent ) { <nl> SaveDataMatrix ( * static_cast < DataMatrix * > ( handle ) , fname , silent ) ; <nl> } <nl> - void XGDMatrixSetFloatInfo ( void * handle , const char * field , const float * info , size_t len ) { <nl> + void XGDMatrixSetFloatInfo ( void * handle , const char * field , const float * info , uint64_t len ) { <nl> std : : vector < float > & vec = <nl> static_cast < DataMatrix * > ( handle ) - > info . GetFloatInfo ( field ) ; <nl> vec . resize ( len ) ; <nl> memcpy ( & vec [ 0 ] , info , sizeof ( float ) * len ) ; <nl> } <nl> - void XGDMatrixSetUIntInfo ( void * handle , const char * field , const unsigned * info , size_t len ) { <nl> + void XGDMatrixSetUIntInfo ( void * handle , const char * field , const unsigned * info , uint64_t len ) { <nl> std : : vector < unsigned > & vec = <nl> static_cast < DataMatrix * > ( handle ) - > info . GetUIntInfo ( field ) ; <nl> vec . 
resize ( len ) ; <nl> memcpy ( & vec [ 0 ] , info , sizeof ( unsigned ) * len ) ; <nl> } <nl> - void XGDMatrixSetGroup ( void * handle , const unsigned * group , size_t len ) { <nl> + void XGDMatrixSetGroup ( void * handle , const unsigned * group , uint64_t len ) { <nl> DataMatrix * pmat = static_cast < DataMatrix * > ( handle ) ; <nl> pmat - > info . group_ptr . resize ( len + 1 ) ; <nl> pmat - > info . group_ptr [ 0 ] = 0 ; <nl> - for ( size_t i = 0 ; i < len ; + + i ) { <nl> + for ( uint64_t i = 0 ; i < len ; + + i ) { <nl> pmat - > info . group_ptr [ i + 1 ] = pmat - > info . group_ptr [ i ] + group [ i ] ; <nl> } <nl> } <nl> - const float * XGDMatrixGetFloatInfo ( const void * handle , const char * field , size_t * len ) { <nl> + const float * XGDMatrixGetFloatInfo ( const void * handle , const char * field , uint64_t * len ) { <nl> const std : : vector < float > & vec = <nl> static_cast < const DataMatrix * > ( handle ) - > info . GetFloatInfo ( field ) ; <nl> * len = vec . size ( ) ; <nl> return & vec [ 0 ] ; <nl> } <nl> - const unsigned * XGDMatrixGetUIntInfo ( const void * handle , const char * field , size_t * len ) { <nl> + const unsigned * XGDMatrixGetUIntInfo ( const void * handle , const char * field , uint64_t * len ) { <nl> const std : : vector < unsigned > & vec = <nl> static_cast < const DataMatrix * > ( handle ) - > info . GetUIntInfo ( field ) ; <nl> * len = vec . size ( ) ; <nl> return & vec [ 0 ] ; <nl> } <nl> - size_t XGDMatrixNumRow ( const void * handle ) { <nl> + uint64_t XGDMatrixNumRow ( const void * handle ) { <nl> return static_cast < const DataMatrix * > ( handle ) - > info . num_row ( ) ; <nl> } <nl> <nl> / / xgboost implementation <nl> - void * XGBoosterCreate ( void * dmats [ ] , size_t len ) { <nl> + void * XGBoosterCreate ( void * dmats [ ] , uint64_t len ) { <nl> std : : vector < DataMatrix * > mats ; <nl> - for ( size_t i = 0 ; i < len ; + + i ) { <nl> + for ( uint64_t i = 0 ; i < len ; + + i ) { <nl> DataMatrix * dtr = static_cast < DataMatrix * > ( dmats [ i ] ) ; <nl> mats . push_back ( dtr ) ; <nl> } <nl> extern " C " { <nl> bst - > UpdateOneIter ( iter , * dtr ) ; <nl> } <nl> void XGBoosterBoostOneIter ( void * handle , void * dtrain , <nl> - float * grad , float * hess , size_t len ) { <nl> + float * grad , float * hess , uint64_t len ) { <nl> Booster * bst = static_cast < Booster * > ( handle ) ; <nl> DataMatrix * dtr = static_cast < DataMatrix * > ( dtrain ) ; <nl> bst - > CheckInitModel ( ) ; <nl> extern " C " { <nl> bst - > BoostOneIter ( * dtr , grad , hess , len ) ; <nl> } <nl> const char * XGBoosterEvalOneIter ( void * handle , int iter , void * dmats [ ] , <nl> - const char * evnames [ ] , size_t len ) { <nl> + const char * evnames [ ] , uint64_t len ) { <nl> Booster * bst = static_cast < Booster * > ( handle ) ; <nl> std : : vector < std : : string > names ; <nl> std : : vector < const DataMatrix * > mats ; <nl> - for ( size_t i = 0 ; i < len ; + + i ) { <nl> + for ( uint64_t i = 0 ; i < len ; + + i ) { <nl> mats . push_back ( static_cast < DataMatrix * > ( dmats [ i ] ) ) ; <nl> names . push_back ( std : : string ( evnames [ i ] ) ) ; <nl> } <nl> extern " C " { <nl> bst - > eval_str = bst - > EvalOneIter ( iter , mats , names ) ; <nl> return bst - > eval_str . 
c_str ( ) ; <nl> } <nl> - const float * XGBoosterPredict ( void * handle , void * dmat , int output_margin , size_t * len ) { <nl> + const float * XGBoosterPredict ( void * handle , void * dmat , int output_margin , uint64_t * len ) { <nl> return static_cast < Booster * > ( handle ) - > Pred ( * static_cast < DataMatrix * > ( dmat ) , output_margin , len ) ; <nl> } <nl> void XGBoosterLoadModel ( void * handle , const char * fname ) { <nl> extern " C " { <nl> void XGBoosterSaveModel ( const void * handle , const char * fname ) { <nl> static_cast < const Booster * > ( handle ) - > SaveModel ( fname ) ; <nl> } <nl> - const char * * XGBoosterDumpModel ( void * handle , const char * fmap , size_t * len ) { <nl> + const char * * XGBoosterDumpModel ( void * handle , const char * fmap , uint64_t * len ) { <nl> utils : : FeatMap featmap ; <nl> if ( strlen ( fmap ) ! = 0 ) { <nl> featmap . LoadText ( fmap ) ; <nl> mmm a / wrapper / xgboost_wrapper . h <nl> ppp b / wrapper / xgboost_wrapper . h <nl> <nl> * can be used to create wrapper of other languages <nl> * / <nl> # include < cstdio > <nl> + / / define uint64_t <nl> + typedef unsigned long uint64_t ; <nl> <nl> extern " C " { <nl> / * ! <nl> extern " C " { <nl> * \ param nelem number of nonzero elements in the matrix <nl> * \ return created dmatrix <nl> * / <nl> - void * XGDMatrixCreateFromCSR ( const size_t * indptr , <nl> + void * XGDMatrixCreateFromCSR ( const uint64_t * indptr , <nl> const unsigned * indices , <nl> const float * data , <nl> - size_t nindptr , <nl> - size_t nelem ) ; <nl> + uint64_t nindptr , <nl> + uint64_t nelem ) ; <nl> / * ! <nl> * \ brief create matrix content from dense matrix <nl> * \ param data pointer to the data space <nl> extern " C " { <nl> * \ return created dmatrix <nl> * / <nl> void * XGDMatrixCreateFromMat ( const float * data , <nl> - size_t nrow , <nl> - size_t ncol , <nl> + uint64_t nrow , <nl> + uint64_t ncol , <nl> float missing ) ; <nl> / * ! <nl> * \ brief create a new dmatrix from sliced content of existing matrix <nl> extern " C " { <nl> * / <nl> void * XGDMatrixSliceDMatrix ( void * handle , <nl> const int * idxset , <nl> - size_t len ) ; <nl> + uint64_t len ) ; <nl> / * ! <nl> * \ brief free space in data matrix <nl> * / <nl> extern " C " { <nl> * \ param array pointer to float vector <nl> * \ param len length of array <nl> * / <nl> - void XGDMatrixSetFloatInfo ( void * handle , const char * field , const float * array , size_t len ) ; <nl> + void XGDMatrixSetFloatInfo ( void * handle , const char * field , const float * array , uint64_t len ) ; <nl> / * ! <nl> * \ brief set uint32 vector to a content in info <nl> * \ param handle a instance of data matrix <nl> extern " C " { <nl> * \ param array pointer to float vector <nl> * \ param len length of array <nl> * / <nl> - void XGDMatrixSetUIntInfo ( void * handle , const char * field , const unsigned * array , size_t len ) ; <nl> + void XGDMatrixSetUIntInfo ( void * handle , const char * field , const unsigned * array , uint64_t len ) ; <nl> / * ! <nl> * \ brief set label of the training matrix <nl> * \ param handle a instance of data matrix <nl> * \ param group pointer to group size <nl> * \ param len length of array <nl> * / <nl> - void XGDMatrixSetGroup ( void * handle , const unsigned * group , size_t len ) ; <nl> + void XGDMatrixSetGroup ( void * handle , const unsigned * group , uint64_t len ) ; <nl> / * ! 
<nl> * \ brief get float info vector from matrix <nl> * \ param handle a instance of data matrix <nl> extern " C " { <nl> * \ param out_len used to set result length <nl> * \ return pointer to the result <nl> * / <nl> - const float * XGDMatrixGetFloatInfo ( const void * handle , const char * field , size_t * out_len ) ; <nl> + const float * XGDMatrixGetFloatInfo ( const void * handle , const char * field , uint64_t * out_len ) ; <nl> / * ! <nl> * \ brief get uint32 info vector from matrix <nl> * \ param handle a instance of data matrix <nl> extern " C " { <nl> * \ param out_len used to set result length <nl> * \ return pointer to the result <nl> * / <nl> - const unsigned * XGDMatrixGetUIntInfo ( const void * handle , const char * field , size_t * out_len ) ; <nl> + const unsigned * XGDMatrixGetUIntInfo ( const void * handle , const char * field , uint64_t * out_len ) ; <nl> / * ! <nl> * \ brief return number of rows <nl> * / <nl> - size_t XGDMatrixNumRow ( const void * handle ) ; <nl> + uint64_t XGDMatrixNumRow ( const void * handle ) ; <nl> / / mmm start XGBoost class <nl> / * ! <nl> * \ brief create xgboost learner <nl> * \ param dmats matrices that are set to be cached <nl> * \ param len length of dmats <nl> * / <nl> - void * XGBoosterCreate ( void * dmats [ ] , size_t len ) ; <nl> + void * XGBoosterCreate ( void * dmats [ ] , uint64_t len ) ; <nl> / * ! <nl> * \ brief free obj in handle <nl> * \ param handle handle to be freed <nl> extern " C " { <nl> * \ param len length of grad / hess array <nl> * / <nl> void XGBoosterBoostOneIter ( void * handle , void * dtrain , <nl> - float * grad , float * hess , size_t len ) ; <nl> + float * grad , float * hess , uint64_t len ) ; <nl> / * ! <nl> * \ brief get evaluation statistics for xgboost <nl> * \ param handle handle <nl> extern " C " { <nl> * \ return the string containing evaluation stati <nl> * / <nl> const char * XGBoosterEvalOneIter ( void * handle , int iter , void * dmats [ ] , <nl> - const char * evnames [ ] , size_t len ) ; <nl> + const char * evnames [ ] , uint64_t len ) ; <nl> / * ! <nl> * \ brief make prediction based on dmat <nl> * \ param handle handle <nl> extern " C " { <nl> * \ param output_margin whether only output raw margin value <nl> * \ param len used to store length of returning result <nl> * / <nl> - const float * XGBoosterPredict ( void * handle , void * dmat , int output_margin , size_t * len ) ; <nl> + const float * XGBoosterPredict ( void * handle , void * dmat , int output_margin , uint64_t * len ) ; <nl> / * ! <nl> * \ brief load model from existing file <nl> * \ param handle handle <nl> extern " C " { <nl> * \ return char * data [ ] , representing dump of each model <nl> * / <nl> const char * * XGBoosterDumpModel ( void * handle , const char * fmap , <nl> - size_t * out_len ) ; <nl> + uint64_t * out_len ) ; <nl> } ; <nl> # endif / / XGBOOST_WRAPPER_H_ <nl> | chg size_t to uint64_t unsigned long in wrapper | dmlc/xgboost | 2623ab0a60138dd6d12304a21f6c3bfc8f696186 | 2014-08-27T02:06:53Z |
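The commit above switches the xgboost C wrapper from size_t to the fixed-width uint64_t so that foreign-language callers (here the R bindings) see the same integer width on every platform, and it replaces the memcpy of the indptr array with an element-wise loop, since the caller-facing and internal element types may no longer match in size. Below is a minimal sketch of that copy pattern, with hypothetical names rather than the actual xgboost API:

```cpp
// Illustrative only: a wrapper-style helper that accepts fixed-width
// uint64_t row pointers and copies them element by element into the
// library's size_t-based storage. A raw memcpy would be wrong whenever
// sizeof(size_t) != sizeof(uint64_t), e.g. on 32-bit builds.
#include <cstddef>
#include <cstdint>
#include <vector>

void ExampleSetRowPointers(const uint64_t* indptr, uint64_t nindptr,
                           std::vector<std::size_t>* out_row_ptr) {
  out_row_ptr->resize(nindptr);
  for (uint64_t i = 0; i < nindptr; ++i) {
    // Per-element cast keeps the copy correct regardless of word size.
    (*out_row_ptr)[i] = static_cast<std::size_t>(indptr[i]);
  }
}
```

The element-wise cast is marginally slower than memcpy, but it is the only portable option once the external ABI is pinned to 64-bit lengths.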
deleted file mode 100644 <nl> index b190175e48 . . 0000000000 <nl> mmm a / change / react - native - windows - 2019 - 12 - 01 - 22 - 55 - 26 - auto - update - versions060 . 0microsoft . 24 . json <nl> ppp / dev / null <nl> <nl> - { <nl> - " type " : " prerelease " , <nl> - " comment " : " Updating react - native to version : 0 . 60 . 0 - microsoft . 24 " , <nl> - " packageName " : " react - native - windows " , <nl> - " email " : " 53619745 + rnbot @ users . noreply . github . com " , <nl> - " commit " : " a1091818524a587066df580064a426738c01df46 " , <nl> - " date " : " 2019 - 12 - 01T22 : 55 : 26 . 533Z " <nl> - } <nl> \ No newline at end of file <nl> deleted file mode 100644 <nl> index a48915dc69 . . 0000000000 <nl> mmm a / change / react - native - windows - extended - 2019 - 12 - 01 - 22 - 55 - 28 - auto - update - versions060 . 0microsoft . 24 . json <nl> ppp / dev / null <nl> <nl> - { <nl> - " type " : " patch " , <nl> - " comment " : " Updating react - native to version : 0 . 60 . 0 - microsoft . 24 " , <nl> - " packageName " : " react - native - windows - extended " , <nl> - " email " : " 53619745 + rnbot @ users . noreply . github . com " , <nl> - " commit " : " 227b368b47249d0cb4ac468a3a3152a18b6e296e " , <nl> - " date " : " 2019 - 12 - 01T22 : 55 : 28 . 003Z " <nl> - } <nl> \ No newline at end of file <nl> mmm a / packages / E2ETest / package . json <nl> ppp b / packages / E2ETest / package . json <nl> <nl> { <nl> " name " : " e2etest " , <nl> - " version " : " 0 . 0 . 15 " , <nl> + " version " : " 0 . 0 . 16 " , <nl> " private " : true , <nl> " scripts " : { <nl> " build " : " just - scripts build " , <nl> <nl> " dependencies " : { <nl> " react " : " 16 . 8 . 6 " , <nl> " react - native " : " https : / / github . com / microsoft / react - native / archive / v0 . 60 . 0 - microsoft . 24 . tar . gz " , <nl> - " react - native - windows " : " 0 . 60 . 0 - vnext . 84 " , <nl> - " react - native - windows - extended " : " 0 . 60 . 32 " , <nl> + " react - native - windows " : " 0 . 60 . 0 - vnext . 85 " , <nl> + " react - native - windows - extended " : " 0 . 60 . 33 " , <nl> " rnpm - plugin - windows " : " ^ 0 . 3 . 8 " <nl> } , <nl> " devDependencies " : { <nl> mmm a / packages / microsoft - reactnative - sampleapps / package . json <nl> ppp b / packages / microsoft - reactnative - sampleapps / package . json <nl> <nl> { <nl> " name " : " microsoft - reactnative - sampleapps " , <nl> - " version " : " 0 . 0 . 15 " , <nl> + " version " : " 0 . 0 . 16 " , <nl> " private " : true , <nl> " scripts " : { <nl> " build " : " just - scripts build " , <nl> <nl> " dependencies " : { <nl> " react " : " 16 . 8 . 6 " , <nl> " react - native " : " https : / / github . com / microsoft / react - native / archive / v0 . 60 . 0 - microsoft . 24 . tar . gz " , <nl> - " react - native - windows " : " 0 . 60 . 0 - vnext . 84 " , <nl> - " react - native - windows - extended " : " 0 . 60 . 32 " , <nl> + " react - native - windows " : " 0 . 60 . 0 - vnext . 85 " , <nl> + " react - native - windows - extended " : " 0 . 60 . 33 " , <nl> " rnpm - plugin - windows " : " ^ 0 . 3 . 8 " <nl> } , <nl> " devDependencies " : { <nl> mmm a / packages / playground / package . json <nl> ppp b / packages / playground / package . json <nl> <nl> { <nl> " name " : " playground " , <nl> - " version " : " 0 . 0 . 15 " , <nl> + " version " : " 0 . 0 . 16 " , <nl> " private " : true , <nl> " scripts " : { <nl> " build " : " just - scripts build " , <nl> <nl> " dependencies " : { <nl> " react " : " 16 . 8 . 
6 " , <nl> " react - native " : " https : / / github . com / microsoft / react - native / archive / v0 . 60 . 0 - microsoft . 24 . tar . gz " , <nl> - " react - native - windows " : " 0 . 60 . 0 - vnext . 84 " , <nl> - " react - native - windows - extended " : " 0 . 60 . 32 " , <nl> + " react - native - windows " : " 0 . 60 . 0 - vnext . 85 " , <nl> + " react - native - windows - extended " : " 0 . 60 . 33 " , <nl> " rnpm - plugin - windows " : " ^ 0 . 3 . 8 " <nl> } , <nl> " devDependencies " : { <nl> mmm a / packages / react - native - windows - extended / CHANGELOG . json <nl> ppp b / packages / react - native - windows - extended / CHANGELOG . json <nl> <nl> { <nl> " name " : " react - native - windows - extended " , <nl> " entries " : [ <nl> + { <nl> + " date " : " Mon , 02 Dec 2019 17 : 34 : 30 GMT " , <nl> + " tag " : " react - native - windows - extended_v0 . 60 . 33 " , <nl> + " version " : " 0 . 60 . 33 " , <nl> + " comments " : { <nl> + " patch " : [ <nl> + { <nl> + " comment " : " Updating react - native to version : 0 . 60 . 0 - microsoft . 24 " , <nl> + " author " : " 53619745 + rnbot @ users . noreply . github . com " , <nl> + " commit " : " 227b368b47249d0cb4ac468a3a3152a18b6e296e " <nl> + } <nl> + ] <nl> + } <nl> + } , <nl> { <nl> " date " : " Thu , 21 Nov 2019 01 : 46 : 31 GMT " , <nl> " tag " : " react - native - windows - extended_v0 . 60 . 23 " , <nl> mmm a / packages / react - native - windows - extended / CHANGELOG . md <nl> ppp b / packages / react - native - windows - extended / CHANGELOG . md <nl> <nl> # Change Log - react - native - windows - extended <nl> <nl> - This log was last generated on Thu , 21 Nov 2019 01 : 46 : 31 GMT and should not be manually modified . <nl> + This log was last generated on Mon , 02 Dec 2019 17 : 34 : 30 GMT and should not be manually modified . <nl> <nl> + # # 0 . 60 . 33 <nl> + Mon , 02 Dec 2019 17 : 34 : 30 GMT <nl> + <nl> + # # # Patches <nl> + <nl> + - Updating react - native to version : 0 . 60 . 0 - microsoft . 24 ( 53619745 + rnbot @ users . noreply . github . com ) <nl> # # 0 . 60 . 23 <nl> Thu , 21 Nov 2019 01 : 46 : 31 GMT <nl> <nl> mmm a / packages / react - native - windows - extended / package . json <nl> ppp b / packages / react - native - windows - extended / package . json <nl> <nl> { <nl> " name " : " react - native - windows - extended " , <nl> - " version " : " 0 . 60 . 32 " , <nl> + " version " : " 0 . 60 . 33 " , <nl> " description " : " Additional react - native - windows components that are not part of RN lean - core . " , <nl> " main " : " lib / index . js " , <nl> " repository " : { <nl> <nl> " license " : " MIT " , <nl> " private " : false , <nl> " dependencies " : { <nl> - " react - native - windows " : " 0 . 60 . 0 - vnext . 84 " <nl> + " react - native - windows " : " 0 . 60 . 0 - vnext . 85 " <nl> } , <nl> " devDependencies " : { <nl> " @ react - native - community / eslint - config " : " ^ 0 . 0 . 5 " , <nl> mmm a / vnext / CHANGELOG . md <nl> ppp b / vnext / CHANGELOG . md <nl> <nl> # Change Log - react - native - windows <nl> <nl> - This log was last generated on Tue , 26 Nov 2019 00 : 13 : 20 GMT and should not be manually modified . <nl> + This log was last generated on Mon , 02 Dec 2019 17 : 34 : 30 GMT and should not be manually modified . <nl> <nl> + # # 0 . 60 . 0 - vnext . 85 <nl> + Mon , 02 Dec 2019 17 : 34 : 30 GMT <nl> + <nl> + # # # Changes <nl> + <nl> + - Updating react - native to version : 0 . 60 . 0 - microsoft . 24 ( 53619745 + rnbot @ users . noreply . github . com ) <nl> # # 0 . 60 . 
0 - vnext . 84 <nl> Tue , 26 Nov 2019 00 : 13 : 20 GMT <nl> <nl> mmm a / vnext / package . json <nl> ppp b / vnext / package . json <nl> <nl> { <nl> " name " : " react - native - windows " , <nl> - " version " : " 0 . 60 . 0 - vnext . 84 " , <nl> + " version " : " 0 . 60 . 0 - vnext . 85 " , <nl> " license " : " MIT " , <nl> " repository " : { <nl> " type " : " git " , <nl> | applying package updates * * * NO_CI * * * | microsoft/react-native-windows | 0b9cab386d8525ebf1792ef6d559c582d87c0cb2 | 2019-12-02T17:34:31Z |
mmm a / aten / src / ATen / core / Tensor . h <nl> ppp b / aten / src / ATen / core / Tensor . h <nl> class CAFFE2_API Tensor { <nl> return this - > unsafeGetTensorImpl ( ) - > data ( ) ; <nl> } <nl> <nl> + template < typename T > <nl> + T * data_ptr ( ) const ; <nl> + <nl> template < typename T > <nl> - T * data ( ) const ; <nl> + T * data ( ) const { <nl> + return data_ptr < T > ( ) ; <nl> + } <nl> <nl> template < typename T > <nl> T item ( ) const ; <nl> mmm a / aten / src / ATen / core / TensorMethods . h <nl> ppp b / aten / src / ATen / core / TensorMethods . h <nl> inline bool is_quantized ( Tensor self ) { <nl> <nl> # define DEFINE_CAST ( T , name ) \ <nl> template < > \ <nl> - inline T * Tensor : : data ( ) const { \ <nl> + inline T * Tensor : : data_ptr ( ) const { \ <nl> TORCH_CHECK ( \ <nl> scalar_type ( ) = = ScalarType : : name , \ <nl> " expected scalar type " , \ <nl> # name , \ <nl> " but found " , \ <nl> c10 : : toString ( scalar_type ( ) ) ) ; \ <nl> - return static_cast < T * > ( this - > data_ptr ( ) ) ; \ <nl> + return static_cast < T * > ( this - > unsafeGetTensorImpl ( ) - > data ( ) ) ; \ <nl> } <nl> <nl> AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_EXCEPT_COMPLEX_HALF ( DEFINE_CAST ) <nl> mmm a / aten / src / ATen / templates / Tensor . h <nl> ppp b / aten / src / ATen / templates / Tensor . h <nl> class CAFFE2_API Tensor { <nl> return this - > unsafeGetTensorImpl ( ) - > data ( ) ; <nl> } <nl> <nl> + template < typename T > <nl> + T * data_ptr ( ) const ; <nl> + <nl> template < typename T > <nl> - T * data ( ) const ; <nl> + T * data ( ) const { <nl> + return data_ptr < T > ( ) ; <nl> + } <nl> <nl> template < typename T > <nl> T item ( ) const ; <nl> mmm a / aten / src / ATen / templates / TensorMethods . h <nl> ppp b / aten / src / ATen / templates / TensorMethods . h <nl> inline bool is_quantized ( Tensor self ) { <nl> <nl> # define DEFINE_CAST ( T , name ) \ <nl> template < > \ <nl> - inline T * Tensor : : data ( ) const { \ <nl> + inline T * Tensor : : data_ptr ( ) const { \ <nl> TORCH_CHECK ( \ <nl> scalar_type ( ) = = ScalarType : : name , \ <nl> " expected scalar type " , \ <nl> # name , \ <nl> " but found " , \ <nl> c10 : : toString ( scalar_type ( ) ) ) ; \ <nl> - return static_cast < T * > ( this - > data_ptr ( ) ) ; \ <nl> + return static_cast < T * > ( this - > unsafeGetTensorImpl ( ) - > data ( ) ) ; \ <nl> } <nl> <nl> AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_EXCEPT_COMPLEX_HALF ( DEFINE_CAST ) <nl> mmm a / test / cpp / api / tensor . cpp <nl> ppp b / test / cpp / api / tensor . cpp <nl> TEST ( TensorTest , Item_CUDA ) { <nl> ASSERT_EQ ( scalar . to < int > ( ) , 123 ) ; <nl> } <nl> } <nl> + <nl> + TEST ( TensorTest , DataPtr ) { <nl> + auto tensor = at : : empty ( { 3 , 4 } , at : : kFloat ) ; <nl> + auto tensor_not_copy = tensor . to ( tensor . options ( ) ) ; <nl> + ASSERT_EQ ( tensor_not_copy . data_ptr < float > ( ) , tensor . data_ptr < float > ( ) ) ; <nl> + ASSERT_EQ ( tensor_not_copy . data_ptr ( ) , tensor . data_ptr ( ) ) ; <nl> + } <nl> | Templatize Tensor . data_ptr ( ) ( ) | pytorch/pytorch | eb7b39e02f7d75c26d8a795ea8c7fd911334da7e | 2019-08-20T00:02:18Z |
mmm a / test / cpp / end2end / streaming_throughput_test . cc <nl> ppp b / test / cpp / end2end / streaming_throughput_test . cc <nl> <nl> * <nl> * / <nl> <nl> - # include < atomic > <nl> # include < mutex > <nl> # include < thread > <nl> + # include < time . h > <nl> <nl> # include < grpc + + / channel . h > <nl> # include < grpc + + / client_context . h > <nl> <nl> # include < grpc + + / server_builder . h > <nl> # include < grpc + + / server_context . h > <nl> # include < grpc / grpc . h > <nl> + # include < grpc / support / atm . h > <nl> # include < grpc / support / thd . h > <nl> # include < grpc / support / time . h > <nl> # include < gtest / gtest . h > <nl> namespace testing { <nl> <nl> class TestServiceImpl : public : : grpc : : cpp : : test : : util : : TestService : : Service { <nl> public : <nl> - static void BidiStream_Sender ( ServerReaderWriter < EchoResponse , EchoRequest > * stream , std : : atomic < bool > * should_exit ) { <nl> + static void BidiStream_Sender ( ServerReaderWriter < EchoResponse , EchoRequest > * stream , gpr_atm * should_exit ) { <nl> EchoResponse response ; <nl> response . set_message ( kLargeString ) ; <nl> - while ( ! should_exit - > load ( ) ) { <nl> - / / TODO ( vpai ) : Decide if the below requires blocking annotation <nl> - std : : this_thread : : sleep_for ( std : : chrono : : milliseconds ( 1 ) ) ; <nl> + while ( gpr_atm_acq_load ( should_exit ) = = static_cast < gpr_atm > ( 0 ) ) { <nl> + struct timespec tv = { 0 , 1000000 } ; / / 1 ms <nl> + struct timespec rem ; <nl> + / / TODO ( vpai ) : Mark this blocking <nl> + while ( nanosleep ( & tv , & rem ) ! = 0 ) { <nl> + tv = rem ; <nl> + } ; <nl> + <nl> stream - > Write ( response ) ; <nl> } <nl> } <nl> class TestServiceImpl : public : : grpc : : cpp : : test : : util : : TestService : : Service { <nl> ServerReaderWriter < EchoResponse , EchoRequest > * stream ) <nl> GRPC_OVERRIDE { <nl> EchoRequest request ; <nl> - std : : atomic < bool > should_exit ( false ) ; <nl> + gpr_atm should_exit ; <nl> + gpr_atm_rel_store ( & should_exit , static_cast < gpr_atm > ( 0 ) ) ; <nl> + <nl> std : : thread sender ( std : : bind ( & TestServiceImpl : : BidiStream_Sender , stream , & should_exit ) ) ; <nl> <nl> while ( stream - > Read ( & request ) ) { <nl> - / / TODO ( vpai ) : Decide if the below requires blocking annotation <nl> - std : : this_thread : : sleep_for ( std : : chrono : : milliseconds ( 3 ) ) ; <nl> + struct timespec tv = { 0 , 3000000 } ; / / 3 ms <nl> + struct timespec rem ; <nl> + / / TODO ( vpai ) : Mark this blocking <nl> + while ( nanosleep ( & tv , & rem ) ! = 0 ) { <nl> + tv = rem ; <nl> + } ; <nl> } <nl> - should_exit . store ( true ) ; <nl> + gpr_atm_rel_store ( & should_exit , static_cast < gpr_atm > ( 1 ) ) ; <nl> sender . join ( ) ; <nl> return Status : : OK ; <nl> } <nl> | For compatibility with gcc - 4 . 4 , eliminate use of sleep_for and | grpc/grpc | 8423203cbb316a251bcefb076c76c59a68b6bfea | 2015-09-29T23:16:08Z |
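The grpc change above drops std::atomic and std::this_thread::sleep_for, which the targeted gcc 4.4 toolchain cannot use, in favor of gpr_atm load/store and a POSIX nanosleep loop. The sleep loop deserves a note: nanosleep may return early when interrupted by a signal, writing the unslept time into its second argument, so the code retries with the remainder until the full interval has elapsed. A self-contained sketch of that loop (the helper name is illustrative):

```cpp
// Portable millisecond sleep built on POSIX nanosleep: restart with the
// remaining time whenever the call is interrupted before completion.
#include <time.h>

static void SleepMilliseconds(long ms) {
  struct timespec tv = {ms / 1000, (ms % 1000) * 1000000L};
  struct timespec rem;
  while (nanosleep(&tv, &rem) != 0) {
    tv = rem;  // interrupted early; sleep for what is left
  }
}
```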
mmm a / modules / imgcodecs / include / opencv2 / imgcodecs . hpp <nl> ppp b / modules / imgcodecs / include / opencv2 / imgcodecs . hpp <nl> returns an empty matrix ( Mat : : data = = NULL ) . <nl> Currently , the following file formats are supported : <nl> <nl> - Windows bitmaps - \ * . bmp , \ * . dib ( always supported ) <nl> - - JPEG files - \ * . jpeg , \ * . jpg , \ * . jpe ( see the * Notes * section ) <nl> - - JPEG 2000 files - \ * . jp2 ( see the * Notes * section ) <nl> - - Portable Network Graphics - \ * . png ( see the * Notes * section ) <nl> - - WebP - \ * . webp ( see the * Notes * section ) <nl> + - JPEG files - \ * . jpeg , \ * . jpg , \ * . jpe ( see the * Note * section ) <nl> + - JPEG 2000 files - \ * . jp2 ( see the * Note * section ) <nl> + - Portable Network Graphics - \ * . png ( see the * Note * section ) <nl> + - WebP - \ * . webp ( see the * Note * section ) <nl> - Portable image format - \ * . pbm , \ * . pgm , \ * . ppm \ * . pxm , \ * . pnm ( always supported ) <nl> - Sun rasters - \ * . sr , \ * . ras ( always supported ) <nl> - - TIFF files - \ * . tiff , \ * . tif ( see the * Notes * section ) <nl> - - OpenEXR Image files - \ * . exr ( see the * Notes * section ) <nl> + - TIFF files - \ * . tiff , \ * . tif ( see the * Note * section ) <nl> + - OpenEXR Image files - \ * . exr ( see the * Note * section ) <nl> - Radiance HDR - \ * . hdr , \ * . pic ( always supported ) <nl> - - Raster and Vector geospatial data supported by Gdal ( see the * Notes * section ) <nl> + - Raster and Vector geospatial data supported by GDAL ( see the * Note * section ) <nl> <nl> @ note <nl> - <nl> - The function determines the type of an image by the content , not by the file extension . <nl> - In the case of color images , the decoded images will have the channels stored in * * B G R * * order . <nl> - When using IMREAD_GRAYSCALE , the codec ' s internal grayscale conversion will be used , if available . <nl> Currently , the following file formats are supported : <nl> files , for example , " libjpeg - dev " , in Debian \ * and Ubuntu \ * ) to get the codec support or turn <nl> on the OPENCV_BUILD_3RDPARTY_LIBS flag in CMake . <nl> - In the case you set * WITH_GDAL * flag to true in CMake and @ ref IMREAD_LOAD_GDAL to load the image , <nl> - then [ GDAL ] ( http : / / www . gdal . org ) driver will be used in order to decode the image by supporting <nl> + then the [ GDAL ] ( http : / / www . gdal . org ) driver will be used in order to decode the image , supporting <nl> the following formats : [ Raster ] ( http : / / www . gdal . org / formats_list . html ) , <nl> [ Vector ] ( http : / / www . gdal . org / ogr_formats . html ) . <nl> - If EXIF information are embedded in the image file , the EXIF orientation will be taken into account <nl> and thus the image will be rotated accordingly except if the flag @ ref IMREAD_IGNORE_ORIENTATION is passed . <nl> + <nl> @ param filename Name of file to be loaded . <nl> @ param flags Flag that can take values of cv : : ImreadModes <nl> * / <nl> CV_EXPORTS_W bool imreadmulti ( const String & filename , CV_OUT std : : vector < Mat > & m <nl> / * * @ brief Saves an image to a specified file . <nl> <nl> The function imwrite saves the image to the specified file . The image format is chosen based on the <nl> - filename extension ( see cv : : imread for the list of extensions ) . 
Only 8 - bit ( or 16 - bit unsigned ( CV_16U ) <nl> - in case of PNG , JPEG 2000 , and TIFF ) single - channel or 3 - channel ( with ' BGR ' channel order ) images <nl> - can be saved using this function . If the format , depth or channel order is different , use <nl> - Mat : : convertTo , and cv : : cvtColor to convert it before saving . Or , use the universal FileStorage I / O <nl> - functions to save the image to XML or YAML format . <nl> - <nl> - It is possible to store PNG images with an alpha channel using this function . To do this , create <nl> + filename extension ( see cv : : imread for the list of extensions ) . In general , only 8 - bit <nl> + single - channel or 3 - channel ( with ' BGR ' channel order ) images <nl> + can be saved using this function , with these exceptions : <nl> + <nl> + - 16 - bit unsigned ( CV_16U ) images can be saved in the case of PNG , JPEG 2000 , and TIFF formats <nl> + - 32 - bit float ( CV_32F ) images can be saved in TIFF , OpenEXR , and Radiance HDR formats ; 3 - channel <nl> + ( CV_32FC3 ) TIFF images will be saved using the LogLuv high dynamic range encoding ( 4 bytes per pixel ) <nl> + - PNG images with an alpha channel can be saved using this function . To do this , create <nl> 8 - bit ( or 16 - bit ) 4 - channel image BGRA , where the alpha channel goes last . Fully transparent pixels <nl> - should have alpha set to 0 , fully opaque pixels should have alpha set to 255 / 65535 . <nl> + should have alpha set to 0 , fully opaque pixels should have alpha set to 255 / 65535 ( see the code sample below ) . <nl> + <nl> + If the format , depth or channel order is different , use <nl> + Mat : : convertTo and cv : : cvtColor to convert it before saving . Or , use the universal FileStorage I / O <nl> + functions to save the image to XML or YAML format . <nl> <nl> - The sample below shows how to create such a BGRA image and store to PNG file . It also demonstrates how to set custom <nl> - compression parameters : <nl> + The sample below shows how to create a BGRA image and save it to a PNG file . It also demonstrates how to set custom <nl> + compression parameters : <nl> @ include snippets / imgcodecs_imwrite . cpp <nl> @ param filename Name of the file . <nl> @ param img Image to be saved . <nl> | Clean up documentation for imread and imwrite | opencv/opencv | 978ad4981ee1d2e902291e52bdd8daa81f6a1e20 | 2018-10-12T21:08:01Z |
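The imwrite documentation above now spells out the exceptions to the 8-bit rule, including BGRA images with an alpha channel. A small hedged example of that case, loosely following the snippet the docs reference (file name, image size, and pixel pattern are arbitrary choices for illustration):

```cpp
// Build an 8-bit BGRA image (alpha channel last) and save it as PNG with
// custom compression parameters.
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <vector>

int main() {
  cv::Mat bgra(240, 320, CV_8UC4);
  for (int r = 0; r < bgra.rows; ++r) {
    for (int c = 0; c < bgra.cols; ++c) {
      // Blue/green gradients, red channel zero, alpha fading from
      // transparent (0) on the left to nearly opaque on the right.
      bgra.at<cv::Vec4b>(r, c) =
          cv::Vec4b(255 * c / bgra.cols, 255 * r / bgra.rows, 0,
                    255 * c / bgra.cols);
    }
  }
  std::vector<int> params = {cv::IMWRITE_PNG_COMPRESSION, 9};
  return cv::imwrite("alpha.png", bgra, params) ? 0 : 1;
}
```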
mmm a / docs / configuration / cluster_manager / cluster . rst <nl> ppp b / docs / configuration / cluster_manager / cluster . rst <nl> dns_refresh_rate_ms <nl> <nl> outlier_detection <nl> * ( optional , object ) * If specified , outlier detection will be enabled for this upstream cluster . <nl> - Currently the presence of the empty object enables it and there are no options . See the <nl> - : ref : ` architecture overview < arch_overview_outlier_detection > ` for more information on outlier <nl> - detection . <nl> + See the : ref : ` architecture overview < arch_overview_outlier_detection > ` for more information on outlier <nl> + detection . The following configuration values are supported : <nl> + <nl> + . . _config_cluster_manager_cluster_outlier_detection_consecutive_5xx : <nl> + <nl> + consecutive_5xx <nl> + The number of consecutive 5xx responses before a consecutive 5xx ejection occurs . Defaults to 5 . <nl> + <nl> + . . _config_cluster_manager_cluster_outlier_detection_interval_ms : <nl> + <nl> + interval_ms <nl> + The time interval between ejection analysis sweeps . This can result in both new ejections as well <nl> + as hosts being returned to service . Defaults to 10000ms or 10s . <nl> + <nl> + . . _config_cluster_manager_cluster_outlier_detection_base_ejection_time_ms : <nl> + <nl> + base_ejection_time_ms <nl> + The base time that a host is ejected for . The real time is equal to the base time multiplied by <nl> + the number of times the host has been ejected . Defaults to 30000ms or 30s . <nl> + <nl> + . . _config_cluster_manager_cluster_outlier_detection_max_ejection_percent : <nl> + <nl> + max_ejection_percent <nl> + The maximum % of an upstream cluster that can be ejected due to outlier detection . Defaults to 10 % . <nl> + <nl> + . . _config_cluster_manager_cluster_outlier_detection_enforcing : <nl> + <nl> + enforcing <nl> + The % chance that a host will be actually ejected when an outlier status is detected . This setting <nl> + can be used to disable ejection or to ramp it up slowly . Defaults to 100 . <nl> + <nl> + Each of the above configuration values can be overridden via <nl> + : ref : ` runtime values < config_cluster_manager_cluster_runtime_outlier_detection > ` . <nl> + <nl> <nl> . . toctree : : <nl> : hidden : <nl> mmm a / docs / configuration / cluster_manager / cluster_runtime . rst <nl> ppp b / docs / configuration / cluster_manager / cluster_runtime . rst <nl> Outlier detection <nl> mmmmmmmmmmmmmmm - - <nl> <nl> See the outlier detection : ref : ` architecture overview < arch_overview_outlier_detection > ` for more <nl> - information on outlier detection . <nl> + information on outlier detection . The runtime parameters supported by outlier detection are the <nl> + same as the : ref : ` static configuration parameters < config_cluster_manager_cluster_outlier_detection > ` , namely <nl> <nl> outlier_detection . consecutive_5xx <nl> - The number of consecutive 5xx responses before a consecutive 5xx ejection occurs . Defaults to 5 . <nl> + : ref : ` consecutive_5XX <nl> + < config_cluster_manager_cluster_outlier_detection_consecutive_5xx > ` <nl> + setting in outlier detection <nl> <nl> outlier_detection . interval_ms <nl> - The time interval between ejection analysis sweeps . This can result in both new ejections as well <nl> - as hosts being returned to service . Defaults to 10000ms or 10s . <nl> + : ref : ` interval_ms <nl> + < config_cluster_manager_cluster_outlier_detection_interval_ms > ` <nl> + setting in outlier detection <nl> <nl> outlier_detection . 
base_ejection_time_ms <nl> - The base time that a host is ejected for . The real time is equal to the base time multiplied by <nl> - the number of times the host has been ejected . Defaults to 30000ms or 30s . <nl> + : ref : ` base_ejection_time_ms <nl> + < config_cluster_manager_cluster_outlier_detection_base_ejection_time_ms > ` <nl> + setting in outlier detection <nl> <nl> outlier_detection . max_ejection_percent <nl> - The maximum % of an upstream cluster that can be ejected due to outlier detection . Defaults to <nl> - 10 % . <nl> + : ref : ` max_ejection_percent <nl> + < config_cluster_manager_cluster_outlier_detection_max_ejection_percent > ` <nl> + setting in outlier detection <nl> <nl> outlier_detection . enforcing <nl> - The % chance that a host will be actually ejected when an outlier status is detected . This setting <nl> - can be used to disable ejection or to ramp it up slowly . Defaults to 100 . <nl> + : ref : ` enforcing <nl> + < config_cluster_manager_cluster_outlier_detection_enforcing > ` <nl> + setting in outlier detection <nl> <nl> Core <nl> mmm - <nl> mmm a / docs / intro / arch_overview / outlier . rst <nl> ppp b / docs / intro / arch_overview / outlier . rst <nl> ejection algorithm works as follows : <nl> <nl> # . A host is determined to be an outlier . <nl> # . Envoy checks to make sure the number of ejected hosts is below the allowed threshold ( specified <nl> - via the : ref : ` outlier_detection . max_ejection_percent <nl> - < config_cluster_manager_cluster_runtime_outlier_detection > ` runtime value ) . <nl> + via the : ref : ` outlier_detection . max_ejection_percent <nl> + < config_cluster_manager_cluster_outlier_detection > ` setting ) . <nl> If the number of ejected hosts is above the threshold the host is not ejected . <nl> # . The host is ejected for some number of milliseconds . Ejection means that the host is marked <nl> unhealthy and will not be used during load balancing unless the load balancer is in a <nl> : ref : ` panic < arch_overview_load_balancing_panic_threshold > ` scenario . The number of milliseconds <nl> is equal to the : ref : ` outlier_detection . base_ejection_time_ms <nl> - < config_cluster_manager_cluster_runtime_outlier_detection > ` runtime value <nl> + < config_cluster_manager_cluster_outlier_detection > ` value <nl> multiplied by the number of times the host has been ejected . This causes hosts to get ejected <nl> for longer and longer periods if they continue to fail . <nl> # . An ejected host will automatically be brought back into service after the ejection time has <nl> If an upstream host returns some number of consecutive 5xx , it will be ejected . <nl> case a 5xx means an actual 5xx respond code , or an event that would cause the HTTP router to return <nl> one on the upstream ' s behalf ( reset , connection failure , etc . ) . The number of consecutive 5xx <nl> required for ejection is controlled by the : ref : ` outlier_detection . consecutive_5xx <nl> - < config_cluster_manager_cluster_runtime_outlier_detection > ` runtime value . <nl> + < config_cluster_manager_cluster_outlier_detection > ` value . <nl> <nl> Ejection event logging <nl> mmmmmmmmmmmmmmmmmmmmm - <nl> mmm a / source / common / json / config_schemas . cc <nl> ppp b / source / common / json / config_schemas . 
cc <nl> const std : : string Json : : Schema : : CLUSTER_SCHEMA ( R " EOF ( <nl> " minimum " : 0 , <nl> " exclusiveMinimum " : true <nl> } , <nl> - " outlier_detection " : { " type " : " object " } <nl> + " outlier_detection " : { <nl> + " type " : " object " , <nl> + " properties " : { <nl> + " consecutive_5xx " : { <nl> + " type " : " integer " , <nl> + " minimum " : 0 , <nl> + " exclusiveMinimum " : true <nl> + } , <nl> + " interval_ms " : { <nl> + " type " : " integer " , <nl> + " minimum " : 0 , <nl> + " exclusiveMinimum " : true <nl> + } , <nl> + " base_ejection_time_ms " : { <nl> + " type " : " integer " , <nl> + " minimum " : 0 , <nl> + " exclusiveMinimum " : true <nl> + } , <nl> + " max_ejection_percent " : { <nl> + " type " : " integer " , <nl> + " minimum " : 0 , <nl> + " maximum " : 100 <nl> + } , <nl> + " enforcing " : { <nl> + " type " : " integer " , <nl> + " minimum " : 0 , <nl> + " maximum " : 100 <nl> + } <nl> + } , <nl> + " additionalProperties " : false <nl> + } <nl> } , <nl> " required " : [ " name " , " type " , " connect_timeout_ms " , " lb_type " ] , <nl> " additionalProperties " : false <nl> mmm a / source / common / upstream / outlier_detection_impl . cc <nl> ppp b / source / common / upstream / outlier_detection_impl . cc <nl> DetectorPtr DetectorImplFactory : : createForCluster ( Cluster & cluster , <nl> Event : : Dispatcher & dispatcher , <nl> Runtime : : Loader & runtime , <nl> EventLoggerPtr event_logger ) { <nl> - / / Right now we don ' t support any configuration but in order to make the config backwards <nl> - / / compatible we just look for an empty object . <nl> if ( cluster_config . hasObject ( " outlier_detection " ) ) { <nl> - return DetectorImpl : : create ( cluster , dispatcher , runtime , ProdSystemTimeSource : : instance_ , <nl> - event_logger ) ; <nl> + return DetectorImpl : : create ( cluster , * cluster_config . getObject ( " outlier_detection " ) , dispatcher , <nl> + runtime , ProdSystemTimeSource : : instance_ , event_logger ) ; <nl> } else { <nl> return nullptr ; <nl> } <nl> void DetectorHostSinkImpl : : putHttpResponseCode ( uint64_t response_code ) { <nl> } <nl> <nl> if ( + + consecutive_5xx_ = = <nl> - detector - > runtime ( ) . snapshot ( ) . getInteger ( " outlier_detection . consecutive_5xx " , 5 ) ) { <nl> + detector - > runtime ( ) . snapshot ( ) . getInteger ( " outlier_detection . consecutive_5xx " , <nl> + detector - > config ( ) . consecutive5xx ( ) ) ) { <nl> detector - > onConsecutive5xx ( host_ . lock ( ) ) ; <nl> } <nl> } else { <nl> void DetectorHostSinkImpl : : putHttpResponseCode ( uint64_t response_code ) { <nl> } <nl> } <nl> <nl> - DetectorImpl : : DetectorImpl ( const Cluster & cluster , Event : : Dispatcher & dispatcher , <nl> - Runtime : : Loader & runtime , SystemTimeSource & time_source , <nl> - EventLoggerPtr event_logger ) <nl> - : dispatcher_ ( dispatcher ) , runtime_ ( runtime ) , time_source_ ( time_source ) , <nl> + DetectorConfig : : DetectorConfig ( const Json : : Object & json_config ) <nl> + : interval_ms_ ( static_cast < uint64_t > ( json_config . getInteger ( " interval_ms " , 10000 ) ) ) , <nl> + base_ejection_time_ms_ ( <nl> + static_cast < uint64_t > ( json_config . getInteger ( " base_ejection_time_ms " , 30000 ) ) ) , <nl> + consecutive_5xx_ ( static_cast < uint64_t > ( json_config . getInteger ( " consecutive_5xx " , 5 ) ) ) , <nl> + max_ejection_percent_ ( <nl> + static_cast < uint64_t > ( json_config . 
getInteger ( " max_ejection_percent " , 10 ) ) ) , <nl> + enforcing_ ( static_cast < uint64_t > ( json_config . getInteger ( " enforcing " , 100 ) ) ) { } <nl> + <nl> + DetectorImpl : : DetectorImpl ( const Cluster & cluster , const Json : : Object & json_config , <nl> + Event : : Dispatcher & dispatcher , Runtime : : Loader & runtime , <nl> + SystemTimeSource & time_source , EventLoggerPtr event_logger ) <nl> + : config_ ( json_config ) , dispatcher_ ( dispatcher ) , runtime_ ( runtime ) , time_source_ ( time_source ) , <nl> stats_ ( generateStats ( cluster . info ( ) - > statsScope ( ) ) ) , <nl> interval_timer_ ( dispatcher . createTimer ( [ this ] ( ) - > void { onIntervalTimer ( ) ; } ) ) , <nl> event_logger_ ( event_logger ) { } <nl> DetectorImpl : : ~ DetectorImpl ( ) { <nl> } <nl> } <nl> <nl> - std : : shared_ptr < DetectorImpl > DetectorImpl : : create ( const Cluster & cluster , <nl> - Event : : Dispatcher & dispatcher , <nl> - Runtime : : Loader & runtime , <nl> - SystemTimeSource & time_source , <nl> - EventLoggerPtr event_logger ) { <nl> + std : : shared_ptr < DetectorImpl > <nl> + DetectorImpl : : create ( const Cluster & cluster , const Json : : Object & json_config , <nl> + Event : : Dispatcher & dispatcher , Runtime : : Loader & runtime , <nl> + SystemTimeSource & time_source , EventLoggerPtr event_logger ) { <nl> std : : shared_ptr < DetectorImpl > detector ( <nl> - new DetectorImpl ( cluster , dispatcher , runtime , time_source , event_logger ) ) ; <nl> + new DetectorImpl ( cluster , json_config , dispatcher , runtime , time_source , event_logger ) ) ; <nl> detector - > initialize ( cluster ) ; <nl> return detector ; <nl> } <nl> void DetectorImpl : : addHostSink ( HostPtr host ) { <nl> <nl> void DetectorImpl : : armIntervalTimer ( ) { <nl> interval_timer_ - > enableTimer ( std : : chrono : : milliseconds ( <nl> - runtime_ . snapshot ( ) . getInteger ( " outlier_detection . interval_ms " , 10000 ) ) ) ; <nl> + runtime_ . snapshot ( ) . getInteger ( " outlier_detection . interval_ms " , config_ . intervalMs ( ) ) ) ) ; <nl> } <nl> <nl> void DetectorImpl : : checkHostForUneject ( HostPtr host , DetectorHostSinkImpl * sink , SystemTime now ) { <nl> void DetectorImpl : : checkHostForUneject ( HostPtr host , DetectorHostSinkImpl * sink , <nl> return ; <nl> } <nl> <nl> - std : : chrono : : milliseconds base_eject_time = std : : chrono : : milliseconds ( <nl> - runtime_ . snapshot ( ) . getInteger ( " outlier_detection . base_ejection_time_ms " , 30000 ) ) ; <nl> + std : : chrono : : milliseconds base_eject_time = <nl> + std : : chrono : : milliseconds ( runtime_ . snapshot ( ) . getInteger ( <nl> + " outlier_detection . base_ejection_time_ms " , config_ . baseEjectionTimeMs ( ) ) ) ; <nl> ASSERT ( sink - > numEjections ( ) > 0 ) <nl> if ( ( base_eject_time * sink - > numEjections ( ) ) < = ( now - sink - > lastEjectionTime ( ) . value ( ) ) ) { <nl> stats_ . ejections_active_ . dec ( ) ; <nl> void DetectorImpl : : checkHostForUneject ( HostPtr host , DetectorHostSinkImpl * sink , <nl> <nl> void DetectorImpl : : ejectHost ( HostPtr host , EjectionType type ) { <nl> uint64_t max_ejection_percent = std : : min < uint64_t > ( <nl> - 100 , runtime_ . snapshot ( ) . getInteger ( " outlier_detection . max_ejection_percent " , 10 ) ) ; <nl> + 100 , runtime_ . snapshot ( ) . getInteger ( " outlier_detection . max_ejection_percent " , <nl> + config_ . maxEjectionPercent ( ) ) ) ; <nl> double ejected_percent = 100 . 0 * stats_ . ejections_active_ . value ( ) / host_sinks_ . 
size ( ) ; <nl> if ( ejected_percent < max_ejection_percent ) { <nl> stats_ . ejections_total_ . inc ( ) ; <nl> - if ( runtime_ . snapshot ( ) . featureEnabled ( " outlier_detection . enforcing " , 100 ) ) { <nl> + if ( runtime_ . snapshot ( ) . featureEnabled ( " outlier_detection . enforcing " , config_ . enforcing ( ) ) ) { <nl> stats_ . ejections_active_ . inc ( ) ; <nl> host_sinks_ [ host ] - > eject ( time_source_ . currentSystemTime ( ) ) ; <nl> runCallbacks ( host ) ; <nl> mmm a / source / common / upstream / outlier_detection_impl . h <nl> ppp b / source / common / upstream / outlier_detection_impl . h <nl> struct DetectionStats { <nl> ALL_OUTLIER_DETECTION_STATS ( GENERATE_COUNTER_STRUCT , GENERATE_GAUGE_STRUCT ) <nl> } ; <nl> <nl> + / * * <nl> + * Configuration for the outlier detection . <nl> + * / <nl> + class DetectorConfig { <nl> + public : <nl> + DetectorConfig ( const Json : : Object & json_config ) ; <nl> + <nl> + uint64_t intervalMs ( ) { return interval_ms_ ; } <nl> + uint64_t baseEjectionTimeMs ( ) { return base_ejection_time_ms_ ; } <nl> + uint64_t consecutive5xx ( ) { return consecutive_5xx_ ; } <nl> + uint64_t maxEjectionPercent ( ) { return max_ejection_percent_ ; } <nl> + uint64_t enforcing ( ) { return enforcing_ ; } <nl> + <nl> + private : <nl> + const uint64_t interval_ms_ ; <nl> + const uint64_t base_ejection_time_ms_ ; <nl> + const uint64_t consecutive_5xx_ ; <nl> + const uint64_t max_ejection_percent_ ; <nl> + const uint64_t enforcing_ ; <nl> + } ; <nl> + <nl> / * * <nl> * An implementation of an outlier detector . In the future we may support multiple outlier detection <nl> * implementations with different configuration . For now , as we iterate everything is contained <nl> struct DetectionStats { <nl> * / <nl> class DetectorImpl : public Detector , public std : : enable_shared_from_this < DetectorImpl > { <nl> public : <nl> - static std : : shared_ptr < DetectorImpl > create ( const Cluster & cluster , Event : : Dispatcher & dispatcher , <nl> - Runtime : : Loader & runtime , <nl> - SystemTimeSource & time_source , <nl> - EventLoggerPtr event_logger ) ; <nl> + static std : : shared_ptr < DetectorImpl > <nl> + create ( const Cluster & cluster , const Json : : Object & json_config , Event : : Dispatcher & dispatcher , <nl> + Runtime : : Loader & runtime , SystemTimeSource & time_source , EventLoggerPtr event_logger ) ; <nl> ~ DetectorImpl ( ) ; <nl> <nl> void onConsecutive5xx ( HostPtr host ) ; <nl> Runtime : : Loader & runtime ( ) { return runtime_ ; } <nl> + DetectorConfig & config ( ) { return config_ ; } <nl> <nl> / / Upstream : : Outlier : : Detector <nl> void addChangedStateCb ( ChangeStateCb cb ) override { callbacks_ . push_back ( cb ) ; } <nl> <nl> private : <nl> - DetectorImpl ( const Cluster & cluster , Event : : Dispatcher & dispatcher , Runtime : : Loader & runtime , <nl> + DetectorImpl ( const Cluster & cluster , const Json : : Object & json_config , <nl> + Event : : Dispatcher & dispatcher , Runtime : : Loader & runtime , <nl> SystemTimeSource & time_source , EventLoggerPtr event_logger ) ; <nl> <nl> void addHostSink ( HostPtr host ) ; <nl> class DetectorImpl : public Detector , public std : : enable_shared_from_this < Detect <nl> void onIntervalTimer ( ) ; <nl> void runCallbacks ( HostPtr host ) ; <nl> <nl> + DetectorConfig config_ ; <nl> Event : : Dispatcher & dispatcher_ ; <nl> Runtime : : Loader & runtime_ ; <nl> SystemTimeSource & time_source_ ; <nl> mmm a / test / common / upstream / outlier_detection_impl_test . 
cc <nl> ppp b / test / common / upstream / outlier_detection_impl_test . cc <nl> class OutlierDetectorImplTest : public testing : : Test { <nl> CallbackChecker checker_ ; <nl> MockSystemTimeSource time_source_ ; <nl> std : : shared_ptr < MockEventLogger > event_logger_ { new MockEventLogger ( ) } ; <nl> + Json : : ObjectPtr loader_ = Json : : Factory : : LoadFromString ( " { } " ) ; <nl> } ; <nl> <nl> + TEST_F ( OutlierDetectorImplTest , DetectorStaticConfig ) { <nl> + std : : string json = R " EOF ( <nl> + { <nl> + " interval_ms " : 100 , <nl> + " base_ejection_time_ms " : 10000 , <nl> + " consecutive_5xx " : 10 , <nl> + " max_ejection_percent " : 50 , <nl> + " enforcing " : 10 <nl> + } <nl> + ) EOF " ; <nl> + <nl> + Json : : ObjectPtr custom_config = Json : : Factory : : LoadFromString ( json ) ; <nl> + std : : shared_ptr < DetectorImpl > detector ( DetectorImpl : : create ( <nl> + cluster_ , * custom_config , dispatcher_ , runtime_ , time_source_ , event_logger_ ) ) ; <nl> + <nl> + EXPECT_EQ ( 100UL , detector - > config ( ) . intervalMs ( ) ) ; <nl> + EXPECT_EQ ( 10000UL , detector - > config ( ) . baseEjectionTimeMs ( ) ) ; <nl> + EXPECT_EQ ( 10UL , detector - > config ( ) . consecutive5xx ( ) ) ; <nl> + EXPECT_EQ ( 50UL , detector - > config ( ) . maxEjectionPercent ( ) ) ; <nl> + EXPECT_EQ ( 10UL , detector - > config ( ) . enforcing ( ) ) ; <nl> + } <nl> + <nl> TEST_F ( OutlierDetectorImplTest , DestroyWithActive ) { <nl> EXPECT_CALL ( cluster_ , addMemberUpdateCb ( _ ) ) ; <nl> cluster_ . hosts_ = { HostPtr { new HostImpl ( <nl> cluster_ . info_ , " " , Network : : Utility : : resolveUrl ( " tcp : / / 127 . 0 . 0 . 1 : 80 " ) , false , 1 , " " ) } } ; <nl> EXPECT_CALL ( * interval_timer_ , enableTimer ( std : : chrono : : milliseconds ( 10000 ) ) ) ; <nl> std : : shared_ptr < DetectorImpl > detector ( <nl> - DetectorImpl : : create ( cluster_ , dispatcher_ , runtime_ , time_source_ , event_logger_ ) ) ; <nl> + DetectorImpl : : create ( cluster_ , * loader_ , dispatcher_ , runtime_ , time_source_ , event_logger_ ) ) ; <nl> detector - > addChangedStateCb ( [ & ] ( HostPtr host ) - > void { checker_ . check ( host ) ; } ) ; <nl> <nl> cluster_ . hosts_ [ 0 ] - > outlierDetector ( ) . putHttpResponseCode ( 503 ) ; <nl> TEST_F ( OutlierDetectorImplTest , DestroyHostInUse ) { <nl> cluster_ . info_ , " " , Network : : Utility : : resolveUrl ( " tcp : / / 127 . 0 . 0 . 1 : 80 " ) , false , 1 , " " ) } } ; <nl> EXPECT_CALL ( * interval_timer_ , enableTimer ( std : : chrono : : milliseconds ( 10000 ) ) ) ; <nl> std : : shared_ptr < DetectorImpl > detector ( <nl> - DetectorImpl : : create ( cluster_ , dispatcher_ , runtime_ , time_source_ , event_logger_ ) ) ; <nl> + DetectorImpl : : create ( cluster_ , * loader_ , dispatcher_ , runtime_ , time_source_ , event_logger_ ) ) ; <nl> detector - > addChangedStateCb ( [ & ] ( HostPtr host ) - > void { checker_ . check ( host ) ; } ) ; <nl> <nl> detector . reset ( ) ; <nl> TEST_F ( OutlierDetectorImplTest , BasicFlow ) { <nl> cluster_ . info_ , " " , Network : : Utility : : resolveUrl ( " tcp : / / 127 . 0 . 0 . 
1 : 80 " ) , false , 1 , " " ) } } ; <nl> EXPECT_CALL ( * interval_timer_ , enableTimer ( std : : chrono : : milliseconds ( 10000 ) ) ) ; <nl> std : : shared_ptr < DetectorImpl > detector ( <nl> - DetectorImpl : : create ( cluster_ , dispatcher_ , runtime_ , time_source_ , event_logger_ ) ) ; <nl> + DetectorImpl : : create ( cluster_ , * loader_ , dispatcher_ , runtime_ , time_source_ , event_logger_ ) ) ; <nl> detector - > addChangedStateCb ( [ & ] ( HostPtr host ) - > void { checker_ . check ( host ) ; } ) ; <nl> <nl> cluster_ . hosts_ . push_back ( HostPtr { new HostImpl ( <nl> TEST_F ( OutlierDetectorImplTest , RemoveWhileEjected ) { <nl> cluster_ . info_ , " " , Network : : Utility : : resolveUrl ( " tcp : / / 127 . 0 . 0 . 1 : 80 " ) , false , 1 , " " ) } } ; <nl> EXPECT_CALL ( * interval_timer_ , enableTimer ( std : : chrono : : milliseconds ( 10000 ) ) ) ; <nl> std : : shared_ptr < DetectorImpl > detector ( <nl> - DetectorImpl : : create ( cluster_ , dispatcher_ , runtime_ , time_source_ , event_logger_ ) ) ; <nl> + DetectorImpl : : create ( cluster_ , * loader_ , dispatcher_ , runtime_ , time_source_ , event_logger_ ) ) ; <nl> detector - > addChangedStateCb ( [ & ] ( HostPtr host ) - > void { checker_ . check ( host ) ; } ) ; <nl> <nl> cluster_ . hosts_ [ 0 ] - > outlierDetector ( ) . putHttpResponseCode ( 503 ) ; <nl> TEST_F ( OutlierDetectorImplTest , Overflow ) { <nl> false , 1 , " " ) } } ; <nl> EXPECT_CALL ( * interval_timer_ , enableTimer ( std : : chrono : : milliseconds ( 10000 ) ) ) ; <nl> std : : shared_ptr < DetectorImpl > detector ( <nl> - DetectorImpl : : create ( cluster_ , dispatcher_ , runtime_ , time_source_ , event_logger_ ) ) ; <nl> + DetectorImpl : : create ( cluster_ , * loader_ , dispatcher_ , runtime_ , time_source_ , event_logger_ ) ) ; <nl> detector - > addChangedStateCb ( [ & ] ( HostPtr host ) - > void { checker_ . check ( host ) ; } ) ; <nl> <nl> ON_CALL ( runtime_ . snapshot_ , getInteger ( " outlier_detection . max_ejection_percent " , _ ) ) <nl> TEST_F ( OutlierDetectorImplTest , NotEnforcing ) { <nl> cluster_ . info_ , " " , Network : : Utility : : resolveUrl ( " tcp : / / 127 . 0 . 0 . 1 : 80 " ) , false , 1 , " " ) } } ; <nl> EXPECT_CALL ( * interval_timer_ , enableTimer ( std : : chrono : : milliseconds ( 10000 ) ) ) ; <nl> std : : shared_ptr < DetectorImpl > detector ( <nl> - DetectorImpl : : create ( cluster_ , dispatcher_ , runtime_ , time_source_ , event_logger_ ) ) ; <nl> + DetectorImpl : : create ( cluster_ , * loader_ , dispatcher_ , runtime_ , time_source_ , event_logger_ ) ) ; <nl> detector - > addChangedStateCb ( [ & ] ( HostPtr host ) - > void { checker_ . check ( host ) ; } ) ; <nl> <nl> cluster_ . hosts_ [ 0 ] - > outlierDetector ( ) . putHttpResponseCode ( 503 ) ; <nl> TEST_F ( OutlierDetectorImplTest , CrossThreadRemoveRace ) { <nl> cluster_ . info_ , " " , Network : : Utility : : resolveUrl ( " tcp : / / 127 . 0 . 0 . 1 : 80 " ) , false , 1 , " " ) } } ; <nl> EXPECT_CALL ( * interval_timer_ , enableTimer ( std : : chrono : : milliseconds ( 10000 ) ) ) ; <nl> std : : shared_ptr < DetectorImpl > detector ( <nl> - DetectorImpl : : create ( cluster_ , dispatcher_ , runtime_ , time_source_ , event_logger_ ) ) ; <nl> + DetectorImpl : : create ( cluster_ , * loader_ , dispatcher_ , runtime_ , time_source_ , event_logger_ ) ) ; <nl> detector - > addChangedStateCb ( [ & ] ( HostPtr host ) - > void { checker_ . check ( host ) ; } ) ; <nl> <nl> cluster_ . hosts_ [ 0 ] - > outlierDetector ( ) . 
putHttpResponseCode ( 503 ) ; <nl> TEST_F ( OutlierDetectorImplTest , CrossThreadDestroyRace ) { <nl> cluster_ . info_ , " " , Network : : Utility : : resolveUrl ( " tcp : / / 127 . 0 . 0 . 1 : 80 " ) , false , 1 , " " ) } } ; <nl> EXPECT_CALL ( * interval_timer_ , enableTimer ( std : : chrono : : milliseconds ( 10000 ) ) ) ; <nl> std : : shared_ptr < DetectorImpl > detector ( <nl> - DetectorImpl : : create ( cluster_ , dispatcher_ , runtime_ , time_source_ , event_logger_ ) ) ; <nl> + DetectorImpl : : create ( cluster_ , * loader_ , dispatcher_ , runtime_ , time_source_ , event_logger_ ) ) ; <nl> detector - > addChangedStateCb ( [ & ] ( HostPtr host ) - > void { checker_ . check ( host ) ; } ) ; <nl> <nl> cluster_ . hosts_ [ 0 ] - > outlierDetector ( ) . putHttpResponseCode ( 503 ) ; <nl> TEST_F ( OutlierDetectorImplTest , CrossThreadFailRace ) { <nl> cluster_ . info_ , " " , Network : : Utility : : resolveUrl ( " tcp : / / 127 . 0 . 0 . 1 : 80 " ) , false , 1 , " " ) } } ; <nl> EXPECT_CALL ( * interval_timer_ , enableTimer ( std : : chrono : : milliseconds ( 10000 ) ) ) ; <nl> std : : shared_ptr < DetectorImpl > detector ( <nl> - DetectorImpl : : create ( cluster_ , dispatcher_ , runtime_ , time_source_ , event_logger_ ) ) ; <nl> + DetectorImpl : : create ( cluster_ , * loader_ , dispatcher_ , runtime_ , time_source_ , event_logger_ ) ) ; <nl> detector - > addChangedStateCb ( [ & ] ( HostPtr host ) - > void { checker_ . check ( host ) ; } ) ; <nl> <nl> cluster_ . hosts_ [ 0 ] - > outlierDetector ( ) . putHttpResponseCode ( 503 ) ; <nl> TEST_F ( OutlierDetectorImplTest , Consecutive5xxAlreadyEjected ) { <nl> cluster_ . info_ , " " , Network : : Utility : : resolveUrl ( " tcp : / / 127 . 0 . 0 . 1 : 80 " ) , false , 1 , " " ) } } ; <nl> EXPECT_CALL ( * interval_timer_ , enableTimer ( std : : chrono : : milliseconds ( 10000 ) ) ) ; <nl> std : : shared_ptr < DetectorImpl > detector ( <nl> - DetectorImpl : : create ( cluster_ , dispatcher_ , runtime_ , time_source_ , event_logger_ ) ) ; <nl> + DetectorImpl : : create ( cluster_ , * loader_ , dispatcher_ , runtime_ , time_source_ , event_logger_ ) ) ; <nl> detector - > addChangedStateCb ( [ & ] ( HostPtr host ) - > void { checker_ . check ( host ) ; } ) ; <nl> <nl> / / Cause a consecutive 5xx error . <nl> | Outlier detection config ( ) | envoyproxy/envoy | e49093e7e3c571aaab9633df0f4b9ecf6d6351b2 | 2017-03-09T20:09:59Z |
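The envoy commit above moves the outlier-detection knobs from runtime-only values into a static DetectorConfig parsed from the cluster JSON, while the runtime keys still win when present: each lookup becomes runtime.getInteger(key, config_.value()). A simplified sketch of that layered-default resolution, using hypothetical stand-in types rather than Envoy's real runtime and config classes:

```cpp
// Resolution order for one knob: runtime override -> value parsed from the
// cluster's static JSON (itself defaulted when absent) -> built-in default.
#include <cstdint>
#include <map>
#include <string>

struct StaticOutlierConfig {
  // Defaults mirror the documented ones when the JSON omits a field.
  uint64_t interval_ms = 10000;
  uint64_t base_ejection_time_ms = 30000;
};

class RuntimeSnapshot {
 public:
  uint64_t getInteger(const std::string& key, uint64_t default_value) const {
    auto it = overrides_.find(key);
    return it == overrides_.end() ? default_value : it->second;
  }
  std::map<std::string, uint64_t> overrides_;
};

uint64_t ResolveIntervalMs(const RuntimeSnapshot& runtime,
                           const StaticOutlierConfig& config) {
  return runtime.getInteger("outlier_detection.interval_ms",
                            config.interval_ms);
}
```

This keeps per-cluster static configuration possible without giving up the operator's ability to flip values at runtime.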
mmm a / stdlib / public / SDK / ObjectiveC / ObjectiveC . swift <nl> ppp b / stdlib / public / SDK / ObjectiveC / ObjectiveC . swift <nl> public var NO : ObjCBool { <nl> / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> <nl> / / NSObject implements Equatable ' s = = as - [ NSObject isEqual : ] <nl> - / / NSObject implements Hashable ' s hashValue ( ) as - [ NSObject hash ] <nl> + / / NSObject implements Hashable ' s hashValue as - [ NSObject hash ] <nl> / / FIXME : what about NSObjectProtocol ? <nl> <nl> extension NSObject : Equatable , Hashable { <nl> + / / / Returns a Boolean value indicating whether two values are <nl> + / / / equal . ` NSObject ` implements this by calling ` lhs . isEqual ( rhs ) ` . <nl> + / / / <nl> + / / / Subclasses of ` NSObject ` can customize Equatable conformance by overriding <nl> + / / / ` isEqual ( _ : ) ` . If two objects are equal , they must have the same hash <nl> + / / / value , so if you override ` isEqual ( _ : ) ` , make sure you also override the <nl> + / / / ` hash ` property . <nl> + / / / <nl> + / / / - Parameters : <nl> + / / / - lhs : A value to compare . <nl> + / / / - rhs : Another value to compare . <nl> public static func = = ( lhs : NSObject , rhs : NSObject ) - > Bool { <nl> return lhs . isEqual ( rhs ) <nl> } <nl> <nl> / / / The hash value . <nl> / / / <nl> + / / / ` NSObject ` implements this by returning ` self . hash ` . Subclasses can <nl> + / / / customize hashing by overriding the ` hash ` property . <nl> + / / / <nl> / / / * * Axiom : * * ` x = = y ` implies ` x . hashValue = = y . hashValue ` <nl> / / / <nl> / / / - Note : the hash value is not guaranteed to be stable across <nl> / / / different invocations of the same program . Do not persist the <nl> / / / hash value across program runs . <nl> - @ objc <nl> - open var hashValue : Int { <nl> + @ objc open / / FIXME : Should be @ nonobjc public . rdar : / / problem / 42623458 <nl> + var hashValue : Int { <nl> return hash <nl> } <nl> + <nl> + / / / Hashes the essential components of this value by feeding them into the <nl> + / / / given hasher . <nl> + / / / <nl> + / / / NSObject implements this by feeding ` self . hash ` to the hasher . Subclasses <nl> + / / / can customize hashing by overriding the ` hash ` property . <nl> + public func hash ( into hasher : inout Hasher ) { <nl> + / / FIXME : We should combine self . hash here , but hashValue is currently <nl> + / / overridable . <nl> + hasher . combine ( hashValue ) <nl> + } <nl> + <nl> + public func _rawHashValue ( seed : ( UInt64 , UInt64 ) ) - > Int { <nl> + / / FIXME : We should use self . hash here , but hashValue is currently <nl> + / / overridable . <nl> + return self . hashValue . _rawHashValue ( seed : seed ) <nl> + } <nl> } <nl> <nl> extension NSObject : CVarArg { <nl> | Merge remote - tracking branch ' origin / master ' into master - next | apple/swift | 634329dba7525bc8e7222846e90cd3e15865710e | 2018-07-27T10:49:16Z |
mmm a / templates / lua - template - runtime / . project <nl> ppp b / templates / lua - template - runtime / . project <nl> <nl> < nature > org . ccdt . cocosproject < / nature > <nl> < nature > org . eclipse . koneki . ldt . nature < / nature > <nl> < / natures > <nl> - < cocosprojecttemplate > <nl> - < version > 1 . 2 < / version > <nl> - < / cocosprojecttemplate > <nl> < / projectDescription > <nl> new file mode 100644 <nl> index 000000000000 . . 5f06abfbad6c <nl> mmm / dev / null <nl> ppp b / templates / lua - template - runtime / . settings / version . json <nl> <nl> + { <nl> + " templateVersion " : " 1 . 2 " , <nl> + " runtimeVersion " : " 1 . 2 " <nl> + } <nl> \ No newline at end of file <nl> | version . json | cocos2d/cocos2d-x | eb7523e0c60900ef826d660d9ad7c887a28782c5 | 2014-07-04T02:56:21Z |
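The record above moves the lua-template version metadata out of the Eclipse `.project` file and into a standalone `.settings/version.json` holding `templateVersion` and `runtimeVersion`. The sketch below is a minimal, hypothetical C++ reader for that file; the hand-rolled string scan is only to keep the example self-contained (a real consumer of this file would use a proper JSON parser, and the record does not show which tool reads it).

```cpp
#include <cstddef>
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>

// Extracts the string value of a given key from a tiny, flat JSON document.
std::string extractValue(const std::string& json, const std::string& key) {
  const std::string needle = "\"" + key + "\"";
  std::size_t pos = json.find(needle);
  if (pos == std::string::npos) return "";
  pos = json.find(':', pos + needle.size());
  if (pos == std::string::npos) return "";
  const std::size_t first = json.find('"', pos + 1);
  const std::size_t last = json.find('"', first + 1);
  if (first == std::string::npos || last == std::string::npos) return "";
  return json.substr(first + 1, last - first - 1);
}

int main() {
  std::ifstream in(".settings/version.json");
  std::stringstream buffer;
  buffer << in.rdbuf();
  const std::string json = buffer.str();

  std::cout << "templateVersion: " << extractValue(json, "templateVersion") << "\n";
  std::cout << "runtimeVersion:  " << extractValue(json, "runtimeVersion") << "\n";
}
```

Keeping the two version numbers in a small JSON file, rather than embedded in IDE project XML, lets tooling read them without an XML parser and lets the template and runtime versions evolve independently.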
mmm a / src / d8 / d8 - posix . cc <nl> ppp b / src / d8 / d8 - posix . cc <nl> char * Shell : : ReadCharsFromTcpPort ( const char * name , int * size_out ) { <nl> if ( connect ( sockfd , reinterpret_cast < sockaddr * > ( & serv_addr ) , <nl> sizeof ( serv_addr ) ) < 0 ) { <nl> fprintf ( stderr , " Failed to connect to localhost : % d \ n " , <nl> - Shell : : options . read_from_tcp_port ) ; <nl> + Shell : : options . read_from_tcp_port . get ( ) ) ; <nl> close ( sockfd ) ; <nl> return nullptr ; <nl> } <nl> char * Shell : : ReadCharsFromTcpPort ( const char * name , int * size_out ) { <nl> ssize_t sent_now = send ( sockfd , name + sent_len , name_len - sent_len , 0 ) ; <nl> if ( sent_now < 0 ) { <nl> fprintf ( stderr , " Failed to send % s to localhost : % d \ n " , name , <nl> - Shell : : options . read_from_tcp_port ) ; <nl> + Shell : : options . read_from_tcp_port . get ( ) ) ; <nl> close ( sockfd ) ; <nl> return nullptr ; <nl> } <nl> char * Shell : : ReadCharsFromTcpPort ( const char * name , int * size_out ) { <nl> / / We need those 4 bytes to read off the file length . <nl> if ( received < 4 ) { <nl> fprintf ( stderr , " Failed to receive % s ' s length from localhost : % d \ n " , name , <nl> - Shell : : options . read_from_tcp_port ) ; <nl> + Shell : : options . read_from_tcp_port . get ( ) ) ; <nl> close ( sockfd ) ; <nl> return nullptr ; <nl> } <nl> char * Shell : : ReadCharsFromTcpPort ( const char * name , int * size_out ) { <nl> <nl> if ( file_length < 0 ) { <nl> fprintf ( stderr , " Received length % d for % s from localhost : % d \ n " , <nl> - file_length , name , Shell : : options . read_from_tcp_port ) ; <nl> + file_length , name , Shell : : options . read_from_tcp_port . get ( ) ) ; <nl> close ( sockfd ) ; <nl> return nullptr ; <nl> } <nl> char * Shell : : ReadCharsFromTcpPort ( const char * name , int * size_out ) { <nl> recv ( sockfd , chars + total_received , file_length - total_received , 0 ) ; <nl> if ( received < 0 ) { <nl> fprintf ( stderr , " Failed to receive % s from localhost : % d \ n " , name , <nl> - Shell : : options . read_from_tcp_port ) ; <nl> + Shell : : options . read_from_tcp_port . get ( ) ) ; <nl> close ( sockfd ) ; <nl> delete [ ] chars ; <nl> return nullptr ; <nl> mmm a / src / d8 / d8 . cc <nl> ppp b / src / d8 / d8 . cc <nl> void Worker : : PostMessageOut ( const v8 : : FunctionCallbackInfo < v8 : : Value > & args ) { <nl> <nl> bool Shell : : SetOptions ( int argc , char * argv [ ] ) { <nl> bool logfile_per_isolate = false ; <nl> + bool no_always_opt = false ; <nl> for ( int i = 0 ; i < argc ; i + + ) { <nl> if ( strcmp ( argv [ i ] , " - - " ) = = 0 ) { <nl> argv [ i ] = nullptr ; <nl> bool Shell : : SetOptions ( int argc , char * argv [ ] ) { <nl> argv [ i ] = nullptr ; <nl> } else if ( strcmp ( argv [ i ] , " - - noalways - opt " ) = = 0 | | <nl> strcmp ( argv [ i ] , " - - no - always - opt " ) = = 0 ) { <nl> - / / No support for stressing if we can ' t use - - always - opt . <nl> - options . stress_opt = false ; <nl> + no_always_opt = true ; <nl> } else if ( strcmp ( argv [ i ] , " - - logfile - per - isolate " ) = = 0 ) { <nl> logfile_per_isolate = true ; <nl> argv [ i ] = nullptr ; <nl> bool Shell : : SetOptions ( int argc , char * argv [ ] ) { <nl> } <nl> } <nl> <nl> + if ( options . stress_opt & & no_always_opt ) { <nl> + FATAL ( " Flag - - no - always - opt is incompatible with - - stress - opt . " ) ; <nl> + } <nl> + <nl> const char * usage = <nl> " Synopsis : \ n " <nl> " shell [ options ] [ - - shell ] [ < file > . . . 
] \ n " <nl> bool Shell : : SetOptions ( int argc , char * argv [ ] ) { <nl> " - - shell run an interactive JavaScript shell \ n " <nl> " - - module execute a file as a JavaScript module \ n \ n " ; <nl> using HelpOptions = i : : FlagList : : HelpOptions ; <nl> + i : : FLAG_abort_on_contradictory_flags = true ; <nl> i : : FlagList : : SetFlagsFromCommandLine ( & argc , argv , true , <nl> HelpOptions ( HelpOptions : : kExit , usage ) ) ; <nl> options . mock_arraybuffer_allocator = i : : FLAG_mock_arraybuffer_allocator ; <nl> class D8Testing { <nl> " - - max - inlined - bytecode - size = 999999 " <nl> " - - max - inlined - bytecode - size - cumulative = 999999 " <nl> " - - noalways - opt " ; <nl> - static const char * kForcedOptimizations = " - - always - opt " ; <nl> <nl> - if ( run = = GetStressRuns ( ) - 1 ) { <nl> - V8 : : SetFlagsFromString ( kForcedOptimizations ) ; <nl> - } else { <nl> + if ( run = = 0 ) { <nl> V8 : : SetFlagsFromString ( kLazyOptimizations ) ; <nl> + } else { <nl> + i : : FLAG_always_opt = true ; <nl> } <nl> } <nl> <nl> int Shell : : Main ( int argc , char * argv [ ] ) { <nl> options . stress_runs = D8Testing : : GetStressRuns ( ) ; <nl> for ( int i = 0 ; i < options . stress_runs & & result = = 0 ; i + + ) { <nl> printf ( " = = = = = = = = = = = = Stress % d / % d = = = = = = = = = = = = \ n " , i + 1 , <nl> - options . stress_runs ) ; <nl> + options . stress_runs . get ( ) ) ; <nl> D8Testing : : PrepareStressRun ( i ) ; <nl> bool last_run = i = = options . stress_runs - 1 ; <nl> result = RunMain ( isolate , last_run ) ; <nl> int Shell : : Main ( int argc , char * argv [ ] ) { <nl> options . stress_runs = i : : FLAG_stress_runs ; <nl> for ( int i = 0 ; i < options . stress_runs & & result = = 0 ; i + + ) { <nl> printf ( " = = = = = = = = = = = = Run % d / % d = = = = = = = = = = = = \ n " , i + 1 , <nl> - options . stress_runs ) ; <nl> + options . stress_runs . get ( ) ) ; <nl> bool last_run = i = = options . stress_runs - 1 ; <nl> result = RunMain ( isolate , last_run ) ; <nl> } <nl> int Shell : : Main ( int argc , char * argv [ ] ) { <nl> DCHECK ( options . compile_options = = v8 : : ScriptCompiler : : kEagerCompile | | <nl> options . compile_options = = <nl> v8 : : ScriptCompiler : : kNoCompileOptions ) ; <nl> - options . compile_options = v8 : : ScriptCompiler : : kConsumeCodeCache ; <nl> - options . code_cache_options = <nl> - ShellOptions : : CodeCacheOptions : : kNoProduceCache ; <nl> + options . compile_options . Overwrite ( <nl> + v8 : : ScriptCompiler : : kConsumeCodeCache ) ; <nl> + options . code_cache_options . Overwrite ( <nl> + ShellOptions : : CodeCacheOptions : : kNoProduceCache ) ; <nl> <nl> printf ( " = = = = = = = = = = = = Run : Consume code cache = = = = = = = = = = = = \ n " ) ; <nl> / / Second run to consume the cache in current isolate <nl> result = RunMain ( isolate , true ) ; <nl> - options . compile_options = v8 : : ScriptCompiler : : kNoCompileOptions ; <nl> + options . compile_options . Overwrite ( <nl> + v8 : : ScriptCompiler : : kNoCompileOptions ) ; <nl> } else { <nl> bool last_run = true ; <nl> result = RunMain ( isolate , last_run ) ; <nl> mmm a / src / d8 / d8 . h <nl> ppp b / src / d8 / d8 . 
h <nl> class ShellOptions { <nl> <nl> ~ ShellOptions ( ) { delete [ ] isolate_sources ; } <nl> <nl> - bool fuzzilli_coverage_statistics = false ; <nl> - bool fuzzilli_enable_builtins_coverage = true ; <nl> - bool send_idle_notification = false ; <nl> - bool invoke_weak_callbacks = false ; <nl> - bool omit_quit = false ; <nl> - bool wait_for_wasm = true ; <nl> - bool stress_opt = false ; <nl> - int stress_runs = 1 ; <nl> - bool stress_snapshot = false ; <nl> - bool interactive_shell = false ; <nl> + template < class T > <nl> + class DisallowReassignment { <nl> + public : <nl> + DisallowReassignment ( const char * name , T value ) <nl> + : name_ ( name ) , value_ ( value ) { } <nl> + <nl> + operator T ( ) const { return value_ ; } / / NOLINT <nl> + T get ( ) const { return value_ ; } <nl> + DisallowReassignment < T > & operator = ( T value ) { <nl> + / / In analogy to Flag : : CheckFlagChange ( ) in src / flags / flag . cc , only allow <nl> + / / repeated flags for identical boolean values . <nl> + if ( std : : is_same < T , bool > : : value ) { <nl> + if ( specified_ & & value_ ! = value ) { <nl> + FATAL ( " Contradictory values for d8 flag - - % s " , name_ ) ; <nl> + } <nl> + } else { <nl> + if ( specified_ ) { <nl> + FATAL ( " Repeated specification of d8 flag - - % s " , name_ ) ; <nl> + } <nl> + } <nl> + value_ = value ; <nl> + specified_ = true ; <nl> + return * this ; <nl> + } <nl> + void Overwrite ( T value ) { value_ = value ; } <nl> + <nl> + private : <nl> + const char * name_ ; <nl> + T value_ ; <nl> + bool specified_ = false ; <nl> + } ; <nl> + <nl> + DisallowReassignment < bool > fuzzilli_coverage_statistics = { <nl> + " fuzzilli - coverage - statistics " , false } ; <nl> + DisallowReassignment < bool > fuzzilli_enable_builtins_coverage = { <nl> + " fuzzilli - enable - builtins - coverage " , true } ; <nl> + DisallowReassignment < bool > send_idle_notification = { " send - idle - notification " , <nl> + false } ; <nl> + DisallowReassignment < bool > invoke_weak_callbacks = { " invoke - weak - callbacks " , <nl> + false } ; <nl> + DisallowReassignment < bool > omit_quit = { " omit - quit " , false } ; <nl> + DisallowReassignment < bool > wait_for_wasm = { " wait - for - wasm " , true } ; <nl> + DisallowReassignment < bool > stress_opt = { " stress - opt " , false } ; <nl> + DisallowReassignment < int > stress_runs = { " stress - runs " , 1 } ; <nl> + DisallowReassignment < bool > stress_snapshot = { " stress - snapshot " , false } ; <nl> + DisallowReassignment < bool > interactive_shell = { " shell " , false } ; <nl> bool test_shell = false ; <nl> - bool expected_to_throw = false ; <nl> - bool ignore_unhandled_promises = false ; <nl> - bool mock_arraybuffer_allocator = false ; <nl> - size_t mock_arraybuffer_allocator_limit = 0 ; <nl> - bool multi_mapped_mock_allocator = false ; <nl> - bool enable_inspector = false ; <nl> + DisallowReassignment < bool > expected_to_throw = { " throws " , false } ; <nl> + DisallowReassignment < bool > ignore_unhandled_promises = { <nl> + " ignore - unhandled - promises " , false } ; <nl> + DisallowReassignment < bool > mock_arraybuffer_allocator = { <nl> + " mock - arraybuffer - allocator " , false } ; <nl> + DisallowReassignment < size_t > mock_arraybuffer_allocator_limit = { <nl> + " mock - arraybuffer - allocator - limit " , 0 } ; <nl> + DisallowReassignment < bool > multi_mapped_mock_allocator = { <nl> + " multi - mapped - mock - allocator " , false } ; <nl> + DisallowReassignment < bool > enable_inspector = { " enable - inspector " , false } 
; <nl> int num_isolates = 1 ; <nl> - v8 : : ScriptCompiler : : CompileOptions compile_options = <nl> - v8 : : ScriptCompiler : : kNoCompileOptions ; <nl> - CodeCacheOptions code_cache_options = CodeCacheOptions : : kNoProduceCache ; <nl> - bool streaming_compile = false ; <nl> - SourceGroup * isolate_sources = nullptr ; <nl> - const char * icu_data_file = nullptr ; <nl> - const char * icu_locale = nullptr ; <nl> - const char * snapshot_blob = nullptr ; <nl> - bool trace_enabled = false ; <nl> - const char * trace_path = nullptr ; <nl> - const char * trace_config = nullptr ; <nl> - const char * lcov_file = nullptr ; <nl> - bool disable_in_process_stack_traces = false ; <nl> - int read_from_tcp_port = - 1 ; <nl> - bool enable_os_system = false ; <nl> - bool quiet_load = false ; <nl> - int thread_pool_size = 0 ; <nl> - bool stress_delay_tasks = false ; <nl> + DisallowReassignment < v8 : : ScriptCompiler : : CompileOptions > compile_options = { <nl> + " cache " , v8 : : ScriptCompiler : : kNoCompileOptions } ; <nl> + DisallowReassignment < CodeCacheOptions > code_cache_options = { <nl> + " cache " , CodeCacheOptions : : kNoProduceCache } ; <nl> + DisallowReassignment < bool > streaming_compile = { " streaming - compile " , false } ; <nl> + DisallowReassignment < SourceGroup * > isolate_sources = { " isolate - sources " , <nl> + nullptr } ; <nl> + DisallowReassignment < const char * > icu_data_file = { " icu - data - file " , nullptr } ; <nl> + DisallowReassignment < const char * > icu_locale = { " icu - locale " , nullptr } ; <nl> + DisallowReassignment < const char * > snapshot_blob = { " snapshot_blob " , nullptr } ; <nl> + DisallowReassignment < bool > trace_enabled = { " trace - enabled " , false } ; <nl> + DisallowReassignment < const char * > trace_path = { " trace - path " , nullptr } ; <nl> + DisallowReassignment < const char * > trace_config = { " trace - config " , nullptr } ; <nl> + DisallowReassignment < const char * > lcov_file = { " lcov " , nullptr } ; <nl> + DisallowReassignment < bool > disable_in_process_stack_traces = { <nl> + " disable - in - process - stack - traces " , false } ; <nl> + DisallowReassignment < int > read_from_tcp_port = { " read - from - tcp - port " , - 1 } ; <nl> + DisallowReassignment < bool > enable_os_system = { " enable - os - system " , false } ; <nl> + DisallowReassignment < bool > quiet_load = { " quiet - load " , false } ; <nl> + DisallowReassignment < int > thread_pool_size = { " thread - pool - size " , 0 } ; <nl> + DisallowReassignment < bool > stress_delay_tasks = { " stress - delay - tasks " , false } ; <nl> std : : vector < const char * > arguments ; <nl> - bool include_arguments = true ; <nl> - bool cpu_profiler = false ; <nl> - bool cpu_profiler_print = false ; <nl> - bool fuzzy_module_file_extensions = true ; <nl> + DisallowReassignment < bool > include_arguments = { " arguments " , true } ; <nl> + DisallowReassignment < bool > cpu_profiler = { " cpu - profiler " , false } ; <nl> + DisallowReassignment < bool > cpu_profiler_print = { " cpu - profiler - print " , false } ; <nl> + DisallowReassignment < bool > fuzzy_module_file_extensions = { <nl> + " fuzzy - module - file - extensions " , true } ; <nl> } ; <nl> <nl> class Shell : public i : : AllStatic { <nl> mmm a / src / flags / flag - definitions . h <nl> ppp b / src / flags / flag - definitions . 
h <nl> <nl> # define DEFINE_IMPLICATION ( whenflag , thenflag ) \ <nl> DEFINE_VALUE_IMPLICATION ( whenflag , thenflag , true ) <nl> <nl> + / / A weak implication will be overwritten by a normal implication or by an <nl> + / / explicit flag . <nl> + # define DEFINE_WEAK_IMPLICATION ( whenflag , thenflag ) \ <nl> + DEFINE_WEAK_VALUE_IMPLICATION ( whenflag , thenflag , true ) <nl> + <nl> # define DEFINE_NEG_IMPLICATION ( whenflag , thenflag ) \ <nl> DEFINE_VALUE_IMPLICATION ( whenflag , thenflag , false ) <nl> <nl> <nl> <nl> / / We produce the code to set flags when it is implied by another flag . <nl> # elif defined ( FLAG_MODE_DEFINE_IMPLICATIONS ) <nl> - # define DEFINE_VALUE_IMPLICATION ( whenflag , thenflag , value ) \ <nl> - if ( FLAG_ # # whenflag ) FLAG_ # # thenflag = value ; <nl> + # define DEFINE_VALUE_IMPLICATION ( whenflag , thenflag , value ) \ <nl> + changed | = TriggerImplication ( FLAG_ # # whenflag , # whenflag , & FLAG_ # # thenflag , \ <nl> + value , false ) ; <nl> + <nl> + / / A weak implication will be overwritten by a normal implication or by an <nl> + / / explicit flag . <nl> + # define DEFINE_WEAK_VALUE_IMPLICATION ( whenflag , thenflag , value ) \ <nl> + changed | = TriggerImplication ( FLAG_ # # whenflag , # whenflag , & FLAG_ # # thenflag , \ <nl> + value , true ) ; <nl> <nl> # define DEFINE_GENERIC_IMPLICATION ( whenflag , statement ) \ <nl> if ( FLAG_ # # whenflag ) statement ; <nl> <nl> - # define DEFINE_NEG_VALUE_IMPLICATION ( whenflag , thenflag , value ) \ <nl> - if ( ! FLAG_ # # whenflag ) FLAG_ # # thenflag = value ; <nl> + # define DEFINE_NEG_VALUE_IMPLICATION ( whenflag , thenflag , value ) \ <nl> + changed | = TriggerImplication ( ! FLAG_ # # whenflag , # whenflag , & FLAG_ # # thenflag , \ <nl> + value , false ) ; <nl> <nl> / / We apply a generic macro to the flags . <nl> # elif defined ( FLAG_MODE_APPLY ) <nl> <nl> # define DEFINE_VALUE_IMPLICATION ( whenflag , thenflag , value ) <nl> # endif <nl> <nl> + # ifndef DEFINE_WEAK_VALUE_IMPLICATION <nl> + # define DEFINE_WEAK_VALUE_IMPLICATION ( whenflag , thenflag , value ) <nl> + # endif <nl> + <nl> # ifndef DEFINE_GENERIC_IMPLICATION <nl> # define DEFINE_GENERIC_IMPLICATION ( whenflag , statement ) <nl> # endif <nl> struct MaybeBoolFlag { <nl> / / <nl> # define FLAG FLAG_FULL <nl> <nl> + / / ATTENTION : This is set to true by default in d8 . But for API compatibility , <nl> + / / it generally defaults to false . <nl> + DEFINE_BOOL ( abort_on_contradictory_flags , false , <nl> + " Disallow flags or implications overriding each other . " ) <nl> + / / This implication is also hard - coded into the flags processing to make sure it <nl> + / / becomes active before we even process subsequent flags . <nl> + DEFINE_NEG_IMPLICATION ( fuzzing , abort_on_contradictory_flags ) <nl> + <nl> / / Flags for language modes and experimental language features . <nl> DEFINE_BOOL ( use_strict , false , " enforce strict mode " ) <nl> <nl> DEFINE_BOOL ( future , FUTURE_BOOL , <nl> " Implies all staged features that we want to ship in the " <nl> " not - too - far future " ) <nl> <nl> - DEFINE_IMPLICATION ( future , write_protect_code_memory ) <nl> - DEFINE_IMPLICATION ( future , finalize_streaming_on_background ) <nl> + DEFINE_WEAK_IMPLICATION ( future , write_protect_code_memory ) <nl> + DEFINE_WEAK_IMPLICATION ( future , finalize_streaming_on_background ) <nl> + <nl> + / / Flags for jitless <nl> + DEFINE_BOOL ( jitless , V8_LITE_BOOL , <nl> + " Disable runtime allocation of executable memory . 
" ) <nl> + <nl> + / / Jitless V8 has a few implications : <nl> + DEFINE_NEG_IMPLICATION ( jitless , opt ) <nl> + / / Field representation tracking is only used by TurboFan . <nl> + DEFINE_NEG_IMPLICATION ( jitless , track_field_types ) <nl> + DEFINE_NEG_IMPLICATION ( jitless , track_heap_object_fields ) <nl> + / / Regexps are interpreted . <nl> + DEFINE_IMPLICATION ( jitless , regexp_interpret_all ) <nl> + / / asm . js validation is disabled since it triggers wasm code generation . <nl> + DEFINE_NEG_IMPLICATION ( jitless , validate_asm ) <nl> + / / - - jitless also implies - - no - expose - wasm , see InitializeOncePerProcessImpl . <nl> + <nl> + # ifndef V8_TARGET_ARCH_ARM <nl> + / / Unsupported on arm . See https : / / crbug . com / v8 / 8713 . <nl> + DEFINE_NEG_IMPLICATION ( jitless , interpreted_frames_native_stack ) <nl> + # endif <nl> <nl> DEFINE_BOOL ( assert_types , false , <nl> " generate runtime type assertions to test the typer " ) <nl> DEFINE_BOOL_READONLY ( string_slices , true , " use string slices " ) <nl> DEFINE_INT ( interrupt_budget , 144 * KB , <nl> " interrupt budget which should be used for the profiler counter " ) <nl> <nl> - / / Flags for jitless <nl> - DEFINE_BOOL ( jitless , V8_LITE_BOOL , <nl> - " Disable runtime allocation of executable memory . " ) <nl> - <nl> - / / Jitless V8 has a few implications : <nl> - DEFINE_NEG_IMPLICATION ( jitless , opt ) <nl> - / / Field representation tracking is only used by TurboFan . <nl> - DEFINE_NEG_IMPLICATION ( jitless , track_field_types ) <nl> - DEFINE_NEG_IMPLICATION ( jitless , track_heap_object_fields ) <nl> - / / Regexps are interpreted . <nl> - DEFINE_IMPLICATION ( jitless , regexp_interpret_all ) <nl> - / / asm . js validation is disabled since it triggers wasm code generation . <nl> - DEFINE_NEG_IMPLICATION ( jitless , validate_asm ) <nl> - / / - - jitless also implies - - no - expose - wasm , see InitializeOncePerProcessImpl . <nl> - <nl> - # ifndef V8_TARGET_ARCH_ARM <nl> - / / Unsupported on arm . See https : / / crbug . com / v8 / 8713 . <nl> - DEFINE_NEG_IMPLICATION ( jitless , interpreted_frames_native_stack ) <nl> - # endif <nl> - <nl> / / Flags for inline caching and feedback vectors . <nl> DEFINE_BOOL ( use_ic , true , " use inline caching " ) <nl> DEFINE_INT ( budget_for_feedback_vector_allocation , 1 * KB , <nl> DEFINE_BOOL ( concurrent_inlining , false , <nl> " run optimizing compiler ' s inlining phase on a separate thread " ) <nl> DEFINE_INT ( max_serializer_nesting , 25 , <nl> " maximum levels for nesting child serializers " ) <nl> - DEFINE_IMPLICATION ( future , concurrent_inlining ) <nl> + DEFINE_WEAK_IMPLICATION ( future , concurrent_inlining ) <nl> DEFINE_BOOL ( trace_heap_broker_verbose , false , <nl> " trace the heap broker verbosely ( all reports ) " ) <nl> DEFINE_BOOL ( trace_heap_broker_memory , false , <nl> DEFINE_IMPLICATION ( unbox_double_fields , track_double_fields ) <nl> # undef DEFINE_STRING <nl> # undef DEFINE_FLOAT <nl> # undef DEFINE_IMPLICATION <nl> + # undef DEFINE_WEAK_IMPLICATION <nl> # undef DEFINE_NEG_IMPLICATION <nl> # undef DEFINE_NEG_VALUE_IMPLICATION <nl> # undef DEFINE_VALUE_IMPLICATION <nl> + # undef DEFINE_WEAK_VALUE_IMPLICATION <nl> # undef DEFINE_GENERIC_IMPLICATION <nl> # undef DEFINE_ALIAS_BOOL <nl> # undef DEFINE_ALIAS_INT <nl> mmm a / src / flags / flags . cc <nl> ppp b / src / flags / flags . 
cc <nl> <nl> # include < cerrno > <nl> # include < cinttypes > <nl> # include < cstdlib > <nl> + # include < cstring > <nl> # include < sstream > <nl> <nl> # include " src / base / functional . h " <nl> + # include " src / base / logging . h " <nl> # include " src / base / platform / platform . h " <nl> # include " src / codegen / cpu - features . h " <nl> # include " src / logging / counters . h " <nl> namespace internal { <nl> <nl> namespace { <nl> <nl> + struct Flag ; <nl> + Flag * FindFlagByPointer ( const void * ptr ) ; <nl> + Flag * FindFlagByName ( const char * name ) ; <nl> + <nl> / / This structure represents a single entry in the flag system , with a pointer <nl> / / to the actual flag , default value , comment , etc . This is designed to be POD <nl> / / initialized as to avoid requiring static constructors . <nl> struct Flag { <nl> TYPE_STRING , <nl> } ; <nl> <nl> + enum class SetBy { kDefault , kWeakImplication , kImplication , kCommandLine } ; <nl> + <nl> FlagType type_ ; / / What type of flag , bool , int , or string . <nl> const char * name_ ; / / Name of the flag , ex " my_flag " . <nl> void * valptr_ ; / / Pointer to the global flag variable . <nl> const void * defptr_ ; / / Pointer to the default value . <nl> const char * cmt_ ; / / A comment about the flags purpose . <nl> bool owns_ptr_ ; / / Does the flag own its string value ? <nl> + SetBy set_by_ = SetBy : : kDefault ; <nl> + const char * implied_by_ = nullptr ; <nl> <nl> FlagType type ( ) const { return type_ ; } <nl> <nl> struct Flag { <nl> <nl> const char * comment ( ) const { return cmt_ ; } <nl> <nl> - bool * bool_variable ( ) const { <nl> + bool PointsTo ( const void * ptr ) const { return valptr_ = = ptr ; } <nl> + <nl> + bool bool_variable ( ) const { <nl> + DCHECK ( type_ = = TYPE_BOOL ) ; <nl> + return * reinterpret_cast < bool * > ( valptr_ ) ; <nl> + } <nl> + <nl> + void set_bool_variable ( bool value , SetBy set_by ) { <nl> DCHECK ( type_ = = TYPE_BOOL ) ; <nl> - return reinterpret_cast < bool * > ( valptr_ ) ; <nl> + bool change_flag = * reinterpret_cast < bool * > ( valptr_ ) ! = value ; <nl> + change_flag = CheckFlagChange ( set_by , change_flag ) ; <nl> + if ( change_flag ) * reinterpret_cast < bool * > ( valptr_ ) = value ; <nl> } <nl> <nl> - MaybeBoolFlag * maybe_bool_variable ( ) const { <nl> + MaybeBoolFlag maybe_bool_variable ( ) const { <nl> DCHECK ( type_ = = TYPE_MAYBE_BOOL ) ; <nl> - return reinterpret_cast < MaybeBoolFlag * > ( valptr_ ) ; <nl> + return * reinterpret_cast < MaybeBoolFlag * > ( valptr_ ) ; <nl> + } <nl> + <nl> + void set_maybe_bool_variable ( MaybeBoolFlag value , SetBy set_by ) { <nl> + DCHECK ( type_ = = TYPE_MAYBE_BOOL ) ; <nl> + bool change_flag = * reinterpret_cast < MaybeBoolFlag * > ( valptr_ ) ! = value ; <nl> + change_flag = CheckFlagChange ( set_by , change_flag ) ; <nl> + if ( change_flag ) * reinterpret_cast < MaybeBoolFlag * > ( valptr_ ) = value ; <nl> + } <nl> + <nl> + int int_variable ( ) const { <nl> + DCHECK ( type_ = = TYPE_INT ) ; <nl> + return * reinterpret_cast < int * > ( valptr_ ) ; <nl> } <nl> <nl> - int * int_variable ( ) const { <nl> + void set_int_variable ( int value , SetBy set_by ) { <nl> DCHECK ( type_ = = TYPE_INT ) ; <nl> - return reinterpret_cast < int * > ( valptr_ ) ; <nl> + bool change_flag = * reinterpret_cast < int * > ( valptr_ ) ! 
= value ; <nl> + change_flag = CheckFlagChange ( set_by , change_flag ) ; <nl> + if ( change_flag ) * reinterpret_cast < int * > ( valptr_ ) = value ; <nl> + } <nl> + <nl> + unsigned int uint_variable ( ) const { <nl> + DCHECK ( type_ = = TYPE_UINT ) ; <nl> + return * reinterpret_cast < unsigned int * > ( valptr_ ) ; <nl> } <nl> <nl> - unsigned int * uint_variable ( ) const { <nl> + void set_uint_variable ( unsigned int value , SetBy set_by ) { <nl> DCHECK ( type_ = = TYPE_UINT ) ; <nl> - return reinterpret_cast < unsigned int * > ( valptr_ ) ; <nl> + bool change_flag = * reinterpret_cast < unsigned int * > ( valptr_ ) ! = value ; <nl> + change_flag = CheckFlagChange ( set_by , change_flag ) ; <nl> + if ( change_flag ) * reinterpret_cast < unsigned int * > ( valptr_ ) = value ; <nl> } <nl> <nl> - uint64_t * uint64_variable ( ) const { <nl> + uint64_t uint64_variable ( ) const { <nl> DCHECK ( type_ = = TYPE_UINT64 ) ; <nl> - return reinterpret_cast < uint64_t * > ( valptr_ ) ; <nl> + return * reinterpret_cast < uint64_t * > ( valptr_ ) ; <nl> } <nl> <nl> - double * float_variable ( ) const { <nl> + void set_uint64_variable ( uint64_t value , SetBy set_by ) { <nl> + DCHECK ( type_ = = TYPE_UINT64 ) ; <nl> + bool change_flag = * reinterpret_cast < uint64_t * > ( valptr_ ) ! = value ; <nl> + change_flag = CheckFlagChange ( set_by , change_flag ) ; <nl> + if ( change_flag ) * reinterpret_cast < uint64_t * > ( valptr_ ) = value ; <nl> + } <nl> + <nl> + double float_variable ( ) const { <nl> DCHECK ( type_ = = TYPE_FLOAT ) ; <nl> - return reinterpret_cast < double * > ( valptr_ ) ; <nl> + return * reinterpret_cast < double * > ( valptr_ ) ; <nl> + } <nl> + <nl> + void set_float_variable ( double value , SetBy set_by ) { <nl> + DCHECK ( type_ = = TYPE_FLOAT ) ; <nl> + bool change_flag = * reinterpret_cast < double * > ( valptr_ ) ! = value ; <nl> + change_flag = CheckFlagChange ( set_by , change_flag ) ; <nl> + if ( change_flag ) * reinterpret_cast < double * > ( valptr_ ) = value ; <nl> + } <nl> + <nl> + size_t size_t_variable ( ) const { <nl> + DCHECK ( type_ = = TYPE_SIZE_T ) ; <nl> + return * reinterpret_cast < size_t * > ( valptr_ ) ; <nl> } <nl> <nl> - size_t * size_t_variable ( ) const { <nl> + void set_size_t_variable ( size_t value , SetBy set_by ) { <nl> DCHECK ( type_ = = TYPE_SIZE_T ) ; <nl> - return reinterpret_cast < size_t * > ( valptr_ ) ; <nl> + bool change_flag = * reinterpret_cast < size_t * > ( valptr_ ) ! = value ; <nl> + change_flag = CheckFlagChange ( set_by , change_flag ) ; <nl> + if ( change_flag ) * reinterpret_cast < size_t * > ( valptr_ ) = value ; <nl> } <nl> <nl> const char * string_value ( ) const { <nl> struct Flag { <nl> return * reinterpret_cast < const char * * > ( valptr_ ) ; <nl> } <nl> <nl> - void set_string_value ( const char * value , bool owns_ptr ) { <nl> + void set_string_value ( const char * value , bool owns_ptr , SetBy set_by ) { <nl> DCHECK ( type_ = = TYPE_STRING ) ; <nl> const char * * ptr = reinterpret_cast < const char * * > ( valptr_ ) ; <nl> - if ( owns_ptr_ & & * ptr ! = nullptr ) DeleteArray ( * ptr ) ; <nl> - * ptr = value ; <nl> - owns_ptr_ = owns_ptr ; <nl> + bool change_flag = ( * ptr = = nullptr ) ! = ( value = = nullptr ) | | <nl> + ( * ptr & & value & & std : : strcmp ( * ptr , value ) ! = 0 ) ; <nl> + change_flag = CheckFlagChange ( set_by , change_flag ) ; <nl> + if ( change_flag ) { <nl> + if ( owns_ptr_ & & * ptr ! 
= nullptr ) DeleteArray ( * ptr ) ; <nl> + * ptr = value ; <nl> + owns_ptr_ = owns_ptr ; <nl> + } else { <nl> + if ( owns_ptr & & value ! = nullptr ) DeleteArray ( value ) ; <nl> + } <nl> } <nl> <nl> bool bool_default ( ) const { <nl> struct Flag { <nl> return * reinterpret_cast < const char * const * > ( defptr_ ) ; <nl> } <nl> <nl> + static bool ShouldCheckFlagContradictions ( ) { <nl> + return FLAG_abort_on_contradictory_flags & & ! FLAG_fuzzing ; <nl> + } <nl> + <nl> + / / { change_flag } indicates if we ' re going to change the flag value . <nl> + / / Returns an updated value for { change_flag } , which is changed to false if a <nl> + / / weak implication is being ignored beause a flag is already set by a normal <nl> + / / implication or from the command - line . <nl> + bool CheckFlagChange ( SetBy new_set_by , bool change_flag , <nl> + const char * implied_by = nullptr ) { <nl> + if ( new_set_by = = SetBy : : kWeakImplication & & <nl> + ( set_by_ = = SetBy : : kImplication | | set_by_ = = SetBy : : kCommandLine ) ) { <nl> + return false ; <nl> + } <nl> + if ( ShouldCheckFlagContradictions ( ) ) { <nl> + / / For bool flags , we only check for a conflict if the value actually <nl> + / / changes . So specifying the same flag with the same value multiple times <nl> + / / is allowed . <nl> + / / For other flags , we disallow specifying them explicitly or in the <nl> + / / presence of an implication even if the value is the same . <nl> + / / This is to simplify the rules describing conflicts in variants . py : A <nl> + / / repeated non - boolean flag is considered an error independently of its <nl> + / / value . <nl> + bool is_bool_flag = type_ = = TYPE_MAYBE_BOOL | | type_ = = TYPE_BOOL ; <nl> + bool check_implications = change_flag ; <nl> + bool check_command_line_flags = change_flag | | ! is_bool_flag ; <nl> + const char * hint = <nl> + " To fix this , it might be necessary to specify additional " <nl> + " contradictory flags in tools / testrunner / local / variants . py . " ; <nl> + switch ( set_by_ ) { <nl> + case SetBy : : kDefault : <nl> + break ; <nl> + case SetBy : : kWeakImplication : <nl> + if ( new_set_by = = SetBy : : kWeakImplication & & check_implications ) { <nl> + FATAL ( <nl> + " Contradictory weak flag implications from - - % s and - - % s for " <nl> + " flag % s \ n % s " , <nl> + implied_by_ , implied_by , name ( ) , hint ) ; <nl> + } <nl> + break ; <nl> + case SetBy : : kImplication : <nl> + if ( new_set_by = = SetBy : : kImplication & & check_implications ) { <nl> + FATAL ( <nl> + " Contradictory flag implications from - - % s and - - % s for flag " <nl> + " % s \ n % s " , <nl> + implied_by_ , implied_by , name ( ) , hint ) ; <nl> + } <nl> + break ; <nl> + case SetBy : : kCommandLine : <nl> + if ( new_set_by = = SetBy : : kImplication & & check_command_line_flags ) { <nl> + FATAL ( <nl> + " Flag - - % s is implied by - - % s but also specified " <nl> + " explicitly . \ n % s " , <nl> + name ( ) , implied_by , hint ) ; <nl> + } else if ( new_set_by = = SetBy : : kCommandLine & & <nl> + check_command_line_flags ) { <nl> + FATAL ( <nl> + " Command - line provided flag - - % s specified multiple times . 
\ n % s " , <nl> + name ( ) , hint ) ; <nl> + } <nl> + break ; <nl> + } <nl> + } <nl> + set_by_ = new_set_by ; <nl> + if ( new_set_by = = SetBy : : kImplication | | <nl> + new_set_by = = SetBy : : kWeakImplication ) { <nl> + DCHECK_NOT_NULL ( implied_by ) ; <nl> + implied_by_ = implied_by ; <nl> + } <nl> + return change_flag ; <nl> + } <nl> + <nl> / / Compare this flag ' s current value against the default . <nl> bool IsDefault ( ) const { <nl> switch ( type_ ) { <nl> case TYPE_BOOL : <nl> - return * bool_variable ( ) = = bool_default ( ) ; <nl> + return bool_variable ( ) = = bool_default ( ) ; <nl> case TYPE_MAYBE_BOOL : <nl> - return maybe_bool_variable ( ) - > has_value = = false ; <nl> + return maybe_bool_variable ( ) . has_value = = false ; <nl> case TYPE_INT : <nl> - return * int_variable ( ) = = int_default ( ) ; <nl> + return int_variable ( ) = = int_default ( ) ; <nl> case TYPE_UINT : <nl> - return * uint_variable ( ) = = uint_default ( ) ; <nl> + return uint_variable ( ) = = uint_default ( ) ; <nl> case TYPE_UINT64 : <nl> - return * uint64_variable ( ) = = uint64_default ( ) ; <nl> + return uint64_variable ( ) = = uint64_default ( ) ; <nl> case TYPE_FLOAT : <nl> - return * float_variable ( ) = = float_default ( ) ; <nl> + return float_variable ( ) = = float_default ( ) ; <nl> case TYPE_SIZE_T : <nl> - return * size_t_variable ( ) = = size_t_default ( ) ; <nl> + return size_t_variable ( ) = = size_t_default ( ) ; <nl> case TYPE_STRING : { <nl> const char * str1 = string_value ( ) ; <nl> const char * str2 = string_default ( ) ; <nl> struct Flag { <nl> void Reset ( ) { <nl> switch ( type_ ) { <nl> case TYPE_BOOL : <nl> - * bool_variable ( ) = bool_default ( ) ; <nl> + set_bool_variable ( bool_default ( ) , SetBy : : kDefault ) ; <nl> break ; <nl> case TYPE_MAYBE_BOOL : <nl> - * maybe_bool_variable ( ) = MaybeBoolFlag : : Create ( false , false ) ; <nl> + set_maybe_bool_variable ( MaybeBoolFlag : : Create ( false , false ) , <nl> + SetBy : : kDefault ) ; <nl> break ; <nl> case TYPE_INT : <nl> - * int_variable ( ) = int_default ( ) ; <nl> + set_int_variable ( int_default ( ) , SetBy : : kDefault ) ; <nl> break ; <nl> case TYPE_UINT : <nl> - * uint_variable ( ) = uint_default ( ) ; <nl> + set_uint_variable ( uint_default ( ) , SetBy : : kDefault ) ; <nl> break ; <nl> case TYPE_UINT64 : <nl> - * uint64_variable ( ) = uint64_default ( ) ; <nl> + set_uint64_variable ( uint64_default ( ) , SetBy : : kDefault ) ; <nl> break ; <nl> case TYPE_FLOAT : <nl> - * float_variable ( ) = float_default ( ) ; <nl> + set_float_variable ( float_default ( ) , SetBy : : kDefault ) ; <nl> break ; <nl> case TYPE_SIZE_T : <nl> - * size_t_variable ( ) = size_t_default ( ) ; <nl> + set_size_t_variable ( size_t_default ( ) , SetBy : : kDefault ) ; <nl> break ; <nl> case TYPE_STRING : <nl> - set_string_value ( string_default ( ) , false ) ; <nl> + set_string_value ( string_default ( ) , false , SetBy : : kDefault ) ; <nl> break ; <nl> } <nl> } <nl> + <nl> + void AllowOverwriting ( ) { set_by_ = SetBy : : kDefault ; } <nl> } ; <nl> <nl> Flag flags [ ] = { <nl> Flag flags [ ] = { <nl> <nl> const size_t num_flags = sizeof ( flags ) / sizeof ( * flags ) ; <nl> <nl> + inline char NormalizeChar ( char ch ) { return ch = = ' _ ' ? 
' - ' : ch ; } <nl> + <nl> + bool EqualNames ( const char * a , const char * b ) { <nl> + for ( int i = 0 ; NormalizeChar ( a [ i ] ) = = NormalizeChar ( b [ i ] ) ; i + + ) { <nl> + if ( a [ i ] = = ' \ 0 ' ) { <nl> + return true ; <nl> + } <nl> + } <nl> + return false ; <nl> + } <nl> + <nl> + Flag * FindFlagByName ( const char * name ) { <nl> + for ( size_t i = 0 ; i < num_flags ; + + i ) { <nl> + if ( EqualNames ( name , flags [ i ] . name ( ) ) ) return & flags [ i ] ; <nl> + } <nl> + return nullptr ; <nl> + } <nl> + <nl> + Flag * FindFlagByPointer ( const void * ptr ) { <nl> + for ( size_t i = 0 ; i < num_flags ; + + i ) { <nl> + if ( flags [ i ] . PointsTo ( ptr ) ) return & flags [ i ] ; <nl> + } <nl> + return nullptr ; <nl> + } <nl> + <nl> } / / namespace <nl> <nl> static const char * Type2String ( Flag : : FlagType type ) { <nl> static const char * Type2String ( Flag : : FlagType type ) { <nl> std : : ostream & operator < < ( std : : ostream & os , const Flag & flag ) { / / NOLINT <nl> switch ( flag . type ( ) ) { <nl> case Flag : : TYPE_BOOL : <nl> - os < < ( * flag . bool_variable ( ) ? " true " : " false " ) ; <nl> + os < < ( flag . bool_variable ( ) ? " true " : " false " ) ; <nl> break ; <nl> case Flag : : TYPE_MAYBE_BOOL : <nl> - os < < ( flag . maybe_bool_variable ( ) - > has_value <nl> - ? ( flag . maybe_bool_variable ( ) - > value ? " true " : " false " ) <nl> + os < < ( flag . maybe_bool_variable ( ) . has_value <nl> + ? ( flag . maybe_bool_variable ( ) . value ? " true " : " false " ) <nl> : " unset " ) ; <nl> break ; <nl> case Flag : : TYPE_INT : <nl> - os < < * flag . int_variable ( ) ; <nl> + os < < flag . int_variable ( ) ; <nl> break ; <nl> case Flag : : TYPE_UINT : <nl> - os < < * flag . uint_variable ( ) ; <nl> + os < < flag . uint_variable ( ) ; <nl> break ; <nl> case Flag : : TYPE_UINT64 : <nl> - os < < * flag . uint64_variable ( ) ; <nl> + os < < flag . uint64_variable ( ) ; <nl> break ; <nl> case Flag : : TYPE_FLOAT : <nl> - os < < * flag . float_variable ( ) ; <nl> + os < < flag . float_variable ( ) ; <nl> break ; <nl> case Flag : : TYPE_SIZE_T : <nl> - os < < * flag . size_t_variable ( ) ; <nl> + os < < flag . size_t_variable ( ) ; <nl> break ; <nl> case Flag : : TYPE_STRING : { <nl> const char * str = flag . string_value ( ) ; <nl> std : : vector < const char * > * FlagList : : argv ( ) { <nl> Flag * f = & flags [ i ] ; <nl> if ( ! f - > IsDefault ( ) ) { <nl> { <nl> - bool disabled = f - > type ( ) = = Flag : : TYPE_BOOL & & ! * f - > bool_variable ( ) ; <nl> + bool disabled = f - > type ( ) = = Flag : : TYPE_BOOL & & ! f - > bool_variable ( ) ; <nl> std : : ostringstream os ; <nl> os < < ( disabled ? " - - no " : " - - " ) < < f - > name ( ) ; <nl> args - > push_back ( StrDup ( os . str ( ) . c_str ( ) ) ) ; <nl> std : : vector < const char * > * FlagList : : argv ( ) { <nl> return args ; <nl> } <nl> <nl> - inline char NormalizeChar ( char ch ) { return ch = = ' _ ' ? ' - ' : ch ; } <nl> - <nl> / / Helper function to parse flags : Takes an argument arg and splits it into <nl> / / a flag name and flag value ( or nullptr if they are missing ) . negated is set <nl> / / if the arg started with " - no " or " - - no " . 
The buffer may be used to NUL - <nl> static void SplitArgument ( const char * arg , char * buffer , int buffer_size , <nl> } <nl> } <nl> <nl> - static bool EqualNames ( const char * a , const char * b ) { <nl> - for ( int i = 0 ; NormalizeChar ( a [ i ] ) = = NormalizeChar ( b [ i ] ) ; i + + ) { <nl> - if ( a [ i ] = = ' \ 0 ' ) { <nl> - return true ; <nl> - } <nl> - } <nl> - return false ; <nl> - } <nl> - <nl> - static Flag * FindFlag ( const char * name ) { <nl> - for ( size_t i = 0 ; i < num_flags ; + + i ) { <nl> - if ( EqualNames ( name , flags [ i ] . name ( ) ) ) return & flags [ i ] ; <nl> - } <nl> - return nullptr ; <nl> - } <nl> - <nl> template < typename T > <nl> bool TryParseUnsigned ( Flag * flag , const char * arg , const char * value , <nl> char * * endp , T * out_val ) { <nl> int FlagList : : SetFlagsFromCommandLine ( int * argc , char * * argv , bool remove_flags , <nl> <nl> if ( name ! = nullptr ) { <nl> / / lookup the flag <nl> - Flag * flag = FindFlag ( name ) ; <nl> + Flag * flag = FindFlagByName ( name ) ; <nl> if ( flag = = nullptr ) { <nl> if ( remove_flags ) { <nl> / / We don ' t recognize this flag but since we ' re removing <nl> int FlagList : : SetFlagsFromCommandLine ( int * argc , char * * argv , bool remove_flags , <nl> char * endp = const_cast < char * > ( " " ) ; / / * endp is only read <nl> switch ( flag - > type ( ) ) { <nl> case Flag : : TYPE_BOOL : <nl> - * flag - > bool_variable ( ) = ! negated ; <nl> + flag - > set_bool_variable ( ! negated , Flag : : SetBy : : kCommandLine ) ; <nl> break ; <nl> case Flag : : TYPE_MAYBE_BOOL : <nl> - * flag - > maybe_bool_variable ( ) = MaybeBoolFlag : : Create ( true , ! negated ) ; <nl> + flag - > set_maybe_bool_variable ( MaybeBoolFlag : : Create ( true , ! negated ) , <nl> + Flag : : SetBy : : kCommandLine ) ; <nl> break ; <nl> case Flag : : TYPE_INT : <nl> - * flag - > int_variable ( ) = static_cast < int > ( strtol ( value , & endp , 10 ) ) ; <nl> + flag - > set_int_variable ( static_cast < int > ( strtol ( value , & endp , 10 ) ) , <nl> + Flag : : SetBy : : kCommandLine ) ; <nl> break ; <nl> - case Flag : : TYPE_UINT : <nl> - if ( ! TryParseUnsigned ( flag , arg , value , & endp , <nl> - flag - > uint_variable ( ) ) ) { <nl> + case Flag : : TYPE_UINT : { <nl> + unsigned int parsed_value ; <nl> + if ( TryParseUnsigned ( flag , arg , value , & endp , & parsed_value ) ) { <nl> + flag - > set_uint_variable ( parsed_value , Flag : : SetBy : : kCommandLine ) ; <nl> + } else { <nl> return_code = j ; <nl> } <nl> break ; <nl> - case Flag : : TYPE_UINT64 : <nl> - if ( ! TryParseUnsigned ( flag , arg , value , & endp , <nl> - flag - > uint64_variable ( ) ) ) { <nl> + } <nl> + case Flag : : TYPE_UINT64 : { <nl> + uint64_t parsed_value ; <nl> + if ( TryParseUnsigned ( flag , arg , value , & endp , & parsed_value ) ) { <nl> + flag - > set_uint64_variable ( parsed_value , Flag : : SetBy : : kCommandLine ) ; <nl> + } else { <nl> return_code = j ; <nl> } <nl> break ; <nl> + } <nl> case Flag : : TYPE_FLOAT : <nl> - * flag - > float_variable ( ) = strtod ( value , & endp ) ; <nl> + flag - > set_float_variable ( strtod ( value , & endp ) , <nl> + Flag : : SetBy : : kCommandLine ) ; <nl> break ; <nl> - case Flag : : TYPE_SIZE_T : <nl> - if ( ! 
TryParseUnsigned ( flag , arg , value , & endp , <nl> - flag - > size_t_variable ( ) ) ) { <nl> + case Flag : : TYPE_SIZE_T : { <nl> + size_t parsed_value ; <nl> + if ( TryParseUnsigned ( flag , arg , value , & endp , & parsed_value ) ) { <nl> + flag - > set_size_t_variable ( parsed_value , Flag : : SetBy : : kCommandLine ) ; <nl> + } else { <nl> return_code = j ; <nl> } <nl> break ; <nl> + } <nl> case Flag : : TYPE_STRING : <nl> - flag - > set_string_value ( value ? StrDup ( value ) : nullptr , true ) ; <nl> + flag - > set_string_value ( value ? StrDup ( value ) : nullptr , true , <nl> + Flag : : SetBy : : kCommandLine ) ; <nl> break ; <nl> } <nl> <nl> void FlagList : : PrintHelp ( ) { <nl> } <nl> } <nl> <nl> + namespace { <nl> + <nl> static uint32_t flag_hash = 0 ; <nl> <nl> void ComputeFlagListHash ( ) { <nl> void ComputeFlagListHash ( ) { <nl> } <nl> for ( size_t i = 0 ; i < num_flags ; + + i ) { <nl> Flag * current = & flags [ i ] ; <nl> - if ( current - > type ( ) = = Flag : : TYPE_BOOL & & <nl> - current - > bool_variable ( ) = = & FLAG_profile_deserialization ) { <nl> + if ( current - > PointsTo ( & FLAG_profile_deserialization ) ) { <nl> / / We want to be able to flip - - profile - deserialization without <nl> / / causing the code cache to get invalidated by this hash . <nl> continue ; <nl> void ComputeFlagListHash ( ) { <nl> base : : hash_range ( args . c_str ( ) , args . c_str ( ) + args . length ( ) ) ) ; <nl> } <nl> <nl> + template < class A , class B > <nl> + bool TriggerImplication ( bool premise , const char * premise_name , <nl> + A * conclusion_pointer , B value , bool weak_implication ) { <nl> + if ( ! premise ) return false ; <nl> + bool change_flag = * conclusion_pointer ! = implicit_cast < A > ( value ) ; <nl> + Flag * conclusion_flag = FindFlagByPointer ( conclusion_pointer ) ; <nl> + change_flag = conclusion_flag - > CheckFlagChange ( <nl> + weak_implication ? Flag : : SetBy : : kWeakImplication <nl> + : Flag : : SetBy : : kImplication , <nl> + change_flag , premise_name ) ; <nl> + if ( change_flag ) * conclusion_pointer = value ; <nl> + return change_flag ; <nl> + } <nl> + <nl> + } / / namespace <nl> + <nl> / / static <nl> void FlagList : : EnforceFlagImplications ( ) { <nl> + bool changed ; <nl> + do { <nl> + changed = false ; <nl> # define FLAG_MODE_DEFINE_IMPLICATIONS <nl> # include " src / flags / flag - definitions . h " / / NOLINT ( build / include ) <nl> # undef FLAG_MODE_DEFINE_IMPLICATIONS <nl> + } while ( changed ) ; <nl> ComputeFlagListHash ( ) ; <nl> } <nl> <nl> mmm a / test / inspector / inspector - test . cc <nl> ppp b / test / inspector / inspector - test . cc <nl> int main ( int argc , char * argv [ ] ) { <nl> v8 : : V8 : : InitializeICUDefaultLocation ( argv [ 0 ] ) ; <nl> std : : unique_ptr < v8 : : Platform > platform ( v8 : : platform : : NewDefaultPlatform ( ) ) ; <nl> v8 : : V8 : : InitializePlatform ( platform . get ( ) ) ; <nl> + v8 : : internal : : FLAG_abort_on_contradictory_flags = true ; <nl> v8 : : V8 : : SetFlagsFromCommandLine ( & argc , argv , true ) ; <nl> v8 : : V8 : : InitializeExternalStartupData ( argv [ 0 ] ) ; <nl> v8 : : V8 : : Initialize ( ) ; <nl> mmm a / test / inspector / inspector . status <nl> ppp b / test / inspector / inspector . status <nl> <nl> ' * ' : [ SKIP ] , # only relevant for mjsunit tests . 
<nl> } ] , <nl> <nl> + # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> + [ ' variant = = stress ' , { <nl> + ' * ' : [ SKIP ] , # only relevant for mjsunit tests . <nl> + } ] , <nl> + <nl> # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> [ ' tsan = = True ' , { <nl> # TSan handles SIGPROF incorrectly ( https : / / crbug . com / v8 / 9869 ) . <nl> mmm a / test / message / wasm - trace - memory - liftoff . js <nl> ppp b / test / message / wasm - trace - memory - liftoff . js <nl> <nl> / / Use of this source code is governed by a BSD - style license that can be <nl> / / found in the LICENSE file . <nl> <nl> - / / Flags : - - no - stress - opt - - trace - wasm - memory - - liftoff - - no - future <nl> + / / Flags : - - no - stress - opt - - trace - wasm - memory - - liftoff <nl> / / Flags : - - no - wasm - tier - up - - experimental - wasm - simd <nl> / / Flags : - - enable - sse3 - - enable - sse4 - 1 <nl> <nl> mmm a / test / message / wasm - trace - memory . js <nl> ppp b / test / message / wasm - trace - memory . js <nl> <nl> / / Use of this source code is governed by a BSD - style license that can be <nl> / / found in the LICENSE file . <nl> <nl> - / / Flags : - - no - stress - opt - - trace - wasm - memory - - no - liftoff - - no - future <nl> + / / Flags : - - no - stress - opt - - trace - wasm - memory - - no - liftoff <nl> / / Flags : - - experimental - wasm - simd <nl> <nl> load ( " test / mjsunit / wasm / wasm - module - builder . js " ) ; <nl> mmm a / test / mjsunit / ensure - growing - store - learns . js <nl> ppp b / test / mjsunit / ensure - growing - store - learns . js <nl> <nl> / / Use of this source code is governed by a BSD - style license that can be <nl> / / found in the LICENSE file . <nl> <nl> - / / Flags : - - allow - natives - syntax - - noverify - heap - - noenable - slow - asserts <nl> - / / Flags : - - opt - - no - always - opt <nl> + / / The flags are processed left to right . - - no - abort - on - contradictory - flags <nl> + / / disables the checking for conflicts , then we process - - noverify - heap and <nl> + / / - - noenable - slow - asserts , which the test runner already set to true before . <nl> + / / This causes the flags to be overwritten while silencing the error . Then we <nl> + / / re - enable - - abort - on - contradictory - flags to make sure that the processing of <nl> + / / other flags and flag implications , which happens later , still produces <nl> + / / errors . <nl> + / / Flags : - - no - abort - on - contradictory - flags - - noverify - heap - - noenable - slow - asserts - - abort - on - contradictory - flags <nl> + / / Flags : - - allow - natives - syntax - - opt - - no - always - opt <nl> <nl> / / - - noverify - heap and - - noenable - slow - asserts are set because the test is too <nl> / / slow with it on . <nl> mmm a / test / mjsunit / mjsunit . status <nl> ppp b / test / mjsunit / mjsunit . status <nl> <nl> ' regexp - tier - up - multiple ' : [ SKIP ] , <nl> ' regress / regress - 996234 ' : [ SKIP ] , <nl> <nl> - # Tests that depend on optimization ( beyond doing assertOptimized ) . 
<nl> - ' compiler / is - being - interpreted - * ' : [ SKIP ] , <nl> - ' compiler / serializer - accessors ' : [ SKIP ] , <nl> - ' compiler / serializer - apply ' : [ SKIP ] , <nl> - ' compiler / serializer - call ' : [ SKIP ] , <nl> - ' compiler / serializer - dead - after - jump ' : [ SKIP ] , <nl> - ' compiler / serializer - dead - after - return ' : [ SKIP ] , <nl> - ' compiler / serializer - transition - propagation ' : [ SKIP ] , <nl> - ' regress / regress - 1049982 - 1 ' : [ SKIP ] , <nl> - ' regress / regress - 1049982 - 2 ' : [ SKIP ] , <nl> - <nl> # These tests check that we can trace the compiler . <nl> ' tools / compiler - trace - flags ' : [ SKIP ] , <nl> ' tools / compiler - trace - flags - wasm ' : [ SKIP ] , <nl> <nl> ' regress / regress - 1049982 - 2 ' : [ SKIP ] , <nl> ' es6 / iterator - eager - deopt ' : [ SKIP ] , <nl> <nl> - # interrupt_budget overrides don ' t work with TurboProp . <nl> - ' interrupt - budget - override ' : [ SKIP ] , <nl> - ' never - optimize ' : [ SKIP ] , <nl> - <nl> # In turboprop we reuse the optimized code on soft deopt . The following tests <nl> # test for a soft deopt and they won ' t work in TurboProp . <nl> ' deopt - recursive - soft - once ' : [ SKIP ] , <nl> mmm a / test / mjsunit / random - bit - correlations . js <nl> ppp b / test / mjsunit / random - bit - correlations . js <nl> <nl> / / Use of this source code is governed by a BSD - style license that can be <nl> / / found in the LICENSE file . <nl> <nl> - / / Flags : - - random - seed = 20 - - nostress - opt - - noalways - opt - - predictable <nl> + / / Overwrite the random seed provided by the test runner to make this test less <nl> + / / flaky . <nl> + / / The flags are processed left to right . - - no - abort - on - contradictory - flags <nl> + / / disables the checking for conflicts , then we process - - random - seed = 20 to <nl> + / / overwrite the value the test runner already set before . Then we re - enable <nl> + / / - - abort - on - contradictory - flags to make sure that the processing of other <nl> + / / flags and flag implications , which happens later , still produces errors . <nl> + / / Flags : - - no - abort - on - contradictory - flags - - random - seed = 20 - - abort - on - contradictory - flags <nl> + / / Flags : - - nostress - opt - - noalways - opt - - predictable <nl> <nl> ( function ( ) { <nl> var kHistory = 2 ; <nl> mmm a / test / mjsunit / regress / regress - 356053 . js <nl> ppp b / test / mjsunit / regress / regress - 356053 . js <nl> <nl> / / Use of this source code is governed by a BSD - style license that can be <nl> / / found in the LICENSE file . <nl> <nl> - / / Flags : - - noconcurrent - recompilation - - expose - gc - - allow - natives - syntax <nl> + / / Flags : - - expose - gc - - allow - natives - syntax <nl> / / Flags : - - concurrent - recompilation - - block - concurrent - recompilation <nl> <nl> gc ( ) ; <nl> mmm a / test / mjsunit / regress / regress - 411210 . js <nl> ppp b / test / mjsunit / regress / regress - 411210 . js <nl> <nl> / / Use of this source code is governed by a BSD - style license that can be <nl> / / found in the LICENSE file . <nl> <nl> - / / Flags : - - allow - natives - syntax - - gc - interval = 439 - - random - seed = - 423594851 <nl> + / / Flags : - - allow - natives - syntax - - gc - interval = 439 <nl> <nl> var __v_3 ; <nl> function __f_2 ( ) { <nl> mmm a / test / mjsunit / regress / regress - 487981 . js <nl> ppp b / test / mjsunit / regress / regress - 487981 . 
js <nl> <nl> <nl> / / Flags : - - allow - natives - syntax - - stress - compaction <nl> <nl> - / / To reliably reproduce the crash use - - verify - heap - - random - seed = - 133185440 <nl> + / / To reliably reproduce the crash use - - verify - heap <nl> <nl> function __f_2 ( o ) { <nl> return o . field . b . x ; <nl> mmm a / test / mjsunit / regress / regress - 8265 . js <nl> ppp b / test / mjsunit / regress / regress - 8265 . js <nl> <nl> / / Use of this source code is governed by a BSD - style license that can be <nl> / / found in the LICENSE file . <nl> <nl> - / / Flags : - - random - seed = 1 <nl> - <nl> for ( let i = 0 ; i < 54 ; + + i ) Math . random ( ) ; <nl> let sum = 0 ; <nl> for ( let i = 0 ; i < 10 ; + + i ) <nl> mmm a / test / mjsunit / regress / regress - 863810 . js <nl> ppp b / test / mjsunit / regress / regress - 863810 . js <nl> <nl> / / Use of this source code is governed by a BSD - style license that can be <nl> / / found in the LICENSE file . <nl> <nl> - / / Flags : - - no - liftoff - - no - future - - debug - code <nl> + / / Flags : - - no - liftoff - - debug - code <nl> <nl> load ( ' test / mjsunit / wasm / wasm - module - builder . js ' ) ; <nl> <nl> mmm a / test / mjsunit / regress / regress - 883059 . js <nl> ppp b / test / mjsunit / regress / regress - 883059 . js <nl> <nl> / / Use of this source code is governed by a BSD - style license that can be <nl> / / found in the LICENSE file . <nl> <nl> - / / Flags : - - random - seed = - 1595876594 - - disable - in - process - stack - traces - - no - lazy <nl> + / / Flags : - - disable - in - process - stack - traces - - no - lazy <nl> <nl> var __v_47 = ( { [ __v_46 ] : __f_52 } ) = > { var __v_46 = ' b ' ; return __f_52 ; } ; <nl> mmm a / test / mjsunit / regress / regress - crbug - 664506 . js <nl> ppp b / test / mjsunit / regress / regress - crbug - 664506 . js <nl> <nl> / / Use of this source code is governed by a BSD - style license that can be <nl> / / found in the LICENSE file . <nl> <nl> - / / Flags : - - expose - gc - - predictable - - random - seed = - 1109634722 <nl> + / / Flags : - - expose - gc - - predictable <nl> <nl> gc ( ) ; <nl> gc ( ) ; <nl> mmm a / test / mjsunit / regress / wasm / regress - 02256 . js <nl> ppp b / test / mjsunit / regress / wasm / regress - 02256 . js <nl> <nl> / / Use of this source code is governed by a BSD - style license that can be <nl> / / found in the LICENSE file . <nl> / / <nl> - / / Flags : - - random - seed = 891196975 - - expose - gc - - allow - natives - syntax <nl> + / / Flags : - - expose - gc - - allow - natives - syntax <nl> / / Flags : - - gc - interval = 207 - - stress - compaction - - validate - asm <nl> / / Flags : - - opt - - no - always - opt <nl> / / <nl> mmm a / test / mjsunit / regress / wasm / regress - 02256b . js <nl> ppp b / test / mjsunit / regress / wasm / regress - 02256b . js <nl> <nl> / / Use of this source code is governed by a BSD - style license that can be <nl> / / found in the LICENSE file . <nl> / / <nl> - / / Flags : - - random - seed = 891196975 - - expose - gc - - allow - natives - syntax <nl> - / / Flags : - - gc - interval = 207 - - stress - compaction - - validate - asm <nl> - / / Flags : - - opt - - no - always - opt <nl> + / / Flags : - - expose - gc - - allow - natives - syntax - - gc - interval = 207 <nl> + / / Flags : - - stress - compaction - - validate - asm - - opt - - no - always - opt <nl> / / <nl> / / / v8 / test / mjsunit / wasm / grow - memory . js <nl> / / / v8 / test / mjsunit / regress / regress - 540 . 
js <nl> mmm a / test / mjsunit / regress / wasm / regress - 02862 . js <nl> ppp b / test / mjsunit / regress / wasm / regress - 02862 . js <nl> <nl> / / Use of this source code is governed by a BSD - style license that can be <nl> / / found in the LICENSE file . <nl> <nl> - / / Flags : - - random - seed = 1557792826 - - expose - gc - - invoke - weak - callbacks - - omit - quit - - gc - interval = 469 - - validate - asm <nl> + / / Flags : - - expose - gc - - invoke - weak - callbacks - - omit - quit - - gc - interval = 469 - - validate - asm <nl> <nl> function nop ( ) { } <nl> var __v_42 = { } ; <nl> mmm a / test / mjsunit / regress / wasm / regress - 666741 . js <nl> ppp b / test / mjsunit / regress / wasm / regress - 666741 . js <nl> <nl> / / Use of this source code is governed by a BSD - style license that can be <nl> / / found in the LICENSE file . <nl> / / <nl> - / / Flags : - - random - seed = - 1101427159 - - enable - slow - asserts - - expose - wasm <nl> + / / Flags : - - enable - slow - asserts - - expose - wasm <nl> <nl> ( function __f_7 ( ) { <nl> assertThrows ( ( ) = > new WebAssembly . Memory ( { initial : 59199 } ) , RangeError ) ; <nl> mmm a / test / mjsunit / string - case . js <nl> ppp b / test / mjsunit / string - case . js <nl> <nl> / / ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> / / OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> <nl> - / / Flags : - - random - seed = 17 - - allow - natives - syntax <nl> - / / Flags : - - expose - externalize - string <nl> + / / Flags : - - allow - natives - syntax - - expose - externalize - string <nl> <nl> assertEquals ( " ΚΟΣΜΟΣ ΚΟΣΜΟΣ " . toLowerCase ( ) , " κοσμος κοσμος " ) ; <nl> <nl> mmm a / test / mjsunit / wasm / tier - up - testing - flag . js <nl> ppp b / test / mjsunit / wasm / tier - up - testing - flag . js <nl> <nl> / / Use of this source code is governed by a BSD - style license that can be <nl> / / found in the LICENSE file . <nl> <nl> - / / Flags : - - allow - natives - syntax - - liftoff - - no - future - - no - wasm - tier - up <nl> + / / Flags : - - allow - natives - syntax - - liftoff - - no - wasm - tier - up <nl> / / Compile functions 0 and 2 with Turbofan , the rest with Liftoff : <nl> / / Flags : - - wasm - tier - mask - for - testing = 5 <nl> <nl> mmm a / tools / testrunner / local / statusfile . py <nl> ppp b / tools / testrunner / local / statusfile . py <nl> def __init__ ( self , path , variables ) : <nl> _rules : { variant : { test name : [ rule ] } } <nl> _prefix_rules : { variant : { test name prefix : [ rule ] } } <nl> " " " <nl> + self . variables = variables <nl> with open ( path ) as f : <nl> self . _rules , self . _prefix_rules = ReadStatusFile ( f . read ( ) , variables ) <nl> <nl> mmm a / tools / testrunner / local / variants . py <nl> ppp b / tools / testrunner / local / variants . py <nl> <nl> # independent of JS optimizations , so we can combine those configs . 
<nl> " nooptimization " : [ [ " - - no - opt " , " - - liftoff " , " - - no - wasm - tier - up " ] ] , <nl> " slow_path " : [ [ " - - force - slow - path " ] ] , <nl> - " stress " : [ [ " - - stress - opt " , " - - always - opt " , " - - no - liftoff " , <nl> - " - - stress - lazy - source - positions " ] ] , <nl> + " stress " : [ [ " - - stress - opt " , " - - no - liftoff " , " - - stress - lazy - source - positions " ] ] , <nl> " stress_js_bg_compile_wasm_code_gc " : [ [ " - - stress - background - compile " , <nl> " - - stress - wasm - code - gc " ] ] , <nl> " stress_incremental_marking " : [ [ " - - stress - incremental - marking " ] ] , <nl> <nl> " top_level_await " : [ [ " - - harmony - top - level - await " ] ] , <nl> } <nl> <nl> + # Flags that lead to a contradiction with the flags provided by the respective <nl> + # variant . This depends on the flags specified in ALL_VARIANT_FLAGS and on the <nl> + # implications defined in flag - definitions . h . <nl> + INCOMPATIBLE_FLAGS_PER_VARIANT = { <nl> + " assert_types " : [ " - - no - assert - types " ] , <nl> + " jitless " : [ " - - opt " , " - - liftoff " , " - - track - field - types " , " - - validate - asm " ] , <nl> + " no_wasm_traps " : [ " - - wasm - trap - handler " ] , <nl> + " nooptimization " : [ " - - opt " , " - - no - liftoff " , " - - predictable " , " - - wasm - tier - up " ] , <nl> + " slow_path " : [ " - - no - force - slow - path " ] , <nl> + " stress_incremental_marking " : [ " - - no - stress - incremental - marking " ] , <nl> + " stress_js_bg_compile_wasm_code_gc " : [ " - - no - stress - background - compile " ] , <nl> + " stress " : [ " - - no - stress - opt " , " - - always - opt " , " - - no - always - opt " , " - - liftoff " , " - - max - inlined - bytecode - size = * " , <nl> + " - - max - inlined - bytecode - size - cumulative = * " , " - - stress - inline " ] , <nl> + " turboprop " : [ " - - turbo - inlining " , " - - interrupt - budget = * " , " - - no - turboprop " ] , <nl> + } <nl> + <nl> + # Flags that lead to a contradiction under certain build variables . <nl> + # This corresponds to the build variables usable in status files as generated <nl> + # in _get_statusfile_variables in base_runner . py . <nl> + # The conflicts might be directly contradictory flags or be caused by the <nl> + # implications defined in flag - definitions . h . <nl> + INCOMPATIBLE_FLAGS_PER_BUILD_VARIABLE = { <nl> + " lite_mode " : [ " - - no - lazy - feedback - allocation " , " - - max - semi - space - size = * " ] <nl> + + INCOMPATIBLE_FLAGS_PER_VARIANT [ " jitless " ] , <nl> + " predictable " : [ " - - liftoff " , " - - parallel - compile - tasks " , <nl> + " - - concurrent - recompilation " , <nl> + " - - wasm - num - compilation - tasks = * " ] , <nl> + } <nl> + <nl> + # Flags that lead to a contradiction when a certain extra - flag is present . <nl> + # Such extra - flags are defined for example in infra / testing / builders . pyl or in <nl> + # standard_runner . py . <nl> + # The conflicts might be directly contradictory flags or be caused by the <nl> + # implications defined in flag - definitions . h . 
<nl> + INCOMPATIBLE_FLAGS_PER_EXTRA_FLAG = { <nl> + " - - concurrent - recompilation " : [ " - - no - concurrent - recompilation " , " - - predictable " ] , <nl> + " - - enable - armv8 " : [ " - - no - enable - armv8 " ] , <nl> + " - - gc - interval = * " : [ " - - gc - interval = * " ] , <nl> + " - - no - enable - sse3 " : [ " - - enable - sse3 " ] , <nl> + " - - no - enable - sse4 - 1 " : [ " - - enable - sse4 - 1 " ] , <nl> + " - - optimize - for - size " : [ " - - max - semi - space - size = * " ] , <nl> + " - - stress - flush - bytecode " : [ " - - no - stress - flush - bytecode " ] , <nl> + " - - stress - incremental - marking " : INCOMPATIBLE_FLAGS_PER_VARIANT [ " stress_incremental_marking " ] , <nl> + } <nl> + <nl> SLOW_VARIANTS = set ( [ <nl> ' stress ' , <nl> ' stress_snapshot ' , <nl> mmm a / tools / testrunner / objects / testcase . py <nl> ppp b / tools / testrunner / objects / testcase . py <nl> <nl> from . . local import command <nl> from . . local import statusfile <nl> from . . local import utils <nl> + from . . local . variants import INCOMPATIBLE_FLAGS_PER_VARIANT <nl> + from . . local . variants import INCOMPATIBLE_FLAGS_PER_BUILD_VARIABLE <nl> + from . . local . variants import INCOMPATIBLE_FLAGS_PER_EXTRA_FLAG <nl> + <nl> <nl> FLAGS_PATTERN = re . compile ( r " / / \ s + Flags : ( . * ) " ) <nl> <nl> def __init__ ( self , suite , path , name , test_config ) : <nl> <nl> # Outcomes <nl> self . _statusfile_outcomes = None <nl> - self . expected_outcomes = None <nl> + self . _expected_outcomes = None <nl> + self . _checked_flag_contradictions = False <nl> self . _statusfile_flags = None <nl> <nl> self . _prepare_outcomes ( ) <nl> def not_flag ( outcome ) : <nl> outcomes = self . suite . statusfile . get_outcomes ( self . name , self . variant ) <nl> self . _statusfile_outcomes = filter ( not_flag , outcomes ) <nl> self . _statusfile_flags = filter ( is_flag , outcomes ) <nl> - self . expected_outcomes = ( <nl> + self . _expected_outcomes = ( <nl> self . _parse_status_file_outcomes ( self . _statusfile_outcomes ) ) <nl> <nl> def _parse_status_file_outcomes ( self , outcomes ) : <nl> def _parse_status_file_outcomes ( self , outcomes ) : <nl> return outproc . OUTCOMES_FAIL <nl> return expected_outcomes or outproc . OUTCOMES_PASS <nl> <nl> + @ property <nl> + def expected_outcomes ( self ) : <nl> + def normalize_flag ( flag ) : <nl> + return flag . replace ( " _ " , " - " ) . replace ( " - - no - " , " - - no " ) <nl> + <nl> + def has_flag ( conflicting_flag , flags ) : <nl> + conflicting_flag = normalize_flag ( conflicting_flag ) <nl> + if conflicting_flag in flags : <nl> + return True <nl> + if conflicting_flag . endswith ( " * " ) : <nl> + return any ( flag . startswith ( conflicting_flag [ : - 1 ] ) for flag in flags ) <nl> + return False <nl> + <nl> + if not self . _checked_flag_contradictions : <nl> + self . _checked_flag_contradictions = True <nl> + <nl> + file_specific_flags = ( self . _get_source_flags ( ) + self . _get_suite_flags ( ) <nl> + + self . _get_statusfile_flags ( ) ) <nl> + file_specific_flags = [ normalize_flag ( flag ) for flag in file_specific_flags ] <nl> + extra_flags = [ normalize_flag ( flag ) for flag in self . _get_extra_flags ( ) ] <nl> + <nl> + incompatible_flags = [ ] <nl> + <nl> + if self . variant in INCOMPATIBLE_FLAGS_PER_VARIANT : <nl> + incompatible_flags + = INCOMPATIBLE_FLAGS_PER_VARIANT [ self . variant ] <nl> + <nl> + for variable , flags in INCOMPATIBLE_FLAGS_PER_BUILD_VARIABLE . items ( ) : <nl> + if self . suite . statusfile . 
variables [ variable ] : <nl> + incompatible_flags + = flags <nl> + <nl> + for extra_flag , flags in INCOMPATIBLE_FLAGS_PER_EXTRA_FLAG . items ( ) : <nl> + if has_flag ( extra_flag , extra_flags ) : <nl> + incompatible_flags + = flags <nl> + <nl> + for incompatible_flag in incompatible_flags : <nl> + if has_flag ( incompatible_flag , file_specific_flags ) : <nl> + self . _expected_outcomes = outproc . OUTCOMES_FAIL <nl> + return self . _expected_outcomes <nl> + <nl> @ property <nl> def do_skip ( self ) : <nl> return ( statusfile . SKIP in self . _statusfile_outcomes and <nl> | Reland " [ flags ] warn about contradictory flags " | v8/v8 | d8f8a7e2105305921ca31450ecc38e009bc075c3 | 2020-07-22T12:22:03Z |
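The bulk of the v8/v8 change above is mechanical (dropping `--random-seed` and other pinned flags from regression tests); the interesting part is the contradiction machinery added to the Python test runner: flags are normalized so the `_`/`-` and `--no-x`/`--nox` spellings compare equal, and an incompatible-flag entry ending in `*` matches any concrete flag sharing that prefix. The C++ sketch below re-implements just that matching logic for illustration; it is not the project's code, and the flag strings in `main` are made up.

```cpp
#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

// Mirror of the runner's normalize_flag: unify '_'/'-' spelling and collapse
// the "--no-" prefix to "--no" so both negation spellings compare equal.
std::string NormalizeFlag(std::string flag) {
  std::replace(flag.begin(), flag.end(), '_', '-');
  const std::string kNoDash = "--no-";
  if (flag.compare(0, kNoDash.size(), kNoDash) == 0)
    flag = "--no" + flag.substr(kNoDash.size());
  return flag;
}

// A conflicting flag ending in '*' matches any concrete flag that starts with
// the prefix before the '*', e.g. "--gc-interval=*" vs "--gc-interval=207".
bool HasFlag(std::string conflicting, const std::vector<std::string>& flags) {
  conflicting = NormalizeFlag(conflicting);
  if (!conflicting.empty() && conflicting.back() == '*') {
    const std::string prefix = conflicting.substr(0, conflicting.size() - 1);
    return std::any_of(flags.begin(), flags.end(), [&](const std::string& f) {
      return f.compare(0, prefix.size(), prefix) == 0;
    });
  }
  return std::find(flags.begin(), flags.end(), conflicting) != flags.end();
}

int main() {
  // Flags scraped from a hypothetical test file, already normalized.
  std::vector<std::string> file_flags = {NormalizeFlag("--random-seed=17"),
                                         NormalizeFlag("--allow-natives-syntax")};
  // A wildcard entry from a hypothetical incompatible-flags list.
  std::cout << std::boolalpha << HasFlag("--random-seed=*", file_flags) << "\n";  // true
}
```

When a match like this is found against a test's file-specific flags, the runner marks the test as expected to fail rather than silently running a contradictory flag configuration.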
mmm a / src / core / ext / filters / client_channel / client_channel . cc <nl> ppp b / src / core / ext / filters / client_channel / client_channel . cc <nl> typedef struct client_channel_channel_data { <nl> grpc_core : : UniquePtr < char > info_service_config_json ; <nl> / * backpointer to grpc_channel ' s channelz node * / <nl> grpc_core : : channelz : : ClientChannelNode * channelz_channel ; <nl> + / * caches if the last resolution event contained addresses * / <nl> + bool previous_resolution_contained_addresses ; <nl> } channel_data ; <nl> <nl> typedef struct { <nl> static void request_reresolution_locked ( void * arg , grpc_error * error ) { <nl> chand - > lb_policy - > SetReresolutionClosureLocked ( & args - > closure ) ; <nl> } <nl> <nl> + using TraceStringVector = grpc_core : : InlinedVector < char * , 3 > ; <nl> + <nl> / / Creates a new LB policy , replacing any previous one . <nl> / / If the new policy is created successfully , sets * connectivity_state and <nl> / / * connectivity_error to its initial connectivity state ; otherwise , <nl> static void request_reresolution_locked ( void * arg , grpc_error * error ) { <nl> static void create_new_lb_policy_locked ( <nl> channel_data * chand , char * lb_policy_name , <nl> grpc_connectivity_state * connectivity_state , <nl> - grpc_error * * connectivity_error ) { <nl> + grpc_error * * connectivity_error , TraceStringVector * trace_strings ) { <nl> grpc_core : : LoadBalancingPolicy : : Args lb_policy_args ; <nl> lb_policy_args . combiner = chand - > combiner ; <nl> lb_policy_args . client_channel_factory = chand - > client_channel_factory ; <nl> static void create_new_lb_policy_locked ( <nl> lb_policy_name , lb_policy_args ) ; <nl> if ( GPR_UNLIKELY ( new_lb_policy = = nullptr ) ) { <nl> gpr_log ( GPR_ERROR , " could not create LB policy \ " % s \ " " , lb_policy_name ) ; <nl> + if ( chand - > channelz_channel ! = nullptr ) { <nl> + char * str ; <nl> + gpr_asprintf ( & str , " Could not create LB policy \ ' % s \ ' " , lb_policy_name ) ; <nl> + trace_strings - > push_back ( str ) ; <nl> + } <nl> } else { <nl> if ( grpc_client_channel_trace . enabled ( ) ) { <nl> gpr_log ( GPR_INFO , " chand = % p : created new LB policy \ " % s \ " ( % p ) " , chand , <nl> lb_policy_name , new_lb_policy . get ( ) ) ; <nl> } <nl> + if ( chand - > channelz_channel ! = nullptr ) { <nl> + char * str ; <nl> + gpr_asprintf ( & str , " Created new LB policy \ ' % s \ ' " , lb_policy_name ) ; <nl> + trace_strings - > push_back ( str ) ; <nl> + } <nl> / / Swap out the LB policy and update the fds in <nl> / / chand - > interested_parties . <nl> if ( chand - > lb_policy ! = nullptr ) { <nl> get_service_config_from_resolver_result_locked ( channel_data * chand ) { <nl> return grpc_core : : UniquePtr < char > ( gpr_strdup ( service_config_json ) ) ; <nl> } <nl> <nl> + static void maybe_add_trace_message_for_address_changes_locked ( <nl> + channel_data * chand , TraceStringVector * trace_strings ) { <nl> + int resolution_contains_addresses = false ; <nl> + const grpc_arg * channel_arg = <nl> + grpc_channel_args_find ( chand - > resolver_result , GRPC_ARG_LB_ADDRESSES ) ; <nl> + if ( channel_arg ! = nullptr & & channel_arg - > type = = GRPC_ARG_POINTER ) { <nl> + grpc_lb_addresses * addresses = <nl> + static_cast < grpc_lb_addresses * > ( channel_arg - > value . pointer . p ) ; <nl> + if ( addresses - > num_addresses > 0 ) { <nl> + resolution_contains_addresses = true ; <nl> + } <nl> + } <nl> + if ( ! 
resolution_contains_addresses & & <nl> + chand - > previous_resolution_contained_addresses ) { <nl> + trace_strings - > push_back ( gpr_strdup ( " Address list became empty " ) ) ; <nl> + } else if ( resolution_contains_addresses & & <nl> + ! chand - > previous_resolution_contained_addresses ) { <nl> + trace_strings - > push_back ( gpr_strdup ( " Address list became non - empty " ) ) ; <nl> + } <nl> + chand - > previous_resolution_contained_addresses = <nl> + resolution_contains_addresses ; <nl> + } <nl> + <nl> + static void concatenate_and_add_channel_trace_locked ( <nl> + channel_data * chand , TraceStringVector * trace_strings ) { <nl> + if ( ! trace_strings - > empty ( ) ) { <nl> + gpr_strvec v ; <nl> + gpr_strvec_init ( & v ) ; <nl> + gpr_strvec_add ( & v , gpr_strdup ( " Resolution event : " ) ) ; <nl> + bool is_first = 1 ; <nl> + for ( size_t i = 0 ; i < trace_strings - > size ( ) ; + + i ) { <nl> + if ( ! is_first ) gpr_strvec_add ( & v , gpr_strdup ( " , " ) ) ; <nl> + is_first = false ; <nl> + gpr_strvec_add ( & v , ( * trace_strings ) [ i ] ) ; <nl> + } <nl> + char * flat ; <nl> + size_t flat_len = 0 ; <nl> + flat = gpr_strvec_flatten ( & v , & flat_len ) ; <nl> + chand - > channelz_channel - > AddTraceEvent ( <nl> + grpc_core : : channelz : : ChannelTrace : : Severity : : Info , <nl> + grpc_slice_new ( flat , flat_len , gpr_free ) ) ; <nl> + gpr_strvec_destroy ( & v ) ; <nl> + } <nl> + } <nl> + <nl> / / Callback invoked when a resolver result is available . <nl> static void on_resolver_result_changed_locked ( void * arg , grpc_error * error ) { <nl> channel_data * chand = static_cast < channel_data * > ( arg ) ; <nl> static void on_resolver_result_changed_locked ( void * arg , grpc_error * error ) { <nl> } <nl> / / Data used to set the channel ' s connectivity state . <nl> bool set_connectivity_state = true ; <nl> + / / We only want to trace the address resolution in the follow cases : <nl> + / / ( a ) Address resolution resulted in service config change . <nl> + / / ( b ) Address resolution that causes number of backends to go from <nl> + / / zero to non - zero . <nl> + / / ( c ) Address resolution that causes number of backends to go from <nl> + / / non - zero to zero . <nl> + / / ( d ) Address resolution that causes a new LB policy to be created . <nl> + / / <nl> + / / we track a list of strings to eventually be concatenated and traced . <nl> + TraceStringVector trace_strings ; <nl> grpc_connectivity_state connectivity_state = GRPC_CHANNEL_TRANSIENT_FAILURE ; <nl> grpc_error * connectivity_error = <nl> GRPC_ERROR_CREATE_FROM_STATIC_STRING ( " No load balancing policy " ) ; <nl> static void on_resolver_result_changed_locked ( void * arg , grpc_error * error ) { <nl> } else { <nl> / / Instantiate new LB policy . <nl> create_new_lb_policy_locked ( chand , lb_policy_name . get ( ) , <nl> - & connectivity_state , & connectivity_error ) ; <nl> + & connectivity_state , & connectivity_error , <nl> + & trace_strings ) ; <nl> } <nl> / / Find service config . <nl> grpc_core : : UniquePtr < char > service_config_json = <nl> get_service_config_from_resolver_result_locked ( chand ) ; <nl> + / / Note : It ' s safe to use chand - > info_service_config_json here without <nl> + / / taking a lock on chand - > info_mu , because this function is the <nl> + / / only thing that modifies its value , and it can only be invoked <nl> + / / once at any given time . <nl> + if ( chand - > channelz_channel ! = nullptr ) { <nl> + if ( ( ( service_config_json = = nullptr ) ! 
= <nl> + ( chand - > info_service_config_json = = nullptr ) ) | | <nl> + ( service_config_json ! = nullptr & & <nl> + strcmp ( service_config_json . get ( ) , <nl> + chand - > info_service_config_json . get ( ) ) ! = 0 ) ) { <nl> + / / TODO ( ncteisen ) : might be worth somehow including a snippet of the <nl> + / / config in the trace , at the risk of bloating the trace logs . <nl> + trace_strings . push_back ( gpr_strdup ( " Service config changed " ) ) ; <nl> + } <nl> + maybe_add_trace_message_for_address_changes_locked ( chand , & trace_strings ) ; <nl> + concatenate_and_add_channel_trace_locked ( chand , & trace_strings ) ; <nl> + } <nl> / / Swap out the data used by cc_get_channel_info ( ) . <nl> gpr_mu_lock ( & chand - > info_mu ) ; <nl> chand - > info_lb_policy_name = std : : move ( lb_policy_name ) ; <nl> static grpc_error * cc_init_channel_elem ( grpc_channel_element * elem , <nl> arg = grpc_channel_args_find ( args - > channel_args , GRPC_ARG_ENABLE_RETRIES ) ; <nl> chand - > enable_retries = grpc_channel_arg_get_bool ( arg , true ) ; <nl> chand - > channelz_channel = nullptr ; <nl> + chand - > previous_resolution_contained_addresses = false ; <nl> / / Record client channel factory . <nl> arg = grpc_channel_args_find ( args - > channel_args , <nl> GRPC_ARG_CLIENT_CHANNEL_FACTORY ) ; <nl> mmm a / src / core / lib / channel / channel_trace . cc <nl> ppp b / src / core / lib / channel / channel_trace . cc <nl> void ChannelTrace : : AddTraceEventHelper ( TraceEvent * new_trace_event ) { <nl> } <nl> <nl> void ChannelTrace : : AddTraceEvent ( Severity severity , grpc_slice data ) { <nl> - if ( max_event_memory_ = = 0 ) <nl> + if ( max_event_memory_ = = 0 ) { <nl> + grpc_slice_unref_internal ( data ) ; <nl> return ; / / tracing is disabled if max_event_memory_ = = 0 <nl> + } <nl> AddTraceEventHelper ( New < TraceEvent > ( severity , data ) ) ; <nl> } <nl> <nl> void ChannelTrace : : AddTraceEventWithReference ( <nl> Severity severity , grpc_slice data , <nl> RefCountedPtr < BaseNode > referenced_entity ) { <nl> - if ( max_event_memory_ = = 0 ) <nl> + if ( max_event_memory_ = = 0 ) { <nl> + grpc_slice_unref_internal ( data ) ; <nl> return ; / / tracing is disabled if max_event_memory_ = = 0 <nl> + } <nl> / / create and fill up the new event <nl> AddTraceEventHelper ( <nl> New < TraceEvent > ( severity , data , std : : move ( referenced_entity ) ) ) ; <nl> mmm a / test / core / end2end / tests / channelz . cc <nl> ppp b / test / core / end2end / tests / channelz . cc <nl> static void test_channelz_with_channel_trace ( grpc_end2end_test_config config ) { <nl> grpc_server_get_channelz_node ( f . server ) ; <nl> GPR_ASSERT ( channelz_server ! = nullptr ) ; <nl> <nl> + run_one_request ( config , f , true ) ; <nl> + <nl> char * json = channelz_channel - > RenderJsonString ( ) ; <nl> GPR_ASSERT ( json ! = nullptr ) ; <nl> gpr_log ( GPR_INFO , " % s " , json ) ; <nl> mmm a / test / core / end2end / tests / retry_streaming . cc <nl> ppp b / test / core / end2end / tests / retry_streaming . cc <nl> <nl> # include < grpc / support / string_util . h > <nl> # include < grpc / support / time . h > <nl> <nl> + # include " src / core / lib / surface / channel . h " <nl> + # include " src / core / lib / surface / server . h " <nl> + <nl> # include " src / core / lib / channel / channel_args . h " <nl> # include " src / core / lib / gpr / string . h " <nl> # include " src / core / lib / gpr / useful . 
h " <nl> static void test_retry_streaming ( grpc_end2end_test_config config ) { <nl> int was_cancelled = 2 ; <nl> char * peer ; <nl> <nl> - grpc_arg arg ; <nl> - arg . type = GRPC_ARG_STRING ; <nl> - arg . key = const_cast < char * > ( GRPC_ARG_SERVICE_CONFIG ) ; <nl> - arg . value . string = const_cast < char * > ( <nl> - " { \ n " <nl> - " \ " methodConfig \ " : [ { \ n " <nl> - " \ " name \ " : [ \ n " <nl> - " { \ " service \ " : \ " service \ " , \ " method \ " : \ " method \ " } \ n " <nl> - " ] , \ n " <nl> - " \ " retryPolicy \ " : { \ n " <nl> - " \ " maxAttempts \ " : 3 , \ n " <nl> - " \ " initialBackoff \ " : \ " 1s \ " , \ n " <nl> - " \ " maxBackoff \ " : \ " 120s \ " , \ n " <nl> - " \ " backoffMultiplier \ " : 1 . 6 , \ n " <nl> - " \ " retryableStatusCodes \ " : [ \ " ABORTED \ " ] \ n " <nl> - " } \ n " <nl> - " } ] \ n " <nl> - " } " ) ; <nl> - grpc_channel_args client_args = { 1 , & arg } ; <nl> + grpc_arg args [ ] = { <nl> + grpc_channel_arg_integer_create ( <nl> + const_cast < char * > ( GRPC_ARG_MAX_CHANNEL_TRACE_EVENT_MEMORY_PER_NODE ) , <nl> + 1024 * 8 ) , <nl> + grpc_channel_arg_integer_create ( <nl> + const_cast < char * > ( GRPC_ARG_ENABLE_CHANNELZ ) , true ) , <nl> + grpc_channel_arg_string_create ( <nl> + const_cast < char * > ( GRPC_ARG_SERVICE_CONFIG ) , <nl> + const_cast < char * > ( <nl> + " { \ n " <nl> + " \ " methodConfig \ " : [ { \ n " <nl> + " \ " name \ " : [ \ n " <nl> + " { \ " service \ " : \ " service \ " , \ " method \ " : \ " method \ " } \ n " <nl> + " ] , \ n " <nl> + " \ " retryPolicy \ " : { \ n " <nl> + " \ " maxAttempts \ " : 3 , \ n " <nl> + " \ " initialBackoff \ " : \ " 1s \ " , \ n " <nl> + " \ " maxBackoff \ " : \ " 120s \ " , \ n " <nl> + " \ " backoffMultiplier \ " : 1 . 6 , \ n " <nl> + " \ " retryableStatusCodes \ " : [ \ " ABORTED \ " ] \ n " <nl> + " } \ n " <nl> + " } ] \ n " <nl> + " } " ) ) } ; <nl> + grpc_channel_args client_args = { GPR_ARRAY_SIZE ( args ) , args } ; <nl> grpc_end2end_test_fixture f = <nl> begin_test ( config , " retry_streaming " , & client_args , nullptr ) ; <nl> <nl> static void test_retry_streaming ( grpc_end2end_test_config config ) { <nl> c = grpc_channel_create_call ( f . client , nullptr , GRPC_PROPAGATE_DEFAULTS , f . cq , <nl> grpc_slice_from_static_string ( " / service / method " ) , <nl> nullptr , deadline , nullptr ) ; <nl> + grpc_core : : channelz : : ChannelNode * channelz_channel = <nl> + grpc_channel_get_channelz_node ( f . client ) ; <nl> + <nl> GPR_ASSERT ( c ) ; <nl> <nl> peer = grpc_call_get_peer ( c ) ; <nl> static void test_retry_streaming ( grpc_end2end_test_config config ) { <nl> GPR_ASSERT ( 0 = = call_details . flags ) ; <nl> GPR_ASSERT ( was_cancelled = = 1 ) ; <nl> <nl> + GPR_ASSERT ( channelz_channel ! = nullptr ) ; <nl> + char * json = channelz_channel - > RenderJsonString ( ) ; <nl> + GPR_ASSERT ( json ! = nullptr ) ; <nl> + gpr_log ( GPR_INFO , " % s " , json ) ; <nl> + GPR_ASSERT ( nullptr ! = strstr ( json , " \ " trace \ " " ) ) ; <nl> + GPR_ASSERT ( nullptr ! = strstr ( json , " \ " description \ " : \ " Channel created \ " " ) ) ; <nl> + GPR_ASSERT ( nullptr ! = strstr ( json , " \ " severity \ " : \ " CT_INFO \ " " ) ) ; <nl> + GPR_ASSERT ( nullptr ! = strstr ( json , " Resolution event " ) ) ; <nl> + GPR_ASSERT ( nullptr ! = strstr ( json , " Created new LB policy " ) ) ; <nl> + GPR_ASSERT ( nullptr ! = strstr ( json , " Service config changed " ) ) ; <nl> + GPR_ASSERT ( nullptr ! 
= strstr ( json , " Address list became non - empty " ) ) ; <nl> + GPR_ASSERT ( nullptr ! = strstr ( json , " Channel state change to CONNECTING " ) ) ; <nl> + gpr_free ( json ) ; <nl> + <nl> grpc_slice_unref ( details ) ; <nl> grpc_metadata_array_destroy ( & initial_metadata_recv ) ; <nl> grpc_metadata_array_destroy ( & trailing_metadata_recv ) ; <nl> static void test_retry_streaming ( grpc_end2end_test_config config ) { <nl> <nl> void retry_streaming ( grpc_end2end_test_config config ) { <nl> GPR_ASSERT ( config . feature_mask & FEATURE_MASK_SUPPORTS_CLIENT_CHANNEL ) ; <nl> + <nl> test_retry_streaming ( config ) ; <nl> } <nl> <nl> | Merge pull request from ncteisen / more - channel - tracing | grpc/grpc | 14a09c4849c15cb07d12b5e0c4cf611b68f3bcba | 2018-10-19T19:26:45Z |
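The client_channel part of the change above is about observability: each resolution event collects short human-readable strings (new LB policy created, service config changed, address list becoming empty or non-empty) and folds them into a single channelz trace line, while the companion channel_trace.cc fix unrefs the slice on the early-return path so disabled tracing no longer leaks it. Below is a simplified stand-in for the concatenation step using std::string instead of gpr_strvec and owned char* buffers; the policy name in main is hypothetical.

```cpp
#include <iostream>
#include <string>
#include <vector>

// Joins individual resolution observations into one trace line, the way the
// real concatenate_and_add_channel_trace_locked builds its gpr_strvec, e.g.
// "Resolution event: Created new LB policy 'pick_first', Service config changed".
std::string BuildResolutionTraceLine(const std::vector<std::string>& trace_strings) {
  if (trace_strings.empty()) return "";  // nothing noteworthy -> no trace event
  std::string line = "Resolution event: ";
  for (size_t i = 0; i < trace_strings.size(); ++i) {
    if (i > 0) line += ", ";
    line += trace_strings[i];
  }
  return line;
}

int main() {
  std::vector<std::string> trace_strings = {
      "Created new LB policy 'pick_first'",  // hypothetical policy name
      "Service config changed",
      "Address list became non-empty"};
  std::cout << BuildResolutionTraceLine(trace_strings) << "\n";
}
```

In the real code the flattened buffer is handed to AddTraceEvent as a grpc_slice, which is exactly why the early return when `max_event_memory_ == 0` had to start releasing the slice it was given.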
mmm a / lib / Sema / CSApply . cpp <nl> ppp b / lib / Sema / CSApply . cpp <nl> Expr * ExprRewriter : : coerceExistential ( Expr * expr , Type toType , <nl> Type toInstanceType = toType ; <nl> <nl> / / Look through metatypes <nl> - while ( fromInstanceType - > is < AnyMetatypeType > ( ) & & <nl> + while ( ( fromInstanceType - > is < UnresolvedType > ( ) | | <nl> + fromInstanceType - > is < AnyMetatypeType > ( ) ) & & <nl> toInstanceType - > is < ExistentialMetatypeType > ( ) ) { <nl> - fromInstanceType = fromInstanceType - > castTo < AnyMetatypeType > ( ) - > getInstanceType ( ) ; <nl> + if ( ! fromInstanceType - > is < UnresolvedType > ( ) ) <nl> + fromInstanceType = fromInstanceType - > castTo < AnyMetatypeType > ( ) - > getInstanceType ( ) ; <nl> toInstanceType = toInstanceType - > castTo < ExistentialMetatypeType > ( ) - > getInstanceType ( ) ; <nl> } <nl> <nl> mmm a / test / Constraints / diagnostics . swift <nl> ppp b / test / Constraints / diagnostics . swift <nl> func badTypes ( ) { <nl> / / expected - error @ - 1 { { type of expression is ambiguous without more context } } <nl> / / FIXME : terrible diagnostic <nl> } <nl> + <nl> + / / rdar : / / 34357545 <nl> + func unresolvedTypeExistential ( ) - > Bool { <nl> + return ( Int . self = = _ { } ) <nl> + / / expected - error @ - 1 { { ambiguous reference to member ' = = ' } } <nl> + } <nl> similarity index 86 % <nl> rename from validation - test / compiler_crashers / 28739 - unreachable - executed - at - swift - lib - ast - type - cpp - 229 . swift <nl> rename to validation - test / compiler_crashers_fixed / 28739 - unreachable - executed - at - swift - lib - ast - type - cpp - 229 . swift <nl> mmm a / validation - test / compiler_crashers / 28739 - unreachable - executed - at - swift - lib - ast - type - cpp - 229 . swift <nl> ppp b / validation - test / compiler_crashers_fixed / 28739 - unreachable - executed - at - swift - lib - ast - type - cpp - 229 . swift <nl> <nl> / / See https : / / swift . org / LICENSE . txt for license information <nl> / / See https : / / swift . org / CONTRIBUTORS . txt for the list of Swift project authors <nl> <nl> - / / RUN : not - - crash % target - swift - frontend % s - emit - ir <nl> + / / RUN : not % target - swift - frontend % s - emit - ir <nl> ( Int = = _ { <nl> | Sema : Fix crash with UnresolvedType in coerceExistential ( ) | apple/swift | 4fa4133134446a7d56f3da81ab655b268569e02e | 2018-07-11T00:01:12Z |
mmm a / version <nl> ppp b / version <nl> @ @ - 1 + 1 @ @ <nl> - 13 . 0 . 2 <nl> + 13 . 0 . 3 <nl> | Version 13 . 0 . 3 | pqrs-org/Karabiner-Elements | b858e99f505b7f327bfdf97955a5583bea5b4f91 | 2020-10-12T15:25:27Z |
mmm a / test / core / transport / metadata_test . c <nl> ppp b / test / core / transport / metadata_test . c <nl> static void test_things_stick_around ( void ) { <nl> size_t i , j ; <nl> char * buffer ; <nl> size_t nstrs = 1000 ; <nl> - grpc_slice * strs = gpr_malloc ( sizeof ( grpc_slice * ) * nstrs ) ; <nl> + grpc_slice * strs = gpr_malloc ( sizeof ( grpc_slice ) * nstrs ) ; <nl> size_t * shuf = gpr_malloc ( sizeof ( size_t ) * nstrs ) ; <nl> grpc_slice test ; <nl> <nl> | Fix mis - sized array | grpc/grpc | 58317fcc8900bc0dceb78b397bc14fc026eff035 | 2016-11-17T21:39:09Z |
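The one-character bug fixed above is the classic `sizeof(T*)` vs `sizeof(T)` mix-up: sizing an array of structs by the pointer type under-allocates, and writes past the first few elements land out of bounds. A minimal reproduction of the pattern and the safer `sizeof(*ptr)` idiom, using a made-up stand-in struct rather than the real grpc_slice:

```cpp
#include <cstdio>
#include <cstdlib>

struct grpc_slice_like { char data[24]; };  // stand-in; the point is it is larger than a pointer

int main() {
  size_t nstrs = 1000;

  // Bug pattern from the original test: sizing by the *pointer* type.
  // On a 64-bit build this reserves 8000 bytes instead of 24000.
  grpc_slice_like* bad = (grpc_slice_like*)malloc(sizeof(grpc_slice_like*) * nstrs);

  // Fixed pattern: size by the element type -- or, more robustly, by the
  // dereferenced destination pointer, so the expression cannot drift out of
  // sync with the declaration.
  grpc_slice_like* good = (grpc_slice_like*)malloc(nstrs * sizeof(*good));

  printf("bad=%zu bytes, good=%zu bytes\n",
         sizeof(grpc_slice_like*) * nstrs, nstrs * sizeof(*good));
  free(bad);
  free(good);
}
```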
mmm a / vnext / Desktop . DLL / Version . rc <nl> ppp b / vnext / Desktop . DLL / Version . rc <nl> <nl> # include < winver . h > <nl> <nl> - # define VER_FILEVERSION 3 , 10 , 349 , 0 <nl> - # define VER_FILEVERSION_STR " 3 . 10 . 349 . 0 \ 0 " <nl> + # define VER_FILEVERSION 0 , 58 , 0 , 156 <nl> + # define VER_FILEVERSION_STR " 0 . 58 . 0 - vnext . 156 " <nl> <nl> # ifndef DEBUG <nl> # define VER_DEBUG 0 <nl> mmm a / vnext / package . json <nl> ppp b / vnext / package . json <nl> <nl> { <nl> " name " : " react - native - windows " , <nl> - " version " : " 0 . 58 . 0 - vnext . 155 " , <nl> + " version " : " 0 . 58 . 0 - vnext . 156 " , <nl> " license " : " MIT " , <nl> " repository " : { <nl> " type " : " git " , <nl> | Applying package update to 0 . 58 . 0 - vnext . 156 | microsoft/react-native-windows | e2299b17d15e293104178df846c45c4badf9a718 | 2019-06-07T23:51:21Z |
mmm a / Code / CryEngine / Cry3DEngine / 3dEngine . cpp <nl> ppp b / Code / CryEngine / Cry3DEngine / 3dEngine . cpp <nl> C3DEngine : : C3DEngine ( ISystem * pSystem ) <nl> m_bInUnload = false ; <nl> m_bInLoad = false ; <nl> <nl> - m_nCloudShadowTexId = 0 ; <nl> - <nl> m_pDeferredPhysicsEventManager = new CDeferredPhysicsEventManager ( ) ; <nl> <nl> # if defined ( USE_GEOM_CACHES ) <nl> mmm a / Code / CryEngine / Cry3DEngine / 3dEngine . h <nl> ppp b / Code / CryEngine / Cry3DEngine / 3dEngine . h <nl> class C3DEngine : public I3DEngine , public Cry3DEngineBase <nl> bool m_bSunShadows ; <nl> bool m_bSunShadowsFromTerrain ; <nl> <nl> - int m_nCloudShadowTexId ; <nl> + _smart_ptr < ITexture > m_pCloudShadowTex ; <nl> <nl> float m_fGsmRange ; <nl> float m_fGsmRangeStep ; <nl> mmm a / Code / CryEngine / Cry3DEngine / 3dEngineLoad . cpp <nl> ppp b / Code / CryEngine / Cry3DEngine / 3dEngineLoad . cpp <nl> void C3DEngine : : UnloadLevel ( ) <nl> for ( int skyTypeIdx = 0 ; skyTypeIdx < eSkyType_NumSkyTypes ; + + skyTypeIdx ) <nl> SAFE_RELEASE ( m_pSkyMat [ skyTypeIdx ] ) ; <nl> <nl> - if ( m_nCloudShadowTexId ) <nl> + if ( m_pCloudShadowTex . get ( ) ) <nl> { <nl> - ITexture * tex = GetRenderer ( ) - > EF_GetTextureByID ( m_nCloudShadowTexId ) ; <nl> - if ( tex ) <nl> - tex - > Release ( ) ; <nl> - <nl> - m_nCloudShadowTexId = 0 ; <nl> + m_pCloudShadowTex . reset ( ) ; <nl> GetRenderer ( ) - > SetCloudShadowsParams ( 0 , Vec3 ( 0 , 0 , 0 ) , 1 , false , 1 ) ; <nl> SetGlobalParameter ( E3DPARAM_VOLFOG_SHADOW_ENABLE , Vec3 ( 0 , 0 , 0 ) ) ; <nl> } <nl> void C3DEngine : : UpdateWindParams ( ) <nl> <nl> void C3DEngine : : UpdateCloudShadows ( ) <nl> { <nl> - / / load cloud shadow parameters <nl> - const auto & cloudParams = GetTimeOfDay ( ) - > GetCloudShadowsParams ( ) ; <nl> - <nl> - ITexture * pTex = 0 ; <nl> - if ( cloudParams . texture [ 0 ] ! = ' \ 0 ' & & GetRenderer ( ) ) <nl> - pTex = GetRenderer ( ) - > EF_LoadTexture ( cloudParams . texture , FT_DONT_STREAM ) ; <nl> - <nl> - m_nCloudShadowTexId = pTex ? pTex - > GetTextureID ( ) : 0 ; <nl> - <nl> if ( GetRenderer ( ) ) <nl> { <nl> - GetRenderer ( ) - > SetCloudShadowsParams ( m_nCloudShadowTexId , cloudParams . speed , cloudParams . tiling , cloudParams . invert , cloudParams . brightness ) ; <nl> + const auto & cloudParams = GetTimeOfDay ( ) - > GetCloudShadowsParams ( ) ; <nl> + <nl> + if ( ! m_pCloudShadowTex . get ( ) & & ! cloudParams . texture . empty ( ) ) <nl> + { <nl> + m_pCloudShadowTex . Assign_NoAddRef ( GetRenderer ( ) - > EF_LoadTexture ( cloudParams . texture . c_str ( ) , FT_DONT_STREAM ) ) ; <nl> + } <nl> + <nl> + const int textureId = m_pCloudShadowTex . get ( ) ? m_pCloudShadowTex - > GetTextureID ( ) : 0 ; <nl> + GetRenderer ( ) - > SetCloudShadowsParams ( textureId , cloudParams . speed , cloudParams . tiling , cloudParams . invert , cloudParams . brightness ) ; <nl> } <nl> } <nl> <nl> | ! B ( 3dEngine ) Fix cloud textures leak # review - 1949635 | CRYTEK/CRYENGINE | e97ef5cbb3069c5aba1ef4e14f06c124deb2efa6 | 2019-03-27T15:43:58Z |
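The 3dEngine fix above replaces a raw texture ID plus manual Release with an owning `_smart_ptr<ITexture>`: the texture is now loaded lazily once, adopted via `Assign_NoAddRef` (the loader already returns a reference the caller owns), and dropped automatically when the pointer is reset at level unload. The sketch below shows that adoption idiom with a toy intrusive reference count; `RefCounted`, `Texture` and `LoadTexture` are simplified stand-ins, not CryEngine's actual types.

```cpp
#include <cstdio>

struct RefCounted {
  int refs = 0;
  virtual ~RefCounted() = default;
  void AddRef() { ++refs; }
  void Release() { if (--refs == 0) delete this; }
};

struct Texture : RefCounted {
  ~Texture() override { std::puts("texture released"); }
};

template <typename T>
class smart_ptr {
 public:
  smart_ptr() = default;
  ~smart_ptr() { reset(); }
  smart_ptr(const smart_ptr&) = delete;
  smart_ptr& operator=(const smart_ptr&) = delete;

  // Adopt a pointer whose reference count was already bumped by the producer:
  // no extra AddRef here, mirroring Assign_NoAddRef in the diff.
  void Assign_NoAddRef(T* p) { reset(); ptr_ = p; }

  void reset() { if (ptr_) { ptr_->Release(); ptr_ = nullptr; } }
  T* get() const { return ptr_; }

 private:
  T* ptr_ = nullptr;
};

// Stand-in for a loader such as EF_LoadTexture: hands back a texture carrying
// one reference that the caller now owns.
Texture* LoadTexture() { Texture* t = new Texture(); t->AddRef(); return t; }

int main() {
  smart_ptr<Texture> cloud_shadow_tex;
  if (!cloud_shadow_tex.get())                        // load lazily, once
    cloud_shadow_tex.Assign_NoAddRef(LoadTexture());
  // ... use cloud_shadow_tex.get() while rendering ...
  cloud_shadow_tex.reset();                           // level unload: prints "texture released"
}
```

The design point is that ownership now lives in one place: the pointer either holds exactly one reference or none, so the "release only at unload, re-acquire elsewhere" imbalance that caused the leak cannot recur.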
mmm a / DEPS <nl> ppp b / DEPS <nl> deps = { <nl> ' v8 / build ' : <nl> Var ( ' chromium_url ' ) + ' / chromium / src / build . git ' + ' @ ' + ' dfca77bb0d1aee2eb7ab15d1550d5c4821b48811 ' , <nl> ' v8 / third_party / depot_tools ' : <nl> - Var ( ' chromium_url ' ) + ' / chromium / tools / depot_tools . git ' + ' @ ' + ' 0dc1fa046374656368b60805fad4a752d7716a21 ' , <nl> + Var ( ' chromium_url ' ) + ' / chromium / tools / depot_tools . git ' + ' @ ' + ' baf09271514cf3d535865e16ece89654220149cb ' , <nl> ' v8 / third_party / icu ' : <nl> Var ( ' chromium_url ' ) + ' / chromium / deps / icu . git ' + ' @ ' + ' 7ca3ffa77d635e44b9735e1b54fb9c4da3b6c821 ' , <nl> ' v8 / third_party / instrumented_libraries ' : <nl> deps = { <nl> ' condition ' : ' checkout_android ' , <nl> } , <nl> ' v8 / third_party / catapult ' : { <nl> - ' url ' : Var ( ' chromium_url ' ) + ' / catapult . git ' + ' @ ' + ' 218f46686f2f7e110a17aaa486becce46a99d5d6 ' , <nl> + ' url ' : Var ( ' chromium_url ' ) + ' / catapult . git ' + ' @ ' + ' 868fbbf41b33c51dd147cfe842b41b9fe0bc808e ' , <nl> } , <nl> ' v8 / third_party / colorama / src ' : { <nl> ' url ' : Var ( ' chromium_url ' ) + ' / external / colorama . git ' + ' @ ' + ' 799604a1041e9b3bc5d2789ecbd7e8db2e18e6b8 ' , <nl> | Update V8 DEPS . | v8/v8 | c9ef63e568b7f5e96d252da56b6a218fe7e39f65 | 2018-09-24T13:19:46Z |
mmm a / js / common / modules / org / arangodb / simple - query - common . js <nl> ppp b / js / common / modules / org / arangodb / simple - query - common . js <nl> SimpleQueryNear = function ( collection , latitude , longitude , iid ) { <nl> if ( this . _index = = = null ) { <nl> var err = new ArangoError ( ) ; <nl> err . errorNum = arangodb . ERROR_QUERY_GEO_INDEX_MISSING ; <nl> - err . errorMessage = arangodb . errors . ERROR_QUERY_GEO_INDEX_MISSING . message ; <nl> + err . errorMessage = require ( " internal " ) . sprintf ( arangodb . errors . ERROR_QUERY_GEO_INDEX_MISSING . message , collection . name ( ) ) ; <nl> throw err ; <nl> } <nl> } ; <nl> | Fix Errormessage : add collection name . | arangodb/arangodb | 651c77685c9a4f9eaf5243f94fd53dfda2019d5c | 2015-08-03T08:41:27Z |
mmm a / src / preamble . js <nl> ppp b / src / preamble . js <nl> function Pointer_stringify ( ptr , / * optional * / length ) { <nl> } <nl> Module [ ' Pointer_stringify ' ] = Pointer_stringify ; <nl> <nl> + / / Given a pointer ' ptr ' to a null - terminated ASCII - encoded string in the emscripten HEAP , returns <nl> + / / a copy of that string as a Javascript String object . <nl> + <nl> + function AsciiToString ( ptr ) { <nl> + var str = ' ' ; <nl> + while ( 1 ) { <nl> + var ch = { { { makeGetValue ( ' ptr + + ' , 0 , ' i8 ' ) } } } ; <nl> + if ( ! ch ) return str ; <nl> + str + = String . fromCharCode ( ch ) ; <nl> + } <nl> + } <nl> + Module [ ' AsciiToString ' ] = AsciiToString ; <nl> + <nl> + / / Copies the given Javascript String object ' str ' to the emscripten HEAP at address ' outPtr ' , <nl> + / / null - terminated and encoded in ASCII form . The copy will require at most str . length + 1 bytes of space in the HEAP . <nl> + <nl> + function stringToAscii ( str , outPtr ) { <nl> + for ( var i = 0 ; i < str . length ; + + i ) { <nl> + { { { makeSetValue ( ' outPtr + + ' , 0 , ' str . charCodeAt ( i ) ' , ' i8 ' ) } } } ; <nl> + } <nl> + / / Null - terminate the pointer to the HEAP . <nl> + { { { makeSetValue ( ' outPtr ' , 0 , 0 , ' i8 ' ) } } } ; <nl> + } <nl> + Module [ ' stringToAscii ' ] = stringToAscii ; <nl> + <nl> + / / Given a pointer ' ptr ' to a null - terminated UTF8 - encoded string in the emscripten HEAP , returns <nl> + / / a copy of that string as a Javascript String object . <nl> + <nl> + function UTF8ToString ( ptr ) { <nl> + var u0 , u1 , u2 , u3 , u4 , u5 ; <nl> + <nl> + var str = ' ' ; <nl> + while ( 1 ) { <nl> + u0 = { { { makeGetValue ( ' ptr + + ' , 0 , ' i8 ' ) } } } ; <nl> + if ( ! u0 ) return str ; <nl> + if ( ! ( u0 & 0x80 ) ) { str + = String . fromCharCode ( u0 ) ; continue ; } <nl> + u1 = { { { makeGetValue ( ' ptr + + ' , 0 , ' i8 ' ) } } } & 63 ; <nl> + if ( ( u0 & 0xE0 ) = = 0xC0 ) { str + = String . fromCharCode ( ( ( u0 & 31 ) < < 6 ) | u1 ) ; continue ; } <nl> + u2 = { { { makeGetValue ( ' ptr + + ' , 0 , ' i8 ' ) } } } & 63 ; <nl> + if ( ( u0 & 0xF0 ) = = 0xE0 ) { <nl> + u0 = ( ( u0 & 15 ) < < 12 ) | ( u1 < < 6 ) | u2 ; <nl> + } else { <nl> + u3 = { { { makeGetValue ( ' ptr + + ' , 0 , ' i8 ' ) } } } & 63 ; <nl> + if ( ( u0 & 0xF8 ) = = 0xF0 ) { <nl> + u0 = ( ( u0 & 7 ) < < 18 ) | ( u1 < < 12 ) | ( u2 < < 6 ) | u3 ; <nl> + } else { <nl> + u4 = { { { makeGetValue ( ' ptr + + ' , 0 , ' i8 ' ) } } } & 63 ; <nl> + if ( ( u0 & 0xFC ) = = 0xF8 ) { <nl> + u0 = ( ( u0 & 3 ) < < 24 ) | ( u1 < < 18 ) | ( u2 < < 12 ) | ( u3 < < 6 ) | u4 ; <nl> + } else { <nl> + u5 = { { { makeGetValue ( ' ptr + + ' , 0 , ' i8 ' ) } } } & 63 ; <nl> + u0 = ( ( u0 & 1 ) < < 30 ) | ( u1 < < 24 ) | ( u2 < < 18 ) | ( u3 < < 12 ) | ( u4 < < 6 ) | u5 ; <nl> + } <nl> + } <nl> + } <nl> + if ( u0 < 0x10000 ) { <nl> + str + = String . fromCharCode ( u0 ) ; <nl> + } else { <nl> + var ch = u0 - 0x10000 ; <nl> + str + = String . fromCharCode ( 0xD800 | ( ch > > 10 ) , 0xDC00 | ( ch & 0x3FF ) ) ; <nl> + } <nl> + } <nl> + } <nl> + Module [ ' UTF8ToString ' ] = UTF8ToString ; <nl> + <nl> + / / Copies the given Javascript String object ' str ' to the emscripten HEAP at address ' outPtr ' , <nl> + / / null - terminated and encoded in UTF8 form . The copy will require at most str . length * 6 + 1 bytes of space in the HEAP . <nl> + <nl> + function stringToUTF8 ( str , outPtr ) { <nl> + for ( var i = 0 ; i < str . 
length ; + + i ) { <nl> + / / Gotcha : charCodeAt returns a 16 - bit word that is a UTF - 16 encoded code unit , not a Unicode code point of the character ! So decode UTF16 - > UTF32 - > UTF8 . <nl> + var u = str . charCodeAt ( i ) ; / / possibly a lead surrogate <nl> + if ( u > = 0xD800 & & u < = 0xDFFF ) u = 0x10000 + ( ( u & 0x3FF ) < < 10 ) | ( str . charCodeAt ( + + i ) & 0x3FF ) ; <nl> + if ( u < = 0x7F ) { <nl> + { { { makeSetValue ( ' outPtr + + ' , 0 , ' u ' , ' i8 ' ) } } } ; <nl> + } else if ( u < = 0x7FF ) { <nl> + { { { makeSetValue ( ' outPtr + + ' , 0 , ' 0xC0 | ( u > > 6 ) ' , ' i8 ' ) } } } ; <nl> + { { { makeSetValue ( ' outPtr + + ' , 0 , ' 0x80 | ( u & 63 ) ' , ' i8 ' ) } } } ; <nl> + } else if ( u < = 0xFFFF ) { <nl> + { { { makeSetValue ( ' outPtr + + ' , 0 , ' 0xE0 | ( u > > 12 ) ' , ' i8 ' ) } } } ; <nl> + { { { makeSetValue ( ' outPtr + + ' , 0 , ' 0x80 | ( ( u > > 6 ) & 63 ) ' , ' i8 ' ) } } } ; <nl> + { { { makeSetValue ( ' outPtr + + ' , 0 , ' 0x80 | ( u & 63 ) ' , ' i8 ' ) } } } ; <nl> + } else if ( u < = 0x1FFFFF ) { <nl> + { { { makeSetValue ( ' outPtr + + ' , 0 , ' 0xF0 | ( u > > 18 ) ' , ' i8 ' ) } } } ; <nl> + { { { makeSetValue ( ' outPtr + + ' , 0 , ' 0x80 | ( ( u > > 12 ) & 63 ) ' , ' i8 ' ) } } } ; <nl> + { { { makeSetValue ( ' outPtr + + ' , 0 , ' 0x80 | ( ( u > > 6 ) & 63 ) ' , ' i8 ' ) } } } ; <nl> + { { { makeSetValue ( ' outPtr + + ' , 0 , ' 0x80 | ( u & 63 ) ' , ' i8 ' ) } } } ; <nl> + } else if ( u < = 0x3FFFFFF ) { <nl> + { { { makeSetValue ( ' outPtr + + ' , 0 , ' 0xF8 | ( u > > 24 ) ' , ' i8 ' ) } } } ; <nl> + { { { makeSetValue ( ' outPtr + + ' , 0 , ' 0x80 | ( ( u > > 18 ) & 63 ) ' , ' i8 ' ) } } } ; <nl> + { { { makeSetValue ( ' outPtr + + ' , 0 , ' 0x80 | ( ( u > > 12 ) & 63 ) ' , ' i8 ' ) } } } ; <nl> + { { { makeSetValue ( ' outPtr + + ' , 0 , ' 0x80 | ( ( u > > 6 ) & 63 ) ' , ' i8 ' ) } } } ; <nl> + { { { makeSetValue ( ' outPtr + + ' , 0 , ' 0x80 | ( u & 63 ) ' , ' i8 ' ) } } } ; <nl> + } else { <nl> + { { { makeSetValue ( ' outPtr + + ' , 0 , ' 0xFC | ( u > > 30 ) ' , ' i8 ' ) } } } ; <nl> + { { { makeSetValue ( ' outPtr + + ' , 0 , ' 0x80 | ( ( u > > 24 ) & 63 ) ' , ' i8 ' ) } } } ; <nl> + { { { makeSetValue ( ' outPtr + + ' , 0 , ' 0x80 | ( ( u > > 18 ) & 63 ) ' , ' i8 ' ) } } } ; <nl> + { { { makeSetValue ( ' outPtr + + ' , 0 , ' 0x80 | ( ( u > > 12 ) & 63 ) ' , ' i8 ' ) } } } ; <nl> + { { { makeSetValue ( ' outPtr + + ' , 0 , ' 0x80 | ( ( u > > 6 ) & 63 ) ' , ' i8 ' ) } } } ; <nl> + { { { makeSetValue ( ' outPtr + + ' , 0 , ' 0x80 | ( u & 63 ) ' , ' i8 ' ) } } } ; <nl> + } <nl> + } <nl> + / / Null - terminate the pointer to the HEAP . <nl> + { { { makeSetValue ( ' outPtr ' , 0 , 0 , ' i8 ' ) } } } ; <nl> + } <nl> + Module [ ' stringToUTF8 ' ] = stringToUTF8 ; <nl> + <nl> + / / Given a pointer ' ptr ' to a null - terminated UTF16LE - encoded string in the emscripten HEAP , returns <nl> + / / a copy of that string as a Javascript String object . <nl> + <nl> function UTF16ToString ( ptr ) { <nl> var i = 0 ; <nl> <nl> mmm a / tests / test_core . py <nl> ppp b / tests / test_core . py <nl> def test_utf32 ( self ) : <nl> self . do_run ( open ( path_from_root ( ' tests ' , ' utf32 . cpp ' ) ) . read ( ) , ' OK . ' ) <nl> self . do_run ( open ( path_from_root ( ' tests ' , ' utf32 . cpp ' ) ) . read ( ) , ' OK . ' , args = [ ' - fshort - wchar ' ] ) <nl> <nl> + def test_utf8 ( self ) : <nl> + if not self . is_emscripten_abi ( ) : return self . 
skip ( ' this test uses inline js , which requires asmjs - unknown - emscripten ' ) <nl> + <nl> + Building . COMPILER_TEST_OPTS + = [ ' - std = c + + 11 ' ] <nl> + self . do_run ( open ( path_from_root ( ' tests ' , ' utf8 . cpp ' ) ) . read ( ) , ' OK . ' ) <nl> + <nl> def test_wprintf ( self ) : <nl> if self . emcc_args is None : return self . skip ( ' requires libcxx ' ) <nl> test_path = path_from_root ( ' tests ' , ' core ' , ' test_wprintf ' ) <nl> new file mode 100644 <nl> index 00000000000 . . d1e2be04b2a <nl> mmm / dev / null <nl> ppp b / tests / utf8 . cpp <nl> <nl> + # include < stdio . h > <nl> + # include < string . h > <nl> + # include < wchar . h > <nl> + # include < iostream > <nl> + # include < cassert > <nl> + # include < emscripten . h > <nl> + <nl> + / / This code tests that Unicode std : : wstrings can be marshalled between C + + and JS . <nl> + int main ( ) { <nl> + const char asciiString [ ] = " Hello world ! " ; <nl> + char asciiString2 [ 128 ] = { } ; <nl> + EM_ASM_INT ( { <nl> + var str = Module . AsciiToString ( $ 0 ) ; <nl> + Module . print ( str ) ; <nl> + Module . stringToAscii ( str , $ 1 ) ; <nl> + } , asciiString , asciiString2 ) ; <nl> + assert ( ! strcmp ( asciiString , asciiString2 ) ) ; <nl> + <nl> + char asciiString3 [ 128 ] = { } ; <nl> + EM_ASM_INT ( { <nl> + var str = Module . UTF8ToString ( $ 0 ) ; <nl> + Module . print ( str ) ; <nl> + Module . stringToUTF8 ( str , $ 1 ) ; <nl> + } , asciiString , asciiString3 ) ; <nl> + assert ( ! strcmp ( asciiString , asciiString3 ) ) ; <nl> + <nl> + const char utf8String [ ] = u8 " Hyv \ u00E4 \ u00E4 p \ u00E4iv \ u00E4 \ u00E4 ! T \ u00F6 \ u00F6 \ u00F6 \ u00F6t ! abc \ u2603 \ u20AC \ U0002007C123 mmm abc \ u2603 \ u20AC \ U0002007C123 . " ; / / U + 2603 is snowman , U + 20AC is the Euro sign , U + 2007C is a Chinese Han character that looks like three raindrops . <nl> + char utf8String2 [ 128 ] = { } ; <nl> + EM_ASM_INT ( { <nl> + var str = Module . UTF8ToString ( $ 0 ) ; <nl> + Module . print ( str ) ; <nl> + Module . stringToUTF8 ( str , $ 1 ) ; <nl> + } , utf8String , utf8String2 ) ; <nl> + assert ( strlen ( utf8String ) = = strlen ( utf8String2 ) ) ; <nl> + for ( int i = 0 ; i < strlen ( utf8String ) + 1 ; + + i ) <nl> + if ( utf8String [ i ] ! = utf8String2 [ i ] ) <nl> + printf ( " i = % d : % u , % u \ n " , i , ( unsigned int ) ( unsigned char ) utf8String [ i ] , ( unsigned int ) ( unsigned char ) utf8String2 [ i ] ) ; <nl> + assert ( ! strcmp ( utf8String , utf8String2 ) ) ; <nl> + <nl> + printf ( " OK . \ n " ) ; <nl> + } <nl> | Add new functions AsciiToString , stringToAscii , UTF8ToString and stringToUTF8 to complete the set of string marshalling functions . | emscripten-core/emscripten | 78dc7a4fa80d49295ac7b18737567f3fe89fb3b6 | 2015-01-15T08:45:19Z |
mmm a / src / ia32 / stub - cache - ia32 . cc <nl> ppp b / src / ia32 / stub - cache - ia32 . cc <nl> Object * CallStubCompiler : : CompileStringCharCodeAtCall ( Object * object , <nl> / / - - esp [ ( argc + 1 ) * 4 ] : receiver <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> + / / If object is not a string , bail out to regular call . <nl> + if ( ! object - > IsString ( ) ) return Heap : : undefined_value ( ) ; <nl> + <nl> const int argc = arguments ( ) . immediate ( ) ; <nl> <nl> Label miss ; <nl> Object * CallStubCompiler : : CompileStringCharCodeAtCall ( Object * object , <nl> GenerateDirectLoadGlobalFunctionPrototype ( masm ( ) , <nl> Context : : STRING_FUNCTION_INDEX , <nl> eax ) ; <nl> + ASSERT ( object ! = holder ) ; <nl> CheckPrototypes ( JSObject : : cast ( object - > GetPrototype ( ) ) , eax , holder , <nl> ebx , edx , edi , name , & miss ) ; <nl> <nl> Object * CallStubCompiler : : CompileStringCharAtCall ( Object * object , <nl> / / - - esp [ ( argc + 1 ) * 4 ] : receiver <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> + / / If object is not a string , bail out to regular call . <nl> + if ( ! object - > IsString ( ) ) return Heap : : undefined_value ( ) ; <nl> + <nl> const int argc = arguments ( ) . immediate ( ) ; <nl> <nl> Label miss ; <nl> Object * CallStubCompiler : : CompileStringCharAtCall ( Object * object , <nl> GenerateDirectLoadGlobalFunctionPrototype ( masm ( ) , <nl> Context : : STRING_FUNCTION_INDEX , <nl> eax ) ; <nl> + ASSERT ( object ! = holder ) ; <nl> CheckPrototypes ( JSObject : : cast ( object - > GetPrototype ( ) ) , eax , holder , <nl> ebx , edx , edi , name , & miss ) ; <nl> <nl> mmm a / src / mark - compact . cc <nl> ppp b / src / mark - compact . cc <nl> void MarkCompactCollector : : MarkDescriptorArray ( <nl> ASSERT ( contents - > IsFixedArray ( ) ) ; <nl> ASSERT ( contents - > length ( ) > = 2 ) ; <nl> SetMark ( contents ) ; <nl> - / / Contents contains ( value , details ) pairs . If the details say <nl> - / / that the type of descriptor is MAP_TRANSITION , CONSTANT_TRANSITION , <nl> - / / or NULL_DESCRIPTOR , we don ' t mark the value as live . Only for <nl> - / / type MAP_TRANSITION is the value a Object * ( a Map * ) . <nl> + / / Contents contains ( value , details ) pairs . If the details say that <nl> + / / the type of descriptor is MAP_TRANSITION , CONSTANT_TRANSITION , or <nl> + / / NULL_DESCRIPTOR , we don ' t mark the value as live . Only for <nl> + / / MAP_TRANSITION and CONSTANT_TRANSITION is the value an Object * ( a <nl> + / / Map * ) . <nl> for ( int i = 0 ; i < contents - > length ( ) ; i + = 2 ) { <nl> / / If the pair ( value , details ) at index i , i + 1 is not <nl> / / a transition or null descriptor , mark the value . <nl> mmm a / src / objects - inl . h <nl> ppp b / src / objects - inl . h <nl> int DescriptorArray : : Search ( String * name ) { <nl> } <nl> <nl> <nl> + int DescriptorArray : : SearchWithCache ( String * name ) { <nl> + int number = DescriptorLookupCache : : Lookup ( this , name ) ; <nl> + if ( number = = DescriptorLookupCache : : kAbsent ) { <nl> + number = Search ( name ) ; <nl> + DescriptorLookupCache : : Update ( this , name , number ) ; <nl> + } <nl> + return number ; <nl> + } <nl> + <nl> + <nl> String * DescriptorArray : : GetKey ( int descriptor_number ) { <nl> ASSERT ( descriptor_number < number_of_descriptors ( ) ) ; <nl> return String : : cast ( get ( ToKeyIndex ( descriptor_number ) ) ) ; <nl> mmm a / src / objects . cc <nl> ppp b / src / objects . 
cc <nl> Object * JSObject : : AddConstantFunctionProperty ( String * name , <nl> if ( attributes ! = NONE ) { <nl> return function ; <nl> } <nl> - ConstTransitionDescriptor mark ( name ) ; <nl> + ConstTransitionDescriptor mark ( name , Map : : cast ( new_map ) ) ; <nl> new_descriptors = <nl> old_map - > instance_descriptors ( ) - > CopyInsert ( & mark , KEEP_TRANSITIONS ) ; <nl> if ( new_descriptors - > IsFailure ( ) ) { <nl> bool JSObject : : SetElementWithCallbackSetterInPrototypes ( uint32_t index , <nl> <nl> void JSObject : : LookupInDescriptor ( String * name , LookupResult * result ) { <nl> DescriptorArray * descriptors = map ( ) - > instance_descriptors ( ) ; <nl> - int number = DescriptorLookupCache : : Lookup ( descriptors , name ) ; <nl> - if ( number = = DescriptorLookupCache : : kAbsent ) { <nl> - number = descriptors - > Search ( name ) ; <nl> - DescriptorLookupCache : : Update ( descriptors , name , number ) ; <nl> - } <nl> + int number = descriptors - > SearchWithCache ( name ) ; <nl> if ( number ! = DescriptorArray : : kNotFound ) { <nl> result - > DescriptorResult ( this , descriptors - > GetDetails ( number ) , number ) ; <nl> } else { <nl> Object * JSObject : : SetProperty ( LookupResult * result , <nl> result - > holder ( ) ) ; <nl> case INTERCEPTOR : <nl> return SetPropertyWithInterceptor ( name , value , attributes ) ; <nl> - case CONSTANT_TRANSITION : <nl> - / / Replace with a MAP_TRANSITION to a new map with a FIELD , even <nl> - / / if the value is a function . <nl> + case CONSTANT_TRANSITION : { <nl> + / / If the same constant function is being added we can simply <nl> + / / transition to the target map . <nl> + Map * target_map = result - > GetTransitionMap ( ) ; <nl> + DescriptorArray * target_descriptors = target_map - > instance_descriptors ( ) ; <nl> + int number = target_descriptors - > SearchWithCache ( name ) ; <nl> + ASSERT ( number ! = DescriptorArray : : kNotFound ) ; <nl> + ASSERT ( target_descriptors - > GetType ( number ) = = CONSTANT_FUNCTION ) ; <nl> + JSFunction * function = <nl> + JSFunction : : cast ( target_descriptors - > GetValue ( number ) ) ; <nl> + ASSERT ( ! Heap : : InNewSpace ( function ) ) ; <nl> + if ( value = = function ) { <nl> + set_map ( target_map ) ; <nl> + return value ; <nl> + } <nl> + / / Otherwise , replace with a MAP_TRANSITION to a new map with a <nl> + / / FIELD , even if the value is a constant function . <nl> return ConvertDescriptorToFieldAndMapTransition ( name , value , attributes ) ; <nl> + } <nl> case NULL_DESCRIPTOR : <nl> return ConvertDescriptorToFieldAndMapTransition ( name , value , attributes ) ; <nl> default : <nl> void String : : PrintOn ( FILE * file ) { <nl> void Map : : CreateBackPointers ( ) { <nl> DescriptorArray * descriptors = instance_descriptors ( ) ; <nl> for ( int i = 0 ; i < descriptors - > number_of_descriptors ( ) ; i + + ) { <nl> - if ( descriptors - > GetType ( i ) = = MAP_TRANSITION ) { <nl> + if ( descriptors - > GetType ( i ) = = MAP_TRANSITION | | <nl> + descriptors - > GetType ( i ) = = CONSTANT_TRANSITION ) { <nl> / / Get target . <nl> Map * target = Map : : cast ( descriptors - > GetValue ( i ) ) ; <nl> # ifdef DEBUG <nl> void Map : : ClearNonLiveTransitions ( Object * real_prototype ) { <nl> / / map is not reached again by following a back pointer from a <nl> / / non - live object . <nl> PropertyDetails details ( Smi : : cast ( contents - > get ( i + 1 ) ) ) ; <nl> - if ( details . type ( ) = = MAP_TRANSITION ) { <nl> + if ( details . type ( ) = = MAP_TRANSITION | | <nl> + details . 
type ( ) = = CONSTANT_TRANSITION ) { <nl> Map * target = reinterpret_cast < Map * > ( contents - > get ( i ) ) ; <nl> ASSERT ( target - > IsHeapObject ( ) ) ; <nl> if ( ! target - > IsMarked ( ) ) { <nl> mmm a / src / objects . h <nl> ppp b / src / objects . h <nl> class DescriptorArray : public FixedArray { <nl> / / Search the instance descriptors for given name . <nl> inline int Search ( String * name ) ; <nl> <nl> + / / As the above , but uses DescriptorLookupCache and updates it when <nl> + / / necessary . <nl> + inline int SearchWithCache ( String * name ) ; <nl> + <nl> / / Tells whether the name is present int the array . <nl> bool Contains ( String * name ) { return kNotFound ! = Search ( name ) ; } <nl> <nl> mmm a / src / property . h <nl> ppp b / src / property . h <nl> class MapTransitionDescriptor : public Descriptor { <nl> / / the same CONSTANT_FUNCTION field . <nl> class ConstTransitionDescriptor : public Descriptor { <nl> public : <nl> - explicit ConstTransitionDescriptor ( String * key ) <nl> - : Descriptor ( key , Smi : : FromInt ( 0 ) , NONE , CONSTANT_TRANSITION ) { } <nl> + explicit ConstTransitionDescriptor ( String * key , Map * map ) <nl> + : Descriptor ( key , map , NONE , CONSTANT_TRANSITION ) { } <nl> } ; <nl> <nl> <nl> class LookupResult BASE_EMBEDDED { <nl> <nl> Map * GetTransitionMap ( ) { <nl> ASSERT ( lookup_type_ = = DESCRIPTOR_TYPE ) ; <nl> - ASSERT ( type ( ) = = MAP_TRANSITION ) ; <nl> + ASSERT ( type ( ) = = MAP_TRANSITION | | type ( ) = = CONSTANT_TRANSITION ) ; <nl> return Map : : cast ( GetValue ( ) ) ; <nl> } <nl> <nl> | Preserve constant function transition when adding the same function . | v8/v8 | 421db370d9b53dca7f20eb8e98a957f97756b08b | 2010-08-12T14:51:59Z |
mmm a / test / DebugInfo / guard - let . swift <nl> ppp b / test / DebugInfo / guard - let . swift <nl> public func f ( _ i : Int ? ) <nl> / / CHECK1 : % debug . copy = alloca % TSiSg <nl> / / CHECK1 : @ llvm . dbg . declare ( metadata % TSiSg * % debug . copy <nl> / / CHECK1 : @ llvm . dbg . declare ( metadata { { ( i32 | i64 ) } } * % val . addr , { { . * } } , ! dbg ! [ [ DBG0 : . * ] ] <nl> + / / CHECK1 : % 5 = bitcast % TSiSg * % debug . copy to i64 * , ! dbg <nl> + / / CHECK1 : store i64 % 0 , i64 * % 5 , align 8 , ! dbg <nl> / / CHECK1 : ! [ [ F : . * ] ] = distinct ! DISubprogram ( name : " f " , <nl> / / CHECK1 : ! [ [ BLK : . * ] ] = distinct ! DILexicalBlock ( scope : ! [ [ F ] ] , <nl> / / CHECK1 : ! [ [ DBG0 ] ] = ! DILocation ( line : [ [ @ LINE + 2 ] ] , <nl> public func g ( _ s : String ? ) <nl> / / CHECK2 : @ llvm . dbg . declare ( metadata % TSSSg * <nl> / / CHECK2 : % debug . copy1 = alloca % TSS <nl> / / CHECK2 : @ llvm . dbg . declare ( metadata % TSS * <nl> + / / CHECK2 : % 4 = bitcast % TSSSg * % debug . copy to { i64 , i64 } * , ! dbg <nl> + / / CHECK2 : % 5 = getelementptr inbounds { i64 , i64 } , { i64 , i64 } * % 4 , i32 0 , i32 0 , ! dbg <nl> + / / CHECK2 : store i64 % 0 , i64 * % 5 , align 8 , ! dbg <nl> / / CHECK2 : ! [ [ G : . * ] ] = distinct ! DISubprogram ( name : " g " <nl> guard let val = s else { return } <nl> use ( val ) <nl> | Merge remote - tracking branch ' origin / master ' into master - llvm - swift5 - transition | apple/swift | 252627f97f64f19a1b566ac8dee5f27589b15038 | 2018-01-25T17:58:37Z |
mmm a / tools / depends / target / ffmpeg / FFMPEG - VERSION <nl> ppp b / tools / depends / target / ffmpeg / FFMPEG - VERSION <nl> <nl> LIBNAME = ffmpeg <nl> BASE_URL = https : / / github . com / xbmc / FFmpeg <nl> - VERSION = 3 . 4 - Leia - Alpha - 1 <nl> + VERSION = 3 . 4 . 1 - Leia - Alpha - 1 <nl> ARCHIVE = $ ( LIBNAME ) - $ ( VERSION ) . tar . gz <nl> GNUTLS_VER = 3 . 4 . 14 <nl> | FFmpeg : Bump to 3 . 4 . 1 - Leia - Alpha - 1 after rebase | xbmc/xbmc | 3c3c07027cad3ffc8dde04d189650137831d3df5 | 2018-01-07T11:15:29Z |
Binary files a / tools / android / packaging / media / drawable - hdpi / ic_launcher . png and b / tools / android / packaging / media / drawable - hdpi / ic_launcher . png differ <nl> Binary files a / tools / android / packaging / media / drawable - ldpi / ic_launcher . png and b / tools / android / packaging / media / drawable - ldpi / ic_launcher . png differ <nl> Binary files a / tools / android / packaging / media / drawable - mdpi / ic_launcher . png and b / tools / android / packaging / media / drawable - mdpi / ic_launcher . png differ <nl> Binary files a / tools / android / packaging / media / drawable - xhdpi / banner . png and b / tools / android / packaging / media / drawable - xhdpi / banner . png differ <nl> Binary files a / tools / android / packaging / media / drawable - xhdpi / ic_launcher . png and b / tools / android / packaging / media / drawable - xhdpi / ic_launcher . png differ <nl> Binary files a / tools / android / packaging / media / drawable - xhdpi / ouya_icon . png and b / tools / android / packaging / media / drawable - xhdpi / ouya_icon . png differ <nl> Binary files a / tools / android / packaging / media / drawable - xxhdpi / ic_launcher . png and b / tools / android / packaging / media / drawable - xxhdpi / ic_launcher . png differ <nl> Binary files a / tools / android / packaging / media / drawable - xxxhdpi / ic_launcher . png and b / tools / android / packaging / media / drawable - xxxhdpi / ic_launcher . png differ <nl> Binary files a / tools / android / packaging / media / playstore . png and b / tools / android / packaging / media / playstore . png differ <nl> | [ rebrand ] [ android ] new icons | xbmc/xbmc | 918b9d9c4b4a3b96edc1edea0a862d765a66fea3 | 2014-12-06T21:53:01Z |
mmm a / src / bootstrapper . cc <nl> ppp b / src / bootstrapper . cc <nl> void Genesis : : InitializeGlobal ( Handle < JSGlobalObject > global_object , <nl> } <nl> <nl> { / / - - I t e r a t o r R e s u l t <nl> - Handle < Map > map = factory - > NewMap ( JS_OBJECT_TYPE , JSIteratorResult : : kSize , <nl> - TERMINAL_FAST_ELEMENTS_KIND , 2 ) ; <nl> - Map : : SetPrototype ( isolate ( ) , map , isolate_ - > initial_object_prototype ( ) ) ; <nl> - Map : : EnsureDescriptorSlack ( isolate_ , map , 2 ) ; <nl> - <nl> - { / / value <nl> - Descriptor d = Descriptor : : DataField ( isolate ( ) , factory - > value_string ( ) , <nl> - JSIteratorResult : : kValueIndex , NONE , <nl> - Representation : : Tagged ( ) ) ; <nl> - map - > AppendDescriptor ( isolate ( ) , & d ) ; <nl> - } <nl> - <nl> - { / / done <nl> - Descriptor d = Descriptor : : DataField ( isolate ( ) , factory - > done_string ( ) , <nl> - JSIteratorResult : : kDoneIndex , NONE , <nl> - Representation : : Tagged ( ) ) ; <nl> - map - > AppendDescriptor ( isolate ( ) , & d ) ; <nl> - } <nl> + / / Setup the map for IterResultObjects created from builtins in such a <nl> + / / way that it ' s exactly the same map as the one produced by object <nl> + / / literals in the form ` { value , done } ` . This way we have better sharing <nl> + / / of maps ( i . e . less polymorphism ) and also make it possible to hit the <nl> + / / fast - paths in various builtins ( i . e . promises and collections ) with <nl> + / / user defined iterators . <nl> + Handle < Map > map = factory - > ObjectLiteralMapFromCache ( native_context ( ) , 2 ) ; <nl> + <nl> + / / value <nl> + map = Map : : CopyWithField ( isolate ( ) , map , factory - > value_string ( ) , <nl> + FieldType : : Any ( isolate ( ) ) , NONE , <nl> + PropertyConstness : : kConst , <nl> + Representation : : Tagged ( ) , INSERT_TRANSITION ) <nl> + . ToHandleChecked ( ) ; <nl> + <nl> + / / done <nl> + / / TODO ( bmeurer ) : Once FLAG_modify_field_representation_inplace is always <nl> + / / on , we can say Representation : : HeapObject ( ) here and have the inplace <nl> + / / update logic take care of the case where someone ever stores a Smi into <nl> + / / the done field . <nl> + map = Map : : CopyWithField ( isolate ( ) , map , factory - > done_string ( ) , <nl> + FieldType : : Any ( isolate ( ) ) , NONE , <nl> + PropertyConstness : : kConst , <nl> + Representation : : Tagged ( ) , INSERT_TRANSITION ) <nl> + . ToHandleChecked ( ) ; <nl> <nl> - map - > SetConstructor ( native_context ( ) - > object_function ( ) ) ; <nl> native_context ( ) - > set_iterator_result_map ( * map ) ; <nl> } <nl> <nl> mmm a / src / heap / factory . cc <nl> ppp b / src / heap / factory . cc <nl> Handle < Map > Factory : : ObjectLiteralMapFromCache ( Handle < NativeContext > context , <nl> return handle ( context - > object_function ( ) - > initial_map ( ) , isolate ( ) ) ; <nl> } <nl> <nl> - / / We do not cache maps for too many properties or when running builtin code . <nl> - if ( isolate ( ) - > bootstrapper ( ) - > IsActive ( ) ) { <nl> - return Map : : Create ( isolate ( ) , number_of_properties ) ; <nl> - } <nl> - <nl> / / Use initial slow object proto map for too many properties . <nl> const int kMapCacheSize = 128 ; <nl> if ( number_of_properties > kMapCacheSize ) { <nl> mmm a / src / objects / map . cc <nl> ppp b / src / objects / map . 
cc <nl> void Map : : ConnectTransition ( Isolate * isolate , Handle < Map > parent , <nl> child - > may_have_interesting_symbols ( ) ) ; <nl> DCHECK_IMPLIES ( parent - > may_have_interesting_symbols ( ) , <nl> child - > may_have_interesting_symbols ( ) ) ; <nl> - / / Do not track transitions during bootstrap except for element transitions . <nl> - if ( isolate - > bootstrapper ( ) - > IsActive ( ) & & <nl> - ! name . is_identical_to ( isolate - > factory ( ) - > elements_transition_symbol ( ) ) ) { <nl> - if ( FLAG_trace_maps ) { <nl> - LOG ( isolate , <nl> - MapEvent ( " Transition " , * parent , * child , <nl> - child - > is_prototype_map ( ) ? " prototype " : " " , * name ) ) ; <nl> - } <nl> - return ; <nl> - } <nl> if ( ! parent - > GetBackPointer ( ) - > IsUndefined ( isolate ) ) { <nl> parent - > set_owns_descriptors ( false ) ; <nl> } else { <nl> Handle < Map > Map : : CopyForPreventExtensions ( <nl> attrs_to_add ) ; <nl> Handle < LayoutDescriptor > new_layout_descriptor ( map - > GetLayoutDescriptor ( ) , <nl> isolate ) ; <nl> + / / Do not track transitions during bootstrapping . <nl> + TransitionFlag flag = <nl> + isolate - > bootstrapper ( ) - > IsActive ( ) ? OMIT_TRANSITION : INSERT_TRANSITION ; <nl> Handle < Map > new_map = CopyReplaceDescriptors ( <nl> - isolate , map , new_desc , new_layout_descriptor , INSERT_TRANSITION , <nl> - transition_marker , reason , SPECIAL_TRANSITION ) ; <nl> + isolate , map , new_desc , new_layout_descriptor , flag , transition_marker , <nl> + reason , SPECIAL_TRANSITION ) ; <nl> new_map - > set_is_extensible ( false ) ; <nl> if ( ! IsFixedTypedArrayElementsKind ( map - > elements_kind ( ) ) ) { <nl> ElementsKind new_kind = IsStringWrapperElementsKind ( map - > elements_kind ( ) ) <nl> Handle < Map > Map : : TransitionToDataProperty ( Isolate * isolate , Handle < Map > map , <nl> value ) ; <nl> } <nl> <nl> - TransitionFlag flag = INSERT_TRANSITION ; <nl> + / / Do not track transitions during bootstrapping . <nl> + TransitionFlag flag = <nl> + isolate - > bootstrapper ( ) - > IsActive ( ) ? OMIT_TRANSITION : INSERT_TRANSITION ; <nl> MaybeHandle < Map > maybe_map ; <nl> if ( ! map - > TooManyFastProperties ( store_origin ) ) { <nl> Representation representation = value - > OptimalRepresentation ( ) ; <nl> Handle < Map > Map : : TransitionToAccessorProperty ( Isolate * isolate , Handle < Map > map , <nl> <nl> pair - > SetComponents ( * getter , * setter ) ; <nl> <nl> - TransitionFlag flag = INSERT_TRANSITION ; <nl> + / / Do not track transitions during bootstrapping . <nl> + TransitionFlag flag = <nl> + isolate - > bootstrapper ( ) - > IsActive ( ) ? OMIT_TRANSITION : INSERT_TRANSITION ; <nl> Descriptor d = Descriptor : : AccessorConstant ( name , pair , attributes ) ; <nl> return Map : : CopyInsertDescriptor ( isolate , map , & d , flag ) ; <nl> } <nl> new file mode 100644 <nl> index 00000000000 . . 23ca935f6b9 <nl> mmm / dev / null <nl> ppp b / test / mjsunit / regress / regress - v8 - 9243 . js <nl> <nl> + / / Copyright 2019 the V8 project authors . All rights reserved . <nl> + / / Use of this source code is governed by a BSD - style license that can be <nl> + / / found in the LICENSE file . <nl> + <nl> + / / Flags : - - allow - natives - syntax <nl> + <nl> + / / The special IterResultObject map that builtins use should be the same <nl> + / / as the one produced by the ` { value , done } ` object literal . <nl> + const user = { value : undefined , done : true } ; <nl> + <nl> + / / Array iterator . 
<nl> + const arrayResult = ( new Array ( ) ) [ Symbol . iterator ] ( ) . next ( ) ; <nl> + assertTrue ( % HaveSameMap ( user , arrayResult ) ) ; <nl> + <nl> + / / Map iterator . <nl> + const mapResult = ( new Map ( ) ) [ Symbol . iterator ] ( ) . next ( ) ; <nl> + assertTrue ( % HaveSameMap ( user , mapResult ) ) ; <nl> + <nl> + / / Set iterator . <nl> + const setResult = ( new Set ( ) ) [ Symbol . iterator ] ( ) . next ( ) ; <nl> + assertTrue ( % HaveSameMap ( user , setResult ) ) ; <nl> + <nl> + / / Generator . <nl> + function * generator ( ) { } <nl> + const generatorResult = generator ( ) . next ( ) ; <nl> + assertTrue ( % HaveSameMap ( user , setResult ) ) ; <nl> | [ map ] Properly share the map for builtin iterator result objects . | v8/v8 | d2ea316f2ac6252e7302b96492230779360f38fb | 2019-05-14T14:02:29Z |
mmm a / googletest / CMakeLists . txt <nl> ppp b / googletest / CMakeLists . txt <nl> else ( ) <nl> endif ( ) <nl> cmake_minimum_required ( VERSION 2 . 6 . 4 ) <nl> <nl> + if ( POLICY CMP0063 ) # Visibility <nl> + cmake_policy ( SET CMP0063 NEW ) <nl> + endif ( POLICY CMP0063 ) <nl> + <nl> if ( COMMAND set_up_hermetic_build ) <nl> set_up_hermetic_build ( ) <nl> endif ( ) <nl> | Adding CMake visibility policy setting | google/googletest | 5518a1d350d59b22669440b175a5be045d544c35 | 2017-08-18T19:18:58Z |
mmm a / cmake / modules / AddSwift . cmake <nl> ppp b / cmake / modules / AddSwift . cmake <nl> function ( _add_variant_link_flags ) <nl> RESULT_VAR_NAME result ) <nl> <nl> if ( " $ { LFLAGS_SDK } " STREQUAL " LINUX " ) <nl> - list ( APPEND result " - lpthread " " - ldl " ) <nl> + list ( APPEND result " - lpthread " " - latomic " " - ldl " ) <nl> elseif ( " $ { LFLAGS_SDK } " STREQUAL " FREEBSD " ) <nl> list ( APPEND result " - lpthread " ) <nl> elseif ( " $ { LFLAGS_SDK } " STREQUAL " CYGWIN " ) <nl> mmm a / cmake / modules / AddSwiftUnittests . cmake <nl> ppp b / cmake / modules / AddSwiftUnittests . cmake <nl> function ( add_swift_unittest test_dirname ) <nl> if ( " $ { CMAKE_SYSTEM_NAME } " STREQUAL " Darwin " ) <nl> set_property ( TARGET " $ { test_dirname } " APPEND_STRING PROPERTY <nl> LINK_FLAGS " - Xlinker - rpath - Xlinker $ { SWIFT_LIBRARY_OUTPUT_INTDIR } / swift / macosx " ) <nl> + elseif ( " $ { CMAKE_SYSTEM_NAME } " STREQUAL " Linux " ) <nl> + set_property ( TARGET " $ { test_dirname } " APPEND_STRING PROPERTY <nl> + LINK_FLAGS " - latomic " ) <nl> endif ( ) <nl> <nl> if ( SWIFT_ENABLE_GOLD_LINKER AND <nl> mmm a / include / swift / Runtime / Concurrent . h <nl> ppp b / include / swift / Runtime / Concurrent . h <nl> class ConcurrentMapBase < EntryTy , false , Allocator > : protected Allocator { <nl> / / Destroy the node ' s payload . <nl> node - > ~ Node ( ) ; <nl> <nl> - / / Deallocate the node . <nl> - this - > Deallocate ( node , allocSize ) ; <nl> + / / Deallocate the node . The static_cast here is required <nl> + / / because LLVM ' s allocator API is insane . <nl> + this - > Deallocate ( static_cast < void * > ( node ) , allocSize ) ; <nl> } <nl> } ; <nl> <nl> mmm a / stdlib / public / runtime / Metadata . cpp <nl> ppp b / stdlib / public / runtime / Metadata . cpp <nl> const WitnessTable * swift : : swift_getGenericWitnessTable ( <nl> } <nl> <nl> uint64_t swift : : RelativeDirectPointerNullPtr = 0 ; <nl> + <nl> + / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> + / * * * Allocator implementation * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> + / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> + <nl> + namespace { <nl> + struct PoolRange { <nl> + static constexpr uintptr_t PageSize = 16 * 1024 ; <nl> + static constexpr uintptr_t MaxPoolAllocationSize = PageSize / 2 ; <nl> + <nl> + / / / The start of the allocation . <nl> + char * Begin ; <nl> + <nl> + / / / The number of bytes remaining . <nl> + size_t Remaining ; <nl> + } ; <nl> + } <nl> + <nl> + / / A statically - allocated pool . It ' s zero - initialized , so this <nl> + / / doesn ' t cost us anything in binary size . <nl> + LLVM_ALIGNAS ( alignof ( void * ) ) static char InitialAllocationPool [ 64 * 1024 ] ; <nl> + static std : : atomic < PoolRange > <nl> + AllocationPool { PoolRange { InitialAllocationPool , <nl> + sizeof ( InitialAllocationPool ) } } ; <nl> + <nl> + void * MetadataAllocator : : Allocate ( size_t size , size_t alignment ) { <nl> + assert ( alignment < = alignof ( void * ) ) ; <nl> + assert ( size % alignof ( void * ) = = 0 ) ; <nl> + <nl> + / / If the size is larger than the maximum , just use malloc . 
<nl> + if ( size > PoolRange : : MaxPoolAllocationSize ) <nl> + return malloc ( size ) ; <nl> + <nl> + / / Allocate out of the pool . <nl> + PoolRange curState = AllocationPool . load ( std : : memory_order_relaxed ) ; <nl> + while ( true ) { <nl> + char * allocation ; <nl> + PoolRange newState ; <nl> + bool allocatedNewPage ; <nl> + <nl> + / / Try to allocate out of the current page . <nl> + if ( size < = curState . Remaining ) { <nl> + allocatedNewPage = false ; <nl> + allocation = curState . Begin ; <nl> + newState = PoolRange { curState . Begin + size , curState . Remaining - size } ; <nl> + } else { <nl> + allocatedNewPage = true ; <nl> + allocation = new char [ PoolRange : : PageSize ] ; <nl> + newState = PoolRange { allocation + size , PoolRange : : PageSize - size } ; <nl> + __asan_poison_memory_region ( allocation , PoolRange : : PageSize ) ; <nl> + } <nl> + <nl> + / / Swap in the new state . <nl> + if ( std : : atomic_compare_exchange_weak_explicit ( & AllocationPool , <nl> + & curState , newState , <nl> + std : : memory_order_relaxed , <nl> + std : : memory_order_relaxed ) ) { <nl> + / / If that succeeded , we ' ve successfully allocated . <nl> + __msan_allocated_memory ( allocation , size ) ; <nl> + __asan_poison_memory_region ( allocation , size ) ; <nl> + return allocation ; <nl> + } <nl> + <nl> + / / If it failed , go back to a neutral state and try again . <nl> + if ( allocatedNewPage ) { <nl> + delete [ ] allocation ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + void MetadataAllocator : : Deallocate ( const void * allocation , size_t size ) { <nl> + __asan_poison_memory_region ( allocation , size ) ; <nl> + <nl> + if ( size > PoolRange : : MaxPoolAllocationSize ) { <nl> + free ( const_cast < void * > ( allocation ) ) ; <nl> + return ; <nl> + } <nl> + <nl> + / / Check whether the allocation pool is still in the state it was in <nl> + / / immediately after the given allocation . <nl> + PoolRange curState = AllocationPool . load ( std : : memory_order_relaxed ) ; <nl> + if ( reinterpret_cast < const char * > ( allocation ) + size ! = curState . Begin ) { <nl> + return ; <nl> + } <nl> + <nl> + / / Try to swap back to the pre - allocation state . If this fails , <nl> + / / don ' t bother trying again ; we ' ll just leak the allocation . <nl> + PoolRange newState = { reinterpret_cast < char * > ( const_cast < void * > ( allocation ) ) , <nl> + curState . Remaining + size } ; <nl> + ( void ) <nl> + std : : atomic_compare_exchange_strong_explicit ( & AllocationPool , <nl> + & curState , newState , <nl> + std : : memory_order_relaxed , <nl> + std : : memory_order_relaxed ) ; <nl> + } <nl> + <nl> mmm a / stdlib / public / runtime / MetadataCache . h <nl> ppp b / stdlib / public / runtime / MetadataCache . h <nl> <nl> <nl> namespace swift { <nl> <nl> - / / For now , use malloc and free as our standard allocator for <nl> - / / metadata caches . It might make sense in the future to take <nl> - / / advantage of the fact that we know that most allocations here <nl> - / / won ' t ever be deallocated . 
<nl> - using MetadataAllocator = llvm : : MallocAllocator ; <nl> + class MetadataAllocator : public llvm : : AllocatorBase < MetadataAllocator > { <nl> + public : <nl> + void Reset ( ) { } <nl> + <nl> + LLVM_ATTRIBUTE_RETURNS_NONNULL void * Allocate ( size_t size , size_t alignment ) ; <nl> + using AllocatorBase < MetadataAllocator > : : Allocate ; <nl> + <nl> + void Deallocate ( const void * Ptr , size_t size ) ; <nl> + using AllocatorBase < MetadataAllocator > : : Deallocate ; <nl> + <nl> + void PrintStats ( ) const { } <nl> + } ; <nl> <nl> / / / A typedef for simple global caches . <nl> template < class EntryTy > <nl> mmm a / utils / gen - static - stdlib - link - args <nl> ppp b / utils / gen - static - stdlib - link - args <nl> function write_linkfile { <nl> - ldl <nl> - lpthread <nl> - lswiftCore <nl> + - latomic <nl> - lswiftImageInspectionShared <nl> $ ICU_LIBS <nl> - Xlinker <nl> mmm a / utils / static - executable - args . lnk <nl> ppp b / utils / static - executable - args . lnk <nl> <nl> - Xlinker <nl> - - defsym = __import_pthread_key_create = pthread_key_create <nl> - lpthread <nl> + - latomic <nl> - licui18n <nl> - licuuc <nl> - licudata <nl> | Switch MetadataCache to use a global slab allocator . | apple/swift | 038303b1b1263b00906037018d03946d633e3125 | 2017-02-14T16:10:44Z |
mmm a / cocos / audio / AudioEngine . cpp <nl> ppp b / cocos / audio / AudioEngine . cpp <nl> int AudioEngine : : play2d ( const std : : string & filePath , bool loop , float volume , co <nl> audioRef . is3dAudio = false ; <nl> audioRef . filePath = & it - > first ; <nl> <nl> - manage - > lastPlayTime = utils : : gettime ( ) ; <nl> - manage - > audioIDs . push_back ( ret ) ; <nl> + if ( manage ) { <nl> + manage - > lastPlayTime = utils : : gettime ( ) ; <nl> + manage - > audioIDs . push_back ( ret ) ; <nl> + } <nl> audioRef . profileManage = manage ; <nl> } <nl> } while ( 0 ) ; <nl> mmm a / cocos / audio / ios / AudioEngine - inl . h <nl> ppp b / cocos / audio / ios / AudioEngine - inl . h <nl> <nl> # include " AudioPlayer . h " <nl> <nl> NS_CC_BEGIN <nl> - class AudioEngine ; <nl> class AudioProfile ; <nl> <nl> # define kMaxSources 32 <nl> class AudioEngineThreadPool ; <nl> class AudioEngineImpl : public cocos2d : : Ref <nl> { <nl> public : <nl> - AudioEngineImpl ( AudioEngine * audioEngine ) ; <nl> + AudioEngineImpl ( ) ; <nl> ~ AudioEngineImpl ( ) ; <nl> <nl> bool init ( ) ; <nl> - int play2d ( const std : : string & fileFullPath , bool loop , float volume , AudioProfile * profile ) ; <nl> + int play2d ( const std : : string & fileFullPath , bool loop , float volume ) ; <nl> void setVolume ( int audioID , float volume ) ; <nl> void setLoop ( int audioID , bool loop ) ; <nl> bool pause ( int audioID ) ; <nl> class AudioEngineImpl : public cocos2d : : Ref <nl> void _play2d ( AudioCache * cache , int audioID ) ; <nl> <nl> AudioEngineThreadPool * _threadPool ; <nl> - AudioEngine * _audioEngine ; <nl> <nl> ALuint _alSources [ kMaxSources ] ; <nl> <nl> mmm a / cocos / audio / ios / AudioEngine - inl . mm <nl> ppp b / cocos / audio / ios / AudioEngine - inl . mm <nl> void threadFunc ( int index ) <nl> } ; <nl> } <nl> <nl> - AudioEngineImpl : : AudioEngineImpl ( AudioEngine * audioEngine ) <nl> - : _audioEngine ( audioEngine ) <nl> - , _lazyInitLoop ( true ) <nl> + AudioEngineImpl : : AudioEngineImpl ( ) <nl> + : _lazyInitLoop ( true ) <nl> , nextAudioID ( 0 ) <nl> , _threadPool ( nullptr ) <nl> { <nl> void threadFunc ( int index ) <nl> return ret ; <nl> } <nl> <nl> - int AudioEngineImpl : : play2d ( const std : : string & filePath , bool loop , float volume , AudioProfile * profile ) <nl> + int AudioEngineImpl : : play2d ( const std : : string & filePath , bool loop , float volume ) <nl> { <nl> if ( s_ALDevice = = nullptr ) { <nl> return AudioEngine : : INVAILD_AUDIO_ID ; <nl> void threadFunc ( int index ) <nl> <nl> _alSourceUsed [ alSource ] = true ; <nl> <nl> - if ( profile ) { <nl> - profile - > lastPlayTime = utils : : gettime ( ) ; <nl> - profile - > audioIDs . push_back ( nextAudioID ) ; <nl> - } <nl> - <nl> if ( _lazyInitLoop ) { <nl> _lazyInitLoop = false ; <nl> <nl> void threadFunc ( int index ) <nl> auto playerIt = _audioPlayers . find ( audioID ) ; <nl> if ( playerIt ! = _audioPlayers . end ( ) ) { <nl> if ( playerIt - > second . play2d ( cache ) ) { <nl> - _audioEngine - > _audioInfos [ audioID ] . state = AudioEngine : : AudioState : : PLAYING ; <nl> + AudioEngine : : _audioInfos [ audioID ] . state = AudioEngine : : AudioState : : PLAYING ; <nl> } <nl> else { <nl> _threadMutex . lock ( ) ; <nl> void threadFunc ( int index ) <nl> if ( playerIt ! = _audioPlayers . end ( ) ) { <nl> _alSourceUsed [ playerIt - > second . _alSource ] = false ; <nl> _audioPlayers . 
erase ( audioID ) ; <nl> - _audioEngine - > remove ( audioID ) ; <nl> + AudioEngine : : remove ( audioID ) ; <nl> } <nl> } <nl> size_t removeCacheCount = _removeCaches . size ( ) ; <nl> void threadFunc ( int index ) <nl> <nl> if ( player . _ready & & sourceState = = AL_STOPPED ) { <nl> _alSourceUsed [ player . _alSource ] = false ; <nl> - auto & audioInfo = _audioEngine - > _audioInfos [ audioID ] ; <nl> + auto & audioInfo = AudioEngine : : _audioInfos [ audioID ] ; <nl> if ( player . _finishCallbak ) { <nl> player . _finishCallbak ( audioID , * audioInfo . filePath ) ; <nl> } <nl> <nl> - _audioEngine - > remove ( audioID ) ; <nl> + AudioEngine : : remove ( audioID ) ; <nl> <nl> it = _audioPlayers . erase ( it ) ; <nl> } <nl> | Refactoring API : | cocos2d/cocos2d-x | 1d900940ab70964d60da06bc517a09bf3ab8f6c1 | 2014-09-05T06:34:30Z |
mmm a / xbmc / utils / SystemInfo . cpp <nl> ppp b / xbmc / utils / SystemInfo . cpp <nl> std : : string CSysInfo : : GetKernelCpuFamily ( void ) <nl> { <nl> # ifdef TARGET_WINDOWS <nl> SYSTEM_INFO si ; <nl> - GetSystemInfo ( & si ) ; <nl> + GetNativeSystemInfo ( & si ) ; <nl> if ( si . wProcessorArchitecture = = PROCESSOR_ARCHITECTURE_INTEL | | <nl> si . wProcessorArchitecture = = PROCESSOR_ARCHITECTURE_AMD64 ) <nl> return " x86 " ; <nl> | [ win32 ] SysInfo : : GetKernelCpuFamily : use GetNativeSystemInfo ( ) instead of GetSystemInfo ( ) | xbmc/xbmc | b5a025ac5e59ae625e97ffc1c32e8905900fbef3 | 2014-06-04T13:00:12Z |
mmm a / include / coroutine . h <nl> ppp b / include / coroutine . h <nl> <nl> <nl> # include " swoole . h " <nl> # include " context . h " <nl> - # include " lru_cache . h " <nl> <nl> # include < string > <nl> # include < unordered_map > <nl> typedef void ( * coro_php_yield_t ) ( void * ) ; <nl> typedef void ( * coro_php_resume_t ) ( void * ) ; <nl> typedef void ( * coro_php_close_t ) ( void * ) ; <nl> <nl> + void set_dns_cache_expire ( time_t expire ) ; <nl> + void set_dns_cache_capacity ( size_t capacity ) ; <nl> + void clear_dns_cache ( ) ; <nl> + <nl> namespace swoole <nl> { <nl> class Coroutine <nl> class Coroutine <nl> static coro_php_resume_t on_resume ; / * before php resume coro * / <nl> static coro_php_close_t on_close ; / * before php close coro * / <nl> <nl> - static LRUCache * dns_cache ; <nl> - static size_t dns_cache_capacity ; <nl> - static time_t dns_cache_expire ; <nl> - <nl> public : <nl> static std : : unordered_map < long , Coroutine * > coroutines ; <nl> <nl> class Coroutine <nl> { <nl> return peak_num ; <nl> } <nl> - <nl> - static inline void set_dns_cache_expire ( time_t expire ) <nl> - { <nl> - dns_cache_expire = expire ; <nl> - } <nl> - <nl> - static inline void set_dns_cache_capacity ( size_t capacity ) <nl> - { <nl> - dns_cache_capacity = capacity ; <nl> - delete dns_cache ; <nl> - dns_cache = nullptr ; <nl> - } <nl> } ; <nl> } <nl> mmm a / src / coroutine / hook . cc <nl> ppp b / src / coroutine / hook . cc <nl> <nl> # include " socket . h " <nl> # include " async . h " <nl> # include " coroutine . h " <nl> + # include " lru_cache . h " <nl> <nl> # ifndef _WIN32 <nl> <nl> <nl> using namespace swoole ; <nl> using namespace std ; <nl> <nl> - size_t Coroutine : : dns_cache_capacity = 1000 ; <nl> - time_t Coroutine : : dns_cache_expire = 60 ; <nl> - LRUCache * Coroutine : : dns_cache = nullptr ; <nl> + static size_t dns_cache_capacity = 1000 ; <nl> + static time_t dns_cache_expire = 60 ; <nl> + static LRUCache * dns_cache = nullptr ; <nl> + <nl> + void set_dns_cache_expire ( time_t expire ) <nl> + { <nl> + dns_cache_expire = expire ; <nl> + } <nl> + <nl> + void set_dns_cache_capacity ( size_t capacity ) <nl> + { <nl> + dns_cache_capacity = capacity ; <nl> + delete dns_cache ; <nl> + dns_cache = nullptr ; <nl> + } <nl> + <nl> + void clear_dns_cache ( ) <nl> + { <nl> + dns_cache - > clear ( ) ; <nl> + } <nl> <nl> extern " C " <nl> { <nl> ssize_t Coroutine : : write_file ( const char * file , char * buf , size_t length , int lo <nl> <nl> string Coroutine : : gethostbyname ( const string & hostname , int domain , float timeout ) <nl> { <nl> - if ( Coroutine : : dns_cache = = nullptr & & Coroutine : : dns_cache_capacity ! = 0 ) <nl> + if ( dns_cache = = nullptr & & dns_cache_capacity ! = 0 ) <nl> { <nl> - Coroutine : : dns_cache = new LRUCache ( Coroutine : : dns_cache_capacity ) ; <nl> + dns_cache = new LRUCache ( dns_cache_capacity ) ; <nl> } <nl> <nl> string cache_key ; <nl> - if ( Coroutine : : dns_cache ) <nl> + if ( dns_cache ) <nl> { <nl> cache_key . append ( domain = = AF_INET ? " 4_ " : " 6_ " ) ; <nl> cache_key . append ( hostname ) ; <nl> - auto cache = Coroutine : : dns_cache - > get ( cache_key ) ; <nl> + auto cache = dns_cache - > get ( cache_key ) ; <nl> <nl> if ( cache ) <nl> { <nl> string Coroutine : : gethostbyname ( const string & hostname , int domain , float timeou <nl> } <nl> else <nl> { <nl> - if ( Coroutine : : dns_cache ) <nl> + if ( dns_cache ) <nl> { <nl> string * addr = new string ( ( char * ) ev . 
buf ) ; <nl> - Coroutine : : dns_cache - > set ( cache_key , shared_ptr < string > ( addr ) , Coroutine : : dns_cache_expire ) ; <nl> + dns_cache - > set ( cache_key , shared_ptr < string > ( addr ) , dns_cache_expire ) ; <nl> sw_free ( ev . buf ) ; <nl> return * addr ; <nl> } <nl> mmm a / swoole_coroutine_util . cc <nl> ppp b / swoole_coroutine_util . cc <nl> static PHP_METHOD ( swoole_coroutine_util , set ) <nl> if ( php_swoole_array_get_value ( vht , " dns_cache_expire " , v ) ) <nl> { <nl> convert_to_long ( v ) ; <nl> - Coroutine : : set_dns_cache_expire ( ( time_t ) Z_LVAL_P ( v ) ) ; <nl> + set_dns_cache_expire ( ( time_t ) Z_LVAL_P ( v ) ) ; <nl> } <nl> if ( php_swoole_array_get_value ( vht , " dns_cache_capacity " , v ) ) <nl> { <nl> convert_to_long ( v ) ; <nl> - Coroutine : : set_dns_cache_capacity ( ( size_t ) Z_LVAL_P ( v ) ) ; <nl> + set_dns_cache_capacity ( ( size_t ) Z_LVAL_P ( v ) ) ; <nl> } <nl> zval_ptr_dtor ( zset ) ; <nl> } <nl> <nl> + PHP_FUNCTION ( swoole_clear_dns_cache ) <nl> + { <nl> + clear_dns_cache ( ) ; <nl> + } <nl> + <nl> PHP_FUNCTION ( swoole_coroutine_create ) <nl> { <nl> zend_fcall_info fci = empty_fcall_info ; <nl> | update | swoole/swoole-src | faa0b93cc547d12240d7c8535342020ab9c90812 | 2018-12-27T10:43:25Z |
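The swoole/swoole-src record above moves the coroutine DNS cache out of the Coroutine class into file-level statics controlled by set_dns_cache_expire, set_dns_cache_capacity and the new clear_dns_cache, with entries keyed by an address-family prefix ("4_" / "6_") plus hostname and aged out after dns_cache_expire seconds. The sketch below only illustrates that keyed, time-expiring lookup; it is not swoole's LRUCache (ExpiringCache and all of its members are invented for the example, and no capacity or LRU eviction is modelled).

#include <chrono>
#include <optional>
#include <string>
#include <unordered_map>

class ExpiringCache {
 public:
  explicit ExpiringCache(std::chrono::seconds ttl) : ttl_(ttl) {}

  void set(const std::string &key, const std::string &value) {
    entries_[key] = Entry{value, std::chrono::steady_clock::now() + ttl_};
  }

  // Returns the cached value only while it has not expired.
  std::optional<std::string> get(const std::string &key) {
    auto it = entries_.find(key);
    if (it == entries_.end()) return std::nullopt;
    if (std::chrono::steady_clock::now() >= it->second.expires_at) {
      entries_.erase(it);
      return std::nullopt;
    }
    return it->second.value;
  }

  void clear() { entries_.clear(); }

 private:
  struct Entry {
    std::string value;
    std::chrono::steady_clock::time_point expires_at;
  };
  std::chrono::seconds ttl_;
  std::unordered_map<std::string, Entry> entries_;
};

// Usage mirroring the key scheme in the diff:
//   ExpiringCache dns(std::chrono::seconds(60));
//   dns.set("4_example.org", "93.184.216.34");
//   auto hit = dns.get("4_example.org");  // std::nullopt once 60s have passed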
mmm a / test / common . py <nl> ppp b / test / common . py <nl> <nl> import sys <nl> import os <nl> import re <nl> + import inspect <nl> import argparse <nl> import unittest <nl> import warnings <nl> def to_gpu ( obj , type_map = { } ) : <nl> return deepcopy ( obj ) <nl> <nl> <nl> + def get_function_arglist ( func ) : <nl> + return inspect . getargspec ( func ) . args <nl> + <nl> + <nl> def set_rng_seed ( seed ) : <nl> torch . manual_seed ( seed ) <nl> random . seed ( seed ) <nl> mmm a / test / test_nn . py <nl> ppp b / test / test_nn . py <nl> <nl> TEST_CUDNN_VERSION , loss_reference_fns , get_size_average , get_weight , \ <nl> smoothl1loss_reference , kldivloss_reference <nl> from common import freeze_rng_state , run_tests , TestCase , skipIfNoLapack , \ <nl> - TEST_SCIPY , download_file , PY3 , PY34 <nl> + TEST_SCIPY , download_file , PY3 , PY34 , to_gpu , get_function_arglist <nl> <nl> if TEST_SCIPY : <nl> from scipy import stats <nl> def _do_test ( self , test_case , module , input ) : <nl> test_case . assertIsInstance ( p , torch . cuda . FloatTensor ) <nl> test_case . assertEqual ( p . get_device ( ) , 1 ) <nl> <nl> + # test double ( ) <nl> + input = input . double ( ) . cuda ( ) <nl> + module . double ( ) . cuda ( ) <nl> + module ( input ) <nl> + for p in module . parameters ( ) : <nl> + test_case . assertIsInstance ( p , torch . cuda . DoubleTensor ) <nl> + test_case . assertEqual ( p . get_device ( ) , 0 ) <nl> + <nl> + # test half ( ) <nl> + input = input . half ( ) . cuda ( ) <nl> + module . half ( ) . cuda ( ) <nl> + module ( input ) <nl> + for o in module . parameters ( ) : <nl> + test_case . assertIsInstance ( p , torch . cuda . HalfTensor ) <nl> + test_case . assertEqual ( p . get_device ( ) , 0 ) <nl> + <nl> def _get_target ( self ) : <nl> return self . _get_arg ( ' target ' , False ) <nl> <nl> def apply_fn ( input1 , input2 , * params ) : <nl> gradcheck ( apply_fn , inputs ) <nl> gradgradcheck ( apply_fn , inputs ) <nl> <nl> + def test_cuda ( self , test_case , dtype = None ) : <nl> + def convert_dtype ( obj , dtype , requires_grad = False ) : <nl> + if isinstance ( obj , Variable ) : <nl> + return Variable ( obj . data . type ( dtype ) , requires_grad = requires_grad ) <nl> + elif torch . is_tensor ( obj ) : <nl> + return obj . type ( dtype ) <nl> + elif isinstance ( obj , tuple ) : <nl> + return tuple ( convert_dtype ( o , dtype , requires_grad ) for o in obj ) <nl> + else : <nl> + return obj <nl> + <nl> + if not TEST_CUDA or not self . should_test_cuda : <nl> + raise unittest . SkipTest ( ' Excluded from CUDA tests ' ) <nl> + try : <nl> + cpu_input = self . _get_input ( ) <nl> + cpu_target = self . _get_target ( ) <nl> + cpu_module = self . constructor ( * self . constructor_args ) <nl> + gpu_module = self . constructor ( * self . constructor_args ) <nl> + <nl> + # Convert input , target and module parameters to dtype <nl> + if dtype is not None : <nl> + cpu_input = convert_dtype ( cpu_input , dtype , True ) <nl> + # NLLLoss requires target to be LongTensor <nl> + if not isinstance ( cpu_target , torch . LongTensor ) : <nl> + cpu_target = convert_dtype ( cpu_target , dtype ) <nl> + cpu_module . type ( dtype ) <nl> + gpu_module . type ( dtype ) <nl> + <nl> + # GPU setup <nl> + gpu_input = to_gpu ( cpu_input ) <nl> + gpu_target = to_gpu ( cpu_target ) <nl> + gpu_module . cuda ( ) <nl> + <nl> + # torch . HalfTensor doesn ' t support most operations , converting back to default <nl> + if dtype = = torch . HalfTensor : <nl> + cpu_input = self . 
_get_input ( ) <nl> + cpu_target = self . _get_target ( ) <nl> + # Loss modules with weights require consistent input / module weight types <nl> + cpu_module = self . constructor ( * self . constructor_args ) <nl> + <nl> + cpu_output = test_case . _forward_criterion ( cpu_module , cpu_input , cpu_target ) <nl> + gpu_output = test_case . _forward_criterion ( gpu_module , gpu_input , gpu_target ) <nl> + # dtype can be None , so set precision in this way instead of a precision map <nl> + test_case . assertEqual ( cpu_output , gpu_output , 1e - 1 if dtype = = torch . HalfTensor else 4e - 4 ) <nl> + <nl> + cpu_gradInput = test_case . _backward_criterion ( cpu_module , cpu_input , cpu_target ) <nl> + gpu_gradInput = test_case . _backward_criterion ( gpu_module , gpu_input , gpu_target ) <nl> + test_case . assertEqual ( cpu_gradInput , gpu_gradInput , 1e - 1 if dtype = = torch . HalfTensor else 4e - 4 ) <nl> + except NotImplementedError : <nl> + pass <nl> + <nl> def _get_target ( self ) : <nl> return self . _get_arg ( ' target ' , False ) <nl> <nl> def add_test ( test ) : <nl> setattr ( TestNN , test_name , lambda self , test = test : test ( self ) ) <nl> # Hardshrink is not implemented in CUDA , so we must not test it . <nl> if not test_name . startswith ( " test_Hardshrink " ) : <nl> - setattr ( TestNN , cuda_test_name , lambda self , test = test : test . test_cuda ( self ) ) <nl> + # With dtype enable , it ' s good enough to test against three floating types <nl> + if ' dtype ' in get_function_arglist ( test . test_cuda ) : <nl> + setattr ( TestNN , cuda_test_name + ' _float ' , lambda self , <nl> + test = test : test . test_cuda ( self , dtype = torch . FloatTensor ) ) <nl> + setattr ( TestNN , cuda_test_name + ' _double ' , lambda self , <nl> + test = test : test . test_cuda ( self , dtype = torch . DoubleTensor ) ) <nl> + setattr ( TestNN , cuda_test_name + ' _half ' , lambda self , <nl> + test = test : test . test_cuda ( self , dtype = torch . HalfTensor ) ) <nl> + else : <nl> + setattr ( TestNN , cuda_test_name , lambda self , test = test : test . test_cuda ( self ) ) <nl> <nl> <nl> def wrap_functional ( fn , * * kwargs ) : <nl> | Add half test in test_nn for auto generated tests . ( ) | pytorch/pytorch | d707dae013778aa5fef01c787c903b0edce90add | 2018-03-21T20:55:06Z |
mmm a / mars / log / src / formater . cc <nl> ppp b / mars / log / src / formater . cc <nl> <nl> # include < stdio . h > <nl> # include < limits . h > <nl> # include < algorithm > <nl> + # include < string > <nl> <nl> # include " mars / comm / xlogger / xloggerbase . h " <nl> # include " mars / comm / xlogger / loginfo_extract . h " <nl> void log_formater ( const XLoggerInfo * _info , const char * _logbody , PtrBuffer & _lo <nl> if ( 0 ! = _info - > timeval . tv_sec ) { <nl> time_t sec = _info - > timeval . tv_sec ; <nl> tm tm = * localtime ( ( const time_t * ) & sec ) ; <nl> + std : : string gmt = std : : to_string ( tm . tm_gmtoff / 3600 . 0 ) ; <nl> + <nl> # ifdef ANDROID <nl> - snprintf ( temp_time , sizeof ( temp_time ) , " % d - % 02d - % 02d % + . 1f % 02d : % 02d : % 02d . % . 3ld " , 1900 + tm . tm_year , 1 + tm . tm_mon , tm . tm_mday , <nl> + snprintf ( temp_time , sizeof ( temp_time ) , " % d - % 02d - % 02d + % . 3s % 02d : % 02d : % 02d . % . 3ld " , 1900 + tm . tm_year , 1 + tm . tm_mon , tm . tm_mday , <nl> tm . tm_gmtoff / 3600 . 0 , tm . tm_hour , tm . tm_min , tm . tm_sec , _info - > timeval . tv_usec / 1000 ) ; <nl> # elif _WIN32 <nl> - snprintf ( temp_time , sizeof ( temp_time ) , " % d - % 02d - % 02d % + . 1f % 02d : % 02d : % 02d . % . 3d " , 1900 + tm . tm_year , 1 + tm . tm_mon , tm . tm_mday , <nl> + snprintf ( temp_time , sizeof ( temp_time ) , " % d - % 02d - % 02d + % . 3s % 02d : % 02d : % 02d . % . 3d " , 1900 + tm . tm_year , 1 + tm . tm_mon , tm . tm_mday , <nl> ( - _timezone ) / 3600 . 0 , tm . tm_hour , tm . tm_min , tm . tm_sec , _info - > timeval . tv_usec / 1000 ) ; <nl> # else <nl> - snprintf ( temp_time , sizeof ( temp_time ) , " % d - % 02d - % 02d % + . 1f % 02d : % 02d : % 02d . % . 3d " , 1900 + tm . tm_year , 1 + tm . tm_mon , tm . tm_mday , <nl> - tm . tm_gmtoff / 3600 . 0 , tm . tm_hour , tm . tm_min , tm . tm_sec , _info - > timeval . tv_usec / 1000 ) ; <nl> + snprintf ( temp_time , sizeof ( temp_time ) , " % d - % 02d - % 02d + % . 3s % 02d : % 02d : % 02d . % . 3d " , 1900 + tm . tm_year , 1 + tm . tm_mon , tm . tm_mday , <nl> + gmt . c_str ( ) , tm . tm_hour , tm . tm_min , tm . tm_sec , _info - > timeval . tv_usec / 1000 ) ; <nl> # endif <nl> } <nl> <nl> | crashfix : dtoa | Tencent/mars | 4f0ca060b8fedc877f2f0670d077955cf3ea3c70 | 2020-06-23T04:41:33Z |
mmm a / tensorflow / core / framework / tensor_key . h <nl> ppp b / tensorflow / core / framework / tensor_key . h <nl> class TensorKey : public Tensor { <nl> } <nl> <nl> friend bool operator ! = ( const TensorKey & t1 , const TensorKey & t2 ) { <nl> - return ! ( t1 = = t2 ) ; <nl> + return ! ( t1 = = t2 ) ; <nl> } <nl> <nl> - / / AbslHashValue ( ) function , needed for absl hashing . <nl> + / / Needed for absl hash function . <nl> template < typename H > <nl> friend H AbslHashValue ( H h , const TensorKey & k ) { <nl> - uint8 * d = ( uint8 * ) ( k . data ( ) ) ; <nl> + const uint8 * d = static_cast < uint8 * > ( k . data ( ) ) ; <nl> size_t s = k . AllocatedBytes ( ) ; <nl> std : : vector < uint8 > vec ; <nl> for ( int i = 0 ; i < s ; i + + ) { <nl> mmm a / tensorflow / core / kernels / map_kernels . cc <nl> ppp b / tensorflow / core / kernels / map_kernels . cc <nl> REGISTER_KERNEL_BUILDER ( Name ( " TensorMapErase " ) . Device ( DEVICE_CPU ) , <nl> <nl> REGISTER_KERNEL_BUILDER ( Name ( " TensorMapReplace " ) . Device ( DEVICE_CPU ) , <nl> TensorMapReplace ) ; <nl> - } <nl> \ No newline at end of file <nl> + } <nl> mmm a / tensorflow / core / kernels / map_kernels . h <nl> ppp b / tensorflow / core / kernels / map_kernels . h <nl> limitations under the License . <nl> # include " tensorflow / core / framework / variant_encode_decode . h " <nl> <nl> # include < iostream > <nl> - using namespace std ; <nl> <nl> namespace tensorflow { <nl> <nl> <nl> Status GetInputMap ( OpKernelContext * c , int index , const TensorMap * * map ) { <nl> if ( ! TensorShapeUtils : : IsScalar ( c - > input ( index ) . shape ( ) ) ) { <nl> - return errors : : InvalidArgument ( " Input map must be a scalar saw : " , <nl> + return errors : : InvalidArgument ( " Input map must be a scalar . Saw : " , <nl> c - > input ( index ) . shape ( ) . DebugString ( ) ) ; <nl> } <nl> const TensorMap * m = c - > input ( index ) . scalar < Variant > ( ) ( ) . get < TensorMap > ( ) ; <nl> Status GetInputMap ( OpKernelContext * c , int index , const TensorMap * * map ) { <nl> } <nl> <nl> <nl> + / / TODO ( kattian ) : change into templated function <nl> Status ForwardInputOrCreateNewMap ( OpKernelContext * c , int32 input_index , <nl> int32 output_index , <nl> const TensorMap & input_map , <nl> Status ForwardInputOrCreateNewMap ( OpKernelContext * c , int32 input_index , <nl> } <nl> <nl> / / If forwarding is not possible allocate a new output tensor and copy <nl> - / / the ` input_list ` to it . <nl> + / / the ` input_map ` to it . <nl> AllocatorAttributes attr ; <nl> attr . set_on_host ( true ) ; <nl> TF_RETURN_IF_ERROR ( <nl> class TensorMapErase : public OpKernel { <nl> DataType element_dtype_ ; <nl> } ; <nl> <nl> + <nl> class TensorMapReplace : public OpKernel { <nl> public : <nl> explicit TensorMapReplace ( OpKernelConstruction * c ) : OpKernel ( c ) { <nl> mmm a / tensorflow / core / kernels / tensor_map . cc <nl> ppp b / tensorflow / core / kernels / tensor_map . cc <nl> void TensorMap : : Encode ( VariantTensorData * data ) const { <nl> / / Metadata format : <nl> / / < element_dtype > < element_shape_proto > <nl> core : : PutVarint64 ( & metadata , static_cast < uint64 > ( element_dtype ) ) ; <nl> - core : : PutVarint64 ( & metadata , static_cast < uint64 > ( max_num_elements ) ) ; <nl> TensorShapeProto element_shape_proto ; <nl> element_shape . AsProto ( & element_shape_proto ) ; <nl> element_shape_proto . 
AppendToString ( & metadata ) ; <nl> static Status TensorMapDeviceCopy ( <nl> const UnaryVariantOpRegistry : : AsyncTensorDeviceCopyFn & copy ) { <nl> to - > element_shape = from . element_shape ; <nl> to - > element_dtype = from . element_dtype ; <nl> - to - > max_num_elements = from . max_num_elements ; <nl> for ( const std : : pair < TensorKey , Tensor > & p : from . tensors ( ) ) { <nl> - to - > tensors ( ) . emplace ( p ) ; / / TODO : check valid dtype <nl> - / / if ( t . dtype ( ) ! = DT_INVALID ) { <nl> - / / TF_RETURN_IF_ERROR ( copy ( p , & to - > tensors ( ) . back ( ) ) ) ; <nl> - / / } <nl> + if ( p . first . dtype ( ) ! = DT_INVALID & & p . second . dtype ( ) ! = DT_INVALID ) { <nl> + to - > tensors ( ) . emplace ( p . first , p . second ) ; <nl> + } <nl> } <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> - # define REGISTER_LIST_COPY ( DIRECTION ) \ <nl> + # define REGISTER_LIST_COPY ( DIRECTION ) \ <nl> INTERNAL_REGISTER_UNARY_VARIANT_DEVICE_COPY_FUNCTION ( TensorMap , DIRECTION , \ <nl> TensorMapDeviceCopy ) <nl> <nl> bool TensorMap : : Decode ( const VariantTensorData & data ) { <nl> <nl> while ( tensors_it ! = data . tensors ( ) . end ( ) ) <nl> { <nl> - / / should assert that tensors_it + 1 is also not the end <nl> if ( std : : next ( tensors_it ) = = data . tensors ( ) . end ( ) ) { <nl> return false ; <nl> } <nl> - TensorKey k = TensorKey ( * tensors_it ) ; / / copy inefficient ? <nl> - tensors ( ) . emplace ( k , * + + tensors_it ) ; <nl> - tensors_it + + ; <nl> + tensors ( ) . emplace ( tensors_it [ 0 ] , tensors_it [ 1 ] ) ; <nl> + tensors_it + = 2 ; <nl> } <nl> <nl> core : : GetVarint64 ( & iter , & scratch ) ; <nl> element_dtype = static_cast < DataType > ( scratch ) ; <nl> core : : GetVarint64 ( & iter , & scratch ) ; <nl> - max_num_elements = static_cast < int > ( scratch ) ; <nl> TensorShapeProto element_shape_proto ; <nl> element_shape_proto . ParseFromString ( string ( iter . data ( ) , iter . size ( ) ) ) ; <nl> element_shape = PartialTensorShape ( element_shape_proto ) ; <nl> mmm a / tensorflow / core / kernels / tensor_map . h <nl> ppp b / tensorflow / core / kernels / tensor_map . h <nl> namespace tensorflow { <nl> / / <nl> / / Do not create a true copy of the underlying container - but instead increment <nl> / / a reference count . Modifying b . tensors ( ) modifies a . tensors ( ) . In this way , <nl> - / / TensorList should be considered similar to the tf : : Tensor object . <nl> + / / TensorMap should be considered similar to the tf : : Tensor object . <nl> / / <nl> / / In order to get a copy of the underlying map , use the Copy method : <nl> / / <nl> - / / TensorList b = a . Copy ( ) ; <nl> + / / TensorMap b = a . Copy ( ) ; <nl> / / b . tensors ( ) . insert ( k , v ) ; / / This does not modify a . tensors ( ) . <nl> / / <nl> / / Note that this is not a deep copy : the memory locations of the underlying <nl> namespace tensorflow { <nl> / / in the original . To truly perform a deep copy , Device and Type - specific <nl> / / code needs to be applied to the underlying tensors as usual . 
<nl> / / <nl> - / / The most important implication of RefCounted TLs is that OpKernels <nl> - / / wishing to reuse TensorList inputs as outputs via context - > forward_input ( ) <nl> + / / The most important implication of RefCounted TensorMaps is that OpKernels <nl> + / / wishing to reuse TensorMap inputs as outputs via context - > forward_input ( ) <nl> / / need to perform an additional check on the refcount of the TensorList , <nl> / / to ensure aliasing can be performed safely . For example : <nl> / / <nl> class TensorMap { <nl> TensorMap ( const TensorMap & other ) <nl> : element_shape ( other . element_shape ) , <nl> element_dtype ( other . element_dtype ) , <nl> - max_num_elements ( other . max_num_elements ) , <nl> tensors_ ( other . tensors_ ) { <nl> tensors_ - > Ref ( ) ; <nl> } <nl> class TensorMap { <nl> TensorMap ( TensorMap & & rhs ) <nl> : element_shape ( std : : move ( rhs . element_shape ) ) , <nl> element_dtype ( rhs . element_dtype ) , <nl> - max_num_elements ( rhs . max_num_elements ) , <nl> tensors_ ( rhs . tensors_ ) { <nl> rhs . tensors_ = nullptr ; <nl> } <nl> class TensorMap { <nl> if ( this = = & rhs ) return * this ; <nl> element_shape = rhs . element_shape ; <nl> element_dtype = rhs . element_dtype ; <nl> - max_num_elements = rhs . max_num_elements ; <nl> tensors_ - > Unref ( ) ; <nl> tensors_ = rhs . tensors_ ; <nl> tensors_ - > Ref ( ) ; <nl> class TensorMap { <nl> if ( this = = & rhs ) return * this ; <nl> element_shape = rhs . element_shape ; <nl> element_dtype = rhs . element_dtype ; <nl> - max_num_elements = rhs . max_num_elements ; <nl> std : : swap ( tensors_ , rhs . tensors_ ) ; <nl> return * this ; <nl> } <nl> class TensorMap { <nl> <nl> DataType element_dtype ; <nl> <nl> - / / The maximum allowed size of ` tensors ` . Defaults to - 1 meaning that the size <nl> - / / of ` tensors ` is unbounded . <nl> - int max_num_elements = - 1 ; <nl> - <nl> / / Access to the underlying tensor container . <nl> absl : : flat_hash_map < TensorKey , Tensor > & tensors ( ) { return tensors_ - > values_ ; } <nl> const absl : : flat_hash_map < TensorKey , Tensor > & tensors ( ) const { return tensors_ - > values_ ; } <nl> class TensorMap { <nl> TensorMap out ; <nl> out . element_shape = element_shape ; <nl> out . element_dtype = element_dtype ; <nl> - out . max_num_elements = max_num_elements ; <nl> / / This performs a copy of the absl : : hashmap . <nl> out . tensors_ - > values_ = tensors_ - > values_ ; <nl> return out ; <nl> mmm a / tensorflow / core / kernels / tensor_map_test . cc <nl> ppp b / tensorflow / core / kernels / tensor_map_test . cc <nl> TEST ( TensorMapTest , Empty ) { <nl> TEST ( TensorKeyTest , Equal ) { <nl> TensorKey k1 = Tensor ( 15 ) ; <nl> TensorKey k2 = Tensor ( 15 ) ; <nl> - EXPECT_EQ ( k1 , k2 ) ; <nl> + EXPECT_EQ ( k1 , k2 ) ; <nl> <nl> TensorKey k3 = Tensor ( 15 ) ; <nl> TensorKey k4 = Tensor ( 37 ) ; <nl> - EXPECT_NE ( k3 , k4 ) ; <nl> + EXPECT_NE ( k3 , k4 ) ; <nl> } <nl> <nl> TEST ( TensorMapTest , Insert ) { <nl> - EXPECT_EQ ( 1 , 1 ) ; <nl> + EXPECT_EQ ( 1 , 1 ) ; <nl> TensorMap tm ; <nl> TensorKey k = Tensor ( 11 ) ; <nl> Tensor v = Tensor ( 22 ) ; <nl> - tm . insert ( k , v ) ; <nl> + tm . insert ( k , v ) ; <nl> absl : : flat_hash_map < TensorKey , Tensor > am ; <nl> - am . try_emplace ( k , v ) ; <nl> + am . try_emplace ( k , v ) ; <nl> <nl> absl : : flat_hash_map < TensorKey , Tensor > : : iterator map_it = tm . tensors ( ) . 
begin ( ) ; <nl> EXPECT_EQ ( map_it - > first , k ) ; <nl> TEST ( TensorMapTest , Lookup ) { <nl> TensorMap tm ; <nl> TensorKey k = Tensor ( 11 ) ; <nl> Tensor v = Tensor ( 22 ) ; <nl> - tm . insert ( k , v ) ; <nl> + tm . insert ( k , v ) ; <nl> absl : : flat_hash_map < TensorKey , Tensor > : : iterator map_it = tm . find ( k ) ; <nl> Tensor f = map_it - > second ; <nl> <nl> TEST ( TensorMapTest , Erase ) { <nl> TensorMap tm ; <nl> TensorKey k = Tensor ( 11 ) ; <nl> Tensor v = Tensor ( 22 ) ; <nl> - tm . insert ( k , v ) ; <nl> + tm . insert ( k , v ) ; <nl> tm . erase ( k ) ; <nl> EXPECT_EQ ( tm . find ( k ) , tm . tensors ( ) . end ( ) ) ; <nl> } <nl> TEST ( TensorMapTest , SameKeyInsert ) { <nl> TensorKey k = Tensor ( 11 ) ; <nl> Tensor v1 = Tensor ( 22 ) ; <nl> Tensor v2 = Tensor ( 23 ) ; <nl> - bool b1 = tm . insert ( k , v1 ) ; <nl> - bool b2 = tm . insert ( k , v2 ) ; <nl> + bool b1 = tm . insert ( k , v1 ) ; <nl> + bool b2 = tm . insert ( k , v2 ) ; <nl> EXPECT_EQ ( b1 , true ) ; <nl> EXPECT_EQ ( b2 , false ) ; <nl> absl : : flat_hash_map < TensorKey , Tensor > : : iterator map_it = tm . find ( k ) ; <nl> TEST ( TensorMapTest , Copy ) { <nl> TensorMap tm ; <nl> TensorKey k = Tensor ( 11 ) ; <nl> Tensor v = Tensor ( 22 ) ; <nl> - tm . insert ( k , v ) ; <nl> + tm . insert ( k , v ) ; <nl> TensorMap tmc = tm . Copy ( ) ; <nl> EXPECT_EQ ( tm . dtype ( ) , tmc . dtype ( ) ) ; <nl> EXPECT_EQ ( tm . size ( ) , tmc . size ( ) ) ; <nl> TEST ( TensorMapTest , EncodeDecode ) { <nl> TensorMap tm ; <nl> TensorKey k = Tensor ( 11 ) ; <nl> Tensor v = Tensor ( 22 ) ; <nl> - tm . insert ( k , v ) ; <nl> + tm . insert ( k , v ) ; <nl> VariantTensorData data ; <nl> tm . Encode ( & data ) ; <nl> TensorMap tmc ; <nl> mmm a / tensorflow / core / ops / map_ops . cc <nl> ppp b / tensorflow / core / ops / map_ops . cc <nl> limitations under the License . <nl> namespace tensorflow { <nl> namespace { <nl> <nl> - bool IsValidTensorMapHandleData ( <nl> - const std : : vector < shape_inference : : ShapeAndType > * handle_data ) { <nl> - std : : cout < < " is valid tensor map handle data " < < handle_data - > size ( ) < < std : : endl ; <nl> - return handle_data ! = nullptr & & handle_data - > size ( ) = = 1 ; <nl> - } <nl> - <nl> + / / TODO ( kttian ) : Support non - scalar values <nl> REGISTER_OP ( " EmptyTensorMap " ) <nl> . Output ( " handle : variant " ) <nl> . SetShapeFn ( [ ] ( shape_inference : : InferenceContext * c ) { <nl> mmm a / tensorflow / python / kernel_tests / BUILD <nl> ppp b / tensorflow / python / kernel_tests / BUILD <nl> cuda_py_test ( <nl> size = " small " , <nl> srcs = [ " map_ops_test . py " ] , <nl> grpc_enabled = True , <nl> - tags = [ <nl> - " noasan " , # TODO ( b / 155406705 ) : flaky <nl> - ] , <nl> deps = [ <nl> " / / tensorflow / python : array_ops " , <nl> " / / tensorflow / python : client_testlib " , <nl> mmm a / tensorflow / python / kernel_tests / map_ops_test . py <nl> ppp b / tensorflow / python / kernel_tests / map_ops_test . py <nl> <nl> # See the License for the specific language governing permissions and <nl> # limitations under the License . <nl> # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> - " " " Tests for zero_out ops . " " " <nl> + " " " Tests for TensorMap ops . " " " <nl> from __future__ import absolute_import <nl> from __future__ import division <nl> from __future__ import print_function <nl> <nl> - # import numpy as np <nl> from tensorflow . 
python . platform import test <nl> from absl . testing import parameterized <nl> from tensorflow . python . framework import test_util <nl> def testEmptyTensorMap ( self ) : <nl> def testTensorMapSize ( self ) : <nl> m = map_ops . empty_tensor_map ( ) <nl> s = map_ops . tensor_map_size ( m ) <nl> - self . assertAllClose ( s , 0 ) <nl> + self . assertAllEqual ( s , 0 ) <nl> <nl> def testTensorMapInsert ( self ) : <nl> m = map_ops . empty_tensor_map ( ) <nl> def testTensorMapInsert ( self ) : <nl> v = constant_op . constant ( 2 . 0 ) <nl> m = map_ops . tensor_map_insert ( m , k , v ) <nl> s = map_ops . tensor_map_size ( m ) <nl> - self . assertAllClose ( s , 1 ) <nl> + self . assertAllEqual ( s , 1 ) <nl> <nl> def testTensorMapLookup ( self ) : <nl> m = map_ops . empty_tensor_map ( ) <nl> def testInsertLookupGrad ( self ) : <nl> <nl> <nl> if __name__ = = ' __main__ ' : <nl> - test . main ( ) <nl> \ No newline at end of file <nl> + test . main ( ) <nl> mmm a / tensorflow / python / ops / map_ops . py <nl> ppp b / tensorflow / python / ops / map_ops . py <nl> <nl> # See the License for the specific language governing permissions and <nl> # limitations under the License . <nl> # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> - " " " Use zero_out ops in python . " " " <nl> + " " " Ops to manipulate hashmap of tensors . " " " <nl> <nl> from __future__ import absolute_import <nl> from __future__ import division <nl> <nl> from tensorflow . python . ops . gen_map_ops import * <nl> from tensorflow . python . framework import constant_op <nl> <nl> - <nl> - # zero_out_ops = load_library . load_op_library ( <nl> - # resource_loader . get_path_to_datafile ( ' _zero_out_ops . so ' ) ) <nl> - # zero_out = zero_out_ops . zero_out <nl> - <nl> ops . NotDifferentiable ( " EmptyTensorMap " ) <nl> <nl> def empty_tensor_map ( ) : <nl> | most pr edits | tensorflow/tensorflow | 395380e82d2b49d20b8cb46eaef98fc640a2cb58 | 2020-07-14T04:02:58Z |
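The tensorflow/tensorflow record above depends on TensorKey exposing an AbslHashValue friend so that TensorMap can keep its entries in an absl::flat_hash_map. Below is a hedged, self-contained sketch of that friend-function pattern; ByteKey is a made-up stand-in for TensorKey that hashes its byte buffer, and it is not TensorFlow code.

#include <cstdint>
#include <vector>

#include "absl/container/flat_hash_map.h"
#include "absl/hash/hash.h"

struct ByteKey {
  std::vector<uint8_t> bytes;

  friend bool operator==(const ByteKey &a, const ByteKey &b) {
    return a.bytes == b.bytes;
  }
  friend bool operator!=(const ByteKey &a, const ByteKey &b) { return !(a == b); }

  // Picked up via argument-dependent lookup by absl::Hash, which is what lets
  // ByteKey serve as a flat_hash_map key.
  template <typename H>
  friend H AbslHashValue(H h, const ByteKey &k) {
    return H::combine(std::move(h), k.bytes);
  }
};

// Usage:
//   absl::flat_hash_map<ByteKey, int> m;
//   m[ByteKey{{1, 2, 3}}] = 7;

Hashing the whole byte vector at once (rather than byte-by-byte as in the diff) keeps the sketch short while exercising the same AbslHashValue extension point.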
mmm a / src / wasm / module - compiler . cc <nl> ppp b / src / wasm / module - compiler . cc <nl> const wasm : : WasmCode * LazyCompilationOrchestrator : : CompileFunction ( <nl> static_cast < uint32_t > ( func_index ) ) ; <nl> if ( existing_code ! = nullptr & & <nl> existing_code - > kind ( ) = = wasm : : WasmCode : : Function ) { <nl> + TRACE_LAZY ( " Function % d already compiled . \ n " , func_index ) ; <nl> return existing_code ; <nl> } <nl> } else { <nl> if ( Code : : cast ( compiled_module - > code_table ( ) - > get ( func_index ) ) - > kind ( ) = = <nl> Code : : WASM_FUNCTION ) { <nl> + TRACE_LAZY ( " Function % d already compiled . \ n " , func_index ) ; <nl> return nullptr ; <nl> } <nl> } <nl> const wasm : : WasmCode * LazyCompilationOrchestrator : : CompileFunction ( <nl> return ! code_wrapper . IsCodeObject ( ) ? code_wrapper . GetWasmCode ( ) : nullptr ; <nl> } <nl> <nl> + namespace { <nl> + <nl> int AdvanceSourcePositionTableIterator ( SourcePositionTableIterator & iterator , <nl> int offset ) { <nl> DCHECK ( ! iterator . done ( ) ) ; <nl> int AdvanceSourcePositionTableIterator ( SourcePositionTableIterator & iterator , <nl> return byte_pos ; <nl> } <nl> <nl> + Code * ExtractWasmToWasmCallee ( Code * wasm_to_wasm ) { <nl> + DCHECK_EQ ( Code : : WASM_TO_WASM_FUNCTION , wasm_to_wasm - > kind ( ) ) ; <nl> + / / Find the one code target in this wrapper . <nl> + RelocIterator it ( wasm_to_wasm , RelocInfo : : kCodeTargetMask ) ; <nl> + DCHECK ( ! it . done ( ) ) ; <nl> + Code * callee = Code : : GetCodeFromTargetAddress ( it . rinfo ( ) - > target_address ( ) ) ; <nl> + # ifdef DEBUG <nl> + it . next ( ) ; <nl> + DCHECK ( it . done ( ) ) ; <nl> + # endif <nl> + return callee ; <nl> + } <nl> + <nl> + void PatchWasmToWasmWrapper ( Isolate * isolate , Code * wasm_to_wasm , <nl> + Code * new_target ) { <nl> + DCHECK_EQ ( Code : : WASM_TO_WASM_FUNCTION , wasm_to_wasm - > kind ( ) ) ; <nl> + / / Find the one code target in this wrapper . <nl> + RelocIterator it ( wasm_to_wasm , RelocInfo : : kCodeTargetMask ) ; <nl> + DCHECK ( ! it . done ( ) ) ; <nl> + DCHECK_EQ ( Builtins : : kWasmCompileLazy , <nl> + Code : : GetCodeFromTargetAddress ( it . rinfo ( ) - > target_address ( ) ) <nl> + - > builtin_index ( ) ) ; <nl> + it . rinfo ( ) - > set_target_address ( isolate , new_target - > instruction_start ( ) ) ; <nl> + # ifdef DEBUG <nl> + it . next ( ) ; <nl> + DCHECK ( it . done ( ) ) ; <nl> + # endif <nl> + } <nl> + <nl> + } / / namespace <nl> + <nl> Handle < Code > LazyCompilationOrchestrator : : CompileLazyOnGCHeap ( <nl> Isolate * isolate , Handle < WasmInstanceObject > instance , Handle < Code > caller , <nl> int call_offset , int exported_func_index , bool patch_caller ) { <nl> Handle < Code > LazyCompilationOrchestrator : : CompileLazyOnGCHeap ( <nl> " patch caller : % d ) . \ n " , <nl> exported_func_index , call_offset , is_js_to_wasm , patch_caller ) ; <nl> <nl> + / / If this lazy compile stub is being called through a wasm - to - wasm wrapper , <nl> + / / remember that code object . <nl> + Handle < Code > wasm_to_wasm_callee ; <nl> + <nl> if ( is_js_to_wasm ) { <nl> non_compiled_functions . push_back ( { 0 , exported_func_index } ) ; <nl> } else if ( patch_caller ) { <nl> Handle < Code > LazyCompilationOrchestrator : : CompileLazyOnGCHeap ( <nl> module_bytes - > GetChars ( ) + caller_module - > module ( ) <nl> - > functions [ caller_func_info . func_index ] <nl> . code . 
offset ( ) ; <nl> + Code * lazy_callee = nullptr ; <nl> for ( RelocIterator it ( * caller , RelocInfo : : kCodeTargetMask ) ; ! it . done ( ) ; <nl> it . next ( ) ) { <nl> Code * callee = <nl> Code : : GetCodeFromTargetAddress ( it . rinfo ( ) - > target_address ( ) ) ; <nl> - if ( callee - > builtin_index ( ) ! = Builtins : : kWasmCompileLazy ) continue ; <nl> / / TODO ( clemensh ) : Introduce safe_cast < T , bool > which ( D ) CHECKS <nl> / / ( depending on the bool ) against limits of T and then static_casts . <nl> size_t offset_l = it . rinfo ( ) - > pc ( ) - caller - > instruction_start ( ) ; <nl> DCHECK_GE ( kMaxInt , offset_l ) ; <nl> int offset = static_cast < int > ( offset_l ) ; <nl> + / / Call offset points to the instruction after the call . Remember the last <nl> + / / called code object before that offset . <nl> + if ( offset < call_offset ) lazy_callee = callee ; <nl> + if ( callee - > builtin_index ( ) ! = Builtins : : kWasmCompileLazy ) continue ; <nl> int byte_pos = <nl> AdvanceSourcePositionTableIterator ( source_pos_iterator , offset ) ; <nl> int called_func_index = <nl> ExtractDirectCallIndex ( decoder , func_bytes + byte_pos ) ; <nl> non_compiled_functions . push_back ( { offset , called_func_index } ) ; <nl> - / / Call offset one instruction after the call . Remember the last called <nl> - / / function before that offset . <nl> if ( offset < call_offset ) func_to_return_idx = called_func_index ; <nl> } <nl> + TRACE_LAZY ( " Found % zu non - compiled functions in caller . \ n " , <nl> + non_compiled_functions . size ( ) ) ; <nl> + DCHECK_NOT_NULL ( lazy_callee ) ; <nl> + if ( lazy_callee - > kind ( ) = = Code : : WASM_TO_WASM_FUNCTION ) { <nl> + TRACE_LAZY ( " Callee is a wasm - to - wasm . \ n " ) ; <nl> + wasm_to_wasm_callee = handle ( lazy_callee , isolate ) ; <nl> + / / If we call a wasm - to - wasm wrapper , then this wrapper actually <nl> + / / tail - called the lazy compile stub . Find it in the wrapper . <nl> + lazy_callee = ExtractWasmToWasmCallee ( lazy_callee ) ; <nl> + / / This lazy compile stub belongs to the instance that was passed . <nl> + DCHECK_EQ ( * instance , <nl> + * GetWasmFunctionInfo ( isolate , handle ( lazy_callee , isolate ) ) <nl> + . instance . ToHandleChecked ( ) ) ; <nl> + DCHECK_LE ( 2 , lazy_callee - > deoptimization_data ( ) - > length ( ) ) ; <nl> + func_to_return_idx = <nl> + Smi : : ToInt ( lazy_callee - > deoptimization_data ( ) - > get ( 1 ) ) ; <nl> + } <nl> + DCHECK_EQ ( Builtins : : kWasmCompileLazy , lazy_callee - > builtin_index ( ) ) ; <nl> + / / There must be at least one call to patch ( the one that lead to calling <nl> + / / the lazy compile stub ) . <nl> + DCHECK ( ! non_compiled_functions . empty ( ) | | ! wasm_to_wasm_callee . is_null ( ) ) ; <nl> } <nl> <nl> TRACE_LAZY ( " Compiling function % d . \ n " , func_to_return_idx ) ; <nl> Handle < Code > LazyCompilationOrchestrator : : CompileLazyOnGCHeap ( <nl> / / background , wait for func_to_return_idx . <nl> CompileFunction ( isolate , instance , func_to_return_idx ) ; <nl> <nl> + Handle < Code > compiled_function ( <nl> + Code : : cast ( compiled_module - > code_table ( ) - > get ( func_to_return_idx ) ) , <nl> + isolate ) ; <nl> + DCHECK_EQ ( Code : : WASM_FUNCTION , compiled_function - > kind ( ) ) ; <nl> + <nl> if ( is_js_to_wasm | | patch_caller ) { <nl> DisallowHeapAllocation no_gc ; <nl> / / TODO ( 6792 ) : No longer needed once WebAssembly code is off heap . <nl> Handle < Code > LazyCompilationOrchestrator : : CompileLazyOnGCHeap ( <nl> it . 
next ( ) ) { <nl> Code * callee = <nl> Code : : GetCodeFromTargetAddress ( it . rinfo ( ) - > target_address ( ) ) ; <nl> - if ( callee - > builtin_index ( ) ! = Builtins : : kWasmCompileLazy ) continue ; <nl> + if ( callee - > builtin_index ( ) ! = Builtins : : kWasmCompileLazy ) { <nl> + / / If the callee is the wasm - to - wasm wrapper triggering this lazy <nl> + / / compilation , patch it . <nl> + if ( ! wasm_to_wasm_callee . is_null ( ) & & callee = = * wasm_to_wasm_callee ) { <nl> + TRACE_LAZY ( " Patching wasm - to - wasm wrapper . \ n " ) ; <nl> + PatchWasmToWasmWrapper ( isolate , callee , * compiled_function ) ; <nl> + + + patched ; <nl> + } <nl> + continue ; <nl> + } <nl> DCHECK_GT ( non_compiled_functions . size ( ) , idx ) ; <nl> int called_func_index = non_compiled_functions [ idx ] . func_index ; <nl> / / Check that the callee agrees with our assumed called_func_index . <nl> Handle < Code > LazyCompilationOrchestrator : : CompileLazyOnGCHeap ( <nl> } <nl> DCHECK_EQ ( non_compiled_functions . size ( ) , idx ) ; <nl> TRACE_LAZY ( " Patched % d location ( s ) in the caller . \ n " , patched ) ; <nl> - / / TODO ( clemensh , crbug . com / 788441 ) : Fix patching issues , enable this check . <nl> - / / DCHECK_LT ( 0 , patched ) ; <nl> + DCHECK_LT ( 0 , patched ) ; <nl> USE ( patched ) ; <nl> } <nl> <nl> - Code * ret = <nl> - Code : : cast ( compiled_module - > code_table ( ) - > get ( func_to_return_idx ) ) ; <nl> - DCHECK_EQ ( Code : : WASM_FUNCTION , ret - > kind ( ) ) ; <nl> - return handle ( ret , isolate ) ; <nl> + return compiled_function ; <nl> } <nl> <nl> const wasm : : WasmCode * LazyCompilationOrchestrator : : CompileFromJsToWasm ( <nl> | [ wasm ] Lazy - compilation : Fix patching of wasm - to - wasm wrappers | v8/v8 | efed6ba94a722c919d23241117fc4a44f2d06d8a | 2017-11-29T11:53:26Z |
mmm a / Makefile <nl> ppp b / Makefile <nl> endif <nl> <nl> <nl> LIBGOOGLE_BENCHMARK_SRC = \ <nl> - third_party / google_benchmark / src / benchmark . cc \ <nl> - third_party / google_benchmark / src / benchmark_register . cc \ <nl> - third_party / google_benchmark / src / colorprint . cc \ <nl> - third_party / google_benchmark / src / commandlineflags . cc \ <nl> - third_party / google_benchmark / src / complexity . cc \ <nl> - third_party / google_benchmark / src / console_reporter . cc \ <nl> - third_party / google_benchmark / src / csv_reporter . cc \ <nl> - third_party / google_benchmark / src / json_reporter . cc \ <nl> - third_party / google_benchmark / src / reporter . cc \ <nl> - third_party / google_benchmark / src / sleep . cc \ <nl> - third_party / google_benchmark / src / string_util . cc \ <nl> - third_party / google_benchmark / src / sysinfo . cc \ <nl> - third_party / google_benchmark / src / timers . cc \ <nl> <nl> PUBLIC_HEADERS_CXX + = \ <nl> <nl> mmm a / tools / fuzzer / runners / ssl_server_fuzzer . sh <nl> ppp b / tools / fuzzer / runners / ssl_server_fuzzer . sh <nl> <nl> <nl> flags = " - max_total_time = $ runtime - artifact_prefix = fuzzer_output / - max_len = 2048 - timeout = 120 " <nl> <nl> + <nl> if [ " $ jobs " ! = " 1 " ] <nl> then <nl> flags = " - jobs = $ jobs - workers = $ jobs $ flags " <nl> mmm a / tools / run_tests / sources_and_headers . json <nl> ppp b / tools / run_tests / sources_and_headers . json <nl> <nl> } , <nl> { <nl> " deps " : [ ] , <nl> - " headers " : [ <nl> - " third_party / google_benchmark / include / benchmark / benchmark . h " , <nl> - " third_party / google_benchmark / include / benchmark / benchmark_api . h " , <nl> - " third_party / google_benchmark / include / benchmark / macros . h " , <nl> - " third_party / google_benchmark / include / benchmark / reporter . h " , <nl> - " third_party / google_benchmark / src / arraysize . h " , <nl> - " third_party / google_benchmark / src / benchmark_api_internal . h " , <nl> - " third_party / google_benchmark / src / check . h " , <nl> - " third_party / google_benchmark / src / colorprint . h " , <nl> - " third_party / google_benchmark / src / commandlineflags . h " , <nl> - " third_party / google_benchmark / src / complexity . h " , <nl> - " third_party / google_benchmark / src / cycleclock . h " , <nl> - " third_party / google_benchmark / src / internal_macros . h " , <nl> - " third_party / google_benchmark / src / log . h " , <nl> - " third_party / google_benchmark / src / mutex . h " , <nl> - " third_party / google_benchmark / src / re . h " , <nl> - " third_party / google_benchmark / src / sleep . h " , <nl> - " third_party / google_benchmark / src / stat . h " , <nl> - " third_party / google_benchmark / src / string_util . h " , <nl> - " third_party / google_benchmark / src / sysinfo . h " , <nl> - " third_party / google_benchmark / src / timers . h " <nl> - ] , <nl> + " headers " : [ ] , <nl> " is_filegroup " : false , <nl> " language " : " c + + " , <nl> " name " : " google_benchmark " , <nl> mmm a / tools / run_tests / tests . json <nl> ppp b / tools / run_tests / tests . json <nl> <nl> ] , <nl> " uses_polling " : false <nl> } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 05cda1e986096f42698ee2d86ab0a4a3f6a6690b " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 
1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 0f65ef472e8308561c77ada56afd4de5932d950a " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 128915cb83e66a736f8a1833c8901eccb81e0656 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 134d3a5e7a1609a583f6282c48ef9b871e0fdc15 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 13fccd43a6b52c62851ea24e8be4f8cfe6c0103d " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 171bc6b14b94c72435d2da2e31e9682f12a3f13c " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 17dea38d21e9282ecd062466cf287ecf5b30c1cf " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 
1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 1961eb9d4dd4bf21cbcd9c45a17b1d025eb0d200 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 226ebb5cc16ac42fae3be273de533ac79759ae01 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 2333cf428cb1e2976679ac84e64873bf76c6595e " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 2885553a9e6829265d5f44ea4e24fcf7d6513436 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 28bce9a7cb21f3232239b9b71ef568137bf801f4 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 2a0286615be426d1e7fd5894aadf1a503df05a0a " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 
1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 2b4a1f3ebe223d91c042a5e86aff31e460f6cc3f " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 2cfbe809bcf53160ecc0109b2df01a8696a226fd " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 2f770de96db36ef9a71f7eb09b2e8695ac1f0655 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 30ba8ad171657470b5312232387b7da70c387219 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 36548a97a8b847e17a77d1e646c6eb5ec001d84a " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 3c442804f73cfe826a609d97c12ef87852742883 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 
1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 3dad758011b16a3771376f9af91242953be3e47b " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 43b56a1adffaf2c3c994679bf2b6fe6414e13df4 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 4446c60ab89c34e5ccc26bec18d7e7d21fe5aec1 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 44924b3866956d0668ec65750c3663279ac84a36 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 44e4370a4eafde61f8e7dc7e4542e0ad5ecf5253 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 44fb8c02117ebd75c97e517624c0abc9f9a76aa7 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 
1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 45d76dd1f0a29fce3f8d289b5177263871eb3f83 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 4fcbf18c4135352539eaf445c26f2f8a5da9b68e " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 508ca86c6f4e6ecc30c252ccf74e78256a893b17 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 515d240b860fcd1e77d4a5af291aa4a667d9b609 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 528e9738d5016f01cf59d74f20a8aa3f341ad89e " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 54b636a2bb66ccf8247b53ff76a6400e9f1355d0 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 
1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 5cc337c4b33cd703cd354804530f5b72684260d0 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 5ce3ec59111bf328044e41fcf26b3bc542df527e " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 5eaac270339f19cfce4eafa2e69d62adf100ad1c " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 6129382083353687a5c3acb3d4274b811227bf3d " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 616a25f7c4557ef9eb33d4367c6884abc336802d " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 6214e558afbfdb8451e49b62619896492f1a8972 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 
1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 6c6a6b2ce74acf8a8b90fc0b268ecc7dc992cd60 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 6f72309e1b23b824e9bbb9abf74a014b78be038e " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 7137ffad853e4dd76c6d6490f37b36e20de7ede0 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 71c9356b6b6c5a3a07033d0fa4ed417fa74a77e5 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 7414938799ed61282f41d5fb1474751ca52b2682 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 75e853eb2ff7e78efb327e114b39baff5a1dd5bb " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 
1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 7607c92952b4429e350260d8074c3c460468fd49 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 7677373c082fee56d8cdde009d9db5b117a4c8de " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 76abde5c970743f9fb8bc781e46c431dee2aa104 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 77e0b4e2066853df9d32d475b8788e3d7d19329b " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 78e66840048ddc3c75e0e4abffbb3109af0d750e " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 7dc620eb45764390c7b106362fc4922227415407 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 
1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 82ca7a52d589e9dbae37ebf1c59fac7ad876eb7c " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 8997d472f73eec84fea712638abd762818ec92ec " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 933bc3fdc56718d7ac0486c26eecddb6db1c5ba2 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 94431bda60ddc175cf86273ddc07cb41ecf45fa1 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 94c4272b2bb4fa9178eb4ae7dcf4b796fdcb22ac " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 9551c32a794250fb425005d8faf4bd24475acd41 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 
1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 9677da7c82f18cec3e0ed2e78aadd6e590271a52 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 9c2e6291aff608f3f5307a7c80db6b17107f0575 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 9c39dc04c7414ca0bb64fb942422bebe83ed8e8f " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 9cc8547d183a4f2ab7022b36376ca4a6230726c6 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 9e19e5e77789c34f99bbe1e6de198610d6765806 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / 9f0853ccf6912df9fba2d5fc3a1ddece41c377ac " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 
1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / a6d521e501322f052df5a81ee622e0e4942ddcda " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / a9dc20f09890403be510357a7665a8f0db2468a4 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / ac1c60970910880558ae7a2ca2e155cfd7772e05 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / afe36d0187a155fc6e4e5c055c0ed0f3802cf696 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / b12b5fc39edc5407b4a525c414ff6b5e116eee05 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / b5b088c6e3a96f88119a940874ab04cb954797ae " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 
1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / b65ad77a43ede3394ba714238829860c4ef4bc9f " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / b6f265cad9d47e2ccd17a73a6d309d8898dc5428 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / b7ca5868bca7ae7d1952f44ca966218b26fb7207 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / bd20809eacabadb9bcc77d31e42d3359117b03f2 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / c328623c4ce12505a54cf1a7a1606e1db36e870d " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / c823e8bd5526d9fe7d51319737f51bd18bdd75e8 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 
1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / c9394095d86ff36b69d90f7122592bf51cafe7dd " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / cdbf0d2ae953bec07a67b7152785b548e55f85a4 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / cef06f9c35ee338998703555847d70c26bfc9474 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / cfb40ab8eb7031e978bed2418cdc2f0b8a8d8ec7 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / d30840c3f48f11179ef976ada30477045c6d1e98 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / d5afef69141edc7f4911243cf2deb19c912999cf " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 
1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / d5cf71396e1a04da1a7ec266957ffd2de29d6a57 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / d6dd8a2b085db5d33ef24b23502293ce1ce906a3 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / d7676dcd39b7c7cfaac513a98b56fe4ac8ea27d8 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / d8c9e9ef14abc23b36cb493283ba3e2812d9e537 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / d9edb0aa5d2fe4af26ac861770c1530a4075f919 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / da1b52041957334b9ea1371bd2993013118bc82d " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 
1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / dca7861424c8f92d3720de5c4488454cde1c39df " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / df3755e257d024ef8ab08f6d5cefcf28148ea4b1 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / e360a49faefb87d671edb99e777f528f52cac9ae " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / e5e789605744d47e5a5d433bb04db1b413bc91a8 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / e6e44a6aa0ece409450c85e43d02c57e338ce1da " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / e8ebd49ee98cf57ca7eb35b6e96ef8866270aac6 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 
1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / ea0645f46ccd233337a8389b6118db5b0289f040 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / f6f7687df6b7056d3c819c03c9268e22a956b6b5 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / f725caa73aa9467c5e934c49780fc409b36b251c " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / f8d3326a860091edd4d60725f96f429d13f3abe6 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / f9261344b4049e90e88b5af784dd29b938c5c838 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / f95b97ece3b46815204a8e6d6e94f92ec40a9672 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 
1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / f97db29497e4e3225016a6ced837e20a13622f16 " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> + { <nl> + " args " : [ <nl> + " test / core / security / corpus / ssl_server_corpus / ff1a900b12f19772f9a86bd5f560a754cdb18d1a " <nl> + ] , <nl> + " ci_platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " cpu_cost " : 0 . 1 , <nl> + " exclude_configs " : [ <nl> + " tsan " <nl> + ] , <nl> + " exclude_iomgrs " : [ <nl> + " uv " <nl> + ] , <nl> + " flaky " : false , <nl> + " language " : " c " , <nl> + " name " : " ssl_server_fuzzer_one_entry " , <nl> + " platforms " : [ <nl> + " linux " <nl> + ] , <nl> + " uses_polling " : false <nl> + } , <nl> { <nl> " args " : [ <nl> " test / core / client_channel / uri_corpus / 02d156dc5e6f2c11c90c2e06fcee04adf036a342 " <nl> mmm a / vsprojects / vcxproj / google_benchmark / google_benchmark . vcxproj <nl> ppp b / vsprojects / vcxproj / google_benchmark / google_benchmark . vcxproj <nl> <nl> < / ItemDefinitionGroup > <nl> <nl> < ItemGroup > <nl> - < ClInclude Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ include \ benchmark \ benchmark . h " / > <nl> - < ClInclude Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ include \ benchmark \ benchmark_api . h " / > <nl> - < ClInclude Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ include \ benchmark \ macros . h " / > <nl> - < ClInclude Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ include \ benchmark \ reporter . h " / > <nl> - < ClInclude Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ arraysize . h " / > <nl> - < ClInclude Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ benchmark_api_internal . h " / > <nl> - < ClInclude Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ check . h " / > <nl> - < ClInclude Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ colorprint . h " / > <nl> - < ClInclude Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ commandlineflags . h " / > <nl> - < ClInclude Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ complexity . h " / > <nl> - < ClInclude Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ cycleclock . h " / > <nl> - < ClInclude Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ internal_macros . h " / > <nl> - < ClInclude Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ log . h " / > <nl> - < ClInclude Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ mutex . h " / > <nl> - < ClInclude Include = " $ ( SolutionDir ) \ . . 
\ third_party \ google_benchmark \ src \ re . h " / > <nl> - < ClInclude Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ sleep . h " / > <nl> - < ClInclude Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ stat . h " / > <nl> - < ClInclude Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ string_util . h " / > <nl> - < ClInclude Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ sysinfo . h " / > <nl> - < ClInclude Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ timers . h " / > <nl> - < / ItemGroup > <nl> - < ItemGroup > <nl> - < ClCompile Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ benchmark . cc " > <nl> - < / ClCompile > <nl> - < ClCompile Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ benchmark_register . cc " > <nl> - < / ClCompile > <nl> - < ClCompile Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ colorprint . cc " > <nl> - < / ClCompile > <nl> - < ClCompile Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ commandlineflags . cc " > <nl> - < / ClCompile > <nl> - < ClCompile Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ complexity . cc " > <nl> - < / ClCompile > <nl> - < ClCompile Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ console_reporter . cc " > <nl> - < / ClCompile > <nl> - < ClCompile Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ csv_reporter . cc " > <nl> - < / ClCompile > <nl> - < ClCompile Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ json_reporter . cc " > <nl> - < / ClCompile > <nl> - < ClCompile Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ reporter . cc " > <nl> - < / ClCompile > <nl> - < ClCompile Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ sleep . cc " > <nl> - < / ClCompile > <nl> - < ClCompile Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ string_util . cc " > <nl> - < / ClCompile > <nl> - < ClCompile Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ sysinfo . cc " > <nl> - < / ClCompile > <nl> - < ClCompile Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ timers . cc " > <nl> + < ClCompile Include = " $ ( SolutionDir ) \ . . \ vsprojects \ dummy . c " > <nl> < / ClCompile > <nl> < / ItemGroup > <nl> < Import Project = " $ ( VCTargetsPath ) \ Microsoft . Cpp . targets " / > <nl> mmm a / vsprojects / vcxproj / google_benchmark / google_benchmark . vcxproj . filters <nl> ppp b / vsprojects / vcxproj / google_benchmark / google_benchmark . vcxproj . filters <nl> <nl> < ? xml version = " 1 . 0 " encoding = " utf - 8 " ? > <nl> < Project ToolsVersion = " 4 . 0 " xmlns = " http : / / schemas . microsoft . com / developer / msbuild / 2003 " > <nl> - < ItemGroup > <nl> - < ClCompile Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ benchmark . cc " > <nl> - < Filter > third_party \ google_benchmark \ src < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ benchmark_register . cc " > <nl> - < Filter > third_party \ google_benchmark \ src < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " $ ( SolutionDir ) \ . . 
\ third_party \ google_benchmark \ src \ colorprint . cc " > <nl> - < Filter > third_party \ google_benchmark \ src < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ commandlineflags . cc " > <nl> - < Filter > third_party \ google_benchmark \ src < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ complexity . cc " > <nl> - < Filter > third_party \ google_benchmark \ src < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ console_reporter . cc " > <nl> - < Filter > third_party \ google_benchmark \ src < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ csv_reporter . cc " > <nl> - < Filter > third_party \ google_benchmark \ src < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ json_reporter . cc " > <nl> - < Filter > third_party \ google_benchmark \ src < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ reporter . cc " > <nl> - < Filter > third_party \ google_benchmark \ src < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ sleep . cc " > <nl> - < Filter > third_party \ google_benchmark \ src < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ string_util . cc " > <nl> - < Filter > third_party \ google_benchmark \ src < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ sysinfo . cc " > <nl> - < Filter > third_party \ google_benchmark \ src < / Filter > <nl> - < / ClCompile > <nl> - < ClCompile Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ timers . cc " > <nl> - < Filter > third_party \ google_benchmark \ src < / Filter > <nl> - < / ClCompile > <nl> - < / ItemGroup > <nl> - < ItemGroup > <nl> - < ClInclude Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ include \ benchmark \ benchmark . h " > <nl> - < Filter > third_party \ google_benchmark \ include \ benchmark < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ include \ benchmark \ benchmark_api . h " > <nl> - < Filter > third_party \ google_benchmark \ include \ benchmark < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ include \ benchmark \ macros . h " > <nl> - < Filter > third_party \ google_benchmark \ include \ benchmark < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ include \ benchmark \ reporter . h " > <nl> - < Filter > third_party \ google_benchmark \ include \ benchmark < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ arraysize . h " > <nl> - < Filter > third_party \ google_benchmark \ src < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ benchmark_api_internal . 
h " > <nl> - < Filter > third_party \ google_benchmark \ src < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ check . h " > <nl> - < Filter > third_party \ google_benchmark \ src < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ colorprint . h " > <nl> - < Filter > third_party \ google_benchmark \ src < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ commandlineflags . h " > <nl> - < Filter > third_party \ google_benchmark \ src < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ complexity . h " > <nl> - < Filter > third_party \ google_benchmark \ src < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ cycleclock . h " > <nl> - < Filter > third_party \ google_benchmark \ src < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ internal_macros . h " > <nl> - < Filter > third_party \ google_benchmark \ src < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ log . h " > <nl> - < Filter > third_party \ google_benchmark \ src < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ mutex . h " > <nl> - < Filter > third_party \ google_benchmark \ src < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ re . h " > <nl> - < Filter > third_party \ google_benchmark \ src < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ sleep . h " > <nl> - < Filter > third_party \ google_benchmark \ src < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ stat . h " > <nl> - < Filter > third_party \ google_benchmark \ src < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ string_util . h " > <nl> - < Filter > third_party \ google_benchmark \ src < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ sysinfo . h " > <nl> - < Filter > third_party \ google_benchmark \ src < / Filter > <nl> - < / ClInclude > <nl> - < ClInclude Include = " $ ( SolutionDir ) \ . . \ third_party \ google_benchmark \ src \ timers . 
h " > <nl> - < Filter > third_party \ google_benchmark \ src < / Filter > <nl> - < / ClInclude > <nl> - < / ItemGroup > <nl> <nl> < ItemGroup > <nl> - < Filter Include = " third_party " > <nl> - < UniqueIdentifier > { 7458b63d - 7ba4 - 103d - 2bed - 3e3ad30d8237 } < / UniqueIdentifier > <nl> - < / Filter > <nl> - < Filter Include = " third_party \ google_benchmark " > <nl> - < UniqueIdentifier > { 54a154e8 - 669b - a7c1 - 9b6e - bd1aab2f86e3 } < / UniqueIdentifier > <nl> - < / Filter > <nl> - < Filter Include = " third_party \ google_benchmark \ include " > <nl> - < UniqueIdentifier > { f54c3cb1 - ec20 - a651 - 6956 - 78379b51e1a5 } < / UniqueIdentifier > <nl> - < / Filter > <nl> - < Filter Include = " third_party \ google_benchmark \ include \ benchmark " > <nl> - < UniqueIdentifier > { 0483a457 - 8050 - 4565 - bc15 - 09695bf7b822 } < / UniqueIdentifier > <nl> - < / Filter > <nl> - < Filter Include = " third_party \ google_benchmark \ src " > <nl> - < UniqueIdentifier > { c39ff2d1 - 691e - 4614 - 4d75 - 4bc20db05e09 } < / UniqueIdentifier > <nl> - < / Filter > <nl> < / ItemGroup > <nl> < / Project > <nl> <nl> | regenerate projects | grpc/grpc | 6ab2ccd4372df54c2efb6741529b65c190ba0573 | 2016-11-07T18:11:34Z |