| diff (stringlengths 41–2.03M) | msg (stringlengths 1–1.5k, ⌀) | repo (stringlengths 5–40) | sha (stringlengths 40–40) | time (stringlengths 20–20) |
---|---|---|---|---|
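The rows below are easiest to inspect programmatically. Here is a minimal sketch of reading a dump with this schema through the Hugging Face `datasets` library; the dataset id `user/commit-diffs` is a placeholder (the real id is not given in this dump), and the `<nl>` replacement simply undoes the newline encoding visible in the `diff` column below.

```python
# Minimal sketch, not the canonical loader: "user/commit-diffs" is a
# hypothetical dataset id standing in for wherever this dump came from.
from datasets import load_dataset

ds = load_dataset("user/commit-diffs", split="train", streaming=True)

for row in ds.take(1):
    # The five columns from the header table above.
    print(row["repo"], row["sha"], row["time"])
    print(row["msg"])  # commit message; may be null (the ⌀ in the schema)
    # The diff column stores " <nl> " in place of newlines; undo that to
    # recover a readable unified diff.
    print(row["diff"].replace(" <nl> ", "\n")[:400])
```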
--- a/third_party/gpus/rocm_configure.bzl
+++ b/third_party/gpus/rocm_configure.bzl
 def _find_libs(repository_ctx, rocm_config, bash_bin):
     libs_paths = [
         (name, _rocm_lib_paths(repository_ctx, name, path))
         for name, path in [
-            ("hip_hcc", rocm_config.rocm_toolkit_path),
+            ("hip_hcc", rocm_config.rocm_toolkit_path + "/hip"),
             ("rocblas", rocm_config.rocm_toolkit_path + "/rocblas"),
             ("rocfft", rocm_config.rocm_toolkit_path + "/rocfft"),
             ("hiprand", rocm_config.rocm_toolkit_path + "/hiprand"),
| Merge pull request from acxz:fix-hip-path | tensorflow/tensorflow | d62ab3aeda8829cbefddaa968afbe70884fc194f | 2020-08-06T23:44:49Z |
--- a/src/mongo/SConscript
+++ b/src/mongo/SConscript
 env.Library('coreshard', [ # This is only here temporarily for auto-split logic i
     's/config.cpp',
     's/grid.cpp',
     's/chunk.cpp',
+    's/chunk_manager.cpp',
     # No good reason to be here other than chunk.cpp needs this.
     's/config_server_checker_service.cpp',
     's/shard.cpp',
--- a/src/mongo/client/parallel.cpp
+++ b/src/mongo/client/parallel.cpp
 
 #include "mongo/client/replica_set_monitor.h"
 #include "mongo/db/dbmessage.h"
 #include "mongo/db/query/lite_parsed_query.h"
-#include "mongo/s/chunk.h"
-#include "mongo/s/chunk_version.h"
+#include "mongo/s/chunk_manager.h"
 #include "mongo/s/config.h"
 #include "mongo/s/grid.h"
 #include "mongo/s/shard.h"
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
 
-// mr.cpp
-
 /**
  * Copyright (C) 2012 10gen Inc.
  *
 
 #include "mongo/db/operation_context_impl.h"
 #include "mongo/db/storage_options.h"
 #include "mongo/scripting/engine.h"
+#include "mongo/s/chunk_manager.h"
 #include "mongo/s/collection_metadata.h"
 #include "mongo/s/d_state.h"
 #include "mongo/s/grid.h"
--- a/src/mongo/dbtests/chunktests.cpp
+++ b/src/mongo/dbtests/chunktests.cpp
 
 #include "mongo/db/json.h"
 #include "mongo/dbtests/dbtests.h"
-#include "mongo/s/chunk.h"
+#include "mongo/s/chunk_manager.h"
 
 namespace mongo {
 
--- a/src/mongo/dbtests/sharding.cpp
+++ b/src/mongo/dbtests/sharding.cpp
 
 #include "mongo/dbtests/config_server_fixture.h"
 #include "mongo/dbtests/dbtests.h"
 #include "mongo/s/chunk_diff.h"
+#include "mongo/s/chunk_manager.h"
 #include "mongo/s/chunk_version.h"
 #include "mongo/s/config.h"
 #include "mongo/s/type_chunk.h"
--- a/src/mongo/s/balance.cpp
+++ b/src/mongo/s/balance.cpp
 
 #include "mongo/base/owned_pointer_map.h"
 #include "mongo/client/dbclientcursor.h"
 #include "mongo/db/jsobj.h"
+#include "mongo/db/server_options.h"
 #include "mongo/db/write_concern.h"
 #include "mongo/db/write_concern_options.h"
-#include "mongo/s/chunk.h"
+#include "mongo/s/chunk_manager.h"
 #include "mongo/s/cluster_write.h"
 #include "mongo/s/config.h"
 #include "mongo/s/config_server_checker_service.h"
--- a/src/mongo/s/balance.h
+++ b/src/mongo/s/balance.h
 
 #pragma once
 
-#include "mongo/platform/basic.h"
-
 #include <boost/scoped_ptr.hpp>
 #include <boost/shared_ptr.hpp>
 
--- a/src/mongo/s/balancer_policy.cpp
+++ b/src/mongo/s/balancer_policy.cpp
 
 #include "mongo/client/connpool.h"
 #include "mongo/s/balancer_policy.h"
-#include "mongo/s/chunk.h"
+#include "mongo/s/chunk_manager.h"
 #include "mongo/s/config.h"
 #include "mongo/s/type_shard.h"
 #include "mongo/s/type_tags.h"
--- a/src/mongo/s/chunk.cpp
+++ b/src/mongo/s/chunk.cpp
 
 #include "mongo/s/chunk.h"
 
-#include <boost/shared_ptr.hpp>
 #include <iostream>
 
 #include "mongo/base/owned_pointer_map.h"
 #include "mongo/client/connpool.h"
 #include "mongo/client/dbclientcursor.h"
-#include "mongo/db/query/lite_parsed_query.h"
-#include "mongo/db/index_names.h"
 #include "mongo/db/lasterror.h"
-#include "mongo/db/write_concern.h"
+#include "mongo/db/query/query_solution.h"
 #include "mongo/db/server_parameters.h"
+#include "mongo/db/write_concern.h"
+#include "mongo/db/write_concern_options.h"
 #include "mongo/platform/random.h"
 #include "mongo/s/balancer_policy.h"
-#include "mongo/s/chunk_diff.h"
-#include "mongo/s/chunk_version.h"
+#include "mongo/s/chunk_manager.h"
 #include "mongo/s/client_info.h"
 #include "mongo/s/cluster_write.h"
 #include "mongo/s/config.h"
 #include "mongo/s/config_server_checker_service.h"
 #include "mongo/s/cursors.h"
-#include "mongo/s/distlock.h"
 #include "mongo/s/grid.h"
-#include "mongo/s/strategy.h"
-#include "mongo/s/type_collection.h"
 #include "mongo/s/type_settings.h"
 #include "mongo/util/concurrency/ticketholder.h"
 #include "mongo/util/log.h"
 #include "mongo/util/print.h"
-#include "mongo/util/startup_test.h"
-#include "mongo/util/timer.h"
-#include "mongo/db/query/canonical_query.h"
-#include "mongo/db/query/query_planner.h"
-#include "mongo/db/query/query_planner_common.h"
-#include "mongo/db/query/index_bounds_builder.h"
-#include "mongo/db/write_concern_options.h"
 
 namespace mongo {
 
 using boost::shared_ptr;
 using std::auto_ptr;
-using std::cout;
-using std::endl;
-using std::pair;
-using std::make_pair;
 using std::map;
-using std::max;
 using std::ostringstream;
 using std::set;
 using std::string;
 using std::stringstream;
 using std::vector;
 
-inline bool allOfType(BSONType type, const BSONObj& o) {
-    BSONObjIterator it(o);
-    while (it.more()) {
-        if (it.next().type() != type)
-            return false;
-    }
-    return true;
-}
-
-static const int kTooManySplitPoints = 4;
-
-// -------  Shard --------
+namespace {
 
-long long Chunk::MaxChunkSize = 1024 * 1024 * 64;
-int Chunk::MaxObjectPerChunk = 250000;
-
-// Can be overridden from command line
-bool Chunk::ShouldAutoSplit = true;
+const int kTooManySplitPoints = 4;
 
 /**
  * Attempts to move the given chunk to another shard.
  *
  * Returns true if the chunk was actually moved.
  */
-static bool tryMoveToOtherShard(const ChunkManager& manager, const ChunkType& chunk) {
+bool tryMoveToOtherShard(const ChunkManager& manager, const ChunkType& chunk) {
     // reload sharding metadata before starting migration
     ChunkManagerPtr chunkMgr = manager.reload(false /* just reloaded in mulitsplit */);
 namespace mongo {
     }
 
     if (shardInfo.size() < 2) {
-        LOG(0) << "no need to move top chunk since there's only 1 shard" << endl;
+        LOG(0) << "no need to move top chunk since there's only 1 shard";
         return false;
     }
 namespace mongo {
                                                chunk);
     if (!tagStatus.isOK()) {
         warning() << "Not auto-moving chunk because of an error encountered while "
-                  << "checking tag for chunk: " << tagStatus.getStatus() << endl;
+                  << "checking tag for chunk: " << tagStatus.getStatus();
         return false;
     }
 namespace mongo {
     if (chunk.getShard() == newLocation) {
         // if this is the best shard, then we shouldn't do anything.
         LOG(1) << "recently split chunk: " << chunk
-               << " already in the best shard" << endl;
+               << " already in the best shard";
         return false;
     }
 namespace mongo {
 
     if (!(toMove->getMin() == chunk.getMin() && toMove->getMax() == chunk.getMax())) {
         LOG(1) << "recently split chunk: " << chunk
-               << " modified before we could migrate " << toMove->toString() << endl;
+               << " modified before we could migrate " << toMove->toString();
         return false;
     }
 
-    log() << "moving chunk (auto): " << toMove->toString() << " to: " << newLocation << endl;
+    log() << "moving chunk (auto): " << toMove->toString() << " to: " << newLocation;
 
     BSONObj res;
 namespace mongo {
     return true;
 }
 
+} // namespace
+
+long long Chunk::MaxChunkSize = 1024 * 1024 * 64;
+int Chunk::MaxObjectPerChunk = 250000;
+
+// Can be overridden from command line
+bool Chunk::ShouldAutoSplit = true;
+
 Chunk::Chunk(const ChunkManager* manager, BSONObj from)
     : _manager(manager), _lastmod(0, 0, OID()), _dataWritten(mkDataWritten())
 {
 namespace mongo {
     return getMin().woCompare(shardKey) <= 0 && shardKey.woCompare(getMax()) < 0;
 }
 
-bool Chunk::minIsInf() const {
+bool Chunk::_minIsInf() const {
     return 0 ==
-        _manager->getShardKeyPattern().getKeyPattern().globalMin().woCompare(getMin());
+           _manager->getShardKeyPattern().getKeyPattern().globalMin().woCompare(getMin());
 }
 
-bool Chunk::maxIsInf() const {
+bool Chunk::_maxIsInf() const {
     return 0 ==
-        _manager->getShardKeyPattern().getKeyPattern().globalMax().woCompare(getMax());
+           _manager->getShardKeyPattern().getKeyPattern().globalMax().woCompare(getMax());
 }
 
 BSONObj Chunk::_getExtremeKey(bool doSplitAtLower) const {
 namespace mongo {
     conn.done();
 }
 
-void Chunk::determineSplitPoints(bool atMedian, std::vector<BSONObj>* splitPoints) const {
+void Chunk::determineSplitPoints(bool atMedian, vector<BSONObj>* splitPoints) const {
     // if splitting is not obligatory we may return early if there are not enough data
     // we cap the number of objects that would fall in the first half (before the split point)
     // the rationale is we'll find a split point without traversing all the data
 namespace mongo {
             msg = "chunk not full enough to trigger auto-split";
         }
 
-        LOG(1) << msg << endl;
+        LOG(1) << msg;
         return Status(ErrorCodes::CannotSplit, msg);
     }
 namespace mongo {
     if (mode == Chunk::autoSplitInternal &&
         KeyPattern::isOrderedKeyPattern(_manager->getShardKeyPattern().toBSON())) {
 
-        if (minIsInf()) {
+        if (_minIsInf()) {
             BSONObj key = _getExtremeKey(true);
             if (!key.isEmpty()) {
                 splitPoints[0] = key.getOwned();
             }
         }
-        else if (maxIsInf()) {
+        else if (_maxIsInf()) {
             BSONObj key = _getExtremeKey(false);
             if (!key.isEmpty()) {
                 splitPoints.pop_back();
 namespace mongo {
         string msg(str::stream() << "not splitting chunk " << toString()
                                  << ", split point " << splitPoints.front()
                                  << " is exactly on chunk bounds");
-        log() << msg << endl;
+        log() << msg;
         return Status(ErrorCodes::CannotSplit, msg);
     }
 namespace mongo {
         string msg(str::stream() << "not splitting chunk " << toString()
                                  << ", split point " << splitPoints.back()
                                  << " is exactly on chunk bounds");
-        log() << msg << endl;
+        log() << msg;
         return Status(ErrorCodes::CannotSplit, msg);
     }
 namespace mongo {
     if (!conn->runCommand("admin", cmdObj, *res)) {
         string msg(str::stream() << "splitChunk failed - cmd: "
                                  << cmdObj << " result: " << *res);
-        warning() << msg << endl;
+        warning() << msg;
         conn.done();
 
         return Status(ErrorCodes::SplitFailed, msg);
 namespace mongo {
     uassert(10167, "can't move shard to its current location!", getShard() != to);
 
     log() << "moving chunk ns: " << _manager->getns() << " moving (" << toString() << ") "
-          << _shard.toString() << " -> " << to.toString() << endl;
+          << _shard.toString() << " -> " << to.toString();
 
     Shard from = _shard;
     ScopedDbConnection fromconn(from.getConnString());
 namespace mongo {
     bool worked = fromconn->runCommand("admin", builder.done(), res);
     fromconn.done();
 
-    LOG(worked ? 1 : 0) << "moveChunk result: " << res << endl;
+    LOG(worked ? 1 : 0) << "moveChunk result: " << res;
 
     // if succeeded, needs to reload to pick up the new location
     // if failed, mongos may be stale
 namespace mongo {
     try {
         _dataWritten += dataWritten;
         int splitThreshold = getManager()->getCurrentDesiredChunkSize();
-        if (minIsInf() || maxIsInf()) {
-            splitThreshold = (int)((double)splitThreshold * .9);
+        if (_minIsInf() || _maxIsInf()) {
+            splitThreshold = (int)((double)splitThreshold * .9);
         }
 
         if (_dataWritten < splitThreshold / ChunkManager::SplitHeuristics::splitTestFactor)
             return false;
 
         if (!getManager()->_splitHeuristics._splitTickets.tryAcquire()) {
-            LOG(1) << "won't auto split because not enough tickets: " << getManager()->getns() << endl;
+            LOG(1) << "won't auto split because not enough tickets: " << getManager()->getns();
             return false;
         }
         TicketHolderReleaser releaser(&(getManager()->_splitHeuristics._splitTickets));
 namespace mongo {
 
         if (!isConfigServerConsistent()) {
             RARELY warning() << "will not perform auto-split because "
-                             << "config servers are inconsistent" << endl;
+                             << "config servers are inconsistent";
             return false;
         }
 
-        LOG(1) << "about to initiate autosplit: " << *this << " dataWritten: " << _dataWritten << " splitThreshold: " << splitThreshold << endl;
+        LOG(1) << "about to initiate autosplit: " << *this << " dataWritten: " << _dataWritten << " splitThreshold: " << splitThreshold;
 
         BSONObj res;
         size_t splitCount = 0;
         Status status = split(Chunk::autoSplitInternal,
                               &splitCount,
                               &res);
-        if (!status.isOK()) {
-            // split would have issued a message if we got here
-            _dataWritten = 0; // this means there wasn't enough data to split, so don't want to try again until considerable more data
+        if (!status.isOK()) {
+            // Split would have issued a message if we got here. This means there wasn't enough
+            // data to split, so don't want to try again until considerable more data
+            _dataWritten = 0;
             return false;
         }
 
-        if (maxIsInf() || minIsInf()) {
+        if (_maxIsInf() || _minIsInf()) {
             // we don't want to reset _dataWritten since we kind of want to check the other side right away
         }
         else {
-            _dataWritten = 0; // we're splitting, so should wait a bit
+            // we're splitting, so should wait a bit
+            _dataWritten = 0;
         }
 
         const bool shouldBalance = grid.getConfigShouldBalance() &&
 namespace mongo {
               << " size: " << getPhysicalSize() // slow - but can be useful when debugging
 #endif
               << (res["shouldMigrate"].eoo() ? "" : (string)" (migrate suggested" +
-                  (shouldBalance ? ")" : ", but no migrations allowed)")) << endl;
+                  (shouldBalance ? ")" : ", but no migrations allowed)"));
 
         // Top chunk optimization - try to move the top chunk out of this shard
         // to prevent the hot spot from staying on a single shard. This is based on
 namespace mongo {
         _dataWritten = mkDataWritten();
 
         // if the collection lock is taken (e.g. we're migrating), it is fine for the split to fail.
-        warning() << "could not autosplit collection " << _manager->getns() << causedBy(e) << endl;
+        warning() << "could not autosplit collection " << _manager->getns() << causedBy(e);
         return false;
     }
 }
 namespace mongo {
 
     if (!result.isOK()) {
         warning() << "couldn't set jumbo for chunk: "
-                  << genID() << result.reason() << endl;
+                  << genID() << result.reason();
     }
 }
 namespace mongo {
     // validate chunksize before proceeding
     if (csize == 0) {
         // setting was not modified; mark as such
-        log() << "warning: invalid chunksize (" << csize << ") ignored" << endl;
+        log() << "warning: invalid chunksize (" << csize << ") ignored";
         return;
     }
 
-    LOG(1) << "Refreshing MaxChunkSize: " << csize << "MB" << endl;
+    LOG(1) << "Refreshing MaxChunkSize: " << csize << "MB";
 
     if (csize != Chunk::MaxChunkSize / (1024 * 1024)) {
         log() << "MaxChunkSize changing from " << Chunk::MaxChunkSize / (1024 * 1024) << "MB"
-              << " to " << csize << "MB" << endl;
+              << " to " << csize << "MB";
     }
 
     if (!setMaxChunkSizeSizeMB(csize)) {
-        warning() << "invalid MaxChunkSize: " << csize << endl;
+        warning() << "invalid MaxChunkSize: " << csize;
     }
 }
 namespace mongo {
     return true;
 }
 
-// -------  ChunkManager --------
-
-AtomicUInt32 ChunkManager::NextSequenceNumber(1U);
-
-ChunkManager::ChunkManager(const string& ns, const ShardKeyPattern& pattern, bool unique) :
-    _ns(ns),
-    _keyPattern(pattern.getKeyPattern()),
-    _unique(unique),
-    _chunkRanges(),
-    _mutex("ChunkManager"),
-    _sequenceNumber(NextSequenceNumber.addAndFetch(1))
-{
-    //
-    // Sets up a chunk manager from new data
-    //
-}
-
-ChunkManager::ChunkManager(const BSONObj& collDoc) :
-    // Need the ns early, to construct the lock
-    // TODO: Construct lock on demand? Not sure why we need to keep it around
-    _ns(collDoc[CollectionType::ns()].type() == String ?
-        collDoc[CollectionType::ns()].String() :
-        ""),
-    _keyPattern(collDoc[CollectionType::keyPattern()].type() == Object ?
-        collDoc[CollectionType::keyPattern()].Obj().getOwned() :
-        BSONObj()),
-    _unique(collDoc[CollectionType::unique()].trueValue()),
-    _chunkRanges(),
-    _mutex("ChunkManager"),
-    // The shard versioning mechanism hinges on keeping track of the number of times we reloaded ChunkManager's.
-    // Increasing this number here will prompt checkShardVersion() to refresh the connection-level versions to
-    // the most up to date value.
-    _sequenceNumber(NextSequenceNumber.addAndFetch(1))
-{
-
-    //
-    // Sets up a chunk manager from an existing sharded collection document
-    //
-
-    verify(_ns != "");
-    verify(!_keyPattern.toBSON().isEmpty());
-
-    _version = ChunkVersion::fromBSON(collDoc);
-}
-
-void ChunkManager::loadExistingRanges(const string& config, const ChunkManager* oldManager) {
-
-    int tries = 3;
-    while (tries--) {
-        ChunkMap chunkMap;
-        set<Shard> shards;
-        ShardVersionMap shardVersions;
-        Timer t;
-
-        bool success = _load(config, chunkMap, shards, shardVersions, oldManager);
-
-        if (success) {
-            {
-                int ms = t.millis();
-                log() << "ChunkManager: time to load chunks for " << _ns << ": " << ms << "ms"
-                      << " sequenceNumber: " << _sequenceNumber
-                      << " version: " << _version.toString()
-                      << " based on: " <<
-                      (oldManager ? oldManager->getVersion().toString() : "(empty)")
-                      << endl;
-            }
-
-            // TODO: Merge into diff code above, so we validate in one place
-            if (_isValid(chunkMap)) {
-                // These variables are const for thread-safety. Since the
-                // constructor can only be called from one thread, we don't have
-                // to worry about that here.
-                const_cast<ChunkMap&>(_chunkMap).swap(chunkMap);
-                const_cast<set<Shard>&>(_shards).swap(shards);
-                const_cast<ShardVersionMap&>(_shardVersions).swap(shardVersions);
-                const_cast<ChunkRangeManager&>(_chunkRanges).reloadAll(_chunkMap);
-
-                return;
-            }
-        }
-
-        if (_chunkMap.size() < 10) {
-            _printChunks();
-        }
-
-        warning() << "ChunkManager loaded an invalid config for " << _ns
-                  << ", trying again" << endl;
-
-        sleepmillis(10 * (3 - tries));
-    }
-
-    // this will abort construction so we should never have a reference to an invalid config
-    msgasserted(13282, "Couldn't load a valid config for " + _ns + " after 3 attempts. Please try again.");
-}
-
-
-/**
- * This is an adapter so we can use config diffs - mongos and mongod do them slightly
- * differently
- *
- * The mongos adapter here tracks all shards, and stores ranges by (max, Chunk) in the map.
- */
-class CMConfigDiffTracker : public ConfigDiffTracker<ChunkPtr, std::string> {
-public:
-    CMConfigDiffTracker(ChunkManager* manager) : _manager(manager) { }
-
-    virtual bool isTracked(const BSONObj& chunkDoc) const {
-        // Mongos tracks all shards
-        return true;
-    }
-
-    virtual BSONObj minFrom(const ChunkPtr& val) const {
-        return val.get()->getMin();
-    }
-
-    virtual bool isMinKeyIndexed() const { return false; }
-
-    virtual pair<BSONObj, ChunkPtr> rangeFor(const BSONObj& chunkDoc, const BSONObj& min, const BSONObj& max) const {
-        ChunkPtr c(new Chunk(_manager, chunkDoc));
-        return make_pair(max, c);
-    }
-
-    virtual string shardFor(const string& hostName) const {
-        Shard shard = Shard::make(hostName);
-        return shard.getName();
-    }
-
-    ChunkManager* _manager;
-
-};
-
-bool ChunkManager::_load(const string& config,
-                         ChunkMap& chunkMap,
-                         set<Shard>& shards,
-                         ShardVersionMap& shardVersions,
-                         const ChunkManager* oldManager)
-{
-
-    // Reset the max version, but not the epoch, when we aren't loading from the oldManager
-    _version = ChunkVersion(0, 0, _version.epoch());
-
-    // If we have a previous version of the ChunkManager to work from, use that info to reduce
-    // our config query
-    if (oldManager && oldManager->getVersion().isSet()) {
-
-        // Get the old max version
-        _version = oldManager->getVersion();
-        // Load a copy of the old versions
-        shardVersions = oldManager->_shardVersions;
-
-        // Load a copy of the chunk map, replacing the chunk manager with our own
-        const ChunkMap& oldChunkMap = oldManager->getChunkMap();
-
-        // Could be v.expensive
-        // TODO: If chunks were immutable and didn't reference the manager, we could do more
-        // interesting things here
-        for (ChunkMap::const_iterator it = oldChunkMap.begin(); it != oldChunkMap.end(); it++) {
-
-            ChunkPtr oldC = it->second;
-            ChunkPtr c(new Chunk(this, oldC->getMin(),
-                                 oldC->getMax(),
-                                 oldC->getShard(),
-                                 oldC->getLastmod()));
-
-            c->setBytesWritten(oldC->getBytesWritten());
-
-            chunkMap.insert(make_pair(oldC->getMax(), c));
-        }
-
-        LOG(2) << "loading chunk manager for collection " << _ns
-               << " using old chunk manager w/ version " << _version.toString()
-               << " and " << oldChunkMap.size() << " chunks" << endl;
-    }
-
-    // Attach a diff tracker for the versioned chunk data
-    CMConfigDiffTracker differ(this);
-    differ.attach(_ns, chunkMap, _version, shardVersions);
-
-    // Diff tracker should *always* find at least one chunk if collection exists
-    int diffsApplied = differ.calculateConfigDiff(config);
-    if (diffsApplied > 0) {
-
-        LOG(2) << "loaded " << diffsApplied << " chunks into new chunk manager for " << _ns
-               << " with version " << _version << endl;
-
-        // Add all the shards we find to the shards set
-        for (ShardVersionMap::iterator it = shardVersions.begin(); it != shardVersions.end(); it++) {
-            shards.insert(it->first);
-        }
-
-        return true;
-    }
-    else if (diffsApplied == 0) {
-
-        // No chunks were found for the ns
-        warning() << "no chunks found when reloading " << _ns
-                  << ", previous version was " << _version << endl;
-
-        // Set all our data to empty
-        chunkMap.clear();
-        shardVersions.clear();
-        _version = ChunkVersion(0, 0, OID());
-
-        return true;
-    }
-    else { // diffsApplied < 0
-
-        bool allInconsistent = differ.numValidDiffs() == 0;
-
-        if (allInconsistent) {
-            // All versions are different, this can be normal
-            warning() << "major change in chunk information found when reloading "
-                      << _ns << ", previous version was " << _version << endl;
-        }
-        else {
-            // Inconsistent load halfway through (due to yielding cursor during load)
-            // should be rare
-            warning() << "inconsistent chunks found when reloading "
-                      << _ns << ", previous version was " << _version
-                      << ", this should be rare" << endl;
-        }
-
-        // Set all our data to empty to be extra safe
-        chunkMap.clear();
-        shardVersions.clear();
-        _version = ChunkVersion(0, 0, OID());
-
-        return allInconsistent;
-    }
-
-}
-
-ChunkManagerPtr ChunkManager::reload(bool force) const {
-    return grid.getDBConfig(getns())->getChunkManager(getns(), force);
-}
-
-bool ChunkManager::_isValid(const ChunkMap& chunkMap) {
-#define ENSURE(x) do { if(!(x)) { log() << "ChunkManager::_isValid failed: " #x << endl; return false; } } while(0)
-
-    if (chunkMap.empty())
-        return true;
-
-    // Check endpoints
-    ENSURE(allOfType(MinKey, chunkMap.begin()->second->getMin()));
-    ENSURE(allOfType(MaxKey, boost::prior(chunkMap.end())->second->getMax()));
-
-    // Make sure there are no gaps or overlaps
-    for (ChunkMap::const_iterator it = boost::next(chunkMap.begin()), end = chunkMap.end(); it != end; ++it) {
-        ChunkMap::const_iterator last = boost::prior(it);
-
-        if (!(it->second->getMin() == last->second->getMax())) {
-            PRINT(last->second->toString());
-            PRINT(it->second->toString());
-            PRINT(it->second->getMin());
-            PRINT(last->second->getMax());
-        }
-        ENSURE(it->second->getMin() == last->second->getMax());
-    }
-
-    return true;
-
-#undef ENSURE
-}
-
-void ChunkManager::_printChunks() const {
-    for (ChunkMap::const_iterator it = _chunkMap.begin(), end = _chunkMap.end(); it != end; ++it) {
-        log() << *it->second << endl;
-    }
-}
-
-void ChunkManager::calcInitSplitsAndShards(const Shard& primary,
-                                           const vector<BSONObj>* initPoints,
-                                           const vector<Shard>* initShards,
-                                           vector<BSONObj>* splitPoints,
-                                           vector<Shard>* shards) const
-{
-    verify(_chunkMap.size() == 0);
-
-    unsigned long long numObjects = 0;
-    Chunk c(this, _keyPattern.getKeyPattern().globalMin(),
-            _keyPattern.getKeyPattern().globalMax(), primary);
-
-    if (!initPoints || !initPoints->size()) {
-        // discover split points
-        {
-            // get stats to see if there is any data
-            ScopedDbConnection shardConn(primary.getConnString());
-
-            numObjects = shardConn->count(getns());
-            shardConn.done();
-        }
-
-        if (numObjects > 0)
-            c.pickSplitVector(*splitPoints, Chunk::MaxChunkSize);
-
-        // since docs alread exists, must use primary shard
-        shards->push_back(primary);
-    } else {
-        // make sure points are unique and ordered
-        set<BSONObj> orderedPts;
-        for (unsigned i = 0; i < initPoints->size(); ++i) {
-            BSONObj pt = (*initPoints)[i];
-            orderedPts.insert(pt);
-        }
-        for (set<BSONObj>::iterator it = orderedPts.begin(); it != orderedPts.end(); ++it) {
-            splitPoints->push_back(*it);
-        }
-
-        if (!initShards || !initShards->size()) {
-            // If not specified, only use the primary shard (note that it's not safe for mongos
-            // to put initial chunks on other shards without the primary mongod knowing).
-            shards->push_back(primary);
-        } else {
-            std::copy(initShards->begin(), initShards->end(), std::back_inserter(*shards));
-        }
-    }
-}
-
-void ChunkManager::createFirstChunks(const string& config,
-                                     const Shard& primary,
-                                     const vector<BSONObj>* initPoints,
-                                     const vector<Shard>* initShards)
-{
-    // TODO distlock?
-    // TODO: Race condition if we shard the collection and insert data while we split across
-    // the non-primary shard.
-
-    vector<BSONObj> splitPoints;
-    vector<Shard> shards;
-
-    calcInitSplitsAndShards(primary, initPoints, initShards,
-                            &splitPoints, &shards);
-
-    // this is the first chunk; start the versioning from scratch
-    ChunkVersion version;
-    version.incEpoch();
-    version.incMajor();
-
-    log() << "going to create " << splitPoints.size() + 1 << " chunk(s) for: " << _ns
-          << " using new epoch " << version.epoch() << endl;
-
-    ScopedDbConnection conn(config, 30);
-
-    // Make sure we don't have any chunks that already exist here
-    unsigned long long existingChunks =
-        conn->count(ChunkType::ConfigNS, BSON(ChunkType::ns(_ns)));
-
-    uassert(13449, str::stream() << "collection " << _ns << " already sharded with "
-            << existingChunks << " chunks", existingChunks == 0);
-    conn.done();
-
-    for (unsigned i = 0; i <= splitPoints.size(); i++) {
-        BSONObj min = i == 0 ? _keyPattern.getKeyPattern().globalMin() : splitPoints[i - 1];
-        BSONObj max = i < splitPoints.size() ?
-            splitPoints[i] : _keyPattern.getKeyPattern().globalMax();
-
-        Chunk temp(this, min, max, shards[i % shards.size()], version);
-
-        BSONObjBuilder chunkBuilder;
-        temp.serialize(chunkBuilder);
-        BSONObj chunkObj = chunkBuilder.obj();
-
-        Status result = clusterUpdate(ChunkType::ConfigNS,
-                                      BSON(ChunkType::name(temp.genID())),
-                                      chunkObj,
-                                      true, // upsert
-                                      false, // multi
-                                      NULL);
-
-        version.incMinor();
-
-        if (!result.isOK()) {
-            string ss = str::stream() << "creating first chunks failed. result: "
-                                      << result.reason();
-            error() << ss << endl;
-            msgasserted(15903, ss);
-        }
-    }
-
-    _version = ChunkVersion(0, 0, version.epoch());
-}
-
-ChunkPtr ChunkManager::findIntersectingChunk(const BSONObj& shardKey) const {
-    {
-        BSONObj chunkMin;
-        ChunkPtr chunk;
-        {
-            ChunkMap::const_iterator it = _chunkMap.upper_bound(shardKey);
-            if (it != _chunkMap.end()) {
-                chunkMin = it->first;
-                chunk = it->second;
-            }
-        }
-
-        if (chunk) {
-            if (chunk->containsKey(shardKey)) {
-                return chunk;
-            }
-
-            PRINT(chunkMin);
-            PRINT(*chunk);
-            PRINT(shardKey);
-
-            reload();
-            massert(13141, "Chunk map pointed to incorrect chunk", false);
-        }
-    }
-
-    msgasserted(8070,
-                str::stream() << "couldn't find a chunk intersecting: " << shardKey
-                              << " for ns: " << _ns
-                              << " at version: " << _version.toString()
-                              << ", number of chunks: " << _chunkMap.size());
-}
-
-void ChunkManager::getShardsForQuery(set<Shard>& shards, const BSONObj& query) const {
-    CanonicalQuery* canonicalQuery = NULL;
-    Status status = CanonicalQuery::canonicalize(
-                        _ns,
-                        query,
-                        &canonicalQuery,
-                        WhereCallbackNoop());
-
-    boost::scoped_ptr<CanonicalQuery> canonicalQueryPtr(canonicalQuery);
-
-    uassert(status.code(), status.reason(), status.isOK());
-
-    // Query validation
-    if (QueryPlannerCommon::hasNode(canonicalQuery->root(), MatchExpression::GEO_NEAR)) {
-        uassert(13501, "use geoNear command rather than $near query", false);
-    }
-
-    // Transforms query into bounds for each field in the shard key
-    // for example :
-    //   Key { a : 1, b : 1 },
-    //   Query { a : { $gte : 1, $lt : 2 },
-    //           b : { $gte : 3, $lt : 4 } }
-    //   => Bounds { a : [1, 2), b : [3, 4) }
-    IndexBounds bounds = getIndexBoundsForQuery(_keyPattern.toBSON(), canonicalQuery);
-
-    // Transforms bounds for each shard key field into full shard key ranges
-    // for example :
-    //   Key { a : 1, b : 1 }
-    //   Bounds { a : [1, 2), b : [3, 4) }
-    //   => Ranges { a : 1, b : 3 } => { a : 2, b : 4 }
-    BoundList ranges = _keyPattern.flattenBounds(bounds);
-
-    for (BoundList::const_iterator it = ranges.begin(); it != ranges.end();
-        ++it) {
-
-        getShardsForRange(shards, it->first /*min*/, it->second /*max*/);
-
-        // once we know we need to visit all shards no need to keep looping
-        if (shards.size() == _shards.size()) break;
-    }
-
-    // SERVER-4914 Some clients of getShardsForQuery() assume at least one shard will be
-    // returned. For now, we satisfy that assumption by adding a shard with no matches rather
-    // than return an empty set of shards.
-    if (shards.empty()) {
-        massert(16068, "no chunk ranges available", !_chunkRanges.ranges().empty());
-        shards.insert(_chunkRanges.ranges().begin()->second->getShard());
-    }
-}
-
-void ChunkManager::getShardsForRange(set<Shard>& shards,
-                                     const BSONObj& min,
-                                     const BSONObj& max) const {
-
-    ChunkRangeMap::const_iterator it = _chunkRanges.upper_bound(min);
-    ChunkRangeMap::const_iterator end = _chunkRanges.upper_bound(max);
-
-    massert(13507, str::stream() << "no chunks found between bounds " << min << " and " << max, it != _chunkRanges.ranges().end());
-
-    if (end != _chunkRanges.ranges().end()) ++end;
-
-    for (; it != end; ++it) {
-        shards.insert(it->second->getShard());
-
-        // once we know we need to visit all shards no need to keep looping
-        if (shards.size() == _shards.size()) break;
-    }
-}
-
-void ChunkManager::getAllShards(set<Shard>& all) const {
-    all.insert(_shards.begin(), _shards.end());
-}
-
-IndexBounds ChunkManager::getIndexBoundsForQuery(const BSONObj& key, const CanonicalQuery* canonicalQuery) {
-    // $text is not allowed in planning since we don't have text index on mongos.
-    //
-    // TODO: Treat $text query as a no-op in planning. So with shard key { a : 1 },
-    // the query { a : 2, $text : { ... } } will only target to { a : 2 }.
-    if (QueryPlannerCommon::hasNode(canonicalQuery->root(), MatchExpression::TEXT)) {
-        IndexBounds bounds;
-        IndexBoundsBuilder::allValuesBounds(key, &bounds); // [minKey, maxKey]
-        return bounds;
-    }
-
-    // Consider shard key as an index
-    string accessMethod = IndexNames::findPluginName(key);
-    dassert(accessMethod == IndexNames::BTREE || accessMethod == IndexNames::HASHED);
-
-    // Use query framework to generate index bounds
-    QueryPlannerParams plannerParams;
-    // Must use "shard key" index
-    plannerParams.options = QueryPlannerParams::NO_TABLE_SCAN;
-    IndexEntry indexEntry(key, accessMethod, false /* multiKey */, false /* sparse */,
-                          false /* unique */, "shardkey", BSONObj());
-    plannerParams.indices.push_back(indexEntry);
-
-    OwnedPointerVector<QuerySolution> solutions;
-    Status status = QueryPlanner::plan(*canonicalQuery, plannerParams, &solutions.mutableVector());
-    uassert(status.code(), status.reason(), status.isOK());
-
-    IndexBounds bounds;
-
-    for (vector<QuerySolution*>::const_iterator it = solutions.begin();
-            bounds.size() == 0 && it != solutions.end(); it++) {
-        // Try next solution if we failed to generate index bounds, i.e. bounds.size() == 0
-        bounds = collapseQuerySolution((*it)->root.get());
-    }
-
-    if (bounds.size() == 0) {
-        // We cannot plan the query without collection scan, so target to all shards.
-        IndexBoundsBuilder::allValuesBounds(key, &bounds); // [minKey, maxKey]
-    }
-    return bounds;
-}
-
-IndexBounds ChunkManager::collapseQuerySolution(const QuerySolutionNode* node) {
-    if (node->children.size() == 0) {
-        invariant(node->getType() == STAGE_IXSCAN);
-
-        const IndexScanNode* ixNode = static_cast<const IndexScanNode*>(node);
-        return ixNode->bounds;
-    }
-
-    if (node->children.size() == 1) {
-        // e.g. FETCH -> IXSCAN
-        return collapseQuerySolution(node->children.front());
-    }
-
-    // children.size() > 1, assert it's OR / SORT_MERGE.
-    if (node->getType() != STAGE_OR && node->getType() != STAGE_SORT_MERGE) {
-        // Unexpected node. We should never reach here.
-        error() << "could not generate index bounds on query solution tree: " << node->toString();
-        dassert(false); // We'd like to know this error in testing.
-
-        // Bail out with all shards in production, since this isn't a fatal error.
-        return IndexBounds();
-    }
-
-    IndexBounds bounds;
-    for (vector<QuerySolutionNode*>::const_iterator it = node->children.begin();
-            it != node->children.end(); it++)
-    {
-        // The first branch under OR
-        if (it == node->children.begin()) {
-            invariant(bounds.size() == 0);
-            bounds = collapseQuerySolution(*it);
-            if (bounds.size() == 0) { // Got unexpected node in query solution tree
-                return IndexBounds();
-            }
-            continue;
-        }
-
-        IndexBounds childBounds = collapseQuerySolution(*it);
-        if (childBounds.size() == 0) { // Got unexpected node in query solution tree
-            return IndexBounds();
-        }
-
-        invariant(childBounds.size() == bounds.size());
-        for (size_t i = 0; i < bounds.size(); i++) {
-            bounds.fields[i].intervals.insert(bounds.fields[i].intervals.end(),
-                                              childBounds.fields[i].intervals.begin(),
-                                              childBounds.fields[i].intervals.end());
-        }
-    }
-
-    for (size_t i = 0; i < bounds.size(); i++) {
-        IndexBoundsBuilder::unionize(&bounds.fields[i]);
-    }
-
-    return bounds;
-}
-
-bool ChunkManager::compatibleWith(const ChunkManager& other, const string& shardName) const {
-    // Return true if the shard version is the same in the two chunk managers
-    // TODO: This doesn't need to be so strong, just major vs
-    return other.getVersion(shardName).equals(getVersion(shardName));
-}
-
-void ChunkManager::drop(ChunkManagerPtr me) const {
-    scoped_lock lk(_mutex);
-
-    configServer.logChange("dropCollection.start", _ns, BSONObj());
-
-    DistributedLock nsLock(ConnectionString(configServer.modelServer(),
-                                            ConnectionString::SYNC),
-                           _ns);
-
-    dist_lock_try dlk;
-    try {
-        dlk = dist_lock_try(&nsLock, "drop");
-    }
-    catch (LockException& e) {
-        uassert(14022, str::stream() << "Error locking distributed lock for chunk drop." << causedBy(e), false);
-    }
-
-    uassert(13331, "collection's metadata is undergoing changes. Please try again.", dlk.got());
-
-    uassert(10174, "config servers not all up", configServer.allUp(false));
-
-    set<Shard> seen;
-
-    LOG(1) << "ChunkManager::drop : " << _ns << endl;
-
-    // lock all shards so no one can do a split/migrate
-    for (ChunkMap::const_iterator i = _chunkMap.begin(); i != _chunkMap.end(); ++i) {
-        ChunkPtr c = i->second;
-        seen.insert(c->getShard());
-    }
-
-    LOG(1) << "ChunkManager::drop : " << _ns << "\t all locked" << endl;
-
-    map<string, BSONObj> errors;
-    // delete data from mongod
-    for (set<Shard>::iterator i = seen.begin(); i != seen.end(); i++) {
-        ScopedDbConnection conn(i->getConnString());
-        BSONObj info;
-        if (!conn->dropCollection(_ns, &info)) {
-            errors[i->getConnString()] = info;
-        }
-        conn.done();
-    }
-    if (!errors.empty()) {
-        stringstream ss;
-        ss << "Dropping collection failed on the following hosts: ";
-        for (map<string, BSONObj>::const_iterator it = errors.begin(); it != errors.end();) {
-            ss << it->first << ": " << it->second;
-            ++it;
-            if (it != errors.end()) {
-                ss << ", ";
-            }
-        }
-        uasserted(16338, ss.str());
-    }
-
-    LOG(1) << "ChunkManager::drop : " << _ns << "\t removed shard data" << endl;
-
-    // remove chunk data
-    Status result = clusterDelete(ChunkType::ConfigNS,
-                                  BSON(ChunkType::ns(_ns)),
-                                  0 /* limit */,
-                                  NULL);
-
-    // Make sure we're dropped on the config
-    if (!result.isOK()) {
-        uasserted(17001, str::stream() << "could not drop chunks for " << _ns
-                                       << ": " << result.reason());
-    }
-
-    LOG(1) << "ChunkManager::drop : " << _ns << "\t removed chunk data" << endl;
-
-    for (set<Shard>::iterator i = seen.begin(); i != seen.end(); i++) {
-        ScopedDbConnection conn(i->getConnString());
-        BSONObj res;
-
-        // this is horrible
-        // we need a special command for dropping on the d side
-        // this hack works for the moment
-
-        if (!setShardVersion(conn.conn(),
-                             _ns,
-                             configServer.modelServer(),
-                             ChunkVersion(0, 0, OID()),
-                             NULL,
-                             true,
-                             res)) {
-
-            uasserted(8071, str::stream() << "cleaning up after drop failed: " << res);
-        }
-
-        conn->simpleCommand("admin", 0, "unsetSharding");
-        conn.done();
-    }
-
-    LOG(1) << "ChunkManager::drop : " << _ns << "\t DONE" << endl;
-    configServer.logChange("dropCollection", _ns, BSONObj());
-}
-
-ChunkVersion ChunkManager::getVersion(const std::string& shardName) const {
-    ShardVersionMap::const_iterator i = _shardVersions.find(shardName);
-    if (i == _shardVersions.end()) {
-        // Shards without explicitly tracked shard versions (meaning they have
-        // no chunks) always have a version of (0, 0, epoch). Note this is
-        // *different* from the dropped chunk version of (0, 0, OID(000...)).
-        // See s/chunk_version.h.
-        return ChunkVersion(0, 0, _version.epoch());
-    }
-    return i->second;
-}
-
-ChunkVersion ChunkManager::getVersion() const {
-    return _version;
-}
-
-void ChunkManager::getInfo(BSONObjBuilder& b) const {
-    b.append(CollectionType::keyPattern(), _keyPattern.toBSON());
-    b.appendBool(CollectionType::unique(), _unique);
-    _version.addEpochToBSON(b, CollectionType::DEPRECATED_lastmod());
-}
-
-string ChunkManager::toString() const {
-    stringstream ss;
-    ss << "ChunkManager: " << _ns << " key:" << _keyPattern.toString() << '\n';
-    for (ChunkMap::const_iterator i = _chunkMap.begin(); i != _chunkMap.end(); ++i) {
-        const ChunkPtr c = i->second;
-        ss << "\t" << c->toString() << '\n';
-    }
-    return ss.str();
-}
-
-void ChunkRangeManager::assertValid() const {
-    if (_ranges.empty())
-        return;
-
-    try {
-        // No Nulls
-        for (ChunkRangeMap::const_iterator it = _ranges.begin(), end = _ranges.end(); it != end; ++it) {
-            verify(it->second);
-        }
-
-        // Check endpoints
-        verify(allOfType(MinKey, _ranges.begin()->second->getMin()));
-        verify(allOfType(MaxKey, boost::prior(_ranges.end())->second->getMax()));
-
-        // Make sure there are no gaps or overlaps
-        for (ChunkRangeMap::const_iterator it = boost::next(_ranges.begin()), end = _ranges.end(); it != end; ++it) {
-            ChunkRangeMap::const_iterator last = boost::prior(it);
-            verify(it->second->getMin() == last->second->getMax());
-        }
-
-        // Check Map keys
-        for (ChunkRangeMap::const_iterator it = _ranges.begin(), end = _ranges.end(); it != end; ++it) {
-            verify(it->first == it->second->getMax());
-        }
-
-        // Make sure we match the original chunks
-        const ChunkMap chunks = _ranges.begin()->second->getManager()->_chunkMap;
-        for (ChunkMap::const_iterator i = chunks.begin(); i != chunks.end(); ++i) {
-            const ChunkPtr chunk = i->second;
-
-            ChunkRangeMap::const_iterator min = _ranges.upper_bound(chunk->getMin());
-            ChunkRangeMap::const_iterator max = _ranges.lower_bound(chunk->getMax());
-
-            verify(min != _ranges.end());
-            verify(max != _ranges.end());
-            verify(min == max);
-            verify(min->second->getShard() == chunk->getShard());
-            verify(min->second->containsKey(chunk->getMin()));
-            verify(min->second->containsKey(chunk->getMax()) || (min->second->getMax() == chunk->getMax()));
-        }
-
-    }
-    catch (...) {
-        error() << "\t invalid ChunkRangeMap! printing ranges:" << endl;
-
-        for (ChunkRangeMap::const_iterator it = _ranges.begin(), end = _ranges.end(); it != end; ++it)
-            cout << it->first << ": " << *it->second << endl;
-
-        throw;
-    }
-}
-
-void ChunkRangeManager::reloadAll(const ChunkMap& chunks) {
-    _ranges.clear();
-    _insertRange(chunks.begin(), chunks.end());
-
-    DEV assertValid();
-}
-
-void ChunkRangeManager::_insertRange(ChunkMap::const_iterator begin, const ChunkMap::const_iterator end) {
-    while (begin != end) {
-        ChunkMap::const_iterator first = begin;
-        Shard shard = first->second->getShard();
-        while (begin != end && (begin->second->getShard() == shard))
-            ++begin;
-
-        shared_ptr<ChunkRange> cr(new ChunkRange(first, begin));
-        _ranges[cr->getMax()] = cr;
-    }
-}
-
-int ChunkManager::getCurrentDesiredChunkSize() const {
-    // split faster in early chunks helps spread out an initial load better
-    const int minChunkSize = 1 << 20;  // 1 MBytes
-
-    int splitThreshold = Chunk::MaxChunkSize;
-
-    int nc = numChunks();
-
-    if (nc <= 1) {
-        return 1024;
-    }
-    else if (nc < 3) {
-        return minChunkSize / 2;
-    }
-    else if (nc < 10) {
-        splitThreshold = max(splitThreshold / 4, minChunkSize);
-    }
-    else if (nc < 20) {
-        splitThreshold = max(splitThreshold / 2, minChunkSize);
-    }
-
-    return splitThreshold;
-}
-
 } // namespace mongo
--- a/src/mongo/s/chunk.h
+++ b/src/mongo/s/chunk.h
 
 #pragma once
 
-#include <boost/next_prior.hpp>
 #include <boost/shared_ptr.hpp>
 
-#include "mongo/base/string_data.h"
 #include "mongo/db/keypattern.h"
-#include "mongo/db/query/query_solution.h"
 #include "mongo/platform/atomic_word.h"
 #include "mongo/s/chunk_version.h"
 #include "mongo/s/shard.h"
 
 
 namespace mongo {
 
-class DBConfig;
-class Chunk;
-class ChunkRange;
 class ChunkManager;
-class ChunkObjUnitTest;
 struct WriteConcernOptions;
 
-typedef boost::shared_ptr<const Chunk> ChunkPtr;
-
-// key is max for each Chunk or ChunkRange
-typedef std::map<BSONObj, ChunkPtr, BSONObjCmp> ChunkMap;
-typedef std::map<BSONObj, boost::shared_ptr<ChunkRange>, BSONObjCmp> ChunkRangeMap;
-
-typedef boost::shared_ptr<ChunkManager> ChunkManagerPtr;
-
 /**
    config.chunks
    { ns : "alleyinsider.fs.chunks" , min : {} , max : {} , server : "localhost:30001" }
 namespace mongo {
     const BSONObj& getMin() const { return _min; }
     const BSONObj& getMax() const { return _max; }
 
-    // if min/max key is pos/neg infinity
-    bool minIsInf() const;
-    bool maxIsInf() const;
-
     // Returns true if this chunk contains the given shard key, and false otherwise
     //
     // Note: this function takes an extracted *key*, not an original document
 namespace mongo {
 
 
 private:
+    // if min/max key is pos/neg infinity
+    bool _minIsInf() const;
+    bool _maxIsInf() const;
 
-    // main shard info
-
-    const ChunkManager* _manager;
+    // The chunk manager, which owns this chunk. Not owned by the chunk.
+    const ChunkManager* _manager;
 
     BSONObj _min;
     BSONObj _max;
 namespace mongo {
     static int mkDataWritten();
 };
 
-class ChunkRange {
-public:
-    const ChunkManager* getManager() const { return _manager; }
-    Shard getShard() const { return _shard; }
-
-    const BSONObj& getMin() const { return _min; }
-    const BSONObj& getMax() const { return _max; }
-
-    // clones of Chunk methods
-    // Returns true if this ChunkRange contains the given shard key, and false otherwise
-    //
-    // Note: this function takes an extracted *key*, not an original document
-    // (the point may be computed by, say, hashing a given field or projecting
-    // to a subset of fields).
-    bool containsKey(const BSONObj& shardKey) const;
-
-    ChunkRange(ChunkMap::const_iterator begin, const ChunkMap::const_iterator end)
-        : _manager(begin->second->getManager())
-        , _shard(begin->second->getShard())
-        , _min(begin->second->getMin())
-        , _max(boost::prior(end)->second->getMax()) {
-        verify(begin != end);
-
-        DEV while (begin != end) {
-            verify(begin->second->getManager() == _manager);
-            verify(begin->second->getShard() == _shard);
-            ++begin;
-        }
-    }
-
-    // Merge min and max (must be adjacent ranges)
-    ChunkRange(const ChunkRange& min, const ChunkRange& max)
-        : _manager(min.getManager())
-        , _shard(min.getShard())
-        , _min(min.getMin())
-        , _max(max.getMax()) {
-        verify(min.getShard() == max.getShard());
-        verify(min.getManager() == max.getManager());
-        verify(min.getMax() == max.getMin());
-    }
-
-    friend std::ostream& operator<<(std::ostream& out, const ChunkRange& cr) {
-        return (out << "ChunkRange(min=" << cr._min << ", max=" << cr._max << ", shard=" << cr._shard << ")");
-    }
-
-private:
-    const ChunkManager* _manager;
-    const Shard _shard;
-    const BSONObj _min;
-    const BSONObj _max;
-};
-
-
-class ChunkRangeManager {
-public:
-    const ChunkRangeMap& ranges() const { return _ranges; }
-
-    void clear() { _ranges.clear(); }
-
-    void reloadAll(const ChunkMap& chunks);
-
-    // Slow operation -- wrap with DEV
-    void assertValid() const;
-
-    ChunkRangeMap::const_iterator upper_bound(const BSONObj& o) const { return _ranges.upper_bound(o); }
-    ChunkRangeMap::const_iterator lower_bound(const BSONObj& o) const { return _ranges.lower_bound(o); }
-
-private:
-    // assumes nothing in this range exists in _ranges
-    void _insertRange(ChunkMap::const_iterator begin, const ChunkMap::const_iterator end);
-
-    ChunkRangeMap _ranges;
-};
-
-/* config.sharding
-     { ns: 'alleyinsider.fs.chunks',
-       key: { ts: 1 },
-       shards: [ { min: 1, max: 100, server: a }, { min: 101, max: 200, server: b } ]
-     }
-*/
-class ChunkManager {
-public:
-    typedef std::map<std::string, ChunkVersion> ShardVersionMap;
-
-    // Loads a new chunk manager from a collection document
-    ChunkManager(const BSONObj& collDoc);
-
-    // Creates an empty chunk manager for the namespace
-    ChunkManager(const std::string& ns, const ShardKeyPattern& pattern, bool unique);
-
-    std::string getns() const { return _ns; }
-
-    const ShardKeyPattern& getShardKeyPattern() const { return _keyPattern; }
-
-    bool isUnique() const { return _unique; }
-
-    /**
-     * this is just an increasing number of how many ChunkManagers we have so we know if something has been updated
-     */
-    unsigned long long getSequenceNumber() const { return _sequenceNumber; }
-
-    //
-    // After constructor is invoked, we need to call loadExistingRanges. If this is a new
-    // sharded collection, we can call createFirstChunks first.
-    //
-
-    // Creates new chunks based on info in chunk manager
-    void createFirstChunks(const std::string& config,
-                           const Shard& primary,
-                           const std::vector<BSONObj>* initPoints,
-                           const std::vector<Shard>* initShards);
-
-    // Loads existing ranges based on info in chunk manager
-    void loadExistingRanges(const std::string& config, const ChunkManager* oldManager);
-
-
-    // Helpers for load
-    void calcInitSplitsAndShards(const Shard& primary,
-                                 const std::vector<BSONObj>* initPoints,
-                                 const std::vector<Shard>* initShards,
-                                 std::vector<BSONObj>* splitPoints,
-                                 std::vector<Shard>* shards) const;
-
-    //
-    // Methods to use once loaded / created
-    //
-
-    int numChunks() const { return _chunkMap.size(); }
-
-    /**
-     * Given a key that has been extracted from a document, returns the
-     * chunk that contains that key.
-     *
-     * For instance, to locate the chunk for document { a : "foo", b : "bar" }
-     * when the shard key is { a : "hashed" }, you can call
-     * findIntersectingChunk() on { a : hash("foo") }
-     */
-    ChunkPtr findIntersectingChunk(const BSONObj& shardKey) const;
-
-    void getShardsForQuery(std::set<Shard>& shards, const BSONObj& query) const;
-    void getAllShards(std::set<Shard>& all) const;
-    /** @param shards set to the shards covered by the interval [min, max], see SERVER-4791 */
-    void getShardsForRange(std::set<Shard>& shards, const BSONObj& min, const BSONObj& max) const;
-
-    // Transforms query into bounds for each field in the shard key
-    // for example :
-    //   Key { a : 1, b : 1 },
-    //   Query { a : { $gte : 1, $lt : 2 },
-    //           b : { $gte : 3, $lt : 4 } }
-    //   => Bounds { a : [1, 2), b : [3, 4) }
-    static IndexBounds getIndexBoundsForQuery(const BSONObj& key, const CanonicalQuery* canonicalQuery);
-
-    // Collapse query solution tree.
-    //
-    // If it has OR node, the result could be a superset of the index bounds generated.
-    // Since to give a single IndexBounds, this gives the union of bounds on each field.
-    // for example :
-    //   OR: { a : (0, 1), b : (0, 1) },
-    //       { a : (2, 3), b : (2, 3) }
-    //   =>  { a : (0, 1), (2, 3), b : (0, 1), (2, 3) }
-    static IndexBounds collapseQuerySolution(const QuerySolutionNode* node);
-
-    const ChunkMap& getChunkMap() const { return _chunkMap; }
-
-    /**
-     * Returns true if, for this shard, the chunks are identical in both chunk managers
-     */
-    bool compatibleWith(const ChunkManager& other, const std::string& shard) const;
-
-    std::string toString() const;
-
-    ChunkVersion getVersion(const std::string& shardName) const;
-    ChunkVersion getVersion() const;
-
-    void getInfo(BSONObjBuilder& b) const;
-
-    /**
-     * @param me - so i don't get deleted before i'm done
-     */
-    void drop(ChunkManagerPtr me) const;
-
-    void _printChunks() const;
-
-    int getCurrentDesiredChunkSize() const;
-
-    ChunkManagerPtr reload(bool force = true) const; // doesn't modify self!
-
-    void markMinorForReload(ChunkVersion majorVersion) const;
-    void getMarkedMinorVersions(std::set<ChunkVersion>& minorVersions) const;
-
-private:
-
-    // helpers for loading
-
-    // returns true if load was consistent
-    bool _load(const std::string& config,
-               ChunkMap& chunks,
-               std::set<Shard>& shards,
-               ShardVersionMap& shardVersions,
-               const ChunkManager* oldManager);
-    static bool _isValid(const ChunkMap& chunks);
-
-    // end helpers
-
-    // All members should be const for thread-safety
-    const std::string _ns;
-    const ShardKeyPattern _keyPattern;
-    const bool _unique;
-
-    const ChunkMap _chunkMap;
-    const ChunkRangeManager _chunkRanges;
-
-    const std::set<Shard> _shards;
-
-    const ShardVersionMap _shardVersions; // max version per shard
-
-    // max version of any chunk
-    ChunkVersion _version;
-
-    mutable mutex _mutex; // only used with _nsLock
-
-    const unsigned long long _sequenceNumber;
-
-    //
-    // Split Heuristic info
-    //
-
-
-    class SplitHeuristics {
-    public:
-
-        SplitHeuristics()
-            : _splitTickets(maxParallelSplits) {
-        }
-
-        TicketHolder _splitTickets;
-
-        // Test whether we should split once data * splitTestFactor > chunkSize (approximately)
-        static const int splitTestFactor = 5;
-        // Maximum number of parallel threads requesting a split
-        static const int maxParallelSplits = 5;
-
-        // The idea here is that we're over-aggressive on split testing by a factor of
-        // splitTestFactor, so we can safely wait until we get to splitTestFactor invalid splits
-        // before changing. Unfortunately, we also potentially over-request the splits by a
-        // factor of maxParallelSplits, but since the factors are identical it works out
-        // (for now) for parallel or sequential oversplitting.
-        // TODO: Make splitting a separate thread with notifications?
<nl> - static const int staleMinorReloadThreshold = maxParallelSplits ; <nl> - } ; <nl> - <nl> - mutable SplitHeuristics _splitHeuristics ; <nl> - <nl> - / / <nl> - / / End split heuristics <nl> - / / <nl> - <nl> - friend class Chunk ; <nl> - friend class ChunkRangeManager ; / / only needed for CRM : : assertValid ( ) <nl> - static AtomicUInt32 NextSequenceNumber ; <nl> - <nl> - friend class TestableChunkManager ; <nl> - } ; <nl> + typedef boost : : shared_ptr < const Chunk > ChunkPtr ; <nl> <nl> } / / namespace mongo <nl> new file mode 100644 <nl> index 000000000000 . . 0270fbe2ba62 <nl> mmm / dev / null <nl> ppp b / src / mongo / s / chunk_manager . cpp <nl> <nl> + / * * <nl> + * Copyright ( C ) 2015 MongoDB Inc . <nl> + * <nl> + * This program is free software : you can redistribute it and / or modify <nl> + * it under the terms of the GNU Affero General Public License , version 3 , <nl> + * as published by the Free Software Foundation . <nl> + * <nl> + * This program is distributed in the hope that it will be useful , <nl> + * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + * GNU Affero General Public License for more details . <nl> + * <nl> + * You should have received a copy of the GNU Affero General Public License <nl> + * along with this program . If not , see < http : / / www . gnu . org / licenses / > . <nl> + * <nl> + * As a special exception , the copyright holders give permission to link the <nl> + * code of portions of this program with the OpenSSL library under certain <nl> + * conditions as described in each individual source file and distribute <nl> + * linked combinations including the program with the OpenSSL library . You <nl> + * must comply with the GNU Affero General Public License in all respects for <nl> + * all of the code used other than as permitted herein . If you modify file ( s ) <nl> + * with this exception , you may extend this exception to your version of the <nl> + * file ( s ) , but you are not obligated to do so . If you do not wish to do so , <nl> + * delete this exception statement from your version . If you delete this <nl> + * exception statement from all source files in the program , then also delete <nl> + * it in the license file . <nl> + * / <nl> + <nl> + # define MONGO_LOG_DEFAULT_COMPONENT : : mongo : : logger : : LogComponent : : kSharding <nl> + <nl> + # include " mongo / platform / basic . h " <nl> + <nl> + # include " mongo / s / chunk_manager . h " <nl> + <nl> + # include < map > <nl> + # include < set > <nl> + <nl> + # include " mongo / db / query / index_bounds_builder . h " <nl> + # include " mongo / db / query / query_planner . h " <nl> + # include " mongo / db / query / query_planner_common . h " <nl> + # include " mongo / s / chunk_diff . h " <nl> + # include " mongo / s / client / shard_connection . h " <nl> + # include " mongo / s / cluster_write . h " <nl> + # include " mongo / s / distlock . h " <nl> + # include " mongo / s / grid . h " <nl> + # include " mongo / s / type_collection . h " <nl> + # include " mongo / util / log . h " <nl> + # include " mongo / util / timer . 
h " <nl> + <nl> + namespace mongo { <nl> + <nl> + using boost : : shared_ptr ; <nl> + <nl> + using std : : make_pair ; <nl> + using std : : map ; <nl> + using std : : max ; <nl> + using std : : pair ; <nl> + using std : : set ; <nl> + using std : : string ; <nl> + using std : : vector ; <nl> + <nl> + namespace { <nl> + <nl> + / * * <nl> + * This is an adapter so we can use config diffs - mongos and mongod do them slightly <nl> + * differently <nl> + * <nl> + * The mongos adapter here tracks all shards , and stores ranges by ( max , Chunk ) in the map . <nl> + * / <nl> + class CMConfigDiffTracker : public ConfigDiffTracker < ChunkPtr , std : : string > { <nl> + public : <nl> + CMConfigDiffTracker ( ChunkManager * manager ) : _manager ( manager ) { } <nl> + <nl> + virtual bool isTracked ( const BSONObj & chunkDoc ) const { <nl> + / / Mongos tracks all shards <nl> + return true ; <nl> + } <nl> + <nl> + virtual BSONObj minFrom ( const ChunkPtr & val ) const { <nl> + return val . get ( ) - > getMin ( ) ; <nl> + } <nl> + <nl> + virtual bool isMinKeyIndexed ( ) const { return false ; } <nl> + <nl> + virtual pair < BSONObj , ChunkPtr > rangeFor ( const BSONObj & chunkDoc , const BSONObj & min , const BSONObj & max ) const { <nl> + ChunkPtr c ( new Chunk ( _manager , chunkDoc ) ) ; <nl> + return make_pair ( max , c ) ; <nl> + } <nl> + <nl> + virtual string shardFor ( const string & hostName ) const { <nl> + Shard shard = Shard : : make ( hostName ) ; <nl> + return shard . getName ( ) ; <nl> + } <nl> + <nl> + private : <nl> + ChunkManager * _manager ; <nl> + } ; <nl> + <nl> + <nl> + bool allOfType ( BSONType type , const BSONObj & o ) { <nl> + BSONObjIterator it ( o ) ; <nl> + while ( it . more ( ) ) { <nl> + if ( it . next ( ) . type ( ) ! = type ) { <nl> + return false ; <nl> + } <nl> + } <nl> + return true ; <nl> + } <nl> + <nl> + } / / namespace <nl> + <nl> + AtomicUInt32 ChunkManager : : NextSequenceNumber ( 1U ) ; <nl> + <nl> + ChunkManager : : ChunkManager ( const string & ns , const ShardKeyPattern & pattern , bool unique ) : <nl> + _ns ( ns ) , <nl> + _keyPattern ( pattern . getKeyPattern ( ) ) , <nl> + _unique ( unique ) , <nl> + _chunkRanges ( ) , <nl> + _mutex ( " ChunkManager " ) , <nl> + _sequenceNumber ( NextSequenceNumber . addAndFetch ( 1 ) ) <nl> + { <nl> + / / <nl> + / / Sets up a chunk manager from new data <nl> + / / <nl> + } <nl> + <nl> + ChunkManager : : ChunkManager ( const BSONObj & collDoc ) : <nl> + / / Need the ns early , to construct the lock <nl> + / / TODO : Construct lock on demand ? Not sure why we need to keep it around <nl> + _ns ( collDoc [ CollectionType : : ns ( ) ] . type ( ) = = String ? <nl> + collDoc [ CollectionType : : ns ( ) ] . String ( ) : <nl> + " " ) , <nl> + _keyPattern ( collDoc [ CollectionType : : keyPattern ( ) ] . type ( ) = = Object ? <nl> + collDoc [ CollectionType : : keyPattern ( ) ] . Obj ( ) . getOwned ( ) : <nl> + BSONObj ( ) ) , <nl> + _unique ( collDoc [ CollectionType : : unique ( ) ] . trueValue ( ) ) , <nl> + _chunkRanges ( ) , <nl> + _mutex ( " ChunkManager " ) , <nl> + / / The shard versioning mechanism hinges on keeping track of the number of times we reloaded ChunkManager ' s . <nl> + / / Increasing this number here will prompt checkShardVersion ( ) to refresh the connection - level versions to <nl> + / / the most up to date value . <nl> + _sequenceNumber ( NextSequenceNumber . 
addAndFetch ( 1 ) ) <nl> + { <nl> + <nl> + / / <nl> + / / Sets up a chunk manager from an existing sharded collection document <nl> + / / <nl> + <nl> + verify ( _ns ! = " " ) ; <nl> + verify ( ! _keyPattern . toBSON ( ) . isEmpty ( ) ) ; <nl> + <nl> + _version = ChunkVersion : : fromBSON ( collDoc ) ; <nl> + } <nl> + <nl> + void ChunkManager : : loadExistingRanges ( const string & config , const ChunkManager * oldManager ) { <nl> + int tries = 3 ; <nl> + while ( tries - - ) { <nl> + ChunkMap chunkMap ; <nl> + set < Shard > shards ; <nl> + ShardVersionMap shardVersions ; <nl> + Timer t ; <nl> + <nl> + bool success = _load ( config , chunkMap , shards , shardVersions , oldManager ) ; <nl> + <nl> + if ( success ) { <nl> + { <nl> + int ms = t . millis ( ) ; <nl> + log ( ) < < " ChunkManager : time to load chunks for " < < _ns < < " : " < < ms < < " ms " <nl> + < < " sequenceNumber : " < < _sequenceNumber <nl> + < < " version : " < < _version . toString ( ) <nl> + < < " based on : " < < <nl> + ( oldManager ? oldManager - > getVersion ( ) . toString ( ) : " ( empty ) " ) ; <nl> + } <nl> + <nl> + / / TODO : Merge into diff code above , so we validate in one place <nl> + if ( _isValid ( chunkMap ) ) { <nl> + / / These variables are const for thread - safety . Since the <nl> + / / constructor can only be called from one thread , we don ' t have <nl> + / / to worry about that here . <nl> + const_cast < ChunkMap & > ( _chunkMap ) . swap ( chunkMap ) ; <nl> + const_cast < set < Shard > & > ( _shards ) . swap ( shards ) ; <nl> + const_cast < ShardVersionMap & > ( _shardVersions ) . swap ( shardVersions ) ; <nl> + const_cast < ChunkRangeManager & > ( _chunkRanges ) . reloadAll ( _chunkMap ) ; <nl> + <nl> + return ; <nl> + } <nl> + } <nl> + <nl> + if ( _chunkMap . size ( ) < 10 ) { <nl> + _printChunks ( ) ; <nl> + } <nl> + <nl> + warning ( ) < < " ChunkManager loaded an invalid config for " < < _ns <nl> + < < " , trying again " ; <nl> + <nl> + sleepmillis ( 10 * ( 3 - tries ) ) ; <nl> + } <nl> + <nl> + / / this will abort construction so we should never have a reference to an invalid config <nl> + msgasserted ( 13282 , " Couldn ' t load a valid config for " + _ns + " after 3 attempts . Please try again . " ) ; <nl> + } <nl> + <nl> + bool ChunkManager : : _load ( const string & config , <nl> + ChunkMap & chunkMap , <nl> + set < Shard > & shards , <nl> + ShardVersionMap & shardVersions , <nl> + const ChunkManager * oldManager ) <nl> + { <nl> + <nl> + / / Reset the max version , but not the epoch , when we aren ' t loading from the oldManager <nl> + _version = ChunkVersion ( 0 , 0 , _version . epoch ( ) ) ; <nl> + <nl> + / / If we have a previous version of the ChunkManager to work from , use that info to reduce <nl> + / / our config query <nl> + if ( oldManager & & oldManager - > getVersion ( ) . isSet ( ) ) { <nl> + <nl> + / / Get the old max version <nl> + _version = oldManager - > getVersion ( ) ; <nl> + / / Load a copy of the old versions <nl> + shardVersions = oldManager - > _shardVersions ; <nl> + <nl> + / / Load a copy of the chunk map , replacing the chunk manager with our own <nl> + const ChunkMap & oldChunkMap = oldManager - > getChunkMap ( ) ; <nl> + <nl> + / / Could be v . expensive <nl> + / / TODO : If chunks were immutable and didn ' t reference the manager , we could do more <nl> + / / interesting things here <nl> + for ( ChunkMap : : const_iterator it = oldChunkMap . begin ( ) ; it ! = oldChunkMap . 
end ( ) ; it + + ) { <nl> + <nl> + ChunkPtr oldC = it - > second ; <nl> + ChunkPtr c ( new Chunk ( this , oldC - > getMin ( ) , <nl> + oldC - > getMax ( ) , <nl> + oldC - > getShard ( ) , <nl> + oldC - > getLastmod ( ) ) ) ; <nl> + <nl> + c - > setBytesWritten ( oldC - > getBytesWritten ( ) ) ; <nl> + <nl> + chunkMap . insert ( make_pair ( oldC - > getMax ( ) , c ) ) ; <nl> + } <nl> + <nl> + LOG ( 2 ) < < " loading chunk manager for collection " < < _ns <nl> + < < " using old chunk manager w / version " < < _version . toString ( ) <nl> + < < " and " < < oldChunkMap . size ( ) < < " chunks " ; <nl> + } <nl> + <nl> + / / Attach a diff tracker for the versioned chunk data <nl> + CMConfigDiffTracker differ ( this ) ; <nl> + differ . attach ( _ns , chunkMap , _version , shardVersions ) ; <nl> + <nl> + / / Diff tracker should * always * find at least one chunk if collection exists <nl> + int diffsApplied = differ . calculateConfigDiff ( config ) ; <nl> + if ( diffsApplied > 0 ) { <nl> + <nl> + LOG ( 2 ) < < " loaded " < < diffsApplied < < " chunks into new chunk manager for " < < _ns <nl> + < < " with version " < < _version ; <nl> + <nl> + / / Add all the shards we find to the shards set <nl> + for ( ShardVersionMap : : iterator it = shardVersions . begin ( ) ; it ! = shardVersions . end ( ) ; it + + ) { <nl> + shards . insert ( it - > first ) ; <nl> + } <nl> + <nl> + return true ; <nl> + } <nl> + else if ( diffsApplied = = 0 ) { <nl> + <nl> + / / No chunks were found for the ns <nl> + warning ( ) < < " no chunks found when reloading " < < _ns <nl> + < < " , previous version was " < < _version ; <nl> + <nl> + / / Set all our data to empty <nl> + chunkMap . clear ( ) ; <nl> + shardVersions . clear ( ) ; <nl> + _version = ChunkVersion ( 0 , 0 , OID ( ) ) ; <nl> + <nl> + return true ; <nl> + } <nl> + else { / / diffsApplied < 0 <nl> + <nl> + bool allInconsistent = differ . numValidDiffs ( ) = = 0 ; <nl> + <nl> + if ( allInconsistent ) { <nl> + / / All versions are different , this can be normal <nl> + warning ( ) < < " major change in chunk information found when reloading " <nl> + < < _ns < < " , previous version was " < < _version ; <nl> + } <nl> + else { <nl> + / / Inconsistent load halfway through ( due to yielding cursor during load ) <nl> + / / should be rare <nl> + warning ( ) < < " inconsistent chunks found when reloading " <nl> + < < _ns < < " , previous version was " < < _version <nl> + < < " , this should be rare " ; <nl> + } <nl> + <nl> + / / Set all our data to empty to be extra safe <nl> + chunkMap . clear ( ) ; <nl> + shardVersions . clear ( ) ; <nl> + _version = ChunkVersion ( 0 , 0 , OID ( ) ) ; <nl> + <nl> + return allInconsistent ; <nl> + } <nl> + <nl> + } <nl> + <nl> + ChunkManagerPtr ChunkManager : : reload ( bool force ) const { <nl> + return grid . getDBConfig ( getns ( ) ) - > getChunkManager ( getns ( ) , force ) ; <nl> + } <nl> + <nl> + bool ChunkManager : : _isValid ( const ChunkMap & chunkMap ) { <nl> + # define ENSURE ( x ) do { if ( ! ( x ) ) { log ( ) < < " ChunkManager : : _isValid failed : " # x ; return false ; } } while ( 0 ) <nl> + <nl> + if ( chunkMap . empty ( ) ) <nl> + return true ; <nl> + <nl> + / / Check endpoints <nl> + ENSURE ( allOfType ( MinKey , chunkMap . begin ( ) - > second - > getMin ( ) ) ) ; <nl> + ENSURE ( allOfType ( MaxKey , boost : : prior ( chunkMap . end ( ) ) - > second - > getMax ( ) ) ) ; <nl> + <nl> + / / Make sure there are no gaps or overlaps <nl> + for ( ChunkMap : : const_iterator it = boost : : next ( chunkMap . 
begin ( ) ) , end = chunkMap . end ( ) ; it ! = end ; + + it ) { <nl> + ChunkMap : : const_iterator last = boost : : prior ( it ) ; <nl> + <nl> + if ( ! ( it - > second - > getMin ( ) = = last - > second - > getMax ( ) ) ) { <nl> + PRINT ( last - > second - > toString ( ) ) ; <nl> + PRINT ( it - > second - > toString ( ) ) ; <nl> + PRINT ( it - > second - > getMin ( ) ) ; <nl> + PRINT ( last - > second - > getMax ( ) ) ; <nl> + } <nl> + ENSURE ( it - > second - > getMin ( ) = = last - > second - > getMax ( ) ) ; <nl> + } <nl> + <nl> + return true ; <nl> + <nl> + # undef ENSURE <nl> + } <nl> + <nl> + void ChunkManager : : _printChunks ( ) const { <nl> + for ( ChunkMap : : const_iterator it = _chunkMap . begin ( ) , end = _chunkMap . end ( ) ; it ! = end ; + + it ) { <nl> + log ( ) < < * it - > second ; <nl> + } <nl> + } <nl> + <nl> + void ChunkManager : : calcInitSplitsAndShards ( const Shard & primary , <nl> + const vector < BSONObj > * initPoints , <nl> + const vector < Shard > * initShards , <nl> + vector < BSONObj > * splitPoints , <nl> + vector < Shard > * shards ) const <nl> + { <nl> + verify ( _chunkMap . size ( ) = = 0 ) ; <nl> + <nl> + unsigned long long numObjects = 0 ; <nl> + Chunk c ( this , _keyPattern . getKeyPattern ( ) . globalMin ( ) , <nl> + _keyPattern . getKeyPattern ( ) . globalMax ( ) , primary ) ; <nl> + <nl> + if ( ! initPoints | | ! initPoints - > size ( ) ) { <nl> + / / discover split points <nl> + { <nl> + / / get stats to see if there is any data <nl> + ScopedDbConnection shardConn ( primary . getConnString ( ) ) ; <nl> + <nl> + numObjects = shardConn - > count ( getns ( ) ) ; <nl> + shardConn . done ( ) ; <nl> + } <nl> + <nl> + if ( numObjects > 0 ) <nl> + c . pickSplitVector ( * splitPoints , Chunk : : MaxChunkSize ) ; <nl> + <nl> + / / since docs already exist , must use the primary shard <nl> + shards - > push_back ( primary ) ; <nl> + } else { <nl> + / / make sure points are unique and ordered <nl> + set < BSONObj > orderedPts ; <nl> + for ( unsigned i = 0 ; i < initPoints - > size ( ) ; + + i ) { <nl> + BSONObj pt = ( * initPoints ) [ i ] ; <nl> + orderedPts . insert ( pt ) ; <nl> + } <nl> + for ( set < BSONObj > : : iterator it = orderedPts . begin ( ) ; it ! = orderedPts . end ( ) ; + + it ) { <nl> + splitPoints - > push_back ( * it ) ; <nl> + } <nl> + <nl> + if ( ! initShards | | ! initShards - > size ( ) ) { <nl> + / / If not specified , only use the primary shard ( note that it ' s not safe for mongos <nl> + / / to put initial chunks on other shards without the primary mongod knowing ) . <nl> + shards - > push_back ( primary ) ; <nl> + } else { <nl> + std : : copy ( initShards - > begin ( ) , initShards - > end ( ) , std : : back_inserter ( * shards ) ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + void ChunkManager : : createFirstChunks ( const string & config , <nl> + const Shard & primary , <nl> + const vector < BSONObj > * initPoints , <nl> + const vector < Shard > * initShards ) <nl> + { <nl> + / / TODO distlock ? <nl> + / / TODO : Race condition if we shard the collection and insert data while we split across <nl> + / / the non - primary shard . <nl> + <nl> + vector < BSONObj > splitPoints ; <nl> + vector < Shard > shards ; <nl> + <nl> + calcInitSplitsAndShards ( primary , initPoints , initShards , <nl> + & splitPoints , & shards ) ; <nl> + <nl> + / / this is the first chunk ; start the versioning from scratch <nl> + ChunkVersion version ; <nl> + version . incEpoch ( ) ; <nl> + version . 
incMajor ( ) ; <nl> + <nl> + log ( ) < < " going to create " < < splitPoints . size ( ) + 1 < < " chunk ( s ) for : " < < _ns <nl> + < < " using new epoch " < < version . epoch ( ) ; <nl> + <nl> + ScopedDbConnection conn ( config , 30 ) ; <nl> + <nl> + / / Make sure we don ' t have any chunks that already exist here <nl> + unsigned long long existingChunks = <nl> + conn - > count ( ChunkType : : ConfigNS , BSON ( ChunkType : : ns ( _ns ) ) ) ; <nl> + <nl> + uassert ( 13449 , str : : stream ( ) < < " collection " < < _ns < < " already sharded with " <nl> + < < existingChunks < < " chunks " , existingChunks = = 0 ) ; <nl> + conn . done ( ) ; <nl> + <nl> + for ( unsigned i = 0 ; i < = splitPoints . size ( ) ; i + + ) { <nl> + BSONObj min = i = = 0 ? _keyPattern . getKeyPattern ( ) . globalMin ( ) : splitPoints [ i - 1 ] ; <nl> + BSONObj max = i < splitPoints . size ( ) ? <nl> + splitPoints [ i ] : _keyPattern . getKeyPattern ( ) . globalMax ( ) ; <nl> + <nl> + Chunk temp ( this , min , max , shards [ i % shards . size ( ) ] , version ) ; <nl> + <nl> + BSONObjBuilder chunkBuilder ; <nl> + temp . serialize ( chunkBuilder ) ; <nl> + BSONObj chunkObj = chunkBuilder . obj ( ) ; <nl> + <nl> + Status result = clusterUpdate ( ChunkType : : ConfigNS , <nl> + BSON ( ChunkType : : name ( temp . genID ( ) ) ) , <nl> + chunkObj , <nl> + true , / / upsert <nl> + false , / / multi <nl> + NULL ) ; <nl> + <nl> + version . incMinor ( ) ; <nl> + <nl> + if ( ! result . isOK ( ) ) { <nl> + string ss = str : : stream ( ) < < " creating first chunks failed . result : " <nl> + < < result . reason ( ) ; <nl> + error ( ) < < ss ; <nl> + msgasserted ( 15903 , ss ) ; <nl> + } <nl> + } <nl> + <nl> + _version = ChunkVersion ( 0 , 0 , version . epoch ( ) ) ; <nl> + } <nl> + <nl> + ChunkPtr ChunkManager : : findIntersectingChunk ( const BSONObj & shardKey ) const { <nl> + { <nl> + BSONObj chunkMin ; <nl> + ChunkPtr chunk ; <nl> + { <nl> + ChunkMap : : const_iterator it = _chunkMap . upper_bound ( shardKey ) ; <nl> + if ( it ! = _chunkMap . end ( ) ) { <nl> + chunkMin = it - > first ; <nl> + chunk = it - > second ; <nl> + } <nl> + } <nl> + <nl> + if ( chunk ) { <nl> + if ( chunk - > containsKey ( shardKey ) ) { <nl> + return chunk ; <nl> + } <nl> + <nl> + PRINT ( chunkMin ) ; <nl> + PRINT ( * chunk ) ; <nl> + PRINT ( shardKey ) ; <nl> + <nl> + reload ( ) ; <nl> + massert ( 13141 , " Chunk map pointed to incorrect chunk " , false ) ; <nl> + } <nl> + } <nl> + <nl> + msgasserted ( 8070 , <nl> + str : : stream ( ) < < " couldn ' t find a chunk intersecting : " < < shardKey <nl> + < < " for ns : " < < _ns <nl> + < < " at version : " < < _version . toString ( ) <nl> + < < " , number of chunks : " < < _chunkMap . size ( ) ) ; <nl> + } <nl> + <nl> + void ChunkManager : : getShardsForQuery ( set < Shard > & shards , const BSONObj & query ) const { <nl> + CanonicalQuery * canonicalQuery = NULL ; <nl> + Status status = CanonicalQuery : : canonicalize ( <nl> + _ns , <nl> + query , <nl> + & canonicalQuery , <nl> + WhereCallbackNoop ( ) ) ; <nl> + <nl> + boost : : scoped_ptr < CanonicalQuery > canonicalQueryPtr ( canonicalQuery ) ; <nl> + <nl> + uassert ( status . code ( ) , status . reason ( ) , status . 
isOK ( ) ) ; <nl> + <nl> + / / Query validation <nl> + if ( QueryPlannerCommon : : hasNode ( canonicalQuery - > root ( ) , MatchExpression : : GEO_NEAR ) ) { <nl> + uassert ( 13501 , " use geoNear command rather than $ near query " , false ) ; <nl> + } <nl> + <nl> + / / Transforms query into bounds for each field in the shard key <nl> + / / for example : <nl> + / / Key { a : 1 , b : 1 } , <nl> + / / Query { a : { $ gte : 1 , $ lt : 2 } , <nl> + / / b : { $ gte : 3 , $ lt : 4 } } <nl> + / / = > Bounds { a : [ 1 , 2 ) , b : [ 3 , 4 ) } <nl> + IndexBounds bounds = getIndexBoundsForQuery ( _keyPattern . toBSON ( ) , canonicalQuery ) ; <nl> + <nl> + / / Transforms bounds for each shard key field into full shard key ranges <nl> + / / for example : <nl> + / / Key { a : 1 , b : 1 } <nl> + / / Bounds { a : [ 1 , 2 ) , b : [ 3 , 4 ) } <nl> + / / = > Ranges { a : 1 , b : 3 } = > { a : 2 , b : 4 } <nl> + BoundList ranges = _keyPattern . flattenBounds ( bounds ) ; <nl> + <nl> + for ( BoundList : : const_iterator it = ranges . begin ( ) ; it ! = ranges . end ( ) ; <nl> + + + it ) { <nl> + <nl> + getShardsForRange ( shards , it - > first / * min * / , it - > second / * max * / ) ; <nl> + <nl> + / / once we know we need to visit all shards no need to keep looping <nl> + if ( shards . size ( ) = = _shards . size ( ) ) break ; <nl> + } <nl> + <nl> + / / SERVER - 4914 Some clients of getShardsForQuery ( ) assume at least one shard will be <nl> + / / returned . For now , we satisfy that assumption by adding a shard with no matches rather <nl> + / / than return an empty set of shards . <nl> + if ( shards . empty ( ) ) { <nl> + massert ( 16068 , " no chunk ranges available " , ! _chunkRanges . ranges ( ) . empty ( ) ) ; <nl> + shards . insert ( _chunkRanges . ranges ( ) . begin ( ) - > second - > getShard ( ) ) ; <nl> + } <nl> + } <nl> + <nl> + void ChunkManager : : getShardsForRange ( set < Shard > & shards , <nl> + const BSONObj & min , <nl> + const BSONObj & max ) const { <nl> + <nl> + ChunkRangeMap : : const_iterator it = _chunkRanges . upper_bound ( min ) ; <nl> + ChunkRangeMap : : const_iterator end = _chunkRanges . upper_bound ( max ) ; <nl> + <nl> + massert ( 13507 , str : : stream ( ) < < " no chunks found between bounds " < < min < < " and " < < max , it ! = _chunkRanges . ranges ( ) . end ( ) ) ; <nl> + <nl> + if ( end ! = _chunkRanges . ranges ( ) . end ( ) ) + + end ; <nl> + <nl> + for ( ; it ! = end ; + + it ) { <nl> + shards . insert ( it - > second - > getShard ( ) ) ; <nl> + <nl> + / / once we know we need to visit all shards no need to keep looping <nl> + if ( shards . size ( ) = = _shards . size ( ) ) break ; <nl> + } <nl> + } <nl> + <nl> + void ChunkManager : : getAllShards ( set < Shard > & all ) const { <nl> + all . insert ( _shards . begin ( ) , _shards . end ( ) ) ; <nl> + } <nl> + <nl> + IndexBounds ChunkManager : : getIndexBoundsForQuery ( const BSONObj & key , const CanonicalQuery * canonicalQuery ) { <nl> + / / $ text is not allowed in planning since we don ' t have text index on mongos . <nl> + / / <nl> + / / TODO : Treat $ text query as a no - op in planning . So with shard key { a : 1 } , <nl> + / / the query { a : 2 , $ text : { . . . } } will only target to { a : 2 } . 
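<nl> + / / <nl> + / / Editor ' s illustrative sketch ( not part of the original commit ) : the <nl> + / / fallback below is equivalent to building all - values bounds by hand , <nl> + / / e . g . for a single - field shard key : <nl> + / / <nl> + / / IndexBounds bounds ; <nl> + / / IndexBoundsBuilder : : allValuesBounds ( BSON ( " a " < < 1 ) , & bounds ) ; <nl> + / / / / bounds is now { a : [ MinKey , MaxKey ] } , so the query targets every <nl> + / / / / chunk and therefore every shard . <nl> + / /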
<nl> + if ( QueryPlannerCommon : : hasNode ( canonicalQuery - > root ( ) , MatchExpression : : TEXT ) ) { <nl> + IndexBounds bounds ; <nl> + IndexBoundsBuilder : : allValuesBounds ( key , & bounds ) ; / / [ minKey , maxKey ] <nl> + return bounds ; <nl> + } <nl> + <nl> + / / Consider shard key as an index <nl> + string accessMethod = IndexNames : : findPluginName ( key ) ; <nl> + dassert ( accessMethod = = IndexNames : : BTREE | | accessMethod = = IndexNames : : HASHED ) ; <nl> + <nl> + / / Use query framework to generate index bounds <nl> + QueryPlannerParams plannerParams ; <nl> + / / Must use " shard key " index <nl> + plannerParams . options = QueryPlannerParams : : NO_TABLE_SCAN ; <nl> + IndexEntry indexEntry ( key , accessMethod , false / * multiKey * / , false / * sparse * / , <nl> + false / * unique * / , " shardkey " , BSONObj ( ) ) ; <nl> + plannerParams . indices . push_back ( indexEntry ) ; <nl> + <nl> + OwnedPointerVector < QuerySolution > solutions ; <nl> + Status status = QueryPlanner : : plan ( * canonicalQuery , plannerParams , & solutions . mutableVector ( ) ) ; <nl> + uassert ( status . code ( ) , status . reason ( ) , status . isOK ( ) ) ; <nl> + <nl> + IndexBounds bounds ; <nl> + <nl> + for ( vector < QuerySolution * > : : const_iterator it = solutions . begin ( ) ; <nl> + bounds . size ( ) = = 0 & & it ! = solutions . end ( ) ; it + + ) { <nl> + / / Try next solution if we failed to generate index bounds , i . e . bounds . size ( ) = = 0 <nl> + bounds = collapseQuerySolution ( ( * it ) - > root . get ( ) ) ; <nl> + } <nl> + <nl> + if ( bounds . size ( ) = = 0 ) { <nl> + / / We cannot plan the query without collection scan , so target to all shards . <nl> + IndexBoundsBuilder : : allValuesBounds ( key , & bounds ) ; / / [ minKey , maxKey ] <nl> + } <nl> + return bounds ; <nl> + } <nl> + <nl> + IndexBounds ChunkManager : : collapseQuerySolution ( const QuerySolutionNode * node ) { <nl> + if ( node - > children . size ( ) = = 0 ) { <nl> + invariant ( node - > getType ( ) = = STAGE_IXSCAN ) ; <nl> + <nl> + const IndexScanNode * ixNode = static_cast < const IndexScanNode * > ( node ) ; <nl> + return ixNode - > bounds ; <nl> + } <nl> + <nl> + if ( node - > children . size ( ) = = 1 ) { <nl> + / / e . g . FETCH - > IXSCAN <nl> + return collapseQuerySolution ( node - > children . front ( ) ) ; <nl> + } <nl> + <nl> + / / children . size ( ) > 1 , assert it ' s OR / SORT_MERGE . <nl> + if ( node - > getType ( ) ! = STAGE_OR & & node - > getType ( ) ! = STAGE_SORT_MERGE ) { <nl> + / / Unexpected node . We should never reach here . <nl> + error ( ) < < " could not generate index bounds on query solution tree : " < < node - > toString ( ) ; <nl> + dassert ( false ) ; / / We ' d like to know this error in testing . <nl> + <nl> + / / Bail out with all shards in production , since this isn ' t a fatal error . <nl> + return IndexBounds ( ) ; <nl> + } <nl> + <nl> + IndexBounds bounds ; <nl> + for ( vector < QuerySolutionNode * > : : const_iterator it = node - > children . begin ( ) ; <nl> + it ! = node - > children . end ( ) ; it + + ) <nl> + { <nl> + / / The first branch under OR <nl> + if ( it = = node - > children . begin ( ) ) { <nl> + invariant ( bounds . size ( ) = = 0 ) ; <nl> + bounds = collapseQuerySolution ( * it ) ; <nl> + if ( bounds . size ( ) = = 0 ) { / / Got unexpected node in query solution tree <nl> + return IndexBounds ( ) ; <nl> + } <nl> + continue ; <nl> + } <nl> + <nl> + IndexBounds childBounds = collapseQuerySolution ( * it ) ; <nl> + if ( childBounds . 
size ( ) = = 0 ) { / / Got unexpected node in query solution tree <nl> + return IndexBounds ( ) ; <nl> + } <nl> + <nl> + invariant ( childBounds . size ( ) = = bounds . size ( ) ) ; <nl> + for ( size_t i = 0 ; i < bounds . size ( ) ; i + + ) { <nl> + bounds . fields [ i ] . intervals . insert ( bounds . fields [ i ] . intervals . end ( ) , <nl> + childBounds . fields [ i ] . intervals . begin ( ) , <nl> + childBounds . fields [ i ] . intervals . end ( ) ) ; <nl> + } <nl> + } <nl> + <nl> + for ( size_t i = 0 ; i < bounds . size ( ) ; i + + ) { <nl> + IndexBoundsBuilder : : unionize ( & bounds . fields [ i ] ) ; <nl> + } <nl> + <nl> + return bounds ; <nl> + } <nl> + <nl> + bool ChunkManager : : compatibleWith ( const ChunkManager & other , const string & shardName ) const { <nl> + / / Return true if the shard version is the same in the two chunk managers <nl> + / / TODO : This doesn ' t need to be so strong , just major vs <nl> + return other . getVersion ( shardName ) . equals ( getVersion ( shardName ) ) ; <nl> + } <nl> + <nl> + void ChunkManager : : drop ( ) const { <nl> + scoped_lock lk ( _mutex ) ; <nl> + <nl> + configServer . logChange ( " dropCollection . start " , _ns , BSONObj ( ) ) ; <nl> + <nl> + DistributedLock nsLock ( ConnectionString ( configServer . modelServer ( ) , <nl> + ConnectionString : : SYNC ) , <nl> + _ns ) ; <nl> + <nl> + dist_lock_try dlk ; <nl> + try { <nl> + dlk = dist_lock_try ( & nsLock , " drop " ) ; <nl> + } <nl> + catch ( LockException & e ) { <nl> + uassert ( 14022 , str : : stream ( ) < < " Error locking distributed lock for chunk drop . " < < causedBy ( e ) , false ) ; <nl> + } <nl> + <nl> + uassert ( 13331 , " collection ' s metadata is undergoing changes . Please try again . " , dlk . got ( ) ) ; <nl> + <nl> + uassert ( 10174 , " config servers not all up " , configServer . allUp ( false ) ) ; <nl> + <nl> + set < Shard > seen ; <nl> + <nl> + LOG ( 1 ) < < " ChunkManager : : drop : " < < _ns ; <nl> + <nl> + / / lock all shards so no one can do a split / migrate <nl> + for ( ChunkMap : : const_iterator i = _chunkMap . begin ( ) ; i ! = _chunkMap . end ( ) ; + + i ) { <nl> + ChunkPtr c = i - > second ; <nl> + seen . insert ( c - > getShard ( ) ) ; <nl> + } <nl> + <nl> + LOG ( 1 ) < < " ChunkManager : : drop : " < < _ns < < " \ t all locked " ; <nl> + <nl> + map < string , BSONObj > errors ; <nl> + / / delete data from mongod <nl> + for ( set < Shard > : : iterator i = seen . begin ( ) ; i ! = seen . end ( ) ; i + + ) { <nl> + ScopedDbConnection conn ( i - > getConnString ( ) ) ; <nl> + BSONObj info ; <nl> + if ( ! conn - > dropCollection ( _ns , & info ) ) { <nl> + errors [ i - > getConnString ( ) ] = info ; <nl> + } <nl> + conn . done ( ) ; <nl> + } <nl> + if ( ! errors . empty ( ) ) { <nl> + StringBuilder sb ; <nl> + sb < < " Dropping collection failed on the following hosts : " ; <nl> + <nl> + for ( map < string , BSONObj > : : const_iterator it = errors . begin ( ) ; it ! = errors . end ( ) ; ) { <nl> + sb < < it - > first < < " : " < < it - > second ; <nl> + + + it ; <nl> + if ( it ! = errors . end ( ) ) { <nl> + sb < < " , " ; <nl> + } <nl> + } <nl> + <nl> + uasserted ( 16338 , sb . str ( ) ) ; <nl> + } <nl> + <nl> + LOG ( 1 ) < < " ChunkManager : : drop : " < < _ns < < " \ t removed shard data " ; <nl> + <nl> + / / remove chunk data <nl> + Status result = clusterDelete ( ChunkType : : ConfigNS , <nl> + BSON ( ChunkType : : ns ( _ns ) ) , <nl> + 0 / * limit * / , <nl> + NULL ) ; <nl> + <nl> + / / Make sure we ' re dropped on the config <nl> + if ( ! 
result . isOK ( ) ) { <nl> + uasserted ( 17001 , str : : stream ( ) < < " could not drop chunks for " < < _ns <nl> + < < " : " < < result . reason ( ) ) ; <nl> + } <nl> + <nl> + LOG ( 1 ) < < " ChunkManager : : drop : " < < _ns < < " \ t removed chunk data " ; <nl> + <nl> + for ( set < Shard > : : iterator i = seen . begin ( ) ; i ! = seen . end ( ) ; i + + ) { <nl> + ScopedDbConnection conn ( i - > getConnString ( ) ) ; <nl> + BSONObj res ; <nl> + <nl> + / / this is horrible <nl> + / / we need a special command for dropping on the d side <nl> + / / this hack works for the moment <nl> + <nl> + if ( ! setShardVersion ( conn . conn ( ) , <nl> + _ns , <nl> + configServer . modelServer ( ) , <nl> + ChunkVersion ( 0 , 0 , OID ( ) ) , <nl> + NULL , <nl> + true , <nl> + res ) ) { <nl> + <nl> + uasserted ( 8071 , str : : stream ( ) < < " cleaning up after drop failed : " < < res ) ; <nl> + } <nl> + <nl> + conn - > simpleCommand ( " admin " , 0 , " unsetSharding " ) ; <nl> + conn . done ( ) ; <nl> + } <nl> + <nl> + LOG ( 1 ) < < " ChunkManager : : drop : " < < _ns < < " \ t DONE " ; <nl> + configServer . logChange ( " dropCollection " , _ns , BSONObj ( ) ) ; <nl> + } <nl> + <nl> + ChunkVersion ChunkManager : : getVersion ( const std : : string & shardName ) const { <nl> + ShardVersionMap : : const_iterator i = _shardVersions . find ( shardName ) ; <nl> + if ( i = = _shardVersions . end ( ) ) { <nl> + / / Shards without explicitly tracked shard versions ( meaning they have <nl> + / / no chunks ) always have a version of ( 0 , 0 , epoch ) . Note this is <nl> + / / * different * from the dropped chunk version of ( 0 , 0 , OID ( 000 . . . ) ) . <nl> + / / See s / chunk_version . h . <nl> + return ChunkVersion ( 0 , 0 , _version . epoch ( ) ) ; <nl> + } <nl> + return i - > second ; <nl> + } <nl> + <nl> + ChunkVersion ChunkManager : : getVersion ( ) const { <nl> + return _version ; <nl> + } <nl> + <nl> + void ChunkManager : : getInfo ( BSONObjBuilder & b ) const { <nl> + b . append ( CollectionType : : keyPattern ( ) , _keyPattern . toBSON ( ) ) ; <nl> + b . appendBool ( CollectionType : : unique ( ) , _unique ) ; <nl> + _version . addEpochToBSON ( b , CollectionType : : DEPRECATED_lastmod ( ) ) ; <nl> + } <nl> + <nl> + string ChunkManager : : toString ( ) const { <nl> + StringBuilder sb ; <nl> + sb < < " ChunkManager : " < < _ns < < " key : " < < _keyPattern . toString ( ) < < ' \ n ' ; <nl> + <nl> + for ( ChunkMap : : const_iterator i = _chunkMap . begin ( ) ; i ! = _chunkMap . end ( ) ; + + i ) { <nl> + sb < < " \ t " < < i - > second - > toString ( ) < < ' \ n ' ; <nl> + } <nl> + <nl> + return sb . str ( ) ; <nl> + } <nl> + <nl> + <nl> + ChunkRange : : ChunkRange ( ChunkMap : : const_iterator begin , const ChunkMap : : const_iterator end ) <nl> + : _manager ( begin - > second - > getManager ( ) ) , <nl> + _shard ( begin - > second - > getShard ( ) ) , <nl> + _min ( begin - > second - > getMin ( ) ) , <nl> + _max ( boost : : prior ( end ) - > second - > getMax ( ) ) { <nl> + <nl> + invariant ( begin ! = end ) ; <nl> + <nl> + DEV while ( begin ! = end ) { <nl> + dassert ( begin - > second - > getManager ( ) = = _manager ) ; <nl> + dassert ( begin - > second - > getShard ( ) = = _shard ) ; <nl> + + + begin ; <nl> + } <nl> + } <nl> + <nl> + ChunkRange : : ChunkRange ( const ChunkRange & min , const ChunkRange & max ) <nl> + : _manager ( min . getManager ( ) ) , <nl> + _shard ( min . getShard ( ) ) , <nl> + _min ( min . getMin ( ) ) , <nl> + _max ( max . getMax ( ) ) { <nl> + <nl> + invariant ( min . 
getShard ( ) = = max . getShard ( ) ) ; <nl> + invariant ( min . getManager ( ) = = max . getManager ( ) ) ; <nl> + invariant ( min . getMax ( ) = = max . getMin ( ) ) ; <nl> + } <nl> + <nl> + string ChunkRange : : toString ( ) const { <nl> + StringBuilder sb ; <nl> + sb < < " ChunkRange ( min = " < < _min < < " , max = " < < _max <nl> + < < " , shard = " < < _shard . toString ( ) < < " ) " ; <nl> + <nl> + return sb . str ( ) ; <nl> + } <nl> + <nl> + <nl> + void ChunkRangeManager : : assertValid ( ) const { <nl> + if ( _ranges . empty ( ) ) <nl> + return ; <nl> + <nl> + try { <nl> + / / No Nulls <nl> + for ( ChunkRangeMap : : const_iterator it = _ranges . begin ( ) , end = _ranges . end ( ) ; it ! = end ; + + it ) { <nl> + verify ( it - > second ) ; <nl> + } <nl> + <nl> + / / Check endpoints <nl> + verify ( allOfType ( MinKey , _ranges . begin ( ) - > second - > getMin ( ) ) ) ; <nl> + verify ( allOfType ( MaxKey , boost : : prior ( _ranges . end ( ) ) - > second - > getMax ( ) ) ) ; <nl> + <nl> + / / Make sure there are no gaps or overlaps <nl> + for ( ChunkRangeMap : : const_iterator it = boost : : next ( _ranges . begin ( ) ) , end = _ranges . end ( ) ; it ! = end ; + + it ) { <nl> + ChunkRangeMap : : const_iterator last = boost : : prior ( it ) ; <nl> + verify ( it - > second - > getMin ( ) = = last - > second - > getMax ( ) ) ; <nl> + } <nl> + <nl> + / / Check Map keys <nl> + for ( ChunkRangeMap : : const_iterator it = _ranges . begin ( ) , end = _ranges . end ( ) ; it ! = end ; + + it ) { <nl> + verify ( it - > first = = it - > second - > getMax ( ) ) ; <nl> + } <nl> + <nl> + / / Make sure we match the original chunks <nl> + const ChunkMap chunks = _ranges . begin ( ) - > second - > getManager ( ) - > _chunkMap ; <nl> + for ( ChunkMap : : const_iterator i = chunks . begin ( ) ; i ! = chunks . end ( ) ; + + i ) { <nl> + const ChunkPtr chunk = i - > second ; <nl> + <nl> + ChunkRangeMap : : const_iterator min = _ranges . upper_bound ( chunk - > getMin ( ) ) ; <nl> + ChunkRangeMap : : const_iterator max = _ranges . lower_bound ( chunk - > getMax ( ) ) ; <nl> + <nl> + verify ( min ! = _ranges . end ( ) ) ; <nl> + verify ( max ! = _ranges . end ( ) ) ; <nl> + verify ( min = = max ) ; <nl> + verify ( min - > second - > getShard ( ) = = chunk - > getShard ( ) ) ; <nl> + verify ( min - > second - > containsKey ( chunk - > getMin ( ) ) ) ; <nl> + verify ( min - > second - > containsKey ( chunk - > getMax ( ) ) | | ( min - > second - > getMax ( ) = = chunk - > getMax ( ) ) ) ; <nl> + } <nl> + <nl> + } <nl> + catch ( . . . ) { <nl> + error ( ) < < " \ t invalid ChunkRangeMap ! printing ranges : " ; <nl> + <nl> + for ( ChunkRangeMap : : const_iterator it = _ranges . begin ( ) , end = _ranges . end ( ) ; it ! = end ; + + it ) { <nl> + log ( ) < < it - > first < < " : " < < it - > second - > toString ( ) ; <nl> + } <nl> + <nl> + throw ; <nl> + } <nl> + } <nl> + <nl> + void ChunkRangeManager : : reloadAll ( const ChunkMap & chunks ) { <nl> + _ranges . clear ( ) ; <nl> + _insertRange ( chunks . begin ( ) , chunks . end ( ) ) ; <nl> + <nl> + DEV assertValid ( ) ; <nl> + } <nl> + <nl> + void ChunkRangeManager : : _insertRange ( ChunkMap : : const_iterator begin , const ChunkMap : : const_iterator end ) { <nl> + while ( begin ! = end ) { <nl> + ChunkMap : : const_iterator first = begin ; <nl> + Shard shard = first - > second - > getShard ( ) ; <nl> + while ( begin ! 
= end & & ( begin - > second - > getShard ( ) = = shard ) ) <nl> + + + begin ; <nl> + <nl> + shared_ptr < ChunkRange > cr ( new ChunkRange ( first , begin ) ) ; <nl> + _ranges [ cr - > getMax ( ) ] = cr ; <nl> + } <nl> + } <nl> + <nl> + int ChunkManager : : getCurrentDesiredChunkSize ( ) const { <nl> + / / split faster in early chunks helps spread out an initial load better <nl> + const int minChunkSize = 1 < < 20 ; / / 1 MBytes <nl> + <nl> + int splitThreshold = Chunk : : MaxChunkSize ; <nl> + <nl> + int nc = numChunks ( ) ; <nl> + <nl> + if ( nc < = 1 ) { <nl> + return 1024 ; <nl> + } <nl> + else if ( nc < 3 ) { <nl> + return minChunkSize / 2 ; <nl> + } <nl> + else if ( nc < 10 ) { <nl> + splitThreshold = max ( splitThreshold / 4 , minChunkSize ) ; <nl> + } <nl> + else if ( nc < 20 ) { <nl> + splitThreshold = max ( splitThreshold / 2 , minChunkSize ) ; <nl> + } <nl> + <nl> + return splitThreshold ; <nl> + } <nl> + <nl> + } / / namespace mongo <nl> new file mode 100644 <nl> index 000000000000 . . c15fb7dbaa13 <nl> mmm / dev / null <nl> ppp b / src / mongo / s / chunk_manager . h <nl> <nl> + / * * <nl> + * Copyright ( C ) 2014 MongoDB Inc . <nl> + * <nl> + * This program is free software : you can redistribute it and / or modify <nl> + * it under the terms of the GNU Affero General Public License , version 3 , <nl> + * as published by the Free Software Foundation . <nl> + * <nl> + * This program is distributed in the hope that it will be useful , <nl> + * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + * GNU Affero General Public License for more details . <nl> + * <nl> + * You should have received a copy of the GNU Affero General Public License <nl> + * along with this program . If not , see < http : / / www . gnu . org / licenses / > . <nl> + * <nl> + * As a special exception , the copyright holders give permission to link the <nl> + * code of portions of this program with the OpenSSL library under certain <nl> + * conditions as described in each individual source file and distribute <nl> + * linked combinations including the program with the OpenSSL library . You <nl> + * must comply with the GNU Affero General Public License in all respects for <nl> + * all of the code used other than as permitted herein . If you modify file ( s ) <nl> + * with this exception , you may extend this exception to your version of the <nl> + * file ( s ) , but you are not obligated to do so . If you do not wish to do so , <nl> + * delete this exception statement from your version . If you delete this <nl> + * exception statement from all source files in the program , then also delete <nl> + * it in the license file . <nl> + * / <nl> + <nl> + # pragma once <nl> + <nl> + # include < boost / next_prior . hpp > <nl> + # include < boost / shared_ptr . hpp > <nl> + # include < map > <nl> + # include < string > <nl> + # include < vector > <nl> + <nl> + # include " mongo / s / chunk . 
h " <nl> + <nl> + namespace mongo { <nl> + <nl> + class CanonicalQuery ; <nl> + class ChunkManager ; <nl> + struct QuerySolutionNode ; <nl> + <nl> + typedef boost : : shared_ptr < ChunkManager > ChunkManagerPtr ; <nl> + <nl> + / / The key for the map is max for each Chunk or ChunkRange <nl> + typedef std : : map < BSONObj , boost : : shared_ptr < const Chunk > , BSONObjCmp > ChunkMap ; <nl> + <nl> + <nl> + class ChunkRange { <nl> + public : <nl> + ChunkRange ( ChunkMap : : const_iterator begin , const ChunkMap : : const_iterator end ) ; <nl> + <nl> + / / Merge min and max ( must be adjacent ranges ) <nl> + ChunkRange ( const ChunkRange & min , const ChunkRange & max ) ; <nl> + <nl> + const ChunkManager * getManager ( ) const { return _manager ; } <nl> + Shard getShard ( ) const { return _shard ; } <nl> + <nl> + const BSONObj & getMin ( ) const { return _min ; } <nl> + const BSONObj & getMax ( ) const { return _max ; } <nl> + <nl> + / / clones of Chunk methods <nl> + / / Returns true if this ChunkRange contains the given shard key , and false otherwise <nl> + / / <nl> + / / Note : this function takes an extracted * key * , not an original document <nl> + / / ( the point may be computed by , say , hashing a given field or projecting <nl> + / / to a subset of fields ) . <nl> + bool containsKey ( const BSONObj & shardKey ) const ; <nl> + <nl> + std : : string toString ( ) const ; <nl> + <nl> + private : <nl> + const ChunkManager * _manager ; <nl> + const Shard _shard ; <nl> + const BSONObj _min ; <nl> + const BSONObj _max ; <nl> + } ; <nl> + <nl> + typedef std : : map < BSONObj , boost : : shared_ptr < ChunkRange > , BSONObjCmp > ChunkRangeMap ; <nl> + <nl> + <nl> + class ChunkRangeManager { <nl> + public : <nl> + const ChunkRangeMap & ranges ( ) const { return _ranges ; } <nl> + <nl> + void clear ( ) { _ranges . clear ( ) ; } <nl> + <nl> + void reloadAll ( const ChunkMap & chunks ) ; <nl> + <nl> + / / Slow operation - - wrap with DEV <nl> + void assertValid ( ) const ; <nl> + <nl> + ChunkRangeMap : : const_iterator upper_bound ( const BSONObj & o ) const { return _ranges . upper_bound ( o ) ; } <nl> + ChunkRangeMap : : const_iterator lower_bound ( const BSONObj & o ) const { return _ranges . lower_bound ( o ) ; } <nl> + <nl> + private : <nl> + / / assumes nothing in this range exists in _ranges <nl> + void _insertRange ( ChunkMap : : const_iterator begin , const ChunkMap : : const_iterator end ) ; <nl> + <nl> + ChunkRangeMap _ranges ; <nl> + } ; <nl> + <nl> + <nl> + / * config . sharding <nl> + { ns : ' alleyinsider . fs . 
chunks ' , <nl> + key : { ts : 1 } , <nl> + shards : [ { min : 1 , max : 100 , server : a } , { min : 101 , max : 200 , server : b } ] <nl> + } <nl> + * / <nl> + class ChunkManager { <nl> + public : <nl> + typedef std : : map < std : : string , ChunkVersion > ShardVersionMap ; <nl> + <nl> + / / Loads a new chunk manager from a collection document <nl> + ChunkManager ( const BSONObj & collDoc ) ; <nl> + <nl> + / / Creates an empty chunk manager for the namespace <nl> + ChunkManager ( const std : : string & ns , const ShardKeyPattern & pattern , bool unique ) ; <nl> + <nl> + std : : string getns ( ) const { return _ns ; } <nl> + <nl> + const ShardKeyPattern & getShardKeyPattern ( ) const { return _keyPattern ; } <nl> + <nl> + bool isUnique ( ) const { return _unique ; } <nl> + <nl> + / * * <nl> + * this is just an increasing number of how many ChunkManagers we have so we know if something has been updated <nl> + * / <nl> + unsigned long long getSequenceNumber ( ) const { return _sequenceNumber ; } <nl> + <nl> + / / <nl> + / / After constructor is invoked , we need to call loadExistingRanges . If this is a new <nl> + / / sharded collection , we can call createFirstChunks first . <nl> + / / <nl> + <nl> + / / Creates new chunks based on info in chunk manager <nl> + void createFirstChunks ( const std : : string & config , <nl> + const Shard & primary , <nl> + const std : : vector < BSONObj > * initPoints , <nl> + const std : : vector < Shard > * initShards ) ; <nl> + <nl> + / / Loads existing ranges based on info in chunk manager <nl> + void loadExistingRanges ( const std : : string & config , const ChunkManager * oldManager ) ; <nl> + <nl> + <nl> + / / Helpers for load <nl> + void calcInitSplitsAndShards ( const Shard & primary , <nl> + const std : : vector < BSONObj > * initPoints , <nl> + const std : : vector < Shard > * initShards , <nl> + std : : vector < BSONObj > * splitPoints , <nl> + std : : vector < Shard > * shards ) const ; <nl> + <nl> + / / <nl> + / / Methods to use once loaded / created <nl> + / / <nl> + <nl> + int numChunks ( ) const { return _chunkMap . size ( ) ; } <nl> + <nl> + / * * <nl> + * Given a key that has been extracted from a document , returns the <nl> + * chunk that contains that key . <nl> + * <nl> + * For instance , to locate the chunk for document { a : " foo " , b : " bar " } <nl> + * when the shard key is { a : " hashed " } , you can call <nl> + * findIntersectingChunk ( ) on { a : hash ( " foo " ) } <nl> + * / <nl> + ChunkPtr findIntersectingChunk ( const BSONObj & shardKey ) const ; <nl> + <nl> + void getShardsForQuery ( std : : set < Shard > & shards , const BSONObj & query ) const ; <nl> + void getAllShards ( std : : set < Shard > & all ) const ; <nl> + / * * @ param shards set to the shards covered by the interval [ min , max ] , see SERVER - 4791 * / <nl> + void getShardsForRange ( std : : set < Shard > & shards , const BSONObj & min , const BSONObj & max ) const ; <nl> + <nl> + / / Transforms query into bounds for each field in the shard key <nl> + / / for example : <nl> + / / Key { a : 1 , b : 1 } , <nl> + / / Query { a : { $ gte : 1 , $ lt : 2 } , <nl> + / / b : { $ gte : 3 , $ lt : 4 } } <nl> + / / = > Bounds { a : [ 1 , 2 ) , b : [ 3 , 4 ) } <nl> + static IndexBounds getIndexBoundsForQuery ( const BSONObj & key , const CanonicalQuery * canonicalQuery ) ; <nl> + <nl> + / / Collapse query solution tree . <nl> + / / <nl> + / / If it has OR node , the result could be a superset of the index bounds generated . 
<nl> + / / Since to give a single IndexBounds , this gives the union of bounds on each field . <nl> + / / for example : <nl> + / / OR : { a : ( 0 , 1 ) , b : ( 0 , 1 ) } , <nl> + / / { a : ( 2 , 3 ) , b : ( 2 , 3 ) } <nl> + / / = > { a : ( 0 , 1 ) , ( 2 , 3 ) , b : ( 0 , 1 ) , ( 2 , 3 ) } <nl> + static IndexBounds collapseQuerySolution ( const QuerySolutionNode * node ) ; <nl> + <nl> + const ChunkMap & getChunkMap ( ) const { return _chunkMap ; } <nl> + <nl> + / * * <nl> + * Returns true if , for this shard , the chunks are identical in both chunk managers <nl> + * / <nl> + bool compatibleWith ( const ChunkManager & other , const std : : string & shard ) const ; <nl> + <nl> + std : : string toString ( ) const ; <nl> + <nl> + ChunkVersion getVersion ( const std : : string & shardName ) const ; <nl> + ChunkVersion getVersion ( ) const ; <nl> + <nl> + void getInfo ( BSONObjBuilder & b ) const ; <nl> + <nl> + void drop ( ) const ; <nl> + <nl> + void _printChunks ( ) const ; <nl> + <nl> + int getCurrentDesiredChunkSize ( ) const ; <nl> + <nl> + ChunkManagerPtr reload ( bool force = true ) const ; / / doesn ' t modify self ! <nl> + <nl> + void markMinorForReload ( ChunkVersion majorVersion ) const ; <nl> + void getMarkedMinorVersions ( std : : set < ChunkVersion > & minorVersions ) const ; <nl> + <nl> + private : <nl> + <nl> + / / helpers for loading <nl> + <nl> + / / returns true if load was consistent <nl> + bool _load ( const std : : string & config , <nl> + ChunkMap & chunks , <nl> + std : : set < Shard > & shards , <nl> + ShardVersionMap & shardVersions , <nl> + const ChunkManager * oldManager ) ; <nl> + static bool _isValid ( const ChunkMap & chunks ) ; <nl> + <nl> + / / end helpers <nl> + <nl> + / / All members should be const for thread - safety <nl> + const std : : string _ns ; <nl> + const ShardKeyPattern _keyPattern ; <nl> + const bool _unique ; <nl> + <nl> + const ChunkMap _chunkMap ; <nl> + const ChunkRangeManager _chunkRanges ; <nl> + <nl> + const std : : set < Shard > _shards ; <nl> + <nl> + const ShardVersionMap _shardVersions ; / / max version per shard <nl> + <nl> + / / max version of any chunk <nl> + ChunkVersion _version ; <nl> + <nl> + mutable mutex _mutex ; / / only used with _nsLock <nl> + <nl> + const unsigned long long _sequenceNumber ; <nl> + <nl> + / / <nl> + / / Split Heuristic info <nl> + / / <nl> + <nl> + <nl> + class SplitHeuristics { <nl> + public : <nl> + <nl> + SplitHeuristics ( ) <nl> + : _splitTickets ( maxParallelSplits ) { <nl> + } <nl> + <nl> + TicketHolder _splitTickets ; <nl> + <nl> + / / Test whether we should split once data * splitTestFactor > chunkSize ( approximately ) <nl> + static const int splitTestFactor = 5 ; <nl> + / / Maximum number of parallel threads requesting a split <nl> + static const int maxParallelSplits = 5 ; <nl> + <nl> + / / The idea here is that we ' re over - aggressive on split testing by a factor of <nl> + / / splitTestFactor , so we can safely wait until we get to splitTestFactor invalid splits <nl> + / / before changing . Unfortunately , we also potentially over - request the splits by a <nl> + / / factor of maxParallelSplits , but since the factors are identical it works out <nl> + / / ( for now ) for parallel or sequential oversplitting . <nl> + / / TODO : Make splitting a separate thread with notifications ? 
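<nl> + / / <nl> + / / Editor ' s illustrative sketch ( not part of the original commit ) : a caller <nl> + / / would throttle split attempts through the ticket holder roughly as below , <nl> + / / assuming TicketHolder ' s tryAcquire ( ) / release ( ) interface : <nl> + / / <nl> + / / if ( ! manager - > _splitHeuristics . _splitTickets . tryAcquire ( ) ) <nl> + / / return ; / / maxParallelSplits attempts already in flight , skip <nl> + / / TicketHolderReleaser releaser ( & manager - > _splitHeuristics . _splitTickets ) ; <nl> + / / / / . . . run the split test ; the ticket is released when releaser <nl> + / / / / goes out of scope . <nl> + / /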
<nl> + static const int staleMinorReloadThreshold = maxParallelSplits ; <nl> + } ; <nl> + <nl> + mutable SplitHeuristics _splitHeuristics ; <nl> + <nl> + / / <nl> + / / End split heuristics <nl> + / / <nl> + <nl> + friend class Chunk ; <nl> + friend class ChunkRangeManager ; / / only needed for CRM : : assertValid ( ) <nl> + static AtomicUInt32 NextSequenceNumber ; <nl> + <nl> + friend class TestableChunkManager ; <nl> + } ; <nl> + <nl> + } / / namespace mongo <nl> mmm a / src / mongo / s / chunk_manager_targeter . cpp <nl> ppp b / src / mongo / s / chunk_manager_targeter . cpp <nl> <nl> <nl> # define MONGO_LOG_DEFAULT_COMPONENT : : mongo : : logger : : LogComponent : : kSharding <nl> <nl> + # include " mongo / platform / basic . h " <nl> + <nl> # include " mongo / s / chunk_manager_targeter . h " <nl> <nl> + # include " mongo / s / chunk_manager . h " <nl> # include " mongo / s / config . h " <nl> # include " mongo / s / grid . h " <nl> # include " mongo / util / log . h " <nl> <nl> <nl> namespace mongo { <nl> <nl> - using std : : endl ; <nl> using std : : map ; <nl> using std : : set ; <nl> using std : : string ; <nl> namespace mongo { <nl> <nl> using mongoutils : : str : : stream ; <nl> <nl> + namespace { <nl> + <nl> + enum UpdateType { <nl> + UpdateType_Replacement , UpdateType_OpStyle , UpdateType_Unknown <nl> + } ; <nl> + <nl> + enum CompareResult { <nl> + CompareResult_Unknown , CompareResult_GTE , CompareResult_LT <nl> + } ; <nl> + <nl> + const ShardKeyPattern virtualIdShardKey ( BSON ( " _id " < < 1 ) ) ; <nl> + <nl> + / / To match legacy reload behavior , we have to backoff on config reload per - thread <nl> + / / TODO : Centralize this behavior better by refactoring config reload in mongos <nl> + boost : : thread_specific_ptr < Backoff > perThreadBackoff ; <nl> + const int maxWaitMillis = 500 ; <nl> + <nl> / * * <nl> * Helper to get the DBConfigPtr object in an exception - safe way . <nl> * / <nl> - static bool getDBConfigSafe ( StringData db , DBConfigPtr & config , string * errMsg ) { <nl> + bool getDBConfigSafe ( StringData db , DBConfigPtr & config , string * errMsg ) { <nl> try { <nl> - config = grid . getDBConfig ( db , true ) ; <nl> - if ( ! config ) * errMsg = stream ( ) < < " could not load or create database " < < db ; <nl> + config = grid . getDBConfig ( db , true ) ; <nl> + if ( config ) { <nl> + return true ; <nl> + } <nl> + <nl> + * errMsg = stream ( ) < < " could not load or create database " < < db ; <nl> } <nl> - catch ( const DBException & ex ) { <nl> + catch ( const DBException & ex ) { <nl> * errMsg = ex . toString ( ) ; <nl> } <nl> <nl> - return config . get ( ) ; <nl> + return false ; <nl> + } <nl> + <nl> + / * * <nl> + * There are two styles of update expressions : <nl> + * <nl> + * Replacement style : coll . update ( { x : 1 } , { y : 2 } ) <nl> + * OpStyle : coll . update ( { x : 1 } , { $ set : { y : 2 } } ) <nl> + * / <nl> + UpdateType getUpdateExprType ( const BSONObj & updateExpr ) { <nl> + / / Empty update is replacement - style , by default <nl> + if ( updateExpr . isEmpty ( ) ) { <nl> + return UpdateType_Replacement ; <nl> + } <nl> + <nl> + UpdateType updateType = UpdateType_Unknown ; <nl> + <nl> + BSONObjIterator it ( updateExpr ) ; <nl> + while ( it . more ( ) ) { <nl> + BSONElement next = it . next ( ) ; <nl> + <nl> + if ( next . 
fieldName ( ) [ 0 ] = = ' $ ' ) { <nl> + if ( updateType = = UpdateType_Unknown ) { <nl> + updateType = UpdateType_OpStyle ; <nl> + } <nl> + else if ( updateType = = UpdateType_Replacement ) { <nl> + return UpdateType_Unknown ; <nl> + } <nl> + } <nl> + else { <nl> + if ( updateType = = UpdateType_Unknown ) { <nl> + updateType = UpdateType_Replacement ; <nl> + } <nl> + else if ( updateType = = UpdateType_OpStyle ) { <nl> + return UpdateType_Unknown ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + return updateType ; <nl> + } <nl> + <nl> + / * * <nl> + * This returns " does the query have an _id field " and " is the _id field querying for a direct <nl> + * value like _id : 3 and not _id : { $ gt : 3 } " <nl> + * <nl> + * Ex : { _id : 1 } = > true <nl> + * { foo : < anything > , _id : 1 } = > true <nl> + * { _id : { $ lt : 30 } } = > false <nl> + * { foo : < anything > } = > false <nl> + * / <nl> + bool isExactIdQuery ( const BSONObj & query ) { <nl> + StatusWith < BSONObj > status = virtualIdShardKey . extractShardKeyFromQuery ( query ) ; <nl> + if ( ! status . isOK ( ) ) { <nl> + return false ; <nl> + } <nl> + <nl> + return ! status . getValue ( ) [ " _id " ] . eoo ( ) ; <nl> + } <nl> + <nl> + void refreshBackoff ( ) { <nl> + if ( ! perThreadBackoff . get ( ) ) { <nl> + perThreadBackoff . reset ( new Backoff ( maxWaitMillis , maxWaitMillis * 2 ) ) ; <nl> + } <nl> + <nl> + perThreadBackoff . get ( ) - > nextSleepMillis ( ) ; <nl> + } <nl> + <nl> + <nl> + / / <nl> + / / Utilities to compare shard versions <nl> + / / <nl> + <nl> + / * * <nl> + * Returns the relationship of two shard versions . Shard versions of a collection that has not <nl> + * been dropped and recreated and where there is at least one chunk on a shard are comparable , <nl> + * otherwise the result is ambiguous . <nl> + * / <nl> + CompareResult compareShardVersions ( const ChunkVersion & shardVersionA , <nl> + const ChunkVersion & shardVersionB ) { <nl> + <nl> + / / Collection may have been dropped <nl> + if ( ! shardVersionA . hasEqualEpoch ( shardVersionB ) ) { <nl> + return CompareResult_Unknown ; <nl> + } <nl> + <nl> + / / Zero shard versions are only comparable to themselves <nl> + if ( ! shardVersionA . isSet ( ) | | ! shardVersionB . isSet ( ) ) { <nl> + / / If both are zero . . . <nl> + if ( ! shardVersionA . isSet ( ) & & ! shardVersionB . isSet ( ) ) { <nl> + return CompareResult_GTE ; <nl> + } <nl> + <nl> + return CompareResult_Unknown ; <nl> + } <nl> + <nl> + if ( shardVersionA < shardVersionB ) { <nl> + return CompareResult_LT ; <nl> + } <nl> + <nl> + else return CompareResult_GTE ; <nl> + } <nl> + <nl> + ChunkVersion getShardVersion ( StringData shardName , <nl> + const ChunkManager * manager , <nl> + const Shard * primary ) { <nl> + <nl> + dassert ( ! ( manager & & primary ) ) ; <nl> + dassert ( manager | | primary ) ; <nl> + <nl> + if ( primary ) { <nl> + return ChunkVersion : : UNSHARDED ( ) ; <nl> + } <nl> + <nl> + return manager - > getVersion ( shardName . toString ( ) ) ; <nl> + } <nl> + <nl> + / * * <nl> + * Returns the relationship between two maps of shard versions . As above , these maps are often <nl> + * comparable when the collection has not been dropped and there is at least one chunk on the <nl> + * shards . If any versions in the maps are not comparable , the result is _Unknown . <nl> + * <nl> + * If any versions in the first map ( cached ) are _LT the versions in the second map ( remote ) , <nl> + * the first ( cached ) versions are _LT the second ( remote ) versions . 
<nl> + * <nl> + * Note that the signature here is weird since our cached map of chunk versions is stored in a <nl> + * ChunkManager or is implicit in the primary shard of the collection . <nl> + * / <nl> + CompareResult compareAllShardVersions ( const ChunkManager * cachedChunkManager , <nl> + const Shard * cachedPrimary , <nl> + const map < string , ChunkVersion > & remoteShardVersions ) { <nl> + <nl> + CompareResult finalResult = CompareResult_GTE ; <nl> + <nl> + for ( map < string , ChunkVersion > : : const_iterator it = remoteShardVersions . begin ( ) ; <nl> + it ! = remoteShardVersions . end ( ) ; <nl> + + + it ) { <nl> + <nl> + / / Get the remote and cached version for the next shard <nl> + const string & shardName = it - > first ; <nl> + const ChunkVersion & remoteShardVersion = it - > second ; <nl> + <nl> + ChunkVersion cachedShardVersion ; <nl> + <nl> + try { <nl> + / / Throws b / c shard constructor throws <nl> + cachedShardVersion = getShardVersion ( shardName , <nl> + cachedChunkManager , <nl> + cachedPrimary ) ; <nl> + } <nl> + catch ( const DBException & ex ) { <nl> + warning ( ) < < " could not lookup shard " < < shardName <nl> + < < " in local cache , shard metadata may have changed " <nl> + < < " or be unavailable " < < causedBy ( ex ) ; <nl> + <nl> + return CompareResult_Unknown ; <nl> + } <nl> + <nl> + / / Compare the remote and cached versions <nl> + CompareResult result = compareShardVersions ( cachedShardVersion , remoteShardVersion ) ; <nl> + <nl> + if ( result = = CompareResult_Unknown ) return result ; <nl> + if ( result = = CompareResult_LT ) finalResult = CompareResult_LT ; <nl> + <nl> + / / Note that we keep going after _LT b / c there could be more _Unknowns . <nl> + } <nl> + <nl> + return finalResult ; <nl> + } <nl> + <nl> + / * * <nl> + * Whether or not the manager / primary pair is different from the other manager / primary pair . <nl> + * / <nl> + bool isMetadataDifferent ( const ChunkManagerPtr & managerA , <nl> + const ShardPtr & primaryA , <nl> + const ChunkManagerPtr & managerB , <nl> + const ShardPtr & primaryB ) { <nl> + <nl> + if ( ( managerA & & ! managerB ) | | ( ! managerA & & managerB ) | | ( primaryA & & ! primaryB ) | | ( ! primaryA & & primaryB ) ) return true ; <nl> + <nl> + if ( managerA ) { <nl> + return ! managerA - > getVersion ( ) . isStrictlyEqualTo ( managerB - > getVersion ( ) ) ; <nl> + } <nl> + <nl> + dassert ( NULL ! = primaryA . get ( ) ) ; <nl> + return primaryA - > getName ( ) ! = primaryB - > getName ( ) ; <nl> } <nl> <nl> + / * * <nl> + * Whether or not the manager / primary pair was changed or refreshed from a previous version <nl> + * of the metadata . <nl> + * / <nl> + bool wasMetadataRefreshed ( const ChunkManagerPtr & managerA , <nl> + const ShardPtr & primaryA , <nl> + const ChunkManagerPtr & managerB , <nl> + const ShardPtr & primaryB ) { <nl> + <nl> + if ( isMetadataDifferent ( managerA , primaryA , managerB , primaryB ) ) <nl> + return true ; <nl> + <nl> + if ( managerA ) { <nl> + dassert ( managerB . get ( ) ) ; / / otherwise metadata would be different <nl> + return managerA - > getSequenceNumber ( ) ! = managerB - > getSequenceNumber ( ) ; <nl> + } <nl> + <nl> + return false ; <nl> + } <nl> + <nl> + } / / namespace <nl> + <nl> ChunkManagerTargeter : : ChunkManagerTargeter ( const NamespaceString & nss ) <nl> : _nss ( nss ) , <nl> _needsTargetingRefresh ( false ) { <nl> namespace mongo { <nl> DBConfigPtr config ; <nl> <nl> string errMsg ; <nl> - if ( ! getDBConfigSafe ( _nss . 
db ( ) , config , & errMsg ) ) { <nl> - return Status ( ErrorCodes : : DatabaseNotFound , errMsg ) ; <nl> + if ( ! getDBConfigSafe ( _nss . db ( ) , config , & errMsg ) ) { <nl> + return Status ( ErrorCodes : : DatabaseNotFound , errMsg ) ; <nl> } <nl> <nl> / / Get either the chunk manager or primary shard <nl> - config - > getChunkManagerOrPrimary ( _nss . ns ( ) , _manager , _primary ) ; <nl> + config - > getChunkManagerOrPrimary ( _nss . ns ( ) , _manager , _primary ) ; <nl> <nl> return Status : : OK ( ) ; <nl> } <nl> namespace mongo { <nl> } <nl> } <nl> <nl> - namespace { <nl> - <nl> - / / TODO : Expose these for unit testing via dbtests <nl> - <nl> - enum UpdateType { <nl> - UpdateType_Replacement , UpdateType_OpStyle , UpdateType_Unknown <nl> - } ; <nl> - <nl> - / * * <nl> - * There are two styles of update expressions : <nl> - * coll . update ( { x : 1 } , { y : 2 } ) / / Replacement style <nl> - * coll . update ( { x : 1 } , { $ set : { y : 2 } } ) / / OpStyle <nl> - * / <nl> - UpdateType getUpdateExprType ( const BSONObj & updateExpr ) { <nl> - <nl> - UpdateType updateType = UpdateType_Unknown ; <nl> - <nl> - / / Empty update is replacement - style , by default <nl> - if ( updateExpr . isEmpty ( ) ) return UpdateType_Replacement ; <nl> - <nl> - BSONObjIterator it ( updateExpr ) ; <nl> - while ( it . more ( ) ) { <nl> - BSONElement next = it . next ( ) ; <nl> - <nl> - if ( next . fieldName ( ) [ 0 ] = = ' $ ' ) { <nl> - if ( updateType = = UpdateType_Unknown ) { <nl> - updateType = UpdateType_OpStyle ; <nl> - } <nl> - else if ( updateType = = UpdateType_Replacement ) { <nl> - return UpdateType_Unknown ; <nl> - } <nl> - } <nl> - else { <nl> - if ( updateType = = UpdateType_Unknown ) { <nl> - updateType = UpdateType_Replacement ; <nl> - } <nl> - else if ( updateType = = UpdateType_OpStyle ) { <nl> - return UpdateType_Unknown ; <nl> - } <nl> - } <nl> - } <nl> - <nl> - return updateType ; <nl> - } <nl> - <nl> - / * * <nl> - * This returns " does the query have an _id field " and " is the _id field <nl> - * querying for a direct value like _id : 3 and not _id : { $ gt : 3 } " <nl> - * <nl> - * Ex : { _id : 1 } = > true <nl> - * { foo : < anything > , _id : 1 } = > true <nl> - * { _id : { $ lt : 30 } } = > false <nl> - * { foo : < anything > } = > false <nl> - * / <nl> - bool isExactIdQuery ( const BSONObj & query ) { <nl> - static const ShardKeyPattern virtualIdShardKey ( BSON ( " _id " < < 1 ) ) ; <nl> - StatusWith < BSONObj > status = virtualIdShardKey . extractShardKeyFromQuery ( query ) ; <nl> - if ( ! status . isOK ( ) ) <nl> - return false ; <nl> - return ! status . getValue ( ) [ " _id " ] . eoo ( ) ; <nl> - } <nl> - } <nl> - <nl> Status ChunkManagerTargeter : : targetUpdate ( const BatchedUpdateDocument & updateDoc , <nl> vector < ShardEndpoint * > * endpoints ) const { <nl> <nl> namespace mongo { <nl> vector < ShardEndpoint * > * endpoints ) const { <nl> <nl> if ( ! _primary & & ! _manager ) { <nl> - return Status ( ErrorCodes : : NamespaceNotFound , <nl> - str : : stream ( ) < < " could not target query in " <nl> - < < getNS ( ) . ns ( ) <nl> - < < " ; no metadata found " ) ; <nl> + return Status ( ErrorCodes : : NamespaceNotFound , <nl> + stream ( ) < < " could not target query in " <nl> + < < getNS ( ) . 
ns ( ) < < " ; no metadata found " ) ; <nl> } <nl> <nl> set < Shard > shards ; <nl> namespace mongo { <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> - namespace { <nl> - <nl> - / / <nl> - / / Utilities to compare shard versions <nl> - / / <nl> - <nl> - enum CompareResult { <nl> - CompareResult_Unknown , CompareResult_GTE , CompareResult_LT <nl> - } ; <nl> - <nl> - / * * <nl> - * Returns the relationship of two shard versions . Shard versions of a collection that has <nl> - * not been dropped and recreated and where there is at least one chunk on a shard are <nl> - * comparable , otherwise the result is ambiguous . <nl> - * / <nl> - CompareResult compareShardVersions ( const ChunkVersion & shardVersionA , <nl> - const ChunkVersion & shardVersionB ) { <nl> - <nl> - / / Collection may have been dropped <nl> - if ( ! shardVersionA . hasEqualEpoch ( shardVersionB ) ) return CompareResult_Unknown ; <nl> - <nl> - / / Zero shard versions are only comparable to themselves <nl> - if ( ! shardVersionA . isSet ( ) | | ! shardVersionB . isSet ( ) ) { <nl> - / / If both are zero . . . <nl> - if ( ! shardVersionA . isSet ( ) & & ! shardVersionB . isSet ( ) ) return CompareResult_GTE ; <nl> - / / Otherwise . . . <nl> - return CompareResult_Unknown ; <nl> - } <nl> - <nl> - if ( shardVersionA < shardVersionB ) return CompareResult_LT ; <nl> - else return CompareResult_GTE ; <nl> - } <nl> - <nl> - ChunkVersion getShardVersion ( StringData shardName , <nl> - const ChunkManagerPtr & manager , <nl> - const ShardPtr & primary ) { <nl> - <nl> - dassert ( ! ( manager & & primary ) ) ; <nl> - dassert ( manager | | primary ) ; <nl> - <nl> - if ( primary ) return ChunkVersion : : UNSHARDED ( ) ; <nl> - <nl> - return manager - > getVersion ( shardName . toString ( ) ) ; <nl> - } <nl> - <nl> - / * * <nl> - * Returns the relationship between two maps of shard versions . As above , these maps are <nl> - * often comparable when the collection has not been dropped and there is at least one <nl> - * chunk on the shards . <nl> - * <nl> - * If any versions in the maps are not comparable , the result is _Unknown . <nl> - * <nl> - * If any versions in the first map ( cached ) are _LT the versions in the second map <nl> - * ( remote ) , the first ( cached ) versions are _LT the second ( remote ) versions . <nl> - * <nl> - * Note that the signature here is weird since our cached map of chunk versions is <nl> - * stored in a ChunkManager or is implicit in the primary shard of the collection . <nl> - * / <nl> - CompareResult / / <nl> - compareAllShardVersions ( const ChunkManagerPtr & cachedShardVersions , <nl> - const ShardPtr & cachedPrimary , <nl> - const map < string , ChunkVersion > & remoteShardVersions ) { <nl> - <nl> - CompareResult finalResult = CompareResult_GTE ; <nl> - <nl> - for ( map < string , ChunkVersion > : : const_iterator it = remoteShardVersions . begin ( ) ; <nl> - it ! = remoteShardVersions . 
end ( ) ; + + it ) { <nl> - <nl> - / / <nl> - / / Get the remote and cached version for the next shard <nl> - / / <nl> - <nl> - const string & shardName = it - > first ; <nl> - const ChunkVersion & remoteShardVersion = it - > second ; <nl> - ChunkVersion cachedShardVersion ; <nl> - <nl> - try { <nl> - / / Throws b / c shard constructor throws <nl> - cachedShardVersion = getShardVersion ( shardName , <nl> - cachedShardVersions , <nl> - cachedPrimary ) ; <nl> - } <nl> - catch ( const DBException & ex ) { <nl> - <nl> - warning ( ) < < " could not lookup shard " < < shardName <nl> - < < " in local cache , shard metadata may have changed " <nl> - < < " or be unavailable " < < causedBy ( ex ) < < endl ; <nl> - <nl> - return CompareResult_Unknown ; <nl> - } <nl> - <nl> - / / <nl> - / / Compare the remote and cached versions <nl> - / / <nl> - <nl> - CompareResult result = compareShardVersions ( cachedShardVersion , <nl> - remoteShardVersion ) ; <nl> - <nl> - if ( result = = CompareResult_Unknown ) return result ; <nl> - if ( result = = CompareResult_LT ) finalResult = CompareResult_LT ; <nl> - <nl> - / / Note that we keep going after _LT b / c there could be more _Unknowns . <nl> - } <nl> - <nl> - return finalResult ; <nl> - } <nl> - <nl> - / * * <nl> - * Whether or not the manager / primary pair is different from the other manager / primary pair <nl> - * / <nl> - bool isMetadataDifferent ( const ChunkManagerPtr & managerA , <nl> - const ShardPtr & primaryA , <nl> - const ChunkManagerPtr & managerB , <nl> - const ShardPtr & primaryB ) { <nl> - <nl> - if ( ( managerA & & ! managerB ) | | ( ! managerA & & managerB ) | | ( primaryA & & ! primaryB ) <nl> - | | ( ! primaryA & & primaryB ) ) return true ; <nl> - <nl> - if ( managerA ) { <nl> - return ! managerA - > getVersion ( ) . isStrictlyEqualTo ( managerB - > getVersion ( ) ) ; <nl> - } <nl> - <nl> - dassert ( NULL ! = primaryA . get ( ) ) ; <nl> - return primaryA - > getName ( ) ! = primaryB - > getName ( ) ; <nl> - } <nl> - <nl> - / * * <nl> - * Whether or not the manager / primary pair was changed or refreshed from a previous version <nl> - * of the metadata . <nl> - * / <nl> - bool wasMetadataRefreshed ( const ChunkManagerPtr & managerA , <nl> - const ShardPtr & primaryA , <nl> - const ChunkManagerPtr & managerB , <nl> - const ShardPtr & primaryB ) { <nl> - <nl> - if ( isMetadataDifferent ( managerA , primaryA , managerB , primaryB ) ) <nl> - return true ; <nl> - <nl> - if ( managerA ) { <nl> - dassert ( managerB . get ( ) ) ; / / otherwise metadata would be different <nl> - return managerA - > getSequenceNumber ( ) ! = managerB - > getSequenceNumber ( ) ; <nl> - } <nl> - <nl> - return false ; <nl> - } <nl> - } <nl> - <nl> void ChunkManagerTargeter : : noteStaleResponse ( const ShardEndpoint & endpoint , <nl> const BSONObj & staleInfo ) { <nl> dassert ( ! _needsTargetingRefresh ) ; <nl> namespace mongo { <nl> if ( staleInfo [ " vWanted " ] . eoo ( ) ) { <nl> / / If we don ' t have a vWanted sent , assume the version is higher than our current <nl> / / version . <nl> - remoteShardVersion = getShardVersion ( endpoint . shardName , _manager , _primary ) ; <nl> + remoteShardVersion = <nl> + getShardVersion ( endpoint . shardName , _manager . get ( ) , _primary . get ( ) ) ; <nl> remoteShardVersion . 
incMajor ( ) ; <nl> } <nl> else { <nl> namespace mongo { <nl> / / If we got stale shard versions from remote shards , we may need to refresh <nl> / / NOTE : Not sure yet if this can happen simultaneously with targeting issues <nl> <nl> - CompareResult result = compareAllShardVersions ( _manager , <nl> - _primary , <nl> - _remoteShardVersions ) ; <nl> + CompareResult result = compareAllShardVersions ( _manager . get ( ) , <nl> + _primary . get ( ) , <nl> + _remoteShardVersions ) ; <nl> / / Reset the versions <nl> _remoteShardVersions . clear ( ) ; <nl> <nl> namespace mongo { <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> - / / To match legacy reload behavior , we have to backoff on config reload per - thread <nl> - / / TODO : Centralize this behavior better by refactoring config reload in mongos <nl> - static const int maxWaitMillis = 500 ; <nl> - static boost : : thread_specific_ptr < Backoff > perThreadBackoff ; <nl> - <nl> - static void refreshBackoff ( ) { <nl> - if ( ! perThreadBackoff . get ( ) ) <nl> - perThreadBackoff . reset ( new Backoff ( maxWaitMillis , maxWaitMillis * 2 ) ) ; <nl> - perThreadBackoff . get ( ) - > nextSleepMillis ( ) ; <nl> - } <nl> - <nl> Status ChunkManagerTargeter : : refreshNow ( RefreshType refreshType ) { <nl> - <nl> DBConfigPtr config ; <nl> <nl> string errMsg ; <nl> namespace mongo { <nl> catch ( const DBException & ex ) { <nl> return Status ( ErrorCodes : : UnknownError , ex . toString ( ) ) ; <nl> } <nl> + <nl> config - > getChunkManagerOrPrimary ( _nss . ns ( ) , _manager , _primary ) ; <nl> } <nl> <nl> mmm a / src / mongo / s / chunk_manager_targeter . h <nl> ppp b / src / mongo / s / chunk_manager_targeter . h <nl> <nl> <nl> # pragma once <nl> <nl> - # include < boost / scoped_ptr . hpp > <nl> + # include < boost / shared_ptr . hpp > <nl> # include < map > <nl> <nl> # include " mongo / bson / bsonobj . h " <nl> # include " mongo / db / namespace_string . h " <nl> - # include " mongo / s / chunk . h " <nl> - # include " mongo / s / shard . h " <nl> - # include " mongo / s / chunk_version . h " <nl> # include " mongo / s / ns_targeter . h " <nl> <nl> namespace mongo { <nl> <nl> + class ChunkManager ; <nl> + struct ChunkVersion ; <nl> + class Shard ; <nl> + <nl> struct TargeterStats { <nl> / / Map of chunk shard minKey - > approximate delta . This is used for deciding <nl> / / whether a chunk might need splitting or not . <nl> namespace mongo { <nl> * / <nl> class ChunkManagerTargeter : public NSTargeter { <nl> public : <nl> - <nl> ChunkManagerTargeter ( const NamespaceString & nss ) ; <nl> <nl> / * * <nl> namespace mongo { <nl> const TargeterStats * getStats ( ) const ; <nl> <nl> private : <nl> - <nl> / / Different ways we can refresh metadata <nl> enum RefreshType { <nl> / / No refresh is needed <nl> namespace mongo { <nl> <nl> / / Zero or one of these are filled at all times <nl> / / If sharded , _manager , if unsharded , _primary , on error , neither <nl> - ChunkManagerPtr _manager ; <nl> - ShardPtr _primary ; <nl> + boost : : shared_ptr < ChunkManager > _manager ; <nl> + boost : : shared_ptr < Shard > _primary ; <nl> <nl> / / Map of shard - > remote shard version reported from stale errors <nl> ShardVersionMap _remoteShardVersions ; <nl> mmm a / src / mongo / s / chunk_manager_targeter_test . cpp <nl> ppp b / src / mongo / s / chunk_manager_targeter_test . cpp <nl> <nl> <nl> # define MONGO_LOG_DEFAULT_COMPONENT : : mongo : : logger : : LogComponent : : kSharding <nl> <nl> + # include " mongo / platform / basic . 
h " <nl> + <nl> # include " mongo / db / json . h " <nl> # include " mongo / db / namespace_string . h " <nl> - # include " mongo / db / query / interval . h " <nl> - # include " mongo / s / chunk . h " <nl> + # include " mongo / db / query / canonical_query . h " <nl> + # include " mongo / s / chunk_manager . h " <nl> # include " mongo / s / shard_key_pattern . h " <nl> # include " mongo / unittest / unittest . h " <nl> # include " mongo / util / log . h " <nl> namespace { <nl> <nl> using std : : auto_ptr ; <nl> using std : : make_pair ; <nl> + <nl> / * * <nl> * ChunkManager targeting test <nl> * <nl> namespace { <nl> CheckBoundList ( list , expectedList ) ; <nl> } <nl> <nl> - } / / end namespace <nl> + } / / namespace <nl> mmm a / src / mongo / s / client / shard_connection . cpp <nl> ppp b / src / mongo / s / client / shard_connection . cpp <nl> <nl> <nl> # include " mongo / db / commands . h " <nl> # include " mongo / db / lasterror . h " <nl> - # include " mongo / s / chunk . h " <nl> + # include " mongo / s / chunk_manager . h " <nl> # include " mongo / s / request . h " <nl> # include " mongo / s / shard . h " <nl> # include " mongo / s / stale_exception . h " <nl> mmm a / src / mongo / s / cluster_write . cpp <nl> ppp b / src / mongo / s / cluster_write . cpp <nl> <nl> # include " mongo / base / init . h " <nl> # include " mongo / base / status . h " <nl> # include " mongo / db / write_concern_options . h " <nl> + # include " mongo / s / chunk_manager . h " <nl> # include " mongo / s / chunk_manager_targeter . h " <nl> # include " mongo / s / config . h " <nl> # include " mongo / s / dbclient_multi_command . h " <nl> mmm a / src / mongo / s / commands / cluster_merge_chunks_cmd . cpp <nl> ppp b / src / mongo / s / commands / cluster_merge_chunks_cmd . cpp <nl> <nl> * then also delete it in the license file . <nl> * / <nl> <nl> + # include " mongo / platform / basic . h " <nl> + <nl> # include " mongo / base / init . h " <nl> # include " mongo / client / connpool . h " <nl> # include " mongo / db / auth / action_type . h " <nl> <nl> # include " mongo / db / commands . h " <nl> # include " mongo / db / field_parser . h " <nl> # include " mongo / db / namespace_string . h " <nl> - # include " mongo / s / config . h " / / For config server and DBConfig and version refresh <nl> + # include " mongo / s / chunk_manager . h " <nl> + # include " mongo / s / config . h " <nl> # include " mongo / s / grid . h " <nl> # include " mongo / s / shard . h " <nl> <nl> mmm a / src / mongo / s / commands_admin . cpp <nl> ppp b / src / mongo / s / commands_admin . cpp <nl> <nl> # include " mongo / db / wire_version . h " <nl> # include " mongo / db / write_concern . h " <nl> # include " mongo / db / write_concern_options . h " <nl> - # include " mongo / s / chunk . h " <nl> + # include " mongo / s / chunk_manager . h " <nl> # include " mongo / s / client_info . h " <nl> # include " mongo / s / client / shard_connection . h " <nl> # include " mongo / s / cluster_write . h " <nl> mmm a / src / mongo / s / commands_public . cpp <nl> ppp b / src / mongo / s / commands_public . cpp <nl> <nl> # include " mongo / platform / atomic_word . h " <nl> # include " mongo / s / client_info . h " <nl> # include " mongo / s / cluster_explain . h " <nl> - # include " mongo / s / chunk . h " <nl> + # include " mongo / s / chunk_manager . h " <nl> # include " mongo / s / config . h " <nl> # include " mongo / s / cursors . h " <nl> # include " mongo / s / distlock . 
h " <nl> namespace mongo { <nl> return passthrough ( conf , cmdObj , result ) ; <nl> } <nl> <nl> - cm - > drop ( cm ) ; <nl> + cm - > drop ( ) ; <nl> <nl> if ( ! conf - > removeSharding ( fullns ) ) { <nl> warning ( ) < < " collection " < < fullns <nl> mmm a / src / mongo / s / config . cpp <nl> ppp b / src / mongo / s / config . cpp <nl> <nl> <nl> # include " mongo / platform / basic . h " <nl> <nl> + # include " mongo / s / config . h " <nl> + <nl> # include < boost / scoped_ptr . hpp > <nl> - # include " pcrecpp . h " <nl> + # include < pcrecpp . h > <nl> <nl> # include " mongo / client / connpool . h " <nl> # include " mongo / client / dbclientcursor . h " <nl> # include " mongo / db / client . h " <nl> # include " mongo / db / lasterror . h " <nl> + # include " mongo / db / server_options . h " <nl> # include " mongo / db / write_concern . h " <nl> - # include " mongo / s / chunk . h " <nl> + # include " mongo / s / chunk_manager . h " <nl> # include " mongo / s / chunk_version . h " <nl> # include " mongo / s / client / shard_connection . h " <nl> # include " mongo / s / cluster_write . h " <nl> - # include " mongo / s / config . h " <nl> # include " mongo / s / grid . h " <nl> # include " mongo / s / server . h " <nl> # include " mongo / s / type_changelog . h " <nl> namespace mongo { <nl> int ConfigServer : : VERSION = 3 ; <nl> Shard Shard : : EMPTY ; <nl> <nl> - / * mmm DBConfig mmm * / <nl> <nl> DBConfig : : CollectionInfo : : CollectionInfo ( const BSONObj & in ) { <nl> _dirty = false ; <nl> _dropped = in [ CollectionType : : dropped ( ) ] . trueValue ( ) ; <nl> <nl> - if ( in [ CollectionType : : keyPattern ( ) ] . isABSONObj ( ) ) { <nl> - shard ( new ChunkManager ( in ) ) ; <nl> + if ( in [ CollectionType : : keyPattern ( ) ] . isABSONObj ( ) ) { <nl> + shard ( new ChunkManager ( in ) ) ; <nl> } <nl> <nl> _dirty = false ; <nl> } <nl> - <nl> - void DBConfig : : CollectionInfo : : shard ( ChunkManager * manager ) { <nl> <nl> + DBConfig : : CollectionInfo : : ~ CollectionInfo ( ) { <nl> + <nl> + } <nl> + <nl> + void DBConfig : : CollectionInfo : : resetCM ( ChunkManager * cm ) { <nl> + invariant ( cm ) ; <nl> + invariant ( _cm ) ; <nl> + <nl> + _cm . reset ( cm ) ; <nl> + } <nl> + <nl> + void DBConfig : : CollectionInfo : : shard ( ChunkManager * manager ) { <nl> / / Do this * first * so we ' re invisible to everyone else <nl> manager - > loadExistingRanges ( configServer . getPrimary ( ) . getConnString ( ) , NULL ) ; <nl> <nl> namespace mongo { <nl> / / This helps prevent errors when dropping in a different process <nl> / / <nl> <nl> - if ( manager - > numChunks ( ) ! = 0 ) { <nl> - _cm = ChunkManagerPtr ( manager ) ; <nl> + if ( manager - > numChunks ( ) ! = 0 ) { <nl> + _cm = ChunkManagerPtr ( manager ) ; <nl> _key = manager - > getShardKeyPattern ( ) . toBSON ( ) . getOwned ( ) ; <nl> _unqiue = manager - > isUnique ( ) ; <nl> _dirty = true ; <nl> namespace mongo { <nl> } <nl> else { <nl> warning ( ) < < " no chunks found for collection " < < manager - > getns ( ) <nl> - < < " , assuming unsharded " < < endl ; <nl> + < < " , assuming unsharded " ; <nl> unshard ( ) ; <nl> } <nl> } <nl> namespace mongo { <nl> _dirty = false ; <nl> } <nl> <nl> + <nl> + DBConfig : : DBConfig ( std : : string name ) <nl> + : _name ( name ) , <nl> + _primary ( " config " , " " , 0 / * maxSize * / , false / * draining * / ) , <nl> + _shardingEnabled ( false ) , <nl> + _lock ( " DBConfig " ) , <nl> + _hitConfigServerLock ( " DBConfig : : _hitConfigServerLock " ) { <nl> + <nl> + invariant ( ! _name . 
empty ( ) ) ; <nl> + } <nl> + <nl> + DBConfig : : ~ DBConfig ( ) { <nl> + <nl> + } <nl> + <nl> bool DBConfig : : isSharded ( const string & ns ) { <nl> if ( ! _shardingEnabled ) <nl> return false ; <nl> namespace mongo { <nl> } <nl> <nl> bool DBConfig : : _isSharded ( const string & ns ) { <nl> - if ( ! _shardingEnabled ) <nl> + if ( ! _shardingEnabled ) { <nl> return false ; <nl> - Collections : : iterator i = _collections . find ( ns ) ; <nl> - if ( i = = _collections . end ( ) ) <nl> + } <nl> + <nl> + CollectionInfoMap : : iterator i = _collections . find ( ns ) ; <nl> + if ( i = = _collections . end ( ) ) { <nl> return false ; <nl> + } <nl> + <nl> return i - > second . isSharded ( ) ; <nl> } <nl> <nl> namespace mongo { <nl> <nl> scoped_lock lk ( _lock ) ; <nl> <nl> - Collections : : iterator i = _collections . find ( ns ) ; <nl> + CollectionInfoMap : : iterator i = _collections . find ( ns ) ; <nl> <nl> if ( i = = _collections . end ( ) ) <nl> return false ; <nl> namespace mongo { <nl> { <nl> scoped_lock lk ( _lock ) ; <nl> <nl> - Collections : : iterator i = _collections . find ( ns ) ; <nl> + CollectionInfoMap : : iterator i = _collections . find ( ns ) ; <nl> <nl> / / No namespace <nl> if ( i = = _collections . end ( ) ) { <nl> namespace mongo { <nl> <nl> if ( coll ) { <nl> <nl> - for ( Collections : : iterator i = _collections . begin ( ) ; i ! = _collections . end ( ) ; + + i ) { <nl> + for ( CollectionInfoMap : : iterator i = _collections . begin ( ) ; i ! = _collections . end ( ) ; + + i ) { <nl> if ( ! i - > second . isDirty ( ) ) <nl> continue ; <nl> i - > second . save ( i - > first ) ; <nl> namespace mongo { <nl> num = 0 ; <nl> set < string > seen ; <nl> while ( true ) { <nl> - Collections : : iterator i = _collections . begin ( ) ; <nl> + CollectionInfoMap : : iterator i = _collections . begin ( ) ; <nl> for ( ; i ! = _collections . end ( ) ; + + i ) { <nl> / / log ( ) < < " coll : " < < i - > first < < " and " < < i - > second . isSharded ( ) < < endl ; <nl> if ( i - > second . isSharded ( ) ) <nl> namespace mongo { <nl> LOG ( 1 ) < < " \ t dropping sharded collection : " < < i - > first < < endl ; <nl> <nl> i - > second . getCM ( ) - > getAllShards ( allServers ) ; <nl> - i - > second . getCM ( ) - > drop ( i - > second . getCM ( ) ) ; <nl> + i - > second . getCM ( ) - > drop ( ) ; <nl> <nl> / / We should warn , but it ' s not a fatal error if someone else reloaded the db / coll as <nl> / / unsharded in the meantime <nl> namespace mongo { <nl> void DBConfig : : getAllShards ( set < Shard > & shards ) const { <nl> scoped_lock lk ( _lock ) ; <nl> shards . insert ( getPrimary ( ) ) ; <nl> - for ( Collections : : const_iterator it ( _collections . begin ( ) ) , end ( _collections . end ( ) ) ; it ! = end ; + + it ) { <nl> + for ( CollectionInfoMap : : const_iterator it ( _collections . begin ( ) ) , end ( _collections . end ( ) ) ; it ! = end ; + + it ) { <nl> if ( it - > second . isSharded ( ) ) { <nl> it - > second . getCM ( ) - > getAllShards ( shards ) ; <nl> } / / TODO : handle collections on non - primary shard <nl> namespace mongo { <nl> <nl> scoped_lock lk ( _lock ) ; <nl> <nl> - for ( Collections : : const_iterator i = _collections . begin ( ) ; i ! = _collections . end ( ) ; i + + ) { <nl> + for ( CollectionInfoMap : : const_iterator i = _collections . begin ( ) ; i ! = _collections . end ( ) ; i + + ) { <nl> log ( ) < < " Coll : " < < i - > first < < " sharded ? " < < i - > second . isSharded ( ) < < endl ; <nl> if ( i - > second . isSharded ( ) ) namespaces . 
insert ( i - > first ) ; <nl> } <nl> mmm a / src / mongo / s / config . h <nl> ppp b / src / mongo / s / config . h <nl> <nl> # include < boost / shared_ptr . hpp > <nl> <nl> # include " mongo / client / dbclient_rs . h " <nl> - # include " mongo / s / chunk . h " <nl> # include " mongo / s / shard . h " <nl> # include " mongo / s / shard_key_pattern . h " <nl> <nl> namespace mongo { <nl> <nl> + class ChunkManager ; <nl> class ConfigServer ; <nl> - <nl> class DBConfig ; <nl> + <nl> typedef boost : : shared_ptr < DBConfig > DBConfigPtr ; <nl> <nl> extern DBConfigPtr configServerPtr ; <nl> namespace mongo { <nl> * top level configuration for a database <nl> * / <nl> class DBConfig { <nl> - <nl> - struct CollectionInfo { <nl> - CollectionInfo ( ) { <nl> - _dirty = false ; <nl> - _dropped = false ; <nl> - } <nl> - <nl> - CollectionInfo ( const BSONObj & in ) ; <nl> - <nl> - bool isSharded ( ) const { <nl> - return _cm . get ( ) ; <nl> - } <nl> - <nl> - ChunkManagerPtr getCM ( ) const { <nl> - return _cm ; <nl> - } <nl> - <nl> - void resetCM ( ChunkManager * cm ) { <nl> - verify ( cm ) ; <nl> - verify ( _cm ) ; / / this has to be already sharded <nl> - _cm . reset ( cm ) ; <nl> - } <nl> - <nl> - void shard ( ChunkManager * cm ) ; <nl> - void unshard ( ) ; <nl> - <nl> - bool isDirty ( ) const { return _dirty ; } <nl> - bool wasDropped ( ) const { return _dropped ; } <nl> - <nl> - void save ( const std : : string & ns ) ; <nl> - <nl> - bool unique ( ) const { return _unqiue ; } <nl> - BSONObj key ( ) const { return _key ; } <nl> - <nl> - <nl> - private : <nl> - BSONObj _key ; <nl> - bool _unqiue ; <nl> - ChunkManagerPtr _cm ; <nl> - bool _dirty ; <nl> - bool _dropped ; <nl> - } ; <nl> - <nl> - typedef std : : map < std : : string , CollectionInfo > Collections ; <nl> - <nl> public : <nl> - <nl> - DBConfig ( std : : string name ) <nl> - : _name ( name ) , <nl> - _primary ( " config " , <nl> - " " , <nl> - 0 / * maxSize * / , <nl> - false / * draining * / ) , <nl> - _shardingEnabled ( false ) , <nl> - _lock ( " DBConfig " ) , <nl> - _hitConfigServerLock ( " DBConfig : : _hitConfigServerLock " ) { <nl> - verify ( name . size ( ) ) ; <nl> - } <nl> - virtual ~ DBConfig ( ) { } <nl> + DBConfig ( std : : string name ) ; <nl> + virtual ~ DBConfig ( ) ; <nl> <nl> std : : string getName ( ) const { return _name ; } ; <nl> <nl> namespace mongo { <nl> * WARNING : It ' s not safe to place initial chunks onto non - primary shards using this method . <nl> * The initShards parameter allows legacy behavior expected by map - reduce . <nl> * / <nl> - ChunkManagerPtr shardCollection ( const std : : string & ns , <nl> - const ShardKeyPattern & fieldsAndOrder , <nl> - bool unique , <nl> - std : : vector < BSONObj > * initPoints , <nl> - std : : vector < Shard > * initShards = NULL ) ; <nl> + boost : : shared_ptr < ChunkManager > shardCollection ( const std : : string & ns , <nl> + const ShardKeyPattern & fieldsAndOrder , <nl> + bool unique , <nl> + std : : vector < BSONObj > * initPoints , <nl> + std : : vector < Shard > * initShards = NULL ) ; <nl> <nl> / * * <nl> @ return true if there was sharding info to remove <nl> namespace mongo { <nl> <nl> / / Atomically returns * either * the chunk manager * or * the primary shard for the collection , <nl> / / neither if the collection doesn ' t exist . 
<nl> - void getChunkManagerOrPrimary ( const std : : string & ns , ChunkManagerPtr & manager , ShardPtr & primary ) ; <nl> + void getChunkManagerOrPrimary ( const std : : string & ns , boost : : shared_ptr < ChunkManager > & manager , ShardPtr & primary ) ; <nl> <nl> - ChunkManagerPtr getChunkManager ( const std : : string & ns , bool reload = false , bool forceReload = false ) ; <nl> - ChunkManagerPtr getChunkManagerIfExists ( const std : : string & ns , bool reload = false , bool forceReload = false ) ; <nl> + boost : : shared_ptr < ChunkManager > getChunkManager ( const std : : string & ns , bool reload = false , bool forceReload = false ) ; <nl> + boost : : shared_ptr < ChunkManager > getChunkManagerIfExists ( const std : : string & ns , bool reload = false , bool forceReload = false ) ; <nl> <nl> const Shard & getShard ( const std : : string & ns ) ; <nl> / * * <nl> namespace mongo { <nl> void getAllShardedCollections ( std : : set < std : : string > & namespaces ) const ; <nl> <nl> protected : <nl> + struct CollectionInfo { <nl> + CollectionInfo ( ) { <nl> + _dirty = false ; <nl> + _dropped = false ; <nl> + } <nl> + <nl> + CollectionInfo ( const BSONObj & in ) ; <nl> + ~ CollectionInfo ( ) ; <nl> + <nl> + bool isSharded ( ) const { <nl> + return _cm . get ( ) ; <nl> + } <nl> + <nl> + boost : : shared_ptr < ChunkManager > getCM ( ) const { <nl> + return _cm ; <nl> + } <nl> + <nl> + void resetCM ( ChunkManager * cm ) ; <nl> + <nl> + void shard ( ChunkManager * cm ) ; <nl> + void unshard ( ) ; <nl> + <nl> + bool isDirty ( ) const { return _dirty ; } <nl> + bool wasDropped ( ) const { return _dropped ; } <nl> + <nl> + void save ( const std : : string & ns ) ; <nl> + <nl> + bool unique ( ) const { return _unqiue ; } <nl> + BSONObj key ( ) const { return _key ; } <nl> + <nl> + <nl> + private : <nl> + BSONObj _key ; <nl> + bool _unqiue ; <nl> + boost : : shared_ptr < ChunkManager > _cm ; <nl> + bool _dirty ; <nl> + bool _dropped ; <nl> + } ; <nl> + <nl> + typedef std : : map < std : : string , CollectionInfo > CollectionInfoMap ; <nl> + <nl> <nl> / * * <nl> lockless <nl> namespace mongo { <nl> bool _reload ( ) ; <nl> void _save ( bool db = true , bool coll = true ) ; <nl> <nl> - std : : string _name ; / / e . g . " alleyinsider " <nl> + <nl> + const std : : string _name ; / / e . g . " alleyinsider " <nl> + <nl> Shard _primary ; / / e . g . localhost , mongo . foo . com : 9999 <nl> bool _shardingEnabled ; <nl> <nl> - / / map < std : : string , CollectionInfo > _sharded ; / / { " alleyinsider . blog . posts " : { ts : 1 } , . . . ] - all ns that are sharded <nl> - / / map < std : : string , ChunkManagerPtr > _shards ; / / this will only have entries for things that have been looked at <nl> - <nl> - Collections _collections ; <nl> + CollectionInfoMap _collections ; <nl> <nl> mutable mongo : : mutex _lock ; / / TODO : change to r / w lock ? ? <nl> mutable mongo : : mutex _hitConfigServerLock ; <nl> mmm a / src / mongo / s / config_server_checker_service . cpp <nl> ppp b / src / mongo / s / config_server_checker_service . cpp <nl> <nl> * then also delete it in the license file . <nl> * / <nl> <nl> + # include " mongo / platform / basic . h " <nl> + <nl> + # include " mongo / s / config_server_checker_service . h " <nl> + <nl> # include < boost / scoped_ptr . hpp > <nl> # include < boost / thread / thread . hpp > <nl> <nl> # include " mongo / s / config . h " <nl> - # include " mongo / s / config_server_checker_service . h " <nl> # include " mongo / util / concurrency / mutex . 
h " <nl> # include " mongo / util / exit . h " <nl> <nl> namespace mongo { <nl> return _checkerThread ! = NULL ; <nl> } <nl> } <nl> - <nl> mmm a / src / mongo / s / grid . cpp <nl> ppp b / src / mongo / s / grid . cpp <nl> <nl> <nl> # include " mongo / s / grid . h " <nl> <nl> - # include " pcrecpp . h " <nl> # include < iomanip > <nl> + # include < pcrecpp . h > <nl> <nl> # include " mongo / client / connpool . h " <nl> # include " mongo / client / replica_set_monitor . h " <nl> namespace mongo { <nl> <nl> MONGO_FP_DECLARE ( neverBalance ) ; <nl> <nl> + Grid : : Grid ( ) <nl> + : _lock ( " Grid " ) , <nl> + _allowLocalShard ( true ) { <nl> + <nl> + } <nl> + <nl> + Grid : : ~ Grid ( ) { <nl> + <nl> + } <nl> + <nl> DBConfigPtr Grid : : getDBConfig ( StringData ns , bool create , const string & shardNameHint ) { <nl> string database = nsToDatabase ( ns ) ; <nl> <nl> namespace mongo { <nl> return ! shard . isEmpty ( ) ; <nl> } <nl> <nl> - bool Grid : : _getNewShardName ( string * name ) const { <nl> - DEV verify ( name ) ; <nl> + bool Grid : : _getNewShardName ( string * name ) const { <nl> + invariant ( name ) ; <nl> <nl> bool ok = false ; <nl> int count = 0 ; <nl> namespace mongo { <nl> BSONObj o = conn - > findOne ( ShardType : : ConfigNS , <nl> Query ( fromjson ( " { " + ShardType : : name ( ) + " : / ^ shard / } " ) ) <nl> . sort ( BSON ( ShardType : : name ( ) < < - 1 ) ) ) ; <nl> - if ( ! o . isEmpty ( ) ) { <nl> + if ( ! o . isEmpty ( ) ) { <nl> string last = o [ ShardType : : name ( ) ] . String ( ) ; <nl> - istringstream is ( last . substr ( 5 ) ) ; <nl> + istringstream is ( last . substr ( 5 ) ) ; <nl> is > > count ; <nl> count + + ; <nl> } <nl> + <nl> if ( count < 9999 ) { <nl> stringstream ss ; <nl> ss < < " shard " < < setfill ( ' 0 ' ) < < setw ( 4 ) < < count ; <nl> * name = ss . str ( ) ; <nl> ok = true ; <nl> } <nl> + <nl> conn . done ( ) ; <nl> <nl> return ok ; <nl> mmm a / src / mongo / s / grid . h <nl> ppp b / src / mongo / s / grid . h <nl> <nl> # include " mongo / util / time_support . h " <nl> # include " mongo / util / concurrency / mutex . h " <nl> <nl> - # include " mongo / s / config . h " / / DBConfigPtr <nl> + # include " mongo / s / config . h " <nl> <nl> namespace mongo { <nl> <nl> namespace mongo { <nl> * / <nl> class Grid { <nl> public : <nl> - Grid ( ) : _lock ( " Grid " ) , _allowLocalShard ( true ) { } <nl> + Grid ( ) ; <nl> + ~ Grid ( ) ; <nl> <nl> / * * <nl> * gets the config the db . <nl> namespace mongo { <nl> static bool _inBalancingWindow ( const BSONObj & balancerDoc , const boost : : posix_time : : ptime & now ) ; <nl> <nl> private : <nl> - mongo : : mutex _lock ; / / protects _databases ; TODO : change to r / w lock ? ? <nl> - std : : map < std : : string , DBConfigPtr > _databases ; / / maps ns to DBConfig ' s <nl> - bool _allowLocalShard ; / / can ' localhost ' be used in shard addresses ? <nl> - <nl> / * * <nl> * @ param name is the chose name for the shard . Parameter is mandatory . <nl> * @ return true if it managed to generate a shard name . May return false if ( currently ) <nl> namespace mongo { <nl> * @ return whether a give dbname is used for shard " local " databases ( e . g . , admin or local ) <nl> * / <nl> static bool _isSpecialLocalDB ( const std : : string & dbName ) ; <nl> + <nl> + <nl> + / / Databases catalog map and mutex to protect it <nl> + mongo : : mutex _lock ; <nl> + std : : map < std : : string , DBConfigPtr > _databases ; <nl> + <nl> + / / can ' localhost ' be used in shard addresses ? 
<nl> + bool _allowLocalShard ; <nl> } ; <nl> <nl> extern Grid grid ; <nl> mmm a / src / mongo / s / mongos_options . cpp <nl> ppp b / src / mongo / s / mongos_options . cpp <nl> <nl> - / * <nl> + / * * <nl> * Copyright ( C ) 2013 10gen Inc . <nl> * <nl> * This program is free software : you can redistribute it and / or modify <nl> mmm a / src / mongo / s / server . cpp <nl> ppp b / src / mongo / s / server . cpp <nl> <nl> # include " mongo / db / startup_warnings_common . h " <nl> # include " mongo / platform / process_id . h " <nl> # include " mongo / s / balance . h " <nl> - # include " mongo / s / chunk . h " <nl> + # include " mongo / s / chunk_manager . h " <nl> # include " mongo / s / client_info . h " <nl> # include " mongo / s / config . h " <nl> # include " mongo / s / config_server_checker_service . h " <nl> mmm a / src / mongo / s / strategy . cpp <nl> ppp b / src / mongo / s / strategy . cpp <nl> <nl> # include " mongo / s / chunk_manager_targeter . h " <nl> # include " mongo / s / client_info . h " <nl> # include " mongo / s / cluster_write . h " <nl> - # include " mongo / s / chunk . h " <nl> + # include " mongo / s / chunk_manager . h " <nl> # include " mongo / s / chunk_version . h " <nl> # include " mongo / s / cursors . h " <nl> # include " mongo / s / dbclient_shard_resolver . h " <nl> mmm a / src / mongo / s / version_manager . cpp <nl> ppp b / src / mongo / s / version_manager . cpp <nl> <nl> <nl> # include " mongo / s / version_manager . h " <nl> <nl> - # include " mongo / s / chunk . h " <nl> + # include " mongo / s / chunk_manager . h " <nl> # include " mongo / s / chunk_version . h " <nl> # include " mongo / s / client / shard_connection . h " <nl> # include " mongo / s / config . h " <nl> | SERVER - 17496 Move ChunkManager to separate file | mongodb/mongo | df85a204a7d56cc3901f817e4bb7519103230abe | 2015-03-09T15:46:34Z |
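The SERVER-17496 record above, among other cleanups, consolidates the mongos targeter's update classification: an update document whose top-level field names all begin with '$' is op-style (e.g. {$set: {y: 2}}), one with only plain field names is replacement-style (e.g. {y: 2}), and a mix of the two is rejected as unknown. A minimal standalone sketch of that rule, using a std::map of field names in place of mongos's BSONObj (classifyUpdate and the map representation are illustrative, not part of the commit):

#include <cassert>
#include <map>
#include <string>

// Toy stand-in for getUpdateExprType(): a map from top-level field name to
// (unparsed) value text replaces the real BSONObj.
enum UpdateType { UpdateType_Replacement, UpdateType_OpStyle, UpdateType_Unknown };

UpdateType classifyUpdate(const std::map<std::string, std::string>& updateExpr) {
    if (updateExpr.empty()) {
        return UpdateType_Replacement;  // empty update is replacement-style by default
    }
    UpdateType updateType = UpdateType_Unknown;
    for (std::map<std::string, std::string>::const_iterator it = updateExpr.begin();
         it != updateExpr.end(); ++it) {
        const bool isOperator = !it->first.empty() && it->first[0] == '$';
        const UpdateType fieldType = isOperator ? UpdateType_OpStyle
                                                : UpdateType_Replacement;
        if (updateType == UpdateType_Unknown) {
            updateType = fieldType;          // first field decides the tentative style
        } else if (updateType != fieldType) {
            return UpdateType_Unknown;       // mixed $-operators and plain fields
        }
    }
    return updateType;
}

int main() {
    std::map<std::string, std::string> opStyle;
    opStyle["$set"] = "{y: 2}";
    assert(classifyUpdate(opStyle) == UpdateType_OpStyle);

    std::map<std::string, std::string> replacement;
    replacement["y"] = "2";
    assert(classifyUpdate(replacement) == UpdateType_Replacement);

    std::map<std::string, std::string> mixed;
    mixed["$set"] = "{y: 2}";
    mixed["z"] = "3";
    assert(classifyUpdate(mixed) == UpdateType_Unknown);
    return 0;
}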
mmm a / Documentation / Books / Manual / Deployment / Mesos . mdpp <nl> ppp b / Documentation / Books / Manual / Deployment / Mesos . mdpp <nl> DC / OS is the recommended way to install a cluster as it eases much of the proces <nl> DC / OS comes with its own package management . Packages can be installed from the so called " Universe " . As an official DC / OS partner ArangoDB can be installed from there straight away . <nl> <nl> 1 . Installing via DC / OS UI <nl> - 1 . Go to https : / / dcos . io and prepare a cluster <nl> - 2 . Open your browser and go to the DC / OS admin interface <nl> - 3 . Open the " Universe " tab <nl> - 4 . Locate arangodb and hit " Install Package " <nl> - 5 . Press " Install Package " <nl> + <nl> + 1 . Go to https : / / dcos . io and prepare a cluster <nl> + 2 . Open your browser and go to the DC / OS admin interface <nl> + 3 . Open the " Universe " tab <nl> + 4 . Locate arangodb and hit " Install Package " <nl> + 5 . Press " Install Package " <nl> <nl> 2 . Installing via the DC / OS command line <nl> <nl> - 1 . Install the [ dcos cli ] ( https : / / docs . mesosphere . com / usage / cli / ) <nl> - 2 . Open a terminal and issue ` dcos install arangodb ` <nl> + 1 . Install the [ dcos cli ] ( https : / / docs . mesosphere . com / usage / cli / ) <nl> + 2 . Open a terminal and issue ` dcos install arangodb ` <nl> <nl> Both options are essentially doing the same in the background . Both are starting ArangoDB with its default options set . <nl> <nl> | Fix indentation and avoid docu - block . | arangodb/arangodb | 6167b6dcf5b5def4ca16e1cddc8cdbd8a593893a | 2016-06-24T08:42:44Z |
mmm a / code / search / src / binary_search / binary_search . js <nl> ppp b / code / search / src / binary_search / binary_search . js <nl> <nl> + / / implementation by looping <nl> function binarySearch ( array , key ) { <nl> - var lo = 0 , <nl> - hi = array . length - 1 , <nl> - mid , <nl> - element ; <nl> - while ( lo < = hi ) { <nl> - mid = Math . floor ( ( lo + hi ) / 2 , 10 ) ; <nl> - element = array [ mid ] ; <nl> - if ( element < key ) { <nl> - lo = mid + 1 ; <nl> - } else if ( element > key ) { <nl> - hi = mid - 1 ; <nl> - } else { <nl> - return mid ; <nl> - } <nl> + var lo = 0 , <nl> + hi = array . length - 1 , <nl> + mid , <nl> + element ; <nl> + while ( lo < = hi ) { <nl> + mid = Math . floor ( ( lo + hi ) / 2 , 10 ) ; <nl> + element = array [ mid ] ; <nl> + if ( element < key ) { <nl> + lo = mid + 1 ; <nl> + } else if ( element > key ) { <nl> + hi = mid - 1 ; <nl> + } else { <nl> + return mid ; <nl> } <nl> + } <nl> + return - 1 ; <nl> + } <nl> + <nl> + / / implementation by recursion <nl> + / * * <nl> + * <nl> + * @ param { * number [ ] } arr - the sorted array to be searched in <nl> + * @ param { * number } value - the value to be searched <nl> + * @ param { * number } low - the start index of the search range <nl> + * @ param { * number } high - the end index of the search range <nl> + * / <nl> + function binarySearchByRecursion ( arr , value , low , high ) { <nl> + var start = low = = = undefined ? 0 : low ; <nl> + var end = high = = = undefined ? arr . length - 1 : high ; <nl> + var middle = Math . floor ( ( start + end ) / 2 ) ; <nl> + if ( end = = = start + 1 & & arr [ middle ] ! = = value & & arr [ start ] ! = = value ) { <nl> return - 1 ; <nl> + } <nl> + if ( arr [ start ] = = = value ) { <nl> + return start ; <nl> + } else if ( arr [ end ] = = = value ) { <nl> + return end ; <nl> + } else if ( arr [ middle ] = = = value ) { <nl> + return middle ; <nl> + } else { <nl> + if ( arr [ middle ] > value ) { <nl> + return binarySearchByRecursion ( arr , value , start , middle ) ; <nl> + } else { <nl> + return binarySearchByRecursion ( arr , value , middle , end ) ; <nl> + } <nl> + } <nl> } <nl> | add binary search implemented by recursion in javascript | OpenGenus/cosmos | f66aa17b4c14786386ad450117cc98f5f2fb1653 | 2018-03-13T08:00:29Z |
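The recursive JavaScript version in the record above relies on the base case `end === start + 1`, which never fires for empty or single-element inputs: a call such as binarySearchByRecursion([5], 3) reaches the else branch with start === end === middle and recurses with identical arguments. Using a half-open interval [lo, hi) gives a single empty-range base case that covers those inputs. A standalone C++ sketch of that variant (not part of the commit; names are illustrative):

#include <cassert>
#include <vector>

// Recursive binary search over the half-open range [lo, hi).
// The range strictly shrinks on every call, so the empty-range
// check is the only base case needed besides a direct hit.
int binarySearch(const std::vector<int>& arr, int value, int lo, int hi) {
    if (lo >= hi) {
        return -1;                     // empty range: not found
    }
    int mid = lo + (hi - lo) / 2;      // avoids overflow of (lo + hi) / 2
    if (arr[mid] == value) {
        return mid;
    }
    if (arr[mid] < value) {
        return binarySearch(arr, value, mid + 1, hi);  // search right half
    }
    return binarySearch(arr, value, lo, mid);          // search left half
}

int main() {
    std::vector<int> arr;
    arr.push_back(1); arr.push_back(3); arr.push_back(5); arr.push_back(9);
    assert(binarySearch(arr, 5, 0, (int)arr.size()) == 2);
    assert(binarySearch(arr, 4, 0, (int)arr.size()) == -1);

    std::vector<int> one(1, 7);        // single element, value absent
    assert(binarySearch(one, 3, 0, (int)one.size()) == -1);
    return 0;
}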
mmm a / tensorflow / core / ops / mkl_nn_ops . cc <nl> ppp b / tensorflow / core / ops / mkl_nn_ops . cc <nl> REGISTER_OP ( " _MklQuantizedDepthwiseConv2DWithBiasAndRelu " ) <nl> . Attr ( " is_bias_const : bool = true " ) <nl> . Attr ( GetPaddingAttrString ( ) ) <nl> . Attr ( " dilations : list ( int ) = [ 1 , 1 , 1 , 1 ] " ) <nl> - . Attr ( " paddings : list ( int ) = [ ] " ) <nl> + . Attr ( " padding_list : list ( int ) = [ ] " ) <nl> . SetShapeFn ( [ ] ( InferenceContext * c ) { <nl> TF_RETURN_IF_ERROR ( shape_inference : : Conv2DShape ( c ) ) ; <nl> ShapeHandle unused , channel ; <nl> REGISTER_OP ( " _MklQuantizedDepthwiseConv2DWithBiasAndReluAndRequantize " ) <nl> . Attr ( " is_bias_const : bool = true " ) <nl> . Attr ( GetPaddingAttrString ( ) ) <nl> . Attr ( " dilations : list ( int ) = [ 1 , 1 , 1 , 1 ] " ) <nl> - . Attr ( " paddings : list ( int ) = [ ] " ) <nl> + . Attr ( " padding_list : list ( int ) = [ ] " ) <nl> . SetShapeFn ( [ ] ( InferenceContext * c ) { <nl> TF_RETURN_IF_ERROR ( shape_inference : : Conv2DShape ( c ) ) ; <nl> ShapeHandle unused ; <nl> mmm a / tensorflow / core / ops / nn_ops . cc <nl> ppp b / tensorflow / core / ops / nn_ops . cc <nl> REGISTER_OP ( " QuantizedDepthwiseConv2DWithBiasAndRelu " ) <nl> . Attr ( " strides : list ( int ) " ) <nl> . Attr ( GetPaddingAttrString ( ) ) <nl> . Attr ( " dilations : list ( int ) = [ 1 , 1 , 1 , 1 ] " ) <nl> - . Attr ( " paddings : list ( int ) = [ ] " ) <nl> + . Attr ( " padding_list : list ( int ) = [ ] " ) <nl> . SetShapeFn ( shape_inference : : DepthwiseConv2DNativeShape ) ; <nl> <nl> REGISTER_OP ( " QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize " ) <nl> REGISTER_OP ( " QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize " ) <nl> . Attr ( " strides : list ( int ) " ) <nl> . Attr ( GetPaddingAttrString ( ) ) <nl> . Attr ( " dilations : list ( int ) = [ 1 , 1 , 1 , 1 ] " ) <nl> - . Attr ( " paddings : list ( int ) = [ ] " ) <nl> + . Attr ( " padding_list : list ( int ) = [ ] " ) <nl> . SetShapeFn ( shape_inference : : DepthwiseConv2DNativeShape ) ; <nl> <nl> } / / namespace tensorflow <nl> mmm a / tensorflow / tools / api / golden / v1 / tensorflow . raw_ops . pbtxt <nl> ppp b / tensorflow / tools / api / golden / v1 / tensorflow . raw_ops . 
pbtxt <nl> tf_module { <nl> } <nl> member_method { <nl> name : " QuantizedDepthwiseConv2DWithBiasAndRelu " <nl> - argspec : " args = [ \ ' input \ ' , \ ' filter \ ' , \ ' bias \ ' , \ ' min_input \ ' , \ ' max_input \ ' , \ ' min_filter \ ' , \ ' max_filter \ ' , \ ' strides \ ' , \ ' padding \ ' , \ ' out_type \ ' , \ ' dilations \ ' , \ ' paddings \ ' , \ ' name \ ' ] , varargs = None , keywords = None , defaults = [ \ " < dtype : \ ' qint32 \ ' > \ " , \ ' [ 1 , 1 , 1 , 1 ] \ ' , \ ' [ ] \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' input \ ' , \ ' filter \ ' , \ ' bias \ ' , \ ' min_input \ ' , \ ' max_input \ ' , \ ' min_filter \ ' , \ ' max_filter \ ' , \ ' strides \ ' , \ ' padding \ ' , \ ' out_type \ ' , \ ' dilations \ ' , \ ' padding_list \ ' , \ ' name \ ' ] , varargs = None , keywords = None , defaults = [ \ " < dtype : \ ' qint32 \ ' > \ " , \ ' [ 1 , 1 , 1 , 1 ] \ ' , \ ' [ ] \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize " <nl> - argspec : " args = [ \ ' input \ ' , \ ' filter \ ' , \ ' bias \ ' , \ ' min_input \ ' , \ ' max_input \ ' , \ ' min_filter \ ' , \ ' max_filter \ ' , \ ' min_freezed_output \ ' , \ ' max_freezed_output \ ' , \ ' strides \ ' , \ ' padding \ ' , \ ' out_type \ ' , \ ' dilations \ ' , \ ' paddings \ ' , \ ' name \ ' ] , varargs = None , keywords = None , defaults = [ \ " < dtype : \ ' quint8 \ ' > \ " , \ ' [ 1 , 1 , 1 , 1 ] \ ' , \ ' [ ] \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' input \ ' , \ ' filter \ ' , \ ' bias \ ' , \ ' min_input \ ' , \ ' max_input \ ' , \ ' min_filter \ ' , \ ' max_filter \ ' , \ ' min_freezed_output \ ' , \ ' max_freezed_output \ ' , \ ' strides \ ' , \ ' padding \ ' , \ ' out_type \ ' , \ ' dilations \ ' , \ ' padding_list \ ' , \ ' name \ ' ] , varargs = None , keywords = None , defaults = [ \ " < dtype : \ ' quint8 \ ' > \ " , \ ' [ 1 , 1 , 1 , 1 ] \ ' , \ ' [ ] \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " QuantizedInstanceNorm " <nl> mmm a / tensorflow / tools / api / golden / v2 / tensorflow . raw_ops . pbtxt <nl> ppp b / tensorflow / tools / api / golden / v2 / tensorflow . raw_ops . 
pbtxt <nl> tf_module { <nl> } <nl> member_method { <nl> name : " QuantizedDepthwiseConv2DWithBiasAndRelu " <nl> - argspec : " args = [ \ ' input \ ' , \ ' filter \ ' , \ ' bias \ ' , \ ' min_input \ ' , \ ' max_input \ ' , \ ' min_filter \ ' , \ ' max_filter \ ' , \ ' strides \ ' , \ ' padding \ ' , \ ' out_type \ ' , \ ' dilations \ ' , \ ' paddings \ ' , \ ' name \ ' ] , varargs = None , keywords = None , defaults = [ \ " < dtype : \ ' qint32 \ ' > \ " , \ ' [ 1 , 1 , 1 , 1 ] \ ' , \ ' [ ] \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' input \ ' , \ ' filter \ ' , \ ' bias \ ' , \ ' min_input \ ' , \ ' max_input \ ' , \ ' min_filter \ ' , \ ' max_filter \ ' , \ ' strides \ ' , \ ' padding \ ' , \ ' out_type \ ' , \ ' dilations \ ' , \ ' padding_list \ ' , \ ' name \ ' ] , varargs = None , keywords = None , defaults = [ \ " < dtype : \ ' qint32 \ ' > \ " , \ ' [ 1 , 1 , 1 , 1 ] \ ' , \ ' [ ] \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize " <nl> - argspec : " args = [ \ ' input \ ' , \ ' filter \ ' , \ ' bias \ ' , \ ' min_input \ ' , \ ' max_input \ ' , \ ' min_filter \ ' , \ ' max_filter \ ' , \ ' min_freezed_output \ ' , \ ' max_freezed_output \ ' , \ ' strides \ ' , \ ' padding \ ' , \ ' out_type \ ' , \ ' dilations \ ' , \ ' paddings \ ' , \ ' name \ ' ] , varargs = None , keywords = None , defaults = [ \ " < dtype : \ ' quint8 \ ' > \ " , \ ' [ 1 , 1 , 1 , 1 ] \ ' , \ ' [ ] \ ' , \ ' None \ ' ] , " <nl> + argspec : " args = [ \ ' input \ ' , \ ' filter \ ' , \ ' bias \ ' , \ ' min_input \ ' , \ ' max_input \ ' , \ ' min_filter \ ' , \ ' max_filter \ ' , \ ' min_freezed_output \ ' , \ ' max_freezed_output \ ' , \ ' strides \ ' , \ ' padding \ ' , \ ' out_type \ ' , \ ' dilations \ ' , \ ' padding_list \ ' , \ ' name \ ' ] , varargs = None , keywords = None , defaults = [ \ " < dtype : \ ' quint8 \ ' > \ " , \ ' [ 1 , 1 , 1 , 1 ] \ ' , \ ' [ ] \ ' , \ ' None \ ' ] , " <nl> } <nl> member_method { <nl> name : " QuantizedInstanceNorm " <nl> | Revert " change padding_list to paddings " | tensorflow/tensorflow | ebace5d2a2b53315eb9337f3e1266583fbee276e | 2019-11-12T07:53:40Z |
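The revert above matters because the attr name declared in REGISTER_OP must match the string the corresponding kernel passes to GetAttr; renaming only the registration to "paddings" would leave kernels that still look up "padding_list" failing at construction time, which is presumably why the registrations are restored. A sketch of that pairing, assuming the TensorFlow framework headers (ExamplePaddedOp is a hypothetical op for illustration, not one from the commit):

#include <vector>
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"

REGISTER_OP("ExamplePaddedOp")               // hypothetical op name
    .Input("input: float")
    .Output("output: float")
    .Attr("padding_list: list(int) = []");   // defaults to an empty list

class ExamplePaddedOp : public tensorflow::OpKernel {
 public:
  explicit ExamplePaddedOp(tensorflow::OpKernelConstruction* ctx)
      : OpKernel(ctx) {
    // Fails at kernel-construction time if this string does not match the
    // attr name in the registration, e.g. if one side still said "paddings".
    OP_REQUIRES_OK(ctx, ctx->GetAttr("padding_list", &padding_list_));
  }

  void Compute(tensorflow::OpKernelContext* ctx) override {
    ctx->set_output(0, ctx->input(0));  // pass-through; padding logic omitted
  }

 private:
  std::vector<tensorflow::int32> padding_list_;
};

REGISTER_KERNEL_BUILDER(
    Name("ExamplePaddedOp").Device(tensorflow::DEVICE_CPU), ExamplePaddedOp);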
mmm a / folly / ExceptionWrapper . cpp <nl> ppp b / folly / ExceptionWrapper . cpp <nl> namespace folly { <nl> } else if ( eptr_ ) { <nl> std : : rethrow_exception ( eptr_ ) ; <nl> } <nl> + std : : ios_base : : Init ioinit_ ; / / ensure std : : cerr is alive <nl> std : : cerr <nl> < < " Cannot use ` throwException ` with an empty folly : : exception_wrapper " <nl> < < std : : endl ; <nl> | Fix exception_wrapper : : throwException when called before main | facebook/folly | 05499631dc3e1160d673a7dd57e70fc797e532dc | 2017-02-17T05:23:07Z |
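The one-line folly fix above works because constructing a std::ios_base::Init object forces initialization of the standard streams, so std::cerr is safe to use even when the surrounding code runs during static initialization, before main() and before iostreams would otherwise be set up. A standalone sketch of the same idiom (logEarly and EarlyLogger are illustrative names, not folly code):

#include <iostream>

// A function that may be called during static initialization, before main().
void logEarly(const char* msg) {
    // Constructing std::ios_base::Init guarantees the standard streams
    // exist; the object is reference-counted, so repeat construction is cheap.
    std::ios_base::Init ioinit;
    std::cerr << msg << std::endl;
}

// A static object whose constructor runs before main() starts.
struct EarlyLogger {
    EarlyLogger() { logEarly("logged before main"); }
};
static EarlyLogger earlyLogger;

int main() {
    logEarly("logged after main started");
    return 0;
}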
mmm a / scripting / javascript / bindings / ScriptingCore . cpp <nl> ppp b / scripting / javascript / bindings / ScriptingCore . cpp <nl> jsval ccarray_to_jsval ( JSContext * cx , CCArray * arr ) { <nl> CCObject * obj = arr - > objectAtIndex ( i ) ; <nl> <nl> CCString * testString = dynamic_cast < cocos2d : : CCString * > ( obj ) ; <nl> + CCDictionary * testDict = NULL ; <nl> + CCArray * testArray = NULL ; <nl> + / / XXX : Only supports string , since all data read from plist files will be stored as string in cocos2d - x <nl> + / / Do we need to convert string to js base type ? <nl> if ( testString ) { <nl> arrElement = c_string_to_jsval ( cx , testString - > getCString ( ) ) ; <nl> + } else if ( testDict = dynamic_cast < cocos2d : : CCDictionary * > ( obj ) ) { <nl> + arrElement = ccdictionary_to_jsval ( cx , testDict ) ; <nl> + } else if ( testArray = dynamic_cast < cocos2d : : CCArray * > ( obj ) ) { <nl> + arrElement = ccarray_to_jsval ( cx , testArray ) ; <nl> } else { <nl> js_proxy_t * proxy = js_get_or_create_proxy < cocos2d : : CCObject > ( cx , obj ) ; <nl> arrElement = OBJECT_TO_JSVAL ( proxy - > obj ) ; <nl> jsval ccarray_to_jsval ( JSContext * cx , CCArray * arr ) { <nl> return OBJECT_TO_JSVAL ( jsretArr ) ; <nl> } <nl> <nl> + jsval ccdictionary_to_jsval ( JSContext * cx , CCDictionary * dict ) <nl> + { <nl> + JSObject * jsRet = JS_NewObject ( cx , NULL , NULL , NULL ) ; <nl> + CCDictElement * pElement = NULL ; <nl> + CCDICT_FOREACH ( dict , pElement ) <nl> + { <nl> + jsval dictElement ; <nl> + CCString * obj = dynamic_cast < CCString * > ( pElement - > getObject ( ) ) ; <nl> + <nl> + CCString * testString = dynamic_cast < cocos2d : : CCString * > ( obj ) ; <nl> + CCDictionary * testDict = NULL ; <nl> + CCArray * testArray = NULL ; <nl> + / / XXX : Only supports string , since all data read from plist files will be stored as string in cocos2d - x <nl> + / / Do we need to convert string to js base type ? <nl> + if ( testString ) { <nl> + dictElement = c_string_to_jsval ( cx , testString - > getCString ( ) ) ; <nl> + } else if ( testDict = dynamic_cast < cocos2d : : CCDictionary * > ( obj ) ) { <nl> + dictElement = ccdictionary_to_jsval ( cx , testDict ) ; <nl> + } else if ( testArray = dynamic_cast < cocos2d : : CCArray * > ( obj ) ) { <nl> + dictElement = ccarray_to_jsval ( cx , testArray ) ; <nl> + } else { <nl> + js_proxy_t * proxy = js_get_or_create_proxy < cocos2d : : CCObject > ( cx , obj ) ; <nl> + dictElement = OBJECT_TO_JSVAL ( proxy - > obj ) ; <nl> + } <nl> + <nl> + const char * key = pElement - > getStrKey ( ) ; <nl> + if ( key & & strlen ( key ) > 0 ) <nl> + { <nl> + JS_SetProperty ( cx , jsRet , key , & dictElement ) ; <nl> + } <nl> + } <nl> + return OBJECT_TO_JSVAL ( jsRet ) ; <nl> + } <nl> + <nl> jsval long_long_to_jsval ( JSContext * cx , long long v ) { <nl> JSObject * tmp = JS_NewUint32Array ( cx , 2 ) ; <nl> uint32_t * data = ( uint32_t * ) JS_GetArrayBufferViewData ( tmp , cx ) ; <nl> mmm a / scripting / javascript / bindings / ScriptingCore . h <nl> ppp b / scripting / javascript / bindings / ScriptingCore . 
h <nl> ccColor3B jsval_to_cccolor3b ( JSContext * cx , jsval v ) ; <nl> JSBool jsval_to_ccarray_of_CCPoint ( JSContext * cx , jsval v , CCPoint * * points , int * numPoints ) ; <nl> CCArray * jsval_to_ccarray ( JSContext * cx , jsval v ) ; <nl> jsval ccarray_to_jsval ( JSContext * cx , CCArray * arr ) ; <nl> + jsval ccdictionary_to_jsval ( JSContext * cx , CCDictionary * dict ) ; <nl> / / from native <nl> jsval long_long_to_jsval ( JSContext * cx , long long v ) ; <nl> jsval std_string_to_jsval ( JSContext * cx , std : : string & v ) ; <nl> | issue : Added " ccdictionary_to_jsval " function , now TMXOrthoObjectsTest and TMXIsoObjectsTest don ' t crash . | cocos2d/cocos2d-x | 449c719d96abaa9ea97846a56f7c075f0b364510 | 2012-11-06T08:11:45Z |
mmm a / src / compiler / js - native - context - specialization . cc <nl> ppp b / src / compiler / js - native - context - specialization . cc <nl> FieldAccess ForPropertyCellValue ( MachineRepresentation representation , <nl> Reduction JSNativeContextSpecialization : : ReduceGlobalAccess ( <nl> Node * node , Node * receiver , Node * value , Handle < Name > name , <nl> AccessMode access_mode , Node * index ) { <nl> - Node * effect = NodeProperties : : GetEffectInput ( node ) ; <nl> - Node * control = NodeProperties : : GetControlInput ( node ) ; <nl> - <nl> / / Lookup on the global object . We only deal with own data properties <nl> / / of the global object here ( represented as PropertyCell ) . <nl> LookupIterator it ( isolate ( ) , global_object ( ) , name , LookupIterator : : OWN ) ; <nl> Reduction JSNativeContextSpecialization : : ReduceGlobalAccess ( <nl> if ( it . state ( ) ! = LookupIterator : : DATA ) return NoChange ( ) ; <nl> if ( ! it . GetHolder < JSObject > ( ) - > IsJSGlobalObject ( ) ) return NoChange ( ) ; <nl> Handle < PropertyCell > property_cell = it . GetPropertyCell ( ) ; <nl> - PropertyDetails property_details = property_cell - > property_details ( ) ; <nl> + return ReduceGlobalAccess ( node , receiver , value , name , access_mode , index , <nl> + property_cell ) ; <nl> + } <nl> + <nl> + Reduction JSNativeContextSpecialization : : ReduceGlobalAccess ( <nl> + Node * node , Node * receiver , Node * value , Handle < Name > name , <nl> + AccessMode access_mode , Node * index , Handle < PropertyCell > property_cell ) { <nl> + Node * effect = NodeProperties : : GetEffectInput ( node ) ; <nl> + Node * control = NodeProperties : : GetControlInput ( node ) ; <nl> + <nl> Handle < Object > property_cell_value ( property_cell - > value ( ) , isolate ( ) ) ; <nl> + if ( property_cell_value . is_identical_to ( factory ( ) - > the_hole_value ( ) ) ) { <nl> + / / The property cell is no longer valid . <nl> + return NoChange ( ) ; <nl> + } <nl> + <nl> + PropertyDetails property_details = property_cell - > property_details ( ) ; <nl> PropertyCellType property_cell_type = property_details . cell_type ( ) ; <nl> + DCHECK_EQ ( kData , property_details . kind ( ) ) ; <nl> <nl> / / We have additional constraints for stores . <nl> if ( access_mode = = AccessMode : : kStore ) { <nl> Reduction JSNativeContextSpecialization : : ReduceGlobalAccess ( <nl> <nl> Reduction JSNativeContextSpecialization : : ReduceJSLoadGlobal ( Node * node ) { <nl> DCHECK_EQ ( IrOpcode : : kJSLoadGlobal , node - > opcode ( ) ) ; <nl> - NameRef name ( broker ( ) , LoadGlobalParametersOf ( node - > op ( ) ) . name ( ) ) ; <nl> Node * effect = NodeProperties : : GetEffectInput ( node ) ; <nl> <nl> - / / Try to lookup the name on the script context table first ( lexical scoping ) . <nl> - base : : Optional < ScriptContextTableRef : : LookupResult > result = <nl> - native_context ( ) . script_context_table ( ) . lookup ( name ) ; <nl> - if ( result ) { <nl> - ObjectRef contents = result - > context . get ( result - > index ) ; <nl> - if ( contents . IsHeapObject ( ) & & <nl> - contents . AsHeapObject ( ) . map ( ) . oddball_type ( ) = = OddballType : : kHole ) { <nl> - return NoChange ( ) ; <nl> + LoadGlobalParameters const & p = LoadGlobalParametersOf ( node - > op ( ) ) ; <nl> + if ( ! p . feedback ( ) . IsValid ( ) ) return NoChange ( ) ; <nl> + FeedbackNexus nexus ( p . feedback ( ) . vector ( ) , p . feedback ( ) . slot ( ) ) ; <nl> + <nl> + DCHECK ( nexus . 
kind ( ) = = FeedbackSlotKind : : kLoadGlobalInsideTypeof | | <nl> + nexus . kind ( ) = = FeedbackSlotKind : : kLoadGlobalNotInsideTypeof ) ; <nl> + if ( nexus . GetFeedback ( ) - > IsCleared ( ) ) return NoChange ( ) ; <nl> + Handle < Object > feedback ( nexus . GetFeedback ( ) - > GetHeapObjectOrSmi ( ) , isolate ( ) ) ; <nl> + <nl> + if ( feedback - > IsSmi ( ) ) { <nl> + / / The wanted name belongs to a script - scope variable and the feedback tells <nl> + / / us where to find its value . <nl> + <nl> + int number = feedback - > Number ( ) ; <nl> + int const script_context_index = <nl> + FeedbackNexus : : ContextIndexBits : : decode ( number ) ; <nl> + int const context_slot_index = FeedbackNexus : : SlotIndexBits : : decode ( number ) ; <nl> + bool const immutable = FeedbackNexus : : ImmutabilityBit : : decode ( number ) ; <nl> + Handle < Context > context = ScriptContextTable : : GetContext ( <nl> + isolate ( ) , native_context ( ) . script_context_table ( ) . object ( ) , <nl> + script_context_index ) ; <nl> + <nl> + { <nl> + ObjectRef contents ( broker ( ) , <nl> + handle ( context - > get ( context_slot_index ) , isolate ( ) ) ) ; <nl> + CHECK ( ! contents . equals ( ObjectRef ( broker ( ) , factory ( ) - > the_hole_value ( ) ) ) ) ; <nl> } <nl> - Node * context = jsgraph ( ) - > Constant ( result - > context ) ; <nl> + <nl> + Node * context_constant = jsgraph ( ) - > Constant ( context ) ; <nl> Node * value = effect = graph ( ) - > NewNode ( <nl> - javascript ( ) - > LoadContext ( 0 , result - > index , result - > immutable ) , context , <nl> - effect ) ; <nl> + javascript ( ) - > LoadContext ( 0 , context_slot_index , immutable ) , <nl> + context_constant , effect ) ; <nl> ReplaceWithValue ( node , value , effect ) ; <nl> return Replace ( value ) ; <nl> } <nl> <nl> - / / Lookup the { name } on the global object instead . <nl> - return ReduceGlobalAccess ( node , nullptr , nullptr , name . object ( ) , <nl> - AccessMode : : kLoad ) ; <nl> + CHECK ( feedback - > IsPropertyCell ( ) ) ; <nl> + / / The wanted name belongs ( or did belong ) to a property on the global object <nl> + / / and the feedback is the cell holding its value . <nl> + return ReduceGlobalAccess ( node , nullptr , nullptr , p . name ( ) , AccessMode : : kLoad , <nl> + nullptr , Handle < PropertyCell > : : cast ( feedback ) ) ; <nl> } <nl> <nl> Reduction JSNativeContextSpecialization : : ReduceJSStoreGlobal ( Node * node ) { <nl> DCHECK_EQ ( IrOpcode : : kJSStoreGlobal , node - > opcode ( ) ) ; <nl> - NameRef name ( broker ( ) , StoreGlobalParametersOf ( node - > op ( ) ) . name ( ) ) ; <nl> Node * value = NodeProperties : : GetValueInput ( node , 0 ) ; <nl> Node * effect = NodeProperties : : GetEffectInput ( node ) ; <nl> Node * control = NodeProperties : : GetControlInput ( node ) ; <nl> <nl> - / / Try to lookup the name on the script context table first ( lexical scoping ) . <nl> - base : : Optional < ScriptContextTableRef : : LookupResult > result = <nl> - native_context ( ) . script_context_table ( ) . lookup ( name ) ; <nl> - if ( result ) { <nl> - ObjectRef contents = result - > context . get ( result - > index ) ; <nl> - if ( ( contents . IsHeapObject ( ) & & <nl> - contents . AsHeapObject ( ) . map ( ) . oddball_type ( ) = = OddballType : : kHole ) | | <nl> - result - > immutable ) { <nl> - return NoChange ( ) ; <nl> + StoreGlobalParameters const & p = StoreGlobalParametersOf ( node - > op ( ) ) ; <nl> + if ( ! p . feedback ( ) . IsValid ( ) ) return NoChange ( ) ; <nl> + FeedbackNexus nexus ( p . 
feedback ( ) . vector ( ) , p . feedback ( ) . slot ( ) ) ; <nl> + <nl> + DCHECK ( nexus . kind ( ) = = FeedbackSlotKind : : kStoreGlobalSloppy | | <nl> + nexus . kind ( ) = = FeedbackSlotKind : : kStoreGlobalStrict ) ; <nl> + if ( nexus . GetFeedback ( ) - > IsCleared ( ) ) return NoChange ( ) ; <nl> + Handle < Object > feedback ( nexus . GetFeedback ( ) - > GetHeapObjectOrSmi ( ) , isolate ( ) ) ; <nl> + <nl> + if ( feedback - > IsSmi ( ) ) { <nl> + / / The wanted name belongs to a script - scope variable and the feedback tells <nl> + / / us where to find its value . <nl> + <nl> + int const script_context_index = <nl> + FeedbackNexus : : ContextIndexBits : : decode ( feedback - > Number ( ) ) ; <nl> + int const context_slot_index = <nl> + FeedbackNexus : : SlotIndexBits : : decode ( feedback - > Number ( ) ) ; <nl> + bool const immutable = <nl> + FeedbackNexus : : ImmutabilityBit : : decode ( feedback - > Number ( ) ) ; <nl> + Handle < Context > context = ScriptContextTable : : GetContext ( <nl> + isolate ( ) , native_context ( ) . script_context_table ( ) . object ( ) , <nl> + script_context_index ) ; <nl> + <nl> + if ( immutable ) return NoChange ( ) ; <nl> + <nl> + { <nl> + ObjectRef contents ( broker ( ) , <nl> + handle ( context - > get ( context_slot_index ) , isolate ( ) ) ) ; <nl> + CHECK ( ! contents . equals ( ObjectRef ( broker ( ) , factory ( ) - > the_hole_value ( ) ) ) ) ; <nl> } <nl> - Node * context = jsgraph ( ) - > Constant ( result - > context ) ; <nl> - effect = graph ( ) - > NewNode ( javascript ( ) - > StoreContext ( 0 , result - > index ) , <nl> - value , context , effect , control ) ; <nl> + <nl> + Node * context_constant = jsgraph ( ) - > Constant ( context ) ; <nl> + effect = graph ( ) - > NewNode ( javascript ( ) - > StoreContext ( 0 , context_slot_index ) , <nl> + value , context_constant , effect , control ) ; <nl> ReplaceWithValue ( node , value , effect , control ) ; <nl> return Replace ( value ) ; <nl> } <nl> <nl> - / / Lookup the { name } on the global object instead . <nl> - return ReduceGlobalAccess ( node , nullptr , value , name . object ( ) , <nl> - AccessMode : : kStore ) ; <nl> + CHECK ( feedback - > IsPropertyCell ( ) ) ; <nl> + / / The wanted name belongs ( or did belong ) to a property on the global object <nl> + / / and the feedback is the cell holding its value . <nl> + return ReduceGlobalAccess ( node , nullptr , value , p . name ( ) , AccessMode : : kStore , <nl> + nullptr , Handle < PropertyCell > : : cast ( feedback ) ) ; <nl> } <nl> <nl> Reduction JSNativeContextSpecialization : : ReduceNamedAccess ( <nl> mmm a / src / compiler / js - native - context - specialization . h <nl> ppp b / src / compiler / js - native - context - specialization . h <nl> class V8_EXPORT_PRIVATE JSNativeContextSpecialization final <nl> Reduction ReduceGlobalAccess ( Node * node , Node * receiver , Node * value , <nl> Handle < Name > name , AccessMode access_mode , <nl> Node * index = nullptr ) ; <nl> + Reduction ReduceGlobalAccess ( Node * node , Node * receiver , Node * value , <nl> + Handle < Name > name , AccessMode access_mode , <nl> + Node * index , Handle < PropertyCell > property_cell ) ; <nl> <nl> Reduction ReduceSoftDeoptimize ( Node * node , DeoptimizeReason reason ) ; <nl> Reduction ReduceJSToString ( Node * node ) ; <nl> mmm a / src / feedback - vector . cc <nl> ppp b / src / feedback - vector . 
cc <nl> void FeedbackNexus : : ConfigurePropertyCellMode ( Handle < PropertyCell > cell ) { <nl> } <nl> <nl> bool FeedbackNexus : : ConfigureLexicalVarMode ( int script_context_index , <nl> - int context_slot_index ) { <nl> + int context_slot_index , <nl> + bool immutable ) { <nl> DCHECK ( IsGlobalICKind ( kind ( ) ) ) ; <nl> DCHECK_LE ( 0 , script_context_index ) ; <nl> DCHECK_LE ( 0 , context_slot_index ) ; <nl> if ( ! ContextIndexBits : : is_valid ( script_context_index ) | | <nl> - ! SlotIndexBits : : is_valid ( context_slot_index ) ) { <nl> + ! SlotIndexBits : : is_valid ( context_slot_index ) | | <nl> + ! ImmutabilityBit : : is_valid ( immutable ) ) { <nl> return false ; <nl> } <nl> int config = ContextIndexBits : : encode ( script_context_index ) | <nl> - SlotIndexBits : : encode ( context_slot_index ) ; <nl> + SlotIndexBits : : encode ( context_slot_index ) | <nl> + ImmutabilityBit : : encode ( immutable ) ; <nl> + <nl> + / / Force { config } to be in Smi range by propagating the most significant Smi <nl> + / / bit . This does not change any of the bitfield ' s bits . <nl> + config = ( config < < ( 32 - kSmiValueSize ) ) > > ( 32 - kSmiValueSize ) ; <nl> <nl> SetFeedback ( Smi : : FromInt ( config ) ) ; <nl> Isolate * isolate = GetIsolate ( ) ; <nl> mmm a / src / feedback - vector . h <nl> ppp b / src / feedback - vector . h <nl> class FeedbackNexus final { <nl> / / For Global Load and Store ICs . <nl> void ConfigurePropertyCellMode ( Handle < PropertyCell > cell ) ; <nl> / / Returns false if given combination of indices is not allowed . <nl> - bool ConfigureLexicalVarMode ( int script_context_index , <nl> - int context_slot_index ) ; <nl> + bool ConfigureLexicalVarMode ( int script_context_index , int context_slot_index , <nl> + bool immutable ) ; <nl> void ConfigureHandlerMode ( const MaybeObjectHandle & handler ) ; <nl> <nl> / / For CloneObject ICs <nl> class FeedbackNexus final { <nl> / / Bit positions in a smi that encodes lexical environment variable access . <nl> # define LEXICAL_MODE_BIT_FIELDS ( V , _ ) \ <nl> V ( ContextIndexBits , unsigned , 12 , _ ) \ <nl> - V ( SlotIndexBits , unsigned , 19 , _ ) <nl> + V ( SlotIndexBits , unsigned , 18 , _ ) \ <nl> + V ( ImmutabilityBit , bool , 1 , _ ) <nl> <nl> DEFINE_BIT_FIELDS ( LEXICAL_MODE_BIT_FIELDS ) <nl> # undef LEXICAL_MODE_BIT_FIELDS <nl> mmm a / src / ic / ic . cc <nl> ppp b / src / ic / ic . cc <nl> MaybeHandle < Object > LoadGlobalIC : : Load ( Handle < Name > name ) { <nl> <nl> bool use_ic = ( state ( ) ! = NO_FEEDBACK ) & & FLAG_use_ic ; <nl> if ( use_ic ) { <nl> - if ( nexus ( ) - > ConfigureLexicalVarMode ( lookup_result . context_index , <nl> - lookup_result . slot_index ) ) { <nl> + if ( nexus ( ) - > ConfigureLexicalVarMode ( <nl> + lookup_result . context_index , lookup_result . slot_index , <nl> + lookup_result . mode = = VariableMode : : kConst ) ) { <nl> TRACE_HANDLER_STATS ( isolate ( ) , LoadGlobalIC_LoadScriptContextField ) ; <nl> } else { <nl> / / Given combination of indices can ' t be encoded , so use slow stub . <nl> MaybeHandle < Object > StoreGlobalIC : : Store ( Handle < Name > name , <nl> <nl> bool use_ic = ( state ( ) ! = NO_FEEDBACK ) & & FLAG_use_ic ; <nl> if ( use_ic ) { <nl> - if ( nexus ( ) - > ConfigureLexicalVarMode ( lookup_result . context_index , <nl> - lookup_result . slot_index ) ) { <nl> + if ( nexus ( ) - > ConfigureLexicalVarMode ( <nl> + lookup_result . context_index , lookup_result . slot_index , <nl> + lookup_result . 
mode = = VariableMode : : kConst ) ) { <nl> TRACE_HANDLER_STATS ( isolate ( ) , StoreGlobalIC_StoreScriptContextField ) ; <nl> } else { <nl> / / Given combination of indices can ' t be encoded , so use slow stub . <nl> | Reland ^ 3 " [ turbofan ] Use feedback when reducing global loads / stores . " | v8/v8 | 8683116e64335a99ecc403c509587410db070cd1 | 2019-01-14T10:41:55Z |
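The v8 change above packs a context index, a slot index, and an immutability flag into one Smi-ranged integer, then propagates the top payload bit into the sign bit so the value round-trips through Smi tagging. A hedged standalone sketch of that encode/decode scheme, with bit widths copied from the diff and all helper names invented:

    #include <cassert>
    #include <cstdint>

    // Layout per the diff: 12 context-index bits, then 18 slot-index bits,
    // then 1 immutability bit, i.e. 31 payload bits (a 31-bit Smi).
    constexpr int kContextBits = 12, kSlotBits = 18, kSmiValueSize = 31;

    int32_t Encode(int context_index, int slot_index, bool immutable) {
        uint32_t config = static_cast<uint32_t>(context_index)
                        | (static_cast<uint32_t>(slot_index) << kContextBits)
                        | (static_cast<uint32_t>(immutable) << (kContextBits + kSlotBits));
        // Same trick as the diff: shift the top payload bit into the sign
        // position and arithmetically shift back, replicating bit 30 into bit 31.
        return static_cast<int32_t>(config << (32 - kSmiValueSize)) >> (32 - kSmiValueSize);
    }

    int DecodeSlotIndex(int32_t config) {
        return (config >> kContextBits) & ((1 << kSlotBits) - 1);
    }

    int main() {
        int32_t c = Encode(3, 77, true);
        assert(c < 0);                    // the immutability bit forced the sign
        assert(DecodeSlotIndex(c) == 77); // payload bits are unchanged
    }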
new file mode 100644 <nl> index 00000000000 . . 47838e20766 <nl> mmm / dev / null <nl> ppp b / src / arch / conn_streambuf . hpp <nl> <nl> + # ifndef __ARCH_CONN_STREAMBUF_HPP__ <nl> + # define __ARCH_CONN_STREAMBUF_HPP__ <nl> + <nl> + / * This file contains an std : : streambuf implementation that works <nl> + with a tcp_conn_t object . This allows us to create std : : ostream and std : : istream <nl> + objects that interact directly with a tcp_conn_t TCP connection . For example <nl> + boost serialize requires streams for serializing data . <nl> + This implementation does read buffering itself ( where the buffering logic is <nl> + inherited from basic_streambuf ) . This is mostly because linux_tcp_conn_t : : pop <nl> + is badly implemented and basic_streambuf has that logic implemented anyway . <nl> + Eventually we should rather fix buffering in the tcp_conn_t implementation and <nl> + make this synchronous . * / <nl> + <nl> + # include < streambuf > <nl> + # include " arch / arch . hpp " <nl> + <nl> + / * <nl> + Usage example : <nl> + tcp_conn_t conn ( . . . ) ; <nl> + tcp_conn_streambuf_t streambuf ( & conn ) ; <nl> + std : : iostream stream ( & streambuf ) ; <nl> + stream < < " Hi , what ' s your name ? " < < std : : endl ; <nl> + std : : string name ; <nl> + stream > > name ; <nl> + stream < < " Welcome " < < name < < " ! " < < std : : endl ; <nl> + stream < < " How old are you ? " < < std : : endl ; <nl> + int age ; <nl> + stream > > age ; <nl> + stream < < age < < " is a nice age . " < < std : : endl ; <nl> + * / <nl> + <nl> + class tcp_conn_streambuf_t : public std : : basic_streambuf < char , std : : char_traits < char > > { <nl> + public : <nl> + tcp_conn_streambuf_t ( tcp_conn_t * conn ) : conn ( conn ) { <nl> + / / Initialize basic_streambuf , with gets being buffered and puts being unbuffered <nl> + setg ( & get_buf [ 0 ] , & get_buf [ 0 ] , & get_buf [ 0 ] ) ; <nl> + setp ( 0 , 0 ) ; <nl> + } <nl> + <nl> + private : <nl> + tcp_conn_t * conn ; <nl> + <nl> + protected : <nl> + / / Implementation of basic_streambuf methods <nl> + virtual int underflow ( ) { <nl> + if ( gptr ( ) > = egptr ( ) ) { <nl> + / / No data left in buffer , retrieve new data <nl> + try { <nl> + / / Read up to GET_BUF_LENGTH characters into get_buf <nl> + size_t bytes_read = conn - > read_some ( get_buf , GET_BUF_LENGTH ) ; <nl> + rassert ( bytes_read > 0 ) ; <nl> + setg ( & get_buf [ 0 ] , & get_buf [ 0 ] , & get_buf [ bytes_read ] ) ; <nl> + } catch ( tcp_conn_t : : read_closed_exc_t & e ) { <nl> + return std : : char_traits < char > : : eof ( ) ; <nl> + } <nl> + } <nl> + <nl> + int i = std : : char_traits < char > : : to_int_type ( * gptr ( ) ) ; <nl> + return std : : char_traits < char > : : not_eof ( i ) ; <nl> + } <nl> + <nl> + virtual int overflow ( int i = std : : char_traits < char > : : eof ( ) ) { <nl> + if ( ! 
conn - > is_write_open ( ) ) { <nl> + return std : : char_traits < char > : : eof ( ) ; <nl> + } else if ( i = = std : : char_traits < char > : : eof ( ) ) { <nl> + return std : : char_traits < char > : : not_eof ( i ) ; <nl> + } else { <nl> + char c = static_cast < char > ( i ) ; <nl> + rassert ( static_cast < int > ( c ) = = i ) ; <nl> + try { <nl> + conn - > write_buffered ( & c , 1 ) ; <nl> + } catch ( tcp_conn_t : : write_closed_exc_t & e ) { <nl> + return std : : char_traits < char > : : eof ( ) ; <nl> + } <nl> + return std : : char_traits < char > : : not_eof ( i ) ; <nl> + } <nl> + } <nl> + <nl> + virtual int sync ( ) { <nl> + try { <nl> + conn - > flush_buffer ( ) ; <nl> + return 0 ; <nl> + } catch ( tcp_conn_t : : write_closed_exc_t & e ) { <nl> + return - 1 ; <nl> + } <nl> + } <nl> + <nl> + private : <nl> + / / Read buffer <nl> + static const size_t GET_BUF_LENGTH = 512 ; <nl> + char get_buf [ GET_BUF_LENGTH ] ; <nl> + } ; <nl> + <nl> + # endif <nl> | Provide a std : : streambuf wrapper around tcp_conn_t | rethinkdb/rethinkdb | bddbf26798a6f20d32151b86a87e3cb97d16ea9e | 2011-07-07T23:10:40Z |
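The rethinkdb file above shows the three hooks this kind of std : : streambuf wrapper needs; for reference, here is a minimal in-memory analogue of the same underflow/overflow/sync wiring, independent of tcp_conn_t (all names invented for illustration):

    #include <cstdio>
    #include <iostream>
    #include <streambuf>
    #include <string>

    // Toy streambuf: buffered gets from a fixed string, unbuffered puts to
    // stdout, mirroring the get/put split used by tcp_conn_streambuf_t above.
    class toy_streambuf_t : public std::streambuf {
        std::string src_ = "42 hello\n";
    public:
        toy_streambuf_t() { setg(&src_[0], &src_[0], &src_[0] + src_.size()); }
    protected:
        int underflow() override {
            // Called only once the get area is drained; no refill source here.
            return gptr() < egptr() ? traits_type::to_int_type(*gptr())
                                    : traits_type::eof();
        }
        int overflow(int c) override {
            if (c != traits_type::eof()) std::fputc(c, stdout);
            return traits_type::not_eof(c);
        }
        int sync() override { return std::fflush(stdout); }
    };

    int main() {
        toy_streambuf_t buf;
        std::iostream stream(&buf);
        int n; std::string word;
        stream >> n >> word;                      // drives underflow()
        stream << word << ' ' << n << std::endl;  // drives overflow()/sync()
    }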
mmm a / packignore <nl> ppp b / packignore <nl> vendor \ tmp <nl> appveyor . yml <nl> vendor \ cmder . sh <nl> vendor \ git - prompt . sh <nl> + config \ user - * <nl> | added config / user - * to packignore | cmderdev/cmder | 8601b0f9402457db068d01ff3db6f2149e871570 | 2015-11-24T22:06:39Z |
mmm a / Code / CryEngine / RenderDll / XRenderD3D9 / D3DTexturesStreaming . cpp <nl> ppp b / Code / CryEngine / RenderDll / XRenderD3D9 / D3DTexturesStreaming . cpp <nl> void CTexture : : StreamCopyMipsTexToMem ( int8 nStartMip , int8 nEndMip , bool bToDevi <nl> CryInterlockedAdd ( & CTexture : : s_nTexturesDataBytesUploaded , mh [ nLod ] . m_SideSize ) ; <nl> # endif <nl> / / TODO : batch upload ( instead of loop ) <nl> + const SResourceMemoryAlignment sourceAlignment = <nl> + { <nl> + CTexture : : TextureDataSize ( 1 , 1 , 1 , 1 , 1 , m_eSrcFormat , m_eSrcTileMode ) , <nl> + CTexture : : TextureDataSize ( nMipW , 1 , 1 , 1 , 1 , m_eSrcFormat , m_eSrcTileMode ) , <nl> + CTexture : : TextureDataSize ( nMipW , nMipH , 1 , 1 , 1 , m_eSrcFormat , m_eSrcTileMode ) , <nl> + CTexture : : TextureDataSize ( nMipW , nMipH , 1 , 1 , 1 , m_eSrcFormat , m_eSrcTileMode ) <nl> + } ; <nl> const SResourceMemoryMapping mapping = <nl> { <nl> - pDevTexture - > GetAlignment ( nDevTexMip ) , / / src alignment = = hardware alignment <nl> + sourceAlignment , <nl> { 0 , 0 , 0 , D3D11CalcSubresource ( nDevTexMip , iSide , nTexMips ) } , / / dst position <nl> { static_cast < UINT > ( nMipW ) , static_cast < UINT > ( nMipH ) , 1 , 1 } / / dst size <nl> } ; <nl> mmm a / Engine / Shaders / HWScripts / CryFX / FogVolume . cfx <nl> ppp b / Engine / Shaders / HWScripts / CryFX / FogVolume . cfx <nl> pixout FogVolumeEllipsoidPS ( v2f_ellipsoid IN ) <nl> float tE = t . y ; <nl> <nl> float sceneDepth = linearDepthTex . Load ( int3 ( IN . hPos . xy , 0 ) ) . x * CV_NearFarClipDist . y ; <nl> - float tI = sceneDepth / dot ( cameraLookDirInWS , - CV_CameraFrontVector ) ; <nl> + float tI = sceneDepth / dot ( cameraLookDirInWS , - CV_CameraFrontVector . xyz ) ; <nl> tI = max ( tS , min ( tI , tE ) ) ; / / clamp to range [ tS , tE ] <nl> <nl> float3 front = tS * cameraLookDirInWS + cameraPosInWS ; <nl> | ! B ( Gnm ) Fix texture loading of non - tiled textures . | CRYTEK/CRYENGINE | 8e89502cf0f638ef1160fa11d8e6a927309a4960 | 2019-03-26T12:41:52Z |
deleted file mode 100644 <nl> index e69de29bb2d . . 00000000000 <nl> | removed old snippet css | arangodb/arangodb | 5107cbb694c7e1c2cab7de1ee90c22fce1042a85 | 2014-03-21T14:51:47Z |
mmm a / include / taichi / common / interface . h <nl> ppp b / include / taichi / common / interface . h <nl> class Unit { <nl> <nl> virtual std : : string get_name ( ) const { return " unit " ; } <nl> <nl> - virtual std : : string general_action ( const Config & config ) { NOT_IMPLEMENTED ; } <nl> + virtual std : : string general_action ( const Config & config ) { NOT_IMPLEMENTED ; } <nl> } ; <nl> <nl> # define TC_IMPLEMENTATION_HOLDER_NAME ( T ) ImplementationHolder_ # # T <nl> mmm a / include / taichi / math / array_3d . h <nl> ppp b / include / taichi / math / array_3d . h <nl> class ArrayND < 3 , T > { <nl> T * data ; <nl> int offset ; <nl> <nl> - TC_FORCE_INLINE Accessor2D ( T * data , int offset ) : data ( data ) , offset ( offset ) { } <nl> + TC_FORCE_INLINE Accessor2D ( T * data , int offset ) <nl> + : data ( data ) , offset ( offset ) { } <nl> <nl> TC_FORCE_INLINE T * operator [ ] ( int i ) const { return data + offset * i ; } <nl> } ; <nl> class ArrayND < 3 , T > { <nl> const T * data ; <nl> int offset ; <nl> <nl> - TC_FORCE_INLINE ConstAccessor2D ( const T * data , int offset ) : data ( data ) , offset ( offset ) { } <nl> + TC_FORCE_INLINE ConstAccessor2D ( const T * data , int offset ) <nl> + : data ( data ) , offset ( offset ) { } <nl> <nl> - TC_FORCE_INLINE const T * operator [ ] ( int i ) const { return data + offset * i ; } <nl> + TC_FORCE_INLINE const T * operator [ ] ( int i ) const { <nl> + return data + offset * i ; <nl> + } <nl> } ; <nl> <nl> public : <nl> class ArrayND < 3 , T > { <nl> TC_FORCE_INLINE const Region3D & get_region ( ) const { return region ; } <nl> <nl> TC_FORCE_INLINE ArrayND ( const Vector3i & resolution , <nl> - T init = T ( 0 ) , <nl> - Vector3 storage_offset = Vector3 ( 0 . 5f ) ) { <nl> + T init = T ( 0 ) , <nl> + Vector3 storage_offset = Vector3 ( 0 . 5f ) ) { <nl> initialize ( resolution , init , storage_offset ) ; <nl> } <nl> <nl> class ArrayND < 3 , T > { <nl> <nl> auto end ( ) const { return data . cend ( ) ; } <nl> <nl> - TC_FORCE_INLINE T & operator [ ] ( const Vector3i & pos ) { return ( * this ) [ pos . x ] [ pos . y ] [ pos . z ] ; } <nl> + TC_FORCE_INLINE T & operator [ ] ( const Vector3i & pos ) { <nl> + return ( * this ) [ pos . x ] [ pos . y ] [ pos . z ] ; <nl> + } <nl> <nl> TC_FORCE_INLINE const T & operator [ ] ( const Vector3i & pos ) const { <nl> return ( * this ) [ pos . x ] [ pos . y ] [ pos . z ] ; <nl> mmm a / include / taichi / system / threading . h <nl> ppp b / include / taichi / system / threading . h <nl> class ThreadedTaskManager { <nl> } ; <nl> <nl> class PID { <nl> - public : <nl> + public : <nl> static int get_pid ( ) { return ( int ) getpid ( ) ; } <nl> static int get_parent_pid ( ) { return ( int ) getppid ( ) ; } <nl> } ; <nl> mmm a / python / examples / server / main . py <nl> ppp b / python / examples / server / main . py <nl> <nl> app = tc . get_pakua_server ( ) <nl> <nl> app . run ( ) <nl> - <nl> mmm a / python / taichi / core / load_core . py <nl> ppp b / python / taichi / core / load_core . py <nl> <nl> sys . path . append ( bin_dir ) <nl> shutil . copy ( ' libtaichi_core . so ' , ' taichi_core . so ' ) <nl> try : <nl> - import taichi_core as tc_core <nl> + import taichi_core as tc_core <nl> except Exception as e : <nl> print ( ) <nl> print ( " \ 033 [ 91m * Please make sure you are using python3 " <nl> mmm a / python / taichi / pakua / server . py <nl> ppp b / python / taichi / pakua / server . py <nl> <nl> <nl> app = Flask ( __name__ ) <nl> <nl> + <nl> @ app . 
route ( ' / ' ) <nl> def browse_outputs ( ) : <nl> output_dir = get_output_directory ( ) <nl> def browse_outputs ( ) : <nl> entries = [ ] <nl> for d in dirs : <nl> entries . append ( { <nl> - ' title ' : d , <nl> - ' text ' : ' ' , <nl> + ' title ' : d , <nl> + ' text ' : ' ' , <nl> } ) <nl> return render_template ( ' browser . html ' , entries = entries ) <nl> <nl> def view ( folder ) : <nl> output_dir = get_output_directory ( ) <nl> return render_template ( ' view . html ' , folder = folder ) <nl> <nl> + <nl> def get_pakua_server ( ) : <nl> - return app <nl> \ No newline at end of file <nl> + return app <nl> mmm a / src / python / export_misc . cpp <nl> ppp b / src / python / export_misc . cpp <nl> void export_misc ( py : : module & m ) { <nl> m . def ( " test_raise_error " , test_raise_error ) ; <nl> m . def ( " test_volumetric_io " , test_volumetric_io ) ; <nl> m . def ( " config_from_dict " , config_from_py_dict ) ; <nl> - / / m . def ( " dict_from_config " , py_dict_from_py_config ) ; <nl> + / / m . def ( " dict_from_config " , py_dict_from_py_config ) ; <nl> m . def ( " print_profile_info " , <nl> [ & ] ( ) { ProfilerRecords : : get_instance ( ) . print ( ) ; } ) ; <nl> } <nl> | Format issues | taichi-dev/taichi | 9dd665819f662af886638bfdbd2ccd3b6b03fe82 | 2017-10-06T18:29:29Z |
mmm a / include / leveldb / options . h <nl> ppp b / include / leveldb / options . h <nl> struct Options { <nl> / / space if the same key space is being repeatedly overwritten . <nl> int max_mem_compaction_level ; <nl> <nl> - / / Target file size for compaction . Target file size for level L is <nl> - / / ( target_file_size_base ) ^ ( target_file_size_multiplier ) . <nl> - / / For example , if target_file_size_base is 20MB and <nl> - / / target_file_size_multiplier is 2 ^ 10 , then target file size on level 1 <nl> - / / will be 200MB , and wiil be 2GB on level 2 . <nl> - <nl> + / / Target file size for compaction . <nl> + / / target_file_size_base is per - file size for level - 1 . <nl> + / / Target file size for level L can be calculated by <nl> + / / target_file_size_base * ( target_file_size_multiplier ^ ( L - 1 ) ) <nl> + / / For example , if target_file_size_base is 2MB and <nl> + / / target_file_size_multiplier is 10 , then each file on level - 1 will <nl> + / / be 2MB , and each file on level 2 will be 20MB , <nl> + / / and each file on level - 3 will be 200MB . <nl> + <nl> + / / by default target_file_size_base is 2MB . <nl> int target_file_size_base ; <nl> + / / by default target_file_size_multiplier is 1 , which means <nl> + / / by default files in different levels will have similar size . <nl> int target_file_size_multiplier ; <nl> <nl> - / / Control maximum number of bytes in all compacted files for one level . <nl> - / / Maximum number of bytes for level L is <nl> - / / ( max_bytes_for_level_base ) ^ ( max_bytes_for_level_multiplier ) . <nl> + / / Control maximum total data size for a level . <nl> + / / max_bytes_for_level_base is the max total for level - 1 . <nl> + / / Maximum number of bytes for level L can be calculated as <nl> + / / ( max_bytes_for_level_base ) * ( max_bytes_for_level_multiplier ^ ( L - 1 ) ) <nl> + / / For example , if max_bytes_for_level_base is 20MB , and if <nl> + / / max_bytes_for_level_multiplier is 10 , total data size for level - 1 <nl> + / / will be 20MB , total file size for level - 2 will be 200MB , <nl> + / / and total file size for level - 3 will be 2GB . <nl> + <nl> <nl> + / / by default ' max_bytes_for_level_base ' is 10MB . <nl> int max_bytes_for_level_base ; <nl> + / / by default ' max_bytes_for_level_multiplier ' is 10 . <nl> int max_bytes_for_level_multiplier ; <nl> <nl> / / Maximum number of bytes in all compacted files . We avoid expanding <nl> | improve comments about target_file_size_base , target_file_size_multiplier , max_bytes_for_level_base , max_bytes_for_level_multiplier | facebook/rocksdb | 3662c2976a74d140c589066480ee53171882f624 | 2012-09-17T22:56:11Z |
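Both corrected comments above describe the same geometric progression; a small sketch making the arithmetic concrete (the example values are taken from the comments, the helper name is invented):

    #include <cstdint>
    #include <cstdio>

    // size(L) = base * multiplier^(L-1), as the comments above spell out.
    uint64_t level_size(uint64_t base, uint64_t multiplier, int level) {
        uint64_t size = base;
        for (int l = 1; l < level; ++l) size *= multiplier;
        return size;
    }

    int main() {
        const uint64_t MB = 1ULL << 20;
        // base 20MB, multiplier 10: 20MB, 200MB, 2GB for levels 1..3.
        for (int level = 1; level <= 3; ++level)
            std::printf("level-%d max bytes: %llu MB\n", level,
                        static_cast<unsigned long long>(level_size(20 * MB, 10, level) / MB));
    }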
mmm a / bench / bench . cpp <nl> ppp b / bench / bench . cpp <nl> void bench_threaded_logging ( int threads , int iters ) <nl> daily_mt_tracing - > enable_backtrace ( 32 ) ; <nl> bench_mt ( iters , std : : move ( daily_mt_tracing ) , threads ) ; <nl> <nl> - <nl> spdlog : : info ( " " ) ; <nl> auto empty_logger = std : : make_shared < spdlog : : logger > ( " level - off " ) ; <nl> empty_logger - > set_level ( spdlog : : level : : off ) ; <nl> void bench_threaded_logging ( int threads , int iters ) <nl> empty_logger_tracing - > set_level ( spdlog : : level : : off ) ; <nl> empty_logger_tracing - > enable_backtrace ( 32 ) ; <nl> bench ( iters , empty_logger_tracing ) ; <nl> - <nl> } <nl> <nl> void bench_single_threaded ( int iters ) <nl> void bench_single_threaded ( int iters ) <nl> spdlog : : info ( " Single threaded : { : n } messages " , iters ) ; <nl> spdlog : : info ( " * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * " ) ; <nl> <nl> - <nl> auto basic_st = spdlog : : basic_logger_st ( " basic_st " , " logs / basic_st . log " , true ) ; <nl> bench ( iters , std : : move ( basic_st ) ) ; <nl> <nl> new file mode 100644 <nl> index 00000000 . . 910a7192 <nl> mmm / dev / null <nl> ppp b / include / spdlog / details / backtracer - inl . h <nl> <nl> + / / Copyright ( c ) 2015 - present , Gabi Melman & spdlog contributors . <nl> + / / Distributed under the MIT License ( http : / / opensource . org / licenses / MIT ) <nl> + <nl> + # pragma once <nl> + <nl> + # ifndef SPDLOG_HEADER_ONLY <nl> + # include " spdlog / details / backtracer . h " <nl> + # endif <nl> + namespace spdlog { <nl> + namespace details { <nl> + SPDLOG_INLINE backtracer : : backtracer ( const backtracer & other ) <nl> + { <nl> + std : : lock_guard < std : : mutex > lock ( other . mutex_ ) ; <nl> + enabled_ = other . enabled ( ) ; <nl> + messages_ = other . messages_ ; <nl> + } <nl> + <nl> + SPDLOG_INLINE backtracer : : backtracer ( backtracer & & other ) SPDLOG_NOEXCEPT <nl> + { <nl> + std : : lock_guard < std : : mutex > lock ( other . mutex_ ) ; <nl> + enabled_ = other . enabled ( ) ; <nl> + messages_ = std : : move ( other . messages_ ) ; <nl> + } <nl> + <nl> + SPDLOG_INLINE backtracer & backtracer : : operator = ( backtracer other ) <nl> + { <nl> + std : : lock_guard < std : : mutex > lock ( mutex_ ) ; <nl> + enabled_ = other . enabled ( ) ; <nl> + messages_ = other . messages_ ; <nl> + return * this ; <nl> + } <nl> + <nl> + SPDLOG_INLINE void backtracer : : enable ( size_t size ) <nl> + { <nl> + std : : lock_guard < std : : mutex > lock { mutex_ } ; <nl> + enabled_ . store ( true , std : : memory_order_relaxed ) ; <nl> + messages_ = circular_q < log_msg_buffer > { size } ; <nl> + } <nl> + <nl> + SPDLOG_INLINE void backtracer : : disable ( ) <nl> + { <nl> + std : : lock_guard < std : : mutex > lock { mutex_ } ; <nl> + enabled_ . store ( false , std : : memory_order_relaxed ) ; <nl> + } <nl> + <nl> + SPDLOG_INLINE bool backtracer : : enabled ( ) const <nl> + { <nl> + return enabled_ . load ( std : : memory_order_relaxed ) ; <nl> + } <nl> + <nl> + SPDLOG_INLINE backtracer : : operator bool ( ) const <nl> + { <nl> + return enabled ( ) ; <nl> + } <nl> + <nl> + SPDLOG_INLINE void backtracer : : push_back ( const log_msg & msg ) <nl> + { <nl> + std : : lock_guard < std : : mutex > lock { mutex_ } ; <nl> + messages_ . push_back ( log_msg_buffer { msg } ) ; <nl> + } <nl> + <nl> + / / pop all items in the q and apply the given fun on each of them . 
<nl> + SPDLOG_INLINE void backtracer : : foreach_pop ( std : : function < void ( const details : : log_msg & ) > fun ) <nl> + { <nl> + std : : lock_guard < std : : mutex > lock { mutex_ } ; <nl> + while ( ! messages_ . empty ( ) ) <nl> + { <nl> + log_msg_buffer popped ; <nl> + messages_ . pop_front ( popped ) ; <nl> + fun ( popped ) ; <nl> + } <nl> + } <nl> + } / / namespace details <nl> + } / / namespace spdlog <nl> mmm a / include / spdlog / details / backtracer . h <nl> ppp b / include / spdlog / details / backtracer . h <nl> <nl> / / Useful for storing debug data in case of error / warning happens . <nl> <nl> namespace spdlog { <nl> - namespace details { <nl> - class backtracer <nl> - { <nl> - mutable std : : mutex mutex_ ; <nl> - std : : atomic < bool > enabled_ { false } ; <nl> - circular_q < log_msg_buffer > messages_ ; <nl> + namespace details { <nl> + class backtracer <nl> + { <nl> + mutable std : : mutex mutex_ ; <nl> + std : : atomic < bool > enabled_ { false } ; <nl> + circular_q < log_msg_buffer > messages_ ; <nl> <nl> - public : <nl> - backtracer ( ) = default ; <nl> - backtracer ( const backtracer & other ) <nl> - { <nl> - std : : lock_guard < std : : mutex > lock ( other . mutex_ ) ; <nl> - enabled_ = other . enabled ( ) ; <nl> - messages_ = other . messages_ ; <nl> - } <nl> + public : <nl> + backtracer ( ) = default ; <nl> + backtracer ( const backtracer & other ) ; <nl> <nl> - backtracer ( backtracer & & other ) SPDLOG_NOEXCEPT <nl> - { <nl> - std : : lock_guard < std : : mutex > lock ( other . mutex_ ) ; <nl> - enabled_ = other . enabled ( ) ; <nl> - messages_ = std : : move ( other . messages_ ) ; <nl> - } <nl> <nl> - backtracer & operator = ( backtracer other ) <nl> - { <nl> - std : : lock_guard < std : : mutex > lock ( mutex_ ) ; <nl> - enabled_ = other . enabled ( ) ; <nl> - messages_ = other . messages_ ; <nl> - return * this ; <nl> - } <nl> + backtracer ( backtracer & & other ) SPDLOG_NOEXCEPT ; <nl> + backtracer & operator = ( backtracer other ) ; <nl> + void enable ( size_t size ) ; <nl> + void disable ( ) ; <nl> + bool enabled ( ) const ; <nl> + explicit operator bool ( ) const ; <nl> + void push_back ( const log_msg & msg ) ; <nl> <nl> - void enable ( size_t size ) <nl> - { <nl> - std : : lock_guard < std : : mutex > lock { mutex_ } ; <nl> - enabled_ . store ( true , std : : memory_order_relaxed ) ; <nl> - messages_ = circular_q < log_msg_buffer > { size } ; <nl> - } <nl> + / / pop all items in the q and apply the given fun on each of them . <nl> + void foreach_pop ( std : : function < void ( const details : : log_msg & ) > fun ) ; <nl> <nl> + } ; <nl> <nl> - void disable ( ) <nl> - { <nl> - std : : lock_guard < std : : mutex > lock { mutex_ } ; <nl> - enabled_ . store ( false , std : : memory_order_relaxed ) ; <nl> - } <nl> + } / / namespace details <nl> + } / / namespace spdlog <nl> <nl> - <nl> - bool enabled ( ) const <nl> - { <nl> - return enabled_ . load ( std : : memory_order_relaxed ) ; <nl> - } <nl> - <nl> - explicit operator bool ( ) const <nl> - { <nl> - return enabled ( ) ; <nl> - } <nl> - <nl> - void push_back ( const log_msg & msg ) <nl> - { <nl> - std : : lock_guard < std : : mutex > lock { mutex_ } ; <nl> - messages_ . push_back ( log_msg_buffer { msg } ) ; <nl> - } <nl> - <nl> - / / pop all items in the q and apply the given fun on each of them . <nl> - void foreach_pop ( std : : function < void ( const details : : log_msg & ) > fun ) <nl> - { <nl> - std : : lock_guard < std : : mutex > lock { mutex_ } ; <nl> - while ( ! messages_ . 
empty ( ) ) <nl> - { <nl> - log_msg_buffer popped ; <nl> - messages_ . pop_front ( popped ) ; <nl> - fun ( popped ) ; <nl> - } <nl> - } <nl> - } ; <nl> - <nl> - } / / namespace details <nl> - } / / namespace spdlog <nl> \ No newline at end of file <nl> + # ifdef SPDLOG_HEADER_ONLY <nl> + # include " backtracer - inl . h " <nl> + # endif <nl> \ No newline at end of file <nl> mmm a / include / spdlog / details / circular_q . h <nl> ppp b / include / spdlog / details / circular_q . h <nl> <nl> # include < vector > <nl> <nl> namespace spdlog { <nl> - namespace details { <nl> - template < typename T > <nl> - class circular_q <nl> + namespace details { <nl> + template < typename T > <nl> + class circular_q <nl> + { <nl> + size_t max_items_ = 0 ; <nl> + typename std : : vector < T > : : size_type head_ = 0 ; <nl> + typename std : : vector < T > : : size_type tail_ = 0 ; <nl> + size_t overrun_counter_ = 0 ; <nl> + std : : vector < T > v_ ; <nl> + <nl> + public : <nl> + using item_type = T ; <nl> + <nl> + / / empty cir <nl> + circular_q ( ) = default ; <nl> + <nl> + explicit circular_q ( size_t max_items ) <nl> + : max_items_ ( max_items + 1 ) / / one item is reserved as marker for full q <nl> + , v_ ( max_items_ ) <nl> + { } <nl> + <nl> + circular_q ( const circular_q & ) = default ; <nl> + circular_q & operator = ( const circular_q & ) = default ; <nl> + <nl> + / / move cannot be default , <nl> + / / since we need to reset head_ , tail_ , etc to zero in the moved object <nl> + circular_q ( circular_q & & other ) SPDLOG_NOEXCEPT <nl> + { <nl> + copy_moveable ( std : : move ( other ) ) ; <nl> + } <nl> + <nl> + circular_q & operator = ( circular_q & & other ) SPDLOG_NOEXCEPT <nl> + { <nl> + copy_moveable ( std : : move ( other ) ) ; <nl> + return * this ; <nl> + } <nl> + <nl> + / / push back , overrun ( oldest ) item if no room left <nl> + void push_back ( T & & item ) <nl> + { <nl> + if ( max_items_ > 0 ) <nl> { <nl> - size_t max_items_ = 0 ; <nl> - typename std : : vector < T > : : size_type head_ = 0 ; <nl> - typename std : : vector < T > : : size_type tail_ = 0 ; <nl> - size_t overrun_counter_ = 0 ; <nl> - std : : vector < T > v_ ; <nl> + v_ [ tail_ ] = std : : move ( item ) ; <nl> + tail_ = ( tail_ + 1 ) % max_items_ ; <nl> <nl> - public : <nl> - using item_type = T ; <nl> - <nl> - / / empty cir <nl> - circular_q ( ) = default ; <nl> - <nl> - explicit circular_q ( size_t max_items ) <nl> - : max_items_ ( max_items + 1 ) / / one item is reserved as marker for full q <nl> - , v_ ( max_items_ ) <nl> - { } <nl> - <nl> - <nl> - <nl> - circular_q ( const circular_q & ) = default ; <nl> - circular_q & operator = ( const circular_q & ) = default ; <nl> - <nl> - / / move cannot be default , <nl> - / / since we need to reset head_ , tail_ , etc to zero in the moved object <nl> - circular_q ( circular_q & & other ) SPDLOG_NOEXCEPT <nl> - { <nl> - copy_moveable ( std : : move ( other ) ) ; <nl> - } <nl> - <nl> - circular_q & operator = ( circular_q & & other ) SPDLOG_NOEXCEPT <nl> - { <nl> - copy_moveable ( std : : move ( other ) ) ; <nl> - return * this ; <nl> - } <nl> - <nl> - <nl> - / / push back , overrun ( oldest ) item if no room left <nl> - void push_back ( T & & item ) <nl> - { <nl> - if ( max_items_ > 0 ) <nl> - { <nl> - v_ [ tail_ ] = std : : move ( item ) ; <nl> - tail_ = ( tail_ + 1 ) % max_items_ ; <nl> - <nl> - if ( tail_ = = head_ ) / / overrun last item if full <nl> - { <nl> - head_ = ( head_ + 1 ) % max_items_ ; <nl> - + + overrun_counter_ ; <nl> - } <nl> - } <nl> - } <nl> - <nl> - / / Pop 
item from front . <nl> - / / If there are no elements in the container , the behavior is undefined . <nl> - void pop_front ( T & popped_item ) <nl> + if ( tail_ = = head_ ) / / overrun last item if full <nl> { <nl> - if ( max_items_ > 0 ) <nl> - { <nl> - popped_item = std : : move ( v_ [ head_ ] ) ; <nl> - head_ = ( head_ + 1 ) % max_items_ ; <nl> - } <nl> + head_ = ( head_ + 1 ) % max_items_ ; <nl> + + + overrun_counter_ ; <nl> } <nl> - <nl> - bool empty ( ) <nl> - { <nl> - return tail_ = = head_ ; <nl> - } <nl> - <nl> - bool full ( ) <nl> - { <nl> - / / head is ahead of the tail by 1 <nl> - return ( ( tail_ + 1 ) % max_items_ ) = = head_ ; <nl> - } <nl> - <nl> - size_t overrun_counter ( ) const <nl> - { <nl> - return overrun_counter_ ; <nl> - } <nl> - <nl> - private : <nl> - void copy_moveable ( circular_q & & other ) SPDLOG_NOEXCEPT <nl> - { <nl> - max_items_ = other . max_items_ ; <nl> - head_ = other . head_ ; <nl> - tail_ = other . tail_ ; <nl> - overrun_counter_ = other . overrun_counter_ , <nl> - v_ = std : : move ( other . v_ ) ; <nl> - other . max_items_ = 0 ; / / disable other <nl> - } <nl> - <nl> - } ; <nl> - } / / namespace details <nl> + } <nl> + } <nl> + <nl> + / / Pop item from front . <nl> + / / If there are no elements in the container , the behavior is undefined . <nl> + void pop_front ( T & popped_item ) <nl> + { <nl> + if ( max_items_ > 0 ) <nl> + { <nl> + popped_item = std : : move ( v_ [ head_ ] ) ; <nl> + head_ = ( head_ + 1 ) % max_items_ ; <nl> + } <nl> + } <nl> + <nl> + bool empty ( ) <nl> + { <nl> + return tail_ = = head_ ; <nl> + } <nl> + <nl> + bool full ( ) <nl> + { <nl> + / / head is ahead of the tail by 1 <nl> + return ( ( tail_ + 1 ) % max_items_ ) = = head_ ; <nl> + } <nl> + <nl> + size_t overrun_counter ( ) const <nl> + { <nl> + return overrun_counter_ ; <nl> + } <nl> + <nl> + private : <nl> + void copy_moveable ( circular_q & & other ) SPDLOG_NOEXCEPT <nl> + { <nl> + max_items_ = other . max_items_ ; <nl> + head_ = other . head_ ; <nl> + tail_ = other . tail_ ; <nl> + overrun_counter_ = other . overrun_counter_ , v_ = std : : move ( other . v_ ) ; <nl> + other . max_items_ = 0 ; / / disable other <nl> + } <nl> + } ; <nl> + } / / namespace details <nl> } / / namespace spdlog <nl> mmm a / include / spdlog / logger - inl . h <nl> ppp b / include / spdlog / logger - inl . h <nl> SPDLOG_INLINE logger : : logger ( const logger & other ) <nl> , flush_level_ ( other . flush_level_ . load ( std : : memory_order_relaxed ) ) <nl> , custom_err_handler_ ( other . custom_err_handler_ ) <nl> , tracer_ ( other . tracer_ ) <nl> - { <nl> - } <nl> + { } <nl> <nl> SPDLOG_INLINE logger : : logger ( logger & & other ) SPDLOG_NOEXCEPT : name_ ( std : : move ( other . name_ ) ) , <nl> sinks_ ( std : : move ( other . sinks_ ) ) , <nl> SPDLOG_INLINE void logger : : swap ( spdlog : : logger & other ) SPDLOG_NOEXCEPT <nl> other . flush_level_ . store ( tmp ) ; <nl> <nl> custom_err_handler_ . swap ( other . custom_err_handler_ ) ; <nl> - std : : swap ( tracer_ , other . tracer_ ) ; <nl> + std : : swap ( tracer_ , other . tracer_ ) ; <nl> } <nl> <nl> SPDLOG_INLINE void swap ( logger & a , logger & b ) <nl> mmm a / include / spdlog / sinks / daily_file_sink . h <nl> ppp b / include / spdlog / sinks / daily_file_sink . 
h <nl> struct daily_filename_calculator <nl> { <nl> filename_t basename , ext ; <nl> std : : tie ( basename , ext ) = details : : file_helper : : split_by_extension ( filename ) ; <nl> - return fmt : : format ( SPDLOG_FILENAME_T ( " { } _ { : 04d } - { : 02d } - { : 02d } { } " ) , <nl> - basename , now_tm . tm_year + 1900 , now_tm . tm_mon + 1 , now_tm . tm_mday , ext ) ; <nl> + return fmt : : format ( <nl> + SPDLOG_FILENAME_T ( " { } _ { : 04d } - { : 02d } - { : 02d } { } " ) , basename , now_tm . tm_year + 1900 , now_tm . tm_mon + 1 , now_tm . tm_mday , ext ) ; <nl> } <nl> } ; <nl> <nl> mmm a / include / spdlog / sinks / rotating_file_sink - inl . h <nl> ppp b / include / spdlog / sinks / rotating_file_sink - inl . h <nl> SPDLOG_INLINE rotating_file_sink < Mutex > : : rotating_file_sink ( <nl> template < typename Mutex > <nl> SPDLOG_INLINE filename_t rotating_file_sink < Mutex > : : calc_filename ( const filename_t & filename , std : : size_t index ) <nl> { <nl> - if ( index = = 0u ) <nl> - { <nl> - return filename ; <nl> - } <nl> + if ( index = = 0u ) <nl> + { <nl> + return filename ; <nl> + } <nl> <nl> - filename_t basename , ext ; <nl> + filename_t basename , ext ; <nl> std : : tie ( basename , ext ) = details : : file_helper : : split_by_extension ( filename ) ; <nl> return fmt : : format ( SPDLOG_FILENAME_T ( " { } . { } { } " ) , basename , index , ext ) ; <nl> } <nl> mmm a / src / spdlog . cpp <nl> ppp b / src / spdlog . cpp <nl> <nl> <nl> # include " spdlog / spdlog - inl . h " <nl> # include " spdlog / common - inl . h " <nl> - <nl> + # include " spdlog / details / backtracer - inl . h " <nl> # include " spdlog / logger - inl . h " <nl> template spdlog : : logger : : logger ( std : : string name , sinks_init_list : : iterator begin , sinks_init_list : : iterator end ) ; <nl> <nl> mmm a / tests / test_misc . cpp <nl> ppp b / tests / test_misc . cpp <nl> <nl> # include " test_sink . h " <nl> # include " spdlog / fmt / bin_to_hex . h " <nl> <nl> - <nl> template < class T > <nl> std : : string log_info ( const T & what , spdlog : : level : : level_enum logger_level = spdlog : : level : : info ) <nl> { <nl> TEST_CASE ( " clone - logger " , " [ clone ] " ) <nl> logger - > info ( " Some message 1 " ) ; <nl> cloned - > info ( " Some message 2 " ) ; <nl> <nl> - REQUIRE ( test_sink - > lines ( ) . size ( ) = = 2 ) ; <nl> + REQUIRE ( test_sink - > lines ( ) . size ( ) = = 2 ) ; <nl> REQUIRE ( test_sink - > lines ( ) [ 0 ] = = " Some message 1 " ) ; <nl> REQUIRE ( test_sink - > lines ( ) [ 1 ] = = " Some message 2 " ) ; <nl> <nl> TEST_CASE ( " clone async " , " [ clone ] " ) <nl> using namespace spdlog ; <nl> <nl> spdlog : : init_thread_pool ( 4 , 1 ) ; <nl> - auto test_sink = std : : make_shared < sinks : : test_sink_st > ( ) ; <nl> + auto test_sink = std : : make_shared < sinks : : test_sink_st > ( ) ; <nl> auto logger = std : : make_shared < spdlog : : async_logger > ( " orig " , test_sink , spdlog : : thread_pool ( ) ) ; <nl> logger - > set_pattern ( " % v " ) ; <nl> auto cloned = logger - > clone ( " clone " ) ; <nl> TEST_CASE ( " clone async " , " [ clone ] " ) <nl> <nl> spdlog : : details : : os : : sleep_for_millis ( 10 ) ; <nl> <nl> - REQUIRE ( test_sink - > lines ( ) . size ( ) = = 2 ) ; <nl> + REQUIRE ( test_sink - > lines ( ) . 
size ( ) = = 2 ) ; <nl> REQUIRE ( test_sink - > lines ( ) [ 0 ] = = " Some message 1 " ) ; <nl> REQUIRE ( test_sink - > lines ( ) [ 1 ] = = " Some message 2 " ) ; <nl> <nl> | wip backtracer | gabime/spdlog | 5c2855e1c1fbbf4f338f4d08427507336d5a48f5 | 2019-09-04T22:25:00Z |
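The bench code in the spdlog diff above already shows the intended call pattern for the backtracer; a hedged usage sketch follows. enable_backtrace appears in the diff itself, while the dump_backtrace call is the name the released spdlog API settled on and may not exist yet in this wip commit; the logger/file names are illustrative.

    #include "spdlog/spdlog.h"
    #include "spdlog/sinks/basic_file_sink.h"

    int main() {
        auto logger = spdlog::basic_logger_st("app", "logs/app.log", true);
        logger->enable_backtrace(32);          // keep the last 32 messages in a ring
        for (int i = 0; i < 1000; ++i)
            logger->debug("iteration {}", i);  // below default level: stored, not written
        logger->error("something failed");
        logger->dump_backtrace();              // replay the stored messages now
    }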
mmm a / dbms / src / Interpreters / DDLWorker . cpp <nl> ppp b / dbms / src / Interpreters / DDLWorker . cpp <nl> void DDLWorker : : processTasks ( ) <nl> { <nl> } <nl> catch ( . . . ) <nl> { <nl> - auto status = ExecutionStatus : : fromCurrentException ( ) ; <nl> - / / / We even cannot parse host name and can ' t properly submit execution status . <nl> + / / / We even cannot parse host name and therefore cannot properly submit execution status . <nl> / / / What should we do ? <nl> + / / / We can try to create the fail node using the FQDN ; if it is equal to the host name in the cluster config , the attempt will be successful . <nl> + / / / Otherwise , that node will be ignored by DDLQueryStatusInputSream . <nl> + <nl> + tryLogCurrentException ( log , " Cannot parse DDL task " + node_data + " , will try to send error status " ) ; <nl> + <nl> + ExecutionStatus status = ExecutionStatus : : fromCurrentException ( ) ; <nl> + String host_id = task . host_id_in_cluster . empty ( ) ? host_fqdn_id : task . host_id_in_cluster ; <nl> + <nl> + createStatusDirs ( node_path ) ; <nl> + zookeeper - > create ( node_path + " / finished / " + host_id , current_node_execution_status . serializeText ( ) , zkutil : : CreateMode : : Persistent ) ; <nl> + <nl> + last_processed_node_name = node_name ; <nl> + continue ; <nl> + } <nl> + <nl> const auto & hosts = task . entry . hosts ; <nl> class DDLQueryStatusInputSream : public IProfilingBlockInputStream <nl> { <nl> public : <nl> <nl> - DDLQueryStatusInputSream ( const String & zk_node_path , Context & context , size_t num_hosts ) <nl> + DDLQueryStatusInputSream ( const String & zk_node_path , const DDLLogEntry & entry , Context & context ) <nl> : node_path ( zk_node_path ) , context ( context ) , watch ( CLOCK_MONOTONIC_COARSE ) <nl> { <nl> sample = Block { <nl> { std : : make_shared < DataTypeString > ( ) , " host " } , <nl> + { std : : make_shared < DataTypeUInt16 > ( ) , " port " } , <nl> { std : : make_shared < DataTypeUInt64 > ( ) , " status " } , <nl> { std : : make_shared < DataTypeString > ( ) , " error " } , <nl> { std : : make_shared < DataTypeUInt64 > ( ) , " num_hosts_remaining " } , <nl> { std : : make_shared < DataTypeUInt64 > ( ) , " num_hosts_active " } , <nl> } ; <nl> <nl> - setTotalRowsApprox ( num_hosts ) ; <nl> + waiting_hosts . insert ( entry . hosts . cbegin ( ) , entry . hosts . cend ( ) ) ; <nl> + setTotalRowsApprox ( entry . hosts . size ( ) ) ; <nl> } <nl> <nl> String getName ( ) const override <nl> class DDLQueryStatusInputSream : public IProfilingBlockInputStream <nl> ErrorCodes : : UNFINISHED ) ; <nl> } <nl> <nl> - Strings new_hosts = getNewAndUpdate ( finished_hosts_set , getChildrenAllowNoNode ( zookeeper , node_path + " / finished " ) ) ; <nl> + Strings new_hosts = getNewAndUpdate ( getChildrenAllowNoNode ( zookeeper , node_path + " / finished " ) ) ; <nl> + + try_number ; <nl> if ( new_hosts . empty ( ) ) <nl> continue ; <nl> class DDLQueryStatusInputSream : public IProfilingBlockInputStream <nl> Cluster : : Address : : fromString ( host_id , host , port ) ; <nl> <nl> res . getByName ( " host " ) . column - > insert ( host ) ; <nl> + res . getByName ( " port " ) . column - > insert ( port ) ; <nl> res . getByName ( " status " ) . column - > insert ( static_cast < UInt64 > ( status . code ) ) ; <nl> res . getByName ( " error " ) . column - > insert ( status . message ) ; <nl> res . getByName ( " num_hosts_remaining " ) .
column - > insert ( total_rows_approx - ( + + num_hosts_finished ) ) ; <nl> class DDLQueryStatusInputSream : public IProfilingBlockInputStream <nl> return res ; <nl> } <nl> <nl> - static Strings getNewAndUpdate ( NameSet & prev , const Strings & cur_list ) <nl> + Strings getNewAndUpdate ( const Strings & current_list_of_finished_hosts ) <nl> { <nl> Strings diff ; <nl> - for ( const String & elem : cur_list ) <nl> + for ( const String & host : current_list_of_finished_hosts ) <nl> { <nl> - if ( ! prev . count ( elem ) ) <nl> + if ( ! waiting_hosts . count ( host ) ) <nl> + { <nl> + if ( ! ignoring_hosts . count ( host ) ) <nl> + { <nl> + ignoring_hosts . emplace ( host ) ; <nl> + LOG_INFO ( log , " Unexpected host " < < host < < " appeared " < < " in task " < < node_path ) ; <nl> + } <nl> + continue ; <nl> + } <nl> + <nl> + if ( ! finished_hosts . count ( host ) ) <nl> { <nl> - diff . emplace_back ( elem ) ; <nl> - prev . emplace ( elem ) ; <nl> + diff . emplace_back ( host ) ; <nl> + finished_hosts . emplace ( host ) ; <nl> } <nl> } <nl> <nl> class DDLQueryStatusInputSream : public IProfilingBlockInputStream <nl> String node_path ; <nl> Context & context ; <nl> <nl> - Stopwatch watch ; <nl> - <nl> - NameSet finished_hosts_set ; <nl> + NameSet waiting_hosts ; / / / hosts from task host list <nl> + NameSet finished_hosts ; / / / finished hosts from host list <nl> + NameSet ignoring_hosts ; / / / appeared hosts that are not in hosts list <nl> size_t num_hosts_finished = 0 ; <nl> + <nl> + Stopwatch watch ; <nl> } ; <nl> <nl> <nl> BlockIO executeDDLQueryOnCluster ( const ASTPtr & query_ptr , Context & context ) <nl> if ( node_path . empty ( ) ) <nl> return io ; <nl> <nl> - auto stream = std : : make_shared < DDLQueryStatusInputSream > ( node_path , context , entry . hosts . size ( ) ) ; <nl> + auto stream = std : : make_shared < DDLQueryStatusInputSream > ( node_path , entry , context ) ; <nl> io . in_sample = stream - > sample . cloneEmpty ( ) ; <nl> io . in = std : : move ( stream ) ; <nl> return io ; <nl> | Add parse error handling . [ # CLICKHOUSE - 3128 ] | ClickHouse/ClickHouse | 7e8f3a0561535ece2c7f5e179bc523395914d1db | 2017-08-13T09:18:46Z |
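getNewAndUpdate in the ClickHouse diff above turns repeated polls of the finished / znode into a stream of newly finished hosts, while unexpected hosts are quarantined and logged once. A simplified standalone sketch of that pattern (std:: containers substituted for the ClickHouse types, names invented):

    #include <set>
    #include <string>
    #include <vector>

    struct HostTracker {
        std::set<std::string> waiting;    // hosts named in the task
        std::set<std::string> finished;   // hosts already reported
        std::set<std::string> ignoring;   // unexpected hosts, reported once

        // Returns the hosts that are expected and seen for the first time.
        std::vector<std::string> newAndUpdate(const std::vector<std::string>& polled) {
            std::vector<std::string> diff;
            for (const auto& host : polled) {
                if (!waiting.count(host)) { ignoring.insert(host); continue; }
                if (finished.insert(host).second) diff.push_back(host);
            }
            return diff;
        }
    };

Feeding every poll result through newAndUpdate yields each host exactly once, which is what lets the status stream emit incremental per-host result rows.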
--- a/hphp/runtime/ext/ext_collection.cpp
+++ b/hphp/runtime/ext/ext_collection.cpp
@@ ... @@ Object c_Vector::t_put(CVarRef key, CVarRef value) {
   return this;
 }

-Variant c_Vector::ti_fromarray(const char* cls, CVarRef arr) {
+Object c_Vector::ti_fromarray(const char* cls, CVarRef arr) {
   if (!arr.isArray()) {
     Object e(SystemLib::AllocInvalidArgumentExceptionObject(
       "Parameter arr must be an array"));
@@ ... @@ Variant c_Vector::ti_fromarray(const char* cls, CVarRef arr) {
   return ret;
 }

-Variant c_Vector::ti_fromvector(const char* cls, CVarRef vec) {
+Object c_Vector::ti_fromvector(const char* cls, CVarRef vec) {
   if (!vec.isObject()) {
     Object e(SystemLib::AllocInvalidArgumentExceptionObject(
       "vec must be an instance of Vector"));
@@ ... @@ Object c_Map::t_getiterator() {
   return it;
 }

-Variant c_Map::ti_fromarray(const char* cls, CVarRef arr) {
+Object c_Map::ti_fromarray(const char* cls, CVarRef arr) {
   if (!arr.isArray()) {
     Object e(SystemLib::AllocInvalidArgumentExceptionObject(
       "Parameter arr must be an array"));
@@ ... @@ Variant c_Map::ti_fromarray(const char* cls, CVarRef arr) {
   return ret;
 }

-Variant c_Map::ti_fromiterable(const char* cls, CVarRef it) {
+Object c_Map::ti_fromiterable(const char* cls, CVarRef it) {
   if (!it.isObject()) {
     Object e(SystemLib::AllocInvalidArgumentExceptionObject(
       "Parameter it must be an instance of Iterable"));
@@ ... @@ Object c_StableMap::t_getiterator() {
   return it;
 }

-Variant c_StableMap::ti_fromarray(const char* cls, CVarRef arr) {
+Object c_StableMap::ti_fromarray(const char* cls, CVarRef arr) {
   if (!arr.isArray()) {
     Object e(SystemLib::AllocInvalidArgumentExceptionObject(
       "Parameter arr must be an array"));
@@ ... @@ Variant c_StableMap::ti_fromarray(const char* cls, CVarRef arr) {
   return ret;
 }

-Variant c_StableMap::ti_fromiterable(const char* cls, CVarRef it) {
+Object c_StableMap::ti_fromiterable(const char* cls, CVarRef it) {
   if (!it.isObject()) {
     Object e(SystemLib::AllocInvalidArgumentExceptionObject(
       "Parameter it must be an instance of Iterable"));
--- a/hphp/runtime/ext/ext_collection.h
+++ b/hphp/runtime/ext/ext_collection.h
@@ ... @@ class c_Vector : public ExtObjectDataFlags<ObjectData::VectorAttrInit|
  public: Variant t___set(Variant name, Variant value);
  public: bool t___isset(Variant name);
  public: Variant t___unset(Variant name);
- public: static Variant ti_fromarray(const char* cls, CVarRef arr);
- public: static Variant t_fromarray(CVarRef arr) {
+ public: static Object ti_fromarray(const char* cls, CVarRef arr);
+ public: static Object t_fromarray(CVarRef arr) {
    return ti_fromarray("vector", arr);
  }
- public: static Variant ti_fromvector(const char* cls, CVarRef vec);
- public: static Variant t_fromvector(CVarRef vec) {
+ public: static Object ti_fromvector(const char* cls, CVarRef vec);
+ public: static Object t_fromvector(CVarRef vec) {
    return ti_fromvector("vector", vec);
  }
  public: static Variant ti_slice(const char* cls, CVarRef vec, CVarRef offset,
@@ ... @@ class c_Map : public ExtObjectDataFlags<ObjectData::MapAttrInit|
  public: Variant t___set(Variant name, Variant value);
  public: bool t___isset(Variant name);
  public: Variant t___unset(Variant name);
- public: static Variant ti_fromarray(const char* cls, CVarRef arr);
- public: static Variant t_fromarray(CVarRef arr) {
+ public: static Object ti_fromarray(const char* cls, CVarRef arr);
+ public: static Object t_fromarray(CVarRef arr) {
    return ti_fromarray("map", arr);
  }
- public: static Variant ti_fromiterable(const char* cls, CVarRef vec);
- public: static Variant t_fromiterable(CVarRef vec) {
+ public: static Object ti_fromiterable(const char* cls, CVarRef vec);
+ public: static Object t_fromiterable(CVarRef vec) {
    return ti_fromiterable("map", vec);
  }

@@ ... @@ class c_StableMap : public ExtObjectDataFlags<ObjectData::StableMapAttrInit|
  public: Variant t___set(Variant name, Variant value);
  public: bool t___isset(Variant name);
  public: Variant t___unset(Variant name);
- public: static Variant ti_fromarray(const char* cls, CVarRef arr);
- public: static Variant t_fromarray(CVarRef arr) {
+ public: static Object ti_fromarray(const char* cls, CVarRef arr);
+ public: static Object t_fromarray(CVarRef arr) {
    return ti_fromarray("map", arr);
  }
- public: static Variant ti_fromiterable(const char* cls, CVarRef vec);
- public: static Variant t_fromiterable(CVarRef vec) {
+ public: static Object ti_fromiterable(const char* cls, CVarRef vec);
+ public: static Object t_fromiterable(CVarRef vec) {
    return ti_fromiterable("map", vec);
  }

| Fix return type in collection fromArray/Vector/Iterable() methods | facebook/hhvm | 65119c99a07ecb743ebecd6f18bc80576abfc3c2 | 2013-03-09T01:52:30Z |
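This change only tightens declared return types: each of these factories either throws or returns a collection object, so advertising Variant forced callers to handle states the functions can never produce. A generic C++ illustration of the same design point (all names here are hypothetical, not HHVM's):

#include <memory>
#include <stdexcept>
#include <variant>

struct Vector { int size = 0; };

// Loose: the signature admits an "empty" state the function never returns.
std::variant<std::monostate, std::shared_ptr<Vector>> fromArrayLoose(bool ok) {
    if (!ok) throw std::invalid_argument("Parameter arr must be an array");
    return std::make_shared<Vector>();
}

// Tight: the function throws or returns an object, so the type says so.
std::shared_ptr<Vector> fromArrayTight(bool ok) {
    if (!ok) throw std::invalid_argument("Parameter arr must be an array");
    return std::make_shared<Vector>();
}

int main() {
    auto v = fromArrayTight(true); // no unwrapping branch needed at call sites
    return v->size;
}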
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ ... @@ if (BUILD_TESTS)
       XlogHeader1.h
       XlogHeader2.h
     SOURCES
+      AsyncFileWriterTest.cpp
       ImmediateFileWriterTest.cpp
       LogCategoryTest.cpp
       LoggerDBTest.cpp
--- a/folly/Makefile.am
+++ b/folly/Makefile.am
@@ ... @@ nobase_follyinclude_HEADERS = \
   experimental/JemallocNodumpAllocator.h \
   experimental/JSONSchema.h \
   experimental/LockFreeRingBuffer.h \
+  experimental/logging/AsyncFileWriter.h \
   experimental/logging/ImmediateFileWriter.h \
   experimental/logging/LogCategory.h \
   experimental/logging/LogFormatter.h \
new file mode 100644
index 00000000000..3aab81ad06e
--- /dev/null
+++ b/folly/experimental/logging/AsyncFileWriter.cpp
+/*
+ * Copyright 2004-present Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <folly/experimental/logging/AsyncFileWriter.h>
+
+#include <folly/Exception.h>
+#include <folly/FileUtil.h>
+#include <folly/experimental/logging/LoggerDB.h>
+
+using folly::File;
+using folly::StringPiece;
+
+namespace folly {
+
+AsyncFileWriter::AsyncFileWriter(StringPiece path)
+    : AsyncFileWriter{File{path.str(), O_WRONLY | O_APPEND | O_CREAT}} {}
+
+AsyncFileWriter::AsyncFileWriter(folly::File&& file)
+    : file_{std::move(file)}, ioThread_([this] { ioThread(); }) {}
+
+AsyncFileWriter::~AsyncFileWriter() {
+  data_->stop = true;
+  messageReady_.notify_one();
+  ioThread_.join();
+}
+
+void AsyncFileWriter::writeMessage(StringPiece buffer) {
+  return writeMessage(buffer.str());
+}
+
+void AsyncFileWriter::writeMessage(std::string&& buffer) {
+  auto data = data_.lock();
+  if (data->currentBufferSize >= data->maxBufferBytes) {
+    ++data->numDiscarded;
+    return;
+  }
+
+  data->currentBufferSize += buffer.size();
+  auto* queue = data->getCurrentQueue();
+  queue->emplace_back(std::move(buffer));
+  messageReady_.notify_one();
+}
+
+void AsyncFileWriter::flush() {
+  auto data = data_.lock();
+  auto start = data->ioThreadCounter;
+
+  // Wait until ioThreadCounter increments by at least two.
+  // Waiting for a single increment is not sufficient, as this happens after
+  // the I/O thread has swapped the queues, which is before it has actually
+  // done the I/O.
+  while (data->ioThreadCounter < start + 2) {
+    if (data->ioThreadDone) {
+      return;
+    }
+
+    // Enqueue an empty string and wake the I/O thread.
+    // The empty string ensures that the I/O thread will break out of its wait
+    // loop and increment the ioThreadCounter, even if there is no other work
+    // to do.
+    data->getCurrentQueue()->emplace_back();
+    messageReady_.notify_one();
+
+    // Wait for notification from the I/O thread that it has done work.
+    ioCV_.wait(data.getUniqueLock());
+  }
+}
+
+void AsyncFileWriter::ioThread() {
+  while (true) {
+    // With the lock held, grab a pointer to the current queue, then increment
+    // the ioThreadCounter index so that other threads will write into the
+    // other queue as we process this one.
+    std::vector<std::string>* ioQueue;
+    size_t numDiscarded;
+    bool stop;
+    {
+      auto data = data_.lock();
+      ioQueue = data->getCurrentQueue();
+      while (ioQueue->empty() && !data->stop) {
+        messageReady_.wait(data.getUniqueLock());
+      }
+
+      ++data->ioThreadCounter;
+      numDiscarded = data->numDiscarded;
+      data->numDiscarded = 0;
+      data->currentBufferSize = 0;
+      stop = data->stop;
+    }
+    ioCV_.notify_all();
+
+    // Write the log messages now that we have released the lock
+    try {
+      performIO(ioQueue);
+    } catch (const std::exception& ex) {
+      onIoError(ex);
+    }
+
+    // clear() empties the vector, but the allocated capacity remains so we can
+    // just reuse it without having to re-allocate in most cases.
+    ioQueue->clear();
+
+    if (numDiscarded > 0) {
+      auto msg = getNumDiscardedMsg(numDiscarded);
+      if (!msg.empty()) {
+        auto ret = folly::writeFull(file_.fd(), msg.data(), msg.size());
+        // We currently ignore errors from writeFull() here.
+        // There's not much we can really do.
+        (void)ret;
+      }
+    }
+
+    if (stop) {
+      data_->ioThreadDone = true;
+      break;
+    }
+  }
+}
+
+void AsyncFileWriter::performIO(std::vector<std::string>* ioQueue) {
+  // kNumIovecs controls the maximum number of strings we write at once in a
+  // single writev() call.
+  constexpr int kNumIovecs = 64;
+  std::array<iovec, kNumIovecs> iovecs;
+
+  size_t idx = 0;
+  while (idx < ioQueue->size()) {
+    int numIovecs = 0;
+    while (numIovecs < kNumIovecs && idx < ioQueue->size()) {
+      const auto& str = (*ioQueue)[idx];
+      iovecs[numIovecs].iov_base = const_cast<char*>(str.data());
+      iovecs[numIovecs].iov_len = str.size();
+      ++numIovecs;
+      ++idx;
+    }
+
+    auto ret = folly::writevFull(file_.fd(), iovecs.data(), numIovecs);
+    folly::checkUnixError(ret, "writeFull() failed");
+  }
+}
+
+void AsyncFileWriter::onIoError(const std::exception& ex) {
+  LoggerDB::internalWarning(
+      __FILE__,
+      __LINE__,
+      "error writing to log file ",
+      file_.fd(),
+      " in AsyncFileWriter: ",
+      folly::exceptionStr(ex));
+}
+
+std::string AsyncFileWriter::getNumDiscardedMsg(size_t numDiscarded) {
+  // We may want to make this customizable in the future (e.g., to allow it to
+  // conform to the LogFormatter style being used).
+  // For now just return a simple fixed message.
+  return folly::to<std::string>(
+      numDiscarded,
+      " log messages discarded: logging faster than we can write\n");
+}
+}
new file mode 100644
index 00000000000..0d05ccd39c7
--- /dev/null
+++ b/folly/experimental/logging/AsyncFileWriter.h
+/*
+ * Copyright 2004-present Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <condition_variable>
+#include <mutex>
+#include <thread>
+
+#include <folly/File.h>
+#include <folly/Range.h>
+#include <folly/Synchronized.h>
+#include <folly/experimental/logging/LogWriter.h>
+
+namespace folly {
+
+/**
+ * A LogWriter implementation that asynchronously writes to a file descriptor.
+ *
+ * This class performs the log I/O in a separate thread.
+ *
+ * The advantage of this class over ImmediateFileWriter is that logging I/O can
+ * never slow down or block your normal program operation.  If log messages are
+ * generated faster than they can be written, messages will be dropped (and an
+ * indication of how many messages were dropped will be written to the log file
+ * when we are able to catch up a bit.)
+ *
+ * However, one downside is that if your program crashes, not all log messages
+ * may have been written, so you may lose messages generated immediately before
+ * the crash.
+ */
+class AsyncFileWriter : public LogWriter {
+ public:
+  /**
+   * Construct an AsyncFileWriter that appends to the file at the specified
+   * path.
+   */
+  explicit AsyncFileWriter(folly::StringPiece path);
+
+  /**
+   * Construct an AsyncFileWriter that writes to the specified File object.
+   */
+  explicit AsyncFileWriter(folly::File&& file);
+
+  ~AsyncFileWriter();
+
+  void writeMessage(folly::StringPiece buffer) override;
+  void writeMessage(std::string&& buffer) override;
+
+  /**
+   * Block until the I/O thread has finished writing all messages that
+   * were already enqueued when flush() was called.
+   */
+  void flush();
+
+ private:
+  /*
+   * A simple implementation using two queues.
+   * All writer threads enqueue into one queue while the I/O thread is
+   * processing the other.
+   *
+   * We could potentially also provide an implementation using folly::MPMCQueue
+   * in the future, which may improve contention under very high write loads.
+   */
+  struct Data {
+    std::array<std::vector<std::string>, 2> queues;
+    bool stop{false};
+    bool ioThreadDone{false};
+    uint64_t ioThreadCounter{0};
+    size_t maxBufferBytes{1024 * 1024};
+    size_t currentBufferSize{0};
+    size_t numDiscarded{0};
+
+    std::vector<std::string>* getCurrentQueue() {
+      return &queues[ioThreadCounter & 0x1];
+    }
+  };
+
+  void ioThread();
+  void performIO(std::vector<std::string>* ioQueue);
+
+  void onIoError(const std::exception& ex);
+  std::string getNumDiscardedMsg(size_t numDiscarded);
+
+  folly::File file_;
+  folly::Synchronized<Data, std::mutex> data_;
+  /**
+   * messageReady_ is signaled by writer threads whenever they add a new
+   * message to the current queue.
+   */
+  std::condition_variable messageReady_;
+  /**
+   * ioCV_ is signaled by the I/O thread each time it increments
+   * the ioThreadCounter (once each time around its loop).
+   */
+  std::condition_variable ioCV_;
+
+  /**
+   * The I/O thread.
+   *
+   * This should come last, since all other member variables need to be
+   * constructed before the I/O thread starts.
+   */
+  std::thread ioThread_;
+};
+}
--- a/folly/experimental/logging/Makefile.am
+++ b/folly/experimental/logging/Makefile.am
@@ ... @@ SUBDIRS = .
 lib_LTLIBRARIES = libfollylogging.la

 libfollylogging_la_SOURCES = \
+  AsyncFileWriter.cpp \
   ImmediateFileWriter.cpp \
   LogCategory.cpp \
   Logger.cpp \
new file mode 100644
index 00000000000..d1541dc52ca
--- /dev/null
+++ b/folly/experimental/logging/test/AsyncFileWriterTest.cpp
+/*
+ * Copyright 2004-present Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <folly/Conv.h>
+#include <folly/Exception.h>
+#include <folly/File.h>
+#include <folly/FileUtil.h>
+#include <folly/String.h>
+#include <folly/experimental/TestUtil.h>
+#include <folly/experimental/logging/AsyncFileWriter.h>
+#include <folly/experimental/logging/LoggerDB.h>
+#include <folly/portability/GFlags.h>
+#include <folly/portability/GMock.h>
+#include <folly/portability/GTest.h>
+#include <folly/portability/Unistd.h>
+
+DEFINE_int64(
+    async_discard_num_writer_threads,
+    32,
+    "number of threads to use to generate log messages during "
+    "the AsyncFileWriter.discard test");
+DEFINE_int64(
+    async_discard_messages_per_writer,
+    200000,
+    "number of messages each writer thread should generate in "
+    "the AsyncFileWriter.discard test");
+DEFINE_int64(
+    async_discard_read_sleep_usec,
+    500,
+    "how long the read thread should sleep between reads in "
+    "the AsyncFileWriter.discard test");
+
+using namespace folly;
+using folly::test::TemporaryFile;
+
+TEST(AsyncFileWriter, noMessages) {
+  TemporaryFile tmpFile{"logging_test"};
+
+  // Test the simple construction and destruction of an AsyncFileWriter
+  // without ever writing any messages.  This still exercises the I/O
+  // thread start-up and shutdown code.
+  AsyncFileWriter writer{folly::File{tmpFile.fd(), false}};
+}
+
+TEST(AsyncFileWriter, simpleMessages) {
+  TemporaryFile tmpFile{"logging_test"};
+
+  {
+    AsyncFileWriter writer{folly::File{tmpFile.fd(), false}};
+    for (int n = 0; n < 10; ++n) {
+      writer.writeMessage(folly::to<std::string>("message ", n, "\n"));
+      sched_yield();
+    }
+  }
+
+  std::string data;
+  auto ret = folly::readFile(tmpFile.path().native().c_str(), data);
+  ASSERT_TRUE(ret);
+
+  std::string expected =
+      "message 0\n"
+      "message 1\n"
+      "message 2\n"
+      "message 3\n"
+      "message 4\n"
+      "message 5\n"
+      "message 6\n"
+      "message 7\n"
+      "message 8\n"
+      "message 9\n";
+  EXPECT_EQ(expected, data);
+}
+
+#ifndef _WIN32
+namespace {
+static std::vector<std::string>* internalWarnings;
+
+void handleLoggingError(
+    StringPiece /* file */,
+    int /* lineNumber */,
+    std::string&& msg) {
+  internalWarnings->emplace_back(std::move(msg));
+}
+}
+
+TEST(AsyncFileWriter, ioError) {
+  // Set the LoggerDB internal warning handler so we can record the messages
+  std::vector<std::string> logErrors;
+  internalWarnings = &logErrors;
+  LoggerDB::setInternalWarningHandler(handleLoggingError);
+
+  // Create an AsyncFileWriter that refers to a pipe whose read end is closed
+  std::array<int, 2> fds;
+  auto rc = pipe(fds.data());
+  folly::checkUnixError(rc, "failed to create pipe");
+  signal(SIGPIPE, SIG_IGN);
+  ::close(fds[0]);
+
+  // Log a bunch of messages to the writer
+  size_t numMessages = 100;
+  {
+    AsyncFileWriter writer{folly::File{fds[1], true}};
+    for (size_t n = 0; n < numMessages; ++n) {
+      writer.writeMessage(folly::to<std::string>("message ", n, "\n"));
+      sched_yield();
+    }
+  }
+
+  LoggerDB::setInternalWarningHandler(nullptr);
+
+  // AsyncFileWriter should have some internal warning messages about the
+  // log failures.  This will generally be many fewer than the number of
+  // messages we wrote, though, since it performs write batching.
+  for (const auto& msg : logErrors) {
+    EXPECT_THAT(
+        msg,
+        testing::ContainsRegex(
+            "error writing to log file .* in AsyncFileWriter.*: Broken pipe"));
+  }
+  EXPECT_GT(logErrors.size(), 0);
+  EXPECT_LE(logErrors.size(), numMessages);
+}
+#endif
+
+/**
+ * writeThread() writes a series of messages to the AsyncFileWriter
+ */
+void writeThread(AsyncFileWriter* writer, size_t id, size_t numMessages) {
+  for (size_t n = 0; n < numMessages; ++n) {
+    writer->writeMessage(
+        folly::to<std::string>("thread ", id, " message ", n + 1, '\n'));
+  }
+}
+
+class ReadStats {
+ public:
+  void check(size_t numThreads, size_t messagesPerThread) {
+    EXPECT_EQ("", trailingData_);
+    EXPECT_EQ(numThreads, writers_.size());
+    size_t totalMessagesReceived = 0;
+    for (const auto& writerData : writers_) {
+      EXPECT_LE(writerData.numMessages, messagesPerThread);
+      EXPECT_LE(writerData.lastId, messagesPerThread);
+      totalMessagesReceived += writerData.numMessages;
+    }
+
+    EXPECT_EQ(0, numUnableToParse_);
+    EXPECT_EQ(0, numOutOfOrder_);
+    EXPECT_EQ(
+        numThreads * messagesPerThread, totalMessagesReceived + numDiscarded_);
+  }
+
+  void messageReceived(StringPiece msg) {
+    if (msg.endsWith(" log messages discarded: "
+                     "logging faster than we can write")) {
+      auto discardCount = folly::to<size_t>(msg.subpiece(0, msg.find(' ')));
+      fprintf(stderr, "received discard notification: %zu\n", discardCount);
+      numDiscarded_ += discardCount;
+      return;
+    }
+
+    size_t threadID = 0;
+    size_t messageIndex = 0;
+    try {
+      parseMessage(msg, &threadID, &messageIndex);
+    } catch (const std::exception& ex) {
+      ++numUnableToParse_;
+      fprintf(
+          stderr,
+          "unable to parse log message: %s\n",
+          folly::humanify(msg.str()).c_str());
+      return;
+    }
+
+    if (threadID >= writers_.size()) {
+      writers_.resize(threadID + 1);
+    }
+    writers_[threadID].numMessages++;
+    if (messageIndex > writers_[threadID].lastId) {
+      writers_[threadID].lastId = messageIndex;
+    } else {
+      ++numOutOfOrder_;
+      fprintf(
+          stderr,
+          "received out-of-order messages from writer %zu: "
+          "%zu received after %zu\n",
+          threadID,
+          messageIndex,
+          writers_[threadID].lastId);
+    }
+  }
+
+  void trailingData(StringPiece data) {
+    trailingData_ = data.str();
+  }
+
+ private:
+  struct WriterStats {
+    size_t numMessages{0};
+    size_t lastId{0};
+  };
+
+  void parseMessage(StringPiece msg, size_t* threadID, size_t* messageIndex) {
+    constexpr StringPiece prefix{"thread "};
+    constexpr StringPiece middle{" message "};
+    if (!msg.startsWith(prefix)) {
+      throw std::runtime_error("bad message prefix");
+    }
+
+    auto idx = prefix.size();
+    auto end = msg.find(' ', idx);
+    if (end == StringPiece::npos) {
+      throw std::runtime_error("no middle found");
+    }
+
+    *threadID = folly::to<size_t>(msg.subpiece(idx, end - idx));
+    auto rest = msg.subpiece(end);
+    if (!rest.startsWith(middle)) {
+      throw std::runtime_error("bad message middle");
+    }
+
+    rest.advance(middle.size());
+    *messageIndex = folly::to<size_t>(rest);
+  }
+
+  std::vector<WriterStats> writers_;
+  std::string trailingData_;
+  size_t numUnableToParse_{0};
+  size_t numOutOfOrder_{0};
+  size_t numDiscarded_{0};
+};
+
+/**
+ * readThread() reads messages slowly from a pipe.  This helps test the
+ * AsyncFileWriter behavior when I/O is slow.
+ */
+void readThread(folly::File&& file, ReadStats* stats) {
+  std::vector<char> buffer;
+  buffer.resize(1024);
+
+  size_t bufferIdx = 0;
+  while (true) {
+    /* sleep override */
+    usleep(FLAGS_async_discard_read_sleep_usec);
+
+    auto readResult = folly::readNoInt(
+        file.fd(), buffer.data() + bufferIdx, buffer.size() - bufferIdx);
+    if (readResult < 0) {
+      fprintf(stderr, "error reading from pipe: %d\n", errno);
+      return;
+    }
+    if (readResult == 0) {
+      fprintf(stderr, "read EOF\n");
+      break;
+    }
+
+    auto logDataLen = bufferIdx + readResult;
+    StringPiece logData{buffer.data(), logDataLen};
+    auto idx = 0;
+    while (true) {
+      auto end = logData.find('\n', idx);
+      if (end == StringPiece::npos) {
+        bufferIdx = logDataLen - idx;
+        memmove(buffer.data(), buffer.data() + idx, bufferIdx);
+        break;
+      }
+
+      StringPiece logMsg{logData.data() + idx, end - idx};
+      stats->messageReceived(logMsg);
+      idx = end + 1;
+    }
+  }
+
+  if (bufferIdx != 0) {
+    stats->trailingData(StringPiece{buffer.data(), bufferIdx});
+  }
+}
+
+/*
+ * The discard test spawns a number of threads that each write a large number
+ * of messages quickly.  The AsyncFileWriter writes to a pipe, and a separate
+ * thread reads from it slowly, causing a backlog to build up.
+ *
+ * The test then checks that:
+ * - The read thread always receives full messages (no partial log messages)
+ * - Messages that are received are received in order
+ * - The number of messages received plus the number reported in discard
+ *   notifications matches the number of messages sent.
+ */
+TEST(AsyncFileWriter, discard) {
+  std::array<int, 2> fds;
+  auto pipeResult = pipe(fds.data());
+  folly::checkUnixError(pipeResult, "pipe failed");
+  folly::File readPipe{fds[0], true};
+  folly::File writePipe{fds[1], true};
+
+  ReadStats readStats;
+  std::thread reader(readThread, std::move(readPipe), &readStats);
+  {
+    AsyncFileWriter writer{std::move(writePipe)};
+
+    std::vector<std::thread> writeThreads;
+    for (int n = 0; n < FLAGS_async_discard_num_writer_threads; ++n) {
+      writeThreads.emplace_back(
+          writeThread, &writer, n, FLAGS_async_discard_messages_per_writer);
+    }
+
+    for (auto& t : writeThreads) {
+      t.join();
+    }
+    fprintf(stderr, "writers done\n");
+  }
+  reader.join();
+  readStats.check(
+      FLAGS_async_discard_num_writer_threads,
+      FLAGS_async_discard_messages_per_writer);
+}
| logging: add AsyncFileWriter | facebook/folly | 82b71ca37d76487b5ee0b0e517576f6111d1d3ea | 2017-06-15T18:06:06Z |
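For orientation, here is a sketch of how the new class is meant to be driven, based only on the API visible in this diff: writeMessage() enqueues without blocking (dropping and counting messages under backlog), flush() blocks until the I/O thread has drained what was already enqueued, and the destructor stops and joins the I/O thread. Treat this as an assumption-level usage note against that revision of folly, not authoritative documentation:

#include <folly/experimental/logging/AsyncFileWriter.h>
#include <folly/Conv.h>

int main() {
    // Appends to the file; all I/O happens on the writer's own thread.
    folly::AsyncFileWriter writer{"/tmp/app.log"};
    for (int n = 0; n < 1000; ++n) {
        // Never blocks the caller; messages are dropped (and counted)
        // if they arrive faster than the I/O thread can write them.
        writer.writeMessage(folly::to<std::string>("message ", n, "\n"));
    }
    // Block until everything enqueued so far has hit the file descriptor.
    writer.flush();
    // ~AsyncFileWriter stops and joins the I/O thread on scope exit.
    return 0;
}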
--- a/modules/imgproc/perf/perf_warp.cpp
+++ b/modules/imgproc/perf/perf_warp.cpp
@@ ... @@ PERF_TEST_P(TestWarpPerspective, WarpPerspectiveLarge,

     SANITY_CHECK(dst);

-    imwrite("/home/kir/temp/dst" + resolution + ".png", dst);
+    //imwrite("/home/kir/temp/dst" + resolution + ".png", dst);
 }

 PERF_TEST_P(TestRemap, remap,
| commented wrong line | opencv/opencv | de98da42f7c6b00693fcaa2dd0b5bc4f4a5957b9 | 2012-10-12T13:30:23Z |
--- a/lib/AST/ASTWalker.cpp
+++ b/lib/AST/ASTWalker.cpp
@@ ... @@ class Traversal : public ASTVisitor<Traversal, Expr*, Stmt*,
   // Decls
   //===----------------------------------------------------------------------===//

+  bool visitGenericParamListIfNeeded(GenericContext *GC) {
+    // Must check this first in case extensions have not been bound yet
+    if (Walker.shouldWalkIntoGenericParams()) {
+      if (auto *params = GC->getGenericParams()) {
+        visitGenericParamList(params);
+      }
+      return true;
+    }
+    return false;
+  }
+
+  bool visitTrailingRequirements(GenericContext *GC) {
+    if (const auto Where = GC->getTrailingWhereClause()) {
+      for (auto &Req : Where->getRequirements())
+        if (doIt(Req))
+          return true;
+    } else if (!isa<ExtensionDecl>(GC)) {
+      if (const auto GP = GC->getGenericParams())
+        for (auto Req : GP->getTrailingRequirements())
+          if (doIt(Req))
+            return true;
+    }
+    return false;
+  }
+
   bool visitImportDecl(ImportDecl *ID) {
     return false;
   }
@@ ... @@ class Traversal : public ASTVisitor<Traversal, Expr*, Stmt*,
       if (doIt(Inherit))
         return true;
     }
-    if (auto *Where = ED->getTrailingWhereClause()) {
-      for (auto &Req : Where->getRequirements()) {
-        if (doIt(Req))
-          return true;
-      }
-    }
+    if (visitTrailingRequirements(ED))
+      return true;
+
     for (Decl *M : ED->getMembers()) {
       if (doIt(M))
         return true;
@@ ... @@ class Traversal : public ASTVisitor<Traversal, Expr*, Stmt*,
   }

   bool visitTypeAliasDecl(TypeAliasDecl *TAD) {
-    if (Walker.shouldWalkIntoGenericParams() && TAD->getGenericParams()) {
-      if (visitGenericParamList(TAD->getGenericParams()))
-        return true;
-    }
+    bool WalkGenerics = visitGenericParamListIfNeeded(TAD);

     if (auto typerepr = TAD->getUnderlyingTypeRepr())
       if (doIt(typerepr))
         return true;
-    return false;
+
+    return WalkGenerics && visitTrailingRequirements(TAD);
   }

   bool visitOpaqueTypeDecl(OpaqueTypeDecl *OTD) {
@@ ... @@ class Traversal : public ASTVisitor<Traversal, Expr*, Stmt*,
   }

   // Visit requirements
-  if (WalkGenerics) {
-    ArrayRef<swift::RequirementRepr> Reqs = None;
-    if (auto *Protocol = dyn_cast<ProtocolDecl>(NTD)) {
-      if (auto *WhereClause = Protocol->getTrailingWhereClause())
-        Reqs = WhereClause->getRequirements();
-    } else {
-      Reqs = NTD->getGenericParams()->getTrailingRequirements();
-    }
-    for (auto Req : Reqs) {
-      if (doIt(Req))
-        return true;
-    }
-  }
-
+  if (WalkGenerics && visitTrailingRequirements(NTD))
+    return true;
+
   for (Decl *Member : NTD->getMembers()) {
     if (doIt(Member))
       return true;
@@ ... @@ class Traversal : public ASTVisitor<Traversal, Expr*, Stmt*,
     if (doIt(SD->getElementTypeLoc()))
       return true;

-    if (WalkGenerics) {
-      // Visit generic requirements
-      for (auto Req : SD->getGenericParams()->getTrailingRequirements()) {
-        if (doIt(Req))
-          return true;
-      }
-    }
+    // Visit trailing requirements
+    if (WalkGenerics && visitTrailingRequirements(SD))
+      return true;

     if (!Walker.shouldWalkAccessorsTheOldWay()) {
       for (auto *AD : SD->getAllAccessors())
@@ ... @@ class Traversal : public ASTVisitor<Traversal, Expr*, Stmt*,
     if (doIt(FD->getBodyResultTypeLoc()))
       return true;

-    if (WalkGenerics) {
-      // Visit trailing requirments
-      for (auto Req : AFD->getGenericParams()->getTrailingRequirements()) {
-        if (doIt(Req))
-          return true;
-      }
-    }
+    // Visit trailing requirements
+    if (WalkGenerics && visitTrailingRequirements(AFD))
+      return true;

     if (AFD->getBody(/*canSynthesize=*/false)) {
       AbstractFunctionDecl::BodyKind PreservedKind = AFD->getBodyKind();
@@ ... @@ class Traversal : public ASTVisitor<Traversal, Expr*, Stmt*,
     }
     return false;
   }
-
- private:
-  bool visitGenericParamListIfNeeded(GenericContext *gc) {
-    if (Walker.shouldWalkIntoGenericParams()) {
-      if (auto *params = gc->getGenericParams()) {
-        visitGenericParamList(params);
-        return true;
-      }
-    }
-    return false;
-  }
 };

 } // end anonymous namespace
--- a/test/IDE/coloring.swift
+++ b/test/IDE/coloring.swift
@@ ... @@ enum E {
 // CHECK: <kw>var</kw> <kw>_</kw> = <int>10</int>
 @available(iOS 99, *)
 var _ = 10
+
+// CHECK: <type>Array</type><<type>T</type>> <kw>where</kw> <type>T</type>: <type>Equatable</type>
+typealias GenericAlias<T> = Array<T> where T: Equatable
+
+// Where clauses on contextually generic declarations
+//
+struct FreeWhere<T> {
+  // CHECK: <kw>func</kw> foo() <kw>where</kw> <type>T</type> == <type>Int</type>
+  func foo() where T == Int {}
+
+  // CHECK: <kw>subscript</kw>() -> <type>Int</type> <kw>where</kw> <type>T</type>: <type>Sequence</type>
+  subscript() -> Int where T: Sequence {}
+
+  // CHECK: <kw>enum</kw> Enum <kw>where</kw> <type>T</type> == <type>Int</type>
+  enum Enum where T == Int {}
+
+  // CHECK: <kw>typealias</kw> Alias = <type>Int</type> <kw>where</kw> <type>T</type> == <type>Int</type>
+  typealias Alias = Int where T == Int
+}
| IDE: Ensure syntax coloring for contextual where clauses | apple/swift | c498ad028309492f4b4ff24e603e0b71bb80169c | 2020-03-05T04:29:28Z |
--- a/folly/concurrency/ConcurrentHashMap.h
+++ b/folly/concurrency/ConcurrentHashMap.h
@@ ... @@ class ConcurrentHashMap {
     void next() {
       while (it_ == parent_->ensureSegment(segment_)->cend() &&
              segment_ < parent_->NumShards) {
-        segment_++;
-        auto seg = parent_->segments_[segment_].load(std::memory_order_acquire);
-        if (segment_ < parent_->NumShards) {
-          if (!seg) {
-            continue;
+        SegmentT* seg{nullptr};
+        while (!seg) {
+          segment_++;
+          seg = parent_->segments_[segment_].load(std::memory_order_acquire);
+          if (segment_ < parent_->NumShards) {
+            if (!seg) {
+              continue;
+            }
+            it_ = seg->cbegin();
           }
-          it_ = seg->cbegin();
+          break;
         }
       }
     }
--- a/folly/concurrency/detail/ConcurrentHashMap-detail.h
+++ b/folly/concurrency/detail/ConcurrentHashMap-detail.h
@@ ... @@ class alignas(64) ConcurrentHashMapSegment {
   // throw if hash or key_eq functions throw.
   void erase(Iterator& res, Iterator& pos) {
     erase_internal(pos->first, &res);
+    // Invalidate the iterator.
+    pos = cend();
   }

   void clear() {
--- a/folly/concurrency/test/ConcurrentHashMapTest.cpp
+++ b/folly/concurrency/test/ConcurrentHashMapTest.cpp
@@ ... @@ TEST(ConcurrentHashMap, RefcountTest) {
 }

 struct Wrapper {
-  Wrapper() = default;
+  explicit Wrapper(bool& del_) : del(del_) {}
   ~Wrapper() {
     del = true;
   }

-  static bool del;
+  bool& del;
 };

-bool Wrapper::del = false;
-
 TEST(ConcurrentHashMap, Deletion) {
-  EXPECT_FALSE(Wrapper::del);
+  bool del{false};

   {
     ConcurrentHashMap<int, std::shared_ptr<Wrapper>> map;

-    map.insert(0, std::make_shared<Wrapper>());
+    map.insert(0, std::make_shared<Wrapper>(del));
+  }
+
+  EXPECT_TRUE(del);
+}
+
+TEST(ConcurrentHashMap, DeletionWithErase) {
+  bool del{false};
+
+  {
+    ConcurrentHashMap<int, std::shared_ptr<Wrapper>> map;
+
+    map.insert(0, std::make_shared<Wrapper>(del));
     map.erase(0);
   }

-  EXPECT_TRUE(Wrapper::del);
+  EXPECT_TRUE(del);
+}
+
+TEST(ConcurrentHashMap, DeletionWithIterator) {
+  bool del{false};
+
+  {
+    ConcurrentHashMap<int, std::shared_ptr<Wrapper>> map;
+
+    map.insert(0, std::make_shared<Wrapper>(del));
+    auto it = map.find(0);
+    map.erase(it);
+  }
+
+  EXPECT_TRUE(del);
+}
+
+TEST(ConcurrentHashMap, DeletionWithForLoop) {
+  bool del{false};
+
+  {
+    ConcurrentHashMap<int, std::shared_ptr<Wrapper>> map;
+
+    map.insert(0, std::make_shared<Wrapper>(del));
+    for (auto it = map.cbegin(); it != map.cend(); ++it) {
+      EXPECT_EQ(it->first, 0);
+    }
+  }
+
+  EXPECT_TRUE(del);
+}
+
+TEST(ConcurrentHashMap, DeletionMultiple) {
+  bool del1{false}, del2{false};
+
+  {
+    ConcurrentHashMap<int, std::shared_ptr<Wrapper>> map;
+
+    map.insert(0, std::make_shared<Wrapper>(del1));
+    map.insert(1, std::make_shared<Wrapper>(del2));
+  }
+
+  EXPECT_TRUE(del1);
+  EXPECT_TRUE(del2);
+}
+
+TEST(ConcurrentHashMap, DeletionAssigned) {
+  bool del1{false}, del2{false};
+
+  {
+    ConcurrentHashMap<int, std::shared_ptr<Wrapper>> map;
+
+    map.insert(0, std::make_shared<Wrapper>(del1));
+    map.insert_or_assign(0, std::make_shared<Wrapper>(del2));
+  }
+
+  EXPECT_TRUE(del1);
+  EXPECT_TRUE(del2);
+}
+
+TEST(ConcurrentHashMap, DeletionMultipleMaps) {
+  bool del1{false}, del2{false};
+
+  {
+    ConcurrentHashMap<int, std::shared_ptr<Wrapper>> map1;
+    ConcurrentHashMap<int, std::shared_ptr<Wrapper>> map2;
+
+    map1.insert(0, std::make_shared<Wrapper>(del1));
+    map2.insert(0, std::make_shared<Wrapper>(del2));
+  }
+
+  EXPECT_TRUE(del1);
+  EXPECT_TRUE(del2);
 }
--- a/folly/experimental/hazptr/hazptr-impl.h
+++ b/folly/experimental/hazptr/hazptr-impl.h
@@ ... @@ FOLLY_ALWAYS_INLINE hazptr_array<M>::~hazptr_array() {
   auto count = tc.count();
   if ((M <= HAZPTR_TC_SIZE) && (count + M <= HAZPTR_TC_SIZE)) {
     for (size_t i = 0; i < M; ++i) {
+      h[i].reset();
       tc[count + i].hprec_ = h[i].hazptr_;
       HAZPTR_DEBUG_PRINT(i << " " << &h[i]);
       new (&h[i]) hazptr_holder(nullptr);
| more ConcurrentHashMap deletion tests | facebook/folly | 051bd89d6e03bcd6fcc69199a454247848b16b9b | 2018-02-21T17:30:20Z |
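The newly added tests pin down a subtle contract: erasing must release the mapped value (observable here through the shared_ptr's destructor flipping a flag), and erase-by-iterator must leave the passed iterator invalidated, which is why the detail header now sets pos = cend(). A condensed usage sketch of that contract, mirroring the DeletionWithIterator test against that folly revision:

#include <folly/concurrency/ConcurrentHashMap.h>
#include <cassert>
#include <memory>

int main() {
    folly::ConcurrentHashMap<int, std::shared_ptr<int>> map;
    map.insert(0, std::make_shared<int>(42));

    auto it = map.find(0);
    assert(it != map.cend());
    map.erase(it); // removes the entry; `it` must not be reused afterwards

    // The entry is gone and the shared_ptr it held has been released.
    assert(map.find(0) == map.cend());
    return 0;
}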
--- a/src/ccutil/universalambigs.cpp
+++ b/src/ccutil/universalambigs.cpp
@@ ... @@
 // Description: Data for a universal ambigs file that is useful for
 //              any language.
 // Author:      Ray Smith
-// Created:     Mon Mar 18 11:26:00 PDT 2013
 //
 // (C) Copyright 2013, Google Inc.
 // Licensed under the Apache License, Version 2.0 (the "License");
@@ ... @@
 //
 ///////////////////////////////////////////////////////////////////////

+#include "universalambigs.h"
+
 namespace tesseract {

-extern const char kUniversalAmbigsFile[] = {
+const char kUniversalAmbigsFile[] = {
   '\166', '\062', '\012', '\047', '\047', '\040', '\042', '\040', '\061',
   '\012', '\140', '\047', '\040', '\042', '\040', '\061', '\012', '\047',
   '\140', '\040', '\042', '\040', '\061', '\012', '\342', '\200', '\230',
@@ ... @@ extern const char kUniversalAmbigsFile[] = {
   '\012',
 };

-extern const int ksizeofUniversalAmbigsFile = sizeof(kUniversalAmbigsFile);
+const int ksizeofUniversalAmbigsFile = sizeof(kUniversalAmbigsFile);

 } // namespace tesseract
| universalambigs: Add missing include file | tesseract-ocr/tesseract | cd749be473925e7bd4f2fe6868b614db474082d0 | 2019-05-02T05:36:31Z |
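The fix follows the usual rule this commit enforces: keep `extern` on the declaration in the header, include that header from the defining .cpp so the compiler checks declaration and definition against each other, and drop `extern` from the definition itself. A minimal two-file sketch of the pattern (file and symbol names hypothetical):

// data.h -- declaration only; extern lives here.
extern const char kData[];
extern const int kSizeofData;

// data.cpp -- including its own header makes a declaration/definition
// mismatch a compile error instead of a silent link-time surprise.
#include "data.h"
const char kData[] = {'\166', '\062', '\012'};
const int kSizeofData = sizeof(kData);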
--- a/tensorflow/g3doc/api_docs/python/train.md
+++ b/tensorflow/g3doc/api_docs/python/train.md
@@ ... @@ Construct a new gradient descent optimizer.

 *  <b>`learning_rate`</b>: A Tensor or a floating point value. The learning
     rate to use.
-*  <b>`use_locking`</b>: If True use locks for update operation.s
+*  <b>`use_locking`</b>: If True use locks for update operations.
 *  <b>`name`</b>: Optional name prefix for the operations created when applying
     gradients. Defaults to "GradientDescent".
| Update generated Op docs. | tensorflow/tensorflow | 701d9f27da8f6cf4fb42abc7fc65371901862c3a | 2015-12-17T01:55:00Z |
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ ... @@
 ## ClickHouse release 20.9

+### ClickHouse release v20.9.5.5-stable, 2020-11-13
+
+#### Bug Fix
+
+* Fix rare silent crashes when query profiler is on and ClickHouse is installed on OS with glibc version that has (supposedly) broken asynchronous unwind tables for some functions. This fixes [#15301](https://github.com/ClickHouse/ClickHouse/issues/15301). This fixes [#13098](https://github.com/ClickHouse/ClickHouse/issues/13098). [#16846](https://github.com/ClickHouse/ClickHouse/pull/16846) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Now when parsing AVRO from input the LowCardinality is removed from type. Fixes [#16188](https://github.com/ClickHouse/ClickHouse/issues/16188). [#16521](https://github.com/ClickHouse/ClickHouse/pull/16521) ([Mike](https://github.com/myrrc)).
+* Fix rapid growth of metadata when using MySQL Master -> MySQL Slave -> ClickHouse MaterializeMySQL Engine, and `slave_parallel_worker` enabled on MySQL Slave, by properly shrinking GTID sets. This fixes [#15951](https://github.com/ClickHouse/ClickHouse/issues/15951). [#16504](https://github.com/ClickHouse/ClickHouse/pull/16504) ([TCeason](https://github.com/TCeason)).
+* Fix DROP TABLE for Distributed (racy with INSERT). [#16409](https://github.com/ClickHouse/ClickHouse/pull/16409) ([Azat Khuzhin](https://github.com/azat)).
+* Fix processing of very large entries in replication queue. Very large entries may appear in ALTER queries if table structure is extremely large (near 1 MB). This fixes [#16307](https://github.com/ClickHouse/ClickHouse/issues/16307). [#16332](https://github.com/ClickHouse/ClickHouse/pull/16332) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Fixed the inconsistent behaviour when a part of return data could be dropped because the set for its filtration wasn't created. [#16308](https://github.com/ClickHouse/ClickHouse/pull/16308) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Fix bug with MySQL database. When MySQL server used as database engine is down some queries raise Exception, because they try to get tables from disabled server, while it's unnecessary. For example, query `SELECT ... FROM system.parts` should work only with MergeTree tables and don't touch MySQL database at all. [#16032](https://github.com/ClickHouse/ClickHouse/pull/16032) ([Kruglov Pavel](https://github.com/Avogar)).
+
+
 ### ClickHouse release v20.9.4.76-stable (2020-10-29)

 #### Bug Fix
| more | ClickHouse/ClickHouse | 2092aed4159b9c5619657b150f7c50d24cf69a10 | 2020-11-13T06:39:14Z |
--- a/include/reporters/catch_reporter_teamcity.hpp
+++ b/include/reporters/catch_reporter_teamcity.hpp
@@ ... @@
 #ifndef TWOBLUECUBES_CATCH_REPORTER_TEAMCITY_HPP_INCLUDED
 #define TWOBLUECUBES_CATCH_REPORTER_TEAMCITY_HPP_INCLUDED

-#include "catch_reporter_bases.hpp"
-
-#include "../internal/catch_reporter_registrars.hpp"
+// Don't #include any Catch headers here - we can assume they are already
+// included before this header.
+// This is not good practice in general but is necessary in this case so this
+// file can be distributed as a single header that works with the main
+// Catch single header.

 #include <cstring>
| Removed #includes for Catch headers | catchorg/Catch2 | 58dcb5ea928024544039babe8cdafcaf0c78c5ea | 2014-12-22T19:45:16Z |
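The comment added by the diff implies an include-order contract for the single-header distribution: the main Catch header must be included first so this reporter can rely on declarations that it no longer pulls in itself. A sketch of a consumer translation unit under that assumption (reported to TeamCity when run with the teamcity reporter selected):

// main.cpp -- the single-header scenario the new comment describes.
#define CATCH_CONFIG_MAIN
#include "catch.hpp"                    // must come first: provides all Catch declarations
#include "catch_reporter_teamcity.hpp"  // header-only reporter, no Catch includes of its own

TEST_CASE("arithmetic") {
    REQUIRE(2 + 2 == 4);
}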
mmm a / lib / Sema / CSSimplify . cpp <nl> ppp b / lib / Sema / CSSimplify . cpp <nl> ConstraintSystem : : addKeyPathApplicationRootConstraint ( Type root , ConstraintLocat <nl> auto subscript = dyn_cast_or_null < SubscriptExpr > ( anchor ) ; <nl> if ( ! subscript ) <nl> return ; <nl> - <nl> - assert ( path . size ( ) = = 1 & & <nl> - path [ 0 ] . getKind ( ) = = ConstraintLocator : : SubscriptMember ) ; <nl> + <nl> + assert ( ( path . size ( ) = = 1 & & <nl> + path [ 0 ] . getKind ( ) = = ConstraintLocator : : SubscriptMember ) | | <nl> + ( path . size ( ) = = 2 & & <nl> + path [ 1 ] . getKind ( ) = = ConstraintLocator : : KeyPathDynamicMember ) ) ; <nl> auto indexTuple = dyn_cast < TupleExpr > ( subscript - > getIndex ( ) ) ; <nl> if ( ! indexTuple | | indexTuple - > getNumElements ( ) ! = 1 ) <nl> return ; <nl> mmm a / test / decl / var / property_wrappers . swift <nl> ppp b / test / decl / var / property_wrappers . swift <nl> protocol ProtocolWithWrapper { <nl> struct UsesProtocolWithWrapper : ProtocolWithWrapper { <nl> @ Wrapper var foo : Int / / expected - warning { { ignoring associated type ' Wrapper ' in favor of module - scoped property wrapper ' Wrapper ' ; please qualify the reference with ' property_wrappers ' } } { { 4 - 4 = property_wrappers . } } <nl> } <nl> + <nl> + / / rdar : / / problem / 56350060 - [ Dynamic key path member lookup ] Assertion when subscripting with a key path <nl> + func test_rdar56350060 ( ) { <nl> + @ propertyWrapper <nl> + @ dynamicMemberLookup <nl> + struct DynamicWrapper < Value > { <nl> + var wrappedValue : Value { fatalError ( ) } <nl> + <nl> + subscript < T > ( keyPath keyPath : KeyPath < Value , T > ) - > DynamicWrapper < T > { <nl> + fatalError ( ) <nl> + } <nl> + <nl> + subscript < T > ( dynamicMember keyPath : KeyPath < Value , T > ) - > DynamicWrapper < T > { <nl> + return self [ keyPath : keyPath ] / / Ok <nl> + } <nl> + } <nl> + } <nl> | [ ConstraintSystem ] Adjust keypath subscript assert to account for dynamic member lookup | apple/swift | 09a36ddf17b2e5a073fbb2b3a76fd8336edc196c | 2019-10-16T23:38:16Z |
mmm a / src / containers / intrusive_list . hpp <nl> ppp b / src / containers / intrusive_list . hpp <nl> class intrusive_list_node_t { <nl> # endif <nl> prev ( NULL ) , next ( NULL ) <nl> { } <nl> - virtual ~ intrusive_list_node_t ( ) { <nl> - rassert ( prev = = NULL ) ; <nl> - rassert ( next = = NULL ) ; <nl> - rassert ( parent_list = = NULL ) ; <nl> - } <nl> <nl> # ifndef NDEBUG <nl> bool in_a_list ( ) { <nl> class intrusive_list_node_t { <nl> } <nl> # endif <nl> <nl> + protected : <nl> + ~ intrusive_list_node_t ( ) { <nl> + rassert ( prev = = NULL ) ; <nl> + rassert ( next = = NULL ) ; <nl> + rassert ( parent_list = = NULL ) ; <nl> + } <nl> + <nl> private : <nl> # ifndef NDEBUG <nl> intrusive_list_t < derived_t > * parent_list ; <nl> | Made ~ intrusive_list_node_t non - virtual and protected . | rethinkdb/rethinkdb | 1a74dd14167aba4beb147155135b38c3527b8275 | 2013-09-30T17:38:22Z |
--- a/Marlin/src/module/tool_change.cpp
+++ b/Marlin/src/module/tool_change.cpp
@@ ... @@ void tool_change(const uint8_t tmp_extruder, const float fr_mm_s/*=0.0*/, bool n
       if (!DEBUGGING(DRYRUN) && thermalManager.targetTooColdToExtrude(active_extruder)) {
         SERIAL_ERROR_START();
         SERIAL_ERRORLNPGM(MSG_HOTEND_TOO_COLD);
+        active_extruder = tmp_extruder;
         return;
       }
     #endif
| Allow cold change of active extruder () | MarlinFirmware/Marlin | b2c1cd7eda5aaec3ec9ef5322e9502025ce2a2f2 | 2018-10-10T14:57:48Z |
--- a/system/keymaps/joystick.AppleRemote.xml
+++ b/system/keymaps/joystick.AppleRemote.xml
@@ ... @@
       <button id="6">Close</button>
     </joystick>
   </TextViewer>
+  <NumericInput>
+    <joystick name="AppleRemote">
+      <button id="6">Close</button>
+    </joystick>
+  </NumericInput>
 </keymap>
--- a/system/keymaps/joystick.Logitech.RumblePad.2.xml
+++ b/system/keymaps/joystick.Logitech.RumblePad.2.xml
@@ ... @@
     </joystick>
   </TextViewer>

+  <NumericInput>
+    <joystick name="Logitech Logitech Cordless RumblePad 2">
+      <altname>Logitech Cordless RumblePad 2</altname>
+      <button id="3">Close</button>
+    </joystick>
+  </NumericInput>
+
 </keymap>
--- a/system/keymaps/joystick.PS3.Remote.Keyboard.xml
+++ b/system/keymaps/joystick.PS3.Remote.Keyboard.xml
@@ ... @@
     </joystick>
   </PictureInfo>

-  <FullscreenInfo>
+  <FullscreenInfo>
     <joystick name="PLAYSTATION(R)3 Remote Keyboard">
       <altname>PS3 Remote Keyboard</altname>
       <altname>MoSart PS3 Remote Keyboard</altname>
       <button id="1">Close</button>
       <button id="4">OSD</button>
     </joystick>
-  </FullscreenInfo>
+  </FullscreenInfo>
+
+  <NumericInput>
+    <joystick name="PLAYSTATION(R)3 Remote Keyboard">
+      <altname>PS3 Remote Keyboard</altname>
+      <altname>MoSart PS3 Remote Keyboard</altname>
+      <button id="3">Close</button>
+    </joystick>
+  </NumericInput>
 </keymap>
--- a/system/keymaps/keyboard.xml
+++ b/system/keymaps/keyboard.xml
@@ ... @@
       <backspace>Close</backspace>
     </keyboard>
   </Favourites>
+  <NumericInput>
+    <keyboard>
+      <backspace>Close</backspace>
+    </keyboard>
+  </NumericInput>
 </keymap>
| fixed: numeric input dialog missing in various keymaps | xbmc/xbmc | cd592fbc19a9bba664b705d7d95f0bb01c41793c | 2010-10-23T08:33:09Z |
mmm a / stdlib / public / runtime / Private . h <nl> ppp b / stdlib / public / runtime / Private . h <nl> class TypeInfo { <nl> const Metadata * type , <nl> const ProtocolConformanceDescriptor * conformance ) ; <nl> <nl> + / / / Determine whether the given type conforms to the given Swift protocol , <nl> + / / / returning the appropriate protocol conformance descriptor when it does . <nl> + const ProtocolConformanceDescriptor * <nl> + _conformsToSwiftProtocol ( const Metadata * const type , <nl> + const ProtocolDescriptor * protocol ) ; <nl> + <nl> / / / Retrieve an associated type witness from the given witness table . <nl> / / / <nl> / / / \ param wtable The witness table . <nl> mmm a / stdlib / public / runtime / ProtocolConformance . cpp <nl> ppp b / stdlib / public / runtime / ProtocolConformance . cpp <nl> namespace { <nl> private : <nl> const void * Type ; <nl> const ProtocolDescriptor * Proto ; <nl> - std : : atomic < const WitnessTable * > Table ; <nl> + std : : atomic < const ProtocolConformanceDescriptor * > Description ; <nl> std : : atomic < size_t > FailureGeneration ; <nl> <nl> public : <nl> ConformanceCacheEntry ( ConformanceCacheKey key , <nl> - const WitnessTable * table , <nl> + const ProtocolConformanceDescriptor * description , <nl> size_t failureGeneration ) <nl> - : Type ( key . Type ) , Proto ( key . Proto ) , Table ( table ) , <nl> + : Type ( key . Type ) , Proto ( key . Proto ) , Description ( description ) , <nl> FailureGeneration ( failureGeneration ) { <nl> } <nl> <nl> namespace { <nl> } <nl> <nl> bool isSuccessful ( ) const { <nl> - return Table . load ( std : : memory_order_relaxed ) ! = nullptr ; <nl> + return Description . load ( std : : memory_order_relaxed ) ! = nullptr ; <nl> } <nl> <nl> - void makeSuccessful ( const WitnessTable * table ) { <nl> - Table . store ( table , std : : memory_order_release ) ; <nl> + void makeSuccessful ( const ProtocolConformanceDescriptor * description ) { <nl> + Description . store ( description , std : : memory_order_release ) ; <nl> } <nl> <nl> void updateFailureGeneration ( size_t failureGeneration ) { <nl> assert ( ! isSuccessful ( ) ) ; <nl> FailureGeneration . store ( failureGeneration , std : : memory_order_relaxed ) ; <nl> } <nl> - <nl> - / / / Get the cached witness table , if successful . <nl> - const WitnessTable * getWitnessTable ( ) const { <nl> + <nl> + / / / Get the cached conformance descriptor , if successful . <nl> + const ProtocolConformanceDescriptor * getDescription ( ) const { <nl> assert ( isSuccessful ( ) ) ; <nl> - return Table . load ( std : : memory_order_acquire ) ; <nl> + return Description . load ( std : : memory_order_acquire ) ; <nl> } <nl> <nl> / / / Get the generation in which this lookup failed . <nl> struct ConformanceState { <nl> } <nl> <nl> void cacheSuccess ( const void * type , const ProtocolDescriptor * proto , <nl> - const WitnessTable * witness ) { <nl> + const ProtocolConformanceDescriptor * description ) { <nl> auto result = Cache . getOrInsert ( ConformanceCacheKey ( type , proto ) , <nl> - witness , 0 ) ; <nl> + description , 0 ) ; <nl> <nl> / / If the entry was already present , we may need to update it . <nl> if ( ! result . second ) { <nl> - result . first - > makeSuccessful ( witness ) ; <nl> + result . first - > makeSuccessful ( description ) ; <nl> } <nl> } <nl> <nl> void cacheFailure ( const void * type , const ProtocolDescriptor * proto , <nl> size_t failureGeneration ) { <nl> - auto result = Cache . 
getOrInsert ( ConformanceCacheKey ( type , proto ) , <nl> - ( const WitnessTable * ) nullptr , <nl> - failureGeneration ) ; <nl> + auto result = <nl> + Cache . getOrInsert ( ConformanceCacheKey ( type , proto ) , <nl> + ( const ProtocolConformanceDescriptor * ) nullptr , <nl> + failureGeneration ) ; <nl> <nl> / / If the entry was already present , we may need to update it . <nl> if ( ! result . second ) { <nl> swift : : swift_registerProtocolConformances ( const ProtocolConformanceRecord * begin <nl> <nl> <nl> struct ConformanceCacheResult { <nl> - / / true if witnessTable is an authoritative result as - is . <nl> + / / true if description is an authoritative result as - is . <nl> / / false if more searching is required ( for example , because a cached <nl> / / failure was returned in failureEntry but it is out - of - date . <nl> bool isAuthoritative ; <nl> <nl> - / / The matching witness table , or null if no cached conformance was found . <nl> - const WitnessTable * witnessTable ; <nl> + / / The matching conformance descriptor , or null if no cached conformance <nl> + / / was found . <nl> + const ProtocolConformanceDescriptor * description ; <nl> <nl> / / If the search fails , this may be the negative cache entry for the <nl> / / queried type itself . This entry may be null or out - of - date . <nl> ConformanceCacheEntry * failureEntry ; <nl> <nl> static ConformanceCacheResult <nl> - cachedSuccess ( const WitnessTable * table ) { <nl> - return ConformanceCacheResult { true , table , nullptr } ; <nl> + cachedSuccess ( const ProtocolConformanceDescriptor * description ) { <nl> + return ConformanceCacheResult { true , description , nullptr } ; <nl> } <nl> <nl> static ConformanceCacheResult <nl> static const void * getConformanceCacheTypeKey ( const Metadata * type ) { <nl> return type ; <nl> } <nl> <nl> - / / / Search for a witness table in the ConformanceCache . <nl> + / / / Search for a conformance descriptor in the ConformanceCache . <nl> static <nl> ConformanceCacheResult <nl> searchInConformanceCache ( const Metadata * type , <nl> searchInConformanceCache ( const Metadata * type , <nl> if ( auto * Value = C . findCached ( type , protocol ) ) { <nl> if ( Value - > isSuccessful ( ) ) { <nl> / / Found a conformance on the type or some superclass . Return it . <nl> - return ConformanceCacheResult : : cachedSuccess ( Value - > getWitnessTable ( ) ) ; <nl> + return ConformanceCacheResult : : cachedSuccess ( Value - > getDescription ( ) ) ; <nl> } <nl> <nl> / / Found a negative cache entry . <nl> searchInConformanceCache ( const Metadata * type , <nl> / / Hash and lookup the type - protocol pair in the cache . <nl> if ( auto * Value = C . findCached ( typeKey , protocol ) ) { <nl> if ( Value - > isSuccessful ( ) ) <nl> - return ConformanceCacheResult : : cachedSuccess ( Value - > getWitnessTable ( ) ) ; <nl> + return ConformanceCacheResult : : cachedSuccess ( Value - > getDescription ( ) ) ; <nl> <nl> / / We don ' t try to cache negative responses for generic <nl> / / patterns . <nl> namespace { <nl> } ; <nl> } <nl> <nl> - static const WitnessTable * <nl> - swift_conformsToProtocolImpl ( const Metadata * const type , <nl> - const ProtocolDescriptor * protocol ) { <nl> + const ProtocolConformanceDescriptor * <nl> + swift : : _conformsToSwiftProtocol ( const Metadata * const type , <nl> + const ProtocolDescriptor * protocol ) { <nl> auto & C = Conformances . get ( ) ; <nl> <nl> / / See if we have a cached conformance . 
The ConcurrentMap data structure <nl> swift_conformsToProtocolImpl ( const Metadata * const type , <nl> auto FoundConformance = searchInConformanceCache ( type , protocol ) ; <nl> / / If the result ( positive or negative ) is authoritative , return it . <nl> if ( FoundConformance . isAuthoritative ) <nl> - return FoundConformance . witnessTable ; <nl> + return FoundConformance . description ; <nl> <nl> auto failureEntry = FoundConformance . failureEntry ; <nl> <nl> swift_conformsToProtocolImpl ( const Metadata * const type , <nl> return nullptr ; <nl> } <nl> <nl> - / / / Local function to retrieve the witness table and record the result . <nl> - auto recordWitnessTable = [ & ] ( const ProtocolConformanceDescriptor & descriptor , <nl> - const Metadata * type ) { <nl> - auto witnessTable = descriptor . getWitnessTable ( type ) ; <nl> - if ( witnessTable ) <nl> - C . cacheSuccess ( type , protocol , witnessTable ) ; <nl> - else <nl> - C . cacheFailure ( type , protocol , snapshot . count ( ) ) ; <nl> - } ; <nl> - <nl> / / Really scan conformance records . <nl> for ( size_t i = startIndex ; i < endIndex ; i + + ) { <nl> auto & section = snapshot . Start [ i ] ; <nl> swift_conformsToProtocolImpl ( const Metadata * const type , <nl> if ( ! matchingType ) <nl> matchingType = type ; <nl> <nl> - recordWitnessTable ( descriptor , matchingType ) ; <nl> + C . cacheSuccess ( matchingType , protocol , & descriptor ) ; <nl> } <nl> } <nl> } <nl> <nl> / / Conformance scan is complete . <nl> - / / Search the cache once more , and this time update the cache if necessary . <nl> <nl> + / / Search the cache once more , and this time update the cache if necessary . <nl> FoundConformance = searchInConformanceCache ( type , protocol ) ; <nl> if ( FoundConformance . isAuthoritative ) { <nl> - return FoundConformance . witnessTable ; <nl> + return FoundConformance . description ; <nl> } else { <nl> C . cacheFailure ( type , protocol , snapshot . count ( ) ) ; <nl> return nullptr ; <nl> } <nl> } <nl> <nl> + static const WitnessTable * <nl> + swift_conformsToProtocolImpl ( const Metadata * const type , <nl> + const ProtocolDescriptor * protocol ) { <nl> + auto description = _conformsToSwiftProtocol ( type , protocol ) ; <nl> + if ( ! description ) <nl> + return nullptr ; <nl> + <nl> + return description - > getWitnessTable ( type ) ; <nl> + } <nl> + <nl> const ContextDescriptor * <nl> swift : : _searchConformancesByMangledTypeName ( Demangle : : NodePointer node ) { <nl> auto & C = Conformances . get ( ) ; <nl> | [ Runtime ] Cache protocol conformance descriptors , not witness tables . | apple/swift | d9bb81bc20d82a5eddc74dabbf1c983259ff06c0 | 2018-11-17T06:26:40Z |
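Editor's note on the row above: the patch changes what Swift's runtime conformance cache stores, a ProtocolConformanceDescriptor pointer instead of a WitnessTable pointer, with the new swift_conformsToProtocolImpl deriving the witness table from the cached descriptor on demand. A minimal C++ sketch of the entry's success/failure convention follows; the type names are hypothetical stand-ins for the runtime types, not the actual Swift runtime code.

#include <atomic>
#include <cassert>
#include <cstddef>

// Hypothetical stand-in; the real type lives in the Swift runtime.
struct ProtocolConformanceDescriptor;

class ConformanceCacheEntry {
 public:
  // A null descriptor marks a negative (failure) entry, the same
  // convention the patch above relies on.
  bool isSuccessful() const {
    return description_.load(std::memory_order_relaxed) != nullptr;
  }

  void makeSuccessful(const ProtocolConformanceDescriptor* d) {
    // Release store: readers that observe the pointer also observe
    // the descriptor it points to.
    description_.store(d, std::memory_order_release);
  }

  const ProtocolConformanceDescriptor* getDescription() const {
    assert(isSuccessful());
    return description_.load(std::memory_order_acquire);
  }

  void updateFailureGeneration(std::size_t gen) {
    assert(!isSuccessful());
    failureGeneration_.store(gen, std::memory_order_relaxed);
  }

 private:
  std::atomic<const ProtocolConformanceDescriptor*> description_{nullptr};
  std::atomic<std::size_t> failureGeneration_{0};
};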
mmm a / xbmc / FileItem . cpp <nl> ppp b / xbmc / FileItem . cpp <nl> bool CFileItem : : IsGame ( ) const <nl> if ( HasPictureInfoTag ( ) ) <nl> return false ; <nl> <nl> + if ( IsPVR ( ) ) <nl> + return false ; <nl> + <nl> if ( HasAddonInfo ( ) ) <nl> return CGameUtils : : IsStandaloneGame ( std : : const_pointer_cast < ADDON : : IAddon > ( GetAddonInfo ( ) ) ) ; <nl> <nl> | Merge pull request from ksooo / fix - fileitem - isgame - performance - issue | xbmc/xbmc | 4d09cd1f5768848410e5aa4243deb60828b9e695 | 2016-12-10T09:23:49Z |
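Editor's note: the one-line fix above adds a cheap IsPVR() early-out so PVR items never reach the more expensive add-on classification (the commit title flags it as a performance issue). A generic sketch of that guard-ordering idea, with purely illustrative names; none of this is Kodi's actual CFileItem API.

struct Item {
  bool hasPictureInfo = false;
  bool isPVR = false;
  bool hasAddonInfo = false;
};

// Cheapest checks first; the costly classification runs only when no
// early guard fires. The diff inserts the PVR test into this chain.
bool isGame(const Item& item) {
  if (item.hasPictureInfo) return false;
  if (item.isPVR) return false;        // new early-out
  if (item.hasAddonInfo) return true;  // stand-in for the add-on lookup
  return false;
}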
mmm a / selfdrive / car / toyota / values . py <nl> ppp b / selfdrive / car / toyota / values . py <nl> class CAR : <nl> ( Ecu . engine , 0x700 , None ) : [ <nl> b ' \ x01896634A15000 \ x00 \ x00 \ x00 \ x00 ' , <nl> b ' \ x018966342M5000 \ x00 \ x00 \ x00 \ x00 ' , <nl> - b ' \ x018966342X6000 \ x00 \ x00 \ x00 \ x00 ' , <nl> b ' \ x018966342W8000 \ x00 \ x00 \ x00 \ x00 ' , <nl> + b ' \ 0018966342X5000 \ x00 \ x00 \ x00 \ x00 ' , <nl> + b ' \ x018966342X6000 \ x00 \ x00 \ x00 \ x00 ' , <nl> b ' \ x028966342W4001 \ x00 \ x00 \ x00 \ x00897CF1203001 \ x00 \ x00 \ x00 \ x00 ' , <nl> b ' \ x02896634A14001 \ x00 \ x00 \ x00 \ x00897CF1203001 \ x00 \ x00 \ x00 \ x00 ' , <nl> b ' \ x02896634A23001 \ x00 \ x00 \ x00 \ x00897CF1203001 \ x00 \ x00 \ x00 \ x00 ' , <nl> | Add missing engine f / w for CAR . RAV4H_TSS2 ( ) | commaai/openpilot | 200f246d5aab5acac68f0ba8bbd1c84ddd8effc8 | 2020-11-30T14:33:18Z |
similarity index 100 % <nl> rename from docs / api / dialog - ko . md <nl> rename to docs - translations / ko / api / dialog - ko . md <nl> | Add file to KO translation docs | electron/electron | 6c984fac7c0be5fdc16cf99f1d3448f90f74a782 | 2015-08-22T12:23:57Z |
mmm a / cmake / modules / AddSwift . cmake <nl> ppp b / cmake / modules / AddSwift . cmake <nl> function ( _add_swift_library_single target name ) <nl> <nl> # The section metadata objects are generated sources , and we need to tell CMake <nl> # not to expect to find them prior to their generation . <nl> - if ( " $ { SWIFTLIB_SINGLE_SDK } " STREQUAL " LINUX " OR <nl> - " $ { SWIFTLIB_SINGLE_SDK } " STREQUAL " FREEBSD " ) <nl> + if ( " $ { SWIFT_SDK_ $ { SWIFTLIB_SINGLE_SDK } _OBJECT_FORMAT } " STREQUAL " ELF " ) <nl> if ( " $ { libkind } " STREQUAL " SHARED " ) <nl> set_source_files_properties ( $ { SWIFT_SECTIONS_OBJECT_BEGIN } PROPERTIES GENERATED 1 ) <nl> set_source_files_properties ( $ { SWIFT_SECTIONS_OBJECT_END } PROPERTIES GENERATED 1 ) <nl> | Merge pull request from compnerd / elf - check | apple/swift | 45bf03abc15f19275c7f59ff86b74bce22ca0784 | 2016-07-03T21:13:46Z |
mmm a / tensorflow / core / profiler / internal / traceme_recorder . cc <nl> ppp b / tensorflow / core / profiler / internal / traceme_recorder . cc <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> # include " tensorflow / core / profiler / internal / traceme_recorder . h " <nl> <nl> - / / To avoid unnecessary synchronization between threads , each thread has a <nl> - / / ThreadLocalRecorder that independently records its events . <nl> - / / <nl> - / / Events are stored in an EventQueue implemented as a linked - list of blocks , <nl> - / / with start and end pointers : <nl> - / / [ events . . . . . . . . | next - ] - - > [ events . . . . . . . . . | next ] <nl> - / / ^ start_block ^ start ^ end_block ^ end <nl> - / / <nl> - / / Record ( ) writes at end , and then advances it , allocating a block if needed . <nl> - / / Clear ( ) takes ownership of events in the range [ start , end ) . <nl> - / / The end pointer is atomic so these can be concurrent . <nl> - / / <nl> - / / If a thread dies , the ThreadLocalRecorder ' s destructor hands its data off to <nl> - / / the orphaned_events list . <nl> - <nl> # include < cstddef > <nl> <nl> # include " tensorflow / core / platform / env . h " <nl> namespace { <nl> <nl> / / A single - producer single - consumer queue of Events . <nl> / / <nl> - / / Push and Consume are lock free and each might be called from at most one <nl> - / / thread . Push is only be called by the owner thread . Consume is called by the <nl> + / / Implemented as a linked - list of blocks containing numbered slots , with start <nl> + / / and end pointers : <nl> + / / <nl> + / / [ events . . . . . . . . | next - ] - - > [ events . . . . . . . . . | next ] <nl> + / / ^ start_block_ ^ start_ ^ end_block_ ^ end_ <nl> + / / <nl> + / / start_ is the first occupied slot , end_ is the first unoccupied slot . <nl> + / / <nl> + / / Push writes at end_ , and then advances it , allocating a block if needed . <nl> + / / PopAll takes ownership of events in the range [ start_ , end_ ) . <nl> + / / The end_ pointer is atomic so Push and PopAll can be concurrent . <nl> + / / <nl> + / / Push and PopAll are lock free and each might be called from at most one <nl> + / / thread . Push is only called by the owner thread . PopAll is called by the <nl> / / owner thread when it shuts down , or by the tracing control thread . <nl> - / / Thus , Consume might race with Push , so Consume only removes events that were <nl> - / / in the queue when it was invoked . If Push is called while Consume is active , <nl> + / / <nl> + / / Thus , PopAll might race with Push , so PopAll only removes events that were <nl> + / / in the queue when it was invoked . If Push is called while PopAll is active , <nl> / / the new event remains in the queue . Thus , the tracing control thread should <nl> - / / call Consume when tracing stops to remove events created during tracing , but <nl> + / / call PopAll when tracing stops to remove events created during tracing , but <nl> / / also when tracing starts again to clear any remaining events . <nl> - / / <nl> - / / Internally , we have a linked list of blocks containing numbered slots . <nl> - / / start is the first occupied slot , end is the first unoccupied slot . 
<nl> class EventQueue { <nl> public : <nl> EventQueue ( ) <nl> class EventQueue { <nl> end_block_ ( start_block_ ) , <nl> end_ ( start_ ) { } <nl> <nl> - / / REQUIRES : Consume ( ) was called since the last Push ( ) . <nl> + / / REQUIRES : PopAll ( ) was called since the last Push ( ) . <nl> / / Memory should be deallocated and trace events destroyed on destruction . <nl> / / This doesn ' t require global lock as this discards all the stored trace <nl> - / / events and we assume of destruction of this class only after the last <nl> + / / events and we assume of destruction of this instance only after the last <nl> / / Push ( ) has been called . <nl> ~ EventQueue ( ) { <nl> - DCHECK_EQ ( start_ , end_ . load ( ) ) < < " EventQueue destroyed without Consume ( ) " ; <nl> + DCHECK ( Empty ( ) ) < < " EventQueue destroyed without PopAll ( ) " ; <nl> delete end_block_ ; <nl> } <nl> <nl> class EventQueue { <nl> } <nl> <nl> / / Retrieve and remove all events in the queue at the time of invocation . <nl> - / / If Push is called while Consume is active , the new event will not be <nl> + / / If Push is called while PopAll is active , the new event will not be <nl> / / removed from the queue . <nl> - std : : vector < TraceMeRecorder : : Event > Consume ( ) { <nl> + std : : vector < TraceMeRecorder : : Event > PopAll ( ) { <nl> / / Read index before contents . <nl> size_t end = end_ . load ( std : : memory_order_acquire ) ; <nl> std : : vector < TraceMeRecorder : : Event > result ; <nl> result . reserve ( end - start_ ) ; <nl> while ( start_ ! = end ) { <nl> - Shift ( & result ) ; <nl> + result . emplace_back ( Pop ( ) ) ; <nl> } <nl> return result ; <nl> } <nl> <nl> private : <nl> - / / Shift one event off the front of the queue into * out . <nl> - void Shift ( std : : vector < TraceMeRecorder : : Event > * out ) { <nl> + / / Returns true if the queue is empty at the time of invocation . <nl> + bool Empty ( ) const { <nl> + return ( start_ = = end_ . load ( std : : memory_order_acquire ) ) ; <nl> + } <nl> + <nl> + / / Remove one event off the front of the queue and return it . <nl> + / / REQUIRES : The queue must not be empty . <nl> + TraceMeRecorder : : Event Pop ( ) { <nl> + DCHECK ( ! Empty ( ) ) ; <nl> / / Move the next event into the output . <nl> auto & event = start_block_ - > events [ start_ + + - start_block_ - > start ] . event ; <nl> - out - > push_back ( std : : move ( event ) ) ; <nl> + TraceMeRecorder : : Event out = std : : move ( event ) ; <nl> event . ~ Event ( ) ; / / Events must be individually destroyed . <nl> / / If we reach the end of a block , we own it and should delete it . <nl> / / The next block is present : end always points to something . <nl> class EventQueue { <nl> auto * next_block = start_block_ - > next ; <nl> delete start_block_ ; <nl> start_block_ = next_block ; <nl> + DCHECK_EQ ( start_ , start_block_ - > start ) ; <nl> } <nl> + return out ; <nl> } <nl> <nl> - / / The number of slots in a block . Chosen so that the block fits in 64k . <nl> struct Block { <nl> / / The number of slots in a block is chosen so the block fits in 64 KiB . <nl> static constexpr size_t kSize = 1 < < 16 ; <nl> class EventQueue { <nl> <nl> } / / namespace <nl> <nl> + / / To avoid unnecessary synchronization between threads , each thread has a <nl> + / / ThreadLocalRecorder that independently records its events . 
<nl> class TraceMeRecorder : : ThreadLocalRecorder { <nl> public : <nl> / / The recorder is created the first time TraceMeRecorder : : Record ( ) is called <nl> class TraceMeRecorder : : ThreadLocalRecorder { <nl> <nl> / / Clear is called from the control thread when tracing starts / stops , or from <nl> / / the owner thread when it shuts down ( see destructor ) . <nl> - TraceMeRecorder : : ThreadEvents Clear ( ) { return { info_ , queue_ . Consume ( ) } ; } <nl> + TraceMeRecorder : : ThreadEvents Clear ( ) { return { info_ , queue_ . PopAll ( ) } ; } <nl> <nl> private : <nl> TraceMeRecorder : : ThreadInfo info_ ; <nl> | profiler : : TraceMe - Improve EventQueue documentation | tensorflow/tensorflow | 94751d7ba35ee3512d9f7944138841f26f211118 | 2019-05-09T22:27:13Z |
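Editor's note: the rewritten comments in the TensorFlow row above pin down the EventQueue contract: Push is producer-only, PopAll drains only what was present when it started, and the atomic end_ index is the lone point of synchronization between them. A self-contained C++ sketch of that single-producer single-consumer shape over a fixed ring; the diff's linked-list-of-blocks storage is omitted, and this version assumes the producer never runs more than kCapacity slots ahead of the consumer.

#include <atomic>
#include <cstddef>
#include <utility>
#include <vector>

template <typename T, std::size_t kCapacity>
class SpscQueue {
 public:
  void Push(T event) {  // owner thread only
    std::size_t end = end_.load(std::memory_order_relaxed);
    slots_[end % kCapacity] = std::move(event);
    // Publish contents before the index, as in the diff's Push().
    end_.store(end + 1, std::memory_order_release);
  }

  // Removes only the events present at the time of invocation; an event
  // pushed concurrently simply stays queued for the next PopAll.
  std::vector<T> PopAll() {
    std::size_t end = end_.load(std::memory_order_acquire);  // index before contents
    std::vector<T> out;
    out.reserve(end - start_);
    while (start_ != end) {
      out.push_back(std::move(slots_[start_++ % kCapacity]));
    }
    return out;
  }

 private:
  T slots_[kCapacity];                 // requires default-constructible T
  std::size_t start_ = 0;              // consumer-owned
  std::atomic<std::size_t> end_{0};    // written by producer, read by both
};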
mmm a / test / ParseableInterface / Inputs / exported - module - name - after / CoreKit . h <nl> ppp b / test / ParseableInterface / Inputs / exported - module - name - after / CoreKit . h <nl> @ @ - 1 + 1 @ @ <nl> - # import < ExportAsCoreKit . h > <nl> + # include < ExportAsCoreKit . h > <nl> mmm a / test / ParseableInterface / Inputs / exported - module - name - after / ExportAsCoreKit . h <nl> ppp b / test / ParseableInterface / Inputs / exported - module - name - after / ExportAsCoreKit . h <nl> <nl> + # ifndef EXPORT_AS_COREKIT_H <nl> + # define EXPORT_AS_COREKIT_H <nl> + <nl> struct CKThing { <nl> long value ; <nl> } ; <nl> + <nl> + # endif <nl> + <nl> mmm a / test / ParseableInterface / Inputs / exported - module - name - before / CoreKit . h <nl> ppp b / test / ParseableInterface / Inputs / exported - module - name - before / CoreKit . h <nl> @ @ - 1 + 1 @ @ <nl> - # import < ExportAsCoreKit . h > <nl> + # include < ExportAsCoreKit . h > <nl> | Merge pull request from compnerd / importing - is - not - inclusive | apple/swift | a405948a4bcc0f4e5afbda3e305095ba6d56955a | 2019-03-22T21:31:46Z |
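Editor's note: Objective-C's #import includes a header at most once, so the switch to plain #include in the row above is only safe because ExportAsCoreKit.h simultaneously gains a guard. The classic pattern, as in the diff:

// Without a guard, a second textual inclusion redefines the type:
//   #include "ExportAsCoreKit.h"
//   #include "ExportAsCoreKit.h"  // error: redefinition of 'CKThing'
// With the guard, the second inclusion expands to nothing:
#ifndef EXPORT_AS_COREKIT_H
#define EXPORT_AS_COREKIT_H

struct CKThing {
  long value;
};

#endif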
mmm a / tools / tungsten / . gitignore <nl> ppp b / tools / tungsten / . gitignore <nl> build / <nl> . externalNativeBuild <nl> out / <nl> * . iml <nl> + / core / resources / ibls <nl> mmm a / tools / tungsten / README . md <nl> ppp b / tools / tungsten / README . md <nl> __Note : Tungsten is still a work - in - progress and is not ready for public at - larg <nl> <nl> # # Prerequisites <nl> <nl> - Before building Tungsten , you ' ll need to first build Filament . See Filament ' s [ README ] ( . . / . . / README . md ) for instructions . Be sure to run ` make install ` ( or the equivalent for your chosen build system ) to install Filament binaries to the ` dist ` folder at the root of Filament . Tungsten relies on ` filament - java . jar ` , ` libfilament - jni ` and ` matc ` in the appropriate directories under ` dist ` : <nl> + Before building Tungsten , you ' ll need to first build Filament . See Filament ' s [ README ] ( . . / . . / README . md ) for instructions . Be sure to run ` make install ` ( or the equivalent for your chosen build system ) to install Filament binaries to the ` dist ` folder at the root of Filament . Tungsten relies on ` filament - java . jar ` , ` libfilament - jni ` , and tools such as ` matc ` and ` cmgen ` in the appropriate directories under ` dist ` : <nl> <nl> ` ` ` <nl> Filament <nl> | - - dist <nl> | | - - bin <nl> | | - - matc <nl> + | | - - cmgen <nl> | | - - lib <nl> | | - - x86_64 <nl> | | - - libfilament - jni . * <nl> | filament - java . jar <nl> ` ` ` <nl> <nl> + The location of this directory can be changed by updating the ` filament_tools_dir ` property inside of ` gradle . properties ` . <nl> + <nl> You ' ll also need Java 8 in order to use Tungsten . Tungsten is supported on Windows , Mac , and Linux . <nl> <nl> # # Getting Started <nl> mmm a / tools / tungsten / core / build . gradle <nl> ppp b / tools / tungsten / core / build . gradle <nl> plugins { <nl> id ' org . jetbrains . kotlin . jvm ' version ' 1 . 2 . 51 ' <nl> } <nl> <nl> + apply from : ' . . / . . / . . / android / build / filament - tasks . gradle ' <nl> + <nl> sourceSets { <nl> main { <nl> java { <nl> dependencies { <nl> implementation project ( ' : kotlin - math ' ) <nl> } <nl> <nl> + generateIbl { <nl> + group ' Tungsten ' <nl> + description ' Generate a preview IBL ' <nl> + <nl> + inputFile = file ( " . . / . . / . . / third_party / environments / venetian_crossroads_2k . hdr " ) <nl> + outputDir = file ( " resources / ibls " ) <nl> + } <nl> + <nl> + processResources . dependsOn generateIbl <nl> + <nl> test { <nl> testLogging { <nl> showStandardStreams = true <nl> mmm a / tools / tungsten / core / src / com / google / android / filament / tungsten / texture / TextureCache . kt <nl> ppp b / tools / tungsten / core / src / com / google / android / filament / tungsten / texture / TextureCache . kt <nl> object TextureCache { <nl> return futureTexture <nl> } <nl> <nl> + / * <nl> + * Keep track of texture and delete when Filament shutdownAndDestroyTextures is called . <nl> + * / <nl> + fun addTextureForRemoval ( texture : Texture ) { <nl> + Filament . getInstance ( ) . assertIsFilamentThread ( ) <nl> + textures . add ( texture ) <nl> + } <nl> + <nl> / * * <nl> * Delete all cached textures and disallow any additional texture caching . <nl> * / <nl> object TextureCache { <nl> val futureTexture = FutureTexture ( ) <nl> Filament . getInstance ( ) . runOnFilamentThread { engine - > <nl> if ( ! allowNewTextures ) return @ runOnFilamentThread <nl> - val texture = TextureUtils . 
loadTexture ( engine , imageSource , colorSpace ) <nl> + val texture = TextureUtils . loadTextureFromFile ( engine , imageSource , colorSpace ) <nl> if ( texture ! = null ) { <nl> futureTexture . complete ( texture ) <nl> textures . add ( texture ) <nl> mmm a / tools / tungsten / core / src / com / google / android / filament / tungsten / texture / TextureUtils . kt <nl> ppp b / tools / tungsten / core / src / com / google / android / filament / tungsten / texture / TextureUtils . kt <nl> import java . awt . image . BufferedImage . TYPE_INT_BGR <nl> import java . awt . image . DataBufferByte <nl> import java . io . File <nl> import java . io . IOException <nl> + import java . io . InputStream <nl> import java . nio . ByteBuffer <nl> <nl> private fun Texture . InternalFormat . toSrgb ( ) : Texture . InternalFormat { <nl> private fun Texture . InternalFormat . toSrgb ( ) : Texture . InternalFormat { <nl> } <nl> } <nl> <nl> + data class ImageInfo ( val width : Int , val height : Int ) <nl> + <nl> object TextureUtils { <nl> <nl> enum class ColorSpaceStrategy { <nl> object TextureUtils { <nl> return texture ? : throw RuntimeException ( " Could not load default texture " ) <nl> } <nl> <nl> - fun loadTexture ( engine : Engine , file : File , colorSpace : ColorSpaceStrategy ) : Texture ? { <nl> + fun loadTextureFromFile ( engine : Engine , file : File , colorSpace : ColorSpaceStrategy ) : Texture ? { <nl> val img = try { <nl> ImageIO . read ( file ) ? : return null <nl> } catch ( e : IOException ) { <nl> object TextureUtils { <nl> return loadTextureFromImage ( engine , img , colorSpace ) <nl> } <nl> <nl> + fun loadImageBufferFromStream ( stream : InputStream ) : Pair < ByteBuffer , ImageInfo > ? { <nl> + val img = try { <nl> + ImageIO . read ( stream ) ? : return null <nl> + } catch ( e : IOException ) { <nl> + System . err . println ( " Could not parse image from InputStream . " ) <nl> + e . printStackTrace ( ) <nl> + return null <nl> + } <nl> + return Pair ( loadImageBuffer ( img ) , ImageInfo ( img . width , img . height ) ) <nl> + } <nl> + <nl> / * * <nl> * Based on the number of components the image has and its color space , decide which <nl> * texture formats to use . <nl> object TextureUtils { <nl> return ( if ( isSrgb ) internalFormat . toSrgb ( ) else internalFormat ) to textureFormat <nl> } <nl> <nl> + private fun loadImageBuffer ( img : BufferedImage ) : ByteBuffer { <nl> + val data = img . raster . dataBuffer as DataBufferByte <nl> + flipComponentsIfNecessary ( img ) <nl> + return ByteBuffer . wrap ( data . data ) <nl> + } <nl> + <nl> private fun loadTextureFromImage ( <nl> engine : Engine , <nl> img : BufferedImage , <nl> mmm a / tools / tungsten / core / src / com / google / android / filament / tungsten / ui / GraphPresenter . java <nl> ppp b / tools / tungsten / core / src / com / google / android / filament / tungsten / ui / GraphPresenter . java <nl> <nl> import java . util . concurrent . CompletableFuture ; <nl> import java . util . concurrent . atomic . AtomicReference ; <nl> import javax . swing . JTextArea ; <nl> + <nl> + import com . google . android . filament . tungsten . ui . preview . PreviewMeshPanel ; <nl> import org . jetbrains . annotations . Nullable ; <nl> <nl> public class GraphPresenter implements IPropertiesPresenter { <nl> deleted file mode 100644 <nl> index f3c658093 . . 000000000 <nl> mmm a / tools / tungsten / core / src / com / google / android / filament / tungsten / ui / LightHelpers . 
java <nl> ppp / dev / null <nl> <nl> - / * <nl> - * Copyright ( C ) 2018 The Android Open Source Project <nl> - * <nl> - * Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> - * you may not use this file except in compliance with the License . <nl> - * You may obtain a copy of the License at <nl> - * <nl> - * http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> - * <nl> - * Unless required by applicable law or agreed to in writing , software <nl> - * distributed under the License is distributed on an " AS IS " BASIS , <nl> - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> - * See the License for the specific language governing permissions and <nl> - * limitations under the License . <nl> - * / <nl> - <nl> - package com . google . android . filament . tungsten . ui ; <nl> - <nl> - import com . google . android . filament . Engine ; <nl> - import com . google . android . filament . EntityManager ; <nl> - import com . google . android . filament . IndirectLight ; <nl> - import com . google . android . filament . LightManager ; <nl> - import com . google . android . filament . Scene ; <nl> - <nl> - public final class LightHelpers { <nl> - <nl> - private LightHelpers ( ) { } <nl> - <nl> - private static final float [ ] DEBUG_IRRADIANCE = { <nl> - 0 . 7545545f , <nl> - 0 . 74854296f , <nl> - 0 . 7909215f , <nl> - - 0 . 083856545f , <nl> - 0 . 0925335f , <nl> - 0 . 32276466f , <nl> - 0 . 3081527f , <nl> - 0 . 36679634f , <nl> - 0 . 46669817f , <nl> - - 0 . 18888493f , <nl> - - 0 . 27740255f , <nl> - - 0 . 3778442f , <nl> - - 0 . 25278875f , <nl> - - 0 . 3160564f , <nl> - - 0 . 39614528f , <nl> - 0 . 0713582f , <nl> - 0 . 15978426f , <nl> - 0 . 29059005f , <nl> - - 0 . 031043747f , <nl> - - 0 . 031144021f , <nl> - - 0 . 031046612f , <nl> - - 0 . 16099837f , <nl> - - 0 . 2036487f , <nl> - - 0 . 24664281f , <nl> - 0 . 045710605f , <nl> - 0 . 048120886f , <nl> - 0 . 046324715f <nl> - } ; <nl> - <nl> - static int addSun ( Engine engine , Scene scene ) { <nl> - int light = EntityManager . get ( ) . create ( ) ; <nl> - LightManager . Builder lightBuilder = new LightManager . Builder ( LightManager . Type . SUN ) ; <nl> - lightBuilder . build ( engine , light ) ; <nl> - scene . addEntity ( light ) ; <nl> - return light ; <nl> - } <nl> - <nl> - static int addPointLight ( Engine engine , Scene scene ) { <nl> - int light = EntityManager . get ( ) . create ( ) ; <nl> - LightManager . Builder lightBuilder = new LightManager . Builder ( LightManager . Type . POINT ) <nl> - . position ( 0 . 0f , 0 . 0f , 2 . 0f ) <nl> - . direction ( 0 . 0f , 0 . 0f , - 1 . 0f ) <nl> - . intensity ( 440000 . 0f ) <nl> - . castShadows ( false ) <nl> - . falloff ( 10 . 0f ) <nl> - . color ( 1 . 0f , 1 . 0f , 1 . 0f ) ; <nl> - lightBuilder . build ( engine , light ) ; <nl> - scene . addEntity ( light ) ; <nl> - return light ; <nl> - } <nl> - <nl> - static IndirectLight addIndirectLight ( Engine engine , Scene scene ) { <nl> - IndirectLight ibl = <nl> - new IndirectLight . Builder ( ) <nl> - . irradiance ( 3 , DEBUG_IRRADIANCE ) <nl> - . intensity ( 30000 . 0f ) <nl> - . build ( engine ) ; <nl> - scene . setIndirectLight ( ibl ) ; <nl> - return ibl ; <nl> - } <nl> - } <nl> mmm a / tools / tungsten / core / src / com / google / android / filament / tungsten / ui / TungstenPanel . java <nl> ppp b / tools / tungsten / core / src / com / google / android / filament / tungsten / ui / TungstenPanel . java <nl> <nl> import com . google . android . filament . tungsten . 
MaterialManager ; <nl> import com . google . android . filament . tungsten . compiler . NodeRegistry ; <nl> import com . google . android . filament . tungsten . properties . PropertiesPanel ; <nl> + import com . google . android . filament . tungsten . ui . preview . PreviewMeshPanel ; <nl> + <nl> import java . awt . BorderLayout ; <nl> import java . awt . Dimension ; <nl> import javax . swing . JPanel ; <nl> similarity index 95 % <nl> rename from tools / tungsten / core / src / com / google / android / filament / tungsten / ui / CameraManipulator . kt <nl> rename to tools / tungsten / core / src / com / google / android / filament / tungsten / ui / preview / CameraManipulator . kt <nl> mmm a / tools / tungsten / core / src / com / google / android / filament / tungsten / ui / CameraManipulator . kt <nl> ppp b / tools / tungsten / core / src / com / google / android / filament / tungsten / ui / preview / CameraManipulator . kt <nl> <nl> * limitations under the License . <nl> * / <nl> <nl> - package com . google . android . filament . tungsten . ui <nl> + package com . google . android . filament . tungsten . ui . preview <nl> <nl> import com . curiouscreature . kotlin . math . Float2 <nl> import com . curiouscreature . kotlin . math . Float3 <nl> private fun rotateVector ( rx : Float , ry : Float , v : Float3 ) : Float3 { <nl> return matrix . times ( Float4 ( v ) ) . xyz <nl> } <nl> <nl> - class CameraManipulator ( private val camera : Camera ) { <nl> + internal class CameraManipulator ( private val camera : Camera ) { <nl> <nl> private var cameraTranslation = Float3 ( z = START_RADIUS ) <nl> private var cameraRotation = Float3 ( ) <nl> new file mode 100644 <nl> index 000000000 . . 668c075f9 <nl> mmm / dev / null <nl> ppp b / tools / tungsten / core / src / com / google / android / filament / tungsten / ui / preview / Ibl . kt <nl> <nl> + / * <nl> + * Copyright ( C ) 2018 The Android Open Source Project <nl> + * <nl> + * Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + * you may not use this file except in compliance with the License . <nl> + * You may obtain a copy of the License at <nl> + * <nl> + * http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + * <nl> + * Unless required by applicable law or agreed to in writing , software <nl> + * distributed under the License is distributed on an " AS IS " BASIS , <nl> + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + * See the License for the specific language governing permissions and <nl> + * limitations under the License . <nl> + * / <nl> + <nl> + package com . google . android . filament . tungsten . ui . preview <nl> + <nl> + import com . google . android . filament . Engine <nl> + import com . google . android . filament . Skybox <nl> + import com . google . android . filament . Texture <nl> + import com . google . android . filament . tungsten . texture . TextureCache <nl> + import com . google . android . filament . tungsten . texture . TextureUtils <nl> + import java . io . InputStream <nl> + import java . lang . RuntimeException <nl> + import java . nio . ByteBuffer <nl> + import kotlin . math . log2 <nl> + <nl> + private fun faceName ( mip : Int , face : String ) = " m $ { mip } _ $ face . rgbm " <nl> + <nl> + private const val float = " " " ( [ 0 - 9 - . 
] + ) " " " <nl> + private val pattern = Regex ( " " " \ ( \ s * $ float \ s * , \ s * $ float \ s * , \ s * $ float \ s * \ ) ; " " " ) <nl> + <nl> + internal fun parseSphereHarmonics ( harmonics : String ) : FloatArray { <nl> + return harmonics . lines ( ) . fold ( FloatArray ( 0 ) ) { acc , line - > <nl> + val match = pattern . find ( line ) <nl> + val floats = match ? . groups ? . mapNotNull { group - > <nl> + group ? . value ? . toFloatOrNull ( ) <nl> + } <nl> + floats ? . toFloatArray ( ) ? . let { <nl> + acc + it <nl> + } ? : acc <nl> + } <nl> + } <nl> + <nl> + internal class Ibl ( val engine : Engine , private val pathPrefix : String ) { <nl> + <nl> + val environmentMap : Texture <nl> + val skyboxTexture : Texture <nl> + val skybox : Skybox <nl> + val irradiance : FloatArray <nl> + <nl> + init { <nl> + environmentMap = loadCubemapLevel ( null , 0 ) <nl> + TextureCache . addTextureForRemoval ( environmentMap ) <nl> + for ( i in 1 until environmentMap . levels ) { <nl> + println ( " Loading level $ i " ) <nl> + loadCubemapLevel ( environmentMap , i ) <nl> + } <nl> + <nl> + / / Use the highest - level mip as the skybox texture . <nl> + skyboxTexture = loadCubemapLevel ( null , 0 ) <nl> + TextureCache . addTextureForRemoval ( skyboxTexture ) <nl> + skybox = loadSkybox ( ) <nl> + <nl> + irradiance = loadSphereHarmonics ( ) <nl> + } <nl> + <nl> + private fun loadSkybox ( ) : Skybox { <nl> + return Skybox . Builder ( ) <nl> + . environment ( skyboxTexture ) <nl> + . showSun ( true ) <nl> + . build ( engine ) <nl> + } <nl> + <nl> + private fun loadSphereHarmonics ( ) : FloatArray { <nl> + val path = " $ pathPrefix / sh . txt " <nl> + val stream : InputStream = javaClass . classLoader . getResourceAsStream ( path ) <nl> + ? : throw RuntimeException ( " Could not get stream for sphere harmonics at $ path . " ) <nl> + val contents = stream . bufferedReader ( ) . use { it . readText ( ) } <nl> + return parseSphereHarmonics ( contents ) <nl> + } <nl> + <nl> + private fun loadCubemapLevel ( texture : Texture ? , level : Int ) : Texture { <nl> + require ( texture ! = null | | level = = 0 ) <nl> + <nl> + val cubemapFaces = listOf ( " px " , " nx " , " py " , " ny " , " pz " , " nz " ) <nl> + <nl> + val faceOffsets = IntArray ( 6 ) <nl> + <nl> + val rawBuffers = cubemapFaces . map { face - > <nl> + val path = pathPrefix + " / " + faceName ( level , face ) <nl> + val stream : InputStream = javaClass . classLoader . getResourceAsStream ( path ) <nl> + ? : throw RuntimeException ( " Could not get stream for cubemap face $ path . " ) <nl> + <nl> + val bufferAndInfo = TextureUtils . loadImageBufferFromStream ( stream ) <nl> + ? : throw RuntimeException ( " Could not load cubemap face $ path . " ) <nl> + <nl> + val ( _ , info ) = bufferAndInfo <nl> + if ( info . width ! = info . height ) { <nl> + throw RuntimeException ( " Cubemap face $ pathPrefix width ! = height " ) <nl> + } <nl> + <nl> + bufferAndInfo <nl> + } <nl> + <nl> + val firstFace = rawBuffers [ 0 ] <nl> + val ( _ , firstFaceInfo ) = firstFace <nl> + val size = firstFaceInfo . width <nl> + <nl> + / / Assuming that size is a PO2 <nl> + val levels = ( log2 ( size . toFloat ( ) ) + 1 ) . toInt ( ) <nl> + <nl> + / / Allocate a byte buffer large enough to hold all the faces <nl> + val buffer = ByteBuffer . allocate ( size * size * 4 * 6 ) <nl> + for ( ( rawBuffer , _ ) in rawBuffers ) { <nl> + buffer . put ( rawBuffer ) <nl> + } <nl> + buffer . position ( 0 ) <nl> + <nl> + val bufferDescriptor = <nl> + Texture . 
PixelBufferDescriptor ( buffer , Texture . Format . RGBM , Texture . Type . UBYTE ) <nl> + <nl> + / / If the texture hasn ' t been created yet , create it . <nl> + val resultTexture = texture ? : Texture . Builder ( ) <nl> + . width ( size ) <nl> + . height ( size ) <nl> + . levels ( levels ) <nl> + . format ( Texture . InternalFormat . RGBM ) <nl> + . sampler ( Texture . Sampler . SAMPLER_CUBEMAP ) <nl> + . build ( engine ) <nl> + <nl> + for ( i in 0 . . 5 ) { <nl> + faceOffsets [ i ] = size * size * 4 * i <nl> + } <nl> + <nl> + resultTexture . setImage ( engine , level , bufferDescriptor , faceOffsets ) <nl> + return resultTexture <nl> + } <nl> + } <nl> \ No newline at end of file <nl> new file mode 100644 <nl> index 000000000 . . fea5c3fb8 <nl> mmm / dev / null <nl> ppp b / tools / tungsten / core / src / com / google / android / filament / tungsten / ui / preview / LightHelpers . java <nl> <nl> + / * <nl> + * Copyright ( C ) 2018 The Android Open Source Project <nl> + * <nl> + * Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + * you may not use this file except in compliance with the License . <nl> + * You may obtain a copy of the License at <nl> + * <nl> + * http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + * <nl> + * Unless required by applicable law or agreed to in writing , software <nl> + * distributed under the License is distributed on an " AS IS " BASIS , <nl> + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + * See the License for the specific language governing permissions and <nl> + * limitations under the License . <nl> + * / <nl> + <nl> + package com . google . android . filament . tungsten . ui . preview ; <nl> + <nl> + import com . google . android . filament . Engine ; <nl> + import com . google . android . filament . IndirectLight ; <nl> + import com . google . android . filament . Scene ; <nl> + <nl> + public final class LightHelpers { <nl> + <nl> + private LightHelpers ( ) { } <nl> + <nl> + static IndirectLight addIndirectLight ( Engine engine , Scene scene ) { <nl> + Ibl i = new Ibl ( engine , " ibls / venetian_crossroads_2k " ) ; <nl> + IndirectLight ibl = <nl> + new IndirectLight . Builder ( ) <nl> + . irradiance ( 3 , i . getIrradiance ( ) ) <nl> + . reflections ( i . getEnvironmentMap ( ) ) <nl> + . intensity ( 30000 . 0f ) <nl> + . build ( engine ) ; <nl> + scene . setIndirectLight ( ibl ) ; <nl> + scene . setSkybox ( i . getSkybox ( ) ) ; <nl> + return ibl ; <nl> + } <nl> + } <nl> similarity index 95 % <nl> rename from tools / tungsten / core / src / com / google / android / filament / tungsten / ui / PreviewCamera . java <nl> rename to tools / tungsten / core / src / com / google / android / filament / tungsten / ui / preview / PreviewCamera . java <nl> mmm a / tools / tungsten / core / src / com / google / android / filament / tungsten / ui / PreviewCamera . java <nl> ppp b / tools / tungsten / core / src / com / google / android / filament / tungsten / ui / preview / PreviewCamera . java <nl> <nl> * limitations under the License . <nl> * / <nl> <nl> - package com . google . android . filament . tungsten . ui ; <nl> + package com . google . android . filament . tungsten . ui . preview ; <nl> <nl> import com . google . android . filament . Camera ; <nl> <nl> similarity index 94 % <nl> rename from tools / tungsten / core / src / com / google / android / filament / tungsten / ui / PreviewMeshPanel . 
java <nl> rename to tools / tungsten / core / src / com / google / android / filament / tungsten / ui / preview / PreviewMeshPanel . java <nl> mmm a / tools / tungsten / core / src / com / google / android / filament / tungsten / ui / PreviewMeshPanel . java <nl> ppp b / tools / tungsten / core / src / com / google / android / filament / tungsten / ui / preview / PreviewMeshPanel . java <nl> <nl> * limitations under the License . <nl> * / <nl> <nl> - package com . google . android . filament . tungsten . ui ; <nl> + package com . google . android . filament . tungsten . ui . preview ; <nl> <nl> import com . google . android . filament . Box ; <nl> import com . google . android . filament . Camera ; <nl> <nl> import com . google . android . filament . filamesh . FilameshLoader ; <nl> import com . google . android . filament . tungsten . Filament ; <nl> import com . google . android . filament . tungsten . MathUtils ; <nl> + <nl> import java . awt . BorderLayout ; <nl> import java . awt . GraphicsDevice ; <nl> import java . awt . GraphicsEnvironment ; <nl> <nl> private PreviewCamera mPreviewCamera ; <nl> private Filament . Viewer mViewer ; <nl> <nl> - PreviewMeshPanel ( ) { <nl> + public PreviewMeshPanel ( ) { <nl> / / On Windows D3D seems to mess with our OpenGLContext , this disable it . <nl> System . setProperty ( " sun . java2d . d3d " , " false " ) ; <nl> <nl> <nl> mCamera . setProjection ( 90 . 0 , 1 . 3 , 0 . 1 , 200 . 0 , Camera . Fov . HORIZONTAL ) ; <nl> mCamera . lookAt ( 1 . 5f , 1 . 5f , 1 . 5f , 0 , 0 , 0 , 0 , 1 , 0 ) ; <nl> <nl> - mSun = LightHelpers . addSun ( engine , mScene ) ; <nl> - mPointLight = LightHelpers . addPointLight ( engine , mScene ) ; <nl> mIndirectLight = LightHelpers . addIndirectLight ( engine , mScene ) ; <nl> <nl> loadMesh ( engine , mScene , mVertexBuffer , mIndexBuffer ) ; <nl> <nl> } ) ; <nl> } <nl> <nl> - void updateMaterial ( MaterialInstance newMaterialInstance ) { <nl> + public void updateMaterial ( MaterialInstance newMaterialInstance ) { <nl> Filament . getInstance ( ) . runOnFilamentThread ( ( Engine engine ) - > { <nl> mMeshRenderable = engine . getRenderableManager ( ) . getInstance ( mMeshEntity ) ; <nl> engine . getRenderableManager ( ) . setMaterialInstanceAt ( mMeshRenderable , 0 , <nl> void updateMaterial ( MaterialInstance newMaterialInstance ) { <nl> } ) ; <nl> } <nl> <nl> - void destroy ( ) { <nl> + public void destroy ( ) { <nl> Filament . getInstance ( ) . removeViewer ( mViewer ) ; <nl> Filament . getInstance ( ) . runOnFilamentThread ( ( Engine engine ) - > { <nl> engine . destroyRenderer ( mRenderer ) ; <nl> void destroy ( ) { <nl> EntityManager . get ( ) . destroy ( mMeshEntity ) ; <nl> engine . getRenderableManager ( ) . destroy ( mMeshEntity ) ; <nl> engine . getTransformManager ( ) . destroy ( mMeshTransform ) ; <nl> - engine . getLightManager ( ) . destroy ( mSun ) ; <nl> - engine . getLightManager ( ) . destroy ( mPointLight ) ; <nl> } ) ; <nl> } <nl> <nl> similarity index 93 % <nl> rename from tools / tungsten / core / src / com / google / android / filament / tungsten / ui / TungstenViewer . kt <nl> rename to tools / tungsten / core / src / com / google / android / filament / tungsten / ui / preview / TungstenViewer . kt <nl> mmm a / tools / tungsten / core / src / com / google / android / filament / tungsten / ui / TungstenViewer . kt <nl> ppp b / tools / tungsten / core / src / com / google / android / filament / tungsten / ui / preview / TungstenViewer . kt <nl> <nl> * limitations under the License . 
<nl> * / <nl> <nl> - package com . google . android . filament . tungsten . ui <nl> + package com . google . android . filament . tungsten . ui . preview <nl> <nl> import com . curiouscreature . kotlin . math . Float2 <nl> import com . google . android . filament . Camera <nl> private const val DOLLY_MULTIPLIER = 5 . 0f <nl> <nl> private fun MouseEvent . toFloat2 ( ) = Float2 ( this . x . toFloat ( ) , this . y . toFloat ( ) ) <nl> <nl> - class TungstenViewer ( camera : Camera , val previewMeshPanel : PreviewMeshPanel ) <nl> + internal class TungstenViewer ( camera : Camera , val previewMeshPanel : PreviewMeshPanel ) <nl> : Filament . Viewer ( ) { <nl> <nl> private val previewCamera = PreviewCamera ( camera ) <nl> class TungstenViewer ( camera : Camera , val previewMeshPanel : PreviewMeshPanel ) <nl> / / Swing reports wheel events caused by a physical wheel ( opposed to a trackpad ) <nl> / / with an inverted sign . Flip it so it feels more intuitive . <nl> val causedByWheel = mouseWheelEvent . wheelRotation ! = 0 <nl> - val multiplier = if ( causedByWheel ) 1 else - 1 <nl> + val multiplier = if ( causedByWheel ) 1 else - 1 <nl> manipulator . dolly ( multiplier * mouseWheelEvent . preciseWheelRotation . toFloat ( ) / <nl> previewMeshPanel . width , DOLLY_MULTIPLIER ) <nl> } <nl> new file mode 100644 <nl> index 000000000 . . 3ac3a5427 <nl> mmm / dev / null <nl> ppp b / tools / tungsten / core / test / com / google / android / filament / tungsten / ui / preview / IblTest . kt <nl> <nl> + / * <nl> + * Copyright ( C ) 2018 The Android Open Source Project <nl> + * <nl> + * Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + * you may not use this file except in compliance with the License . <nl> + * You may obtain a copy of the License at <nl> + * <nl> + * http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + * <nl> + * Unless required by applicable law or agreed to in writing , software <nl> + * distributed under the License is distributed on an " AS IS " BASIS , <nl> + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + * See the License for the specific language governing permissions and <nl> + * limitations under the License . <nl> + * / <nl> + <nl> + package com . google . android . filament . tungsten . ui . preview <nl> + <nl> + import org . junit . Assert . assertArrayEquals <nl> + import org . junit . Test <nl> + <nl> + class IblTest { <nl> + <nl> + @ Test <nl> + fun ` parseSphereHarmonics parses single row ` ( ) { <nl> + val result = parseSphereHarmonics ( <nl> + " ( 0 . 001 , - 0 . 003 , - 0 . 005 ) ; / / L20 , irradiance , pre - scaled base \ n " ) <nl> + val expected = floatArrayOf ( 0 . 001f , - 0 . 003f , - 0 . 005f ) <nl> + assertArrayEquals ( expected , result , 0 . 0f ) <nl> + } <nl> + <nl> + @ Test <nl> + fun ` parseSphereHarmonics parses multiple rows ` ( ) { <nl> + val result = parseSphereHarmonics ( <nl> + " ( - 0 . 2 , - 0 . 24 , - 0 . 24 ) ; / / L2 - 2 , irradiance , pre - scaled base \ n " + <nl> + " ( 0 . 05 , 0 . 06 , 0 . 06 ) ; / / L2 - 1 , irradiance , pre - scaled base \ n " ) <nl> + val expected = floatArrayOf ( - 0 . 2f , - 0 . 24f , - 0 . 24f , 0 . 05f , 0 . 06f , 0 . 06f ) <nl> + assertArrayEquals ( expected , result , 0 . 0f ) <nl> + } <nl> + } <nl> \ No newline at end of file <nl> new file mode 100644 <nl> index 000000000 . . 9d3b684c2 <nl> mmm / dev / null <nl> ppp b / tools / tungsten / gradle . properties <nl> @ @ - 0 , 0 + 1 @ @ <nl> + filament_tools_dir = . . / . . / . . 
/ dist <nl> | Tungsten : add IBL to the preview panel ( ) | google/filament | 29e891a7b1755ebac4e853cb79948ab31d8207a4 | 2018-09-19T18:16:58Z |
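Editor's note: of the Tungsten changes above, the most self-contained piece is parseSphereHarmonics, which pulls RGB spherical-harmonics triples out of cmgen's sh.txt. A rough C++ equivalent of that parsing step; the "( r , g , b ) ;" line shape is taken from the diff's unit tests, and sscanf here stands in for the Kotlin regex.

#include <cstdio>
#include <sstream>
#include <string>
#include <vector>

// Parse lines like "( 0.001, -0.003, -0.005 ); // L20 ..." into a flat
// float array, mirroring parseSphereHarmonics in the diff above.
std::vector<float> parseSphereHarmonics(const std::string& text) {
  std::vector<float> out;
  std::istringstream stream(text);
  std::string line;
  while (std::getline(stream, line)) {
    float r, g, b;
    if (std::sscanf(line.c_str(), " ( %f , %f , %f )", &r, &g, &b) == 3) {
      out.push_back(r);
      out.push_back(g);
      out.push_back(b);
    }
  }
  return out;
}

Lines that do not match the triple pattern are simply skipped, which matches the Kotlin version's fold-over-lines behavior.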
mmm a / xbmc / cores / VideoRenderers / WinRenderer . cpp <nl> ppp b / xbmc / cores / VideoRenderers / WinRenderer . cpp <nl> bool CWinRenderer : : Configure ( unsigned int width , unsigned int height , unsigned i <nl> / / need to recreate textures <nl> m_NumYV12Buffers = 0 ; <nl> m_iYV12RenderBuffer = 0 ; <nl> + / / reinitialize the filters / shaders <nl> + m_bFilterInitialized = false ; <nl> } <nl> <nl> m_fps = fps ; <nl> | [ WIN32 ] fixed : reinitialize shaders and textures when video size changes . Fixes | xbmc/xbmc | 13e0b0e6a83e2bbf07fd0b0c1864f88aa637e870 | 2011-03-08T02:15:21Z |
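Editor's note: the fix above is the invalidate-on-reconfigure pattern; when Configure detects a new video size it clears m_bFilterInitialized so shaders and textures are rebuilt lazily for the new dimensions. A generic sketch with illustrative names, not the actual CWinRenderer interface.

class Renderer {
 public:
  void configure(int width, int height) {
    if (width != width_ || height != height_) {
      width_ = width;
      height_ = height;
      filterInitialized_ = false;  // force shader/filter rebuild
    }
  }

  void render() {
    if (!filterInitialized_) {
      initFilters();               // recreated with the new dimensions
      filterInitialized_ = true;
    }
    // ... draw ...
  }

 private:
  void initFilters() {}
  int width_ = 0, height_ = 0;
  bool filterInitialized_ = false;
};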
mmm a / modules / prediction / common / BUILD <nl> ppp b / modules / prediction / common / BUILD <nl> cc_library ( <nl> hdrs = [ " prediction_thread_pool . h " ] , <nl> linkopts = [ <nl> " - lboost_thread " , <nl> - " - lboost_system " , <nl> ] , <nl> deps = [ <nl> " / / cyber / common " , <nl> + " / / cyber / base " , <nl> ] , <nl> ) <nl> <nl> cc_test ( <nl> name = " prediction_thread_pool_test " , <nl> size = " small " , <nl> srcs = [ " prediction_thread_pool_test . cc " ] , <nl> + linkopts = [ <nl> + " - lboost_system " , <nl> + ] , <nl> deps = [ <nl> " : prediction_thread_pool " , <nl> " @ gtest / / : main " , <nl> mmm a / modules / prediction / common / prediction_thread_pool . cc <nl> ppp b / modules / prediction / common / prediction_thread_pool . cc <nl> <nl> <nl> # include " modules / prediction / common / prediction_thread_pool . h " <nl> <nl> - # include " boost / preprocessor / repeat . hpp " <nl> - <nl> namespace apollo { <nl> namespace prediction { <nl> <nl> thread_local int PredictionThreadPool : : s_thread_pool_level = 0 ; <nl> std : : vector < int > BaseThreadPool : : THREAD_POOL_CAPACITY = { 10 , 10 , 10 } ; <nl> <nl> - BaseThreadPool : : BaseThreadPool ( int thread_num , int next_thread_pool_level ) <nl> - : work_ ( io_service_ ) { <nl> + BaseThreadPool : : BaseThreadPool ( <nl> + int thread_num , int next_thread_pool_level ) : stopped_ ( false ) { <nl> + if ( ! task_queue_ . Init ( thread_num , <nl> + new apollo : : cyber : : base : : BlockWaitStrategy ( ) ) ) { <nl> + throw std : : runtime_error ( " Task queue init failed . " ) ; <nl> + } <nl> for ( int i = 0 ; i < thread_num ; + + i ) { <nl> - thread_group_ . create_thread ( [ this , next_thread_pool_level , i ] { <nl> + workers_ . emplace_back ( [ this , next_thread_pool_level , i ] { <nl> PredictionThreadPool : : s_thread_pool_level = next_thread_pool_level ; <nl> - this - > io_service_ . run ( ) ; <nl> + while ( ! stopped_ ) { <nl> + std : : function < void ( ) > task ; <nl> + if ( task_queue_ . WaitDequeue ( & task ) ) { <nl> + task ( ) ; <nl> + } <nl> + } <nl> } ) ; <nl> } <nl> } <nl> <nl> void BaseThreadPool : : Stop ( ) { <nl> - io_service_ . stop ( ) ; <nl> - thread_group_ . join_all ( ) ; <nl> + task_queue_ . BreakAllWait ( ) ; <nl> + for ( std : : thread & worker : workers_ ) { <nl> + worker . join ( ) ; <nl> + } <nl> stopped_ = true ; <nl> } <nl> <nl> BaseThreadPool : : ~ BaseThreadPool ( ) { <nl> - if ( ! stopped_ ) { <nl> - try { <nl> - Stop ( ) ; <nl> - } catch ( std : : exception & e ) { <nl> - AERROR < < " Stop thread pool failed . " < < e . what ( ) ; <nl> - } <nl> + if ( stopped_ . exchange ( true ) ) { <nl> + return ; <nl> + } <nl> + task_queue_ . BreakAllWait ( ) ; <nl> + for ( std : : thread & worker : workers_ ) { <nl> + worker . join ( ) ; <nl> } <nl> } <nl> <nl> mmm a / modules / prediction / common / prediction_thread_pool . h <nl> ppp b / modules / prediction / common / prediction_thread_pool . h <nl> <nl> # include < utility > <nl> # include < vector > <nl> <nl> - # include " boost / asio . hpp " <nl> # include " boost / thread . hpp " <nl> <nl> + # include " cyber / base / bounded_queue . h " <nl> # include " cyber / common / log . h " <nl> <nl> namespace apollo { <nl> class BaseThreadPool { <nl> std : : future < ReturnType > returned_future = task - > get_future ( ) ; <nl> <nl> / / Note : variables eg . ` task ` must be copied here because of the lifetime <nl> - io_service_ . 
post ( [ = ] { ( * task ) ( ) ; } ) ; <nl> + if ( stopped_ ) { <nl> + return std : : future < ReturnType > ( ) ; <nl> + } <nl> + task_queue_ . Enqueue ( [ task ] ( ) { ( * task ) ( ) ; } ) ; <nl> return returned_future ; <nl> } <nl> <nl> static std : : vector < int > THREAD_POOL_CAPACITY ; <nl> <nl> private : <nl> - boost : : thread_group thread_group_ ; <nl> - boost : : asio : : io_service io_service_ ; <nl> - boost : : asio : : io_service : : work work_ ; <nl> - bool stopped_ = false ; <nl> + std : : vector < std : : thread > workers_ ; <nl> + apollo : : cyber : : base : : BoundedQueue < std : : function < void ( ) > > task_queue_ ; <nl> + std : : atomic_bool stopped_ ; <nl> } ; <nl> <nl> template < int LEVEL > <nl> mmm a / modules / prediction / common / prediction_thread_pool_test . cc <nl> ppp b / modules / prediction / common / prediction_thread_pool_test . cc <nl> <nl> namespace apollo { <nl> namespace prediction { <nl> <nl> - TEST ( PredictionThreadPoolTest , post_future ) { <nl> - BaseThreadPool pool ( 5 , 0 ) ; <nl> - int n = 1 ; <nl> - std : : future < int > r1 = pool . Post ( [ & ] { return n ; } ) ; <nl> - std : : this_thread : : sleep_for ( std : : chrono : : microseconds ( 1000 ) ) ; <nl> - <nl> - std : : future < int > r2 = pool . Post ( [ & ] { <nl> - std : : this_thread : : sleep_for ( std : : chrono : : microseconds ( 1000 ) ) ; <nl> - return n ; <nl> - } ) ; <nl> - <nl> - n = 2 ; <nl> - r1 . get ( ) ; <nl> - EXPECT_EQ ( 2 , r2 . get ( ) ) ; <nl> - } <nl> - <nl> - TEST ( PredictionThreadPoolTest , for_each ) { <nl> - BaseThreadPool pool ( 5 , 0 ) ; <nl> - std : : vector < int > expect = { 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 } ; <nl> - std : : vector < int > real = { 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 } ; <nl> - <nl> - auto incr = [ ] ( int & input ) { + + input ; } ; <nl> - <nl> - std : : for_each ( expect . begin ( ) , expect . end ( ) , incr ) ; <nl> - pool . ForEach ( real . begin ( ) , real . end ( ) , incr ) ; <nl> - <nl> - EXPECT_EQ ( expect , real ) ; <nl> - } <nl> - <nl> TEST ( PredictionThreadPoolTest , global_for_each ) { <nl> std : : vector < int > expect = { 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 } ; <nl> std : : vector < int > real = { 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 } ; <nl> TEST ( PredictionThreadPoolTest , global_for_each ) { <nl> EXPECT_EQ ( expect , real ) ; <nl> } <nl> <nl> + / * TODO ( kechxu ) uncomment this when deadlock issue is fixed <nl> TEST ( PredictionThreadPoolTest , avoid_deadlock ) { <nl> std : : vector < int > expect = { 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11 , 12 , 13 } ; <nl> std : : vector < int > real = { 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11 , 12 , 13 } ; <nl> TEST ( PredictionThreadPoolTest , avoid_deadlock ) { <nl> <nl> EXPECT_EQ ( expect , real ) ; <nl> } <nl> + * / <nl> <nl> } / / namespace prediction <nl> } / / namespace apollo <nl> | Prediction : use fewer third - party dependencies to avoid TLS issues | ApolloAuto/apollo | 08caa14a71e15e30c7f6768e052cad0e74ab9e06 | 2019-04-22T21:52:22Z |
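Editor's note: the Apollo change above swaps boost::asio's io_service for explicit worker threads draining cyber's BoundedQueue, while Post keeps handing back a std::future via std::packaged_task. A self-contained sketch of that Post pattern; a mutex-guarded deque stands in for BoundedQueue, and the class below is illustrative rather than Apollo's code.

#include <condition_variable>
#include <deque>
#include <functional>
#include <future>
#include <mutex>
#include <thread>
#include <vector>

class ThreadPool {
 public:
  explicit ThreadPool(int n) {
    for (int i = 0; i < n; ++i)
      workers_.emplace_back([this] {
        while (true) {
          std::function<void()> task;
          {
            std::unique_lock<std::mutex> lock(mu_);
            cv_.wait(lock, [this] { return stopped_ || !tasks_.empty(); });
            if (stopped_ && tasks_.empty()) return;
            task = std::move(tasks_.front());
            tasks_.pop_front();
          }
          task();
        }
      });
  }

  // Wrap the callable in a packaged_task so the caller gets a future,
  // mirroring Post() in the diff above.
  template <typename F>
  auto Post(F f) -> std::future<decltype(f())> {
    auto task =
        std::make_shared<std::packaged_task<decltype(f())()>>(std::move(f));
    {
      std::lock_guard<std::mutex> lock(mu_);
      tasks_.emplace_back([task] { (*task)(); });
    }
    cv_.notify_one();
    return task->get_future();
  }

  ~ThreadPool() {
    { std::lock_guard<std::mutex> lock(mu_); stopped_ = true; }
    cv_.notify_all();
    for (auto& w : workers_) w.join();
  }

 private:
  std::vector<std::thread> workers_;
  std::deque<std::function<void()>> tasks_;
  std::mutex mu_;
  std::condition_variable cv_;
  bool stopped_ = false;
};

Usage: pool.Post([] { return 42; }).get() blocks until a worker has run the task and then yields 42.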
mmm a / src / mongo / db / ftdc / block_compressor . cpp <nl> ppp b / src / mongo / db / ftdc / block_compressor . cpp <nl> StatusWith < ConstDataRange > BlockCompressor : : compress ( ConstDataRange source ) { <nl> if ( err ! = Z_STREAM_END ) { <nl> ( void ) deflateEnd ( & stream ) ; <nl> <nl> - if ( err ! = Z_OK ) { <nl> - return { ErrorCodes : : ZLibError , str : : stream ( ) < < " deflate failed with " < < err } ; <nl> - } <nl> + return { ErrorCodes : : ZLibError , str : : stream ( ) < < " deflate failed with " < < err } ; <nl> } <nl> <nl> err = deflateEnd ( & stream ) ; <nl> | SERVER - 40142 Coverity analysis defect 112101 : Double free | mongodb/mongo | add02fb371a56a7802c31b643df5fd0c716f04c1 | 2019-03-22T19:53:43Z |
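Editor's note: the Coverity defect above was a double free. When deflate returned a non-Z_STREAM_END success code such as Z_OK, the old code ran deflateEnd on the failure path and then fell through to a second deflateEnd; the patch returns unconditionally after the first cleanup. The shape of the corrected flow, against zlib's real deflate/deflateEnd API but with MongoDB's Status plumbing simplified away:

#include <zlib.h>

// Sketch only: stream setup (deflateInit, next_in/next_out) omitted.
int finishCompression(z_stream* stream) {
  int err = deflate(stream, Z_FINISH);
  if (err != Z_STREAM_END) {
    (void)deflateEnd(stream);  // clean up once on the failure path...
    return err;                // ...and return unconditionally, so the
                               // success-path deflateEnd below cannot
                               // run a second time on the same stream
  }
  return deflateEnd(stream);   // success path cleans up exactly once
}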
mmm a / test / cctest / test - assembler - mips64 . cc <nl> ppp b / test / cctest / test - assembler - mips64 . cc <nl> TEST ( jump_tables1 ) { <nl> <nl> Label done ; <nl> { <nl> - __ BlockTrampolinePoolFor ( kNumCases * 2 + 7 ) ; <nl> + __ BlockTrampolinePoolFor ( kNumCases * 2 + 6 ) ; <nl> PredictableCodeSizeScope predictable ( <nl> - & assm , ( kNumCases * 2 + 7 ) * Assembler : : kInstrSize ) ; <nl> + & assm , ( kNumCases * 2 + 6 ) * Assembler : : kInstrSize ) ; <nl> Label here ; <nl> <nl> __ bal ( & here ) ; <nl> - __ nop ( ) ; <nl> + __ dsll ( at , a0 , 3 ) ; / / In delay slot . <nl> __ bind ( & here ) ; <nl> - __ dsll ( at , a0 , 3 ) ; <nl> __ daddu ( at , at , ra ) ; <nl> - __ ld ( at , MemOperand ( at , 5 * Assembler : : kInstrSize ) ) ; <nl> + __ ld ( at , MemOperand ( at , 4 * Assembler : : kInstrSize ) ) ; <nl> __ jr ( at ) ; <nl> __ nop ( ) ; <nl> for ( int i = 0 ; i < kNumCases ; + + i ) { <nl> TEST ( jump_tables2 ) { <nl> __ Align ( 8 ) ; <nl> __ bind ( & dispatch ) ; <nl> { <nl> - __ BlockTrampolinePoolFor ( kNumCases * 2 + 7 ) ; <nl> + __ BlockTrampolinePoolFor ( kNumCases * 2 + 6 ) ; <nl> PredictableCodeSizeScope predictable ( <nl> - & assm , ( kNumCases * 2 + 7 ) * Assembler : : kInstrSize ) ; <nl> + & assm , ( kNumCases * 2 + 6 ) * Assembler : : kInstrSize ) ; <nl> Label here ; <nl> <nl> __ bal ( & here ) ; <nl> - __ nop ( ) ; <nl> + __ dsll ( at , a0 , 3 ) ; / / In delay slot . <nl> __ bind ( & here ) ; <nl> - __ dsll ( at , a0 , 3 ) ; <nl> __ daddu ( at , at , ra ) ; <nl> - __ ld ( at , MemOperand ( at , 5 * Assembler : : kInstrSize ) ) ; <nl> + __ ld ( at , MemOperand ( at , 4 * Assembler : : kInstrSize ) ) ; <nl> __ jr ( at ) ; <nl> __ nop ( ) ; <nl> for ( int i = 0 ; i < kNumCases ; + + i ) { <nl> TEST ( jump_tables3 ) { <nl> __ Align ( 8 ) ; <nl> __ bind ( & dispatch ) ; <nl> { <nl> - __ BlockTrampolinePoolFor ( kNumCases * 2 + 7 ) ; <nl> + __ BlockTrampolinePoolFor ( kNumCases * 2 + 6 ) ; <nl> PredictableCodeSizeScope predictable ( <nl> - & assm , ( kNumCases * 2 + 7 ) * Assembler : : kInstrSize ) ; <nl> + & assm , ( kNumCases * 2 + 6 ) * Assembler : : kInstrSize ) ; <nl> Label here ; <nl> <nl> __ bal ( & here ) ; <nl> - __ nop ( ) ; <nl> + __ dsll ( at , a0 , 3 ) ; / / In delay slot . <nl> __ bind ( & here ) ; <nl> - __ dsll ( at , a0 , 3 ) ; <nl> __ daddu ( at , at , ra ) ; <nl> - __ ld ( at , MemOperand ( at , 5 * Assembler : : kInstrSize ) ) ; <nl> + __ ld ( at , MemOperand ( at , 4 * Assembler : : kInstrSize ) ) ; <nl> __ jr ( at ) ; <nl> __ nop ( ) ; <nl> for ( int i = 0 ; i < kNumCases ; + + i ) { <nl> mmm a / test / cctest / test - macro - assembler - mips . cc <nl> ppp b / test / cctest / test - macro - assembler - mips . cc <nl> TEST ( jump_tables5 ) { <nl> Label here ; <nl> <nl> __ bal ( & here ) ; <nl> - __ sll ( at , a0 , 3 ) ; / / In delay slot . <nl> + __ sll ( at , a0 , 2 ) ; / / In delay slot . 
<nl> __ bind ( & here ) ; <nl> __ addu ( at , at , ra ) ; <nl> __ lw ( at , MemOperand ( at , 6 * Assembler : : kInstrSize ) ) ; <nl> TEST ( jump_tables5 ) { <nl> # endif <nl> F1 f = FUNCTION_CAST < F1 > ( code - > entry ( ) ) ; <nl> for ( int i = 0 ; i < kNumCases ; + + i ) { <nl> - int64_t res = reinterpret_cast < int64_t > ( <nl> + int32_t res = reinterpret_cast < int32_t > ( <nl> CALL_GENERATED_CODE ( isolate , f , i , 0 , 0 , 0 , 0 ) ) ; <nl> - : : printf ( " f ( % d ) = % " PRId64 " \ n " , i , res ) ; <nl> + : : printf ( " f ( % d ) = % d \ n " , i , res ) ; <nl> CHECK_EQ ( values [ i ] , res ) ; <nl> } <nl> } <nl> mmm a / test / cctest / test - macro - assembler - mips64 . cc <nl> ppp b / test / cctest / test - macro - assembler - mips64 . cc <nl> TEST ( jump_tables5 ) { <nl> __ jalr ( at ) ; <nl> __ nop ( ) ; / / Branch delay slot nop . <nl> __ bc ( & done ) ; <nl> + / / A nop instruction must be generated by the forbidden slot guard <nl> + / / ( Assembler : : dd ( Label * ) ) so the first label goes to an 8 bytes aligned <nl> + / / location . <nl> for ( int i = 0 ; i < kNumCases ; + + i ) { <nl> __ dd ( & labels [ i ] ) ; <nl> } <nl> | MIPS : Fix ' MIPS : Fix dd ( ) implementations for compact branches . ' | v8/v8 | 0830ac7cc303e09940a474dab50944c114732afc | 2016-01-13T15:16:31Z |
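Editor's note: in the MIPS test fix above, the dsll that scales the case index moves into bal's delay slot, so the dispatch sequence shrinks by one instruction (kNumCases * 2 + 7 becomes + 6) and the table load offset drops from 5 to 4 instruction sizes. The delay-slot mechanics have no C++ analogue, but the indexed-table dispatch being tested does; a toy version:

#include <cstdio>

int caseA() { return 10; }
int caseB() { return 20; }
int caseC() { return 30; }

// The assembler computes entry = ra + (index << scale) + fixed_offset
// and jumps through it; in C++ the same shape is a bounds-checked
// table of function pointers.
int dispatch(unsigned index) {
  static int (*const table[])() = {caseA, caseB, caseC};
  if (index >= sizeof(table) / sizeof(table[0])) return -1;
  return table[index]();
}

int main() { std::printf("%d\n", dispatch(1)); }  // prints 20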
mmm a / src / wasm / decoder . h <nl> ppp b / src / wasm / decoder . h <nl> class Decoder { <nl> } <nl> DCHECK_LE ( ptr - ( base + offset ) , kMaxDiff ) ; <nl> * length = static_cast < int > ( ptr - ( base + offset ) ) ; <nl> - if ( ptr = = end & & ( b & 0x80 ) ) { <nl> - error ( base , ptr , msg ) ; <nl> - return 0 ; <nl> + if ( ptr = = end ) { <nl> + if ( * length = = kMaxDiff & & ( b & 0xF0 ) ! = 0 ) { <nl> + error ( base , ptr , " extra bits in LEB128 " ) ; <nl> + return 0 ; <nl> + } <nl> + if ( ( b & 0x80 ) ! = 0 ) { <nl> + error ( base , ptr , msg ) ; <nl> + return 0 ; <nl> + } <nl> } <nl> return result ; <nl> } <nl> <nl> + / / Reads a variable - length signed integer ( little endian ) . <nl> + int32_t checked_read_i32v ( const byte * base , int offset , int * length , <nl> + const char * msg = " expected SLEB128 " ) { <nl> + uint32_t result = checked_read_u32v ( base , offset , length , msg ) ; <nl> + if ( * length = = 5 ) return bit_cast < int32_t > ( result ) ; <nl> + if ( * length > 0 ) { <nl> + int shift = 32 - 7 * * length ; <nl> + return bit_cast < int32_t > ( result < < shift ) > > shift ; <nl> + } <nl> + return 0 ; <nl> + } <nl> + <nl> / / Reads a single 16 - bit unsigned integer ( little endian ) . <nl> inline uint16_t read_u16 ( const byte * ptr ) { <nl> DCHECK ( ptr > = start_ & & ( ptr + 2 ) < = end_ ) ; <nl> class Decoder { <nl> } <nl> } <nl> <nl> - bool RangeOk ( const byte * pc , int length ) { <nl> - if ( pc < start_ | | pc_ > = limit_ ) return false ; <nl> - if ( ( pc + length ) > = limit_ ) return false ; <nl> - return true ; <nl> - } <nl> - <nl> void error ( const char * msg ) { error ( pc_ , nullptr , msg ) ; } <nl> <nl> void error ( const byte * pc , const char * msg ) { error ( pc , nullptr , msg ) ; } <nl> class Decoder { <nl> result . start = start_ ; <nl> result . error_pc = error_pc_ ; <nl> result . error_pt = error_pt_ ; <nl> - result . error_msg = error_msg_ ; <nl> - error_msg_ . Reset ( nullptr ) ; <nl> + / / transfer ownership of the error to the result . <nl> + result . error_msg . Reset ( error_msg_ . Detach ( ) ) ; <nl> } else { <nl> result . error_code = kSuccess ; <nl> } <nl> class Decoder { <nl> bool ok ( ) const { return error_pc_ = = nullptr ; } <nl> bool failed ( ) const { return error_pc_ ! = nullptr ; } <nl> <nl> + const byte * start ( ) { return start_ ; } <nl> + const byte * pc ( ) { return pc_ ; } <nl> + <nl> protected : <nl> const byte * start_ ; <nl> const byte * pc_ ; <nl> mmm a / src / wasm / wasm - macro - gen . h <nl> ppp b / src / wasm / wasm - macro - gen . 
h <nl> <nl> # define FUNC_INDEX ( v ) U16_LE ( v ) <nl> # define NAME_OFFSET ( v ) U32_LE ( v ) <nl> <nl> + # define MASK_7 ( ( 1 < < 7 ) - 1 ) <nl> + # define MASK_14 ( ( 1 < < 14 ) - 1 ) <nl> + # define MASK_21 ( ( 1 < < 21 ) - 1 ) <nl> + # define MASK_28 ( ( 1 < < 28 ) - 1 ) <nl> + <nl> + # define U32V_1 ( x ) static_cast < byte > ( x & MASK_7 ) <nl> + # define U32V_2 ( x ) \ <nl> + static_cast < byte > ( ( x & MASK_7 ) | 0x80 ) , static_cast < byte > ( ( x > > 7 ) & MASK_7 ) <nl> + # define U32V_3 ( x ) \ <nl> + static_cast < byte > ( ( x & MASK_7 ) | 0x80 ) , \ <nl> + static_cast < byte > ( ( ( x > > 7 ) & MASK_7 ) | 0x80 ) , \ <nl> + static_cast < byte > ( ( x > > 14 ) & MASK_7 ) <nl> + # define U32V_4 ( x ) \ <nl> + static_cast < byte > ( ( x & MASK_7 ) | 0x80 ) , \ <nl> + static_cast < byte > ( ( ( x > > 7 ) & MASK_7 ) | 0x80 ) , \ <nl> + static_cast < byte > ( ( ( x > > 14 ) & MASK_7 ) | 0x80 ) , \ <nl> + static_cast < byte > ( ( x > > 21 ) & MASK_7 ) <nl> + # define U32V_5 ( x ) \ <nl> + static_cast < byte > ( ( x & MASK_7 ) | 0x80 ) , \ <nl> + static_cast < byte > ( ( ( x > > 7 ) & MASK_7 ) | 0x80 ) , \ <nl> + static_cast < byte > ( ( ( x > > 14 ) & MASK_7 ) | 0x80 ) , \ <nl> + static_cast < byte > ( ( ( x > > 21 ) & MASK_7 ) | 0x80 ) , \ <nl> + static_cast < byte > ( ( x > > 28 ) & 0xF ) <nl> + <nl> # endif / / V8_WASM_MACRO_GEN_H_ <nl> mmm a / test / unittests / unittests . gyp <nl> ppp b / test / unittests / unittests . gyp <nl> <nl> ' test - utils . h ' , <nl> ' test - utils . cc ' , <nl> ' wasm / ast - decoder - unittest . cc ' , <nl> + ' wasm / decoder - unittest . cc ' , <nl> ' wasm / encoder - unittest . cc ' , <nl> ' wasm / loop - assignment - analysis - unittest . cc ' , <nl> ' wasm / module - decoder - unittest . cc ' , <nl> mmm a / test / unittests / wasm / ast - decoder - unittest . cc <nl> ppp b / test / unittests / wasm / ast - decoder - unittest . cc <nl> static const WasmOpcode kInt32BinopOpcodes [ ] = { <nl> Verify ( kSuccess , & env_v_i , code , code + sizeof ( code ) ) ; \ <nl> } while ( false ) <nl> <nl> - <nl> - class WasmDecoderTest : public TestWithZone { <nl> + class AstDecoderTest : public TestWithZone { <nl> public : <nl> - WasmDecoderTest ( ) : TestWithZone ( ) , sigs ( ) { <nl> + AstDecoderTest ( ) : TestWithZone ( ) , sigs ( ) { <nl> init_env ( & env_i_i , sigs . i_i ( ) ) ; <nl> init_env ( & env_v_v , sigs . v_v ( ) ) ; <nl> init_env ( & env_v_i , sigs . 
v_i ( ) ) ; <nl> static FunctionEnv CreateInt32FunctionEnv ( FunctionSig * sig , int count ) { <nl> return env ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , Int8Const ) { <nl> + TEST_F ( AstDecoderTest , Int8Const ) { <nl> byte code [ ] = { kExprI8Const , 0 } ; <nl> for ( int i = - 128 ; i < 128 ; i + + ) { <nl> code [ 1 ] = static_cast < byte > ( i ) ; <nl> TEST_F ( WasmDecoderTest , Int8Const ) { <nl> } <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , EmptyFunction ) { <nl> + TEST_F ( AstDecoderTest , EmptyFunction ) { <nl> byte code [ ] = { 0 } ; <nl> Verify ( kSuccess , & env_v_v , code , code ) ; <nl> Verify ( kError , & env_i_i , code , code ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , IncompleteIf1 ) { <nl> + TEST_F ( AstDecoderTest , IncompleteIf1 ) { <nl> byte code [ ] = { kExprIf } ; <nl> EXPECT_FAILURE ( & env_v_v , code ) ; <nl> EXPECT_FAILURE ( & env_i_i , code ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , IncompleteIf2 ) { <nl> + TEST_F ( AstDecoderTest , IncompleteIf2 ) { <nl> byte code [ ] = { kExprIf , kExprI8Const , 0 } ; <nl> EXPECT_FAILURE ( & env_v_v , code ) ; <nl> EXPECT_FAILURE ( & env_i_i , code ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , Int8Const_fallthru ) { <nl> + TEST_F ( AstDecoderTest , Int8Const_fallthru ) { <nl> byte code [ ] = { kExprI8Const , 0 , kExprI8Const , 1 } ; <nl> EXPECT_VERIFIES ( & env_i_i , code ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , Int32Const ) { <nl> + TEST_F ( AstDecoderTest , Int32Const ) { <nl> byte code [ ] = { kExprI32Const , 0 , 0 , 0 , 0 } ; <nl> int32_t * ptr = reinterpret_cast < int32_t * > ( code + 1 ) ; <nl> const int kInc = 4498211 ; <nl> TEST_F ( WasmDecoderTest , Int32Const ) { <nl> } <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , Int8Const_fallthru2 ) { <nl> + TEST_F ( AstDecoderTest , Int8Const_fallthru2 ) { <nl> byte code [ ] = { kExprI8Const , 0 , kExprI32Const , 1 , 2 , 3 , 4 } ; <nl> EXPECT_VERIFIES ( & env_i_i , code ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , Int64Const ) { <nl> + TEST_F ( AstDecoderTest , Int64Const ) { <nl> byte code [ ] = { kExprI64Const , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 } ; <nl> int64_t * ptr = reinterpret_cast < int64_t * > ( code + 1 ) ; <nl> const int kInc = 4498211 ; <nl> TEST_F ( WasmDecoderTest , Int64Const ) { <nl> } <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , Float32Const ) { <nl> + TEST_F ( AstDecoderTest , Float32Const ) { <nl> byte code [ ] = { kExprF32Const , 0 , 0 , 0 , 0 } ; <nl> float * ptr = reinterpret_cast < float * > ( code + 1 ) ; <nl> for ( int i = 0 ; i < 30 ; i + + ) { <nl> TEST_F ( WasmDecoderTest , Float32Const ) { <nl> } <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , Float64Const ) { <nl> + TEST_F ( AstDecoderTest , Float64Const ) { <nl> byte code [ ] = { kExprF64Const , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 } ; <nl> double * ptr = reinterpret_cast < double * > ( code + 1 ) ; <nl> for ( int i = 0 ; i < 30 ; i + + ) { <nl> TEST_F ( WasmDecoderTest , Float64Const ) { <nl> } <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , Int32Const_off_end ) { <nl> + TEST_F ( AstDecoderTest , Int32Const_off_end ) { <nl> byte code [ ] = { kExprI32Const , 0xaa , 0xbb , 0xcc , 0x44 } ; <nl> <nl> for ( int size = 1 ; size < = 4 ; size + + ) { <nl> TEST_F ( WasmDecoderTest , Int32Const_off_end ) { <nl> } <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , GetLocal0_param ) { <nl> + TEST_F ( AstDecoderTest , GetLocal0_param ) { <nl> EXPECT_VERIFIES ( & env_i_i , kCodeGetLocal0 ) ; <nl> } <nl> <nl> - 
<nl> - TEST_F ( WasmDecoderTest , GetLocal0_local ) { <nl> + TEST_F ( AstDecoderTest , GetLocal0_local ) { <nl> FunctionEnv env ; <nl> init_env ( & env , sigs . i_v ( ) ) ; <nl> env . AddLocals ( kAstI32 , 1 ) ; <nl> EXPECT_VERIFIES ( & env , kCodeGetLocal0 ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , GetLocal0_param_n ) { <nl> + TEST_F ( AstDecoderTest , GetLocal0_param_n ) { <nl> FunctionSig * array [ ] = { sigs . i_i ( ) , sigs . i_ii ( ) , sigs . i_iii ( ) } ; <nl> <nl> for ( size_t i = 0 ; i < arraysize ( array ) ; i + + ) { <nl> TEST_F ( WasmDecoderTest , GetLocal0_param_n ) { <nl> } <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , GetLocalN_local ) { <nl> + TEST_F ( AstDecoderTest , GetLocalN_local ) { <nl> for ( byte i = 1 ; i < 8 ; i + + ) { <nl> FunctionEnv env = CreateInt32FunctionEnv ( sigs . i_v ( ) , i ) ; <nl> for ( byte j = 0 ; j < i ; j + + ) { <nl> TEST_F ( WasmDecoderTest , GetLocalN_local ) { <nl> } <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , GetLocal0_fail_no_params ) { <nl> + TEST_F ( AstDecoderTest , GetLocal0_fail_no_params ) { <nl> FunctionEnv env = CreateInt32FunctionEnv ( sigs . i_v ( ) , 0 ) ; <nl> <nl> EXPECT_FAILURE ( & env , kCodeGetLocal0 ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , GetLocal1_fail_no_locals ) { <nl> + TEST_F ( AstDecoderTest , GetLocal1_fail_no_locals ) { <nl> EXPECT_FAILURE ( & env_i_i , kCodeGetLocal1 ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , GetLocal_off_end ) { <nl> + TEST_F ( AstDecoderTest , GetLocal_off_end ) { <nl> static const byte code [ ] = { kExprGetLocal } ; <nl> EXPECT_FAILURE ( & env_i_i , code ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , GetLocal_varint ) { <nl> + TEST_F ( AstDecoderTest , GetLocal_varint ) { <nl> env_i_i . local_i32_count = 1000000000 ; <nl> env_i_i . total_locals + = 1000000000 ; <nl> <nl> TEST_F ( WasmDecoderTest , GetLocal_varint ) { <nl> } <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , Binops_off_end ) { <nl> + TEST_F ( AstDecoderTest , Binops_off_end ) { <nl> byte code1 [ ] = { 0 } ; / / [ opcode ] <nl> for ( size_t i = 0 ; i < arraysize ( kInt32BinopOpcodes ) ; i + + ) { <nl> code1 [ 0 ] = kInt32BinopOpcodes [ i ] ; <nl> TEST_F ( WasmDecoderTest , Binops_off_end ) { <nl> / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> / / = = Statements <nl> / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> - TEST_F ( WasmDecoderTest , Nop ) { <nl> + TEST_F ( AstDecoderTest , Nop ) { <nl> static const byte code [ ] = { kExprNop } ; <nl> EXPECT_VERIFIES ( & env_v_v , code ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , SetLocal0_param ) { <nl> + TEST_F ( AstDecoderTest , SetLocal0_param ) { <nl> static const byte code [ ] = { kExprSetLocal , 0 , kExprI8Const , 0 } ; <nl> EXPECT_VERIFIES ( & env_i_i , code ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , SetLocal0_local ) { <nl> + TEST_F ( AstDecoderTest , SetLocal0_local ) { <nl> byte code [ ] = { kExprSetLocal , 0 , kExprI8Const , 0 } ; <nl> FunctionEnv env = CreateInt32FunctionEnv ( sigs . i_v ( ) , 1 ) ; <nl> <nl> EXPECT_VERIFIES ( & env , code ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , SetLocalN_local ) { <nl> + TEST_F ( AstDecoderTest , SetLocalN_local ) { <nl> for ( byte i = 1 ; i < 8 ; i + + ) { <nl> FunctionEnv env = CreateInt32FunctionEnv ( sigs . 
i_v ( ) , i ) ; <nl> for ( byte j = 0 ; j < i ; j + + ) { <nl> TEST_F ( WasmDecoderTest , SetLocalN_local ) { <nl> } <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , Block0 ) { <nl> + TEST_F ( AstDecoderTest , Block0 ) { <nl> static const byte code [ ] = { kExprBlock , 0 } ; <nl> EXPECT_VERIFIES ( & env_v_v , code ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , Block0_fallthru1 ) { <nl> + TEST_F ( AstDecoderTest , Block0_fallthru1 ) { <nl> static const byte code [ ] = { kExprBlock , 0 , kExprBlock , 0 } ; <nl> EXPECT_VERIFIES ( & env_v_v , code ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , Block1 ) { <nl> + TEST_F ( AstDecoderTest , Block1 ) { <nl> static const byte code [ ] = { kExprBlock , 1 , kExprSetLocal , 0 , kExprI8Const , 0 } ; <nl> EXPECT_VERIFIES ( & env_i_i , code ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , Block0_fallthru2 ) { <nl> + TEST_F ( AstDecoderTest , Block0_fallthru2 ) { <nl> static const byte code [ ] = { kExprBlock , 0 , kExprSetLocal , 0 , kExprI8Const , 0 } ; <nl> EXPECT_VERIFIES ( & env_i_i , code ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , Block2 ) { <nl> + TEST_F ( AstDecoderTest , Block2 ) { <nl> static const byte code [ ] = { kExprBlock , 2 , / / - - <nl> kExprSetLocal , 0 , kExprI8Const , 0 , / / - - <nl> kExprSetLocal , 0 , kExprI8Const , 0 } ; / / - - <nl> EXPECT_VERIFIES ( & env_i_i , code ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , Block2_fallthru ) { <nl> + TEST_F ( AstDecoderTest , Block2_fallthru ) { <nl> static const byte code [ ] = { kExprBlock , 2 , / / - - <nl> kExprSetLocal , 0 , kExprI8Const , 0 , / / - - <nl> kExprSetLocal , 0 , kExprI8Const , 0 , / / - - <nl> TEST_F ( WasmDecoderTest , Block2_fallthru ) { <nl> EXPECT_VERIFIES ( & env_i_i , code ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , BlockN ) { <nl> + TEST_F ( AstDecoderTest , BlockN ) { <nl> byte block [ ] = { kExprBlock , 2 } ; <nl> <nl> for ( size_t i = 0 ; i < 10 ; i + + ) { <nl> TEST_F ( WasmDecoderTest , BlockN ) { <nl> } <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , BlockN_off_end ) { <nl> + TEST_F ( AstDecoderTest , BlockN_off_end ) { <nl> for ( byte i = 2 ; i < 10 ; i + + ) { <nl> byte code [ ] = { kExprBlock , i , kExprNop } ; <nl> EXPECT_FAILURE ( & env_v_v , code ) ; <nl> } <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , Block1_break ) { <nl> + TEST_F ( AstDecoderTest , Block1_break ) { <nl> static const byte code [ ] = { kExprBlock , 1 , kExprBr , 0 , kExprNop } ; <nl> EXPECT_VERIFIES ( & env_v_v , code ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , Block2_break ) { <nl> + TEST_F ( AstDecoderTest , Block2_break ) { <nl> static const byte code [ ] = { kExprBlock , 2 , kExprNop , kExprBr , 0 , kExprNop } ; <nl> EXPECT_VERIFIES ( & env_v_v , code ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , Block1_continue ) { <nl> + TEST_F ( AstDecoderTest , Block1_continue ) { <nl> static const byte code [ ] = { kExprBlock , 1 , kExprBr , 1 , kExprNop } ; <nl> EXPECT_FAILURE ( & env_v_v , code ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , Block2_continue ) { <nl> + TEST_F ( AstDecoderTest , Block2_continue ) { <nl> static const byte code [ ] = { kExprBlock , 2 , kExprNop , kExprBr , 1 , kExprNop } ; <nl> EXPECT_FAILURE ( & env_v_v , code ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , ExprBlock0 ) { <nl> + TEST_F ( AstDecoderTest , ExprBlock0 ) { <nl> static const byte code [ ] = { kExprBlock , 0 } ; <nl> EXPECT_VERIFIES ( & env_v_v , code ) ; <nl> } <nl> <nl> - <nl> 
- TEST_F ( WasmDecoderTest , ExprBlock1a ) { <nl> + TEST_F ( AstDecoderTest , ExprBlock1a ) { <nl> static const byte code [ ] = { kExprBlock , 1 , kExprI8Const , 0 } ; <nl> EXPECT_VERIFIES ( & env_i_i , code ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , ExprBlock1b ) { <nl> + TEST_F ( AstDecoderTest , ExprBlock1b ) { <nl> static const byte code [ ] = { kExprBlock , 1 , kExprI8Const , 0 } ; <nl> EXPECT_FAILURE ( & env_f_ff , code ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , ExprBlock1c ) { <nl> + TEST_F ( AstDecoderTest , ExprBlock1c ) { <nl> static const byte code [ ] = { kExprBlock , 1 , kExprF32Const , 0 , 0 , 0 , 0 } ; <nl> EXPECT_VERIFIES ( & env_f_ff , code ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , IfEmpty ) { <nl> + TEST_F ( AstDecoderTest , IfEmpty ) { <nl> static const byte code [ ] = { kExprIf , kExprGetLocal , 0 , kExprNop } ; <nl> EXPECT_VERIFIES ( & env_v_i , code ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , IfSet ) { <nl> + TEST_F ( AstDecoderTest , IfSet ) { <nl> static const byte code [ ] = { kExprIfElse , kExprGetLocal , 0 , kExprSetLocal , <nl> 0 , kExprI8Const , 0 , kExprNop } ; <nl> EXPECT_VERIFIES ( & env_v_i , code ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , IfBlock1 ) { <nl> + TEST_F ( AstDecoderTest , IfBlock1 ) { <nl> static const byte code [ ] = { kExprIfElse , kExprGetLocal , 0 , kExprBlock , <nl> 1 , kExprSetLocal , 0 , kExprI8Const , <nl> 0 , kExprNop } ; <nl> EXPECT_VERIFIES ( & env_v_i , code ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , IfBlock2 ) { <nl> + TEST_F ( AstDecoderTest , IfBlock2 ) { <nl> static const byte code [ ] = { kExprIf , kExprGetLocal , 0 , kExprBlock , <nl> 2 , kExprSetLocal , 0 , kExprI8Const , <nl> 0 , kExprSetLocal , 0 , kExprI8Const , <nl> TEST_F ( WasmDecoderTest , IfBlock2 ) { <nl> EXPECT_VERIFIES ( & env_v_i , code ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , IfElseEmpty ) { <nl> + TEST_F ( AstDecoderTest , IfElseEmpty ) { <nl> static const byte code [ ] = { kExprIfElse , kExprGetLocal , 0 , kExprNop , <nl> kExprNop } ; <nl> EXPECT_VERIFIES ( & env_v_i , code ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , IfElseSet ) { <nl> + TEST_F ( AstDecoderTest , IfElseSet ) { <nl> static const byte code [ ] = { kExprIfElse , <nl> kExprGetLocal , <nl> 0 , / / - - <nl> TEST_F ( WasmDecoderTest , IfElseSet ) { <nl> EXPECT_VERIFIES ( & env_v_i , code ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , IfElseUnreachable ) { <nl> + TEST_F ( AstDecoderTest , IfElseUnreachable ) { <nl> static const byte code [ ] = { kExprIfElse , kExprI8Const , 0 , <nl> kExprUnreachable , kExprGetLocal , 0 } ; <nl> <nl> TEST_F ( WasmDecoderTest , IfElseUnreachable ) { <nl> } <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , Loop0 ) { <nl> + TEST_F ( AstDecoderTest , Loop0 ) { <nl> static const byte code [ ] = { kExprLoop , 0 } ; <nl> EXPECT_VERIFIES ( & env_v_v , code ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , Loop1 ) { <nl> + TEST_F ( AstDecoderTest , Loop1 ) { <nl> static const byte code [ ] = { kExprLoop , 1 , kExprSetLocal , 0 , kExprI8Const , 0 } ; <nl> EXPECT_VERIFIES ( & env_v_i , code ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , Loop2 ) { <nl> + TEST_F ( AstDecoderTest , Loop2 ) { <nl> static const byte code [ ] = { kExprLoop , 2 , / / - - <nl> kExprSetLocal , 0 , kExprI8Const , 0 , / / - - <nl> kExprSetLocal , 0 , kExprI8Const , 0 } ; / / - - <nl> EXPECT_VERIFIES ( & env_v_i , code ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( 
WasmDecoderTest , Loop1_continue ) { <nl> + TEST_F ( AstDecoderTest , Loop1_continue ) { <nl> static const byte code [ ] = { kExprLoop , 1 , kExprBr , 0 , kExprNop } ; <nl> EXPECT_VERIFIES ( & env_v_v , code ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , Loop1_break ) { <nl> + TEST_F ( AstDecoderTest , Loop1_break ) { <nl> static const byte code [ ] = { kExprLoop , 1 , kExprBr , 1 , kExprNop } ; <nl> EXPECT_VERIFIES ( & env_v_v , code ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , Loop2_continue ) { <nl> + TEST_F ( AstDecoderTest , Loop2_continue ) { <nl> static const byte code [ ] = { kExprLoop , 2 , / / - - <nl> kExprSetLocal , 0 , kExprI8Const , 0 , / / - - <nl> kExprBr , 0 , kExprNop } ; / / - - <nl> EXPECT_VERIFIES ( & env_v_i , code ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , Loop2_break ) { <nl> + TEST_F ( AstDecoderTest , Loop2_break ) { <nl> static const byte code [ ] = { kExprLoop , 2 , / / - - <nl> kExprSetLocal , 0 , kExprI8Const , 0 , / / - - <nl> kExprBr , 1 , kExprNop } ; / / - - <nl> EXPECT_VERIFIES ( & env_v_i , code ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , ExprLoop0 ) { <nl> + TEST_F ( AstDecoderTest , ExprLoop0 ) { <nl> static const byte code [ ] = { kExprLoop , 0 } ; <nl> EXPECT_VERIFIES ( & env_v_v , code ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , ExprLoop1a ) { <nl> + TEST_F ( AstDecoderTest , ExprLoop1a ) { <nl> static const byte code [ ] = { kExprLoop , 1 , kExprBr , 0 , kExprI8Const , 0 } ; <nl> EXPECT_VERIFIES ( & env_i_i , code ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , ExprLoop1b ) { <nl> + TEST_F ( AstDecoderTest , ExprLoop1b ) { <nl> static const byte code [ ] = { kExprLoop , 1 , kExprBr , 0 , kExprI8Const , 0 } ; <nl> EXPECT_VERIFIES ( & env_i_i , code ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , ExprLoop2_unreachable ) { <nl> + TEST_F ( AstDecoderTest , ExprLoop2_unreachable ) { <nl> static const byte code [ ] = { kExprLoop , 2 , kExprBr , 0 , <nl> kExprI8Const , 0 , kExprNop } ; <nl> EXPECT_VERIFIES ( & env_i_i , code ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , ReturnVoid1 ) { <nl> + TEST_F ( AstDecoderTest , ReturnVoid1 ) { <nl> static const byte code [ ] = { kExprNop } ; <nl> EXPECT_VERIFIES ( & env_v_v , code ) ; <nl> EXPECT_FAILURE ( & env_i_i , code ) ; <nl> EXPECT_FAILURE ( & env_i_f , code ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , ReturnVoid2 ) { <nl> + TEST_F ( AstDecoderTest , ReturnVoid2 ) { <nl> static const byte code [ ] = { kExprBlock , 1 , kExprBr , 0 , kExprNop } ; <nl> EXPECT_VERIFIES ( & env_v_v , code ) ; <nl> EXPECT_FAILURE ( & env_i_i , code ) ; <nl> EXPECT_FAILURE ( & env_i_f , code ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , ReturnVoid3 ) { <nl> + TEST_F ( AstDecoderTest , ReturnVoid3 ) { <nl> EXPECT_VERIFIES_INLINE ( & env_v_v , kExprI8Const , 0 ) ; <nl> EXPECT_VERIFIES_INLINE ( & env_v_v , kExprI32Const , 0 , 0 , 0 , 0 ) ; <nl> EXPECT_VERIFIES_INLINE ( & env_v_v , kExprI64Const , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ) ; <nl> TEST_F ( WasmDecoderTest , ReturnVoid3 ) { <nl> EXPECT_VERIFIES_INLINE ( & env_v_i , kExprGetLocal , 0 ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , Unreachable1 ) { <nl> + TEST_F ( AstDecoderTest , Unreachable1 ) { <nl> EXPECT_VERIFIES_INLINE ( & env_v_v , kExprUnreachable ) ; <nl> EXPECT_VERIFIES_INLINE ( & env_v_v , kExprUnreachable , kExprUnreachable ) ; <nl> EXPECT_VERIFIES_INLINE ( & env_v_v , WASM_BLOCK ( 2 , WASM_UNREACHABLE , WASM_ZERO ) ) ; <nl> TEST_F ( WasmDecoderTest , 
Unreachable1 ) { <nl> EXPECT_VERIFIES_INLINE ( & env_v_v , WASM_LOOP ( 2 , WASM_BR ( 0 ) , WASM_ZERO ) ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , Codeiness ) { <nl> + TEST_F ( AstDecoderTest , Codeiness ) { <nl> VERIFY ( kExprLoop , 2 , / / - - <nl> kExprSetLocal , 0 , kExprI8Const , 0 , / / - - <nl> kExprBr , 0 , kExprNop ) ; / / - - <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , ExprIf1 ) { <nl> + TEST_F ( AstDecoderTest , ExprIf1 ) { <nl> VERIFY ( kExprIf , kExprGetLocal , 0 , kExprI8Const , 0 , kExprI8Const , 1 ) ; <nl> VERIFY ( kExprIf , kExprGetLocal , 0 , kExprGetLocal , 0 , kExprGetLocal , 0 ) ; <nl> VERIFY ( kExprIf , kExprGetLocal , 0 , kExprI32Add , kExprGetLocal , 0 , <nl> kExprGetLocal , 0 , kExprI8Const , 1 ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , ExprIf_off_end ) { <nl> + TEST_F ( AstDecoderTest , ExprIf_off_end ) { <nl> static const byte kCode [ ] = { kExprIf , kExprGetLocal , 0 , kExprGetLocal , <nl> 0 , kExprGetLocal , 0 } ; <nl> for ( size_t len = 1 ; len < arraysize ( kCode ) ; len + + ) { <nl> TEST_F ( WasmDecoderTest , ExprIf_off_end ) { <nl> } <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , ExprIf_type ) { <nl> + TEST_F ( AstDecoderTest , ExprIf_type ) { <nl> { <nl> / / float | double ? 1 : 2 <nl> static const byte kCode [ ] = { kExprIfElse , kExprGetLocal , 0 , kExprI8Const , <nl> TEST_F ( WasmDecoderTest , ExprIf_type ) { <nl> } <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , Int64Local_param ) { <nl> + TEST_F ( AstDecoderTest , Int64Local_param ) { <nl> EXPECT_VERIFIES ( & env_l_l , kCodeGetLocal0 ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , Int64Locals ) { <nl> + TEST_F ( AstDecoderTest , Int64Locals ) { <nl> for ( byte i = 1 ; i < 8 ; i + + ) { <nl> FunctionEnv env ; <nl> init_env ( & env , sigs . l_v ( ) ) ; <nl> TEST_F ( WasmDecoderTest , Int64Locals ) { <nl> } <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , Int32Binops ) { <nl> + TEST_F ( AstDecoderTest , Int32Binops ) { <nl> TestBinop ( kExprI32Add , sigs . i_ii ( ) ) ; <nl> TestBinop ( kExprI32Sub , sigs . i_ii ( ) ) ; <nl> TestBinop ( kExprI32Mul , sigs . i_ii ( ) ) ; <nl> TEST_F ( WasmDecoderTest , Int32Binops ) { <nl> TestBinop ( kExprI32LeU , sigs . i_ii ( ) ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , DoubleBinops ) { <nl> + TEST_F ( AstDecoderTest , DoubleBinops ) { <nl> TestBinop ( kExprF64Add , sigs . d_dd ( ) ) ; <nl> TestBinop ( kExprF64Sub , sigs . d_dd ( ) ) ; <nl> TestBinop ( kExprF64Mul , sigs . d_dd ( ) ) ; <nl> TEST_F ( WasmDecoderTest , DoubleBinops ) { <nl> TestBinop ( kExprF64Le , sigs . i_dd ( ) ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , FloatBinops ) { <nl> + TEST_F ( AstDecoderTest , FloatBinops ) { <nl> TestBinop ( kExprF32Add , sigs . f_ff ( ) ) ; <nl> TestBinop ( kExprF32Sub , sigs . f_ff ( ) ) ; <nl> TestBinop ( kExprF32Mul , sigs . f_ff ( ) ) ; <nl> TEST_F ( WasmDecoderTest , FloatBinops ) { <nl> TestBinop ( kExprF32Le , sigs . 
i_ff ( ) ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , TypeConversions ) { <nl> + TEST_F ( AstDecoderTest , TypeConversions ) { <nl> TestUnop ( kExprI32SConvertF32 , kAstI32 , kAstF32 ) ; <nl> TestUnop ( kExprI32SConvertF64 , kAstI32 , kAstF64 ) ; <nl> TestUnop ( kExprI32UConvertF32 , kAstI32 , kAstF32 ) ; <nl> TEST_F ( WasmDecoderTest , TypeConversions ) { <nl> TestUnop ( kExprF32ConvertF64 , kAstF32 , kAstF64 ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , MacrosStmt ) { <nl> + TEST_F ( AstDecoderTest , MacrosStmt ) { <nl> VERIFY ( WASM_SET_LOCAL ( 0 , WASM_I32 ( 87348 ) ) ) ; <nl> VERIFY ( WASM_STORE_MEM ( MachineType : : Int32 ( ) , WASM_I8 ( 24 ) , WASM_I8 ( 40 ) ) ) ; <nl> VERIFY ( WASM_IF ( WASM_GET_LOCAL ( 0 ) , WASM_NOP ) ) ; <nl> TEST_F ( WasmDecoderTest , MacrosStmt ) { <nl> VERIFY ( WASM_LOOP ( 1 , WASM_CONTINUE ( 0 ) ) ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , MacrosBreak ) { <nl> + TEST_F ( AstDecoderTest , MacrosBreak ) { <nl> EXPECT_VERIFIES_INLINE ( & env_v_v , WASM_LOOP ( 1 , WASM_BREAK ( 0 ) ) ) ; <nl> <nl> EXPECT_VERIFIES_INLINE ( & env_i_i , WASM_LOOP ( 1 , WASM_BREAKV ( 0 , WASM_ZERO ) ) ) ; <nl> TEST_F ( WasmDecoderTest , MacrosBreak ) { <nl> WASM_LOOP ( 1 , WASM_BREAKV ( 0 , WASM_F64 ( 0 . 0 ) ) ) ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , MacrosContinue ) { <nl> + TEST_F ( AstDecoderTest , MacrosContinue ) { <nl> EXPECT_VERIFIES_INLINE ( & env_v_v , WASM_LOOP ( 1 , WASM_CONTINUE ( 0 ) ) ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , MacrosVariadic ) { <nl> + TEST_F ( AstDecoderTest , MacrosVariadic ) { <nl> VERIFY ( WASM_BLOCK ( 2 , WASM_NOP , WASM_NOP ) ) ; <nl> VERIFY ( WASM_BLOCK ( 3 , WASM_NOP , WASM_NOP , WASM_NOP ) ) ; <nl> VERIFY ( WASM_LOOP ( 2 , WASM_NOP , WASM_NOP ) ) ; <nl> VERIFY ( WASM_LOOP ( 3 , WASM_NOP , WASM_NOP , WASM_NOP ) ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , MacrosNestedBlocks ) { <nl> + TEST_F ( AstDecoderTest , MacrosNestedBlocks ) { <nl> VERIFY ( WASM_BLOCK ( 2 , WASM_NOP , WASM_BLOCK ( 2 , WASM_NOP , WASM_NOP ) ) ) ; <nl> VERIFY ( WASM_BLOCK ( 3 , WASM_NOP , / / - - <nl> WASM_BLOCK ( 2 , WASM_NOP , WASM_NOP ) , / / - - <nl> TEST_F ( WasmDecoderTest , MacrosNestedBlocks ) { <nl> VERIFY ( WASM_BLOCK ( 1 , WASM_BLOCK ( 1 , WASM_BLOCK ( 2 , WASM_NOP , WASM_NOP ) ) ) ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , MultipleReturn ) { <nl> + TEST_F ( AstDecoderTest , MultipleReturn ) { <nl> static LocalType kIntTypes5 [ ] = { kAstI32 , kAstI32 , kAstI32 , kAstI32 , kAstI32 } ; <nl> FunctionSig sig_ii_v ( 2 , 0 , kIntTypes5 ) ; <nl> FunctionEnv env_ii_v ; <nl> TEST_F ( WasmDecoderTest , MultipleReturn ) { <nl> EXPECT_FAILURE_INLINE ( & env_iii_v , WASM_RETURN ( WASM_ZERO , WASM_ONE ) ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , MultipleReturn_fallthru ) { <nl> + TEST_F ( AstDecoderTest , MultipleReturn_fallthru ) { <nl> static LocalType kIntTypes5 [ ] = { kAstI32 , kAstI32 , kAstI32 , kAstI32 , kAstI32 } ; <nl> FunctionSig sig_ii_v ( 2 , 0 , kIntTypes5 ) ; <nl> FunctionEnv env_ii_v ; <nl> TEST_F ( WasmDecoderTest , MultipleReturn_fallthru ) { <nl> EXPECT_FAILURE_INLINE ( & env_iii_v , WASM_ZERO , WASM_ONE ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , MacrosInt32 ) { <nl> + TEST_F ( AstDecoderTest , MacrosInt32 ) { <nl> VERIFY ( WASM_I32_ADD ( WASM_GET_LOCAL ( 0 ) , WASM_I8 ( 12 ) ) ) ; <nl> VERIFY ( WASM_I32_SUB ( WASM_GET_LOCAL ( 0 ) , WASM_I8 ( 13 ) ) ) ; <nl> VERIFY ( WASM_I32_MUL ( WASM_GET_LOCAL ( 0 ) , WASM_I8 ( 14 ) ) ) ; <nl> TEST_F 
( WasmDecoderTest , MacrosInt32 ) { <nl> VERIFY ( WASM_I32_GEU ( WASM_GET_LOCAL ( 0 ) , WASM_I8 ( 29 ) ) ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , MacrosInt64 ) { <nl> + TEST_F ( AstDecoderTest , MacrosInt64 ) { <nl> FunctionEnv env_i_ll ; <nl> FunctionEnv env_l_ll ; <nl> init_env ( & env_i_ll , sigs . i_ll ( ) ) ; <nl> TEST_F ( WasmDecoderTest , MacrosInt64 ) { <nl> VERIFY_I_LL ( WASM_I64_NE ( WASM_GET_LOCAL ( 0 ) , WASM_I64 ( 25 ) ) ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , AllSimpleExpressions ) { <nl> + TEST_F ( AstDecoderTest , AllSimpleExpressions ) { <nl> / / Test all simple expressions which are described by a signature . <nl> # define DECODE_TEST ( name , opcode , sig ) \ <nl> { \ <nl> TEST_F ( WasmDecoderTest , AllSimpleExpressions ) { <nl> # undef DECODE_TEST <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , MemorySize ) { <nl> + TEST_F ( AstDecoderTest , MemorySize ) { <nl> byte code [ ] = { kExprMemorySize } ; <nl> EXPECT_VERIFIES ( & env_i_i , code ) ; <nl> EXPECT_FAILURE ( & env_f_ff , code ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , GrowMemory ) { <nl> + TEST_F ( AstDecoderTest , GrowMemory ) { <nl> byte code [ ] = { kExprGrowMemory , kExprGetLocal , 0 } ; <nl> EXPECT_VERIFIES ( & env_i_i , code ) ; <nl> EXPECT_FAILURE ( & env_i_d , code ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , LoadMemOffset ) { <nl> + TEST_F ( AstDecoderTest , LoadMemOffset ) { <nl> for ( int offset = 0 ; offset < 128 ; offset + = 7 ) { <nl> byte code [ ] = { kExprI32LoadMem , WasmOpcodes : : LoadStoreAccessOf ( true ) , <nl> static_cast < byte > ( offset ) , kExprI8Const , 0 } ; <nl> TEST_F ( WasmDecoderTest , LoadMemOffset ) { <nl> } <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , StoreMemOffset ) { <nl> + TEST_F ( AstDecoderTest , StoreMemOffset ) { <nl> for ( int offset = 0 ; offset < 128 ; offset + = 7 ) { <nl> byte code [ ] = { kExprI32StoreMem , <nl> WasmOpcodes : : LoadStoreAccessOf ( true ) , <nl> TEST_F ( WasmDecoderTest , StoreMemOffset ) { <nl> } <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , LoadMemOffset_varint ) { <nl> + TEST_F ( AstDecoderTest , LoadMemOffset_varint ) { <nl> byte code1 [ ] = { kExprI32LoadMem , WasmOpcodes : : LoadStoreAccessOf ( true ) , 0 , <nl> kExprI8Const , 0 } ; <nl> byte code2 [ ] = { kExprI32LoadMem , <nl> TEST_F ( WasmDecoderTest , LoadMemOffset_varint ) { <nl> EXPECT_VERIFIES ( & env_i_i , code4 ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , StoreMemOffset_varint ) { <nl> + TEST_F ( AstDecoderTest , StoreMemOffset_varint ) { <nl> byte code1 [ ] = { kExprI32StoreMem , <nl> WasmOpcodes : : LoadStoreAccessOf ( true ) , <nl> 0 , <nl> TEST_F ( WasmDecoderTest , StoreMemOffset_varint ) { <nl> EXPECT_VERIFIES ( & env_i_i , code4 ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , AllLoadMemCombinations ) { <nl> + TEST_F ( AstDecoderTest , AllLoadMemCombinations ) { <nl> for ( size_t i = 0 ; i < arraysize ( kLocalTypes ) ; i + + ) { <nl> LocalType local_type = kLocalTypes [ i ] ; <nl> for ( size_t j = 0 ; j < arraysize ( machineTypes ) ; j + + ) { <nl> TEST_F ( WasmDecoderTest , AllLoadMemCombinations ) { <nl> } <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , AllStoreMemCombinations ) { <nl> + TEST_F ( AstDecoderTest , AllStoreMemCombinations ) { <nl> for ( size_t i = 0 ; i < arraysize ( kLocalTypes ) ; i + + ) { <nl> LocalType local_type = kLocalTypes [ i ] ; <nl> for ( size_t j = 0 ; j < arraysize ( machineTypes ) ; j + + ) { <nl> class TestModuleEnv : public ModuleEnv { <nl> } 
; <nl> } / / namespace <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , SimpleCalls ) { <nl> + TEST_F ( AstDecoderTest , SimpleCalls ) { <nl> FunctionEnv * env = & env_i_i ; <nl> TestModuleEnv module_env ; <nl> env - > module = & module_env ; <nl> TEST_F ( WasmDecoderTest , SimpleCalls ) { <nl> EXPECT_VERIFIES_INLINE ( env , WASM_CALL_FUNCTION ( 2 , WASM_I8 ( 37 ) , WASM_I8 ( 77 ) ) ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , CallsWithTooFewArguments ) { <nl> + TEST_F ( AstDecoderTest , CallsWithTooFewArguments ) { <nl> FunctionEnv * env = & env_i_i ; <nl> TestModuleEnv module_env ; <nl> env - > module = & module_env ; <nl> TEST_F ( WasmDecoderTest , CallsWithTooFewArguments ) { <nl> EXPECT_FAILURE_INLINE ( env , WASM_CALL_FUNCTION ( 2 , WASM_GET_LOCAL ( 0 ) ) ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , CallsWithSpilloverArgs ) { <nl> + TEST_F ( AstDecoderTest , CallsWithSpilloverArgs ) { <nl> static LocalType a_i_ff [ ] = { kAstI32 , kAstF32 , kAstF32 } ; <nl> FunctionSig sig_i_ff ( 1 , 2 , a_i_ff ) ; <nl> FunctionEnv env_i_ff ; <nl> TEST_F ( WasmDecoderTest , CallsWithSpilloverArgs ) { <nl> WASM_CALL_FUNCTION ( 0 , WASM_F32 ( 0 . 1 ) , WASM_F32 ( 0 . 1 ) , WASM_F32 ( 11 ) ) ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , CallsWithMismatchedSigs2 ) { <nl> + TEST_F ( AstDecoderTest , CallsWithMismatchedSigs2 ) { <nl> FunctionEnv * env = & env_i_i ; <nl> TestModuleEnv module_env ; <nl> env - > module = & module_env ; <nl> TEST_F ( WasmDecoderTest , CallsWithMismatchedSigs2 ) { <nl> EXPECT_FAILURE_INLINE ( env , WASM_CALL_FUNCTION ( 0 , WASM_F64 ( 17 . 1 ) ) ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , CallsWithMismatchedSigs3 ) { <nl> + TEST_F ( AstDecoderTest , CallsWithMismatchedSigs3 ) { <nl> FunctionEnv * env = & env_i_i ; <nl> TestModuleEnv module_env ; <nl> env - > module = & module_env ; <nl> TEST_F ( WasmDecoderTest , CallsWithMismatchedSigs3 ) { <nl> EXPECT_FAILURE_INLINE ( env , WASM_CALL_FUNCTION ( 1 , WASM_F32 ( 17 . 6 ) ) ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , SimpleIndirectCalls ) { <nl> + TEST_F ( AstDecoderTest , SimpleIndirectCalls ) { <nl> FunctionEnv * env = & env_i_i ; <nl> TestModuleEnv module_env ; <nl> env - > module = & module_env ; <nl> TEST_F ( WasmDecoderTest , SimpleIndirectCalls ) { <nl> env , WASM_CALL_INDIRECT ( f2 , WASM_ZERO , WASM_I8 ( 32 ) , WASM_I8 ( 72 ) ) ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , IndirectCallsOutOfBounds ) { <nl> + TEST_F ( AstDecoderTest , IndirectCallsOutOfBounds ) { <nl> FunctionEnv * env = & env_i_i ; <nl> TestModuleEnv module_env ; <nl> env - > module = & module_env ; <nl> TEST_F ( WasmDecoderTest , IndirectCallsOutOfBounds ) { <nl> EXPECT_FAILURE_INLINE ( env , WASM_CALL_INDIRECT ( 2 , WASM_ZERO , WASM_I8 ( 27 ) ) ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , IndirectCallsWithMismatchedSigs3 ) { <nl> + TEST_F ( AstDecoderTest , IndirectCallsWithMismatchedSigs3 ) { <nl> FunctionEnv * env = & env_i_i ; <nl> TestModuleEnv module_env ; <nl> env - > module = & module_env ; <nl> TEST_F ( WasmDecoderTest , IndirectCallsWithMismatchedSigs3 ) { <nl> EXPECT_FAILURE_INLINE ( env , WASM_CALL_INDIRECT ( f1 , WASM_ZERO , WASM_F32 ( 17 . 
6 ) ) ) ; <nl> } <nl> <nl> - TEST_F ( WasmDecoderTest , SimpleImportCalls ) { <nl> + TEST_F ( AstDecoderTest , SimpleImportCalls ) { <nl> FunctionEnv * env = & env_i_i ; <nl> TestModuleEnv module_env ; <nl> env - > module = & module_env ; <nl> TEST_F ( WasmDecoderTest , SimpleImportCalls ) { <nl> EXPECT_VERIFIES_INLINE ( env , WASM_CALL_IMPORT ( f2 , WASM_I8 ( 32 ) , WASM_I8 ( 72 ) ) ) ; <nl> } <nl> <nl> - TEST_F ( WasmDecoderTest , ImportCallsWithMismatchedSigs3 ) { <nl> + TEST_F ( AstDecoderTest , ImportCallsWithMismatchedSigs3 ) { <nl> FunctionEnv * env = & env_i_i ; <nl> TestModuleEnv module_env ; <nl> env - > module = & module_env ; <nl> TEST_F ( WasmDecoderTest , ImportCallsWithMismatchedSigs3 ) { <nl> EXPECT_FAILURE_INLINE ( env , WASM_CALL_IMPORT ( f1 , WASM_F32 ( 17 . 6 ) ) ) ; <nl> } <nl> <nl> - TEST_F ( WasmDecoderTest , Int32Globals ) { <nl> + TEST_F ( AstDecoderTest , Int32Globals ) { <nl> FunctionEnv * env = & env_i_i ; <nl> TestModuleEnv module_env ; <nl> env - > module = & module_env ; <nl> TEST_F ( WasmDecoderTest , Int32Globals ) { <nl> EXPECT_VERIFIES_INLINE ( env , WASM_STORE_GLOBAL ( 5 , WASM_GET_LOCAL ( 0 ) ) ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , Int32Globals_fail ) { <nl> + TEST_F ( AstDecoderTest , Int32Globals_fail ) { <nl> FunctionEnv * env = & env_i_i ; <nl> TestModuleEnv module_env ; <nl> env - > module = & module_env ; <nl> TEST_F ( WasmDecoderTest , Int32Globals_fail ) { <nl> EXPECT_FAILURE_INLINE ( env , WASM_STORE_GLOBAL ( 3 , WASM_GET_LOCAL ( 0 ) ) ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , Int64Globals ) { <nl> + TEST_F ( AstDecoderTest , Int64Globals ) { <nl> FunctionEnv * env = & env_l_l ; <nl> TestModuleEnv module_env ; <nl> env - > module = & module_env ; <nl> TEST_F ( WasmDecoderTest , Int64Globals ) { <nl> EXPECT_VERIFIES_INLINE ( env , WASM_STORE_GLOBAL ( 1 , WASM_GET_LOCAL ( 0 ) ) ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , Float32Globals ) { <nl> + TEST_F ( AstDecoderTest , Float32Globals ) { <nl> FunctionEnv env_f_ff ; <nl> FunctionEnv * env = & env_f_ff ; <nl> init_env ( env , sigs . f_ff ( ) ) ; <nl> TEST_F ( WasmDecoderTest , Float32Globals ) { <nl> EXPECT_VERIFIES_INLINE ( env , WASM_STORE_GLOBAL ( 0 , WASM_GET_LOCAL ( 0 ) ) ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , Float64Globals ) { <nl> + TEST_F ( AstDecoderTest , Float64Globals ) { <nl> FunctionEnv env_d_dd ; <nl> FunctionEnv * env = & env_d_dd ; <nl> init_env ( env , sigs . 
d_dd ( ) ) ; <nl> TEST_F ( WasmDecoderTest , Float64Globals ) { <nl> EXPECT_VERIFIES_INLINE ( env , WASM_STORE_GLOBAL ( 0 , WASM_GET_LOCAL ( 0 ) ) ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , AllLoadGlobalCombinations ) { <nl> + TEST_F ( AstDecoderTest , AllLoadGlobalCombinations ) { <nl> for ( size_t i = 0 ; i < arraysize ( kLocalTypes ) ; i + + ) { <nl> LocalType local_type = kLocalTypes [ i ] ; <nl> for ( size_t j = 0 ; j < arraysize ( machineTypes ) ; j + + ) { <nl> TEST_F ( WasmDecoderTest , AllLoadGlobalCombinations ) { <nl> } <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , AllStoreGlobalCombinations ) { <nl> + TEST_F ( AstDecoderTest , AllStoreGlobalCombinations ) { <nl> for ( size_t i = 0 ; i < arraysize ( kLocalTypes ) ; i + + ) { <nl> LocalType local_type = kLocalTypes [ i ] ; <nl> for ( size_t j = 0 ; j < arraysize ( machineTypes ) ; j + + ) { <nl> TEST_F ( WasmDecoderTest , AllStoreGlobalCombinations ) { <nl> } <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , BreakNesting1 ) { <nl> + TEST_F ( AstDecoderTest , BreakNesting1 ) { <nl> for ( int i = 0 ; i < 5 ; i + + ) { <nl> / / ( block [ 2 ] ( loop [ 2 ] ( if ( get p ) break [ N ] ) ( set p 1 ) ) p ) <nl> byte code [ ] = { WASM_BLOCK ( <nl> TEST_F ( WasmDecoderTest , BreakNesting1 ) { <nl> } <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , BreakNesting2 ) { <nl> + TEST_F ( AstDecoderTest , BreakNesting2 ) { <nl> env_v_v . AddLocals ( kAstI32 , 1 ) ; <nl> for ( int i = 0 ; i < 5 ; i + + ) { <nl> / / ( block [ 2 ] ( loop [ 2 ] ( if ( get p ) break [ N ] ) ( set p 1 ) ) ( return p ) ) ( 11 ) <nl> TEST_F ( WasmDecoderTest , BreakNesting2 ) { <nl> } <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , BreakNesting3 ) { <nl> + TEST_F ( AstDecoderTest , BreakNesting3 ) { <nl> env_v_v . AddLocals ( kAstI32 , 1 ) ; <nl> for ( int i = 0 ; i < 5 ; i + + ) { <nl> / / ( block [ 1 ] ( loop [ 1 ] ( block [ 1 ] ( if ( get p ) break [ N ] ) <nl> TEST_F ( WasmDecoderTest , BreakNesting3 ) { <nl> } <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , BreaksWithMultipleTypes ) { <nl> + TEST_F ( AstDecoderTest , BreaksWithMultipleTypes ) { <nl> EXPECT_FAILURE_INLINE ( <nl> & env_i_i , WASM_BLOCK ( 2 , WASM_BRV_IF_ZERO ( 0 , WASM_I8 ( 7 ) ) , WASM_F32 ( 7 . 7 ) ) ) ; <nl> <nl> TEST_F ( WasmDecoderTest , BreaksWithMultipleTypes ) { <nl> WASM_BRV_IF_ZERO ( 0 , WASM_I8 ( 11 ) ) ) ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , BreakNesting_6_levels ) { <nl> + TEST_F ( AstDecoderTest , BreakNesting_6_levels ) { <nl> for ( int mask = 0 ; mask < 64 ; mask + + ) { <nl> for ( int i = 0 ; i < 14 ; i + + ) { <nl> byte code [ ] = { <nl> TEST_F ( WasmDecoderTest , BreakNesting_6_levels ) { <nl> } <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , ExprBreak_TypeCheck ) { <nl> + TEST_F ( AstDecoderTest , ExprBreak_TypeCheck ) { <nl> FunctionEnv * envs [ ] = { & env_i_i , & env_l_l , & env_f_ff , & env_d_dd } ; <nl> for ( size_t i = 0 ; i < arraysize ( envs ) ; i + + ) { <nl> FunctionEnv * env = envs [ i ] ; <nl> TEST_F ( WasmDecoderTest , ExprBreak_TypeCheck ) { <nl> WASM_F64 ( 1 . 
2 ) ) ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , ExprBreak_TypeCheckAll ) { <nl> + TEST_F ( AstDecoderTest , ExprBreak_TypeCheckAll ) { <nl> byte code1 [ ] = { WASM_BLOCK ( 2 , <nl> WASM_IF ( WASM_ZERO , WASM_BRV ( 0 , WASM_GET_LOCAL ( 0 ) ) ) , <nl> WASM_GET_LOCAL ( 1 ) ) } ; <nl> TEST_F ( WasmDecoderTest , ExprBreak_TypeCheckAll ) { <nl> } <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , ExprBr_Unify ) { <nl> + TEST_F ( AstDecoderTest , ExprBr_Unify ) { <nl> FunctionEnv env ; <nl> <nl> for ( int which = 0 ; which < 2 ; which + + ) { <nl> TEST_F ( WasmDecoderTest , ExprBr_Unify ) { <nl> } <nl> } <nl> <nl> - TEST_F ( WasmDecoderTest , ExprBrIf_cond_type ) { <nl> + TEST_F ( AstDecoderTest , ExprBrIf_cond_type ) { <nl> FunctionEnv env ; <nl> byte code [ ] = { <nl> WASM_BLOCK ( 1 , WASM_BRV_IF ( 0 , WASM_GET_LOCAL ( 0 ) , WASM_GET_LOCAL ( 1 ) ) ) } ; <nl> TEST_F ( WasmDecoderTest , ExprBrIf_cond_type ) { <nl> } <nl> } <nl> <nl> - TEST_F ( WasmDecoderTest , ExprBrIf_val_type ) { <nl> + TEST_F ( AstDecoderTest , ExprBrIf_val_type ) { <nl> FunctionEnv env ; <nl> byte code [ ] = { <nl> WASM_BLOCK ( 2 , WASM_BRV_IF ( 0 , WASM_GET_LOCAL ( 1 ) , WASM_GET_LOCAL ( 2 ) ) , <nl> TEST_F ( WasmDecoderTest , ExprBrIf_val_type ) { <nl> } <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , ExprBrIf_Unify ) { <nl> + TEST_F ( AstDecoderTest , ExprBrIf_Unify ) { <nl> FunctionEnv env ; <nl> <nl> for ( int which = 0 ; which < 2 ; which + + ) { <nl> TEST_F ( WasmDecoderTest , ExprBrIf_Unify ) { <nl> } <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , TableSwitch0 ) { <nl> + TEST_F ( AstDecoderTest , TableSwitch0 ) { <nl> static byte code [ ] = { kExprTableSwitch , 0 , 0 , 0 , 0 } ; <nl> EXPECT_FAILURE ( & env_v_v , code ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , TableSwitch0b ) { <nl> + TEST_F ( AstDecoderTest , TableSwitch0b ) { <nl> static byte code [ ] = { kExprTableSwitch , 0 , 0 , 0 , 0 , kExprI8Const , 11 } ; <nl> EXPECT_FAILURE ( & env_v_v , code ) ; <nl> EXPECT_FAILURE ( & env_i_i , code ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , TableSwitch0c ) { <nl> + TEST_F ( AstDecoderTest , TableSwitch0c ) { <nl> static byte code [ ] = { <nl> WASM_BLOCK ( 1 , WASM_TABLESWITCH_OP ( 0 , 1 , WASM_CASE_BR ( 0 ) ) , WASM_I8 ( 67 ) ) } ; <nl> EXPECT_VERIFIES ( & env_v_v , code ) ; <nl> } <nl> <nl> - TEST_F ( WasmDecoderTest , TableSwitch0d ) { <nl> + TEST_F ( AstDecoderTest , TableSwitch0d ) { <nl> static byte code [ ] = { <nl> WASM_BLOCK ( 1 , WASM_TABLESWITCH_OP ( 0 , 2 , WASM_CASE_BR ( 0 ) , WASM_CASE_BR ( 1 ) ) , <nl> WASM_I8 ( 67 ) ) } ; <nl> EXPECT_VERIFIES ( & env_v_v , code ) ; <nl> } <nl> <nl> - TEST_F ( WasmDecoderTest , TableSwitch1 ) { <nl> + TEST_F ( AstDecoderTest , TableSwitch1 ) { <nl> static byte code [ ] = { WASM_TABLESWITCH_OP ( 1 , 1 , WASM_CASE ( 0 ) ) , <nl> WASM_TABLESWITCH_BODY ( WASM_I8 ( 0 ) , WASM_I8 ( 9 ) ) } ; <nl> EXPECT_VERIFIES ( & env_i_i , code ) ; <nl> TEST_F ( WasmDecoderTest , TableSwitch1 ) { <nl> EXPECT_FAILURE ( & env_d_dd , code ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , TableSwitch_off_end ) { <nl> + TEST_F ( AstDecoderTest , TableSwitch_off_end ) { <nl> static byte code [ ] = { WASM_TABLESWITCH_OP ( 1 , 1 , WASM_CASE ( 0 ) ) , <nl> WASM_TABLESWITCH_BODY ( WASM_I8 ( 0 ) , WASM_I8 ( 9 ) ) } ; <nl> for ( size_t len = arraysize ( code ) - 1 ; len > 0 ; len - - ) { <nl> TEST_F ( WasmDecoderTest , TableSwitch_off_end ) { <nl> } <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , TableSwitch2 ) { <nl> + TEST_F ( 
AstDecoderTest , TableSwitch2 ) { <nl> static byte code [ ] = { <nl> WASM_TABLESWITCH_OP ( 2 , 2 , WASM_CASE ( 0 ) , WASM_CASE ( 1 ) ) , <nl> WASM_TABLESWITCH_BODY ( WASM_I8 ( 3 ) , WASM_I8 ( 10 ) , WASM_I8 ( 11 ) ) } ; <nl> TEST_F ( WasmDecoderTest , TableSwitch2 ) { <nl> EXPECT_FAILURE ( & env_d_dd , code ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , TableSwitch1b ) { <nl> + TEST_F ( AstDecoderTest , TableSwitch1b ) { <nl> EXPECT_VERIFIES_INLINE ( & env_i_i , WASM_TABLESWITCH_OP ( 1 , 1 , WASM_CASE ( 0 ) ) , <nl> WASM_TABLESWITCH_BODY ( WASM_GET_LOCAL ( 0 ) , WASM_ZERO ) ) ; <nl> <nl> TEST_F ( WasmDecoderTest , TableSwitch1b ) { <nl> WASM_TABLESWITCH_BODY ( WASM_ZERO , WASM_F64 ( 0 . 0 ) ) ) ; <nl> } <nl> <nl> - TEST_F ( WasmDecoderTest , TableSwitch_br1 ) { <nl> + TEST_F ( AstDecoderTest , TableSwitch_br1 ) { <nl> for ( int depth = 0 ; depth < 2 ; depth + + ) { <nl> byte code [ ] = { WASM_BLOCK ( 1 , WASM_TABLESWITCH_OP ( 0 , 1 , WASM_CASE_BR ( depth ) ) , <nl> WASM_GET_LOCAL ( 0 ) ) } ; <nl> TEST_F ( WasmDecoderTest , TableSwitch_br1 ) { <nl> } <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , TableSwitch_invalid_br ) { <nl> + TEST_F ( AstDecoderTest , TableSwitch_invalid_br ) { <nl> for ( int depth = 1 ; depth < 4 ; depth + + ) { <nl> EXPECT_FAILURE_INLINE ( & env_v_i , <nl> WASM_TABLESWITCH_OP ( 0 , 1 , WASM_CASE_BR ( depth ) ) , <nl> TEST_F ( WasmDecoderTest , TableSwitch_invalid_br ) { <nl> } <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , TableSwitch_invalid_case_ref ) { <nl> + TEST_F ( AstDecoderTest , TableSwitch_invalid_case_ref ) { <nl> EXPECT_FAILURE_INLINE ( & env_i_i , WASM_TABLESWITCH_OP ( 0 , 1 , WASM_CASE ( 0 ) ) , <nl> WASM_GET_LOCAL ( 0 ) ) ; <nl> EXPECT_FAILURE_INLINE ( & env_i_i , WASM_TABLESWITCH_OP ( 1 , 1 , WASM_CASE ( 1 ) ) , <nl> WASM_TABLESWITCH_BODY ( WASM_GET_LOCAL ( 0 ) , WASM_ZERO ) ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , TableSwitch1_br ) { <nl> + TEST_F ( AstDecoderTest , TableSwitch1_br ) { <nl> EXPECT_VERIFIES_INLINE ( <nl> & env_i_i , WASM_TABLESWITCH_OP ( 1 , 1 , WASM_CASE ( 0 ) ) , <nl> WASM_TABLESWITCH_BODY ( WASM_GET_LOCAL ( 0 ) , WASM_BRV ( 0 , WASM_ZERO ) ) ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , TableSwitch2_br ) { <nl> + TEST_F ( AstDecoderTest , TableSwitch2_br ) { <nl> EXPECT_VERIFIES_INLINE ( <nl> & env_i_i , WASM_TABLESWITCH_OP ( 2 , 2 , WASM_CASE ( 0 ) , WASM_CASE ( 1 ) ) , <nl> WASM_TABLESWITCH_BODY ( WASM_GET_LOCAL ( 0 ) , WASM_BRV ( 0 , WASM_I8 ( 0 ) ) , <nl> TEST_F ( WasmDecoderTest , TableSwitch2_br ) { <nl> WASM_BRV ( 0 , WASM_I8 ( 4 ) ) ) ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , TableSwitch2x2 ) { <nl> + TEST_F ( AstDecoderTest , TableSwitch2x2 ) { <nl> EXPECT_VERIFIES_INLINE ( <nl> & env_i_i , WASM_TABLESWITCH_OP ( 2 , 4 , WASM_CASE ( 0 ) , WASM_CASE ( 1 ) , <nl> WASM_CASE ( 0 ) , WASM_CASE ( 1 ) ) , <nl> TEST_F ( WasmDecoderTest , TableSwitch2x2 ) { <nl> WASM_BRV ( 0 , WASM_I8 ( 4 ) ) ) ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , ExprBreakNesting1 ) { <nl> + TEST_F ( AstDecoderTest , ExprBreakNesting1 ) { <nl> EXPECT_VERIFIES_INLINE ( & env_v_v , WASM_BLOCK ( 1 , WASM_BRV ( 0 , WASM_ZERO ) ) ) ; <nl> EXPECT_VERIFIES_INLINE ( & env_v_v , WASM_BLOCK ( 1 , WASM_BR ( 0 ) ) ) ; <nl> EXPECT_VERIFIES_INLINE ( & env_v_v , <nl> TEST_F ( WasmDecoderTest , ExprBreakNesting1 ) { <nl> EXPECT_VERIFIES_INLINE ( & env_v_v , WASM_LOOP ( 1 , WASM_BR ( 1 ) ) ) ; <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , Select ) { <nl> + TEST_F ( AstDecoderTest , Select ) { 
<nl> EXPECT_VERIFIES_INLINE ( <nl> & env_i_i , WASM_SELECT ( WASM_GET_LOCAL ( 0 ) , WASM_GET_LOCAL ( 0 ) , WASM_ZERO ) ) ; <nl> EXPECT_VERIFIES_INLINE ( & env_f_ff , <nl> TEST_F ( WasmDecoderTest , Select ) { <nl> WASM_SELECT ( WASM_I64 ( 0 ) , WASM_I64 ( 0 ) , WASM_ZERO ) ) ; <nl> } <nl> <nl> - TEST_F ( WasmDecoderTest , Select_fail1 ) { <nl> + TEST_F ( AstDecoderTest , Select_fail1 ) { <nl> EXPECT_FAILURE_INLINE ( & env_i_i , WASM_SELECT ( WASM_F32 ( 0 . 0 ) , WASM_GET_LOCAL ( 0 ) , <nl> WASM_GET_LOCAL ( 0 ) ) ) ; <nl> EXPECT_FAILURE_INLINE ( & env_i_i , WASM_SELECT ( WASM_GET_LOCAL ( 0 ) , WASM_F32 ( 0 . 0 ) , <nl> TEST_F ( WasmDecoderTest , Select_fail1 ) { <nl> WASM_SELECT ( WASM_GET_LOCAL ( 0 ) , WASM_GET_LOCAL ( 0 ) , WASM_F32 ( 0 . 0 ) ) ) ; <nl> } <nl> <nl> - TEST_F ( WasmDecoderTest , Select_fail2 ) { <nl> + TEST_F ( AstDecoderTest , Select_fail2 ) { <nl> for ( size_t i = 0 ; i < arraysize ( kLocalTypes ) ; i + + ) { <nl> LocalType type = kLocalTypes [ i ] ; <nl> if ( type = = kAstI32 ) continue ; <nl> TEST_F ( WasmDecoderTest , Select_fail2 ) { <nl> } <nl> } <nl> <nl> - <nl> - TEST_F ( WasmDecoderTest , Select_TypeCheck ) { <nl> + TEST_F ( AstDecoderTest , Select_TypeCheck ) { <nl> EXPECT_FAILURE_INLINE ( & env_i_i , WASM_SELECT ( WASM_F32 ( 9 . 9 ) , WASM_GET_LOCAL ( 0 ) , <nl> WASM_GET_LOCAL ( 0 ) ) ) ; <nl> <nl> TEST_F ( WasmOpcodeArityTest , Control ) { <nl> { <nl> TestSignatures sigs ; <nl> FunctionEnv env ; <nl> - WasmDecoderTest : : init_env ( & env , sigs . v_v ( ) ) ; <nl> + AstDecoderTest : : init_env ( & env , sigs . v_v ( ) ) ; <nl> EXPECT_ARITY ( 0 , kExprReturn ) ; <nl> - WasmDecoderTest : : init_env ( & env , sigs . i_i ( ) ) ; <nl> + AstDecoderTest : : init_env ( & env , sigs . i_i ( ) ) ; <nl> EXPECT_ARITY ( 1 , kExprReturn ) ; <nl> } <nl> } <nl> TEST_F ( WasmOpcodeArityTest , Calls ) { <nl> <nl> { <nl> FunctionEnv env ; <nl> - WasmDecoderTest : : init_env ( & env , sigs . i_ii ( ) ) ; <nl> + AstDecoderTest : : init_env ( & env , sigs . i_ii ( ) ) ; <nl> env . module = & module ; <nl> <nl> EXPECT_ARITY ( 2 , kExprCallFunction , 0 ) ; <nl> TEST_F ( WasmOpcodeArityTest , Calls ) { <nl> <nl> { <nl> FunctionEnv env ; <nl> - WasmDecoderTest : : init_env ( & env , sigs . v_v ( ) ) ; <nl> + AstDecoderTest : : init_env ( & env , sigs . v_v ( ) ) ; <nl> env . module = & module ; <nl> <nl> EXPECT_ARITY ( 1 , kExprCallFunction , 1 ) ; <nl> new file mode 100644 <nl> index 00000000000 . . de6bf49ea85 <nl> mmm / dev / null <nl> ppp b / test / unittests / wasm / decoder - unittest . cc <nl> <nl> + / / Copyright 2016 the V8 project authors . All rights reserved . <nl> + / / Use of this source code is governed by a BSD - style license that can be <nl> + / / found in the LICENSE file . <nl> + <nl> + # include " test / unittests / test - utils . h " <nl> + <nl> + # include " src / wasm / decoder . h " <nl> + # include " src / wasm / wasm - macro - gen . h " <nl> + <nl> + namespace v8 { <nl> + namespace internal { <nl> + namespace wasm { <nl> + <nl> + class DecoderTest : public TestWithZone { <nl> + public : <nl> + DecoderTest ( ) : decoder ( nullptr , nullptr ) { } <nl> + <nl> + Decoder decoder ; <nl> + } ; <nl> + <nl> + # define CHECK_UINT32V_INLINE ( expected , expected_length , . . . ) \ <nl> + do { \ <nl> + const byte data [ ] = { __VA_ARGS__ } ; \ <nl> + decoder . Reset ( data , data + sizeof ( data ) ) ; \ <nl> + int length ; \ <nl> + EXPECT_EQ ( expected , \ <nl> + decoder . checked_read_u32v ( decoder . 
start ( ) , 0 , & length ) ) ; \ <nl> + EXPECT_EQ ( expected_length , length ) ; \ <nl> + } while ( false ) <nl> + <nl> + # define CHECK_INT32V_INLINE ( expected , expected_length , . . . ) \ <nl> + do { \ <nl> + const byte data [ ] = { __VA_ARGS__ } ; \ <nl> + decoder . Reset ( data , data + sizeof ( data ) ) ; \ <nl> + int length ; \ <nl> + EXPECT_EQ ( expected , \ <nl> + decoder . checked_read_i32v ( decoder . start ( ) , 0 , & length ) ) ; \ <nl> + EXPECT_EQ ( expected_length , length ) ; \ <nl> + } while ( false ) <nl> + <nl> + TEST_F ( DecoderTest , ReadU32v_OneByte ) { <nl> + CHECK_UINT32V_INLINE ( 0 , 1 , 0 ) ; <nl> + CHECK_UINT32V_INLINE ( 5 , 1 , 5 ) ; <nl> + CHECK_UINT32V_INLINE ( 7 , 1 , 7 ) ; <nl> + CHECK_UINT32V_INLINE ( 9 , 1 , 9 ) ; <nl> + CHECK_UINT32V_INLINE ( 37 , 1 , 37 ) ; <nl> + CHECK_UINT32V_INLINE ( 69 , 1 , 69 ) ; <nl> + CHECK_UINT32V_INLINE ( 110 , 1 , 110 ) ; <nl> + CHECK_UINT32V_INLINE ( 125 , 1 , 125 ) ; <nl> + CHECK_UINT32V_INLINE ( 126 , 1 , 126 ) ; <nl> + CHECK_UINT32V_INLINE ( 127 , 1 , 127 ) ; <nl> + } <nl> + <nl> + TEST_F ( DecoderTest , ReadU32v_TwoByte ) { <nl> + CHECK_UINT32V_INLINE ( 0 , 1 , 0 , 0 ) ; <nl> + CHECK_UINT32V_INLINE ( 10 , 1 , 10 , 0 ) ; <nl> + CHECK_UINT32V_INLINE ( 27 , 1 , 27 , 0 ) ; <nl> + CHECK_UINT32V_INLINE ( 100 , 1 , 100 , 0 ) ; <nl> + <nl> + CHECK_UINT32V_INLINE ( 444 , 2 , U32V_2 ( 444 ) ) ; <nl> + CHECK_UINT32V_INLINE ( 544 , 2 , U32V_2 ( 544 ) ) ; <nl> + CHECK_UINT32V_INLINE ( 1311 , 2 , U32V_2 ( 1311 ) ) ; <nl> + CHECK_UINT32V_INLINE ( 2333 , 2 , U32V_2 ( 2333 ) ) ; <nl> + <nl> + for ( uint32_t i = 0 ; i < 1 < < 14 ; i = i * 13 + 1 ) { <nl> + CHECK_UINT32V_INLINE ( i , 2 , U32V_2 ( i ) ) ; <nl> + } <nl> + <nl> + const uint32_t max = ( 1 < < 14 ) - 1 ; <nl> + CHECK_UINT32V_INLINE ( max , 2 , U32V_2 ( max ) ) ; <nl> + } <nl> + <nl> + TEST_F ( DecoderTest , ReadU32v_ThreeByte ) { <nl> + CHECK_UINT32V_INLINE ( 0 , 1 , 0 , 0 , 0 , 0 ) ; <nl> + CHECK_UINT32V_INLINE ( 10 , 1 , 10 , 0 , 0 , 0 ) ; <nl> + CHECK_UINT32V_INLINE ( 27 , 1 , 27 , 0 , 0 , 0 ) ; <nl> + CHECK_UINT32V_INLINE ( 100 , 1 , 100 , 0 , 0 , 0 ) ; <nl> + <nl> + CHECK_UINT32V_INLINE ( 11 , 3 , U32V_3 ( 11 ) ) ; <nl> + CHECK_UINT32V_INLINE ( 101 , 3 , U32V_3 ( 101 ) ) ; <nl> + CHECK_UINT32V_INLINE ( 446 , 3 , U32V_3 ( 446 ) ) ; <nl> + CHECK_UINT32V_INLINE ( 546 , 3 , U32V_3 ( 546 ) ) ; <nl> + CHECK_UINT32V_INLINE ( 1319 , 3 , U32V_3 ( 1319 ) ) ; <nl> + CHECK_UINT32V_INLINE ( 2338 , 3 , U32V_3 ( 2338 ) ) ; <nl> + CHECK_UINT32V_INLINE ( 8191 , 3 , U32V_3 ( 8191 ) ) ; <nl> + CHECK_UINT32V_INLINE ( 9999 , 3 , U32V_3 ( 9999 ) ) ; <nl> + CHECK_UINT32V_INLINE ( 14444 , 3 , U32V_3 ( 14444 ) ) ; <nl> + CHECK_UINT32V_INLINE ( 314444 , 3 , U32V_3 ( 314444 ) ) ; <nl> + CHECK_UINT32V_INLINE ( 614444 , 3 , U32V_3 ( 614444 ) ) ; <nl> + <nl> + const uint32_t max = ( 1 < < 21 ) - 1 ; <nl> + <nl> + for ( uint32_t i = 0 ; i < = max ; i = i * 13 + 3 ) { <nl> + CHECK_UINT32V_INLINE ( i , 3 , U32V_3 ( i ) , 0 ) ; <nl> + } <nl> + <nl> + CHECK_UINT32V_INLINE ( max , 3 , U32V_3 ( max ) ) ; <nl> + } <nl> + <nl> + TEST_F ( DecoderTest , ReadU32v_FourByte ) { <nl> + CHECK_UINT32V_INLINE ( 0 , 1 , 0 , 0 , 0 , 0 , 0 ) ; <nl> + CHECK_UINT32V_INLINE ( 10 , 1 , 10 , 0 , 0 , 0 , 0 ) ; <nl> + CHECK_UINT32V_INLINE ( 27 , 1 , 27 , 0 , 0 , 0 , 0 ) ; <nl> + CHECK_UINT32V_INLINE ( 100 , 1 , 100 , 0 , 0 , 0 , 0 ) ; <nl> + <nl> + CHECK_UINT32V_INLINE ( 13 , 4 , U32V_4 ( 13 ) ) ; <nl> + CHECK_UINT32V_INLINE ( 107 , 4 , U32V_4 ( 107 ) ) ; <nl> + CHECK_UINT32V_INLINE ( 449 , 4 , U32V_4 ( 449 ) ) ; <nl> + 
CHECK_UINT32V_INLINE ( 541 , 4 , U32V_4 ( 541 ) ) ; <nl> + CHECK_UINT32V_INLINE ( 1317 , 4 , U32V_4 ( 1317 ) ) ; <nl> + CHECK_UINT32V_INLINE ( 2334 , 4 , U32V_4 ( 2334 ) ) ; <nl> + CHECK_UINT32V_INLINE ( 8191 , 4 , U32V_4 ( 8191 ) ) ; <nl> + CHECK_UINT32V_INLINE ( 9994 , 4 , U32V_4 ( 9994 ) ) ; <nl> + CHECK_UINT32V_INLINE ( 14442 , 4 , U32V_4 ( 14442 ) ) ; <nl> + CHECK_UINT32V_INLINE ( 314442 , 4 , U32V_4 ( 314442 ) ) ; <nl> + CHECK_UINT32V_INLINE ( 614442 , 4 , U32V_4 ( 614442 ) ) ; <nl> + CHECK_UINT32V_INLINE ( 1614442 , 4 , U32V_4 ( 1614442 ) ) ; <nl> + CHECK_UINT32V_INLINE ( 5614442 , 4 , U32V_4 ( 5614442 ) ) ; <nl> + CHECK_UINT32V_INLINE ( 19614442 , 4 , U32V_4 ( 19614442 ) ) ; <nl> + <nl> + const uint32_t max = ( 1 < < 28 ) - 1 ; <nl> + <nl> + for ( uint32_t i = 0 ; i < = max ; i = i * 13 + 5 ) { <nl> + CHECK_UINT32V_INLINE ( i , 4 , U32V_4 ( i ) , 0 ) ; <nl> + } <nl> + <nl> + CHECK_UINT32V_INLINE ( max , 4 , U32V_4 ( max ) ) ; <nl> + } <nl> + <nl> + TEST_F ( DecoderTest , ReadU32v_FiveByte ) { <nl> + CHECK_UINT32V_INLINE ( 0 , 1 , 0 , 0 , 0 , 0 , 0 ) ; <nl> + CHECK_UINT32V_INLINE ( 10 , 1 , 10 , 0 , 0 , 0 , 0 ) ; <nl> + CHECK_UINT32V_INLINE ( 27 , 1 , 27 , 0 , 0 , 0 , 0 ) ; <nl> + CHECK_UINT32V_INLINE ( 100 , 1 , 100 , 0 , 0 , 0 , 0 ) ; <nl> + <nl> + CHECK_UINT32V_INLINE ( 13 , 5 , U32V_5 ( 13 ) ) ; <nl> + CHECK_UINT32V_INLINE ( 107 , 5 , U32V_5 ( 107 ) ) ; <nl> + CHECK_UINT32V_INLINE ( 449 , 5 , U32V_5 ( 449 ) ) ; <nl> + CHECK_UINT32V_INLINE ( 541 , 5 , U32V_5 ( 541 ) ) ; <nl> + CHECK_UINT32V_INLINE ( 1317 , 5 , U32V_5 ( 1317 ) ) ; <nl> + CHECK_UINT32V_INLINE ( 2334 , 5 , U32V_5 ( 2334 ) ) ; <nl> + CHECK_UINT32V_INLINE ( 8191 , 5 , U32V_5 ( 8191 ) ) ; <nl> + CHECK_UINT32V_INLINE ( 9994 , 5 , U32V_5 ( 9994 ) ) ; <nl> + CHECK_UINT32V_INLINE ( 24442 , 5 , U32V_5 ( 24442 ) ) ; <nl> + CHECK_UINT32V_INLINE ( 414442 , 5 , U32V_5 ( 414442 ) ) ; <nl> + CHECK_UINT32V_INLINE ( 714442 , 5 , U32V_5 ( 714442 ) ) ; <nl> + CHECK_UINT32V_INLINE ( 1614442 , 5 , U32V_5 ( 1614442 ) ) ; <nl> + CHECK_UINT32V_INLINE ( 6614442 , 5 , U32V_5 ( 6614442 ) ) ; <nl> + CHECK_UINT32V_INLINE ( 89614442 , 5 , U32V_5 ( 89614442 ) ) ; <nl> + CHECK_UINT32V_INLINE ( 2219614442u , 5 , U32V_5 ( 2219614442u ) ) ; <nl> + CHECK_UINT32V_INLINE ( 3219614442u , 5 , U32V_5 ( 3219614442u ) ) ; <nl> + CHECK_UINT32V_INLINE ( 4019614442u , 5 , U32V_5 ( 4019614442u ) ) ; <nl> + <nl> + const uint32_t max = 0xFFFFFFFFu ; <nl> + <nl> + for ( uint32_t i = 1 ; i < 32 ; i + + ) { <nl> + uint32_t val = 0x983489aau < < i ; <nl> + CHECK_UINT32V_INLINE ( val , 5 , U32V_5 ( val ) , 0 ) ; <nl> + } <nl> + <nl> + CHECK_UINT32V_INLINE ( max , 5 , U32V_5 ( max ) ) ; <nl> + } <nl> + <nl> + TEST_F ( DecoderTest , ReadU32v_various ) { <nl> + for ( int i = 0 ; i < 10 ; i + + ) { <nl> + uint32_t x = 0xCCCCCCCCu * i ; <nl> + for ( int width = 0 ; width < 32 ; width + + ) { <nl> + uint32_t val = x > > width ; <nl> + <nl> + CHECK_UINT32V_INLINE ( val & MASK_7 , 1 , U32V_1 ( val ) ) ; <nl> + CHECK_UINT32V_INLINE ( val & MASK_14 , 2 , U32V_2 ( val ) ) ; <nl> + CHECK_UINT32V_INLINE ( val & MASK_21 , 3 , U32V_3 ( val ) ) ; <nl> + CHECK_UINT32V_INLINE ( val & MASK_28 , 4 , U32V_4 ( val ) ) ; <nl> + CHECK_UINT32V_INLINE ( val , 5 , U32V_5 ( val ) ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + TEST_F ( DecoderTest , ReadI32v_OneByte ) { <nl> + CHECK_INT32V_INLINE ( 0 , 1 , 0 ) ; <nl> + CHECK_INT32V_INLINE ( 4 , 1 , 4 ) ; <nl> + CHECK_INT32V_INLINE ( 6 , 1 , 6 ) ; <nl> + CHECK_INT32V_INLINE ( 9 , 1 , 9 ) ; <nl> + CHECK_INT32V_INLINE ( 33 , 1 , 33 ) ; <nl> + 
CHECK_INT32V_INLINE ( 61 , 1 , 61 ) ; <nl> + CHECK_INT32V_INLINE ( 63 , 1 , 63 ) ; <nl> + <nl> + CHECK_INT32V_INLINE ( - 1 , 1 , 127 ) ; <nl> + CHECK_INT32V_INLINE ( - 2 , 1 , 126 ) ; <nl> + CHECK_INT32V_INLINE ( - 11 , 1 , 117 ) ; <nl> + CHECK_INT32V_INLINE ( - 62 , 1 , 66 ) ; <nl> + CHECK_INT32V_INLINE ( - 63 , 1 , 65 ) ; <nl> + CHECK_INT32V_INLINE ( - 64 , 1 , 64 ) ; <nl> + } <nl> + <nl> + TEST_F ( DecoderTest , ReadI32v_TwoByte ) { <nl> + CHECK_INT32V_INLINE ( 0 , 2 , U32V_2 ( 0 ) ) ; <nl> + CHECK_INT32V_INLINE ( 9 , 2 , U32V_2 ( 9 ) ) ; <nl> + CHECK_INT32V_INLINE ( 61 , 2 , U32V_2 ( 61 ) ) ; <nl> + CHECK_INT32V_INLINE ( 63 , 2 , U32V_2 ( 63 ) ) ; <nl> + <nl> + CHECK_INT32V_INLINE ( - 1 , 2 , U32V_2 ( - 1 ) ) ; <nl> + CHECK_INT32V_INLINE ( - 2 , 2 , U32V_2 ( - 2 ) ) ; <nl> + CHECK_INT32V_INLINE ( - 63 , 2 , U32V_2 ( - 63 ) ) ; <nl> + CHECK_INT32V_INLINE ( - 64 , 2 , U32V_2 ( - 64 ) ) ; <nl> + <nl> + CHECK_INT32V_INLINE ( - 200 , 2 , U32V_2 ( - 200 ) ) ; <nl> + CHECK_INT32V_INLINE ( - 1002 , 2 , U32V_2 ( - 1002 ) ) ; <nl> + CHECK_INT32V_INLINE ( - 2004 , 2 , U32V_2 ( - 2004 ) ) ; <nl> + CHECK_INT32V_INLINE ( - 4077 , 2 , U32V_2 ( - 4077 ) ) ; <nl> + <nl> + CHECK_INT32V_INLINE ( 207 , 2 , U32V_2 ( 207 ) ) ; <nl> + CHECK_INT32V_INLINE ( 1009 , 2 , U32V_2 ( 1009 ) ) ; <nl> + CHECK_INT32V_INLINE ( 2003 , 2 , U32V_2 ( 2003 ) ) ; <nl> + CHECK_INT32V_INLINE ( 4072 , 2 , U32V_2 ( 4072 ) ) ; <nl> + <nl> + const int32_t min = 0 - ( 1 < < 13 ) ; <nl> + for ( int i = min ; i < min + 10 ; i + + ) { <nl> + CHECK_INT32V_INLINE ( i , 2 , U32V_2 ( i ) ) ; <nl> + } <nl> + <nl> + const int32_t max = ( 1 < < 13 ) - 1 ; <nl> + for ( int i = max ; i > max - 10 ; i - - ) { <nl> + CHECK_INT32V_INLINE ( i , 2 , U32V_2 ( i ) ) ; <nl> + } <nl> + } <nl> + <nl> + TEST_F ( DecoderTest , ReadI32v_ThreeByte ) { <nl> + CHECK_INT32V_INLINE ( 0 , 3 , U32V_3 ( 0 ) ) ; <nl> + CHECK_INT32V_INLINE ( 9 , 3 , U32V_3 ( 9 ) ) ; <nl> + CHECK_INT32V_INLINE ( 61 , 3 , U32V_3 ( 61 ) ) ; <nl> + CHECK_INT32V_INLINE ( 63 , 3 , U32V_3 ( 63 ) ) ; <nl> + <nl> + CHECK_INT32V_INLINE ( - 1 , 3 , U32V_3 ( - 1 ) ) ; <nl> + CHECK_INT32V_INLINE ( - 2 , 3 , U32V_3 ( - 2 ) ) ; <nl> + CHECK_INT32V_INLINE ( - 63 , 3 , U32V_3 ( - 63 ) ) ; <nl> + CHECK_INT32V_INLINE ( - 64 , 3 , U32V_3 ( - 64 ) ) ; <nl> + <nl> + CHECK_INT32V_INLINE ( - 207 , 3 , U32V_3 ( - 207 ) ) ; <nl> + CHECK_INT32V_INLINE ( - 1012 , 3 , U32V_3 ( - 1012 ) ) ; <nl> + CHECK_INT32V_INLINE ( - 4067 , 3 , U32V_3 ( - 4067 ) ) ; <nl> + CHECK_INT32V_INLINE ( - 14067 , 3 , U32V_3 ( - 14067 ) ) ; <nl> + CHECK_INT32V_INLINE ( - 234061 , 3 , U32V_3 ( - 234061 ) ) ; <nl> + <nl> + CHECK_INT32V_INLINE ( 237 , 3 , U32V_3 ( 237 ) ) ; <nl> + CHECK_INT32V_INLINE ( 1309 , 3 , U32V_3 ( 1309 ) ) ; <nl> + CHECK_INT32V_INLINE ( 4372 , 3 , U32V_3 ( 4372 ) ) ; <nl> + CHECK_INT32V_INLINE ( 64372 , 3 , U32V_3 ( 64372 ) ) ; <nl> + CHECK_INT32V_INLINE ( 374372 , 3 , U32V_3 ( 374372 ) ) ; <nl> + <nl> + const int32_t min = 0 - ( 1 < < 20 ) ; <nl> + for ( int i = min ; i < min + 10 ; i + + ) { <nl> + CHECK_INT32V_INLINE ( i , 3 , U32V_3 ( i ) ) ; <nl> + } <nl> + <nl> + const int32_t max = ( 1 < < 20 ) - 1 ; <nl> + for ( int i = max ; i > max - 10 ; i - - ) { <nl> + CHECK_INT32V_INLINE ( i , 3 , U32V_3 ( i ) ) ; <nl> + } <nl> + } <nl> + <nl> + TEST_F ( DecoderTest , ReadI32v_FourByte ) { <nl> + CHECK_INT32V_INLINE ( 0 , 4 , U32V_4 ( 0 ) ) ; <nl> + CHECK_INT32V_INLINE ( 9 , 4 , U32V_4 ( 9 ) ) ; <nl> + CHECK_INT32V_INLINE ( 61 , 4 , U32V_4 ( 61 ) ) ; <nl> + CHECK_INT32V_INLINE ( 63 , 4 , U32V_4 ( 63 ) ) ; <nl> + <nl> 
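[Editor's aside, not part of the commit above: the SLEB128 cases in these tests lean on the sign-extension step in checked_read_i32v, which accumulates the 7-bit groups into an unsigned payload, shifts that payload up to bit 31, and arithmetically shifts it back down. A minimal standalone sketch of that round trip follows; the helper names encode_i32v / decode_i32v are invented for illustration, the byte layout mirrors the U32V_n macros, and, like the bit_cast pattern in the commit, it assumes arithmetic right shift of negative values.]

// Sketch only: mirrors checked_read_i32v's sign extension; helper names are invented.
#include <cassert>
#include <cstdint>
#include <vector>

// Emit 'length' LEB128 bytes (1..5) of the two's-complement bit pattern,
// 7 bits at a time, low bits first -- the same layout as the U32V_n macros.
std::vector<uint8_t> encode_i32v(int32_t value, int length) {
  std::vector<uint8_t> bytes;
  uint32_t bits = static_cast<uint32_t>(value);
  for (int i = 0; i < length; i++) {
    uint8_t b = static_cast<uint8_t>(bits & 0x7F);
    if (i + 1 < length) b |= 0x80;  // continuation bit on all but the last byte
    bytes.push_back(b);
    bits >>= 7;
  }
  return bytes;
}

// Accumulate the unsigned payload, then sign-extend from the topmost
// encoded bit: (payload << unused) >> unused on a signed 32-bit type.
int32_t decode_i32v(const std::vector<uint8_t>& bytes, int* length) {
  uint32_t payload = 0;
  int shift = 0;
  *length = 0;
  for (uint8_t b : bytes) {
    payload |= static_cast<uint32_t>(b & 0x7F) << shift;
    shift += 7;
    ++*length;
    if ((b & 0x80) == 0) break;  // last byte of this varint
  }
  if (*length == 5) return static_cast<int32_t>(payload);  // all 32 bits present
  int unused = 32 - 7 * *length;
  return static_cast<int32_t>(payload << unused) >> unused;  // arithmetic shift
}

int main() {
  // Matches the one-byte cases in the tests: 0x7F decodes to -1, 0x40 to -64.
  int len = 0;
  assert(decode_i32v({0x7F}, &len) == -1 && len == 1);
  assert(decode_i32v({0x40}, &len) == -64 && len == 1);
  // Any int32_t round-trips through the full five-byte form.
  for (int32_t v : {0, 63, -64, 9374372, -8264061, INT32_MIN}) {
    assert(decode_i32v(encode_i32v(v, 5), &len) == v && len == 5);
  }
  return 0;
}

[The shift-based extension is why checked_read_i32v can reuse checked_read_u32v unchanged: rather than tracking a sign per byte while reading, it treats the partially-filled result as a field of 7 * length significant bits and lets one pair of shifts replicate the top bit, with the five-byte case returned as-is because every bit of the int32 is already present.]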
+ CHECK_INT32V_INLINE ( - 1 , 4 , U32V_4 ( - 1 ) ) ; <nl> + CHECK_INT32V_INLINE ( - 2 , 4 , U32V_4 ( - 2 ) ) ; <nl> + CHECK_INT32V_INLINE ( - 63 , 4 , U32V_4 ( - 63 ) ) ; <nl> + CHECK_INT32V_INLINE ( - 64 , 4 , U32V_4 ( - 64 ) ) ; <nl> + <nl> + CHECK_INT32V_INLINE ( - 267 , 4 , U32V_4 ( - 267 ) ) ; <nl> + CHECK_INT32V_INLINE ( - 1612 , 4 , U32V_4 ( - 1612 ) ) ; <nl> + CHECK_INT32V_INLINE ( - 4667 , 4 , U32V_4 ( - 4667 ) ) ; <nl> + CHECK_INT32V_INLINE ( - 16067 , 4 , U32V_4 ( - 16067 ) ) ; <nl> + CHECK_INT32V_INLINE ( - 264061 , 4 , U32V_4 ( - 264061 ) ) ; <nl> + CHECK_INT32V_INLINE ( - 1264061 , 4 , U32V_4 ( - 1264061 ) ) ; <nl> + CHECK_INT32V_INLINE ( - 6264061 , 4 , U32V_4 ( - 6264061 ) ) ; <nl> + CHECK_INT32V_INLINE ( - 8264061 , 4 , U32V_4 ( - 8264061 ) ) ; <nl> + <nl> + CHECK_INT32V_INLINE ( 277 , 4 , U32V_4 ( 277 ) ) ; <nl> + CHECK_INT32V_INLINE ( 1709 , 4 , U32V_4 ( 1709 ) ) ; <nl> + CHECK_INT32V_INLINE ( 4772 , 4 , U32V_4 ( 4772 ) ) ; <nl> + CHECK_INT32V_INLINE ( 67372 , 4 , U32V_4 ( 67372 ) ) ; <nl> + CHECK_INT32V_INLINE ( 374372 , 4 , U32V_4 ( 374372 ) ) ; <nl> + CHECK_INT32V_INLINE ( 2374372 , 4 , U32V_4 ( 2374372 ) ) ; <nl> + CHECK_INT32V_INLINE ( 7374372 , 4 , U32V_4 ( 7374372 ) ) ; <nl> + CHECK_INT32V_INLINE ( 9374372 , 4 , U32V_4 ( 9374372 ) ) ; <nl> + <nl> + const int32_t min = 0 - ( 1 < < 27 ) ; <nl> + for ( int i = min ; i < min + 10 ; i + + ) { <nl> + CHECK_INT32V_INLINE ( i , 4 , U32V_4 ( i ) ) ; <nl> + } <nl> + <nl> + const int32_t max = ( 1 < < 27 ) - 1 ; <nl> + for ( int i = max ; i > max - 10 ; i - - ) { <nl> + CHECK_INT32V_INLINE ( i , 4 , U32V_4 ( i ) ) ; <nl> + } <nl> + } <nl> + <nl> + TEST_F ( DecoderTest , ReadI32v_FiveByte ) { <nl> + CHECK_INT32V_INLINE ( 0 , 5 , U32V_5 ( 0 ) ) ; <nl> + CHECK_INT32V_INLINE ( 16 , 5 , U32V_5 ( 16 ) ) ; <nl> + CHECK_INT32V_INLINE ( 94 , 5 , U32V_5 ( 94 ) ) ; <nl> + CHECK_INT32V_INLINE ( 127 , 5 , U32V_5 ( 127 ) ) ; <nl> + <nl> + CHECK_INT32V_INLINE ( - 1 , 5 , U32V_5 ( - 1 ) ) ; <nl> + CHECK_INT32V_INLINE ( - 2 , 5 , U32V_5 ( - 2 ) ) ; <nl> + CHECK_INT32V_INLINE ( - 63 , 5 , U32V_5 ( - 63 ) ) ; <nl> + CHECK_INT32V_INLINE ( - 64 , 5 , U32V_5 ( - 64 ) ) ; <nl> + <nl> + CHECK_INT32V_INLINE ( - 257 , 5 , U32V_5 ( - 257 ) ) ; <nl> + CHECK_INT32V_INLINE ( - 1512 , 5 , U32V_5 ( - 1512 ) ) ; <nl> + CHECK_INT32V_INLINE ( - 4567 , 5 , U32V_5 ( - 4567 ) ) ; <nl> + CHECK_INT32V_INLINE ( - 15067 , 5 , U32V_5 ( - 15067 ) ) ; <nl> + CHECK_INT32V_INLINE ( - 254061 , 5 , U32V_5 ( - 254061 ) ) ; <nl> + CHECK_INT32V_INLINE ( - 1364061 , 5 , U32V_5 ( - 1364061 ) ) ; <nl> + CHECK_INT32V_INLINE ( - 6364061 , 5 , U32V_5 ( - 6364061 ) ) ; <nl> + CHECK_INT32V_INLINE ( - 8364061 , 5 , U32V_5 ( - 8364061 ) ) ; <nl> + CHECK_INT32V_INLINE ( - 28364061 , 5 , U32V_5 ( - 28364061 ) ) ; <nl> + CHECK_INT32V_INLINE ( - 228364061 , 5 , U32V_5 ( - 228364061 ) ) ; <nl> + <nl> + CHECK_INT32V_INLINE ( 227 , 5 , U32V_5 ( 227 ) ) ; <nl> + CHECK_INT32V_INLINE ( 1209 , 5 , U32V_5 ( 1209 ) ) ; <nl> + CHECK_INT32V_INLINE ( 4272 , 5 , U32V_5 ( 4272 ) ) ; <nl> + CHECK_INT32V_INLINE ( 62372 , 5 , U32V_5 ( 62372 ) ) ; <nl> + CHECK_INT32V_INLINE ( 324372 , 5 , U32V_5 ( 324372 ) ) ; <nl> + CHECK_INT32V_INLINE ( 2274372 , 5 , U32V_5 ( 2274372 ) ) ; <nl> + CHECK_INT32V_INLINE ( 7274372 , 5 , U32V_5 ( 7274372 ) ) ; <nl> + CHECK_INT32V_INLINE ( 9274372 , 5 , U32V_5 ( 9274372 ) ) ; <nl> + CHECK_INT32V_INLINE ( 42374372 , 5 , U32V_5 ( 42374372 ) ) ; <nl> + CHECK_INT32V_INLINE ( 429374372 , 5 , U32V_5 ( 429374372 ) ) ; <nl> + <nl> + const int32_t min = kMinInt ; <nl> + for ( int i = min 
; i < min + 10 ; i + + ) { <nl> + CHECK_INT32V_INLINE ( i , 5 , U32V_5 ( i ) ) ; <nl> + } <nl> + <nl> + const int32_t max = kMaxInt ; <nl> + for ( int i = max ; i > max - 10 ; i - - ) { <nl> + CHECK_INT32V_INLINE ( i , 5 , U32V_5 ( i ) ) ; <nl> + } <nl> + } <nl> + <nl> + TEST_F ( DecoderTest , ReadU32v_off_end1 ) { <nl> + static const byte data [ ] = { U32V_1 ( 11 ) } ; <nl> + int length = 0 ; <nl> + decoder . Reset ( data , data ) ; <nl> + decoder . checked_read_u32v ( decoder . start ( ) , 0 , & length ) ; <nl> + EXPECT_EQ ( 0 , length ) ; <nl> + EXPECT_FALSE ( decoder . ok ( ) ) ; <nl> + } <nl> + <nl> + TEST_F ( DecoderTest , ReadU32v_off_end2 ) { <nl> + static const byte data [ ] = { U32V_2 ( 1111 ) } ; <nl> + for ( size_t i = 0 ; i < sizeof ( data ) ; i + + ) { <nl> + int length = 0 ; <nl> + decoder . Reset ( data , data + i ) ; <nl> + decoder . checked_read_u32v ( decoder . start ( ) , 0 , & length ) ; <nl> + EXPECT_EQ ( i , length ) ; <nl> + EXPECT_FALSE ( decoder . ok ( ) ) ; <nl> + } <nl> + } <nl> + <nl> + TEST_F ( DecoderTest , ReadU32v_off_end3 ) { <nl> + static const byte data [ ] = { U32V_3 ( 111111 ) } ; <nl> + for ( size_t i = 0 ; i < sizeof ( data ) ; i + + ) { <nl> + int length = 0 ; <nl> + decoder . Reset ( data , data + i ) ; <nl> + decoder . checked_read_u32v ( decoder . start ( ) , 0 , & length ) ; <nl> + EXPECT_EQ ( i , length ) ; <nl> + EXPECT_FALSE ( decoder . ok ( ) ) ; <nl> + } <nl> + } <nl> + <nl> + TEST_F ( DecoderTest , ReadU32v_off_end4 ) { <nl> + static const byte data [ ] = { U32V_4 ( 11111111 ) } ; <nl> + for ( size_t i = 0 ; i < sizeof ( data ) ; i + + ) { <nl> + int length = 0 ; <nl> + decoder . Reset ( data , data + i ) ; <nl> + decoder . checked_read_u32v ( decoder . start ( ) , 0 , & length ) ; <nl> + EXPECT_EQ ( i , length ) ; <nl> + EXPECT_FALSE ( decoder . ok ( ) ) ; <nl> + } <nl> + } <nl> + <nl> + TEST_F ( DecoderTest , ReadU32v_off_end5 ) { <nl> + static const byte data [ ] = { U32V_5 ( 111111111 ) } ; <nl> + for ( size_t i = 0 ; i < sizeof ( data ) ; i + + ) { <nl> + int length = 0 ; <nl> + decoder . Reset ( data , data + i ) ; <nl> + decoder . checked_read_u32v ( decoder . start ( ) , 0 , & length ) ; <nl> + EXPECT_EQ ( i , length ) ; <nl> + EXPECT_FALSE ( decoder . ok ( ) ) ; <nl> + } <nl> + } <nl> + <nl> + TEST_F ( DecoderTest , ReadU32v_extra_bits ) { <nl> + byte data [ ] = { 0x80 , 0x80 , 0x80 , 0x80 , 0x00 } ; <nl> + for ( int i = 1 ; i < 16 ; i + + ) { <nl> + data [ 4 ] = static_cast < byte > ( i < < 4 ) ; <nl> + int length = 0 ; <nl> + decoder . Reset ( data , data + sizeof ( data ) ) ; <nl> + decoder . checked_read_u32v ( decoder . start ( ) , 0 , & length ) ; <nl> + EXPECT_EQ ( 5 , length ) ; <nl> + EXPECT_FALSE ( decoder . ok ( ) ) ; <nl> + } <nl> + } <nl> + <nl> + } / / namespace wasm <nl> + } / / namespace internal <nl> + } / / namespace v8 <nl> | [ wasm ] Add support and unittests for decoding signed LEB128 . | v8/v8 | c5b2f1536b79b9c0dd3fbb1a729edce277959746 | 2016-02-29T18:42:26Z |
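The v8 commit above tests signed-LEB128 (sleb128) reads at every encoded width, including the sign-extension cases for negative values and the five-byte extremes at kMinInt/kMaxInt. Below is a minimal standalone C++ sketch of the decoding scheme those tests exercise; the name DecodeSleb32 and the error convention are illustrative, not V8's internal API.

#include <cassert>
#include <cstdint>

// Decodes an sleb128-encoded int32 from [p, end); *length receives the
// number of bytes consumed, or 0 on error (underflow / too many bytes).
static int32_t DecodeSleb32(const uint8_t* p, const uint8_t* end, int* length) {
  uint32_t result = 0;
  int shift = 0;
  const uint8_t* pos = p;
  while (pos < end && shift < 35) {            // at most 5 bytes for 32 bits
    uint8_t b = *pos++;
    result |= static_cast<uint32_t>(b & 0x7f) << shift;  // shift is 0..28 here
    shift += 7;
    if ((b & 0x80) == 0) {                     // continuation bit clear: done
      if (shift < 32 && (b & 0x40) != 0)       // payload sign bit is set
        result |= ~0u << shift;                // sign-extend into high bits
      *length = static_cast<int>(pos - p);
      return static_cast<int32_t>(result);
    }
  }
  *length = 0;
  return 0;
}

int main() {
  const uint8_t neg63[] = {0x41};  // payload 0x41 with sign bit -> -63
  const uint8_t big[] = {0x80, 0x80, 0x80, 0x80, 0x01};  // 1 << 28, 5 bytes
  int len = 0;
  assert(DecodeSleb32(neg63, neg63 + 1, &len) == -63 && len == 1);
  assert(DecodeSleb32(big, big + 5, &len) == (1 << 28) && len == 5);
  return 0;
}

Note that this sketch silently drops unused high bits in the fifth byte; the ReadU32v_extra_bits test above expects a strict decoder to flag those as an error.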
mmm a / test / test_torch . py <nl> ppp b / test / test_torch . py <nl> def run_subtest ( guess_rank , actual_rank , matrix_size , batches , device , pca , * * op <nl> def test_ldexp ( self , device ) : <nl> # random values <nl> mantissas = torch . randn ( 64 , device = device ) <nl> - exponents = torch . randint ( - 31 , 31 , ( 64 , ) , device = device ) <nl> + exponents = torch . randint ( - 31 , 31 , ( 64 , ) , device = device , dtype = torch . int32 ) <nl> <nl> # basic test <nl> - np_outcome = np . ldexp ( mantissas . cpu ( ) . numpy ( ) , exponents . cpu ( ) . numpy ( ) ) <nl> + np_outcome = np . ldexp ( mantissas . numpy ( ) , exponents . numpy ( ) ) <nl> pt_outcome_1 = torch . ldexp ( mantissas , exponents ) <nl> pt_outcome_2 = mantissas . ldexp ( exponents ) <nl> self . assertEqual ( np_outcome , pt_outcome_1 ) <nl> def test_ldexp ( self , device ) : <nl> <nl> # test bounds <nl> mantissas = torch . tensor ( [ float ( ' inf ' ) , float ( ' - inf ' ) , float ( ' inf ' ) , float ( ' nan ' ) ] , device = device ) <nl> - exponents = torch . randint ( 0 , 31 , ( 4 , ) , device = device ) <nl> - np_outcome = np . ldexp ( mantissas . cpu ( ) . numpy ( ) , exponents . cpu ( ) . numpy ( ) ) <nl> + exponents = torch . randint ( 0 , 31 , ( 4 , ) , device = device , dtype = torch . int32 ) <nl> + np_outcome = np . ldexp ( mantissas . numpy ( ) , exponents . numpy ( ) ) <nl> pt_outcome = torch . ldexp ( mantissas , exponents ) <nl> self . assertEqual ( np_outcome , pt_outcome ) <nl> <nl> | Fix test_ldexp on Windows ( ) | pytorch/pytorch | dc843fe19761a5a6db54b114deba937096884bca | 2020-11-20T23:41:59Z |
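The pytorch fix above pins the exponent tensor to int32 and compares against numpy directly; the previous default 64-bit integer exponents likely broke np.ldexp on Windows, where the exponent loops are built on the platform C long (32-bit there). The operation itself is exact scaling by a power of two; a small standalone C++ check illustrates the semantics the test asserts, since std::ldexp behaves the same way elementwise.

#include <cmath>
#include <cstdio>
#include <limits>

int main() {
  // ldexp(m, e) computes m * 2^e exactly, with no intermediate rounding.
  std::printf("%g\n", std::ldexp(0.75, 4));   // 12: 0.75 * 2^4
  std::printf("%g\n", std::ldexp(1.0, -3));   // 0.125: 1.0 * 2^-3

  // The bounds part of the test: non-finite mantissas pass straight through.
  const double inf = std::numeric_limits<double>::infinity();
  std::printf("%g\n", std::ldexp(inf, 10));           // inf
  std::printf("%g\n", std::ldexp(-inf, 10));          // -inf
  std::printf("%g\n", std::ldexp(std::nan(""), 10));  // nan
  return 0;
}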
mmm a / lang / matmul . cpp <nl> ppp b / lang / matmul . cpp <nl> Matrix operator + ( const Matrix & A , const Matrix & B ) { <nl> for ( int i = 0 ; i < A . n ; i + + ) { <nl> for ( int j = 0 ; j < A . m ; j + + ) { <nl> C ( i , j ) = A ( i , j ) + B ( i , j ) ; <nl> - TC_P ( C ( i , j ) . node . get ( ) ) ; <nl> } <nl> } <nl> return C ; <nl> auto test_slp = [ ] ( ) { <nl> addr . coeff_const = i ; <nl> <nl> addr . stream_id = 2 ; <nl> - TC_P ( vec_c ( i ) . node . get ( ) ) ; <nl> - TC_P ( ( int ) vec_c ( i ) . node - > type ) ; <nl> ret . store ( vec_c ( i ) , addr ) ; <nl> } <nl> <nl> mmm a / lang / tlang . h <nl> ppp b / lang / tlang . h <nl> struct Address { <nl> } <nl> } ; <nl> <nl> + class Expr ; <nl> + <nl> class Node { <nl> public : <nl> enum class Type : int { mul , add , sub , div , load , store , combine , constant } ; <nl> <nl> Address addr ; <nl> - std : : vector < Handle < Node > > ch ; / / Four child max <nl> - std : : vector < Handle < Node > > serial_ops ; <nl> + std : : vector < Expr > ch ; / / Four child max <nl> Type type ; <nl> std : : string var_name ; <nl> float64 value ; <nl> class Node { <nl> Node ( Type type ) : type ( type ) { <nl> } <nl> <nl> - Node ( Type type , Handle < Node > ch0 , Handle < Node > ch1 ) : type ( type ) { <nl> - ch . resize ( 2 ) ; <nl> - ch [ 0 ] = ch0 ; <nl> - ch [ 1 ] = ch1 ; <nl> - } <nl> + Node ( Type type , Expr ch0 , Expr ch1 ) ; <nl> } ; <nl> <nl> using NodeType = Node : : Type ; <nl> <nl> / / Reference counted . . . <nl> class Expr { <nl> - public : <nl> + private : <nl> Handle < Node > node ; <nl> <nl> + public : <nl> + <nl> Expr ( ) { <nl> } <nl> <nl> class Expr { <nl> } <nl> <nl> Expr ( Handle < Node > node ) : node ( node ) { <nl> + TC_P ( ( int ) node - > type ) ; <nl> } <nl> <nl> # define BINARY_OP ( op , name ) \ <nl> class Expr { <nl> return node . get ( ) ; <nl> } <nl> <nl> + const Node * operator - > ( ) const { <nl> + return node . get ( ) ; <nl> + } <nl> + <nl> bool operator < ( const Expr & o ) const { <nl> return node . get ( ) < o . node . get ( ) ; <nl> } <nl> + <nl> + operator bool ( ) const { <nl> + return node . get ( ) ! = nullptr ; <nl> + } <nl> + <nl> + operator void * ( ) const { <nl> + return ( void * ) node . get ( ) ; <nl> + } <nl> } ; <nl> <nl> + Node : : Node ( Type type , Expr ch0 , Expr ch1 ) : type ( type ) { <nl> + ch . resize ( 2 ) ; <nl> + ch [ 0 ] = ch0 ; <nl> + ch [ 1 ] = ch1 ; <nl> + } <nl> + <nl> inline Expr load ( Address addr ) { <nl> auto n = std : : make_shared < Node > ( NodeType : : load ) ; <nl> TC_ASSERT ( addr . initialized ( ) ) ; <nl> class CodeGen { <nl> <nl> using FunctionType = void ( * ) ( float32 * , float32 * , float32 * , int ) ; <nl> <nl> - std : : string run ( const Expr & e ) { <nl> + std : : string run ( Expr & e ) { <nl> code = " # include < immintrin . h > \ n # include < cstdio > \ n " ; <nl> code + = " using float32 = float ; \ n " ; <nl> code + = " using float64 = double ; \ n \ n " ; <nl> class CodeGen { <nl> " * stream02 , " <nl> " int n ) { \ n " ; <nl> code + = fmt : : format ( " for ( int i = 0 ; i < n ; i + = { } ) { { \ n " , simd_width ) ; <nl> - visit ( e . node ) ; <nl> + visit ( e ) ; <nl> code + = " } \ n } \ n " ; <nl> return code ; <nl> } <nl> class CodeGen { <nl> return ; <nl> visited . insert ( expr ) ; <nl> ret . push_back ( expr ) ; <nl> - for ( auto c : expr - > ch ) { <nl> + for ( auto c : expr - > ch ) { <nl> / / TODO : refactor . . . 
<nl> / / visit ( c ) ; <nl> } <nl> class CodeGen { <nl> for ( int i = 0 ; i < group_size ; i + + ) { <nl> } <nl> <nl> - / / expr std : : vector < Handle < Node > > & nodes ; <nl> + / / expr std : : vector < Handle < Node > > & nodes ; <nl> } <nl> <nl> - void visit ( const Handle < Node > & node ) { <nl> + void visit ( Expr & node ) { <nl> for ( auto & c : node - > ch ) { <nl> if ( c ) <nl> visit ( c ) ; <nl> class CodeGen { <nl> } else if ( node - > type = = NodeType : : combine ) { <nl> / / do nothing <nl> } else { <nl> + TC_P ( ( int ) node - > type ) ; <nl> TC_NOT_IMPLEMENTED ; <nl> } <nl> } <nl> class CodeGen { <nl> # endif <nl> } <nl> <nl> - FunctionType get ( const Expr & e , int group_size = 4 ) { <nl> + FunctionType get ( Expr & e , int group_size = 4 ) { <nl> SLP ( e , group_size ) ; <nl> run ( e ) ; <nl> { <nl> class CodeGen { <nl> } <nl> <nl> bool prior_to ( Expr & a , Expr & b ) { <nl> - auto address1 = a . node - > addr ; <nl> - auto address2 = b . node - > addr ; <nl> + auto address1 = a - > addr ; <nl> + auto address2 = b - > addr ; <nl> return address1 . same_type ( address2 ) & & <nl> address1 . offset ( ) + 1 = = address2 . offset ( ) ; <nl> } <nl> class CodeGen { <nl> std : : set < void * > visited ; <nl> <nl> std : : function < void ( Expr ) > walk = [ & ] ( Expr expr ) - > void { <nl> - TC_ASSERT ( expr . node ! = nullptr ) ; <nl> - TC_P ( ( int ) expr . node - > type ) ; <nl> - if ( visited . find ( expr . node . get ( ) ) ! = visited . end ( ) ) <nl> + TC_ASSERT ( expr ) ; <nl> + if ( visited . find ( expr ) ! = visited . end ( ) ) <nl> return ; <nl> - visited . insert ( expr . node . get ( ) ) ; <nl> - for ( auto & ch : expr . node - > ch ) { <nl> + visited . insert ( expr ) ; <nl> + for ( auto & ch : expr - > ch ) { <nl> walk ( ch ) ; <nl> } <nl> inst . push_back ( expr ) ; <nl> class CodeGen { <nl> <nl> std : : vector < int > continuous_loads ( int i ) { <nl> std : : vector < int > ret ; <nl> - if ( grouped [ i ] | | inst [ i ] . node - > type ! = NodeType : : load ) { <nl> + if ( grouped [ i ] | | inst [ i ] - > type ! = NodeType : : load ) { <nl> return ret ; <nl> } <nl> ret . push_back ( i ) ; <nl> while ( 1 ) { <nl> bool found = false ; <nl> for ( int j = 0 ; j < inst . size ( ) ; j + + ) { <nl> - if ( grouped [ j ] | | i = = j | | inst [ i ] . node - > type ! = NodeType : : load ) { <nl> + if ( grouped [ j ] | | i = = j | | inst [ i ] - > type ! = NodeType : : load ) { <nl> continue ; <nl> } <nl> if ( prior_to ( inst [ i ] , inst [ j ] ) ) { <nl> class CodeGen { <nl> } / / namespace Tlang <nl> <nl> TC_NAMESPACE_END <nl> + <nl> + / * <nl> + Expr should be what the users play with . <nl> + Simply a ref - counted pointer to nodes , with some operator overloading for users to program <nl> + Node is the IR node , with computational graph connectivity , imm , op type etc . <nl> + <nl> + No double support this time . <nl> + * / <nl> | Handle < Node > - > Expr | taichi-dev/taichi | 4131c2662607bad51babf70db6c9cc9b7382d8dc | 2018-12-03T02:58:36Z |
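The taichi change replaces raw Handle<Node> plumbing with the ref-counted Expr handle: the shared pointer becomes private, access goes through operator-> and boolean conversion, and the two-child Node constructor moves out of line so Node can store std::vector<Expr>. A compilable sketch of that pattern follows, assuming C++17 (which permits std::vector of a forward-declared element type); names mirror the diff but this is not the full class.

#include <memory>
#include <vector>

class Expr;  // forward declaration, exactly as in the diff

struct Node {
  enum class Type : int { mul, add, sub, div, load, store, combine, constant };
  Type type;
  std::vector<Expr> ch;  // edges hold Exprs, keeping children alive
  explicit Node(Type t) : type(t) {}
  Node(Type t, Expr a, Expr b);  // defined once Expr is complete
};

class Expr {
  std::shared_ptr<Node> node;  // private: users go through operator->
 public:
  Expr() = default;
  Expr(std::shared_ptr<Node> n) : node(std::move(n)) {}
  Node* operator->() { return node.get(); }
  const Node* operator->() const { return node.get(); }
  explicit operator bool() const { return node != nullptr; }
  friend Expr operator+(const Expr& a, const Expr& b) {
    return Expr(std::make_shared<Node>(Node::Type::add, a, b));
  }
};

inline Node::Node(Type t, Expr a, Expr b) : type(t) {
  ch.push_back(a);
  ch.push_back(b);
}

With this shape, call sites read a->addr instead of a.node->addr, matching the replacements in prior_to() and the SLP walk above; the diff additionally defines operator void* so an Expr can key the std::set<void*> of visited nodes.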
mmm a / xbmc / cores / VideoPlayer / DVDCodecs / Video / DVDVideoCodec . h <nl> ppp b / xbmc / cores / VideoPlayer / DVDCodecs / Video / DVDVideoCodec . h <nl> struct DVDVideoUserData <nl> # define DVP_FLAG_ALLOCATED 0x00000004 / / < Set to indicate that this has allocated data <nl> # define DVP_FLAG_INTERLACED 0x00000008 / / < Set to indicate that this frame is interlaced <nl> <nl> - # define DVP_FLAG_NOSKIP 0x00000010 / / < indicate this picture should never be dropped <nl> - # define DVP_FLAG_DROPPED 0x00000020 / / < indicate that this picture has been dropped in decoder stage , will have no data <nl> + # define DVP_FLAG_DROPPED 0x00000010 / / < indicate that this picture has been dropped in decoder stage , will have no data <nl> <nl> # define DVD_CODEC_CTRL_SKIPDEINT 0x01000000 / / < indicate that this picture was requested to have been dropped in deint stage <nl> # define DVD_CODEC_CTRL_NO_POSTPROC 0x02000000 / / < see GetCodecStats <nl> - # define DVD_CODEC_CTRL_DRAIN 0x04000000 / / < see GetCodecStats <nl> + # define DVD_CODEC_CTRL_HURRY 0x04000000 / / < see GetCodecStats <nl> + # define DVD_CODEC_CTRL_DROP 0x08000000 / / < this frame is going to be dropped in output <nl> + # define DVD_CODEC_CTRL_DRAIN 0x10000000 / / < squeeze out pictured without feeding new packets <nl> <nl> / / DVP_FLAG 0x00000100 - 0x00000f00 is in use by libmpeg2 ! <nl> <nl> class CDVDVideoCodec <nl> * if speed is not normal the codec can switch off <nl> * postprocessing and de - interlacing <nl> * <nl> - * DVD_CODEC_CTRL_DRAIN : <nl> + * DVD_CODEC_CTRL_HURRY : <nl> * codecs may do postprocessing and de - interlacing . <nl> * If video buffers in RenderManager are about to run dry , <nl> * this is signaled to codec . Codec can wait for post - proc <nl> * to be finished instead of returning empty and getting another <nl> * packet . <nl> * <nl> + * DVD_CODEC_CTRL_DRAIN : <nl> + * instruct decoder to deliver last pictures without requesting <nl> + * new packets <nl> + * <nl> + * DVD_CODEC_CTRL_DROP : <nl> + * this packet is going to be dropped . decoder is free to use it <nl> + * for decoding <nl> + * <nl> * / <nl> virtual void SetCodecControl ( int flags ) { } <nl> <nl> mmm a / xbmc / cores / VideoPlayer / DVDCodecs / Video / DVDVideoCodecFFmpeg . cpp <nl> ppp b / xbmc / cores / VideoPlayer / DVDCodecs / Video / DVDVideoCodecFFmpeg . cpp <nl> bool CDVDVideoCodecFFmpeg : : GetPictureCommon ( DVDVideoPicture * pDvdVideoPicture ) <nl> pDvdVideoPicture - > iFlags | = m_pFrame - > interlaced_frame ? DVP_FLAG_INTERLACED : 0 ; <nl> pDvdVideoPicture - > iFlags | = m_pFrame - > top_field_first ? DVP_FLAG_TOP_FIELD_FIRST : 0 ; <nl> <nl> + if ( m_codecControlFlags & DVD_CODEC_CTRL_DROP ) <nl> + pDvdVideoPicture - > iFlags | = DVP_FLAG_DROPPED ; <nl> + <nl> pDvdVideoPicture - > chroma_position = m_pCodecContext - > chroma_sample_location ; <nl> pDvdVideoPicture - > color_primaries = m_pCodecContext - > color_primaries ; <nl> pDvdVideoPicture - > color_transfer = m_pCodecContext - > color_trc ; <nl> mmm a / xbmc / cores / VideoPlayer / DVDMessage . h <nl> ppp b / xbmc / cores / VideoPlayer / DVDMessage . h <nl> class CDVDMsg : public IDVDResourceCounted < CDVDMsg > <nl> <nl> VIDEO_NOSKIP , / / next pictures is not to be skipped by the video renderer <nl> VIDEO_SET_ASPECT , / / set aspectratio of video <nl> + VIDEO_DRAIN , / / wait for decoder to output last frame <nl> <nl> / / audio related messages <nl> <nl> mmm a / xbmc / cores / VideoPlayer / VideoPlayerVideo . 
cpp <nl> ppp b / xbmc / cores / VideoPlayer / VideoPlayerVideo . cpp <nl> <nl> <nl> using namespace RenderManager ; <nl> <nl> - class CPulldownCorrection <nl> - { <nl> - public : <nl> - CPulldownCorrection ( ) <nl> - { <nl> - m_duration = 0 . 0 ; <nl> - m_accum = 0 ; <nl> - m_total = 0 ; <nl> - m_next = m_pattern . end ( ) ; <nl> - } <nl> - <nl> - void init ( double fps , int * begin , int * end ) <nl> - { <nl> - std : : copy ( begin , end , std : : back_inserter ( m_pattern ) ) ; <nl> - m_duration = DVD_TIME_BASE / fps ; <nl> - m_accum = 0 ; <nl> - m_total = std : : accumulate ( m_pattern . begin ( ) , m_pattern . end ( ) , 0 ) ; <nl> - m_next = m_pattern . begin ( ) ; <nl> - } <nl> - <nl> - double pts ( ) <nl> - { <nl> - double input = m_duration * std : : distance ( m_pattern . begin ( ) , m_next ) ; <nl> - double output = m_duration * m_accum / m_total ; <nl> - return output - input ; <nl> - } <nl> - <nl> - double dur ( ) <nl> - { <nl> - return m_duration * m_pattern . size ( ) * * m_next / m_total ; <nl> - } <nl> - <nl> - void next ( ) <nl> - { <nl> - m_accum + = * m_next ; <nl> - if ( + + m_next = = m_pattern . end ( ) ) <nl> - { <nl> - m_next = m_pattern . begin ( ) ; <nl> - m_accum = 0 ; <nl> - } <nl> - } <nl> - <nl> - bool enabled ( ) <nl> - { <nl> - return ! m_pattern . empty ( ) ; <nl> - } <nl> - private : <nl> - double m_duration ; <nl> - int m_total ; <nl> - int m_accum ; <nl> - std : : vector < int > m_pattern ; <nl> - std : : vector < int > : : iterator m_next ; <nl> - } ; <nl> - <nl> - <nl> class CDVDMsgVideoCodecChange : public CDVDMsg <nl> { <nl> public : <nl> void CVideoPlayerVideo : : CloseStream ( bool bWaitForBuffers ) <nl> { <nl> / / wait until buffers are empty <nl> if ( bWaitForBuffers & & m_speed > 0 ) <nl> + { <nl> + m_messageQueue . Put ( new CDVDMsg ( CDVDMsg : : VIDEO_DRAIN ) , 0 ) ; <nl> m_messageQueue . WaitUntilEmpty ( ) ; <nl> + } <nl> <nl> m_messageQueue . Abort ( ) ; <nl> <nl> void CVideoPlayerVideo : : Process ( ) <nl> { <nl> CLog : : Log ( LOGNOTICE , " running thread : video_thread " ) ; <nl> <nl> - DVDVideoPicture picture ; <nl> - CPulldownCorrection pulldown ; <nl> - CDVDVideoPPFFmpeg mPostProcess ( " " ) ; <nl> - std : : string sPostProcessType ; <nl> - bool bPostProcessDeint = false ; <nl> - <nl> - memset ( & picture , 0 , sizeof ( DVDVideoPicture ) ) ; <nl> + memset ( & m_picture , 0 , sizeof ( DVDVideoPicture ) ) ; <nl> <nl> double pts = 0 ; <nl> double frametime = ( double ) DVD_TIME_BASE / m_fFrameRate ; <nl> void CVideoPlayerVideo : : Process ( ) <nl> <nl> while ( ! m_bStop ) <nl> { <nl> - int iQueueTimeOut = ( int ) ( m_stalled ? frametime / 4 : frametime * 10 ) / 1000 ; <nl> + int iQueueTimeOut = ( int ) ( m_stalled ? frametime : frametime * 10 ) / 1000 ; <nl> int iPriority = ( m_speed = = DVD_PLAYSPEED_PAUSE & & m_syncState = = IDVDStreamPlayer : : SYNC_INSYNC ) ? 1 : 0 ; <nl> <nl> if ( m_syncState = = IDVDStreamPlayer : : SYNC_WAITSYNC ) <nl> void CVideoPlayerVideo : : Process ( ) <nl> if ( iPriority ) <nl> continue ; <nl> <nl> + / / check if decoder has produced some output <nl> + m_pVideoCodec - > SetCodecControl ( DVD_CODEC_CTRL_DRAIN ) ; <nl> + int decoderState = m_pVideoCodec - > Decode ( NULL , 0 , DVD_NOPTS_VALUE , DVD_NOPTS_VALUE ) ; <nl> + ProcessDecoderOutput ( decoderState , frametime , pts ) ; <nl> + <nl> / / Okey , start rendering at stream fps now instead , we are likely in a stillframe <nl> if ( ! 
m_stalled ) <nl> { <nl> if ( m_syncState = = IDVDStreamPlayer : : SYNC_INSYNC ) <nl> CLog : : Log ( LOGINFO , " CVideoPlayerVideo - Stillframe detected , switching to forced % f fps " , m_fFrameRate ) ; <nl> m_stalled = true ; <nl> - pts + = frametime * 4 ; <nl> + pts + = frametime * 4 ; <nl> } <nl> <nl> / / Waiting timed out , output last picture <nl> - if ( picture . iFlags & DVP_FLAG_ALLOCATED ) <nl> + if ( m_picture . iFlags & DVP_FLAG_ALLOCATED ) <nl> { <nl> - / / Remove interlaced flag before outputting <nl> - / / no need to output this as if it was interlaced <nl> - picture . iFlags & = ~ DVP_FLAG_INTERLACED ; <nl> - picture . iFlags | = DVP_FLAG_NOSKIP ; <nl> - OutputPicture ( & picture , pts ) ; <nl> - pts + = frametime ; <nl> + OutputPicture ( & m_picture , pts ) ; <nl> + pts + = frametime ; <nl> } <nl> <nl> continue ; <nl> void CVideoPlayerVideo : : Process ( ) <nl> { <nl> if ( m_pVideoCodec ) <nl> m_pVideoCodec - > Reset ( ) ; <nl> - picture . iFlags & = ~ DVP_FLAG_ALLOCATED ; <nl> + m_picture . iFlags & = ~ DVP_FLAG_ALLOCATED ; <nl> m_packets . clear ( ) ; <nl> m_droppingStats . Reset ( ) ; <nl> } <nl> void CVideoPlayerVideo : : Process ( ) <nl> bool sync = static_cast < CDVDMsgBool * > ( pMsg ) - > m_value ; <nl> if ( m_pVideoCodec ) <nl> m_pVideoCodec - > Reset ( ) ; <nl> - picture . iFlags & = ~ DVP_FLAG_ALLOCATED ; <nl> + m_picture . iFlags & = ~ DVP_FLAG_ALLOCATED ; <nl> m_packets . clear ( ) ; <nl> <nl> m_pullupCorrection . Flush ( ) ; <nl> void CVideoPlayerVideo : : Process ( ) <nl> CDVDMsgVideoCodecChange * msg ( static_cast < CDVDMsgVideoCodecChange * > ( pMsg ) ) ; <nl> OpenStream ( msg - > m_hints , msg - > m_codec ) ; <nl> msg - > m_codec = NULL ; <nl> - picture . iFlags & = ~ DVP_FLAG_ALLOCATED ; <nl> + m_picture . iFlags & = ~ DVP_FLAG_ALLOCATED ; <nl> } <nl> + else if ( pMsg - > IsType ( CDVDMsg : : VIDEO_DRAIN ) ) <nl> + { <nl> + while ( ! m_bStop ) <nl> + { <nl> + m_pVideoCodec - > SetCodecControl ( DVD_CODEC_CTRL_DRAIN ) ; <nl> + int decoderState = m_pVideoCodec - > Decode ( NULL , 0 , DVD_NOPTS_VALUE , DVD_NOPTS_VALUE ) ; <nl> + <nl> + bool cont = ProcessDecoderOutput ( decoderState , frametime , pts ) ; <nl> + <nl> + if ( ! cont ) <nl> + break ; <nl> <nl> - if ( pMsg - > IsType ( CDVDMsg : : DEMUXER_PACKET ) ) <nl> + if ( decoderState & VC_BUFFER ) <nl> + break ; <nl> + } <nl> + } <nl> + else if ( pMsg - > IsType ( CDVDMsg : : DEMUXER_PACKET ) ) <nl> { <nl> DemuxPacket * pPacket = ( ( CDVDMsgDemuxerPacket * ) pMsg ) - > GetPacket ( ) ; <nl> bool bPacketDrop = ( ( CDVDMsgDemuxerPacket * ) pMsg ) - > GetPacketDrop ( ) ; <nl> void CVideoPlayerVideo : : Process ( ) <nl> } <nl> int codecControl = 0 ; <nl> if ( iDropDirective & EOS_BUFFER_LEVEL ) <nl> - codecControl | = DVD_CODEC_CTRL_DRAIN ; <nl> + codecControl | = DVD_CODEC_CTRL_HURRY ; <nl> if ( m_speed > DVD_PLAYSPEED_NORMAL ) <nl> codecControl | = DVD_CODEC_CTRL_NO_POSTPROC ; <nl> + if ( bPacketDrop ) <nl> + codecControl | = DVD_CODEC_CTRL_DROP ; <nl> m_pVideoCodec - > SetCodecControl ( codecControl ) ; <nl> if ( iDropDirective & EOS_DROPPED ) <nl> { <nl> void CVideoPlayerVideo : : Process ( ) <nl> EDEINTERLACEMODE mDeintMode = CMediaSettings : : GetInstance ( ) . GetCurrentVideoSettings ( ) . m_DeinterlaceMode ; <nl> EINTERLACEMETHOD mInt = m_renderManager . AutoInterlaceMethod ( CMediaSettings : : GetInstance ( ) . GetCurrentVideoSettings ( ) . m_InterlaceMethod ) ; <nl> <nl> - unsigned int mFilters = 0 ; <nl> + unsigned int mFilters = 0 ; <nl> <nl> if ( mDeintMode ! 
= VS_DEINTERLACEMODE_OFF ) <nl> { <nl> void CVideoPlayerVideo : : Process ( ) <nl> / / setting the flag to a new value <nl> bRequestDrop = false ; <nl> <nl> - / / loop while no error <nl> + / / loop while no error and decoder produces pics <nl> while ( ! m_bStop ) <nl> { <nl> - / / if decoder was flushed , we need to seek back again to resume rendering <nl> - if ( iDecoderState & VC_FLUSHED ) <nl> - { <nl> - CLog : : Log ( LOGDEBUG , " CVideoPlayerVideo - video decoder was flushed " ) ; <nl> - while ( ! m_packets . empty ( ) ) <nl> - { <nl> - CDVDMsgDemuxerPacket * msg = ( CDVDMsgDemuxerPacket * ) m_packets . front ( ) . message - > Acquire ( ) ; <nl> - m_packets . pop_front ( ) ; <nl> - <nl> - / / all packets except the last one should be dropped <nl> - / / if prio packets and current packet should be dropped , this is likely a new reset <nl> - msg - > m_drop = ! m_packets . empty ( ) | | ( iPriority > 0 & & bPacketDrop ) ; <nl> - m_messageQueue . Put ( msg , iPriority + 10 ) ; <nl> - } <nl> - <nl> - m_pVideoCodec - > Reset ( ) ; <nl> - m_packets . clear ( ) ; <nl> - picture . iFlags & = ~ DVP_FLAG_ALLOCATED ; <nl> - m_renderManager . DiscardBuffer ( ) ; <nl> - break ; <nl> - } <nl> - <nl> - if ( iDecoderState & VC_REOPEN ) <nl> - { <nl> - while ( ! m_packets . empty ( ) ) <nl> - { <nl> - CDVDMsgDemuxerPacket * msg = ( CDVDMsgDemuxerPacket * ) m_packets . front ( ) . message - > Acquire ( ) ; <nl> - msg - > m_drop = false ; <nl> - m_packets . pop_front ( ) ; <nl> - m_messageQueue . Put ( msg , iPriority + 10 ) ; <nl> - } <nl> - <nl> - m_pVideoCodec - > Reopen ( ) ; <nl> - m_packets . clear ( ) ; <nl> - picture . iFlags & = ~ DVP_FLAG_ALLOCATED ; <nl> - m_renderManager . DiscardBuffer ( ) ; <nl> - break ; <nl> - } <nl> + int dropped = m_iDroppedFrames ; <nl> + bool cont = ProcessDecoderOutput ( iDecoderState , frametime , pts ) ; <nl> + iDropped + = m_iDroppedFrames - dropped ; <nl> <nl> - / / if decoder had an error , tell it to reset to avoid more problems <nl> - if ( iDecoderState & VC_ERROR ) <nl> - { <nl> - CLog : : Log ( LOGDEBUG , " CVideoPlayerVideo - video decoder returned error " ) ; <nl> + if ( ! cont ) <nl> break ; <nl> - } <nl> - <nl> - / / check for a new picture <nl> - if ( iDecoderState & VC_PICTURE ) <nl> - { <nl> - / / try to retrieve the picture ( should never fail ! ) , unless there is a demuxer bug ofcours <nl> - m_pVideoCodec - > ClearPicture ( & picture ) ; <nl> - if ( m_pVideoCodec - > GetPicture ( & picture ) ) <nl> - { <nl> - sPostProcessType . clear ( ) ; <nl> - <nl> - if ( picture . iDuration = = 0 . 0 ) <nl> - picture . iDuration = frametime ; <nl> - <nl> - if ( bPacketDrop ) <nl> - picture . iFlags | = DVP_FLAG_DROPPED ; <nl> - <nl> - if ( m_iNrOfPicturesNotToSkip > 0 ) <nl> - { <nl> - picture . iFlags | = DVP_FLAG_NOSKIP ; <nl> - m_iNrOfPicturesNotToSkip - - ; <nl> - } <nl> - <nl> - / / validate picture timing , <nl> - / / if both dts / pts invalid , use pts calulated from picture . iDuration <nl> - / / if pts invalid use dts , else use picture . pts as passed <nl> - if ( picture . dts = = DVD_NOPTS_VALUE & & picture . pts = = DVD_NOPTS_VALUE ) <nl> - picture . pts = pts ; <nl> - else if ( picture . pts = = DVD_NOPTS_VALUE ) <nl> - picture . pts = picture . dts ; <nl> - <nl> - / * use forced aspect if any * / <nl> - if ( m_fForcedAspectRatio ! = 0 . 0f ) <nl> - picture . iDisplayWidth = ( int ) ( picture . 
iDisplayHeight * m_fForcedAspectRatio ) ; <nl> - <nl> - / / Deinterlace if codec said format was interlaced or if we have selected we want to deinterlace <nl> - / / this video <nl> - if ( ( mDeintMode = = VS_DEINTERLACEMODE_AUTO & & ( picture . iFlags & DVP_FLAG_INTERLACED ) ) | | mDeintMode = = VS_DEINTERLACEMODE_FORCE ) <nl> - { <nl> - if ( mInt = = VS_INTERLACEMETHOD_SW_BLEND ) <nl> - { <nl> - if ( ! sPostProcessType . empty ( ) ) <nl> - sPostProcessType + = " , " ; <nl> - sPostProcessType + = g_advancedSettings . m_videoPPFFmpegDeint ; <nl> - bPostProcessDeint = true ; <nl> - } <nl> - } <nl> - <nl> - if ( CMediaSettings : : GetInstance ( ) . GetCurrentVideoSettings ( ) . m_PostProcess ) <nl> - { <nl> - if ( ! sPostProcessType . empty ( ) ) <nl> - sPostProcessType + = " , " ; <nl> - / / This is what mplayer uses for its " high - quality filter combination " <nl> - sPostProcessType + = g_advancedSettings . m_videoPPFFmpegPostProc ; <nl> - } <nl> - <nl> - if ( ! sPostProcessType . empty ( ) ) <nl> - { <nl> - mPostProcess . SetType ( sPostProcessType , bPostProcessDeint ) ; <nl> - if ( mPostProcess . Process ( & picture ) ) <nl> - mPostProcess . GetPicture ( & picture ) ; <nl> - } <nl> - <nl> - / * if frame has a pts ( usually originiating from demux packet ) , use that * / <nl> - if ( picture . pts ! = DVD_NOPTS_VALUE ) <nl> - { <nl> - if ( pulldown . enabled ( ) ) <nl> - picture . pts + = pulldown . pts ( ) ; <nl> - <nl> - pts = picture . pts ; <nl> - } <nl> - <nl> - if ( pulldown . enabled ( ) ) <nl> - { <nl> - picture . iDuration = pulldown . dur ( ) ; <nl> - pulldown . next ( ) ; <nl> - } <nl> - <nl> - if ( picture . iRepeatPicture ) <nl> - picture . iDuration * = picture . iRepeatPicture + 1 ; <nl> - <nl> - int iResult = OutputPicture ( & picture , pts ) ; <nl> - <nl> - frametime = ( double ) DVD_TIME_BASE / m_fFrameRate ; <nl> - <nl> - if ( m_syncState = = IDVDStreamPlayer : : SYNC_STARTING & & ! ( picture . iFlags & DVP_FLAG_DROPPED ) ) <nl> - { <nl> - m_codecname = m_pVideoCodec - > GetName ( ) ; <nl> - m_syncState = IDVDStreamPlayer : : SYNC_WAITSYNC ; <nl> - SStartMsg msg ; <nl> - msg . player = VideoPlayer_VIDEO ; <nl> - msg . cachetime = DVD_MSEC_TO_TIME ( 50 ) ; / / TODO <nl> - msg . cachetotal = DVD_MSEC_TO_TIME ( 100 ) ; / / TODO <nl> - msg . timestamp = pts ; <nl> - m_messageParent . Put ( new CDVDMsgType < SStartMsg > ( CDVDMsg : : PLAYER_STARTED , msg ) ) ; <nl> - } <nl> - <nl> - / / guess next frame pts . iDuration is always valid <nl> - if ( m_speed ! = 0 ) <nl> - pts + = picture . iDuration * m_speed / abs ( m_speed ) ; <nl> - <nl> - if ( iResult & EOS_ABORT ) <nl> - { <nl> - / / if we break here and we directly try to decode again wihout <nl> - / / flushing the video codec things break for some reason <nl> - / / i think the decoder ( libmpeg2 atleast ) still has a pointer <nl> - / / to the data , and when the packet is freed that will fail . <nl> - iDecoderState = m_pVideoCodec - > Decode ( NULL , 0 , DVD_NOPTS_VALUE , DVD_NOPTS_VALUE ) ; <nl> - break ; <nl> - } <nl> - <nl> - if ( ( iResult & EOS_DROPPED ) & & ! bPacketDrop ) <nl> - { <nl> - m_iDroppedFrames + + ; <nl> - iDropped + + ; <nl> - } <nl> - else <nl> - iDropped = 0 ; <nl> - } <nl> - else <nl> - { <nl> - CLog : : Log ( LOGWARNING , " Decoder Error getting videoPicture . 
" ) ; <nl> - m_pVideoCodec - > Reset ( ) ; <nl> - } <nl> - } <nl> <nl> - / / if the decoder needs more data , we just break this loop <nl> - / / and try to get more data from the videoQueue <nl> if ( iDecoderState & VC_BUFFER ) <nl> break ; <nl> <nl> - / / update dropping stats <nl> - int ret = CalcDropRequirement ( pts , true ) ; <nl> - if ( ret & EOS_DROPPED ) <nl> - { <nl> - m_iDroppedFrames + + ; <nl> - } <nl> - <nl> / / the decoder didn ' t need more data , flush the remaning buffer <nl> iDecoderState = m_pVideoCodec - > Decode ( NULL , 0 , DVD_NOPTS_VALUE , DVD_NOPTS_VALUE ) ; <nl> } <nl> void CVideoPlayerVideo : : Process ( ) <nl> } <nl> <nl> / / we need to let decoder release any picture retained resources . <nl> - m_pVideoCodec - > ClearPicture ( & picture ) ; <nl> + m_pVideoCodec - > ClearPicture ( & m_picture ) ; <nl> + } <nl> + <nl> + bool CVideoPlayerVideo : : ProcessDecoderOutput ( int & decoderState , double & frametime , double & pts ) <nl> + { <nl> + std : : string sPostProcessType ; <nl> + bool bPostProcessDeint = false ; <nl> + CDVDVideoPPFFmpeg mPostProcess ( " " ) ; <nl> + <nl> + / / if decoder was flushed , we need to seek back again to resume rendering <nl> + if ( decoderState & VC_FLUSHED ) <nl> + { <nl> + CLog : : Log ( LOGDEBUG , " CVideoPlayerVideo - video decoder was flushed " ) ; <nl> + while ( ! m_packets . empty ( ) ) <nl> + { <nl> + CDVDMsgDemuxerPacket * msg = ( CDVDMsgDemuxerPacket * ) m_packets . front ( ) . message - > Acquire ( ) ; <nl> + m_packets . pop_front ( ) ; <nl> + <nl> + m_messageQueue . Put ( msg , 10 ) ; <nl> + } <nl> + <nl> + m_pVideoCodec - > Reset ( ) ; <nl> + m_packets . clear ( ) ; <nl> + / / picture . iFlags & = ~ DVP_FLAG_ALLOCATED ; <nl> + m_renderManager . DiscardBuffer ( ) ; <nl> + return false ; <nl> + } <nl> + <nl> + if ( decoderState & VC_REOPEN ) <nl> + { <nl> + while ( ! m_packets . empty ( ) ) <nl> + { <nl> + CDVDMsgDemuxerPacket * msg = ( CDVDMsgDemuxerPacket * ) m_packets . front ( ) . message - > Acquire ( ) ; <nl> + m_packets . pop_front ( ) ; <nl> + m_messageQueue . Put ( msg , 10 ) ; <nl> + } <nl> + <nl> + m_pVideoCodec - > Reopen ( ) ; <nl> + m_packets . clear ( ) ; <nl> + / / picture . iFlags & = ~ DVP_FLAG_ALLOCATED ; <nl> + m_renderManager . DiscardBuffer ( ) ; <nl> + return false ; <nl> + } <nl> + <nl> + / / if decoder had an error , tell it to reset to avoid more problems <nl> + if ( decoderState & VC_ERROR ) <nl> + { <nl> + CLog : : Log ( LOGDEBUG , " CVideoPlayerVideo - video decoder returned error " ) ; <nl> + return false ; <nl> + } <nl> + <nl> + / / check for a new picture <nl> + if ( decoderState & VC_PICTURE ) <nl> + { <nl> + / / try to retrieve the picture ( should never fail ! ) , unless there is a demuxer bug ofcours <nl> + m_pVideoCodec - > ClearPicture ( & m_picture ) ; <nl> + if ( m_pVideoCodec - > GetPicture ( & m_picture ) ) <nl> + { <nl> + sPostProcessType . clear ( ) ; <nl> + <nl> + if ( m_picture . iDuration = = 0 . 0 ) <nl> + m_picture . iDuration = frametime ; <nl> + <nl> + if ( m_iNrOfPicturesNotToSkip > 0 ) <nl> + { <nl> + m_iNrOfPicturesNotToSkip - - ; <nl> + } <nl> + <nl> + / / validate picture timing , <nl> + / / if both dts / pts invalid , use pts calulated from picture . iDuration <nl> + / / if pts invalid use dts , else use picture . pts as passed <nl> + if ( m_picture . dts = = DVD_NOPTS_VALUE & & m_picture . pts = = DVD_NOPTS_VALUE ) <nl> + m_picture . pts = pts ; <nl> + else if ( m_picture . pts = = DVD_NOPTS_VALUE ) <nl> + m_picture . pts = m_picture . 
dts ; <nl> + <nl> + / * use forced aspect if any * / <nl> + if ( m_fForcedAspectRatio ! = 0 . 0f ) <nl> + m_picture . iDisplayWidth = ( int ) ( m_picture . iDisplayHeight * m_fForcedAspectRatio ) ; <nl> + <nl> + / / Deinterlace if codec said format was interlaced or if we have selected we want to deinterlace <nl> + / / this video <nl> + / / ask codec to do deinterlacing if possible <nl> + EDEINTERLACEMODE mDeintMode = CMediaSettings : : GetInstance ( ) . GetCurrentVideoSettings ( ) . m_DeinterlaceMode ; <nl> + EINTERLACEMETHOD mInt = m_renderManager . AutoInterlaceMethod ( CMediaSettings : : GetInstance ( ) . GetCurrentVideoSettings ( ) . m_InterlaceMethod ) ; <nl> + if ( ( mDeintMode = = VS_DEINTERLACEMODE_AUTO & & ( m_picture . iFlags & DVP_FLAG_INTERLACED ) ) | | mDeintMode = = VS_DEINTERLACEMODE_FORCE ) <nl> + { <nl> + if ( mInt = = VS_INTERLACEMETHOD_SW_BLEND ) <nl> + { <nl> + if ( ! sPostProcessType . empty ( ) ) <nl> + sPostProcessType + = " , " ; <nl> + sPostProcessType + = g_advancedSettings . m_videoPPFFmpegDeint ; <nl> + bPostProcessDeint = true ; <nl> + } <nl> + } <nl> + <nl> + if ( CMediaSettings : : GetInstance ( ) . GetCurrentVideoSettings ( ) . m_PostProcess ) <nl> + { <nl> + if ( ! sPostProcessType . empty ( ) ) <nl> + sPostProcessType + = " , " ; <nl> + / / This is what mplayer uses for its " high - quality filter combination " <nl> + sPostProcessType + = g_advancedSettings . m_videoPPFFmpegPostProc ; <nl> + } <nl> + <nl> + if ( ! sPostProcessType . empty ( ) ) <nl> + { <nl> + mPostProcess . SetType ( sPostProcessType , bPostProcessDeint ) ; <nl> + if ( mPostProcess . Process ( & m_picture ) ) <nl> + mPostProcess . GetPicture ( & m_picture ) ; <nl> + } <nl> + <nl> + / * if frame has a pts ( usually originiating from demux packet ) , use that * / <nl> + if ( m_picture . pts ! = DVD_NOPTS_VALUE ) <nl> + { <nl> + pts = m_picture . pts ; <nl> + } <nl> + <nl> + if ( m_picture . iRepeatPicture ) <nl> + m_picture . iDuration * = m_picture . iRepeatPicture + 1 ; <nl> + <nl> + int iResult = OutputPicture ( & m_picture , pts ) ; <nl> + <nl> + frametime = ( double ) DVD_TIME_BASE / m_fFrameRate ; <nl> + <nl> + if ( m_syncState = = IDVDStreamPlayer : : SYNC_STARTING & & ! ( m_picture . iFlags & DVP_FLAG_DROPPED ) ) <nl> + { <nl> + m_codecname = m_pVideoCodec - > GetName ( ) ; <nl> + m_syncState = IDVDStreamPlayer : : SYNC_WAITSYNC ; <nl> + SStartMsg msg ; <nl> + msg . player = VideoPlayer_VIDEO ; <nl> + msg . cachetime = DVD_MSEC_TO_TIME ( 50 ) ; / / TODO <nl> + msg . cachetotal = DVD_MSEC_TO_TIME ( 100 ) ; / / TODO <nl> + msg . timestamp = pts ; <nl> + m_messageParent . Put ( new CDVDMsgType < SStartMsg > ( CDVDMsg : : PLAYER_STARTED , msg ) ) ; <nl> + } <nl> + <nl> + / / guess next frame pts . iDuration is always valid <nl> + if ( m_speed ! = 0 ) <nl> + pts + = m_picture . iDuration * m_speed / abs ( m_speed ) ; <nl> + <nl> + if ( iResult & EOS_ABORT ) <nl> + { <nl> + / / if we break here and we directly try to decode again wihout <nl> + / / flushing the video codec things break for some reason <nl> + / / i think the decoder ( libmpeg2 atleast ) still has a pointer <nl> + / / to the data , and when the packet is freed that will fail . <nl> + decoderState = m_pVideoCodec - > Decode ( NULL , 0 , DVD_NOPTS_VALUE , DVD_NOPTS_VALUE ) ; <nl> + return false ; <nl> + } <nl> + <nl> + if ( ( iResult & EOS_DROPPED ) & & ! ( m_picture . 
iFlags & DVP_FLAG_DROPPED ) ) <nl> + m_iDroppedFrames + + ; <nl> + } <nl> + else <nl> + { <nl> + CLog : : Log ( LOGWARNING , " Decoder Error getting videoPicture . " ) ; <nl> + m_pVideoCodec - > Reset ( ) ; <nl> + } <nl> + <nl> + / / update dropping stats <nl> + int ret = CalcDropRequirement ( pts , true ) ; <nl> + if ( ret & EOS_DROPPED ) <nl> + { <nl> + m_iDroppedFrames + + ; <nl> + } <nl> + } <nl> + <nl> + return true ; <nl> } <nl> <nl> void CVideoPlayerVideo : : OnExit ( ) <nl> mmm a / xbmc / cores / VideoPlayer / VideoPlayerVideo . h <nl> ppp b / xbmc / cores / VideoPlayer / VideoPlayerVideo . h <nl> class CVideoPlayerVideo : public CThread , public IDVDStreamPlayerVideo , public I <nl> virtual void OnStartup ( ) ; <nl> virtual void OnExit ( ) ; <nl> virtual void Process ( ) ; <nl> + bool ProcessDecoderOutput ( int & decoderState , double & frametime , double & pts ) ; <nl> <nl> int OutputPicture ( const DVDVideoPicture * src , double pts ) ; <nl> void ProcessOverlays ( DVDVideoPicture * pSource , double pts ) ; <nl> class CVideoPlayerVideo : public CThread , public IDVDStreamPlayerVideo , public I <nl> std : : list < DVDMessageListItem > m_packets ; <nl> CDroppingStats m_droppingStats ; <nl> CRenderManager & m_renderManager ; <nl> + DVDVideoPicture m_picture ; <nl> } ; <nl> <nl> | VideoPlayer : video - refactoring , cleanup , minor fixes | xbmc/xbmc | 37d991b26b70333be477fb148780d51f0b0ac7d9 | 2016-01-04T21:19:24Z |
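The xbmc refactor splits decoder-output handling into ProcessDecoderOutput() and adds a drain path: DVD_CODEC_CTRL_DRAIN plus the VIDEO_DRAIN message squeeze the decoder's buffered pictures out without feeding new packets. Below is a minimal sketch of that loop against a stand-in decoder; FakeDecoder and the flag values are illustrative, not the real CDVDVideoCodec interface.

#include <cstdint>
#include <cstdio>
#include <deque>

enum { VC_PICTURE = 1, VC_BUFFER = 2, VC_ERROR = 4 };
enum { CTRL_DRAIN = 0x10000000 };

struct FakeDecoder {
  std::deque<int> pending{1, 2, 3};  // frames still buffered internally
  void SetCodecControl(int) {}
  int Decode(const uint8_t* data, int /*size*/) {
    if (data != nullptr) return VC_BUFFER;  // normal feed path, not shown
    return pending.empty() ? VC_BUFFER : VC_PICTURE;
  }
  int GetPicture() { int p = pending.front(); pending.pop_front(); return p; }
};

int main() {
  FakeDecoder dec;
  while (true) {  // mirrors the VIDEO_DRAIN handling in Process()
    dec.SetCodecControl(CTRL_DRAIN);
    int state = dec.Decode(nullptr, 0);  // drain request: no new packet
    if (state & VC_ERROR) break;         // bail out on decoder errors
    if (state & VC_PICTURE)
      std::printf("output buffered picture %d\n", dec.GetPicture());
    if (state & VC_BUFFER) break;        // decoder empty: wants input again
  }
  return 0;
}

The same pattern backs CloseStream() in the diff: posting VIDEO_DRAIN before waiting on the message queue lets the final decoded frames reach the renderer before teardown.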
mmm a / modules / planning / integration_tests / BUILD <nl> ppp b / modules / planning / integration_tests / BUILD <nl> cc_test ( <nl> ] , <nl> ) <nl> <nl> - cc_test ( <nl> - name = " sunnyvale_big_loop_test " , <nl> - size = " small " , <nl> - srcs = [ <nl> - " sunnyvale_big_loop_test . cc " , <nl> - ] , <nl> - data = [ <nl> - " / / modules / common / configs : config_gflags " , <nl> - " / / modules / map : map_data " , <nl> - " / / modules / planning : planning_testdata " , <nl> - ] , <nl> - deps = [ <nl> - " : planning_test_base " , <nl> - ] , <nl> - ) <nl> + # cc_test ( <nl> + # name = " sunnyvale_big_loop_test " , <nl> + # size = " small " , <nl> + # srcs = [ <nl> + # " sunnyvale_big_loop_test . cc " , <nl> + # ] , <nl> + # data = [ <nl> + # " / / modules / common / configs : config_gflags " , <nl> + # " / / modules / map : map_data " , <nl> + # " / / modules / planning : planning_testdata " , <nl> + # ] , <nl> + # deps = [ <nl> + # " : planning_test_base " , <nl> + # ] , <nl> + # ) <nl> <nl> cc_test ( <nl> name = " navigation_mode_test " , <nl> mmm a / modules / planning / tasks / dp_poly_path / dp_road_graph . cc <nl> ppp b / modules / planning / tasks / dp_poly_path / dp_road_graph . cc <nl> bool DPRoadGraph : : SamplePathWaypoints ( <nl> reference_line_ . Length ( ) ) ; <nl> <nl> constexpr double kSamplePointLookForwardTime = 4 . 0 ; <nl> - const double level_distance = <nl> + const double step_length = <nl> common : : math : : Clamp ( init_point . v ( ) * kSamplePointLookForwardTime , <nl> config_ . step_length_min ( ) , config_ . step_length_max ( ) ) ; <nl> + const double level_distance = ( init_point . v ( ) > FLAGS_max_stop_speed ) ? <nl> + step_length : step_length / 2 . 0 ; <nl> double accumulated_s = init_sl_point_ . s ( ) ; <nl> double prev_s = accumulated_s ; <nl> for ( std : : size_t i = 0 ; accumulated_s < total_length ; + + i ) { <nl> mmm a / modules / planning / testdata / sunnyvale_loop_test / result_change_lane_0 . pb . txt <nl> ppp b / modules / planning / testdata / sunnyvale_loop_test / result_change_lane_0 . pb . txt <nl> <nl> header { <nl> module_name : " planning " <nl> } <nl> - total_path_length : 24 . 714207617327293 <nl> + total_path_length : 24 . 714207617327279 <nl> total_path_time : 7 . 9999999999999885 <nl> is_replan : true <nl> gear : GEAR_DRIVE <nl> trajectory_point { <nl> path_point { <nl> x : 587581 . 22476385592 <nl> y : 4140851 . 2260692273 <nl> - z : 0 <nl> theta : 1 . 2656533435800004 <nl> - kappa : - 2 . 7380291269735471e - 21 <nl> - s : 0 <nl> - dkappa : 0 <nl> + kappa : 3 . 6539210352769365e - 20 <nl> + s : 1 . 7347234759768071e - 18 <nl> + dkappa : 3 . 9220359885133527e - 20 <nl> ddkappa : 0 <nl> } <nl> v : 3 . 4694469519536142e - 18 <nl> - a : 0 . 0348607934741375 <nl> + a : 0 . 034860793474137487 <nl> relative_time : 0 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 22476660425 <nl> - y : 4140851 . 2260779822 <nl> - theta : 1 . 2656533691540908 <nl> - kappa : 4 . 9286159559570257e - 08 <nl> - s : 9 . 1765353830381312e - 06 <nl> - dkappa : 4 . 9206380222397963e - 08 <nl> + x : 587581 . 224766574 <nl> + y : 4140851 . 226077992 <nl> + theta : 1 . 2656534590382975 <nl> + kappa : 2 . 0777315971413378e - 07 <nl> + s : 9 . 1765353830398676e - 06 <nl> + dkappa : 2 . 0747227163616081e - 07 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 0010272952620968652 <nl> - a : 0 . 067753426078155793 <nl> + v : 0 . 0010272952620968656 <nl> + a : 0 . 067753426078155848 <nl> relative_time : 0 . 
02 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 22477746313 <nl> - y : 4140851 . 2261125734 <nl> - theta : 1 . 2656534701930529 <nl> - kappa : 2 . 4400755127036893e - 07 <nl> - s : 4 . 5431495331961631e - 05 <nl> - dkappa : 2 . 4361257708532642e - 07 <nl> + x : 587581 . 22477731272 <nl> + y : 4140851 . 2261126204 <nl> + theta : 1 . 2656539151947612 <nl> + kappa : 1 . 0286502412562042e - 06 <nl> + s : 4 . 54314953319634e - 05 <nl> + dkappa : 1 . 0271605945933433e - 06 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 0027055478700508465 <nl> - a : 0 . 099957682720865776 <nl> + v : 0 . 0027055478700508486 <nl> + a : 0 . 099957682720865887 <nl> relative_time : 0 . 04 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 22480029031 <nl> - y : 4140851 . 22618529 <nl> - theta : 1 . 2656536825975531 <nl> - kappa : 6 . 5335161982867627e - 07 <nl> - s : 0 . 00012164681343605776 <nl> - dkappa : 6 . 5229404180600592e - 07 <nl> + x : 587581 . 22479988774 <nl> + y : 4140851 . 2261854154 <nl> + theta : 1 . 2656548741286588 <nl> + kappa : 2 . 7543012413464307e - 06 <nl> + s : 0 . 00012164681343605958 <nl> + dkappa : 2 . 75031258175337e - 06 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 005021059584296759 <nl> - a : 0 . 13148049136836523 <nl> + v : 0 . 0050210595842967642 <nl> + a : 0 . 13148049136836543 <nl> relative_time : 0 . 06 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 22483886254 <nl> - y : 4140851 . 2263081628 <nl> - theta : 1 . 2656540415086059 <nl> - kappa : 1 . 3450418171177331e - 06 <nl> - s : 0 . 00025043184408652964 <nl> - dkappa : 1 . 3428646025487522e - 06 <nl> + x : 587581 . 224838034 <nl> + y : 4140851 . 2263084212 <nl> + theta : 1 . 2656564944896882 <nl> + kappa : 5 . 6702244765561947e - 06 <nl> + s : 0 . 00025043184408653153 <nl> + dkappa : 5 . 6620131034087264e - 06 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 0079602707245913715 <nl> - a : 0 . 16232877998675205 <nl> + v : 0 . 00796027072459138 <nl> + a : 0 . 16232877998675227 <nl> relative_time : 0 . 08 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 22489687579 <nl> - y : 4140851 . 2264929651 <nl> - theta : 1 . 2656545813152986 <nl> - kappa : 2 . 3853524859444963e - 06 <nl> - s : 0 . 00044412613366293509 <nl> - dkappa : 2 . 3814913240694811e - 06 <nl> + x : 587581 . 2248954064 <nl> + y : 4140851 . 2264934238 <nl> + theta : 1 . 2656589315328528 <nl> + kappa : 1 . 0055809327921214e - 05 <nl> + s : 0 . 00044412613366293725 <nl> + dkappa : 1 . 0041246941011709e - 05 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 01150976017001341 <nl> - a : 0 . 19250947654212397 <nl> + v : 0 . 011509760170013424 <nl> + a : 0 . 19250947654212425 <nl> relative_time : 0 . 1 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 22497794579 <nl> - y : 4140851 . 2267512153 <nl> - theta : 1 . 2656553356625126 <nl> - kappa : 3 . 839123743776401e - 06 <nl> - s : 0 . 00071480219171962624 <nl> - dkappa : 3 . 8329093673602177e - 06 <nl> + x : 587581 . 22497558093 <nl> + y : 4140851 . 2267519534 <nl> + theta : 1 . 2656623371533193 <nl> + kappa : 1 . 6184398985554698e - 05 <nl> + s : 0 . 00071480219171962884 <nl> + dkappa : 1 . 6160961440923553e - 05 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 015656245358963553 <nl> - a : 0 . 22202950900057886 <nl> + v : 0 . 015656245358963573 <nl> + a : 0 . 22202950900057916 <nl> relative_time : 0 . 12000000000000001 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 22508560924 <nl> - y : 4140851 . 22709418 <nl> - theta : 1 . 
2656563374586467 <nl> - kappa : 5 . 76977636647812e - 06 <nl> - s : 0 . 0010742682621721884 <nl> - dkappa : 5 . 7604368492935987e - 06 <nl> + x : 587581 . 225082055 <nl> + y : 4140851 . 2270952887 <nl> + theta : 1 . 2656668599212928 <nl> + kappa : 2 . 4323353193260484e - 05 <nl> + s : 0 . 0010742682621721915 <nl> + dkappa : 2 . 4288129168163533e - 05 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 020386582289164439 <nl> - a : 0 . 25089580532821448 <nl> + v : 0 . 020386582289164467 <nl> + a : 0 . 25089580532821487 <nl> relative_time : 0 . 14 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 22522332438 <nl> - y : 4140851 . 2275328748 <nl> - theta : 1 . 2656576188833428 <nl> - kappa : 8 . 2393266720483158e - 06 <nl> - s : 0 . 0015340710944838785 <nl> - dkappa : 8 . 22598970226738e - 06 <nl> + x : 587581 . 2252182489 <nl> + y : 4140851 . 227534458 <nl> + theta : 1 . 2656726451168785 <nl> + kappa : 3 . 4734110993146175e - 05 <nl> + s : 0 . 0015340710944838824 <nl> + dkappa : 3 . 4683810560158096e - 05 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 025687765517660655 <nl> - a : 0 . 27911529349112868 <nl> + v : 0 . 025687765517660686 <nl> + a : 0 . 27911529349112912 <nl> relative_time : 0 . 16 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 22539447225 <nl> - y : 4140851 . 22807807 <nl> - theta : 1 . 2656592113952048 <nl> - kappa : 1 . 1308401404356422e - 05 <nl> - s : 0 . 0021054987148520663 <nl> - dkappa : 1 . 129009653384895e - 05 <nl> + x : 587581 . 2253875063 <nl> + y : 4140851 . 2280802433 <nl> + theta : 1 . 2656798347649518 <nl> + kappa : 4 . 7672253470236289e - 05 <nl> + s : 0 . 0021054987148520707 <nl> + dkappa : 4 . 760321658048997e - 05 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 031546928160818749 <nl> - a : 0 . 30669490145541928 <nl> + v : 0 . 031546928160818805 <nl> + a : 0 . 30669490145541978 <nl> relative_time : 0 . 18 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 22560235707 <nl> - y : 4140851 . 2287402917 <nl> - theta : 1 . 265661145739525 <nl> - kappa : 1 . 5036252616879368e - 05 <nl> - s : 0 . 00279958319739467 <nl> - dkappa : 1 . 5011913486419813e - 05 <nl> + x : 587581 . 2255930946 <nl> + y : 4140851 . 2287431811 <nl> + theta : 1 . 2656885676700202 <nl> + kappa : 6 . 3387566497085442e - 05 <nl> + s : 0 . 0027995831973946761 <nl> + dkappa : 6 . 32957713726473e - 05 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 037951341894327251 <nl> - a : 0 . 33364155718718408 <nl> + v : 0 . 037951341894327306 <nl> + a : 0 . 33364155718718463 <nl> relative_time : 0 . 19999999999999998 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 22585020692 <nl> - y : 4140851 . 2295298227 <nl> - theta : 1 . 265663451956005 <nl> - kappa : 1 . 9480772556438381e - 05 <nl> - s : 0 . 0036271034353366 <nl> - dkappa : 1 . 9449239096820138e - 05 <nl> + x : 587581 . 22583820682 <nl> + y : 4140851 . 2295335671 <nl> + theta : 1 . 2656989794510958 <nl> + kappa : 8 . 21241034783914e - 05 <nl> + s : 0 . 0036271034353366071 <nl> + dkappa : 8 . 2005174913772714e - 05 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 044888416953196592 <nl> - a : 0 . 35996218865252094 <nl> + v : 0 . 044888416953196661 <nl> + a : 0 . 3599621886525215 <nl> relative_time : 0 . 21999999999999997 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 22614117549 <nl> - y : 4140851 . 23045671 <nl> - theta : 1 . 2656661593864795 <nl> - kappa : 2 . 4698508546935712e - 05 <nl> - s : 0 . 0045985879121961948 <nl> - dkappa : 2 . 465852915599322e - 05 <nl> + x : 587581 . 
22612596129 <nl> + y : 4140851 . 2304614568 <nl> + theta : 1 . 2657112025765587 <nl> + kappa : 0 . 00010412024809560832 <nl> + s : 0 . 0045985879121962026 <nl> + dkappa : 0 . 00010396946566841252 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 052345702131759209 <nl> - a : 0 . 38566372381752756 <nl> + v : 0 . 052345702131759278 <nl> + a : 0 . 38566372381752823 <nl> relative_time : 0 . 23999999999999996 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 226478342 <nl> - y : 4140851 . 2315307609 <nl> - theta : 1 . 2656692966826393 <nl> - kappa : 3 . 0744677873091409e - 05 <nl> - s : 0 . 0057243174729716588 <nl> - dkappa : 3 . 069491156863e - 05 <nl> + x : 587581 . 22645940329 <nl> + y : 4140851 . 23153667 <nl> + theta : 1 . 2657253663990247 <nl> + kappa : 0 . 00012960877705155987 <nl> + s : 0 . 0057243174729716675 <nl> + dkappa : 0 . 00012942108324226573 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 060310884783669472 <nl> - a : 0 . 4107530906483019 <nl> + v : 0 . 060310884783669555 <nl> + a : 0 . 41075309064830257 <nl> relative_time : 0 . 25999999999999995 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 226864712 <nl> - y : 4140851 . 2327615512 <nl> - theta : 1 . 2656728918137556 <nl> - kappa : 3 . 7673182664180077e - 05 <nl> - s : 0 . 0070143280953275065 <nl> - dkappa : 3 . 7612201212813608e - 05 <nl> + x : 587581 . 22684150538 <nl> + y : 4140851 . 2327687922 <nl> + theta : 1 . 2657415971902122 <nl> + kappa : 0 . 00015881692281505234 <nl> + s : 0 . 0070143280953275178 <nl> + dkappa : 0 . 0001585869310359333 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 068771790821903711 <nl> - a : 0 . 43523721711094171 <nl> + v : 0 . 068771790821903822 <nl> + a : 0 . 43523721711094249 <nl> relative_time : 0 . 27999999999999997 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 22730321926 <nl> - y : 4140851 . 2341584261 <nl> - theta : 1 . 2656769720743988 <nl> - kappa : 4 . 553662477776764e - 05 <nl> - s : 0 . 0084784136607809982 <nl> - dkappa : 4 . 54629147996638e - 05 <nl> + x : 587581 . 22727516876 <nl> + y : 4140851 . 2341671777 <nl> + theta : 1 . 2657600181758077 <nl> + kappa : 0 . 00019196643636548784 <nl> + s : 0 . 0084784136607810121 <nl> + dkappa : 0 . 00019168843889866716 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 077716384718760231 <nl> - a : 0 . 4591230311715449 <nl> + v : 0 . 077716384718760356 <nl> + a : 0 . 45912303117154563 <nl> relative_time : 0 . 3 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 227796725 <nl> - y : 4140851 . 2357305 <nl> - theta : 1 . 2656815640921684 <nl> - kappa : 5 . 4386320683448082e - 05 <nl> - s : 0 . 010126128725888575 <nl> - dkappa : 5 . 4298285732981513e - 05 <nl> + x : 587581 . 22776322311 <nl> + y : 4140851 . 2357409531 <nl> + theta : 1 . 2657807495703359 <nl> + kappa : 0 . 00022927364993747718 <nl> + s : 0 . 01012612872588859 <nl> + dkappa : 0 . 00022894162578211913 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 08713276950585927 <nl> - a : 0 . 48241746079620912 <nl> + v : 0 . 087132769505859409 <nl> + a : 0 . 48241746079620984 <nl> relative_time : 0 . 32 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 22834802046 <nl> - y : 4140851 . 2374866637 <nl> - theta : 1 . 2656866938354092 <nl> - kappa : 6 . 4272316346580263e - 05 <nl> - s : 0 . 011966791293432311 <nl> - dkappa : 6 . 41682789688934e - 05 <nl> + x : 587581 . 22830842889 <nl> + y : 4140851 . 2374990173 <nl> + theta : 1 . 265803908612023 <nl> + kappa : 0 . 00027094953976545367 <nl> + s : 0 . 011966791293432326 <nl> + dkappa : 0 . 
00027055716239409063 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 097009186774143011 <nl> - a : 0 . 50512743395103221 <nl> + v : 0 . 097009186774143163 <nl> + a : 0 . 505127433951033 <nl> relative_time : 0 . 34 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 22895982629 <nl> - y : 4140851 . 2394355848 <nl> - theta : 1 . 2656923866209375 <nl> - kappa : 7 . 524340211202453e - 05 <nl> - s : 0 . 01400948558360633 <nl> - dkappa : 7 . 5121605875496178e - 05 <nl> + x : 587581 . 22891347646 <nl> + y : 4140851 . 2394500473 <nl> + theta : 1 . 2658296095976649 <nl> + kappa : 0 . 00031719978882828546 <nl> + s : 0 . 014009485583606353 <nl> + dkappa : 0 . 00031674043385228112 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 10733401667387563 <nl> - a : 0 . 5272598786021121 <nl> + v : 0 . 10733401667387578 <nl> + a : 0 . 52725987860211287 <nl> relative_time : 0 . 36000000000000004 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 22963479417 <nl> - y : 4140851 . 24158571 <nl> - theta : 1 . 2656986671217654 <nl> - kappa : 8 . 7347127587879771e - 05 <nl> - s : 0 . 01626306480520328 <nl> - dkappa : 8 . 720573909250145e - 05 <nl> + x : 587581 . 22958098853 <nl> + y : 4140851 . 2416024986 <nl> + theta : 1 . 2658579639174947 <nl> + kappa : 0 . 00036822484959388922 <nl> + s : 0 . 0162630648052033 <nl> + dkappa : 0 . 00036769160233803753 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 11809577791464321 <nl> - a : 0 . 54882172271554652 <nl> + v : 0 . 1180957779146434 <nl> + a : 0 . 54882172271554741 <nl> relative_time : 0 . 38000000000000006 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 23037550726 <nl> - y : 4140851 . 243945268 <nl> - theta : 1 . 2657055593748208 <nl> - kappa : 0 . 00010062981652921974 <nl> - s : 0 . 018736153926800721 <nl> - dkappa : 0 . 00010046692739087976 <nl> + x : 587581 . 23031351948 <nl> + y : 4140851 . 2439646092 <nl> + theta : 1 . 265889080090048 <nl> + kappa : 0 . 00042422000676384318 <nl> + s : 0 . 018736153926800753 <nl> + dkappa : 0 . 00042360566975010365 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 12928312776535386 <nl> - a : 0 . 5698198942574334 <nl> + v : 0 . 12928312776535408 <nl> + a : 0 . 56981989425743429 <nl> relative_time : 0 . 40000000000000008 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 23118448129 <nl> - y : 4140851 . 2465222734 <nl> - theta : 1 . 2657130867886721 <nl> - kappa : 0 . 00011513658172183036 <nl> - s : 0 . 021437152447947631 <nl> - dkappa : 0 . 00011495021053250563 <nl> + x : 587581 . 23111355735 <nl> + y : 4140851 . 2465444021 <nl> + theta : 1 . 2659230637970298 <nl> + kappa : 0 . 00048537544001800008 <nl> + s : 0 . 021437152447947666 <nl> + dkappa : 0 . 00048467254035836863 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 14088486205423759 <nl> - a : 0 . 59026132119387031 <nl> + v : 0 . 14088486205423778 <nl> + a : 0 . 59026132119387131 <nl> relative_time : 0 . 4200000000000001 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 23206416517 <nl> - y : 4140851 . 2493245257 <nl> - theta : 1 . 2657212721512519 <nl> - kappa : 0 . 00013091133986594595 <nl> - s : 0 . 024374237170350775 <nl> - dkappa : 0 . 00013069943412980157 <nl> + x : 587581 . 23198352417 <nl> + y : 4140851 . 2493496877 <nl> + theta : 1 . 265960017918184 <nl> + kappa : 0 . 0005518762867591006 <nl> + s : 0 . 024374237170350817 <nl> + dkappa : 0 . 00055107708345761655 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 15288991516884637 <nl> - a : 0 . 61015293149095529 <nl> + v : 0 . 15288991516884659 <nl> + a : 0 . 
61015293149095629 <nl> relative_time : 0 . 44000000000000011 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 23301694251 <nl> - y : 4140851 . 2523596189 <nl> - theta : 1 . 2657301376375774 <nl> - kappa : 0 . 00014799682645998631 <nl> - s : 0 . 027555364969061195 <nl> - dkappa : 0 . 00014775726450538292 <nl> + x : 587581 . 23292577663 <nl> + y : 4140851 . 2523880643 <nl> + theta : 1 . 2660000425661559 <nl> + kappa : 0 . 000623902704857386 <nl> + s : 0 . 027555364969061233 <nl> + dkappa : 0 . 00062299919602127522 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 16528736005605413 <nl> - a : 0 . 62950165311478612 <nl> + v : 0 . 16528736005605435 <nl> + a : 0 . 62950165311478712 <nl> relative_time : 0 . 46000000000000013 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 2340451309 <nl> - y : 4140851 . 255634937 <nl> - theta : 1 . 2657397048174763 <nl> - kappa : 0 . 00016643461068429331 <nl> - s : 0 . 03098827556366061 <nl> - dkappa : 0 . 00016616520355170207 <nl> + x : 587581 . 23394260742 <nl> + y : 4140851 . 2556669256 <nl> + theta : 1 . 266043235121364 <nl> + kappa : 0 . 00070162993539521208 <nl> + s : 0 . 030988275563660662 <nl> + dkappa : 0 . 00070061386535516588 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 17806640822205677 <nl> - a : 0 . 64831441403146062 <nl> + v : 0 . 17806640822205705 <nl> + a : 0 . 64831441403146151 <nl> relative_time : 0 . 48000000000000015 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 23515098449 <nl> - y : 4140851 . 2591576576 <nl> - theta : 1 . 2657499946633095 <nl> - kappa : 0 . 00018626511028486787 <nl> - s : 0 . 034680494289447909 <nl> - dkappa : 0 . 00018596360359069341 <nl> + x : 587581 . 23503624555 <nl> + y : 4140851 . 2591934586 <nl> + theta : 1 . 2660896902668632 <nl> + kappa : 0 . 00078522836541166166 <nl> + s : 0 . 034680494289447965 <nl> + dkappa : 0 . 00078409123175125154 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 19121640973237214 <nl> - a : 0 . 66659814220707636 <nl> + v : 0 . 19121640973237244 <nl> + a : 0 . 66659814220707736 <nl> relative_time : 0 . 50000000000000011 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 236336694 <nl> - y : 4140851 . 2629347616 <nl> - theta : 1 . 2657610275576925 <nl> - kappa : 0 . 00020752760645710659 <nl> - s : 0 . 038639334868625551 <nl> - dkappa : 0 . 00020719168223341745 <nl> + x : 587581 . 23620885715 <nl> + y : 4140851 . 2629746492 <nl> + theta : 1 . 2661395000232094 <nl> + kappa : 0 . 00087486359064715817 <nl> + s : 0 . 038639334868625613 <nl> + dkappa : 0 . 000873596651141387 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 20472685321184006 <nl> - a : 0 . 68435976560773149 <nl> + v : 0 . 20472685321184036 <nl> + a : 0 . 6843597656077326 <nl> relative_time : 0 . 52000000000000013 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 23760438687 <nl> - y : 4140851 . 2669730261 <nl> - theta : 1 . 2657728233012175 <nl> - kappa : 0 . 00023026025872953845 <nl> - s : 0 . 042871902181485996 <nl> - dkappa : 0 . 0002298875372397054 <nl> + x : 587581 . 23746254691 <nl> + y : 4140851 . 2670172825 <nl> + theta : 1 . 2661927537833337 <nl> + kappa : 0 . 00097069647828807826 <nl> + s : 0 . 042871902181486059 <nl> + dkappa : 0 . 000969290757751067 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 21858736584462232 <nl> - a : 0 . 70160621219952379 <nl> + v : 0 . 21858736584462268 <nl> + a : 0 . 7016062121995249 <nl> relative_time : 0 . 54000000000000015 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 23895612988 <nl> - y : 4140851 . 
2712790333 <nl> - theta : 1 . 2657854011201808 <nl> - kappa : 0 . 00025450011984756151 <nl> - s : 0 . 047385095037598149 <nl> - dkappa : 0 . 00025408816137780361 <nl> + x : 587581 . 23879935825 <nl> + y : 4140851 . 2713279491 <nl> + theta : 1 . 2662495383474033 <nl> + kappa : 0 . 0010728832297113651 <nl> + s : 0 . 047385095037598225 <nl> + dkappa : 0 . 0010713295267531758 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 23278771337420262 <nl> - a : 0 . 71834440994855087 <nl> + v : 0 . 23278771337420298 <nl> + a : 0 . 71834440994855187 <nl> relative_time : 0 . 56000000000000016 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 24039392814 <nl> - y : 4140851 . 275859172 <nl> - theta : 1 . 2657987796743022 <nl> - kappa : 0 . 00028028315065717986 <nl> - s : 0 . 052185608946993832 <nl> - dkappa : 0 . 00027982945728401832 <nl> + x : 587581 . 24022127432 <nl> + y : 4140851 . 2759130434 <nl> + theta : 1 . 266309937957689 <nl> + kappa : 0 . 0011815754432291422 <nl> + s : 0 . 052185608946993915 <nl> + dkappa : 0 . 0011798643369217363 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 24731780010338661 <nl> - a : 0 . 73458128682091073 <nl> + v : 0 . 247317800103387 <nl> + a : 0 . 73458128682091173 <nl> relative_time : 0 . 58000000000000018 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 24191972706 <nl> - y : 4140851 . 2807196393 <nl> - theta : 1 . 2658129770644475 <nl> - kappa : 0 . 00030764423498874036 <nl> - s : 0 . 057279938891354194 <nl> - dkappa : 0 . 00030714625232236006 <nl> + x : 587581 . 24173021875 <nl> + y : 4140851 . 2807787689 <nl> + theta : 1 . 2663740343334347 <nl> + kappa : 0 . 0012969201768333253 <nl> + s : 0 . 057279938891354285 <nl> + dkappa : 0 . 0012950420332856595 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 26216766889430193 <nl> - a : 0 . 75032377078270107 <nl> + v : 0 . 26216766889430237 <nl> + a : 0 . 75032377078270218 <nl> relative_time : 0 . 6000000000000002 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 24353541271 <nl> - y : 4140851 . 2858664412 <nl> - theta : 1 . 2658280108403543 <nl> - kappa : 0 . 00033661719454066948 <nl> - s : 0 . 062674382095196171 <nl> - dkappa : 0 . 00033607231344418812 <nl> + x : 587581 . 24332805711 <nl> + y : 4140851 . 28593114 <nl> + theta : 1 . 2664419067057198 <nl> + kappa : 0 . 0014190600109402376 <nl> + s : 0 . 062674382095196268 <nl> + dkappa : 0 . 0014170049897824931 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 27732750116839822 <nl> - a : 0 . 76557878980001981 <nl> + v : 0 . 27732750116839866 <nl> + a : 0 . 76557878980002092 <nl> relative_time : 0 . 62000000000000022 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 24524281267 <nl> - y : 4140851 . 2913054028 <nl> - theta : 1 . 2658438980083551 <nl> - kappa : 0 . 0003672348037632095 <nl> - s : 0 . 068375040797058875 <nl> - dkappa : 0 . 00036664036204785491 <nl> + x : 587581 . 24501659663 <nl> + y : 4140851 . 2913759863 <nl> + theta : 1 . 266513631852332 <nl> + kappa : 0 . 0015481331111352194 <nl> + s : 0 . 068375040797058972 <nl> + dkappa : 0 . 00154589117191217 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 29278761690644695 <nl> - a : 0 . 78035327183896475 <nl> + v : 0 . 29278761690644733 <nl> + a : 0 . 78035327183896575 <nl> relative_time : 0 . 64000000000000024 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 24704369716 <nl> - y : 4140851 . 29704216 <nl> - theta : 1 . 2658606550390958 <nl> - kappa : 0 . 0003995288047421559 <nl> - s : 0 . 074387825020690052 <nl> - dkappa : 0 . 00039888208883835056 <nl> + x : 587581 . 
24679758807 <nl> + y : 4140851 . 2971189506 <nl> + theta : 1 . 2665892841326283 <nl> + kappa : 0 . 0016842732909172454 <nl> + s : 0 . 07438782502069019 <nl> + dkappa : 0 . 0016818341993907597 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 30853847464854167 <nl> - a : 0 . 79465414486563357 <nl> + v : 0 . 30853847464854212 <nl> + a : 0 . 79465414486563457 <nl> relative_time : 0 . 66000000000000025 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 24893977947 <nl> - y : 4140851 . 3030821728 <nl> - theta : 1 . 2658782978752647 <nl> - kappa : 0 . 00043352992208259424 <nl> - s : 0 . 080718455346232648 <nl> - dkappa : 0 . 000432828168686948 <nl> + x : 587581 . 24867272575 <nl> + y : 4140851 . 3031654982 <nl> + theta : 1 . 2666689355224054 <nl> + kappa : 0 . 0018276100744435346 <nl> + s : 0 . 080718455346232787 <nl> + dkappa : 0 . 0018249634088042146 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 3245706714940978 <nl> - a : 0 . 8084883368461242 <nl> + v : 0 . 3245706714940983 <nl> + a : 0 . 80848833684612509 <nl> relative_time : 0 . 68000000000000027 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 250932717 <nl> - y : 4140851 . 30943072 <nl> - theta : 1 . 2658968419393108 <nl> - kappa : 0 . 00046926787779263615 <nl> - s : 0 . 087372465681411021 <nl> - dkappa : 0 . 00046850827549084636 <nl> + x : 587581 . 25064364879 <nl> + y : 4140851 . 3095209142 <nl> + theta : 1 . 266752655648764 <nl> + kappa : 0 . 0019782687592741648 <nl> + s : 0 . 087372465681411146 <nl> + dkappa : 0 . 0019754039162621206 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 34087494310185279 <nl> - a : 0 . 82186277574653444 <nl> + v : 0 . 34087494310185329 <nl> + a : 0 . 82186277574653543 <nl> relative_time : 0 . 70000000000000029 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 25302411243 <nl> - y : 4140851 . 3160929065 <nl> - theta : 1 . 2659163021411688 <nl> - kappa : 0 . 00050677140616715659 <nl> - s : 0 . 094355206032717534 <nl> - dkappa : 0 . 00050595109703281617 <nl> + x : 587581 . 25271194207 <nl> + y : 4140851 . 3161903089 <nl> + theta : 1 . 266840511824979 <nl> + kappa : 0 . 0021363704791166863 <nl> + s : 0 . 094355206032717687 <nl> + dkappa : 0 . 0021332766800514469 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 357442163689866 <nl> - a : 0 . 834784389532962 <nl> + v : 0 . 35744216368986648 <nl> + a : 0 . 83478438953296308 <nl> relative_time : 0 . 72000000000000031 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 25521551352 <nl> - y : 4140851 . 3230736637 <nl> - theta : 1 . 2659366928859832 <nl> - kappa : 0 . 0005460682686715308 <nl> - s : 0 . 10167184527659898 <nl> - dkappa : 0 . 00054518434984084422 <nl> + x : 587581 . 25487913622 <nl> + y : 4140851 . 3231786191 <nl> + theta : 1 . 2669325690853626 <nl> + kappa : 0 . 0023020322665707345 <nl> + s : 0 . 10167184527659914 <nl> + dkappa : 0 . 0022986985632902942 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 3742633460355187 <nl> - a : 0 . 84726010617150493 <nl> + v : 0 . 37426334603551931 <nl> + a : 0 . 847260106171506 <nl> relative_time : 0 . 74000000000000032 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 257508415 <nl> - y : 4140851 . 3303777534 <nl> - theta : 1 . 2659580280818306 <nl> - kappa : 0 . 00058718526882537065 <nl> - s : 0 . 10932737393064297 <nl> - dkappa : 0 . 0005862347940477769 <nl> + x : 587581 . 25714671 <nl> + y : 4140851 . 3304906115 <nl> + theta : 1 . 2670288902201339 <nl> + kappa : 0 . 0024753671158726409 <nl> + s : 0 . 10932737393064312 <nl> + dkappa : 0 . 
0024717823965816417 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 3913296414755143 <nl> - a : 0 . 85929685362826069 <nl> + v : 0 . 39132964147551491 <nl> + a : 0 . 85929685362826191 <nl> relative_time : 0 . 76000000000000034 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 25990425923 <nl> - y : 4140851 . 3380097682 <nl> - theta : 1 . 2659803211474383 <nl> - kappa : 0 . 00063014826708626139 <nl> - s : 0 . 1173266069247644 <nl> - dkappa : 0 . 00062912824825096607 <nl> + x : 587581 . 259516089 <nl> + y : 4140851 . 3381308848 <nl> + theta : 1 . 2671295358102821 <nl> + kappa : 0 . 002656484045640051 <nl> + s : 0 . 11732660692476456 <nl> + dkappa : 0 . 0026526370406671014 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 40863233990587788 <nl> - a : 0 . 8709015598693276 <nl> + v : 0 . 40863233990587849 <nl> + a : 0 . 8709015598693286 <nl> relative_time : 0 . 78000000000000036 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 26240443648 <nl> - y : 4140851 . 3459741389 <nl> - theta : 1 . 2660035850199165 <nl> - kappa : 0 . 00067498219573349893 <nl> - s : 0 . 12567418637239192 <nl> - dkappa : 0 . 00067388960437191232 <nl> + x : 587581 . 26198864856 <nl> + y : 4140851 . 3461038722 <nl> + theta : 1 . 2672345642624405 <nl> + kappa : 0 . 0028454881616165333 <nl> + s : 0 . 12567418637239211 <nl> + dkappa : 0 . 0028413674490806633 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 42616286978195678 <nl> - a : 0 . 88208115286080313 <nl> + v : 0 . 42616286978195739 <nl> + a : 0 . 88208115286080424 <nl> relative_time : 0 . 80000000000000038 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 26501028612 <nl> - y : 4140851 . 3542751316 <nl> - theta : 1 . 2660278321624734 <nl> - kappa : 0 . 00072171107375182571 <nl> - s : 0 . 13437458434165428 <nl> - dkappa : 0 . 00072054284251591035 <nl> + x : 587581 . 26456571324 <nl> + y : 4140851 . 3544138456 <nl> + theta : 1 . 2673440318437441 <nl> + kappa : 0 . 0030424807194161922 <nl> + s : 0 . 13437458434165447 <nl> + dkappa : 0 . 0030380747308024428 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 4439127981184201 <nl> - a : 0 . 89284256056878519 <nl> + v : 0 . 44391279811842077 <nl> + a : 0 . 8928425605687863 <nl> relative_time : 0 . 8200000000000004 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 26772309747 <nl> - y : 4140851 . 3629168523 <nl> - theta : 1 . 266053074572139 <nl> - kappa : 0 . 000770358021715169 <nl> - s : 0 . 14343210562656694 <nl> - dkappa : 0 . 00076911104583169364 <nl> + x : 587581 . 26724855823 <nl> + y : 4140851 . 3630649168 <nl> + theta : 1 . 267457992716702 <nl> + kappa : 0 . 0032475591872682869 <nl> + s : 0 . 14343210562656714 <nl> + dkappa : 0 . 0032428562129124374 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 46187383048925895 <nl> - a : 0 . 90319271095937159 <nl> + v : 0 . 46187383048925962 <nl> + a : 0 . 90319271095937248 <nl> relative_time : 0 . 84000000000000041 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 27054411056 <nl> - y : 4140851 . 3719032528 <nl> - theta : 1 . 266079323787495 <nl> - kappa : 0 . 00082094527667037564 <nl> - s : 0 . 15285089051821829 <nl> - dkappa : 0 . 00081961641537107771 <nl> + x : 587581 . 27003840962 <nl> + y : 4140851 . 3720610403 <nl> + theta : 1 . 2675764989740657 <nl> + kappa : 0 . 0034608173087618374 <nl> + s : 0 . 15285089051821851 <nl> + dkappa : 0 . 0034558055032442667 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 48003781102778631 <nl> - a : 0 . 91313853199866 <nl> + v : 0 . 48003781102778709 <nl> + a : 0 . 9131385319986608 <nl> relative_time : 0 . 
86000000000000043 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 27347451716 <nl> - y : 4140851 . 381238129 <nl> - theta : 1 . 2661065908963884 <nl> - kappa : 0 . 00087349420702094991 <nl> - s : 0 . 16263491757595624 <nl> - dkappa : 0 . 00087208028494860563 <nl> + x : 587581 . 27293644613 <nl> + y : 4140851 . 3814060162 <nl> + theta : 1 . 2676996006736889 <nl> + kappa : 0 . 0036823451655902408 <nl> + s : 0 . 16263491757595652 <nl> + dkappa : 0 . 0036770125530389279 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 49839672242663735 <nl> - a : 0 . 92268695165274839 <nl> + v : 0 . 498396722426638 <nl> + a : 0 . 92268695165274928 <nl> relative_time : 0 . 88000000000000045 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 27651546116 <nl> - y : 4140851 . 3909251238 <nl> - theta : 1 . 2661348865436608 <nl> - kappa : 0 . 00092802532741079137 <nl> - s : 0 . 17278800639857472 <nl> - dkappa : 0 . 00092652313600119322 <nl> + x : 587581 . 27594379906 <nl> + y : 4140851 . 3911034926 <nl> + theta : 1 . 2678273458734024 <nl> + kappa : 0 . 0039122292402958836 <nl> + s : 0 . 17278800639857497 <nl> + dkappa : 0 . 0039065637195985405 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 516942685937769 <nl> - a : 0 . 9318448978877345 <nl> + v : 0 . 51694268593776971 <nl> + a : 0 . 93184489788773539 <nl> relative_time : 0 . 90000000000000047 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 27966803976 <nl> - y : 4140851 . 4009677335 <nl> - theta : 1 . 2661642209388688 <nl> - kappa : 0 . 00098455831360792843 <nl> - s : 0 . 18331382039549979 <nl> - dkappa : 0 . 00098296461244777068 <nl> + x : 587581 . 27906155342 <nl> + y : 4140851 . 4011569675 <nl> + theta : 1 . 2679597806658762 <nl> + kappa : 0 . 0041505524790147593 <nl> + s : 0 . 18331382039550009 <nl> + dkappa : 0 . 0041445418289401 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 53566796137246009 <nl> - a : 0 . 94061929866971616 <nl> + v : 0 . 53566796137246087 <nl> + a : 0 . 94061929866971716 <nl> relative_time : 0 . 92000000000000048 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 2829333042 <nl> - y : 4140851 . 4113693065 <nl> - theta : 1 . 26619460386401 <nl> - kappa : 0 . 0010431120173882594 <nl> - s : 0 . 19421586955797657 <nl> - dkappa : 0 . 0010414235355489317 <nl> + x : 587581 . 2822907489 <nl> + y : 4140851 . 4115697946 <nl> + theta : 1 . 2680969492134846 <nl> + kappa : 0 . 0043973943542210733 <nl> + s : 0 . 19421586955797684 <nl> + dkappa : 0 . 0043910262384492232 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 55456494710131155 <nl> - a : 0 . 94901708196479129 <nl> + v : 0 . 55456494710131232 <nl> + a : 0 . 94901708196479229 <nl> relative_time : 0 . 9400000000000005 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 28631226078 <nl> - y : 4140851 . 4221330481 <nl> - theta : 1 . 2662260446812414 <nl> - kappa : 0 . 0011037044814192854 <nl> - s : 0 . 20549751323025525 <nl> - dkappa : 0 . 0011019179187665732 <nl> + x : 587581 . 28563238063 <nl> + y : 4140851 . 4223451824 <nl> + theta : 1 . 268238893783181 <nl> + kappa : 0 . 0046528309274718642 <nl> + s : 0 . 20549751323025553 <nl> + dkappa : 0 . 0046460928995339006 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 57362618005424637 <nl> - a : 0 . 95704517573905767 <nl> + v : 0 . 57362618005424715 <nl> + a : 0 . 95704517573905856 <nl> relative_time : 0 . 96000000000000052 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 28980587132 <nl> - y : 4140851 . 4332620222 <nl> - theta : 1 . 2662585523406049 <nl> - kappa : 0 . 
0011663529541438484 <nl> - s : 0 . 21716196288077766 <nl> - dkappa : 0 . 0011644649826235422 <nl> + x : 587581 . 28908739972 <nl> + y : 4140851 . 4334861981 <nl> + theta : 1 . 2683856547813548 <nl> + kappa : 0 . 00491693491215161 <nl> + s : 0 . 217161962880778 <nl> + dkappa : 0 . 00490981442027824 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 59284433572050921 <nl> - a : 0 . 964710507958613 <nl> + v : 0 . 59284433572051 <nl> + a : 0 . 964710507958614 <nl> relative_time : 0 . 98000000000000054 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 29341505421 <nl> - y : 4140851 . 4447591556 <nl> - theta : 1 . 2662921353877525 <nl> - kappa : 0 . 00123107390466387 <nl> - s : 0 . 2292122848733639 <nl> - dkappa : 0 . 0012290811695632813 <nl> + x : 587581 . 29265671456 <nl> + y : 4140851 . 44499577 <nl> + theta : 1 . 2685372707887046 <nl> + kappa : 0 . 0051897757362168459 <nl> + s : 0 . 22921228487336423 <nl> + dkappa : 0 . 0051822601280962222 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 61221222814866672 <nl> - a : 0 . 97202000658955523 <nl> + v : 0 . 61221222814866749 <nl> + a : 0 . 97202000658955612 <nl> relative_time : 1 . 0000000000000004 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 31322375988 <nl> - y : 4140851 . 5078602303 <nl> - theta : 1 . 2664764532036603 <nl> - kappa : 0 . 0015862895642493598 <nl> - s : 0 . 295349494546913 <nl> - dkappa : 0 . 00158372184278081 <nl> + x : 587581 . 31224660808 <nl> + y : 4140851 . 5081651192 <nl> + theta : 1 . 2693694028822824 <nl> + kappa : 0 . 0066872403516693027 <nl> + s : 0 . 29534949454691334 <nl> + dkappa : 0 . 00667755618024339 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 71105593257025046 <nl> - a : 1 . 0034724747284924 <nl> + v : 0 . 71105593257025157 <nl> + a : 1 . 0034724747284931 <nl> relative_time : 1 . 1000000000000005 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 33603599737 <nl> - y : 4140851 . 5805291235 <nl> - theta : 1 . 2666887185515456 <nl> - kappa : 0 . 0019953654595185268 <nl> - s : 0 . 37151488176374642 <nl> - dkappa : 0 . 0019921355683035313 <nl> + x : 587581 . 33480685542 <nl> + y : 4140851 . 5809126371 <nl> + theta : 1 . 2703277085537152 <nl> + kappa : 0 . 0084117608272444647 <nl> + s : 0 . 371514881763747 <nl> + dkappa : 0 . 00839957928006492 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 81264483493434347 <nl> - a : 1 . 0270682980667731 <nl> + v : 0 . 81264483493434458 <nl> + a : 1 . 027068298066774 <nl> relative_time : 1 . 2000000000000006 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 36192265467 <nl> - y : 4140851 . 6629916495 <nl> - theta : 1 . 2669295910347076 <nl> - kappa : 0 . 0024595727720946557 <nl> - s : 0 . 45794512642038226 <nl> - dkappa : 0 . 0024555914700974024 <nl> + x : 587581 . 36040756176 <nl> + y : 4140851 . 6634643846 <nl> + theta : 1 . 2714151656869923 <nl> + kappa : 0 . 010368696018750945 <nl> + s : 0 . 45794512642038293 <nl> + dkappa : 0 . 01035368052290688 <nl> ddkappa : 0 <nl> } <nl> - v : 0 . 9162365705489911 <nl> - a : 1 . 0436734723666243 <nl> + v : 0 . 916236570548992 <nl> + a : 1 . 043673472366625 <nl> relative_time : 1 . 3000000000000007 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 39093368175 <nl> - y : 4140851 . 7554069255 <nl> - theta : 1 . 2671995354340169 <nl> - kappa : 0 . 0029798072235504196 <nl> - s : 0 . 55480700192295418 <nl> - dkappa : 0 . 0029749838198336646 <nl> + x : 587581 . 38909812528 <nl> + y : 4140851 . 7559796516 <nl> + theta : 1 . 2726338726092772 <nl> + kappa : 0 . 
012561821974130839 <nl> + s : 0 . 554807001922955 <nl> + dkappa : 0 . 012543630488402511 <nl> ddkappa : 0 <nl> } <nl> - v : 1 . 0211753742984617 <nl> - a : 1 . 0541539933902728 <nl> + v : 1 . 0211753742984628 <nl> + a : 1 . 0541539933902735 <nl> relative_time : 1 . 4000000000000008 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 423100685 <nl> - y : 4140851 . 857875634 <nl> - theta : 1 . 2674988458423604 <nl> - kappa : 0 . 0035566355870852604 <nl> - s : 0 . 66220603514483389 <nl> - dkappa : 0 . 0035508784732779281 <nl> + x : 587581 . 42090980324 <nl> + y : 4140851 . 8585592275 <nl> + theta : 1 . 2739851570496743 <nl> + kappa : 0 . 0149935280103758 <nl> + s : 0 . 66220603514483489 <nl> + dkappa : 0 . 014971815033438241 <nl> ddkappa : 0 <nl> } <nl> - v : 1 . 1268920806432468 <nl> - a : 1 . 0593758568999458 <nl> + v : 1 . 1268920806432479 <nl> + a : 1 . 0593758568999465 <nl> relative_time : 1 . 5000000000000009 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 45843952056 <nl> - y : 4140851 . 9704482849 <nl> - theta : 1 . 2678276697990736 <nl> - kappa : 0 . 0041903421992027555 <nl> - s : 0 . 78019516638425324 <nl> - dkappa : 0 . 00418355930667925 <nl> + x : 587581 . 45585827657 <nl> + y : 4140851 . 9712536782 <nl> + theta : 1 . 2754696850979883 <nl> + kappa : 0 . 017665012790443104 <nl> + s : 0 . 78019516638425424 <nl> + dkappa : 0 . 017639431085119638 <nl> ddkappa : 0 <nl> } <nl> - v : 1 . 2329041236200595 <nl> - a : 1 . 0602050586578702 <nl> + v : 1 . 2329041236200609 <nl> + a : 1 . 0602050586578708 <nl> relative_time : 1 . 600000000000001 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 49695288809 <nl> - y : 4140852 . 0931334784 <nl> - theta : 1 . 2681860324243894 <nl> - kappa : 0 . 0048809754713880016 <nl> - s : 0 . 90878340932192669 <nl> - dkappa : 0 . 0048730746531592192 <nl> + x : 587581 . 493946215 <nl> + y : 4140852 . 0940716136 <nl> + theta : 1 . 2770875701634958 <nl> + kappa : 0 . 020576480400171751 <nl> + s : 0 . 9087834093219278 <nl> + dkappa : 0 . 020546682433737442 <nl> ddkappa : 0 <nl> } <nl> - v : 1 . 3388155368418369 <nl> - a : 1 . 0575075944262731 <nl> + v : 1 . 3388155368418382 <nl> + a : 1 . 0575075944262735 <nl> relative_time : 1 . 7000000000000011 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 53840014583 <nl> - y : 4140852 . 2259785687 <nl> - theta : 1 . 2687893108977857 <nl> - kappa : 0 . 0055577277347994992 <nl> - s : 1 . 0479445109786738 <nl> - dkappa : 0 . 005300320137360314 <nl> + x : 587581 . 53420552332 <nl> + y : 4140852 . 2272744616 <nl> + theta : 1 . 2795625338244667 <nl> + kappa : 0 . 023087812070680253 <nl> + s : 1 . 0479445109786749 <nl> + dkappa : 0 . 022002687302801093 <nl> ddkappa : 0 <nl> } <nl> - v : 1 . 4443169534977378 <nl> - a : 1 . 0521494599673813 <nl> + v : 1 . 4443169534977394 <nl> + a : 1 . 0521494599673817 <nl> relative_time : 1 . 8000000000000012 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 58247885713 <nl> - y : 4140852 . 3690222651 <nl> - theta : 1 . 2699026278524936 <nl> - kappa : 0 . 0061333078306557223 <nl> - s : 1 . 19762561167304 <nl> - dkappa : 0 . 0050722932484674093 <nl> + x : 587581 . 57544958277 <nl> + y : 4140852 . 3711610814 <nl> + theta : 1 . 2837767809311149 <nl> + kappa : 0 . 024418009369205865 <nl> + s : 1 . 1976256116730415 <nl> + dkappa : 0 . 0199452040567536 <nl> ddkappa : 0 <nl> } <nl> - v : 1 . 5491856063531442 <nl> + v : 1 . 5491856063531459 <nl> a : 1 . 0449966510434219 <nl> relative_time : 1 . 
9000000000000012 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 62963468989 <nl> - y : 4140852 . 5220517991 <nl> - theta : 1 . 2710936651288955 <nl> - kappa : 0 . 006749068999854245 <nl> - s : 1 . 3577559049789221 <nl> - dkappa : 0 . 0048283478710572942 <nl> + x : 587581 . 61957287765 <nl> + y : 4140852 . 5250923838 <nl> + theta : 1 . 2882852233605222 <nl> + kappa : 0 . 025841067340983688 <nl> + s : 1 . 357755904978923 <nl> + dkappa : 0 . 017744088524243879 <nl> ddkappa : 0 <nl> } <nl> - v : 1 . 653285327749662 <nl> - a : 1 . 0369151634166218 <nl> + v : 1 . 6532853277496613 <nl> + a : 1 . 0369151634166214 <nl> relative_time : 2 . 0000000000000013 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 67984373448 <nl> - y : 4140852 . 684989579 <nl> - theta : 1 . 2723618188257584 <nl> - kappa : 0 . 0074046990280455648 <nl> - s : 1 . 528254198742834 <nl> - dkappa : 0 . 0045686076947212134 <nl> + x : 587581 . 66655303561 <nl> + y : 4140852 . 6889903182 <nl> + theta : 1 . 2930855751607808 <nl> + kappa : 0 . 027356264441458493 <nl> + s : 1 . 5282541987428346 <nl> + dkappa : 0 . 015400456754624384 <nl> ddkappa : 0 <nl> } <nl> - v : 1 . 756522045722825 <nl> - a : 1 . 0274085629705372 <nl> + v : 1 . 7565220457228243 <nl> + a : 1 . 0274085629705367 <nl> relative_time : 2 . 1000000000000014 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 73307775566 <nl> - y : 4140852 . 8577439757 <nl> - theta : 1 . 2737063757925329 <nl> - kappa : 0 . 0080998292194501675 <nl> - s : 1 . 7090246126685389 <nl> - dkappa : 0 . 0042932187852320371 <nl> + x : 587581 . 71636363748 <nl> + y : 4140852 . 8627627166 <nl> + theta : 1 . 2981751368377052 <nl> + kappa : 0 . 028962748594379872 <nl> + s : 1 . 7090246126685396 <nl> + dkappa : 0 . 012915626697203475 <nl> ddkappa : 0 <nl> } <nl> - v : 1 . 858688421627279 <nl> - a : 1 . 0155434010297297 <nl> + v : 1 . 8586884216272781 <nl> + a : 1 . 0155434010297295 <nl> relative_time : 2 . 2000000000000015 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 78930186387 <nl> - y : 4140853 . 0402017664 <nl> - theta : 1 . 2751264548118248 <nl> - kappa : 0 . 0088340039884508871 <nl> - s : 1 . 8999486705315654 <nl> - dkappa : 0 . 0040023616314073156 <nl> + x : 587581 . 76897203736 <nl> + y : 4140853 . 0462956894 <nl> + theta : 1 . 3035505727124157 <nl> + kappa : 0 . 030659466916297607 <nl> + s : 1 . 899948670531566 <nl> + dkappa : 0 . 010291226899912546 <nl> ddkappa : 0 <nl> } <nl> - v : 1 . 959559123009738 <nl> - a : 1 . 0015301515179336 <nl> + v : 1 . 9595591230097373 <nl> + a : 1 . 0015301515179333 <nl> relative_time : 2 . 3000000000000016 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 8476185539 <nl> - y : 4140853 . 2324887863 <nl> - theta : 1 . 2769293400445996 <nl> - kappa : 0 . 0094728417926149763 <nl> - s : 2 . 1008864152317317 <nl> - dkappa : 0 . 0037117115480208 <nl> + x : 587581 . 82141216646 <nl> + y : 4140853 . 2402454214 <nl> + theta : 1 . 3095340481741014 <nl> + kappa : 0 . 031396242092779733 <nl> + s : 2 . 1008864152317326 <nl> + dkappa : 0 . 0078386385692524579 <nl> ddkappa : 0 <nl> } <nl> - v : 2 . 0589298648092909 <nl> - a : 0 . 98557928835888275 <nl> + v : 2 . 05892986480929 <nl> + a : 0 . 98557928835888242 <nl> relative_time : 2 . 4000000000000017 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 907849575 <nl> - y : 4140853 . 4344925806 <nl> - theta : 1 . 2791611343643119 <nl> - kappa : 0 . 009995202313672789 <nl> - s : 2 . 
3116785135323843 <nl> - dkappa : 0 . 0034238826964347272 <nl> + x : 587581 . 87324744323 <nl> + y : 4140853 . 4445648105 <nl> + theta : 1 . 3161648191017576 <nl> + kappa : 0 . 031030960507728696 <nl> + s : 2 . 3116785135323847 <nl> + dkappa : 0 . 0056015606190943611 <nl> ddkappa : 0 <nl> } <nl> - v : 2 . 1566174093573975 <nl> - a : 0 . 96790128547631138 <nl> + v : 2 . 1566174093573967 <nl> + a : 0 . 96790128547631116 <nl> relative_time : 2 . 5000000000000018 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587581 . 97084588336 <nl> - y : 4140853 . 6457706406 <nl> - theta : 1 . 2814953933653168 <nl> - kappa : 0 . 010541545109368087 <nl> - s : 2 . 5321483607996331 <nl> - dkappa : 0 . 0031228392348445324 <nl> + x : 587581 . 92746254732 <nl> + y : 4140853 . 6582647781 <nl> + theta : 1 . 3231000176479544 <nl> + kappa : 0 . 030648908353426048 <nl> + s : 2 . 5321483607996336 <nl> + dkappa : 0 . 0032617754090585264 <nl> ddkappa : 0 <nl> } <nl> - v : 2 . 2524595663778935 <nl> - a : 0 . 94870661679395363 <nl> + v : 2 . 2524595663778926 <nl> + a : 0 . 94870661679395352 <nl> relative_time : 2 . 6000000000000019 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587582 . 03655268275 <nl> - y : 4140853 . 8661391907 <nl> - theta : 1 . 2839300866390575 <nl> - kappa : 0 . 011111394954335397 <nl> + x : 587581 . 98401032086 <nl> + y : 4140853 . 8811594406 <nl> + theta : 1 . 3303336113687951 <nl> + kappa : 0 . 03025041795030519 <nl> s : 2 . 7621041857415918 <nl> - dkappa : 0 . 0028088430198907428 <nl> + dkappa : 0 . 0008213181545634299 <nl> ddkappa : 0 <nl> } <nl> - v : 2 . 346315192986987 <nl> - a : 0 . 9282057562355438 <nl> + v : 2 . 3463151929869857 <nl> + a : 0 . 92820575623554358 <nl> relative_time : 2 . 700000000000002 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587582 . 104911445 <nl> - y : 4140854 . 0954019376 <nl> - theta : 1 . 2864630454818702 <nl> - kappa : 0 . 011704244254677669 <nl> + x : 587582 . 042840394 <nl> + y : 4140854 . 1130502555 <nl> + theta : 1 . 3378591569387979 <nl> + kappa : 0 . 029835844253795964 <nl> s : 3 . 0013411551476121 <nl> - dkappa : 0 . 0024821737437831851 <nl> + dkappa : - 0 . 0017176373064591781 <nl> ddkappa : 0 <nl> } <nl> - v : 2 . 4380641936932586 <nl> - a : 0 . 90660917772481575 <nl> + v : 2 . 4380641936932577 <nl> + a : 0 . 90660917772481553 <nl> relative_time : 2 . 800000000000002 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587582 . 17312661046 <nl> - y : 4140854 . 3341499972 <nl> - theta : 1 . 2895493752488756 <nl> - kappa : 0 . 012022127136315272 <nl> + x : 587582 . 09687547025 <nl> + y : 4140854 . 3554011388 <nl> + theta : 1 . 3442732007490097 <nl> + kappa : 0 . 02760715508620477 <nl> s : 3 . 2496434786275232 <nl> - dkappa : 0 . 0021808474093225894 <nl> + dkappa : - 0 . 0035297024754136343 <nl> ddkappa : 0 <nl> } <nl> - v : 2 . 5276075203976638 <nl> - a : 0 . 88412735518550378 <nl> + v : 2 . 5276075203976629 <nl> + a : 0 . 88412735518550367 <nl> relative_time : 2 . 9000000000000021 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587582 . 24373598234 <nl> - y : 4140854 . 5814086813 <nl> - theta : 1 . 2927513761099156 <nl> - kappa : 0 . 012347567244708448 <nl> + x : 587582 . 15279206808 <nl> + y : 4140854 . 6063909284 <nl> + theta : 1 . 3509071880789119 <nl> + kappa : 0 . 025288264459567639 <nl> s : 3 . 5067865133508693 <nl> - dkappa : 0 . 0018692694319618129 <nl> + dkappa : - 0 . 0054013197791198319 <nl> ddkappa : 0 <nl> } <nl> - v : 2 . 6148671723935308 <nl> - a : 0 . 
86097076254134208 <nl> + v : 2 . 61486717239353 <nl> + a : 0 . 860970762541342 <nl> relative_time : 3 . 0000000000000022 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587582 . 31670940307 <nl> - y : 4140854 . 8369457521 <nl> - theta : 1 . 2960605821004361 <nl> - kappa : 0 . 012683903305499076 <nl> - s : 3 . 7725388687861443 <nl> - dkappa : 0 . 0015472596156178846 <nl> + x : 587582 . 21058079088 <nl> + y : 4140854 . 8657840239 <nl> + theta : 1 . 3577632857396802 <nl> + kappa : 0 . 022891735824488703 <nl> + s : 3 . 7725388687861439 <nl> + dkappa : - 0 . 0073356000813977681 <nl> ddkappa : 0 <nl> } <nl> - v : 2 . 6997861963665617 <nl> - a : 0 . 83734987371606473 <nl> + v : 2 . 6997861963665608 <nl> + a : 0 . 83734987371606451 <nl> relative_time : 3 . 1000000000000023 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587582 . 391464221 <nl> - y : 4140855 . 1006785207 <nl> - theta : 1 . 2995022658665665 <nl> - kappa : 0 . 012986715532009196 <nl> + x : 587582 . 26932754542 <nl> + y : 4140855 . 1335329833 <nl> + theta : 1 . 3643498618386118 <nl> + kappa : 0 . 020253728673046874 <nl> s : 4 . 0466645114400315 <nl> - dkappa : 0 . 0012214967489135426 <nl> + dkappa : - 0 . 00918357496930146 <nl> ddkappa : 0 <nl> } <nl> - v : 2 . 7823286863948313 <nl> - a : 0 . 81347516263340591 <nl> + v : 2 . 7823286863948304 <nl> + a : 0 . 8134751626334058 <nl> relative_time : 3 . 2000000000000024 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587582 . 46546322049 <nl> - y : 4140855 . 3730662186 <nl> - theta : 1 . 3032081147615306 <nl> - kappa : 0 . 013045107082675547 <nl> + x : 587582 . 32504167291 <nl> + y : 4140855 . 4102401314 <nl> + theta : 1 . 3684441200461865 <nl> + kappa : 0 . 016618697950961679 <nl> s : 4 . 3289248695966425 <nl> - dkappa : 0 . 00092278342348382661 <nl> + dkappa : - 0 . 01027126729844053 <nl> ddkappa : 0 <nl> } <nl> - v : 2 . 8624797839487881 <nl> - a : 0 . 7895571032170996 <nl> + v : 2 . 8624797839487872 <nl> + a : 0 . 78955710321709949 <nl> relative_time : 3 . 3000000000000025 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587582 . 54153220484 <nl> - y : 4140855 . 6530734571 <nl> - theta : 1 . 3070176279099313 <nl> - kappa : 0 . 013105132028733457 <nl> - s : 4 . 6190809380567481 <nl> - dkappa : 0 . 00061571414648942807 <nl> + x : 587582 . 38231429982 <nl> + y : 4140855 . 6946876487 <nl> + theta : 1 . 3726529075377378 <nl> + kappa : 0 . 012881983982552653 <nl> + s : 4 . 619080938056749 <nl> + dkappa : - 0 . 011389385804270876 <nl> ddkappa : 0 <nl> } <nl> - v : 2 . 9402456778912542 <nl> - a : 0 . 7658061693908802 <nl> + v : 2 . 9402456778912534 <nl> + a : 0 . 76580616939088 <nl> relative_time : 3 . 4000000000000026 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587582 . 61960895325 <nl> - y : 4140855 . 940471204 <nl> - theta : 1 . 3109276893101551 <nl> - kappa : 0 . 013166741272615466 <nl> - s : 4 . 9168953828770263 <nl> - dkappa : 0 . 0003005400860784713 <nl> + x : 587582 . 44109857979 <nl> + y : 4140855 . 9866428711 <nl> + theta : 1 . 3769727817241204 <nl> + kappa : 0 . 0090466432231471432 <nl> + s : 4 . 9168953828770245 <nl> + dkappa : - 0 . 012537015918727449 <nl> ddkappa : 0 <nl> } <nl> - v : 3 . 0156536044774249 <nl> - a : 0 . 74243283507848168 <nl> + v : 3 . 015653604477424 <nl> + a : 0 . 74243283507848146 <nl> relative_time : 3 . 5000000000000027 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587582 . 696883247 <nl> - y : 4140856 . 2357615512 <nl> - theta : 1 . 3148888466328108 <nl> - kappa : 0 . 
013035062544981577 <nl> - s : 5 . 2221346461092848 <nl> - dkappa : 1 . 217842713300223e - 05 <nl> + x : 587582 . 49967818032 <nl> + y : 4140856 . 2862063097 <nl> + theta : 1 . 3784936306542459 <nl> + kappa : 0 . 0049922075714686331 <nl> + s : 5 . 2221346461092839 <nl> + dkappa : - 0 . 01300145244020684 <nl> ddkappa : 0 <nl> } <nl> - v : 3 . 0887518473548692 <nl> - a : 0 . 71964757420363812 <nl> + v : 3 . 0887518473548683 <nl> + a : 0 . 7196475742036379 <nl> relative_time : 3 . 6000000000000028 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587582 . 77483044507 <nl> - y : 4140856 . 5383185558 <nl> - theta : 1 . 3189240127756756 <nl> - kappa : 0 . 012818839389906252 <nl> - s : 5 . 5345710505397161 <nl> - dkappa : - 0 . 0002684897857994708 <nl> + x : 587582 . 55894024542 <nl> + y : 4140856 . 5929709012 <nl> + theta : 1 . 3788342692545275 <nl> + kappa : 0 . 0007905157137017239 <nl> + s : 5 . 5345710505397143 <nl> + dkappa : - 0 . 013179045803355143 <nl> ddkappa : 0 <nl> } <nl> - v : 3 . 159609737563529 <nl> - a : 0 . 69766086069008382 <nl> + v : 3 . 1596097375635281 <nl> + a : 0 . 69766086069008371 <nl> relative_time : 3 . 7000000000000028 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587582 . 85451838991 <nl> - y : 4140856 . 8476323797 <nl> - theta : 1 . 3230492937944538 <nl> - kappa : 0 . 012597787456510643 <nl> - s : 5 . 8539849044281187 <nl> - dkappa : - 0 . 000555425988769038 <nl> + x : 587582 . 61952577368 <nl> + y : 4140856 . 9065862764 <nl> + theta : 1 . 3791825151265087 <nl> + kappa : - 0 . 0035050099360299418 <nl> + s : 5 . 8539849044281169 <nl> + dkappa : - 0 . 013360605249513838 <nl> ddkappa : 0 <nl> } <nl> - v : 3 . 2283176535357203 <nl> + v : 3 . 2283176535357194 <nl> a : 0 . 67668316846155274 <nl> relative_time : 3 . 8000000000000029 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587582 . 93379466853 <nl> - y : 4140857 . 1640271931 <nl> - theta : 1 . 3270756961221624 <nl> - kappa : 0 . 012244106943825871 <nl> - s : 6 . 180166606247143 <nl> - dkappa : - 0 . 000819946364169013 <nl> + x : 587582 . 68229892012 <nl> + y : 4140857 . 226669509 <nl> + theta : 1 . 3773972497632858 <nl> + kappa : - 0 . 0074403299417536923 <nl> + s : 6 . 1801666062471412 <nl> + dkappa : - 0 . 012997784523823028 <nl> ddkappa : 0 <nl> } <nl> - v : 3 . 2949870210961318 <nl> - a : 0 . 65692497144177919 <nl> + v : 3 . 294987021096131 <nl> + a : 0 . 6569249714417793 <nl> relative_time : 3 . 900000000000003 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587583 . 012796099 <nl> - y : 4140857 . 4872651286 <nl> - theta : 1 . 3310171749261732 <nl> - kappa : 0 . 011769270188902484 <nl> + x : 587582 . 74716145755 <nl> + y : 4140857 . 5530386865 <nl> + theta : 1 . 3736222232449187 <nl> + kappa : - 0 . 011043132129098972 <nl> s : 6 . 5129187494215266 <nl> - dkappa : - 0 . 0010643982382162918 <nl> + dkappa : - 0 . 012127337246508541 <nl> ddkappa : 0 <nl> } <nl> - v : 3 . 3597503134618263 <nl> - a : 0 . 6385967435544978 <nl> + v : 3 . 3597503134618241 <nl> + a : 0 . 63859674355449736 <nl> relative_time : 4 . 0000000000000027 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587583 . 0933137174 <nl> - y : 4140857 . 8167066155 <nl> - theta : 1 . 3350342982195045 <nl> - kappa : 0 . 011285320411780474 <nl> + x : 587582 . 81326883053 <nl> + y : 4140857 . 8856715104 <nl> + theta : 1 . 3697747467737926 <nl> + kappa : - 0 . 014715078956091653 <nl> s : 6 . 852057040331835 <nl> - dkappa : - 0 . 0013135416095657276 <nl> + dkappa : - 0 . 
011240184427176407 <nl> ddkappa : 0 <nl> } <nl> - v : 3 . 4227136836820296 <nl> - a : 0 . 62049302491051717 <nl> + v : 3 . 4227136836820273 <nl> + a : 0 . 62049302491051672 <nl> relative_time : 4 . 1000000000000023 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587583 . 17325862823 <nl> - y : 4140858 . 1526629454 <nl> - theta : 1 . 3387947103008191 <nl> - kappa : 0 . 010684255307531797 <nl> - s : 7 . 1973995119389551 <nl> - dkappa : - 0 . 0015351577092144132 <nl> + x : 587582 . 883517551 <nl> + y : 4140858 . 223782429 <nl> + theta : 1 . 3643273286709157 <nl> + kappa : - 0 . 017344589541506816 <nl> + s : 7 . 1973995119389542 <nl> + dkappa : - 0 . 0097192463626888815 <nl> ddkappa : 0 <nl> } <nl> - v : 3 . 483818936352185 <nl> - a : 0 . 60148984351420542 <nl> + v : 3 . 4838189363521828 <nl> + a : 0 . 60148984351420509 <nl> relative_time : 4 . 200000000000002 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587583 . 252912073 <nl> - y : 4140858 . 4948719433 <nl> - theta : 1 . 3423488312225871 <nl> - kappa : 0 . 0099836136914084467 <nl> - s : 7 . 5487564096837261 <nl> - dkappa : - 0 . 0017342247827788946 <nl> + x : 587582 . 957500908 <nl> + y : 4140858 . 5672618784 <nl> + theta : 1 . 3574749983144612 <nl> + kappa : - 0 . 019069530987992061 <nl> + s : 7 . 5487564096837252 <nl> + dkappa : - 0 . 00764289305384027 <nl> ddkappa : 0 <nl> } <nl> - v : 3 . 5429927604852618 <nl> - a : 0 . 58191990512963387 <nl> + v : 3 . 5429927604852596 <nl> + a : 0 . 58191990512963343 <nl> relative_time : 4 . 3000000000000016 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587583 . 3338847 <nl> - y : 4140858 . 8427484427 <nl> - theta : 1 . 3459618137862464 <nl> - kappa : 0 . 0092713683857062381 <nl> - s : 7 . 9059323114371081 <nl> - dkappa : - 0 . 001936588709504618 <nl> + x : 587583 . 032709542 <nl> + y : 4140858 . 91642987 <nl> + theta : 1 . 3505091829572322 <nl> + kappa : - 0 . 020823040085364886 <nl> + s : 7 . 9059323114371063 <nl> + dkappa : - 0 . 0055321521797978852 <nl> ddkappa : 0 <nl> } <nl> - v : 3 . 6001951156706382 <nl> - a : 0 . 56211591552087325 <nl> + v : 3 . 600195115670636 <nl> + a : 0 . 56211591552087292 <nl> relative_time : 4 . 4000000000000012 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587583 . 413826052 <nl> - y : 4140859 . 1966255037 <nl> - theta : 1 . 3490547327214371 <nl> - kappa : 0 . 0084429079065216472 <nl> - s : 8 . 2687294545578141 <nl> - dkappa : - 0 . 0020990177615237324 <nl> + x : 587583 . 11426697322 <nl> + y : 4140859 . 2699248074 <nl> + theta : 1 . 343292608654342 <nl> + kappa : - 0 . 020209413152349993 <nl> + s : 8 . 2687294545578123 <nl> + dkappa : - 0 . 0025158160000442135 <nl> ddkappa : 0 <nl> } <nl> - v : 3 . 6554192320740988 <nl> - a : 0 . 54241058045199519 <nl> + v : 3 . 6554192320740966 <nl> + a : 0 . 54241058045199486 <nl> relative_time : 4 . 5000000000000009 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587583 . 49405351072 <nl> - y : 4140859 . 5560009452 <nl> - theta : 1 . 3519664018566155 <nl> - kappa : 0 . 0075606542924139212 <nl> - s : 8 . 63695106294996 <nl> - dkappa : - 0 . 0022468723552201012 <nl> + x : 587583 . 19919010426 <nl> + y : 4140859 . 628219679 <nl> + theta : 1 . 3359094898517361 <nl> + kappa : - 0 . 018591532486941374 <nl> + s : 8 . 636951062949958 <nl> + dkappa : 0 . 00090811676465488047 <nl> ddkappa : 0 <nl> } <nl> - v : 3 . 7086916104378354 <nl> - a : 0 . 52313660568707065 <nl> + v : 3 . 7086916104378331 <nl> + a : 0 . 52313660568707032 <nl> relative_time : 4 . 
6000000000000005 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587583 . 57540726382 <nl> - y : 4140859 . 920485694 <nl> - theta : 1 . 3549140420112895 <nl> - kappa : 0 . 0066653676038527735 <nl> - s : 9 . 0104046741207 <nl> - dkappa : - 0 . 0023964821730706725 <nl> + x : 587583 . 28531989467 <nl> + y : 4140859 . 9916055053 <nl> + theta : 1 . 3284214654781108 <nl> + kappa : - 0 . 016950663610238434 <nl> + s : 9 . 0104046741206982 <nl> + dkappa : 0 . 0043806996501789021 <nl> ddkappa : 0 <nl> } <nl> - v : 3 . 7600720220804469 <nl> - a : 0 . 50462669699017071 <nl> + v : 3 . 7600720220804442 <nl> + a : 0 . 50462669699017038 <nl> relative_time : 4 . 7 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587583 . 655412396 <nl> - y : 4140860 . 2904343773 <nl> - theta : 1 . 3569326098917047 <nl> - kappa : 0 . 0056687639624860052 <nl> - s : 9 . 38890546623787 <nl> - dkappa : - 0 . 0024861053611602004 <nl> + x : 587583 . 37843750394 <nl> + y : 4140860 . 3584730923 <nl> + theta : 1 . 32475819567696 <nl> + kappa : - 0 . 010518105326581 <nl> + s : 9 . 3889054662378673 <nl> + dkappa : 0 . 00915705485806078 <nl> ddkappa : 0 <nl> } <nl> - v : 3 . 8096535088969392 <nl> - a : 0 . 48721356012536648 <nl> + v : 3 . 8096535088969365 <nl> + a : 0 . 48721356012536626 <nl> relative_time : 4 . 8 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587583 . 73644762137 <nl> - y : 4140860 . 6651462764 <nl> - theta : 1 . 3589771675222604 <nl> - kappa : 0 . 0046593287092228566 <nl> - s : 9 . 7722795851876221 <nl> - dkappa : - 0 . 00257688247837335 <nl> + x : 587583 . 47277602018 <nl> + y : 4140860 . 7300588288 <nl> + theta : 1 . 3210625813215433 <nl> + kappa : - 0 . 0039847197557369039 <nl> + s : 9 . 77227958518762 <nl> + dkappa : 0 . 013999652104185644 <nl> ddkappa : 0 <nl> } <nl> - v : 3 . 8575623833587254 <nl> - a : 0 . 47122990085672933 <nl> + v : 3 . 8575623833587227 <nl> + a : 0 . 47122990085672906 <nl> relative_time : 4 . 8999999999999995 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587583 . 81788370141 <nl> - y : 4140861 . 0445929654 <nl> - theta : 1 . 3606476985000633 <nl> - kappa : 0 . 00362604757206703 <nl> - s : 10 . 160367471632075 <nl> - dkappa : - 0 . 0026444493043456981 <nl> + x : 587583 . 5685053817 <nl> + y : 4140861 . 1061545671 <nl> + theta : 1 . 3194753094597038 <nl> + kappa : 0 . 0013905391546700429 <nl> + s : 10 . 160367471632073 <nl> + dkappa : 0 . 01580371819780059 <nl> ddkappa : 0 <nl> } <nl> - v : 3 . 9039582285136256 <nl> - a : 0 . 45700842494833027 <nl> + v : 3 . 903958228513623 <nl> + a : 0 . 45700842494833005 <nl> relative_time : 4 . 9999999999999991 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587583 . 89933274663 <nl> - y : 4140861 . 4287123559 <nl> - theta : 1 . 3617035578184868 <nl> - kappa : 0 . 0025624225022432097 <nl> - s : 10 . 553027188066947 <nl> - dkappa : - 0 . 0026741526742133839 <nl> + x : 587583 . 66574458941 <nl> + y : 4140861 . 4865835211 <nl> + theta : 1 . 3214335043985468 <nl> + kappa : 0 . 004779670664219195 <nl> + s : 10 . 553027188066944 <nl> + dkappa : 0 . 012502219787096161 <nl> ddkappa : 0 <nl> } <nl> - v : 3 . 9490338979858679 <nl> - a : 0 . 44488183816424043 <nl> + v : 3 . 9490338979858652 <nl> + a : 0 . 44488183816424021 <nl> relative_time : 5 . 0999999999999988 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587583 . 981705026 <nl> - y : 4140861 . 8171857819 <nl> - theta : 1 . 3627713854192329 <nl> - kappa : 0 . 0014867411244441858 <nl> - s : 10 . 950137745879204 <nl> - dkappa : - 0 . 
0027041927350685106 <nl> + x : 587583 . 76408601459 <nl> + y : 4140861 . 87132468 <nl> + theta : 1 . 3234138956939141 <nl> + kappa : 0 . 0082072183548145411 <nl> + s : 10 . 950137745879202 <nl> + dkappa : 0 . 0091632985262273241 <nl> ddkappa : 0 <nl> } <nl> - v : 3 . 9930155159760861 <nl> - a : 0 . 43518284626853104 <nl> + v : 3 . 9930155159760834 <nl> + a : 0 . 43518284626853082 <nl> relative_time : 5 . 1999999999999984 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587584 . 064541297 <nl> - y : 4140862 . 2100114571 <nl> - theta : 1 . 3629357691241575 <nl> - kappa : 0 . 000428060510609213 <nl> - s : 11 . 351602432404691 <nl> - dkappa : - 0 . 0026800219997023169 <nl> + x : 587583 . 86079206434 <nl> + y : 4140862 . 2609660141 <nl> + theta : 1 . 3273201515340585 <nl> + kappa : 0 . 00972427363996958 <nl> + s : 11 . 351602432404688 <nl> + dkappa : 0 . 0066844089797860112 <nl> ddkappa : 0 <nl> } <nl> - v : 4 . 036162477261322 <nl> - a : 0 . 42824415502527319 <nl> + v : 4 . 0361624772613194 <nl> + a : 0 . 428244155025273 <nl> relative_time : 5 . 299999999999998 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587584 . 148182935 <nl> - y : 4140862 . 6070465939 <nl> - theta : 1 . 3629377418417743 <nl> - kappa : - 0 . 00063675445371953016 <nl> - s : 11 . 757352137985775 <nl> - dkappa : - 0 . 0026458095325366405 <nl> + x : 587583 . 95801709231 <nl> + y : 4140862 . 654895091 <nl> + theta : 1 . 3316282180446661 <nl> + kappa : 0 . 010889096887748818 <nl> + s : 11 . 757352137985771 <nl> + dkappa : 0 . 0043486360092485606 <nl> ddkappa : 0 <nl> } <nl> - v : 4 . 078767447195025 <nl> - a : 0 . 42439847019853805 <nl> + v : 4 . 0787674471950224 <nl> + a : 0 . 42439847019853782 <nl> relative_time : 5 . 3999999999999977 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587584 . 23289336683 <nl> - y : 4140863 . 0081964978 <nl> - theta : 1 . 3625488313080742 <nl> - kappa : - 0 . 0016751992813705228 <nl> - s : 12 . 167348683028994 <nl> - dkappa : - 0 . 0025868458495414341 <nl> + x : 587584 . 05457968242 <nl> + y : 4140863 . 0533519043 <nl> + theta : 1 . 3360724958058459 <nl> + kappa : 0 . 011404199304584509 <nl> + s : 12 . 16734868302899 <nl> + dkappa : 0 . 0022092239691343194 <nl> ddkappa : 0 <nl> } <nl> - v : 4 . 1211563617070492 <nl> - a : 0 . 42397849755239675 <nl> + v : 4 . 1211563617070466 <nl> + a : 0 . 42397849755239664 <nl> relative_time : 5 . 4999999999999973 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587584 . 31880156 <nl> - y : 4140863 . 4134299047 <nl> - theta : 1 . 3615066589654479 <nl> - kappa : - 0 . 002662083120787466 <nl> - s : 12 . 581588145062694 <nl> - dkappa : - 0 . 0024867583755900069 <nl> + x : 587584 . 14929861366 <nl> + y : 4140863 . 4566168985 <nl> + theta : 1 . 3407169573302458 <nl> + kappa : 0 . 010804560060950229 <nl> + s : 12 . 581588145062691 <nl> + dkappa : 0 . 00042131883316248004 <nl> ddkappa : 0 <nl> } <nl> - v : 4 . 16368842730366 <nl> - a : 0 . 42731694285092037 <nl> + v : 4 . 163688427303657 <nl> + a : 0 . 42731694285092026 <nl> relative_time : 5 . 599999999999997 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587584 . 40559666313 <nl> - y : 4140863 . 8228469128 <nl> - theta : 1 . 3604537273097952 <nl> - kappa : - 0 . 0036591554786632504 <nl> - s : 13 . 000104185794658 <nl> - dkappa : - 0 . 0023856376056870972 <nl> + x : 587584 . 24499541626 <nl> + y : 4140863 . 8640451725 <nl> + theta : 1 . 3454093679445815 <nl> + kappa : 0 . 010198730184479136 <nl> + s : 13 . 000104185794655 <nl> + dkappa : - 0 . 
0013850445080741945 <nl> ddkappa : 0 <nl> } <nl> - v : 4 . 2067561210675244 <nl> - a : 0 . 43474651185818025 <nl> + v : 4 . 2067561210675217 <nl> + a : 0 . 4347465118581802 <nl> relative_time : 5 . 6999999999999966 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587584 . 4947604374 <nl> - y : 4140864 . 2362068174 <nl> - theta : 1 . 3584735511366404 <nl> - kappa : - 0 . 0045014570215952034 <nl> - s : 13 . 422971378169768 <nl> - dkappa : - 0 . 0022174397696864655 <nl> + x : 587584 . 33772846183 <nl> + y : 4140864 . 2766183894 <nl> + theta : 1 . 3489064903385 <nl> + kappa : 0 . 0084147823804515149 <nl> + s : 13 . 422971378169763 <nl> + dkappa : - 0 . 0026184806547686 <nl> ddkappa : 0 <nl> } <nl> - v : 4 . 2507851906577212 <nl> + v : 4 . 2507851906577194 <nl> a : 0 . 44659991033824731 <nl> relative_time : 5 . 7999999999999963 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587584 . 58491522761 <nl> - y : 4140864 . 6539258109 <nl> - theta : 1 . 3564421347788134 <nl> - kappa : - 0 . 0053471999404675775 <nl> - s : 13 . 85030853342762 <nl> - dkappa : - 0 . 0020452800362010326 <nl> + x : 587584 . 43129984871 <nl> + y : 4140864 . 6935853423 <nl> + theta : 1 . 3523959842765869 <nl> + kappa : 0 . 0065699720291120048 <nl> + s : 13 . 850308533427615 <nl> + dkappa : - 0 . 0038437445810443407 <nl> ddkappa : 0 <nl> } <nl> - v : 4 . 2962346543097363 <nl> - a : 0 . 463209844055193 <nl> + v : 4 . 2962346543097336 <nl> + a : 0 . 46320984405519294 <nl> relative_time : 5 . 8999999999999959 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587584 . 677520419 <nl> - y : 4140865 . 0758546968 <nl> - theta : 1 . 3539415006140576 <nl> - kappa : - 0 . 0060505835392353413 <nl> - s : 14 . 282282028160195 <nl> - dkappa : - 0 . 0018276956699219723 <nl> + x : 587584 . 52438922261 <nl> + y : 4140865 . 1154075968 <nl> + theta : 1 . 3545583668677068 <nl> + kappa : 0 . 004324243823217739 <nl> + s : 14 . 282282028160187 <nl> + dkappa : - 0 . 0046975275521624347 <nl> ddkappa : 0 <nl> } <nl> - v : 4 . 3435968008354582 <nl> + v : 4 . 3435968008354555 <nl> a : 0 . 48490901877308817 <nl> relative_time : 5 . 9999999999999956 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587584 . 77207972156 <nl> - y : 4140865 . 5023244573 <nl> - theta : 1 . 3511352772078871 <nl> - kappa : - 0 . 0066678365523607248 <nl> - s : 14 . 719109131369468 <nl> - dkappa : - 0 . 0015806377672014603 <nl> + x : 587584 . 61758831074 <nl> + y : 4140865 . 5421766778 <nl> + theta : 1 . 3558915054217682 <nl> + kappa : 0 . 0018150974823284908 <nl> + s : 14 . 719109131369462 <nl> + dkappa : - 0 . 0053202972642109794 <nl> ddkappa : 0 <nl> } <nl> - v : 4 . 3933971896231876 <nl> + v : 4 . 3933971896231849 <nl> a : 0 . 51203014025600424 <nl> relative_time : 6 . 0999999999999952 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587584 . 868747397 <nl> - y : 4140865 . 9335726155 <nl> - theta : 1 . 3481413979590915 <nl> - kappa : - 0 . 0071859320973362189 <nl> - s : 15 . 161061331525078 <nl> - dkappa : - 0 . 0013067069450836729 <nl> + x : 587584 . 71186570683 <nl> + y : 4140865 . 9739561575 <nl> + theta : 1 . 3563908728707696 <nl> + kappa : - 0 . 00072342731880143437 <nl> + s : 15 . 161061331525072 <nl> + dkappa : - 0 . 0057439660613017376 <nl> ddkappa : 0 <nl> } <nl> - v : 4 . 4461946506376284 <nl> + v : 4 . 4461946506376258 <nl> a : 0 . 54490591426801194 <nl> relative_time : 6 . 1999999999999948 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587584 . 96866607177 <nl> - y : 4140866 . 
3696788922 <nl> - theta : 1 . 3447917954472537 <nl> - kappa : - 0 . 0074912190245708526 <nl> - s : 15 . 608467663621944 <nl> - dkappa : - 0 . 00098000406342126046 <nl> + x : 587584 . 80727507558 <nl> + y : 4140866 . 4110711059 <nl> + theta : 1 . 3551306317391747 <nl> + kappa : - 0 . 0032931548442878219 <nl> + s : 15 . 608467663621939 <nl> + dkappa : - 0 . 0057437800557198614 <nl> ddkappa : 0 <nl> } <nl> - v : 4 . 5025812844198949 <nl> - a : 0 . 58386904657318284 <nl> + v : 4 . 5025812844198922 <nl> + a : 0 . 583869046573183 <nl> relative_time : 6 . 2999999999999945 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587585 . 07023227459 <nl> - y : 4140866 . 8114017718 <nl> - theta : 1 . 341387205021964 <nl> - kappa : - 0 . 0077596250376189817 <nl> - s : 16 . 061718036237924 <nl> - dkappa : - 0 . 00064151133519184324 <nl> + x : 587584 . 90417091618 <nl> + y : 4140866 . 8538425365 <nl> + theta : 1 . 3536249998655476 <nl> + kappa : - 0 . 0058329357071945104 <nl> + s : 16 . 061718036237917 <nl> + dkappa : - 0 . 005680164119407422 <nl> ddkappa : 0 <nl> } <nl> - v : 4 . 5631824620875054 <nl> - a : 0 . 62925224293558812 <nl> + v : 4 . 5631824620875028 <nl> + a : 0 . 62925224293558824 <nl> relative_time : 6 . 3999999999999941 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587585 . 176311428 <nl> - y : 4140867 . 2585393987 <nl> - theta : 1 . 3378335271158566 <nl> - kappa : - 0 . 0076615369931180141 <nl> - s : 16 . 521266558591446 <nl> - dkappa : - 0 . 00023017017950626456 <nl> + x : 587585 . 00465398328 <nl> + y : 4140867 . 3022709005 <nl> + theta : 1 . 349963053524089 <nl> + kappa : - 0 . 0078155798921480321 <nl> + s : 16 . 521266558591439 <nl> + dkappa : - 0 . 0050240292907682976 <nl> ddkappa : 0 <nl> } <nl> - v : 4 . 6286568253343887 <nl> - a : 0 . 68138820911929843 <nl> + v : 4 . 628656825334386 <nl> + a : 0 . 68138820911929854 <nl> relative_time : 6 . 4999999999999938 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587585 . 28396481613 <nl> - y : 4140867 . 7123126327 <nl> - theta : 1 . 3342271119600824 <nl> - kappa : - 0 . 0075619933035561155 <nl> - s : 16 . 987634867599144 <nl> - dkappa : 0 . 00018727535642950246 <nl> + x : 587585 . 10662823787 <nl> + y : 4140867 . 7573540257 <nl> + theta : 1 . 3462467632089838 <nl> + kappa : - 0 . 0098276468903515657 <nl> + s : 16 . 987634867599137 <nl> + dkappa : - 0 . 0043581572976513581 <nl> ddkappa : 0 <nl> } <nl> - v : 4 . 6996962864308776 <nl> - a : 0 . 74060965088838548 <nl> + v : 4 . 6996962864308749 <nl> + a : 0 . 7406096508883856 <nl> relative_time : 6 . 5999999999999934 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587585 . 39655577275 <nl> - y : 4140868 . 1725197565 <nl> - theta : 1 . 3308868379916738 <nl> - kappa : - 0 . 0069895973108099708 <nl> + x : 587585 . 21442086319 <nl> + y : 4140868 . 218708022 <nl> + theta : 1 . 3411396445044268 <nl> + kappa : - 0 . 010600181730391216 <nl> s : 17 . 461415454933519 <nl> - dkappa : 0 . 00068454504613294832 <nl> + dkappa : - 0 . 0030434602348514577 <nl> ddkappa : 0 <nl> } <nl> - v : 4 . 7770260282237142 <nl> - a : 0 . 80724927400692015 <nl> + v : 4 . 7770260282237107 <nl> + a : 0 . 80724927400692026 <nl> relative_time : 6 . 6999999999999931 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587585 . 51128245785 <nl> - y : 4140868 . 6405223235 <nl> - theta : 1 . 3275112407625809 <nl> - kappa : - 0 . 0063759187278310215 <nl> - s : 17 . 943274994080561 <nl> - dkappa : 0 . 0011951896772660539 <nl> + x : 587585 . 32434733841 <nl> + y : 4140868 . 
6878612866 <nl> + theta : 1 . 3358516043882229 <nl> + kappa : - 0 . 011296300846560074 <nl> + s : 17 . 943274994080554 <nl> + dkappa : - 0 . 0016613751149803348 <nl> ddkappa : 0 <nl> } <nl> - v : 4 . 8614045041360452 <nl> - a : 0 . 88163978423897338 <nl> + v : 4 . 8614045041360425 <nl> + a : 0 . 88163978423897382 <nl> relative_time : 6 . 7999999999999927 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587585 . 6306093426 <nl> - y : 4140869 . 1164733754 <nl> - theta : 1 . 324838298685199 <nl> - kappa : - 0 . 0052415084723861074 <nl> - s : 18 . 433957667397387 <nl> - dkappa : 0 . 0017828994765881443 <nl> + x : 587585 . 44074628956 <nl> + y : 4140869 . 1645340691 <nl> + theta : 1 . 3306983525960607 <nl> + kappa : - 0 . 010211290979992071 <nl> + s : 18 . 43395766739738 <nl> + dkappa : 0 . 00034863022324389294 <nl> ddkappa : 0 <nl> } <nl> - v : 4 . 9536234381674271 <nl> - a : 0 . 9641138873486168 <nl> + v : 4 . 9536234381674245 <nl> + a : 0 . 96411388734861725 <nl> relative_time : 6 . 8999999999999924 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587585 . 75273481756 <nl> - y : 4140869 . 6016705558 <nl> - theta : 1 . 3222511346733885 <nl> - kappa : - 0 . 0039925958510360618 <nl> - s : 18 . 934288493169902 <nl> - dkappa : 0 . 002394418757545899 <nl> + x : 587585 . 56026166072 <nl> + y : 4140869 . 6503807642 <nl> + theta : 1 . 3254867611289214 <nl> + kappa : - 0 . 00877200824398005 <nl> + s : 18 . 934288493169895 <nl> + dkappa : 0 . 0025100012142177603 <nl> ddkappa : 0 <nl> } <nl> - v : 5 . 0545078248938236 <nl> - a : 1 . 0550042890999214 <nl> + v : 5 . 0545078248938209 <nl> + a : 1 . 0550042890999216 <nl> relative_time : 6 . 999999999999992 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587585 . 87895925622 <nl> - y : 4140870 . 0967196668 <nl> - theta : 1 . 3209844871616969 <nl> - kappa : - 0 . 0021250266602332846 <nl> - s : 19 . 445176652670419 <nl> - dkappa : 0 . 0030877074605214433 <nl> + x : 587585 . 68562536815 <nl> + y : 4140870 . 145646662 <nl> + theta : 1 . 3225819768780109 <nl> + kappa : - 0 . 0048490083378740362 <nl> + s : 19 . 445176652670412 <nl> + dkappa : 0 . 0053317686043161776 <nl> ddkappa : 0 <nl> } <nl> - v : 5 . 1649159294676021 <nl> - a : 1 . 1546436952569579 <nl> + v : 5 . 1649159294675995 <nl> + a : 1 . 1546436952569583 <nl> relative_time : 7 . 0999999999999917 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587586 . 00834134291 <nl> - y : 4140870 . 6028877129 <nl> - theta : 1 . 3199629591099313 <nl> - kappa : - 9 . 7301740345318737e - 05 <nl> - s : 19 . 967618817215321 <nl> - dkappa : 0 . 0038103855318452405 <nl> + x : 587585 . 81450199126 <nl> + y : 4140870 . 6519436412 <nl> + theta : 1 . 3201039024922165 <nl> + kappa : - 0 . 00033743334764324291 <nl> + s : 19 . 96761881721531 <nl> + dkappa : 0 . 0083426114095960235 <nl> ddkappa : 0 <nl> } <nl> - v : 5 . 2857392876175417 <nl> - a : 1 . 2633648115837983 <nl> + v : 5 . 2857392876175382 <nl> + a : 1 . 2633648115837988 <nl> relative_time : 7 . 1999999999999913 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587586 . 14103114279 <nl> - y : 4140871 . 1212581731 <nl> - theta : 1 . 3202429902220869 <nl> - kappa : 0 . 0007207012471774864 <nl> - s : 20 . 502702475222666 <nl> - dkappa : 0 . 0026228351581703852 <nl> + x : 587585 . 94725144992 <nl> + y : 4140871 . 1702987691 <nl> + theta : 1 . 3199156758911297 <nl> + kappa : 0 . 00010064089933273897 <nl> + s : 20 . 502702475222655 <nl> + dkappa : 0 . 0044540610837478217 <nl> ddkappa : 0 <nl> } <nl> - v : 5 . 
4179027056488227 <nl> - a : 1 . 3815003438445128 <nl> + v : 5 . 4179027056488209 <nl> + a : 1 . 3815003438445135 <nl> relative_time : 7 . 2999999999999909 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587586 . 2771256353 <nl> - y : 4140871 . 6530259009 <nl> - theta : 1 . 3207091461194054 <nl> - kappa : 0 . 0014099917437988302 <nl> - s : 21 . 05160925926987 <nl> - dkappa : 0 . 0012665524699625296 <nl> + x : 587586 . 08350838267 <nl> + y : 4140871 . 702024926 <nl> + theta : 1 . 3199755416323002 <nl> + kappa : 9 . 9647722219977931e - 05 <nl> + s : 21 . 051609259269863 <nl> + dkappa : - 1 . 8093134113867527e - 06 <nl> ddkappa : 0 <nl> } <nl> - v : 5 . 562364260443041 <nl> - a : 1 . 5093829978031734 <nl> + v : 5 . 5623642604430383 <nl> + a : 1 . 5093829978031739 <nl> relative_time : 7 . 3999999999999906 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587586 . 41627165838 <nl> - y : 4140872 . 1996011953 <nl> - theta : 1 . 3217578449062071 <nl> - kappa : 0 . 001903496634546351 <nl> - s : 21 . 615618273151323 <nl> - dkappa : 0 . 0010383714302760563 <nl> + x : 587586 . 22346331854 <nl> + y : 4140872 . 2483936511 <nl> + theta : 1 . 3200359310348269 <nl> + kappa : 9 . 8626050632384638e - 05 <nl> + s : 21 . 615618273151313 <nl> + dkappa : - 1 . 8105564438892876e - 06 <nl> ddkappa : 0 <nl> } <nl> - v : 5 . 7201152994581905 <nl> - a : 1 . 6473454792238504 <nl> + v : 5 . 7201152994581879 <nl> + a : 1 . 6473454792238509 <nl> relative_time : 7 . 49999999999999 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587586 . 55910239089 <nl> - y : 4140872 . 7622458413 <nl> - theta : 1 . 3229531709108677 <nl> - kappa : 0 . 002347864980454003 <nl> - s : 22 . 19610941893604 <nl> - dkappa : 0 . 00081145588912365956 <nl> + x : 587586 . 36749126541 <nl> + y : 4140872 . 8107333351 <nl> + theta : 1 . 3200915557106194 <nl> + kappa : 9 . 7575401502703534e - 05 <nl> + s : 22 . 196109418936029 <nl> + dkappa : - 1 . 810566801690396e - 06 <nl> ddkappa : 0 <nl> } <nl> - v : 5 . 8921804407286782 <nl> - a : 1 . 7957204938706157 <nl> + v : 5 . 8921804407286764 <nl> + a : 1 . 7957204938706166 <nl> relative_time : 7 . 59999999999999 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587586 . 7054544898 <nl> - y : 4140873 . 3425321579 <nl> - theta : 1 . 3244588147674454 <nl> - kappa : 0 . 0026562038684510869 <nl> - s : 22 . 794566724025291 <nl> - dkappa : 0 . 00059621395510818238 <nl> + x : 587586 . 51593714231 <nl> + y : 4140873 . 3904875652 <nl> + theta : 1 . 320133568558596 <nl> + kappa : 9 . 6494298901864741e - 05 <nl> + s : 22 . 794566724025277 <nl> + dkappa : - 1 . 8075974501712454e - 06 <nl> ddkappa : 0 <nl> } <nl> - v : 6 . 0796175728653186 <nl> - a : 1 . 95484074750754 <nl> + v : 6 . 0796175728653159 <nl> + a : 1 . 9548407475075404 <nl> relative_time : 7 . 6999999999999895 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587586 . 85554242833 <nl> - y : 4140873 . 9420448048 <nl> - theta : 1 . 326166590500037 <nl> - kappa : 0 . 0028509456513499164 <nl> - s : 23 . 412581668210244 <nl> - dkappa : 0 . 00039158876841271519 <nl> + x : 587586 . 66919730231 <nl> + y : 4140873 . 9891976733 <nl> + theta : 1 . 3201900671405413 <nl> + kappa : 9 . 538322193007873e - 05 <nl> + s : 23 . 41258166821023 <nl> + dkappa : - 1 . 8011277695396619e - 06 <nl> ddkappa : 0 <nl> } <nl> - v : 6 . 2835178550553277 <nl> + v : 6 . 283517855055325 <nl> a : 2 . 1250389458986949 <nl> relative_time : 7 . 7999999999999892 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587587 . 
010081598 <nl> - y : 4140874 . 5623590811 <nl> - theta : 1 . 3280257469669765 <nl> - kappa : 0 . 0029704762094630954 <nl> - s : 24 . 051856510729611 <nl> - dkappa : 0 . 00019182054239493733 <nl> + x : 587586 . 82770488842 <nl> + y : 4140874 . 6085099285 <nl> + theta : 1 . 3202561921708948 <nl> + kappa : 9 . 4237551848189608e - 05 <nl> + s : 24 . 051856510729596 <nl> + dkappa : - 1 . 7924011591169596e - 06 <nl> ddkappa : 0 <nl> } <nl> - v : 6 . 5050057170623319 <nl> - a : 2 . 3066477948081507 <nl> + v : 6 . 5050057170623292 <nl> + a : 2 . 3066477948081516 <nl> relative_time : 7 . 8999999999999888 <nl> } <nl> trajectory_point { <nl> path_point { <nl> - x : 587587 . 16839657025 <nl> - y : 4140875 . 2055117516 <nl> - theta : 1 . 3299906720894574 <nl> - kappa : 0 . 0029244815898477258 <nl> - s : 24 . 714207617327293 <nl> - dkappa : 1 . 3155858200981896e - 05 <nl> + x : 587586 . 99187704816 <nl> + y : 4140875 . 2501924746 <nl> + theta : 1 . 3203233860619559 <nl> + kappa : 9 . 305953514507645e - 05 <nl> + s : 24 . 714207617327279 <nl> + dkappa : - 1 . 7829154528346041e - 06 <nl> ddkappa : 0 <nl> } <nl> - v : 6 . 7452388592263688 <nl> - a : 2 . 4999999999999796 <nl> + v : 6 . 7452388592263661 <nl> + a : 2 . 4999999999999805 <nl> relative_time : 7 . 9999999999999885 <nl> } <nl> decision { <nl> | planning : ( 1 ) use less ( half ) level_distance when adc is stopped for sample waypoints ; ( 2 ) tempararily disable sunnyvale_big_loop tests | ApolloAuto/apollo | b5bda3176df9cf77bc343ff59e2d84efa5bc35d8 | 2018-04-26T23:02:24Z |
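The record above is a planning regression fixture in protobuf text format, updating expected trajectory values. As a reading aid, here is a hedged C++ mirror of the fields that appear in the dump; the field names come from the record itself, while the unit comments are the conventional interpretations and are not verified against Apollo's .proto definitions.

```cpp
// Illustrative mirror of the fields visible in the dump above.
struct PathPoint {
  double x = 0, y = 0;  // map-frame position (likely meters)
  double theta = 0;     // heading (radians)
  double kappa = 0;     // path curvature
  double s = 0;         // arc length along the path
  double dkappa = 0;    // curvature rate w.r.t. s
  double ddkappa = 0;   // zero throughout this dump
};

struct TrajectoryPoint {
  PathPoint path_point;
  double v = 0;              // speed
  double a = 0;              // acceleration
  double relative_time = 0;  // seconds from trajectory start
};
```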
mmm a / tensorflow / core / common_runtime / kernel_benchmark_testlib . cc <nl> ppp b / tensorflow / core / common_runtime / kernel_benchmark_testlib . cc <nl> Benchmark : : Benchmark ( const string & device , Graph * g , <nl> TF_CHECK_OK ( NewExecutor ( executor_type , params , * g , & exec_ ) ) ; <nl> } <nl> <nl> + Benchmark : : Benchmark ( const string & device , Graph * g , bool old_benchmark_api ) <nl> + : Benchmark ( device , g , nullptr , nullptr , nullptr , " " , old_benchmark_api ) { } <nl> + <nl> Benchmark : : ~ Benchmark ( ) { <nl> if ( device_ ) { <nl> rendez_ - > Unref ( ) ; <nl> mmm a / tensorflow / core / common_runtime / kernel_benchmark_testlib . h <nl> ppp b / tensorflow / core / common_runtime / kernel_benchmark_testlib . h <nl> class Benchmark { <nl> const SessionOptions * options = nullptr , Graph * init = nullptr , <nl> Rendezvous * rendez = nullptr , const char * executor_type = " " , <nl> bool old_benchmark_api = true ) ; <nl> + <nl> + Benchmark ( const string & device , Graph * g , bool old_benchmark_api ) ; <nl> + <nl> ~ Benchmark ( ) ; <nl> <nl> / / Executes the graph for " iters " times . <nl> | internal framework cleanup | tensorflow/tensorflow | d3d44d02c855e79d54944ad10021b0a0fa373526 | 2020-10-24T18:27:07Z |
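The change above adds a convenience constructor that forwards to the full one, i.e. a C++11 delegating constructor. A minimal standalone sketch of the same pattern, with illustrative names rather than TensorFlow's real types:

```cpp
#include <string>

// Sketch of the delegating-constructor pattern from the diff above.
// Class and member names are illustrative, not TensorFlow's.
class Benchmark {
 public:
  // Full constructor: every collaborator is explicit.
  Benchmark(const std::string& device, void* graph, const char* executor_type,
            bool old_benchmark_api)
      : device_(device),
        graph_(graph),
        executor_type_(executor_type),
        old_benchmark_api_(old_benchmark_api) {}

  // Convenience overload: delegates to the full constructor, supplying
  // defaults for the rarely used parameters, just as the diff passes
  // nullptr/"" placeholders.
  Benchmark(const std::string& device, void* graph, bool old_benchmark_api)
      : Benchmark(device, graph, "", old_benchmark_api) {}

 private:
  std::string device_;
  void* graph_;
  const char* executor_type_;
  bool old_benchmark_api_;
};
```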
mmm a / src / doc / algo . cpp <nl> ppp b / src / doc / algo . cpp <nl> void algo_line ( int x1 , int y1 , int x2 , int y2 , void * data , AlgoPixel proc ) <nl> } <nl> } <nl> <nl> + / * Additional helper functions for the ellipse - drawing helper function <nl> + below corresponding to routines in Bresenham ' s algorithm . * / <nl> + namespace { <nl> + int bresenham_ellipse_error ( int rx , int ry , int x , int y ) <nl> + { <nl> + return x * x * ry * ry + y * y * rx * rx - rx * rx * ry * ry ; <nl> + } <nl> + <nl> + / / Initialize positions x and y for Bresenham ' s algorithm <nl> + void bresenham_ellipse_init ( int rx , int ry , int * px , int * py ) <nl> + { <nl> + / / Start at the fatter pole <nl> + if ( rx > ry ) { * px = 0 ; * py = ry ; } <nl> + else { * px = rx ; * py = 0 ; } <nl> + } <nl> + <nl> + / / Move to next pixel to draw , according to Bresenham ' s algorithm <nl> + void bresenham_ellipse_step ( int rx , int ry , int * px , int * py ) <nl> + { <nl> + int & x = * px , & y = * py ; <nl> + / / Move towards the skinnier pole . Having 2 cases isn ' t needed , but it ensures <nl> + / / swapping rx and ry is the same as rotating 90 degrees . <nl> + if ( rx > ry ) { <nl> + int ex = bresenham_ellipse_error ( rx , ry , x , y - 1 ) ; <nl> + int ey = bresenham_ellipse_error ( rx , ry , x + 1 , y ) ; <nl> + int exy = bresenham_ellipse_error ( rx , ry , x + 1 , y - 1 ) ; <nl> + if ( ex + exy < 0 ) + + x ; <nl> + if ( ey + exy > 0 ) - - y ; <nl> + } <nl> + else { <nl> + int ex = bresenham_ellipse_error ( rx , ry , x , y + 1 ) ; <nl> + int ey = bresenham_ellipse_error ( rx , ry , x - 1 , y ) ; <nl> + int exy = bresenham_ellipse_error ( rx , ry , x - 1 , y + 1 ) ; <nl> + if ( ex + exy > 0 ) - - x ; <nl> + if ( ey + exy < 0 ) + + y ; <nl> + } <nl> + } <nl> + } <nl> + <nl> / * Helper function for the ellipse drawing routines . Calculates the <nl> points of an ellipse which fits onto the rectangle specified by x1 , <nl> y1 , x2 and y2 , and calls the specified routine for each one . The <nl> void algo_line ( int x1 , int y1 , int x2 , int y2 , void * data , AlgoPixel proc ) <nl> void algo_ellipse ( int x1 , int y1 , int x2 , int y2 , void * data , AlgoPixel proc ) <nl> { <nl> int mx , my , rx , ry ; <nl> - <nl> - int err ; <nl> - int xx , yy ; <nl> - int xa , ya ; <nl> int x , y ; <nl> <nl> / * Cheap hack to get elllipses with integer diameter , by just offsetting <nl> void algo_ellipse ( int x1 , int y1 , int x2 , int y2 , void * data , AlgoPixel proc ) <nl> proc ( mx - rx , my2 , data ) ; <nl> } <nl> <nl> - xx = rx * rx ; <nl> - yy = ry * ry ; <nl> - <nl> - / * Do the ' x direction ' part of the arc . * / <nl> - <nl> - x = 0 ; <nl> - y = ry ; <nl> - xa = 0 ; <nl> - ya = xx * 2 * ry ; <nl> - err = xx / 4 - xx * ry ; <nl> + / * Initialize drawing position at a pole . * / <nl> + bresenham_ellipse_init ( rx , ry , & x , & y ) ; <nl> <nl> for ( ; ; ) { <nl> - err + = xa + yy ; <nl> - if ( err > = 0 ) { <nl> - ya - = xx * 2 ; <nl> - err - = ya ; <nl> - y - - ; <nl> - } <nl> - xa + = yy * 2 ; <nl> - x + + ; <nl> - if ( xa > = ya ) <nl> - break ; <nl> + / * Step to the next pixel to draw . * / <nl> + bresenham_ellipse_step ( rx , ry , & x , & y ) ; <nl> <nl> - proc ( mx2 + x , my - y , data ) ; <nl> - proc ( mx - x , my - y , data ) ; <nl> - proc ( mx2 + x , my2 + y , data ) ; <nl> - proc ( mx - x , my2 + y , data ) ; <nl> - } <nl> - <nl> - / * Fill in missing pixels for very thin ellipses . 
( This is caused because <nl> - * we always take 1 - pixel steps above , and thus might jump past the actual <nl> - * ellipse line . ) <nl> - * / <nl> - if ( y = = 0 ) <nl> - while ( x < rx ) { <nl> - proc ( mx2 + x , my - 1 , data ) ; <nl> - proc ( mx2 + x , my2 + 1 , data ) ; <nl> - proc ( mx - x , my - 1 , data ) ; <nl> - proc ( mx - x , my2 + 1 , data ) ; <nl> - x + + ; <nl> - } <nl> + / * Edge conditions * / <nl> + if ( y = = 0 & & x < rx ) + + y ; / / don ' t move to horizontal radius except at pole <nl> + if ( x = = 0 & & y < ry ) + + x ; / / don ' t move to vertical radius except at pole <nl> + if ( y < = 0 | | x < = 0 ) break ; / / stop before pole , since it ' s already drawn <nl> <nl> - / * Do the ' y direction ' part of the arc . * / <nl> - <nl> - x = rx ; <nl> - y = 0 ; <nl> - xa = yy * 2 * rx ; <nl> - ya = 0 ; <nl> - err = yy / 4 - yy * rx ; <nl> - <nl> - for ( ; ; ) { <nl> - err + = ya + xx ; <nl> - if ( err > = 0 ) { <nl> - xa - = yy * 2 ; <nl> - err - = xa ; <nl> - x - - ; <nl> - } <nl> - ya + = xx * 2 ; <nl> - y + + ; <nl> - if ( ya > xa ) <nl> - break ; <nl> + / * Process pixel * / <nl> proc ( mx2 + x , my - y , data ) ; <nl> proc ( mx - x , my - y , data ) ; <nl> proc ( mx2 + x , my2 + y , data ) ; <nl> proc ( mx - x , my2 + y , data ) ; <nl> } <nl> - <nl> - / * See comment above . * / <nl> - if ( x = = 0 ) <nl> - while ( y < ry ) { <nl> - proc ( mx - 1 , my - y , data ) ; <nl> - proc ( mx2 + 1 , my - y , data ) ; <nl> - proc ( mx - 1 , my2 + y , data ) ; <nl> - proc ( mx2 + 1 , my2 + y , data ) ; <nl> - y + + ; <nl> - } <nl> } <nl> <nl> void algo_ellipsefill ( int x1 , int y1 , int x2 , int y2 , void * data , AlgoHLine proc ) <nl> { <nl> int mx , my , rx , ry ; <nl> - <nl> - int err ; <nl> - int xx , yy ; <nl> - int xa , ya ; <nl> int x , y ; <nl> <nl> / * Cheap hack to get elllipses with integer diameter , by just offsetting <nl> void algo_ellipsefill ( int x1 , int y1 , int x2 , int y2 , void * data , AlgoHLine proc <nl> rx / = 2 ; <nl> ry / = 2 ; <nl> <nl> - / * Draw the 4 poles . * / <nl> - proc ( mx , my2 + ry , mx , data ) ; <nl> - proc ( mx , my - ry , mx , data ) ; <nl> - / * proc ( mx2 + rx , my , mx2 + rx , data ) ; * / <nl> - / * proc ( mx - rx , my , mx - rx , data ) ; * / <nl> - proc ( mx - rx , my , mx2 + rx , data ) ; <nl> - <nl> - / * For even diameter axis , double the poles . * / <nl> - if ( mx ! = mx2 ) { <nl> - proc ( mx2 , my2 + ry , mx2 , data ) ; <nl> - proc ( mx2 , my - ry , mx2 , data ) ; <nl> - } <nl> - <nl> - if ( my ! = my2 ) { <nl> - / * proc ( mx2 + rx , my2 , data ) ; * / <nl> - / * proc ( mx - rx , my2 , data ) ; * / <nl> - proc ( mx - rx , my2 , mx2 + rx , data ) ; <nl> - } <nl> + / * Draw the north and south poles ( possibly 2 pixels ) * / <nl> + proc ( mx , my2 + ry , mx2 , data ) ; <nl> + proc ( mx , my - ry , mx2 , data ) ; <nl> <nl> - xx = rx * rx ; <nl> - yy = ry * ry ; <nl> - <nl> - / * Do the ' x direction ' part of the arc . * / <nl> + / * Draw the equator ( possibly width 2 ) * / <nl> + proc ( mx - rx , my , mx2 + rx , data ) ; <nl> + if ( my ! = my2 ) proc ( mx - rx , my2 , mx2 + rx , data ) ; <nl> <nl> - x = 0 ; <nl> - y = ry ; <nl> - xa = 0 ; <nl> - ya = xx * 2 * ry ; <nl> - err = xx / 4 - xx * ry ; <nl> + / * Initialize drawing position at a pole . 
* / <nl> + bresenham_ellipse_init ( rx , ry , & x , & y ) ; <nl> <nl> for ( ; ; ) { <nl> - err + = xa + yy ; <nl> - if ( err > = 0 ) { <nl> - ya - = xx * 2 ; <nl> - err - = ya ; <nl> - y - - ; <nl> - } <nl> - xa + = yy * 2 ; <nl> - x + + ; <nl> - if ( xa > = ya ) <nl> - break ; <nl> - <nl> - / * proc ( mx2 + x , my - y , data ) ; * / <nl> - / * proc ( mx - x , my - y , data ) ; * / <nl> - / * proc ( mx2 + x , my2 + y , data ) ; * / <nl> - / * proc ( mx - x , my2 + y , data ) ; * / <nl> - proc ( mx - x , my - y , mx2 + x , data ) ; <nl> - proc ( mx - x , my2 + y , mx2 + x , data ) ; <nl> - } <nl> - <nl> - / * Fill in missing pixels for very thin ellipses . ( This is caused because <nl> - * we always take 1 - pixel steps above , and thus might jump past the actual <nl> - * ellipse line . ) <nl> - * / <nl> - if ( y = = 0 ) <nl> - while ( x < rx ) { <nl> - / * proc ( mx2 + x , my - 1 , data ) ; * / <nl> - / * proc ( mx2 + x , my2 + 1 , data ) ; * / <nl> - / * proc ( mx - x , my - 1 , data ) ; * / <nl> - / * proc ( mx - x , my2 + 1 , data ) ; * / <nl> - x + + ; <nl> - } <nl> + / * Step to the next pixel to draw . * / <nl> + bresenham_ellipse_step ( rx , ry , & x , & y ) ; <nl> <nl> - / * Do the ' y direction ' part of the arc . * / <nl> + / * Edge conditions * / <nl> + if ( y = = 0 & & x < rx ) + + y ; / / don ' t move to horizontal radius except at pole <nl> + if ( x = = 0 & & y < ry ) + + x ; / / don ' t move to vertical radius except at pole <nl> + if ( y < = 0 | | x < = 0 ) break ; / / stop before pole , since it ' s already drawn <nl> <nl> - x = rx ; <nl> - y = 0 ; <nl> - xa = yy * 2 * rx ; <nl> - ya = 0 ; <nl> - err = yy / 4 - yy * rx ; <nl> - <nl> - for ( ; ; ) { <nl> - err + = ya + xx ; <nl> - if ( err > = 0 ) { <nl> - xa - = yy * 2 ; <nl> - err - = xa ; <nl> - x - - ; <nl> - } <nl> - ya + = xx * 2 ; <nl> - y + + ; <nl> - if ( ya > xa ) <nl> - break ; <nl> - / * proc ( mx2 + x , my - y , data ) ; * / <nl> - / * proc ( mx - x , my - y , data ) ; * / <nl> - / * proc ( mx2 + x , my2 + y , data ) ; * / <nl> - / * proc ( mx - x , my2 + y , data ) ; * / <nl> + / * Draw the north and south ' lines of latitude ' * / <nl> proc ( mx - x , my - y , mx2 + x , data ) ; <nl> proc ( mx - x , my2 + y , mx2 + x , data ) ; <nl> } <nl> - <nl> - / * See comment above . * / <nl> - if ( x = = 0 ) <nl> - while ( y < ry ) { <nl> - / * proc ( mx - 1 , my - y , data ) ; * / <nl> - / * proc ( mx2 + 1 , my - y , data ) ; * / <nl> - / * proc ( mx - 1 , my2 + y , data ) ; * / <nl> - / * proc ( mx2 + 1 , my2 + y , data ) ; * / <nl> - y + + ; <nl> - } <nl> } <nl> <nl> / / Algorightm from Allegro ( allegro / src / spline . c ) <nl> | Merge pull request from yuxshao / fix - ellipse - gaps | aseprite/aseprite | 97251969f63aa42cf3b6154da68ed13eb4cf4c18 | 2018-02-22T16:47:39Z |
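The ellipse fix above replaces two hand-rolled arc loops with shared error/step helpers. The sketch below is a runnable, self-contained trace of one quadrant using the same integer error function from the diff (zero on the ideal curve, negative inside, positive outside); it hardcodes rx > ry and omits the diff's extra edge handling near the axes.

```cpp
#include <cstdio>

// Integer ellipse error, exactly as in bresenham_ellipse_error() above:
// zero on x^2/rx^2 + y^2/ry^2 = 1, negative inside, positive outside.
static int err(int rx, int ry, int x, int y) {
  return x * x * ry * ry + y * y * rx * rx - rx * rx * ry * ry;
}

int main() {
  const int rx = 8, ry = 3;  // demo assumes rx > ry (start at fatter pole)
  int x = 0, y = ry;
  printf("(%d,%d)\n", x, y);
  while (y > 0) {
    // Candidate moves: down, right, and diagonal, as in
    // bresenham_ellipse_step() for the rx > ry case.
    int ex  = err(rx, ry, x, y - 1);
    int ey  = err(rx, ry, x + 1, y);
    int exy = err(rx, ry, x + 1, y - 1);
    if (ex + exy < 0) ++x;
    if (ey + exy > 0) --y;
    printf("(%d,%d)\n", x, y);
  }
  return 0;
}
```

Since ey always exceeds ex for y >= 1, at least one of the two moves fires on every iteration, so the trace terminates at (rx, 0) without overshooting.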
mmm a / src / mongo / db / auth / authorization_manager . cpp <nl> ppp b / src / mongo / db / auth / authorization_manager . cpp <nl> namespace mongo { <nl> <nl> bool AuthorizationManager : : _doesSupportOldStylePrivileges = true ; <nl> <nl> + / * * <nl> + * Guard object for synchronizing accesses to the user cache . This guard allows one thread to <nl> + * access the cache at a time , and provides an exception - safe mechanism for a thread to release <nl> + * the cache mutex while performing network or disk operations while allowing other readers <nl> + * to proceed . <nl> + * <nl> + * There are two ways to use this guard . One may simply instantiate the guard like a <nl> + * std : : lock_guard , and perform reads or writes of the cache . <nl> + * <nl> + * Alternatively , one may instantiate the guard , examine the cache , and then enter into an <nl> + * update mode by first wait ( ) ing until otherUpdateInFetchPhase ( ) is false , and then <nl> + * calling beginFetchPhase ( ) . At this point , other threads may acquire the guard in the simple <nl> + * manner and do reads , but other threads may not enter into a fetch phase . During the fetch <nl> + * phase , the thread should perform required network or disk activity to determine what update <nl> + * it will make to the cache . Then , it should call endFetchPhase ( ) , to reacquire the user cache <nl> + * mutex . At that point , the thread can make its modifications to the cache and let the guard <nl> + * go out of scope . <nl> + * <nl> + * All updates by guards using a fetch - phase are totally ordered with respect to one another , <nl> + * and all guards using no fetch phase are totally ordered with respect to one another , but <nl> + * there is not a total ordering among all guard objects . <nl> + * / <nl> + class AuthorizationManager : : CacheGuard { <nl> + MONGO_DISALLOW_COPYING ( CacheGuard ) ; <nl> + public : <nl> + enum FetchSynchronization { <nl> + fetchSynchronizationAutomatic , <nl> + fetchSynchronizationManual <nl> + } ; <nl> + <nl> + / * * <nl> + * Constructs a cache guard , locking the mutex that synchronizes user cache accesses . <nl> + * / <nl> + CacheGuard ( AuthorizationManager * authzManager , <nl> + const FetchSynchronization sync = fetchSynchronizationAutomatic ) : <nl> + _isThisGuardInFetchPhase ( false ) , <nl> + _authzManager ( authzManager ) , <nl> + _lock ( authzManager - > _userCacheMutex ) { <nl> + <nl> + if ( fetchSynchronizationAutomatic = = sync ) { <nl> + synchronizeWithFetchPhase ( ) ; <nl> + } <nl> + } <nl> + <nl> + / * * <nl> + * Releases the mutex that synchronizes user cache access , if held , and notifies <nl> + * any threads waiting for their own opportunity to update the user cache . <nl> + * / <nl> + ~ CacheGuard ( ) { <nl> + if ( ! _lock . owns_lock ( ) ) { <nl> + _lock . lock ( ) ; <nl> + } <nl> + if ( _isThisGuardInFetchPhase ) { <nl> + fassert ( 0 , _authzManager - > _isFetchPhaseBusy ) ; <nl> + _authzManager - > _isFetchPhaseBusy = false ; <nl> + _authzManager - > _fetchPhaseIsReady . notify_all ( ) ; <nl> + } <nl> + } <nl> + <nl> + bool otherUpdateInFetchPhase ( ) { return _authzManager - > _isFetchPhaseBusy ; } <nl> + <nl> + void wait ( ) { <nl> + _authzManager - > _fetchPhaseIsReady . wait ( _lock ) ; <nl> + } <nl> + <nl> + void beginFetchPhase ( ) { <nl> + fassert ( 0 , ! _authzManager - > _isFetchPhaseBusy ) ; <nl> + _isThisGuardInFetchPhase = true ; <nl> + _authzManager - > _isFetchPhaseBusy = true ; <nl> + _lock . 
unlock ( ) ; <nl> + } <nl> + <nl> + void endFetchPhase ( ) { <nl> + _lock . lock ( ) ; <nl> + } <nl> + <nl> + private : <nl> + void synchronizeWithFetchPhase ( ) { <nl> + while ( otherUpdateInFetchPhase ( ) ) <nl> + wait ( ) ; <nl> + fassert ( 0 , ! _authzManager - > _isFetchPhaseBusy ) ; <nl> + _isThisGuardInFetchPhase = true ; <nl> + _authzManager - > _isFetchPhaseBusy = true ; <nl> + } <nl> + <nl> + bool _isThisGuardInFetchPhase ; <nl> + AuthorizationManager * _authzManager ; <nl> + boost : : unique_lock < boost : : mutex > _lock ; <nl> + } ; <nl> + <nl> AuthorizationManager : : AuthorizationManager ( AuthzManagerExternalState * externalState ) : <nl> - _authEnabled ( false ) , _externalState ( externalState ) { <nl> + _authEnabled ( false ) , _externalState ( externalState ) , _isFetchPhaseBusy ( false ) { <nl> <nl> setAuthorizationVersion ( 2 ) ; <nl> } <nl> namespace mongo { <nl> } <nl> <nl> Status AuthorizationManager : : setAuthorizationVersion ( int version ) { <nl> - boost : : lock_guard < boost : : mutex > lk ( _lock ) ; <nl> + CacheGuard guard ( this ) ; <nl> <nl> if ( version ! = 1 & & version ! = 2 ) { <nl> return Status ( ErrorCodes : : UnsupportedFormat , <nl> namespace mongo { <nl> } <nl> <nl> int AuthorizationManager : : getAuthorizationVersion ( ) { <nl> - boost : : lock_guard < boost : : mutex > lk ( _lock ) ; <nl> + CacheGuard guard ( this , CacheGuard : : fetchSynchronizationManual ) ; <nl> return _getVersion_inlock ( ) ; <nl> } <nl> <nl> namespace mongo { <nl> } <nl> <nl> Status AuthorizationManager : : acquireUser ( const UserName & userName , User * * acquiredUser ) { <nl> - boost : : lock_guard < boost : : mutex > lk ( _lock ) ; <nl> - unordered_map < UserName , User * > : : iterator it = _userCache . find ( userName ) ; <nl> + unordered_map < UserName , User * > : : iterator it ; <nl> + <nl> + CacheGuard guard ( this , CacheGuard : : fetchSynchronizationManual ) ; <nl> + while ( ( _userCache . end ( ) = = ( it = _userCache . find ( userName ) ) ) & & <nl> + guard . otherUpdateInFetchPhase ( ) ) { <nl> + <nl> + guard . wait ( ) ; <nl> + } <nl> + <nl> if ( it ! = _userCache . end ( ) ) { <nl> fassert ( 16914 , it - > second ) ; <nl> fassert ( 17003 , it - > second - > isValid ( ) ) ; <nl> namespace mongo { <nl> " User " < < userName . getFullName ( ) < < " not found . " ) ; <nl> } <nl> <nl> - / / Put the new user into an auto_ptr temporarily in case there ' s an error while <nl> - / / initializing the user . <nl> - auto_ptr < User > userHolder ( new User ( userName ) ) ; <nl> - User * user = userHolder . get ( ) ; <nl> - <nl> + guard . beginFetchPhase ( ) ; <nl> BSONObj userObj ; <nl> Status status = getUserDescription ( userName , & userObj ) ; <nl> if ( ! status . isOK ( ) ) { <nl> return status ; <nl> } <nl> <nl> + / / Put the new user into an auto_ptr temporarily in case there ' s an error while <nl> + / / initializing the user . <nl> + auto_ptr < User > userHolder ( new User ( userName ) ) ; <nl> + User * user = userHolder . get ( ) ; <nl> + <nl> status = _initializeUserFromPrivilegeDocument ( user , userObj ) ; <nl> if ( ! status . isOK ( ) ) { <nl> return status ; <nl> } <nl> <nl> + guard . endFetchPhase ( ) ; <nl> user - > incrementRefCount ( ) ; <nl> _userCache . insert ( make_pair ( userName , userHolder . 
release ( ) ) ) ; <nl> * acquiredUser = user ; <nl> namespace mongo { <nl> return ; <nl> } <nl> <nl> - boost : : lock_guard < boost : : mutex > lk ( _lock ) ; <nl> + CacheGuard guard ( this , CacheGuard : : fetchSynchronizationManual ) ; <nl> user - > decrementRefCount ( ) ; <nl> if ( user - > getRefCount ( ) = = 0 ) { <nl> / / If it ' s been invalidated then it ' s not in the _userCache anymore . <nl> namespace mongo { <nl> } <nl> } <nl> <nl> - void AuthorizationManager : : invalidateUser ( User * user ) { <nl> - boost : : lock_guard < boost : : mutex > lk ( _lock ) ; <nl> - if ( ! user - > isValid ( ) ) { <nl> - return ; <nl> - } <nl> - <nl> - unordered_map < UserName , User * > : : iterator it = _userCache . find ( user - > getName ( ) ) ; <nl> - massert ( 17052 , <nl> - mongoutils : : str : : stream ( ) < < <nl> - " Invalidating cache for user " < < user - > getName ( ) . getFullName ( ) < < <nl> - " failed as it is not present in the user cache " , <nl> - it ! = _userCache . end ( ) & & it - > second = = user ) ; <nl> - _userCache . erase ( it ) ; <nl> - user - > invalidate ( ) ; <nl> - } <nl> - <nl> void AuthorizationManager : : invalidateUserByName ( const UserName & userName ) { <nl> - boost : : lock_guard < boost : : mutex > lk ( _lock ) ; <nl> - <nl> + CacheGuard guard ( this ) ; <nl> unordered_map < UserName , User * > : : iterator it = _userCache . find ( userName ) ; <nl> if ( it = = _userCache . end ( ) ) { <nl> return ; <nl> namespace mongo { <nl> } <nl> <nl> void AuthorizationManager : : invalidateUsersFromDB ( const std : : string & dbname ) { <nl> - boost : : lock_guard < boost : : mutex > lk ( _lock ) ; <nl> - <nl> + CacheGuard guard ( this ) ; <nl> unordered_map < UserName , User * > : : iterator it = _userCache . begin ( ) ; <nl> while ( it ! = _userCache . end ( ) ) { <nl> User * user = it - > second ; <nl> namespace mongo { <nl> <nl> <nl> void AuthorizationManager : : addInternalUser ( User * user ) { <nl> - boost : : lock_guard < boost : : mutex > lk ( _lock ) ; <nl> + CacheGuard guard ( this ) ; <nl> _userCache . insert ( make_pair ( user - > getName ( ) , user ) ) ; <nl> } <nl> <nl> void AuthorizationManager : : invalidateUserCache ( ) { <nl> - boost : : lock_guard < boost : : mutex > lk ( _lock ) ; <nl> + CacheGuard guard ( this ) ; <nl> _invalidateUserCache_inlock ( ) ; <nl> } <nl> <nl> namespace mongo { <nl> } <nl> <nl> Status AuthorizationManager : : _initializeAllV1UserData ( ) { <nl> - boost : : lock_guard < boost : : mutex > lk ( _lock ) ; <nl> + CacheGuard guard ( this ) ; <nl> _invalidateUserCache_inlock ( ) ; <nl> V1UserDocumentParser parser ; <nl> <nl> namespace mongo { <nl> if ( ! lkUpgrade . tryLock ( " Upgrade authorization data " ) ) { <nl> return Status ( ErrorCodes : : LockBusy , " Could not lock auth data upgrade process lock . " ) ; <nl> } <nl> - boost : : lock_guard < boost : : mutex > lkLocal ( _lock ) ; <nl> + CacheGuard guard ( this ) ; <nl> int durableVersion = 0 ; <nl> Status status = readAuthzVersion ( _externalState . get ( ) , & durableVersion ) ; <nl> if ( ! status . isOK ( ) ) <nl> namespace mongo { <nl> if ( ns = = rolesCollectionNamespace . ns ( ) | | <nl> ns = = adminCommandNamespace . ns ( ) | | <nl> ns = = usersCollectionNamespace . ns ( ) ) { <nl> - boost : : lock_guard < boost : : mutex > lk ( _lock ) ; <nl> + CacheGuard guard ( this ) ; <nl> if ( _getVersion_inlock ( ) = = 2 ) { <nl> _invalidateUserCache_inlock ( ) ; <nl> } <nl> mmm a / src / mongo / db / auth / authorization_manager . 
h <nl> ppp b / src / mongo / db / auth / authorization_manager . h <nl> <nl> <nl> # include < boost / function . hpp > <nl> # include < boost / scoped_ptr . hpp > <nl> + # include < boost / thread / condition_variable . hpp > <nl> # include < boost / thread / mutex . hpp > <nl> # include < string > <nl> <nl> namespace mongo { <nl> * / <nl> void releaseUser ( User * user ) ; <nl> <nl> - / * * <nl> - * Marks the given user as invalid and removes it from the user cache . <nl> - * / <nl> - void invalidateUser ( User * user ) ; <nl> - <nl> / * * <nl> * Marks the given user as invalid and removes it from the user cache . <nl> * / <nl> namespace mongo { <nl> const BSONObj * fullObj ) ; <nl> <nl> private : <nl> + / * * <nl> + * Type used to guard accesses and updates to the user cache . <nl> + * / <nl> + class CacheGuard ; <nl> + friend class AuthorizationManager : : CacheGuard ; <nl> <nl> / * * <nl> * Returns the current version number of the authorization system . Should only be called <nl> - * when holding _lock . <nl> + * when holding _userCacheMutex . <nl> * / <nl> int _getVersion_inlock ( ) const { return _version ; } <nl> <nl> / * * <nl> * Invalidates all User objects in the cache and removes them from the cache . <nl> - * Should only be called when already holding _lock . <nl> + * Should only be called when already holding _userCacheMutex . <nl> * / <nl> void _invalidateUserCache_inlock ( ) ; <nl> <nl> namespace mongo { <nl> * The current version is 2 . When upgrading to v2 . 6 or later from v2 . 4 or prior , the <nl> * version is 1 . After running the upgrade process to upgrade to the new privilege document <nl> * format , the version will be 2 . <nl> - * All reads / writes to _version must be done within _lock . <nl> + * All reads / writes to _version must be done within _userCacheMutex . <nl> * / <nl> int _version ; <nl> <nl> namespace mongo { <nl> unordered_map < UserName , User * > _userCache ; <nl> <nl> / * * <nl> - * Protects _userCache and _version . <nl> + * True if there is an update to the _userCache in progress , and that update is currently in <nl> + * the " fetch phase " , during which it does not hold the _userCacheMutex . <nl> + * <nl> + * Manipulated via CacheGuard . <nl> + * / <nl> + bool _isFetchPhaseBusy ; <nl> + <nl> + / * * <nl> + * Protects _userCache , _version and _isFetchPhaseBusy . Manipulated via CacheGuard . <nl> + * / <nl> + boost : : mutex _userCacheMutex ; <nl> + <nl> + / * * <nl> + * Condition used to signal that it is OK for another CacheGuard to enter a fetch phase . <nl> + * Manipulated via CacheGuard . <nl> * / <nl> - boost : : mutex _lock ; <nl> + boost : : condition_variable _fetchPhaseIsReady ; <nl> } ; <nl> <nl> } / / namespace mongo <nl> mmm a / src / mongo / db / auth / authorization_session_test . cpp <nl> ppp b / src / mongo / db / auth / authorization_session_test . cpp <nl> namespace { <nl> BSONObj ( ) ) ) ; <nl> <nl> / / Make sure that invalidating the user causes the session to reload its privileges . <nl> - authzManager - > invalidateUser ( user ) ; <nl> + authzManager - > invalidateUserByName ( user - > getName ( ) ) ; <nl> ASSERT_TRUE ( authzSession - > isAuthorizedForActionsOnResource ( <nl> testFooCollResource , ActionType : : find ) ) ; <nl> ASSERT_FALSE ( authzSession - > isAuthorizedForActionsOnResource ( <nl> namespace { <nl> / / Delete the user . <nl> managerState - > clearPrivilegeDocuments ( ) ; <nl> / / Make sure that invalidating the user causes the session to reload its privileges . 
<nl> - authzManager - > invalidateUser ( user ) ; <nl> + authzManager - > invalidateUserByName ( user - > getName ( ) ) ; <nl> ASSERT_FALSE ( authzSession - > isAuthorizedForActionsOnResource ( <nl> testFooCollResource , ActionType : : find ) ) ; <nl> ASSERT_FALSE ( authzSession - > isAuthorizedForActionsOnResource ( <nl> namespace { <nl> / / Even though the user ' s privileges have been reduced , since we ' ve configured user <nl> / / document lookup to fail , the authz session should continue to use its known out - of - date <nl> / / privilege data . <nl> - authzManager - > invalidateUser ( user ) ; <nl> + authzManager - > invalidateUserByName ( user - > getName ( ) ) ; <nl> ASSERT_TRUE ( authzSession - > isAuthorizedForActionsOnResource ( <nl> testFooCollResource , ActionType : : find ) ) ; <nl> ASSERT_TRUE ( authzSession - > isAuthorizedForActionsOnResource ( <nl> | SERVER - 10670 Allow acquireUser to drop the cache mutex while doing disk / network IO . | mongodb/mongo | 4e50e1e592e2109b7bf1a0fd6002cb0285916d0b | 2013-10-11T14:52:29Z |
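The CacheGuard comment in the diff already specifies the protocol; the sketch below restates it as a compilable, standalone class using std primitives instead of boost. All names are illustrative and this is a simplification, not MongoDB's implementation (it omits the fassert checks and the automatic fetch-synchronization mode).

```cpp
#include <condition_variable>
#include <mutex>

// Shared state guarded by the protocol described above.
struct Cache {
  std::mutex mu;                  // protects the cache and fetch_busy
  std::condition_variable ready;  // signaled when a fetch phase ends
  bool fetch_busy = false;
};

class Guard {
 public:
  explicit Guard(Cache& c) : cache_(c), lock_(c.mu) {}

  ~Guard() {
    if (!lock_.owns_lock()) lock_.lock();
    if (in_fetch_phase_) {  // let the next updater proceed
      cache_.fetch_busy = false;
      cache_.ready.notify_all();
    }
  }

  // Drop the mutex for slow disk/network work; readers may proceed, but no
  // other guard may enter its own fetch phase until this one finishes.
  void beginFetchPhase() {
    cache_.ready.wait(lock_, [&] { return !cache_.fetch_busy; });
    in_fetch_phase_ = true;
    cache_.fetch_busy = true;
    lock_.unlock();
  }

  // Reacquire the mutex before applying the update to the cache.
  void endFetchPhase() { lock_.lock(); }

 private:
  Cache& cache_;
  std::unique_lock<std::mutex> lock_;
  bool in_fetch_phase_ = false;
};
```

A writer examines the cache under the guard, calls beginFetchPhase() before any disk or network I/O, and applies its update after endFetchPhase(); plain readers just construct the guard and read.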
mmm a / configure . in <nl> ppp b / configure . in <nl> if test " $ host_vendor " = " apple " ; then <nl> AC_SUBST ( MACOSX_DEPLOYMENT_TARGET , " 10 . 4 " ) <nl> # need these in CFLAGS / CXXFLAGS so AC_CHECK_LIB works <nl> CFLAGS = " $ CFLAGS - I / opt / local / include " <nl> - CFLAGS = " $ CFLAGS - I / opt / local / include / freetype2 " <nl> - CFLAGS = " $ CFLAGS - I / opt / local / include / mysql5 " <nl> CXXFLAGS = " $ CXXFLAGS - I / opt / local / include " <nl> - CXXFLAGS = " $ CXXFLAGS - I / opt / local / include / freetype2 " <nl> - CXXFLAGS = " $ CXXFLAGS - I / opt / local / include / mysql5 " <nl> # standard xbmc paths <nl> - INCLUDES = " $ INCLUDES - I . " <nl> - INCLUDES = " $ INCLUDES - I \ $ ( abs_top_srcdir ) / xbmc " <nl> - INCLUDES = " $ INCLUDES - I \ $ ( abs_top_srcdir ) / xbmc / lib " <nl> INCLUDES = " $ INCLUDES - I \ $ ( abs_top_srcdir ) / xbmc / osx " <nl> - INCLUDES = " $ INCLUDES - I / opt / local / include " <nl> - INCLUDES = " $ INCLUDES - I / opt / local / include / freetype2 " <nl> - INCLUDES = " $ INCLUDES - I / opt / local / include / mysql5 " <nl> LDFLAGS = " $ LDFLAGS - mmacosx - version - min = 10 . 4 " <nl> LDFLAGS = " $ LDFLAGS - isysroot / Developer / SDKs / MacOSX10 . 4u . sdk " <nl> LDFLAGS = " $ LDFLAGS - framework IOKit " <nl> | [ osx ] remove redundent forced includes | xbmc/xbmc | 8f7394419253e27fd82db1c6d1cff4054d503c32 | 2010-01-16T04:40:20Z |
mmm a / test / core / iomgr / endpoint_tests . c <nl> ppp b / test / core / iomgr / endpoint_tests . c <nl> static void read_and_write_test_write_handler ( void * data , int success , <nl> size_t nslices ; <nl> <nl> if ( success ) { <nl> - for ( ; ; ) { <nl> - / * Need to do inline writes until they don ' t succeed synchronously or we <nl> - finish writing * / <nl> - state - > bytes_written + = state - > current_write_size ; <nl> - if ( state - > target_bytes - state - > bytes_written < <nl> - state - > current_write_size ) { <nl> - state - > current_write_size = state - > target_bytes - state - > bytes_written ; <nl> - } <nl> - if ( state - > current_write_size = = 0 ) { <nl> - break ; <nl> - } <nl> - <nl> + state - > bytes_written + = state - > current_write_size ; <nl> + if ( state - > target_bytes - state - > bytes_written < <nl> + state - > current_write_size ) { <nl> + state - > current_write_size = state - > target_bytes - state - > bytes_written ; <nl> + } <nl> + if ( state - > current_write_size ! = 0 ) { <nl> slices = allocate_blocks ( state - > current_write_size , 8192 , & nslices , <nl> & state - > current_write_data ) ; <nl> gpr_slice_buffer_reset_and_unref ( & state - > outgoing ) ; <nl> static void read_and_write_test_write_handler ( void * data , int success , <nl> grpc_endpoint_write ( state - > write_ep , & state - > outgoing , & state - > done_write , <nl> call_list ) ; <nl> free ( slices ) ; <nl> + return ; <nl> } <nl> - GPR_ASSERT ( state - > bytes_written = = state - > target_bytes ) ; <nl> } <nl> <nl> gpr_log ( GPR_INFO , " Write handler done " ) ; <nl> mmm a / test / core / iomgr / tcp_server_posix_test . c <nl> ppp b / test / core / iomgr / tcp_server_posix_test . c <nl> static void test_connect ( int n ) { <nl> <nl> gpr_mu_unlock ( GRPC_POLLSET_MU ( & g_pollset ) ) ; <nl> <nl> - grpc_tcp_server_destroy ( s , NULL , NULL ) ; <nl> + grpc_tcp_server_destroy ( s , NULL , & call_list ) ; <nl> + grpc_call_list_run ( & call_list ) ; <nl> } <nl> <nl> static void destroy_pollset ( void * p , int success , grpc_call_list * call_list ) { <nl> mmm a / test / core / security / secure_endpoint_test . c <nl> ppp b / test / core / security / secure_endpoint_test . c <nl> static grpc_endpoint_test_config configs [ ] = { <nl> <nl> static void inc_call_ctr ( void * arg , int success , grpc_call_list * call_list ) { <nl> + + * ( int * ) arg ; <nl> - ; <nl> } <nl> <nl> static void test_leftover ( grpc_endpoint_test_config config , size_t slice_size ) { <nl> static void test_leftover ( grpc_endpoint_test_config config , size_t slice_size ) { <nl> <nl> gpr_slice_buffer_init ( & incoming ) ; <nl> grpc_closure_init ( & done_closure , inc_call_ctr , & n ) ; <nl> - grpc_endpoint_read ( f . client_ep , & incoming , NULL , & call_list ) ; <nl> + grpc_endpoint_read ( f . client_ep , & incoming , & done_closure , & call_list ) ; <nl> grpc_call_list_run ( & call_list ) ; <nl> GPR_ASSERT ( n = = 1 ) ; <nl> GPR_ASSERT ( incoming . count = = 1 ) ; <nl> | Fix tests | grpc/grpc | d9fdaf204cbca6472f5588eba83f37c01bb6c8a4 | 2015-09-22T15:31:23Z |
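The endpoint-test fix above converts a loop that assumed writes complete synchronously into a handler that schedules at most one asynchronous write per invocation and lets the completion callback re-enter it. A hedged sketch of that shape, with a stand-in async_write hook instead of grpc_endpoint_write:

```cpp
#include <cstddef>
#include <functional>

// Illustrative stand-ins for the grpc test types; not the real API.
struct WriteState {
  size_t bytes_written = 0;
  size_t target_bytes = 0;
  size_t current_write_size = 0;
  // Queues one write and later invokes the callback with success/failure.
  std::function<void(size_t nbytes, std::function<void(bool)> done)> async_write;
};

void write_handler(WriteState* s, bool success) {
  if (success) {
    s->bytes_written += s->current_write_size;
    if (s->target_bytes - s->bytes_written < s->current_write_size) {
      s->current_write_size = s->target_bytes - s->bytes_written;
    }
    if (s->current_write_size != 0) {
      // Schedule exactly one write and return; completion re-enters us.
      s->async_write(s->current_write_size,
                     [s](bool ok) { write_handler(s, ok); });
      return;  // the key change vs. the old inline for(;;) loop
    }
  }
  // Finished (or the write failed): signal "write handler done" here.
}
```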
mmm a / editor / editor_settings . cpp <nl> ppp b / editor / editor_settings . cpp <nl> void EditorSettings : : _load_defaults ( Ref < ConfigFile > p_extra_config ) { <nl> set ( " editors / 3d / emulate_3_button_mouse " , false ) ; <nl> set ( " editors / 3d / warped_mouse_panning " , true ) ; <nl> <nl> - set ( " editors / 3d / freelook_base_speed " , 5 ) ; <nl> - set ( " editors / 3d / freelook_acceleration " , 10 ) ; <nl> - set ( " editors / 3d / freelook_max_speed " , 100 ) ; <nl> - set ( " editors / 3d / freelook_modifier_speed_factor " , 1 . 0 / 5 . 0 ) ; <nl> + set ( " editors / 3d / freelook_base_speed " , 1 ) ; <nl> + set ( " editors / 3d / freelook_modifier_speed_factor " , 5 . 0 ) ; <nl> <nl> set ( " editors / 2d / bone_width " , 5 ) ; <nl> set ( " editors / 2d / bone_color1 " , Color ( 1 . 0 , 1 . 0 , 1 . 0 , 0 . 9 ) ) ; <nl> mmm a / editor / plugins / spatial_editor_plugin . cpp <nl> ppp b / editor / plugins / spatial_editor_plugin . cpp <nl> <nl> / / # define GIZMO_SCALE_DEFAULT 0 . 28 <nl> # define GIZMO_SCALE_DEFAULT 0 . 15 <nl> <nl> + # define ZOOM_MIN_DISTANCE 0 . 001 <nl> + # define ZOOM_MULTIPLIER 1 . 08 <nl> + # define ZOOM_INDICATOR_DELAY_S 1 . 5 <nl> + <nl> + # define FREELOOK_MIN_SPEED 0 . 1 <nl> + <nl> void SpatialEditorViewport : : _update_camera ( ) { <nl> if ( orthogonal ) { <nl> / / camera - > set_orthogonal ( size . width * cursor . distance , get_znear ( ) , get_zfar ( ) ) ; <nl> void SpatialEditorViewport : : _sinput ( const InputEvent & p_event ) { <nl> switch ( b . button_index ) { <nl> <nl> case BUTTON_WHEEL_UP : { <nl> - <nl> - cursor . distance / = 1 . 08 ; <nl> - if ( cursor . distance < 0 . 001 ) <nl> - cursor . distance = 0 . 001 ; <nl> - <nl> + scale_cursor_distance ( is_freelook_active ( ) ? ZOOM_MULTIPLIER : 1 . 0 / ZOOM_MULTIPLIER ) ; <nl> } break ; <nl> - case BUTTON_WHEEL_DOWN : { <nl> - <nl> - if ( cursor . distance < 0 . 001 ) <nl> - cursor . distance = 0 . 001 ; <nl> - cursor . distance * = 1 . 08 ; <nl> <nl> + case BUTTON_WHEEL_DOWN : { <nl> + scale_cursor_distance ( is_freelook_active ( ) ? 1 . 0 / ZOOM_MULTIPLIER : ZOOM_MULTIPLIER ) ; <nl> } break ; <nl> + <nl> case BUTTON_RIGHT : { <nl> <nl> NavigationScheme nav_scheme = ( NavigationScheme ) EditorSettings : : get_singleton ( ) - > get ( " editors / 3d / navigation_scheme " ) . operator int ( ) ; <nl> void SpatialEditorViewport : : _sinput ( const InputEvent & p_event ) { <nl> / / VisualServer : : get_singleton ( ) - > poly_clear ( indicators ) ; <nl> set_message ( TTR ( " Transform Aborted . " ) , 3 ) ; <nl> } <nl> + <nl> + freelook_active = b . pressed ; <nl> + <nl> } break ; <nl> case BUTTON_MIDDLE : { <nl> <nl> void SpatialEditorViewport : : _sinput ( const InputEvent & p_event ) { <nl> <nl> surface - > update ( ) ; <nl> } <nl> + <nl> } break ; <nl> } <nl> } break ; <nl> void SpatialEditorViewport : : _sinput ( const InputEvent & p_event ) { <nl> String n = _edit . gizmo - > get_handle_name ( _edit . gizmo_handle ) ; <nl> set_message ( n + " : " + String ( v ) ) ; <nl> <nl> - } else if ( m . button_mask & 1 ) { <nl> + } else if ( m . button_mask & BUTTON_MASK_LEFT ) { <nl> <nl> if ( nav_scheme = = NAVIGATION_MAYA & & m . mod . alt ) { <nl> nav_mode = NAVIGATION_ORBIT ; <nl> void SpatialEditorViewport : : _sinput ( const InputEvent & p_event ) { <nl> } <nl> } <nl> <nl> - } else if ( m . button_mask & 2 ) { <nl> + } else if ( m . button_mask & BUTTON_MASK_RIGHT ) { <nl> <nl> if ( nav_scheme = = NAVIGATION_MAYA & & m . mod . 
alt ) { <nl> nav_mode = NAVIGATION_ZOOM ; <nl> void SpatialEditorViewport : : _sinput ( const InputEvent & p_event ) { <nl> nav_mode = NAVIGATION_LOOK ; <nl> } <nl> <nl> - } else if ( m . button_mask & 4 ) { <nl> + } else if ( m . button_mask & BUTTON_MASK_MIDDLE ) { <nl> <nl> if ( nav_scheme = = NAVIGATION_GODOT ) { <nl> <nl> void SpatialEditorViewport : : _sinput ( const InputEvent & p_event ) { <nl> NavigationZoomStyle zoom_style = ( NavigationZoomStyle ) EditorSettings : : get_singleton ( ) - > get ( " editors / 3d / zoom_style " ) . operator int ( ) ; <nl> if ( zoom_style = = NAVIGATION_ZOOM_HORIZONTAL ) { <nl> if ( m . relative_x > 0 ) <nl> - cursor . distance * = 1 - m . relative_x * zoom_speed ; <nl> + scale_cursor_distance ( 1 - m . relative_x * zoom_speed ) ; <nl> else if ( m . relative_x < 0 ) <nl> - cursor . distance / = 1 + m . relative_x * zoom_speed ; <nl> + scale_cursor_distance ( 1 . 0 / ( 1 + m . relative_x * zoom_speed ) ) ; <nl> } else { <nl> if ( m . relative_y > 0 ) <nl> - cursor . distance * = 1 + m . relative_y * zoom_speed ; <nl> + scale_cursor_distance ( 1 + m . relative_y * zoom_speed ) ; <nl> else if ( m . relative_y < 0 ) <nl> - cursor . distance / = 1 - m . relative_y * zoom_speed ; <nl> + scale_cursor_distance ( 1 . 0 / ( 1 - m . relative_y * zoom_speed ) ) ; <nl> } <nl> <nl> } break ; <nl> void SpatialEditorViewport : : _sinput ( const InputEvent & p_event ) { <nl> } <nl> } <nl> <nl> + void SpatialEditorViewport : : scale_cursor_distance ( real_t scale ) { <nl> + <nl> + / / Prevents zero distance which would short - circuit any scaling <nl> + if ( cursor . distance < ZOOM_MIN_DISTANCE ) <nl> + cursor . distance = ZOOM_MIN_DISTANCE ; <nl> + <nl> + real_t prev_distance = cursor . distance ; <nl> + cursor . distance * = scale ; <nl> + <nl> + if ( cursor . distance < ZOOM_MIN_DISTANCE ) <nl> + cursor . distance = ZOOM_MIN_DISTANCE ; <nl> + <nl> + if ( is_freelook_active ( ) ) { <nl> + / / In freelook mode , cursor reference is reversed so it needs to be adjusted <nl> + Vector3 forward = camera - > get_transform ( ) . basis . xform ( Vector3 ( 0 , 0 , - 1 ) ) ; <nl> + cursor . pos + = ( cursor . distance - prev_distance ) * forward ; <nl> + } <nl> + <nl> + zoom_indicator_delay = ZOOM_INDICATOR_DELAY_S ; <nl> + surface - > update ( ) ; <nl> + } <nl> + <nl> Point2i SpatialEditorViewport : : _get_warped_mouse_motion ( const InputEventMouseMotion & p_ev_mouse_motion ) const { <nl> Point2i relative ; <nl> if ( bool ( EditorSettings : : get_singleton ( ) - > get ( " editors / 3d / warped_mouse_panning " ) ) ) { <nl> Point2i SpatialEditorViewport : : _get_warped_mouse_motion ( const InputEventMouseMot <nl> <nl> void SpatialEditorViewport : : _update_freelook ( real_t delta ) { <nl> <nl> - const Input & input = * Input : : get_singleton ( ) ; <nl> - <nl> - if ( ! input . is_mouse_button_pressed ( BUTTON_RIGHT ) ) <nl> + if ( ! is_freelook_active ( ) ) <nl> return ; <nl> <nl> Vector3 forward = camera - > get_transform ( ) . basis . xform ( Vector3 ( 0 , 0 , - 1 ) ) ; <nl> Vector3 right = camera - > get_transform ( ) . basis . xform ( Vector3 ( 1 , 0 , 0 ) ) ; <nl> - Vector3 up = Vector3 ( 0 , 1 , 0 ) ; <nl> + Vector3 up = camera - > get_transform ( ) . basis . xform ( Vector3 ( 0 , 1 , 0 ) ) ; <nl> <nl> int key_left = ED_SHORTCUT ( " spatial_editor / freelook_left " , TTR ( " Freelook Left " ) , KEY_A ) - > get_shortcut ( ) . key . 
scancode ; <nl> int key_right = ED_SHORTCUT ( " spatial_editor / freelook_right " , TTR ( " Freelook Right " ) , KEY_D ) - > get_shortcut ( ) . key . scancode ; <nl> void SpatialEditorViewport : : _update_freelook ( real_t delta ) { <nl> bool pressed = false ; <nl> bool speed_modifier = false ; <nl> <nl> + const Input & input = * Input : : get_singleton ( ) ; <nl> + <nl> if ( input . is_key_pressed ( key_left ) ) { <nl> velocity - = right ; <nl> pressed = true ; <nl> void SpatialEditorViewport : : _update_freelook ( real_t delta ) { <nl> speed_modifier = true ; <nl> } <nl> <nl> - const EditorSettings & s = * EditorSettings : : get_singleton ( ) ; <nl> + if ( pressed ) { <nl> + const EditorSettings & s = * EditorSettings : : get_singleton ( ) ; <nl> + const real_t base_speed = s . get ( " editors / 3d / freelook_base_speed " ) ; <nl> + const real_t modifier_speed_factor = s . get ( " editors / 3d / freelook_modifier_speed_factor " ) ; <nl> <nl> - real_t base_speed = s . get ( " editors / 3d / freelook_base_speed " ) ; <nl> - real_t acceleration = s . get ( " editors / 3d / freelook_acceleration " ) ; <nl> - real_t max_speed = s . get ( " editors / 3d / freelook_max_speed " ) ; <nl> - real_t modifier_speed_factor = s . get ( " editors / 3d / freelook_modifier_speed_factor " ) ; <nl> + real_t speed = base_speed * cursor . distance ; <nl> + if ( speed_modifier ) <nl> + speed * = modifier_speed_factor ; <nl> <nl> - if ( pressed ) { <nl> velocity . normalize ( ) ; <nl> - freelook_speed + = acceleration * delta ; <nl> - if ( freelook_speed > max_speed ) <nl> - freelook_speed = max_speed ; <nl> - cursor . pos + = velocity * ( ( freelook_speed * ( speed_modifier ? modifier_speed_factor : 1 . 0 ) * delta ) ) ; <nl> - } else { <nl> - freelook_speed = base_speed ; <nl> + <nl> + cursor . pos + = velocity * ( speed * delta ) ; <nl> } <nl> } <nl> <nl> void SpatialEditorViewport : : _notification ( int p_what ) { <nl> } <nl> * / <nl> <nl> - _update_freelook ( get_tree ( ) - > get_idle_process_time ( ) ) ; <nl> + real_t delta = get_tree ( ) - > get_idle_process_time ( ) ; <nl> + <nl> + if ( zoom_indicator_delay > 0 ) { <nl> + zoom_indicator_delay - = delta ; <nl> + if ( zoom_indicator_delay < = 0 ) { <nl> + surface - > update ( ) ; <nl> + } <nl> + } <nl> + <nl> + _update_freelook ( delta ) ; <nl> <nl> _update_camera ( ) ; <nl> <nl> void SpatialEditorViewport : : _notification ( int p_what ) { <nl> } <nl> } <nl> <nl> + / / TODO That should be part of the drawing API . . . <nl> + static void stroke_rect ( CanvasItem * ci , Rect2 rect , Color color , real_t width = 1 . 0 ) { <nl> + <nl> + / / ammmb <nl> + / / | | <nl> + / / cmmmd <nl> + Vector2 a ( rect . pos ) ; <nl> + Vector2 b ( rect . pos . x + rect . size . x , rect . pos . y ) ; <nl> + Vector2 c ( rect . pos . x , rect . pos . y + rect . size . y ) ; <nl> + Vector2 d ( rect . pos + rect . size ) ; <nl> + <nl> + ci - > draw_line ( a , b , color , width ) ; <nl> + ci - > draw_line ( b , d , color , width ) ; <nl> + ci - > draw_line ( d , c , color , width ) ; <nl> + ci - > draw_line ( c , a , color , width ) ; <nl> + } <nl> + <nl> void SpatialEditorViewport : : _draw ( ) { <nl> <nl> if ( surface - > has_focus ( ) ) { <nl> void SpatialEditorViewport : : _draw ( ) { <nl> <nl> draw_rect = Rect2 ( Vector2 ( ) , s ) . clip ( draw_rect ) ; <nl> <nl> - surface - > draw_line ( draw_rect . pos , draw_rect . pos + Vector2 ( draw_rect . size . x , 0 ) , Color ( 0 . 6 , 0 . 6 , 0 . 1 , 0 . 5 ) , 2 . 0 ) ; <nl> - surface - > draw_line ( draw_rect . 
pos + Vector2 ( draw_rect . size . x , 0 ) , draw_rect . pos + draw_rect . size , Color ( 0 . 6 , 0 . 6 , 0 . 1 , 0 . 5 ) , 2 . 0 ) ; <nl> - surface - > draw_line ( draw_rect . pos + draw_rect . size , draw_rect . pos + Vector2 ( 0 , draw_rect . size . y ) , Color ( 0 . 6 , 0 . 6 , 0 . 1 , 0 . 5 ) , 2 . 0 ) ; <nl> - surface - > draw_line ( draw_rect . pos , draw_rect . pos + Vector2 ( 0 , draw_rect . size . y ) , Color ( 0 . 6 , 0 . 6 , 0 . 1 , 0 . 5 ) , 2 . 0 ) ; <nl> + stroke_rect ( surface , draw_rect , Color ( 0 . 6 , 0 . 6 , 0 . 1 , 0 . 5 ) , 2 . 0 ) ; <nl> + <nl> + } else { <nl> + <nl> + if ( zoom_indicator_delay > 0 . 0 ) { <nl> + / / Show indicative zoom factor <nl> + <nl> + real_t min_distance = ZOOM_MIN_DISTANCE ; / / TODO Why not pick znear to limit zoom ? <nl> + real_t max_distance = camera - > get_zfar ( ) ; <nl> + real_t scale_length = ( max_distance - min_distance ) ; <nl> + <nl> + if ( Math : : abs ( scale_length ) > CMP_EPSILON ) { <nl> + real_t logscale_t = 1 . 0 - Math : : log ( 1 + cursor . distance - min_distance ) / Math : : log ( 1 + scale_length ) ; <nl> + <nl> + / / There is no real maximum distance so that factor can become negative , <nl> + / / Let ' s make it look asymptotic instead ( will decrease slower and slower ) . <nl> + if ( logscale_t < 0 . 25 ) <nl> + logscale_t = 0 . 25 * Math : : exp ( 4 . 0 * logscale_t - 1 . 0 ) ; <nl> + <nl> + Vector2 surface_size = surface - > get_size ( ) ; <nl> + real_t h = surface_size . y / 2 . 0 ; <nl> + real_t y = ( surface_size . y - h ) / 2 . 0 ; <nl> + <nl> + Rect2 r ( 10 , y , 6 , h ) ; <nl> + real_t sy = r . size . y * logscale_t ; <nl> + <nl> + surface - > draw_rect ( r , Color ( 1 , 1 , 1 , 0 . 2 ) ) ; <nl> + surface - > draw_rect ( Rect2 ( r . pos . x , r . pos . y + r . size . y - sy , r . size . x , sy ) , Color ( 1 , 1 , 1 , 0 . 6 ) ) ; <nl> + stroke_rect ( surface , r . grow ( 1 ) , Color ( 0 , 0 , 0 , 0 . 7 ) ) ; <nl> + } <nl> + } <nl> } <nl> } <nl> <nl> SpatialEditorViewport : : SpatialEditorViewport ( SpatialEditor * p_spatial_editor , Ed <nl> clicked_includes_current = false ; <nl> orthogonal = false ; <nl> message_time = 0 ; <nl> + zoom_indicator_delay = 0 . 0 ; <nl> <nl> spatial_editor = p_spatial_editor ; <nl> ViewportContainer * c = memnew ( ViewportContainer ) ; <nl> SpatialEditorViewport : : SpatialEditorViewport ( SpatialEditor * p_spatial_editor , Ed <nl> previewing = NULL ; <nl> preview = NULL ; <nl> gizmo_scale = 1 . 0 ; <nl> - freelook_speed = 0 ; <nl> + <nl> + freelook_active = false ; <nl> <nl> selection_menu = memnew ( PopupMenu ) ; <nl> add_child ( selection_menu ) ; <nl> void SpatialEditor : : update_transform_gizmo ( ) { <nl> gizmo . transform . origin = pcenter ; <nl> gizmo . transform . basis = gizmo_basis ; <nl> <nl> - for ( int i = 0 ; i < 4 ; i + + ) { <nl> + for ( int i = 0 ; i < VIEWPORTS_COUNT ; i + + ) { <nl> viewports [ i ] - > update_transform_gizmo_view ( ) ; <nl> } <nl> } <nl> void SpatialEditor : : set_state ( const Dictionary & p_state ) { <nl> Array vp = d [ " viewports " ] ; <nl> ERR_FAIL_COND ( vp . 
size ( ) > 4 ) ; <nl> <nl> - for ( int i = 0 ; i < 4 ; i + + ) { <nl> + for ( int i = 0 ; i < VIEWPORTS_COUNT ; i + + ) { <nl> viewports [ i ] - > set_state ( vp [ i ] ) ; <nl> } <nl> } <nl> void SpatialEditor : : _menu_item_pressed ( int p_option ) { <nl> } break ; <nl> case MENU_VIEW_USE_3_VIEWPORTS : { <nl> <nl> - for ( int i = 1 ; i < 4 ; i + + ) { <nl> + for ( int i = 1 ; i < VIEWPORTS_COUNT ; i + + ) { <nl> <nl> if ( i = = 1 ) <nl> viewports [ i ] - > hide ( ) ; <nl> void SpatialEditor : : _menu_item_pressed ( int p_option ) { <nl> } break ; <nl> case MENU_VIEW_USE_3_VIEWPORTS_ALT : { <nl> <nl> - for ( int i = 1 ; i < 4 ; i + + ) { <nl> + for ( int i = 1 ; i < VIEWPORTS_COUNT ; i + + ) { <nl> <nl> if ( i = = 1 ) <nl> viewports [ i ] - > hide ( ) ; <nl> void SpatialEditor : : _menu_item_pressed ( int p_option ) { <nl> } break ; <nl> case MENU_VIEW_USE_4_VIEWPORTS : { <nl> <nl> - for ( int i = 1 ; i < 4 ; i + + ) { <nl> + for ( int i = 1 ; i < VIEWPORTS_COUNT ; i + + ) { <nl> <nl> viewports [ i ] - > show ( ) ; <nl> } <nl> void SpatialEditor : : _finish_indicators ( ) { <nl> VisualServer : : get_singleton ( ) - > free ( cursor_mesh ) ; <nl> } <nl> <nl> + bool SpatialEditor : : is_any_freelook_active ( ) const { <nl> + for ( unsigned int i = 0 ; i < VIEWPORTS_COUNT ; + + i ) { <nl> + if ( viewports [ i ] - > is_freelook_active ( ) ) <nl> + return true ; <nl> + } <nl> + return false ; <nl> + } <nl> + <nl> void SpatialEditor : : _unhandled_key_input ( InputEvent p_event ) { <nl> <nl> if ( ! is_visible_in_tree ( ) | | get_viewport ( ) - > gui_has_modal_stack ( ) ) <nl> void SpatialEditor : : _unhandled_key_input ( InputEvent p_event ) { <nl> <nl> case InputEvent : : KEY : { <nl> <nl> - const InputEventKey & k = p_event . key ; <nl> + / / Note : need to check is_echo because first person movement keys might still be held <nl> + if ( ! is_any_freelook_active ( ) & & ! p_event . is_echo ( ) ) { <nl> <nl> - if ( ! k . pressed ) <nl> - break ; <nl> + const InputEventKey & k = p_event . key ; <nl> <nl> - switch ( k . scancode ) { <nl> + if ( ! k . pressed ) <nl> + break ; <nl> <nl> - case KEY_Q : _menu_item_pressed ( MENU_TOOL_SELECT ) ; break ; <nl> - case KEY_W : _menu_item_pressed ( MENU_TOOL_MOVE ) ; break ; <nl> - case KEY_E : _menu_item_pressed ( MENU_TOOL_ROTATE ) ; break ; <nl> - case KEY_R : _menu_item_pressed ( MENU_TOOL_SCALE ) ; break ; <nl> + if ( ED_IS_SHORTCUT ( " spatial_editor / tool_select " , p_event ) ) <nl> + _menu_item_pressed ( MENU_TOOL_SELECT ) ; <nl> <nl> - case KEY_Z : { <nl> + else if ( ED_IS_SHORTCUT ( " spatial_editor / tool_move " , p_event ) ) <nl> + _menu_item_pressed ( MENU_TOOL_MOVE ) ; <nl> + <nl> + else if ( ED_IS_SHORTCUT ( " spatial_editor / tool_rotate " , p_event ) ) <nl> + _menu_item_pressed ( MENU_TOOL_ROTATE ) ; <nl> + <nl> + else if ( ED_IS_SHORTCUT ( " spatial_editor / tool_scale " , p_event ) ) <nl> + _menu_item_pressed ( MENU_TOOL_SCALE ) ; <nl> + <nl> + else if ( ED_IS_SHORTCUT ( " spatial_editor / display_wireframe " , p_event ) ) { <nl> if ( k . mod . shift | | k . mod . control | | k . mod . command ) <nl> break ; <nl> <nl> void SpatialEditor : : _unhandled_key_input ( InputEvent p_event ) { <nl> } else { <nl> _menu_item_pressed ( MENU_VIEW_DISPLAY_WIREFRAME ) ; <nl> } <nl> - } break ; <nl> - <nl> - # if 0 <nl> - # endif <nl> + } <nl> } <nl> <nl> } break ; <nl> void SpatialEditor : : _toggle_maximize_view ( Object * p_viewport ) { <nl> <nl> if ( ! 
maximized ) { <nl> <nl> - for ( int i = 0 ; i < 4 ; i + + ) { <nl> + for ( int i = 0 ; i < VIEWPORTS_COUNT ; i + + ) { <nl> if ( i = = index ) <nl> viewports [ i ] - > set_area_as_parent_rect ( ) ; <nl> else <nl> void SpatialEditor : : _toggle_maximize_view ( Object * p_viewport ) { <nl> } <nl> } else { <nl> <nl> - for ( int i = 0 ; i < 4 ; i + + ) <nl> + for ( int i = 0 ; i < VIEWPORTS_COUNT ; i + + ) <nl> viewports [ i ] - > show ( ) ; <nl> <nl> if ( view_menu - > get_popup ( ) - > is_item_checked ( view_menu - > get_popup ( ) - > get_item_index ( MENU_VIEW_USE_1_VIEWPORT ) ) ) <nl> void SpatialEditor : : clear ( ) { <nl> settings_znear - > set_value ( EDITOR_DEF ( " editors / 3d / default_z_near " , 0 . 1 ) ) ; <nl> settings_zfar - > set_value ( EDITOR_DEF ( " editors / 3d / default_z_far " , 1500 . 0 ) ) ; <nl> <nl> - for ( int i = 0 ; i < 4 ; i + + ) { <nl> + for ( int i = 0 ; i < VIEWPORTS_COUNT ; i + + ) { <nl> viewports [ i ] - > reset ( ) ; <nl> } <nl> <nl> void SpatialEditor : : clear ( ) { <nl> } <nl> } <nl> <nl> - for ( int i = 0 ; i < 4 ; i + + ) { <nl> + for ( int i = 0 ; i < VIEWPORTS_COUNT ; i + + ) { <nl> <nl> viewports [ i ] - > view_menu - > get_popup ( ) - > set_item_checked ( view_menu - > get_popup ( ) - > get_item_index ( SpatialEditorViewport : : VIEW_AUDIO_LISTENER ) , i = = 0 ) ; <nl> viewports [ i ] - > viewport - > set_as_audio_listener ( i = = 0 ) ; <nl> SpatialEditor : : SpatialEditor ( EditorNode * p_editor ) { <nl> ED_SHORTCUT ( " spatial_editor / focus_selection " , TTR ( " Focus Selection " ) , KEY_F ) ; <nl> ED_SHORTCUT ( " spatial_editor / align_selection_with_view " , TTR ( " Align Selection With View " ) , KEY_MASK_ALT + KEY_MASK_CMD + KEY_F ) ; <nl> <nl> + ED_SHORTCUT ( " spatial_editor / tool_select " , TTR ( " Tool Select " ) , KEY_Q ) ; <nl> + ED_SHORTCUT ( " spatial_editor / tool_move " , TTR ( " Tool Move " ) , KEY_W ) ; <nl> + ED_SHORTCUT ( " spatial_editor / tool_rotate " , TTR ( " Tool Rotate " ) , KEY_E ) ; <nl> + ED_SHORTCUT ( " spatial_editor / tool_scale " , TTR ( " Tool Scale " ) , KEY_R ) ; <nl> + <nl> + ED_SHORTCUT ( " spatial_editor / display_wireframe " , TTR ( " Display Wireframe " ) , KEY_Z ) ; <nl> + <nl> PopupMenu * p ; <nl> <nl> transform_menu = memnew ( MenuButton ) ; <nl> SpatialEditor : : SpatialEditor ( EditorNode * p_editor ) { <nl> viewport_base = memnew ( Control ) ; <nl> shader_split - > add_child ( viewport_base ) ; <nl> viewport_base - > set_v_size_flags ( SIZE_EXPAND_FILL ) ; <nl> - for ( int i = 0 ; i < 4 ; i + + ) { <nl> + for ( int i = 0 ; i < VIEWPORTS_COUNT ; i + + ) { <nl> <nl> viewports [ i ] = memnew ( SpatialEditorViewport ( this , editor , i ) ) ; <nl> viewports [ i ] - > connect ( " toggle_maximize_view " , this , " _toggle_maximize_view " ) ; <nl> mmm a / editor / plugins / spatial_editor_plugin . h <nl> ppp b / editor / plugins / spatial_editor_plugin . 
h <nl> class SpatialEditorViewport : public Control { <nl> bool transforming ; <nl> bool orthogonal ; <nl> float gizmo_scale ; <nl> - real_t freelook_speed ; <nl> + <nl> + bool freelook_active ; <nl> <nl> struct _RayResult { <nl> <nl> class SpatialEditorViewport : public Control { <nl> } <nl> } cursor ; <nl> <nl> + void scale_cursor_distance ( real_t scale ) ; <nl> + <nl> + real_t zoom_indicator_delay ; <nl> + <nl> RID move_gizmo_instance [ 3 ] , rotate_gizmo_instance [ 3 ] ; <nl> <nl> String last_message ; <nl> class SpatialEditorViewport : public Control { <nl> void set_state ( const Dictionary & p_state ) ; <nl> Dictionary get_state ( ) const ; <nl> void reset ( ) ; <nl> + bool is_freelook_active ( ) const { return freelook_active ; } <nl> <nl> void focus_selection ( ) ; <nl> <nl> class SpatialEditor : public VBoxContainer { <nl> } ; <nl> <nl> private : <nl> + static const unsigned int VIEWPORTS_COUNT = 4 ; <nl> + <nl> EditorNode * editor ; <nl> EditorSelection * editor_selection ; <nl> <nl> Control * viewport_base ; <nl> - SpatialEditorViewport * viewports [ 4 ] ; <nl> + SpatialEditorViewport * viewports [ VIEWPORTS_COUNT ] ; <nl> VSplitContainer * shader_split ; <nl> HSplitContainer * palette_split ; <nl> <nl> class SpatialEditor : public VBoxContainer { <nl> void _update_default_light_angle ( ) ; <nl> void _default_light_angle_input ( const InputEvent & p_event ) ; <nl> <nl> + bool is_any_freelook_active ( ) const ; <nl> + <nl> protected : <nl> void _notification ( int p_what ) ; <nl> / / void _gui_input ( InputEvent p_event ) ; <nl> | Improved freelook | godotengine/godot | aaf9cacf5ff4cbe8c097efacf347f7bde9839e36 | 2017-05-08T00:57:20Z |
deleted file mode 100644 <nl> index ed58d643ac . . 0000000000 <nl> mmm a / change / react - native - windows - 2020 - 04 - 10 - 16 - 49 - 23 - redbox . json <nl> ppp / dev / null <nl> <nl> - { <nl> - " type " : " prerelease " , <nl> - " comment " : " Improve RedBox ux , and don ' t require elevation unless it ' s the first time running the build ( and need to enable dev mode ) " , <nl> - " packageName " : " react - native - windows " , <nl> - " email " : " asklar @ microsoft . com " , <nl> - " dependentChangeType " : " patch " , <nl> - " date " : " 2020 - 04 - 10T23 : 49 : 23 . 308Z " <nl> - } <nl> \ No newline at end of file <nl> mmm a / packages / E2ETest / package . json <nl> ppp b / packages / E2ETest / package . json <nl> <nl> " dependencies " : { <nl> " react " : " 16 . 9 . 0 " , <nl> " react - native " : " 0 . 61 . 5 " , <nl> - " react - native - windows " : " 0 . 0 . 0 - master . 30 " , <nl> + " react - native - windows " : " 0 . 0 . 0 - master . 31 " , <nl> " rnpm - plugin - windows " : " ^ 0 . 6 . 1 " <nl> } , <nl> " devDependencies " : { <nl> mmm a / packages / microsoft - reactnative - sampleapps / package . json <nl> ppp b / packages / microsoft - reactnative - sampleapps / package . json <nl> <nl> " dependencies " : { <nl> " react " : " 16 . 9 . 0 " , <nl> " react - native " : " 0 . 61 . 5 " , <nl> - " react - native - windows " : " 0 . 0 . 0 - master . 30 " , <nl> + " react - native - windows " : " 0 . 0 . 0 - master . 31 " , <nl> " rnpm - plugin - windows " : " ^ 0 . 6 . 1 " <nl> } , <nl> " devDependencies " : { <nl> mmm a / packages / playground / package . json <nl> ppp b / packages / playground / package . json <nl> <nl> " dependencies " : { <nl> " react " : " 16 . 9 . 0 " , <nl> " react - native " : " 0 . 61 . 5 " , <nl> - " react - native - windows " : " 0 . 0 . 0 - master . 30 " <nl> + " react - native - windows " : " 0 . 0 . 0 - master . 31 " <nl> } , <nl> " devDependencies " : { <nl> " @ babel / core " : " ^ 7 . 8 . 4 " , <nl> mmm a / vnext / CHANGELOG . json <nl> ppp b / vnext / CHANGELOG . json <nl> <nl> { <nl> " name " : " react - native - windows " , <nl> " entries " : [ <nl> + { <nl> + " date " : " Sat , 11 Apr 2020 02 : 36 : 32 GMT " , <nl> + " tag " : " react - native - windows_v0 . 0 . 0 - master . 31 " , <nl> + " version " : " 0 . 0 . 0 - master . 31 " , <nl> + " comments " : { <nl> + " prerelease " : [ <nl> + { <nl> + " comment " : " Improve RedBox ux , and don ' t require elevation unless it ' s the first time running the build ( and need to enable dev mode ) " , <nl> + " author " : " asklar @ microsoft . com " , <nl> + " commit " : " 69528f606dd6ed89f75b4be2780d7eaefdf1656e " , <nl> + " package " : " react - native - windows " <nl> + } <nl> + ] <nl> + } <nl> + } , <nl> { <nl> " date " : " Fri , 10 Apr 2020 22 : 03 : 49 GMT " , <nl> " tag " : " react - native - windows_v0 . 0 . 0 - master . 30 " , <nl> mmm a / vnext / CHANGELOG . md <nl> ppp b / vnext / CHANGELOG . md <nl> <nl> # Change Log - react - native - windows <nl> <nl> - This log was last generated on Fri , 10 Apr 2020 22 : 03 : 49 GMT and should not be manually modified . <nl> + This log was last generated on Sat , 11 Apr 2020 02 : 36 : 32 GMT and should not be manually modified . <nl> <nl> < ! - - Start content - - > <nl> <nl> + # # 0 . 0 . 0 - master . 
31 <nl> + <nl> + Sat , 11 Apr 2020 02 : 36 : 32 GMT <nl> + <nl> + # # # Changes <nl> + <nl> + - Improve RedBox ux , and don ' t require elevation unless it ' s the first time running the build ( and need to enable dev mode ) ( asklar @ microsoft . com ) <nl> + <nl> # # 0 . 0 . 0 - master . 30 <nl> <nl> Fri , 10 Apr 2020 22 : 03 : 49 GMT <nl> mmm a / vnext / package . json <nl> ppp b / vnext / package . json <nl> <nl> { <nl> " name " : " react - native - windows " , <nl> - " version " : " 0 . 0 . 0 - master . 30 " , <nl> + " version " : " 0 . 0 . 0 - master . 31 " , <nl> " license " : " MIT " , <nl> " repository " : { <nl> " type " : " git " , <nl> | applying package updates * * * NO_CI * * * | microsoft/react-native-windows | 9e181a7bbeea6ceb43c12312280580e68f144a33 | 2020-04-11T02:36:32Z |
mmm a / tensorflow / g3doc / api_docs / python / client . md <nl> ppp b / tensorflow / g3doc / api_docs / python / client . md <nl> Example : <nl> # v is the numpy array [ 10 , 20 ] <nl> # ' fetches ' can be a list . <nl> v = session . run ( [ a , b ] ) <nl> - # v is a Python list with 2 numpy arrays : the numpy array [ 10 , 20 ] and the <nl> + # v a Python list with 2 numpy arrays : the numpy array [ 10 , 20 ] and the <nl> # 1 - D array [ 1 . 0 , 2 . 0 ] <nl> # ' fetches ' can be arbitrary lists , tuples , namedtuple , dicts : <nl> MyData = collections . namedtuple ( ' MyData ' , [ ' a ' , ' b ' ] ) <nl> mmm a / tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . nn . ctc_loss . md <nl> ppp b / tensorflow / g3doc / api_docs / python / functions_and_classes / shard0 / tf . nn . ctc_loss . md <nl> <nl> - # # # ` tf . nn . ctc_loss ( inputs , labels , sequence_length , preprocess_collapse_repeated = False , ctc_merge_repeated = True ) ` { # ctc_loss } <nl> + # # # ` tf . nn . ctc_loss ( inputs , labels , sequence_length , preprocess_collapse_repeated = False , ctc_merge_repeated = True , time_major = True ) ` { # ctc_loss } <nl> <nl> Computes the CTC ( Connectionist Temporal Classification ) Loss . <nl> <nl> Here is a table of the ( roughly ) expected first order behavior : <nl> # # # # # Args : <nl> <nl> <nl> - * < b > ` inputs ` < / b > : 3 - D ` float ` ` Tensor ` sized <nl> - ` [ max_time x batch_size x num_classes ] ` . The logits . <nl> + * < b > ` inputs ` < / b > : 3 - D ` float ` ` Tensor ` . <nl> + If time_major = = False , this will be a ` Tensor ` shaped : <nl> + ` [ batch_size x max_time x num_classes ] ` . <nl> + If time_major = = True ( default ) , this will be a ` Tensor ` shaped : <nl> + ` [ max_time x batch_size x num_classes ] ` . <nl> + The logits . <nl> * < b > ` labels ` < / b > : An ` int32 ` ` SparseTensor ` . <nl> ` labels . indices [ i , : ] = = [ b , t ] ` means ` labels . values [ i ] ` stores <nl> the id for ( batch b , time t ) . <nl> Here is a table of the ( roughly ) expected first order behavior : <nl> * < b > ` preprocess_collapse_repeated ` < / b > : Boolean . Default : False . <nl> If True , repeated labels are collapsed prior to the CTC calculation . <nl> * < b > ` ctc_merge_repeated ` < / b > : Boolean . Default : True . <nl> + * < b > ` time_major ` < / b > : The shape format of the ` inputs ` Tensors . <nl> + If True , these ` Tensors ` must be shaped ` [ max_time , batch_size , num_classes ] ` . <nl> + If False , these ` Tensors ` must be shaped ` [ batch_size , max_time , num_classes ] ` . <nl> + Using ` time_major = True ` ( default ) is a bit more efficient because it avoids <nl> + transposes at the beginning of the ctc_loss calculation . However , most <nl> + TensorFlow data is batch - major , so by this function also accepts inputs <nl> + in batch - major form . <nl> <nl> # # # # # Returns : <nl> <nl> mmm a / tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . nn . conv2d_transpose . md <nl> ppp b / tensorflow / g3doc / api_docs / python / functions_and_classes / shard4 / tf . nn . conv2d_transpose . md <nl> deconvolution . <nl> <nl> <nl> * < b > ` value ` < / b > : A 4 - D ` Tensor ` of type ` float ` and shape <nl> - ` [ batch , in_height , in_width , in_channels ] ` . <nl> + ` [ batch , height , width , in_channels ] ` . <nl> * < b > ` filter ` < / b > : A 4 - D ` Tensor ` with the same type as ` value ` and shape <nl> - ` [ filter_height , filter_width , output_channels , in_channels ] ` . 
` filter ` ' s <nl> + ` [ height , width , output_channels , in_channels ] ` . ` filter ` ' s <nl> ` in_channels ` dimension must match that of ` value ` . <nl> * < b > ` output_shape ` < / b > : A 1 - D ` Tensor ` representing the output shape of the <nl> deconvolution op . <nl> mmm a / tensorflow / g3doc / api_docs / python / nn . md <nl> ppp b / tensorflow / g3doc / api_docs / python / nn . md <nl> outputs = outputs_ta . pack ( ) <nl> <nl> - - - <nl> <nl> - # # # ` tf . nn . ctc_loss ( inputs , labels , sequence_length , preprocess_collapse_repeated = False , ctc_merge_repeated = True ) ` { # ctc_loss } <nl> + # # # ` tf . nn . ctc_loss ( inputs , labels , sequence_length , preprocess_collapse_repeated = False , ctc_merge_repeated = True , time_major = True ) ` { # ctc_loss } <nl> <nl> Computes the CTC ( Connectionist Temporal Classification ) Loss . <nl> <nl> Here is a table of the ( roughly ) expected first order behavior : <nl> # # # # # Args : <nl> <nl> <nl> - * < b > ` inputs ` < / b > : 3 - D ` float ` ` Tensor ` sized <nl> - ` [ max_time x batch_size x num_classes ] ` . The logits . <nl> + * < b > ` inputs ` < / b > : 3 - D ` float ` ` Tensor ` . <nl> + If time_major = = False , this will be a ` Tensor ` shaped : <nl> + ` [ batch_size x max_time x num_classes ] ` . <nl> + If time_major = = True ( default ) , this will be a ` Tensor ` shaped : <nl> + ` [ max_time x batch_size x num_classes ] ` . <nl> + The logits . <nl> * < b > ` labels ` < / b > : An ` int32 ` ` SparseTensor ` . <nl> ` labels . indices [ i , : ] = = [ b , t ] ` means ` labels . values [ i ] ` stores <nl> the id for ( batch b , time t ) . <nl> Here is a table of the ( roughly ) expected first order behavior : <nl> * < b > ` preprocess_collapse_repeated ` < / b > : Boolean . Default : False . <nl> If True , repeated labels are collapsed prior to the CTC calculation . <nl> * < b > ` ctc_merge_repeated ` < / b > : Boolean . Default : True . <nl> + * < b > ` time_major ` < / b > : The shape format of the ` inputs ` Tensors . <nl> + If True , these ` Tensors ` must be shaped ` [ max_time , batch_size , num_classes ] ` . <nl> + If False , these ` Tensors ` must be shaped ` [ batch_size , max_time , num_classes ] ` . <nl> + Using ` time_major = True ` ( default ) is a bit more efficient because it avoids <nl> + transposes at the beginning of the ctc_loss calculation . However , most <nl> + TensorFlow data is batch - major , so by this function also accepts inputs <nl> + in batch - major form . <nl> <nl> # # # # # Returns : <nl> <nl> | Update generated Python Op docs . | tensorflow/tensorflow | eb547acbf48fb09ea999b3084c6a3d57f6676ec1 | 2016-09-14T07:32:52Z |
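The ctc_loss documentation in the TensorFlow row above hinges on tensor layout: time-major [max_time, batch_size, num_classes] and batch-major [batch_size, max_time, num_classes] store the same logits with different strides, which is why accepting batch-major input costs a transpose up front. A small C++ sketch of the flat-offset arithmetic behind that claim (sizes are illustrative):

#include <cstdio>

int main() {
    const int T = 3, B = 2, C = 4;  // max_time, batch_size, num_classes
    const int t = 1, b = 1, c = 2;  // one example element (t, b, c)
    // Row-major flat offset of (t, b, c) under each layout:
    int time_major_off  = (t * B + b) * C + c;  // layout [T, B, C]
    int batch_major_off = (b * T + t) * C + c;  // layout [B, T, C]
    std::printf("time-major offset: %d, batch-major offset: %d\n",
                time_major_off, batch_major_off);
    return 0;
}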
mmm a / cocos2dx / textures / CCTextureCache . cpp <nl> ppp b / cocos2dx / textures / CCTextureCache . cpp <nl> CCDictionary * CCTextureCache : : snapshotTextures ( ) <nl> { <nl> pRet - > setObject ( pElement - > getObject ( ) , pElement - > getStrKey ( ) ) ; <nl> } <nl> + pRet - > autorelease ( ) ; <nl> return pRet ; <nl> } <nl> <nl> mmm a / extensions / GUI / CCControlExtension / CCControlColourPicker . cpp <nl> ppp b / extensions / GUI / CCControlExtension / CCControlColourPicker . cpp <nl> CCControlColourPicker : : CCControlColourPicker ( ) <nl> <nl> CCControlColourPicker : : ~ CCControlColourPicker ( ) <nl> { <nl> - if ( m_background ) <nl> - { <nl> - m_background - > removeFromParentAndCleanup ( true ) ; <nl> - } <nl> - <nl> - if ( m_huePicker ) <nl> - { <nl> - m_huePicker - > removeFromParentAndCleanup ( true ) ; <nl> - } <nl> - <nl> - if ( m_colourPicker ) <nl> - { <nl> - m_colourPicker - > removeFromParentAndCleanup ( true ) ; <nl> - } <nl> - <nl> - m_background = NULL ; <nl> - m_huePicker = NULL ; <nl> - m_colourPicker = NULL ; <nl> + CC_SAFE_RELEASE ( m_background ) ; <nl> + CC_SAFE_RELEASE ( m_huePicker ) ; <nl> + CC_SAFE_RELEASE ( m_colourPicker ) ; <nl> } <nl> <nl> bool CCControlColourPicker : : init ( ) <nl> mmm a / extensions / network / WebSocket . cpp <nl> ppp b / extensions / network / WebSocket . cpp <nl> int WebSocket : : onSocketCallback ( struct libwebsocket_context * ctx , <nl> CC_SAFE_DELETE ( data ) ; <nl> CC_SAFE_DELETE_ARRAY ( buf ) ; <nl> } <nl> + <nl> + CC_SAFE_DELETE ( subThreadMsg ) ; <nl> } <nl> <nl> _wsHelper - > _subThreadWsMessageQueue - > clear ( ) ; <nl> | Merge pull request from minggo / master | cocos2d/cocos2d-x | caaf2137d8926ef3b7dc1b4ea4b418e6de109929 | 2013-06-09T07:06:13Z |
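The cocos2d-x row above fixes leaks by balancing reference counts: an autorelease on a returned dictionary, CC_SAFE_RELEASE on retained members in a destructor, and CC_SAFE_DELETE on queued messages. A toy C++ sketch of the retain/release discipline involved; Ref and SAFE_RELEASE below are simplified stand-ins for the cocos2d-x originals:

class Ref {
    int ref_count = 1;
public:
    void retain() { ++ref_count; }
    void release() { if (--ref_count == 0) delete this; }
protected:
    virtual ~Ref() = default;  // only release() may destroy a Ref
};

#define SAFE_RELEASE(p) do { if (p) (p)->release(); } while (0)

class ColourPicker {
    Ref* background_ = new Ref();  // owned via its reference count
public:
    ~ColourPicker() { SAFE_RELEASE(background_); }  // balances the initial count
};

int main() { ColourPicker p; }  // destructor releases background_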
mmm a / editor / editor_audio_buses . cpp <nl> ppp b / editor / editor_audio_buses . cpp <nl> EditorAudioBus : : EditorAudioBus ( EditorAudioBuses * p_buses ) { <nl> bus_popup = bus_options - > get_popup ( ) ; <nl> bus_popup - > add_item ( TTR ( " Duplicate " ) ) ; <nl> bus_popup - > add_item ( TTR ( " Delete " ) ) ; <nl> - add_child ( bus_popup ) ; <nl> bus_popup - > connect ( " index_pressed " , this , " _bus_popup_pressed " ) ; <nl> <nl> delete_effect_popup = memnew ( PopupMenu ) ; <nl> mmm a / scene / main / node . cpp <nl> ppp b / scene / main / node . cpp <nl> void Node : : _add_child_nocheck ( Node * p_child , const StringName & p_name ) { <nl> void Node : : add_child ( Node * p_child , bool p_legible_unique_name ) { <nl> <nl> ERR_FAIL_NULL ( p_child ) ; <nl> - / * Fail if node has a parent * / <nl> + <nl> if ( p_child = = this ) { <nl> - ERR_EXPLAIN ( " Can ' t add child " + p_child - > get_name ( ) + " to itself . " ) <nl> + ERR_EXPLAIN ( " Can ' t add child ' " + p_child - > get_name ( ) + " ' to itself . " ) <nl> ERR_FAIL_COND ( p_child = = this ) ; / / adding to itself ! <nl> } <nl> - ERR_EXPLAIN ( " Can ' t add child , already has a parent " ) ; <nl> - ERR_FAIL_COND ( p_child - > data . parent ) ; <nl> + <nl> + / * Fail if node has a parent * / <nl> + if ( p_child - > data . parent ) { <nl> + ERR_EXPLAIN ( " Can ' t add child ' " + p_child - > get_name ( ) + " ' to ' " + get_name ( ) + " ' , already has a parent ' " + p_child - > data . parent - > get_name ( ) + " ' . " ) ; <nl> + ERR_FAIL_COND ( p_child - > data . parent ) ; <nl> + } <nl> <nl> if ( data . blocked > 0 ) { <nl> - ERR_EXPLAIN ( " Parent node is busy setting up children , add_node ( ) failed . Consider using call_deferred ( \ " add_child \ " , child ) instead . " ) ; <nl> + ERR_EXPLAIN ( " Parent node is busy setting up children , add_node ( ) failed . Consider using call_deferred ( \ " add_child \ " , child ) instead . " ) ; <nl> ERR_FAIL_COND ( data . blocked > 0 ) ; <nl> } <nl> <nl> - ERR_EXPLAIN ( " Can ' t add child while a notification is happening " ) ; <nl> + ERR_EXPLAIN ( " Can ' t add child while a notification is happening . " ) ; <nl> ERR_FAIL_COND ( data . blocked > 0 ) ; <nl> <nl> / * Validate name * / <nl> void Node : : add_child_below_node ( Node * p_node , Node * p_child , bool p_legible_uniq <nl> if ( is_a_parent_of ( p_node ) ) { <nl> move_child ( p_child , p_node - > get_position_in_parent ( ) + 1 ) ; <nl> } else { <nl> - WARN_PRINTS ( " Cannot move under node " + p_node - > get_name ( ) + " as " + p_child - > get_name ( ) + " does not share a parent " ) <nl> + WARN_PRINTS ( " Cannot move under node " + p_node - > get_name ( ) + " as " + p_child - > get_name ( ) + " does not share a parent . " ) <nl> } <nl> } <nl> <nl> | Node : Add debug info to add_child reparenting check | godotengine/godot | 3c5ce736e63c0db6b2f0b5e8fef1119da5529df2 | 2017-08-26T16:14:42Z |
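The Godot row above does two things: it drops a stray add_child(bus_popup) call (the popup already has a parent) and turns Node::add_child's failure checks into guard clauses whose messages name the exact nodes involved. A standalone C++ sketch of that guard-clause style, with a toy Node type rather than Godot's:

#include <cstdio>
#include <string>

struct Node {
    std::string name;
    Node* parent = nullptr;
};

bool add_child(Node& self, Node* child) {
    if (child == &self) {
        std::fprintf(stderr, "Can't add child '%s' to itself.\n",
                     child->name.c_str());
        return false;
    }
    if (child->parent != nullptr) {  // each failure names all nodes involved
        std::fprintf(stderr,
                     "Can't add child '%s' to '%s', already has a parent '%s'.\n",
                     child->name.c_str(), self.name.c_str(),
                     child->parent->name.c_str());
        return false;
    }
    child->parent = &self;
    return true;
}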
mmm a / include / swift / AST / DiagnosticsSema . def <nl> ppp b / include / swift / AST / DiagnosticsSema . def <nl> ERROR ( circular_class_inheritance , sema_tcd , none , <nl> " circular class inheritance % 0 " , ( StringRef ) ) <nl> NOTE ( class_here , sema_tcd , none , <nl> " class % 0 declared here " , ( Identifier ) ) <nl> + ERROR ( inheritance_from_final_class , sema_tcd , none , <nl> + " Inheritance from a final class % 0 " , ( Identifier ) ) <nl> <nl> / / Enum raw types <nl> ERROR ( multiple_enum_raw_types , sema_tcd , none , <nl> mmm a / lib / Sema / TypeCheckDecl . cpp <nl> ppp b / lib / Sema / TypeCheckDecl . cpp <nl> class DeclChecker : public DeclVisitor < DeclChecker > { <nl> checkRequiredInClassInits ( CD ) ; <nl> <nl> if ( ! IsFirstPass ) { <nl> + / / Check that we don ' t inherit from a final class . <nl> + if ( auto superclassTy = CD - > getSuperclass ( ) ) { <nl> + ClassDecl * Super = superclassTy - > getClassOrBoundGenericClass ( ) ; <nl> + if ( Super - > isFinal ( ) ) { <nl> + TC . diagnose ( CD , diag : : inheritance_from_final_class , <nl> + Super - > getName ( ) ) ; <nl> + return ; <nl> + } <nl> + } <nl> + <nl> / / Check for inconsistencies between the initializers of our <nl> / / superclass and our own initializers . <nl> if ( auto superclassTy = CD - > getSuperclass ( ) ) { <nl> | Forbid the any kind of inheritance from @ final classes . | apple/swift | 0d8729a9f69f34b59ac532cde49a3b93d599ed33 | 2014-04-05T06:58:41Z |
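The Swift row above pairs a new diagnostic with a semantic check: during class type-checking, a declaration whose superclass is marked final is rejected. A toy C++ reconstruction of the shape of that check; ClassDecl and check_inheritance are simplified stand-ins for the compiler's real machinery:

#include <cstdio>
#include <string>

struct ClassDecl {
    std::string name;
    bool is_final;
    const ClassDecl* superclass;  // nullptr if the class has no superclass
};

bool check_inheritance(const ClassDecl& cd) {
    if (cd.superclass != nullptr && cd.superclass->is_final) {
        std::fprintf(stderr, "error: inheritance from a final class '%s'\n",
                     cd.superclass->name.c_str());
        return false;
    }
    return true;
}

int main() {
    ClassDecl base{"Base", /*is_final=*/true, nullptr};
    ClassDecl derived{"Derived", false, &base};
    return check_inheritance(derived) ? 0 : 1;  // exits 1: Base is final
}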
mmm a / test / functional / forknotify . py <nl> ppp b / test / functional / forknotify . py <nl> <nl> # Distributed under the MIT software license , see the accompanying <nl> # file COPYING or http : / / www . opensource . org / licenses / mit - license . php . <nl> " " " Test the - alertnotify option . " " " <nl> + import os <nl> + import time <nl> <nl> from test_framework . test_framework import BitcoinTestFramework <nl> from test_framework . util import * <nl> def run_test ( self ) : <nl> self . nodes [ 1 ] . generate ( 1 ) <nl> self . sync_all ( ) <nl> <nl> + # Give bitcoind 10 seconds to write the alert notification <nl> + timeout = 10 . 0 <nl> + while timeout > 0 : <nl> + if os . path . exists ( self . alert_filename ) and os . path . getsize ( self . alert_filename ) : <nl> + break <nl> + time . sleep ( 0 . 1 ) <nl> + timeout - = 0 . 1 <nl> + else : <nl> + assert False , " - alertnotify did not warn of up - version blocks " <nl> + <nl> with open ( self . alert_filename , ' r ' , encoding = ' utf8 ' ) as f : <nl> alert_text = f . read ( ) <nl> <nl> - if len ( alert_text ) = = 0 : <nl> - raise AssertionError ( " - alertnotify did not warn of up - version blocks " ) <nl> - <nl> # Mine more up - version blocks , should not get more alerts : <nl> self . nodes [ 1 ] . generate ( 1 ) <nl> self . sync_all ( ) <nl> | Make forknotify . py more robust | bitcoin/bitcoin | a4fd89fddba49c56e77f131fe95feffa8d7cf6ed | 2017-03-28T20:22:19Z |
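The Bitcoin row above replaces a read-immediately pattern with a bounded poll: the test gives bitcoind up to 10 seconds to write the -alertnotify file before declaring failure, removing a race with the writer. A C++ sketch of the same deadline-polling shape (path handling and timings are illustrative):

#include <chrono>
#include <fstream>
#include <thread>

// Returns true once `path` exists and is non-empty, or false at the deadline.
bool wait_for_nonempty_file(const char* path,
                            std::chrono::milliseconds timeout) {
    const auto deadline = std::chrono::steady_clock::now() + timeout;
    while (std::chrono::steady_clock::now() < deadline) {
        std::ifstream f(path);
        if (f && f.peek() != std::ifstream::traits_type::eof())
            return true;
        std::this_thread::sleep_for(std::chrono::milliseconds(100));
    }
    return false;  // caller treats this as "notification was never written"
}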
mmm a / configure <nl> ppp b / configure <nl> while true ; do <nl> CUDA_DNN_LIB_ALT_PATH = " libcudnn $ { TF_CUDNN_EXT } . dylib " <nl> fi <nl> <nl> - if [ - e " $ CUDNN_INSTALL_PATH / $ { CUDA_DNN_LIB_ALT_PATH } " - o - e " $ CUDNN_INSTALL_PATH / $ { CUDA_DNN_LIB_PATH } " ] ; then <nl> + if [ - e " $ CUDNN_INSTALL_PATH / $ { CUDA_DNN_LIB_ALT_PATH } " ] | | [ - e " $ CUDNN_INSTALL_PATH / $ { CUDA_DNN_LIB_PATH } " ] ; then <nl> export TF_CUDNN_VERSION <nl> write_action_env_to_bazelrc " TF_CUDNN_VERSION " " $ TF_CUDNN_VERSION " <nl> export CUDNN_INSTALL_PATH <nl> while true ; do <nl> fi <nl> <nl> # Check that the include and library folders are where we expect them to be <nl> - if [ - e " $ MPI_HOME / include " - a - e " $ MPI_HOME / lib " ] ; then <nl> + if [ - e " $ MPI_HOME / include " ] & & [ - e " $ MPI_HOME / lib " ] ; then <nl> break <nl> fi <nl> <nl> | [ Bash ] Prefer [ p ] & & [ q ] over [ p - a q ] ( ) | tensorflow/tensorflow | 0a389674dc41f1973c3a94ddb14ef306cbd377f5 | 2017-06-08T06:30:45Z |
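The Catch2 row that follows retires std::auto_ptr behind a language-version check, so C++11 builds get std::unique_ptr while C++03 compilers keep working. A standalone sketch of that version-gated alias; Widget is a placeholder type:

#include <memory>

struct Widget {};

#if defined(__cplusplus) && __cplusplus > 199711L
typedef std::unique_ptr<Widget> WidgetPtr;  // C++11 and later
#else
typedef std::auto_ptr<Widget> WidgetPtr;    // pre-C++11 fallback
#endif

int main() {
    WidgetPtr w(new Widget());
    return 0;  // the smart pointer frees Widget at scope exit
}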
mmm a / tensorflow / compiler / mlir / lite / quantization / quantization_config . h <nl> ppp b / tensorflow / compiler / mlir / lite / quantization / quantization_config . h <nl> struct QuantizationSpecs { <nl> bool RunWeightQuantization ( ) const { return weight_quantization ; } <nl> <nl> / / Whether this inference type represents a signed storage type . <nl> - bool IsSignedInferenceType ( ) { <nl> + bool IsSignedInferenceType ( ) const { <nl> switch ( inference_type ) { <nl> case tensorflow : : DT_QUINT8 : <nl> case tensorflow : : DT_QUINT16 : <nl> struct QuantizationSpecs { <nl> <nl> / / Gets the width of this quantization type . Returns 0 if it isn ' t a <nl> / / quantization type . <nl> - int64_t GetQuantizationTypeWidth ( ) { <nl> + int64_t GetQuantizationTypeWidth ( ) const { <nl> switch ( inference_type ) { <nl> case tensorflow : : DT_QINT8 : <nl> case tensorflow : : DT_QUINT8 : <nl> mmm a / tensorflow / compiler / mlir / lite / tf_tfl_passes . cc <nl> ppp b / tensorflow / compiler / mlir / lite / tf_tfl_passes . cc <nl> void AddQuantizationPasses ( const mlir : : TFL : : QuantizationSpecs & quant_specs , <nl> quant_specs . default_ranges . second . hasValue ( ) ) { <nl> pass_manager - > addPass ( mlir : : TFL : : CreateDefaultQuantParamsPass ( <nl> quant_specs . default_ranges . first . getValueOr ( 0 . 0 ) , <nl> - quant_specs . default_ranges . second . getValueOr ( 0 . 0 ) ) ) ; <nl> + quant_specs . default_ranges . second . getValueOr ( 0 . 0 ) , <nl> + quant_specs . IsSignedInferenceType ( ) ) ) ; <nl> pass_manager - > addPass ( mlir : : TFL : : CreateQuantizePass ( ) ) ; <nl> pass_manager - > addPass ( <nl> mlir : : TFL : : CreatePostQuantizePass ( emit_quant_adaptor_ops ) ) ; <nl> mmm a / tensorflow / compiler / mlir / lite / transforms / default_quant_params . cc <nl> ppp b / tensorflow / compiler / mlir / lite / transforms / default_quant_params . cc <nl> namespace { <nl> class DefaultQuantParamsPass <nl> : public PassWrapper < DefaultQuantParamsPass , FunctionPass > { <nl> public : <nl> - explicit DefaultQuantParamsPass ( double default_min , double default_max ) <nl> - : default_min_ ( default_min ) , default_max_ ( default_max ) { } <nl> + explicit DefaultQuantParamsPass ( double default_min , double default_max , <nl> + bool is_signed ) <nl> + : default_min_ ( default_min ) , <nl> + default_max_ ( default_max ) , <nl> + is_signed_ ( is_signed ) { } <nl> <nl> void runOnFunction ( ) override ; <nl> <nl> class DefaultQuantParamsPass <nl> <nl> double default_min_ ; <nl> double default_max_ ; <nl> + bool is_signed_ ; <nl> quant : : QuantParams default_quant_params_ ; <nl> } ; <nl> } / / namespace <nl> quant : : QuantParams DefaultQuantParamsPass : : GetDefaultQuantParams ( <nl> default_quant_params_ = quant : : fakeQuantAttrsToType ( <nl> builder . getUnknownLoc ( ) , <nl> / * numBits = * / 8 , default_min_ , default_max_ , / * narrowRange = * / false , <nl> - builder . getF32Type ( ) ) ; <nl> + builder . getF32Type ( ) , is_signed_ ) ; <nl> } <nl> return default_quant_params_ ; <nl> } <nl> <nl> / / Creates an instance of the default quant parameters pass . 
<nl> std : : unique_ptr < OperationPass < FuncOp > > CreateDefaultQuantParamsPass ( <nl> - double default_min , double default_max ) { <nl> - return absl : : make_unique < DefaultQuantParamsPass > ( default_min , default_max ) ; <nl> + double default_min , double default_max , bool is_signed ) { <nl> + return absl : : make_unique < DefaultQuantParamsPass > ( default_min , default_max , <nl> + is_signed ) ; <nl> } <nl> <nl> / / Registers this pass with default values , only for test <nl> static PassRegistration < DefaultQuantParamsPass > pass ( <nl> " tfl - default - quant " , <nl> " Apply quantization with default quantization parameter " , [ ] { <nl> return CreateDefaultQuantParamsPass ( / * default_min = * / - 1 . 0 , <nl> - / * default_max = * / 1 . 0 ) ; <nl> + / * default_max = * / 1 . 0 , <nl> + / * is_signed = * / false ) ; <nl> } ) ; <nl> <nl> } / / namespace TFL <nl> mmm a / tensorflow / compiler / mlir / lite / transforms / passes . h <nl> ppp b / tensorflow / compiler / mlir / lite / transforms / passes . h <nl> std : : unique_ptr < OperationPass < ModuleOp > > CreateOptimizeFunctionalOpsPass ( ) ; <nl> / / Creates an instance of the TensorFlow Lite dialect pass to add default <nl> / / quantization parameters . <nl> std : : unique_ptr < OperationPass < FuncOp > > CreateDefaultQuantParamsPass ( <nl> - double default_min , double default_max ) ; <nl> + double default_min , double default_max , bool is_signed ) ; <nl> <nl> / / Creates an instance of the TensorFlow Lite dialect pass to convert dense <nl> / / tensor to sparse format . <nl> | Merge pull request from lgeiger : default - quant - int8 | tensorflow/tensorflow | eaacee173897b77cdb6afd22d5e78154177a10f3 | 2020-05-13T17:16:29Z |
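Two small C++ hygiene moves carry the TensorFlow MLIR row above: query methods on QuantizationSpecs become const so they can be called through a const reference, and a new is_signed boolean is threaded from the specs through the pass constructor to where the quantized type is built. A standalone sketch of both moves, with toy types (Specs, DefaultParamsPass) in place of the MLIR ones:

struct Specs {
    bool signed_inference = false;
    // const-qualified: callable on a `const Specs&`.
    bool is_signed_inference_type() const { return signed_inference; }
};

class DefaultParamsPass {
    double default_min_;
    double default_max_;
    bool is_signed_;  // newly threaded through the constructor
public:
    DefaultParamsPass(double default_min, double default_max, bool is_signed)
        : default_min_(default_min),
          default_max_(default_max),
          is_signed_(is_signed) {}
};

DefaultParamsPass make_pass(const Specs& specs) {
    return DefaultParamsPass(-1.0, 1.0, specs.is_signed_inference_type());
}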
mmm a / dlib / cmake_utils / add_python_module <nl> ppp b / dlib / cmake_utils / add_python_module <nl> if ( PYTHON3 ) <nl> FIND_PACKAGE ( Boost 1 . 41 . 0 COMPONENTS python ) <nl> endif ( ) <nl> set ( Python_ADDITIONAL_VERSIONS 3 . 5 3 . 6 ) <nl> - FIND_PACKAGE ( PythonLibs 3 . 4 REQUIRED ) <nl> + FIND_PACKAGE ( PythonLibs 3 . 4 ) <nl> else ( ) <nl> FIND_PACKAGE ( Boost 1 . 41 . 0 COMPONENTS python ) <nl> - FIND_PACKAGE ( PythonLibs 2 . 6 REQUIRED ) <nl> + FIND_PACKAGE ( PythonLibs 2 . 6 ) <nl> endif ( ) <nl> <nl> if ( NOT Boost_FOUND ) <nl> else ( ) <nl> endif ( ) <nl> <nl> message ( STATUS " USING BOOST_LIBS : $ { Boost_LIBRARIES } " ) <nl> - message ( STATUS " USING PYTHON_LIBS : $ { PYTHON_LIBRARIES } " ) <nl> <nl> if ( CMAKE_COMPILER_IS_GNUCXX ) <nl> # Just setting CMAKE_POSITION_INDEPENDENT_CODE should be enough to set <nl> include ( $ { CMAKE_CURRENT_LIST_DIR } / . . / cmake ) <nl> # output name is set to what the user asked for ( i . e . no _ ) . <nl> macro ( add_python_module module_name module_sources ) <nl> ADD_LIBRARY ( $ { module_name } _ SHARED $ { module_sources } $ { ARGN } ) <nl> - TARGET_LINK_LIBRARIES ( $ { module_name } _ $ { Boost_LIBRARIES } $ { PYTHON_LIBRARIES } dlib : : dlib ) <nl> + TARGET_LINK_LIBRARIES ( $ { module_name } _ $ { Boost_LIBRARIES } dlib : : dlib ) <nl> + <nl> if ( WIN32 AND NOT CYGWIN ) <nl> SET_TARGET_PROPERTIES ( $ { module_name } _ <nl> PROPERTIES <nl> macro ( add_python_module module_name module_sources ) <nl> SUFFIX " . dll " <nl> OUTPUT_NAME $ { module_name } <nl> ) <nl> + elseif ( APPLE ) <nl> + SET_TARGET_PROPERTIES ( $ { module_name } _ <nl> + PROPERTIES <nl> + LINK_FLAGS " - undefined dynamic_lookup " <nl> + PREFIX " " <nl> + SUFFIX " . so " <nl> + OUTPUT_NAME $ { module_name } <nl> + ) <nl> else ( ) <nl> SET_TARGET_PROPERTIES ( $ { module_name } _ <nl> PROPERTIES <nl> + LINK_FLAGS " - shared " <nl> PREFIX " " <nl> SUFFIX " . so " <nl> OUTPUT_NAME $ { module_name } <nl> | remove linking to libpython on linux / OSX ( ) | davisking/dlib | 4a4fd91a26f93e8e2a486f50c7817661876a5dba | 2017-07-15T01:12:00Z |
mmm a / include / internal / clara . h <nl> ppp b / include / internal / clara . h <nl> namespace Clara { <nl> int position ; <nl> } ; <nl> <nl> + / / NOTE : std : : auto_ptr is deprecated in c + + 11 / c + + 0x <nl> + # if defined ( __cplusplus ) & & __cplusplus > 199711L <nl> + typedef std : : unique_ptr < Arg > ArgAutoPtr ; <nl> + # else <nl> + typedef std : : auto_ptr < Arg > ArgAutoPtr ; <nl> + # endif <nl> + <nl> class ArgBinder { <nl> public : <nl> template < typename F > <nl> namespace Clara { <nl> else if ( m_arg . isAnyPositional ( ) ) { <nl> if ( m_cl - > m_arg . get ( ) ) <nl> throw std : : logic_error ( " Only one unpositional argument can be added " ) ; <nl> - m_cl - > m_arg = std : : auto_ptr < Arg > ( new Arg ( m_arg ) ) ; <nl> + m_cl - > m_arg = ArgAutoPtr ( new Arg ( m_arg ) ) ; <nl> } <nl> else <nl> m_cl - > m_options . push_back ( m_arg ) ; <nl> namespace Clara { <nl> m_highestSpecifiedArgPosition ( other . m_highestSpecifiedArgPosition ) <nl> { <nl> if ( other . m_arg . get ( ) ) <nl> - m_arg = std : : auto_ptr < Arg > ( new Arg ( * other . m_arg ) ) ; <nl> + m_arg = ArgAutoPtr ( new Arg ( * other . m_arg ) ) ; <nl> } <nl> <nl> template < typename F > <nl> namespace Clara { <nl> Detail : : BoundArgFunction < ConfigT > m_boundProcessName ; <nl> std : : vector < Arg > m_options ; <nl> std : : map < int , Arg > m_positionalArgs ; <nl> - std : : auto_ptr < Arg > m_arg ; <nl> + ArgAutoPtr m_arg ; <nl> int m_highestSpecifiedArgPosition ; <nl> } ; <nl> <nl> | std : : auto_ptr is deprecated in c + + 11 / c + + 0x | catchorg/Catch2 | d1e5480d957d3ef39be921a693a24798b4015fbd | 2013-11-17T17:44:35Z |
mmm a / src / builtins / builtins - regexp - gen . cc <nl> ppp b / src / builtins / builtins - regexp - gen . cc <nl> void RegExpBuiltinsAssembler : : GetStringPointers ( <nl> TNode < HeapObject > RegExpBuiltinsAssembler : : RegExpExecInternal ( <nl> TNode < Context > context , TNode < JSRegExp > regexp , TNode < String > string , <nl> TNode < Number > last_index , TNode < RegExpMatchInfo > match_info ) { <nl> - / / Just jump directly to runtime if native RegExp is not selected at compile <nl> - / / time or if regexp entry in generated code is turned off runtime switch or <nl> - / / at compilation . <nl> - # ifdef V8_INTERPRETED_REGEXP <nl> - return CAST ( CallRuntime ( Runtime : : kRegExpExec , context , regexp , string , <nl> - last_index , match_info ) ) ; <nl> - # else / / V8_INTERPRETED_REGEXP <nl> ToDirectStringAssembler to_direct ( state ( ) , string ) ; <nl> <nl> TVARIABLE ( HeapObject , var_result ) ; <nl> TNode < HeapObject > RegExpBuiltinsAssembler : : RegExpExecInternal ( <nl> # endif <nl> <nl> GotoIf ( TaggedIsSmi ( var_code . value ( ) ) , & runtime ) ; <nl> + GotoIfNot ( IsCode ( CAST ( var_code . value ( ) ) ) , & runtime ) ; <nl> TNode < Code > code = CAST ( var_code . value ( ) ) ; <nl> <nl> Label if_success ( this ) , if_exception ( this , Label : : kDeferred ) ; <nl> TNode < HeapObject > RegExpBuiltinsAssembler : : RegExpExecInternal ( <nl> <nl> BIND ( & out ) ; <nl> return var_result . value ( ) ; <nl> - # endif / / V8_INTERPRETED_REGEXP <nl> } <nl> <nl> / / ES # sec - regexp . prototype . exec <nl> mmm a / src / external - reference . cc <nl> ppp b / src / external - reference . cc <nl> <nl> # include " src / wasm / wasm - external - refs . h " <nl> <nl> / / Include native regexp - macro - assembler . <nl> - # ifndef V8_INTERPRETED_REGEXP <nl> # if V8_TARGET_ARCH_IA32 <nl> # include " src / regexp / ia32 / regexp - macro - assembler - ia32 . h " / / NOLINT <nl> # elif V8_TARGET_ARCH_X64 <nl> <nl> # else / / Unknown architecture . <nl> # error " Unknown architecture . " <nl> # endif / / Target architecture . <nl> - # endif / / V8_INTERPRETED_REGEXP <nl> <nl> # ifdef V8_INTL_SUPPORT <nl> # include " src / objects / intl - objects . h " <nl> ExternalReference ExternalReference : : invoke_accessor_getter_callback ( ) { <nl> return ExternalReference : : Create ( & thunk_fun , thunk_type ) ; <nl> } <nl> <nl> - # ifndef V8_INTERPRETED_REGEXP <nl> - <nl> # if V8_TARGET_ARCH_X64 <nl> # define re_stack_check_func RegExpMacroAssemblerX64 : : CheckStackGuardState <nl> # elif V8_TARGET_ARCH_IA32 <nl> ExternalReference ExternalReference : : address_of_regexp_stack_memory_size ( <nl> return ExternalReference ( isolate - > regexp_stack ( ) - > memory_size_address ( ) ) ; <nl> } <nl> <nl> - # endif / / V8_INTERPRETED_REGEXP <nl> - <nl> FUNCTION_REFERENCE_WITH_TYPE ( ieee754_acos_function , base : : ieee754 : : acos , <nl> BUILTIN_FP_CALL ) <nl> FUNCTION_REFERENCE_WITH_TYPE ( ieee754_acosh_function , base : : ieee754 : : acosh , <nl> mmm a / src / external - reference . h <nl> ppp b / src / external - reference . 
h <nl> class StatsCounter ; <nl> " IsolateData : : fast_c_call_caller_fp_address " ) \ <nl> V ( fast_c_call_caller_pc_address , \ <nl> " IsolateData : : fast_c_call_caller_pc_address " ) \ <nl> - EXTERNAL_REFERENCE_LIST_NON_INTERPRETED_REGEXP ( V ) <nl> + V ( address_of_regexp_stack_limit , " RegExpStack : : limit_address ( ) " ) \ <nl> + V ( address_of_regexp_stack_memory_address , " RegExpStack : : memory_address ( ) " ) \ <nl> + V ( address_of_regexp_stack_memory_size , " RegExpStack : : memory_size ( ) " ) \ <nl> + V ( address_of_static_offsets_vector , " OffsetsVector : : static_offsets_vector " ) \ <nl> + V ( re_case_insensitive_compare_uc16 , \ <nl> + " NativeRegExpMacroAssembler : : CaseInsensitiveCompareUC16 ( ) " ) \ <nl> + V ( re_check_stack_guard_state , \ <nl> + " RegExpMacroAssembler * : : CheckStackGuardState ( ) " ) \ <nl> + V ( re_grow_stack , " NativeRegExpMacroAssembler : : GrowStack ( ) " ) \ <nl> + V ( re_word_character_map , " NativeRegExpMacroAssembler : : word_character_map " ) <nl> <nl> # define EXTERNAL_REFERENCE_LIST ( V ) \ <nl> V ( abort_with_reason , " abort_with_reason " ) \ <nl> class StatsCounter ; <nl> " atomic_pair_compare_exchange_function " ) \ <nl> EXTERNAL_REFERENCE_LIST_INTL ( V ) <nl> <nl> - # ifndef V8_INTERPRETED_REGEXP <nl> - # define EXTERNAL_REFERENCE_LIST_NON_INTERPRETED_REGEXP ( V ) \ <nl> - V ( address_of_regexp_stack_limit , " RegExpStack : : limit_address ( ) " ) \ <nl> - V ( address_of_regexp_stack_memory_address , " RegExpStack : : memory_address ( ) " ) \ <nl> - V ( address_of_regexp_stack_memory_size , " RegExpStack : : memory_size ( ) " ) \ <nl> - V ( address_of_static_offsets_vector , " OffsetsVector : : static_offsets_vector " ) \ <nl> - V ( re_case_insensitive_compare_uc16 , \ <nl> - " NativeRegExpMacroAssembler : : CaseInsensitiveCompareUC16 ( ) " ) \ <nl> - V ( re_check_stack_guard_state , \ <nl> - " RegExpMacroAssembler * : : CheckStackGuardState ( ) " ) \ <nl> - V ( re_grow_stack , " NativeRegExpMacroAssembler : : GrowStack ( ) " ) \ <nl> - V ( re_word_character_map , " NativeRegExpMacroAssembler : : word_character_map " ) <nl> - # else <nl> - # define EXTERNAL_REFERENCE_LIST_NON_INTERPRETED_REGEXP ( V ) <nl> - # endif / / V8_INTERPRETED_REGEXP <nl> - <nl> # ifdef V8_INTL_SUPPORT <nl> # define EXTERNAL_REFERENCE_LIST_INTL ( V ) \ <nl> V ( intl_convert_one_byte_to_lower , " intl_convert_one_byte_to_lower " ) \ <nl> mmm a / src / flag - definitions . h <nl> ppp b / src / flag - definitions . h <nl> DEFINE_UINT ( serialization_chunk_size , 4096 , <nl> DEFINE_BOOL ( regexp_optimization , true , " generate optimized regexp code " ) <nl> DEFINE_BOOL ( regexp_mode_modifiers , false , " enable inline flags in regexp . " ) <nl> <nl> + # ifdef V8_INTERPRETED_REGEXP <nl> + # define V8_INTERPRETED_REGEXP_BOOL true <nl> + # else <nl> + # define V8_INTERPRETED_REGEXP_BOOL false <nl> + # endif <nl> + DEFINE_BOOL ( regexp_interpret_all , V8_INTERPRETED_REGEXP_BOOL , <nl> + " interpret all regexp code " ) <nl> + # undef V8_INTERPRETED_REGEXP_BOOL <nl> + <nl> / / Testing flags test / cctest / test - { flags , api , serialization } . cc <nl> DEFINE_BOOL ( testing_bool_flag , true , " testing_bool_flag " ) <nl> DEFINE_MAYBE_BOOL ( testing_maybe_bool_flag , " testing_maybe_bool_flag " ) <nl> DEFINE_BOOL ( jitless , V8_LITE_BOOL , <nl> / / Optimizations ( i . e . jitting ) are disabled . <nl> DEFINE_NEG_IMPLICATION ( jitless , opt ) <nl> # endif <nl> + / / Regexps are interpreted . 
<nl> + DEFINE_IMPLICATION ( jitless , regexp_interpret_all ) <nl> / / asm . js validation is disabled since it triggers wasm code generation . <nl> DEFINE_NEG_IMPLICATION ( jitless , validate_asm ) <nl> / / Wasm is put into interpreter - only mode . We repeat flag implications down <nl> mmm a / src / objects / js - regexp - inl . h <nl> ppp b / src / objects / js - regexp - inl . h <nl> void JSRegExp : : SetDataAt ( int index , Object value ) { <nl> } <nl> <nl> bool JSRegExp : : HasCompiledCode ( ) const { <nl> - return TypeTag ( ) = = IRREGEXP & & ( DataAt ( kIrregexpLatin1CodeIndex ) - > IsCode ( ) | | <nl> - DataAt ( kIrregexpUC16CodeIndex ) - > IsCode ( ) ) ; <nl> + if ( TypeTag ( ) ! = IRREGEXP ) return false ; <nl> + # ifdef DEBUG <nl> + DCHECK ( DataAt ( kIrregexpLatin1CodeIndex ) - > IsCode ( ) | | <nl> + DataAt ( kIrregexpLatin1CodeIndex ) - > IsByteArray ( ) | | <nl> + DataAt ( kIrregexpLatin1CodeIndex ) = = Smi : : FromInt ( kUninitializedValue ) ) ; <nl> + DCHECK ( DataAt ( kIrregexpUC16CodeIndex ) - > IsCode ( ) | | <nl> + DataAt ( kIrregexpUC16CodeIndex ) - > IsByteArray ( ) | | <nl> + DataAt ( kIrregexpUC16CodeIndex ) = = Smi : : FromInt ( kUninitializedValue ) ) ; <nl> + # endif / / DEBUG <nl> + Smi uninitialized = Smi : : FromInt ( kUninitializedValue ) ; <nl> + return ( DataAt ( kIrregexpLatin1CodeIndex ) ! = uninitialized | | <nl> + DataAt ( kIrregexpUC16CodeIndex ) ! = uninitialized ) ; <nl> } <nl> <nl> void JSRegExp : : DiscardCompiledCodeForSerialization ( ) { <nl> mmm a / src / regexp / arm / regexp - macro - assembler - arm . cc <nl> ppp b / src / regexp / arm / regexp - macro - assembler - arm . cc <nl> <nl> namespace v8 { <nl> namespace internal { <nl> <nl> - # ifndef V8_INTERPRETED_REGEXP <nl> / * <nl> * This assembler uses the following register assignment convention <nl> * - r4 : Temporarily stores the index of capture start after a matching pass <nl> void RegExpMacroAssemblerARM : : LoadCurrentCharacterUnchecked ( int cp_offset , <nl> <nl> # undef __ <nl> <nl> - # endif / / V8_INTERPRETED_REGEXP <nl> - <nl> } / / namespace internal <nl> } / / namespace v8 <nl> <nl> mmm a / src / regexp / arm / regexp - macro - assembler - arm . h <nl> ppp b / src / regexp / arm / regexp - macro - assembler - arm . h <nl> <nl> namespace v8 { <nl> namespace internal { <nl> <nl> - <nl> - # ifndef V8_INTERPRETED_REGEXP <nl> class RegExpMacroAssemblerARM : public NativeRegExpMacroAssembler { <nl> public : <nl> RegExpMacroAssemblerARM ( Isolate * isolate , Zone * zone , Mode mode , <nl> class RegExpMacroAssemblerARM : public NativeRegExpMacroAssembler { <nl> Label stack_overflow_label_ ; <nl> } ; <nl> <nl> - # endif / / V8_INTERPRETED_REGEXP <nl> - <nl> - <nl> } / / namespace internal <nl> } / / namespace v8 <nl> <nl> mmm a / src / regexp / arm64 / regexp - macro - assembler - arm64 . cc <nl> ppp b / src / regexp / arm64 / regexp - macro - assembler - arm64 . cc <nl> <nl> namespace v8 { <nl> namespace internal { <nl> <nl> - # ifndef V8_INTERPRETED_REGEXP <nl> / * <nl> * This assembler uses the following register assignment convention : <nl> * - w19 : Used to temporarely store a value before a call to C code . <nl> void RegExpMacroAssemblerARM64 : : LoadCurrentCharacterUnchecked ( int cp_offset , <nl> } <nl> } <nl> <nl> - # endif / / V8_INTERPRETED_REGEXP <nl> - <nl> } / / namespace internal <nl> } / / namespace v8 <nl> <nl> mmm a / src / regexp / arm64 / regexp - macro - assembler - arm64 . h <nl> ppp b / src / regexp / arm64 / regexp - macro - assembler - arm64 . 
h <nl> <nl> namespace v8 { <nl> namespace internal { <nl> <nl> - <nl> - # ifndef V8_INTERPRETED_REGEXP <nl> class RegExpMacroAssemblerARM64 : public NativeRegExpMacroAssembler { <nl> public : <nl> RegExpMacroAssemblerARM64 ( Isolate * isolate , Zone * zone , Mode mode , <nl> class RegExpMacroAssemblerARM64 : public NativeRegExpMacroAssembler { <nl> Label stack_overflow_label_ ; <nl> } ; <nl> <nl> - # endif / / V8_INTERPRETED_REGEXP <nl> - <nl> - <nl> } / / namespace internal <nl> } / / namespace v8 <nl> <nl> mmm a / src / regexp / bytecodes - irregexp . h <nl> ppp b / src / regexp / bytecodes - irregexp . h <nl> <nl> # ifndef V8_REGEXP_BYTECODES_IRREGEXP_H_ <nl> # define V8_REGEXP_BYTECODES_IRREGEXP_H_ <nl> <nl> - # ifdef V8_INTERPRETED_REGEXP <nl> - <nl> namespace v8 { <nl> namespace internal { <nl> <nl> BYTECODE_ITERATOR ( DECLARE_BYTECODE_LENGTH ) <nl> } / / namespace internal <nl> } / / namespace v8 <nl> <nl> - # endif / / V8_INTERPRETED_REGEXP <nl> - <nl> # endif / / V8_REGEXP_BYTECODES_IRREGEXP_H_ <nl> mmm a / src / regexp / ia32 / regexp - macro - assembler - ia32 . cc <nl> ppp b / src / regexp / ia32 / regexp - macro - assembler - ia32 . cc <nl> <nl> namespace v8 { <nl> namespace internal { <nl> <nl> - # ifndef V8_INTERPRETED_REGEXP <nl> / * <nl> * This assembler uses the following register assignment convention <nl> * - edx : Current character . Must be loaded using LoadCurrentCharacter <nl> void RegExpMacroAssemblerIA32 : : LoadCurrentCharacterUnchecked ( int cp_offset , <nl> <nl> # undef __ <nl> <nl> - # endif / / V8_INTERPRETED_REGEXP <nl> - <nl> } / / namespace internal <nl> } / / namespace v8 <nl> <nl> mmm a / src / regexp / ia32 / regexp - macro - assembler - ia32 . h <nl> ppp b / src / regexp / ia32 / regexp - macro - assembler - ia32 . h <nl> <nl> namespace v8 { <nl> namespace internal { <nl> <nl> - # ifndef V8_INTERPRETED_REGEXP <nl> class RegExpMacroAssemblerIA32 : public NativeRegExpMacroAssembler { <nl> public : <nl> RegExpMacroAssemblerIA32 ( Isolate * isolate , Zone * zone , Mode mode , <nl> class RegExpMacroAssemblerIA32 : public NativeRegExpMacroAssembler { <nl> Label check_preempt_label_ ; <nl> Label stack_overflow_label_ ; <nl> } ; <nl> - # endif / / V8_INTERPRETED_REGEXP <nl> <nl> } / / namespace internal <nl> } / / namespace v8 <nl> mmm a / src / regexp / interpreter - irregexp . cc <nl> ppp b / src / regexp / interpreter - irregexp . cc <nl> <nl> <nl> / / A simple interpreter for the Irregexp byte code . <nl> <nl> - # ifdef V8_INTERPRETED_REGEXP <nl> - <nl> # include " src / regexp / interpreter - irregexp . h " <nl> <nl> # include " src / ast / ast . h " <nl> RegExpImpl : : IrregexpResult IrregexpInterpreter : : Match ( <nl> <nl> } / / namespace internal <nl> } / / namespace v8 <nl> - <nl> - # endif / / V8_INTERPRETED_REGEXP <nl> mmm a / src / regexp / interpreter - irregexp . h <nl> ppp b / src / regexp / interpreter - irregexp . h <nl> <nl> # ifndef V8_REGEXP_INTERPRETER_IRREGEXP_H_ <nl> # define V8_REGEXP_INTERPRETER_IRREGEXP_H_ <nl> <nl> - # ifdef V8_INTERPRETED_REGEXP <nl> - <nl> # include " src / regexp / jsregexp . h " <nl> <nl> namespace v8 { <nl> class IrregexpInterpreter { <nl> } / / namespace internal <nl> } / / namespace v8 <nl> <nl> - # endif / / V8_INTERPRETED_REGEXP <nl> - <nl> # endif / / V8_REGEXP_INTERPRETER_IRREGEXP_H_ <nl> mmm a / src / regexp / jsregexp . cc <nl> ppp b / src / regexp / jsregexp . cc <nl> <nl> # include " unicode / utypes . 
h " <nl> # endif / / V8_INTL_SUPPORT <nl> <nl> - # ifndef V8_INTERPRETED_REGEXP <nl> # if V8_TARGET_ARCH_IA32 <nl> # include " src / regexp / ia32 / regexp - macro - assembler - ia32 . h " <nl> # elif V8_TARGET_ARCH_X64 <nl> <nl> # else <nl> # error Unsupported target architecture . <nl> # endif <nl> - # endif <nl> - <nl> <nl> namespace v8 { <nl> namespace internal { <nl> bool RegExpImpl : : EnsureCompiledIrregexp ( Isolate * isolate , Handle < JSRegExp > re , <nl> Handle < String > sample_subject , <nl> bool is_one_byte ) { <nl> Object compiled_code = re - > DataAt ( JSRegExp : : code_index ( is_one_byte ) ) ; <nl> - # ifdef V8_INTERPRETED_REGEXP <nl> - if ( compiled_code - > IsByteArray ( ) ) return true ; <nl> - # else / / V8_INTERPRETED_REGEXP ( RegExp native code ) <nl> - if ( compiled_code - > IsCode ( ) ) return true ; <nl> - # endif <nl> + if ( compiled_code ! = Smi : : FromInt ( JSRegExp : : kUninitializedValue ) ) { <nl> + DCHECK ( FLAG_regexp_interpret_all ? compiled_code - > IsByteArray ( ) <nl> + : compiled_code - > IsCode ( ) ) ; <nl> + return true ; <nl> + } <nl> return CompileIrregexp ( isolate , re , sample_subject , is_one_byte ) ; <nl> } <nl> <nl> int RegExpImpl : : IrregexpPrepare ( Isolate * isolate , Handle < JSRegExp > regexp , <nl> bool is_one_byte = String : : IsOneByteRepresentationUnderneath ( * subject ) ; <nl> if ( ! EnsureCompiledIrregexp ( isolate , regexp , subject , is_one_byte ) ) return - 1 ; <nl> <nl> - # ifdef V8_INTERPRETED_REGEXP <nl> - / / Byte - code regexp needs space allocated for all its registers . <nl> - / / The result captures are copied to the start of the registers array <nl> - / / if the match succeeds . This way those registers are not clobbered <nl> - / / when we set the last match info from last successful match . <nl> - return IrregexpNumberOfRegisters ( FixedArray : : cast ( regexp - > data ( ) ) ) + <nl> - ( IrregexpNumberOfCaptures ( FixedArray : : cast ( regexp - > data ( ) ) ) + 1 ) * 2 ; <nl> - # else / / V8_INTERPRETED_REGEXP <nl> - / / Native regexp only needs room to output captures . Registers are handled <nl> - / / internally . <nl> - return ( IrregexpNumberOfCaptures ( FixedArray : : cast ( regexp - > data ( ) ) ) + 1 ) * 2 ; <nl> - # endif / / V8_INTERPRETED_REGEXP <nl> + if ( FLAG_regexp_interpret_all ) { <nl> + / / Byte - code regexp needs space allocated for all its registers . <nl> + / / The result captures are copied to the start of the registers array <nl> + / / if the match succeeds . This way those registers are not clobbered <nl> + / / when we set the last match info from last successful match . <nl> + return IrregexpNumberOfRegisters ( FixedArray : : cast ( regexp - > data ( ) ) ) + <nl> + ( IrregexpNumberOfCaptures ( FixedArray : : cast ( regexp - > data ( ) ) ) + 1 ) * 2 ; <nl> + } else { <nl> + / / Native regexp only needs room to output captures . Registers are handled <nl> + / / internally . 
<nl> + return ( IrregexpNumberOfCaptures ( FixedArray : : cast ( regexp - > data ( ) ) ) + 1 ) * 2 ; <nl> + } <nl> } <nl> <nl> int RegExpImpl : : IrregexpExecRaw ( Isolate * isolate , Handle < JSRegExp > regexp , <nl> int RegExpImpl : : IrregexpExecRaw ( Isolate * isolate , Handle < JSRegExp > regexp , <nl> <nl> bool is_one_byte = String : : IsOneByteRepresentationUnderneath ( * subject ) ; <nl> <nl> - # ifndef V8_INTERPRETED_REGEXP <nl> - DCHECK ( output_size > = ( IrregexpNumberOfCaptures ( * irregexp ) + 1 ) * 2 ) ; <nl> - do { <nl> - EnsureCompiledIrregexp ( isolate , regexp , subject , is_one_byte ) ; <nl> - Handle < Code > code ( IrregexpNativeCode ( * irregexp , is_one_byte ) , isolate ) ; <nl> - / / The stack is used to allocate registers for the compiled regexp code . <nl> - / / This means that in case of failure , the output registers array is left <nl> - / / untouched and contains the capture results from the previous successful <nl> - / / match . We can use that to set the last match info lazily . <nl> - NativeRegExpMacroAssembler : : Result res = <nl> - NativeRegExpMacroAssembler : : Match ( code , <nl> - subject , <nl> - output , <nl> - output_size , <nl> - index , <nl> - isolate ) ; <nl> - if ( res ! = NativeRegExpMacroAssembler : : RETRY ) { <nl> - DCHECK ( res ! = NativeRegExpMacroAssembler : : EXCEPTION | | <nl> - isolate - > has_pending_exception ( ) ) ; <nl> - STATIC_ASSERT ( <nl> - static_cast < int > ( NativeRegExpMacroAssembler : : SUCCESS ) = = RE_SUCCESS ) ; <nl> - STATIC_ASSERT ( <nl> - static_cast < int > ( NativeRegExpMacroAssembler : : FAILURE ) = = RE_FAILURE ) ; <nl> - STATIC_ASSERT ( static_cast < int > ( NativeRegExpMacroAssembler : : EXCEPTION ) <nl> - = = RE_EXCEPTION ) ; <nl> - return static_cast < IrregexpResult > ( res ) ; <nl> - } <nl> - / / If result is RETRY , the string has changed representation , and we <nl> - / / must restart from scratch . <nl> - / / In this case , it means we must make sure we are prepared to handle <nl> - / / the , potentially , different subject ( the string can switch between <nl> - / / being internal and external , and even between being Latin1 and UC16 , <nl> - / / but the characters are always the same ) . <nl> - IrregexpPrepare ( isolate , regexp , subject ) ; <nl> - is_one_byte = String : : IsOneByteRepresentationUnderneath ( * subject ) ; <nl> - } while ( true ) ; <nl> - UNREACHABLE ( ) ; <nl> - # else / / V8_INTERPRETED_REGEXP <nl> - <nl> - DCHECK ( output_size > = IrregexpNumberOfRegisters ( * irregexp ) ) ; <nl> - / / We must have done EnsureCompiledIrregexp , so we can get the number of <nl> - / / registers . <nl> - int number_of_capture_registers = <nl> - ( IrregexpNumberOfCaptures ( * irregexp ) + 1 ) * 2 ; <nl> - int32_t * raw_output = & output [ number_of_capture_registers ] ; <nl> - / / We do not touch the actual capture result registers until we know there <nl> - / / has been a match so that we can use those capture results to set the <nl> - / / last match info . <nl> - for ( int i = number_of_capture_registers - 1 ; i > = 0 ; i - - ) { <nl> - raw_output [ i ] = - 1 ; <nl> - } <nl> - Handle < ByteArray > byte_codes ( IrregexpByteCode ( * irregexp , is_one_byte ) , <nl> - isolate ) ; <nl> - <nl> - IrregexpResult result = IrregexpInterpreter : : Match ( isolate , <nl> - byte_codes , <nl> - subject , <nl> - raw_output , <nl> - index ) ; <nl> - if ( result = = RE_SUCCESS ) { <nl> - / / Copy capture results to the start of the registers array . 
<nl> - MemCopy ( output , raw_output , number_of_capture_registers * sizeof ( int32_t ) ) ; <nl> - } <nl> - if ( result = = RE_EXCEPTION ) { <nl> - DCHECK ( ! isolate - > has_pending_exception ( ) ) ; <nl> - isolate - > StackOverflow ( ) ; <nl> + if ( ! FLAG_regexp_interpret_all ) { <nl> + DCHECK ( output_size > = ( IrregexpNumberOfCaptures ( * irregexp ) + 1 ) * 2 ) ; <nl> + do { <nl> + EnsureCompiledIrregexp ( isolate , regexp , subject , is_one_byte ) ; <nl> + Handle < Code > code ( IrregexpNativeCode ( * irregexp , is_one_byte ) , isolate ) ; <nl> + / / The stack is used to allocate registers for the compiled regexp code . <nl> + / / This means that in case of failure , the output registers array is left <nl> + / / untouched and contains the capture results from the previous successful <nl> + / / match . We can use that to set the last match info lazily . <nl> + NativeRegExpMacroAssembler : : Result res = <nl> + NativeRegExpMacroAssembler : : Match ( code , subject , output , output_size , <nl> + index , isolate ) ; <nl> + if ( res ! = NativeRegExpMacroAssembler : : RETRY ) { <nl> + DCHECK ( res ! = NativeRegExpMacroAssembler : : EXCEPTION | | <nl> + isolate - > has_pending_exception ( ) ) ; <nl> + STATIC_ASSERT ( static_cast < int > ( NativeRegExpMacroAssembler : : SUCCESS ) = = <nl> + RE_SUCCESS ) ; <nl> + STATIC_ASSERT ( static_cast < int > ( NativeRegExpMacroAssembler : : FAILURE ) = = <nl> + RE_FAILURE ) ; <nl> + STATIC_ASSERT ( static_cast < int > ( NativeRegExpMacroAssembler : : EXCEPTION ) = = <nl> + RE_EXCEPTION ) ; <nl> + return static_cast < IrregexpResult > ( res ) ; <nl> + } <nl> + / / If result is RETRY , the string has changed representation , and we <nl> + / / must restart from scratch . <nl> + / / In this case , it means we must make sure we are prepared to handle <nl> + / / the , potentially , different subject ( the string can switch between <nl> + / / being internal and external , and even between being Latin1 and UC16 , <nl> + / / but the characters are always the same ) . <nl> + IrregexpPrepare ( isolate , regexp , subject ) ; <nl> + is_one_byte = String : : IsOneByteRepresentationUnderneath ( * subject ) ; <nl> + } while ( true ) ; <nl> + UNREACHABLE ( ) ; <nl> + } else { <nl> + DCHECK ( FLAG_regexp_interpret_all ) ; <nl> + DCHECK ( output_size > = IrregexpNumberOfRegisters ( * irregexp ) ) ; <nl> + / / We must have done EnsureCompiledIrregexp , so we can get the number of <nl> + / / registers . <nl> + int number_of_capture_registers = <nl> + ( IrregexpNumberOfCaptures ( * irregexp ) + 1 ) * 2 ; <nl> + int32_t * raw_output = & output [ number_of_capture_registers ] ; <nl> + / / We do not touch the actual capture result registers until we know there <nl> + / / has been a match so that we can use those capture results to set the <nl> + / / last match info . <nl> + for ( int i = number_of_capture_registers - 1 ; i > = 0 ; i - - ) { <nl> + raw_output [ i ] = - 1 ; <nl> + } <nl> + Handle < ByteArray > byte_codes ( IrregexpByteCode ( * irregexp , is_one_byte ) , <nl> + isolate ) ; <nl> + <nl> + IrregexpResult result = IrregexpInterpreter : : Match ( <nl> + isolate , byte_codes , subject , raw_output , index ) ; <nl> + if ( result = = RE_SUCCESS ) { <nl> + / / Copy capture results to the start of the registers array . <nl> + MemCopy ( output , raw_output , <nl> + number_of_capture_registers * sizeof ( int32_t ) ) ; <nl> + } <nl> + if ( result = = RE_EXCEPTION ) { <nl> + DCHECK ( ! 
isolate - > has_pending_exception ( ) ) ; <nl> + isolate - > StackOverflow ( ) ; <nl> + } <nl> + return result ; <nl> } <nl> - return result ; <nl> - # endif / / V8_INTERPRETED_REGEXP <nl> } <nl> <nl> MaybeHandle < Object > RegExpImpl : : IrregexpExec ( <nl> MaybeHandle < Object > RegExpImpl : : IrregexpExec ( <nl> subject = String : : Flatten ( isolate , subject ) ; <nl> <nl> / / Prepare space for the return values . <nl> - # if defined ( V8_INTERPRETED_REGEXP ) & & defined ( DEBUG ) <nl> - if ( FLAG_trace_regexp_bytecodes ) { <nl> + # ifdef DEBUG <nl> + if ( FLAG_regexp_interpret_all & & FLAG_trace_regexp_bytecodes ) { <nl> String pattern = regexp - > Pattern ( ) ; <nl> PrintF ( " \ n \ nRegexp match : / % s / \ n \ n " , pattern - > ToCString ( ) . get ( ) ) ; <nl> PrintF ( " \ n \ nSubject string : ' % s ' \ n \ n " , subject - > ToCString ( ) . get ( ) ) ; <nl> RegExpImpl : : GlobalCache : : GlobalCache ( Handle < JSRegExp > regexp , <nl> regexp_ ( regexp ) , <nl> subject_ ( subject ) , <nl> isolate_ ( isolate ) { <nl> - # ifdef V8_INTERPRETED_REGEXP <nl> - bool interpreted = true ; <nl> - # else <nl> - bool interpreted = false ; <nl> - # endif / / V8_INTERPRETED_REGEXP <nl> + bool interpreted = FLAG_regexp_interpret_all ; <nl> <nl> if ( regexp_ - > TypeTag ( ) = = JSRegExp : : ATOM ) { <nl> static const int kAtomRegistersPerMatch = 2 ; <nl> RegExpEngine : : CompilationResult RegExpCompiler : : Assemble ( <nl> Handle < HeapObject > code = macro_assembler_ - > GetCode ( pattern ) ; <nl> isolate - > IncreaseTotalRegexpCodeGenerated ( code - > Size ( ) ) ; <nl> work_list_ = nullptr ; <nl> - # if defined ( ENABLE_DISASSEMBLER ) & & ! defined ( V8_INTERPRETED_REGEXP ) <nl> - if ( FLAG_print_code ) { <nl> + # ifdef ENABLE_DISASSEMBLER <nl> + if ( FLAG_print_code & & ! FLAG_regexp_interpret_all ) { <nl> CodeTracer : : Scope trace_scope ( isolate - > GetCodeTracer ( ) ) ; <nl> OFStream os ( trace_scope . file ( ) ) ; <nl> Handle < Code > : : cast ( code ) - > Disassemble ( pattern - > ToCString ( ) . get ( ) , os ) ; <nl> RegExpEngine : : CompilationResult RegExpEngine : : Compile ( <nl> } <nl> <nl> / / Create the correct assembler for the architecture . <nl> - # ifndef V8_INTERPRETED_REGEXP <nl> - DCHECK ( ! FLAG_jitless ) ; <nl> - <nl> - / / Native regexp implementation . <nl> + std : : unique_ptr < RegExpMacroAssembler > macro_assembler ; <nl> + if ( ! FLAG_regexp_interpret_all ) { <nl> + / / Native regexp implementation . <nl> + DCHECK ( ! FLAG_jitless ) ; <nl> <nl> - NativeRegExpMacroAssembler : : Mode mode = <nl> - is_one_byte ? NativeRegExpMacroAssembler : : LATIN1 <nl> - : NativeRegExpMacroAssembler : : UC16 ; <nl> + NativeRegExpMacroAssembler : : Mode mode = <nl> + is_one_byte ? NativeRegExpMacroAssembler : : LATIN1 <nl> + : NativeRegExpMacroAssembler : : UC16 ; <nl> <nl> # if V8_TARGET_ARCH_IA32 <nl> - RegExpMacroAssemblerIA32 macro_assembler ( isolate , zone , mode , <nl> - ( data - > capture_count + 1 ) * 2 ) ; <nl> + macro_assembler . reset ( new RegExpMacroAssemblerIA32 ( <nl> + isolate , zone , mode , ( data - > capture_count + 1 ) * 2 ) ) ; <nl> # elif V8_TARGET_ARCH_X64 <nl> - RegExpMacroAssemblerX64 macro_assembler ( isolate , zone , mode , <nl> - ( data - > capture_count + 1 ) * 2 ) ; <nl> + macro_assembler . 
reset ( new RegExpMacroAssemblerX64 ( <nl> + isolate , zone , mode , ( data - > capture_count + 1 ) * 2 ) ) ; <nl> # elif V8_TARGET_ARCH_ARM <nl> - RegExpMacroAssemblerARM macro_assembler ( isolate , zone , mode , <nl> - ( data - > capture_count + 1 ) * 2 ) ; <nl> + macro_assembler . reset ( new RegExpMacroAssemblerARM ( <nl> + isolate , zone , mode , ( data - > capture_count + 1 ) * 2 ) ) ; <nl> # elif V8_TARGET_ARCH_ARM64 <nl> - RegExpMacroAssemblerARM64 macro_assembler ( isolate , zone , mode , <nl> - ( data - > capture_count + 1 ) * 2 ) ; <nl> + macro_assembler . reset ( new RegExpMacroAssemblerARM64 ( <nl> + isolate , zone , mode , ( data - > capture_count + 1 ) * 2 ) ) ; <nl> # elif V8_TARGET_ARCH_S390 <nl> - RegExpMacroAssemblerS390 macro_assembler ( isolate , zone , mode , <nl> - ( data - > capture_count + 1 ) * 2 ) ; <nl> + macro_assembler . reset ( new RegExpMacroAssemblerS390 ( <nl> + isolate , zone , mode , ( data - > capture_count + 1 ) * 2 ) ) ; <nl> # elif V8_TARGET_ARCH_PPC <nl> - RegExpMacroAssemblerPPC macro_assembler ( isolate , zone , mode , <nl> - ( data - > capture_count + 1 ) * 2 ) ; <nl> + macro_assembler . reset ( new RegExpMacroAssemblerPPC ( <nl> + isolate , zone , mode , ( data - > capture_count + 1 ) * 2 ) ) ; <nl> # elif V8_TARGET_ARCH_MIPS <nl> - RegExpMacroAssemblerMIPS macro_assembler ( isolate , zone , mode , <nl> - ( data - > capture_count + 1 ) * 2 ) ; <nl> + macro_assembler . reset ( new RegExpMacroAssemblerMIPS ( <nl> + isolate , zone , mode , ( data - > capture_count + 1 ) * 2 ) ) ; <nl> # elif V8_TARGET_ARCH_MIPS64 <nl> - RegExpMacroAssemblerMIPS macro_assembler ( isolate , zone , mode , <nl> - ( data - > capture_count + 1 ) * 2 ) ; <nl> + macro_assembler . reset ( new RegExpMacroAssemblerMIPS ( <nl> + isolate , zone , mode , ( data - > capture_count + 1 ) * 2 ) ) ; <nl> # else <nl> # error " Unsupported architecture " <nl> # endif <nl> + } else { <nl> + DCHECK ( FLAG_regexp_interpret_all ) ; <nl> <nl> - # else / / V8_INTERPRETED_REGEXP <nl> - / / Interpreted regexp implementation . <nl> - EmbeddedVector < byte , 1024 > codes ; <nl> - RegExpMacroAssemblerIrregexp macro_assembler ( isolate , codes , zone ) ; <nl> - # endif / / V8_INTERPRETED_REGEXP <nl> + / / Interpreted regexp implementation . <nl> + macro_assembler . reset ( new RegExpMacroAssemblerIrregexp ( isolate , zone ) ) ; <nl> + } <nl> <nl> - macro_assembler . set_slow_safe ( TooMuchRegExpCode ( isolate , pattern ) ) ; <nl> + macro_assembler - > set_slow_safe ( TooMuchRegExpCode ( isolate , pattern ) ) ; <nl> <nl> / / Inserted here , instead of in Assembler , because it depends on information <nl> / / in the AST that isn ' t replicated in the Node structure . <nl> static const int kMaxBacksearchLimit = 1024 ; <nl> if ( is_end_anchored & & ! is_start_anchored & & ! is_sticky & & <nl> max_length < kMaxBacksearchLimit ) { <nl> - macro_assembler . SetCurrentPositionFromEnd ( max_length ) ; <nl> + macro_assembler - > SetCurrentPositionFromEnd ( max_length ) ; <nl> } <nl> <nl> if ( is_global ) { <nl> RegExpEngine : : CompilationResult RegExpEngine : : Compile ( <nl> } else if ( is_unicode ) { <nl> mode = RegExpMacroAssembler : : GLOBAL_UNICODE ; <nl> } <nl> - macro_assembler . set_global_mode ( mode ) ; <nl> + macro_assembler - > set_global_mode ( mode ) ; <nl> } <nl> <nl> - return compiler . Assemble ( isolate , & macro_assembler , node , data - > capture_count , <nl> - pattern ) ; <nl> + return compiler . Assemble ( isolate , macro_assembler . 
get ( ) , node , <nl> + data - > capture_count , pattern ) ; <nl> } <nl> <nl> bool RegExpEngine : : TooMuchRegExpCode ( Isolate * isolate , Handle < String > pattern ) { <nl> mmm a / src / regexp / jsregexp . h <nl> ppp b / src / regexp / jsregexp . h <nl> inline bool NeedsUnicodeCaseEquivalents ( JSRegExp : : Flags flags ) { <nl> <nl> class RegExpImpl { <nl> public : <nl> - / / Whether V8 is compiled with native regexp support or not . <nl> - static bool UsesNativeRegExp ( ) { <nl> - # ifdef V8_INTERPRETED_REGEXP <nl> - return false ; <nl> - # else <nl> - return true ; <nl> - # endif <nl> - } <nl> + / / Whether the irregexp engine generates native code or interpreter bytecode . <nl> + static bool UsesNativeRegExp ( ) { return ! FLAG_regexp_interpret_all ; } <nl> <nl> / / Returns a string representation of a regular expression . <nl> / / Implements RegExp . prototype . toString , see ECMA - 262 section 15 . 10 . 6 . 4 . <nl> mmm a / src / regexp / mips / regexp - macro - assembler - mips . cc <nl> ppp b / src / regexp / mips / regexp - macro - assembler - mips . cc <nl> <nl> namespace v8 { <nl> namespace internal { <nl> <nl> - # ifndef V8_INTERPRETED_REGEXP <nl> / * <nl> * This assembler uses the following register assignment convention <nl> * - t7 : Temporarily stores the index of capture start after a matching pass <nl> void RegExpMacroAssemblerMIPS : : LoadCurrentCharacterUnchecked ( int cp_offset , <nl> <nl> # undef __ <nl> <nl> - # endif / / V8_INTERPRETED_REGEXP <nl> - <nl> } / / namespace internal <nl> } / / namespace v8 <nl> <nl> mmm a / src / regexp / mips / regexp - macro - assembler - mips . h <nl> ppp b / src / regexp / mips / regexp - macro - assembler - mips . h <nl> <nl> namespace v8 { <nl> namespace internal { <nl> <nl> - # ifndef V8_INTERPRETED_REGEXP <nl> class RegExpMacroAssemblerMIPS : public NativeRegExpMacroAssembler { <nl> public : <nl> RegExpMacroAssemblerMIPS ( Isolate * isolate , Zone * zone , Mode mode , <nl> class RegExpMacroAssemblerMIPS : public NativeRegExpMacroAssembler { <nl> Label internal_failure_label_ ; <nl> } ; <nl> <nl> - # endif / / V8_INTERPRETED_REGEXP <nl> - <nl> - <nl> } / / namespace internal <nl> } / / namespace v8 <nl> <nl> mmm a / src / regexp / mips64 / regexp - macro - assembler - mips64 . cc <nl> ppp b / src / regexp / mips64 / regexp - macro - assembler - mips64 . cc <nl> <nl> namespace v8 { <nl> namespace internal { <nl> <nl> - # ifndef V8_INTERPRETED_REGEXP <nl> - <nl> / * clang - format off <nl> * <nl> * This assembler uses the following register assignment convention <nl> void RegExpMacroAssemblerMIPS : : LoadCurrentCharacterUnchecked ( int cp_offset , <nl> <nl> # undef __ <nl> <nl> - # endif / / V8_INTERPRETED_REGEXP <nl> - <nl> } / / namespace internal <nl> } / / namespace v8 <nl> <nl> mmm a / src / regexp / mips64 / regexp - macro - assembler - mips64 . h <nl> ppp b / src / regexp / mips64 / regexp - macro - assembler - mips64 . h <nl> <nl> namespace v8 { <nl> namespace internal { <nl> <nl> - # ifndef V8_INTERPRETED_REGEXP <nl> class RegExpMacroAssemblerMIPS : public NativeRegExpMacroAssembler { <nl> public : <nl> RegExpMacroAssemblerMIPS ( Isolate * isolate , Zone * zone , Mode mode , <nl> class RegExpMacroAssemblerMIPS : public NativeRegExpMacroAssembler { <nl> Label internal_failure_label_ ; <nl> } ; <nl> <nl> - # endif / / V8_INTERPRETED_REGEXP <nl> - <nl> - <nl> } / / namespace internal <nl> } / / namespace v8 <nl> <nl> mmm a / src / regexp / ppc / regexp - macro - assembler - ppc . 
cc <nl> ppp b / src / regexp / ppc / regexp - macro - assembler - ppc . cc <nl> <nl> namespace v8 { <nl> namespace internal { <nl> <nl> - # ifndef V8_INTERPRETED_REGEXP <nl> / * <nl> * This assembler uses the following register assignment convention <nl> * - r25 : Temporarily stores the index of capture start after a matching pass <nl> void RegExpMacroAssemblerPPC : : LoadCurrentCharacterUnchecked ( int cp_offset , <nl> # endif <nl> } <nl> <nl> - <nl> # undef __ <nl> <nl> - # endif / / V8_INTERPRETED_REGEXP <nl> } / / namespace internal <nl> } / / namespace v8 <nl> <nl> mmm a / src / regexp / ppc / regexp - macro - assembler - ppc . h <nl> ppp b / src / regexp / ppc / regexp - macro - assembler - ppc . h <nl> <nl> namespace v8 { <nl> namespace internal { <nl> <nl> - <nl> - # ifndef V8_INTERPRETED_REGEXP <nl> class RegExpMacroAssemblerPPC : public NativeRegExpMacroAssembler { <nl> public : <nl> RegExpMacroAssemblerPPC ( Isolate * isolate , Zone * zone , Mode mode , <nl> class RegExpMacroAssemblerPPC : public NativeRegExpMacroAssembler { <nl> const RegList kRegExpCalleeSaved = <nl> 1 < < 25 | 1 < < 26 | 1 < < 27 | 1 < < 28 | 1 < < 29 | 1 < < 30 | 1 < < 31 ; <nl> <nl> - # endif / / V8_INTERPRETED_REGEXP <nl> } / / namespace internal <nl> } / / namespace v8 <nl> <nl> mmm a / src / regexp / regexp - macro - assembler - irregexp - inl . h <nl> ppp b / src / regexp / regexp - macro - assembler - irregexp - inl . h <nl> <nl> # ifndef V8_REGEXP_REGEXP_MACRO_ASSEMBLER_IRREGEXP_INL_H_ <nl> # define V8_REGEXP_REGEXP_MACRO_ASSEMBLER_IRREGEXP_INL_H_ <nl> <nl> - # ifdef V8_INTERPRETED_REGEXP <nl> - <nl> # include " src / regexp / regexp - macro - assembler - irregexp . h " <nl> <nl> # include " src / ast / ast . h " <nl> void RegExpMacroAssemblerIrregexp : : Emit32 ( uint32_t word ) { <nl> } / / namespace internal <nl> } / / namespace v8 <nl> <nl> - # endif / / V8_INTERPRETED_REGEXP <nl> - <nl> # endif / / V8_REGEXP_REGEXP_MACRO_ASSEMBLER_IRREGEXP_INL_H_ <nl> mmm a / src / regexp / regexp - macro - assembler - irregexp . cc <nl> ppp b / src / regexp / regexp - macro - assembler - irregexp . cc <nl> <nl> / / Use of this source code is governed by a BSD - style license that can be <nl> / / found in the LICENSE file . <nl> <nl> - # ifdef V8_INTERPRETED_REGEXP <nl> - <nl> # include " src / regexp / regexp - macro - assembler - irregexp . h " <nl> <nl> # include " src / ast / ast . h " <nl> namespace v8 { <nl> namespace internal { <nl> <nl> RegExpMacroAssemblerIrregexp : : RegExpMacroAssemblerIrregexp ( Isolate * isolate , <nl> - Vector < byte > buffer , <nl> Zone * zone ) <nl> : RegExpMacroAssembler ( isolate , zone ) , <nl> - buffer_ ( buffer ) , <nl> + buffer_ ( Vector < byte > : : New ( 1024 ) ) , <nl> pc_ ( 0 ) , <nl> - own_buffer_ ( false ) , <nl> + own_buffer_ ( true ) , <nl> advance_current_end_ ( kInvalidPC ) , <nl> isolate_ ( isolate ) { } <nl> <nl> - <nl> RegExpMacroAssemblerIrregexp : : ~ RegExpMacroAssemblerIrregexp ( ) { <nl> if ( backtrack_ . is_linked ( ) ) backtrack_ . Unuse ( ) ; <nl> if ( own_buffer_ ) buffer_ . Dispose ( ) ; <nl> void RegExpMacroAssemblerIrregexp : : Expand ( ) { <nl> <nl> } / / namespace internal <nl> } / / namespace v8 <nl> - <nl> - # endif / / V8_INTERPRETED_REGEXP <nl> mmm a / src / regexp / regexp - macro - assembler - irregexp . h <nl> ppp b / src / regexp / regexp - macro - assembler - irregexp . 
h <nl> <nl> # ifndef V8_REGEXP_REGEXP_MACRO_ASSEMBLER_IRREGEXP_H_ <nl> # define V8_REGEXP_REGEXP_MACRO_ASSEMBLER_IRREGEXP_H_ <nl> <nl> - # ifdef V8_INTERPRETED_REGEXP <nl> - <nl> # include " src / regexp / regexp - macro - assembler . h " <nl> <nl> namespace v8 { <nl> class RegExpMacroAssemblerIrregexp : public RegExpMacroAssembler { <nl> / / relocation information starting from the end of the buffer . See CodeDesc <nl> / / for a detailed comment on the layout ( globals . h ) . <nl> / / <nl> - / / If the provided buffer is nullptr , the assembler allocates and grows its <nl> - / / own buffer , and buffer_size determines the initial buffer size . The buffer <nl> - / / is owned by the assembler and deallocated upon destruction of the <nl> - / / assembler . <nl> - / / <nl> - / / If the provided buffer is not nullptr , the assembler uses the provided <nl> - / / buffer for code generation and assumes its size to be buffer_size . If the <nl> - / / buffer is too small , a fatal error occurs . No deallocation of the buffer is <nl> - / / done upon destruction of the assembler . <nl> - RegExpMacroAssemblerIrregexp ( Isolate * isolate , Vector < byte > buffer , <nl> - Zone * zone ) ; <nl> + / / The assembler allocates and grows its own buffer , and buffer_size <nl> + / / determines the initial buffer size . The buffer is owned by the assembler <nl> + / / and deallocated upon destruction of the assembler . <nl> + RegExpMacroAssemblerIrregexp ( Isolate * isolate , Zone * zone ) ; <nl> virtual ~ RegExpMacroAssemblerIrregexp ( ) ; <nl> / / The byte - code interpreter checks on each push anyway . <nl> virtual int stack_limit_slack ( ) { return 1 ; } <nl> class RegExpMacroAssemblerIrregexp : public RegExpMacroAssembler { <nl> } / / namespace internal <nl> } / / namespace v8 <nl> <nl> - # endif / / V8_INTERPRETED_REGEXP <nl> - <nl> # endif / / V8_REGEXP_REGEXP_MACRO_ASSEMBLER_IRREGEXP_H_ <nl> mmm a / src / regexp / regexp - macro - assembler . cc <nl> ppp b / src / regexp / regexp - macro - assembler . cc <nl> bool RegExpMacroAssembler : : CheckSpecialCharacterClass ( uc16 type , <nl> return false ; <nl> } <nl> <nl> - # ifndef V8_INTERPRETED_REGEXP / / Avoid unused code , e . g . , on ARM . <nl> - <nl> NativeRegExpMacroAssembler : : NativeRegExpMacroAssembler ( Isolate * isolate , <nl> Zone * zone ) <nl> : RegExpMacroAssembler ( isolate , zone ) { } <nl> Address NativeRegExpMacroAssembler : : GrowStack ( Address stack_pointer , <nl> return new_stack_base - stack_content_size ; <nl> } <nl> <nl> - # endif / / V8_INTERPRETED_REGEXP <nl> - <nl> } / / namespace internal <nl> } / / namespace v8 <nl> mmm a / src / regexp / regexp - macro - assembler . h <nl> ppp b / src / regexp / regexp - macro - assembler . h <nl> class RegExpMacroAssembler { <nl> Zone * zone_ ; <nl> } ; <nl> <nl> - <nl> - # ifndef V8_INTERPRETED_REGEXP / / Avoid compiling unused code . <nl> - <nl> class NativeRegExpMacroAssembler : public RegExpMacroAssembler { <nl> public : <nl> / / Type of input string to generate code for . <nl> class NativeRegExpMacroAssembler : public RegExpMacroAssembler { <nl> int * output , int output_size , Isolate * isolate ) ; <nl> } ; <nl> <nl> - # endif / / V8_INTERPRETED_REGEXP <nl> - <nl> } / / namespace internal <nl> } / / namespace v8 <nl> <nl> mmm a / src / regexp / s390 / regexp - macro - assembler - s390 . cc <nl> ppp b / src / regexp / s390 / regexp - macro - assembler - s390 . 
cc <nl> <nl> namespace v8 { <nl> namespace internal { <nl> <nl> - # ifndef V8_INTERPRETED_REGEXP <nl> / * <nl> * This assembler uses the following register assignment convention <nl> * - r6 : Temporarily stores the index of capture start after a matching pass <nl> void RegExpMacroAssemblerS390 : : LoadCurrentCharacterUnchecked ( int cp_offset , <nl> <nl> # undef __ <nl> <nl> - # endif / / V8_INTERPRETED_REGEXP <nl> } / / namespace internal <nl> } / / namespace v8 <nl> <nl> mmm a / src / regexp / s390 / regexp - macro - assembler - s390 . h <nl> ppp b / src / regexp / s390 / regexp - macro - assembler - s390 . h <nl> <nl> namespace v8 { <nl> namespace internal { <nl> <nl> - # ifndef V8_INTERPRETED_REGEXP <nl> class RegExpMacroAssemblerS390 : public NativeRegExpMacroAssembler { <nl> public : <nl> RegExpMacroAssemblerS390 ( Isolate * isolate , Zone * zone , Mode mode , <nl> class RegExpMacroAssemblerS390 : public NativeRegExpMacroAssembler { <nl> const RegList kRegExpCalleeSaved = <nl> 1 < < 6 | 1 < < 7 | 1 < < 8 | 1 < < 9 | 1 < < 10 | 1 < < 11 | 1 < < 13 ; <nl> <nl> - # endif / / V8_INTERPRETED_REGEXP <nl> } / / namespace internal <nl> } / / namespace v8 <nl> <nl> mmm a / src / regexp / x64 / regexp - macro - assembler - x64 . cc <nl> ppp b / src / regexp / x64 / regexp - macro - assembler - x64 . cc <nl> <nl> namespace v8 { <nl> namespace internal { <nl> <nl> - # ifndef V8_INTERPRETED_REGEXP <nl> - <nl> / * <nl> * This assembler uses the following register assignment convention <nl> * - rdx : Currently loaded character ( s ) as Latin1 or UC16 . Must be loaded <nl> void RegExpMacroAssemblerX64 : : LoadCurrentCharacterUnchecked ( int cp_offset , <nl> <nl> # undef __ <nl> <nl> - # endif / / V8_INTERPRETED_REGEXP <nl> - <nl> } / / namespace internal <nl> } / / namespace v8 <nl> <nl> mmm a / src / regexp / x64 / regexp - macro - assembler - x64 . h <nl> ppp b / src / regexp / x64 / regexp - macro - assembler - x64 . h <nl> <nl> namespace v8 { <nl> namespace internal { <nl> <nl> - # ifndef V8_INTERPRETED_REGEXP <nl> - <nl> class RegExpMacroAssemblerX64 : public NativeRegExpMacroAssembler { <nl> public : <nl> RegExpMacroAssemblerX64 ( Isolate * isolate , Zone * zone , Mode mode , <nl> class RegExpMacroAssemblerX64 : public NativeRegExpMacroAssembler { <nl> Label stack_overflow_label_ ; <nl> } ; <nl> <nl> - # endif / / V8_INTERPRETED_REGEXP <nl> - <nl> } / / namespace internal <nl> } / / namespace v8 <nl> <nl> mmm a / test / cctest / cctest . status <nl> ppp b / test / cctest / cctest . status <nl> <nl> <nl> # Tests that generate code at runtime . 
<nl> ' codegen - tester / * ' : [ SKIP ] , <nl> + ' test - api / RegExpInterruption ' : [ SKIP ] , <nl> ' test - assembler - * ' : [ SKIP ] , <nl> ' test - basic - block - profiler / * ' : [ SKIP ] , <nl> ' test - branch - combine / * ' : [ SKIP ] , <nl> ' test - multiple - return / * ' : [ SKIP ] , <nl> + ' test - regexp / MacroAssemblernativeAtStart ' : [ SKIP ] , <nl> + ' test - regexp / MacroAssemblerNativeBackReferenceLATIN1 ' : [ SKIP ] , <nl> + ' test - regexp / MacroAssemblerNativeBackReferenceUC16 ' : [ SKIP ] , <nl> + ' test - regexp / MacroAssemblerNativeBackRefNoCase ' : [ SKIP ] , <nl> + ' test - regexp / MacroAssemblerNativeBacktrack ' : [ SKIP ] , <nl> + ' test - regexp / MacroAssemblerNativeLotsOfRegisters ' : [ SKIP ] , <nl> + ' test - regexp / MacroAssemblerNativeRegisters ' : [ SKIP ] , <nl> + ' test - regexp / MacroAssemblerNativeSimple ' : [ SKIP ] , <nl> + ' test - regexp / MacroAssemblerNativeSimpleUC16 ' : [ SKIP ] , <nl> + ' test - regexp / MacroAssemblerNativeSuccess ' : [ SKIP ] , <nl> + ' test - regexp / MacroAssemblerStackOverflow ' : [ SKIP ] , <nl> ' test - run - calls - to - external - references / * ' : [ SKIP ] , <nl> } ] , # lite_mode <nl> <nl> mmm a / test / cctest / test - api . cc <nl> ppp b / test / cctest / test - api . cc <nl> TEST ( CompileExternalTwoByteSource ) { <nl> } <nl> } <nl> <nl> - <nl> - # ifndef V8_INTERPRETED_REGEXP <nl> - <nl> struct RegExpInterruptionData { <nl> v8 : : base : : Atomic32 loop_count ; <nl> UC16VectorResource * string_resource ; <nl> TEST ( RegExpInterruption ) { <nl> i : : DeleteArray ( uc16_content ) ; <nl> } <nl> <nl> - # endif / / V8_INTERPRETED_REGEXP <nl> - <nl> - <nl> / / Test that we cannot set a property on the global object if there <nl> / / is a read - only property in the prototype chain . <nl> TEST ( ReadOnlyPropertyInGlobalProto ) { <nl> mmm a / test / cctest / test - regexp . cc <nl> ppp b / test / cctest / test - regexp . cc <nl> <nl> # include " src / assembler - arch . h " <nl> # include " src / ast / ast . h " <nl> # include " src / char - predicates - inl . h " <nl> + # include " src / macro - assembler . h " <nl> # include " src / objects - inl . h " <nl> # include " src / ostreams . h " <nl> + # include " src / regexp / interpreter - irregexp . h " <nl> # include " src / regexp / jsregexp . h " <nl> # include " src / regexp / regexp - macro - assembler - irregexp . h " <nl> # include " src / regexp / regexp - macro - assembler . h " <nl> <nl> # include " src / unicode - inl . h " <nl> # include " src / v8 . h " <nl> <nl> - # ifdef V8_INTERPRETED_REGEXP <nl> - # include " src / regexp / interpreter - irregexp . h " <nl> - # else / / V8_INTERPRETED_REGEXP <nl> - # include " src / macro - assembler . h " <nl> # if V8_TARGET_ARCH_ARM <nl> # include " src / regexp / arm / regexp - macro - assembler - arm . h " <nl> - # endif <nl> - # if V8_TARGET_ARCH_ARM64 <nl> + # elif V8_TARGET_ARCH_ARM64 <nl> # include " src / regexp / arm64 / regexp - macro - assembler - arm64 . h " <nl> - # endif <nl> - # if V8_TARGET_ARCH_S390 <nl> + # elif V8_TARGET_ARCH_S390 <nl> # include " src / regexp / s390 / regexp - macro - assembler - s390 . h " <nl> - # endif <nl> - # if V8_TARGET_ARCH_PPC <nl> + # elif V8_TARGET_ARCH_PPC <nl> # include " src / regexp / ppc / regexp - macro - assembler - ppc . h " <nl> - # endif <nl> - # if V8_TARGET_ARCH_MIPS <nl> + # elif V8_TARGET_ARCH_MIPS <nl> # include " src / regexp / mips / regexp - macro - assembler - mips . 
h " <nl> - # endif <nl> - # if V8_TARGET_ARCH_MIPS64 <nl> + # elif V8_TARGET_ARCH_MIPS64 <nl> # include " src / regexp / mips64 / regexp - macro - assembler - mips64 . h " <nl> - # endif <nl> - # if V8_TARGET_ARCH_X64 <nl> + # elif V8_TARGET_ARCH_X64 <nl> # include " src / regexp / x64 / regexp - macro - assembler - x64 . h " <nl> - # endif <nl> - # if V8_TARGET_ARCH_IA32 <nl> + # elif V8_TARGET_ARCH_IA32 <nl> # include " src / regexp / ia32 / regexp - macro - assembler - ia32 . h " <nl> + # else <nl> + # error Unknown architecture . <nl> # endif <nl> - # endif / / V8_INTERPRETED_REGEXP <nl> + <nl> # include " test / cctest / cctest . h " <nl> <nl> namespace v8 { <nl> TEST ( ParsePossessiveRepetition ) { <nl> <nl> / / Tests of interpreter . <nl> <nl> - <nl> - # ifndef V8_INTERPRETED_REGEXP <nl> - <nl> # if V8_TARGET_ARCH_IA32 <nl> typedef RegExpMacroAssemblerIA32 ArchRegExpMacroAssembler ; <nl> # elif V8_TARGET_ARCH_X64 <nl> TEST ( MacroAssemblerNativeLotsOfRegisters ) { <nl> isolate - > clear_pending_exception ( ) ; <nl> } <nl> <nl> - # else / / V8_INTERPRETED_REGEXP <nl> - <nl> TEST ( MacroAssembler ) { <nl> - byte codes [ 1024 ] ; <nl> Zone zone ( CcTest : : i_isolate ( ) - > allocator ( ) , ZONE_NAME ) ; <nl> - RegExpMacroAssemblerIrregexp m ( CcTest : : i_isolate ( ) , Vector < byte > ( codes , 1024 ) , <nl> - & zone ) ; <nl> + RegExpMacroAssemblerIrregexp m ( CcTest : : i_isolate ( ) , & zone ) ; <nl> / / ^ f ( o ) o . <nl> Label start , fail , backtrack ; <nl> <nl> TEST ( MacroAssembler ) { <nl> CHECK_EQ ( 42 , captures [ 0 ] ) ; <nl> } <nl> <nl> - # endif / / V8_INTERPRETED_REGEXP <nl> - <nl> - <nl> TEST ( AddInverseToTable ) { <nl> static const int kLimit = 1000 ; <nl> static const int kRangeCount = 16 ; <nl> mmm a / test / cctest / test - serialize . cc <nl> ppp b / test / cctest / test - serialize . cc <nl> UNINITIALIZED_TEST ( CustomSnapshotDataBlobStringNotInternalized ) { <nl> FreeCurrentEmbeddedBlob ( ) ; <nl> } <nl> <nl> - # ifndef V8_INTERPRETED_REGEXP <nl> namespace { <nl> <nl> void TestCustomSnapshotDataBlobWithIrregexpCode ( <nl> UNINITIALIZED_TEST ( CustomSnapshotDataBlobWithIrregexpCodeClearCode ) { <nl> TestCustomSnapshotDataBlobWithIrregexpCode ( <nl> v8 : : SnapshotCreator : : FunctionCodeHandling : : kClear ) ; <nl> } <nl> - # endif / / V8_INTERPRETED_REGEXP <nl> <nl> UNINITIALIZED_TEST ( SnapshotChecksum ) { <nl> DisableAlwaysOpt ( ) ; <nl> mmm a / test / cctest / test - thread - termination . cc <nl> ppp b / test / cctest / test - thread - termination . cc <nl> class TerminatorSleeperThread : public v8 : : base : : Thread { <nl> } ; <nl> <nl> TEST ( TerminateRegExp ) { <nl> - / / regexp interpreter does not support preemption . <nl> - # ifndef V8_INTERPRETED_REGEXP <nl> - i : : FLAG_allow_natives_syntax = true ; <nl> - v8 : : Isolate * isolate = CcTest : : isolate ( ) ; <nl> - v8 : : HandleScope scope ( isolate ) ; <nl> - v8 : : Local < v8 : : ObjectTemplate > global = CreateGlobalTemplate ( <nl> - isolate , TerminateCurrentThread , DoLoopCancelTerminate ) ; <nl> - v8 : : Local < v8 : : Context > context = v8 : : Context : : New ( isolate , nullptr , global ) ; <nl> - v8 : : Context : : Scope context_scope ( context ) ; <nl> - CHECK ( ! isolate - > IsExecutionTerminating ( ) ) ; <nl> - v8 : : TryCatch try_catch ( isolate ) ; <nl> - CHECK ( ! isolate - > IsExecutionTerminating ( ) ) ; <nl> - CHECK ( ! CompileRun ( " var re = / ( x + ) + y $ / ; re . test ( ' x ' ) ; " ) . 
IsEmpty ( ) ) ; <nl> - TerminatorSleeperThread terminator ( isolate , 100 ) ; <nl> - terminator . Start ( ) ; <nl> - CHECK ( CompileRun ( " re . test ( ' xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx ' ) ; fail ( ) ; " ) <nl> - . IsEmpty ( ) ) ; <nl> - CHECK ( try_catch . HasCaught ( ) ) ; <nl> - CHECK ( isolate - > IsExecutionTerminating ( ) ) ; <nl> - # endif / / V8_INTERPRETED_REGEXP <nl> + / / The regexp interpreter does not support preemption . <nl> + if ( ! i : : FLAG_regexp_interpret_all ) { <nl> + i : : FLAG_allow_natives_syntax = true ; <nl> + v8 : : Isolate * isolate = CcTest : : isolate ( ) ; <nl> + v8 : : HandleScope scope ( isolate ) ; <nl> + v8 : : Local < v8 : : ObjectTemplate > global = CreateGlobalTemplate ( <nl> + isolate , TerminateCurrentThread , DoLoopCancelTerminate ) ; <nl> + v8 : : Local < v8 : : Context > context = v8 : : Context : : New ( isolate , nullptr , global ) ; <nl> + v8 : : Context : : Scope context_scope ( context ) ; <nl> + CHECK ( ! isolate - > IsExecutionTerminating ( ) ) ; <nl> + v8 : : TryCatch try_catch ( isolate ) ; <nl> + CHECK ( ! isolate - > IsExecutionTerminating ( ) ) ; <nl> + CHECK ( ! CompileRun ( " var re = / ( x + ) + y $ / ; re . test ( ' x ' ) ; " ) . IsEmpty ( ) ) ; <nl> + TerminatorSleeperThread terminator ( isolate , 100 ) ; <nl> + terminator . Start ( ) ; <nl> + CHECK ( CompileRun ( " re . test ( ' xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx ' ) ; fail ( ) ; " ) <nl> + . IsEmpty ( ) ) ; <nl> + CHECK ( try_catch . HasCaught ( ) ) ; <nl> + CHECK ( isolate - > IsExecutionTerminating ( ) ) ; <nl> + } <nl> } <nl> <nl> TEST ( TerminateInMicrotask ) { <nl> | [ nojit ] Ship the regexp interpreter unconditionally | v8/v8 | 7e616f2b6e043c6e6c55e920166309be3e449c28 | 2019-01-23T16:06:31Z |
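The diff above drops the compile-time V8_INTERPRETED_REGEXP #ifdef in favor of the runtime flag FLAG_regexp_interpret_all, heap-allocating whichever macro assembler is chosen behind one base-class pointer. A minimal sketch of that dispatch pattern follows; Assembler, NativeAssembler, BytecodeAssembler, and interpret_all are illustrative stand-ins, not the real V8 types or flag.

    #include <memory>

    struct Assembler {
      virtual ~Assembler() = default;
      virtual void Emit() = 0;
    };
    struct NativeAssembler : Assembler {
      void Emit() override { /* generate machine code */ }
    };
    struct BytecodeAssembler : Assembler {
      void Emit() override { /* generate interpreter bytecode */ }
    };

    bool interpret_all = false;  // stand-in for FLAG_regexp_interpret_all

    // One runtime branch replaces the old per-architecture #ifdef tree.
    std::unique_ptr<Assembler> MakeAssembler() {
      if (interpret_all) return std::make_unique<BytecodeAssembler>();
      return std::make_unique<NativeAssembler>();
    }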
mmm a / LanguageBindings / Python / cntk / optimizer . py <nl> ppp b / LanguageBindings / Python / cntk / optimizer . py <nl> class SGD ( dict ) : <nl> <nl> def __init__ ( self , epoch_size = 0 , minibatch_size = 1 , learning_ratesPerMB = " 0 . 1 " , \ <nl> learning_rates_per_sample = None , momentum_per_mb = " 0 . 9 " , \ <nl> - momentum_per_sample = None , max_epochs = 5 , dropout_rate = " " ) : <nl> + momentum_per_sample = None , max_epochs = 5 , dropout_rate = None ) : <nl> " " " SGD constructor <nl> <nl> : param epoch_size : the number of samples to use in each epoch . An intermediate <nl> def generate_config ( self ) : <nl> <nl> config = [ ] <nl> for k , v in self . items ( ) : <nl> - config . append ( ' { 0 } = { 1 } \ r \ n ' . format ( k , v ) ) <nl> + if ( v is not None ) : <nl> + config . append ( ' { 0 } = { 1 } \ r \ n ' . format ( k , v ) ) <nl> return ' ' . join ( config ) <nl> \ No newline at end of file <nl> mmm a / LanguageBindings / Python / cntk / reader . py <nl> ppp b / LanguageBindings / Python / cntk / reader . py <nl> def generate_config ( self ) : <nl> if self [ ' FeaturesStart ' ] is not None : <nl> template + = ' ' ' <nl> <nl> - features = [ <nl> + v2 = [ <nl> start = " % ( FeaturesStart ) s " <nl> dim = " % ( FeaturesDim ) s " <nl> ] ' ' ' <nl> def generate_config ( self ) : <nl> if self [ ' LabelsStart ' ] is not None : <nl> template + = ' ' ' <nl> <nl> - labels = [ <nl> + v0 = [ <nl> start = " % ( LabelsStart ) s " <nl> dim = " % ( LabelsDim ) s " <nl> labelDim = " % ( NumOfClasses ) s " <nl> | train action improvments | microsoft/CNTK | b82aef9cb1da96620ec31e8ea33a67d0bdcf796a | 2016-03-14T14:59:31Z |
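The fix above changes dropout_rate's default from an empty string to None and skips unset values when the config text is generated. The same "omit unset options" idea sketched in C++ with std::optional; the function and key layout are assumptions for illustration, not CNTK's API.

    #include <map>
    #include <optional>
    #include <sstream>
    #include <string>

    // Unset options carry no value and are simply skipped, mirroring the
    // `if (v is not None)` check in the Python fix.
    std::string GenerateConfig(
        const std::map<std::string, std::optional<std::string>>& opts) {
      std::ostringstream out;
      for (const auto& [key, value] : opts) {
        if (!value) continue;
        out << key << " = " << *value << "\r\n";
      }
      return out.str();
    }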
mmm a / xbmc / addons / RepositoryUpdater . cpp <nl> ppp b / xbmc / addons / RepositoryUpdater . cpp <nl> CRepositoryUpdater : : CRepositoryUpdater ( CAddonMgr & addonMgr ) : <nl> <nl> void CRepositoryUpdater : : Start ( ) <nl> { <nl> - CServiceBroker : : GetAddonMgr ( ) . Events ( ) . Subscribe ( this , & CRepositoryUpdater : : OnEvent ) ; <nl> + m_addonMgr . Events ( ) . Subscribe ( this , & CRepositoryUpdater : : OnEvent ) ; <nl> ScheduleUpdate ( ) ; <nl> } <nl> <nl> CRepositoryUpdater : : ~ CRepositoryUpdater ( ) <nl> { <nl> - CServiceBroker : : GetAddonMgr ( ) . Events ( ) . Unsubscribe ( this ) ; <nl> + m_addonMgr . Events ( ) . Unsubscribe ( this ) ; <nl> } <nl> <nl> void CRepositoryUpdater : : OnEvent ( const ADDON : : AddonEvent & event ) <nl> void CRepositoryUpdater : : OnEvent ( const ADDON : : AddonEvent & event ) <nl> if ( auto enableEvent = dynamic_cast < const AddonEvents : : Enabled * > ( & event ) ) <nl> { <nl> AddonPtr addon ; <nl> - if ( CAddonMgr : : GetInstance ( ) . GetAddon ( enableEvent - > id , addon , ADDON_REPOSITORY ) ) <nl> + if ( m_addonMgr . GetAddon ( enableEvent - > id , addon , ADDON_REPOSITORY ) ) <nl> ScheduleUpdate ( ) ; <nl> } <nl> } <nl> void CRepositoryUpdater : : OnJobComplete ( unsigned int jobID , bool success , CJob * j <nl> CLog : : Log ( LOGDEBUG , " CRepositoryUpdater : done . " ) ; <nl> m_doneEvent . Set ( ) ; <nl> <nl> - VECADDONS updates = CAddonMgr : : GetInstance ( ) . GetAvailableUpdates ( ) ; <nl> + VECADDONS updates = m_addonMgr . GetAvailableUpdates ( ) ; <nl> <nl> if ( CServiceBroker : : GetSettings ( ) . GetInt ( CSettings : : SETTING_ADDONS_AUTOUPDATES ) = = AUTO_UPDATES_NOTIFY ) <nl> { <nl> void CRepositoryUpdater : : OnJobComplete ( unsigned int jobID , bool success , CJob * j <nl> bool CRepositoryUpdater : : CheckForUpdates ( bool showProgress ) <nl> { <nl> VECADDONS addons ; <nl> - if ( CAddonMgr : : GetInstance ( ) . GetAddons ( addons , ADDON_REPOSITORY ) & & ! addons . empty ( ) ) <nl> + if ( m_addonMgr . GetAddons ( addons , ADDON_REPOSITORY ) & & ! addons . empty ( ) ) <nl> { <nl> CSingleLock lock ( m_criticalSection ) ; <nl> for ( const auto & addon : addons ) <nl> void CRepositoryUpdater : : OnSettingChanged ( std : : shared_ptr < const CSetting > settin <nl> CDateTime CRepositoryUpdater : : LastUpdated ( ) const <nl> { <nl> VECADDONS repos ; <nl> - if ( ! CAddonMgr : : GetInstance ( ) . GetAddons ( repos , ADDON_REPOSITORY ) | | repos . empty ( ) ) <nl> + if ( ! m_addonMgr . GetAddons ( repos , ADDON_REPOSITORY ) | | repos . empty ( ) ) <nl> return CDateTime ( ) ; <nl> <nl> CAddonDatabase db ; <nl> void CRepositoryUpdater : : ScheduleUpdate ( ) <nl> if ( CServiceBroker : : GetSettings ( ) . GetInt ( CSettings : : SETTING_ADDONS_AUTOUPDATES ) = = AUTO_UPDATES_NEVER ) <nl> return ; <nl> <nl> - if ( ! CAddonMgr : : GetInstance ( ) . HasAddons ( ADDON_REPOSITORY ) ) <nl> + if ( ! m_addonMgr . HasAddons ( ADDON_REPOSITORY ) ) <nl> return ; <nl> <nl> auto prev = LastUpdated ( ) ; <nl> | [ addons ] Fix compiler warning : xbmc / xbmc / addons / RepositoryUpdater . h : Private field ' m_addonMgr ' is not used . | xbmc/xbmc | 2497980444302c04360de2e98b20e374224461d6 | 2017-07-11T16:00:57Z |
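The warning is resolved by routing every lookup through the injected m_addonMgr member instead of the CAddonMgr::GetInstance()/CServiceBroker singletons, so the field is genuinely used. A rough sketch of the dependency-injection pattern, with AddonMgr and RepositoryUpdater reduced to placeholders:

    class AddonMgr {
     public:
      bool HasAddons() const { return true; }  // placeholder query
    };

    class RepositoryUpdater {
     public:
      explicit RepositoryUpdater(AddonMgr& addonMgr) : m_addonMgr(addonMgr) {}
      void ScheduleUpdate() {
        // Ask the injected dependency, not a global singleton.
        if (!m_addonMgr.HasAddons()) return;
        // ... queue the update job ...
      }
     private:
      AddonMgr& m_addonMgr;  // now referenced on every call site
    };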
mmm a / src / runtime . cc <nl> ppp b / src / runtime . cc <nl> RUNTIME_FUNCTION ( MaybeObject * , Runtime_HasLocalProperty ) { <nl> ASSERT ( args . length ( ) = = 2 ) ; <nl> CONVERT_CHECKED ( String , key , args [ 1 ] ) ; <nl> <nl> + uint32_t index ; <nl> + const bool key_is_array_index = key - > AsArrayIndex ( & index ) ; <nl> + <nl> Object * obj = args [ 0 ] ; <nl> / / Only JS objects can have properties . <nl> if ( obj - > IsJSObject ( ) ) { <nl> JSObject * object = JSObject : : cast ( obj ) ; <nl> - / / Fast case - no interceptors . <nl> + / / Fast case : either the key is a real named property or it is not <nl> + / / an array index and there are no interceptors or hidden <nl> + / / prototypes . <nl> if ( object - > HasRealNamedProperty ( key ) ) return isolate - > heap ( ) - > true_value ( ) ; <nl> - / / Slow case . Either it ' s not there or we have an interceptor . We should <nl> - / / have handles for this kind of deal . <nl> + Map * map = object - > map ( ) ; <nl> + if ( ! key_is_array_index & & <nl> + ! map - > has_named_interceptor ( ) & & <nl> + ! HeapObject : : cast ( map - > prototype ( ) ) - > map ( ) - > is_hidden_prototype ( ) ) { <nl> + return isolate - > heap ( ) - > false_value ( ) ; <nl> + } <nl> + / / Slow case . <nl> HandleScope scope ( isolate ) ; <nl> return HasLocalPropertyImplementation ( isolate , <nl> Handle < JSObject > ( object ) , <nl> Handle < String > ( key ) ) ; <nl> - } else if ( obj - > IsString ( ) ) { <nl> + } else if ( obj - > IsString ( ) & & key_is_array_index ) { <nl> / / Well , there is one exception : Handle [ ] on strings . <nl> - uint32_t index ; <nl> - if ( key - > AsArrayIndex ( & index ) ) { <nl> - String * string = String : : cast ( obj ) ; <nl> - if ( index < static_cast < uint32_t > ( string - > length ( ) ) ) <nl> - return isolate - > heap ( ) - > true_value ( ) ; <nl> + String * string = String : : cast ( obj ) ; <nl> + if ( index < static_cast < uint32_t > ( string - > length ( ) ) ) { <nl> + return isolate - > heap ( ) - > true_value ( ) ; <nl> } <nl> } <nl> return isolate - > heap ( ) - > false_value ( ) ; <nl> | Extend the fast case of HasLocalProperty . | v8/v8 | bc52ed0850283db63f183cfd5b5ba55aad982010 | 2011-05-12T12:48:10Z |
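The diff widens the fast path: besides the existing fast positive answer, the probe can now answer false without entering the slow handle-scope path when the key is not an array index, the map has no named interceptor, and the prototype is not hidden. A simplified sketch of that cheap-checks-first shape; the Object fields below are stand-ins for the real map bits, not V8's layout.

    #include <string>
    #include <unordered_map>

    struct Object {
      std::unordered_map<std::string, int> named;  // fast own-property storage
      bool has_named_interceptor = false;
      bool hidden_prototype = false;
    };

    // Stub for the expensive interceptor / hidden-prototype walk.
    bool SlowHasProperty(const Object&, const std::string&) { return false; }

    bool HasLocalProperty(const Object& obj, const std::string& key,
                          bool key_is_array_index) {
      if (obj.named.count(key)) return true;           // fast positive
      if (!key_is_array_index && !obj.has_named_interceptor &&
          !obj.hidden_prototype)
        return false;                                  // new fast negative
      return SlowHasProperty(obj, key);                // rare slow path
    }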
mmm a / lib / Sema / TypeCheckType . cpp <nl> ppp b / lib / Sema / TypeCheckType . cpp <nl> static Type diagnoseUnknownType ( TypeResolution resolution , <nl> AbstractFunctionDecl * methodDecl = dc - > getInnermostMethodContext ( ) ; <nl> bool declaringMethod = methodDecl & & <nl> methodDecl - > getDeclContext ( ) = = dc - > getParentForLookup ( ) ; <nl> + bool isPropertyOfClass = insideClass & & <nl> + options . is ( TypeResolverContext : : PatternBindingDecl ) ; <nl> <nl> - if ( ( ( ! insideClass | | ! declaringMethod ) & & ! ( insideClass & & <nl> - options . is ( TypeResolverContext : : PatternBindingDecl ) ) & & <nl> + if ( ( ( ! insideClass | | ! declaringMethod ) & & ! isPropertyOfClass & & <nl> ! options . is ( TypeResolverContext : : GenericRequirement ) ) | | <nl> options . is ( TypeResolverContext : : ExplicitCastExpr ) ) { <nl> Type SelfType = nominal - > getSelfInterfaceType ( ) ; <nl> | Intermediate variable | apple/swift | f89f28c3619592fbf4b2694e177a794341f7e788 | 2019-03-24T12:59:48Z |
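The change is purely a readability refactor: the compound class-property test is pulled into a named isPropertyOfClass before the larger condition. The same move in miniature; the boolean parameters are illustrative, not the real Sema state.

    void CheckUnknownType(bool insideClass, bool declaringMethod,
                          bool isPatternBinding) {
      // Naming the sub-condition states what the test means.
      const bool isPropertyOfClass = insideClass && isPatternBinding;
      if ((!insideClass || !declaringMethod) && !isPropertyOfClass) {
        // ... suggest qualifying the type with Self ...
      }
    }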
mmm a / src / operator / nn / pooling - inl . h <nl> ppp b / src / operator / nn / pooling - inl . h <nl> struct PoolingParam : public dmlc : : Parameter < PoolingParam > { <nl> bool global_pool ; <nl> bool cudnn_off ; <nl> DMLC_DECLARE_PARAMETER ( PoolingParam ) { <nl> - DMLC_DECLARE_FIELD ( global_pool ) . set_default ( false ) <nl> - . describe ( " Ignore kernel size , do global pooling based on current input feature map . " ) ; <nl> - <nl> - DMLC_DECLARE_FIELD ( cudnn_off ) . set_default ( false ) <nl> - . describe ( " Turn off cudnn pooling and use MXNet pooling operator . " ) ; <nl> - <nl> - DMLC_DECLARE_FIELD ( kernel ) <nl> + DMLC_DECLARE_FIELD ( kernel ) . set_default ( TShape ( ) ) / / add default value here <nl> . enforce_nonzero ( ) <nl> . describe ( " Pooling kernel size : ( y , x ) or ( d , y , x ) " ) ; <nl> <nl> - DMLC_DECLARE_FIELD ( pool_type ) <nl> + DMLC_DECLARE_FIELD ( pool_type ) . set_default ( pool_enum : : kMaxPooling ) / / add default pooling method <nl> . add_enum ( " max " , pool_enum : : kMaxPooling ) <nl> . add_enum ( " avg " , pool_enum : : kAvgPooling ) <nl> . add_enum ( " sum " , pool_enum : : kSumPooling ) <nl> . describe ( " Pooling type to be applied . " ) ; <nl> <nl> + DMLC_DECLARE_FIELD ( global_pool ) . set_default ( false ) <nl> + . describe ( " Ignore kernel size , do global pooling based on current input feature map . " ) ; <nl> + <nl> + DMLC_DECLARE_FIELD ( cudnn_off ) . set_default ( false ) <nl> + . describe ( " Turn off cudnn pooling and use MXNet pooling operator . " ) ; <nl> + <nl> DMLC_DECLARE_FIELD ( pooling_convention ) . set_default ( pool_enum : : kValid ) <nl> . add_enum ( " full " , pool_enum : : kFull ) <nl> . add_enum ( " valid " , pool_enum : : kValid ) <nl> class PoolingOp { <nl> using namespace mshadow ; <nl> Stream < xpu > * s = ctx . get_stream < xpu > ( ) ; <nl> const TShape & ishape = in_data . shape_ ; <nl> + TShape kernel = param_ . kernel ; <nl> TShape padding = param_ . pad ; <nl> + TShape stride = param_ . stride ; <nl> if ( param_ . global_pool ) { <nl> - for ( index_t i = 0 ; i < padding . ndim ( ) ; i + + ) { <nl> + kernel = TShape ( ishape . data ( ) + 2 , <nl> + ishape . data ( ) + ishape . ndim ( ) ) ; <nl> + padding = TShape ( ishape . ndim ( ) - 2 ) ; <nl> + for ( index_t i = 0 ; i < ishape . ndim ( ) - 2 ; i + + ) { <nl> padding [ i ] = 0 ; <nl> } <nl> + stride = TShape ( ishape . ndim ( ) - 2 ) ; <nl> } <nl> <nl> pool ( s , in_data . dptr < DType > ( ) , in_data . shape_ , out_data . shape_ , <nl> - param_ . global_pool ? <nl> - TShape ( ishape . data ( ) + ishape . ndim ( ) - param_ . kernel . ndim ( ) , ishape . data ( ) + ishape . ndim ( ) ) <nl> - : param_ . kernel , <nl> + kernel , <nl> padding , <nl> - param_ . global_pool ? TShape ( param_ . kernel . ndim ( ) ) : param_ . stride , <nl> + stride , <nl> param_ . pool_type , req , out_data . dptr < DType > ( ) ) ; <nl> } <nl> <nl> class PoolingOp { <nl> using namespace mshadow ; <nl> Stream < xpu > * s = ctx . get_stream < xpu > ( ) ; <nl> const TShape & ishape = in_data . shape_ ; <nl> + TShape kernel = param_ . kernel ; <nl> TShape padding = param_ . pad ; <nl> + TShape stride = param_ . stride ; <nl> if ( param_ . global_pool ) { <nl> - for ( index_t i = 0 ; i < padding . ndim ( ) ; i + + ) { <nl> + kernel = TShape ( ishape . data ( ) + 2 , <nl> + ishape . data ( ) + ishape . ndim ( ) ) ; <nl> + padding = TShape ( ishape . ndim ( ) - 2 ) ; <nl> + for ( index_t i = 0 ; i < ishape . 
ndim ( ) - 2 ; i + + ) { <nl> padding [ i ] = 0 ; <nl> } <nl> + stride = TShape ( ishape . ndim ( ) - 2 ) ; <nl> } <nl> <nl> unpool ( s , out_grad . dptr < DType > ( ) , in_data . dptr < DType > ( ) , out_data . dptr < DType > ( ) , <nl> in_grad . shape_ , out_grad . shape_ , <nl> - param_ . global_pool ? <nl> - TShape ( ishape . data ( ) + ishape . ndim ( ) - param_ . kernel . ndim ( ) , ishape . data ( ) + ishape . ndim ( ) ) <nl> - : param_ . kernel , <nl> + kernel , <nl> padding , <nl> - param_ . global_pool ? TShape ( param_ . kernel . ndim ( ) ) : param_ . stride , <nl> + stride , <nl> param_ . pool_type , req , in_grad . dptr < DType > ( ) ) ; <nl> } <nl> <nl> class PoolingOp { <nl> template < typename xpu , typename DType > <nl> PoolingOp < xpu , DType > & GetPoolingOp ( const PoolingParam & param ) { <nl> static thread_local PoolingOp < xpu , DType > op ; <nl> + / / check if filter size assigned correctly <nl> + if ( param . global_pool = = false ) { <nl> + CHECK_GT ( param . kernel . ndim ( ) , 0U ) <nl> + < < " You need to set the kernel size if global pooling is not used " ; <nl> + } <nl> op . Init ( param ) ; <nl> return op ; <nl> } <nl> mmm a / src / operator / nn / pooling . cc <nl> ppp b / src / operator / nn / pooling . cc <nl> static void PoolingParamParser ( nnvm : : NodeAttrs * attrs ) { <nl> if ( param . stride . ndim ( ) = = 0 ) param . stride = Shape2 ( 1 , 1 ) ; <nl> if ( param . pad . ndim ( ) = = 0 ) param . pad = Shape2 ( 0 , 0 ) ; <nl> } else { <nl> - CHECK_EQ ( param . kernel . ndim ( ) , 3U ) < < param . kernel . ndim ( ) <nl> - < < " D pooling not supported " ; <nl> + / / ignore kernel size only if global_pool not assigned false <nl> + if ( param . global_pool = = false ) { <nl> + CHECK_EQ ( param . kernel . ndim ( ) , 3U ) < < param . kernel . ndim ( ) <nl> + < < " D pooling not supported " ; <nl> + } <nl> if ( param . stride . ndim ( ) = = 0 ) param . stride = Shape3 ( 1 , 1 , 1 ) ; <nl> if ( param . pad . ndim ( ) = = 0 ) param . pad = Shape3 ( 0 , 0 , 0 ) ; <nl> } <nl> - CHECK_EQ ( param . stride . ndim ( ) , param . kernel . ndim ( ) ) <nl> - < < " stride and kernel should have the same length " ; <nl> - CHECK_EQ ( param . pad . ndim ( ) , param . kernel . ndim ( ) ) <nl> - < < " pad and kernel should have the same length " ; <nl> attrs - > parsed = std : : move ( param ) ; <nl> } <nl> <nl> static bool PoolingShape ( const nnvm : : NodeAttrs & attrs , <nl> < < " Pooling : Input data should be 3D in ( batch , channel , x ) " <nl> < < " Or 4D in ( batch , channel , y , x ) " <nl> < < " Or 5D in ( batch , channel , d , y , x ) " ; <nl> + CHECK_LE ( dshape . ndim ( ) , 5U ) <nl> + < < " Pooling : Input data should be 3D in ( batch , channel , x ) " <nl> + < < " Or 4D in ( batch , channel , y , x ) " <nl> + < < " Or 5D in ( batch , channel , d , y , x ) " ; <nl> TShape oshape = dshape ; <nl> if ( dshape . ndim ( ) = = 0 ) return false ; <nl> - if ( param . kernel . ndim ( ) = = 1 ) { <nl> + if ( param . global_pool ) { <nl> + for ( size_t i { 2 } ; i < dshape . ndim ( ) ; i + + ) <nl> + oshape [ i ] = 1 ; <nl> + out_shape - > clear ( ) ; <nl> + out_shape - > push_back ( oshape ) ; / / save output shape <nl> + # if MXNET_USE_MKLDNN = = 1 <nl> + if ( MKLDNNRequireWorkspace ( param ) & & SupportMKLDNNPooling ( param ) ) <nl> + out_shape - > push_back ( oshape ) ; / / for workspace <nl> + # endif <nl> + } else if ( param . kernel . ndim ( ) = = 1 ) { <nl> CHECK_EQ ( dshape . 
ndim ( ) , 3U ) <nl> < < " Pooling : Input data should be 3D in ( batch , channel , x ) " ; <nl> - if ( param . global_pool ) { <nl> - oshape [ 2 ] = 1 ; <nl> + CHECK ( param . kernel [ 0 ] < = dshape [ 2 ] + 2 * param . pad [ 0 ] ) <nl> + < < " kernel size ( " < < param . kernel [ 0 ] < < " ) exceeds input ( " <nl> + < < dshape [ 2 ] < < " padded to " < < ( dshape [ 2 ] + 2 * param . pad [ 0 ] ) <nl> + < < " ) " ; <nl> + if ( param . pooling_convention = = pool_enum : : kValid ) { <nl> + oshape [ 2 ] = 1 + <nl> + ( dshape [ 2 ] + 2 * param . pad [ 0 ] - param . kernel [ 0 ] ) / <nl> + param . stride [ 0 ] ; <nl> } else { <nl> - CHECK ( param . kernel [ 0 ] < = dshape [ 2 ] + 2 * param . pad [ 0 ] ) <nl> - < < " kernel size ( " < < param . kernel [ 0 ] < < " ) exceeds input ( " <nl> - < < dshape [ 2 ] < < " padded to " < < ( dshape [ 2 ] + 2 * param . pad [ 0 ] ) <nl> - < < " ) " ; <nl> - if ( param . pooling_convention = = pool_enum : : kValid ) { <nl> - oshape [ 2 ] = 1 + <nl> - ( dshape [ 2 ] + 2 * param . pad [ 0 ] - param . kernel [ 0 ] ) / <nl> - param . stride [ 0 ] ; <nl> - } else { <nl> - oshape [ 2 ] = 1 + static_cast < int > ( ceil ( <nl> - static_cast < float > ( dshape [ 2 ] + 2 * param . pad [ 0 ] - <nl> - param . kernel [ 0 ] ) / <nl> - param . stride [ 0 ] ) ) ; <nl> - } <nl> + oshape [ 2 ] = 1 + static_cast < int > ( ceil ( <nl> + static_cast < float > ( dshape [ 2 ] + 2 * param . pad [ 0 ] - <nl> + param . kernel [ 0 ] ) / <nl> + param . stride [ 0 ] ) ) ; <nl> } <nl> out_shape - > clear ( ) ; <nl> out_shape - > push_back ( oshape ) ; / / save output shape <nl> static bool PoolingShape ( const nnvm : : NodeAttrs & attrs , <nl> } else if ( param . kernel . ndim ( ) = = 2 ) { <nl> CHECK_EQ ( dshape . ndim ( ) , 4U ) <nl> < < " Pooling : Input data should be 4D in ( batch , channel , y , x ) " ; <nl> - if ( param . global_pool ) { <nl> - oshape [ 2 ] = 1 ; <nl> - oshape [ 3 ] = 1 ; <nl> + CHECK ( param . kernel [ 0 ] < = dshape [ 2 ] + 2 * param . pad [ 0 ] ) <nl> + < < " kernel size ( " < < param . kernel [ 0 ] < < " ) exceeds input ( " <nl> + < < dshape [ 2 ] < < " padded to " < < ( dshape [ 2 ] + 2 * param . pad [ 0 ] ) <nl> + < < " ) " ; <nl> + CHECK ( param . kernel [ 1 ] < = dshape [ 3 ] + 2 * param . pad [ 1 ] ) <nl> + < < " kernel size ( " < < param . kernel [ 1 ] < < " ) exceeds input ( " <nl> + < < dshape [ 3 ] < < " padded to " < < ( dshape [ 3 ] + 2 * param . pad [ 1 ] ) <nl> + < < " ) " ; <nl> + if ( param . pooling_convention = = pool_enum : : kValid ) { <nl> + oshape [ 2 ] = 1 + <nl> + ( dshape [ 2 ] + 2 * param . pad [ 0 ] - param . kernel [ 0 ] ) / <nl> + param . stride [ 0 ] ; <nl> + oshape [ 3 ] = 1 + <nl> + ( dshape [ 3 ] + 2 * param . pad [ 1 ] - param . kernel [ 1 ] ) / <nl> + param . stride [ 1 ] ; <nl> } else { <nl> - CHECK ( param . kernel [ 0 ] < = dshape [ 2 ] + 2 * param . pad [ 0 ] ) <nl> - < < " kernel size ( " < < param . kernel [ 0 ] < < " ) exceeds input ( " <nl> - < < dshape [ 2 ] < < " padded to " < < ( dshape [ 2 ] + 2 * param . pad [ 0 ] ) <nl> - < < " ) " ; <nl> - CHECK ( param . kernel [ 1 ] < = dshape [ 3 ] + 2 * param . pad [ 1 ] ) <nl> - < < " kernel size ( " < < param . kernel [ 1 ] < < " ) exceeds input ( " <nl> - < < dshape [ 3 ] < < " padded to " < < ( dshape [ 3 ] + 2 * param . pad [ 1 ] ) <nl> - < < " ) " ; <nl> - if ( param . pooling_convention = = pool_enum : : kValid ) { <nl> - oshape [ 2 ] = 1 + <nl> - ( dshape [ 2 ] + 2 * param . pad [ 0 ] - param . kernel [ 0 ] ) / <nl> - param . 
stride [ 0 ] ; <nl> - oshape [ 3 ] = 1 + <nl> - ( dshape [ 3 ] + 2 * param . pad [ 1 ] - param . kernel [ 1 ] ) / <nl> - param . stride [ 1 ] ; <nl> - } else { <nl> - oshape [ 2 ] = 1 + static_cast < int > ( ceil ( <nl> - static_cast < float > ( dshape [ 2 ] + 2 * param . pad [ 0 ] - <nl> - param . kernel [ 0 ] ) / <nl> - param . stride [ 0 ] ) ) ; <nl> - oshape [ 3 ] = 1 + static_cast < int > ( ceil ( <nl> - static_cast < float > ( dshape [ 3 ] + 2 * param . pad [ 1 ] - <nl> - param . kernel [ 1 ] ) / <nl> - param . stride [ 1 ] ) ) ; <nl> - } <nl> + oshape [ 2 ] = 1 + static_cast < int > ( ceil ( <nl> + static_cast < float > ( dshape [ 2 ] + 2 * param . pad [ 0 ] - <nl> + param . kernel [ 0 ] ) / <nl> + param . stride [ 0 ] ) ) ; <nl> + oshape [ 3 ] = 1 + static_cast < int > ( ceil ( <nl> + static_cast < float > ( dshape [ 3 ] + 2 * param . pad [ 1 ] - <nl> + param . kernel [ 1 ] ) / <nl> + param . stride [ 1 ] ) ) ; <nl> } <nl> out_shape - > clear ( ) ; <nl> out_shape - > push_back ( oshape ) ; / / save output shape <nl> static bool PoolingShape ( const nnvm : : NodeAttrs & attrs , <nl> < < " kernel size exceeds input " ; <nl> CHECK_LE ( param . kernel [ 2 ] , dshape [ 4 ] + 2 * param . pad [ 2 ] ) <nl> < < " kernel size exceeds input " ; <nl> - if ( param . global_pool ) { <nl> - oshape [ 2 ] = 1 ; <nl> - oshape [ 3 ] = 1 ; <nl> - oshape [ 4 ] = 1 ; <nl> + if ( param . pooling_convention = = pool_enum : : kValid ) { <nl> + oshape [ 2 ] = 1 + <nl> + ( dshape [ 2 ] + 2 * param . pad [ 0 ] - param . kernel [ 0 ] ) / <nl> + param . stride [ 0 ] ; <nl> + oshape [ 3 ] = 1 + <nl> + ( dshape [ 3 ] + 2 * param . pad [ 1 ] - param . kernel [ 1 ] ) / <nl> + param . stride [ 1 ] ; <nl> + oshape [ 4 ] = 1 + <nl> + ( dshape [ 4 ] + 2 * param . pad [ 2 ] - param . kernel [ 2 ] ) / <nl> + param . stride [ 2 ] ; <nl> } else { <nl> - if ( param . pooling_convention = = pool_enum : : kValid ) { <nl> - oshape [ 2 ] = 1 + <nl> - ( dshape [ 2 ] + 2 * param . pad [ 0 ] - param . kernel [ 0 ] ) / <nl> - param . stride [ 0 ] ; <nl> - oshape [ 3 ] = 1 + <nl> - ( dshape [ 3 ] + 2 * param . pad [ 1 ] - param . kernel [ 1 ] ) / <nl> - param . stride [ 1 ] ; <nl> - oshape [ 4 ] = 1 + <nl> - ( dshape [ 4 ] + 2 * param . pad [ 2 ] - param . kernel [ 2 ] ) / <nl> - param . stride [ 2 ] ; <nl> - } else { <nl> - oshape [ 2 ] = 1 + static_cast < int > ( ceil ( <nl> - static_cast < float > ( dshape [ 2 ] + 2 * param . pad [ 0 ] - <nl> - param . kernel [ 0 ] ) / <nl> - param . stride [ 0 ] ) ) ; <nl> - oshape [ 3 ] = 1 + static_cast < int > ( ceil ( <nl> - static_cast < float > ( dshape [ 3 ] + 2 * param . pad [ 1 ] - <nl> - param . kernel [ 1 ] ) / <nl> - param . stride [ 1 ] ) ) ; <nl> - oshape [ 4 ] = 1 + static_cast < int > ( ceil ( <nl> - static_cast < float > ( dshape [ 4 ] + 2 * param . pad [ 2 ] - <nl> - param . kernel [ 2 ] ) / <nl> - param . stride [ 2 ] ) ) ; <nl> - } <nl> + oshape [ 2 ] = 1 + static_cast < int > ( ceil ( <nl> + static_cast < float > ( dshape [ 2 ] + 2 * param . pad [ 0 ] - <nl> + param . kernel [ 0 ] ) / <nl> + param . stride [ 0 ] ) ) ; <nl> + oshape [ 3 ] = 1 + static_cast < int > ( ceil ( <nl> + static_cast < float > ( dshape [ 3 ] + 2 * param . pad [ 1 ] - <nl> + param . kernel [ 1 ] ) / <nl> + param . stride [ 1 ] ) ) ; <nl> + oshape [ 4 ] = 1 + static_cast < int > ( ceil ( <nl> + static_cast < float > ( dshape [ 4 ] + 2 * param . pad [ 2 ] - <nl> + param . kernel [ 2 ] ) / <nl> + param . 
stride [ 2 ] ) ) ; <nl> } <nl> <nl> out_shape - > clear ( ) ; <nl> mmm a / src / operator / pooling_v1 - inl . h <nl> ppp b / src / operator / pooling_v1 - inl . h <nl> struct PoolingV1Param : public dmlc : : Parameter < PoolingV1Param > { <nl> int pooling_convention ; <nl> bool global_pool ; <nl> DMLC_DECLARE_PARAMETER ( PoolingV1Param ) { <nl> - DMLC_DECLARE_FIELD ( global_pool ) . set_default ( false ) <nl> - . describe ( " Ignore kernel size , do global pooling based on current input feature map . " ) ; <nl> - <nl> - DMLC_DECLARE_FIELD ( kernel ) <nl> + DMLC_DECLARE_FIELD ( kernel ) . set_default ( TShape ( ) ) <nl> . enforce_nonzero ( ) <nl> . describe ( " pooling kernel size : ( y , x ) or ( d , y , x ) " ) ; <nl> <nl> - DMLC_DECLARE_FIELD ( pool_type ) <nl> + DMLC_DECLARE_FIELD ( pool_type ) . set_default ( pool_v1_enum : : kMaxPooling ) <nl> . add_enum ( " max " , pool_v1_enum : : kMaxPooling ) <nl> . add_enum ( " avg " , pool_v1_enum : : kAvgPooling ) <nl> . add_enum ( " sum " , pool_v1_enum : : kSumPooling ) <nl> . describe ( " Pooling type to be applied . " ) ; <nl> <nl> + DMLC_DECLARE_FIELD ( global_pool ) . set_default ( false ) <nl> + . describe ( " Ignore kernel size , do global pooling based on current input feature map . " ) ; <nl> + <nl> DMLC_DECLARE_FIELD ( pooling_convention ) . set_default ( pool_v1_enum : : kValid ) <nl> . add_enum ( " full " , pool_v1_enum : : kFull ) <nl> . add_enum ( " valid " , pool_v1_enum : : kValid ) <nl> class PoolingV1Op : public Operator { <nl> <nl> / / reset padding size for global pooling <nl> TShape padding = param_ . pad ; <nl> + / / TShape kernel = param_ . kernel ; <nl> if ( param_ . global_pool ) { <nl> padding [ 0 ] = padding [ 1 ] = 0 ; <nl> + / / kernel [ 0 ] = kernel [ 1 ] = 0 ; <nl> } <nl> <nl> Tensor < xpu , 4 , DType > data = in_data [ pool_v1_enum : : kData ] . get < xpu , 4 , DType > ( s ) ; <nl> class PoolingV1Prop : public OperatorProperty { <nl> void Init ( const std : : vector < std : : pair < std : : string , std : : string > > & kwargs ) override { <nl> using namespace mshadow ; <nl> param_ . Init ( kwargs ) ; <nl> - if ( param_ . kernel . ndim ( ) = = 2 ) { <nl> - if ( param_ . stride . ndim ( ) = = 0 ) param_ . stride = Shape2 ( 1 , 1 ) ; <nl> - if ( param_ . pad . ndim ( ) = = 0 ) param_ . pad = Shape2 ( 0 , 0 ) ; <nl> - } else { <nl> - CHECK_EQ ( param_ . kernel . ndim ( ) , 3U ) < < param_ . kernel . ndim ( ) < < " D pooling not supported " ; <nl> - if ( param_ . stride . ndim ( ) = = 0 ) param_ . stride = Shape3 ( 1 , 1 , 1 ) ; <nl> - if ( param_ . pad . ndim ( ) = = 0 ) param_ . pad = Shape3 ( 0 , 0 , 0 ) ; <nl> + if ( ! param_ . global_pool ) { <nl> + if ( param_ . kernel . ndim ( ) = = 2 ) { <nl> + if ( param_ . stride . ndim ( ) = = 0 ) param_ . stride = Shape2 ( 1 , 1 ) ; <nl> + if ( param_ . pad . ndim ( ) = = 0 ) param_ . pad = Shape2 ( 0 , 0 ) ; <nl> + } else { <nl> + CHECK_EQ ( param_ . kernel . ndim ( ) , 3U ) < < param_ . kernel . ndim ( ) < < " D pooling not supported " ; <nl> + if ( param_ . stride . ndim ( ) = = 0 ) param_ . stride = Shape3 ( 1 , 1 , 1 ) ; <nl> + if ( param_ . pad . ndim ( ) = = 0 ) param_ . pad = Shape3 ( 0 , 0 , 0 ) ; <nl> + } <nl> + CHECK_EQ ( param_ . stride . ndim ( ) , param_ . kernel . ndim ( ) ) <nl> + < < " stride and kernel should have the same length " ; <nl> + CHECK_EQ ( param_ . pad . ndim ( ) , param_ . kernel . ndim ( ) ) <nl> + < < " pad and kernel should have the same length " ; <nl> } <nl> - CHECK_EQ ( param_ . stride . ndim ( ) , param_ . kernel . 
ndim ( ) ) <nl> - < < " stride and kernel should have the same length " ; <nl> - CHECK_EQ ( param_ . pad . ndim ( ) , param_ . kernel . ndim ( ) ) <nl> - < < " pad and kernel should have the same length " ; <nl> } <nl> <nl> std : : map < std : : string , std : : string > GetParams ( ) const override { <nl> class PoolingV1Prop : public OperatorProperty { <nl> const TShape & dshape = ( * in_shape ) [ 0 ] ; <nl> CHECK_GE ( dshape . ndim ( ) , 4U ) < < " Pooling : Input data should be 4D in ( batch , channel , y , x ) " <nl> < < " Or 5D in ( batch , channel , d , y , x ) " ; <nl> + CHECK_LE ( dshape . ndim ( ) , 5U ) < < " Pooling : Input data should be 4D in ( batch , channel , y , x ) " <nl> + < < " Or 5D in ( batch , channel , d , y , x ) " ; <nl> TShape oshape = dshape ; <nl> if ( dshape . ndim ( ) = = 0 ) return false ; <nl> - if ( param_ . kernel . ndim ( ) = = 2 ) { <nl> - CHECK_EQ ( dshape . ndim ( ) , 4 ) < < " Pooling : Input data should be 4D in ( batch , channel , y , x ) " ; <nl> - if ( param_ . global_pool ) { <nl> + if ( param_ . global_pool ) { <nl> + if ( dshape . ndim ( ) = = 4 ) { <nl> oshape [ 2 ] = 1 ; <nl> oshape [ 3 ] = 1 ; <nl> } else { <nl> - CHECK ( param_ . kernel [ 0 ] < = dshape [ 2 ] + 2 * param_ . pad [ 0 ] ) <nl> - < < " kernel size ( " < < param_ . kernel [ 0 ] < < " ) exceeds input ( " < < dshape [ 2 ] <nl> - < < " padded to " < < ( dshape [ 2 ] + 2 * param_ . pad [ 0 ] ) < < " ) " ; <nl> - CHECK ( param_ . kernel [ 1 ] < = dshape [ 3 ] + 2 * param_ . pad [ 1 ] ) <nl> - < < " kernel size ( " < < param_ . kernel [ 1 ] < < " ) exceeds input ( " < < dshape [ 3 ] <nl> - < < " padded to " < < ( dshape [ 3 ] + 2 * param_ . pad [ 1 ] ) < < " ) " ; <nl> - if ( param_ . pooling_convention = = pool_v1_enum : : kValid ) { <nl> + oshape [ 2 ] = 1 ; <nl> + oshape [ 3 ] = 1 ; <nl> + oshape [ 4 ] = 1 ; <nl> + } <nl> + out_shape - > clear ( ) ; <nl> + out_shape - > push_back ( oshape ) ; <nl> + } else if ( param_ . kernel . ndim ( ) = = 2 ) { <nl> + CHECK_EQ ( dshape . ndim ( ) , 4 ) < < " Pooling : Input data should be 4D in ( batch , channel , y , x ) " ; <nl> + CHECK ( param_ . kernel [ 0 ] < = dshape [ 2 ] + 2 * param_ . pad [ 0 ] ) <nl> + < < " kernel size ( " < < param_ . kernel [ 0 ] < < " ) exceeds input ( " < < dshape [ 2 ] <nl> + < < " padded to " < < ( dshape [ 2 ] + 2 * param_ . pad [ 0 ] ) < < " ) " ; <nl> + CHECK ( param_ . kernel [ 1 ] < = dshape [ 3 ] + 2 * param_ . pad [ 1 ] ) <nl> + < < " kernel size ( " < < param_ . kernel [ 1 ] < < " ) exceeds input ( " < < dshape [ 3 ] <nl> + < < " padded to " < < ( dshape [ 3 ] + 2 * param_ . pad [ 1 ] ) < < " ) " ; <nl> + if ( param_ . pooling_convention = = pool_v1_enum : : kValid ) { <nl> oshape [ 2 ] = 1 + ( dshape [ 2 ] + 2 * param_ . pad [ 0 ] - param_ . kernel [ 0 ] ) / <nl> param_ . stride [ 0 ] ; <nl> oshape [ 3 ] = 1 + ( dshape [ 3 ] + 2 * param_ . pad [ 1 ] - param_ . kernel [ 1 ] ) / <nl> param_ . stride [ 1 ] ; <nl> - } else { <nl> + } else { <nl> oshape [ 2 ] = 1 + static_cast < int > ( ceil ( static_cast < float > ( <nl> dshape [ 2 ] + 2 * param_ . pad [ 0 ] - <nl> param_ . kernel [ 0 ] ) / param_ . stride [ 0 ] ) ) ; <nl> oshape [ 3 ] = 1 + static_cast < int > ( ceil ( static_cast < float > ( <nl> dshape [ 3 ] + 2 * param_ . pad [ 1 ] - <nl> param_ . kernel [ 1 ] ) / param_ . stride [ 1 ] ) ) ; <nl> - } <nl> } <nl> + <nl> out_shape - > clear ( ) ; <nl> out_shape - > push_back ( oshape ) ; <nl> } else if ( param_ . kernel . 
ndim ( ) = = 3 ) { <nl> class PoolingV1Prop : public OperatorProperty { <nl> CHECK_LE ( param_ . kernel [ 0 ] , dshape [ 2 ] + 2 * param_ . pad [ 0 ] ) < < " kernel size exceeds input " ; <nl> CHECK_LE ( param_ . kernel [ 1 ] , dshape [ 3 ] + 2 * param_ . pad [ 1 ] ) < < " kernel size exceeds input " ; <nl> CHECK_LE ( param_ . kernel [ 2 ] , dshape [ 4 ] + 2 * param_ . pad [ 2 ] ) < < " kernel size exceeds input " ; <nl> - if ( param_ . global_pool ) { <nl> - oshape [ 2 ] = 1 ; <nl> - oshape [ 3 ] = 1 ; <nl> - oshape [ 4 ] = 1 ; <nl> - } else { <nl> - if ( param_ . pooling_convention = = pool_v1_enum : : kValid ) { <nl> + if ( param_ . pooling_convention = = pool_v1_enum : : kValid ) { <nl> oshape [ 2 ] = 1 + ( dshape [ 2 ] + 2 * param_ . pad [ 0 ] - param_ . kernel [ 0 ] ) / <nl> param_ . stride [ 0 ] ; <nl> oshape [ 3 ] = 1 + ( dshape [ 3 ] + 2 * param_ . pad [ 1 ] - param_ . kernel [ 1 ] ) / <nl> param_ . stride [ 1 ] ; <nl> oshape [ 4 ] = 1 + ( dshape [ 4 ] + 2 * param_ . pad [ 2 ] - param_ . kernel [ 2 ] ) / <nl> param_ . stride [ 2 ] ; <nl> - } else { <nl> + } else { <nl> oshape [ 2 ] = 1 + static_cast < int > ( ceil ( static_cast < float > ( <nl> dshape [ 2 ] + 2 * param_ . pad [ 0 ] - <nl> param_ . kernel [ 0 ] ) / param_ . stride [ 0 ] ) ) ; <nl> class PoolingV1Prop : public OperatorProperty { <nl> oshape [ 4 ] = 1 + static_cast < int > ( ceil ( static_cast < float > ( <nl> dshape [ 4 ] + 2 * param_ . pad [ 2 ] - <nl> param_ . kernel [ 2 ] ) / param_ . stride [ 2 ] ) ) ; <nl> - } <nl> } <nl> <nl> out_shape - > clear ( ) ; <nl> mmm a / tests / python / gpu / test_operator_gpu . py <nl> ppp b / tests / python / gpu / test_operator_gpu . py <nl> def test_1d_pooling ( pool_type ) : <nl> sym_list . append ( mx . sym . Pooling ( kernel = kernel , pool_type = pool_type , <nl> pooling_convention = pooling_convention , global_pool = True , name = ' pool ' ) ) <nl> <nl> + ctx_list . append ( { ' ctx ' : mx . cpu ( 0 ) , ' pool_data ' : data , ' type_dict ' : { ' pool_data ' : np . float32 } } ) <nl> + sym_list . append ( mx . sym . Pooling ( pool_type = pool_type , <nl> + pooling_convention = pooling_convention , global_pool = True , name = ' pool ' ) ) <nl> + <nl> ctx_list . append ( { ' ctx ' : mx . gpu ( 0 ) , ' pool_data ' : data , ' type_dict ' : { ' pool_data ' : np . float32 } } ) <nl> sym_list . append ( mx . sym . Pooling ( kernel = kernel , pad = pad , stride = stride , pool_type = pool_type , <nl> pooling_convention = pooling_convention , global_pool = True , cudnn_off = False , name = ' pool ' ) ) <nl> def test_1d_pooling ( pool_type ) : <nl> sym_list . append ( mx . sym . Pooling ( kernel = kernel , pool_type = pool_type , <nl> pooling_convention = pooling_convention , global_pool = True , cudnn_off = False , name = ' pool ' ) ) <nl> <nl> + ctx_list . append ( { ' ctx ' : mx . gpu ( 0 ) , ' pool_data ' : data , ' type_dict ' : { ' pool_data ' : np . float32 } } ) <nl> + sym_list . append ( mx . sym . Pooling ( pool_type = pool_type , <nl> + pooling_convention = pooling_convention , global_pool = True , cudnn_off = False , name = ' pool ' ) ) <nl> + <nl> ctx_list . append ( { ' ctx ' : mx . gpu ( 0 ) , ' pool_data ' : data , ' type_dict ' : { ' pool_data ' : np . float32 } } ) <nl> sym_list . append ( mx . sym . Pooling ( kernel = kernel , pad = pad , stride = stride , pool_type = pool_type , <nl> pooling_convention = pooling_convention , global_pool = True , cudnn_off = True , name = ' pool ' ) ) <nl> def test_1d_pooling ( pool_type ) : <nl> sym_list . append ( mx . sym . 
Pooling ( kernel = kernel , pool_type = pool_type , <nl> pooling_convention = pooling_convention , global_pool = True , cudnn_off = True , name = ' pool ' ) ) <nl> <nl> + ctx_list . append ( { ' ctx ' : mx . gpu ( 0 ) , ' pool_data ' : data , ' type_dict ' : { ' pool_data ' : np . float32 } } ) <nl> + sym_list . append ( mx . sym . Pooling ( pool_type = pool_type , <nl> + pooling_convention = pooling_convention , global_pool = True , cudnn_off = True , name = ' pool ' ) ) <nl> + <nl> check_consistency ( sym_list , ctx_list ) <nl> <nl> def test_2d_pooling ( pool_type ) : <nl> def test_2d_pooling ( pool_type ) : <nl> sym_list . append ( mx . sym . Pooling_v1 ( kernel = kernel , pool_type = pool_type , <nl> pooling_convention = pooling_convention , global_pool = True , name = ' pool ' ) ) <nl> <nl> + ctx_list . append ( { ' ctx ' : mx . cpu ( 0 ) , ' pool_data ' : data , ' type_dict ' : { ' pool_data ' : np . float32 } } ) <nl> + sym_list . append ( mx . sym . Pooling_v1 ( pool_type = pool_type , <nl> + pooling_convention = pooling_convention , global_pool = True , name = ' pool ' ) ) <nl> + <nl> ctx_list . append ( { ' ctx ' : mx . cpu ( 0 ) , ' pool_data ' : data , ' type_dict ' : { ' pool_data ' : np . float32 } } ) <nl> sym_list . append ( mx . sym . Pooling ( kernel = kernel , pad = pad , stride = stride , pool_type = pool_type , <nl> pooling_convention = pooling_convention , global_pool = True , name = ' pool ' ) ) <nl> def test_2d_pooling ( pool_type ) : <nl> sym_list . append ( mx . sym . Pooling ( kernel = kernel , pool_type = pool_type , <nl> pooling_convention = pooling_convention , global_pool = True , name = ' pool ' ) ) <nl> <nl> + ctx_list . append ( { ' ctx ' : mx . cpu ( 0 ) , ' pool_data ' : data , ' type_dict ' : { ' pool_data ' : np . float32 } } ) <nl> + sym_list . append ( mx . sym . Pooling ( pool_type = pool_type , <nl> + pooling_convention = pooling_convention , global_pool = True , name = ' pool ' ) ) <nl> + <nl> ctx_list . append ( { ' ctx ' : mx . gpu ( 0 ) , ' pool_data ' : data , ' type_dict ' : { ' pool_data ' : np . float32 } } ) <nl> sym_list . append ( mx . sym . Pooling ( kernel = kernel , pad = pad , stride = stride , pool_type = pool_type , <nl> pooling_convention = pooling_convention , global_pool = True , cudnn_off = False , name = ' pool ' ) ) <nl> def test_2d_pooling ( pool_type ) : <nl> sym_list . append ( mx . sym . Pooling ( kernel = kernel , pool_type = pool_type , <nl> pooling_convention = pooling_convention , global_pool = True , cudnn_off = False , name = ' pool ' ) ) <nl> <nl> + ctx_list . append ( { ' ctx ' : mx . gpu ( 0 ) , ' pool_data ' : data , ' type_dict ' : { ' pool_data ' : np . float32 } } ) <nl> + sym_list . append ( mx . sym . Pooling ( pool_type = pool_type , <nl> + pooling_convention = pooling_convention , global_pool = True , cudnn_off = False , name = ' pool ' ) ) <nl> + <nl> ctx_list . append ( { ' ctx ' : mx . gpu ( 0 ) , ' pool_data ' : data , ' type_dict ' : { ' pool_data ' : np . float32 } } ) <nl> sym_list . append ( mx . sym . Pooling ( kernel = kernel , pad = pad , stride = stride , pool_type = pool_type , <nl> pooling_convention = pooling_convention , global_pool = True , cudnn_off = True , name = ' pool ' ) ) <nl> def test_2d_pooling ( pool_type ) : <nl> sym_list . append ( mx . sym . Pooling ( kernel = kernel , pool_type = pool_type , <nl> pooling_convention = pooling_convention , global_pool = True , cudnn_off = True , name = ' pool ' ) ) <nl> <nl> + ctx_list . append ( { ' ctx ' : mx . 
gpu ( 0 ) , ' pool_data ' : data , ' type_dict ' : { ' pool_data ' : np . float32 } } ) <nl> + sym_list . append ( mx . sym . Pooling ( pool_type = pool_type , <nl> + pooling_convention = pooling_convention , global_pool = True , cudnn_off = True , name = ' pool ' ) ) <nl> + <nl> + <nl> check_consistency ( sym_list , ctx_list ) <nl> <nl> test_1d_pooling ( ' max ' ) <nl> | [ MXNET - 80 ] Fix average pooling kernel size assignment error ( ) | apache/incubator-mxnet | 1e532bf2bc9c9bd51698ac61e89569828bea646d | 2018-04-09T18:49:24Z |
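The MXNet fix makes kernel default to an empty shape and only enforces it when global_pool is off; with global pooling, the effective kernel is taken from the input's spatial dimensions. A condensed sketch of that derivation, with shapes as plain int vectors approximating the real TShape logic:

    #include <cassert>
    #include <vector>

    struct PoolingParam {
      std::vector<int> kernel;   // may stay empty when global_pool is set
      bool global_pool = false;
    };

    std::vector<int> EffectiveKernel(const PoolingParam& p,
                                     const std::vector<int>& ishape) {
      if (p.global_pool) {
        // Pool over the whole feature map: dims 2..N of (batch, channel, ...).
        return std::vector<int>(ishape.begin() + 2, ishape.end());
      }
      assert(!p.kernel.empty() &&
             "set the kernel size if global pooling is not used");
      return p.kernel;
    }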
mmm a / src / mongo / db / ops / query . h <nl> ppp b / src / mongo / db / ops / query . h <nl> namespace mongo { <nl> private : <nl> void handleReorderMatch ( ) ; <nl> bool handleOrderedMatch ( ) ; <nl> - SmallDupSet _scanAndOrderDups ; <nl> + DiskLocDupSet _scanAndOrderDups ; <nl> OrderedBuildStrategy _orderedBuild ; <nl> ReorderBuildStrategy _reorderBuild ; <nl> } ; <nl> mmm a / src / mongo / db / queryoptimizercursorimpl . h <nl> ppp b / src / mongo / db / queryoptimizercursorimpl . h <nl> namespace mongo { <nl> set < DiskLoc > _dups ; <nl> } ; <nl> <nl> + / * * Dup tracking class , optimizing one common case with small set and few initial reads . * / <nl> + class SmallDupSet { <nl> + public : <nl> + SmallDupSet ( ) : _accesses ( ) { <nl> + _vec . reserve ( 250 ) ; <nl> + } <nl> + / * * @ return true if @ param ' loc ' already added to the set , false if adding to the set in this call . * / <nl> + bool getsetdup ( const DiskLoc & loc ) { <nl> + access ( ) ; <nl> + return vec ( ) ? getsetdupVec ( loc ) : getsetdupSet ( loc ) ; <nl> + } <nl> + / * * @ return true when @ param loc in the set . * / <nl> + bool getdup ( const DiskLoc & loc ) { <nl> + access ( ) ; <nl> + return vec ( ) ? getdupVec ( loc ) : getdupSet ( loc ) ; <nl> + } <nl> + private : <nl> + void access ( ) { <nl> + + + _accesses ; <nl> + mayUpgrade ( ) ; <nl> + } <nl> + void mayUpgrade ( ) { <nl> + if ( vec ( ) & & _accesses > 500 ) { <nl> + _set . insert ( _vec . begin ( ) , _vec . end ( ) ) ; <nl> + } <nl> + } <nl> + bool vec ( ) const { <nl> + return _set . size ( ) = = 0 ; <nl> + } <nl> + bool getsetdupVec ( const DiskLoc & loc ) { <nl> + if ( getdupVec ( loc ) ) { <nl> + return true ; <nl> + } <nl> + _vec . push_back ( loc ) ; <nl> + return false ; <nl> + } <nl> + bool getdupVec ( const DiskLoc & loc ) const { <nl> + for ( vector < DiskLoc > : : const_iterator i = _vec . begin ( ) ; i ! = _vec . end ( ) ; + + i ) { <nl> + if ( * i = = loc ) { <nl> + return true ; <nl> + } <nl> + } <nl> + return false ; <nl> + } <nl> + bool getsetdupSet ( const DiskLoc & loc ) { <nl> + pair < set < DiskLoc > : : iterator , bool > p = _set . insert ( loc ) ; <nl> + return ! p . second ; <nl> + } <nl> + bool getdupSet ( const DiskLoc & loc ) { <nl> + return _set . count ( loc ) > 0 ; <nl> + } <nl> + vector < DiskLoc > _vec ; <nl> + set < DiskLoc > _set ; <nl> + long long _accesses ; <nl> + } ; <nl> + <nl> class CursorGenerator { <nl> public : <nl> CursorGenerator ( const char * ns , <nl> mmm a / src / mongo / db / queryutil . h <nl> ppp b / src / mongo / db / queryutil . h <nl> namespace mongo { <nl> <nl> extern const int MaxBytesToReturnToClientAtOnce ; <nl> <nl> - / * * Dup tracking class , optimizing one common case with small set and few initial reads . * / <nl> - class SmallDupSet { <nl> + class DiskLocDupSet { <nl> public : <nl> - SmallDupSet ( ) : _accesses ( ) { <nl> - _vec . reserve ( 250 ) ; <nl> - } <nl> - / * * @ return true if @ param ' loc ' already added to the set , false if adding to the set in this call . * / <nl> bool getsetdup ( const DiskLoc & loc ) { <nl> - access ( ) ; <nl> - return vec ( ) ? getsetdupVec ( loc ) : getsetdupSet ( loc ) ; <nl> - } <nl> - / * * @ return true when @ param loc in the set . * / <nl> - bool getdup ( const DiskLoc & loc ) { <nl> - access ( ) ; <nl> - return vec ( ) ? 
getdupVec ( loc ) : getdupSet ( loc ) ; <nl> - } <nl> - private : <nl> - void access ( ) { <nl> - + + _accesses ; <nl> - mayUpgrade ( ) ; <nl> - } <nl> - void mayUpgrade ( ) { <nl> - if ( vec ( ) & & _accesses > 500 ) { <nl> - _set . insert ( _vec . begin ( ) , _vec . end ( ) ) ; <nl> - } <nl> - } <nl> - bool vec ( ) const { <nl> - return _set . size ( ) = = 0 ; <nl> - } <nl> - bool getsetdupVec ( const DiskLoc & loc ) { <nl> - if ( getdupVec ( loc ) ) { <nl> - return true ; <nl> - } <nl> - _vec . push_back ( loc ) ; <nl> - return false ; <nl> - } <nl> - bool getdupVec ( const DiskLoc & loc ) const { <nl> - for ( vector < DiskLoc > : : const_iterator i = _vec . begin ( ) ; i ! = _vec . end ( ) ; + + i ) { <nl> - if ( * i = = loc ) { <nl> - return true ; <nl> - } <nl> - } <nl> - return false ; <nl> - } <nl> - bool getsetdupSet ( const DiskLoc & loc ) { <nl> - pair < set < DiskLoc > : : iterator , bool > p = _set . insert ( loc ) ; <nl> + pair < set < DiskLoc > : : iterator , bool > p = _dups . insert ( loc ) ; <nl> return ! p . second ; <nl> } <nl> - bool getdupSet ( const DiskLoc & loc ) { <nl> - return _set . count ( loc ) > 0 ; <nl> - } <nl> - vector < DiskLoc > _vec ; <nl> - set < DiskLoc > _set ; <nl> - long long _accesses ; <nl> + private : <nl> + set < DiskLoc > _dups ; <nl> } ; <nl> - <nl> + <nl> / * This is for languages whose " objects " are not well ordered ( JSON is well ordered ) . <nl> [ { a : . . . } , { b : . . . } ] - > { a : . . . , b : . . . } <nl> * / <nl> | SERVER - 4150 just use a normal set for hybrid reorder dups | mongodb/mongo | 8b794427092321ee128ba821d5ad9a211fec9c3c | 2012-02-25T07:57:31Z |
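The diff above deletes the hybrid SmallDupSet (a vector that upgrades to a set after enough accesses) in favor of a plain set wrapper. A minimal Python sketch of the surviving getsetdup contract; the class name is taken from the diff, but the tuple standing in for DiskLoc is illustrative, not the mongod API:

    class DiskLocDupSet:
        """Plain-set dup tracker mirroring the C++ wrapper kept by the commit."""

        def __init__(self):
            self._dups = set()

        def getsetdup(self, loc):
            # True if loc was already recorded; False on first sight, in which
            # case it is recorded, matching set<DiskLoc>::insert semantics.
            if loc in self._dups:
                return True
            self._dups.add(loc)
            return False

    dups = DiskLocDupSet()
    assert dups.getsetdup((1, 42)) is False  # (file, offset) stands in for DiskLoc
    assert dups.getsetdup((1, 42)) is True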
mmm a / atom / browser / lib / objects - registry . coffee <nl> ppp b / atom / browser / lib / objects - registry . coffee <nl> class ObjectsRegistry extends EventEmitter <nl> @ dereference id , 1 <nl> # Also reduce the count in owner . <nl> pointer = @ owners [ webContentsId ] <nl> + return unless pointer ? <nl> - - pointer [ id ] <nl> delete pointer [ id ] if pointer [ id ] is 0 <nl> <nl> class ObjectsRegistry extends EventEmitter <nl> # Private : Dereference the object from store . <nl> dereference : ( id , count ) - > <nl> pointer = @ storage [ id ] <nl> + return unless pointer ? <nl> pointer . count - = count <nl> if pointer . count is 0 <nl> v8Util . deleteHiddenValue pointer . object , ' atomId ' <nl> | Guard against undefined , fix | electron/electron | 927c3f34c3759e29948a1ffaa76a4985d38a1d69 | 2015-10-06T14:25:55Z |
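The CoffeeScript patch above adds `return unless pointer?` guards so that dereferencing an id that was never registered, or was already cleaned up, becomes a no-op instead of a crash. A small Python sketch of the same guard pattern, with hypothetical names:

    storage = {}  # id -> {"count": n, "object": obj}, like @storage above

    def dereference(obj_id, count):
        pointer = storage.get(obj_id)
        if pointer is None:  # the added guard: unknown id is a no-op
            return
        pointer["count"] -= count
        if pointer["count"] == 0:
            del storage[obj_id]

    dereference("never-registered", 1)  # safe, no KeyError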
mmm a / modules / prediction / evaluator / evaluator_manager . cc <nl> ppp b / modules / prediction / evaluator / evaluator_manager . cc <nl> bool IsTrainable ( const Feature & feature ) { <nl> return true ; <nl> } <nl> <nl> - void GroupObstaclesByObstacleId ( const int obstacle_id , <nl> - ObstaclesContainer * const obstacles_container , <nl> - IdObstacleListMap * const id_obstacle_map ) { <nl> - Obstacle * obstacle_ptr = obstacles_container - > GetObstacle ( obstacle_id ) ; <nl> - if ( obstacle_ptr = = nullptr ) { <nl> - AERROR < < " Null obstacle [ " < < obstacle_id < < " ] found " ; <nl> - return ; <nl> - } <nl> - if ( obstacle_ptr - > IsStill ( ) ) { <nl> - ADEBUG < < " Ignore still obstacle [ " < < obstacle_id < < " ] " ; <nl> - return ; <nl> - } <nl> - const Feature & feature = obstacle_ptr - > latest_feature ( ) ; <nl> - if ( feature . priority ( ) . priority ( ) = = ObstaclePriority : : IGNORE ) { <nl> - ADEBUG < < " Skip ignored obstacle [ " < < obstacle_id < < " ] " ; <nl> - return ; <nl> - } else if ( feature . priority ( ) . priority ( ) = = ObstaclePriority : : CAUTION ) { <nl> - int id_mod = obstacle_id % FLAGS_max_caution_thread_num ; <nl> - ( * id_obstacle_map ) [ id_mod ] . push_back ( obstacle_ptr ) ; <nl> - ADEBUG < < " Cautioned obstacle [ " < < obstacle_id < < " ] for thread " < < id_mod ; <nl> - } else { <nl> - int normal_thread_num = FLAGS_max_thread_num - FLAGS_max_caution_thread_num ; <nl> - int id_mod = obstacle_id % normal_thread_num + FLAGS_max_caution_thread_num ; <nl> - ( * id_obstacle_map ) [ id_mod ] . push_back ( obstacle_ptr ) ; <nl> - ADEBUG < < " Normal obstacle [ " < < obstacle_id < < " ] for thread " < < id_mod ; <nl> + void GroupObstaclesByObstacleIds ( ObstaclesContainer * const obstacles_container , <nl> + IdObstacleListMap * const id_obstacle_map ) { <nl> + int caution_thread_idx = 0 ; <nl> + for ( int obstacle_id : <nl> + obstacles_container - > curr_frame_considered_obstacle_ids ( ) ) { <nl> + Obstacle * obstacle_ptr = obstacles_container - > GetObstacle ( obstacle_id ) ; <nl> + if ( obstacle_ptr = = nullptr ) { <nl> + AERROR < < " Null obstacle [ " < < obstacle_id < < " ] found " ; <nl> + return ; <nl> + } <nl> + if ( obstacle_ptr - > IsStill ( ) ) { <nl> + ADEBUG < < " Ignore still obstacle [ " < < obstacle_id < < " ] " ; <nl> + return ; <nl> + } <nl> + const Feature & feature = obstacle_ptr - > latest_feature ( ) ; <nl> + if ( feature . priority ( ) . priority ( ) = = ObstaclePriority : : IGNORE ) { <nl> + ADEBUG < < " Skip ignored obstacle [ " < < obstacle_id < < " ] " ; <nl> + return ; <nl> + } else if ( feature . priority ( ) . priority ( ) = = ObstaclePriority : : CAUTION ) { <nl> + caution_thread_idx = caution_thread_idx % FLAGS_max_caution_thread_num ; <nl> + ( * id_obstacle_map ) [ caution_thread_idx ] . push_back ( obstacle_ptr ) ; <nl> + ADEBUG < < " Cautioned obstacle [ " < < obstacle_id < < " ] for thread " <nl> + < < caution_thread_idx ; <nl> + + + caution_thread_idx ; <nl> + } else { <nl> + int normal_thread_num = <nl> + FLAGS_max_thread_num - FLAGS_max_caution_thread_num ; <nl> + int id_mod = <nl> + obstacle_id % normal_thread_num + FLAGS_max_caution_thread_num ; <nl> + ( * id_obstacle_map ) [ id_mod ] . 
push_back ( obstacle_ptr ) ; <nl> + ADEBUG < < " Normal obstacle [ " < < obstacle_id < < " ] for thread " < < id_mod ; <nl> + } <nl> } <nl> } <nl> <nl> void EvaluatorManager : : Run ( ObstaclesContainer * obstacles_container ) { <nl> <nl> if ( FLAGS_enable_multi_thread ) { <nl> IdObstacleListMap id_obstacle_map ; <nl> - for ( int id : obstacles_container - > curr_frame_considered_obstacle_ids ( ) ) { <nl> - GroupObstaclesByObstacleId ( id , obstacles_container , & id_obstacle_map ) ; <nl> - } <nl> + GroupObstaclesByObstacleIds ( obstacles_container , & id_obstacle_map ) ; <nl> PredictionThreadPool : : ForEach ( <nl> id_obstacle_map . begin ( ) , id_obstacle_map . end ( ) , <nl> [ & ] ( IdObstacleListMap : : iterator : : value_type & obstacles_iter ) { <nl> | Prediction : average caution obstacles thread usage | ApolloAuto/apollo | 833dde39b4325a23e60635670238073f4b580f01 | 2019-11-20T19:30:09Z |
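The refactor above stops hashing CAUTION obstacles by id (ids sharing a residue would pile onto one thread) and instead round-robins them with a running counter, while normal obstacles still map by id modulo the remaining threads. A sketch of that bucketing, omitting the still/IGNORE filtering for brevity; the flag values are assumptions:

    MAX_THREADS = 8          # stand-in for FLAGS_max_thread_num
    MAX_CAUTION_THREADS = 2  # stand-in for FLAGS_max_caution_thread_num

    def group_by_thread(obstacles):
        buckets = {}
        caution_idx = 0
        for obs_id, priority in obstacles:
            if priority == "CAUTION":
                # A running counter spreads caution obstacles evenly even when
                # their ids share a residue mod MAX_CAUTION_THREADS.
                buckets.setdefault(caution_idx % MAX_CAUTION_THREADS, []).append(obs_id)
                caution_idx += 1
            else:
                normal = MAX_THREADS - MAX_CAUTION_THREADS
                buckets.setdefault(obs_id % normal + MAX_CAUTION_THREADS, []).append(obs_id)
        return buckets

    # ids 4 and 6 would both land in bucket 0 under id % 2; round-robin splits them.
    print(group_by_thread([(4, "CAUTION"), (6, "CAUTION"), (7, "NORMAL")]))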
mmm a / tensorflow / contrib / autograph / converters / break_statements . py <nl> ppp b / tensorflow / contrib / autograph / converters / break_statements . py <nl> <nl> class BreakStatementTransformer ( transformer . Base ) : <nl> " " " Canonicalizes break statements into additional conditionals . " " " <nl> <nl> - def _track_body ( self , nodes , break_var ) : <nl> - self . enter_local_scope ( ) <nl> - self . set_local ( CONTROL_VAR_NAME , break_var ) <nl> - nodes = self . visit_block ( nodes ) <nl> - break_used = self . get_local ( BREAK_USED , False ) <nl> - self . exit_local_scope ( ) <nl> - return nodes , break_used <nl> - <nl> def visit_Break ( self , node ) : <nl> self . set_local ( BREAK_USED , True ) <nl> var_name = self . get_local ( CONTROL_VAR_NAME ) <nl> def _guard_if_present ( self , block , var_name ) : <nl> block = block ) <nl> return node <nl> <nl> + def _track_body ( self , nodes , break_var ) : <nl> + self . enter_local_scope ( ) <nl> + self . set_local ( CONTROL_VAR_NAME , break_var ) <nl> + nodes = self . visit_block ( nodes ) <nl> + break_used = self . get_local ( BREAK_USED , False ) <nl> + self . exit_local_scope ( ) <nl> + return nodes , break_used <nl> + <nl> def visit_While ( self , node ) : <nl> scope = anno . getanno ( node , NodeAnno . BODY_SCOPE ) <nl> break_var = self . context . namer . new_symbol ( ' break_ ' , scope . referenced ) <nl> mmm a / tensorflow / contrib / autograph / converters / continue_statements . py <nl> ppp b / tensorflow / contrib / autograph / converters / continue_statements . py <nl> <nl> from tensorflow . contrib . autograph . pyct . static_analysis . annos import NodeAnno <nl> <nl> <nl> - class ContinueCanonicalizationTransformer ( transformer . Base ) : <nl> - " " " Canonicalizes continue statements into additional conditionals . " " " <nl> + # Tags for local state . <nl> + CONTROL_VAR_NAME = ' control_var_name ' <nl> + CONTINUE_USED = ' continue_used ' <nl> + GUARD_CREATED = ' guard_created ' <nl> + CREATE_GUARD_NEXT = ' create_guard_next ' <nl> <nl> - def __init__ ( self , context ) : <nl> - super ( ContinueCanonicalizationTransformer , self ) . __init__ ( context ) <nl> - # This is a stack structure , to correctly process nested loops . <nl> - self . continuation_uses = [ ] <nl> <nl> - def _create_continuation_check ( self ) : <nl> - template = " " " <nl> - if not var_name : <nl> - pass <nl> - " " " <nl> - cond , = templates . replace ( template , var_name = self . continuation_uses [ - 1 ] [ 1 ] ) <nl> - cond . body = [ ] <nl> - return cond <nl> + class ContinueCanonicalizationTransformer ( transformer . Base ) : <nl> + " " " Canonicalizes continue statements into additional conditionals . " " " <nl> <nl> - def _create_continuation_trigger ( self ) : <nl> + def visit_Continue ( self , node ) : <nl> + self . set_local ( CONTINUE_USED , True ) <nl> template = " " " <nl> var_name = True <nl> " " " <nl> - assign , = templates . replace ( <nl> - template , var_name = self . continuation_uses [ - 1 ] [ 1 ] ) <nl> - return assign <nl> - <nl> - def _create_continuation_init ( self ) : <nl> - template = " " " <nl> - var_name = False <nl> - " " " <nl> - assign , = templates . replace ( <nl> - template , var_name = self . 
continuation_uses [ - 1 ] [ 1 ] ) <nl> - return assign <nl> - <nl> - def _visit_and_reindent_if_necessary ( self , nodes ) : <nl> - reorganized_nodes = [ ] <nl> - current_dest = reorganized_nodes <nl> - continue_used_in_block = False <nl> - for i , n in enumerate ( nodes ) : <nl> - # TODO ( mdan ) : This could be optimized if control structures are simple . <nl> - self . continuation_uses [ - 1 ] [ 0 ] = False <nl> - n = self . visit ( n ) <nl> - current_dest . append ( n ) <nl> - if self . continuation_uses [ - 1 ] [ 0 ] : <nl> - continue_used_in_block = True <nl> - if i < len ( nodes ) - 1 : # Last statement in block needs no protection . <nl> - cond = self . _create_continuation_check ( ) <nl> - current_dest . append ( cond ) <nl> - current_dest = cond . body <nl> - self . continuation_uses [ - 1 ] [ 0 ] = continue_used_in_block <nl> - return reorganized_nodes <nl> - <nl> - def _process_loop_block ( self , block , scope ) : <nl> - cont_var = self . context . namer . new_symbol ( ' cont_requested ' , scope . referenced ) <nl> - self . continuation_uses . append ( [ False , cont_var ] ) <nl> - block = self . _visit_and_reindent_if_necessary ( block ) <nl> - if self . continuation_uses [ - 1 ] [ 0 ] : <nl> - block . insert ( 0 , self . _create_continuation_init ( ) ) <nl> - self . continuation_uses . pop ( ) <nl> - return block <nl> + return templates . replace ( <nl> + template , var_name = self . get_local ( CONTROL_VAR_NAME ) ) <nl> + <nl> + def _postprocess_statement ( self , node ) : <nl> + # Example of how the state machine below works : <nl> + # <nl> + # 1 | stmt # State : CONTINUE_USED = False <nl> + # | # Action : none <nl> + # 2 | if cond : <nl> + # 3 | continue # State : CONTINUE_USED = True , <nl> + # | # GUARD_CREATED = False , <nl> + # | # CREATE_GUARD_NEXT = False <nl> + # | # Action : set CREATE_GUARD_NEXT = True <nl> + # 4 | stmt # State : CONTINUE_USED = True , <nl> + # | # GUARD_CREATED = False , <nl> + # | # CREATE_GUARD_NEXT = True <nl> + # | # Action : create ` if not continue_used ` , <nl> + # | # set GUARD_CREATED = True <nl> + # 5 | stmt # State : CONTINUE_USED = True , GUARD_CREATED = True <nl> + # | # Action : none ( will be wrapped under previously <nl> + # | # created if node ) <nl> + <nl> + if self . get_local ( CONTINUE_USED , False ) : <nl> + if self . get_local ( GUARD_CREATED , False ) : <nl> + return node , None <nl> + <nl> + elif not self . get_local ( CREATE_GUARD_NEXT , False ) : <nl> + self . set_local ( CREATE_GUARD_NEXT , True ) <nl> + return node , None <nl> + <nl> + else : <nl> + self . set_local ( GUARD_CREATED , True ) <nl> + template = " " " <nl> + if not var_name : <nl> + original_node <nl> + " " " <nl> + cond , = templates . replace ( <nl> + template , <nl> + var_name = self . get_local ( CONTROL_VAR_NAME ) , <nl> + original_node = node ) <nl> + return cond , cond . body <nl> + return node , None <nl> + <nl> + def _visit_loop_body ( self , node , nodes ) : <nl> + self . enter_local_scope ( ) <nl> + scope = anno . getanno ( node , NodeAnno . BODY_SCOPE ) <nl> + continue_var = self . context . namer . new_symbol ( ' continue_ ' , scope . referenced ) <nl> + self . set_local ( CONTROL_VAR_NAME , continue_var ) <nl> + <nl> + nodes = self . visit_block ( nodes , after_visit = self . _postprocess_statement ) <nl> + <nl> + if self . get_local ( CONTINUE_USED , False ) : <nl> + template = " " " <nl> + var_name = False <nl> + " " " <nl> + control_var_init = templates . 
replace ( template , var_name = continue_var ) <nl> + nodes = control_var_init + nodes <nl> + <nl> + self . exit_local_scope ( ) <nl> + return nodes <nl> + <nl> + def _visit_non_loop_body ( self , nodes ) : <nl> + self . enter_local_scope ( inherit = ( CONTROL_VAR_NAME , ) ) <nl> + nodes = self . visit_block ( nodes , after_visit = self . _postprocess_statement ) <nl> + continue_used = self . get_local ( CONTINUE_USED , False ) <nl> + self . exit_local_scope ( keep = ( CONTINUE_USED , ) ) <nl> + return nodes , continue_used <nl> <nl> def visit_While ( self , node ) : <nl> - self . generic_visit ( node . test ) <nl> - node . body = self . _process_loop_block ( node . body , <nl> - anno . getanno ( node , <nl> - NodeAnno . BODY_SCOPE ) ) <nl> - for n in node . orelse : <nl> - self . generic_visit ( n ) <nl> + node . test = self . visit ( node . test ) <nl> + node . body = self . _visit_loop_body ( node , node . body ) <nl> + # A continue in the else clause applies to the containing scope . <nl> + node . orelse , _ = self . _visit_non_loop_body ( node . orelse ) <nl> return node <nl> <nl> def visit_For ( self , node ) : <nl> - self . generic_visit ( node . target ) <nl> - self . generic_visit ( node . iter ) <nl> - node . body = self . _process_loop_block ( node . body , <nl> - anno . getanno ( node , <nl> - NodeAnno . BODY_SCOPE ) ) <nl> - for n in node . orelse : <nl> - self . generic_visit ( n ) <nl> + node . target = self . generic_visit ( node . target ) <nl> + node . iter = self . generic_visit ( node . iter ) <nl> + node . body = self . _visit_loop_body ( node , node . body ) <nl> + # A continue in the else clause applies to the containing scope . <nl> + node . orelse , _ = self . _visit_non_loop_body ( node . orelse ) <nl> return node <nl> <nl> def visit_If ( self , node ) : <nl> - if self . continuation_uses : <nl> - self . generic_visit ( node . test ) <nl> - node . body = self . _visit_and_reindent_if_necessary ( node . body ) <nl> - continue_used_in_body = self . continuation_uses [ - 1 ] [ 0 ] <nl> - node . orelse = self . _visit_and_reindent_if_necessary ( node . orelse ) <nl> - self . continuation_uses [ - 1 ] [ 0 ] = ( <nl> - continue_used_in_body or self . continuation_uses [ - 1 ] [ 0 ] ) <nl> - else : <nl> - node = self . generic_visit ( node ) <nl> + node . test = self . generic_visit ( node . test ) <nl> + node . body , continue_used_body = self . _visit_non_loop_body ( node . body ) <nl> + node . orelse , continue_used_orelse = self . _visit_non_loop_body ( node . orelse ) <nl> + self . set_local ( CONTINUE_USED , continue_used_body or continue_used_orelse ) <nl> return node <nl> <nl> - def visit_Continue ( self , node ) : <nl> - self . continuation_uses [ - 1 ] [ 0 ] = True <nl> - return self . _create_continuation_trigger ( ) <nl> - <nl> - def visit_Break ( self , node ) : <nl> - assert False , ' break statement should be desugared at this point ' <nl> + def visit_With ( self , node ) : <nl> + node . items = self . visit_block ( node . items ) <nl> + node . body , _ = self . _visit_non_loop_body ( node . body ) <nl> + return node <nl> <nl> <nl> def transform ( node , namer ) : <nl> mmm a / tensorflow / contrib / autograph / pyct / transformer . py <nl> ppp b / tensorflow / contrib / autograph / pyct / transformer . py <nl> def enclosing_entities ( self ) : <nl> return tuple ( self . _enclosing_entities ) <nl> <nl> @ property <nl> - def locel_scope_level ( self ) : <nl> + def local_scope_level ( self ) : <nl> return len ( self . 
_local_scope_state ) <nl> <nl> - def enter_local_scope ( self ) : <nl> - self . _local_scope_state . append ( { } ) <nl> + def enter_local_scope ( self , inherit = None ) : <nl> + " " " Marks entry into a new local scope . <nl> <nl> - def exit_local_scope ( self ) : <nl> - return self . _local_scope_state . pop ( ) <nl> + Args : <nl> + inherit : Optional enumerable of variable names to copy from the <nl> + parent scope . <nl> + " " " <nl> + scope_entered = { } <nl> + if inherit : <nl> + this_scope = self . _local_scope_state [ - 1 ] <nl> + for name in inherit : <nl> + if name in this_scope : <nl> + scope_entered [ name ] = this_scope [ name ] <nl> + self . _local_scope_state . append ( scope_entered ) <nl> + <nl> + def exit_local_scope ( self , keep = None ) : <nl> + " " " Marks exit from the current local scope . <nl> + <nl> + Args : <nl> + keep : Optional enumerable of variable names to copy into the <nl> + parent scope . <nl> + Returns : <nl> + A dict containing the scope that has just been exited . <nl> + " " " <nl> + scope_left = self . _local_scope_state . pop ( ) <nl> + if keep : <nl> + this_scope = self . _local_scope_state [ - 1 ] <nl> + for name in keep : <nl> + if name in scope_left : <nl> + this_scope [ name ] = scope_left [ name ] <nl> + return scope_left <nl> <nl> def set_local ( self , name , value ) : <nl> self . _local_scope_state [ - 1 ] [ name ] = value <nl> def debug_print ( self , node ) : <nl> print ( pretty_printer . fmt ( node ) ) <nl> return node <nl> <nl> - def visit_block ( self , nodes ) : <nl> - " " " Helper equivalent to generic_visit , but for node lists . " " " <nl> + def visit_block ( self , nodes , before_visit = None , after_visit = None ) : <nl> + " " " A more powerful version of generic_visit for statement blocks . <nl> + <nl> + An example of a block is the body of an if statement . <nl> + <nl> + This function allows specifying a postprocessing callback ( the <nl> + after_visit argument ) which can be used to move nodes to a new <nl> + destination . This is done by after_visit returning a non - null <nl> + second return value , e . g . return new_node , new_destination . <nl> + <nl> + For example , a transformer could perform the following move : <nl> + <nl> + foo ( ) <nl> + bar ( ) <nl> + baz ( ) <nl> + <nl> + foo ( ) <nl> + if cond : <nl> + bar ( ) <nl> + baz ( ) <nl> + <nl> + The above could be done with a postprocessor of this kind : <nl> + <nl> + def after_visit ( node ) : <nl> + if node_is_function_call ( bar ) : <nl> + new_container_node = build_cond ( ) <nl> + new_container_node . body . append ( node ) <nl> + return new_container_node , new_container_node . body <nl> + else : <nl> + # Once we set a new destination , all subsequent items will be <nl> + # moved to it , so we don ' t need to explicitly handle baz . <nl> + return node , None <nl> + <nl> + Args : <nl> + nodes : enumerable of AST node objects <nl> + before_visit : optional callable that is called before visiting each item <nl> + in nodes <nl> + after_visit : optional callable that takes in an AST node and <nl> + returns a tuple ( new_node , new_destination ) . It is called after <nl> + visiting each item in nodes . It is used in the same way as the <nl> + visit_ * methods : new_node will replace the node ; if not None , <nl> + new_destination must be a list , and subsequent nodes will be placed <nl> + in this list instead of the list returned by visit_block . <nl> + Returns : <nl> + A list of AST node objects containing the transformed items from nodes , <nl> + except those nodes that have been relocated using after_visit . <nl> + " " " <nl> results = [ ] <nl> + node_destination = results <nl> for node in nodes : <nl> + if before_visit : <nl> + # TODO ( mdan ) : We can modify node here too , if ever needed . <nl> + before_visit ( ) <nl> + <nl> replacement = self . visit ( node ) <nl> + <nl> + if after_visit and replacement : <nl> + replacement , new_destination = after_visit ( replacement ) <nl> + else : <nl> + new_destination = None <nl> + <nl> if replacement : <nl> if isinstance ( replacement , ( list , tuple ) ) : <nl> - results . extend ( replacement ) <nl> + node_destination . extend ( replacement ) <nl> else : <nl> - results . append ( replacement ) <nl> + node_destination . append ( replacement ) <nl> + <nl> + # Allow the postprocessor to reroute the remaining nodes to a new list . <nl> + if new_destination is not None : <nl> + node_destination = new_destination <nl> return results <nl> <nl> # TODO ( mdan ) : Once we have error tracing , we may be able to just go to SSA . <nl> def visit ( self , node ) : <nl> source_code = self . context . source_code <nl> source_file = self . context . source_file <nl> did_enter_function = False <nl> - local_scope_state_size = len ( self . _local_scope_state ) <nl> + local_scope_size_at_entry = len ( self . _local_scope_state ) <nl> <nl> try : <nl> if isinstance ( node , ( gast . FunctionDef , gast . ClassDef , gast . Lambda ) ) : <nl> - self . _enclosing_entities . append ( node ) <nl> did_enter_function = True <nl> <nl> + if did_enter_function : <nl> + self . _enclosing_entities . append ( node ) <nl> + <nl> if source_code and hasattr ( node , ' lineno ' ) : <nl> self . _lineno = node . lineno <nl> self . _col_offset = node . col_offset <nl> - if anno . hasanno ( node , anno . Basic . SKIP_PROCESSING ) : <nl> - return node <nl> - return super ( Base , self ) . visit ( node ) <nl> <nl> - except ( ValueError , AttributeError , KeyError , NotImplementedError , <nl> - AssertionError ) as e : <nl> + if not anno . hasanno ( node , anno . Basic . SKIP_PROCESSING ) : <nl> + result = super ( Base , self ) . visit ( node ) <nl> + <nl> + # On exception , the local scope integrity is not guaranteed . <nl> + if did_enter_function : <nl> + self . _enclosing_entities . pop ( ) <nl> + <nl> + if local_scope_size_at_entry ! = len ( self . _local_scope_state ) : <nl> + raise AssertionError ( <nl> + ' Inconsistent local scope stack . Before entering node % s , the ' <nl> + ' stack had length % d , after exit it has length % d . This ' <nl> + ' indicates enter_local_scope and exit_local_scope are not ' <nl> + ' well paired . ' % ( <nl> + node , <nl> + local_scope_size_at_entry , <nl> + len ( self . _local_scope_state ) <nl> + ) ) <nl> + return result <nl> + <nl> + except ( ValueError , AttributeError , KeyError , NotImplementedError ) as e : <nl> msg = ' % s : % s \ nOffending source : \ n % s \ n \ nOccurred at node : \ n % s ' % ( <nl> e . __class__ . __name__ , str ( e ) , try_ast_to_source ( node ) , <nl> pretty_printer . fmt ( node , color = False ) ) <nl> def visit ( self , node ) : <nl> line = source_code . splitlines ( ) [ self . _lineno - 1 ] <nl> else : <nl> line = ' < no source available > ' <nl> + # TODO ( mdan ) : Avoid the printing of the original exception .
<nl> + # In other words , we need to find how to suppress the " During handling <nl> + # of the above exception , another exception occurred " message . <nl> six . reraise ( AutographParseError , <nl> AutographParseError ( <nl> msg , <nl> ( source_file , self . _lineno , self . _col_offset + 1 , line ) ) , <nl> sys . exc_info ( ) [ 2 ] ) <nl> - finally : <nl> - if did_enter_function : <nl> - self . _enclosing_entities . pop ( ) <nl> - <nl> - if local_scope_state_size ! = len ( self . _local_scope_state ) : <nl> - raise AssertionError ( <nl> - ' Inconsistent local scope stack . Before entering node % s , the ' <nl> - ' stack had length % d , after exit it has length % d . This ' <nl> - ' indicates enter_local_scope and exit_local_scope are not ' <nl> - ' well paired . ' ) <nl> mmm a / tensorflow / contrib / autograph / pyct / transformer_test . py <nl> ppp b / tensorflow / contrib / autograph / pyct / transformer_test . py <nl> <nl> from __future__ import division <nl> from __future__ import print_function <nl> <nl> + import gast <nl> + <nl> from tensorflow . contrib . autograph . pyct import anno <nl> from tensorflow . contrib . autograph . pyct import context <nl> from tensorflow . contrib . autograph . pyct import parser <nl> <nl> <nl> class TransformerTest ( test . TestCase ) : <nl> <nl> - def _context_for_nodetesting ( self ) : <nl> + def _context_for_testing ( self ) : <nl> return context . EntityContext ( <nl> namer = None , <nl> source_code = None , <nl> def visit_BinOp ( self , node ) : <nl> anno . setanno ( node , ' enclosing_entities ' , self . enclosing_entities ) <nl> return self . generic_visit ( node ) <nl> <nl> - tr = TestTransformer ( self . _context_for_nodetesting ( ) ) <nl> + tr = TestTransformer ( self . _context_for_testing ( ) ) <nl> <nl> def test_function ( ) : <nl> a = 0 <nl> def visit_While ( self , node ) : <nl> def visit_For ( self , node ) : <nl> return self . _annotate_result ( node ) <nl> <nl> - tr = TestTransformer ( self . _context_for_nodetesting ( ) ) <nl> + tr = TestTransformer ( self . _context_for_testing ( ) ) <nl> <nl> def test_function ( a ) : <nl> " " " Docstring . " " " <nl> def visit_For ( self , node ) : <nl> self . exit_local_scope ( ) <nl> return node <nl> <nl> - tr = TestTransformer ( self . _context_for_nodetesting ( ) ) <nl> + tr = TestTransformer ( self . _context_for_testing ( ) ) <nl> <nl> def no_exit ( a ) : <nl> if a > 0 : <nl> def no_entry ( a ) : <nl> with self . assertRaises ( AssertionError ) : <nl> tr . visit ( node ) <nl> <nl> + def test_visit_block_postprocessing ( self ) : <nl> + <nl> + class TestTransformer ( transformer . Base ) : <nl> + <nl> + def _process_body_item ( self , node ) : <nl> + if isinstance ( node , gast . Assign ) and ( node . value . id = = ' y ' ) : <nl> + if_node = gast . If ( gast . Name ( ' x ' , gast . Load ( ) , None ) , [ node ] , [ ] ) <nl> + return if_node , if_node . body <nl> + return node , None <nl> + <nl> + def visit_FunctionDef ( self , node ) : <nl> + node . body = self . visit_block ( <nl> + node . body , after_visit = self . _process_body_item ) <nl> + return node <nl> + <nl> + def test_function ( x , y ) : <nl> + z = x <nl> + z = y <nl> + return z <nl> + <nl> + tr = TestTransformer ( self . _context_for_testing ( ) ) <nl> + <nl> + node , _ = parser . parse_entity ( test_function ) <nl> + node = tr . visit ( node ) <nl> + node = node . body [ 0 ] <nl> + <nl> + self . assertEqual ( len ( node . body ) , 2 ) <nl> + self . assertTrue ( isinstance ( node . body [ 0 ] , gast . 
Assign ) ) <nl> + self . assertTrue ( isinstance ( node . body [ 1 ] , gast . If ) ) <nl> + self . assertTrue ( isinstance ( node . body [ 1 ] . body [ 0 ] , gast . Assign ) ) <nl> + self . assertTrue ( isinstance ( node . body [ 1 ] . body [ 1 ] , gast . Return ) ) <nl> + <nl> <nl> if __name__ = = ' __main__ ' : <nl> test . main ( ) <nl> | Cleanup : update continue_statements . py to use the base transformer facilities for tracking local state and reindenting node blocks . Rearrange the error handling in base transformer to avoid chained exceptions . | tensorflow/tensorflow | f6a8cf82134a305f6d27368b2f51819b11195ada | 2018-05-31T15:56:25Z |
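The new visit_block contract above lets after_visit return (new_node, new_destination) to reroute every following statement into new_destination. A self-contained sketch of the same mechanism using only the stdlib ast module (ast.unparse needs Python 3.9+), mirroring the test_function case from the diff rather than the AutoGraph classes themselves:

    import ast

    def visit_block(nodes, after_visit):
        results = []
        dest = results
        for node in nodes:
            node, new_dest = after_visit(node)
            dest.append(node)
            if new_dest is not None:  # reroute everything that follows
                dest = new_dest
        return results

    def guard_from_y(node):
        # Wrap the `z = y` assignment in `if x:`; later statements land there too.
        if (isinstance(node, ast.Assign) and isinstance(node.value, ast.Name)
                and node.value.id == "y"):
            cond = ast.If(test=ast.Name(id="x", ctx=ast.Load()), body=[node], orelse=[])
            return cond, cond.body
        return node, None

    tree = ast.parse("def f(x, y):\n    z = x\n    z = y\n    return z\n")
    tree.body[0].body = visit_block(tree.body[0].body, guard_from_y)
    ast.fix_missing_locations(tree)
    print(ast.unparse(tree))  # `return z` ends up inside the if, as in the test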
mmm a / addons / skin . confluence / 720p / MusicOSD . xml <nl> ppp b / addons / skin . confluence / 720p / MusicOSD . xml <nl> <nl> < control type = " slider " id = " 87 " > <nl> < description > Seek Slider < / description > <nl> < posx > 430 < / posx > <nl> - < posy > 72r < / posy > <nl> + < posy > 82r < / posy > <nl> < width > 720 < / width > <nl> < height > 16 < / height > <nl> < onup > 702 < / onup > <nl> <nl> < / control > <nl> < control type = " group " id = " 100 " > <nl> < posx > 325 < / posx > <nl> - < posy > 50r < / posy > <nl> + < posy > 60r < / posy > <nl> < animation effect = " fade " time = " 200 " > VisibleChange < / animation > <nl> < visible > ! [ Window . IsVisible ( AddonSettings ) | Window . IsVisible ( SelectDialog ) | Window . IsVisible ( VisualisationPresetList ) ] < / visible > <nl> < control type = " button " id = " 600 " > <nl> < posx > 0 < / posx > <nl> < posy > 0 < / posy > <nl> - < width > 45 < / width > <nl> - < height > 45 < / height > <nl> + < width > 55 < / width > <nl> + < height > 55 < / height > <nl> < label > 210 < / label > <nl> < font > - < / font > <nl> < texturefocus > OSDPrevTrackFO . png < / texturefocus > <nl> <nl> < onclick > PlayerControl ( Previous ) < / onclick > <nl> < / control > <nl> < control type = " button " id = " 601 " > <nl> - < posx > 45 < / posx > <nl> + < posx > 55 < / posx > <nl> < posy > 0 < / posy > <nl> - < width > 45 < / width > <nl> - < height > 45 < / height > <nl> + < width > 55 < / width > <nl> + < height > 55 < / height > <nl> < label > 31354 < / label > <nl> < font > - < / font > <nl> < texturefocus > OSDRewindFO . png < / texturefocus > <nl> <nl> < onclick > PlayerControl ( Rewind ) < / onclick > <nl> < / control > <nl> < control type = " togglebutton " id = " 602 " > <nl> - < posx > 90 < / posx > <nl> + < posx > 110 < / posx > <nl> < posy > 0 < / posy > <nl> - < width > 45 < / width > <nl> - < height > 45 < / height > <nl> + < width > 55 < / width > <nl> + < height > 55 < / height > <nl> < label > 31351 < / label > <nl> < altlabel > 208 < / altlabel > <nl> < font > - < / font > <nl> <nl> < onclick > PlayerControl ( Play ) < / onclick > <nl> < / control > <nl> < control type = " button " id = " 603 " > <nl> - < posx > 135 < / posx > <nl> + < posx > 165 < / posx > <nl> < posy > 0 < / posy > <nl> - < width > 45 < / width > <nl> - < height > 45 < / height > <nl> + < width > 55 < / width > <nl> + < height > 55 < / height > <nl> < label > 31352 < / label > <nl> < font > - < / font > <nl> < texturefocus > OSDStopFO . png < / texturefocus > <nl> <nl> < onclick > PlayerControl ( Stop ) < / onclick > <nl> < / control > <nl> < control type = " button " id = " 604 " > <nl> - < posx > 180 < / posx > <nl> + < posx > 220 < / posx > <nl> < posy > 0 < / posy > <nl> - < width > 45 < / width > <nl> - < height > 45 < / height > <nl> + < width > 55 < / width > <nl> + < height > 55 < / height > <nl> < label > 31353 < / label > <nl> < font > - < / font > <nl> < texturefocus > OSDForwardFO . png < / texturefocus > <nl> <nl> < onclick > PlayerControl ( Forward ) < / onclick > <nl> < / control > <nl> < control type = " button " id = " 605 " > <nl> - < posx > 225 < / posx > <nl> + < posx > 275 < / posx > <nl> < posy > 0 < / posy > <nl> - < width > 45 < / width > <nl> - < height > 45 < / height > <nl> + < width > 55 < / width > <nl> + < height > 55 < / height > <nl> < label > 209 < / label > <nl> < font > - < / font > <nl> < texturefocus > OSDNextTrackFO . 
png < / texturefocus > <nl> <nl> < onclick > PlayerControl ( Next ) < / onclick > <nl> < / control > <nl> < control type = " button " id = " 606 " > <nl> - < posx > 270 < / posx > <nl> + < posx > 330 < / posx > <nl> < posy > 0 < / posy > <nl> - < width > 45 < / width > <nl> - < height > 45 < / height > <nl> + < width > 55 < / width > <nl> + < height > 55 < / height > <nl> < label > $ LOCALIZE [ 486 ] $ INFO [ Playlist . Repeat , : ] < / label > <nl> < font > - < / font > <nl> < texturefocus > - < / texturefocus > <nl> <nl> < ondown > 1000 < / ondown > <nl> < / control > <nl> < control type = " image " > <nl> - < posx > 270 < / posx > <nl> + < posx > 330 < / posx > <nl> < posy > 0 < / posy > <nl> - < width > 45 < / width > <nl> - < height > 45 < / height > <nl> + < width > 55 < / width > <nl> + < height > 55 < / height > <nl> < texture > OSDRepeatNF . png < / texture > <nl> < visible > ! Playlist . IsRepeat + ! Playlist . IsRepeatOne < / visible > <nl> < visible > ! Control . HasFocus ( 606 ) < / visible > <nl> < / control > <nl> < control type = " image " > <nl> - < posx > 270 < / posx > <nl> + < posx > 330 < / posx > <nl> < posy > 0 < / posy > <nl> - < width > 45 < / width > <nl> - < height > 45 < / height > <nl> + < width > 55 < / width > <nl> + < height > 55 < / height > <nl> < texture > OSDRepeatFO . png < / texture > <nl> < visible > ! Playlist . IsRepeat + ! Playlist . IsRepeatOne < / visible > <nl> < visible > Control . HasFocus ( 606 ) < / visible > <nl> < / control > <nl> < control type = " image " > <nl> - < posx > 270 < / posx > <nl> + < posx > 330 < / posx > <nl> < posy > 0 < / posy > <nl> - < width > 45 < / width > <nl> - < height > 45 < / height > <nl> + < width > 55 < / width > <nl> + < height > 55 < / height > <nl> < texture > OSDRepeatOneNF . png < / texture > <nl> < visible > Playlist . IsRepeatOne < / visible > <nl> < visible > ! Control . HasFocus ( 606 ) < / visible > <nl> < / control > <nl> < control type = " image " > <nl> - < posx > 270 < / posx > <nl> + < posx > 330 < / posx > <nl> < posy > 0 < / posy > <nl> - < width > 45 < / width > <nl> - < height > 45 < / height > <nl> + < width > 55 < / width > <nl> + < height > 55 < / height > <nl> < texture > OSDRepeatOneFO . png < / texture > <nl> < visible > Playlist . IsRepeatOne < / visible > <nl> < visible > Control . HasFocus ( 606 ) < / visible > <nl> < / control > <nl> < control type = " image " > <nl> - < posx > 270 < / posx > <nl> + < posx > 330 < / posx > <nl> < posy > 0 < / posy > <nl> - < width > 45 < / width > <nl> - < height > 45 < / height > <nl> + < width > 55 < / width > <nl> + < height > 55 < / height > <nl> < texture > OSDRepeatAllNF . png < / texture > <nl> < visible > Playlist . IsRepeat < / visible > <nl> < visible > ! Control . HasFocus ( 606 ) < / visible > <nl> < / control > <nl> < control type = " image " > <nl> - < posx > 270 < / posx > <nl> + < posx > 330 < / posx > <nl> < posy > 0 < / posy > <nl> - < width > 45 < / width > <nl> - < height > 45 < / height > <nl> + < width > 55 < / width > <nl> + < height > 55 < / height > <nl> < texture > OSDRepeatAllFO . png < / texture > <nl> < visible > Playlist . IsRepeat < / visible > <nl> < visible > Control . 
HasFocus ( 606 ) < / visible > <nl> < / control > <nl> < control type = " togglebutton " id = " 607 " > <nl> - < posx > 315 < / posx > <nl> + < posx > 385 < / posx > <nl> < posy > 0 < / posy > <nl> - < width > 45 < / width > <nl> - < height > 45 < / height > <nl> + < width > 55 < / width > <nl> + < height > 55 < / height > <nl> < label > $ LOCALIZE [ 590 ] $ INFO [ Playlist . Random , : ] < / label > <nl> < font > - < / font > <nl> < texturefocus > OSDRandomOffFO . png < / texturefocus > <nl> <nl> < / control > <nl> < / control > <nl> < control type = " group " > <nl> - < posx > 250r < / posx > <nl> - < posy > 50r < / posy > <nl> + < posx > 300r < / posx > <nl> + < posy > 60r < / posy > <nl> < animation effect = " fade " time = " 200 " > VisibleChange < / animation > <nl> < visible > ! [ Window . IsVisible ( AddonSettings ) | Window . IsVisible ( SelectDialog ) | Window . IsVisible ( VisualisationPresetList ) ] < / visible > <nl> < control type = " togglebutton " id = " 701 " > <nl> < posx > 0 < / posx > <nl> < posy > 0 < / posy > <nl> - < width > 45 < / width > <nl> - < height > 45 < / height > <nl> + < width > 55 < / width > <nl> + < height > 55 < / height > <nl> < label > 31128 < / label > <nl> < font > - < / font > <nl> < texturefocus > OSDLyricsFO . png < / texturefocus > <nl> <nl> < usealttexture > IsEmpty ( Skin . String ( LyricScript_Path ) ) < / usealttexture > <nl> < / control > <nl> < control type = " button " id = " 500 " > <nl> - < posx > 45 < / posx > <nl> + < posx > 55 < / posx > <nl> < posy > 0 < / posy > <nl> - < width > 45 < / width > <nl> - < height > 45 < / height > <nl> + < width > 55 < / width > <nl> + < height > 55 < / height > <nl> < label > 12006 < / label > <nl> < font > - < / font > <nl> < texturefocus > OSDVizFO . png < / texturefocus > <nl> <nl> < ondown > 1000 < / ondown > <nl> < / control > <nl> < control type = " button " id = " 702 " > <nl> - < posx > 90 < / posx > <nl> + < posx > 110 < / posx > <nl> < posy > 0 < / posy > <nl> - < width > 45 < / width > <nl> - < height > 45 < / height > <nl> + < width > 55 < / width > <nl> + < height > 55 < / height > <nl> < label > $ LOCALIZE [ 250 ] $ LOCALIZE [ 21417 ] < / label > <nl> < font > - < / font > <nl> < texturefocus > OSDPresetSettingsFO . png < / texturefocus > <nl> <nl> < onclick > Addon . Default . OpenSettings ( xbmc . player . musicviz ) < / onclick > <nl> < / control > <nl> < control type = " button " id = " 703 " > <nl> - < posx > 135 < / posx > <nl> + < posx > 165 < / posx > <nl> < posy > 0 < / posy > <nl> - < width > 45 < / width > <nl> - < height > 45 < / height > <nl> + < width > 55 < / width > <nl> + < height > 55 < / height > <nl> < label > 31048 < / label > <nl> < font > - < / font > <nl> < texturefocus > OSDPreFO . png < / texturefocus > <nl> <nl> < onclick > ActivateWindow ( 122 ) < / onclick > <nl> < / control > <nl> < control type = " button " id = " 704 " > <nl> - < posx > 180 < / posx > <nl> + < posx > 220 < / posx > <nl> < posy > 0 < / posy > <nl> - < width > 45 < / width > <nl> - < height > 45 < / height > <nl> + < width > 55 < / width > <nl> + < height > 55 < / height > <nl> < label > 264 < / label > <nl> < font > - < / font > <nl> < texturefocus > OSDRecordOffFO . png < / texturefocus > <nl> mmm a / addons / skin . confluence / 720p / MusicVisualisation . xml <nl> ppp b / addons / skin . confluence / 720p / MusicVisualisation . 
xml <nl> <nl> < / control > <nl> < control type = " group " > <nl> < posx > 330 < / posx > <nl> - < posy > 175r < / posy > <nl> + < posy > 185r < / posy > <nl> < control type = " label " id = " 1 " > <nl> < description > Heading label < / description > <nl> < posx > 0 < / posx > <nl> <nl> < / control > <nl> < control type = " group " > <nl> < posx > 330 < / posx > <nl> - < posy > 85r < / posy > <nl> + < posy > 95r < / posy > <nl> < control type = " label " > <nl> < posx > 0 < / posx > <nl> < posy > 0 < / posy > <nl> mmm a / addons / skin . confluence / 720p / VideoFullScreen . xml <nl> ppp b / addons / skin . confluence / 720p / VideoFullScreen . xml <nl> <nl> < / control > <nl> < control type = " group " id = " 1 " > <nl> < posx > 330 < / posx > <nl> - < posy > 175r < / posy > <nl> + < posy > 185r < / posy > <nl> < control type = " label " id = " 1 " > <nl> < description > Heading label < / description > <nl> < posx > 0 < / posx > <nl> <nl> < / control > <nl> < control type = " group " id = " 1 " > <nl> < posx > 330 < / posx > <nl> - < posy > 85r < / posy > <nl> + < posy > 95r < / posy > <nl> < visible > ! VideoPlayer . Content ( LiveTV ) | [ VideoPlayer . Content ( LiveTV ) + VideoPlayer . HasEpg ] < / visible > <nl> < control type = " label " id = " 1 " > <nl> < posx > 0 < / posx > <nl> mmm a / addons / skin . confluence / 720p / VideoOSD . xml <nl> ppp b / addons / skin . confluence / 720p / VideoOSD . xml <nl> <nl> < control type = " slider " id = " 87 " > <nl> < description > Seek Slider < / description > <nl> < posx > 430 < / posx > <nl> - < posy > 72r < / posy > <nl> + < posy > 82r < / posy > <nl> < width > 720 < / width > <nl> < height > 16 < / height > <nl> < onup > 702 < / onup > <nl> <nl> < ! - - ! LiveTV - - > <nl> < control type = " group " id = " 100 " > <nl> < posx > 325 < / posx > <nl> - < posy > 50r < / posy > <nl> + < posy > 60r < / posy > <nl> < defaultcontrol always = " true " > 602 < / defaultcontrol > <nl> < animation effect = " fade " time = " 200 " > VisibleChange < / animation > <nl> < visible > ! [ Window . IsVisible ( SliderDialog ) | Window . IsVisible ( OSDVideoSettings ) | Window . IsVisible ( OSDAudioSettings ) | Window . IsVisible ( VideoBookmarks ) ] < / visible > <nl> <nl> < control type = " button " id = " 600 " > <nl> < posx > 0 < / posx > <nl> < posy > 0 < / posy > <nl> - < width > 45 < / width > <nl> - < height > 45 < / height > <nl> + < width > 55 < / width > <nl> + < height > 55 < / height > <nl> < label > 210 < / label > <nl> < font > - < / font > <nl> < texturefocus > OSDPrevTrackFO . png < / texturefocus > <nl> <nl> < onclick > PlayerControl ( Previous ) < / onclick > <nl> < / control > <nl> < control type = " button " id = " 601 " > <nl> - < posx > 45 < / posx > <nl> + < posx > 55 < / posx > <nl> < posy > 0 < / posy > <nl> - < width > 45 < / width > <nl> - < height > 45 < / height > <nl> + < width > 55 < / width > <nl> + < height > 55 < / height > <nl> < label > 31354 < / label > <nl> < font > - < / font > <nl> < texturefocus > OSDRewindFO . 
png < / texturefocus > <nl> <nl> < onclick > PlayerControl ( Rewind ) < / onclick > <nl> < / control > <nl> < control type = " togglebutton " id = " 602 " > <nl> - < posx > 90 < / posx > <nl> + < posx > 110 < / posx > <nl> < posy > 0 < / posy > <nl> - < width > 45 < / width > <nl> - < height > 45 < / height > <nl> + < width > 55 < / width > <nl> + < height > 55 < / height > <nl> < label > 31351 < / label > <nl> < altlabel > 208 < / altlabel > <nl> < font > - < / font > <nl> <nl> < onclick > PlayerControl ( Play ) < / onclick > <nl> < / control > <nl> < control type = " button " id = " 603 " > <nl> - < posx > 135 < / posx > <nl> + < posx > 165 < / posx > <nl> < posy > 0 < / posy > <nl> - < width > 45 < / width > <nl> - < height > 45 < / height > <nl> + < width > 55 < / width > <nl> + < height > 55 < / height > <nl> < label > 31352 < / label > <nl> < font > - < / font > <nl> < texturefocus > OSDStopFO . png < / texturefocus > <nl> <nl> < onclick > PlayerControl ( Stop ) < / onclick > <nl> < / control > <nl> < control type = " button " id = " 604 " > <nl> - < posx > 180 < / posx > <nl> + < posx > 220 < / posx > <nl> < posy > 0 < / posy > <nl> - < width > 45 < / width > <nl> - < height > 45 < / height > <nl> + < width > 55 < / width > <nl> + < height > 55 < / height > <nl> < label > 31353 < / label > <nl> < font > - < / font > <nl> < texturefocus > OSDForwardFO . png < / texturefocus > <nl> <nl> < onclick > PlayerControl ( Forward ) < / onclick > <nl> < / control > <nl> < control type = " button " id = " 605 " > <nl> - < posx > 225 < / posx > <nl> + < posx > 275 < / posx > <nl> < posy > 0 < / posy > <nl> - < width > 45 < / width > <nl> - < height > 45 < / height > <nl> + < width > 55 < / width > <nl> + < height > 55 < / height > <nl> < label > 209 < / label > <nl> < font > - < / font > <nl> < texturefocus > OSDNextTrackFO . png < / texturefocus > <nl> <nl> < ! - - LiveTV - - > <nl> < control type = " group " id = " 100 " > <nl> < posx > 325 < / posx > <nl> - < posy > 50r < / posy > <nl> + < posy > 60r < / posy > <nl> < defaultcontrol always = " true " > 601 < / defaultcontrol > <nl> < animation effect = " fade " time = " 200 " > VisibleChange < / animation > <nl> < visible > ! [ Window . IsVisible ( SliderDialog ) | Window . IsVisible ( OSDVideoSettings ) | Window . IsVisible ( OSDAudioSettings ) | Window . IsVisible ( VideoBookmarks ) | Window . IsVisible ( PVROSDChannels ) | Window . IsVisible ( PVROSDGuide ) ] < / visible > <nl> <nl> < control type = " button " id = " 600 " > <nl> < posx > 0 < / posx > <nl> < posy > 0 < / posy > <nl> - < width > 45 < / width > <nl> - < height > 45 < / height > <nl> + < width > 55 < / width > <nl> + < height > 55 < / height > <nl> < label > 210 < / label > <nl> < font > - < / font > <nl> < texturefocus > OSDChannelUPFO . png < / texturefocus > <nl> <nl> < onclick > PlayerControl ( Previous ) < / onclick > <nl> < / control > <nl> < control type = " button " id = " 601 " > <nl> - < posx > 45 < / posx > <nl> + < posx > 55 < / posx > <nl> < posy > 0 < / posy > <nl> - < width > 45 < / width > <nl> - < height > 45 < / height > <nl> + < width > 55 < / width > <nl> + < height > 55 < / height > <nl> < label > 31354 < / label > <nl> < font > - < / font > <nl> < texturefocus > OSDChannelDownFO . 
png < / texturefocus > <nl> <nl> < onclick > PlayerControl ( Next ) < / onclick > <nl> < / control > <nl> < control type = " togglebutton " id = " 602 " > <nl> - < posx > 90 < / posx > <nl> + < posx > 110 < / posx > <nl> < posy > 0 < / posy > <nl> - < width > 45 < / width > <nl> - < height > 45 < / height > <nl> + < width > 55 < / width > <nl> + < height > 55 < / height > <nl> < label > 31351 < / label > <nl> < altlabel > 208 < / altlabel > <nl> < font > - < / font > <nl> <nl> < visible > False < / visible > <nl> < / control > <nl> < control type = " button " id = " 603 " > <nl> - < posx > 135 < / posx > <nl> + < posx > 165 < / posx > <nl> < posy > 0 < / posy > <nl> - < width > 45 < / width > <nl> - < height > 45 < / height > <nl> + < width > 55 < / width > <nl> + < height > 55 < / height > <nl> < label > 31351 < / label > <nl> < altlabel > 208 < / altlabel > <nl> < font > - < / font > <nl> <nl> < onclick > PlayerControl ( Stop ) < / onclick > <nl> < / control > <nl> < control type = " button " id = " 604 " > <nl> - < posx > 180 < / posx > <nl> + < posx > 220 < / posx > <nl> < posy > 0 < / posy > <nl> - < width > 45 < / width > <nl> - < height > 45 < / height > <nl> + < width > 55 < / width > <nl> + < height > 55 < / height > <nl> < label > 19019 < / label > <nl> < font > - < / font > <nl> < texturefocus > OSDChannelListFO . png < / texturefocus > <nl> <nl> < onclick > Dialog . Close ( VideoOSD ) < / onclick > <nl> < / control > <nl> < control type = " button " id = " 605 " > <nl> - < posx > 225 < / posx > <nl> + < posx > 275 < / posx > <nl> < posy > 0 < / posy > <nl> - < width > 45 < / width > <nl> - < height > 45 < / height > <nl> + < width > 55 < / width > <nl> + < height > 55 < / height > <nl> < label > $ LOCALIZE [ 19029 ] $ INFO [ VideoPlayer . ChannelName , - ] < / label > <nl> < font > - < / font > <nl> < texturefocus > OSDepgFO . png < / texturefocus > <nl> <nl> < onclick > Dialog . Close ( VideoOSD ) < / onclick > <nl> < / control > <nl> < control type = " label " id = " 606 " > <nl> - < posx > 290 < / posx > <nl> + < posx > 330 < / posx > <nl> < posy > 0 < / posy > <nl> - < width > 450 < / width > <nl> - < height > 45 < / height > <nl> + < width > 385 < / width > <nl> + < height > 55 < / height > <nl> < label > $ INFO [ VideoPlayer . NextTitle , $ LOCALIZE [ 209 ] : ] < / label > <nl> < align > center < / align > <nl> < aligny > center < / aligny > <nl> <nl> <nl> < ! - - ! LiveTV - - > <nl> < control type = " group " > <nl> - < posx > 250r < / posx > <nl> - < posy > 50r < / posy > <nl> + < posx > 300r < / posx > <nl> + < posy > 60r < / posy > <nl> < animation effect = " fade " time = " 200 " > VisibleChange < / animation > <nl> < visible > ! [ Window . IsVisible ( SliderDialog ) | Window . IsVisible ( OSDVideoSettings ) | Window . IsVisible ( OSDAudioSettings ) | Window . IsVisible ( VideoBookmarks ) ] < / visible > <nl> < visible > ! VideoPlayer . Content ( LiveTV ) < / visible > <nl> < control type = " togglebutton " id = " 701 " > <nl> < posx > 0 < / posx > <nl> < posy > 0 < / posy > <nl> - < width > 45 < / width > <nl> - < height > 45 < / height > <nl> + < width > 55 < / width > <nl> + < height > 55 < / height > <nl> < label > 31356 < / label > <nl> < font > - < / font > <nl> < texturefocus > OSDSubtitlesFO . png < / texturefocus > <nl> <nl> < usealttexture > IsEmpty ( Skin . 
String ( SubtitleScript_Path ) ) < / usealttexture > <nl> < / control > <nl> < control type = " button " id = " 702 " > <nl> - < posx > 45 < / posx > <nl> + < posx > 55 < / posx > <nl> < posy > 0 < / posy > <nl> - < width > 45 < / width > <nl> - < height > 45 < / height > <nl> + < width > 55 < / width > <nl> + < height > 55 < / height > <nl> < label > 13395 < / label > <nl> < font > - < / font > <nl> < texturefocus > OSDVideoFO . png < / texturefocus > <nl> <nl> < onclick > ActivateWindow ( OSDVideoSettings ) < / onclick > <nl> < / control > <nl> < control type = " button " id = " 703 " > <nl> - < posx > 90 < / posx > <nl> + < posx > 110 < / posx > <nl> < posy > 0 < / posy > <nl> - < width > 45 < / width > <nl> - < height > 45 < / height > <nl> + < width > 55 < / width > <nl> + < height > 55 < / height > <nl> < label > 13396 < / label > <nl> < font > - < / font > <nl> < texturefocus > OSDAudioFO . png < / texturefocus > <nl> <nl> < onclick > ActivateWindow ( OSDAudioSettings ) < / onclick > <nl> < / control > <nl> < control type = " button " id = " 704 " > <nl> - < posx > 135 < / posx > <nl> + < posx > 165 < / posx > <nl> < posy > 0 < / posy > <nl> - < width > 45 < / width > <nl> - < height > 45 < / height > <nl> + < width > 55 < / width > <nl> + < height > 55 < / height > <nl> < label > 31355 < / label > <nl> < font > - < / font > <nl> < texturefocus > OSDBookmarksFO . png < / texturefocus > <nl> <nl> < onclick > ActivateWindow ( VideoBookmarks ) < / onclick > <nl> < / control > <nl> < control type = " button " id = " 705 " > <nl> - < posx > 180 < / posx > <nl> + < posx > 220 < / posx > <nl> < posy > 0 < / posy > <nl> - < width > 45 < / width > <nl> - < height > 45 < / height > <nl> + < width > 55 < / width > <nl> + < height > 55 < / height > <nl> < label > 31355 < / label > <nl> < font > - < / font > <nl> < texturefocus > OSDDvdFO . png < / texturefocus > <nl> <nl> <nl> < ! - - LiveTV - - > <nl> < control type = " group " > <nl> - < posx > 200r < / posx > <nl> - < posy > 50r < / posy > <nl> + < posx > 240r < / posx > <nl> + < posy > 60r < / posy > <nl> < animation effect = " fade " time = " 200 " > VisibleChange < / animation > <nl> < visible > ! [ Window . IsVisible ( SliderDialog ) | Window . IsVisible ( OSDVideoSettings ) | Window . IsVisible ( OSDAudioSettings ) | Window . IsVisible ( VideoBookmarks ) | Window . IsVisible ( PVROSDChannels ) | Window . IsVisible ( PVROSDGuide ) ] < / visible > <nl> < visible > VideoPlayer . Content ( LiveTV ) < / visible > <nl> < control type = " button " id = " 701 " > <nl> < posx > 0 < / posx > <nl> < posy > 0 < / posy > <nl> - < width > 45 < / width > <nl> - < height > 45 < / height > <nl> + < width > 55 < / width > <nl> + < height > 55 < / height > <nl> < label > 31356 < / label > <nl> < font > - < / font > <nl> < texturefocus > OSDTeleTextFO . png < / texturefocus > <nl> <nl> < onclick > ActivateWindow ( Teletext ) < / onclick > <nl> < / control > <nl> < control type = " button " id = " 702 " > <nl> - < posx > 45 < / posx > <nl> + < posx > 55 < / posx > <nl> < posy > 0 < / posy > <nl> - < width > 45 < / width > <nl> - < height > 45 < / height > <nl> + < width > 55 < / width > <nl> + < height > 55 < / height > <nl> < label > 13395 < / label > <nl> < font > - < / font > <nl> < texturefocus > OSDVideoFO . 
png < / texturefocus > <nl> <nl> < onclick > ActivateWindow ( OSDVideoSettings ) < / onclick > <nl> < / control > <nl> < control type = " button " id = " 703 " > <nl> - < posx > 90 < / posx > <nl> + < posx > 110 < / posx > <nl> < posy > 0 < / posy > <nl> - < width > 45 < / width > <nl> - < height > 45 < / height > <nl> + < width > 55 < / width > <nl> + < height > 55 < / height > <nl> < label > 13396 < / label > <nl> < font > - < / font > <nl> < texturefocus > OSDAudioFO . png < / texturefocus > <nl> <nl> < onclick > ActivateWindow ( OSDAudioSettings ) < / onclick > <nl> < / control > <nl> < control type = " togglebutton " id = " 704 " > <nl> - < posx > 135 < / posx > <nl> + < posx > 165 < / posx > <nl> < posy > 0 < / posy > <nl> - < width > 45 < / width > <nl> - < height > 45 < / height > <nl> + < width > 55 < / width > <nl> + < height > 55 < / height > <nl> < label > 31351 < / label > <nl> < altlabel > 208 < / altlabel > <nl> < font > - < / font > <nl> | Changed : [ Confluence ] Made the Full screen player controls 20 % bigger ( may need to redo the graphics in future for better sharpness ) | xbmc/xbmc | d576adeff3b25b29fbed5972fa5e5d971f8160db | 2012-09-09T08:32:24Z |
mmm a / utils / update - checkout <nl> ppp b / utils / update - checkout <nl> def update_working_copy ( repo_path , branch ) : <nl> check_call ( [ " git " , " fetch " ] ) <nl> check_call ( [ " git " , " rebase " , " FETCH_HEAD " ] ) <nl> <nl> - def obtain_additional_swift_sources ( with_ssh , branch ) : <nl> + def obtain_additional_swift_sources ( with_ssh , branch , fast ) : <nl> additional_repos = { <nl> ' llvm ' : ' apple / swift - llvm ' , <nl> ' clang ' : ' apple / swift - clang ' , <nl> def obtain_additional_swift_sources ( with_ssh , branch ) : <nl> remote = " git @ github . com : " + repo + ' . git ' <nl> else : <nl> remote = " https : / / github . com / " + repo + ' . git ' <nl> - check_call ( [ ' git ' , ' clone ' , remote , dir_name ] ) <nl> + if fast : <nl> + check_call ( [ ' git ' , ' clone ' , ' - - depth ' , ' 1 ' , remote , dir_name ] ) <nl> + else : <nl> + check_call ( [ ' git ' , ' clone ' , remote , dir_name ] ) <nl> if branch : <nl> src_path = SWIFT_SOURCE_ROOT + " / " + dir_name + " / " + " . git " <nl> check_call ( [ ' git ' , ' - - git - dir ' , src_path , ' - - work - tree ' , os . path . join ( SWIFT_SOURCE_ROOT , dir_name ) , ' checkout ' , branch ] ) <nl> By default , updates your checkouts of Swift , SourceKit , LLDB , and SwiftPM . " " " ) <nl> parser . add_argument ( " - - clone - with - ssh " , <nl> help = " Obtain Sources for Swift and Related Projects via SSH " , <nl> action = " store_true " ) <nl> + parser . add_argument ( " - - fast " , <nl> + help = " Obtain Sources Fast " , <nl> + action = " store_true " ) <nl> parser . add_argument ( " - - branch " , <nl> help = " Obtain Sources for specific branch " ) <nl> args = parser . parse_args ( ) <nl> <nl> clone = args . clone <nl> clone_with_ssh = args . clone_with_ssh <nl> + clone_fast = args . fast <nl> branch = args . branch <nl> <nl> if clone or clone_with_ssh : <nl> - obtain_additional_swift_sources ( clone_with_ssh , branch ) <nl> + obtain_additional_swift_sources ( clone_with_ssh , branch , clone_fast ) <nl> return 0 <nl> <nl> update_working_copy ( os . path . join ( SWIFT_SOURCE_ROOT , " llbuild " ) , branch ) <nl> | Add - - fast flag to utils / update - checkout | apple/swift | 5e75a1ca7ef386a7bdfd465babdb2e81b0c95999 | 2016-02-11T01:49:51Z |
mmm a / hphp / runtime / vm / jit / vasm - graph - color . cpp <nl> ppp b / hphp / runtime / vm / jit / vasm - graph - color . cpp <nl> struct State { <nl> / / Vreg state <nl> jit : : vector < folly : : Optional < RegInfo > > regInfo ; <nl> <nl> + / / All Vregs which are RegClass : : SF <nl> + VregSet flags ; <nl> + <nl> / / Pre - calculated mapping of spill Vregs to spill slots <nl> jit : : fast_map < Vreg , Color > spillColors ; <nl> <nl> void calculate_liveness ( State & state , const BlockSet * changed = nullptr ) { <nl> VregSet & k , <nl> VregSet & u ) { <nl> for ( auto & inst : boost : : adaptors : : reverse ( block . code ) ) { <nl> - for ( auto const r : defs_set_cached ( state , inst ) ) { <nl> - if ( reg_class ( state , r ) = = RegClass : : SF ) continue ; <nl> - k . add ( r ) ; <nl> - g . remove ( r ) ; <nl> - } <nl> - for ( auto const r : uses_set_cached ( state , inst ) ) { <nl> - if ( reg_class ( state , r ) = = RegClass : : SF ) continue ; <nl> - g . add ( r ) ; <nl> - u . add ( r ) ; <nl> - } <nl> - } <nl> + auto const & defs = defs_set_cached ( state , inst ) ; <nl> + auto const & uses = uses_set_cached ( state , inst ) ; <nl> + k | = defs ; <nl> + g - = defs ; <nl> + g | = uses ; <nl> + u | = uses ; <nl> + } <nl> + g - = state . flags ; <nl> + k - = state . flags ; <nl> + u - = state . flags ; <nl> } ; <nl> <nl> dataflow_worklist < size_t , std : : less < size_t > > worklist ( state . rpo . size ( ) ) ; <nl> State make_state ( Vunit & unit , const Abi & abi ) { <nl> scratch <nl> } ; <nl> <nl> + state . flags . add ( state . abi . sf ) ; <nl> + <nl> / / Pre - size the table to avoid excessive resizing . <nl> state . regInfo . reserve ( unit . next_vr * 2 ) ; <nl> <nl> void infer_register_classes ( State & state ) { <nl> auto & info = reg_info_create ( state , r ) ; <nl> assertx ( info . regClass = = RegClass : : Any ) ; <nl> info . regClass = detail : : reg_class ( r ) ; <nl> + if ( info . regClass = = RegClass : : SF ) state . flags . add ( r ) ; <nl> haveWide | = ( info . regClass = = RegClass : : SIMDWide ) ; <nl> } ; <nl> <nl> void infer_register_classes ( State & state ) { <nl> auto const newClass = detail : : reg_class ( r ) ; <nl> if ( auto const c = merge ( info . regClass , newClass ) ) { <nl> info . regClass = * c ; <nl> + if ( info . regClass = = RegClass : : SF ) state . flags . add ( r ) ; <nl> haveWide | = ( info . regClass = = RegClass : : SIMDWide ) ; <nl> } else { <nl> incompatible . add ( r ) ; <nl> void infer_register_classes ( State & state ) { <nl> if ( is_ignored ( state , d [ i ] ) ) continue ; <nl> if ( ! d [ i ] . isPhys ( ) ) continue ; <nl> auto const newReg = unit . makeReg ( ) ; <nl> - reg_info_insert ( state , newReg , RegInfo { reg_class ( state , d [ i ] ) } ) ; <nl> + auto const dCls = reg_class ( state , d [ i ] ) ; <nl> + assertx ( dCls ! = RegClass : : SF ) ; <nl> + reg_info_insert ( state , newReg , RegInfo { dCls } ) ; <nl> copies . emplace_back ( newReg , d [ i ] ) ; <nl> d [ i ] = newReg ; <nl> invalidate_cached_operands ( firstInst ) ; <nl> void infer_register_classes ( State & state ) { <nl> auto const sCls = reg_class ( state , s [ i ] ) ; <nl> auto const dCls = reg_class ( state , d [ i ] ) ; <nl> if ( sCls = = dCls & & ! s [ i ] . isPhys ( ) ) continue ; <nl> + assertx ( sCls ! = RegClass : : SF & & dCls ! = RegClass : : SF ) ; <nl> <nl> / / Create a new Vreg in the same RegClass as the dest and use that in <nl> / / the phi instead . 
Add a copy between the old src and the new <nl> void set_spill_reg_classes ( State & state , <nl> always_assert ( <nl> is_ignored ( state , uses [ i ] ) = = is_ignored ( state , defs [ i ] ) <nl> ) ; <nl> + if ( is_ignored ( state , uses [ i ] ) ) break ; <nl> auto const dCls = reg_class ( state , defs [ i ] ) ; <nl> auto const uCls = reg_class ( state , uses [ i ] ) ; <nl> always_assert ( is_spill ( dCls ) = = is_spill ( uCls ) ) ; <nl> | Speed - up liveness calculation in vasm - graph - color | facebook/hhvm | d657533fdf4cf99ce67a8ae71b7cf53c1564f179 | 2019-12-11T23:14:29Z |
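A note on the facebook/hhvm change above: per-block liveness is a gen/kill dataflow pass, and the speed-up comes from replacing per-register class checks inside the instruction loop with whole-set union/difference operations, then subtracting the precomputed set of status-flag registers once per block instead of once per register. A small sketch of that backward pass using Python sets, assuming each instruction exposes defs and uses as sets (all names here are illustrative):

from collections import namedtuple

Inst = namedtuple('Inst', ['defs', 'uses'])

def block_gen_kill(instructions, flags):
    gen, kill = set(), set()
    # Walk the block backwards so a def cancels uses that occur later in
    # program order; gen ends up holding the upward-exposed uses.
    for inst in reversed(instructions):
        kill |= inst.defs
        gen -= inst.defs
        gen |= inst.uses
    # Filter the excluded registers once per block, not per instruction.
    return gen - flags, kill - flags

block = [Inst(defs={'a'}, uses={'b'}), Inst(defs={'c'}, uses={'a', 'sf'})]
print(block_gen_kill(block, flags={'sf'}))  # gen={'b'}, kill={'a', 'c'}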
mmm a / js / apps / system / aardvark / frontend / js / graphViewer / ui / modalDialogHelper . js <nl> ppp b / js / apps / system / aardvark / frontend / js / graphViewer / ui / modalDialogHelper . js <nl> var modalDialogHelper = modalDialogHelper | | { } ; <nl> i , <nl> id = idPre + idPost , <nl> lastId = 1 , <nl> + buttonTh = document . createElement ( " th " ) , <nl> addLineButton = document . createElement ( " button " ) , <nl> input = document . createElement ( " input " ) , <nl> addNewLine = function ( content ) { <nl> var modalDialogHelper = modalDialogHelper | | { } ; <nl> var innerTr = document . createElement ( " tr " ) , <nl> innerLabelTh = document . createElement ( " th " ) , <nl> innerContentTh = document . createElement ( " th " ) , <nl> + innerButtonTh = document . createElement ( " th " ) , <nl> innerInput = document . createElement ( " input " ) , <nl> removeRow = document . createElement ( " button " ) , <nl> lastItem ; <nl> var modalDialogHelper = modalDialogHelper | | { } ; <nl> table . removeChild ( innerTr ) ; <nl> rows . splice ( rows . indexOf ( innerTr ) , 1 ) ; <nl> } ; <nl> - <nl> - innerContentTh . appendChild ( removeRow ) ; <nl> + innerButtonTh . appendChild ( removeRow ) ; <nl> + innerTr . appendChild ( innerButtonTh ) ; <nl> rows . push ( innerTr ) ; <nl> } ; <nl> input . type = " text " ; <nl> input . id = id + " _1 " ; <nl> contentTh . appendChild ( input ) ; <nl> - contentTh . appendChild ( addLineButton ) ; <nl> + buttonTh . appendChild ( addLineButton ) ; <nl> + tr . appendChild ( buttonTh ) ; <nl> addLineButton . onclick = function ( ) { <nl> addNewLine ( ) ; <nl> } ; <nl> var modalDialogHelper = modalDialogHelper | | { } ; <nl> <nl> / / Set Classnames and attributes . <nl> div . id = idprefix + " modal " ; <nl> - div . className = " modal hide fade " ; <nl> + div . className = " modal hide fade createModalDialog " ; <nl> div . setAttribute ( " tabindex " , " - 1 " ) ; <nl> div . setAttribute ( " role " , " dialog " ) ; <nl> div . setAttribute ( " aria - labelledby " , " myModalLabel " ) ; <nl> mmm a / js / apps / system / aardvark / frontend / scss / _collection . scss <nl> ppp b / js / apps / system / aardvark / frontend / scss / _collection . scss <nl> <nl> text - align : left ; <nl> width : 20 % ! important ; <nl> <nl> - input , <nl> select , <nl> textarea { <nl> margin - top : 10px ; <nl> mmm a / js / apps / system / aardvark / frontend / scss / _modals . scss <nl> ppp b / js / apps / system / aardvark / frontend / scss / _modals . scss <nl> <nl> margin - bottom : 10px ; <nl> margin - top : 10px ; <nl> } <nl> + <nl> } <nl> <nl> . icon - info - sign { <nl> mmm a / js / apps / system / aardvark / frontend / scss / generated . css <nl> ppp b / js / apps / system / aardvark / frontend / scss / generated . css <nl> pre . gv - object - view { <nl> margin - left : 0 ; } <nl> <nl> . dashboard - interior - chart { <nl> - height : 222px ; <nl> - background - color : white ; } <nl> + background - color : white ; <nl> + height : 222px ; } <nl> <nl> . dashboard - large - chart { <nl> height : 250px ; <nl> input . gv - radio - button { <nl> font - weight : 400 ! important ; <nl> text - align : left ; <nl> width : 20 % ! important ; } <nl> - . collectionTh input , <nl> . collectionTh select , <nl> . collectionTh textarea { <nl> margin - top : 10px ; } <nl> mmm a / js / apps / system / aardvark / manifest . json <nl> ppp b / js / apps / system / aardvark / manifest . json <nl> <nl> " frontend / js / templates / editListEntryView . 
ejs " , <nl> " frontend / js / templates / footerView . ejs " , <nl> " frontend / js / templates / foxxActiveView . ejs " , <nl> - " frontend / js / templates / foxxEditView . ejs " , <nl> " frontend / js / templates / foxxInstalledView . ejs " , <nl> " frontend / js / templates / graphManagementView . ejs " , <nl> " frontend / js / templates / graphView . ejs " , <nl> | Fixed design bugs in graph viewer | arangodb/arangodb | 92f02fbce303f58faa27b75caf77417d3da44e83 | 2014-05-08T17:09:17Z |
mmm a / version . txt <nl> ppp b / version . txt <nl> COMPANY_NAME XBMC Foundation <nl> WEBSITE http : / / kodi . tv <nl> VERSION_MAJOR 18 <nl> VERSION_MINOR 0 <nl> - VERSION_TAG BETA2 <nl> - VERSION_CODE 17 . 99 . 802 <nl> - ADDON_API 17 . 9 . 802 <nl> + VERSION_TAG BETA3 <nl> + VERSION_CODE 17 . 99 . 803 <nl> + ADDON_API 17 . 9 . 803 <nl> APP_PACKAGE org . xbmc . kodi <nl> PACKAGE_IDENTITY XBMCFoundation . Kodi <nl> PACKAGE_PUBLISHER C62BD90A - CDD8 - 477F - 96C3 - B25992247B97 <nl> | Merge pull request from MartijnKaijser / v18 . 0beta3 | xbmc/xbmc | 1af0d08ae3dc718ddda61a51141227e06c6ce07c | 2018-09-09T19:03:40Z |
mmm a / modules / legacy / doc / motion_analysis . rst <nl> ppp b / modules / legacy / doc / motion_analysis . rst <nl> Calculates the optical flow for two images by using the block matching method . <nl> <nl> . . math : : <nl> <nl> - \ left \ lfloor \ frac { \ texttt { prev - > width } - \ texttt { block_size . width } } { \ texttt { shift_size . width } } \ right \ rfloor \ times \ left \ lfloor \ frac { \ texttt { prev - > height } - \ texttt { block_size . height } } { \ texttt { shift_size . height } } \ right \ rfloor <nl> + \ left \ lfloor \ frac { \ texttt { prev - > width } - \ texttt { block \ _size . width } } { \ texttt { shift \ _size . width } } \ right \ rfloor \ times \ left \ lfloor \ frac { \ texttt { prev - > height } - \ texttt { block \ _size . height } } { \ texttt { shift \ _size . height } } \ right \ rfloor <nl> <nl> size , 32 - bit floating - point , single - channel <nl> <nl> | Merged the trunk r8537 ( fixed pdf documentation build ) | opencv/opencv | 8159ea33863f31a058ece24fe5edc3ff7db2afcd | 2012-06-01T08:46:54Z |
mmm a / lib / SILGen / SILGenApply . cpp <nl> ppp b / lib / SILGen / SILGenApply . cpp <nl> void ArgEmitter : : emitShuffle ( Expr * inner , <nl> / / Emit the inner expression . <nl> SmallVector < ManagedValue , 8 > innerArgs ; <nl> SmallVector < InOutArgument , 2 > innerInOutArgs ; <nl> - ArgEmitter ( SGF , Rep , ClaimedParamsRef ( innerParams ) , innerArgs , innerInOutArgs , <nl> - / * foreign error * / None , / * foreign self * / ImportAsMemberStatus ( ) , <nl> - ( innerSpecialDests ? ArgSpecialDestArray ( * innerSpecialDests ) <nl> - : Optional < ArgSpecialDestArray > ( ) ) ) <nl> - . emitTopLevel ( ArgumentSource ( inner ) , innerOrigParamType ) ; <nl> + if ( ! innerParams . empty ( ) ) { <nl> + ArgEmitter ( SGF , Rep , ClaimedParamsRef ( innerParams ) , innerArgs , innerInOutArgs , <nl> + / * foreign error * / None , / * foreign self * / ImportAsMemberStatus ( ) , <nl> + ( innerSpecialDests ? ArgSpecialDestArray ( * innerSpecialDests ) <nl> + : Optional < ArgSpecialDestArray > ( ) ) ) <nl> + . emitTopLevel ( ArgumentSource ( inner ) , innerOrigParamType ) ; <nl> + } <nl> <nl> / / Make a second pass to split the inner arguments correctly . <nl> { <nl> mmm a / test / SILGen / default_arguments_generic . swift <nl> ppp b / test / SILGen / default_arguments_generic . swift <nl> func bar ( ) { <nl> / / CHECK : apply [ [ ZANG_DFLT_1 ] ] < Int , Double > <nl> Zim < Int > . zang ( Double . self , 22 ) <nl> } <nl> + <nl> + protocol Initializable { <nl> + init ( ) <nl> + } <nl> + struct Generic < T : Initializable > { <nl> + init ( _ value : T = T ( ) ) { } <nl> + } <nl> + struct InitializableImpl : Initializable { <nl> + init ( ) { } <nl> + } <nl> + / / CHECK - LABEL : sil hidden @ _T025default_arguments_generic17testInitializableyyF <nl> + func testInitializable ( ) { <nl> + / / The " . init " is required to trigger the crash that used to happen . <nl> + _ = Generic < InitializableImpl > . init ( ) <nl> + / / CHECK : [ [ INIT : % . + ] ] = function_ref @ _T025default_arguments_generic7GenericVACyxGxcfC <nl> + / / CHECK : function_ref @ _T025default_arguments_generic7GenericVACyxGxcfcfA_ : $ @ convention ( thin ) < τ_0_0 where τ_0_0 : Initializable > ( ) - > @ out τ_0_0 <nl> + / / CHECK : apply [ [ INIT ] ] < InitializableImpl > ( { { % . + } } , { { % . + } } ) : $ @ convention ( method ) < τ_0_0 where τ_0_0 : Initializable > ( @ in τ_0_0 , @ thin Generic < τ_0_0 > . Type ) - > Generic < τ_0_0 > <nl> + } / / CHECK : end sil function ' _T025default_arguments_generic17testInitializableyyF ' <nl> | [ SILGen ] Don ' t crash when calling a generic init with default args . ( ) | apple/swift | 59ddb5b00ffd227cdde451dfc33a7d7f78b3afcb | 2017-01-31T23:43:52Z |
mmm a / selfdrive / loggerd / tests / loggerd_tests_common . py <nl> ppp b / selfdrive / loggerd / tests / loggerd_tests_common . py <nl> def create_random_file ( file_path , size_mb , lock = False ) : <nl> os . remove ( lock_path ) <nl> <nl> class MockResponse ( ) : <nl> - def __init__ ( self , text ) : <nl> + def __init__ ( self , text , status_code ) : <nl> self . text = text <nl> + self . status_code = status_code <nl> <nl> class MockApi ( ) : <nl> def __init__ ( self , dongle_id ) : <nl> pass <nl> <nl> def get ( self , * args , * * kwargs ) : <nl> - return MockResponse ( ' { " url " : " http : / / localhost / does / not / exist " , " headers " : { } } ' ) <nl> + return MockResponse ( ' { " url " : " http : / / localhost / does / not / exist " , " headers " : { } } ' , 200 ) <nl> + <nl> + def get_token ( self ) : <nl> + return " fake - token " <nl> + <nl> + class MockApiIgnore ( ) : <nl> + def __init__ ( self , dongle_id ) : <nl> + pass <nl> + <nl> + def get ( self , * args , * * kwargs ) : <nl> + return MockResponse ( ' ' , 412 ) <nl> <nl> def get_token ( self ) : <nl> return " fake - token " <nl> def get ( self , k ) : <nl> class UploaderTestCase ( unittest . TestCase ) : <nl> f_type = " UNKNOWN " <nl> <nl> + def set_ignore ( self ) : <nl> + uploader . Api = MockApiIgnore <nl> + <nl> def setUp ( self ) : <nl> self . root = tempfile . mkdtemp ( ) <nl> uploader . ROOT = self . root # Monkey patch root dir <nl> mmm a / selfdrive / loggerd / tests / test_uploader . py <nl> ppp b / selfdrive / loggerd / tests / test_uploader . py <nl> def __init__ ( self ) : <nl> <nl> def reset ( self ) : <nl> self . upload_order = list ( ) <nl> + self . upload_ignored = list ( ) <nl> <nl> def emit ( self , record ) : <nl> try : <nl> j = json . loads ( record . message ) <nl> if j [ " event " ] = = " upload_success " : <nl> self . upload_order . append ( j [ " key " ] ) <nl> + if j [ " event " ] = = " upload_ignored " : <nl> + self . upload_ignored . append ( j [ " key " ] ) <nl> except Exception : <nl> pass <nl> <nl> def test_upload ( self ) : <nl> time . sleep ( 5 ) <nl> self . join_thread ( ) <nl> <nl> + self . assertTrue ( len ( log_handler . upload_ignored ) = = 0 , " Some files were ignored " ) <nl> self . assertFalse ( len ( log_handler . upload_order ) < len ( f_paths ) , " Some files failed to upload " ) <nl> self . assertFalse ( len ( log_handler . upload_order ) > len ( f_paths ) , " Some files were uploaded twice " ) <nl> for f_path in f_paths : <nl> def test_upload ( self ) : <nl> exp_order = self . gen_order ( [ self . seg_num ] , [ ] ) <nl> self . assertTrue ( log_handler . upload_order = = exp_order , " Files uploaded in wrong order " ) <nl> <nl> + def test_upload_ignored ( self ) : <nl> + self . set_ignore ( ) <nl> + f_paths = self . gen_files ( lock = False ) <nl> + <nl> + self . start_thread ( ) <nl> + # allow enough time that files could upload twice if there is a bug in the logic <nl> + time . sleep ( 5 ) <nl> + self . join_thread ( ) <nl> + <nl> + self . assertTrue ( len ( log_handler . upload_order ) = = 0 , " Some files were not ignored " ) <nl> + self . assertFalse ( len ( log_handler . upload_ignored ) < len ( f_paths ) , " Some files failed to ignore " ) <nl> + self . assertFalse ( len ( log_handler . upload_ignored ) > len ( f_paths ) , " Some files were ignored twice " ) <nl> + for f_path in f_paths : <nl> + self . assertTrue ( getxattr ( f_path , uploader . UPLOAD_ATTR_NAME ) , " All files not ignored " ) <nl> + exp_order = self . gen_order ( [ self . 
seg_num ] , [ ] ) <nl> + self . assertTrue ( log_handler . upload_ignored = = exp_order , " Files ignored in wrong order " ) <nl> + <nl> def test_upload_files_in_create_order ( self ) : <nl> f_paths = list ( ) <nl> seg1_nums = [ 0 , 1 , 2 , 10 , 20 ] <nl> def test_upload_files_in_create_order ( self ) : <nl> time . sleep ( 5 ) <nl> self . join_thread ( ) <nl> <nl> + self . assertTrue ( len ( log_handler . upload_ignored ) = = 0 , " Some files were ignored " ) <nl> self . assertFalse ( len ( log_handler . upload_order ) < len ( f_paths ) , " Some files failed to upload " ) <nl> self . assertFalse ( len ( log_handler . upload_order ) > len ( f_paths ) , " Some files were uploaded twice " ) <nl> for f_path in f_paths : <nl> | update tests | commaai/openpilot | 337fb1a273c4ac57c2a2a17ae68499eb8b49f37b | 2020-04-10T16:46:03Z |
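A note on the commaai/openpilot change above: the new test covers an upload path where the server answers HTTP 412 and the client marks the file as handled (via an xattr) so it is skipped rather than retried indefinitely. A minimal sketch of that decision logic, assuming a status code plus mark_handled and log_event callbacks (all illustrative, not the uploader's real API):

def handle_upload_response(status_code, path, mark_handled, log_event):
    if status_code == 200:
        mark_handled(path)                  # uploaded: never send again
        log_event('upload_success', path)
    elif status_code == 412:
        mark_handled(path)                  # server declined: skip, don't retry
        log_event('upload_ignored', path)
    else:
        log_event('upload_failed', path)    # transient: leave unmarked for retry

handle_upload_response(412, '/data/rlog.bz2',
                       mark_handled=lambda p: None,
                       log_event=lambda e, p: print(e, p))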
mmm a / include / rapidjson / document . h <nl> ppp b / include / rapidjson / document . h <nl> int z = a [ 0u ] . GetInt ( ) ; / / This works too . <nl> } <nl> <nl> private : <nl> - template < typename , typename > <nl> + template < typename , typename , typename > <nl> friend class GenericDocument ; <nl> <nl> enum { <nl> typedef GenericValue < UTF8 < > > Value ; <nl> / / ! A document for parsing JSON text as DOM . <nl> / * ! <nl> \ note implements Handler concept <nl> - \ tparam Encoding encoding for both parsing and string storage . <nl> - \ tparam Allocator allocator for allocating memory for the DOM , and the stack during parsing . <nl> - \ warning Although GenericDocument inherits from GenericValue , the API does \ b not provide any virtual functions , especially no virtual destructors . To avoid memory leaks , do not \ c delete a GenericDocument object via a pointer to a GenericValue . <nl> + \ tparam Encoding Encoding for both parsing and string storage . <nl> + \ tparam Allocator Allocator for allocating memory for the DOM <nl> + \ tparam StackAllocator Allocator for allocating memory for stack during parsing . <nl> + \ warning Although GenericDocument inherits from GenericValue , the API does \ b not provide any virtual functions , especially no virtual destructor . To avoid memory leaks , do not \ c delete a GenericDocument object via a pointer to a GenericValue . <nl> * / <nl> - template < typename Encoding , typename Allocator = MemoryPoolAllocator < > > <nl> + template < typename Encoding , typename Allocator = MemoryPoolAllocator < > , typename StackAllocator = CrtAllocator > <nl> class GenericDocument : public GenericValue < Encoding , Allocator > { <nl> public : <nl> typedef typename Encoding : : Ch Ch ; / / ! < Character type derived from Encoding . <nl> class GenericDocument : public GenericValue < Encoding , Allocator > { <nl> typedef Allocator AllocatorType ; / / ! < Allocator type from template parameter . <nl> <nl> / / ! Constructor <nl> - / * ! \ param allocator Optional allocator for allocating stack memory . <nl> - \ param stackCapacity Initial capacity of stack in bytes . <nl> + / * ! \ param allocator Optional allocator for allocating memory . <nl> + \ param stackCapacity Optional initial capacity of stack in bytes . <nl> + \ param stackAllocator Optional allocator for allocating memory for stack . <nl> * / <nl> - GenericDocument ( Allocator * allocator = 0 , size_t stackCapacity = kDefaultStackCapacity ) : stack_ ( allocator , stackCapacity ) , parseResult_ ( ) { } <nl> + GenericDocument ( Allocator * allocator = 0 , size_t stackCapacity = kDefaultStackCapacity , StackAllocator * stackAllocator = 0 ) : <nl> + allocator_ ( allocator ) , ownAllocator_ ( 0 ) , stack_ ( stackAllocator , stackCapacity ) , parseResult_ ( ) <nl> + { <nl> + if ( ! allocator_ ) <nl> + ownAllocator_ = allocator_ = new Allocator ( ) ; <nl> + } <nl> + <nl> + ~ GenericDocument ( ) { <nl> + delete ownAllocator_ ; <nl> + } <nl> <nl> / / ! @ name Parse from stream <nl> / / ! @ { <nl> class GenericDocument : public GenericValue < Encoding , Allocator > { <nl> / / ! @ } <nl> <nl> / / ! Get the allocator of this document . <nl> - Allocator & GetAllocator ( ) { return stack_ . GetAllocator ( ) ; } <nl> + Allocator & GetAllocator ( ) { return * allocator_ ; } <nl> <nl> / / ! Get the capacity of stack in bytes . <nl> size_t GetStackCapacity ( ) const { return stack_ . GetCapacity ( ) ; } <nl> class GenericDocument : public GenericValue < Encoding , Allocator > { <nl> ( stack_ . 
template Pop < ValueType > ( 1 ) ) - > ~ ValueType ( ) ; <nl> else <nl> stack_ . Clear ( ) ; <nl> + stack_ . ShrinkToFit ( ) ; <nl> } <nl> <nl> static const size_t kDefaultStackCapacity = 1024 ; <nl> - internal : : Stack < Allocator > stack_ ; <nl> + Allocator * allocator_ ; <nl> + Allocator * ownAllocator_ ; <nl> + internal : : Stack < StackAllocator > stack_ ; <nl> ParseResult parseResult_ ; <nl> } ; <nl> <nl> mmm a / test / unittest / documenttest . cpp <nl> ppp b / test / unittest / documenttest . cpp <nl> <nl> <nl> using namespace rapidjson ; <nl> <nl> - TEST ( Document , Parse ) { <nl> - Document doc ; <nl> + template < typename Allocator , typename StackAllocator > <nl> + void ParseTest ( ) { <nl> + typedef GenericDocument < UTF8 < > , Allocator , StackAllocator > DocumentType ; <nl> + typedef DocumentType : : ValueType ValueType ; <nl> + DocumentType doc ; <nl> <nl> doc . Parse ( " { \ " hello \ " : \ " world \ " , \ " t \ " : true , \ " f \ " : false , \ " n \ " : null , \ " i \ " : 123 , \ " pi \ " : 3 . 1416 , \ " a \ " : [ 1 , 2 , 3 , 4 ] } " ) ; <nl> <nl> EXPECT_TRUE ( doc . IsObject ( ) ) ; <nl> <nl> EXPECT_TRUE ( doc . HasMember ( " hello " ) ) ; <nl> - Value & hello = doc [ " hello " ] ; <nl> + const ValueType & hello = doc [ " hello " ] ; <nl> EXPECT_TRUE ( hello . IsString ( ) ) ; <nl> EXPECT_STREQ ( " world " , hello . GetString ( ) ) ; <nl> <nl> EXPECT_TRUE ( doc . HasMember ( " t " ) ) ; <nl> - Value & t = doc [ " t " ] ; <nl> + const ValueType & t = doc [ " t " ] ; <nl> EXPECT_TRUE ( t . IsTrue ( ) ) ; <nl> <nl> EXPECT_TRUE ( doc . HasMember ( " f " ) ) ; <nl> - Value & f = doc [ " f " ] ; <nl> + const ValueType & f = doc [ " f " ] ; <nl> EXPECT_TRUE ( f . IsFalse ( ) ) ; <nl> <nl> EXPECT_TRUE ( doc . HasMember ( " n " ) ) ; <nl> - Value & n = doc [ " n " ] ; <nl> + const ValueType & n = doc [ " n " ] ; <nl> EXPECT_TRUE ( n . IsNull ( ) ) ; <nl> <nl> EXPECT_TRUE ( doc . HasMember ( " i " ) ) ; <nl> - Value & i = doc [ " i " ] ; <nl> + const ValueType & i = doc [ " i " ] ; <nl> EXPECT_TRUE ( i . IsNumber ( ) ) ; <nl> EXPECT_EQ ( 123 , i . GetInt ( ) ) ; <nl> <nl> EXPECT_TRUE ( doc . HasMember ( " pi " ) ) ; <nl> - Value & pi = doc [ " pi " ] ; <nl> + const ValueType & pi = doc [ " pi " ] ; <nl> EXPECT_TRUE ( pi . IsNumber ( ) ) ; <nl> EXPECT_EQ ( 3 . 1416 , pi . GetDouble ( ) ) ; <nl> <nl> EXPECT_TRUE ( doc . HasMember ( " a " ) ) ; <nl> - Value & a = doc [ " a " ] ; <nl> + const ValueType & a = doc [ " a " ] ; <nl> EXPECT_TRUE ( a . IsArray ( ) ) ; <nl> EXPECT_EQ ( 4u , a . Size ( ) ) ; <nl> for ( SizeType i = 0 ; i < 4 ; i + + ) <nl> EXPECT_EQ ( i + 1 , a [ i ] . GetUint ( ) ) ; <nl> } <nl> <nl> + TEST ( Document , Parse ) { <nl> + ParseTest < MemoryPoolAllocator < > , CrtAllocator > ( ) ; <nl> + ParseTest < MemoryPoolAllocator < > , MemoryPoolAllocator < > > ( ) ; <nl> + ParseTest < CrtAllocator , MemoryPoolAllocator < > > ( ) ; <nl> + ParseTest < CrtAllocator , CrtAllocator > ( ) ; <nl> + } <nl> + <nl> static FILE * OpenEncodedFile ( const char * filename ) { <nl> char buffer [ 1024 ] ; <nl> sprintf ( buffer , " encodings / % s " , filename ) ; <nl> | Separate Document ' s value and stack allocator . | Tencent/rapidjson | 941aa93f458139c90ed21e708b59c7a09bc42cbc | 2014-08-17T10:33:47Z |
mmm a / core / image . cpp <nl> ppp b / core / image . cpp <nl> void Image : : convert ( Format p_new_format ) { <nl> if ( p_new_format = = format ) <nl> return ; <nl> <nl> + ERR_FAIL_COND_MSG ( write_lock . ptr ( ) , " Cannot convert image when it is locked . " ) ; <nl> + <nl> if ( format > FORMAT_RGBE9995 | | p_new_format > FORMAT_RGBE9995 ) { <nl> <nl> ERR_FAIL_MSG ( " Cannot convert to < - > from compressed formats . Use compress ( ) and decompress ( ) instead . " ) ; <nl> void Image : : resize_to_po2 ( bool p_square ) { <nl> void Image : : resize ( int p_width , int p_height , Interpolation p_interpolation ) { <nl> <nl> ERR_FAIL_COND_MSG ( data . size ( ) = = 0 , " Cannot resize image before creating it , use create ( ) or create_from_data ( ) first . " ) ; <nl> - <nl> ERR_FAIL_COND_MSG ( ! _can_modify ( format ) , " Cannot resize in compressed or custom image formats . " ) ; <nl> + ERR_FAIL_COND_MSG ( write_lock . ptr ( ) , " Cannot resize image when it is locked . " ) ; <nl> <nl> bool mipmap_aware = p_interpolation = = INTERPOLATE_TRILINEAR / * | | p_interpolation = = INTERPOLATE_TRICUBIC * / ; <nl> <nl> void Image : : blit_rect ( const Ref < Image > & p_src , const Rect2 & p_src_rect , const Po <nl> ERR_FAIL_COND ( dsize = = 0 ) ; <nl> ERR_FAIL_COND ( srcdsize = = 0 ) ; <nl> ERR_FAIL_COND ( format ! = p_src - > format ) ; <nl> + ERR_FAIL_COND_MSG ( ! _can_modify ( format ) , " Cannot blit_rect in compressed or custom image formats . " ) ; <nl> <nl> Rect2i clipped_src_rect = Rect2i ( 0 , 0 , p_src - > width , p_src - > height ) . clip ( p_src_rect ) ; <nl> <nl> void Image : : blend_rect_mask ( const Ref < Image > & p_src , const Ref < Image > & p_mask , c <nl> } <nl> <nl> void Image : : fill ( const Color & c ) { <nl> + ERR_FAIL_COND_MSG ( ! _can_modify ( format ) , " Cannot fill in compressed or custom image formats . " ) ; <nl> <nl> lock ( ) ; <nl> <nl> | Merge pull request from timothyqiu / image - ops | godotengine/godot | 3cfb67e0f7f55f884c01994b7c7f35a5160dfc3e | 2020-01-28T07:14:40Z |
mmm a / CMakeLists . txt <nl> ppp b / CMakeLists . txt <nl> if ( WIN32 ) <nl> target_link_libraries ( trojan wsock32 ws2_32 ) <nl> else ( ) <nl> install ( TARGETS trojan DESTINATION bin ) <nl> - configure_file ( examples / server . json - example trojan . json COPYONLY ) <nl> - install ( FILES $ { CMAKE_BINARY_DIR } / trojan . json DESTINATION / etc ) <nl> + if ( NOT EXISTS / etc / trojan . json ) <nl> + configure_file ( examples / server . json - example trojan . json COPYONLY ) <nl> + install ( FILES $ { CMAKE_BINARY_DIR } / trojan . json DESTINATION / etc ) <nl> + endif ( ) <nl> if ( EXISTS / etc / systemd / system ) <nl> configure_file ( examples / trojan . service - example trojan . service ) <nl> install ( FILES $ { CMAKE_BINARY_DIR } / trojan . service DESTINATION / etc / systemd / system ) <nl> | Do not override system config | trojan-gfw/trojan | 59edfa5a698f35b2da7281f4d8c8327621a28d35 | 2017-10-31T17:35:19Z |
mmm a / src / php / README . md <nl> ppp b / src / php / README . md <nl> Pre - Alpha : This gRPC PHP implementation is work - in - progress and is not expected <nl> <nl> # # ENVIRONMENT <nl> <nl> - Install ` php5 ` and ` php5 - dev ` . <nl> + Prerequisite : PHP 5 . 5 or later , PHPUnit , pecl <nl> <nl> - To run the tests , additionally install ` phpunit ` . <nl> - <nl> - Alternatively , build and install PHP 5 . 5 or later from source with standard <nl> - configuration options . <nl> + ` ` ` sh <nl> + sudo apt - get install php5 php5 - dev phpunit php - pear <nl> + ` ` ` <nl> <nl> # # Build from Homebrew <nl> <nl> $ make check <nl> $ sudo make install <nl> ` ` ` <nl> <nl> - Build and install the gRPC C core <nl> + Build and install the gRPC C core libraries <nl> <nl> ` ` ` sh <nl> $ cd grpc <nl> $ make <nl> $ sudo make install <nl> ` ` ` <nl> <nl> - Build the gRPC PHP extension <nl> + Install the gRPC PHP extension <nl> + <nl> + ` ` ` sh <nl> + $ sudo pecl install grpc <nl> + ` ` ` <nl> + <nl> + OR <nl> <nl> ` ` ` sh <nl> $ cd grpc / src / php / ext / grpc <nl> $ . / bin / run_gen_code_test . sh <nl> [ linuxbrew ] : https : / / github . com / Homebrew / linuxbrew # installation <nl> [ gRPC install script ] : https : / / raw . githubusercontent . com / grpc / homebrew - grpc / master / scripts / install <nl> [ Node ] : https : / / github . com / grpc / grpc / tree / master / src / node / examples <nl> - <nl> mmm a / src / php / bin / run_tests . sh <nl> ppp b / src / php / bin / run_tests . sh <nl> set - e <nl> cd $ ( dirname $ 0 ) <nl> default_extension_dir = ` php - i | grep extension_dir | sed ' s / . * = > / / g ' ` <nl> <nl> - module_dir = . . / ext / grpc / modules <nl> + if command - v brew > / dev / null & & [ - d ` brew - - prefix ` / opt / grpc - php ] <nl> + then <nl> + # homebrew and the grpc - php formula are installed <nl> + extension_dir = " - d extension_dir = " ` brew - - prefix ` / opt / grpc - php <nl> + elif [ ! - e $ default_extension_dir / grpc . so ] <nl> + then <nl> + # the grpc extension is not found in the default PHP extension dir <nl> + # try the source modules directory <nl> + module_dir = . . / ext / grpc / modules <nl> + if [ ! - d $ module_dir ] <nl> + then <nl> + echo " Please run ' phpize & & . / configure & & make ' from ext / grpc first " <nl> + exit 1 <nl> + fi <nl> <nl> - # sym - link in system supplied extensions <nl> - for f in $ default_extension_dir / * . so <nl> - do <nl> - ln - s $ f $ module_dir / $ ( basename $ f ) & > / dev / null | | true <nl> - done <nl> + # sym - link in system supplied extensions <nl> + for f in $ default_extension_dir / * . so <nl> + do <nl> + ln - s $ f $ module_dir / $ ( basename $ f ) & > / dev / null | | true <nl> + done <nl> + <nl> + extension_dir = ' - d extension_dir = ' $ module_dir <nl> + fi <nl> <nl> php \ <nl> - - d extension_dir = $ module_dir \ <nl> + $ extension_dir \ <nl> - d extension = grpc . so \ <nl> ` which phpunit ` - v - - debug - - strict . . / tests / unit_tests <nl> new file mode 100644 <nl> index 00000000000 . . 17b94fedc01 <nl> mmm / dev / null <nl> ppp b / src / php / ext / grpc / CREDITS <nl> <nl> + Michael Lumish ( mlumish @ google . com ) <nl> + Tim Emiola ( temiola @ google . com ) <nl> + Stanley Cheung ( stanleycheung @ google . com ) <nl> new file mode 100644 <nl> index 00000000000 . . 704b523970a <nl> mmm / dev / null <nl> ppp b / src / php / ext / grpc / LICENSE <nl> <nl> + / * <nl> + * <nl> + * Copyright 2015 , Google Inc . <nl> + * All rights reserved . 
<nl> + * <nl> + * Redistribution and use in source and binary forms , with or without <nl> + * modification , are permitted provided that the following conditions are <nl> + * met : <nl> + * <nl> + * * Redistributions of source code must retain the above copyright <nl> + * notice , this list of conditions and the following disclaimer . <nl> + * * Redistributions in binary form must reproduce the above <nl> + * copyright notice , this list of conditions and the following disclaimer <nl> + * in the documentation and / or other materials provided with the <nl> + * distribution . <nl> + * * Neither the name of Google Inc . nor the names of its <nl> + * contributors may be used to endorse or promote products derived from <nl> + * this software without specific prior written permission . <nl> + * <nl> + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS <nl> + * " AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT <nl> + * LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR <nl> + * A PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT <nl> + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL , <nl> + * SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT <nl> + * LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , <nl> + * DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY <nl> + * THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT <nl> + * ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> + * OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> + * <nl> + * / <nl> new file mode 100644 <nl> index 00000000000 . . 0ac09e18353 <nl> mmm / dev / null <nl> ppp b / src / php / ext / grpc / README . md <nl> <nl> + gRPC PHP Extension <nl> + = = = = = = = = = = = = = = = = = = <nl> + <nl> + # Requirements <nl> + <nl> + * PHP 5 . 5 + <nl> + * [ gRPC core library ] ( https : / / github . com / grpc / grpc ) 0 . 9 . 1 <nl> + <nl> + # Installation <nl> + <nl> + # # Install PHP 5 <nl> + <nl> + ` ` ` <nl> + $ sudo apt - get install git php5 php5 - dev php - pear unzip <nl> + ` ` ` <nl> + <nl> + # # Compile gRPC Core Library <nl> + <nl> + Clone the gRPC source code repository <nl> + <nl> + ` ` ` <nl> + $ git clone https : / / github . com / grpc / grpc . git <nl> + ` ` ` <nl> + <nl> + Build and install the Protocol Buffers compiler ( protoc ) <nl> + <nl> + ` ` ` <nl> + $ # from grpc <nl> + $ git checkout - - track origin / release - 0_9 <nl> + $ git pull - - recurse - submodules & & git submodule update - - init - - recursive <nl> + $ cd third_party / protobuf <nl> + $ . / autogen . sh <nl> + $ . / configure <nl> + $ make <nl> + $ make check <nl> + $ sudo make install <nl> + ` ` ` <nl> + <nl> + Build and install the gRPC C core library <nl> + <nl> + ` ` ` sh <nl> + $ # from grpc <nl> + $ make <nl> + $ sudo make install <nl> + ` ` ` <nl> + <nl> + # # Install the gRPC PHP extension <nl> + <nl> + Quick install <nl> + <nl> + ` ` ` sh <nl> + $ sudo pecl install grpc <nl> + ` ` ` <nl> + <nl> + Note : before a stable release , you may need to do <nl> + <nl> + ` ` ` sh <nl> + $ sudo pecl install grpc - 0 . 5 . 0 <nl> + ` ` ` <nl> + <nl> + OR <nl> + <nl> + Compile from source <nl> + <nl> + ` ` ` sh <nl> + $ # from grpc <nl> + $ cd src / php / ext / grpc <nl> + $ phpize <nl> + $ . / configure <nl> + $ make <nl> + $ sudo make install <nl> + ` ` ` <nl> new file mode 100644 <nl> index 00000000000 . . 
2c89829d512 <nl> mmm / dev / null <nl> ppp b / src / php / ext / grpc / package . xml <nl> <nl> + < ? xml version = " 1 . 0 " encoding = " UTF - 8 " ? > <nl> + < package packagerversion = " 1 . 9 . 5 " version = " 2 . 0 " xmlns = " http : / / pear . php . net / dtd / package - 2 . 0 " xmlns : tasks = " http : / / pear . php . net / dtd / tasks - 1 . 0 " xmlns : xsi = " http : / / www . w3 . org / 2001 / XMLSchema - instance " xsi : schemaLocation = " http : / / pear . php . net / dtd / tasks - 1 . 0 http : / / pear . php . net / dtd / tasks - 1 . 0 . xsd http : / / pear . php . net / dtd / package - 2 . 0 http : / / pear . php . net / dtd / package - 2 . 0 . xsd " > <nl> + < name > grpc < / name > <nl> + < channel > pecl . php . net < / channel > <nl> + < summary > A high performance , open source , general RPC framework that puts mobile and HTTP / 2 first . < / summary > <nl> + < description > Remote Procedure Calls ( RPCs ) provide a useful abstraction for building distributed applications and services . The libraries in this repository provide a concrete implementation of the gRPC protocol , layered over HTTP / 2 . These libraries enable communication between clients and servers using any combination of the supported languages . < / description > <nl> + < lead > <nl> + < name > Stanley Cheung < / name > <nl> + < user > stanleycheung < / user > <nl> + < email > grpc - packages @ google . com < / email > <nl> + < active > yes < / active > <nl> + < / lead > <nl> + < date > 2015 - 06 - 16 < / date > <nl> + < time > 20 : 12 : 55 < / time > <nl> + < version > <nl> + < release > 0 . 5 . 0 < / release > <nl> + < api > 0 . 5 . 0 < / api > <nl> + < / version > <nl> + < stability > <nl> + < release > alpha < / release > <nl> + < api > alpha < / api > <nl> + < / stability > <nl> + < license > BSD < / license > <nl> + < notes > <nl> + First alpha release <nl> + < / notes > <nl> + < contents > <nl> + < dir baseinstalldir = " / " name = " / " > <nl> + < file baseinstalldir = " / " md5sum = " 6f19828fb869b7b8a590cbb76b4f996d " name = " byte_buffer . c " role = " src " / > <nl> + < file baseinstalldir = " / " md5sum = " c8de0f819499c48adfc8d7f472c0196b " name = " byte_buffer . h " role = " src " / > <nl> + < file baseinstalldir = " / " md5sum = " cb45b62f767ae7b4377761df696649fc " name = " call . c " role = " src " / > <nl> + < file baseinstalldir = " / " md5sum = " 26acbf04c30162c2d2aca4688bb2aec8 " name = " call . h " role = " src " / > <nl> + < file baseinstalldir = " / " md5sum = " 50837fbdb2892795f1871b22e5979762 " name = " channel . c " role = " src " / > <nl> + < file baseinstalldir = " / " md5sum = " f1b66029daeced20b47cf00cc6523fc8 " name = " channel . h " role = " src " / > <nl> + < file baseinstalldir = " / " md5sum = " 81a1193e93d8b6602add8ac360de565b " name = " completion_queue . c " role = " src " / > <nl> + < file baseinstalldir = " / " md5sum = " f10b5bb232d74a6878e829e2e76cdaa2 " name = " completion_queue . h " role = " src " / > <nl> + < file baseinstalldir = " / " md5sum = " a9181ed994a072ac5f41e7c9705c170f " name = " config . m4 " role = " src " / > <nl> + < file baseinstalldir = " / " md5sum = " 8c3f1e11dac623001378bfd53b554f08 " name = " credentials . c " role = " src " / > <nl> + < file baseinstalldir = " / " md5sum = " 6988d6e97c19c8f8e3eb92371cf8246b " name = " credentials . 
h " role = " src " / > <nl> + < file baseinstalldir = " / " md5sum = " 38a1bc979d810c36ebc2a52d4b7b5319 " name = " CREDITS " role = " doc " / > <nl> + < file baseinstalldir = " / " md5sum = " 3f35b472bbdef5a788cd90617d7d0847 " name = " LICENSE " role = " doc " / > <nl> + < file baseinstalldir = " / " md5sum = " 6aaa7a290122d230f2d8c4e4e05da4a9 " name = " php_grpc . c " role = " src " / > <nl> + < file baseinstalldir = " / " md5sum = " 673b07859d9f69232f8a754c56780686 " name = " php_grpc . h " role = " src " / > <nl> + < file baseinstalldir = " / " md5sum = " 4d4d3382f8d10cae2e4378468e5516b9 " name = " README . md " role = " doc " / > <nl> + < file baseinstalldir = " / " md5sum = " 53fda0ee6937f6ddc8e271886018d441 " name = " server . c " role = " src " / > <nl> + < file baseinstalldir = " / " md5sum = " 4b730f06d14cbbb0642bdbd194749595 " name = " server . h " role = " src " / > <nl> + < file baseinstalldir = " / " md5sum = " f6930beafb6c0e061899262f2f077e98 " name = " server_credentials . c " role = " src " / > <nl> + < file baseinstalldir = " / " md5sum = " 9c4b4cc06356a8a39a16a085a9b85996 " name = " server_credentials . h " role = " src " / > <nl> + < file baseinstalldir = " / " md5sum = " c89c623cd17177ebde18313fc5c17122 " name = " timeval . c " role = " src " / > <nl> + < file baseinstalldir = " / " md5sum = " 496e27a100b4d93ca3fb35c924c5e163 " name = " timeval . h " role = " src " / > <nl> + < / dir > <nl> + < / contents > <nl> + < dependencies > <nl> + < required > <nl> + < php > <nl> + < min > 5 . 5 . 0 < / min > <nl> + < / php > <nl> + < pearinstaller > <nl> + < min > 1 . 4 . 0 < / min > <nl> + < / pearinstaller > <nl> + < / required > <nl> + < / dependencies > <nl> + < providesextension > grpc < / providesextension > <nl> + < extsrcrelease / > <nl> + < changelog > <nl> + < release > <nl> + < version > <nl> + < release > 0 . 5 . 0 < / release > <nl> + < api > 0 . 5 . 0 < / api > <nl> + < / version > <nl> + < stability > <nl> + < release > alpha < / release > <nl> + < api > alpha < / api > <nl> + < / stability > <nl> + < date > 2015 - 06 - 16 < / date > <nl> + < license > BSD < / license > <nl> + < notes > <nl> + First alpha release <nl> + < / notes > <nl> + < / release > <nl> + < / changelog > <nl> + < / package > <nl> | Merge pull request from stanley - cheung / php_pecl_extension | grpc/grpc | 94213dfa64408f2b267aa229ef3be481d5a6fb85 | 2015-06-18T19:25:16Z |
mmm a / core / input / gamecontrollerdb . txt <nl> ppp b / core / input / gamecontrollerdb . txt <nl> <nl> # Windows <nl> 03000000fa2d00000100000000000000 , 3DRUDDER , leftx : a0 , lefty : a1 , rightx : a5 , righty : a2 , platform : Windows , <nl> 03000000c82d00002038000000000000 , 8bitdo , a : b1 , b : b0 , back : b10 , dpdown : h0 . 4 , dpleft : h0 . 8 , dpright : h0 . 2 , dpup : h0 . 1 , guide : b2 , leftshoulder : b6 , leftstick : b13 , lefttrigger : b8 , leftx : a0 , lefty : a1 , rightshoulder : b7 , rightstick : b14 , righttrigger : b9 , rightx : a3 , righty : a4 , start : b11 , x : b4 , y : b3 , platform : Windows , <nl> - 03000000c82d000011ab000000000000 , 8BitDo F30 , a : b1 , b : b0 , back : b10 , dpdown : h0 . 4 , dpleft : h0 . 8 , dpright : h0 . 2 , dpup : h0 . 1 , guide : b2 , leftshoulder : b6 , leftstick : b13 , lefttrigger : b8 , leftx : a0 , lefty : a1 , rightshoulder : b7 , rightstick : b14 , righttrigger : b9 , rightx : a2 , righty : a5 , start : b11 , x : b4 , y : b3 , platform : Windows , <nl> + 03000000c82d000011ab000000000000 , 8BitDo F30 , a : b1 , b : b0 , back : b10 , dpdown : h0 . 4 , dpleft : h0 . 8 , dpright : h0 . 2 , dpup : h0 . 1 , guide : b2 , leftshoulder : b6 , leftstick : b13 , lefttrigger : b8 , leftx : a0 , lefty : a1 , rightshoulder : b7 , rightstick : b14 , righttrigger : b9 , rightx : a3 , righty : a4 , start : b11 , x : b4 , y : b3 , platform : Windows , <nl> 03000000c82d00001038000000000000 , 8BitDo F30 Pro , a : b1 , b : b0 , back : b10 , dpdown : h0 . 4 , dpleft : h0 . 8 , dpright : h0 . 2 , dpup : h0 . 1 , guide : b2 , leftshoulder : b6 , leftstick : b13 , lefttrigger : b8 , leftx : a0 , lefty : a1 , rightshoulder : b7 , rightstick : b14 , righttrigger : b9 , rightx : a2 , righty : a5 , start : b11 , x : b4 , y : b3 , platform : Windows , <nl> 03000000c82d00000090000000000000 , 8BitDo FC30 Pro , a : b1 , b : b0 , back : b10 , dpdown : h0 . 4 , dpleft : h0 . 8 , dpright : h0 . 2 , dpup : h0 . 1 , guide : b2 , leftshoulder : b6 , leftstick : b13 , lefttrigger : b8 , leftx : a0 , lefty : a1 , rightshoulder : b7 , rightstick : b14 , righttrigger : b9 , rightx : a3 , righty : a4 , start : b11 , x : b4 , y : b3 , platform : Windows , <nl> 03000000c82d00000650000000000000 , 8BitDo M30 , a : b0 , b : b1 , dpdown : h0 . 4 , dpleft : h0 . 8 , dpright : h0 . 2 , dpup : h0 . 1 , leftshoulder : a4 , lefttrigger : a5 , leftx : a0 , lefty : a1 , rightshoulder : b6 , righttrigger : b7 , start : b11 , x : b3 , y : b4 , platform : Windows , <nl> 03000000c82d00000310000000000000 , 8BitDo N30 , a : b0 , b : b1 , back : b10 , dpdown : h0 . 4 , dpleft : h0 . 8 , dpright : h0 . 2 , dpup : h0 . 1 , leftshoulder : b6 , leftx : a0 , lefty : a1 , rightshoulder : b7 , start : b11 , x : b3 , y : b4 , platform : Windows , <nl> 03000000c82d00002028000000000000 , 8BitDo N30 , a : b1 , b : b0 , back : b10 , dpdown : h0 . 4 , dpleft : h0 . 8 , dpright : h0 . 2 , dpup : h0 . 1 , guide : b2 , leftshoulder : b6 , leftstick : b13 , lefttrigger : b8 , leftx : a0 , lefty : a1 , rightshoulder : b7 , rightstick : b14 , righttrigger : b9 , rightx : a2 , righty : a5 , start : b11 , x : b4 , y : b3 , platform : Windows , <nl> 03000000c82d00008010000000000000 , 8BitDo N30 , a : b0 , b : b1 , back : b10 , dpdown : h0 . 4 , dpleft : h0 . 8 , dpright : h0 . 2 , dpup : h0 . 
1 , leftshoulder : b6 , leftx : a0 , lefty : a1 , rightshoulder : b7 , start : b11 , x : b3 , y : b4 , platform : Windows , <nl> - 03000000c82d00000190000000000000 , 8BitDo N30 Pro , a : b1 , b : b0 , back : b10 , dpdown : h0 . 4 , dpleft : h0 . 8 , dpright : h0 . 2 , dpup : h0 . 1 , guide : b2 , leftshoulder : b6 , leftstick : b13 , lefttrigger : b8 , leftx : a0 , lefty : a1 , rightshoulder : b7 , rightstick : b14 , righttrigger : b9 , rightx : a2 , righty : a5 , start : b11 , x : b4 , y : b3 , platform : Windows , <nl> - 03000000c82d00001590000000000000 , 8BitDo N30 Pro 2 , a : b1 , b : b0 , back : b10 , dpdown : h0 . 4 , dpleft : h0 . 8 , dpright : h0 . 2 , dpup : h0 . 1 , guide : b2 , leftshoulder : b6 , leftstick : b13 , lefttrigger : b8 , leftx : a0 , lefty : a1 , rightshoulder : b7 , rightstick : b14 , righttrigger : b9 , rightx : a2 , righty : a5 , start : b11 , x : b4 , y : b3 , platform : Windows , <nl> - 03000000c82d00006528000000000000 , 8BitDo N30 Pro 2 , a : b1 , b : b0 , back : b10 , dpdown : h0 . 4 , dpleft : h0 . 8 , dpright : h0 . 2 , dpup : h0 . 1 , guide : b2 , leftshoulder : b6 , leftstick : b13 , lefttrigger : b8 , leftx : a0 , lefty : a1 , rightshoulder : b7 , rightstick : b14 , righttrigger : b9 , rightx : a2 , righty : a5 , start : b11 , x : b4 , y : b3 , platform : Windows , <nl> + 03000000c82d00000190000000000000 , 8BitDo N30 Pro , a : b1 , b : b0 , back : b10 , dpdown : h0 . 4 , dpleft : h0 . 8 , dpright : h0 . 2 , dpup : h0 . 1 , guide : b2 , leftshoulder : b6 , leftstick : b13 , lefttrigger : b8 , leftx : a0 , lefty : a1 , rightshoulder : b7 , rightstick : b14 , righttrigger : b9 , rightx : a3 , righty : a4 , start : b11 , x : b4 , y : b3 , platform : Windows , <nl> + 03000000c82d00001590000000000000 , 8BitDo N30 Pro 2 , a : b1 , b : b0 , back : b10 , dpdown : h0 . 4 , dpleft : h0 . 8 , dpright : h0 . 2 , dpup : h0 . 1 , guide : b2 , leftshoulder : b6 , leftstick : b13 , lefttrigger : b8 , leftx : a0 , lefty : a1 , rightshoulder : b7 , rightstick : b14 , righttrigger : b9 , rightx : a3 , righty : a4 , start : b11 , x : b4 , y : b3 , platform : Windows , <nl> + 03000000c82d00006528000000000000 , 8BitDo N30 Pro 2 , a : b1 , b : b0 , back : b10 , dpdown : h0 . 4 , dpleft : h0 . 8 , dpright : h0 . 2 , dpup : h0 . 1 , guide : b2 , leftshoulder : b6 , leftstick : b13 , lefttrigger : b8 , leftx : a0 , lefty : a1 , rightshoulder : b7 , rightstick : b14 , righttrigger : b9 , rightx : a3 , righty : a4 , start : b11 , x : b4 , y : b3 , platform : Windows , <nl> 03000000c82d00015900000000000000 , 8BitDo N30 Pro 2 , a : b1 , b : b0 , back : b10 , dpdown : h0 . 4 , dpleft : h0 . 8 , dpright : h0 . 2 , dpup : h0 . 1 , guide : b2 , leftshoulder : b6 , leftstick : b13 , lefttrigger : b8 , leftx : a0 , lefty : a1 , rightshoulder : b7 , rightstick : b14 , righttrigger : b9 , rightx : a2 , righty : a5 , start : b11 , x : b4 , y : b3 , platform : Windows , <nl> 03000000c82d00065280000000000000 , 8BitDo N30 Pro 2 , a : b1 , b : b0 , back : b10 , dpdown : h0 . 4 , dpleft : h0 . 8 , dpright : h0 . 2 , dpup : h0 . 1 , guide : b2 , leftshoulder : b6 , leftstick : b13 , lefttrigger : b8 , leftx : a0 , lefty : a1 , rightshoulder : b7 , rightstick : b14 , righttrigger : b9 , rightx : a2 , righty : a5 , start : b11 , x : b4 , y : b3 , platform : Windows , <nl> 03000000022000000090000000000000 , 8Bitdo NES30 Pro , a : b1 , b : b0 , back : b10 , dpdown : h0 . 4 , dpleft : h0 . 8 , dpright : h0 . 2 , dpup : h0 . 
1 , leftshoulder : b6 , leftstick : b13 , lefttrigger : b8 , leftx : a0 , lefty : a1 , rightshoulder : b7 , rightstick : b14 , righttrigger : b9 , rightx : a3 , righty : a4 , start : b11 , x : b4 , y : b3 , platform : Windows , <nl> 03000000203800000900000000000000 , 8Bitdo NES30 Pro , a : b1 , b : b0 , back : b10 , dpdown : h0 . 4 , dpleft : h0 . 8 , dpright : h0 . 2 , dpup : h0 . 1 , leftshoulder : b6 , leftstick : b13 , lefttrigger : b8 , leftx : a0 , lefty : a1 , rightshoulder : b7 , rightstick : b14 , righttrigger : b9 , rightx : a3 , righty : a4 , start : b11 , x : b4 , y : b3 , platform : Windows , <nl> 03000000c82d00000130000000000000 , 8BitDo SF30 , a : b1 , b : b0 , back : b10 , dpdown : h0 . 4 , dpleft : h0 . 8 , dpright : h0 . 2 , dpup : h0 . 1 , guide : b2 , leftshoulder : b6 , leftstick : b13 , lefttrigger : b8 , leftx : a0 , lefty : a1 , rightshoulder : b7 , rightstick : b14 , righttrigger : b9 , rightx : a2 , righty : a5 , start : b11 , x : b4 , y : b3 , platform : Windows , <nl> - 03000000c82d00000060000000000000 , 8Bitdo SF30 Pro , a : b1 , b : b0 , back : b10 , dpdown : h0 . 4 , dpleft : h0 . 8 , dpright : h0 . 2 , dpup : h0 . 1 , leftshoulder : b6 , leftstick : b13 , lefttrigger : b8 , leftx : a0 , lefty : a1 , rightshoulder : b7 , rightstick : b14 , righttrigger : b9 , rightx : a3 , righty : a4 , start : b11 , x : b4 , y : b3 , platform : Windows , <nl> - 03000000c82d00000061000000000000 , 8Bitdo SF30 Pro , a : b1 , b : b0 , back : b10 , dpdown : h0 . 4 , dpleft : h0 . 8 , dpright : h0 . 2 , dpup : h0 . 1 , guide : b2 , leftshoulder : b6 , leftstick : b13 , lefttrigger : b8 , leftx : a0 , lefty : a1 , rightshoulder : b7 , rightstick : b14 , righttrigger : b9 , rightx : a3 , righty : a4 , start : b11 , x : b4 , y : b3 , platform : Windows , <nl> + 03000000c82d00000060000000000000 , 8Bitdo SF30 Pro , a : b1 , b : b0 , back : b10 , dpdown : h0 . 4 , dpleft : h0 . 8 , dpright : h0 . 2 , dpup : h0 . 1 , leftshoulder : b6 , leftstick : b13 , lefttrigger : b8 , leftx : a0 , lefty : a1 , rightshoulder : b7 , rightstick : b14 , righttrigger : b9 , rightx : a2 , righty : a3 , start : b11 , x : b4 , y : b3 , platform : Windows , <nl> + 03000000c82d00000061000000000000 , 8Bitdo SF30 Pro , a : b1 , b : b0 , back : b10 , dpdown : h0 . 4 , dpleft : h0 . 8 , dpright : h0 . 2 , dpup : h0 . 1 , guide : b2 , leftshoulder : b6 , leftstick : b13 , lefttrigger : b8 , leftx : a0 , lefty : a1 , rightshoulder : b7 , rightstick : b14 , righttrigger : b9 , rightx : a2 , righty : a3 , start : b11 , x : b4 , y : b3 , platform : Windows , <nl> 03000000c82d000021ab000000000000 , 8BitDo SFC30 , a : b1 , b : b0 , back : b10 , leftshoulder : b6 , leftx : a0 , lefty : a1 , rightshoulder : b7 , start : b11 , x : b4 , y : b3 , platform : Windows , <nl> 03000000102800000900000000000000 , 8Bitdo SFC30 GamePad , a : b1 , b : b0 , back : b10 , leftshoulder : b6 , leftx : a0 , lefty : a1 , rightshoulder : b7 , start : b11 , x : b4 , y : b3 , platform : Windows , <nl> 03000000c82d00003028000000000000 , 8Bitdo SFC30 GamePad , a : b1 , b : b0 , back : b10 , leftshoulder : b6 , leftx : a0 , lefty : a1 , rightshoulder : b7 , start : b11 , x : b4 , y : b3 , platform : Windows , <nl> 03000000c82d00000030000000000000 , 8BitDo SN30 , a : b1 , b : b0 , back : b10 , leftshoulder : b6 , leftx : a0 , lefty : a1 , rightshoulder : b7 , start : b11 , x : b4 , y : b3 , platform : Windows , <nl> 03000000c82d00000351000000000000 , 8BitDo SN30 , a : b1 , b : b0 , back : b10 , dpdown : h0 . 4 , dpleft : h0 . 8 , dpright : h0 . 
2 , dpup : h0 . 1 , guide : b2 , leftshoulder : b6 , leftstick : b13 , lefttrigger : b8 , leftx : a0 , lefty : a1 , rightshoulder : b7 , rightstick : b14 , righttrigger : b9 , rightx : a2 , righty : a5 , start : b11 , x : b4 , y : b3 , platform : Windows , <nl> - 03000000c82d00001290000000000000 , 8BitDo SN30 , a : b1 , b : b0 , back : b10 , dpdown : h0 . 4 , dpleft : h0 . 8 , dpright : h0 . 2 , dpup : h0 . 1 , guide : b2 , leftshoulder : b6 , leftstick : b13 , lefttrigger : b8 , leftx : a0 , lefty : a1 , rightshoulder : b7 , rightstick : b14 , righttrigger : b9 , rightx : a2 , righty : a5 , start : b11 , x : b4 , y : b3 , platform : Windows , <nl> + 03000000c82d00001290000000000000 , 8BitDo SN30 , a : b1 , b : b0 , back : b10 , dpdown : + a1 , dpleft : - a0 , dpright : + a0 , dpup : - a1 , leftshoulder : b6 , rightshoulder : b7 , start : b11 , x : b4 , y : b3 , platform : Windows , <nl> 03000000c82d000020ab000000000000 , 8BitDo SN30 , a : b1 , b : b0 , back : b10 , dpdown : h0 . 4 , dpleft : h0 . 8 , dpright : h0 . 2 , dpup : h0 . 1 , guide : b2 , leftshoulder : b6 , leftstick : b13 , lefttrigger : b8 , leftx : a0 , lefty : a1 , rightshoulder : b7 , rightstick : b14 , righttrigger : b9 , rightx : a2 , righty : a5 , start : b11 , x : b4 , y : b3 , platform : Windows , <nl> 03000000c82d00004028000000000000 , 8BitDo SN30 , a : b1 , b : b0 , back : b10 , dpdown : h0 . 4 , dpleft : h0 . 8 , dpright : h0 . 2 , dpup : h0 . 1 , guide : b2 , leftshoulder : b6 , leftstick : b13 , lefttrigger : b8 , leftx : a0 , lefty : a1 , rightshoulder : b7 , rightstick : b14 , righttrigger : b9 , rightx : a2 , righty : a5 , start : b11 , x : b4 , y : b3 , platform : Windows , <nl> - 03000000c82d00006228000000000000 , 8BitDo SN30 GP , a : b1 , b : b0 , back : b10 , dpdown : h0 . 4 , dpleft : h0 . 8 , dpright : h0 . 2 , dpup : h0 . 1 , guide : b2 , leftshoulder : b6 , leftstick : b13 , lefttrigger : b8 , leftx : a0 , lefty : a1 , rightshoulder : b7 , rightstick : b14 , righttrigger : b9 , rightx : a2 , righty : a5 , start : b11 , x : b4 , y : b3 , platform : Windows , <nl> - 03000000c82d00000160000000000000 , 8BitDo SN30 Pro , a : b1 , b : b0 , back : b10 , dpdown : h0 . 4 , dpleft : h0 . 8 , dpright : h0 . 2 , dpup : h0 . 1 , guide : b2 , leftshoulder : b6 , leftstick : b13 , lefttrigger : b8 , leftx : a0 , lefty : a1 , rightshoulder : b7 , rightstick : b14 , righttrigger : b9 , rightx : a2 , righty : a5 , start : b11 , x : b4 , y : b3 , platform : Windows , <nl> - 03000000c82d00000161000000000000 , 8BitDo SN30 Pro , a : b1 , b : b0 , back : b10 , dpdown : h0 . 4 , dpleft : h0 . 8 , dpright : h0 . 2 , dpup : h0 . 1 , guide : b2 , leftshoulder : b6 , leftstick : b13 , lefttrigger : b8 , leftx : a0 , lefty : a1 , rightshoulder : b7 , rightstick : b14 , righttrigger : b9 , rightx : a2 , righty : a5 , start : b11 , x : b4 , y : b3 , platform : Windows , <nl> + 03000000c82d00006228000000000000 , 8BitDo SN30 , a : b1 , b : b0 , back : b10 , dpdown : + a1 , dpleft : - a0 , dpright : + a0 , dpup : - a1 , leftshoulder : b6 , rightshoulder : b7 , start : b11 , x : b4 , y : b3 , platform : Windows , <nl> + 03000000c82d00000160000000000000 , 8BitDo SN30 Pro , a : b1 , b : b0 , back : b10 , dpdown : h0 . 4 , dpleft : h0 . 8 , dpright : h0 . 2 , dpup : h0 . 
1 , guide : b2 , leftshoulder : b6 , leftstick : b13 , lefttrigger : b8 , leftx : a0 , lefty : a1 , rightshoulder : b7 , rightstick : b14 , righttrigger : b9 , rightx : a2 , righty : a3 , start : b11 , x : b4 , y : b3 , platform : Windows , <nl> + 03000000c82d00000161000000000000 , 8BitDo SN30 Pro , a : b1 , b : b0 , back : b10 , dpdown : h0 . 4 , dpleft : h0 . 8 , dpright : h0 . 2 , dpup : h0 . 1 , guide : b2 , leftshoulder : b6 , leftstick : b13 , lefttrigger : b8 , leftx : a0 , lefty : a1 , rightshoulder : b7 , rightstick : b14 , righttrigger : b9 , rightx : a2 , righty : a3 , start : b11 , x : b4 , y : b3 , platform : Windows , <nl> 03000000c82d00000260000000000000 , 8BitDo SN30 Pro + , a : b1 , b : b0 , back : b10 , dpdown : h0 . 4 , dpleft : h0 . 8 , dpright : h0 . 2 , dpup : h0 . 1 , guide : b2 , leftshoulder : b6 , leftstick : b13 , lefttrigger : b8 , leftx : a0 , lefty : a1 , rightshoulder : b7 , rightstick : b14 , righttrigger : b9 , rightx : a2 , righty : a3 , start : b11 , x : b4 , y : b3 , platform : Windows , <nl> 03000000c82d00000261000000000000 , 8BitDo SN30 Pro + , a : b1 , b : b0 , back : b10 , dpdown : h0 . 4 , dpleft : h0 . 8 , dpright : h0 . 2 , dpup : h0 . 1 , guide : b2 , leftshoulder : b6 , leftstick : b13 , lefttrigger : b8 , leftx : a0 , lefty : a1 , rightshoulder : b7 , rightstick : b14 , righttrigger : b9 , rightx : a2 , righty : a3 , start : b11 , x : b4 , y : b3 , platform : Windows , <nl> 03000000c82d00000031000000000000 , 8BitDo Wireless Adapter , a : b1 , b : b0 , back : b10 , dpdown : h0 . 4 , dpleft : h0 . 8 , dpright : h0 . 2 , dpup : h0 . 1 , leftshoulder : b6 , leftstick : b13 , lefttrigger : b8 , leftx : a0 , lefty : a1 , rightshoulder : b7 , rightstick : b14 , righttrigger : b9 , rightx : a3 , righty : a4 , start : b11 , x : b4 , y : b3 , platform : Windows , <nl> + 03000000c82d00003032000000000000 , 8BitDo Zero 2 , a : b1 , b : b0 , back : b10 , dpdown : h0 . 4 , dpleft : h0 . 8 , dpright : h0 . 2 , dpup : h0 . 1 , leftshoulder : b6 , leftx : a0 , lefty : a1 , rightshoulder : b7 , rightx : a2 , righty : a3 , start : b11 , x : b4 , y : b3 , platform : Windows , <nl> 03000000a00500003232000000000000 , 8Bitdo Zero GamePad , a : b0 , b : b1 , back : b10 , dpdown : + a2 , dpleft : - a0 , dpright : + a0 , dpup : - a2 , leftshoulder : b6 , rightshoulder : b7 , start : b11 , x : b3 , y : b4 , platform : Windows , <nl> 030000008f0e00001200000000000000 , Acme GA - 02 , a : b0 , b : b1 , back : b8 , dpdown : h0 . 4 , dpleft : h0 . 8 , dpright : h0 . 2 , dpup : h0 . 1 , leftshoulder : b4 , leftstick : b10 , lefttrigger : b5 , leftx : a0 , lefty : a1 , rightshoulder : b6 , rightstick : b11 , righttrigger : b7 , rightx : a3 , righty : a2 , start : b9 , x : b2 , y : b3 , platform : Windows , <nl> 03000000fa190000f0ff000000000000 , Acteck AGJ - 3200 , a : b2 , b : b1 , back : b8 , dpdown : h0 . 4 , dpleft : h0 . 8 , dpright : h0 . 2 , dpup : h0 . 1 , leftshoulder : b4 , leftstick : b10 , lefttrigger : b6 , leftx : a0 , lefty : a1 , rightshoulder : b5 , rightstick : b11 , righttrigger : b7 , rightx : a2 , righty : a3 , start : b9 , x : b3 , y : b0 , platform : Windows , <nl> <nl> 03000000260900008888000000000000 , Cyber Gadget GameCube Controller , a : b0 , b : b1 , dpdown : h0 . 4 , dpleft : h0 . 8 , dpright : h0 . 2 , dpup : h0 . 1 , lefttrigger : a5 , leftx : a0 , lefty : a1 , rightshoulder : b6 , righttrigger : a4 , rightx : a2 , righty : a3 ~ , start : b7 , x : b2 , y : b3 , platform : Windows , <nl> 03000000a306000022f6000000000000 , Cyborg V . 
3 Rumble Pad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:+a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:-a3,rightx:a2,righty:a4,start:b9,x:b0,y:b3,platform:Windows,
03000000451300000830000000000000,Defender Game Racer X7,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,platform:Windows,
+030000007d0400000840000000000000,Destroyer Tiltpad,+leftx:h0.2,+lefty:h0.4,-leftx:h0.8,-lefty:h0.1,a:b1,b:b2,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b4,rightshoulder:b5,x:b0,y:b3,platform:Windows,
03000000791d00000103000000000000,Dual Box WII,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b6,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:b5,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Windows,
03000000bd12000002e0000000000000,Dual USB Vibration Joystick,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b9,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b10,righttrigger:b5,rightx:a3,righty:a2,start:b11,x:b3,y:b0,platform:Windows,
030000006f0e00003001000000000000,EA SPORTS PS3 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,

030000000d0f00008800000000000000,Fighting Stick mini 4,a:b1,b:b2,back:b9,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,rightshoulder:b5,righttrigger:b7,start:b8,x:b0,y:b3,platform:Windows,
030000000d0f00002700000000000000,FIGHTING STICK V3,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,platform:Windows,
78696e70757403000000000000000000,Fightstick TES,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,lefttrigger:a2,rightshoulder:b5,righttrigger:a5,start:b7,x:b2,y:b3,platform:Windows,
-03000000790000000600000000000000,G-Shark GS-GP702,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a4,start:b9,x:b3,y:b0,platform:Windows,
03000000790000002201000000000000,Game Controller for PC,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Windows,
0300000066f700000100000000000000,Game VIB Joystick,a:b2,b:b3,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b8,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b9,righttrigger:b7,rightx:a3,righty:a2,start:b11,x:b0,y:b1,platform:Windows,
03000000260900002625000000000000,Gamecube Controller,a:b0,b:b1,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b6,lefttrigger:a4,leftx:a0,lefty:a1,righttrigger:a5,rightx:a2,righty:a3,start:b7,x:b2,y:b3,platform:Windows,

03000000ac0500004d04000000000000,GameSir,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b3,y:b4,platform:Windows,
03000000ffff00000000000000000000,GameStop Gamepad,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,platform:Windows,
030000006f0e00000102000000007801,GameStop Xbox 360 Wired Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b8,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b9,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Windows,
+030000009b2800003200000000000000,GC/N64 to USB v3.4,a:b0,b:b7,dpdown:b11,dpleft:b12,dpright:b13,dpup:b10,lefttrigger:+a5,leftx:a0,lefty:a1,rightshoulder:b2,righttrigger:+a2,rightx:a3,righty:a4,start:b3,x:b1,y:b8,platform:Windows,
030000008305000009a0000000000000,Genius,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,platform:Windows,
030000008305000031b0000000000000,Genius Maxfire Blaze 3,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,platform:Windows,
03000000451300000010000000000000,Genius Maxfire Grandias 12,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,platform:Windows,

030000007e0500000720000000000000,Joy-Con (R),+leftx:h0.2,+lefty:h0.4,-leftx:h0.8,-lefty:h0.1,a:b0,b:b1,back:b12,leftshoulder:b4,leftstick:b11,rightshoulder:b5,start:b9,x:b2,y:b3,platform:Windows,
030000007e0500000720000001000000,Joy-Con (R),+leftx:h0.2,+lefty:h0.4,-leftx:h0.8,-lefty:h0.1,a:b0,b:b1,back:b12,leftshoulder:b4,leftstick:b11,rightshoulder:b5,start:b9,x:b2,y:b3,platform:Windows,
03000000bd12000003c0000000000000,JY-P70UR,a:b1,b:b0,back:b5,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b10,lefttrigger:b7,leftx:a0,lefty:a1,rightshoulder:b8,rightstick:b11,righttrigger:b9,rightx:a3,righty:a2,start:b4,x:b3,y:b2,platform:Windows,
+03000000242f00002d00000000000000,JYS Wireless Adapter,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Windows,
+03000000242f00008a00000000000000,JYS Wireless Adapter,a:b1,b:b4,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:b8,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b0,y:b3,platform:Windows,
03000000790000000200000000000000,King PS3 Controller,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a4,start:b9,x:b3,y:b0,platform:Windows,
030000006d040000d1ca000000000000,Logitech ChillStream,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,
030000006d040000d2ca000000000000,Logitech Cordless Precision,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,

03000000790000001a18000000000000,Venom,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Windows,
03000000790000001b18000000000000,Venom Arcade Joystick,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,platform:Windows,
030000006f0e00000302000000000000,Victrix Pro Fight Stick for PS4,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,platform:Windows,
+0300000034120000adbe000000000000,vJoy Device,a:b0,b:b1,back:b15,dpdown:b6,dpleft:b7,dpright:b8,dpup:b5,guide:b16,leftshoulder:b9,leftstick:b13,lefttrigger:b11,leftx:a0,lefty:a1,rightshoulder:b10,rightstick:b14,righttrigger:b12,rightx:+a3,righty:+a4,start:b4,x:b2,y:b3,platform:Windows,
030000005e0400000a0b000000000000,Xbox Adaptive Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b8,lefttrigger:+a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b9,righttrigger:-a2,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Windows,
03000000341a00000608000000000000,Xeox,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,platform:Windows,
03000000450c00002043000000000000,XEOX Gamepad SL-6556-BK,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,platform:Windows,

03000000786901006e70000000000000,XInput Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b8,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b9,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Windows,
xinput,XInput Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b8,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b9,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Windows,
03000000790000004f18000000000000,ZD-T Android,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b3,y:b4,platform:Windows,
-03000000c82d00003032000000000000,8BitDo Zero 2,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,rightx:a2,righty:a3,start:b11,x:b4,y:b3,platform:Windows,
+030000009b2800006000000000000000,GC/N64 to USB v3.6,a:b0,b:b7,dpdown:b11,dpleft:b12,dpright:b13,dpup:b10,lefttrigger:+a5,leftx:a0,lefty:a1,rightshoulder:b2,righttrigger:+a2,rightx:a3,righty:a4,start:b3,x:b1,y:b8,platform:Windows,

# Mac OS X
030000008f0e00000300000009010000,2In1 USB Joystick,+leftx:h0.2,+lefty:h0.4,-leftx:h0.8,-lefty:h0.1,a:b2,b:b1,back:b8,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Mac OS X,
xinput,XInput Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,
030000008305000031b0000000000000,Cideko AK08b,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Mac OS X,
03000000260900008888000088020000,Cyber Gadget GameCube Controller,a:b0,b:b1,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,lefttrigger:a4,leftx:a0,lefty:a1,rightshoulder:b6,righttrigger:a5,rightx:a2,righty:a3~,start:b7,x:b2,y:b3,platform:Mac OS X,
03000000a306000022f6000001030000,Cyborg V.3 Rumble Pad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:+a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:-a3,rightx:a2,righty:a4,start:b9,x:b0,y:b3,platform:Mac OS X,
-03000000790000000600000000000000,G-Shark GP-702,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a3,righty:a4,start:b9,x:b3,y:b0,platform:Mac OS X,
03000000ad1b000001f9000000000000,Gamestop BB-070 X360 Controller,a:b0,b:b1,back:b9,dpdown:b12,dpleft:b13,dpright:b14,dpup:b11,guide:b10,leftshoulder:b4,leftstick:b6,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b7,righttrigger:a5,rightx:a3,righty:a4,start:b8,x:b2,y:b3,platform:Mac OS X,
0500000047532047616d657061640000,GameStop Gamepad,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,platform:Mac OS X,
030000006f0e00000102000000000000,GameStop Xbox 360 Wired Controller,a:b0,b:b1,back:b9,dpdown:b12,dpleft:b13,dpright:b14,dpup:b11,guide:b10,leftshoulder:b4,leftstick:b6,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b7,righttrigger:a5,rightx:a3,righty:a4,start:b8,x:b2,y:b3,platform:Mac OS X,
xinput,XInput Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,
03000000830500006020000000000000,iBuffalo USB 2-axis 8-button Gamepad,a:b1,b:b0,back:b6,leftshoulder:b4,leftx:a0,lefty:a1,rightshoulder:b5,start:b7,x:b3,y:b2,platform:Mac OS X,
030000007e0500000620000001000000,Joy-Con (L),+leftx:h0.2,+lefty:h0.4,-leftx:h0.8,-lefty:h0.1,a:b0,b:b1,back:b13,leftshoulder:b4,leftstick:b10,rightshoulder:b5,start:b8,x:b2,y:b3,platform:Mac OS X,
030000007e0500000720000001000000,Joy-Con (R),+leftx:h0.2,+lefty:h0.4,-leftx:h0.8,-lefty:h0.1,a:b0,b:b1,back:b12,leftshoulder:b4,leftstick:b11,rightshoulder:b5,start:b9,x:b2,y:b3,platform:Mac OS X,
+03000000242f00002d00000007010000,JYS Wireless Adapter,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Mac OS X,
030000006d04000016c2000000020000,Logitech Dual Action,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Mac OS X,
030000006d04000016c2000000030000,Logitech Dual Action,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Mac OS X,
030000006d04000016c2000014040000,Logitech Dual Action,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Mac OS X,
xinput,XInput Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,
0300000000f00000f100000000000000,SNES RetroPort,a:b2,b:b3,back:b4,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b5,rightshoulder:b7,start:b6,x:b0,y:b1,platform:Mac OS X,
030000004c050000cc09000000000000,Sony DualShock 4 V2,a:b1,b:b2,back:b13,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Mac OS X,
030000004c050000a00b000000000000,Sony DualShock 4 Wireless Adaptor,a:b1,b:b2,back:b13,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Mac OS X,
+03000000d11800000094000000010000,Stadia Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a4,rightx:a2,righty:a3,start:b7,x:b2,y:b3,platform:Mac OS X,
030000005e0400008e02000001000000,Steam Virtual Gamepad,a:b0,b:b1,back:b9,dpdown:b12,dpleft:b13,dpright:b14,dpup:b11,leftshoulder:b4,leftstick:b6,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b7,righttrigger:a5,rightx:a3,righty:a4,start:b8,x:b2,y:b3,platform:Mac OS X,
03000000110100002014000000000000,SteelSeries Nimbus,a:b0,b:b1,dpdown:b9,dpleft:b11,dpright:b10,dpup:b8,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,rightx:a2,righty:a3,start:b12,x:b2,y:b3,platform:Mac OS X,
03000000110100002014000001000000,SteelSeries Nimbus,a:b0,b:b1,dpdown:b9,dpleft:b11,dpright:b10,dpup:b8,guide:b12,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1~,rightshoulder:b5,righttrigger:b7,rightx:a2,righty:a3~,x:b2,y:b3,platform:Mac OS X,
xinput,XInput Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,
03000000c82d00000160000011010000,8BitDo SN30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b4,y:b3,platform:Linux,
03000000c82d00000161000000000000,8BitDo SN30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b4,y:b3,platform:Linux,
03000000c82d00001290000011010000,8BitDo SN30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b4,y:b3,platform:Linux,
-05000000c82d00000161000000010000,8BitDo SN30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b4,y:b3,platform:Linux,
+05000000c82d00000161000000010000,8BitDo SN30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b4,y:b3,platform:Linux,
05000000c82d00006228000000010000,8BitDo SN30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b4,y:b3,platform:Linux,
03000000c82d00000260000011010000,8BitDo SN30 Pro+,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b4,y:b3,platform:Linux,
05000000c82d00000261000000010000,8BitDo SN30 Pro+,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b4,y:b3,platform:Linux,
030000005e0400008e02000020010000,8BitDo Wireless Adapter,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,
03000000c82d00000031000011010000,8BitDo Wireless Adapter,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b4,y:b3,platform:Linux,
+05000000c82d00003032000000010000,8BitDo Zero 2,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,rightx:a2,righty:a3,start:b11,x:b4,y:b3,platform:Linux,
05000000a00500003232000001000000,8Bitdo Zero GamePad,a:b0,b:b1,back:b10,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b11,x:b3,y:b4,platform:Linux,
05000000a00500003232000008010000,8Bitdo Zero GamePad,a:b0,b:b1,back:b10,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b6,rightshoulder:b7,start:b11,x:b3,y:b4,platform:Linux,
030000006f0e00001302000000010000,Afterglow,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,
xinput,XInput Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,
06000000adde0000efbe000002010000,Hidromancer Game Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,
03000000d81400000862000011010000,HitBox (PS3/PC) Analog Mode,a:b1,b:b2,back:b8,guide:b9,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,start:b12,x:b0,y:b3,platform:Linux,
03000000c9110000f055000011010000,HJC Game GAMEPAD,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,platform:Linux,
+03000000632500002605000010010000,HJD-X,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:a4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a5,rightx:a2,righty:a3,start:b11,x:b3,y:b4,platform:Linux,
030000000d0f00000d00000000010000,hori,a:b0,b:b6,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b3,leftx:b4,lefty:b5,rightshoulder:b7,start:b9,x:b1,y:b2,platform:Linux,
030000000d0f00001000000011010000,HORI CO. LTD. FIGHTING STICK 3,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,platform:Linux,
030000000d0f0000c100000011010000,HORI CO. LTD. HORIPAD S,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b13,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Linux,
xinput,XInput Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,
050000007e0500000620000001000000,Joy-Con (L),+leftx:h0.2,+lefty:h0.4,-leftx:h0.8,-lefty:h0.1,a:b0,b:b1,back:b13,leftshoulder:b4,leftstick:b10,rightshoulder:b5,start:b8,x:b2,y:b3,platform:Linux,
030000007e0500000720000001000000,Joy-Con (R),+leftx:h0.2,+lefty:h0.4,-leftx:h0.8,-lefty:h0.1,a:b0,b:b1,back:b12,leftshoulder:b4,leftstick:b11,rightshoulder:b5,start:b9,x:b2,y:b3,platform:Linux,
050000007e0500000720000001000000,Joy-Con (R),+leftx:h0.2,+lefty:h0.4,-leftx:h0.8,-lefty:h0.1,a:b0,b:b1,back:b12,leftshoulder:b4,leftstick:b11,rightshoulder:b5,start:b9,x:b2,y:b3,platform:Linux,
+03000000242f00002d00000011010000,JYS Wireless Adapter,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Linux,
+03000000242f00008a00000011010000,JYS Wireless Adapter,a:b1,b:b4,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:b8,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b0,y:b3,platform:Linux,
030000006f0e00000103000000020000,Logic3 Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,
030000006d04000019c2000010010000,Logitech Cordless RumblePad 2,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Linux,
030000006d04000016c2000010010000,Logitech Dual Action,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Linux,
xinput,XInput Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,
03000000250900006688000000010000,MP-8866 Super Dual Box,a:b2,b:b1,back:b9,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b10,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b11,righttrigger:b5,rightx:a2,righty:a3,start:b8,x:b3,y:b0,platform:Linux,
030000000d0f00000900000010010000,Natec Genesis P44,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Linux,
030000001008000001e5000010010000,NEXT SNES Controller,a:b2,b:b1,back:b8,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b4,rightshoulder:b6,start:b9,x:b3,y:b0,platform:Linux,
+060000007e0500000820000000000000,Nintendo Combined Joy-Cons (joycond),a:b0,b:b1,back:b9,dpdown:b15,dpleft:b16,dpright:b17,dpup:b14,leftshoulder:b5,leftstick:b12,lefttrigger:b7,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b13,righttrigger:b8,rightx:a2,righty:a3,start:b10,x:b3,y:b2,platform:Linux,
+050000007e0500000920000001800000,Nintendo Switch Pro Controller (joycond),a:b0,b:b1,x:b3,y:b2,back:b9,guide:b11,start:b10,leftstick:b12,rightstick:b13,leftshoulder:b5,rightshoulder:b6,dpup:h0.1,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,leftx:a0,lefty:a1,rightx:a2,righty:a3,lefttrigger:b7,righttrigger:b8,platform:Linux,
+030000007e0500000920000011810000,Nintendo Switch Pro Controller Wired (joycond),a:b0,b:b1,x:b3,y:b2,back:b9,guide:b11,start:b10,leftstick:b12,rightstick:b13,leftshoulder:b5,rightshoulder:b6,dpup:h0.1,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,leftx:a0,lefty:a1,rightx:a2,righty:a3,lefttrigger:b7,righttrigger:b8,platform:Linux,
030000007e0500003703000000016800,Nintendo GameCube Controller,a:b0,b:b2,dpdown:b6,dpleft:b4,dpright:b5,dpup:b7,lefttrigger:a4,leftx:a0,lefty:a1~,rightshoulder:b9,righttrigger:a5,rightx:a2,righty:a3~,start:b8,x:b1,y:b3,platform:Linux,
050000007e0500000920000001000000,Nintendo Switch Pro Controller,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,platform:Linux,
050000007e0500003003000001000000,Nintendo Wii Remote Pro Controller,a:b0,b:b1,back:b8,dpdown:b14,dpleft:b15,dpright:b16,dpup:b13,guide:b10,leftshoulder:b4,leftstick:b11,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b12,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b2,platform:Linux,
xinput,XInput Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,
030000005e0400000202000000010000,Old Xbox pad,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b5,leftstick:b8,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b2,rightstick:b9,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b3,y:b4,platform:Linux,
05000000362800000100000002010000,OUYA Game Controller,a:b0,b:b3,dpdown:b9,dpleft:b10,dpright:b11,dpup:b8,guide:b14,leftshoulder:b4,leftstick:b6,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b7,righttrigger:a5,rightx:a3,righty:a4,x:b1,y:b2,platform:Linux,
05000000362800000100000003010000,OUYA Game Controller,a:b0,b:b3,dpdown:b9,dpleft:b10,dpright:b11,dpup:b8,guide:b14,leftshoulder:b4,leftstick:b6,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b7,righttrigger:a5,rightx:a3,righty:a4,x:b1,y:b2,platform:Linux,
+03000000830500005020000010010000,Padix Co. Ltd. Rockfire PSX/USB Bridge,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b8,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b9,righttrigger:b7,rightx:a2,righty:a3,start:b11,x:b2,y:b3,platform:Linux,
03000000ff1100003133000010010000,PC Game Controller,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Linux,
030000006f0e00006401000001010000,PDP Battlefield One,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,
030000006f0e00003101000000010000,PDP EA Sports Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,
+030000006f0e0000c802000012010000,PDP Kingdom Hearts Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,
030000006f0e0000a802000023020000,PDP Wired Controller for Xbox One,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b11,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b12,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,platform:Linux,
030000004c050000da0c000011010000,Playstation Controller,a:b2,b:b1,back:b8,leftshoulder:b6,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:b5,start:b9,x:b3,y:b0,platform:Linux,
03000000c62400000053000000010000,PowerA,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,
xinput,XInput Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,
050000004c050000cc09000000810000,PS4 Controller,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b11,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b12,righttrigger:a5,rightx:a3,righty:a4,start:b9,x:b3,y:b2,platform:Linux,
050000004c050000cc09000001800000,PS4 Controller,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b11,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b12,righttrigger:a5,rightx:a3,righty:a4,start:b9,x:b3,y:b2,platform:Linux,
03000000300f00001211000011010000,QanBa Arcade JoyStick,a:b2,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b5,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:b6,start:b9,x:b1,y:b3,platform:Linux,
+030000009b2800003200000001010000,Raphnet Technologies GC/N64 to USB v3.4,a:b0,b:b7,dpdown:b11,dpleft:b12,dpright:b13,dpup:b10,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b2,righttrigger:b5,rightx:a3,righty:a4,start:b3,x:b1,y:b8,platform:Linux,
030000009b2800000300000001010000,raphnet.net 4nes4snes v1.5,a:b0,b:b4,back:b2,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b3,x:b1,y:b5,platform:Linux,
030000008916000001fd000024010000,Razer Onza Classic Edition,a:b0,b:b1,back:b6,dpdown:b14,dpleft:b11,dpright:b12,dpup:b13,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,
030000008916000000fd000024010000,Razer Onza Tournament Edition,a:b0,b:b1,back:b6,dpdown:b14,dpleft:b11,dpright:b12,dpup:b13,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,
xinput,XInput Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,
030000006f0e00001e01000011010000,Rock Candy PS3 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Linux,
030000006f0e00004601000001010000,Rock Candy Xbox One Controller,a:b0,b:b1,back:b6,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,
03000000a306000023f6000011010000,Saitek Cyborg V.1 Game Pad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a4,start:b9,x:b0,y:b3,platform:Linux,
+03000000a30600001005000000010000,Saitek Saitek P150,platform:Linux,a:b0,b:b1,y:b4,x:b3,leftshoulder:b7,rightshoulder:b2,dpup:-a1,dpleft:-a0,dpdown:+a1,dpright:+a0,lefttrigger:b6,righttrigger:b5,
03000000a30600000cff000010010000,Saitek P2500 Force Rumble Pad,a:b2,b:b3,back:b11,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b8,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b9,righttrigger:b7,rightx:a3,righty:a2,x:b0,y:b1,platform:Linux,
03000000a30600000c04000011010000,Saitek P2900 Wireless Pad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b9,leftshoulder:b6,leftstick:b10,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b11,righttrigger:b5,rightx:a3,righty:a2,start:b12,x:b0,y:b3,platform:Linux,
-03000000300f00001201000010010000,Saitek P380,a:b2,b:b3,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b5,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b11,righttrigger:b7,rightx:a1,righty:a2,start:b9,x:b0,y:b1,platform:Linux,
+03000000300f00001201000010010000,Saitek P380,a:b2,b:b3,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b5,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b11,righttrigger:b7,rightx:a3,righty:a2,start:b9,x:b0,y:b1,platform:Linux,
03000000a30600000901000000010000,Saitek P880,a:b2,b:b3,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b8,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b9,righttrigger:b7,rightx:a3,righty:a2,x:b0,y:b1,platform:Linux,
03000000a30600000b04000000010000,Saitek P990 Dual Analog Pad,a:b1,b:b2,back:b9,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a3,righty:a2,start:b8,x:b0,y:b3,platform:Linux,
03000000a306000018f5000010010000,Saitek PLC Saitek P3200 Rumble Pad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a3,righty:a4,start:b9,x:b0,y:b3,platform:Linux,
xinput,XInput Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,
03000000250900000500000000010000,Sony PS2 pad with SmartJoy adapter,a:b2,b:b1,back:b9,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b10,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b11,righttrigger:b5,rightx:a2,righty:a3,start:b8,x:b3,y:b0,platform:Linux,
030000005e0400008e02000073050000,Speedlink TORID Wireless Gamepad,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,
030000005e0400008e02000020200000,SpeedLink XEOX Pro Analog Gamepad pad,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,
+03000000d11800000094000011010000,Stadia Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a4,rightx:a2,righty:a3,start:b7,x:b2,y:b3,platform:Linux,
03000000de2800000112000001000000,Steam Controller,a:b0,b:b1,back:b6,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:a3,start:b7,x:b2,y:b3,platform:Linux,
03000000de2800000211000001000000,Steam Controller,a:b0,b:b1,back:b6,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:a3,start:b7,x:b2,y:b3,platform:Linux,
03000000de2800004211000001000000,Steam Controller,a:b0,b:b1,back:b6,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:a3,start:b7,x:b2,y:b3,platform:Linux,
xinput,XInput Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,
03000000381000003114000075010000,SteelSeries Stratus Duo,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,
05000000110100001914000009010000,SteelSeries Stratus XL,a:b0,b:b1,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,platform:Linux,
03000000ad1b000038f0000090040000,Street Fighter IV FightStick TE,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,
+030000003b07000004a1000000010000,Suncom SFX Plus for USB,a:b0,b:b2,x:b1,y:b3,back:b7,start:b8,leftshoulder:b6,rightshoulder:b9,leftx:a0,lefty:a1,lefttrigger:b4,righttrigger:b5,platform:Linux,
03000000666600000488000000010000,Super Joy Box 5 Pro,a:b2,b:b1,back:b9,dpdown:b14,dpleft:b15,dpright:b13,dpup:b12,leftshoulder:b6,leftstick:b10,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b11,righttrigger:b5,rightx:a2,righty:a3,start:b8,x:b3,y:b0,platform:Linux,
0300000000f00000f100000000010000,Super RetroPort,a:b1,b:b5,back:b2,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b3,x:b0,y:b4,platform:Linux,
+03000000457500002211000010010000,SZMY-POWER CO. LTD. GAMEPAD,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,platform:Linux,
+030000008f0e00000d31000010010000,SZMY-POWER CO. LTD. GAMEPAD 3 TURBO,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Linux,
030000004f04000020b3000010010000,Thrustmaster 2 in 1 DT,a:b0,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b5,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b1,y:b3,platform:Linux,
030000004f04000015b3000010010000,Thrustmaster Dual Analog 4,a:b0,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b5,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b1,y:b3,platform:Linux,
030000004f04000023b3000000010000,Thrustmaster Dual Trigger 3-in-1,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Linux,
+03000000b50700000399000000010000,Thrustmaster Firestorm Digital 2,a:b2,b:b4,x:b3,y:b5,back:b11,start:b1,leftstick:b10,rightstick:b0,leftshoulder:b6,rightshoulder:b8,leftx:a0,lefty:a1,lefttrigger:b7,righttrigger:b9,platform:Linux,
030000004f04000000b3000010010000,Thrustmaster Firestorm Dual Power,a:b0,b:b2,back:b9,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b11,lefttrigger:b5,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b12,righttrigger:b7,rightx:a2,righty:a3,start:b10,x:b1,y:b3,platform:Linux,
030000004f04000026b3000002040000,Thrustmaster Gamepad GP XID,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,
03000000c6240000025b000002020000,Thrustmaster GPX Gamepad,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,
030000004f04000008d0000000010000,Thrustmaster Run N Drive Wireless,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Linux,
030000004f04000009d0000000010000,Thrustmaster Run N Drive Wireless PS3,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Linux,
+030000004f04000007d0000000010000,Thrustmaster T Mini Wireless,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,platform:Linux,
030000004f04000012b3000010010000,Thrustmaster vibrating gamepad,a:b0,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b5,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b1,y:b3,platform:Linux,
03000000bd12000015d0000010010000,Tomee SNES USB Controller,a:b2,b:b1,back:b8,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b4,rightshoulder:b5,start:b9,x:b3,y:b0,platform:Linux,
03000000d814000007cd000011010000,Toodles 2008 Chimp PC/PS3,a:b0,b:b1,back:b8,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,start:b9,x:b3,y:b2,platform:Linux,
xinput,XInput Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,
05000000172700004431000029010000,XiaoMi Game Controller,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b20,leftshoulder:b6,leftstick:b13,lefttrigger:a7,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a6,rightx:a2,righty:a5,start:b11,x:b3,y:b4,platform:Linux,
03000000c0160000e105000001010000,Xin-Mo Xin-Mo Dual Arcade,a:b4,b:b3,back:b6,dpdown:b12,dpleft:b13,dpright:b14,dpup:b11,guide:b9,leftshoulder:b2,leftx:a0,lefty:a1,rightshoulder:b5,start:b7,x:b1,y:b0,platform:Linux,
xinput,XInput Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,platform:Linux,
-05000000c82d00003032000000010000,8BitDo Zero 2,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,rightx:a2,righty:a3,start:b11,x:b4,y:b3,platform:Linux,
03000000120c0000100e000011010000,ZEROPLUS P4 Gamepad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,platform:Linux,
+030000005e0400008e02000000010000,xbox360 Wireless EasySMX,a:b0,b:b1,x:b2,y:b3,back:b6,guide:b8,start:b7,leftstick:b9,rightstick:b10,leftshoulder:b4,rightshoulder:b5,dpup:h0.1,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,leftx:a0,lefty:a1,rightx:a3,righty:a4,lefttrigger:a2,righttrigger:a5,platform:Linux,
+030000009b2800006000000001010000,Raphnet Technologies GC/N64 to USB v3.6,a:b0,b:b7,dpdown:b11,dpleft:b12,dpright:b13,dpup:b10,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b2,righttrigger:b5,rightx:a3,righty:a4,start:b3,x:b1,y:b8,platform:Linux,

# Android
05000000bc20000000550000ffff3f00,GameSir G3w,a:b0,b:b1,back:b4,dpdown:b12,dpleft:b13,dpright:b14,dpup:b11,leftshoulder:b9,leftstick:b7,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b10,rightstick:b8,righttrigger:a4,rightx:a2,righty:a3,start:b6,x:b2,y:b3,platform:Android,
xinput,XInput Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,
050000005e040000fd020000ffff3f00,Xbox One Wireless Controller,a:b0,b:b1,back:b4,dpdown:b12,dpleft:b13,dpright:b14,dpup:b11,leftshoulder:b9,leftstick:b7,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b10,rightstick:b8,righttrigger:a4,rightx:a2,righty:a3,start:b6,x:b2,y:b3,platform:Android,
050000005e04000091020000ff073f00,Xbox Wireless Controller,a:b0,b:b1,back:b4,guide:b5,leftshoulder:b9,leftstick:b7,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b10,rightstick:b8,righttrigger:a5,rightx:a3,righty:a4,start:b6,x:b2,y:b3,platform:Android,
34356136633366613530316338376136,Xbox Wireless Controller,a:b0,b:b1,back:b9,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b3,leftstick:b15,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b18,rightstick:b16,righttrigger:a5,rightx:a3,righty:a4,x:b17,y:b2,platform:Android,
+7573622067616d657061642020202020,NEXT SNES Controller,a:b2,b:b1,back:b8,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b4,rightshoulder:b6,start:b9,x:b3,y:b0,platform:Android,

# iOS
05000000ac0500000100000000006d01,*,a:b0,b:b1,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b6,leftshoulder:b4,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:a5,rightx:a3,righty:a4,x:b2,y:b3,platform:iOS,
| Merge pull request from akien-mga/gamepad-update-controllerdb | godotengine/godot | d0b89c42bf03fc7a38153477f8199d5212714d85 | 2020-06-05T09:19:11Z |
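Each mapping line in the record above follows the SDL_GameController format: a 32-hex-digit device GUID, a human-readable name, a comma-separated list of bindings (bN is a button index, aN an axis with an optional +/- half-range prefix or trailing ~ inversion, hN.M is hat N with bit mask M), and a trailing platform field. A minimal C sketch of feeding one such line to stock SDL2 follows; the mapping string is copied verbatim from the diff, and everything else is ordinary SDL2 API usage.

/* Minimal sketch: register one gamecontrollerdb line with SDL2.
 * Build with: cc demo.c $(sdl2-config --cflags --libs) */
#include <SDL.h>
#include <stdio.h>

int main(void) {
    if (SDL_Init(SDL_INIT_GAMECONTROLLER) != 0) {
        fprintf(stderr, "SDL_Init: %s\n", SDL_GetError());
        return 1;
    }

    /* GUID, name, bindings, then the platform field that
     * SDL_GameControllerAddMappingsFromFile uses to filter lines. */
    const char *mapping =
        "030000009b2800003200000000000000,GC/N64 to USB v3.4,"
        "a:b0,b:b7,dpdown:b11,dpleft:b12,dpright:b13,dpup:b10,"
        "lefttrigger:+a5,leftx:a0,lefty:a1,rightshoulder:b2,"
        "righttrigger:+a2,rightx:a3,righty:a4,start:b3,x:b1,y:b8,"
        "platform:Windows,";

    int rc = SDL_GameControllerAddMapping(mapping);
    if (rc == 1)
        printf("new mapping added\n");
    else if (rc == 0)
        printf("existing mapping for this GUID updated\n");
    else
        fprintf(stderr, "SDL_GameControllerAddMapping: %s\n", SDL_GetError());

    SDL_Quit();
    return 0;
}
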
--- a/js/apps/system/aardvark/frontend/js/views/foxxInstalledView.js
+++ b/js/apps/system/aardvark/frontend/js/views/foxxInstalledView.js
events: {
  //"click .install" : "installDialog",
  //"click .purge" : "removeDialog",
-  "click .icon_arangodb_settings3" : "infoDialog"
+  "click .icon_arangodb_settings2" : "infoDialog"
},

renderVersion: function (e) {
| Fixed a minor mistake where it was not possible to install a foxx App | arangodb/arangodb | 5df8cf08969e2211fa78971f1750b8833fa4d016 | 2014-07-16T14:11:43Z |
mmm a / src / core / ext / transport / chttp2 / transport / chttp2_transport . c <nl> ppp b / src / core / ext / transport / chttp2 / transport / chttp2_transport . c <nl> void grpc_chttp2_ping_strike ( grpc_exec_ctx * exec_ctx , <nl> if ( + + t - > ping_recv_state . ping_strikes > t - > ping_policy . max_ping_strikes & & <nl> t - > ping_policy . max_ping_strikes ! = 0 ) { <nl> send_goaway ( exec_ctx , t , <nl> - grpc_error_set_int ( GRPC_ERROR_CREATE ( " too_many_pings " ) , <nl> - GRPC_ERROR_INT_HTTP2_ERROR , <nl> - GRPC_HTTP2_ENHANCE_YOUR_CALM ) ) ; <nl> + grpc_error_set_int ( <nl> + GRPC_ERROR_CREATE_FROM_STATIC_STRING ( " too_many_pings " ) , <nl> + GRPC_ERROR_INT_HTTP2_ERROR , GRPC_HTTP2_ENHANCE_YOUR_CALM ) ) ; <nl> / * The transport will be closed after the write is done * / <nl> - close_transport_locked ( exec_ctx , t , GRPC_ERROR_CREATE ( " Too many pings " ) ) ; <nl> + close_transport_locked ( <nl> + exec_ctx , t , GRPC_ERROR_CREATE_FROM_STATIC_STRING ( " Too many pings " ) ) ; <nl> } <nl> } <nl> <nl> | Update with the new grpc_error interface | grpc/grpc | 78da25097d1ea4f1ccc55d244e3533eddf894915 | 2017-04-04T07:03:28Z |
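The gRPC change above swaps GRPC_ERROR_CREATE for GRPC_ERROR_CREATE_FROM_STATIC_STRING, which tells the error machinery the message is a string literal that outlives the error object, so no copy has to be taken on this hot path. A toy C++ sketch of that static-vs-owned distinction — an illustration of the idea, not gRPC's actual implementation:

    #include <string>

    class Error {
     public:
      // Static variant: the caller guarantees `msg` is a string literal,
      // so only the pointer is kept -- no allocation on hot paths.
      static Error FromStaticString(const char* msg) { return Error(msg, false); }
      // Owned variant: copies the buffer because the caller may free it.
      static Error FromCopiedString(const char* msg) { return Error(msg, true); }
      const char* message() const { return owned_ ? storage_.c_str() : static_msg_; }
     private:
      Error(const char* msg, bool owned) : owned_(owned) {
        if (owned_) storage_.assign(msg); else static_msg_ = msg;
      }
      bool owned_;
      const char* static_msg_ = nullptr;
      std::string storage_;
    };

    // Usage mirroring the diff: Error e = Error::FromStaticString("too_many_pings");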
mmm a / include / swift / AST / ASTScope . h <nl> ppp b / include / swift / AST / ASTScope . h <nl> class ASTSourceFileScope final : public ASTScopeImpl { <nl> <nl> const SourceFile * getSourceFile ( ) const override ; <nl> NullablePtr < const void > addressForPrinting ( ) const override { return SF ; } <nl> - bool crossCheckWithAST ( ) ; <nl> <nl> protected : <nl> ASTScopeImpl * expandSpecifically ( ScopeCreator & scopeCreator ) override ; <nl> mmm a / lib / AST / ASTScopeCreation . cpp <nl> ppp b / lib / AST / ASTScopeCreation . cpp <nl> class ScopeCreator final { <nl> return - 1 = = signum ; <nl> } <nl> <nl> - public : <nl> - / / / For debugging . Return true if scope tree contains all the decl contexts in <nl> - / / / the AST May modify the scope tree in order to update obsolete scopes . <nl> - / / / Likely slow . <nl> - bool containsAllDeclContextsFromAST ( ) { <nl> - auto allDeclContexts = findLocalizableDeclContextsInAST ( ) ; <nl> - llvm : : DenseMap < const DeclContext * , const ASTScopeImpl * > bogusDCs ; <nl> - sourceFileScope - > preOrderDo ( [ & ] ( ASTScopeImpl * scope ) { <nl> - scope - > expandAndBeCurrentDetectingRecursion ( * this ) ; <nl> - } ) ; <nl> - sourceFileScope - > postOrderDo ( [ & ] ( ASTScopeImpl * scope ) { <nl> - if ( auto * dc = scope - > getDeclContext ( ) . getPtrOrNull ( ) ) { <nl> - auto iter = allDeclContexts . find ( dc ) ; <nl> - if ( iter ! = allDeclContexts . end ( ) ) <nl> - + + iter - > second ; <nl> - else <nl> - bogusDCs . insert ( { dc , scope } ) ; <nl> - } <nl> - } ) ; <nl> - <nl> - auto printDecl = [ & ] ( const Decl * d ) { <nl> - llvm : : errs ( ) < < " \ ngetAsDecl ( ) - > " < < d < < " " ; <nl> - d - > getSourceRange ( ) . print ( llvm : : errs ( ) , ctx . SourceMgr ) ; <nl> - llvm : : errs ( ) < < " : " ; <nl> - d - > dump ( llvm : : errs ( ) ) ; <nl> - llvm : : errs ( ) < < " \ n " ; <nl> - } ; <nl> - bool foundOmission = false ; <nl> - for ( const auto & p : allDeclContexts ) { <nl> - if ( p . second = = 0 ) { <nl> - if ( auto * d = p . first - > getAsDecl ( ) ) { <nl> - if ( isLocalizable ( d ) ) { <nl> - llvm : : errs ( ) < < " \ nASTScope tree omitted DeclContext : " < < p . first <nl> - < < " " <nl> - < < " : \ n " ; <nl> - p . first - > printContext ( llvm : : errs ( ) ) ; <nl> - printDecl ( d ) ; <nl> - foundOmission = true ; <nl> - } <nl> - } else { <nl> - / / If no decl , no source range , so no scope <nl> - } <nl> - } <nl> - } <nl> - for ( const auto & dcAndScope : bogusDCs ) { <nl> - llvm : : errs ( ) < < " ASTScope tree confabulated : " < < dcAndScope . getFirst ( ) <nl> - < < " : \ n " ; <nl> - dcAndScope . getFirst ( ) - > printContext ( llvm : : errs ( ) ) ; <nl> - if ( auto * d = dcAndScope . getFirst ( ) - > getAsDecl ( ) ) <nl> - printDecl ( d ) ; <nl> - dcAndScope . getSecond ( ) - > print ( llvm : : errs ( ) , 0 , false ) ; <nl> - } <nl> - return ! foundOmission & & bogusDCs . empty ( ) ; <nl> - } <nl> - <nl> - private : <nl> - / / / Return a map of every DeclContext in the AST , and zero in the 2nd element . <nl> - / / / For debugging . 
<nl> - llvm : : DenseMap < const DeclContext * , unsigned > <nl> - findLocalizableDeclContextsInAST ( ) const ; <nl> - <nl> public : <nl> SWIFT_DEBUG_DUMP { print ( llvm : : errs ( ) ) ; } <nl> <nl> IterableTypeBodyPortion : : insertionPointForDeferredExpansion ( <nl> <nl> # pragma mark verification <nl> <nl> - namespace { <nl> - class LocalizableDeclContextCollector : public ASTWalker { <nl> - <nl> - public : <nl> - llvm : : DenseMap < const DeclContext * , unsigned > declContexts ; <nl> - <nl> - void record ( const DeclContext * dc ) { <nl> - if ( dc ) <nl> - declContexts . insert ( { dc , 0 } ) ; <nl> - } <nl> - <nl> - bool walkToDeclPre ( Decl * D ) override { <nl> - / / catchForDebugging ( D , " DictionaryBridging . swift " , 694 ) ; <nl> - if ( const auto * dc = dyn_cast < DeclContext > ( D ) ) <nl> - record ( dc ) ; <nl> - if ( isa < IfConfigDecl > ( D ) ) <nl> - return false ; <nl> - if ( auto * pd = dyn_cast < ParamDecl > ( D ) ) <nl> - record ( pd - > getDefaultArgumentInitContext ( ) ) ; <nl> - else if ( auto * pbd = dyn_cast < PatternBindingDecl > ( D ) ) <nl> - recordInitializers ( pbd ) ; <nl> - else if ( auto * vd = dyn_cast < VarDecl > ( D ) ) { <nl> - vd - > visitParsedAccessors ( [ & ] ( AccessorDecl * ad ) { <nl> - ad - > walk ( * this ) ; <nl> - } ) ; <nl> - } <nl> - return ASTWalker : : walkToDeclPre ( D ) ; <nl> - } <nl> - <nl> - std : : pair < bool , Expr * > walkToExprPre ( Expr * E ) override { <nl> - if ( const auto * ce = dyn_cast < ClosureExpr > ( E ) ) <nl> - record ( ce ) ; <nl> - return ASTWalker : : walkToExprPre ( E ) ; <nl> - } <nl> - <nl> - private : <nl> - void recordInitializers ( PatternBindingDecl * pbd ) { <nl> - for ( auto idx : range ( pbd - > getNumPatternEntries ( ) ) ) <nl> - record ( pbd - > getInitContext ( idx ) ) ; <nl> - } <nl> - <nl> - void catchForDebugging ( Decl * D , const char * file , const unsigned line ) { <nl> - auto & SM = D - > getASTContext ( ) . SourceMgr ; <nl> - auto loc = D - > getStartLoc ( ) ; <nl> - if ( ! loc . isValid ( ) ) <nl> - return ; <nl> - auto bufID = SM . findBufferContainingLoc ( loc ) ; <nl> - auto f = SM . getIdentifierForBuffer ( bufID ) ; <nl> - auto lin = SM . getLineAndColumnInBuffer ( loc ) . first ; <nl> - if ( f . endswith ( file ) & & lin = = line ) <nl> - if ( isa < PatternBindingDecl > ( D ) ) <nl> - llvm : : errs ( ) < < " * * * catchForDebugging : " < < lin < < " * * * \ n " ; <nl> - } <nl> - } ; <nl> - } / / end namespace <nl> - <nl> - llvm : : DenseMap < const DeclContext * , unsigned > <nl> - ScopeCreator : : findLocalizableDeclContextsInAST ( ) const { <nl> - LocalizableDeclContextCollector collector ; <nl> - sourceFileScope - > SF - > walk ( collector ) ; <nl> - / / Walker omits the top <nl> - collector . record ( sourceFileScope - > SF ) ; <nl> - return collector . declContexts ; <nl> - } <nl> - <nl> - bool ASTSourceFileScope : : crossCheckWithAST ( ) { <nl> - return scopeCreator - > containsAllDeclContextsFromAST ( ) ; <nl> - } <nl> - <nl> void ast_scope : : simple_display ( llvm : : raw_ostream & out , <nl> const ScopeCreator * scopeCreator ) { <nl> scopeCreator - > print ( out ) ; <nl> | ASTScope : Remove crossCheckWithAST ( ) | apple/swift | 49e371c563aa3afc2df811ad7af343cb1921a48f | 2020-09-25T06:40:13Z |
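The Swift commit above deletes a debug-only invariant check that compared two traversals: collect every localizable DeclContext from an AST walk, then mark each one reachable from the scope tree, reporting both omissions and confabulated entries. A generic C++17 sketch of that cross-check pattern; Node, collect, and crossCheck are hypothetical stand-ins, not Swift compiler APIs:

    #include <cstdio>
    #include <map>
    #include <vector>

    struct Node { int id; std::vector<Node*> kids; };

    // First traversal: record every node the source structure contains.
    static void collect(Node* n, std::map<Node*, unsigned>& seen) {
        seen.emplace(n, 0u);
        for (Node* k : n->kids) collect(k, seen);
    }

    // Second traversal: count hits from the derived structure, then report
    // nodes never covered (omissions) and nodes that should not exist.
    static bool crossCheck(Node* ast_root, const std::vector<Node*>& scope_nodes) {
        std::map<Node*, unsigned> expected;
        collect(ast_root, expected);
        for (Node* n : scope_nodes) {
            auto it = expected.find(n);
            if (it != expected.end()) ++it->second;
            else std::fprintf(stderr, "confabulated node %d\n", n->id);
        }
        bool ok = true;
        for (auto& [node, hits] : expected)
            if (hits == 0) { std::fprintf(stderr, "omitted node %d\n", node->id); ok = false; }
        return ok;
    }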
mmm a / src / flag - definitions . h <nl> ppp b / src / flag - definitions . h <nl> DEFINE_IMPLICATION ( es_staging , harmony ) <nl> / / Features that are still work in progress ( behind individual flags ) . <nl> # define HARMONY_INPROGRESS ( V ) \ <nl> V ( harmony_modules , " harmony modules " ) \ <nl> - V ( harmony_arrays , " harmony array methods " ) \ <nl> V ( harmony_array_includes , " harmony Array . prototype . includes " ) \ <nl> V ( harmony_regexps , " harmony regular expression extensions " ) \ <nl> V ( harmony_arrow_functions , " harmony arrow functions " ) \ <nl> DEFINE_IMPLICATION ( es_staging , harmony ) <nl> <nl> / / Features that are complete ( but still behind - - harmony / es - staging flag ) . <nl> # define HARMONY_STAGED ( V ) \ <nl> + V ( harmony_arrays , " harmony array methods " ) \ <nl> V ( harmony_rest_parameters , " harmony rest parameters " ) \ <nl> V ( harmony_spreadcalls , " harmony spread - calls " ) \ <nl> V ( harmony_object , " harmony Object methods " ) \ <nl> V ( harmony_spread_arrays , " harmony spread in array literals " ) \ <nl> V ( harmony_tostring , " harmony toString " ) <nl> <nl> - <nl> / / Features that are shipping ( turned on by default , but internal flag remains ) . <nl> # define HARMONY_SHIPPING ( V ) \ <nl> V ( harmony_classes , " harmony classes ( implies object literal extension ) " ) \ <nl> mmm a / test / test262 - es6 / test262 - es6 . status <nl> ppp b / test / test262 - es6 / test262 - es6 . status <nl> <nl> # https : / / code . google . com / p / v8 / issues / detail ? id = 705 <nl> ' language / statements / for - in / 12 . 6 . 4 - 2 ' : [ PASS , FAIL_OK ] , <nl> <nl> - # # # # # # # # # # # # # # # # # # # # # # MISSING ES6 FEATURES # # # # # # # # # # # # # # # # # # # # # # # <nl> - <nl> - # Array . fill ( currently requires - - harmony - arrays ) <nl> - ' built - ins / Array / prototype / fill / S22 . 1 . 3 . 6_T1 ' : [ FAIL ] , <nl> - <nl> # Array . find ( currently requires - - harmony - arrays ) <nl> - ' built - ins / Array / of / S22 . 1 . 2 . 3_T1 ' : [ FAIL ] , <nl> - ' built - ins / Array / of / S22 . 1 . 2 . 3_T2 ' : [ FAIL ] , <nl> - ' built - ins / Array / prototype / find / Array . prototype . find_empty - array - undefined ' : [ FAIL ] , <nl> - ' built - ins / Array / prototype / find / Array . prototype . find_length - property ' : [ FAIL ] , <nl> - ' built - ins / Array / prototype / find / Array . prototype . find_modify - after - start ' : [ FAIL ] , <nl> - ' built - ins / Array / prototype / find / Array . prototype . find_non - returning - predicate ' : [ FAIL ] , <nl> - ' built - ins / Array / prototype / find / Array . prototype . find_predicate - arguments ' : [ FAIL ] , <nl> - ' built - ins / Array / prototype / find / Array . prototype . find_push - after - start ' : [ FAIL ] , <nl> - ' built - ins / Array / prototype / find / Array . prototype . find_remove - after - start ' : [ FAIL ] , <nl> - ' built - ins / Array / prototype / find / Array . prototype . find_return - found - value ' : [ FAIL ] , <nl> ' built - ins / Array / prototype / find / Array . prototype . find_skip - empty ' : [ FAIL ] , <nl> - ' built - ins / Array / prototype / find / Array . prototype . find_this - defined ' : [ FAIL ] , <nl> - ' built - ins / Array / prototype / find / Array . prototype . find_this - is - object ' : [ FAIL ] , <nl> ' built - ins / Array / prototype / find / Array . prototype . find_this - undefined ' : [ FAIL ] , <nl> <nl> - # Array . from <nl> - ' built - ins / Array / from / S22 . 1 . 2 . 
1_T1 ' : [ FAIL ] , <nl> - ' built - ins / Array / from / S22 . 1 . 2 . 1_T2 ' : [ FAIL ] , <nl> + # # # # # # # # # # # # # # # # # # # # # # MISSING ES6 FEATURES # # # # # # # # # # # # # # # # # # # # # # # <nl> <nl> - # Direct proxies <nl> - ' built - ins / Array / prototype / find / Array . prototype . find_callable - predicate ' : [ FAIL ] , <nl> + # Requires - - harmony - sloppy <nl> + ' built - ins / Array / prototype / concat / Array . prototype . concat_non - array ' : [ FAIL ] , <nl> <nl> # - - harmony - computed - property - names is not yet enabled <nl> ' language / computed - property - names / class / accessor / getter ' : [ FAIL ] , <nl> <nl> ' built - ins / Array / prototype / find / Array . prototype . find_callable - Proxy - 1 ' : [ FAIL ] , <nl> ' built - ins / Array / prototype / find / Array . prototype . find_callable - Proxy - 2 ' : [ FAIL ] , <nl> ' built - ins / Array / prototype / find / Array . prototype . find_callable - arrowfunction ' : [ FAIL ] , <nl> - ' built - ins / Array / prototype / find / Array . prototype . find_callable - forEach ' : [ FAIL ] , <nl> - ' built - ins / Array / prototype / find / Array . prototype . find_this - global ' : [ FAIL ] , <nl> ' built - ins / Array / prototype / forEach / 15 . 4 . 4 . 18 - 3 - 12 ' : [ FAIL ] , <nl> ' built - ins / Array / prototype / forEach / 15 . 4 . 4 . 18 - 3 - 25 ' : [ FAIL ] , <nl> ' built - ins / Array / prototype / forEach / 15 . 4 . 4 . 18 - 3 - 7 ' : [ FAIL ] , <nl> <nl> ' language / generators / generator . expression . implicit - name ' : [ FAIL ] , <nl> <nl> # Test 262 update 2015 - 03 - 31 <nl> - ' built - ins / Array / prototype / concat / Array . prototype . concat_array - like ' : [ FAIL ] , <nl> - ' built - ins / Array / prototype / concat / Array . prototype . concat_array - like - length - to - string - throws ' : [ FAIL ] , <nl> - ' built - ins / Array / prototype / concat / Array . prototype . concat_array - like - length - value - of - throws ' : [ FAIL ] , <nl> - ' built - ins / Array / prototype / concat / Array . prototype . concat_array - like - negative - length ' : [ FAIL ] , <nl> - ' built - ins / Array / prototype / concat / Array . prototype . concat_array - like - primitive - non - number - length ' : [ FAIL ] , <nl> - ' built - ins / Array / prototype / concat / Array . prototype . concat_array - like - string - length ' : [ FAIL ] , <nl> - ' built - ins / Array / prototype / concat / Array . prototype . concat_array - like - to - length - throws ' : [ FAIL ] , <nl> - ' built - ins / Array / prototype / concat / Array . prototype . concat_holey - sloppy - arguments ' : [ FAIL ] , <nl> - ' built - ins / Array / prototype / concat / Array . prototype . concat_large - typed - array ' : [ FAIL ] , <nl> - ' built - ins / Array / prototype / concat / Array . prototype . concat_length - throws ' : [ FAIL ] , <nl> - ' built - ins / Array / prototype / concat / Array . prototype . concat_non - array ' : [ FAIL ] , <nl> - ' built - ins / Array / prototype / concat / Array . prototype . concat_sloppy - arguments ' : [ FAIL ] , <nl> - ' built - ins / Array / prototype / concat / Array . prototype . concat_sloppy - arguments - throws ' : [ FAIL ] , <nl> - ' built - ins / Array / prototype / concat / Array . prototype . concat_sloppy - arguments - with - dupes ' : [ FAIL ] , <nl> - ' built - ins / Array / prototype / concat / Array . prototype . concat_small - typed - array ' : [ FAIL ] , <nl> - ' built - ins / Array / prototype / concat / Array . prototype . 
concat_spreadable - boolean - wrapper ' : [ FAIL ] , <nl> - ' built - ins / Array / prototype / concat / Array . prototype . concat_spreadable - function ' : [ FAIL ] , <nl> - ' built - ins / Array / prototype / concat / Array . prototype . concat_spreadable - getter - throws ' : [ FAIL ] , <nl> - ' built - ins / Array / prototype / concat / Array . prototype . concat_spreadable - number - wrapper ' : [ FAIL ] , <nl> - ' built - ins / Array / prototype / concat / Array . prototype . concat_spreadable - reg - exp ' : [ FAIL ] , <nl> - ' built - ins / Array / prototype / concat / Array . prototype . concat_spreadable - sparse - object ' : [ FAIL ] , <nl> - ' built - ins / Array / prototype / concat / Array . prototype . concat_spreadable - string - wrapper ' : [ FAIL ] , <nl> - ' built - ins / Array / prototype / concat / Array . prototype . concat_strict - arguments ' : [ FAIL ] , <nl> ' built - ins / Symbol / species / Symbol . species . builtin - getter - name ' : [ FAIL ] , <nl> ' built - ins / Symbol / species / Symbol . species . exists ' : [ FAIL ] , <nl> ' built - ins / Symbol / species / Symbol . species . in_Array ' : [ FAIL ] , <nl> | Stage ES6 Array and TypedArray methods | v8/v8 | 131062fc41e2d95c9ae7b0913b7efb8e32785e2c | 2015-06-04T20:08:37Z |
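The V8 commit above promotes features by moving V(flag, description) pairs between the HARMONY_INPROGRESS, HARMONY_STAGED, and HARMONY_SHIPPING X-macro lists, so staging a feature is a one-line move. A self-contained C++ sketch of the same pattern; the flag names here are illustrative, not V8's real set:

    #include <cstdio>

    // Each stage is a list of (flag, description) pairs.
    #define FEATURES_INPROGRESS(V) V(modules, "harmony modules")
    #define FEATURES_STAGED(V)     V(arrays,  "harmony array methods")
    #define FEATURES_SHIPPING(V)   V(classes, "harmony classes")

    // Declare one boolean per flag, across every stage.
    #define DECLARE_FLAG(name, desc) bool flag_##name = false;
    FEATURES_INPROGRESS(DECLARE_FLAG)
    FEATURES_STAGED(DECLARE_FLAG)
    FEATURES_SHIPPING(DECLARE_FLAG)
    #undef DECLARE_FLAG

    // An --es-staging style switch turns on everything in the STAGED list at once.
    void enable_staged() {
    #define ENABLE(name, desc) flag_##name = true;
        FEATURES_STAGED(ENABLE)
    #undef ENABLE
    }

    int main() {
        enable_staged();
        std::printf("arrays staged: %d\n", flag_arrays);  // prints 1
    }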
mmm a / arangod / VocBase / index . c <nl> ppp b / arangod / VocBase / index . c <nl> static TRI_vector_string_t * ParseWordsFulltextIndex ( const char * const text , <nl> / / UTF - 8 <nl> if ( wordStart = = NULL ) { <nl> wordStart = ptr ; <nl> - containsUtf8 = true ; <nl> } <nl> + containsUtf8 = true ; <nl> } <nl> else { <nl> if ( wordStart ! = NULL ) { <nl> mmm a / js / server / tests / fulltext . js <nl> ppp b / js / server / tests / fulltext . js <nl> function fulltextQuerySuite ( ) { <nl> collection . save ( { text : texts [ i ] } ) ; <nl> } <nl> <nl> - / / TODO : the index does not pick this word up <nl> - / / assertEqual ( 1 , collection . FULLTEXT ( idx , " AUTOTUERENDELLENentfernungsfirmenmitarbeiterverguetungsbewerter " ) . documents . length ) ; <nl> - / / TODO : the index does not pick this word up <nl> - / / assertEqual ( 1 , collection . FULLTEXT ( idx , " Donaudampfschifffahrtskapitaensmuetzentraegervereinsvorstandsvorsitzenderehegattin " ) . documents . length ) ; <nl> + assertEqual ( 1 , collection . FULLTEXT ( idx , " AUTOTUERENDELLENentfernungsfirmenmitarbeiterverguetungsbewerter " ) . documents . length ) ; <nl> + assertEqual ( 1 , collection . FULLTEXT ( idx , " Donaudampfschifffahrtskapitaensmuetzentraegervereinsvorstandsvorsitzenderehegattin " ) . documents . length ) ; <nl> + <nl> assertEqual ( 1 , collection . FULLTEXT ( idx , " reliefpfeiler " ) . documents . length ) ; <nl> assertEqual ( 1 , collection . FULLTEXT ( idx , " feilen " ) . documents . length ) ; <nl> assertEqual ( 1 , collection . FULLTEXT ( idx , " reihenweise " ) . documents . length ) ; <nl> function fulltextQuerySuite ( ) { <nl> assertEqual ( 1 , collection . FULLTEXT ( idx , " ruBBisH , TEXT , some " ) . documents . length ) ; <nl> assertEqual ( 1 , collection . FULLTEXT ( idx , " rubbish , text " ) . documents . length ) ; <nl> assertEqual ( 1 , collection . FULLTEXT ( idx , " some , text " ) . documents . length ) ; <nl> - assertEqual ( 1 , collection . FULLTEXT ( idx , " more , rubbish , test , data , the , index , should , be , able , to , handle . all , this " ) . documents . length ) ; <nl> + assertEqual ( 1 , collection . FULLTEXT ( idx , " more , rubbish , test , data , the , index , should , be , able , to , handle , all , this " ) . documents . length ) ; <nl> assertEqual ( 1 , collection . FULLTEXT ( idx , " even , more , rubbish , nevertheless , this , should , be , handled , well , too " ) . documents . length ) ; <nl> assertEqual ( 1 , collection . FULLTEXT ( idx , " even , too " ) . documents . length ) ; <nl> assertEqual ( 1 , collection . FULLTEXT ( idx , " even , rubbish , should , be , handled " ) . documents . length ) ; <nl> function fulltextQuerySuite ( ) { <nl> assertEqual ( 100 , collection . FULLTEXT ( idx , " dog , lazy , the , over , jumped , fox , brown , quick , the " ) . documents . length ) ; <nl> assertEqual ( 100 , collection . FULLTEXT ( idx , " fox , over , dog " ) . documents . length ) ; <nl> <nl> - / / TODO : index does not handle this and returns 100 matches <nl> - / / assertEqual ( 0 , collection . FULLTEXT ( idx , " the , frog " ) . documents . length ) ; <nl> + assertEqual ( 0 , collection . FULLTEXT ( idx , " the , frog " ) . documents . length ) ; <nl> assertEqual ( 0 , collection . FULLTEXT ( idx , " no , cats , allowed " ) . documents . length ) ; <nl> assertEqual ( 0 , collection . FULLTEXT ( idx , " banana " ) . documents . length ) ; <nl> } , <nl> function fulltextQuerySuite ( ) { <nl> assertEqual ( 1 , collection . 
FULLTEXT ( idx , " somerandomstringaaaaaaaaaaaaaaaaaaaaaa " ) . documents . length ) ; <nl> assertEqual ( 1 , collection . FULLTEXT ( idx , " somerandomstringaaaaaaaaaaaaaaaaaaaaaaa " ) . documents . length ) ; <nl> assertEqual ( 1 , collection . FULLTEXT ( idx , " somerandomstringaaaaaaaaaaaaaaaaaaaaaaaa " ) . documents . length ) ; <nl> - / / TODO : index does not handle this <nl> - / / assertEqual ( 1 , collection . FULLTEXT ( idx , " somerandomstringaaaaaaaaaaaaaaaaaaaaaaaaa " ) . documents . length ) ; <nl> - / / assertEqual ( 1 , collection . FULLTEXT ( idx , " somerandomstringaaaaaaaaaaaaaaaaaaaaaaaaaaa " ) . documents . length ) ; <nl> <nl> assertEqual ( 0 , collection . FULLTEXT ( idx , " foo " ) . documents . length ) ; <nl> assertEqual ( 0 , collection . FULLTEXT ( idx , " somerandomstring " ) . documents . length ) ; <nl> function fulltextQuerySuite ( ) { <nl> } <nl> <nl> assertEqual ( 1 , collection . FULLTEXT ( idx , " der , peter " ) . documents . length ) ; <nl> - / / TODO : index does not handle this <nl> - / / assertEqual ( 3 , collection . FULLTEXT ( idx , " der , müller " ) . documents . length ) ; <nl> - / / assertEqual ( 1 , collection . FULLTEXT ( idx , " börger " ) . documents . length ) ; <nl> - / / assertEqual ( 1 , collection . FULLTEXT ( idx , " BÖRGER " ) . documents . length ) ; <nl> - / / assertEqual ( 1 , collection . FULLTEXT ( idx , " bÖRGER " ) . documents . length ) ; <nl> - / / assertEqual ( 2 , collection . FULLTEXT ( idx , " der , müller , ging , in , den , wald " ) . documents . length ) ; <nl> - / / assertEqual ( 1 , collection . FULLTEXT ( idx , " der , müller , ging , in , den , wald , und , aß , den " ) . documents . length ) ; <nl> - / / assertEqual ( 1 , collection . FULLTEXT ( idx , " der , müller , ging , in , den , wald , und , aß , den , hühnerbörekbärenmensch " ) . documents . length ) ; <nl> - / / assertEqual ( 2 , collection . FULLTEXT ( idx , " der , müller , aß , den , hühnerbörekbärenmensch " ) . documents . length ) ; <nl> + assertEqual ( 3 , collection . FULLTEXT ( idx , " der , müller " ) . documents . length ) ; <nl> + assertEqual ( 1 , collection . FULLTEXT ( idx , " börger " ) . documents . length ) ; <nl> + assertEqual ( 1 , collection . FULLTEXT ( idx , " BÖRGER " ) . documents . length ) ; <nl> + assertEqual ( 1 , collection . FULLTEXT ( idx , " bÖRGER " ) . documents . length ) ; <nl> + assertEqual ( 2 , collection . FULLTEXT ( idx , " der , müller , ging , in , den , wald " ) . documents . length ) ; <nl> + assertEqual ( 1 , collection . FULLTEXT ( idx , " der , müller , ging , in , den , wald , und , aß , den " ) . documents . length ) ; <nl> + assertEqual ( 1 , collection . FULLTEXT ( idx , " der , müller , ging , in , den , wald , und , aß , den , hühnerbörekbärenmensch " ) . documents . length ) ; <nl> + assertEqual ( 2 , collection . FULLTEXT ( idx , " der , müller , aß , den , hühnerbörekbärenmensch " ) . documents . length ) ; <nl> assertEqual ( 1 , collection . FULLTEXT ( idx , " der , HANS , mag , den , PILZ " ) . documents . length ) ; <nl> assertEqual ( 1 , collection . FULLTEXT ( idx , " der , PILZ , hans , den , MAG " ) . documents . length ) ; <nl> assertEqual ( 1 , collection . FULLTEXT ( idx , " der , PILZ , hans , den , MAG " ) . documents . length ) ; <nl> function fulltextQuerySuite ( ) { <nl> assertEqual ( 1 , collection . FULLTEXT ( idx , " der , peter , mag , den , bÖRGER " ) . documents . length ) ; <nl> assertEqual ( 1 , collection . FULLTEXT ( idx , " der , peter , mag , bÖRGER " ) . documents . 
length ) ; <nl> assertEqual ( 1 , collection . FULLTEXT ( idx , " der , peter , bÖRGER " ) . documents . length ) ; <nl> - / / TODO : index does not handle this <nl> - / / assertEqual ( 1 , collection . FULLTEXT ( idx , " der , bÖRGER " ) . documents . length ) ; <nl> - / / assertEqual ( 1 , collection . FULLTEXT ( idx , " bÖRGER " ) . documents . length ) ; <nl> + assertEqual ( 1 , collection . FULLTEXT ( idx , " der , bÖRGER " ) . documents . length ) ; <nl> + assertEqual ( 1 , collection . FULLTEXT ( idx , " bÖRGER " ) . documents . length ) ; <nl> } , <nl> <nl> testUnicode : function ( ) { <nl> | fixed unicode lowercasing | arangodb/arangodb | 5742ab6fba01716bb8bc7fd955c5873aa8ea1763 | 2012-12-04T10:12:38Z |
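The fulltext fix above moves containsUtf8 = true out of the wordStart == NULL branch: previously the multi-byte flag was set only when a UTF-8 byte also started a new word, so words that begin with ASCII but contain umlauts later (müller, bÖRGER) were never lowercased as UTF-8 — exactly the cases the re-enabled assertions cover. A simplified C++ tokenizer sketch of the corrected logic; Word and tokenize are illustrative, not ArangoDB's types:

    #include <cctype>
    #include <string>
    #include <vector>

    struct Word { std::string text; bool contains_utf8; };

    std::vector<Word> tokenize(const std::string& text) {
        std::vector<Word> words;
        std::string cur;
        bool utf8 = false;
        auto flush = [&] {
            if (!cur.empty()) { words.push_back({cur, utf8}); cur.clear(); utf8 = false; }
        };
        for (unsigned char c : text) {
            if (c >= 0x80) {                 // byte of a multi-byte UTF-8 sequence
                utf8 = true;                 // fix: flag it regardless of word position
                cur.push_back(static_cast<char>(c));
            } else if (std::isalnum(c)) {
                cur.push_back(static_cast<char>(c));
            } else {
                flush();                     // separator ends the current word
            }
        }
        flush();
        return words;
    }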
mmm a / lib / FrontendTool / ReferenceDependencies . cpp <nl> ppp b / lib / FrontendTool / ReferenceDependencies . cpp <nl> <nl> # include " swift / AST / NameLookup . h " <nl> # include " swift / AST / ReferencedNameTracker . h " <nl> # include " swift / AST / Types . h " <nl> + # include " swift / Basic / FileSystem . h " <nl> # include " swift / Basic / LLVM . h " <nl> # include " swift / Basic / ReferenceDependencyKeys . h " <nl> # include " swift / Frontend / FrontendOptions . h " <nl> class ReferenceDependenciesEmitter { <nl> llvm : : raw_ostream & out ) ; <nl> <nl> private : <nl> - / / / Opens file for reference dependencies . Emits diagnostic if needed . <nl> - / / / <nl> - / / / \ return nullptr on error <nl> - static std : : unique_ptr < llvm : : raw_fd_ostream > openFile ( DiagnosticEngine & diags , <nl> - StringRef OutputPath ) ; <nl> / / / Emits all the dependency information . <nl> void emit ( ) const ; <nl> <nl> static std : : string escape ( DeclBaseName name ) { <nl> return llvm : : yaml : : escape ( name . userFacingName ( ) ) ; <nl> } <nl> <nl> - std : : unique_ptr < llvm : : raw_fd_ostream > <nl> - ReferenceDependenciesEmitter : : openFile ( DiagnosticEngine & diags , <nl> - StringRef outputPath ) { <nl> + bool ReferenceDependenciesEmitter : : emit ( DiagnosticEngine & diags , <nl> + SourceFile * const SF , <nl> + const DependencyTracker & depTracker , <nl> + StringRef outputPath ) { <nl> / / Before writing to the dependencies file path , preserve any previous file <nl> / / that may have been there . No error handling - - this is just a nicety , it <nl> / / doesn ' t matter if it fails . <nl> llvm : : sys : : fs : : rename ( outputPath , outputPath + " ~ " ) ; <nl> - <nl> - std : : error_code EC ; <nl> - auto out = llvm : : make_unique < llvm : : raw_fd_ostream > ( outputPath , EC , <nl> - llvm : : sys : : fs : : F_None ) ; <nl> - <nl> - if ( out - > has_error ( ) | | EC ) { <nl> + std : : error_code EC = <nl> + swift : : atomicallyWritingToFile ( outputPath , <nl> + [ & ] ( llvm : : raw_pwrite_stream & out ) { <nl> + ReferenceDependenciesEmitter : : emit ( SF , depTracker , out ) ; <nl> + } ) ; <nl> + if ( EC ) { <nl> diags . diagnose ( SourceLoc ( ) , diag : : error_opening_output , outputPath , <nl> EC . message ( ) ) ; <nl> - out - > clear_error ( ) ; <nl> - return nullptr ; <nl> - } <nl> - return out ; <nl> - } <nl> - <nl> - bool ReferenceDependenciesEmitter : : emit ( DiagnosticEngine & diags , <nl> - SourceFile * const SF , <nl> - const DependencyTracker & depTracker , <nl> - StringRef outputPath ) { <nl> - const std : : unique_ptr < llvm : : raw_ostream > out = openFile ( diags , outputPath ) ; <nl> - if ( ! out . get ( ) ) <nl> return true ; <nl> - ReferenceDependenciesEmitter : : emit ( SF , depTracker , * out ) ; <nl> + } <nl> return false ; <nl> } <nl> <nl> | Merge pull request from owenv / write - swiftdeps - atomically | apple/swift | 3acdff682596895894eaae0d7967b1a98a605bff | 2018-10-09T23:50:24Z |
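The Swift commit above routes .swiftdeps emission through atomicallyWritingToFile instead of writing to an open raw_fd_ostream, so an interrupted compile cannot leave a half-written dependency file behind. The usual mechanism for this is write-to-temp-then-rename; a minimal C++ sketch under that assumption (atomic_write is a hypothetical helper, and POSIX rename-replaces-target semantics are assumed):

    #include <cstdio>
    #include <string>

    // Write the full contents to a sibling temp file, then rename it over
    // the target in one step; readers never observe a partial file.
    bool atomic_write(const std::string& path, const std::string& contents) {
        std::string tmp = path + ".tmp";
        std::FILE* f = std::fopen(tmp.c_str(), "wb");
        if (!f) return false;
        bool ok = std::fwrite(contents.data(), 1, contents.size(), f) == contents.size();
        ok = (std::fclose(f) == 0) && ok;
        if (!ok) { std::remove(tmp.c_str()); return false; }  // keep old file intact
        return std::rename(tmp.c_str(), path.c_str()) == 0;
    }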
mmm a / tests / test_main . cpp <nl> ppp b / tests / test_main . cpp <nl> <nl> # include " test_list . h " <nl> # include " test_math . h " <nl> # include " test_method_bind . h " <nl> + # include " test_node_path . h " <nl> # include " test_oa_hash_map . h " <nl> # include " test_ordered_hash_map . h " <nl> # include " test_physics_2d . h " <nl> new file mode 100644 <nl> index 00000000000 . . fdfff8d4c74 <nl> mmm / dev / null <nl> ppp b / tests / test_node_path . h <nl> <nl> + / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> + / * test_node_path . h * / <nl> + / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> + / * This file is part of : * / <nl> + / * GODOT ENGINE * / <nl> + / * https : / / godotengine . org * / <nl> + / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> + / * Copyright ( c ) 2007 - 2020 Juan Linietsky , Ariel Manzur . * / <nl> + / * Copyright ( c ) 2014 - 2020 Godot Engine contributors ( cf . AUTHORS . md ) . * / <nl> + / * * / <nl> + / * Permission is hereby granted , free of charge , to any person obtaining * / <nl> + / * a copy of this software and associated documentation files ( the * / <nl> + / * " Software " ) , to deal in the Software without restriction , including * / <nl> + / * without limitation the rights to use , copy , modify , merge , publish , * / <nl> + / * distribute , sublicense , and / or sell copies of the Software , and to * / <nl> + / * permit persons to whom the Software is furnished to do so , subject to * / <nl> + / * the following conditions : * / <nl> + / * * / <nl> + / * The above copyright notice and this permission notice shall be * / <nl> + / * included in all copies or substantial portions of the Software . * / <nl> + / * * / <nl> + / * THE SOFTWARE IS PROVIDED " AS IS " , WITHOUT WARRANTY OF ANY KIND , * / <nl> + / * EXPRESS OR IMPLIED , INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * / <nl> + / * MERCHANTABILITY , FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT . * / <nl> + / * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY * / <nl> + / * CLAIM , DAMAGES OR OTHER LIABILITY , WHETHER IN AN ACTION OF CONTRACT , * / <nl> + / * TORT OR OTHERWISE , ARISING FROM , OUT OF OR IN CONNECTION WITH THE * / <nl> + / * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE . * / <nl> + / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl> + <nl> + # ifndef TEST_NODE_PATH_H <nl> + # define TEST_NODE_PATH_H <nl> + <nl> + # include " core / string / node_path . h " <nl> + <nl> + # include " thirdparty / doctest / doctest . h " <nl> + <nl> + namespace TestNodePath { <nl> + <nl> + TEST_CASE ( " [ NodePath ] Relative path " ) { <nl> + const NodePath node_path_relative = NodePath ( " Path2D / PathFollow2D / Sprite2D : position : x " ) ; <nl> + <nl> + CHECK_MESSAGE ( <nl> + node_path_relative . get_as_property_path ( ) = = NodePath ( " : Path2D / PathFollow2D / Sprite2D : position : x " ) , <nl> + " The returned property path should match the expected value . " ) ; <nl> + CHECK_MESSAGE ( <nl> + node_path_relative . 
get_concatenated_subnames ( ) = = " position : x " , <nl> + " The returned concatenated subnames should match the expected value . " ) ; <nl> + <nl> + CHECK_MESSAGE ( <nl> + node_path_relative . get_name ( 0 ) = = " Path2D " , <nl> + " The returned name at index 0 should match the expected value . " ) ; <nl> + CHECK_MESSAGE ( <nl> + node_path_relative . get_name ( 1 ) = = " PathFollow2D " , <nl> + " The returned name at index 1 should match the expected value . " ) ; <nl> + CHECK_MESSAGE ( <nl> + node_path_relative . get_name ( 2 ) = = " Sprite2D " , <nl> + " The returned name at index 2 should match the expected value . " ) ; <nl> + ERR_PRINT_OFF ; <nl> + CHECK_MESSAGE ( <nl> + node_path_relative . get_name ( 3 ) = = " " , <nl> + " The returned name at invalid index 3 should match the expected value . " ) ; <nl> + CHECK_MESSAGE ( <nl> + node_path_relative . get_name ( - 1 ) = = " " , <nl> + " The returned name at invalid index - 1 should match the expected value . " ) ; <nl> + ERR_PRINT_ON ; <nl> + <nl> + CHECK_MESSAGE ( <nl> + node_path_relative . get_name_count ( ) = = 3 , <nl> + " The returned number of names should match the expected value . " ) ; <nl> + <nl> + CHECK_MESSAGE ( <nl> + node_path_relative . get_subname ( 0 ) = = " position " , <nl> + " The returned subname at index 0 should match the expected value . " ) ; <nl> + CHECK_MESSAGE ( <nl> + node_path_relative . get_subname ( 1 ) = = " x " , <nl> + " The returned subname at index 1 should match the expected value . " ) ; <nl> + ERR_PRINT_OFF ; <nl> + CHECK_MESSAGE ( <nl> + node_path_relative . get_subname ( 2 ) = = " " , <nl> + " The returned subname at invalid index 2 should match the expected value . " ) ; <nl> + CHECK_MESSAGE ( <nl> + node_path_relative . get_subname ( - 1 ) = = " " , <nl> + " The returned subname at invalid index - 1 should match the expected value . " ) ; <nl> + ERR_PRINT_ON ; <nl> + <nl> + CHECK_MESSAGE ( <nl> + node_path_relative . get_subname_count ( ) = = 2 , <nl> + " The returned number of subnames should match the expected value . " ) ; <nl> + <nl> + CHECK_MESSAGE ( <nl> + ! node_path_relative . is_absolute ( ) , <nl> + " The node path should be considered relative . " ) ; <nl> + <nl> + CHECK_MESSAGE ( <nl> + ! node_path_relative . is_empty ( ) , <nl> + " The node path shouldn ' t be considered empty . " ) ; <nl> + } <nl> + <nl> + TEST_CASE ( " [ NodePath ] Absolute path " ) { <nl> + const NodePath node_path_aboslute = NodePath ( " / root / Sprite2D " ) ; <nl> + <nl> + CHECK_MESSAGE ( <nl> + node_path_aboslute . get_as_property_path ( ) = = NodePath ( " : root / Sprite2D " ) , <nl> + " The returned property path should match the expected value . " ) ; <nl> + CHECK_MESSAGE ( <nl> + node_path_aboslute . get_concatenated_subnames ( ) = = " " , <nl> + " The returned concatenated subnames should match the expected value . " ) ; <nl> + <nl> + CHECK_MESSAGE ( <nl> + node_path_aboslute . get_name ( 0 ) = = " root " , <nl> + " The returned name at index 0 should match the expected value . " ) ; <nl> + CHECK_MESSAGE ( <nl> + node_path_aboslute . get_name ( 1 ) = = " Sprite2D " , <nl> + " The returned name at index 1 should match the expected value . " ) ; <nl> + ERR_PRINT_OFF ; <nl> + CHECK_MESSAGE ( <nl> + node_path_aboslute . get_name ( 2 ) = = " " , <nl> + " The returned name at invalid index 2 should match the expected value . " ) ; <nl> + CHECK_MESSAGE ( <nl> + node_path_aboslute . get_name ( - 1 ) = = " " , <nl> + " The returned name at invalid index - 1 should match the expected value . 
" ) ; <nl> + ERR_PRINT_ON ; <nl> + <nl> + CHECK_MESSAGE ( <nl> + node_path_aboslute . get_name_count ( ) = = 2 , <nl> + " The returned number of names should match the expected value . " ) ; <nl> + <nl> + CHECK_MESSAGE ( <nl> + node_path_aboslute . get_subname_count ( ) = = 0 , <nl> + " The returned number of subnames should match the expected value . " ) ; <nl> + <nl> + CHECK_MESSAGE ( <nl> + node_path_aboslute . is_absolute ( ) , <nl> + " The node path should be considered absolute . " ) ; <nl> + <nl> + CHECK_MESSAGE ( <nl> + ! node_path_aboslute . is_empty ( ) , <nl> + " The node path shouldn ' t be considered empty . " ) ; <nl> + } <nl> + <nl> + TEST_CASE ( " [ NodePath ] Empty path " ) { <nl> + const NodePath node_path_empty = NodePath ( ) ; <nl> + <nl> + CHECK_MESSAGE ( <nl> + node_path_empty . get_as_property_path ( ) = = NodePath ( ) , <nl> + " The returned property path should match the expected value . " ) ; <nl> + ERR_PRINT_OFF ; <nl> + CHECK_MESSAGE ( <nl> + node_path_empty . get_concatenated_subnames ( ) = = " " , <nl> + " The returned concatenated subnames should match the expected value . " ) ; <nl> + ERR_PRINT_ON ; <nl> + <nl> + CHECK_MESSAGE ( <nl> + node_path_empty . get_name_count ( ) = = 0 , <nl> + " The returned number of names should match the expected value . " ) ; <nl> + <nl> + CHECK_MESSAGE ( <nl> + node_path_empty . get_subname_count ( ) = = 0 , <nl> + " The returned number of subnames should match the expected value . " ) ; <nl> + <nl> + CHECK_MESSAGE ( <nl> + ! node_path_empty . is_absolute ( ) , <nl> + " The node path shouldn ' t be considered absolute . " ) ; <nl> + <nl> + CHECK_MESSAGE ( <nl> + node_path_empty . is_empty ( ) , <nl> + " The node path should be considered empty . " ) ; <nl> + } <nl> + <nl> + } / / namespace TestNodePath <nl> + <nl> + # endif / / TEST_NODE_PATH_H <nl> | Add a test suite for NodePath | godotengine/godot | bf82da3d0f64e9f4beeb7f85fbe5b3a055bae9d3 | 2020-11-15T21:28:25Z |
mmm a / CMakeLists . txt <nl> ppp b / CMakeLists . txt <nl> if ( BUILD_opencv_apps ) <nl> endif ( ) <nl> <nl> # examples <nl> - if ( BUILD_EXAMPLES OR BUILD_ANDROID_EXAMPLES OR INSTALL_PYTHON_EXAMPLES OR INSTALL_C_EXAMPLES ) <nl> + if ( BUILD_EXAMPLES OR BUILD_ANDROID_EXAMPLES OR INSTALL_ANDROID_EXAMPLES OR INSTALL_PYTHON_EXAMPLES OR INSTALL_C_EXAMPLES ) <nl> add_subdirectory ( samples ) <nl> endif ( ) <nl> <nl> ocv_build_features_string ( apps_status <nl> IF BUILD_EXAMPLES THEN " examples " <nl> IF BUILD_opencv_apps THEN " apps " <nl> IF BUILD_ANDROID_SERVICE THEN " android_service " <nl> - IF BUILD_ANDROID_EXAMPLES AND CAN_BUILD_ANDROID_PROJECTS THEN " android_examples " <nl> + IF ( BUILD_ANDROID_EXAMPLES OR INSTALL_ANDROID_EXAMPLES ) AND CAN_BUILD_ANDROID_PROJECTS THEN " android_examples " <nl> ELSE " - " ) <nl> status ( " Applications : " " $ { apps_status } " ) <nl> ocv_build_features_string ( docs_status <nl> mmm a / cmake / android / android_gradle_projects . cmake <nl> ppp b / cmake / android / android_gradle_projects . cmake <nl> macro ( add_android_project target path ) <nl> include ' : $ { __dir } ' <nl> " ) <nl> <nl> - # build apk <nl> - set ( APK_FILE " $ { ANDROID_BUILD_BASE_DIR } / $ { __dir } / build / outputs / apk / release / $ { __dir } - $ { ANDROID_ABI } - release - unsigned . apk " ) <nl> - ocv_update ( OPENCV_GRADLE_VERBOSE_OPTIONS " - i " ) <nl> - add_custom_command ( <nl> - OUTPUT " $ { APK_FILE } " " $ { OPENCV_DEPHELPER } / android_sample_ $ { __dir } " <nl> - COMMAND . / gradlew $ { OPENCV_GRADLE_VERBOSE_OPTIONS } " $ { __dir } : assemble " <nl> - COMMAND $ { CMAKE_COMMAND } - E touch " $ { OPENCV_DEPHELPER } / android_sample_ $ { __dir } " <nl> - WORKING_DIRECTORY " $ { ANDROID_BUILD_BASE_DIR } " <nl> - DEPENDS $ { depends } opencv_java_android <nl> - COMMENT " Building OpenCV Android sample project : $ { __dir } " <nl> - ) <nl> + if ( BUILD_ANDROID_EXAMPLES ) <nl> + # build apk <nl> + set ( APK_FILE " $ { ANDROID_BUILD_BASE_DIR } / $ { __dir } / build / outputs / apk / release / $ { __dir } - $ { ANDROID_ABI } - release - unsigned . apk " ) <nl> + ocv_update ( OPENCV_GRADLE_VERBOSE_OPTIONS " - i " ) <nl> + add_custom_command ( <nl> + OUTPUT " $ { APK_FILE } " " $ { OPENCV_DEPHELPER } / android_sample_ $ { __dir } " <nl> + COMMAND . / gradlew $ { OPENCV_GRADLE_VERBOSE_OPTIONS } " $ { __dir } : assemble " <nl> + COMMAND $ { CMAKE_COMMAND } - E touch " $ { OPENCV_DEPHELPER } / android_sample_ $ { __dir } " <nl> + WORKING_DIRECTORY " $ { ANDROID_BUILD_BASE_DIR } " <nl> + DEPENDS $ { depends } opencv_java_android <nl> + COMMENT " Building OpenCV Android sample project : $ { __dir } " <nl> + ) <nl> + else ( ) # install only <nl> + # copy samples <nl> + add_custom_command ( <nl> + OUTPUT " $ { OPENCV_DEPHELPER } / android_sample_ $ { __dir } " <nl> + COMMAND $ { CMAKE_COMMAND } - E touch " $ { OPENCV_DEPHELPER } / android_sample_ $ { __dir } " <nl> + WORKING_DIRECTORY " $ { ANDROID_BUILD_BASE_DIR } " <nl> + DEPENDS $ { depends } opencv_java_android <nl> + COMMENT " Copying OpenCV Android sample project : $ { __dir } " <nl> + ) <nl> + endif ( ) <nl> + <nl> file ( REMOVE " $ { OPENCV_DEPHELPER } / android_sample_ $ { __dir } " ) # force rebuild after CMake run <nl> <nl> add_custom_target ( android_sample_ $ { __dir } ALL DEPENDS " $ { OPENCV_DEPHELPER } / android_sample_ $ { __dir } " SOURCES " $ { ANDROID_SAMPLE_MANIFEST_PATH } " ) <nl> mmm a / platforms / android / build_sdk . py <nl> ppp b / platforms / android / build_sdk . 
py <nl> def __init__ ( self , workdir , opencvdir , config ) : <nl> self . ninja_path = self . get_ninja ( ) <nl> self . debug = True if config . debug else False <nl> self . debug_info = True if config . debug_info else False <nl> + self . no_samples_build = True if config . no_samples_build else False <nl> <nl> def get_cmake ( self ) : <nl> if not self . config . use_android_buildtools and check_executable ( [ ' cmake ' , ' - - version ' ] ) : <nl> def build_library ( self , abi , do_install ) : <nl> BUILD_TESTS = " OFF " , <nl> BUILD_PERF_TESTS = " OFF " , <nl> BUILD_DOCS = " OFF " , <nl> - BUILD_ANDROID_EXAMPLES = " ON " , <nl> + BUILD_ANDROID_EXAMPLES = ( " OFF " if self . no_samples_build else " ON " ) , <nl> INSTALL_ANDROID_EXAMPLES = " ON " , <nl> ) <nl> if self . ninja_path ! = ' ninja ' : <nl> def build_library ( self , abi , do_install ) : <nl> execute ( cmd ) <nl> # full parallelism for C + + compilation tasks <nl> execute ( [ self . ninja_path , " opencv_modules " ] ) <nl> - # limit parallelism for Gradle steps ( avoid huge memory consumption ) <nl> - execute ( [ self . ninja_path , ' - j3 ' , " install " if ( self . debug_info or self . debug ) else " install / strip " ] ) <nl> + # limit parallelism for building samples ( avoid huge memory consumption ) <nl> + if self . no_samples_build : <nl> + execute ( [ self . ninja_path , " install " if ( self . debug_info or self . debug ) else " install / strip " ] ) <nl> + else : <nl> + execute ( [ self . ninja_path , " - j1 " if ( self . debug_info or self . debug ) else " - j3 " , " install " if ( self . debug_info or self . debug ) else " install / strip " ] ) <nl> <nl> def build_javadoc ( self ) : <nl> classpaths = [ ] <nl> def gather_results ( self ) : <nl> parser . add_argument ( ' - - force_opencv_toolchain ' , action = " store_true " , help = " Do not use toolchain from Android NDK " ) <nl> parser . add_argument ( ' - - debug ' , action = " store_true " , help = " Build ' Debug ' binaries ( CMAKE_BUILD_TYPE = Debug ) " ) <nl> parser . add_argument ( ' - - debug_info ' , action = " store_true " , help = " Build with debug information ( useful for Release mode : BUILD_WITH_DEBUG_INFO = ON ) " ) <nl> + parser . add_argument ( ' - - no_samples_build ' , action = " store_true " , help = " Do not build samples ( speeds up build ) " ) <nl> args = parser . parse_args ( ) <nl> <nl> log . basicConfig ( format = ' % ( message ) s ' , level = log . DEBUG ) <nl> mmm a / platforms / android / gradle - wrapper / gradle . properties <nl> ppp b / platforms / android / gradle - wrapper / gradle . properties <nl> <nl> <nl> # Specifies the JVM arguments used for the daemon process . <nl> # The setting is particularly useful for tweaking memory settings . <nl> - org . gradle . jvmargs = - Xmx1536m <nl> + org . gradle . jvmargs = - Xmx2g <nl> <nl> # When configured , Gradle will run in incubating parallel mode . <nl> # This option should only be used with decoupled projects . More details , visit <nl> mmm a / samples / CMakeLists . txt <nl> ppp b / samples / CMakeLists . 
txt <nl> endif ( ) <nl> if ( UNIX AND NOT ANDROID AND ( HAVE_VA OR HAVE_VA_INTEL ) ) <nl> add_subdirectory ( va_intel ) <nl> endif ( ) <nl> - if ( ANDROID AND BUILD_ANDROID_EXAMPLES ) <nl> + if ( ANDROID AND ( BUILD_ANDROID_EXAMPLES OR INSTALL_ANDROID_EXAMPLES ) ) <nl> add_subdirectory ( android ) <nl> endif ( ) <nl> if ( INSTALL_PYTHON_EXAMPLES ) <nl> | Merge pull request from komakai : no_samples_build - option | opencv/opencv | b69bf8a8972fe464f5bf2a15088b1018ea9c0843 | 2019-07-22T17:38:37Z |
mmm a / src / citra_qt / debugger / graphics / graphics_vertex_shader . cpp <nl> ppp b / src / citra_qt / debugger / graphics / graphics_vertex_shader . cpp <nl> QVariant GraphicsVertexShaderModel : : data ( const QModelIndex & index , int role ) con <nl> print_input ( output , src1 , swizzle . negate_src1 , <nl> SelectorToString ( swizzle . src1_selector ) ) ; <nl> AlignToColumn ( kInputOperandColumnWidth ) ; <nl> - if ( src_is_inverted ) { <nl> - print_input ( output , src2 , swizzle . negate_src2 , <nl> - SelectorToString ( swizzle . src2_selector ) ) ; <nl> - } else { <nl> - print_input ( output , src2 , swizzle . negate_src2 , <nl> - SelectorToString ( swizzle . src2_selector ) , true , <nl> - instr . mad . AddressRegisterName ( ) ) ; <nl> - } <nl> + print_input ( output , src2 , swizzle . negate_src2 , <nl> + SelectorToString ( swizzle . src2_selector ) , true , <nl> + src_is_inverted ? " " : instr . mad . AddressRegisterName ( ) ) ; <nl> AlignToColumn ( kInputOperandColumnWidth ) ; <nl> - if ( src_is_inverted ) { <nl> - print_input ( output , src3 , swizzle . negate_src3 , <nl> - SelectorToString ( swizzle . src3_selector ) , true , <nl> - instr . mad . AddressRegisterName ( ) ) ; <nl> - } else { <nl> - print_input ( output , src3 , swizzle . negate_src3 , <nl> - SelectorToString ( swizzle . src3_selector ) ) ; <nl> - } <nl> + print_input ( output , src3 , swizzle . negate_src3 , <nl> + SelectorToString ( swizzle . src3_selector ) , true , <nl> + src_is_inverted ? instr . mad . AddressRegisterName ( ) : " " ) ; <nl> AlignToColumn ( kInputOperandColumnWidth ) ; <nl> break ; <nl> } <nl> QVariant GraphicsVertexShaderModel : : data ( const QModelIndex & index , int role ) con <nl> SourceRegister src1 = instr . common . GetSrc1 ( src_is_inverted ) ; <nl> print_input ( output , src1 , swizzle . negate_src1 , <nl> swizzle . SelectorToString ( false ) , true , <nl> - instr . common . AddressRegisterName ( ) ) ; <nl> + src_is_inverted ? " " : instr . common . AddressRegisterName ( ) ) ; <nl> AlignToColumn ( kInputOperandColumnWidth ) ; <nl> } <nl> <nl> - / / TODO : In some cases , the Address Register is used as an index for SRC2 <nl> - / / instead of SRC1 <nl> if ( opcode_info . subtype & OpCode : : Info : : Src2 ) { <nl> SourceRegister src2 = instr . common . GetSrc2 ( src_is_inverted ) ; <nl> print_input ( output , src2 , swizzle . negate_src2 , <nl> - swizzle . SelectorToString ( true ) ) ; <nl> + swizzle . SelectorToString ( true ) , true , <nl> + src_is_inverted ? instr . common . AddressRegisterName ( ) : " " ) ; <nl> AlignToColumn ( kInputOperandColumnWidth ) ; <nl> } <nl> break ; <nl> | debugger / shader : fix address register for reverted arithmetic op | yuzu-emu/yuzu | 8375fd2aba3d700bf8b5269820f3b7cb93c56c8c | 2017-07-20T20:12:08Z |
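The Citra change above collapses duplicated if/else branches in the shader disassembler: for inverted instruction encodings the address-register offset applies to the other source operand, so each branch pair reduces to a single call with a conditional argument. A simplified C++ sketch of that refactor; print_input and the integer operands stand in for the disassembler's real types:

    #include <iostream>
    #include <string>

    // An empty address-register name means "this operand is not indexed".
    void print_input(std::ostream& os, int reg, const std::string& addr_reg) {
        os << "src" << reg;
        if (!addr_reg.empty()) os << "[" << addr_reg << "]";
        os << ' ';
    }

    void print_pair(std::ostream& os, bool src_is_inverted,
                    int src2, int src3, const std::string& addr_reg) {
        // Normal form: the address register indexes src2.
        // Inverted form: it indexes src3 instead.
        print_input(os, src2, src_is_inverted ? "" : addr_reg);
        print_input(os, src3, src_is_inverted ? addr_reg : "");
    }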
mmm a / servers / physics / body_pair_sw . cpp <nl> ppp b / servers / physics / body_pair_sw . cpp <nl> void BodyPairSW : : validate_contacts ( ) { <nl> } <nl> } <nl> <nl> + <nl> + bool BodyPairSW : : _test_ccd ( float p_step , BodySW * p_A , int p_shape_A , const Transform & p_xform_A , BodySW * p_B , int p_shape_B , const Transform & p_xform_B ) { <nl> + <nl> + <nl> + <nl> + Vector3 motion = p_A - > get_linear_velocity ( ) * p_step ; <nl> + real_t mlen = motion . length ( ) ; <nl> + if ( mlen < CMP_EPSILON ) <nl> + return false ; <nl> + <nl> + Vector3 mnormal = motion / mlen ; <nl> + <nl> + real_t min , max ; <nl> + p_A - > get_shape ( p_shape_A ) - > project_range ( mnormal , p_xform_A , min , max ) ; <nl> + bool fast_object = mlen > ( max - min ) * 0 . 3 ; / / going too fast in that direction <nl> + <nl> + if ( ! fast_object ) { / / did it move enough in this direction to even attempt raycast ? let ' s say it should move more than 1 / 3 the size of the object in that axis <nl> + return false ; <nl> + } <nl> + <nl> + / / cast a segment from support in motion normal , in the same direction of motion by motion length <nl> + / / support is the worst case collision point , so real collision happened before <nl> + int a ; <nl> + Vector3 s = p_A - > get_shape ( p_shape_A ) - > get_support ( p_xform_A . basis . xform ( mnormal ) . normalized ( ) ) ; <nl> + Vector3 from = p_xform_A . xform ( s ) ; <nl> + Vector3 to = from + motion ; <nl> + <nl> + Transform from_inv = p_xform_B . affine_inverse ( ) ; <nl> + <nl> + Vector3 local_from = from_inv . xform ( from - mnormal * mlen * 0 . 1 ) ; / / start from a little inside the bounding box <nl> + Vector3 local_to = from_inv . xform ( to ) ; <nl> + <nl> + Vector3 rpos , rnorm ; <nl> + if ( ! p_B - > get_shape ( p_shape_B ) - > intersect_segment ( local_from , local_to , rpos , rnorm ) ) { <nl> + return false ; <nl> + } <nl> + <nl> + / / shorten the linear velocity so it does not hit , but gets close enough , next frame will hit softly or soft enough <nl> + Vector3 hitpos = p_xform_B . xform ( rpos ) ; <nl> + <nl> + float newlen = hitpos . distance_to ( from ) - ( max - min ) * 0 . 01 ; <nl> + p_A - > set_linear_velocity ( ( mnormal * newlen ) / p_step ) ; <nl> + <nl> + return true ; <nl> + } <nl> + <nl> + <nl> bool BodyPairSW : : setup ( float p_step ) { <nl> <nl> / / cannot collide <nl> bool BodyPairSW : : setup ( float p_step ) { <nl> bool collided = CollisionSolverSW : : solve_static ( shape_A_ptr , xform_A , shape_B_ptr , xform_B , _contact_added_callback , this , & sep_axis ) ; <nl> this - > collided = collided ; <nl> <nl> - if ( ! collided ) <nl> + <nl> + if ( ! collided ) { <nl> + <nl> + / / test ccd ( currently just a raycast ) <nl> + <nl> + if ( A - > is_continuous_collision_detection_enabled ( ) & & A - > get_mode ( ) > PhysicsServer : : BODY_MODE_KINEMATIC & & B - > get_mode ( ) < = PhysicsServer : : BODY_MODE_KINEMATIC ) { <nl> + _test_ccd ( p_step , A , shape_A , xform_A , B , shape_B , xform_B ) ; <nl> + } <nl> + <nl> + if ( B - > is_continuous_collision_detection_enabled ( ) & & B - > get_mode ( ) > PhysicsServer : : BODY_MODE_KINEMATIC & & A - > get_mode ( ) < = PhysicsServer : : BODY_MODE_KINEMATIC ) { <nl> + _test_ccd ( p_step , B , shape_B , xform_B , A , shape_A , xform_A ) ; <nl> + } <nl> + <nl> return false ; <nl> + } <nl> <nl> <nl> <nl> mmm a / servers / physics / body_pair_sw . h <nl> ppp b / servers / physics / body_pair_sw . 
h <nl> class BodyPairSW : public ConstraintSW { <nl> void contact_added_callback ( const Vector3 & p_point_A , const Vector3 & p_point_B ) ; <nl> <nl> void validate_contacts ( ) ; <nl> + bool _test_ccd ( float p_step , BodySW * p_A , int p_shape_A , const Transform & p_xform_A , BodySW * p_B , int p_shape_B , const Transform & p_xform_B ) ; <nl> <nl> SpaceSW * space ; <nl> <nl> mmm a / servers / physics_2d / body_pair_2d_sw . cpp <nl> ppp b / servers / physics_2d / body_pair_2d_sw . cpp <nl> bool BodyPair2DSW : : _test_ccd ( float p_step , Body2DSW * p_A , int p_shape_A , const Mat <nl> p_A - > get_shape ( p_shape_A ) - > project_rangev ( mnormal , p_xform_A , min , max ) ; <nl> bool fast_object = mlen > ( max - min ) * 0 . 3 ; / / going too fast in that direction <nl> <nl> - if ( fast_object ) { / / did it move enough in this direction to even attempt raycast ? let ' s say it should move more than 1 / 3 the size of the object in that axis <nl> + if ( ! fast_object ) { / / did it move enough in this direction to even attempt raycast ? let ' s say it should move more than 1 / 3 the size of the object in that axis <nl> return false ; <nl> } <nl> <nl> | - CCD in 3D physics was not working ( code was not even there ! ) re - added , fixes 1067 | godotengine/godot | f75ae815d51571287892d77414d45e15bfdb849b | 2015-01-06T02:00:35Z |
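The Godot commit above implements CCD as a raycast heuristic: when a body moves more than roughly a third of its own projected extent in one step, cast a segment from its support point along the motion; on a hit, clamp the linear velocity so the body stops just short, letting the next frame resolve a normal contact. A condensed C++ sketch of that logic; Vector3 and the precomputed hit distance are simplifications of the engine's shape-query machinery:

    #include <cmath>

    struct Vector3 {
        float x = 0, y = 0, z = 0;
        Vector3 operator*(float s) const { return {x * s, y * s, z * s}; }
        float length() const { return std::sqrt(x * x + y * y + z * z); }
    };

    // `proj_extent` is the body's extent projected on the motion direction
    // (max - min in the original); `hit_distance` is the segment-vs-shape
    // hit distance, negative on a miss.
    bool test_ccd(float step, const Vector3& velocity, float proj_extent,
                  float hit_distance, Vector3* out_velocity) {
        Vector3 motion = velocity * step;
        float mlen = motion.length();
        if (mlen < 1e-6f) return false;                // not moving
        if (mlen <= proj_extent * 0.3f) return false;  // slow enough for discrete steps
        if (hit_distance < 0.0f) return false;         // ray missed: no tunneling risk
        // Stop just short of the surface; the next frame collides softly.
        float newlen = hit_distance - proj_extent * 0.01f;
        Vector3 mnormal = motion * (1.0f / mlen);
        *out_velocity = mnormal * (newlen / step);
        return true;
    }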
mmm a / test / cpp / naming / cancel_ares_query_test . cc <nl> ppp b / test / cpp / naming / cancel_ares_query_test . cc <nl> void TestCancelDuringActiveQuery ( <nl> gpr_free ( client_target ) ; <nl> grpc_completion_queue * cq = grpc_completion_queue_create_for_next ( nullptr ) ; <nl> cq_verifier * cqv = cq_verifier_create ( cq ) ; <nl> - gpr_timespec deadline = grpc_timeout_milliseconds_to_deadline ( 10 ) ; <nl> + gpr_timespec deadline = grpc_timeout_milliseconds_to_deadline ( 100 ) ; <nl> grpc_call * call = grpc_channel_create_call ( <nl> client , nullptr , GRPC_PROPAGATE_DEFAULTS , cq , <nl> grpc_slice_from_static_string ( " / foo " ) , nullptr , deadline , nullptr ) ; <nl> | Merge pull request from apolcyn / raise_deadline | grpc/grpc | e0aa50d541a91217adfe82736ee7f311facfb98b | 2019-11-12T21:53:24Z |
mmm a / admin / static / coffee / navbar . html <nl> ppp b / admin / static / coffee / navbar . html <nl> <nl> < li id = " nav - dashboard " > < a href = " # dashboard " > Dashboard < / a > < / li > <nl> < li id = " nav - namespaces " > < a href = " # namespaces " > Namespaces < / a > < / li > <nl> < li id = " nav - servers " > < a href = " # servers " > Servers < / a > < / li > <nl> + < li id = " nav - dataexplorer " > < a href = " # dataexplorer " > Data explorer < / a > < / li > <nl> < li id = " nav - logs " > < a href = " # logs " > Logs < / a > < / li > <nl> < / ul > <nl> <nl> mmm a / admin / static / coffee / router . coffee <nl> ppp b / admin / static / coffee / router . coffee <nl> class BackboneCluster extends Backbone . Router <nl> ' dashboard ' : ' dashboard ' <nl> ' resolve_issues ' : ' resolve_issues ' <nl> ' logs ' : ' logs ' <nl> + ' dataexplorer ' : ' dataexplorer ' <nl> <nl> initialize : - > <nl> log_initial ' ( initializing ) router ' <nl> class BackboneCluster extends Backbone . Router <nl> @ current_view = new LogView . Container <nl> @ $ container . html @ current_view . render ( ) . el <nl> <nl> + dataexplorer : - > <nl> + log_router ' / dataexplorer ' <nl> + clear_modals ( ) <nl> + @ current_view . destroy ( ) <nl> + @ current_view = new DataExplorerView . Container <nl> + @ $ container . html @ current_view . render ( ) . el <nl> + <nl> + <nl> namespace : ( id ) - > <nl> log_router ' / namespaces / ' + id <nl> clear_modals ( ) <nl> mmm a / src / Makefile <nl> ppp b / src / Makefile <nl> COFFEE_SOURCES : = $ ( patsubst % , $ ( WEB_SOURCE_DIR ) / static / coffee / % , \ <nl> namespaces / index . coffee namespaces / replicas . coffee namespaces / shards . coffee namespaces / machines . coffee namespaces / namespace . coffee \ <nl> servers / index . coffee servers / machine . coffee servers / datacenter . coffee \ <nl> dashboard . coffee \ <nl> + dataexplorer . coffee \ <nl> sidebar . coffee \ <nl> resolve_issues . coffee \ <nl> log_view . coffee \ <nl> | Add the data explorer page on the nav bar | rethinkdb/rethinkdb | 070b6e2464adf73860c4922771b0647718d52a35 | 2012-06-22T23:59:05Z |
mmm a / src / python / grpcio / grpc / _links / invocation . py <nl> ppp b / src / python / grpcio / grpc / _links / invocation . py <nl> def _on_finish_event ( self , operation_id , event , rpc_state ) : <nl> termination = links . Ticket . Termination . CANCELLATION <nl> elif event . status . code is _intermediary_low . Code . DEADLINE_EXCEEDED : <nl> termination = links . Ticket . Termination . EXPIRATION <nl> + elif event . status . code is _intermediary_low . Code . UNKNOWN : <nl> + termination = links . Ticket . Termination . LOCAL_FAILURE <nl> else : <nl> termination = links . Ticket . Termination . TRANSMISSION_FAILURE <nl> ticket = links . Ticket ( <nl> def invocation_link ( channel , host , request_serializers , response_deserializers ) : <nl> " " " Creates an InvocationLink . <nl> <nl> Args : <nl> - channel : A channel for use by the link . <nl> + channel : An _intermediary_low . Channel for use by the link . <nl> host : The host to specify when invoking RPCs . <nl> request_serializers : A dict from group - method pair to request object <nl> serialization behavior . <nl> mmm a / src / python / grpcio / grpc / _links / service . py <nl> ppp b / src / python / grpcio / grpc / _links / service . py <nl> <nl> from grpc . framework . foundation import relay <nl> from grpc . framework . interfaces . links import links <nl> <nl> + _TERMINATION_KIND_TO_CODE = { <nl> + links . Ticket . Termination . COMPLETION : _intermediary_low . Code . OK , <nl> + links . Ticket . Termination . CANCELLATION : _intermediary_low . Code . CANCELLED , <nl> + links . Ticket . Termination . EXPIRATION : <nl> + _intermediary_low . Code . DEADLINE_EXCEEDED , <nl> + links . Ticket . Termination . SHUTDOWN : _intermediary_low . Code . UNAVAILABLE , <nl> + links . Ticket . Termination . RECEPTION_FAILURE : _intermediary_low . Code . INTERNAL , <nl> + links . Ticket . Termination . TRANSMISSION_FAILURE : <nl> + _intermediary_low . Code . INTERNAL , <nl> + links . Ticket . Termination . LOCAL_FAILURE : _intermediary_low . Code . UNKNOWN , <nl> + links . Ticket . Termination . REMOTE_FAILURE : _intermediary_low . Code . UNKNOWN , <nl> + } <nl> + <nl> <nl> @ enum . unique <nl> class _Read ( enum . Enum ) : <nl> def _metadatafy ( call , metadata ) : <nl> call . add_metadata ( metadata_key , metadata_value ) <nl> <nl> <nl> + def _status ( termination_kind , code , details ) : <nl> + effective_details = b ' ' if details is None else details <nl> + if code is None : <nl> + effective_code = _TERMINATION_KIND_TO_CODE [ termination_kind ] <nl> + else : <nl> + effective_code = code <nl> + return _intermediary_low . Status ( effective_code , effective_details ) <nl> + <nl> + <nl> class _Kernel ( object ) : <nl> <nl> def __init__ ( self , request_deserializers , response_serializers , ticket_relay ) : <nl> def _on_write_event ( self , event ) : <nl> if rpc_state . high_write is _HighWrite . CLOSED : <nl> if rpc_state . terminal_metadata is not None : <nl> _metadatafy ( call , rpc_state . terminal_metadata ) <nl> - call . status ( <nl> - _intermediary_low . Status ( rpc_state . code , rpc_state . message ) , call ) <nl> + status = _status ( <nl> + links . Ticket . Termination . COMPLETION , rpc_state . code , <nl> + rpc_state . message ) <nl> + call . status ( status , call ) <nl> rpc_state . low_write = _LowWrite . CLOSED <nl> else : <nl> ticket = links . Ticket ( <nl> def add_ticket ( self , ticket ) : <nl> if rpc_state . low_write is _LowWrite . OPEN : <nl> if rpc_state . 
terminal_metadata is not None : <nl> _metadatafy ( call , rpc_state . terminal_metadata ) <nl> - status = _intermediary_low . Status ( <nl> - _intermediary_low . Code . OK <nl> - if rpc_state . code is None else rpc_state . code , <nl> - ' ' if rpc_state . message is None else rpc_state . message ) <nl> + status = _status ( <nl> + links . Ticket . Termination . COMPLETION , rpc_state . code , <nl> + rpc_state . message ) <nl> call . status ( status , call ) <nl> rpc_state . low_write = _LowWrite . CLOSED <nl> elif ticket . termination is not None : <nl> - call . cancel ( ) <nl> + if rpc_state . terminal_metadata is not None : <nl> + _metadatafy ( call , rpc_state . terminal_metadata ) <nl> + status = _status ( <nl> + ticket . termination , rpc_state . code , rpc_state . message ) <nl> + call . status ( status , call ) <nl> self . _rpc_states . pop ( call , None ) <nl> <nl> def add_port ( self , port , server_credentials ) : <nl> | Status code conformance in grpc . _links | grpc/grpc | 7b92bae20e19c6e94f9652050c50027a0fb5fe0c | 2015-08-25T00:56:25Z |
mmm a / . gitignore <nl> ppp b / . gitignore <nl> website / package - lock . json <nl> <nl> # clangd cache <nl> / . clangd <nl> + / . cache <nl> <nl> / compile_commands . json <nl> <nl> mmm a / src / Functions / abtesting . h <nl> ppp b / src / Functions / abtesting . h <nl> <nl> <nl> # if ! defined ( ARCADIA_BUILD ) & & USE_STATS <nl> <nl> + # include < common / types . h > <nl> + # include < Common / PODArray . h > <nl> <nl> - # include < iostream > <nl> - # include < vector > <nl> - # include < algorithm > <nl> - <nl> - # include < common / types . h > <nl> - # include < Common / PODArray . h > <nl> + # include < algorithm > <nl> + # include < iostream > <nl> + # include < vector > <nl> <nl> <nl> namespace DB <nl> { <nl> <nl> - typedef struct _Variant <nl> + struct Variant <nl> { <nl> Float64 x ; <nl> Float64 y ; <nl> Float64 beats_control ; <nl> Float64 best ; <nl> - } Variant ; <nl> + } ; <nl> <nl> using Variants = PODArray < Variant > ; <nl> <nl> mmm a / src / Functions / tests / gtest_abtesting . cpp <nl> ppp b / src / Functions / tests / gtest_abtesting . cpp <nl> <nl> - # include < gtest / gtest . h > <nl> - <nl> # include < Functions / abtesting . h > <nl> - # include < iostream > <nl> - # include < stdio . h > <nl> + <nl> + # if ! defined ( ARCADIA_BUILD ) & & USE_STATS <nl> + <nl> + # include < gtest / gtest . h > <nl> <nl> using namespace DB ; <nl> <nl> TEST ( BayesAB , gamma ) <nl> ASSERT_EQ ( 0 , max ) ; <nl> } <nl> <nl> + # endif <nl> | Merge pull request from abyss7 / fix - build | ClickHouse/ClickHouse | 41f1fd8fa0a6b4aadcb0fd27e5e03dff57f919a8 | 2020-10-15T17:48:53Z |
mmm a / docker / build / installers / install_poco . sh <nl> ppp b / docker / build / installers / install_poco . sh <nl> ldconfig <nl> <nl> # clean up <nl> rm - rf poco - $ { VERSION } - release . tar . gz poco - poco - $ { VERSION } - release <nl> + apt_get_remove \ <nl> + libssl - dev <nl> | Docker : remove build - deps for poco | ApolloAuto/apollo | 829d95b2cf61bbf66d1026794811ded44ae993e3 | 2020-11-16T05:25:34Z |