Columns (all strings, with minimum and maximum lengths):
  diff: length 41 to 2.03M
  msg:  length 1 to 1.5k
  repo: length 5 to 40
  sha:  length 40 (fixed)
  time: length 20 (fixed)
mmm a / scripting / v8_db . cpp <nl> ppp b / scripting / v8_db . cpp <nl> namespace mongo { <nl> DDD ( " collectionFallback [ " < < name < < " ] " ) ; <nl> <nl> v8 : : Handle < v8 : : Value > real = info . This ( ) - > GetPrototype ( ) - > ToObject ( ) - > Get ( name ) ; <nl> - if ( ! real - > IsUndefined ( ) ) <nl> + if ( ! real - > IsUndefined ( ) ) <nl> return real ; <nl> <nl> + if ( info . This ( ) - > HasRealNamedProperty ( name ) ) { <nl> + return info . This ( ) - > GetRealNamedProperty ( name ) ; <nl> + } <nl> + <nl> string sname = toSTLString ( name ) ; <nl> if ( sname [ 0 ] = = ' _ ' ) { <nl> if ( ! ( info . This ( ) - > HasRealNamedProperty ( name ) ) ) <nl> namespace mongo { <nl> v8 : : Handle < v8 : : Value > argv [ 1 ] ; <nl> argv [ 0 ] = name ; <nl> <nl> - return f - > Call ( info . This ( ) , 1 , argv ) ; <nl> + v8 : : Local < v8 : : Value > coll = f - > Call ( info . This ( ) , 1 , argv ) ; <nl> + / / cache collection for reuse <nl> + info . This ( ) - > Set ( name , coll ) ; <nl> + return coll ; <nl> } <nl> <nl> v8 : : Handle < v8 : : Value > dbQueryIndexAccess ( unsigned int index , const v8 : : AccessorInfo & info ) { <nl>
msg:  SERVER-3986: v8 needs to cache DBCollection objects in db object
repo: mongodb/mongo
sha:  98bb7a2142975f658f3bccc2878287b5828da43c
time: 2011-09-30T19:46:38Z
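The change above makes the collection fallback handler store the DBCollection it builds back onto the db object, so later property accesses return the cached object instead of re-invoking the fallback. A minimal C++ sketch of the same cache-on-first-access idea, using hypothetical Database/Collection types rather than the shell's V8 handles (the real fix publishes the result via info.This()->Set(name, coll)):

#include <map>
#include <memory>
#include <string>

// Hypothetical stand-ins for the shell's db and collection objects.
struct Collection {
    explicit Collection(std::string ns) : ns(std::move(ns)) {}
    std::string ns;
};

class Database {
public:
    explicit Database(std::string name) : _name(std::move(name)) {}

    // Return the cached collection if one was already built for this name;
    // otherwise build it once and cache it so repeated lookups hand back
    // the very same object.
    std::shared_ptr<Collection> getCollection(const std::string& name) {
        auto it = _cache.find(name);
        if (it != _cache.end())
            return it->second;
        auto coll = std::make_shared<Collection>(_name + "." + name);
        _cache.emplace(name, coll);
        return coll;
    }

private:
    std::string _name;
    std::map<std::string, std::shared_ptr<Collection>> _cache;
};

The essential point mirrors the diff: compute the collection once along the fallback path, then store it on the owning object so the fallback is not hit again for that name.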
mmm a / test / Prototypes / Integers . swift . gyb <nl> ppp b / test / Prototypes / Integers . swift . gyb <nl> public protocol LLVMFixedWidthInteger : LLVMInteger { <nl> } <nl> <nl> extension LLVMFixedWidthInteger { <nl> + @ transparent <nl> public var countWords : LLVMWord { <nl> return Self . bitWidth . add ( <nl> LLVMWord ( $ { word_bits } ) . sub ( 1 ) ) . udiv ( $ { word_bits } ) <nl> } <nl> + <nl> + @ transparent <nl> public func nthWord ( n : LLVMWord ) - > LLVMWord { <nl> return self . lshr ( Self ( truncating : n ) . mul ( $ { word_bits } ) ) . lowWord <nl> } <nl> public struct LLVMInt $ { bits } : LLVMFixedWidthInteger { <nl> public typealias Narrowed = LLVMInt $ { narrowedBits } <nl> public typealias Storage = Builtin . Int $ { bits } <nl> <nl> + @ transparent <nl> public init ( ) { <nl> let zero : Builtin . Int $ { bits } = Builtin . zeroInitializer ( ) <nl> self . storage = zero <nl> } <nl> <nl> + @ transparent <nl> public init ( _builtinIntegerLiteral x : Builtin . $ { IntLiteral } ) { <nl> storage = Builtin . truncOrBitCast_ $ { IntLiteral } _Int $ { bits } ( x ) <nl> } <nl> <nl> / / / Create an instance initialized to ` value ` . <nl> + @ transparent <nl> public init ( integerLiteral value : Self_ ) { <nl> self = value <nl> } <nl> <nl> + @ transparent <nl> public init ( _ storage : Storage ) { <nl> self . storage = storage <nl> } <nl> <nl> public var storage : Storage <nl> <nl> + @ transparent <nl> public static var bitWidth : LLVMWord { return $ { bits } } <nl> <nl> % for operation in builtinBinaryOperations : <nl> + @ transparent <nl> public mutating func $ { operation } _InPlace ( rhs : Self_ ) { <nl> storage = Builtin . $ { operation } _Int $ { bits } ( storage , rhs . storage ) <nl> } <nl> % end <nl> <nl> % for operation in builtinBinaryOperationsWithOverflow : <nl> + @ transparent <nl> public mutating func $ { operation } _InPlaceReturningOverflow ( <nl> rhs : Self_ , trapOverflow : Bool <nl> ) - > Bool { <nl> public struct LLVMInt $ { bits } : LLVMFixedWidthInteger { <nl> % end <nl> <nl> / / / The most significant bit that is set , or - 1 if self = = 0 <nl> + @ transparent <nl> public var mostSignificant1Bit : LLVMWord { <nl> let leadingZeros = Self_ ( _leadingZeros ( storage ) ) . lowWord <nl> return Self_ . bitWidth . sub ( leadingZeros ) . sub ( 1 ) <nl> } <nl> <nl> / / / The most significant bit that is unset , or - 1 if self = = - 1 <nl> + @ transparent <nl> public var mostSignificant0Bit : LLVMWord { <nl> return self . xor ( - 1 ) . mostSignificant1Bit <nl> } <nl> <nl> % for sz in ' sign ' , ' zero ' : <nl> + @ transparent <nl> public var $ { sz } Extended : Extended { <nl> return Extended ( <nl> Builtin . $ { sz [ 0 ] } extOrBitCast_Int $ { bits } _Int $ { extendedBits } ( storage ) <nl> public struct LLVMInt $ { bits } : LLVMFixedWidthInteger { <nl> % end <nl> <nl> % for ( srcSigned , srcS ) in ( ( x , x [ 0 ] . lower ( ) ) for x in [ ' Signed ' , ' Unsigned ' ] ) : <nl> + @ transparent <nl> public func isLessThan $ { srcSigned } ( rhs : Self_ ) - > Bool { <nl> return Bool ( <nl> Builtin . cmp_ $ { srcS } lt_Int $ { bits } ( <nl> public struct LLVMInt $ { bits } : LLVMFixedWidthInteger { <nl> } <nl> <nl> % for ( dstSigned , dstS ) in ( ( x , x [ 0 ] . lower ( ) ) for x in [ ' Signed ' , ' Unsigned ' ] ) : <nl> + @ transparent <nl> public func checkingNarrow $ { srcSigned } To $ { dstSigned } ( ) - > Narrowed { <nl> let ( result , error ) <nl> = Builtin . 
$ { srcS } _to_ $ { dstS } _checked_trunc_Int $ { bits } _Int $ { narrowedBits } ( <nl> public struct LLVMInt $ { bits } : LLVMFixedWidthInteger { <nl> % end <nl> % end <nl> <nl> + @ transparent <nl> public var assumingNonNegative : Self_ { <nl> return Self_ ( Builtin . assumeNonNegative_Int $ { bits } ( storage ) ) <nl> } <nl> <nl> + @ transparent <nl> public func checkingNonNegative ( ) - > Self_ { <nl> let ( result , error ) = Builtin . s_to_u_checked_conversion_Int $ { bits } ( storage ) <nl> Builtin . condfail ( error ) <nl> return Self_ ( result ) <nl> } <nl> <nl> + @ transparent <nl> public var truncated : Narrowed { <nl> return Narrowed ( <nl> Builtin . truncOrBitCast_Int $ { bits } _Int $ { narrowedBits } ( storage ) <nl> ) <nl> } <nl> <nl> + @ transparent <nl> public init ( truncating other : LLVMWord ) { <nl> self . init ( <nl> Builtin . $ { fromWord } OrBitCast_ $ { IntWord } _Int $ { bits } ( other . storage ) ) <nl> public func = = ( lhs : LLVMInt $ { bits } , rhs : LLVMInt $ { bits } ) - > Bool { <nl> <nl> <nl> extension LLVMInt $ { bits } { <nl> + @ transparent <nl> public var lowWord : LLVMWord { <nl> return LLVMWord ( <nl> Builtin . $ { toWord } OrBitCast_Int $ { bits } _ $ { IntWord } ( self . storage ) ) <nl> public struct LLVMBigInt : LLVMInteger { <nl> public typealias Self_ = LLVMBigInt <nl> <nl> / / / Creates an instance with value 0 <nl> + @ inline ( __always ) <nl> public init ( ) { <nl> words = [ ] <nl> } <nl> <nl> + @ inline ( __always ) <nl> public init ( _builtinIntegerLiteral x : Builtin . $ { IntLiteral } ) { <nl> let isNegative = Bool ( <nl> Builtin . cmp_slt_ $ { IntLiteral } ( <nl> public struct LLVMBigInt : LLVMInteger { <nl> } <nl> <nl> / / / Create an instance initialized to ` value ` . <nl> + @ inline ( __always ) <nl> public init ( integerLiteral value : Self_ ) { <nl> self = value <nl> } <nl> public struct LLVMBigInt : LLVMInteger { <nl> % end <nl> <nl> / / / The most significant bit that is set , or - 1 if self = = 0 <nl> + @ transparent / / FIXME : should be @ inline ( __always ) <nl> public var mostSignificant1Bit : LLVMWord { <nl> fatalError ( " implement me " ) <nl> } <nl> <nl> / / / The most significant bit that is not set , or - 1 if self = = - 1 <nl> + @ transparent / / FIXME : should be @ inline ( __always ) <nl> public var mostSignificant0Bit : LLVMWord { <nl> fatalError ( " implement me " ) <nl> } <nl> <nl> + @ inline ( __always ) <nl> public func isLessThanSigned ( rhs : Self_ ) - > Bool { <nl> fatalError ( " implement me " ) <nl> } <nl> + @ inline ( __always ) <nl> public func isLessThanUnsigned ( rhs : Self_ ) - > Bool { <nl> fatalError ( " implement me " ) <nl> } <nl> public struct LLVMBigInt : LLVMInteger { <nl> / / / is not set . <nl> / / / <nl> / / / - Requires : the high bit is not set . 
<nl> + @ transparent / / FIXME : should be @ inline ( __always ) <nl> public var assumingNonNegative : Self_ { <nl> fatalError ( " implement me " ) <nl> } <nl> <nl> / / / Returns ` self ` , trapping if the high bit is set <nl> + @ inline ( __always ) <nl> public func checkingNonNegative ( ) - > Self_ { <nl> fatalError ( " implement me " ) <nl> } <nl> <nl> + @ transparent / / FIXME : should be @ inline ( __always ) <nl> public var signExtended : Extended { return self } <nl> + @ transparent / / FIXME : should be @ inline ( __always ) <nl> public var zeroExtended : Extended { return self } <nl> <nl> + @ transparent / / FIXME : should be @ inline ( __always ) <nl> public var truncated : Truncated { return self } <nl> <nl> internal var words : ContiguousArray < LLVMWord > <nl> <nl> + @ transparent / / FIXME : should be @ inline ( __always ) <nl> public var lowWord : LLVMWord { <nl> fatalError ( " implement me " ) <nl> } <nl> <nl> + @ transparent / / FIXME : should be @ inline ( __always ) <nl> public var countWords : LLVMWord { <nl> return LLVMWord ( words . count . value ) <nl> } <nl> <nl> + @ inline ( __always ) <nl> public func nthWord ( n : LLVMWord ) - > LLVMWord { <nl> return LLVMWord ( words [ Int ( n . storage ) ] ) <nl> } <nl> <nl> + @ inline ( __always ) <nl> public init ( truncating other : LLVMWord ) { <nl> words = [ other ] <nl> } <nl>
msg:  [stdlib] Integers Prototype: @transparent / @inline(always)
repo: apple/swift
sha:  9f65201e466941f9f9fab8f76007dd4071a2e5f3
time: 2015-09-12T03:51:28Z
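The commit above marks thin wrappers around Builtin integer operations with @transparent or @inline(__always) so they vanish after inlining. As a rough analogue in C++ terms only (not the Swift attributes themselves), a trivial wrapper can be asked to inline unconditionally; the Word type and the attribute choice here are illustrative assumptions:

#include <cstdint>

// Illustrative analogue: a thin wrapper over a primitive operation that the
// compiler is asked to inline unconditionally, similar in spirit to marking
// the Swift wrappers @transparent / @inline(__always).
struct Word {
    std::uint64_t storage;

    // GCC/Clang-specific always_inline attribute; in-class member function
    // definitions are implicitly inline as well.
    [[gnu::always_inline]] Word add(Word rhs) const {
        return Word{storage + rhs.storage};
    }
};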
mmm a / arangod / HttpServer / HttpServer . cpp <nl> ppp b / arangod / HttpServer / HttpServer . cpp <nl> using namespace arangodb : : rest ; <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> int HttpServer : : sendChunk ( uint64_t taskId , std : : string const & data ) { <nl> - std : : unique_ptr < TaskData > taskData ( new TaskData ( ) ) ; <nl> + auto taskData = std : : make_unique < TaskData > ( ) ; <nl> <nl> taskData - > _taskId = taskId ; <nl> taskData - > _loop = Scheduler : : SCHEDULER - > lookupLoopById ( taskId ) ; <nl> mmm a / lib / Basics / AssocMulti . h <nl> ppp b / lib / Basics / AssocMulti . h <nl> class AssocMulti { <nl> <nl> std : : vector < Element * > * lookupByKey ( UserData * userData , Key const * key , <nl> size_t limit = 0 ) const { <nl> - std : : unique_ptr < std : : vector < Element * > > result ( new std : : vector < Element * > ( ) ) ; <nl> + auto result = std : : make_unique < std : : vector < Element * > > ( ) ; <nl> <nl> / / compute the hash <nl> uint64_t hashByKey = _hashKey ( userData , key ) ; <nl> class AssocMulti { <nl> std : : vector < Element * > * lookupWithElementByKey ( UserData * userData , <nl> Element const * element , <nl> size_t limit = 0 ) const { <nl> - std : : unique_ptr < std : : vector < Element * > > result ( new std : : vector < Element * > ( ) ) ; <nl> + auto result = std : : make_unique < std : : vector < Element * > > ( ) ; <nl> <nl> / / compute the hash <nl> uint64_t hashByKey = _hashElement ( userData , element , true ) ; <nl> class AssocMulti { <nl> <nl> std : : vector < Element * > * lookupWithElementByKeyContinue ( <nl> UserData * userData , Element const * element , size_t limit = 0 ) const { <nl> - std : : unique_ptr < std : : vector < Element * > > result ( new std : : vector < Element * > ( ) ) ; <nl> + auto result = std : : make_unique < std : : vector < Element * > > ( ) ; <nl> <nl> uint64_t hashByKey = _hashElement ( userData , element , true ) ; <nl> Bucket const & b = _buckets [ hashByKey & _bucketsMask ] ; <nl>
msg:  use forwarding
repo: arangodb/arangodb
sha:  6d228eabaa652668fc7e07f62b7dedbf68233e55
time: 2016-02-26T21:49:06Z
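The ArangoDB change replaces the std::unique_ptr<T> p(new T()) spelling with auto p = std::make_unique<T>(), which avoids naming the type twice and forwards constructor arguments. A small self-contained sketch of the two forms (C++14 for std::make_unique):

#include <memory>
#include <vector>

int main() {
    // Before: the element type is spelled twice and new is called directly.
    std::unique_ptr<std::vector<int>> before(new std::vector<int>());

    // After: make_unique builds and wraps the object in one step and
    // forwards any constructor arguments (here: three elements of value 42).
    auto after = std::make_unique<std::vector<int>>(3, 42);

    before->push_back(1);
    after->push_back(2);
    return 0;
}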
mmm a / lib / AST / Decl . cpp <nl> ppp b / lib / AST / Decl . cpp <nl> namespace { <nl> void printInherited ( ArrayRef < TypeLoc > Inherited ) { <nl> if ( Inherited . empty ( ) ) <nl> return ; <nl> - OS < < " inherits : " ; <nl> + OS < < " inherits : " ; <nl> bool First = true ; <nl> for ( auto Super : Inherited ) { <nl> if ( First ) <nl> namespace { <nl> OS < < ' \ n ' ; <nl> printRec ( D ) ; <nl> } <nl> - OS < < " ' ) " ; <nl> + OS < < " ) " ; <nl> } <nl> <nl> void visitClassDecl ( ClassDecl * CD ) { <nl> namespace { <nl> OS < < ' \ n ' ; <nl> printRec ( D ) ; <nl> } <nl> - OS < < " ' ) " ; <nl> + OS < < " ) " ; <nl> } <nl> <nl> void visitPatternBindingDecl ( PatternBindingDecl * PBD ) { <nl> namespace { <nl> <nl> void visitConstructorDecl ( ConstructorDecl * CD ) { <nl> printCommon ( CD , " constructor_decl " , FuncColor ) ; <nl> - OS < < ' \ n ' ; <nl> - if ( CD - > getBody ( ) ) <nl> + if ( CD - > getBody ( ) ) { <nl> + OS < < ' \ n ' ; <nl> printRec ( CD - > getBody ( ) ) ; <nl> + } <nl> OS < < ' ) ' ; <nl> } <nl> <nl>
msg:  A few misc tweaks to -ast-dump for decls.
repo: apple/swift
sha:  b01b27c9c084758a1d5476c494a5af7d0169963f
time: 2012-08-07T22:52:11Z
mmm a / tensorflow / python / ops / image_ops_impl . py <nl> ppp b / tensorflow / python / ops / image_ops_impl . py <nl> <nl> from tensorflow . python . util . tf_export import tf_export <nl> <nl> ops . NotDifferentiable ( ' RandomCrop ' ) <nl> - # TODO ( b / 31222613 ) : This op maybe differentiable , and there maybe <nl> + # TODO ( b / 31222613 ) : This op may be differentiable , and there may be <nl> # latent bugs here . <nl> ops . NotDifferentiable ( ' HSVToRGB ' ) <nl> ops . NotDifferentiable ( ' DrawBoundingBoxes ' ) <nl> ops . NotDifferentiable ( ' SampleDistortedBoundingBox ' ) <nl> ops . NotDifferentiable ( ' SampleDistortedBoundingBoxV2 ' ) <nl> # TODO ( bsteiner ) : Implement the gradient function for extract_glimpse <nl> - # TODO ( b / 31222613 ) : This op maybe differentiable , and there maybe <nl> + # TODO ( b / 31222613 ) : This op may be differentiable , and there may be <nl> # latent bugs here . <nl> ops . NotDifferentiable ( ' ExtractGlimpse ' ) <nl> ops . NotDifferentiable ( ' NonMaxSuppression ' ) <nl> def resize_images_v2 ( images , <nl> For synthetic images ( especially those lacking proper prefiltering ) , less <nl> ringing than Keys cubic kernel but less sharp . <nl> <nl> - Note that near image edges the filtering kernel maybe partially outside the <nl> + Note that near image edges the filtering kernel may be partially outside the <nl> image boundaries . For these pixels , only input pixels inside the image will be <nl> included in the filter sum , and the output value will be appropriately <nl> normalized . <nl> def is_jpeg ( contents , name = None ) : <nl> name : A name for the operation ( optional ) <nl> <nl> Returns : <nl> - A scalar boolean tensor indicating if ' contents ' maybe a JPEG image . <nl> + A scalar boolean tensor indicating if ' contents ' may be a JPEG image . <nl> is_jpeg is susceptible to false positives . <nl> " " " <nl> # Normal JPEGs start with \ xff \ xd8 \ xff \ xe0 <nl> def _is_png ( contents , name = None ) : <nl> name : A name for the operation ( optional ) <nl> <nl> Returns : <nl> - A scalar boolean tensor indicating if ' contents ' maybe a PNG image . <nl> + A scalar boolean tensor indicating if ' contents ' may be a PNG image . <nl> is_png is susceptible to false positives . <nl> " " " <nl> with ops . name_scope ( name , ' is_png ' ) : <nl> def sample_distorted_bounding_box_v2 ( image_size , <nl> localization of an object , i . e . bounding box , given an ` image_size ` , <nl> ` bounding_boxes ` and a series of constraints . <nl> <nl> - The output of this Op is a single bounding box that maybe used to crop the <nl> + The output of this Op is a single bounding box that may be used to crop the <nl> original image . The output is returned as 3 tensors : ` begin ` , ` size ` and <nl> ` bboxes ` . The first 2 tensors can be fed directly into ` tf . slice ` to crop the <nl> - image . The latter maybe supplied to ` tf . image . draw_bounding_boxes ` to <nl> + image . The latter may be supplied to ` tf . image . draw_bounding_boxes ` to <nl> visualize what the bounding box looks like . <nl> <nl> Bounding boxes are supplied and returned as ` [ y_min , x_min , y_max , x_max ] ` . <nl> def sample_distorted_bounding_box ( image_size , <nl> localization of an object , i . e . bounding box , given an ` image_size ` , <nl> ` bounding_boxes ` and a series of constraints . 
<nl> <nl> - The output of this Op is a single bounding box that maybe used to crop the <nl> + The output of this Op is a single bounding box that may be used to crop the <nl> original image . The output is returned as 3 tensors : ` begin ` , ` size ` and <nl> ` bboxes ` . The first 2 tensors can be fed directly into ` tf . slice ` to crop the <nl> - image . The latter maybe supplied to ` tf . image . draw_bounding_boxes ` to <nl> + image . The latter may be supplied to ` tf . image . draw_bounding_boxes ` to <nl> visualize what the bounding box looks like . <nl> <nl> Bounding boxes are supplied and returned as ` [ y_min , x_min , y_max , x_max ] ` . <nl>
msg:  Reverted "maybe" to "may be" as requested
repo: tensorflow/tensorflow
sha:  444c091ad98b867369cf1fd9a4e5e99757975ba4
time: 2019-11-14T08:47:42Z
mmm a / src / mongo / base / error_codes . yml <nl> ppp b / src / mongo / base / error_codes . yml <nl> error_codes : <nl> - { code : 290 , name : TransactionExceededLifetimeLimitSeconds , categories : [ ExceededTimeLimitError ] } <nl> - { code : 291 , name : NoQueryExecutionPlans } <nl> - { code : 292 , name : QueryExceededMemoryLimitNoDiskUseAllowed } <nl> + - { code : 293 , name : InvalidSeedList } <nl> + - { code : 294 , name : InvalidTopologyType } <nl> + - { code : 295 , name : InvalidHeartBeatFrequency } <nl> + - { code : 296 , name : TopologySetNameRequired } <nl> <nl> # Error codes 4000 - 8999 are reserved . <nl> <nl> mmm a / src / mongo / client / SConscript <nl> ppp b / src / mongo / client / SConscript <nl> Import ( ' env ' ) <nl> <nl> env = env . Clone ( ) <nl> <nl> + env . SConscript ( <nl> + dirs = [ ' sdam ' ] , <nl> + exports = [ ' env ' ] <nl> + ) <nl> + <nl> # Contains only the core ConnectionString functionality , * not * the ability to call connect ( ) and <nl> # return a DBClientBase * back . For that you need to link against the ' clientdriver_network ' library . <nl> env . Library ( <nl> new file mode 100644 <nl> index 000000000000 . . 19c8760fc3f6 <nl> mmm / dev / null <nl> ppp b / src / mongo / client / sdam / SConscript <nl> <nl> + # - * - mode : python - * - <nl> + <nl> + Import ( ' env ' ) <nl> + <nl> + env = env . Clone ( ) <nl> + <nl> + env . Library ( <nl> + target = ' sdam ' , <nl> + source = [ <nl> + ' sdam_datatypes . cpp ' , <nl> + ' server_description . cpp ' , <nl> + ' topology_description . cpp ' , <nl> + ' topology_state_machine . cpp ' , <nl> + ' topology_manager . cpp ' <nl> + ] , <nl> + LIBDEPS = [ <nl> + ' $ BUILD_DIR / mongo / base ' , <nl> + ' $ BUILD_DIR / mongo / db / repl / optime ' , <nl> + ' $ BUILD_DIR / mongo / util / clock_sources ' , <nl> + ' $ BUILD_DIR / mongo / db / wire_version ' <nl> + ] , <nl> + ) <nl> + <nl> + env . Library ( <nl> + target = ' sdam_test ' , <nl> + source = [ <nl> + ' server_description_builder . cpp ' , <nl> + ] , <nl> + LIBDEPS = [ <nl> + ' $ BUILD_DIR / mongo / base ' , <nl> + ] , <nl> + ) <nl> + <nl> + env . CppUnitTest ( <nl> + target = ' topology_description_test ' , <nl> + source = [ ' topology_description_test . cpp ' ] , <nl> + LIBDEPS = [ ' sdam ' , ' sdam_test ' ] , <nl> + ) <nl> + <nl> + env . CppUnitTest ( <nl> + target = ' server_description_test ' , <nl> + source = [ ' server_description_test . cpp ' ] , <nl> + LIBDEPS = [ ' sdam ' , ' sdam_test ' ] , <nl> + ) <nl> + <nl> + env . CppUnitTest ( <nl> + target = ' topology_state_machine_test ' , <nl> + source = [ ' topology_state_machine_test . cpp ' ] , <nl> + LIBDEPS = [ ' sdam ' , ' sdam_test ' ] , <nl> + ) <nl> new file mode 100644 <nl> index 000000000000 . . d9cf96571714 <nl> mmm / dev / null <nl> ppp b / src / mongo / client / sdam / sdam_datatypes . cpp <nl> <nl> + / * * <nl> + * Copyright ( C ) 2019 - present MongoDB , Inc . <nl> + * <nl> + * This program is free software : you can redistribute it and / or modify <nl> + * it under the terms of the Server Side Public License , version 1 , <nl> + * as published by MongoDB , Inc . <nl> + * <nl> + * This program is distributed in the hope that it will be useful , <nl> + * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + * Server Side Public License for more details . <nl> + * <nl> + * You should have received a copy of the Server Side Public License <nl> + * along with this program . 
If not , see <nl> + * < http : / / www . mongodb . com / licensing / server - side - public - license > . <nl> + * <nl> + * As a special exception , the copyright holders give permission to link the <nl> + * code of portions of this program with the OpenSSL library under certain <nl> + * conditions as described in each individual source file and distribute <nl> + * linked combinations including the program with the OpenSSL library . You <nl> + * must comply with the Server Side Public License in all respects for <nl> + * all of the code used other than as permitted herein . If you modify file ( s ) <nl> + * with this exception , you may extend this exception to your version of the <nl> + * file ( s ) , but you are not obligated to do so . If you do not wish to do so , <nl> + * delete this exception statement from your version . If you delete this <nl> + * exception statement from all source files in the program , then also delete <nl> + * it in the license file . <nl> + * / <nl> + <nl> + # include " mongo / client / sdam / sdam_datatypes . h " <nl> + <nl> + namespace mongo : : sdam { <nl> + std : : string toString ( const ServerType serverType ) { <nl> + switch ( serverType ) { <nl> + case ServerType : : kStandalone : <nl> + return " Standalone " ; <nl> + case ServerType : : kMongos : <nl> + return " Mongos " ; <nl> + case ServerType : : kRSPrimary : <nl> + return " RSPrimary " ; <nl> + case ServerType : : kRSSecondary : <nl> + return " RSSecondary " ; <nl> + case ServerType : : kRSArbiter : <nl> + return " RSArbiter " ; <nl> + case ServerType : : kRSOther : <nl> + return " RSOther " ; <nl> + case ServerType : : kRSGhost : <nl> + return " RSGhost " ; <nl> + case ServerType : : kUnknown : <nl> + return " Unknown " ; <nl> + default : <nl> + MONGO_UNREACHABLE ; <nl> + } <nl> + } <nl> + <nl> + std : : ostream & operator < < ( std : : ostream & os , const ServerType serverType ) { <nl> + os < < toString ( serverType ) ; <nl> + return os ; <nl> + } <nl> + <nl> + const std : : vector < ServerType > allServerTypes ( ) { <nl> + static auto const result = std : : vector < ServerType > { ServerType : : kStandalone , <nl> + ServerType : : kMongos , <nl> + ServerType : : kRSPrimary , <nl> + ServerType : : kRSSecondary , <nl> + ServerType : : kRSArbiter , <nl> + ServerType : : kRSOther , <nl> + ServerType : : kRSGhost , <nl> + ServerType : : kUnknown } ; <nl> + return result ; <nl> + } <nl> + <nl> + <nl> + std : : string toString ( const TopologyType topologyType ) { <nl> + switch ( topologyType ) { <nl> + case TopologyType : : kReplicaSetNoPrimary : <nl> + return " ReplicaSetNoPrimary " ; <nl> + case TopologyType : : kReplicaSetWithPrimary : <nl> + return " ReplicaSetWithPrimary " ; <nl> + case TopologyType : : kSharded : <nl> + return " Sharded " ; <nl> + case TopologyType : : kUnknown : <nl> + return " Unknown " ; <nl> + case TopologyType : : kSingle : <nl> + return " Single " ; <nl> + default : <nl> + MONGO_UNREACHABLE <nl> + } <nl> + } <nl> + <nl> + std : : ostream & operator < < ( std : : ostream & os , const TopologyType topologyType ) { <nl> + os < < toString ( topologyType ) ; <nl> + return os ; <nl> + } <nl> + <nl> + const std : : vector < TopologyType > allTopologyTypes ( ) { <nl> + static auto const result = std : : vector < TopologyType > { TopologyType : : kSingle , <nl> + TopologyType : : kReplicaSetNoPrimary , <nl> + TopologyType : : kReplicaSetWithPrimary , <nl> + TopologyType : : kSharded , <nl> + TopologyType : : kUnknown } ; <nl> + return result ; <nl> + } <nl> + <nl> + const 
ServerAddress & IsMasterOutcome : : getServer ( ) const { <nl> + return _server ; <nl> + } <nl> + bool IsMasterOutcome : : isSuccess ( ) const { <nl> + return _success ; <nl> + } <nl> + const boost : : optional < BSONObj > & IsMasterOutcome : : getResponse ( ) const { <nl> + return _response ; <nl> + } <nl> + const boost : : optional < IsMasterRTT > & IsMasterOutcome : : getRtt ( ) const { <nl> + return _rtt ; <nl> + } <nl> + const std : : string & IsMasterOutcome : : getErrorMsg ( ) const { <nl> + return _errorMsg ; <nl> + } <nl> + } ; / / namespace mongo : : sdam <nl> new file mode 100644 <nl> index 000000000000 . . d8b37df42d2a <nl> mmm / dev / null <nl> ppp b / src / mongo / client / sdam / sdam_datatypes . h <nl> <nl> + / * * <nl> + * Copyright ( C ) 2019 - present MongoDB , Inc . <nl> + * <nl> + * This program is free software : you can redistribute it and / or modify <nl> + * it under the terms of the Server Side Public License , version 1 , <nl> + * as published by MongoDB , Inc . <nl> + * <nl> + * This program is distributed in the hope that it will be useful , <nl> + * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + * Server Side Public License for more details . <nl> + * <nl> + * You should have received a copy of the Server Side Public License <nl> + * along with this program . If not , see <nl> + * < http : / / www . mongodb . com / licensing / server - side - public - license > . <nl> + * <nl> + * As a special exception , the copyright holders give permission to link the <nl> + * code of portions of this program with the OpenSSL library under certain <nl> + * conditions as described in each individual source file and distribute <nl> + * linked combinations including the program with the OpenSSL library . You <nl> + * must comply with the Server Side Public License in all respects for <nl> + * all of the code used other than as permitted herein . If you modify file ( s ) <nl> + * with this exception , you may extend this exception to your version of the <nl> + * file ( s ) , but you are not obligated to do so . If you do not wish to do so , <nl> + * delete this exception statement from your version . If you delete this <nl> + * exception statement from all source files in the program , then also delete <nl> + * it in the license file . <nl> + * / <nl> + <nl> + # pragma once <nl> + <nl> + # include < boost / optional . hpp > <nl> + # include < chrono > <nl> + # include < string > <nl> + <nl> + # include " mongo / bson / bsonobj . h " <nl> + # include " mongo / util / duration . h " <nl> + <nl> + <nl> + / * * <nl> + * The data structures in this file are defined in the " Server Discovery & Monitoring " <nl> + * specification , which governs how topology changes are detected in a cluster . See <nl> + * https : / / github . com / mongodb / specifications / blob / master / source / server - discovery - and - monitoring / server - discovery - and - monitoring . rst <nl> + * for more information . 
<nl> + * / <nl> + namespace mongo : : sdam { <nl> + enum class TopologyType { <nl> + kSingle , <nl> + kReplicaSetNoPrimary , <nl> + kReplicaSetWithPrimary , <nl> + kSharded , <nl> + kUnknown <nl> + } ; <nl> + const std : : vector < TopologyType > allTopologyTypes ( ) ; <nl> + std : : string toString ( const TopologyType topologyType ) ; <nl> + std : : ostream & operator < < ( std : : ostream & os , const TopologyType topologyType ) ; <nl> + <nl> + enum class ServerType { <nl> + kStandalone , <nl> + kMongos , <nl> + kRSPrimary , <nl> + kRSSecondary , <nl> + kRSArbiter , <nl> + kRSOther , <nl> + kRSGhost , <nl> + kUnknown <nl> + } ; <nl> + const std : : vector < ServerType > allServerTypes ( ) ; <nl> + std : : string toString ( const ServerType serverType ) ; <nl> + std : : ostream & operator < < ( std : : ostream & os , const ServerType serverType ) ; <nl> + <nl> + using ServerAddress = std : : string ; <nl> + using IsMasterRTT = mongo : : Nanoseconds ; <nl> + <nl> + / / The result of an attempt to call the " ismaster " command on a server . <nl> + class IsMasterOutcome { <nl> + IsMasterOutcome ( ) = delete ; <nl> + <nl> + public : <nl> + / / success constructor <nl> + IsMasterOutcome ( ServerAddress server , BSONObj response , IsMasterRTT rtt ) <nl> + : _server ( std : : move ( server ) ) , _success ( true ) , _response ( response ) , _rtt ( rtt ) { } <nl> + <nl> + / / failure constructor <nl> + IsMasterOutcome ( ServerAddress server , std : : string errorMsg ) <nl> + : _server ( std : : move ( server ) ) , _success ( false ) , _errorMsg ( errorMsg ) { } <nl> + <nl> + const ServerAddress & getServer ( ) const ; <nl> + bool isSuccess ( ) const ; <nl> + const boost : : optional < BSONObj > & getResponse ( ) const ; <nl> + const boost : : optional < IsMasterRTT > & getRtt ( ) const ; <nl> + const std : : string & getErrorMsg ( ) const ; <nl> + <nl> + private : <nl> + ServerAddress _server ; <nl> + / / indicating the success or failure of the attempt <nl> + bool _success ; <nl> + / / an error message in case of failure <nl> + std : : string _errorMsg ; <nl> + / / a document containing the command response ( or boost : : none if it failed ) <nl> + boost : : optional < BSONObj > _response ; <nl> + / / the round trip time to execute the command ( or null if it failed ) <nl> + boost : : optional < IsMasterRTT > _rtt ; <nl> + } ; <nl> + <nl> + class ServerDescription ; <nl> + using ServerDescriptionPtr = std : : shared_ptr < ServerDescription > ; <nl> + <nl> + class TopologyDescription ; <nl> + using TopologyDescriptionPtr = std : : shared_ptr < TopologyDescription > ; <nl> + } ; / / namespace mongo : : sdam <nl> new file mode 100644 <nl> index 000000000000 . . 3532e0588b7d <nl> mmm / dev / null <nl> ppp b / src / mongo / client / sdam / sdam_test_base . h <nl> <nl> + / * * <nl> + * Copyright ( C ) 2019 - present MongoDB , Inc . <nl> + * <nl> + * This program is free software : you can redistribute it and / or modify <nl> + * it under the terms of the Server Side Public License , version 1 , <nl> + * as published by MongoDB , Inc . <nl> + * <nl> + * This program is distributed in the hope that it will be useful , <nl> + * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + * Server Side Public License for more details . <nl> + * <nl> + * You should have received a copy of the Server Side Public License <nl> + * along with this program . If not , see <nl> + * < http : / / www . mongodb . 
com / licensing / server - side - public - license > . <nl> + * <nl> + * As a special exception , the copyright holders give permission to link the <nl> + * code of portions of this program with the OpenSSL library under certain <nl> + * conditions as described in each individual source file and distribute <nl> + * linked combinations including the program with the OpenSSL library . You <nl> + * must comply with the Server Side Public License in all respects for <nl> + * all of the code used other than as permitted herein . If you modify file ( s ) <nl> + * with this exception , you may extend this exception to your version of the <nl> + * file ( s ) , but you are not obligated to do so . If you do not wish to do so , <nl> + * delete this exception statement from your version . If you delete this <nl> + * exception statement from all source files in the program , then also delete <nl> + * it in the license file . <nl> + * / <nl> + # pragma once <nl> + <nl> + # include < map > <nl> + # include < ostream > <nl> + # include < set > <nl> + # include < string > <nl> + # include < type_traits > <nl> + # include < vector > <nl> + <nl> + # include " mongo / client / sdam / sdam_datatypes . h " <nl> + # include " mongo / client / sdam / server_description . h " <nl> + <nl> + <nl> + / * * <nl> + * The following facilitates writing tests in the Server Discovery And Monitoring ( sdam ) namespace . <nl> + * / <nl> + namespace mongo { <nl> + template < typename T > <nl> + std : : ostream & operator < < ( std : : ostream & os , const std : : vector < T > & s ) { <nl> + os < < " [ " ; <nl> + size_t i = 0 ; <nl> + for ( const auto & item : s ) { <nl> + os < < item ; <nl> + if ( i ! = s . size ( ) - 1 ) <nl> + os < < " , " ; <nl> + } <nl> + os < < " ] " ; <nl> + return os ; <nl> + } <nl> + <nl> + template < typename T > <nl> + std : : ostream & operator < < ( std : : ostream & os , const std : : set < T > & s ) { <nl> + os < < " { " ; <nl> + size_t i = 0 ; <nl> + for ( const auto & item : s ) { <nl> + os < < item ; <nl> + if ( i ! = s . size ( ) - 1 ) <nl> + os < < " , " ; <nl> + } <nl> + os < < " } " ; <nl> + return os ; <nl> + } <nl> + <nl> + template < typename K , typename V > <nl> + std : : ostream & operator < < ( std : : ostream & os , const std : : map < K , V > & m ) { <nl> + os < < " { " ; <nl> + size_t i = 0 ; <nl> + for ( const auto & item : m ) { <nl> + os < < item . first < < " : " < < item . second ; <nl> + if ( i ! = m . size ( ) - 1 ) <nl> + os < < " , " ; <nl> + } <nl> + os < < " } " ; <nl> + return os ; <nl> + } <nl> + <nl> + template std : : ostream & operator < < ( std : : ostream & os , <nl> + const std : : vector < mongo : : sdam : : ServerDescriptionPtr > & v ) ; <nl> + template std : : ostream & operator < < ( std : : ostream & os , const std : : set < std : : string > & s ) ; <nl> + template std : : ostream & operator < < ( std : : ostream & os , const std : : map < std : : string , std : : string > & m ) ; <nl> + } ; / / namespace mongo <nl> + <nl> + / / We include this here because the ASSERT_EQUALS needs to have the operator < < defined <nl> + / / beforehand for the types used in the tests . <nl> + # include " mongo / unittest / unittest . 
h " <nl> + namespace mongo { <nl> + namespace sdam { <nl> + using mongo : : operator < < ; <nl> + <nl> + class SdamTestFixture : public mongo : : unittest : : Test { <nl> + protected : <nl> + template < typename T , typename U > <nl> + std : : vector < U > map ( std : : vector < T > source , std : : function < U ( const T & ) > f ) { <nl> + std : : vector < U > result ; <nl> + std : : transform ( source . begin ( ) , <nl> + source . end ( ) , <nl> + std : : back_inserter ( result ) , <nl> + [ f ] ( const auto & item ) { return f ( item ) ; } ) ; <nl> + return result ; <nl> + } <nl> + <nl> + template < typename T , typename U > <nl> + std : : set < U > mapSet ( std : : vector < T > source , std : : function < U ( const T & ) > f ) { <nl> + auto v = map < T , U > ( source , f ) ; <nl> + std : : set < U > result ( v . begin ( ) , v . end ( ) ) ; <nl> + return result ; <nl> + } <nl> + } ; <nl> + } / / namespace sdam <nl> + } / / namespace mongo <nl> new file mode 100644 <nl> index 000000000000 . . 21726e4ee1a9 <nl> mmm / dev / null <nl> ppp b / src / mongo / client / sdam / server_description . cpp <nl> <nl> + / * * <nl> + * Copyright ( C ) 2019 - present MongoDB , Inc . <nl> + * <nl> + * This program is free software : you can redistribute it and / or modify <nl> + * it under the terms of the Server Side Public License , version 1 , <nl> + * as published by MongoDB , Inc . <nl> + * <nl> + * This program is distributed in the hope that it will be useful , <nl> + * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + * Server Side Public License for more details . <nl> + * <nl> + * You should have received a copy of the Server Side Public License <nl> + * along with this program . If not , see <nl> + * < http : / / www . mongodb . com / licensing / server - side - public - license > . <nl> + * <nl> + * As a special exception , the copyright holders give permission to link the <nl> + * code of portions of this program with the OpenSSL library under certain <nl> + * conditions as described in each individual source file and distribute <nl> + * linked combinations including the program with the OpenSSL library . You <nl> + * must comply with the Server Side Public License in all respects for <nl> + * all of the code used other than as permitted herein . If you modify file ( s ) <nl> + * with this exception , you may extend this exception to your version of the <nl> + * file ( s ) , but you are not obligated to do so . If you do not wish to do so , <nl> + * delete this exception statement from your version . If you delete this <nl> + * exception statement from all source files in the program , then also delete <nl> + * it in the license file . <nl> + * / <nl> + <nl> + # include " mongo / client / sdam / server_description . h " <nl> + # define MONGO_LOG_DEFAULT_COMPONENT : : mongo : : logger : : LogComponent : : kNetwork <nl> + <nl> + # include < algorithm > <nl> + # include < boost / algorithm / string . hpp > <nl> + # include < boost / optional . hpp > <nl> + # include < set > <nl> + <nl> + # include " mongo / bson / bsonobjbuilder . h " <nl> + # include " mongo / bson / oid . h " <nl> + # include " mongo / client / sdam / sdam_datatypes . h " <nl> + # include " mongo / util / duration . h " <nl> + # include " mongo / util / log . 
h " <nl> + <nl> + <nl> + namespace mongo : : sdam { <nl> + ServerDescription : : ServerDescription ( ClockSource * clockSource , <nl> + const IsMasterOutcome & isMasterOutcome , <nl> + boost : : optional < IsMasterRTT > lastRtt ) <nl> + : ServerDescription ( isMasterOutcome . getServer ( ) ) { <nl> + if ( isMasterOutcome . isSuccess ( ) ) { <nl> + const auto response = * isMasterOutcome . getResponse ( ) ; <nl> + <nl> + / / type must be parsed before RTT is calculated . <nl> + parseTypeFromIsMaster ( response ) ; <nl> + calculateRtt ( * isMasterOutcome . getRtt ( ) , lastRtt ) ; <nl> + <nl> + _lastUpdateTime = clockSource - > now ( ) ; <nl> + _minWireVersion = response [ " minWireVersion " ] . numberInt ( ) ; <nl> + _maxWireVersion = response [ " maxWireVersion " ] . numberInt ( ) ; <nl> + <nl> + saveLastWriteInfo ( response . getObjectField ( " lastWrite " ) ) ; <nl> + saveHosts ( response ) ; <nl> + saveTags ( response . getObjectField ( " tags " ) ) ; <nl> + saveElectionId ( response . getField ( " electionId " ) ) ; <nl> + <nl> + auto lsTimeoutField = response . getField ( " logicalSessionTimeoutMinutes " ) ; <nl> + if ( lsTimeoutField . type ( ) = = BSONType : : NumberInt ) { <nl> + _logicalSessionTimeoutMinutes = lsTimeoutField . numberInt ( ) ; <nl> + } <nl> + <nl> + auto setVersionField = response . getField ( " setVersion " ) ; <nl> + if ( setVersionField . type ( ) = = BSONType : : NumberInt ) { <nl> + _setVersion = response [ " setVersion " ] . numberInt ( ) ; <nl> + } <nl> + <nl> + auto setNameField = response . getField ( " setName " ) ; <nl> + if ( setNameField . type ( ) = = BSONType : : String ) { <nl> + _setName = response [ " setName " ] . str ( ) ; <nl> + } <nl> + <nl> + auto primaryField = response . getField ( " primary " ) ; <nl> + if ( primaryField . type ( ) = = BSONType : : String ) { <nl> + _primary = response . getStringField ( " primary " ) ; <nl> + } <nl> + } else { <nl> + _error = isMasterOutcome . getErrorMsg ( ) ; <nl> + } <nl> + } <nl> + <nl> + void ServerDescription : : storeHostListIfPresent ( const std : : string key , <nl> + const BSONObj response , <nl> + std : : set < ServerAddress > & destination ) { <nl> + if ( response . hasField ( key ) ) { <nl> + auto hostsBsonArray = response [ key ] . Array ( ) ; <nl> + std : : transform ( hostsBsonArray . begin ( ) , <nl> + hostsBsonArray . end ( ) , <nl> + std : : inserter ( destination , destination . begin ( ) ) , <nl> + [ ] ( const BSONElement e ) { return boost : : to_lower_copy ( e . String ( ) ) ; } ) ; <nl> + } <nl> + } <nl> + <nl> + void ServerDescription : : saveHosts ( const BSONObj response ) { <nl> + if ( response . hasField ( " me " ) ) { <nl> + auto me = response . getField ( " me " ) ; <nl> + _me = boost : : to_lower_copy ( me . str ( ) ) ; <nl> + } <nl> + <nl> + storeHostListIfPresent ( " hosts " , response , _hosts ) ; <nl> + storeHostListIfPresent ( " passives " , response , _passives ) ; <nl> + storeHostListIfPresent ( " arbiters " , response , _arbiters ) ; <nl> + } <nl> + <nl> + void ServerDescription : : saveTags ( BSONObj tagsObj ) { <nl> + const auto keys = tagsObj . getFieldNames < std : : set < std : : string > > ( ) ; <nl> + for ( const auto key : keys ) { <nl> + _tags [ key ] = tagsObj . getStringField ( key ) ; <nl> + } <nl> + } <nl> + <nl> + void ServerDescription : : saveElectionId ( BSONElement electionId ) { <nl> + if ( electionId . type ( ) = = jstOID ) { <nl> + _electionId = electionId . 
OID ( ) ; <nl> + } <nl> + } <nl> + <nl> + void ServerDescription : : calculateRtt ( const IsMasterRTT currentRtt , <nl> + const boost : : optional < IsMasterRTT > lastRtt ) { <nl> + if ( getType ( ) = = ServerType : : kUnknown ) { <nl> + / / if a server ' s type is Unknown , it ' s RTT is null <nl> + / / see : <nl> + / / https : / / github . com / mongodb / specifications / blob / master / source / server - discovery - and - monitoring / server - discovery - and - monitoring . rst # roundtriptime <nl> + return ; <nl> + } <nl> + <nl> + if ( lastRtt ) { <nl> + / / new_rtt = alpha * x + ( 1 - alpha ) * old_rtt <nl> + _rtt = IsMasterRTT ( static_cast < IsMasterRTT : : rep > ( kRttAlpha * currentRtt . count ( ) + <nl> + ( 1 - kRttAlpha ) * lastRtt . get ( ) . count ( ) ) ) ; <nl> + } else { <nl> + _rtt = currentRtt ; <nl> + } <nl> + } <nl> + <nl> + void ServerDescription : : saveLastWriteInfo ( BSONObj lastWriteBson ) { <nl> + const auto lastWriteDateField = lastWriteBson . getField ( " lastWriteDate " ) ; <nl> + if ( lastWriteDateField . type ( ) = = BSONType : : Date ) { <nl> + _lastWriteDate = lastWriteDateField . date ( ) ; <nl> + } <nl> + <nl> + const auto opTimeParse = <nl> + repl : : OpTime : : parseFromOplogEntry ( lastWriteBson . getObjectField ( " opTime " ) ) ; <nl> + if ( opTimeParse . isOK ( ) ) { <nl> + _opTime = opTimeParse . getValue ( ) ; <nl> + } <nl> + } <nl> + <nl> + void ServerDescription : : parseTypeFromIsMaster ( const BSONObj isMaster ) { <nl> + ServerType t ; <nl> + bool hasSetName = isMaster . hasField ( " setName " ) ; <nl> + <nl> + if ( isMaster . getField ( " ok " ) . numberInt ( ) ! = 1 ) { <nl> + t = ServerType : : kUnknown ; <nl> + } else if ( ! hasSetName & & ! isMaster . hasField ( " msg " ) & & ! isMaster . getBoolField ( " isreplicaset " ) ) { <nl> + t = ServerType : : kStandalone ; <nl> + } else if ( kIsDbGrid = = isMaster . getStringField ( " msg " ) ) { <nl> + t = ServerType : : kMongos ; <nl> + } else if ( hasSetName & & isMaster . getBoolField ( " ismaster " ) ) { <nl> + t = ServerType : : kRSPrimary ; <nl> + } else if ( hasSetName & & isMaster . getBoolField ( " secondary " ) ) { <nl> + t = ServerType : : kRSSecondary ; <nl> + } else if ( hasSetName & & isMaster . getBoolField ( " arbiterOnly " ) ) { <nl> + t = ServerType : : kRSArbiter ; <nl> + } else if ( hasSetName & & isMaster . getBoolField ( " hidden " ) ) { <nl> + t = ServerType : : kRSOther ; <nl> + } else if ( isMaster . getBoolField ( " isreplicaset " ) ) { <nl> + t = ServerType : : kRSGhost ; <nl> + } else { <nl> + error ( ) < < " unknown server type from successful ismaster reply : " < < isMaster . 
toString ( ) ; <nl> + t = ServerType : : kUnknown ; <nl> + } <nl> + _type = t ; <nl> + } <nl> + <nl> + const ServerAddress & ServerDescription : : getAddress ( ) const { <nl> + return _address ; <nl> + } <nl> + <nl> + const boost : : optional < std : : string > & ServerDescription : : getError ( ) const { <nl> + return _error ; <nl> + } <nl> + <nl> + const boost : : optional < IsMasterRTT > & ServerDescription : : getRtt ( ) const { <nl> + return _rtt ; <nl> + } <nl> + <nl> + const boost : : optional < mongo : : Date_t > & ServerDescription : : getLastWriteDate ( ) const { <nl> + return _lastWriteDate ; <nl> + } <nl> + <nl> + const boost : : optional < repl : : OpTime > & ServerDescription : : getOpTime ( ) const { <nl> + return _opTime ; <nl> + } <nl> + <nl> + ServerType ServerDescription : : getType ( ) const { <nl> + return _type ; <nl> + } <nl> + <nl> + const boost : : optional < ServerAddress > & ServerDescription : : getMe ( ) const { <nl> + return _me ; <nl> + } <nl> + <nl> + const std : : set < ServerAddress > & ServerDescription : : getHosts ( ) const { <nl> + return _hosts ; <nl> + } <nl> + <nl> + const std : : set < ServerAddress > & ServerDescription : : getPassives ( ) const { <nl> + return _passives ; <nl> + } <nl> + <nl> + const std : : set < ServerAddress > & ServerDescription : : getArbiters ( ) const { <nl> + return _arbiters ; <nl> + } <nl> + <nl> + const std : : map < std : : string , std : : string > & ServerDescription : : getTags ( ) const { <nl> + return _tags ; <nl> + } <nl> + <nl> + const boost : : optional < std : : string > & ServerDescription : : getSetName ( ) const { <nl> + return _setName ; <nl> + } <nl> + <nl> + const boost : : optional < int > & ServerDescription : : getSetVersion ( ) const { <nl> + return _setVersion ; <nl> + } <nl> + <nl> + const boost : : optional < mongo : : OID > & ServerDescription : : getElectionId ( ) const { <nl> + return _electionId ; <nl> + } <nl> + <nl> + const boost : : optional < ServerAddress > & ServerDescription : : getPrimary ( ) const { <nl> + return _primary ; <nl> + } <nl> + <nl> + const mongo : : Date_t ServerDescription : : getLastUpdateTime ( ) const { <nl> + return * _lastUpdateTime ; <nl> + } <nl> + <nl> + const boost : : optional < int > & ServerDescription : : getLogicalSessionTimeoutMinutes ( ) const { <nl> + return _logicalSessionTimeoutMinutes ; <nl> + } <nl> + <nl> + bool ServerDescription : : isEquivalent ( const ServerDescription & other ) const { <nl> + auto otherValues = std : : tie ( other . _type , <nl> + other . _minWireVersion , <nl> + other . _maxWireVersion , <nl> + other . _me , <nl> + other . _hosts , <nl> + other . _passives , <nl> + other . _arbiters , <nl> + other . _tags , <nl> + other . _setName , <nl> + other . _setVersion , <nl> + other . _electionId , <nl> + other . _primary , <nl> + other . _logicalSessionTimeoutMinutes ) ; <nl> + auto thisValues = std : : tie ( _type , <nl> + _minWireVersion , <nl> + _maxWireVersion , <nl> + _me , <nl> + _hosts , <nl> + _passives , <nl> + _arbiters , <nl> + _tags , <nl> + _setName , <nl> + _setVersion , <nl> + _electionId , <nl> + _primary , <nl> + _logicalSessionTimeoutMinutes ) ; <nl> + return thisValues = = otherValues ; <nl> + } <nl> + <nl> + bool ServerDescription : : isDataBearingServer ( ) const { <nl> + return kDataServerTypes . find ( _type ) ! = kDataServerTypes . end ( ) ; <nl> + } <nl> + <nl> + / / output server description to bson . This is primarily used for debugging . 
<nl> + BSONObj ServerDescription : : toBson ( ) const { <nl> + BSONObjBuilder bson ; <nl> + bson . append ( " address " , _address ) ; <nl> + if ( _rtt ) { <nl> + bson . append ( " roundTripTime " , durationCount < Microseconds > ( * _rtt ) ) ; <nl> + } <nl> + <nl> + if ( _lastWriteDate ) { <nl> + bson . appendDate ( " lastWriteDate " , * _lastWriteDate ) ; <nl> + } <nl> + <nl> + if ( _opTime ) { <nl> + bson . append ( " opTime " , _opTime - > toBSON ( ) ) ; <nl> + } <nl> + <nl> + { <nl> + using mongo : : sdam : : toString ; <nl> + bson . append ( " type " , toString ( _type ) ) ; <nl> + } <nl> + <nl> + bson . append ( " minWireVersion " , _minWireVersion ) ; <nl> + bson . append ( " maxWireVersion " , _maxWireVersion ) ; <nl> + if ( _me ) { <nl> + bson . append ( " me " , * _me ) ; <nl> + } <nl> + if ( _setName ) { <nl> + bson . append ( " setName " , * _setName ) ; <nl> + } <nl> + if ( _setVersion ) { <nl> + bson . append ( " setVersion " , * _setVersion ) ; <nl> + } <nl> + if ( _electionId ) { <nl> + bson . append ( " electionId " , * _electionId ) ; <nl> + } <nl> + if ( _primary ) { <nl> + bson . append ( " primary " , * _primary ) ; <nl> + } <nl> + if ( _lastUpdateTime ) { <nl> + bson . append ( " lastUpdateTime " , * _lastUpdateTime ) ; <nl> + } <nl> + if ( _logicalSessionTimeoutMinutes ) { <nl> + bson . append ( " logicalSessionTimeoutMinutes " , * _logicalSessionTimeoutMinutes ) ; <nl> + } <nl> + return bson . obj ( ) ; <nl> + } <nl> + <nl> + int ServerDescription : : getMinWireVersion ( ) const { <nl> + return _minWireVersion ; <nl> + } <nl> + <nl> + int ServerDescription : : getMaxWireVersion ( ) const { <nl> + return _maxWireVersion ; <nl> + } <nl> + <nl> + std : : string ServerDescription : : toString ( ) const { <nl> + return toBson ( ) . toString ( ) ; <nl> + } <nl> + <nl> + <nl> + bool operator = = ( const mongo : : sdam : : ServerDescription & a , const mongo : : sdam : : ServerDescription & b ) { <nl> + return a . isEquivalent ( b ) ; <nl> + } <nl> + <nl> + bool operator ! = ( const mongo : : sdam : : ServerDescription & a , const mongo : : sdam : : ServerDescription & b ) { <nl> + return ! ( a = = b ) ; <nl> + } <nl> + <nl> + std : : ostream & operator < < ( std : : ostream & os , const ServerDescription & description ) { <nl> + BSONObj obj = description . toBson ( ) ; <nl> + os < < obj . toString ( ) ; <nl> + return os ; <nl> + } <nl> + <nl> + std : : ostream & operator < < ( std : : ostream & os , const ServerDescriptionPtr & description ) { <nl> + os < < * description ; <nl> + return os ; <nl> + } <nl> + } ; / / namespace mongo : : sdam <nl> new file mode 100644 <nl> index 000000000000 . . 0d69ad86f2ca <nl> mmm / dev / null <nl> ppp b / src / mongo / client / sdam / server_description . h <nl> <nl> + / * * <nl> + * Copyright ( C ) 2019 - present MongoDB , Inc . <nl> + * <nl> + * This program is free software : you can redistribute it and / or modify <nl> + * it under the terms of the Server Side Public License , version 1 , <nl> + * as published by MongoDB , Inc . <nl> + * <nl> + * This program is distributed in the hope that it will be useful , <nl> + * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + * Server Side Public License for more details . <nl> + * <nl> + * You should have received a copy of the Server Side Public License <nl> + * along with this program . If not , see <nl> + * < http : / / www . mongodb . com / licensing / server - side - public - license > . 
<nl> + * <nl> + * As a special exception , the copyright holders give permission to link the <nl> + * code of portions of this program with the OpenSSL library under certain <nl> + * conditions as described in each individual source file and distribute <nl> + * linked combinations including the program with the OpenSSL library . You <nl> + * must comply with the Server Side Public License in all respects for <nl> + * all of the code used other than as permitted herein . If you modify file ( s ) <nl> + * with this exception , you may extend this exception to your version of the <nl> + * file ( s ) , but you are not obligated to do so . If you do not wish to do so , <nl> + * delete this exception statement from your version . If you delete this <nl> + * exception statement from all source files in the program , then also delete <nl> + * it in the license file . <nl> + * / <nl> + # pragma once <nl> + <nl> + # include < boost / algorithm / string . hpp > <nl> + # include < boost / optional . hpp > <nl> + # include < map > <nl> + # include < ostream > <nl> + # include < set > <nl> + # include < utility > <nl> + <nl> + # include " mongo / bson / oid . h " <nl> + # include " mongo / client / sdam / sdam_datatypes . h " <nl> + # include " mongo / db / repl / optime . h " <nl> + # include " mongo / platform / basic . h " <nl> + # include " mongo / util / clock_source . h " <nl> + <nl> + namespace mongo : : sdam { <nl> + class ServerDescription { <nl> + ServerDescription ( ) = delete ; <nl> + <nl> + public : <nl> + / * * <nl> + * Construct an unknown ServerDescription with default values except the server ' s address . <nl> + * / <nl> + ServerDescription ( ServerAddress address ) <nl> + : _address ( std : : move ( address ) ) , _type ( ServerType : : kUnknown ) { <nl> + boost : : to_lower ( _address ) ; <nl> + } <nl> + <nl> + / * * <nl> + * Build a new ServerDescription according to the rules of the SDAM spec based on the <nl> + * last RTT to the server and isMaster response . <nl> + * / <nl> + ServerDescription ( ClockSource * clockSource , <nl> + const IsMasterOutcome & isMasterOutcome , <nl> + boost : : optional < IsMasterRTT > lastRtt = boost : : none ) ; <nl> + <nl> + / * * <nl> + * This determines if a server description is equivalent according to the Server Discovery and <nl> + * Monitoring specification . Members marked with ( = ) are used to determine equality . Note that <nl> + * these members do not include RTT or the server ' s address . 
<nl> + * / <nl> + bool isEquivalent ( const ServerDescription & other ) const ; <nl> + <nl> + / / server identity <nl> + const ServerAddress & getAddress ( ) const ; <nl> + ServerType getType ( ) const ; <nl> + const boost : : optional < ServerAddress > & getMe ( ) const ; <nl> + const boost : : optional < std : : string > & getSetName ( ) const ; <nl> + const std : : map < std : : string , std : : string > & getTags ( ) const ; <nl> + <nl> + / / network attributes <nl> + const boost : : optional < std : : string > & getError ( ) const ; <nl> + const boost : : optional < IsMasterRTT > & getRtt ( ) const ; <nl> + const boost : : optional < int > & getLogicalSessionTimeoutMinutes ( ) const ; <nl> + <nl> + / / server capabilities <nl> + int getMinWireVersion ( ) const ; <nl> + int getMaxWireVersion ( ) const ; <nl> + bool isDataBearingServer ( ) const ; <nl> + <nl> + / / server ' time ' <nl> + const Date_t getLastUpdateTime ( ) const ; <nl> + const boost : : optional < Date_t > & getLastWriteDate ( ) const ; <nl> + const boost : : optional < repl : : OpTime > & getOpTime ( ) const ; <nl> + <nl> + / / topology membership <nl> + const boost : : optional < ServerAddress > & getPrimary ( ) const ; <nl> + const std : : set < ServerAddress > & getHosts ( ) const ; <nl> + const std : : set < ServerAddress > & getPassives ( ) const ; <nl> + const std : : set < ServerAddress > & getArbiters ( ) const ; <nl> + const boost : : optional < int > & getSetVersion ( ) const ; <nl> + const boost : : optional < OID > & getElectionId ( ) const ; <nl> + <nl> + BSONObj toBson ( ) const ; <nl> + std : : string toString ( ) const ; <nl> + <nl> + private : <nl> + / * * <nl> + * Classify the server ' s type based on the ismaster response . <nl> + * @ param isMaster - reply information for the ismaster command <nl> + * / <nl> + void parseTypeFromIsMaster ( const BSONObj isMaster ) ; <nl> + <nl> + <nl> + void calculateRtt ( const IsMasterRTT currentRtt , const boost : : optional < IsMasterRTT > lastRtt ) ; <nl> + void saveLastWriteInfo ( BSONObj lastWriteBson ) ; <nl> + <nl> + void storeHostListIfPresent ( const std : : string key , <nl> + const BSONObj response , <nl> + std : : set < ServerAddress > & destination ) ; <nl> + void saveHosts ( const BSONObj response ) ; <nl> + void saveTags ( BSONObj tagsObj ) ; <nl> + void saveElectionId ( BSONElement electionId ) ; <nl> + <nl> + static inline const std : : set < ServerType > kDataServerTypes { ServerType : : kMongos , <nl> + ServerType : : kRSPrimary , <nl> + ServerType : : kRSSecondary , <nl> + ServerType : : kStandalone } ; <nl> + <nl> + static inline const std : : string kIsDbGrid = " isdbgrid " ; <nl> + static inline const double kRttAlpha = 0 . 2 ; <nl> + <nl> + / / address : the hostname or IP , and the port number , that the client connects to . Note that this <nl> + / / is not the server ' s ismaster . me field , in the case that the server reports an address <nl> + / / different from the address the client uses . <nl> + ServerAddress _address ; <nl> + <nl> + / / error : information about the last error related to this server . Default null . <nl> + boost : : optional < std : : string > _error ; <nl> + <nl> + / / roundTripTime : the duration of the ismaster call . Default null . <nl> + boost : : optional < IsMasterRTT > _rtt ; <nl> + <nl> + / / lastWriteDate : a 64 - bit BSON datetime or null . The " lastWriteDate " from the server ' s most <nl> + / / recent ismaster response . 
<nl> + boost : : optional < Date_t > _lastWriteDate ; <nl> + <nl> + / / opTime : an ObjectId or null . The last opTime reported by the server ; an ObjectId or null . <nl> + / / ( Only mongos and shard servers record this field when monitoring config servers as replica <nl> + / / sets . ) <nl> + boost : : optional < repl : : OpTime > _opTime ; <nl> + <nl> + / / ( = ) type : a ServerType enum value . Default Unknown . <nl> + ServerType _type ; <nl> + <nl> + / / ( = ) minWireVersion , maxWireVersion : the wire protocol version range supported by the server . <nl> + / / Both default to 0 . Use min and maxWireVersion only to determine compatibility . <nl> + int _minWireVersion = 0 ; <nl> + int _maxWireVersion = 0 ; <nl> + <nl> + / / ( = ) me : The hostname or IP , and the port number , that this server was configured with in the <nl> + / / replica set . Default null . <nl> + boost : : optional < ServerAddress > _me ; <nl> + <nl> + / / ( = ) hosts , passives , arbiters : Sets of addresses . This server ' s opinion of the replica set ' s <nl> + / / members , if any . These hostnames are normalized to lower - case . Default empty . The client <nl> + / / monitors all three types of servers in a replica set . <nl> + std : : set < ServerAddress > _hosts ; <nl> + std : : set < ServerAddress > _passives ; <nl> + std : : set < ServerAddress > _arbiters ; <nl> + <nl> + / / ( = ) tags : map from string to string . Default empty . <nl> + std : : map < std : : string , std : : string > _tags ; <nl> + <nl> + / / ( = ) setName : string or null . Default null . <nl> + boost : : optional < std : : string > _setName ; <nl> + <nl> + / / ( = ) setVersion : integer or null . Default null . <nl> + boost : : optional < int > _setVersion ; <nl> + <nl> + / / ( = ) electionId : an ObjectId , if this is a MongoDB 2 . 6 + replica set member that believes it is <nl> + / / primary . See using setVersion and electionId to detect stale primaries . Default null . <nl> + boost : : optional < OID > _electionId ; <nl> + <nl> + / / ( = ) primary : an address . This server ' s opinion of who the primary is . Default null . <nl> + boost : : optional < ServerAddress > _primary ; <nl> + <nl> + / / lastUpdateTime : when this server was last checked . Default " infinity ago " . <nl> + boost : : optional < Date_t > _lastUpdateTime = Date_t : : min ( ) ; <nl> + <nl> + / / ( = ) logicalSessionTimeoutMinutes : integer or null . Default null . <nl> + boost : : optional < int > _logicalSessionTimeoutMinutes ; <nl> + <nl> + friend class ServerDescriptionBuilder ; <nl> + } ; <nl> + <nl> + bool operator = = ( const mongo : : sdam : : ServerDescription & a , const mongo : : sdam : : ServerDescription & b ) ; <nl> + bool operator ! = ( const mongo : : sdam : : ServerDescription & a , const mongo : : sdam : : ServerDescription & b ) ; <nl> + std : : ostream & operator < < ( std : : ostream & os , const ServerDescriptionPtr & description ) ; <nl> + std : : ostream & operator < < ( std : : ostream & os , const ServerDescription & description ) ; <nl> + } ; / / namespace mongo : : sdam <nl> new file mode 100644 <nl> index 000000000000 . . 16c40b025b80 <nl> mmm / dev / null <nl> ppp b / src / mongo / client / sdam / server_description_builder . cpp <nl> <nl> + / * * <nl> + * Copyright ( C ) 2019 - present MongoDB , Inc . <nl> + * <nl> + * This program is free software : you can redistribute it and / or modify <nl> + * it under the terms of the Server Side Public License , version 1 , <nl> + * as published by MongoDB , Inc . 
<nl> + * <nl> + * This program is distributed in the hope that it will be useful , <nl> + * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + * Server Side Public License for more details . <nl> + * <nl> + * You should have received a copy of the Server Side Public License <nl> + * along with this program . If not , see <nl> + * < http : / / www . mongodb . com / licensing / server - side - public - license > . <nl> + * <nl> + * As a special exception , the copyright holders give permission to link the <nl> + * code of portions of this program with the OpenSSL library under certain <nl> + * conditions as described in each individual source file and distribute <nl> + * linked combinations including the program with the OpenSSL library . You <nl> + * must comply with the Server Side Public License in all respects for <nl> + * all of the code used other than as permitted herein . If you modify file ( s ) <nl> + * with this exception , you may extend this exception to your version of the <nl> + * file ( s ) , but you are not obligated to do so . If you do not wish to do so , <nl> + * delete this exception statement from your version . If you delete this <nl> + * exception statement from all source files in the program , then also delete <nl> + * it in the license file . <nl> + * / <nl> + # include " mongo / client / sdam / server_description_builder . h " <nl> + <nl> + namespace mongo : : sdam { <nl> + ServerDescriptionPtr ServerDescriptionBuilder : : instance ( ) const { <nl> + return _instance ; <nl> + } <nl> + <nl> + ServerDescriptionBuilder & ServerDescriptionBuilder : : withAddress ( const ServerAddress & address ) { <nl> + _instance - > _address = address ; <nl> + return * this ; <nl> + } <nl> + <nl> + ServerDescriptionBuilder & ServerDescriptionBuilder : : withError ( const std : : string & error ) { <nl> + _instance - > _error = error ; <nl> + return * this ; <nl> + } <nl> + <nl> + ServerDescriptionBuilder & ServerDescriptionBuilder : : withRtt ( const IsMasterRTT & rtt ) { <nl> + _instance - > _rtt = rtt ; <nl> + return * this ; <nl> + } <nl> + <nl> + ServerDescriptionBuilder & ServerDescriptionBuilder : : withLastWriteDate ( const Date_t & lastWriteDate ) { <nl> + _instance - > _lastWriteDate = lastWriteDate ; <nl> + return * this ; <nl> + } <nl> + <nl> + ServerDescriptionBuilder & ServerDescriptionBuilder : : withOpTime ( const repl : : OpTime opTime ) { <nl> + _instance - > _opTime = opTime ; <nl> + return * this ; <nl> + } <nl> + <nl> + ServerDescriptionBuilder & ServerDescriptionBuilder : : withType ( const ServerType type ) { <nl> + _instance - > _type = type ; <nl> + return * this ; <nl> + } <nl> + <nl> + ServerDescriptionBuilder & ServerDescriptionBuilder : : withMinWireVersion ( int minVersion ) { <nl> + _instance - > _minWireVersion = minVersion ; <nl> + return * this ; <nl> + } <nl> + ServerDescriptionBuilder & ServerDescriptionBuilder : : withMaxWireVersion ( int maxVersion ) { <nl> + _instance - > _maxWireVersion = maxVersion ; <nl> + return * this ; <nl> + } <nl> + <nl> + ServerDescriptionBuilder & ServerDescriptionBuilder : : withMe ( const ServerAddress & me ) { <nl> + _instance - > _me = boost : : to_lower_copy ( me ) ; <nl> + return * this ; <nl> + } <nl> + <nl> + ServerDescriptionBuilder & ServerDescriptionBuilder : : withHost ( const ServerAddress & host ) { <nl> + _instance - > _hosts . 
emplace ( boost : : to_lower_copy ( host ) ) ; <nl> + return * this ; <nl> + } <nl> + <nl> + ServerDescriptionBuilder & ServerDescriptionBuilder : : withPassive ( const ServerAddress & passive ) { <nl> + _instance - > _passives . emplace ( boost : : to_lower_copy ( passive ) ) ; <nl> + return * this ; <nl> + } <nl> + <nl> + ServerDescriptionBuilder & ServerDescriptionBuilder : : withArbiter ( const ServerAddress & arbiter ) { <nl> + _instance - > _arbiters . emplace ( boost : : to_lower_copy ( arbiter ) ) ; <nl> + return * this ; <nl> + } <nl> + <nl> + ServerDescriptionBuilder & ServerDescriptionBuilder : : withTag ( const std : : string key , <nl> + const std : : string value ) { <nl> + _instance - > _tags [ key ] = value ; <nl> + return * this ; <nl> + } <nl> + <nl> + ServerDescriptionBuilder & ServerDescriptionBuilder : : withSetName ( const std : : string setName ) { <nl> + _instance - > _setName = std : : move ( setName ) ; <nl> + return * this ; <nl> + } <nl> + <nl> + ServerDescriptionBuilder & ServerDescriptionBuilder : : withSetVersion ( const int setVersion ) { <nl> + _instance - > _setVersion = setVersion ; <nl> + return * this ; <nl> + } <nl> + <nl> + ServerDescriptionBuilder & ServerDescriptionBuilder : : withElectionId ( const OID & electionId ) { <nl> + _instance - > _electionId = electionId ; <nl> + return * this ; <nl> + } <nl> + <nl> + ServerDescriptionBuilder & ServerDescriptionBuilder : : withPrimary ( const ServerAddress & primary ) { <nl> + _instance - > _primary = primary ; <nl> + return * this ; <nl> + } <nl> + <nl> + ServerDescriptionBuilder & ServerDescriptionBuilder : : withLastUpdateTime ( <nl> + const Date_t & lastUpdateTime ) { <nl> + _instance - > _lastUpdateTime = lastUpdateTime ; <nl> + return * this ; <nl> + } <nl> + <nl> + ServerDescriptionBuilder & ServerDescriptionBuilder : : withLogicalSessionTimeoutMinutes ( <nl> + const boost : : optional < int > logicalSessionTimeoutMinutes ) { <nl> + _instance - > _logicalSessionTimeoutMinutes = logicalSessionTimeoutMinutes ; <nl> + return * this ; <nl> + } <nl> + } ; / / namespace mongo : : sdam <nl> new file mode 100644 <nl> index 000000000000 . . f9ddf7e1a412 <nl> mmm / dev / null <nl> ppp b / src / mongo / client / sdam / server_description_builder . h <nl> <nl> + / * * <nl> + * Copyright ( C ) 2019 - present MongoDB , Inc . <nl> + * <nl> + * This program is free software : you can redistribute it and / or modify <nl> + * it under the terms of the Server Side Public License , version 1 , <nl> + * as published by MongoDB , Inc . <nl> + * <nl> + * This program is distributed in the hope that it will be useful , <nl> + * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + * Server Side Public License for more details . <nl> + * <nl> + * You should have received a copy of the Server Side Public License <nl> + * along with this program . If not , see <nl> + * < http : / / www . mongodb . com / licensing / server - side - public - license > . <nl> + * <nl> + * As a special exception , the copyright holders give permission to link the <nl> + * code of portions of this program with the OpenSSL library under certain <nl> + * conditions as described in each individual source file and distribute <nl> + * linked combinations including the program with the OpenSSL library . You <nl> + * must comply with the Server Side Public License in all respects for <nl> + * all of the code used other than as permitted herein . 
If you modify file ( s ) <nl> + * with this exception , you may extend this exception to your version of the <nl> + * file ( s ) , but you are not obligated to do so . If you do not wish to do so , <nl> + * delete this exception statement from your version . If you delete this <nl> + * exception statement from all source files in the program , then also delete <nl> + * it in the license file . <nl> + * / <nl> + # pragma once <nl> + # include " mongo / client / sdam / server_description . h " <nl> + <nl> + namespace mongo : : sdam { <nl> + <nl> + / * * <nl> + * This class is used in the unit tests to construct ServerDescription instances . For production <nl> + * code , ServerDescription instances should be constructed using its constructors . <nl> + * / <nl> + class ServerDescriptionBuilder { <nl> + public : <nl> + ServerDescriptionBuilder ( ) = default ; <nl> + <nl> + / * * <nl> + * Return the configured ServerDescription instance . <nl> + * / <nl> + ServerDescriptionPtr instance ( ) const ; <nl> + <nl> + / / server identity <nl> + ServerDescriptionBuilder & withAddress ( const ServerAddress & address ) ; <nl> + ServerDescriptionBuilder & withType ( const ServerType type ) ; <nl> + ServerDescriptionBuilder & withMe ( const ServerAddress & me ) ; <nl> + ServerDescriptionBuilder & withTag ( const std : : string key , const std : : string value ) ; <nl> + ServerDescriptionBuilder & withSetName ( const std : : string setName ) ; <nl> + <nl> + / / network attributes <nl> + ServerDescriptionBuilder & withRtt ( const IsMasterRTT & rtt ) ; <nl> + ServerDescriptionBuilder & withError ( const std : : string & error ) ; <nl> + ServerDescriptionBuilder & withLogicalSessionTimeoutMinutes ( <nl> + const boost : : optional < int > logicalSessionTimeoutMinutes ) ; <nl> + <nl> + / / server capabilities <nl> + ServerDescriptionBuilder & withMinWireVersion ( int minVersion ) ; <nl> + ServerDescriptionBuilder & withMaxWireVersion ( int maxVersion ) ; <nl> + <nl> + / / server ' time ' <nl> + ServerDescriptionBuilder & withLastWriteDate ( const Date_t & lastWriteDate ) ; <nl> + ServerDescriptionBuilder & withOpTime ( const repl : : OpTime opTime ) ; <nl> + ServerDescriptionBuilder & withLastUpdateTime ( const Date_t & lastUpdateTime ) ; <nl> + <nl> + / / topology membership <nl> + ServerDescriptionBuilder & withPrimary ( const ServerAddress & primary ) ; <nl> + ServerDescriptionBuilder & withHost ( const ServerAddress & host ) ; <nl> + ServerDescriptionBuilder & withPassive ( const ServerAddress & passive ) ; <nl> + ServerDescriptionBuilder & withArbiter ( const ServerAddress & arbiter ) ; <nl> + ServerDescriptionBuilder & withSetVersion ( const int setVersion ) ; <nl> + ServerDescriptionBuilder & withElectionId ( const OID & electionId ) ; <nl> + <nl> + private : <nl> + constexpr static auto kServerAddressNotSet = " address . not . set : 1234 " ; <nl> + ServerDescriptionPtr _instance = <nl> + std : : shared_ptr < ServerDescription > ( new ServerDescription ( kServerAddressNotSet ) ) ; <nl> + } ; <nl> + } / / namespace mongo : : sdam <nl> new file mode 100644 <nl> index 000000000000 . . 263392dfe6fb <nl> mmm / dev / null <nl> ppp b / src / mongo / client / sdam / server_description_test . cpp <nl> <nl> + / * * <nl> + * Copyright ( C ) 2019 - present MongoDB , Inc . <nl> + * <nl> + * This program is free software : you can redistribute it and / or modify <nl> + * it under the terms of the Server Side Public License , version 1 , <nl> + * as published by MongoDB , Inc . 
<nl> + * <nl> + * This program is distributed in the hope that it will be useful , <nl> + * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + * Server Side Public License for more details . <nl> + * <nl> + * You should have received a copy of the Server Side Public License <nl> + * along with this program . If not , see <nl> + * < http : / / www . mongodb . com / licensing / server - side - public - license > . <nl> + * <nl> + * As a special exception , the copyright holders give permission to link the <nl> + * code of portions of this program with the OpenSSL library under certain <nl> + * conditions as described in each individual source file and distribute <nl> + * linked combinations including the program with the OpenSSL library . You <nl> + * must comply with the Server Side Public License in all respects for <nl> + * all of the code used other than as permitted herein . If you modify file ( s ) <nl> + * with this exception , you may extend this exception to your version of the <nl> + * file ( s ) , but you are not obligated to do so . If you do not wish to do so , <nl> + * delete this exception statement from your version . If you delete this <nl> + * exception statement from all source files in the program , then also delete <nl> + * it in the license file . <nl> + * / <nl> + # include " mongo / client / sdam / sdam_test_base . h " <nl> + <nl> + # include < boost / algorithm / string . hpp > <nl> + # include < boost / optional / optional_io . hpp > <nl> + # include < ostream > <nl> + # include < set > <nl> + <nl> + # include " mongo / client / sdam / server_description . h " <nl> + # include " mongo / client / sdam / server_description_builder . h " <nl> + # include " mongo / db / jsobj . h " <nl> + # include " mongo / db / repl / optime . h " <nl> + # include " mongo / util / system_clock_source . h " <nl> + <nl> + namespace mongo : : sdam { <nl> + TEST ( ServerDescriptionTest , ShouldNormalizeAddress ) { <nl> + ServerDescription a ( " foo : 1234 " ) ; <nl> + ServerDescription b ( " FOo : 1234 " ) ; <nl> + ASSERT_EQUALS ( a . getAddress ( ) , b . getAddress ( ) ) ; <nl> + } <nl> + <nl> + TEST ( ServerDescriptionEqualityTest , ShouldCompareDefaultValuesAsEqual ) { <nl> + auto a = ServerDescription ( " foo : 1234 " ) ; <nl> + auto b = ServerDescription ( " foo : 1234 " ) ; <nl> + ASSERT_EQUALS ( a , b ) ; <nl> + } <nl> + <nl> + TEST ( ServerDescriptionEqualityTest , ShouldCompareDifferentAddressButSameServerTypeAsEqual ) { <nl> + / / Note : The SDAM specification does not prescribe how to compare server descriptions with <nl> + / / different addresses for equality . We choose that two descriptions are considered equal if <nl> + / / their addresses are different . <nl> + auto a = * ServerDescriptionBuilder ( ) <nl> + . withAddress ( " foo : 1234 " ) <nl> + . withType ( ServerType : : kStandalone ) <nl> + . instance ( ) ; <nl> + auto b = * ServerDescriptionBuilder ( ) <nl> + . withAddress ( " bar : 1234 " ) <nl> + . withType ( ServerType : : kStandalone ) <nl> + . instance ( ) ; <nl> + ASSERT_EQUALS ( a , b ) ; <nl> + } <nl> + <nl> + TEST ( ServerDescriptionEqualityTest , ShouldCompareServerTypes ) { <nl> + auto a = * ServerDescriptionBuilder ( ) . withType ( ServerType : : kStandalone ) . instance ( ) ; <nl> + auto b = * ServerDescriptionBuilder ( ) . withType ( ServerType : : kRSSecondary ) . 
instance ( ) ; <nl> + ASSERT_NOT_EQUALS ( a , b ) ; <nl> + ASSERT_EQUALS ( a , a ) ; <nl> + } <nl> + <nl> + TEST ( ServerDescriptionEqualityTest , ShouldCompareMinWireVersion ) { <nl> + auto a = * ServerDescriptionBuilder ( ) . withMinWireVersion ( 1 ) . instance ( ) ; <nl> + auto b = * ServerDescriptionBuilder ( ) . withMinWireVersion ( 2 ) . instance ( ) ; <nl> + ASSERT_NOT_EQUALS ( a , b ) ; <nl> + ASSERT_EQUALS ( a , a ) ; <nl> + } <nl> + <nl> + TEST ( ServerDescriptionEqualityTest , ShouldCompareMaxWireVersion ) { <nl> + auto a = * ServerDescriptionBuilder ( ) . withMaxWireVersion ( 1 ) . instance ( ) ; <nl> + auto b = * ServerDescriptionBuilder ( ) . withMaxWireVersion ( 2 ) . instance ( ) ; <nl> + ASSERT_NOT_EQUALS ( a , b ) ; <nl> + ASSERT_EQUALS ( a , a ) ; <nl> + } <nl> + <nl> + TEST ( ServerDescriptionEqualityTest , ShouldCompareMeValues ) { <nl> + auto a = * ServerDescriptionBuilder ( ) . withMe ( " foo " ) . instance ( ) ; <nl> + auto b = * ServerDescriptionBuilder ( ) . withMe ( " bar " ) . instance ( ) ; <nl> + ASSERT_NOT_EQUALS ( a , b ) ; <nl> + ASSERT_EQUALS ( a , a ) ; <nl> + } <nl> + <nl> + TEST ( ServerDescriptionEqualityTest , ShouldCompareHosts ) { <nl> + auto a = * ServerDescriptionBuilder ( ) . withHost ( " foo " ) . instance ( ) ; <nl> + auto b = * ServerDescriptionBuilder ( ) . withHost ( " bar " ) . instance ( ) ; <nl> + ASSERT_NOT_EQUALS ( a , b ) ; <nl> + ASSERT_EQUALS ( a , a ) ; <nl> + } <nl> + <nl> + TEST ( ServerDescriptionEqualityTest , ShouldComparePassives ) { <nl> + auto a = * ServerDescriptionBuilder ( ) . withPassive ( " foo " ) . instance ( ) ; <nl> + auto b = * ServerDescriptionBuilder ( ) . withPassive ( " bar " ) . instance ( ) ; <nl> + ASSERT_NOT_EQUALS ( a , b ) ; <nl> + ASSERT_EQUALS ( a , a ) ; <nl> + } <nl> + <nl> + TEST ( ServerDescriptionEqualityTest , ShouldCompareArbiters ) { <nl> + auto a = * ServerDescriptionBuilder ( ) . withArbiter ( " foo " ) . instance ( ) ; <nl> + auto b = * ServerDescriptionBuilder ( ) . withArbiter ( " bar " ) . instance ( ) ; <nl> + ASSERT_NOT_EQUALS ( a , b ) ; <nl> + ASSERT_EQUALS ( a , a ) ; <nl> + } <nl> + <nl> + TEST ( ServerDescriptionEqualityTest , ShouldCompareMultipleHostsOrderDoesntMatter ) { <nl> + auto a = * ServerDescriptionBuilder ( ) . withHost ( " foo " ) . withHost ( " bar " ) . instance ( ) ; <nl> + auto b = * ServerDescriptionBuilder ( ) . withHost ( " bar " ) . withHost ( " foo " ) . instance ( ) ; <nl> + ASSERT_EQUALS ( a , b ) ; <nl> + } <nl> + <nl> + TEST ( ServerDescriptionEqualityTest , ShouldCompareMultiplePassivesOrderDoesntMatter ) { <nl> + auto a = * ServerDescriptionBuilder ( ) . withPassive ( " foo " ) . withPassive ( " bar " ) . instance ( ) ; <nl> + auto b = * ServerDescriptionBuilder ( ) . withPassive ( " bar " ) . withPassive ( " foo " ) . instance ( ) ; <nl> + ASSERT_EQUALS ( a , b ) ; <nl> + } <nl> + <nl> + TEST ( ServerDescriptionEqualityTest , ShouldCompareMultipleArbitersOrderDoesntMatter ) { <nl> + auto a = * ServerDescriptionBuilder ( ) . withArbiter ( " foo " ) . withArbiter ( " bar " ) . instance ( ) ; <nl> + auto b = * ServerDescriptionBuilder ( ) . withArbiter ( " bar " ) . withArbiter ( " foo " ) . instance ( ) ; <nl> + ASSERT_EQUALS ( a , b ) ; <nl> + } <nl> + <nl> + TEST ( ServerDescriptionEqualityTest , ShouldCompareTags ) { <nl> + auto a = * ServerDescriptionBuilder ( ) . withTag ( " foo " , " bar " ) . instance ( ) ; <nl> + auto b = * ServerDescriptionBuilder ( ) . withTag ( " baz " , " buz " ) . 
instance ( ) ; <nl> + ASSERT_NOT_EQUALS ( a , b ) ; <nl> + ASSERT_EQUALS ( a , a ) ; <nl> + } <nl> + <nl> + TEST ( ServerDescriptionEqualityTest , ShouldCompareSetName ) { <nl> + auto a = * ServerDescriptionBuilder ( ) . withSetName ( " foo " ) . instance ( ) ; <nl> + auto b = * ServerDescriptionBuilder ( ) . withSetName ( " bar " ) . instance ( ) ; <nl> + ASSERT_NOT_EQUALS ( a , b ) ; <nl> + ASSERT_EQUALS ( a , a ) ; <nl> + } <nl> + <nl> + TEST ( ServerDescriptionEqualityTest , ShouldCompareSetVersion ) { <nl> + auto a = * ServerDescriptionBuilder ( ) . withSetVersion ( 1 ) . instance ( ) ; <nl> + auto b = * ServerDescriptionBuilder ( ) . withSetVersion ( 2 ) . instance ( ) ; <nl> + ASSERT_NOT_EQUALS ( a , b ) ; <nl> + ASSERT_EQUALS ( a , a ) ; <nl> + } <nl> + <nl> + TEST ( ServerDescriptionEqualityTest , ShouldCompareElectionId ) { <nl> + auto a = * ServerDescriptionBuilder ( ) . withElectionId ( OID : : max ( ) ) . instance ( ) ; <nl> + auto b = * ServerDescriptionBuilder ( ) . withElectionId ( OID ( " 000000000000000000000000 " ) ) . instance ( ) ; <nl> + ASSERT_NOT_EQUALS ( a , b ) ; <nl> + ASSERT_EQUALS ( a , a ) ; <nl> + } <nl> + <nl> + TEST ( ServerDescriptionEqualityTest , ShouldComparePrimary ) { <nl> + auto a = * ServerDescriptionBuilder ( ) . withPrimary ( " foo : 1234 " ) . instance ( ) ; <nl> + auto b = * ServerDescriptionBuilder ( ) . withPrimary ( " bar : 1234 " ) . instance ( ) ; <nl> + ASSERT_NOT_EQUALS ( a , b ) ; <nl> + ASSERT_EQUALS ( a , a ) ; <nl> + } <nl> + <nl> + TEST ( ServerDescriptionEqualityTest , ShouldCompareLogicalSessionTimeout ) { <nl> + auto a = * ServerDescriptionBuilder ( ) . withLogicalSessionTimeoutMinutes ( 1 ) . instance ( ) ; <nl> + auto b = * ServerDescriptionBuilder ( ) . withLogicalSessionTimeoutMinutes ( 2 ) . instance ( ) ; <nl> + ASSERT_NOT_EQUALS ( a , b ) ; <nl> + ASSERT_EQUALS ( a , a ) ; <nl> + } <nl> + <nl> + <nl> + class ServerDescriptionTestFixture : public SdamTestFixture { <nl> + protected : <nl> + / / returns a set containing the elements in the given bson array with lowercase values . <nl> + std : : set < std : : string > toHostSet ( std : : vector < BSONElement > bsonArray ) { <nl> + return mapSet < BSONElement , std : : string > ( <nl> + bsonArray , [ ] ( const BSONElement & e ) { return boost : : to_lower_copy ( e . String ( ) ) ; } ) ; <nl> + } <nl> + <nl> + std : : map < std : : string , std : : string > toStringMap ( BSONObj bsonObj ) { <nl> + std : : map < std : : string , std : : string > result ; <nl> + const auto keys = bsonObj . getFieldNames < std : : set < std : : string > > ( ) ; <nl> + std : : transform ( keys . begin ( ) , <nl> + keys . end ( ) , <nl> + std : : inserter ( result , result . begin ( ) ) , <nl> + [ bsonObj ] ( const std : : string & key ) { <nl> + return std : : pair < const std : : string , std : : string > ( <nl> + key , bsonObj . getStringField ( key ) ) ; <nl> + } ) ; <nl> + return result ; <nl> + } <nl> + <nl> + static BSONObjBuilder okBuilder ( ) { <nl> + return std : : move ( BSONObjBuilder ( ) . append ( " ok " , 1 ) ) ; <nl> + } <nl> + <nl> + static inline const auto clockSource = SystemClockSource : : get ( ) ; <nl> + <nl> + static inline const auto kBsonOk = okBuilder ( ) . obj ( ) ; <nl> + static inline const auto kBsonMissingOk = BSONObjBuilder ( ) . obj ( ) ; <nl> + static inline const auto kBsonMongos = okBuilder ( ) . append ( " msg " , " isdbgrid " ) . obj ( ) ; <nl> + static inline const auto kBsonRsPrimary = <nl> + okBuilder ( ) . append ( " ismaster " , true ) . 
append ( " setName " , " foo " ) . obj ( ) ; <nl> + static inline const auto kBsonRsSecondary = <nl> + okBuilder ( ) . append ( " secondary " , true ) . append ( " setName " , " foo " ) . obj ( ) ; <nl> + static inline const auto kBsonRsArbiter = <nl> + okBuilder ( ) . append ( " arbiterOnly " , true ) . append ( " setName " , " foo " ) . obj ( ) ; <nl> + static inline const auto kBsonRsOther = <nl> + okBuilder ( ) . append ( " hidden " , true ) . append ( " setName " , " foo " ) . obj ( ) ; <nl> + static inline const auto kBsonRsGhost = okBuilder ( ) . append ( " isreplicaset " , true ) . obj ( ) ; <nl> + static inline const auto kBsonWireVersion = <nl> + okBuilder ( ) . append ( " minWireVersion " , 1 ) . append ( " maxWireVersion " , 2 ) . obj ( ) ; <nl> + static inline const auto kBsonTags = <nl> + okBuilder ( ) <nl> + . append ( " tags " , BSONObjBuilder ( ) . append ( " foo " , " bar " ) . append ( " baz " , " buz " ) . obj ( ) ) <nl> + . obj ( ) ; <nl> + static inline const mongo : : repl : : OpTime kOpTime = <nl> + mongo : : repl : : OpTime ( Timestamp ( 1568848910 ) , 24 ) ; <nl> + static inline const Date_t kLastWriteDate = <nl> + dateFromISOString ( " 2019 - 09 - 18T23 : 21 : 50Z " ) . getValue ( ) ; <nl> + static inline const auto kBsonLastWrite = <nl> + okBuilder ( ) <nl> + . append ( " lastWrite " , <nl> + BSONObjBuilder ( ) <nl> + . appendTimeT ( " lastWriteDate " , kLastWriteDate . toTimeT ( ) ) <nl> + . append ( " opTime " , kOpTime . toBSON ( ) ) <nl> + . obj ( ) ) <nl> + . obj ( ) ; <nl> + static inline const auto kBsonHostNames = okBuilder ( ) <nl> + . append ( " me " , " Me : 1234 " ) <nl> + . appendArray ( " hosts " , <nl> + BSON_ARRAY ( " Foo : 1234 " <nl> + < < " Bar : 1234 " ) ) <nl> + . appendArray ( " arbiters " , <nl> + BSON_ARRAY ( " Baz : 1234 " <nl> + < < " Buz : 1234 " ) ) <nl> + . appendArray ( " passives " , <nl> + BSON_ARRAY ( " Biz : 1234 " <nl> + < < " Boz : 1234 " ) ) <nl> + . obj ( ) ; <nl> + static inline const auto kBsonSetVersionName = <nl> + okBuilder ( ) . append ( " setVersion " , 1 ) . append ( " setName " , " bar " ) . obj ( ) ; <nl> + static inline const auto kBsonElectionId = okBuilder ( ) . append ( " electionId " , OID : : max ( ) ) . obj ( ) ; <nl> + static inline const auto kBsonPrimary = okBuilder ( ) . append ( " primary " , " foo : 1234 " ) . obj ( ) ; <nl> + static inline const auto kBsonLogicalSessionTimeout = <nl> + okBuilder ( ) . append ( " logicalSessionTimeoutMinutes " , 1 ) . obj ( ) ; <nl> + } ; <nl> + <nl> + TEST_F ( ServerDescriptionTestFixture , ShouldParseTypeAsUnknownForIsMasterError ) { <nl> + auto response = IsMasterOutcome ( " foo : 1234 " , " an error occurred " ) ; <nl> + auto description = ServerDescription ( clockSource , response ) ; <nl> + ASSERT_EQUALS ( ServerType : : kUnknown , description . getType ( ) ) ; <nl> + } <nl> + <nl> + TEST_F ( ServerDescriptionTestFixture , ShouldParseTypeAsUnknownIfOkMissing ) { <nl> + auto response = IsMasterOutcome ( " foo : 1234 " , kBsonMissingOk , IsMasterRTT : : min ( ) ) ; <nl> + auto description = ServerDescription ( clockSource , response ) ; <nl> + ASSERT_EQUALS ( ServerType : : kUnknown , description . getType ( ) ) ; <nl> + } <nl> + <nl> + TEST_F ( ServerDescriptionTestFixture , ShouldParseTypeAsStandalone ) { <nl> + / / No " msg : isdbgrid " , no setName , and no " isreplicaset : true " . 
<nl> + auto response = IsMasterOutcome ( " foo : 1234 " , kBsonOk , IsMasterRTT : : min ( ) ) ; <nl> + auto description = ServerDescription ( clockSource , response ) ; <nl> + ASSERT_EQUALS ( ServerType : : kStandalone , description . getType ( ) ) ; <nl> + } <nl> + <nl> + TEST_F ( ServerDescriptionTestFixture , ShouldParseTypeAsMongos ) { <nl> + / / contains " msg : isdbgrid " <nl> + auto response = IsMasterOutcome ( " foo : 1234 " , kBsonMongos , IsMasterRTT : : min ( ) ) ; <nl> + auto description = ServerDescription ( clockSource , response ) ; <nl> + ASSERT_EQUALS ( ServerType : : kMongos , description . getType ( ) ) ; <nl> + } <nl> + <nl> + TEST_F ( ServerDescriptionTestFixture , ShouldParseTypeAsRSPrimary ) { <nl> + / / " ismaster : true " , " setName " in response <nl> + auto response = IsMasterOutcome ( " foo : 1234 " , kBsonRsPrimary , IsMasterRTT : : min ( ) ) ; <nl> + auto description = ServerDescription ( clockSource , response ) ; <nl> + ASSERT_EQUALS ( ServerType : : kRSPrimary , description . getType ( ) ) ; <nl> + } <nl> + <nl> + TEST_F ( ServerDescriptionTestFixture , ShouldParseTypeAsRSSecondary ) { <nl> + / / " secondary : true " , " setName " in response <nl> + auto response = IsMasterOutcome ( " foo : 1234 " , kBsonRsSecondary , IsMasterRTT : : min ( ) ) ; <nl> + auto description = ServerDescription ( clockSource , response ) ; <nl> + ASSERT_EQUALS ( ServerType : : kRSSecondary , description . getType ( ) ) ; <nl> + } <nl> + <nl> + TEST_F ( ServerDescriptionTestFixture , ShouldParseTypeAsArbiter ) { <nl> + / / " arbiterOnly : true " , " setName " in response . <nl> + auto response = IsMasterOutcome ( " foo : 1234 " , kBsonRsArbiter , IsMasterRTT : : min ( ) ) ; <nl> + auto description = ServerDescription ( clockSource , response ) ; <nl> + ASSERT_EQUALS ( ServerType : : kRSArbiter , description . getType ( ) ) ; <nl> + } <nl> + <nl> + TEST_F ( ServerDescriptionTestFixture , ShouldParseTypeAsOther ) { <nl> + / / " hidden : true " , " setName " in response , or not primary , secondary , nor arbiter <nl> + auto response = IsMasterOutcome ( " foo : 1234 " , kBsonRsOther , IsMasterRTT : : min ( ) ) ; <nl> + auto description = ServerDescription ( clockSource , response ) ; <nl> + ASSERT_EQUALS ( ServerType : : kRSOther , description . getType ( ) ) ; <nl> + } <nl> + <nl> + TEST_F ( ServerDescriptionTestFixture , ShouldParseTypeAsGhost ) { <nl> + / / " isreplicaset : true " in response . <nl> + auto response = IsMasterOutcome ( " foo : 1234 " , kBsonRsGhost , IsMasterRTT : : min ( ) ) ; <nl> + auto description = ServerDescription ( clockSource , response ) ; <nl> + ASSERT_EQUALS ( ServerType : : kRSGhost , description . getType ( ) ) ; <nl> + } <nl> + <nl> + TEST_F ( ServerDescriptionTestFixture , ShouldStoreErrorDescription ) { <nl> + auto errorMsg = " an error occurred " ; <nl> + auto response = IsMasterOutcome ( " foo : 1234 " , errorMsg ) ; <nl> + auto description = ServerDescription ( clockSource , response ) ; <nl> + ASSERT_EQUALS ( errorMsg , * description . getError ( ) ) ; <nl> + } <nl> + <nl> + TEST_F ( ServerDescriptionTestFixture , ShouldStoreRTTWithNoPreviousLatency ) { <nl> + auto response = IsMasterOutcome ( " foo : 1234 " , kBsonRsPrimary , IsMasterRTT : : max ( ) ) ; <nl> + auto description = ServerDescription ( clockSource , response ) ; <nl> + ASSERT_EQUALS ( IsMasterRTT : : max ( ) , * description . 
getRtt ( ) ) ; <nl> + } <nl> + <nl> + TEST_F ( ServerDescriptionTestFixture , ShouldStoreRTTNullWhenServerTypeIsUnknown ) { <nl> + auto response = IsMasterOutcome ( " foo : 1234 " , kBsonMissingOk , IsMasterRTT : : max ( ) ) ; <nl> + auto description = ServerDescription ( clockSource , response , boost : : none ) ; <nl> + ASSERT_EQUALS ( boost : : none , description . getRtt ( ) ) ; <nl> + } <nl> + <nl> + TEST_F ( ServerDescriptionTestFixture , <nl> + ShouldStoreMovingAverageRTTWhenChangingFromOneKnownServerTypeToAnother ) { <nl> + auto response = IsMasterOutcome ( " foo : 1234 " , kBsonRsPrimary , mongo : : Milliseconds ( 40 ) ) ; <nl> + auto lastServerDescription = ServerDescriptionBuilder ( ) <nl> + . withType ( ServerType : : kRSSecondary ) <nl> + . withRtt ( mongo : : Milliseconds ( 20 ) ) <nl> + . instance ( ) ; <nl> + auto description = ServerDescription ( clockSource , response , lastServerDescription - > getRtt ( ) ) ; <nl> + ASSERT_EQUALS ( 24 , durationCount < mongo : : Milliseconds > ( * description . getRtt ( ) ) ) ; <nl> + <nl> + auto response2 = IsMasterOutcome ( " foo : 1234 " , kBsonRsPrimary , mongo : : Milliseconds ( 30 ) ) ; <nl> + auto description2 = ServerDescription ( clockSource , response2 , description . getRtt ( ) ) ; <nl> + ASSERT_EQUALS ( 25 , durationCount < mongo : : Milliseconds > ( * description2 . getRtt ( ) ) ) ; <nl> + } <nl> + <nl> + TEST_F ( ServerDescriptionTestFixture , ShouldStoreLastWriteDate ) { <nl> + auto response = IsMasterOutcome ( " foo : 1234 " , kBsonLastWrite , mongo : : Milliseconds ( 40 ) ) ; <nl> + auto description = ServerDescription ( clockSource , response ) ; <nl> + ASSERT_EQUALS ( kLastWriteDate , description . getLastWriteDate ( ) ) ; <nl> + } <nl> + <nl> + TEST_F ( ServerDescriptionTestFixture , ShouldStoreOpTime ) { <nl> + auto response = IsMasterOutcome ( " foo : 1234 " , kBsonLastWrite , mongo : : Milliseconds ( 40 ) ) ; <nl> + auto description = ServerDescription ( clockSource , response ) ; <nl> + ASSERT_EQUALS ( kOpTime , description . getOpTime ( ) ) ; <nl> + } <nl> + <nl> + TEST_F ( ServerDescriptionTestFixture , ShouldStoreLastUpdateTime ) { <nl> + auto testStart = clockSource - > now ( ) ; <nl> + auto response = IsMasterOutcome ( " foo : 1234 " , kBsonRsPrimary , mongo : : Milliseconds ( 40 ) ) ; <nl> + auto description = ServerDescription ( clockSource , response ) ; <nl> + ASSERT_GREATER_THAN_OR_EQUALS ( description . getLastUpdateTime ( ) , testStart ) ; <nl> + } <nl> + <nl> + TEST_F ( ServerDescriptionTestFixture , ShouldStoreHostNamesAsLowercase ) { <nl> + auto response = IsMasterOutcome ( " FOO : 1234 " , kBsonHostNames , mongo : : Milliseconds ( 40 ) ) ; <nl> + auto description = ServerDescription ( clockSource , response ) ; <nl> + <nl> + ASSERT_EQUALS ( " foo : 1234 " , description . getAddress ( ) ) ; <nl> + <nl> + ASSERT_EQUALS ( boost : : to_lower_copy ( std : : string ( kBsonHostNames . getStringField ( " me " ) ) ) , <nl> + * description . getMe ( ) ) ; <nl> + <nl> + auto expectedHosts = toHostSet ( kBsonHostNames . getField ( " hosts " ) . Array ( ) ) ; <nl> + ASSERT_EQUALS ( expectedHosts , description . getHosts ( ) ) ; <nl> + <nl> + auto expectedPassives = toHostSet ( kBsonHostNames . getField ( " passives " ) . Array ( ) ) ; <nl> + ASSERT_EQUALS ( expectedPassives , description . getPassives ( ) ) ; <nl> + <nl> + auto expectedArbiters = toHostSet ( kBsonHostNames . getField ( " arbiters " ) . Array ( ) ) ; <nl> + ASSERT_EQUALS ( expectedArbiters , description . 
getArbiters ( ) ) ; <nl> + } <nl> + <nl> + TEST_F ( ServerDescriptionTestFixture , ShouldStoreMinMaxWireVersion ) { <nl> + auto response = IsMasterOutcome ( " foo : 1234 " , kBsonWireVersion , mongo : : Milliseconds ( 40 ) ) ; <nl> + auto description = ServerDescription ( clockSource , response ) ; <nl> + ASSERT_EQUALS ( kBsonWireVersion [ " minWireVersion " ] . Int ( ) , description . getMinWireVersion ( ) ) ; <nl> + ASSERT_EQUALS ( kBsonWireVersion [ " maxWireVersion " ] . Int ( ) , description . getMaxWireVersion ( ) ) ; <nl> + } <nl> + <nl> + TEST_F ( ServerDescriptionTestFixture , ShouldStoreTags ) { <nl> + auto response = IsMasterOutcome ( " foo : 1234 " , kBsonTags , mongo : : Milliseconds ( 40 ) ) ; <nl> + auto description = ServerDescription ( clockSource , response ) ; <nl> + ASSERT_EQUALS ( toStringMap ( kBsonTags [ " tags " ] . Obj ( ) ) , description . getTags ( ) ) ; <nl> + } <nl> + <nl> + TEST_F ( ServerDescriptionTestFixture , ShouldStoreSetVersionAndName ) { <nl> + auto response = IsMasterOutcome ( " foo : 1234 " , kBsonSetVersionName , mongo : : Milliseconds ( 40 ) ) ; <nl> + auto description = ServerDescription ( clockSource , response ) ; <nl> + ASSERT_EQUALS ( kBsonSetVersionName . getIntField ( " setVersion " ) , description . getSetVersion ( ) ) ; <nl> + ASSERT_EQUALS ( std : : string ( kBsonSetVersionName . getStringField ( " setName " ) ) , <nl> + description . getSetName ( ) ) ; <nl> + } <nl> + <nl> + TEST_F ( ServerDescriptionTestFixture , ShouldStoreElectionId ) { <nl> + auto response = IsMasterOutcome ( " foo : 1234 " , kBsonElectionId , mongo : : Milliseconds ( 40 ) ) ; <nl> + auto description = ServerDescription ( clockSource , response ) ; <nl> + ASSERT_EQUALS ( kBsonElectionId . getField ( " electionId " ) . OID ( ) , description . getElectionId ( ) ) ; <nl> + } <nl> + <nl> + TEST_F ( ServerDescriptionTestFixture , ShouldStorePrimary ) { <nl> + auto response = IsMasterOutcome ( " foo : 1234 " , kBsonPrimary , mongo : : Milliseconds ( 40 ) ) ; <nl> + auto description = ServerDescription ( clockSource , response ) ; <nl> + ASSERT_EQUALS ( std : : string ( kBsonPrimary . getStringField ( " primary " ) ) , description . getPrimary ( ) ) ; <nl> + } <nl> + <nl> + TEST_F ( ServerDescriptionTestFixture , ShouldStoreLogicalSessionTimeout ) { <nl> + auto response = <nl> + IsMasterOutcome ( " foo : 1234 " , kBsonLogicalSessionTimeout , mongo : : Milliseconds ( 40 ) ) ; <nl> + auto description = ServerDescription ( clockSource , response ) ; <nl> + ASSERT_EQUALS ( kBsonLogicalSessionTimeout . getIntField ( " logicalSessionTimeoutMinutes " ) , <nl> + description . getLogicalSessionTimeoutMinutes ( ) ) ; <nl> + } <nl> + <nl> + <nl> + TEST_F ( ServerDescriptionTestFixture , ShouldStoreServerAddressOnError ) { <nl> + auto response = IsMasterOutcome ( " foo : 1234 " , " an error occurred " ) ; <nl> + auto description = ServerDescription ( clockSource , response ) ; <nl> + ASSERT_EQUALS ( std : : string ( " foo : 1234 " ) , description . getAddress ( ) ) ; <nl> + } <nl> + <nl> + TEST_F ( ServerDescriptionTestFixture , ShouldStoreCorrectDefaultValuesOnSuccess ) { <nl> + auto response = IsMasterOutcome ( " foo : 1234 " , kBsonOk , mongo : : Milliseconds ( 40 ) ) ; <nl> + auto description = ServerDescription ( clockSource , response ) ; <nl> + ASSERT_EQUALS ( boost : : none , description . getError ( ) ) ; <nl> + ASSERT_EQUALS ( boost : : none , description . getLastWriteDate ( ) ) ; <nl> + ASSERT_EQUALS ( 0 , description . 
getMinWireVersion ( ) ) ; <nl> + ASSERT_EQUALS ( 0 , description . getMaxWireVersion ( ) ) ; <nl> + ASSERT_EQUALS ( boost : : none , description . getMe ( ) ) ; <nl> + ASSERT_EQUALS ( static_cast < size_t > ( 0 ) , description . getHosts ( ) . size ( ) ) ; <nl> + ASSERT_EQUALS ( static_cast < size_t > ( 0 ) , description . getPassives ( ) . size ( ) ) ; <nl> + ASSERT_EQUALS ( static_cast < size_t > ( 0 ) , description . getTags ( ) . size ( ) ) ; <nl> + ASSERT_EQUALS ( boost : : none , description . getSetName ( ) ) ; <nl> + ASSERT_EQUALS ( boost : : none , description . getSetVersion ( ) ) ; <nl> + ASSERT_EQUALS ( boost : : none , description . getElectionId ( ) ) ; <nl> + ASSERT_EQUALS ( boost : : none , description . getPrimary ( ) ) ; <nl> + ASSERT_EQUALS ( boost : : none , description . getLogicalSessionTimeoutMinutes ( ) ) ; <nl> + } <nl> + <nl> + <nl> + TEST_F ( ServerDescriptionTestFixture , ShouldStoreCorrectDefaultValuesOnFailure ) { <nl> + auto response = IsMasterOutcome ( " foo : 1234 " , " an error occurred " ) ; <nl> + auto description = ServerDescription ( clockSource , response ) ; <nl> + ASSERT_EQUALS ( boost : : none , description . getLastWriteDate ( ) ) ; <nl> + ASSERT_EQUALS ( ServerType : : kUnknown , description . getType ( ) ) ; <nl> + ASSERT_EQUALS ( 0 , description . getMinWireVersion ( ) ) ; <nl> + ASSERT_EQUALS ( 0 , description . getMaxWireVersion ( ) ) ; <nl> + ASSERT_EQUALS ( boost : : none , description . getMe ( ) ) ; <nl> + ASSERT_EQUALS ( static_cast < size_t > ( 0 ) , description . getHosts ( ) . size ( ) ) ; <nl> + ASSERT_EQUALS ( static_cast < size_t > ( 0 ) , description . getPassives ( ) . size ( ) ) ; <nl> + ASSERT_EQUALS ( static_cast < size_t > ( 0 ) , description . getTags ( ) . size ( ) ) ; <nl> + ASSERT_EQUALS ( boost : : none , description . getSetName ( ) ) ; <nl> + ASSERT_EQUALS ( boost : : none , description . getSetVersion ( ) ) ; <nl> + ASSERT_EQUALS ( boost : : none , description . getElectionId ( ) ) ; <nl> + ASSERT_EQUALS ( boost : : none , description . getPrimary ( ) ) ; <nl> + ASSERT_EQUALS ( boost : : none , description . getLogicalSessionTimeoutMinutes ( ) ) ; <nl> + } <nl> + } ; / / namespace mongo : : sdam <nl> new file mode 100644 <nl> index 000000000000 . . 45f6491c4666 <nl> mmm / dev / null <nl> ppp b / src / mongo / client / sdam / topology_description . cpp <nl> <nl> + / * * <nl> + * Copyright ( C ) 2019 - present MongoDB , Inc . <nl> + * <nl> + * This program is free software : you can redistribute it and / or modify <nl> + * it under the terms of the Server Side Public License , version 1 , <nl> + * as published by MongoDB , Inc . <nl> + * <nl> + * This program is distributed in the hope that it will be useful , <nl> + * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + * Server Side Public License for more details . <nl> + * <nl> + * You should have received a copy of the Server Side Public License <nl> + * along with this program . If not , see <nl> + * < http : / / www . mongodb . com / licensing / server - side - public - license > . <nl> + * <nl> + * As a special exception , the copyright holders give permission to link the <nl> + * code of portions of this program with the OpenSSL library under certain <nl> + * conditions as described in each individual source file and distribute <nl> + * linked combinations including the program with the OpenSSL library . 
You <nl> + * must comply with the Server Side Public License in all respects for <nl> + * all of the code used other than as permitted herein . If you modify file ( s ) <nl> + * with this exception , you may extend this exception to your version of the <nl> + * file ( s ) , but you are not obligated to do so . If you do not wish to do so , <nl> + * delete this exception statement from your version . If you delete this <nl> + * exception statement from all source files in the program , then also delete <nl> + * it in the license file . <nl> + * / <nl> + # include " mongo / client / sdam / topology_description . h " <nl> + <nl> + # define MONGO_LOG_DEFAULT_COMPONENT : : mongo : : logger : : LogComponent : : kNetwork <nl> + # include " mongo / client / sdam / server_description . h " <nl> + # include " mongo / db / wire_version . h " <nl> + # include " mongo / util / log . h " <nl> + <nl> + namespace mongo : : sdam { <nl> + / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / TopologyDescription <nl> + / / / / / / / / / / / / / / / / / / / / / / / / <nl> + TopologyDescription : : TopologyDescription ( SdamConfiguration config ) <nl> + : _type ( config . getInitialType ( ) ) , _setName ( config . getSetName ( ) ) { <nl> + if ( auto seeds = config . getSeedList ( ) ) { <nl> + _servers . clear ( ) ; <nl> + for ( auto address : * seeds ) { <nl> + _servers . push_back ( std : : make_shared < ServerDescription > ( address ) ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + const UUID & TopologyDescription : : getId ( ) const { <nl> + return _id ; <nl> + } <nl> + <nl> + TopologyType TopologyDescription : : getType ( ) const { <nl> + return _type ; <nl> + } <nl> + <nl> + const boost : : optional < std : : string > & TopologyDescription : : getSetName ( ) const { <nl> + return _setName ; <nl> + } <nl> + <nl> + const boost : : optional < int > & TopologyDescription : : getMaxSetVersion ( ) const { <nl> + return _maxSetVersion ; <nl> + } <nl> + <nl> + const boost : : optional < OID > & TopologyDescription : : getMaxElectionId ( ) const { <nl> + return _maxElectionId ; <nl> + } <nl> + <nl> + const std : : vector < ServerDescriptionPtr > & TopologyDescription : : getServers ( ) const { <nl> + return _servers ; <nl> + } <nl> + <nl> + bool TopologyDescription : : isWireVersionCompatible ( ) const { <nl> + return _compatible ; <nl> + } <nl> + <nl> + const boost : : optional < std : : string > & TopologyDescription : : getWireVersionCompatibleError ( ) const { <nl> + return _compatibleError ; <nl> + } <nl> + <nl> + const boost : : optional < int > & TopologyDescription : : getLogicalSessionTimeoutMinutes ( ) const { <nl> + return _logicalSessionTimeoutMinutes ; <nl> + } <nl> + <nl> + void TopologyDescription : : setType ( TopologyType type ) { <nl> + _type = type ; <nl> + } <nl> + <nl> + bool TopologyDescription : : containsServerAddress ( const ServerAddress & address ) const { <nl> + return findServerByAddress ( address ) ! = boost : : none ; <nl> + } <nl> + <nl> + std : : vector < ServerDescriptionPtr > TopologyDescription : : findServers ( <nl> + std : : function < bool ( const ServerDescriptionPtr & ) > predicate ) const { <nl> + std : : vector < ServerDescriptionPtr > result ; <nl> + std : : copy_if ( _servers . begin ( ) , _servers . 
end ( ) , std : : back_inserter ( result ) , predicate ) ; <nl> + return result ; <nl> + } <nl> + <nl> + const boost : : optional < ServerDescriptionPtr > TopologyDescription : : findServerByAddress ( <nl> + ServerAddress address ) const { <nl> + auto results = findServers ( [ address ] ( const ServerDescriptionPtr & serverDescription ) { <nl> + return serverDescription - > getAddress ( ) = = address ; <nl> + } ) ; <nl> + return ( results . size ( ) > 0 ) ? boost : : make_optional ( results . front ( ) ) : boost : : none ; <nl> + } <nl> + <nl> + boost : : optional < ServerDescriptionPtr > TopologyDescription : : installServerDescription ( <nl> + const ServerDescriptionPtr & newServerDescription ) { <nl> + boost : : optional < ServerDescriptionPtr > previousDescription ; <nl> + if ( getType ( ) = = TopologyType : : kSingle ) { <nl> + / / For Single , there is always one ServerDescription in TopologyDescription . servers ; <nl> + / / the ServerDescription in TopologyDescription . servers MUST be replaced with the new <nl> + / / ServerDescription . <nl> + invariant ( _servers . size ( ) = = 1 ) ; <nl> + previousDescription = _servers [ 0 ] ; <nl> + _servers [ 0 ] = std : : shared_ptr < ServerDescription > ( newServerDescription ) ; <nl> + } else { <nl> + for ( auto it = _servers . begin ( ) ; it ! = _servers . end ( ) ; + + it ) { <nl> + const auto & currentDescription = * it ; <nl> + if ( currentDescription - > getAddress ( ) = = newServerDescription - > getAddress ( ) ) { <nl> + previousDescription = * it ; <nl> + * it = std : : shared_ptr < ServerDescription > ( newServerDescription ) ; <nl> + break ; <nl> + } <nl> + } <nl> + <nl> + if ( ! previousDescription ) { <nl> + _servers . push_back ( std : : shared_ptr < ServerDescription > ( newServerDescription ) ) ; <nl> + } <nl> + } <nl> + <nl> + checkWireCompatibilityVersions ( ) ; <nl> + calculateLogicalSessionTimeout ( ) ; <nl> + return previousDescription ; <nl> + } <nl> + <nl> + void TopologyDescription : : removeServerDescription ( const ServerAddress & serverAddress ) { <nl> + auto it = std : : find_if ( <nl> + _servers . begin ( ) , _servers . end ( ) , [ serverAddress ] ( const ServerDescriptionPtr & description ) { <nl> + return description - > getAddress ( ) = = serverAddress ; <nl> + } ) ; <nl> + if ( it ! = _servers . end ( ) ) { <nl> + _servers . erase ( it ) ; <nl> + } <nl> + } <nl> + <nl> + void TopologyDescription : : checkWireCompatibilityVersions ( ) { <nl> + const WireVersionInfo supportedWireVersion = WireSpec : : instance ( ) . outgoing ; <nl> + std : : ostringstream errorOss ; <nl> + <nl> + _compatible = true ; <nl> + for ( const auto & serverDescription : _servers ) { <nl> + if ( serverDescription - > getType ( ) = = ServerType : : kUnknown ) { <nl> + continue ; <nl> + } <nl> + <nl> + if ( serverDescription - > getMinWireVersion ( ) > supportedWireVersion . maxWireVersion ) { <nl> + _compatible = false ; <nl> + errorOss < < " Server at " < < serverDescription - > getAddress ( ) < < " requires wire version " <nl> + < < serverDescription - > getMinWireVersion ( ) <nl> + < < " but this version of mongo only supports up to " <nl> + < < supportedWireVersion . maxWireVersion < < " . " ; <nl> + break ; <nl> + } else if ( serverDescription - > getMaxWireVersion ( ) < supportedWireVersion . minWireVersion ) { <nl> + _compatible = false ; <nl> + const auto & mongoVersion = <nl> + minimumRequiredMongoVersionString ( supportedWireVersion . 
minWireVersion ) ; <nl> + errorOss < < " Server at " < < serverDescription - > getAddress ( ) < < " requires wire version " <nl> + < < serverDescription - > getMaxWireVersion ( ) <nl> + < < " but this version of mongo requires at least " <nl> + < < supportedWireVersion . minWireVersion < < " ( MongoDB " < < mongoVersion < < " ) . " ; <nl> + break ; <nl> + } <nl> + } <nl> + <nl> + _compatibleError = ( _compatible ) ? boost : : none : boost : : make_optional ( errorOss . str ( ) ) ; <nl> + } <nl> + <nl> + const std : : string TopologyDescription : : minimumRequiredMongoVersionString ( int version ) { <nl> + switch ( version ) { <nl> + case PLACEHOLDER_FOR_44 : <nl> + return " 4 . 4 " ; <nl> + case SHARDED_TRANSACTIONS : <nl> + return " 4 . 2 " ; <nl> + case REPLICA_SET_TRANSACTIONS : <nl> + return " 4 . 0 " ; <nl> + case SUPPORTS_OP_MSG : <nl> + return " 3 . 6 " ; <nl> + case COMMANDS_ACCEPT_WRITE_CONCERN : <nl> + return " 3 . 4 " ; <nl> + case BATCH_COMMANDS : <nl> + return " 3 . 2 " ; <nl> + case FIND_COMMAND : <nl> + return " 3 . 2 " ; <nl> + case RELEASE_2_7_7 : <nl> + return " 3 . 0 " ; <nl> + case AGG_RETURNS_CURSORS : <nl> + return " 2 . 6 " ; <nl> + case RELEASE_2_4_AND_BEFORE : <nl> + return " 2 . 4 " ; <nl> + default : <nl> + MONGO_UNREACHABLE ; <nl> + } <nl> + } <nl> + <nl> + void TopologyDescription : : calculateLogicalSessionTimeout ( ) { <nl> + int min = INT_MAX ; <nl> + bool foundNone = false ; <nl> + bool hasDataBearingServer = false ; <nl> + <nl> + invariant ( _servers . size ( ) > 0 ) ; <nl> + for ( auto description : _servers ) { <nl> + if ( ! description - > isDataBearingServer ( ) ) { <nl> + continue ; <nl> + } <nl> + hasDataBearingServer = true ; <nl> + <nl> + auto logicalSessionTimeout = description - > getLogicalSessionTimeoutMinutes ( ) ; <nl> + if ( ! logicalSessionTimeout ) { <nl> + foundNone = true ; <nl> + break ; <nl> + } <nl> + min = std : : min ( * logicalSessionTimeout , min ) ; <nl> + } <nl> + _logicalSessionTimeoutMinutes = <nl> + ( foundNone | | ! hasDataBearingServer ) ? boost : : none : boost : : make_optional ( min ) ; <nl> + } <nl> + <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / SdamConfiguration <nl> + / / / / / / / / / / / / / / / / / / / / / / / / <nl> + SdamConfiguration : : SdamConfiguration ( boost : : optional < std : : vector < ServerAddress > > seedList , <nl> + TopologyType initialType , <nl> + mongo : : Milliseconds heartBeatFrequencyMs , <nl> + boost : : optional < std : : string > setName ) <nl> + : _seedList ( seedList ) , <nl> + _initialType ( initialType ) , <nl> + _heartBeatFrequencyMs ( heartBeatFrequencyMs ) , <nl> + _setName ( setName ) { <nl> + uassert ( ErrorCodes : : InvalidSeedList , <nl> + " seed list size must be > = 1 " , <nl> + ! seedList | | ( * seedList ) . size ( ) > = 1 ) ; <nl> + <nl> + uassert ( ErrorCodes : : InvalidSeedList , <nl> + " TopologyType Single must have exactly one entry in the seed list . " , <nl> + _initialType ! = TopologyType : : kSingle | | ( * seedList ) . size ( ) = = 1 ) ; <nl> + <nl> + uassert ( <nl> + ErrorCodes : : InvalidTopologyType , <nl> + " Only ToplogyTypes ReplicaSetNoPrimary and Single are allowed when a setName is provided . " , <nl> + ! _setName | | <nl> + ( _initialType = = TopologyType : : kReplicaSetNoPrimary | | <nl> + _initialType = = TopologyType : : kSingle ) ) ; <nl> + <nl> + uassert ( ErrorCodes : : TopologySetNameRequired , <nl> + " setName is required for ReplicaSetNoPrimary " , <nl> + _initialType ! 
= TopologyType : : kReplicaSetNoPrimary | | _setName ) ; <nl> + <nl> + uassert ( ErrorCodes : : InvalidHeartBeatFrequency , <nl> + " topology heartbeat must be > = 500ms " , <nl> + _heartBeatFrequencyMs > = kMinHeartbeatFrequencyMS ) ; <nl> + } <nl> + <nl> + const boost : : optional < std : : vector < ServerAddress > > & SdamConfiguration : : getSeedList ( ) const { <nl> + return _seedList ; <nl> + } <nl> + <nl> + TopologyType SdamConfiguration : : getInitialType ( ) const { <nl> + return _initialType ; <nl> + } <nl> + <nl> + Milliseconds SdamConfiguration : : getHeartBeatFrequency ( ) const { <nl> + return _heartBeatFrequencyMs ; <nl> + } <nl> + <nl> + const boost : : optional < std : : string > & SdamConfiguration : : getSetName ( ) const { <nl> + return _setName ; <nl> + } <nl> + } ; / / namespace mongo : : sdam <nl> new file mode 100644 <nl> index 000000000000 . . b39bd60663b7 <nl> mmm / dev / null <nl> ppp b / src / mongo / client / sdam / topology_description . h <nl> <nl> + / * * <nl> + * Copyright ( C ) 2019 - present MongoDB , Inc . <nl> + * <nl> + * This program is free software : you can redistribute it and / or modify <nl> + * it under the terms of the Server Side Public License , version 1 , <nl> + * as published by MongoDB , Inc . <nl> + * <nl> + * This program is distributed in the hope that it will be useful , <nl> + * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + * Server Side Public License for more details . <nl> + * <nl> + * You should have received a copy of the Server Side Public License <nl> + * along with this program . If not , see <nl> + * < http : / / www . mongodb . com / licensing / server - side - public - license > . <nl> + * <nl> + * As a special exception , the copyright holders give permission to link the <nl> + * code of portions of this program with the OpenSSL library under certain <nl> + * conditions as described in each individual source file and distribute <nl> + * linked combinations including the program with the OpenSSL library . You <nl> + * must comply with the Server Side Public License in all respects for <nl> + * all of the code used other than as permitted herein . If you modify file ( s ) <nl> + * with this exception , you may extend this exception to your version of the <nl> + * file ( s ) , but you are not obligated to do so . If you do not wish to do so , <nl> + * delete this exception statement from your version . If you delete this <nl> + * exception statement from all source files in the program , then also delete <nl> + * it in the license file . <nl> + * / <nl> + <nl> + # pragma once <nl> + # include < memory > <nl> + # include < string > <nl> + # include < unordered_set > <nl> + <nl> + # include " boost / optional / optional . hpp " <nl> + <nl> + # include " mongo / bson / oid . h " <nl> + # include " mongo / client / read_preference . h " <nl> + # include " mongo / client / sdam / sdam_datatypes . h " <nl> + # include " mongo / client / sdam / server_description . h " <nl> + # include " mongo / platform / basic . h " <nl> + <nl> + namespace mongo : : sdam { <nl> + class SdamConfiguration { <nl> + public : <nl> + SdamConfiguration ( ) : SdamConfiguration ( boost : : none ) { } ; <nl> + <nl> + / * * <nl> + * Initialize the TopologyDescription . This constructor may uassert if the provided <nl> + * configuration options are not valid according to the Server Discovery & Monitoring Spec . 
<nl> + * <nl> + * Initial Servers <nl> + * initial servers may be set to a seed list of one or more server addresses . <nl> + * <nl> + * Initial TopologyType <nl> + * The initial TopologyType may be set to Single , Unknown , or ReplicaSetNoPrimary . <nl> + * <nl> + * Initial setName <nl> + * The client ' s initial replica set name is required in order to initially configure the <nl> + * topology type as ReplicaSetNoPrimary . <nl> + * <nl> + * Allowed configuration combinations <nl> + * TopologyType Single cannot be used with multiple seeds . <nl> + * If setName is not null , only TopologyType ReplicaSetNoPrimary and Single , are <nl> + * allowed . <nl> + * / <nl> + SdamConfiguration ( boost : : optional < std : : vector < ServerAddress > > seedList , <nl> + TopologyType initialType = TopologyType : : kUnknown , <nl> + mongo : : Milliseconds heartBeatFrequencyMs = kDefaultHeartbeatFrequencyMs , <nl> + boost : : optional < std : : string > setName = boost : : none ) ; <nl> + <nl> + const boost : : optional < std : : vector < ServerAddress > > & getSeedList ( ) const ; <nl> + TopologyType getInitialType ( ) const ; <nl> + Milliseconds getHeartBeatFrequency ( ) const ; <nl> + const boost : : optional < std : : string > & getSetName ( ) const ; <nl> + <nl> + static inline const mongo : : Milliseconds kDefaultHeartbeatFrequencyMs = mongo : : Seconds ( 10 ) ; <nl> + static inline const mongo : : Milliseconds kMinHeartbeatFrequencyMS = mongo : : Milliseconds ( 500 ) ; <nl> + <nl> + private : <nl> + boost : : optional < std : : vector < ServerAddress > > _seedList ; <nl> + TopologyType _initialType ; <nl> + mongo : : Milliseconds _heartBeatFrequencyMs ; <nl> + boost : : optional < std : : string > _setName ; <nl> + } ; <nl> + <nl> + class TopologyDescription { <nl> + public : <nl> + TopologyDescription ( ) : TopologyDescription ( SdamConfiguration ( ) ) { } <nl> + TopologyDescription ( const TopologyDescription & source ) = default ; <nl> + <nl> + / * * <nl> + * Initialize the TopologyDescription with the given configuration . <nl> + * / <nl> + TopologyDescription ( SdamConfiguration config ) ; <nl> + <nl> + const UUID & getId ( ) const ; <nl> + TopologyType getType ( ) const ; <nl> + const boost : : optional < std : : string > & getSetName ( ) const ; <nl> + <nl> + const boost : : optional < int > & getMaxSetVersion ( ) const ; <nl> + const boost : : optional < OID > & getMaxElectionId ( ) const ; <nl> + <nl> + const std : : vector < ServerDescriptionPtr > & getServers ( ) const ; <nl> + <nl> + bool isWireVersionCompatible ( ) const ; <nl> + const boost : : optional < std : : string > & getWireVersionCompatibleError ( ) const ; <nl> + <nl> + const boost : : optional < int > & getLogicalSessionTimeoutMinutes ( ) const ; <nl> + const Milliseconds & getHeartBeatFrequency ( ) const ; <nl> + <nl> + const boost : : optional < ServerDescriptionPtr > findServerByAddress ( ServerAddress address ) const ; <nl> + bool containsServerAddress ( const ServerAddress & address ) const ; <nl> + std : : vector < ServerDescriptionPtr > findServers ( <nl> + std : : function < bool ( const ServerDescriptionPtr & ) > predicate ) const ; <nl> + <nl> + / * * <nl> + * Adds the given ServerDescription or swaps it with an existing one <nl> + * using the description ' s ServerAddress as the lookup key . If present , the previous server <nl> + * description is returned . 
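Putting the SdamConfiguration constraints documented above together (they are enforced with uassert in topology_description.cpp), the accepted and rejected call shapes look roughly as follows; the host names and set name are made up for illustration:

#include <string>
#include <vector>
#include "mongo/client/sdam/topology_description.h"

std::vector<ServerAddress> one{"a.example.net:27017"};
std::vector<ServerAddress> two{"a.example.net:27017", "b.example.net:27017"};

SdamConfiguration unknownTopology(two);                // ok: defaults to TopologyType::kUnknown
SdamConfiguration single(one, TopologyType::kSingle);  // ok: Single with exactly one seed
SdamConfiguration replicaSet(two,
                             TopologyType::kReplicaSetNoPrimary,
                             mongo::Seconds(10),
                             std::string("rs0"));      // ok: setName supplied

// Rejected via uassert:
//   SdamConfiguration(two, TopologyType::kSingle)                            -> InvalidSeedList
//   SdamConfiguration(two, TopologyType::kReplicaSetNoPrimary)               -> TopologySetNameRequired
//   SdamConfiguration(one, TopologyType::kUnknown, mongo::Milliseconds(100)) -> InvalidHeartBeatFrequency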
<nl> + * / <nl> + boost : : optional < ServerDescriptionPtr > installServerDescription ( <nl> + const ServerDescriptionPtr & newServerDescription ) ; <nl> + void removeServerDescription ( const ServerAddress & serverAddress ) ; <nl> + <nl> + void setType ( TopologyType type ) ; <nl> + <nl> + private : <nl> + / * * <nl> + * Checks if all server descriptions are compatible with this server ' s WireVersion . If an <nl> + * incompatible description is found , we set the topologyDescription ' s _compatible flag to false <nl> + * and store an error message in _compatibleError . A ServerDescription which is not Unknown is <nl> + * incompatible if : <nl> + * minWireVersion > serverMaxWireVersion , or maxWireVersion < serverMinWireVersion <nl> + * / <nl> + void checkWireCompatibilityVersions ( ) ; <nl> + <nl> + / * * <nl> + * Used in error string for wire compatibility check . <nl> + * <nl> + * Source : <nl> + * https : / / github . com / mongodb / specifications / blob / master / source / wireversion - featurelist . rst <nl> + * / <nl> + const std : : string minimumRequiredMongoVersionString ( int version ) ; <nl> + <nl> + / * * <nl> + * From Server Discovery and Monitoring : <nl> + * Updates the TopologyDescription . logicalSessionTimeoutMinutes to the smallest <nl> + * logicalSessionTimeoutMinutes value among ServerDescriptions of all data - bearing server types . <nl> + * If any have a null logicalSessionTimeoutMinutes , then <nl> + * TopologyDescription . logicalSessionTimeoutMinutes is set to null . <nl> + * <nl> + * https : / / github . com / mongodb / specifications / blob / master / source / server - discovery - and - monitoring / server - discovery - and - monitoring . rst # logical - session - timeout <nl> + * / <nl> + void calculateLogicalSessionTimeout ( ) ; <nl> + <nl> + / / unique id for this topology <nl> + UUID _id = UUID : : gen ( ) ; <nl> + <nl> + / / a TopologyType enum value . <nl> + TopologyType _type = TopologyType : : kUnknown ; <nl> + <nl> + / / setName : the replica set name . Default null . <nl> + boost : : optional < std : : string > _setName ; <nl> + <nl> + / / maxSetVersion : an integer or null . The largest setVersion ever reported by a primary . <nl> + / / Default null . <nl> + boost : : optional < int > _maxSetVersion ; <nl> + <nl> + / / maxElectionId : an ObjectId or null . The largest electionId ever reported by a primary . <nl> + / / Default null . <nl> + boost : : optional < OID > _maxElectionId ; <nl> + <nl> + / / servers : a set of ServerDescription instances . Default contains one server : <nl> + / / " localhost : 27017 " , ServerType Unknown . <nl> + std : : vector < ServerDescriptionPtr > _servers { <nl> + std : : make_shared < ServerDescription > ( " localhost : 27017 " ) } ; <nl> + <nl> + / / compatible : a boolean . False if any server ' s wire protocol version range is incompatible with <nl> + / / the client ' s . Default true . <nl> + bool _compatible = true ; <nl> + <nl> + / / compatibilityError : a string . The error message if " compatible " is false , otherwise null . <nl> + boost : : optional < std : : string > _compatibleError ; <nl> + <nl> + / / logicalSessionTimeoutMinutes : integer or null . Default null . <nl> + boost : : optional < int > _logicalSessionTimeoutMinutes ; <nl> + <nl> + friend class TopologyStateMachine ; <nl> + } ; <nl> + } / / namespace mongo : : sdam <nl> new file mode 100644 <nl> index 000000000000 . . 68c18e0f5608 <nl> mmm / dev / null <nl> ppp b / src / mongo / client / sdam / topology_description_test . 
cpp <nl> <nl> + / * * <nl> + * Copyright ( C ) 2019 - present MongoDB , Inc . <nl> + * <nl> + * This program is free software : you can redistribute it and / or modify <nl> + * it under the terms of the Server Side Public License , version 1 , <nl> + * as published by MongoDB , Inc . <nl> + * <nl> + * This program is distributed in the hope that it will be useful , <nl> + * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + * Server Side Public License for more details . <nl> + * <nl> + * You should have received a copy of the Server Side Public License <nl> + * along with this program . If not , see <nl> + * < http : / / www . mongodb . com / licensing / server - side - public - license > . <nl> + * <nl> + * As a special exception , the copyright holders give permission to link the <nl> + * code of portions of this program with the OpenSSL library under certain <nl> + * conditions as described in each individual source file and distribute <nl> + * linked combinations including the program with the OpenSSL library . You <nl> + * must comply with the Server Side Public License in all respects for <nl> + * all of the code used other than as permitted herein . If you modify file ( s ) <nl> + * with this exception , you may extend this exception to your version of the <nl> + * file ( s ) , but you are not obligated to do so . If you do not wish to do so , <nl> + * delete this exception statement from your version . If you delete this <nl> + * exception statement from all source files in the program , then also delete <nl> + * it in the license file . <nl> + * / <nl> + # include " mongo / client / sdam / sdam_test_base . h " <nl> + # include " mongo / client / sdam / topology_description . h " <nl> + <nl> + # include < boost / optional / optional_io . hpp > <nl> + <nl> + # include " mongo / client / sdam / server_description . h " <nl> + # include " mongo / client / sdam / server_description_builder . h " <nl> + # include " mongo / db / wire_version . h " <nl> + # include " mongo / unittest / death_test . h " <nl> + <nl> + namespace mongo { <nl> + template std : : ostream & operator < < ( std : : ostream & os , <nl> + const std : : vector < mongo : : sdam : : ServerAddress > & s ) ; <nl> + <nl> + namespace sdam { <nl> + using mongo : : operator < < ; <nl> + <nl> + class TopologyDescriptionTestFixture : public SdamTestFixture { <nl> + protected : <nl> + void assertDefaultConfig ( const TopologyDescription & topologyDescription ) ; <nl> + <nl> + static inline const auto kSetName = std : : string ( " mySetName " ) ; <nl> + <nl> + static inline const std : : vector < ServerAddress > kOneServer { " foo : 1234 " } ; <nl> + static inline const std : : vector < ServerAddress > kTwoServersVaryCase { " FoO : 1234 " , " BaR : 1234 " } ; <nl> + static inline const std : : vector < ServerAddress > kTwoServersNormalCase { " foo : 1234 " , " bar : 1234 " } ; <nl> + static inline const std : : vector < ServerAddress > kThreeServers { <nl> + " foo : 1234 " , " bar : 1234 " , " baz : 1234 " } ; <nl> + <nl> + static inline const auto kDefaultConfig = SdamConfiguration ( ) ; <nl> + static inline const auto kSingleSeedConfig = <nl> + SdamConfiguration ( kOneServer , TopologyType : : kSingle ) ; <nl> + } ; <nl> + <nl> + void TopologyDescriptionTestFixture : : assertDefaultConfig ( <nl> + const TopologyDescription & topologyDescription ) { <nl> + ASSERT_EQUALS ( boost : : none , topologyDescription . 
getSetName ( ) ) ; <nl> + ASSERT_EQUALS ( boost : : none , topologyDescription . getMaxElectionId ( ) ) ; <nl> + <nl> + auto expectedDefaultServer = ServerDescription ( " localhost : 27017 " ) ; <nl> + ASSERT_EQUALS ( expectedDefaultServer , * topologyDescription . getServers ( ) . front ( ) ) ; <nl> + ASSERT_EQUALS ( static_cast < std : : size_t > ( 1 ) , topologyDescription . getServers ( ) . size ( ) ) ; <nl> + <nl> + ASSERT_EQUALS ( true , topologyDescription . isWireVersionCompatible ( ) ) ; <nl> + ASSERT_EQUALS ( boost : : none , topologyDescription . getWireVersionCompatibleError ( ) ) ; <nl> + ASSERT_EQUALS ( boost : : none , topologyDescription . getLogicalSessionTimeoutMinutes ( ) ) ; <nl> + } <nl> + <nl> + TEST_F ( TopologyDescriptionTestFixture , ShouldHaveCorrectDefaultValues ) { <nl> + assertDefaultConfig ( TopologyDescription ( kDefaultConfig ) ) ; <nl> + assertDefaultConfig ( TopologyDescription ( ) ) ; <nl> + } <nl> + <nl> + TEST_F ( TopologyDescriptionTestFixture , ShouldNormalizeInitialSeedList ) { <nl> + auto config = SdamConfiguration ( kTwoServersVaryCase ) ; <nl> + TopologyDescription topologyDescription ( config ) ; <nl> + <nl> + auto expectedAddresses = kTwoServersNormalCase ; <nl> + <nl> + auto serverAddresses = map < ServerDescriptionPtr , ServerAddress > ( <nl> + topologyDescription . getServers ( ) , <nl> + [ ] ( const ServerDescriptionPtr & description ) { return description - > getAddress ( ) ; } ) ; <nl> + <nl> + ASSERT_EQUALS ( expectedAddresses , serverAddresses ) ; <nl> + } <nl> + <nl> + TEST_F ( TopologyDescriptionTestFixture , ShouldAllowTypeSingleWithASingleSeed ) { <nl> + TopologyDescription topologyDescription ( kSingleSeedConfig ) ; <nl> + <nl> + ASSERT ( TopologyType : : kSingle = = topologyDescription . getType ( ) ) ; <nl> + <nl> + auto servers = map < ServerDescriptionPtr , ServerAddress > ( <nl> + topologyDescription . getServers ( ) , <nl> + [ ] ( const ServerDescriptionPtr & desc ) { return desc - > getAddress ( ) ; } ) ; <nl> + ASSERT_EQUALS ( kOneServer , servers ) ; <nl> + } <nl> + <nl> + TEST_F ( TopologyDescriptionTestFixture , DoesNotAllowMultipleSeedsWithSingle ) { <nl> + ASSERT_THROWS_CODE ( <nl> + { <nl> + auto config = SdamConfiguration ( kTwoServersNormalCase , TopologyType : : kSingle ) ; <nl> + TopologyDescription topologyDescription ( config ) ; <nl> + } , <nl> + DBException , <nl> + ErrorCodes : : InvalidSeedList ) ; <nl> + } <nl> + <nl> + TEST_F ( TopologyDescriptionTestFixture , ShouldSetTheReplicaSetName ) { <nl> + auto expectedSetName = kSetName ; <nl> + auto config = SdamConfiguration ( <nl> + kOneServer , TopologyType : : kReplicaSetNoPrimary , mongo : : Seconds ( 10 ) , expectedSetName ) ; <nl> + TopologyDescription topologyDescription ( config ) ; <nl> + ASSERT_EQUALS ( expectedSetName , * topologyDescription . 
getSetName ( ) ) ; <nl> + } <nl> + <nl> + TEST_F ( TopologyDescriptionTestFixture , ShouldNotAllowSettingTheReplicaSetNameWithWrongType ) { <nl> + ASSERT_THROWS_CODE ( <nl> + { <nl> + auto config = <nl> + SdamConfiguration ( kOneServer , TopologyType : : kUnknown , mongo : : Seconds ( 10 ) , kSetName ) ; <nl> + TopologyDescription topologyDescription ( config ) ; <nl> + } , <nl> + DBException , <nl> + ErrorCodes : : InvalidTopologyType ) ; <nl> + } <nl> + <nl> + TEST_F ( TopologyDescriptionTestFixture , ShouldNotAllowTopologyTypeRSNoPrimaryWithoutSetName ) { <nl> + ASSERT_THROWS_CODE ( <nl> + { <nl> + SdamConfiguration ( <nl> + kOneServer , TopologyType : : kReplicaSetNoPrimary , mongo : : Seconds ( 10 ) , boost : : none ) ; <nl> + } , <nl> + DBException , <nl> + ErrorCodes : : TopologySetNameRequired ) ; <nl> + } <nl> + <nl> + TEST_F ( TopologyDescriptionTestFixture , ShouldOnlyAllowSingleAndRsNoPrimaryWithSetName ) { <nl> + auto topologyTypes = allTopologyTypes ( ) ; <nl> + topologyTypes . erase ( std : : remove_if ( topologyTypes . begin ( ) , <nl> + topologyTypes . end ( ) , <nl> + [ ] ( const TopologyType & topologyType ) { <nl> + return topologyType = = TopologyType : : kSingle | | <nl> + topologyType = = TopologyType : : kReplicaSetNoPrimary ; <nl> + } ) , <nl> + topologyTypes . end ( ) ) ; <nl> + <nl> + for ( const auto topologyType : topologyTypes ) { <nl> + ASSERT_THROWS_CODE ( <nl> + { <nl> + std : : cout < < " Check TopologyType " < < toString ( topologyType ) <nl> + < < " with setName value . " < < std : : endl ; <nl> + auto config = <nl> + SdamConfiguration ( kOneServer , topologyType , mongo : : Seconds ( 10 ) , kSetName ) ; <nl> + / / This is here to ensure that the compiler actually generates code for the above <nl> + / / statement . <nl> + std : : cout < < " Test failed for topologyType " < < config . getInitialType ( ) <nl> + < < std : : endl ; <nl> + MONGO_UNREACHABLE ; <nl> + } , <nl> + DBException , <nl> + ErrorCodes : : InvalidTopologyType ) ; <nl> + } <nl> + } <nl> + <nl> + TEST_F ( TopologyDescriptionTestFixture , ShouldDefaultHeartbeatToTenSecs ) { <nl> + SdamConfiguration config ; <nl> + ASSERT_EQUALS ( SdamConfiguration : : kDefaultHeartbeatFrequencyMs , config . getHeartBeatFrequency ( ) ) ; <nl> + } <nl> + <nl> + TEST_F ( TopologyDescriptionTestFixture , ShouldAllowSettingTheHeartbeatFrequency ) { <nl> + const auto expectedHeartbeatFrequency = mongo : : Milliseconds ( 20000 ) ; <nl> + SdamConfiguration config ( boost : : none , TopologyType : : kUnknown , expectedHeartbeatFrequency ) ; <nl> + ASSERT_EQUALS ( expectedHeartbeatFrequency , config . getHeartBeatFrequency ( ) ) ; <nl> + } <nl> + <nl> + TEST_F ( TopologyDescriptionTestFixture , ShouldNotAllowChangingTheHeartbeatFrequencyBelow500Ms ) { <nl> + auto belowThresholdFrequency = <nl> + mongo : : Milliseconds ( SdamConfiguration : : kMinHeartbeatFrequencyMS . count ( ) - 1 ) ; <nl> + ASSERT_THROWS_CODE ( <nl> + { SdamConfiguration config ( boost : : none , TopologyType : : kUnknown , belowThresholdFrequency ) ; } , <nl> + DBException , <nl> + ErrorCodes : : InvalidHeartBeatFrequency ) ; <nl> + } <nl> + <nl> + TEST_F ( TopologyDescriptionTestFixture , <nl> + ShouldSetWireCompatibilityErrorForMinWireVersionWhenMinWireVersionIsGreater ) { <nl> + const auto outgoingMaxWireVersion = WireSpec : : instance ( ) . outgoing . 
maxWireVersion ; <nl> + const auto config = SdamConfiguration ( kOneServer , TopologyType : : kUnknown , mongo : : Seconds ( 10 ) ) ; <nl> + TopologyDescription topologyDescription ( config ) ; <nl> + const auto serverDescriptionMinVersion = ServerDescriptionBuilder ( ) <nl> + . withAddress ( kOneServer [ 0 ] ) <nl> + . withMe ( kOneServer [ 0 ] ) <nl> + . withType ( ServerType : : kRSSecondary ) <nl> + . withMinWireVersion ( outgoingMaxWireVersion + 1 ) <nl> + . instance ( ) ; <nl> + <nl> + ASSERT_EQUALS ( boost : : none , topologyDescription . getWireVersionCompatibleError ( ) ) ; <nl> + topologyDescription . installServerDescription ( serverDescriptionMinVersion ) ; <nl> + ASSERT_NOT_EQUALS ( boost : : none , topologyDescription . getWireVersionCompatibleError ( ) ) ; <nl> + } <nl> + <nl> + TEST_F ( TopologyDescriptionTestFixture , <nl> + ShouldSetWireCompatibilityErrorForMinWireVersionWhenMaxWireVersionIsLess ) { <nl> + const auto outgoingMinWireVersion = WireSpec : : instance ( ) . outgoing . minWireVersion ; <nl> + const auto config = SdamConfiguration ( kOneServer , TopologyType : : kUnknown , mongo : : Seconds ( 10 ) ) ; <nl> + TopologyDescription topologyDescription ( config ) ; <nl> + const auto serverDescriptionMaxVersion = ServerDescriptionBuilder ( ) <nl> + . withAddress ( kOneServer [ 0 ] ) <nl> + . withMe ( kOneServer [ 0 ] ) <nl> + . withType ( ServerType : : kRSSecondary ) <nl> + . withMaxWireVersion ( outgoingMinWireVersion - 1 ) <nl> + . instance ( ) ; <nl> + <nl> + ASSERT_EQUALS ( boost : : none , topologyDescription . getWireVersionCompatibleError ( ) ) ; <nl> + topologyDescription . installServerDescription ( serverDescriptionMaxVersion ) ; <nl> + ASSERT_NOT_EQUALS ( boost : : none , topologyDescription . getWireVersionCompatibleError ( ) ) ; <nl> + } <nl> + <nl> + TEST_F ( TopologyDescriptionTestFixture , ShouldNotSetWireCompatibilityErrorWhenServerTypeIsUnknown ) { <nl> + const auto outgoingMinWireVersion = WireSpec : : instance ( ) . outgoing . minWireVersion ; <nl> + const auto config = SdamConfiguration ( kOneServer , TopologyType : : kUnknown , mongo : : Seconds ( 10 ) ) ; <nl> + TopologyDescription topologyDescription ( config ) ; <nl> + const auto serverDescriptionMaxVersion = <nl> + ServerDescriptionBuilder ( ) . withMaxWireVersion ( outgoingMinWireVersion - 1 ) . instance ( ) ; <nl> + <nl> + ASSERT_EQUALS ( boost : : none , topologyDescription . getWireVersionCompatibleError ( ) ) ; <nl> + topologyDescription . installServerDescription ( serverDescriptionMaxVersion ) ; <nl> + ASSERT_EQUALS ( boost : : none , topologyDescription . getWireVersionCompatibleError ( ) ) ; <nl> + } <nl> + <nl> + TEST_F ( TopologyDescriptionTestFixture , ShouldSetLogicalSessionTimeoutToMinOfAllServerDescriptions ) { <nl> + const auto config = SdamConfiguration ( kThreeServers ) ; <nl> + TopologyDescription topologyDescription ( config ) ; <nl> + <nl> + const auto logicalSessionTimeouts = std : : vector { 300 , 100 , 200 } ; <nl> + auto timeoutIt = logicalSessionTimeouts . begin ( ) ; <nl> + const auto serverDescriptionsWithTimeouts = map < ServerDescriptionPtr , ServerDescriptionPtr > ( <nl> + topologyDescription . getServers ( ) , [ & timeoutIt ] ( const ServerDescriptionPtr & description ) { <nl> + auto newInstanceBuilder = ServerDescriptionBuilder ( ) <nl> + . withType ( ServerType : : kRSSecondary ) <nl> + . withAddress ( description - > getAddress ( ) ) <nl> + . withMe ( description - > getAddress ( ) ) <nl> + . 
withLogicalSessionTimeoutMinutes ( * timeoutIt ) ; <nl> + timeoutIt + + ; <nl> + return newInstanceBuilder . instance ( ) ; <nl> + } ) ; <nl> + <nl> + for ( auto description : serverDescriptionsWithTimeouts ) { <nl> + topologyDescription . installServerDescription ( description ) ; <nl> + } <nl> + <nl> + int expectedLogicalSessionTimeout = <nl> + * std : : min_element ( logicalSessionTimeouts . begin ( ) , logicalSessionTimeouts . end ( ) ) ; <nl> + ASSERT_EQUALS ( expectedLogicalSessionTimeout , <nl> + topologyDescription . getLogicalSessionTimeoutMinutes ( ) ) ; <nl> + } <nl> + <nl> + <nl> + TEST_F ( TopologyDescriptionTestFixture , <nl> + ShouldSetLogicalSessionTimeoutToNoneIfAnyServerDescriptionHasNone ) { <nl> + const auto config = SdamConfiguration ( kThreeServers ) ; <nl> + TopologyDescription topologyDescription ( config ) ; <nl> + <nl> + const auto logicalSessionTimeouts = std : : vector { 300 , 100 , 200 } ; <nl> + auto timeoutIt = logicalSessionTimeouts . begin ( ) ; <nl> + <nl> + const auto serverDescriptionsWithTimeouts = map < ServerDescriptionPtr , ServerDescriptionPtr > ( <nl> + topologyDescription . getServers ( ) , [ & ] ( const ServerDescriptionPtr & description ) { <nl> + auto timeoutValue = ( timeoutIt = = logicalSessionTimeouts . begin ( ) ) <nl> + ? boost : : none <nl> + : boost : : make_optional ( * timeoutIt ) ; <nl> + <nl> + auto newInstance = ServerDescriptionBuilder ( ) <nl> + . withType ( ServerType : : kRSSecondary ) <nl> + . withAddress ( description - > getAddress ( ) ) <nl> + . withMe ( description - > getAddress ( ) ) <nl> + . withLogicalSessionTimeoutMinutes ( timeoutValue ) <nl> + . instance ( ) ; <nl> + + + timeoutIt ; <nl> + return newInstance ; <nl> + } ) ; <nl> + <nl> + for ( auto description : serverDescriptionsWithTimeouts ) { <nl> + topologyDescription . installServerDescription ( description ) ; <nl> + } <nl> + <nl> + ASSERT_EQUALS ( boost : : none , topologyDescription . getLogicalSessionTimeoutMinutes ( ) ) ; <nl> + } <nl> + } ; / / namespace sdam <nl> + } ; / / namespace mongo <nl> new file mode 100644 <nl> index 000000000000 . . c74190f61fbc <nl> mmm / dev / null <nl> ppp b / src / mongo / client / sdam / topology_manager . cpp <nl> <nl> + / * * <nl> + * Copyright ( C ) 2019 - present MongoDB , Inc . <nl> + * <nl> + * This program is free software : you can redistribute it and / or modify <nl> + * it under the terms of the Server Side Public License , version 1 , <nl> + * as published by MongoDB , Inc . <nl> + * <nl> + * This program is distributed in the hope that it will be useful , <nl> + * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + * Server Side Public License for more details . <nl> + * <nl> + * You should have received a copy of the Server Side Public License <nl> + * along with this program . If not , see <nl> + * < http : / / www . mongodb . com / licensing / server - side - public - license > . <nl> + * <nl> + * As a special exception , the copyright holders give permission to link the <nl> + * code of portions of this program with the OpenSSL library under certain <nl> + * conditions as described in each individual source file and distribute <nl> + * linked combinations including the program with the OpenSSL library . You <nl> + * must comply with the Server Side Public License in all respects for <nl> + * all of the code used other than as permitted herein . 
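Aside (not part of the patch): the two logical-session-timeout tests above follow the aggregation rule documented on calculateLogicalSessionTimeout, namely take the minimum across data-bearing servers and fall back to null as soon as any of them reports no timeout. A minimal sketch of that rule, using a hypothetical helper name:

#include <algorithm>
#include <vector>

#include <boost/optional.hpp>

// Hypothetical free function, for illustration only.
boost::optional<int> combineLogicalSessionTimeouts(
    const std::vector<boost::optional<int>>& serverTimeouts) {
    boost::optional<int> result;
    for (const auto& timeout : serverTimeouts) {
        if (!timeout) {
            return boost::none;  // one null timeout nulls out the topology-wide value
        }
        result = result ? std::min(*result, *timeout) : *timeout;
    }
    return result;
}

// combineLogicalSessionTimeouts({300, 100, 200})          yields 100
// combineLogicalSessionTimeouts({boost::none, 100, 200})  yields boost::none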
If you modify file ( s ) <nl> + * with this exception , you may extend this exception to your version of the <nl> + * file ( s ) , but you are not obligated to do so . If you do not wish to do so , <nl> + * delete this exception statement from your version . If you delete this <nl> + * exception statement from all source files in the program , then also delete <nl> + * it in the license file . <nl> + * / <nl> + # include " mongo / client / sdam / topology_manager . h " <nl> + <nl> + # include " mongo / client / sdam / topology_state_machine . h " <nl> + <nl> + namespace mongo : : sdam { <nl> + TopologyManager : : TopologyManager ( SdamConfiguration config , ClockSource * clockSource ) <nl> + : _config ( std : : move ( config ) ) , <nl> + _clockSource ( clockSource ) , <nl> + _topologyDescription ( std : : make_unique < TopologyDescription > ( _config ) ) , <nl> + _topologyStateMachine ( std : : make_unique < TopologyStateMachine > ( _config ) ) { } <nl> + <nl> + void TopologyManager : : onServerDescription ( const IsMasterOutcome & isMasterOutcome ) { <nl> + stdx : : lock_guard < mongo : : Mutex > lock ( _mutex ) ; <nl> + <nl> + const auto & lastServerDescription = <nl> + _topologyDescription - > findServerByAddress ( isMasterOutcome . getServer ( ) ) ; <nl> + boost : : optional < IsMasterRTT > lastRTT = <nl> + ( lastServerDescription ) ? ( * lastServerDescription ) - > getRtt ( ) : boost : : none ; <nl> + <nl> + auto newServerDescription = <nl> + std : : make_shared < ServerDescription > ( _clockSource , isMasterOutcome , lastRTT ) ; <nl> + <nl> + auto newTopologyDescription = std : : make_unique < TopologyDescription > ( * _topologyDescription ) ; <nl> + _topologyStateMachine - > onServerDescription ( * newTopologyDescription , newServerDescription ) ; <nl> + _topologyDescription = std : : move ( newTopologyDescription ) ; <nl> + } <nl> + <nl> + const std : : shared_ptr < TopologyDescription > TopologyManager : : getTopologyDescription ( ) const { <nl> + stdx : : lock_guard < mongo : : Mutex > lock ( _mutex ) ; <nl> + return _topologyDescription ; <nl> + } <nl> + } ; / / namespace mongo : : sdam <nl> new file mode 100644 <nl> index 000000000000 . . f52729b09461 <nl> mmm / dev / null <nl> ppp b / src / mongo / client / sdam / topology_manager . h <nl> <nl> + / * * <nl> + * Copyright ( C ) 2019 - present MongoDB , Inc . <nl> + * <nl> + * This program is free software : you can redistribute it and / or modify <nl> + * it under the terms of the Server Side Public License , version 1 , <nl> + * as published by MongoDB , Inc . <nl> + * <nl> + * This program is distributed in the hope that it will be useful , <nl> + * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + * Server Side Public License for more details . <nl> + * <nl> + * You should have received a copy of the Server Side Public License <nl> + * along with this program . If not , see <nl> + * < http : / / www . mongodb . com / licensing / server - side - public - license > . <nl> + * <nl> + * As a special exception , the copyright holders give permission to link the <nl> + * code of portions of this program with the OpenSSL library under certain <nl> + * conditions as described in each individual source file and distribute <nl> + * linked combinations including the program with the OpenSSL library . You <nl> + * must comply with the Server Side Public License in all respects for <nl> + * all of the code used other than as permitted herein . 
If you modify file ( s ) <nl> + * with this exception , you may extend this exception to your version of the <nl> + * file ( s ) , but you are not obligated to do so . If you do not wish to do so , <nl> + * delete this exception statement from your version . If you delete this <nl> + * exception statement from all source files in the program , then also delete <nl> + * it in the license file . <nl> + * / <nl> + # pragma once <nl> + # include < memory > <nl> + <nl> + # include " mongo / client / sdam / sdam_datatypes . h " <nl> + # include " mongo / client / sdam / topology_description . h " <nl> + # include " mongo / client / sdam / topology_state_machine . h " <nl> + <nl> + namespace mongo : : sdam { <nl> + / * * <nl> + * This class serves as the public interface to the functionality described in the Service Discovery <nl> + * and Monitoring spec : <nl> + * https : / / github . com / mongodb / specifications / blob / master / source / server - discovery - and - monitoring / server - discovery - and - monitoring . rst <nl> + * / <nl> + class TopologyManager { <nl> + TopologyManager ( ) = delete ; <nl> + TopologyManager ( const TopologyManager & ) = delete ; <nl> + <nl> + public : <nl> + TopologyManager ( SdamConfiguration config , ClockSource * clockSource ) ; <nl> + <nl> + / * * <nl> + * This function atomically : <nl> + * 1 . Clones the current TopologyDescription <nl> + * 2 . Executes the state machine logic given the cloned TopologyDescription and provided <nl> + * IsMasterOutcome ( containing the new ServerDescription ) . <nl> + * 3 . Installs the cloned ( and possibly modified ) TopologyDescription as the current one . <nl> + * <nl> + * Multiple threads may call this function concurrently . However , the manager will process the <nl> + * IsMasterOutcomes serially , as required by : <nl> + * https : / / github . com / mongodb / specifications / blob / master / source / server - discovery - and - monitoring / server - discovery - and - monitoring . rst # process - one - ismaster - outcome - at - a - time <nl> + * / <nl> + void onServerDescription ( const IsMasterOutcome & isMasterOutcome ) ; <nl> + <nl> + / * * <nl> + * Get the current TopologyDescription . This is safe to call from multiple threads . <nl> + * / <nl> + const TopologyDescriptionPtr getTopologyDescription ( ) const ; <nl> + <nl> + private : <nl> + mutable mongo : : Mutex _mutex = mongo : : Mutex ( StringData ( " TopologyManager " ) ) ; <nl> + const SdamConfiguration _config ; <nl> + ClockSource * _clockSource ; <nl> + std : : shared_ptr < TopologyDescription > _topologyDescription ; <nl> + std : : unique_ptr < TopologyStateMachine > _topologyStateMachine ; <nl> + } ; <nl> + } / / namespace mongo : : sdam <nl> new file mode 100644 <nl> index 000000000000 . . 91224427796c <nl> mmm / dev / null <nl> ppp b / src / mongo / client / sdam / topology_state_machine . cpp <nl> <nl> + / * * <nl> + * Copyright ( C ) 2019 - present MongoDB , Inc . <nl> + * <nl> + * This program is free software : you can redistribute it and / or modify <nl> + * it under the terms of the Server Side Public License , version 1 , <nl> + * as published by MongoDB , Inc . <nl> + * <nl> + * This program is distributed in the hope that it will be useful , <nl> + * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + * Server Side Public License for more details . 
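A brief usage sketch of the TopologyManager interface declared above (illustrative, not part of the patch; the free function names and the example namespace are made up). The point of the clone-update-swap implementation shown earlier is that a reader holding the shared_ptr returned by getTopologyDescription keeps a consistent snapshot even while monitoring threads continue to feed in new isMaster outcomes.

#include <memory>

#include "mongo/client/sdam/topology_manager.h"

namespace example {
using namespace mongo::sdam;

// Called from a monitoring thread whenever an isMaster reply (or failure) is observed.
// Several monitoring threads may call this concurrently; the manager serializes them.
void onIsMasterReply(TopologyManager& manager, const IsMasterOutcome& outcome) {
    manager.onServerDescription(outcome);
}

// Called from any thread that needs to inspect the topology. The returned pointer is an
// immutable snapshot; a later call may return a newer TopologyDescription instance.
std::shared_ptr<TopologyDescription> currentSnapshot(const TopologyManager& manager) {
    return manager.getTopologyDescription();
}
}  // namespace example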
<nl> + * <nl> + * You should have received a copy of the Server Side Public License <nl> + * along with this program . If not , see <nl> + * < http : / / www . mongodb . com / licensing / server - side - public - license > . <nl> + * <nl> + * As a special exception , the copyright holders give permission to link the <nl> + * code of portions of this program with the OpenSSL library under certain <nl> + * conditions as described in each individual source file and distribute <nl> + * linked combinations including the program with the OpenSSL library . You <nl> + * must comply with the Server Side Public License in all respects for <nl> + * all of the code used other than as permitted herein . If you modify file ( s ) <nl> + * with this exception , you may extend this exception to your version of the <nl> + * file ( s ) , but you are not obligated to do so . If you do not wish to do so , <nl> + * delete this exception statement from your version . If you delete this <nl> + * exception statement from all source files in the program , then also delete <nl> + * it in the license file . <nl> + * / <nl> + # define MONGO_LOG_DEFAULT_COMPONENT : : mongo : : logger : : LogComponent : : kNetwork <nl> + # include " mongo / client / sdam / topology_state_machine . h " <nl> + <nl> + # include < functional > <nl> + # include < ostream > <nl> + <nl> + # include " mongo / client / sdam / sdam_test_base . h " <nl> + # include " mongo / util / log . h " <nl> + <nl> + namespace mongo : : sdam { <nl> + TopologyStateMachine : : TopologyStateMachine ( const SdamConfiguration & config ) : _config ( config ) { <nl> + initTransitionTable ( ) ; <nl> + } <nl> + <nl> + / / This is used to make the syntax in initTransitionTable less verbose . <nl> + / / Since we have enum class for TopologyType and ServerType there are no implicit int conversions . <nl> + template < typename T > <nl> + inline int idx ( T enumType ) { <nl> + return static_cast < int > ( enumType ) ; <nl> + } <nl> + <nl> + / * * <nl> + * This function encodes the transition table specified in <nl> + * https : / / github . com / mongodb / specifications / blob / master / source / server - discovery - and - monitoring / server - discovery - and - monitoring . rst # topologytype - table <nl> + * / <nl> + void mongo : : sdam : : TopologyStateMachine : : initTransitionTable ( ) { <nl> + using namespace std : : placeholders ; <nl> + <nl> + / / init the table to No - ops <nl> + const TransitionAction NO_OP ( [ ] ( const TopologyDescription & , const ServerDescriptionPtr & ) { } ) ; <nl> + _stt . resize ( allTopologyTypes ( ) . size ( ) + 1 ) ; <nl> + for ( auto & row : _stt ) { <nl> + row . resize ( allServerTypes ( ) . 
size ( ) + 1 , NO_OP ) ; <nl> + } <nl> + <nl> + / / From TopologyType : Unknown <nl> + _stt [ idx ( TopologyType : : kUnknown ) ] [ idx ( ServerType : : kStandalone ) ] = <nl> + std : : bind ( & TopologyStateMachine : : updateUnknownWithStandalone , this , _1 , _2 ) ; <nl> + _stt [ idx ( TopologyType : : kUnknown ) ] [ idx ( ServerType : : kMongos ) ] = <nl> + setTopologyTypeAction ( TopologyType : : kSharded ) ; <nl> + _stt [ idx ( TopologyType : : kUnknown ) ] [ idx ( ServerType : : kRSPrimary ) ] = <nl> + setTopologyTypeAndUpdateRSFromPrimary ( TopologyType : : kReplicaSetWithPrimary ) ; <nl> + <nl> + { <nl> + const auto serverTypes = std : : vector < ServerType > { <nl> + ServerType : : kRSSecondary , ServerType : : kRSArbiter , ServerType : : kRSOther } ; <nl> + for ( auto newServerType : serverTypes ) { <nl> + _stt [ idx ( TopologyType : : kUnknown ) ] [ idx ( newServerType ) ] = std : : bind ( <nl> + & TopologyStateMachine : : setTopologyTypeAndUpdateRSWithoutPrimary , this , _1 , _2 ) ; <nl> + } <nl> + } <nl> + <nl> + / / From TopologyType : Sharded <nl> + { <nl> + const auto serverTypes = std : : vector < ServerType > { ServerType : : kStandalone , <nl> + ServerType : : kRSPrimary , <nl> + ServerType : : kRSSecondary , <nl> + ServerType : : kRSArbiter , <nl> + ServerType : : kRSOther , <nl> + ServerType : : kRSGhost } ; <nl> + for ( auto newServerType : serverTypes ) { <nl> + _stt [ idx ( TopologyType : : kSharded ) ] [ idx ( newServerType ) ] = <nl> + std : : bind ( & TopologyStateMachine : : removeAndStopMonitoring , this , _1 , _2 ) ; <nl> + } <nl> + } <nl> + <nl> + / / From TopologyType : ReplicaSetNoPrimary <nl> + { <nl> + const auto serverTypes = <nl> + std : : vector < ServerType > { ServerType : : kStandalone , ServerType : : kMongos } ; <nl> + for ( auto serverType : serverTypes ) { <nl> + _stt [ idx ( TopologyType : : kReplicaSetNoPrimary ) ] [ idx ( serverType ) ] = <nl> + std : : bind ( & TopologyStateMachine : : removeAndStopMonitoring , this , _1 , _2 ) ; <nl> + } <nl> + } <nl> + <nl> + _stt [ idx ( TopologyType : : kReplicaSetNoPrimary ) ] [ idx ( ServerType : : kRSPrimary ) ] = <nl> + setTopologyTypeAndUpdateRSFromPrimary ( TopologyType : : kReplicaSetWithPrimary ) ; <nl> + <nl> + { <nl> + const auto serverTypes = std : : vector < ServerType > { <nl> + ServerType : : kRSSecondary , ServerType : : kRSArbiter , ServerType : : kRSOther } ; <nl> + for ( auto serverType : serverTypes ) { <nl> + _stt [ idx ( TopologyType : : kReplicaSetNoPrimary ) ] [ idx ( serverType ) ] = <nl> + std : : bind ( & TopologyStateMachine : : updateRSWithoutPrimary , this , _1 , _2 ) ; <nl> + } <nl> + } <nl> + <nl> + / / From TopologyType : ReplicaSetWithPrimary <nl> + { <nl> + const auto serverTypes = <nl> + std : : vector < ServerType > { ServerType : : kUnknown , ServerType : : kRSGhost } ; <nl> + for ( auto serverType : serverTypes ) { <nl> + _stt [ idx ( TopologyType : : kReplicaSetWithPrimary ) ] [ idx ( serverType ) ] = <nl> + std : : bind ( & TopologyStateMachine : : checkIfHasPrimary , this , _1 , _2 ) ; <nl> + } <nl> + } <nl> + <nl> + { <nl> + const auto serverTypes = <nl> + std : : vector < ServerType > { ServerType : : kStandalone , ServerType : : kMongos } ; <nl> + for ( auto serverType : serverTypes ) { <nl> + _stt [ idx ( TopologyType : : kReplicaSetWithPrimary ) ] [ idx ( serverType ) ] = <nl> + std : : bind ( & TopologyStateMachine : : removeAndCheckIfHasPrimary , this , _1 , _2 ) ; <nl> + } <nl> + } <nl> + <nl> + _stt [ idx ( TopologyType : : kReplicaSetWithPrimary ) ] [ 
idx ( ServerType : : kRSPrimary ) ] = <nl> + std : : bind ( & TopologyStateMachine : : updateRSFromPrimary , this , _1 , _2 ) ; <nl> + <nl> + { <nl> + const auto serverTypes = std : : vector < ServerType > { <nl> + ServerType : : kRSSecondary , ServerType : : kRSArbiter , ServerType : : kRSOther } ; <nl> + for ( auto serverType : serverTypes ) { <nl> + _stt [ idx ( TopologyType : : kReplicaSetWithPrimary ) ] [ idx ( serverType ) ] = <nl> + std : : bind ( & TopologyStateMachine : : updateRSWithPrimaryFromMember , this , _1 , _2 ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + void TopologyStateMachine : : onServerDescription ( TopologyDescription & topologyDescription , <nl> + const ServerDescriptionPtr & serverDescription ) { <nl> + if ( ! topologyDescription . containsServerAddress ( serverDescription - > getAddress ( ) ) ) { <nl> + LOG ( 0 ) < < kLogPrefix < < " ignoring ismaster reply from server that is not in the topology : " <nl> + < < serverDescription - > getAddress ( ) < < std : : endl ; <nl> + return ; <nl> + } <nl> + <nl> + installServerDescription ( topologyDescription , serverDescription , false ) ; <nl> + <nl> + if ( topologyDescription . getType ( ) ! = TopologyType : : kSingle ) { <nl> + auto & action = _stt [ idx ( topologyDescription . getType ( ) ) ] [ idx ( serverDescription - > getType ( ) ) ] ; <nl> + action ( topologyDescription , serverDescription ) ; <nl> + } <nl> + } <nl> + <nl> + void TopologyStateMachine : : updateUnknownWithStandalone ( <nl> + TopologyDescription & topologyDescription , const ServerDescriptionPtr & serverDescription ) { <nl> + if ( ! topologyDescription . containsServerAddress ( serverDescription - > getAddress ( ) ) ) <nl> + return ; <nl> + <nl> + if ( _config . getSeedList ( ) & & ( * _config . getSeedList ( ) ) . size ( ) = = 1 ) { <nl> + modifyTopologyType ( topologyDescription , TopologyType : : kSingle ) ; <nl> + } else { <nl> + removeServerDescription ( topologyDescription , serverDescription - > getAddress ( ) ) ; <nl> + } <nl> + } <nl> + <nl> + void TopologyStateMachine : : updateRSWithoutPrimary ( TopologyDescription & topologyDescription , <nl> + const ServerDescriptionPtr & serverDescription ) { <nl> + const auto & serverDescAddress = serverDescription - > getAddress ( ) ; <nl> + <nl> + if ( ! topologyDescription . containsServerAddress ( serverDescAddress ) ) <nl> + return ; <nl> + <nl> + const auto & currentSetName = topologyDescription . getSetName ( ) ; <nl> + const auto & serverDescSetName = serverDescription - > getSetName ( ) ; <nl> + if ( currentSetName = = boost : : none ) { <nl> + modifySetName ( topologyDescription , serverDescSetName ) ; <nl> + } else if ( currentSetName ! = serverDescSetName ) { <nl> + removeServerDescription ( topologyDescription , serverDescription - > getAddress ( ) ) ; <nl> + return ; <nl> + } <nl> + <nl> + addUnknownServers ( topologyDescription , serverDescription ) ; <nl> + <nl> + if ( serverDescAddress ! 
= serverDescription - > getMe ( ) ) { <nl> + removeServerDescription ( topologyDescription , serverDescription - > getAddress ( ) ) ; <nl> + } <nl> + } <nl> + <nl> + void TopologyStateMachine : : addUnknownServers ( TopologyDescription & topologyDescription , <nl> + const ServerDescriptionPtr & serverDescription ) { <nl> + const std : : set < ServerAddress > * addressSets [ 3 ] { & serverDescription - > getHosts ( ) , <nl> + & serverDescription - > getPassives ( ) , <nl> + & serverDescription - > getArbiters ( ) } ; <nl> + for ( const auto addresses : addressSets ) { <nl> + for ( const auto & addressFromSet : * addresses ) { <nl> + if ( ! topologyDescription . containsServerAddress ( addressFromSet ) ) { <nl> + installServerDescription ( <nl> + topologyDescription , std : : make_shared < ServerDescription > ( addressFromSet ) , true ) ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + <nl> + void TopologyStateMachine : : updateRSWithPrimaryFromMember ( <nl> + TopologyDescription & topologyDescription , const ServerDescriptionPtr & serverDescription ) { <nl> + const auto & serverDescAddress = serverDescription - > getAddress ( ) ; <nl> + if ( ! topologyDescription . containsServerAddress ( serverDescAddress ) ) { <nl> + return ; <nl> + } <nl> + <nl> + invariant ( serverDescription - > getSetName ( ) ! = boost : : none ) ; <nl> + if ( topologyDescription . getSetName ( ) ! = serverDescription - > getSetName ( ) ) { <nl> + removeAndCheckIfHasPrimary ( topologyDescription , serverDescription ) ; <nl> + return ; <nl> + } <nl> + <nl> + if ( serverDescription - > getAddress ( ) ! = serverDescription - > getMe ( ) ) { <nl> + removeAndCheckIfHasPrimary ( topologyDescription , serverDescription ) ; <nl> + return ; <nl> + } <nl> + <nl> + auto primaries = topologyDescription . findServers ( [ ] ( const ServerDescriptionPtr & description ) { <nl> + return description - > getType ( ) = = ServerType : : kRSPrimary ; <nl> + } ) ; <nl> + if ( primaries . size ( ) = = 0 ) { <nl> + modifyTopologyType ( topologyDescription , TopologyType : : kReplicaSetNoPrimary ) ; <nl> + } <nl> + } <nl> + <nl> + void TopologyStateMachine : : updateRSFromPrimary ( TopologyDescription & topologyDescription , <nl> + const ServerDescriptionPtr & serverDescription ) { <nl> + const auto & serverDescAddress = serverDescription - > getAddress ( ) ; <nl> + if ( ! topologyDescription . containsServerAddress ( serverDescAddress ) ) { <nl> + return ; <nl> + } <nl> + <nl> + auto topologySetName = topologyDescription . getSetName ( ) ; <nl> + auto serverDescSetName = serverDescription - > getSetName ( ) ; <nl> + if ( ! topologySetName & & serverDescSetName ) { <nl> + modifySetName ( topologyDescription , serverDescSetName ) ; <nl> + } else if ( topologySetName ! = serverDescSetName ) { <nl> + / / We found a primary but it doesn ' t have the setName <nl> + / / provided by the user or previously discovered . <nl> + removeAndCheckIfHasPrimary ( topologyDescription , serverDescription ) ; <nl> + return ; <nl> + } <nl> + <nl> + auto serverDescSetVersion = serverDescription - > getSetVersion ( ) ; <nl> + auto serverDescElectionId = serverDescription - > getElectionId ( ) ; <nl> + auto topologyMaxSetVersion = topologyDescription . getMaxSetVersion ( ) ; <nl> + auto topologyMaxElectionId = topologyDescription . 
getMaxElectionId ( ) ; <nl> + if ( serverDescSetVersion & & serverDescElectionId ) { <nl> + if ( topologyMaxSetVersion & & topologyMaxElectionId & & <nl> + ( ( topologyMaxSetVersion > serverDescSetVersion ) | | <nl> + ( topologyMaxSetVersion = = serverDescSetVersion & & <nl> + ( * topologyMaxElectionId ) . compare ( * serverDescElectionId ) > 0 ) ) ) { <nl> + / / stale primary <nl> + installServerDescription ( <nl> + topologyDescription , std : : make_shared < ServerDescription > ( serverDescAddress ) , false ) ; <nl> + checkIfHasPrimary ( topologyDescription , serverDescription ) ; <nl> + return ; <nl> + } <nl> + modifyMaxElectionId ( topologyDescription , * serverDescription - > getElectionId ( ) ) ; <nl> + } <nl> + <nl> + if ( serverDescSetVersion & & <nl> + ( ! topologyMaxSetVersion | | ( serverDescSetVersion > topologyMaxSetVersion ) ) ) { <nl> + modifyMaxSetVersion ( topologyDescription , * serverDescSetVersion ) ; <nl> + } <nl> + <nl> + auto oldPrimaries = topologyDescription . findServers ( <nl> + [ serverDescAddress ] ( const ServerDescriptionPtr & description ) { <nl> + return ( description - > getAddress ( ) ! = serverDescAddress & & <nl> + description - > getType ( ) = = ServerType : : kRSPrimary ) ; <nl> + } ) ; <nl> + invariant ( oldPrimaries . size ( ) < = 1 ) ; <nl> + for ( const auto & server : oldPrimaries ) { <nl> + installServerDescription ( <nl> + topologyDescription , std : : make_shared < ServerDescription > ( server - > getAddress ( ) ) , false ) ; <nl> + } <nl> + <nl> + addUnknownServers ( topologyDescription , serverDescription ) ; <nl> + for ( const auto & currentServerDescription : topologyDescription . getServers ( ) ) { <nl> + const auto currentServerAddress = currentServerDescription - > getAddress ( ) ; <nl> + auto hosts = serverDescription - > getHosts ( ) . find ( currentServerAddress ) ; <nl> + auto passives = serverDescription - > getPassives ( ) . find ( currentServerAddress ) ; <nl> + auto arbiters = serverDescription - > getArbiters ( ) . find ( currentServerAddress ) ; <nl> + <nl> + if ( hosts = = serverDescription - > getHosts ( ) . end ( ) & & <nl> + passives = = serverDescription - > getPassives ( ) . end ( ) & & <nl> + arbiters = = serverDescription - > getArbiters ( ) . end ( ) ) { <nl> + removeServerDescription ( topologyDescription , currentServerDescription - > getAddress ( ) ) ; <nl> + } <nl> + } <nl> + <nl> + checkIfHasPrimary ( topologyDescription , serverDescription ) ; <nl> + } <nl> + <nl> + void TopologyStateMachine : : removeAndStopMonitoring ( TopologyDescription & topologyDescription , <nl> + const ServerDescriptionPtr & serverDescription ) { <nl> + removeServerDescription ( topologyDescription , serverDescription - > getAddress ( ) ) ; <nl> + } <nl> + <nl> + void TopologyStateMachine : : checkIfHasPrimary ( TopologyDescription & topologyDescription , <nl> + const ServerDescriptionPtr & serverDescription ) { <nl> + auto foundPrimaries = <nl> + topologyDescription . findServers ( [ ] ( const ServerDescriptionPtr & description ) { <nl> + return description - > getType ( ) = = ServerType : : kRSPrimary ; <nl> + } ) ; <nl> + if ( foundPrimaries . 
size ( ) > 0 ) { <nl> + modifyTopologyType ( topologyDescription , TopologyType : : kReplicaSetWithPrimary ) ; <nl> + } else { <nl> + modifyTopologyType ( topologyDescription , TopologyType : : kReplicaSetNoPrimary ) ; <nl> + } <nl> + } <nl> + <nl> + void TopologyStateMachine : : removeAndCheckIfHasPrimary ( <nl> + TopologyDescription & topologyDescription , const ServerDescriptionPtr & serverDescription ) { <nl> + / / Since serverDescription is passed by reference , make a copy of the ServerDescription <nl> + / / shared_ptr so that the underlying pointer is still valid for the call to checkIfHasPrimary . <nl> + ServerDescriptionPtr serverDescriptionNoGC ( serverDescription ) ; <nl> + removeAndStopMonitoring ( topologyDescription , serverDescriptionNoGC ) ; <nl> + checkIfHasPrimary ( topologyDescription , serverDescriptionNoGC ) ; <nl> + } <nl> + <nl> + TransitionAction TopologyStateMachine : : setTopologyTypeAction ( TopologyType type ) { <nl> + return [ this , type ] ( TopologyDescription & topologyDescription , <nl> + const ServerDescriptionPtr & newServerDescription ) { <nl> + modifyTopologyType ( topologyDescription , type ) ; <nl> + } ; <nl> + } <nl> + <nl> + TransitionAction TopologyStateMachine : : setTopologyTypeAndUpdateRSFromPrimary ( TopologyType type ) { <nl> + return [ this , type ] ( TopologyDescription & topologyDescription , <nl> + const ServerDescriptionPtr & newServerDescription ) { <nl> + modifyTopologyType ( topologyDescription , type ) ; <nl> + updateRSFromPrimary ( topologyDescription , newServerDescription ) ; <nl> + } ; <nl> + } <nl> + <nl> + void TopologyStateMachine : : setTopologyTypeAndUpdateRSWithoutPrimary ( <nl> + TopologyDescription & topologyDescription , const ServerDescriptionPtr & serverDescription ) { <nl> + modifyTopologyType ( topologyDescription , TopologyType : : kReplicaSetNoPrimary ) ; <nl> + updateRSWithoutPrimary ( topologyDescription , serverDescription ) ; <nl> + } <nl> + <nl> + void TopologyStateMachine : : removeServerDescription ( TopologyDescription & topologyDescription , <nl> + const ServerAddress serverAddress ) { <nl> + topologyDescription . removeServerDescription ( serverAddress ) ; <nl> + LOG ( 0 ) < < kLogPrefix < < " server ' " < < serverAddress < < " ' was removed from the topology . " <nl> + < < std : : endl ; <nl> + } <nl> + <nl> + void TopologyStateMachine : : modifyTopologyType ( TopologyDescription & topologyDescription , <nl> + TopologyType topologyType ) { <nl> + topologyDescription . _type = topologyType ; <nl> + LOG ( 0 ) < < kLogPrefix < < " the topology type was set to " < < toString ( topologyType ) < < std : : endl ; <nl> + } <nl> + <nl> + void TopologyStateMachine : : modifySetName ( TopologyDescription & topologyDescription , <nl> + const boost : : optional < std : : string > & setName ) { <nl> + topologyDescription . _setName = setName ; <nl> + LOG ( 0 ) < < kLogPrefix < < " the topology setName was set to " < < ( ( setName ) ? * setName : " [ null ] " ) <nl> + < < std : : endl ; <nl> + } <nl> + <nl> + void TopologyStateMachine : : installServerDescription ( TopologyDescription & topologyDescription , <nl> + ServerDescriptionPtr newServerDescription , <nl> + bool newServer ) { <nl> + topologyDescription . installServerDescription ( newServerDescription ) ; <nl> + LOG ( 1 ) < < kLogPrefix < < ( ( newServer ) ? 
" installed new " : " updated existing " ) <nl> + < < " server description : " < < newServerDescription - > toString ( ) < < std : : endl ; <nl> + } <nl> + <nl> + void TopologyStateMachine : : modifyMaxElectionId ( TopologyDescription & topologyDescription , <nl> + const OID & newMaxElectionId ) { <nl> + topologyDescription . _maxElectionId = newMaxElectionId ; <nl> + LOG ( 0 ) < < kLogPrefix < < " topology max election id set to " < < newMaxElectionId < < std : : endl ; <nl> + } <nl> + <nl> + void TopologyStateMachine : : modifyMaxSetVersion ( TopologyDescription & topologyDescription , <nl> + int & newMaxSetVersion ) { <nl> + topologyDescription . _maxSetVersion = newMaxSetVersion ; <nl> + LOG ( 0 ) < < kLogPrefix < < " topology max set version set to " < < newMaxSetVersion < < std : : endl ; <nl> + } <nl> + } / / namespace mongo : : sdam <nl> new file mode 100644 <nl> index 000000000000 . . abed9bc854f9 <nl> mmm / dev / null <nl> ppp b / src / mongo / client / sdam / topology_state_machine . h <nl> <nl> + / * * <nl> + * Copyright ( C ) 2019 - present MongoDB , Inc . <nl> + * <nl> + * This program is free software : you can redistribute it and / or modify <nl> + * it under the terms of the Server Side Public License , version 1 , <nl> + * as published by MongoDB , Inc . <nl> + * <nl> + * This program is distributed in the hope that it will be useful , <nl> + * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + * Server Side Public License for more details . <nl> + * <nl> + * You should have received a copy of the Server Side Public License <nl> + * along with this program . If not , see <nl> + * < http : / / www . mongodb . com / licensing / server - side - public - license > . <nl> + * <nl> + * As a special exception , the copyright holders give permission to link the <nl> + * code of portions of this program with the OpenSSL library under certain <nl> + * conditions as described in each individual source file and distribute <nl> + * linked combinations including the program with the OpenSSL library . You <nl> + * must comply with the Server Side Public License in all respects for <nl> + * all of the code used other than as permitted herein . If you modify file ( s ) <nl> + * with this exception , you may extend this exception to your version of the <nl> + * file ( s ) , but you are not obligated to do so . If you do not wish to do so , <nl> + * delete this exception statement from your version . If you delete this <nl> + * exception statement from all source files in the program , then also delete <nl> + * it in the license file . <nl> + * / <nl> + # pragma once <nl> + <nl> + # include < vector > <nl> + <nl> + # include " mongo / client / sdam / server_description . h " <nl> + # include " mongo / client / sdam / topology_description . h " <nl> + # include " mongo / platform / mutex . h " <nl> + <nl> + namespace mongo : : sdam { <nl> + / / Actions that mutate the state of the topology description via events . <nl> + using TransitionAction = std : : function < void ( TopologyDescription & , const ServerDescriptionPtr & ) > ; <nl> + <nl> + / / indexed by ServerType <nl> + using StateTransitionTableRow = std : : vector < TransitionAction > ; <nl> + <nl> + / * * <nl> + * StateTransitionTable [ t ] [ s ] returns the action to <nl> + * take given that the topology currently has type t , and we receive a ServerDescription <nl> + * with type s . 
<nl> + * / <nl> + using StateTransitionTable = std : : vector < StateTransitionTableRow > ; <nl> + <nl> + class TopologyStateMachine { <nl> + public : <nl> + TopologyStateMachine ( const SdamConfiguration & config ) ; <nl> + <nl> + / * * <nl> + * Provides input to the state machine , and triggers the correct action based on the current <nl> + * TopologyDescription and the incoming ServerDescription . The topologyDescription instance may <nl> + * be modified as a result . <nl> + * / <nl> + void onServerDescription ( TopologyDescription & topologyDescription , <nl> + const ServerDescriptionPtr & serverDescription ) ; <nl> + <nl> + private : <nl> + void initTransitionTable ( ) ; <nl> + <nl> + / / State machine actions <nl> + / / These are implemented , in an almost verbatim fashion , from the description <nl> + / / here : <nl> + / / https : / / github . com / mongodb / specifications / blob / master / source / server - discovery - and - monitoring / server - discovery - and - monitoring . rst # actions <nl> + void updateUnknownWithStandalone ( TopologyDescription & , const ServerDescriptionPtr & ) ; <nl> + void updateRSWithoutPrimary ( TopologyDescription & , const ServerDescriptionPtr & ) ; <nl> + void updateRSWithPrimaryFromMember ( TopologyDescription & , const ServerDescriptionPtr & ) ; <nl> + void updateRSFromPrimary ( TopologyDescription & , const ServerDescriptionPtr & ) ; <nl> + void removeAndStopMonitoring ( TopologyDescription & , const ServerDescriptionPtr & ) ; <nl> + void checkIfHasPrimary ( TopologyDescription & , const ServerDescriptionPtr & ) ; <nl> + void removeAndCheckIfHasPrimary ( TopologyDescription & , const ServerDescriptionPtr & ) ; <nl> + void setTopologyTypeAndUpdateRSWithoutPrimary ( TopologyDescription & , <nl> + const ServerDescriptionPtr & ) ; <nl> + TransitionAction setTopologyTypeAction ( TopologyType type ) ; <nl> + TransitionAction setTopologyTypeAndUpdateRSFromPrimary ( TopologyType type ) ; <nl> + <nl> + void addUnknownServers ( TopologyDescription & topologyDescription , <nl> + const ServerDescriptionPtr & serverDescription ) ; <nl> + <nl> + / / The functions below mutate the state of the topology description <nl> + void installServerDescription ( TopologyDescription & topologyDescription , <nl> + ServerDescriptionPtr newServerDescription , <nl> + bool newServer ) ; <nl> + void removeServerDescription ( TopologyDescription & topologyDescription , <nl> + const ServerAddress serverAddress ) ; <nl> + <nl> + void modifyTopologyType ( TopologyDescription & topologyDescription , TopologyType topologyType ) ; <nl> + void modifySetName ( TopologyDescription & topologyDescription , <nl> + const boost : : optional < std : : string > & setName ) ; <nl> + <nl> + void modifyMaxElectionId ( TopologyDescription & topologyDescription , const OID & newMaxElectionId ) ; <nl> + void modifyMaxSetVersion ( TopologyDescription & topologyDescription , int & newMaxSetVersion ) ; <nl> + <nl> + StateTransitionTable _stt ; <nl> + SdamConfiguration _config ; <nl> + <nl> + static inline auto kLogPrefix = " sdam : " ; <nl> + } ; <nl> + } / / namespace mongo : : sdam <nl> new file mode 100644 <nl> index 000000000000 . . 346295942004 <nl> mmm / dev / null <nl> ppp b / src / mongo / client / sdam / topology_state_machine_test . cpp <nl> <nl> + / * * <nl> + * Copyright ( C ) 2019 - present MongoDB , Inc . 
<nl> + * <nl> + * This program is free software : you can redistribute it and / or modify <nl> + * it under the terms of the Server Side Public License , version 1 , <nl> + * as published by MongoDB , Inc . <nl> + * <nl> + * This program is distributed in the hope that it will be useful , <nl> + * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + * Server Side Public License for more details . <nl> + * <nl> + * You should have received a copy of the Server Side Public License <nl> + * along with this program . If not , see <nl> + * < http : / / www . mongodb . com / licensing / server - side - public - license > . <nl> + * <nl> + * As a special exception , the copyright holders give permission to link the <nl> + * code of portions of this program with the OpenSSL library under certain <nl> + * conditions as described in each individual source file and distribute <nl> + * linked combinations including the program with the OpenSSL library . You <nl> + * must comply with the Server Side Public License in all respects for <nl> + * all of the code used other than as permitted herein . If you modify file ( s ) <nl> + * with this exception , you may extend this exception to your version of the <nl> + * file ( s ) , but you are not obligated to do so . If you do not wish to do so , <nl> + * delete this exception statement from your version . If you delete this <nl> + * exception statement from all source files in the program , then also delete <nl> + * it in the license file . <nl> + * / <nl> + # include " mongo / client / sdam / topology_state_machine . h " <nl> + <nl> + # include < boost / optional / optional_io . hpp > <nl> + <nl> + # include " mongo / client / sdam / sdam_test_base . h " <nl> + # include " mongo / client / sdam / server_description . h " <nl> + # include " mongo / client / sdam / server_description_builder . h " <nl> + # include " mongo / client / sdam / topology_description . h " <nl> + <nl> + namespace mongo : : sdam { <nl> + class TopologyStateMachineTestFixture : public SdamTestFixture { <nl> + protected : <nl> + static inline const auto kReplicaSetName = " replica_set " ; <nl> + static inline const auto kLocalServer = " localhost : 123 " ; <nl> + static inline const auto kLocalServer2 = " localhost : 456 " ; <nl> + <nl> + static inline const auto kTwoSeedConfig = <nl> + SdamConfiguration ( std : : vector < ServerAddress > { kLocalServer , kLocalServer2 } , <nl> + TopologyType : : kUnknown , <nl> + mongo : : Milliseconds ( 500 ) ) ; <nl> + static inline const auto kTwoSeedReplicaSetNoPrimaryConfig = <nl> + SdamConfiguration ( std : : vector < ServerAddress > { kLocalServer , kLocalServer2 } , <nl> + TopologyType : : kReplicaSetNoPrimary , <nl> + mongo : : Milliseconds ( 500 ) , <nl> + std : : string ( " setName " ) ) ; <nl> + static inline const auto kSingleConfig = <nl> + SdamConfiguration ( std : : vector < ServerAddress > { kLocalServer } , TopologyType : : kSingle ) ; <nl> + <nl> + / / Given we in ' starting ' state with initial config ' initialConfig ' . We receive a <nl> + / / ServerDescription with type ' incoming ' , and expected the ending topology state to be <nl> + / / ' ending ' . <nl> + struct TopologyTypeTestCase { <nl> + SdamConfiguration initialConfig ; <nl> + TopologyType starting ; <nl> + ServerType incoming ; <nl> + TopologyType ending ; <nl> + } ; <nl> + <nl> + / / This function sets up the test scenario defined by the given TopologyTypeTestCase . 
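As a concrete illustration (not the contents of any actual test, only how a case could be written against this fixture): the transition table wired up in initTransitionTable maps Unknown plus RSPrimary to ReplicaSetWithPrimary and Unknown plus Mongos to Sharded, so the corresponding cases would look like this inside a TEST_F body:

// Unknown topology receiving an RSPrimary description should end up ReplicaSetWithPrimary.
TopologyTypeTestCase primaryCase{kTwoSeedConfig,
                                 TopologyType::kUnknown,
                                 ServerType::kRSPrimary,
                                 TopologyType::kReplicaSetWithPrimary};

// Unknown topology receiving a Mongos description should end up Sharded.
TopologyTypeTestCase mongosCase{
    kTwoSeedConfig, TopologyType::kUnknown, ServerType::kMongos, TopologyType::kSharded};

assertTopologyTypeTestCase(primaryCase);
assertTopologyTypeTestCase(mongosCase);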
It <nl> + / / simulates receiving a ServerDescription , and asserts that the final topology type is in the <nl> + / / correct state . <nl> + void assertTopologyTypeTestCase ( TopologyTypeTestCase testCase ) { <nl> + TopologyStateMachine stateMachine ( testCase . initialConfig ) ; <nl> + <nl> + / / setup the initial state <nl> + TopologyDescription topologyDescription ( testCase . initialConfig ) ; <nl> + topologyDescription . setType ( testCase . starting ) ; <nl> + <nl> + / / create new ServerDescription and <nl> + auto serverDescriptionBuilder = <nl> + ServerDescriptionBuilder ( ) . withType ( testCase . incoming ) . withAddress ( kLocalServer ) ; <nl> + <nl> + / / update the known hosts in the ServerDescription <nl> + if ( testCase . initialConfig . getSeedList ( ) ) { <nl> + for ( auto address : * testCase . initialConfig . getSeedList ( ) ) { <nl> + serverDescriptionBuilder . withHost ( address ) ; <nl> + } <nl> + } <nl> + <nl> + / / set the primary if we are creating one <nl> + if ( testCase . incoming = = ServerType : : kRSPrimary ) { <nl> + serverDescriptionBuilder . withPrimary ( kLocalServer ) ; <nl> + } <nl> + <nl> + / / set the replica set name if appropriate <nl> + const std : : vector < ServerType > & replicaSetServerTypes = std : : vector < ServerType > { <nl> + ServerType : : kRSOther , ServerType : : kRSSecondary , ServerType : : kRSArbiter } ; <nl> + if ( std : : find ( replicaSetServerTypes . begin ( ) , <nl> + replicaSetServerTypes . end ( ) , <nl> + testCase . incoming ) ! = replicaSetServerTypes . end ( ) ) { <nl> + serverDescriptionBuilder . withSetName ( kReplicaSetName ) ; <nl> + } <nl> + <nl> + const auto serverDescription = serverDescriptionBuilder . instance ( ) ; <nl> + <nl> + / / simulate the ServerDescription being received <nl> + stateMachine . onServerDescription ( topologyDescription , serverDescription ) ; <nl> + <nl> + ASSERT_EQUALS ( topologyDescription . getType ( ) , testCase . ending ) ; <nl> + } <nl> + <nl> + std : : vector < ServerType > allServerTypesExceptPrimary ( ) { <nl> + auto allExceptPrimary = allServerTypes ( ) ; <nl> + allExceptPrimary . erase ( <nl> + std : : remove_if ( allExceptPrimary . begin ( ) , <nl> + allExceptPrimary . end ( ) , <nl> + [ ] ( const ServerType t ) { return t = = ServerType : : kRSPrimary ; } ) , <nl> + allExceptPrimary . end ( ) ) ; <nl> + return allExceptPrimary ; <nl> + } <nl> + } ; <nl> + <nl> + TEST_F ( TopologyStateMachineTestFixture , ShouldInstallServerDescriptionInSingleTopology ) { <nl> + TopologyStateMachine stateMachine ( kSingleConfig ) ; <nl> + TopologyDescription topologyDescription ( kSingleConfig ) ; <nl> + <nl> + auto updatedMeAddress = " foo : 1234 " ; <nl> + auto serverDescription = ServerDescriptionBuilder ( ) <nl> + . withAddress ( kLocalServer ) <nl> + . withMe ( updatedMeAddress ) <nl> + . withType ( ServerType : : kStandalone ) <nl> + . instance ( ) ; <nl> + <nl> + stateMachine . onServerDescription ( topologyDescription , serverDescription ) ; <nl> + ASSERT_EQUALS ( static_cast < size_t > ( 1 ) , topologyDescription . getServers ( ) . size ( ) ) ; <nl> + <nl> + auto result = topologyDescription . findServerByAddress ( kLocalServer ) ; <nl> + ASSERT ( result ) ; <nl> + ASSERT_EQUALS ( serverDescription , * result ) ; <nl> + } <nl> + <nl> + TEST_F ( TopologyStateMachineTestFixture , ShouldRemoveServerDescriptionIfNotInHostsList ) { <nl> + const auto primary = ( * kTwoSeedConfig . getSeedList ( ) ) . front ( ) ; <nl> + const auto expectedRemovedServer = ( * kTwoSeedConfig . getSeedList ( ) ) . 
back ( ) ; <nl> + <nl> + TopologyStateMachine stateMachine ( kTwoSeedConfig ) ; <nl> + TopologyDescription topologyDescription ( kTwoSeedConfig ) ; <nl> + <nl> + auto serverDescription = ServerDescriptionBuilder ( ) <nl> + . withAddress ( primary ) <nl> + . withType ( ServerType : : kRSPrimary ) <nl> + . withPrimary ( primary ) <nl> + . withHost ( primary ) <nl> + . instance ( ) ; <nl> + <nl> + ASSERT_EQUALS ( static_cast < size_t > ( 2 ) , topologyDescription . getServers ( ) . size ( ) ) ; <nl> + stateMachine . onServerDescription ( topologyDescription , serverDescription ) ; <nl> + ASSERT_EQUALS ( static_cast < size_t > ( 1 ) , topologyDescription . getServers ( ) . size ( ) ) ; <nl> + ASSERT_EQUALS ( serverDescription , topologyDescription . getServers ( ) . front ( ) ) ; <nl> + } <nl> + <nl> + TEST_F ( TopologyStateMachineTestFixture , <nl> + ShouldRemoveNonPrimaryServerWhenTopologyIsReplicaSetNoPrimaryAndMeDoesntMatchAddress ) { <nl> + const auto serverAddress = ( * kTwoSeedReplicaSetNoPrimaryConfig . getSeedList ( ) ) . front ( ) ; <nl> + const auto expectedRemainingServerAddress = <nl> + ( * kTwoSeedReplicaSetNoPrimaryConfig . getSeedList ( ) ) . back ( ) ; <nl> + const auto me = std : : string ( " foo " ) + serverAddress ; <nl> + <nl> + TopologyStateMachine stateMachine ( kTwoSeedReplicaSetNoPrimaryConfig ) ; <nl> + TopologyDescription topologyDescription ( kTwoSeedReplicaSetNoPrimaryConfig ) ; <nl> + <nl> + auto serverDescription = ServerDescriptionBuilder ( ) <nl> + . withAddress ( serverAddress ) <nl> + . withMe ( me ) <nl> + . withType ( ServerType : : kRSSecondary ) <nl> + . instance ( ) ; <nl> + <nl> + ASSERT_EQUALS ( static_cast < size_t > ( 2 ) , topologyDescription . getServers ( ) . size ( ) ) ; <nl> + stateMachine . onServerDescription ( topologyDescription , serverDescription ) ; <nl> + ASSERT_EQUALS ( static_cast < size_t > ( 1 ) , topologyDescription . getServers ( ) . size ( ) ) ; <nl> + ASSERT_EQUALS ( expectedRemainingServerAddress , <nl> + topologyDescription . getServers ( ) . front ( ) - > getAddress ( ) ) ; <nl> + } <nl> + <nl> + TEST_F ( TopologyStateMachineTestFixture , <nl> + ShouldAddServerDescriptionIfInHostsListButNotInTopologyDescription ) { <nl> + const auto primary = ( * kTwoSeedConfig . getSeedList ( ) ) . front ( ) ; <nl> + const auto secondary = ( * kTwoSeedConfig . getSeedList ( ) ) . back ( ) ; <nl> + const auto newHost = ServerAddress ( " newhost : 123 " ) ; <nl> + <nl> + TopologyStateMachine stateMachine ( kTwoSeedConfig ) ; <nl> + TopologyDescription topologyDescription ( kTwoSeedConfig ) ; <nl> + <nl> + auto serverDescription = ServerDescriptionBuilder ( ) <nl> + . withAddress ( primary ) <nl> + . withType ( ServerType : : kRSPrimary ) <nl> + . withPrimary ( primary ) <nl> + . withHost ( primary ) <nl> + . withHost ( secondary ) <nl> + . withHost ( newHost ) <nl> + . instance ( ) ; <nl> + <nl> + ASSERT_EQUALS ( static_cast < size_t > ( 2 ) , topologyDescription . getServers ( ) . size ( ) ) ; <nl> + stateMachine . onServerDescription ( topologyDescription , serverDescription ) ; <nl> + ASSERT_EQUALS ( static_cast < size_t > ( 3 ) , topologyDescription . getServers ( ) . size ( ) ) ; <nl> + <nl> + auto newHostResult = topologyDescription . 
findServerByAddress ( newHost ) ; <nl> + ASSERT ( newHostResult ) ; <nl> + ASSERT_EQUALS ( newHost , ( * newHostResult ) - > getAddress ( ) ) ; <nl> + ASSERT_EQUALS ( ServerType : : kUnknown , ( * newHostResult ) - > getType ( ) ) ; <nl> + } <nl> + <nl> + TEST_F ( TopologyStateMachineTestFixture , ShouldSaveNewMaxSetVersion ) { <nl> + const auto primary = ( * kTwoSeedConfig . getSeedList ( ) ) . front ( ) ; <nl> + <nl> + TopologyDescription topologyDescription ( kTwoSeedConfig ) ; <nl> + TopologyStateMachine stateMachine ( kTwoSeedConfig ) ; <nl> + <nl> + auto serverDescription = ServerDescriptionBuilder ( ) <nl> + . withType ( ServerType : : kRSPrimary ) <nl> + . withPrimary ( primary ) <nl> + . withMe ( primary ) <nl> + . withAddress ( primary ) <nl> + . withHost ( primary ) <nl> + . withSetVersion ( 100 ) <nl> + . instance ( ) ; <nl> + <nl> + stateMachine . onServerDescription ( topologyDescription , serverDescription ) ; <nl> + ASSERT_EQUALS ( 100 , topologyDescription . getMaxSetVersion ( ) ) ; <nl> + <nl> + auto serverDescriptionEvenBiggerSetVersion = ServerDescriptionBuilder ( ) <nl> + . withType ( ServerType : : kRSPrimary ) <nl> + . withPrimary ( primary ) <nl> + . withMe ( primary ) <nl> + . withAddress ( primary ) <nl> + . withHost ( primary ) <nl> + . withSetVersion ( 200 ) <nl> + . instance ( ) ; <nl> + <nl> + stateMachine . onServerDescription ( topologyDescription , serverDescriptionEvenBiggerSetVersion ) ; <nl> + ASSERT_EQUALS ( 200 , topologyDescription . getMaxSetVersion ( ) ) ; <nl> + } <nl> + <nl> + TEST_F ( TopologyStateMachineTestFixture , ShouldSaveNewMaxElectionId ) { <nl> + const auto primary = ( * kTwoSeedConfig . getSeedList ( ) ) . front ( ) ; <nl> + TopologyDescription topologyDescription ( kTwoSeedConfig ) ; <nl> + TopologyStateMachine stateMachine ( kTwoSeedConfig ) ; <nl> + <nl> + const OID oidOne ( std : : string ( " 000000000000000000000001 " ) ) ; <nl> + const OID oidTwo ( std : : string ( " 000000000000000000000002 " ) ) ; <nl> + <nl> + auto serverDescription = ServerDescriptionBuilder ( ) <nl> + . withType ( ServerType : : kRSPrimary ) <nl> + . withPrimary ( primary ) <nl> + . withMe ( primary ) <nl> + . withAddress ( primary ) <nl> + . withHost ( primary ) <nl> + . withSetVersion ( 1 ) <nl> + . withElectionId ( oidOne ) <nl> + . instance ( ) ; <nl> + <nl> + stateMachine . onServerDescription ( topologyDescription , serverDescription ) ; <nl> + ASSERT_EQUALS ( oidOne , topologyDescription . getMaxElectionId ( ) ) ; <nl> + <nl> + auto serverDescriptionEvenBiggerElectionId = ServerDescriptionBuilder ( ) <nl> + . withType ( ServerType : : kRSPrimary ) <nl> + . withPrimary ( primary ) <nl> + . withMe ( primary ) <nl> + . withAddress ( primary ) <nl> + . withHost ( primary ) <nl> + . withSetVersion ( 1 ) <nl> + . withElectionId ( oidTwo ) <nl> + . instance ( ) ; <nl> + <nl> + stateMachine . onServerDescription ( topologyDescription , serverDescriptionEvenBiggerElectionId ) ; <nl> + ASSERT_EQUALS ( oidTwo , topologyDescription . getMaxElectionId ( ) ) ; <nl> + } <nl> + <nl> + / / The following two tests ( ShouldNotUpdateToplogyType , ShouldUpdateToCorrectToplogyType ) assert <nl> + / / that the topology type is correct given an initial state and a ServerType . Together , they <nl> + / / cover all the cases specified in the SDAM spec here : <nl> + / / https : / / github . com / mongodb / specifications / blob / master / source / server - discovery - and - monitoring / server - discovery - and - monitoring . 
rst # topologytype - table <nl> + <nl> + TEST_F ( TopologyStateMachineTestFixture , ShouldNotUpdateToplogyType ) { <nl> + using T = TopologyTypeTestCase ; <nl> + <nl> + / / test cases that should not change TopologyType <nl> + std : : vector < TopologyTypeTestCase > testCases { <nl> + T { kTwoSeedConfig , TopologyType : : kUnknown , ServerType : : kUnknown , TopologyType : : kUnknown } , <nl> + T { kTwoSeedConfig , TopologyType : : kUnknown , ServerType : : kStandalone , TopologyType : : kUnknown } , <nl> + T { kTwoSeedConfig , TopologyType : : kUnknown , ServerType : : kRSGhost , TopologyType : : kUnknown } , <nl> + T { kTwoSeedConfig , <nl> + TopologyType : : kReplicaSetNoPrimary , <nl> + ServerType : : kUnknown , <nl> + TopologyType : : kReplicaSetNoPrimary } , <nl> + T { kTwoSeedConfig , <nl> + TopologyType : : kReplicaSetNoPrimary , <nl> + ServerType : : kUnknown , <nl> + TopologyType : : kReplicaSetNoPrimary } , <nl> + } ; <nl> + for ( auto serverType : allServerTypes ( ) ) { <nl> + testCases . push_back ( <nl> + T { kTwoSeedConfig , TopologyType : : kSharded , serverType , TopologyType : : kSharded } ) ; <nl> + } <nl> + <nl> + const auto & allExceptPrimary = allServerTypesExceptPrimary ( ) ; <nl> + for ( auto serverType : allExceptPrimary ) { <nl> + testCases . push_back ( T { kTwoSeedConfig , <nl> + TopologyType : : kReplicaSetNoPrimary , <nl> + serverType , <nl> + TopologyType : : kReplicaSetNoPrimary } ) ; <nl> + } <nl> + <nl> + int count = 0 ; <nl> + for ( auto testCase : testCases ) { <nl> + std : : cout < < " case " < < + + count < < " starting TopologyType : " < < toString ( testCase . starting ) <nl> + < < " ; incoming ServerType : " < < toString ( testCase . incoming ) <nl> + < < " ; expect ending TopologyType : " < < toString ( testCase . 
ending ) < < std : : endl ; <nl> + <nl> + assertTopologyTypeTestCase ( testCase ) ; <nl> + } <nl> + } <nl> + <nl> + TEST_F ( TopologyStateMachineTestFixture , ShouldUpdateToCorrectToplogyType ) { <nl> + using T = TopologyTypeTestCase ; <nl> + <nl> + / / test cases that should change TopologyType <nl> + const std : : vector < TopologyTypeTestCase > testCases { <nl> + T { kTwoSeedConfig , TopologyType : : kUnknown , ServerType : : kMongos , TopologyType : : kSharded } , <nl> + T { kTwoSeedConfig , <nl> + TopologyType : : kUnknown , <nl> + ServerType : : kRSPrimary , <nl> + TopologyType : : kReplicaSetWithPrimary } , <nl> + T { kTwoSeedConfig , <nl> + TopologyType : : kUnknown , <nl> + ServerType : : kRSSecondary , <nl> + TopologyType : : kReplicaSetNoPrimary } , <nl> + T { kTwoSeedConfig , <nl> + TopologyType : : kUnknown , <nl> + ServerType : : kRSArbiter , <nl> + TopologyType : : kReplicaSetNoPrimary } , <nl> + T { kTwoSeedConfig , <nl> + TopologyType : : kUnknown , <nl> + ServerType : : kRSOther , <nl> + TopologyType : : kReplicaSetNoPrimary } , <nl> + T { kTwoSeedConfig , <nl> + TopologyType : : kReplicaSetNoPrimary , <nl> + ServerType : : kRSPrimary , <nl> + TopologyType : : kReplicaSetWithPrimary } , <nl> + T { kTwoSeedConfig , <nl> + TopologyType : : kReplicaSetWithPrimary , <nl> + ServerType : : kUnknown , <nl> + TopologyType : : kReplicaSetNoPrimary } , <nl> + T { kTwoSeedConfig , <nl> + TopologyType : : kReplicaSetWithPrimary , <nl> + ServerType : : kStandalone , <nl> + TopologyType : : kReplicaSetNoPrimary } , <nl> + T { kTwoSeedConfig , <nl> + TopologyType : : kReplicaSetWithPrimary , <nl> + ServerType : : kMongos , <nl> + TopologyType : : kReplicaSetNoPrimary } , <nl> + T { kTwoSeedConfig , <nl> + TopologyType : : kReplicaSetWithPrimary , <nl> + ServerType : : kRSPrimary , <nl> + TopologyType : : kReplicaSetWithPrimary } , <nl> + T { kTwoSeedConfig , <nl> + TopologyType : : kReplicaSetWithPrimary , <nl> + ServerType : : kRSSecondary , <nl> + TopologyType : : kReplicaSetNoPrimary } , <nl> + T { kTwoSeedConfig , <nl> + TopologyType : : kReplicaSetWithPrimary , <nl> + ServerType : : kRSOther , <nl> + TopologyType : : kReplicaSetNoPrimary } , <nl> + T { kTwoSeedConfig , <nl> + TopologyType : : kReplicaSetWithPrimary , <nl> + ServerType : : kRSArbiter , <nl> + TopologyType : : kReplicaSetNoPrimary } , <nl> + T { kTwoSeedConfig , <nl> + TopologyType : : kReplicaSetWithPrimary , <nl> + ServerType : : kRSGhost , <nl> + TopologyType : : kReplicaSetNoPrimary } } ; <nl> + <nl> + int count = 0 ; <nl> + for ( auto testCase : testCases ) { <nl> + std : : cout < < " case " < < + + count < < " starting TopologyType : " < < toString ( testCase . starting ) <nl> + < < " ; incoming ServerType : " < < toString ( testCase . incoming ) <nl> + < < " ; expect ending TopologyType : " < < toString ( testCase . ending ) < < std : : endl ; <nl> + <nl> + assertTopologyTypeTestCase ( testCase ) ; <nl> + } <nl> + } <nl> + } / / namespace mongo : : sdam <nl>
SERVER - 43331 Implement State Machine for Server Discovery and Monitoring Spec
mongodb/mongo
124ad1c022f20dbeade4d67947e328dfe4b04e20
2019-11-08T21:36:55Z
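The test record above exercises a transition table: given a current TopologyType and an incoming ServerType, the state machine either keeps or updates the topology type. A minimal stand-alone sketch of that table-driven idea, with made-up enum values and no relation to the real MongoDB implementation:

```cpp
#include <iostream>
#include <map>
#include <utility>

// Hypothetical, simplified enums; the SDAM spec defines more states than this.
enum class TopologyType { kUnknown, kSharded, kReplicaSetNoPrimary, kReplicaSetWithPrimary };
enum class ServerType { kUnknown, kStandalone, kMongos, kRSPrimary, kRSSecondary };

// Transition table keyed by (current topology, observed server type).
// Pairs missing from the table leave the topology type unchanged.
using Key = std::pair<TopologyType, ServerType>;
static const std::map<Key, TopologyType> kTransitions = {
    {{TopologyType::kUnknown, ServerType::kMongos}, TopologyType::kSharded},
    {{TopologyType::kUnknown, ServerType::kRSPrimary}, TopologyType::kReplicaSetWithPrimary},
    {{TopologyType::kUnknown, ServerType::kRSSecondary}, TopologyType::kReplicaSetNoPrimary},
    {{TopologyType::kReplicaSetNoPrimary, ServerType::kRSPrimary}, TopologyType::kReplicaSetWithPrimary},
    {{TopologyType::kReplicaSetWithPrimary, ServerType::kStandalone}, TopologyType::kReplicaSetNoPrimary},
};

TopologyType onServerDescription(TopologyType current, ServerType incoming) {
    auto it = kTransitions.find({current, incoming});
    return it == kTransitions.end() ? current : it->second;
}

int main() {
    // Mirrors the shape of the test cases above: start state + incoming type -> end state.
    TopologyType t = onServerDescription(TopologyType::kUnknown, ServerType::kRSPrimary);
    std::cout << (t == TopologyType::kReplicaSetWithPrimary ? "with-primary" : "other") << "\n";
    return 0;
}
```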
mmm a / modules / highgui / src / cap_dshow . cpp <nl> ppp b / modules / highgui / src / cap_dshow . cpp <nl> IplImage * CvCaptureCAM_DShow : : retrieveFrame ( int ) <nl> frame = cvCreateImage ( cvSize ( w , h ) , 8 , 3 ) ; <nl> } <nl> <nl> - VI . getPixels ( index , ( uchar * ) frame - > imageData , false , true ) ; <nl> - return frame ; <nl> + if ( VI . getPixels ( index , ( uchar * ) frame - > imageData , false , true ) ) <nl> + return frame ; <nl> + else <nl> + return NULL ; <nl> } <nl> <nl> double CvCaptureCAM_DShow : : getProperty ( int property_id ) <nl>
Merge pull request from asmorkalov : dshow_valid_check_fix
opencv/opencv
55e83b8d180a941170ae246619965831dee16c0f
2013-06-10T11:06:14Z
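The one-line fix above returns NULL when pixel retrieval fails, so callers cannot mistake a stale buffer for a fresh frame. A generic sketch of the same return-null-on-failure pattern, using invented types rather than the OpenCV/videoInput API:

```cpp
#include <cstdio>

// Hypothetical frame type standing in for IplImage; the capture source is faked.
struct Frame { unsigned char data[64]; };

bool getPixels(Frame* dst, bool simulateFailure) {
    if (simulateFailure) return false;   // the device handed back nothing usable
    dst->data[0] = 42;                   // pretend pixel data was copied in
    return true;
}

// Hand the frame back only when the copy succeeded; otherwise signal failure
// with a null pointer instead of returning whatever the buffer held before.
Frame* retrieveFrame(Frame* frame, bool simulateFailure) {
    if (getPixels(frame, simulateFailure))
        return frame;
    return nullptr;
}

int main() {
    Frame f{};
    std::printf("ok=%p fail=%p\n", (void*)retrieveFrame(&f, false), (void*)retrieveFrame(&f, true));
    return 0;
}
```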
mmm a / include / swift / AST / Decl . h <nl> ppp b / include / swift / AST / Decl . h <nl> class AbstractFunctionDecl : public GenericContext , public ValueDecl { <nl> / / / <nl> / / / Functions that are an ' async ' context can make calls to ' async ' functions . <nl> bool isAsyncContext ( ) const { <nl> - return hasAsync ( ) | | getAttrs ( ) . hasAttribute < AsyncHandlerAttr > ( ) ; <nl> + return hasAsync ( ) | | isAsyncHandler ( ) ; <nl> } <nl> <nl> + / / / Returns true if the function is an @ asyncHandler . <nl> + bool isAsyncHandler ( ) const ; <nl> + <nl> / / / Returns true if the function body throws . <nl> bool hasThrows ( ) const { return Bits . AbstractFunctionDecl . Throws ; } <nl> <nl> mmm a / include / swift / AST / TypeCheckRequests . h <nl> ppp b / include / swift / AST / TypeCheckRequests . h <nl> class SelfAccessKindRequest : <nl> void cacheResult ( SelfAccessKind value ) const ; <nl> } ; <nl> <nl> + / / / Determine whether the given function is an @ asyncHandler . <nl> + class IsAsyncHandlerRequest : <nl> + public SimpleRequest < IsAsyncHandlerRequest , <nl> + bool ( FuncDecl * ) , <nl> + RequestFlags : : Cached > { <nl> + public : <nl> + using SimpleRequest : : SimpleRequest ; <nl> + <nl> + private : <nl> + friend SimpleRequest ; <nl> + <nl> + bool evaluate ( Evaluator & evaluator , FuncDecl * func ) const ; <nl> + <nl> + public : <nl> + / / Caching <nl> + bool isCached ( ) const { return true ; } <nl> + } ; <nl> + <nl> / / / Request whether the storage has a mutating getter . <nl> class IsGetterMutatingRequest : <nl> public SimpleRequest < IsGetterMutatingRequest , <nl> mmm a / include / swift / AST / TypeCheckerTypeIDZone . def <nl> ppp b / include / swift / AST / TypeCheckerTypeIDZone . def <nl> SWIFT_REQUEST ( TypeChecker , ExtendedTypeRequest , Type ( ExtensionDecl * ) , Cached , <nl> NoLocationInfo ) <nl> SWIFT_REQUEST ( TypeChecker , FunctionBuilderTypeRequest , Type ( ValueDecl * ) , <nl> Cached , NoLocationInfo ) <nl> + SWIFT_REQUEST ( TypeChecker , IsAsyncHandlerRequest , bool ( FuncDecl * ) , <nl> + Cached , NoLocationInfo ) <nl> SWIFT_REQUEST ( TypeChecker , FunctionOperatorRequest , OperatorDecl * ( FuncDecl * ) , <nl> Cached , NoLocationInfo ) <nl> SWIFT_REQUEST ( NameLookup , GenericSignatureRequest , <nl> mmm a / lib / AST / Decl . cpp <nl> ppp b / lib / AST / Decl . cpp <nl> bool AbstractFunctionDecl : : argumentNameIsAPIByDefault ( ) const { <nl> return false ; <nl> } <nl> <nl> + bool AbstractFunctionDecl : : isAsyncHandler ( ) const { <nl> + auto func = dyn_cast < FuncDecl > ( this ) ; <nl> + if ( ! func ) <nl> + return false ; <nl> + <nl> + auto mutableFunc = const_cast < FuncDecl * > ( func ) ; <nl> + return evaluateOrDefault ( getASTContext ( ) . evaluator , <nl> + IsAsyncHandlerRequest { mutableFunc } , <nl> + false ) ; <nl> + } <nl> + <nl> BraceStmt * AbstractFunctionDecl : : getBody ( bool canSynthesize ) const { <nl> if ( ( getBodyKind ( ) = = BodyKind : : Synthesize | | <nl> getBodyKind ( ) = = BodyKind : : Unparsed ) & & <nl> mmm a / lib / Sema / CMakeLists . txt <nl> ppp b / lib / Sema / CMakeLists . txt <nl> add_swift_host_library ( swiftSema STATIC <nl> TypeCheckCaptures . cpp <nl> TypeCheckCircularity . cpp <nl> TypeCheckCodeCompletion . cpp <nl> + TypeCheckConcurrency . cpp <nl> TypeCheckConstraints . cpp <nl> TypeCheckDecl . cpp <nl> TypeCheckDeclObjC . cpp <nl> mmm a / lib / Sema / TypeCheckAttr . cpp <nl> ppp b / lib / Sema / TypeCheckAttr . 
cpp <nl> <nl> <nl> using namespace swift ; <nl> <nl> - / / / Check whether the @ asyncHandler attribute can be applied to the given <nl> - / / / function declaration . <nl> - / / / <nl> - / / / \ param diagnose Whether to emit a diagnostic when a problem is encountered . <nl> - / / / <nl> - / / / \ returns \ c true if there was a problem with adding the attribute , \ c false <nl> - / / / otherwise . <nl> - static bool checkAsyncHandler ( FuncDecl * func , bool diagnose ) { <nl> - if ( ! func - > getResultInterfaceType ( ) - > isVoid ( ) ) { <nl> - if ( diagnose ) { <nl> - func - > diagnose ( diag : : asynchandler_returns_value ) <nl> - . highlight ( func - > getBodyResultTypeLoc ( ) . getSourceRange ( ) ) ; <nl> - } <nl> - <nl> - return true ; <nl> - } <nl> - <nl> - if ( func - > hasThrows ( ) ) { <nl> - if ( diagnose ) { <nl> - func - > diagnose ( diag : : asynchandler_throws ) <nl> - . fixItRemove ( func - > getThrowsLoc ( ) ) ; <nl> - } <nl> - <nl> - return true ; <nl> - } <nl> - <nl> - if ( func - > hasAsync ( ) ) { <nl> - if ( diagnose ) { <nl> - func - > diagnose ( diag : : asynchandler_async ) <nl> - . fixItRemove ( func - > getAsyncLoc ( ) ) ; <nl> - } <nl> - <nl> - return true ; <nl> - } <nl> - <nl> - for ( auto param : * func - > getParameters ( ) ) { <nl> - if ( param - > isInOut ( ) ) { <nl> - if ( diagnose ) { <nl> - param - > diagnose ( diag : : asynchandler_inout_parameter ) <nl> - . fixItRemove ( param - > getSpecifierLoc ( ) ) ; <nl> - } <nl> - <nl> - return true ; <nl> - } <nl> - } <nl> - <nl> - if ( func - > isMutating ( ) ) { <nl> - if ( diagnose ) { <nl> - auto diag = func - > diagnose ( diag : : asynchandler_mutating ) ; <nl> - if ( auto mutatingAttr = func - > getAttrs ( ) . getAttribute < MutatingAttr > ( ) ) { <nl> - diag . fixItRemove ( mutatingAttr - > getRange ( ) ) ; <nl> - } <nl> - } <nl> - <nl> - return true ; <nl> - } <nl> - <nl> - return false ; <nl> - } <nl> - <nl> namespace { <nl> / / / This emits a diagnostic with a fixit to remove the attribute . <nl> template < typename . . . ArgTypes > <nl> class AttributeChecker : public AttributeVisitor < AttributeChecker > { <nl> return ; <nl> } <nl> <nl> - if ( checkAsyncHandler ( func , / * diagnose = * / true ) ) { <nl> - attr - > setInvalid ( ) ; <nl> - return ; <nl> - } <nl> + / / Trigger the request to check for @ asyncHandler . <nl> + ( void ) func - > isAsyncHandler ( ) ; <nl> } <nl> } ; <nl> } / / end anonymous namespace <nl> void AttributeChecker : : visitTransposeAttr ( TransposeAttr * attr ) { <nl> / / Set the resolved linearity parameter indices in the attribute . <nl> attr - > setParameterIndices ( linearParamIndices ) ; <nl> } <nl> - <nl> - void swift : : addAsyncNotes ( FuncDecl * func ) { <nl> - func - > diagnose ( diag : : note_add_async_to_function , func - > getName ( ) ) ; <nl> - <nl> - if ( ! checkAsyncHandler ( func , / * diagnose = * / false ) ) { <nl> - func - > diagnose ( <nl> - diag : : note_add_asynchandler_to_function , func - > getName ( ) ) <nl> - . fixItInsert ( func - > getAttributeInsertionLoc ( false ) , " @ asyncHandler " ) ; <nl> - } <nl> - } <nl> new file mode 100644 <nl> index 000000000000 . . f70cc61096e1 <nl> mmm / dev / null <nl> ppp b / lib / Sema / TypeCheckConcurrency . cpp <nl> <nl> + / / = = = mmm TypeCheckConcurrency . cpp - Concurrency mmmmmmmmmmmmmmmmmmmmmmmmmmm = = = / / <nl> + / / <nl> + / / This source file is part of the Swift . org open source project <nl> + / / <nl> + / / Copyright ( c ) 2014 - 2020 Apple Inc . 
and the Swift project authors <nl> + / / Licensed under Apache License v2 . 0 with Runtime Library Exception <nl> + / / <nl> + / / See https : / / swift . org / LICENSE . txt for license information <nl> + / / See https : / / swift . org / CONTRIBUTORS . txt for the list of Swift project authors <nl> + / / <nl> + / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> + / / <nl> + / / This file implements type checking support for Swift ' s concurrency model . <nl> + / / <nl> + / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> + # include " TypeChecker . h " <nl> + # include " swift / AST / ParameterList . h " <nl> + # include " swift / AST / ProtocolConformance . h " <nl> + # include " swift / AST / TypeCheckRequests . h " <nl> + <nl> + using namespace swift ; <nl> + <nl> + / / / Check whether the @ asyncHandler attribute can be applied to the given <nl> + / / / function declaration . <nl> + / / / <nl> + / / / \ param diagnose Whether to emit a diagnostic when a problem is encountered . <nl> + / / / <nl> + / / / \ returns \ c true if there was a problem with adding the attribute , \ c false <nl> + / / / otherwise . <nl> + static bool checkAsyncHandler ( FuncDecl * func , bool diagnose ) { <nl> + if ( ! func - > getResultInterfaceType ( ) - > isVoid ( ) ) { <nl> + if ( diagnose ) { <nl> + func - > diagnose ( diag : : asynchandler_returns_value ) <nl> + . highlight ( func - > getBodyResultTypeLoc ( ) . getSourceRange ( ) ) ; <nl> + } <nl> + <nl> + return true ; <nl> + } <nl> + <nl> + if ( func - > hasThrows ( ) ) { <nl> + if ( diagnose ) { <nl> + func - > diagnose ( diag : : asynchandler_throws ) <nl> + . fixItRemove ( func - > getThrowsLoc ( ) ) ; <nl> + } <nl> + <nl> + return true ; <nl> + } <nl> + <nl> + if ( func - > hasAsync ( ) ) { <nl> + if ( diagnose ) { <nl> + func - > diagnose ( diag : : asynchandler_async ) <nl> + . fixItRemove ( func - > getAsyncLoc ( ) ) ; <nl> + } <nl> + <nl> + return true ; <nl> + } <nl> + <nl> + for ( auto param : * func - > getParameters ( ) ) { <nl> + if ( param - > isInOut ( ) ) { <nl> + if ( diagnose ) { <nl> + param - > diagnose ( diag : : asynchandler_inout_parameter ) <nl> + . fixItRemove ( param - > getSpecifierLoc ( ) ) ; <nl> + } <nl> + <nl> + return true ; <nl> + } <nl> + } <nl> + <nl> + if ( func - > isMutating ( ) ) { <nl> + if ( diagnose ) { <nl> + auto diag = func - > diagnose ( diag : : asynchandler_mutating ) ; <nl> + if ( auto mutatingAttr = func - > getAttrs ( ) . getAttribute < MutatingAttr > ( ) ) { <nl> + diag . fixItRemove ( mutatingAttr - > getRange ( ) ) ; <nl> + } <nl> + } <nl> + <nl> + return true ; <nl> + } <nl> + <nl> + return false ; <nl> + } <nl> + <nl> + void swift : : addAsyncNotes ( FuncDecl * func ) { <nl> + func - > diagnose ( diag : : note_add_async_to_function , func - > getName ( ) ) ; <nl> + <nl> + if ( ! checkAsyncHandler ( func , / * diagnose = * / false ) ) { <nl> + func - > diagnose ( <nl> + diag : : note_add_asynchandler_to_function , func - > getName ( ) ) <nl> + . fixItInsert ( func - > getAttributeInsertionLoc ( false ) , " @ asyncHandler " ) ; <nl> + } <nl> + } <nl> + <nl> + bool IsAsyncHandlerRequest : : evaluate ( <nl> + Evaluator & evaluator , FuncDecl * func ) const { <nl> + / / Check whether the attribute was explicitly specified . <nl> + if ( auto attr = func - > getAttrs ( ) . getAttribute < AsyncHandlerAttr > ( ) ) { <nl> + / / Check for well - formedness . 
<nl> + if ( checkAsyncHandler ( func , / * diagnose = * / true ) ) { <nl> + attr - > setInvalid ( ) ; <nl> + return false ; <nl> + } <nl> + <nl> + return true ; <nl> + } <nl> + <nl> + if ( ! func - > getASTContext ( ) . LangOpts . EnableExperimentalConcurrency ) <nl> + return false ; <nl> + <nl> + / / Are we in a context where inference is possible ? <nl> + auto dc = func - > getDeclContext ( ) ; <nl> + if ( ! dc - > isTypeContext ( ) | | ! dc - > getParentSourceFile ( ) | | <nl> + isa < ProtocolDecl > ( dc ) | | ! func - > hasBody ( ) ) <nl> + return false ; <nl> + <nl> + / / Is it possible to infer @ asyncHandler for this function at all ? <nl> + if ( checkAsyncHandler ( func , / * diagnose = * / false ) ) <nl> + return false ; <nl> + <nl> + / / Add an implicit @ asyncHandler attribute and return true . We ' re done . <nl> + auto addImplicitAsyncHandlerAttr = [ & ] { <nl> + func - > getAttrs ( ) . add ( new ( func - > getASTContext ( ) ) AsyncHandlerAttr ( true ) ) ; <nl> + return true ; <nl> + } ; <nl> + <nl> + / / Check whether any of the conformances in the context of the function <nl> + / / implies @ asyncHandler . <nl> + { <nl> + auto idc = cast < IterableDeclContext > ( dc - > getAsDecl ( ) ) ; <nl> + auto conformances = evaluateOrDefault ( <nl> + dc - > getASTContext ( ) . evaluator , <nl> + LookupAllConformancesInContextRequest { idc } , { } ) ; <nl> + <nl> + for ( auto conformance : conformances ) { <nl> + auto protocol = conformance - > getProtocol ( ) ; <nl> + for ( auto found : protocol - > lookupDirect ( func - > getName ( ) ) ) { <nl> + if ( ! isa < ProtocolDecl > ( found - > getDeclContext ( ) ) ) <nl> + continue ; <nl> + <nl> + auto requirement = dyn_cast < FuncDecl > ( found ) ; <nl> + if ( ! requirement ) <nl> + continue ; <nl> + <nl> + if ( ! requirement - > isAsyncHandler ( ) ) <nl> + continue ; <nl> + <nl> + auto witness = conformance - > getWitnessDecl ( requirement ) ; <nl> + if ( witness ! = func ) <nl> + continue ; <nl> + <nl> + return addImplicitAsyncHandlerAttr ( ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + / / Look through dynamic replacements . <nl> + if ( auto replaced = func - > getDynamicallyReplacedDecl ( ) ) { <nl> + if ( auto replacedFunc = dyn_cast < FuncDecl > ( replaced ) ) <nl> + if ( replacedFunc - > isAsyncHandler ( ) ) <nl> + return addImplicitAsyncHandlerAttr ( ) ; <nl> + } <nl> + <nl> + return false ; <nl> + } <nl> mmm a / test / attr / asynchandler . swift <nl> ppp b / test / attr / asynchandler . swift <nl> struct X { <nl> @ asyncHandler init ( ) { } <nl> / / expected - error @ - 1 { { @ asyncHandler may only be used on ' func ' declarations } } <nl> } <nl> + <nl> + <nl> + / / Inference of @ asyncHandler <nl> + protocol P { <nl> + @ asyncHandler func callback ( ) <nl> + } <nl> + <nl> + extension X : P { <nl> + func callback ( ) { <nl> + / / okay , it ' s an async context <nl> + let _ = await globalAsyncFunction ( ) <nl> + } <nl> + } <nl>
Merge pull request from DougGregor / concurrency - infer - asynchandler
apple/swift
84a21d1769426a2a592b36ae36a32c0f78a6fc15
2020-08-15T03:32:39Z
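The commit above moves the @asyncHandler check behind a cached type-checker request, so the answer is computed once per function and then reused. A toy sketch of that memoized-query shape, with hypothetical types that merely stand in for the Swift request evaluator:

```cpp
#include <functional>
#include <iostream>
#include <unordered_map>

// A toy "declaration": only records whether the attribute was written explicitly.
struct FuncDecl { int id; bool hasExplicitAttr; };

// Cached query: evaluate once per declaration id, then serve from the cache,
// which is roughly what marking a request as Cached buys in the sketch above.
class Evaluator {
    std::unordered_map<int, bool> cache_;
public:
    bool isAsyncHandler(const FuncDecl& fn, const std::function<bool(const FuncDecl&)>& compute) {
        auto it = cache_.find(fn.id);
        if (it != cache_.end()) return it->second;
        bool result = compute(fn);
        cache_.emplace(fn.id, result);
        return result;
    }
};

int main() {
    Evaluator eval;
    FuncDecl fn{1, true};
    auto compute = [](const FuncDecl& f) {
        std::cout << "computing...\n";   // printed only for the first query
        return f.hasExplicitAttr;        // inference rules would live here
    };
    std::cout << eval.isAsyncHandler(fn, compute) << "\n";
    std::cout << eval.isAsyncHandler(fn, compute) << "\n";  // cache hit, no recompute
    return 0;
}
```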
mmm a / tests / test_core . py <nl> ppp b / tests / test_core . py <nl> def test_asm_pgo ( self ) : <nl> <nl> def test_response_file ( self ) : <nl> with open ( ' rsp_file ' , ' w ' ) as f : <nl> - f . write ( ' - o % s / response_file . o . js % s ' % ( self . get_dir ( ) , path_from_root ( ' tests ' , ' hello_world . cpp ' ) ) ) <nl> + response_data = ' - o % s / response_file . o . js % s ' % ( self . get_dir ( ) , path_from_root ( ' tests ' , ' hello_world . cpp ' ) ) <nl> + f . write ( response_data . replace ( ' \ \ ' , ' \ \ \ \ ' ) ) <nl> run_process ( [ PYTHON , EMCC , " @ rsp_file " ] + self . emcc_args ) <nl> self . do_run ( ' ' , ' hello , world ' , basename = ' response_file ' , no_build = True ) <nl> <nl> def test_linker_response_file ( self ) : <nl> # by emscripten , except when using the wasm backend ( lld ) in which case it <nl> # should pass the original flag to the linker . <nl> with open ( ' rsp_file ' , ' w ' ) as f : <nl> - f . write ( objfile + ' - - export = foo ' ) <nl> + response_data = objfile + ' - - export = foo ' <nl> + f . write ( response_data . replace ( ' \ \ ' , ' \ \ \ \ ' ) ) <nl> run_process ( [ PYTHON , EMCC , " - Wl , @ rsp_file " , ' - o ' , os . path . join ( self . get_dir ( ) , ' response_file . o . js ' ) ] + self . emcc_args ) <nl> self . do_run ( ' ' , ' hello , world ' , basename = ' response_file ' , no_build = True ) <nl> <nl>
Merge pull request from juj / fix_test_response_file_on_windows
emscripten-core/emscripten
0c8040d431a9964b270cd08ee632038e042832bf
2018-07-31T05:50:17Z
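The test fix above doubles backslashes before writing Windows paths into a response file, so the file's reader does not treat them as escape characters. A small sketch of that escaping step in isolation (generic code, not the Emscripten test harness):

```cpp
#include <fstream>
#include <iostream>
#include <string>

// Double every backslash so a Windows path survives a round trip through a
// response file whose parser interprets '\' as an escape character.
std::string escapeBackslashes(const std::string& in) {
    std::string out;
    out.reserve(in.size());
    for (char c : in) {
        if (c == '\\') out += "\\\\";
        else out += c;
    }
    return out;
}

int main() {
    const std::string args = "-o C:\\build\\out.js C:\\src\\hello_world.cpp";
    std::ofstream rsp("rsp_file");
    rsp << escapeBackslashes(args) << "\n";   // what the fixed test writes out
    std::cout << escapeBackslashes(args) << "\n";
    return 0;
}
```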
mmm a / android / sdk / src / main / java / com / taobao / weex / WXSDKInstance . java <nl> ppp b / android / sdk / src / main / java / com / taobao / weex / WXSDKInstance . java <nl> public boolean isNeedValidate ( ) { <nl> / * <nl> * store custom ViewPort Width <nl> * / <nl> + @ Deprecated <nl> public void setViewPortWidth ( int viewPortWidth ) { <nl> mViewPortWidth = viewPortWidth ; <nl> } <nl> <nl> + @ Deprecated <nl> public static int getViewPortWidth ( ) { <nl> return mViewPortWidth ; <nl> } <nl> public void setInstanceViewPortWidth ( int instanceViewPortWidth ) { <nl> this . mInstanceViewPortWidth = instanceViewPortWidth ; <nl> } <nl> <nl> + public int getInstanceViewPortWidth ( ) { <nl> + return mInstanceViewPortWidth ; <nl> + } <nl> + <nl> public interface OnInstanceVisibleListener { <nl> void onAppear ( ) ; <nl> void onDisappear ( ) ; <nl> public void setSize ( int width , int height ) { <nl> if ( width < 0 | | height < 0 | | isDestroy | | ! mRendered ) { <nl> return ; <nl> } <nl> - float realWidth = WXViewUtils . getWebPxByWidth ( width , getViewPortWidth ( ) ) ; <nl> - float realHeight = WXViewUtils . getWebPxByWidth ( height , getViewPortWidth ( ) ) ; <nl> + float realWidth = WXViewUtils . getWebPxByWidth ( width , getInstanceViewPortWidth ( ) ) ; <nl> + float realHeight = WXViewUtils . getWebPxByWidth ( height , getInstanceViewPortWidth ( ) ) ; <nl> <nl> View godView = mRenderContainer ; <nl> if ( godView ! = null ) { <nl> mmm a / android / sdk / src / main / java / com / taobao / weex / WXSDKManager . java <nl> ppp b / android / sdk / src / main / java / com / taobao / weex / WXSDKManager . java <nl> public static WXSDKManager getInstance ( ) { <nl> } <nl> <nl> public static int getInstanceViewPortWidth ( String instanceId ) { <nl> - return getInstance ( ) . getSDKInstance ( instanceId ) . getViewPortWidth ( ) ; <nl> + return getInstance ( ) . getSDKInstance ( instanceId ) . getInstanceViewPortWidth ( ) ; <nl> } <nl> <nl> static void setInstance ( WXSDKManager manager ) { <nl> mmm a / android / sdk / src / main / java / com / taobao / weex / dom / WXDomModule . java <nl> ppp b / android / sdk / src / main / java / com / taobao / weex / dom / WXDomModule . java <nl> public void getComponentRect ( String ref , String callback ) { <nl> <nl> @ NonNull <nl> private float getWebPxValue ( int value ) { <nl> - return WXViewUtils . getWebPxByWidth ( value , mWXSDKInstance . getViewPortWidth ( ) ) ; <nl> + return WXViewUtils . getWebPxByWidth ( value , mWXSDKInstance . getInstanceViewPortWidth ( ) ) ; <nl> } <nl> } <nl> mmm a / android / sdk / src / main / java / com / taobao / weex / dom / WXDomObject . java <nl> ppp b / android / sdk / src / main / java / com / taobao / weex / dom / WXDomObject . java <nl> public String dumpDomTree ( ) { <nl> <nl> WXDomObject domObject = WXDomObjectFactory . newInstance ( type ) ; <nl> <nl> - domObject . setViewPortWidth ( wxsdkInstance . getViewPortWidth ( ) ) ; <nl> + domObject . setViewPortWidth ( wxsdkInstance . getInstanceViewPortWidth ( ) ) ; <nl> <nl> if ( domObject = = null ) { <nl> return null ; <nl> mmm a / android / sdk / src / main / java / com / taobao / weex / dom / WXDomStatement . java <nl> ppp b / android / sdk / src / main / java / com / taobao / weex / dom / WXDomStatement . java <nl> private WXAnimationBean createAnimationBean ( String ref , String animation ) { <nl> int width = ( int ) domObject . getLayoutWidth ( ) ; <nl> int height = ( int ) domObject . getLayoutHeight ( ) ; <nl> animationBean . styles . init ( animationBean . 
styles . transformOrigin , <nl> - animationBean . styles . transform , width , height , WXSDKManager . getInstance ( ) . getSDKInstance ( mInstanceId ) . getViewPortWidth ( ) ) ; <nl> + animationBean . styles . transform , width , height , WXSDKManager . getInstance ( ) . getSDKInstance ( mInstanceId ) . getInstanceViewPortWidth ( ) ) ; <nl> } <nl> return animationBean ; <nl> } catch ( RuntimeException e ) { <nl> private WXAnimationBean createAnimationBean ( String ref , Map < String , Object > style <nl> int width = ( int ) domObject . getLayoutWidth ( ) ; <nl> int height = ( int ) domObject . getLayoutHeight ( ) ; <nl> animationBean . styles = new WXAnimationBean . Style ( ) ; <nl> - animationBean . styles . init ( transformOrigin , ( String ) transform , width , height , WXSDKManager . getInstance ( ) . getSDKInstance ( mInstanceId ) . getViewPortWidth ( ) ) ; <nl> + animationBean . styles . init ( transformOrigin , ( String ) transform , width , height , WXSDKManager . getInstance ( ) . getSDKInstance ( mInstanceId ) . getInstanceViewPortWidth ( ) ) ; <nl> return animationBean ; <nl> } <nl> } catch ( RuntimeException e ) { <nl> mmm a / android / sdk / src / main / java / com / taobao / weex / ui / WXRenderStatement . java <nl> ppp b / android / sdk / src / main / java / com / taobao / weex / ui / WXRenderStatement . java <nl> public void getComponentSize ( String ref , JSCallback callback ) { <nl> if ( component ! = null ) { <nl> Map < String , Float > size = new HashMap < > ( ) ; <nl> Rect sizes = component . getComponentSize ( ) ; <nl> - size . put ( " width " , WXViewUtils . getWebPxByWidth ( sizes . width ( ) , mWXSDKInstance . getViewPortWidth ( ) ) ) ; <nl> - size . put ( " height " , WXViewUtils . getWebPxByWidth ( sizes . height ( ) , mWXSDKInstance . getViewPortWidth ( ) ) ) ; <nl> - size . put ( " bottom " , WXViewUtils . getWebPxByWidth ( sizes . bottom , mWXSDKInstance . getViewPortWidth ( ) ) ) ; <nl> - size . put ( " left " , WXViewUtils . getWebPxByWidth ( sizes . left , mWXSDKInstance . getViewPortWidth ( ) ) ) ; <nl> - size . put ( " right " , WXViewUtils . getWebPxByWidth ( sizes . right , mWXSDKInstance . getViewPortWidth ( ) ) ) ; <nl> - size . put ( " top " , WXViewUtils . getWebPxByWidth ( sizes . top , mWXSDKInstance . getViewPortWidth ( ) ) ) ; <nl> + size . put ( " width " , WXViewUtils . getWebPxByWidth ( sizes . width ( ) , mWXSDKInstance . getInstanceViewPortWidth ( ) ) ) ; <nl> + size . put ( " height " , WXViewUtils . getWebPxByWidth ( sizes . height ( ) , mWXSDKInstance . getInstanceViewPortWidth ( ) ) ) ; <nl> + size . put ( " bottom " , WXViewUtils . getWebPxByWidth ( sizes . bottom , mWXSDKInstance . getInstanceViewPortWidth ( ) ) ) ; <nl> + size . put ( " left " , WXViewUtils . getWebPxByWidth ( sizes . left , mWXSDKInstance . getInstanceViewPortWidth ( ) ) ) ; <nl> + size . put ( " right " , WXViewUtils . getWebPxByWidth ( sizes . right , mWXSDKInstance . getInstanceViewPortWidth ( ) ) ) ; <nl> + size . put ( " top " , WXViewUtils . getWebPxByWidth ( sizes . top , mWXSDKInstance . getInstanceViewPortWidth ( ) ) ) ; <nl> options . put ( " size " , size ) ; <nl> options . put ( " result " , true ) ; <nl> } else { <nl> mmm a / android / sdk / src / main / java / com / taobao / weex / ui / animation / WXAnimationModule . java <nl> ppp b / android / sdk / src / main / java / com / taobao / weex / ui / animation / WXAnimationModule . 
java <nl> public static void startAnimation ( WXSDKInstance instance , WXComponent component , <nl> return ; <nl> } <nl> try { <nl> - Animator animator = createAnimator ( animationBean , component . getHostView ( ) , instance . getViewPortWidth ( ) ) ; <nl> + Animator animator = createAnimator ( animationBean , component . getHostView ( ) , instance . getInstanceViewPortWidth ( ) ) ; <nl> if ( animator ! = null ) { <nl> Animator . AnimatorListener animatorCallback = createAnimatorListener ( instance , callback ) ; <nl> if ( Build . VERSION . SDK_INT < Build . VERSION_CODES . JELLY_BEAN_MR2 ) { <nl> mmm a / android / sdk / src / main / java / com / taobao / weex / ui / component / AbstractEditComponent . java <nl> ppp b / android / sdk / src / main / java / com / taobao / weex / ui / component / AbstractEditComponent . java <nl> protected void appleStyleAfterCreated ( WXEditText editText ) { <nl> editText . setHintTextColor ( colorInt ) ; <nl> } <nl> <nl> - editText . setTextSize ( TypedValue . COMPLEX_UNIT_PX , WXStyle . getFontSize ( getDomObject ( ) . getStyles ( ) , getInstance ( ) . getViewPortWidth ( ) ) ) ; <nl> + editText . setTextSize ( TypedValue . COMPLEX_UNIT_PX , WXStyle . getFontSize ( getDomObject ( ) . getStyles ( ) , getInstance ( ) . getInstanceViewPortWidth ( ) ) ) ; <nl> editText . setText ( getDomObject ( ) . getAttrs ( ) . optString ( Constants . Name . VALUE ) ) ; <nl> } <nl> <nl> public void setColor ( String color ) { <nl> @ WXComponentProp ( name = Constants . Name . FONT_SIZE ) <nl> public void setFontSize ( String fontSize ) { <nl> if ( getHostView ( ) ! = null & & fontSize ! = null ) { <nl> - getHostView ( ) . setTextSize ( TypedValue . COMPLEX_UNIT_PX , WXStyle . getFontSize ( getDomObject ( ) . getStyles ( ) , getInstance ( ) . getViewPortWidth ( ) ) ) ; <nl> + getHostView ( ) . setTextSize ( TypedValue . COMPLEX_UNIT_PX , WXStyle . getFontSize ( getDomObject ( ) . getStyles ( ) , getInstance ( ) . getInstanceViewPortWidth ( ) ) ) ; <nl> } <nl> } <nl> <nl> mmm a / android / sdk / src / main / java / com / taobao / weex / ui / component / WXComponent . java <nl> ppp b / android / sdk / src / main / java / com / taobao / weex / ui / component / WXComponent . java <nl> public void onHostViewClick ( ) { <nl> Map < String , Object > position = WXDataStructureUtil . newHashMapWithExpectedSize ( 4 ) ; <nl> int [ ] location = new int [ 2 ] ; <nl> mHost . getLocationOnScreen ( location ) ; <nl> - position . put ( " x " , WXViewUtils . getWebPxByWidth ( location [ 0 ] , mInstance . getViewPortWidth ( ) ) ) ; <nl> - position . put ( " y " , WXViewUtils . getWebPxByWidth ( location [ 1 ] , mInstance . getViewPortWidth ( ) ) ) ; <nl> - position . put ( " width " , WXViewUtils . getWebPxByWidth ( mDomObj . getLayoutWidth ( ) , mInstance . getViewPortWidth ( ) ) ) ; <nl> - position . put ( " height " , WXViewUtils . getWebPxByWidth ( mDomObj . getLayoutHeight ( ) , mInstance . getViewPortWidth ( ) ) ) ; <nl> + position . put ( " x " , WXViewUtils . getWebPxByWidth ( location [ 0 ] , mInstance . getInstanceViewPortWidth ( ) ) ) ; <nl> + position . put ( " y " , WXViewUtils . getWebPxByWidth ( location [ 1 ] , mInstance . getInstanceViewPortWidth ( ) ) ) ; <nl> + position . put ( " width " , WXViewUtils . getWebPxByWidth ( mDomObj . getLayoutWidth ( ) , mInstance . getInstanceViewPortWidth ( ) ) ) ; <nl> + position . put ( " height " , WXViewUtils . getWebPxByWidth ( mDomObj . getLayoutHeight ( ) , mInstance . getInstanceViewPortWidth ( ) ) ) ; <nl> param . put ( Constants . 
Name . POSITION , position ) ; <nl> fireEvent ( Constants . Event . CLICK , param ) ; <nl> } <nl> public void setBorderRadius ( String key , float borderRadius ) { <nl> if ( borderRadius > = 0 ) { <nl> switch ( key ) { <nl> case Constants . Name . BORDER_RADIUS : <nl> - getOrCreateBorder ( ) . setBorderRadius ( BorderDrawable . BORDER_RADIUS_ALL , WXViewUtils . getRealSubPxByWidth ( borderRadius , mInstance . getViewPortWidth ( ) ) ) ; <nl> + getOrCreateBorder ( ) . setBorderRadius ( BorderDrawable . BORDER_RADIUS_ALL , WXViewUtils . getRealSubPxByWidth ( borderRadius , mInstance . getInstanceViewPortWidth ( ) ) ) ; <nl> break ; <nl> case Constants . Name . BORDER_TOP_LEFT_RADIUS : <nl> - getOrCreateBorder ( ) . setBorderRadius ( BorderDrawable . BORDER_TOP_LEFT_RADIUS , WXViewUtils . getRealSubPxByWidth ( borderRadius , mInstance . getViewPortWidth ( ) ) ) ; <nl> + getOrCreateBorder ( ) . setBorderRadius ( BorderDrawable . BORDER_TOP_LEFT_RADIUS , WXViewUtils . getRealSubPxByWidth ( borderRadius , mInstance . getInstanceViewPortWidth ( ) ) ) ; <nl> break ; <nl> case Constants . Name . BORDER_TOP_RIGHT_RADIUS : <nl> - getOrCreateBorder ( ) . setBorderRadius ( BorderDrawable . BORDER_TOP_RIGHT_RADIUS , WXViewUtils . getRealSubPxByWidth ( borderRadius , mInstance . getViewPortWidth ( ) ) ) ; <nl> + getOrCreateBorder ( ) . setBorderRadius ( BorderDrawable . BORDER_TOP_RIGHT_RADIUS , WXViewUtils . getRealSubPxByWidth ( borderRadius , mInstance . getInstanceViewPortWidth ( ) ) ) ; <nl> break ; <nl> case Constants . Name . BORDER_BOTTOM_RIGHT_RADIUS : <nl> - getOrCreateBorder ( ) . setBorderRadius ( BorderDrawable . BORDER_BOTTOM_RIGHT_RADIUS , WXViewUtils . getRealSubPxByWidth ( borderRadius , mInstance . getViewPortWidth ( ) ) ) ; <nl> + getOrCreateBorder ( ) . setBorderRadius ( BorderDrawable . BORDER_BOTTOM_RIGHT_RADIUS , WXViewUtils . getRealSubPxByWidth ( borderRadius , mInstance . getInstanceViewPortWidth ( ) ) ) ; <nl> break ; <nl> case Constants . Name . BORDER_BOTTOM_LEFT_RADIUS : <nl> - getOrCreateBorder ( ) . setBorderRadius ( BorderDrawable . BORDER_BOTTOM_LEFT_RADIUS , WXViewUtils . getRealSubPxByWidth ( borderRadius , mInstance . getViewPortWidth ( ) ) ) ; <nl> + getOrCreateBorder ( ) . setBorderRadius ( BorderDrawable . BORDER_BOTTOM_LEFT_RADIUS , WXViewUtils . getRealSubPxByWidth ( borderRadius , mInstance . getInstanceViewPortWidth ( ) ) ) ; <nl> break ; <nl> } <nl> } <nl> public void setBorderWidth ( String key , float borderWidth ) { <nl> if ( borderWidth > = 0 ) { <nl> switch ( key ) { <nl> case Constants . Name . BORDER_WIDTH : <nl> - getOrCreateBorder ( ) . setBorderWidth ( Spacing . ALL , WXViewUtils . getRealSubPxByWidth ( borderWidth , getInstance ( ) . getViewPortWidth ( ) ) ) ; <nl> + getOrCreateBorder ( ) . setBorderWidth ( Spacing . ALL , WXViewUtils . getRealSubPxByWidth ( borderWidth , getInstance ( ) . getInstanceViewPortWidth ( ) ) ) ; <nl> break ; <nl> case Constants . Name . BORDER_TOP_WIDTH : <nl> - getOrCreateBorder ( ) . setBorderWidth ( Spacing . TOP , WXViewUtils . getRealSubPxByWidth ( borderWidth , getInstance ( ) . getViewPortWidth ( ) ) ) ; <nl> + getOrCreateBorder ( ) . setBorderWidth ( Spacing . TOP , WXViewUtils . getRealSubPxByWidth ( borderWidth , getInstance ( ) . getInstanceViewPortWidth ( ) ) ) ; <nl> break ; <nl> case Constants . Name . BORDER_RIGHT_WIDTH : <nl> - getOrCreateBorder ( ) . setBorderWidth ( Spacing . RIGHT , WXViewUtils . getRealSubPxByWidth ( borderWidth , getInstance ( ) . 
getViewPortWidth ( ) ) ) ; <nl> + getOrCreateBorder ( ) . setBorderWidth ( Spacing . RIGHT , WXViewUtils . getRealSubPxByWidth ( borderWidth , getInstance ( ) . getInstanceViewPortWidth ( ) ) ) ; <nl> break ; <nl> case Constants . Name . BORDER_BOTTOM_WIDTH : <nl> - getOrCreateBorder ( ) . setBorderWidth ( Spacing . BOTTOM , WXViewUtils . getRealSubPxByWidth ( borderWidth , getInstance ( ) . getViewPortWidth ( ) ) ) ; <nl> + getOrCreateBorder ( ) . setBorderWidth ( Spacing . BOTTOM , WXViewUtils . getRealSubPxByWidth ( borderWidth , getInstance ( ) . getInstanceViewPortWidth ( ) ) ) ; <nl> break ; <nl> case Constants . Name . BORDER_LEFT_WIDTH : <nl> - getOrCreateBorder ( ) . setBorderWidth ( Spacing . LEFT , WXViewUtils . getRealSubPxByWidth ( borderWidth , getInstance ( ) . getViewPortWidth ( ) ) ) ; <nl> + getOrCreateBorder ( ) . setBorderWidth ( Spacing . LEFT , WXViewUtils . getRealSubPxByWidth ( borderWidth , getInstance ( ) . getInstanceViewPortWidth ( ) ) ) ; <nl> break ; <nl> } <nl> } <nl> public void setVisibility ( String visibility ) { <nl> * This is an experimental feature for elevation of material design . <nl> * / <nl> private void updateElevation ( ) { <nl> - float elevation = getDomObject ( ) . getAttrs ( ) . getElevation ( getInstance ( ) . getViewPortWidth ( ) ) ; <nl> + float elevation = getDomObject ( ) . getAttrs ( ) . getElevation ( getInstance ( ) . getInstanceViewPortWidth ( ) ) ; <nl> if ( ! Float . isNaN ( elevation ) ) { <nl> ViewCompat . setElevation ( getHostView ( ) , elevation ) ; <nl> } <nl> mmm a / android / sdk / src / main / java / com / taobao / weex / ui / component / WXEmbed . java <nl> ppp b / android / sdk / src / main / java / com / taobao / weex / ui / component / WXEmbed . java <nl> public WXEmbed ( WXSDKInstance instance , WXDomObject node , WXVContainer parent ) { <nl> super ( instance , node , parent ) ; <nl> mListener = new EmbedRenderListener ( this ) ; <nl> <nl> - ERROR_IMG_WIDTH = ( int ) WXViewUtils . getRealPxByWidth ( 270 , instance . getViewPortWidth ( ) ) ; <nl> - ERROR_IMG_HEIGHT = ( int ) WXViewUtils . getRealPxByWidth ( 260 , instance . getViewPortWidth ( ) ) ; <nl> + ERROR_IMG_WIDTH = ( int ) WXViewUtils . getRealPxByWidth ( 270 , instance . getInstanceViewPortWidth ( ) ) ; <nl> + ERROR_IMG_HEIGHT = ( int ) WXViewUtils . getRealPxByWidth ( 260 , instance . getInstanceViewPortWidth ( ) ) ; <nl> if ( instance instanceof EmbedManager ) { <nl> Object itemId = node . getAttrs ( ) . get ( ITEM_ID ) ; <nl> if ( itemId ! = null ) { <nl> mmm a / android / sdk / src / main / java / com / taobao / weex / ui / component / WXIndicator . java <nl> ppp b / android / sdk / src / main / java / com / taobao / weex / ui / component / WXIndicator . java <nl> public void setItemSize ( int itemSize ) { <nl> if ( itemSize < 0 ) { <nl> return ; <nl> } <nl> - getHostView ( ) . setRadius ( WXViewUtils . getRealPxByWidth ( itemSize , getInstance ( ) . getViewPortWidth ( ) ) / 2 . 0f ) ; <nl> + getHostView ( ) . setRadius ( WXViewUtils . getRealPxByWidth ( itemSize , getInstance ( ) . getInstanceViewPortWidth ( ) ) / 2 . 0f ) ; <nl> getHostView ( ) . forceLayout ( ) ; <nl> getHostView ( ) . requestLayout ( ) ; <nl> } <nl> mmm a / android / sdk / src / main / java / com / taobao / weex / ui / component / WXSliderNeighbor . java <nl> ppp b / android / sdk / src / main / java / com / taobao / weex / ui / component / WXSliderNeighbor . java <nl> <nl> import android . widget . FrameLayout ; <nl> <nl> import com . taobao . weex . 
WXSDKInstance ; <nl> - import com . taobao . weex . WXSDKManager ; <nl> import com . taobao . weex . common . WXThread ; <nl> - import com . taobao . weex . dom . WXDomManager ; <nl> import com . taobao . weex . dom . WXDomObject ; <nl> import com . taobao . weex . ui . ComponentCreator ; <nl> import com . taobao . weex . ui . view . WXCircleIndicator ; <nl> import com . taobao . weex . ui . view . WXCirclePageAdapter ; <nl> import com . taobao . weex . ui . view . WXCircleViewPager ; <nl> - import com . taobao . weex . utils . WXLogUtils ; <nl> import com . taobao . weex . utils . WXUtils ; <nl> import com . taobao . weex . utils . WXViewUtils ; <nl> <nl> - import java . lang . reflect . Field ; <nl> import java . lang . reflect . InvocationTargetException ; <nl> import java . util . List ; <nl> - import java . util . concurrent . ConcurrentHashMap ; <nl> <nl> / * * <nl> * Known Issus : In auto play mode , neighbor view not scaled or aplhaed rarely . <nl> private float calculateTranslation ( @ NonNull View hostPage ) { <nl> } <nl> View realView = ( ( ViewGroup ) hostPage ) . getChildAt ( 0 ) ; <nl> float translation = ( hostPage . getMeasuredWidth ( ) - realView . getMeasuredWidth ( ) * mNeighborScale ) / 4 ; <nl> - translation + = ( ( hostPage . getMeasuredWidth ( ) - realView . getMeasuredWidth ( ) * mCurrentItemScale ) / 2 - WXViewUtils . getRealPxByWidth ( mNeighborSpace , getInstance ( ) . getViewPortWidth ( ) ) ) / 2 ; <nl> + translation + = ( ( hostPage . getMeasuredWidth ( ) - realView . getMeasuredWidth ( ) * mCurrentItemScale ) / 2 - WXViewUtils . getRealPxByWidth ( mNeighborSpace , getInstance ( ) . getInstanceViewPortWidth ( ) ) ) / 2 ; <nl> return translation ; <nl> } <nl> <nl> mmm a / android / sdk / src / main / java / com / taobao / weex / ui / component / list / BasicListComponent . java <nl> ppp b / android / sdk / src / main / java / com / taobao / weex / ui / component / list / BasicListComponent . java <nl> public void setScrollable ( boolean scrollable ) { <nl> <nl> @ WXComponentProp ( name = Constants . Name . OFFSET_ACCURACY ) <nl> public void setOffsetAccuracy ( int accuracy ) { <nl> - float real = WXViewUtils . getRealPxByWidth ( accuracy , getInstance ( ) . getViewPortWidth ( ) ) ; <nl> + float real = WXViewUtils . getRealPxByWidth ( accuracy , getInstance ( ) . getInstanceViewPortWidth ( ) ) ; <nl> this . mOffsetAccuracy = ( int ) real ; <nl> } <nl> <nl> public void scrollTo ( WXComponent component , Map < String , Object > options ) { <nl> smooth = WXUtils . getBoolean ( options . get ( Constants . Name . ANIMATED ) , true ) ; <nl> if ( offsetStr ! = null ) { <nl> try { <nl> - offsetFloat = WXViewUtils . getRealPxByWidth ( Float . parseFloat ( offsetStr ) , WXSDKInstance . getViewPortWidth ( ) ) ; <nl> + offsetFloat = WXViewUtils . getRealPxByWidth ( Float . parseFloat ( offsetStr ) , getInstance ( ) . getInstanceViewPortWidth ( ) ) ; <nl> } catch ( Exception e ) { <nl> WXLogUtils . e ( " Float parseFloat error : " + e . getMessage ( ) ) ; <nl> } <nl> public void onLoadMore ( int offScreenY ) { <nl> if ( TextUtils . isEmpty ( offset ) ) { <nl> offset = " 0 " ; <nl> } <nl> - float offsetParsed = WXViewUtils . getRealPxByWidth ( Integer . parseInt ( offset ) , WXSDKInstance . getViewPortWidth ( ) ) ; <nl> + float offsetParsed = WXViewUtils . getRealPxByWidth ( Integer . parseInt ( offset ) , getInstance ( ) . 
getInstanceViewPortWidth ( ) ) ; <nl> <nl> if ( offScreenY < offsetParsed ) { <nl> <nl> public void onScrolled ( RecyclerView recyclerView , int dx , int dy ) { <nl> Map < String , Object > contentSize = new HashMap < > ( 2 ) ; <nl> Map < String , Object > contentOffset = new HashMap < > ( 2 ) ; <nl> <nl> - contentSize . put ( Constants . Name . WIDTH , WXViewUtils . getWebPxByWidth ( contentWidth , getInstance ( ) . getViewPortWidth ( ) ) ) ; <nl> - contentSize . put ( Constants . Name . HEIGHT , WXViewUtils . getWebPxByWidth ( contentHeight , getInstance ( ) . getViewPortWidth ( ) ) ) ; <nl> + contentSize . put ( Constants . Name . WIDTH , WXViewUtils . getWebPxByWidth ( contentWidth , getInstance ( ) . getInstanceViewPortWidth ( ) ) ) ; <nl> + contentSize . put ( Constants . Name . HEIGHT , WXViewUtils . getWebPxByWidth ( contentHeight , getInstance ( ) . getInstanceViewPortWidth ( ) ) ) ; <nl> <nl> - contentOffset . put ( Constants . Name . X , - WXViewUtils . getWebPxByWidth ( offsetX , getInstance ( ) . getViewPortWidth ( ) ) ) ; <nl> - contentOffset . put ( Constants . Name . Y , - WXViewUtils . getWebPxByWidth ( offsetY , getInstance ( ) . getViewPortWidth ( ) ) ) ; <nl> + contentOffset . put ( Constants . Name . X , - WXViewUtils . getWebPxByWidth ( offsetX , getInstance ( ) . getInstanceViewPortWidth ( ) ) ) ; <nl> + contentOffset . put ( Constants . Name . Y , - WXViewUtils . getWebPxByWidth ( offsetY , getInstance ( ) . getInstanceViewPortWidth ( ) ) ) ; <nl> event . put ( Constants . Name . CONTENT_SIZE , contentSize ) ; <nl> event . put ( Constants . Name . CONTENT_OFFSET , contentOffset ) ; <nl> <nl> mmm a / android / sdk / src / main / java / com / taobao / weex / ui / view / gesture / WXGesture . java <nl> ppp b / android / sdk / src / main / java / com / taobao / weex / ui / view / gesture / WXGesture . java <nl> private PointF getEventLocInScreenCoordinate ( float eventX , float eventY ) { <nl> globalEventOffset . set ( ( int ) eventX , ( int ) eventY ) ; <nl> component . getRealView ( ) . getGlobalVisibleRect ( globalRect , globalOffset ) ; <nl> globalEventOffset . offset ( globalOffset . x , globalOffset . y ) ; <nl> - return new PointF ( WXViewUtils . getWebPxByWidth ( globalEventOffset . x , component . getInstance ( ) . getViewPortWidth ( ) ) , <nl> - WXViewUtils . getWebPxByWidth ( globalEventOffset . y , component . getInstance ( ) . getViewPortWidth ( ) ) ) ; <nl> + return new PointF ( WXViewUtils . getWebPxByWidth ( globalEventOffset . x , component . getInstance ( ) . getInstanceViewPortWidth ( ) ) , <nl> + WXViewUtils . getWebPxByWidth ( globalEventOffset . y , component . getInstance ( ) . getInstanceViewPortWidth ( ) ) ) ; <nl> } <nl> <nl> / * * <nl> private PointF getEventLocInPageCoordinate ( float eventX , float eventY ) { <nl> locLeftTop . set ( 0 , 0 ) ; <nl> component . computeVisiblePointInViewCoordinate ( locLeftTop ) ; <nl> locEventOffset . offset ( locLeftTop . x , locLeftTop . y ) ; <nl> - return new PointF ( WXViewUtils . getWebPxByWidth ( locEventOffset . x , component . getInstance ( ) . getViewPortWidth ( ) ) , <nl> - WXViewUtils . getWebPxByWidth ( locEventOffset . y , component . getInstance ( ) . getViewPortWidth ( ) ) ) ; <nl> + return new PointF ( WXViewUtils . getWebPxByWidth ( locEventOffset . x , component . getInstance ( ) . getInstanceViewPortWidth ( ) ) , <nl> + WXViewUtils . getWebPxByWidth ( locEventOffset . y , component . getInstance ( ) . 
getInstanceViewPortWidth ( ) ) ) ; <nl> } <nl> <nl> private static class GestureHandler extends android . os . Handler { <nl> mmm a / android / sdk / src / test / java / com / taobao / weex / ui / module / WXMetaModuleTest . java <nl> ppp b / android / sdk / src / test / java / com / taobao / weex / ui / module / WXMetaModuleTest . java <nl> public void setViewport ( ) throws Exception { <nl> JSONObject jsonObject = new JSONObject ( ) ; <nl> jsonObject . put ( WXMetaModule . WIDTH , 640 ) ; <nl> mMeta . setViewport ( jsonObject . toString ( ) ) ; <nl> - assertTrue ( mMeta . mWXSDKInstance . getViewPortWidth ( ) = = 640 ) ; <nl> + assertTrue ( mMeta . mWXSDKInstance . getInstanceViewPortWidth ( ) = = 640 ) ; <nl> <nl> jsonObject . put ( WXMetaModule . WIDTH , 320 . 5 ) ; <nl> mMeta . setViewport ( jsonObject . toString ( ) ) ; <nl> - assertTrue ( mMeta . mWXSDKInstance . getViewPortWidth ( ) = = 320 ) ; <nl> + assertTrue ( mMeta . mWXSDKInstance . getInstanceViewPortWidth ( ) = = 320 ) ; <nl> <nl> jsonObject . put ( WXMetaModule . WIDTH , " - 200 " ) ; <nl> mMeta . setViewport ( jsonObject . toString ( ) ) ; <nl> - assertTrue ( mMeta . mWXSDKInstance . getViewPortWidth ( ) = = 320 ) ; <nl> + assertTrue ( mMeta . mWXSDKInstance . getInstanceViewPortWidth ( ) = = 320 ) ; <nl> <nl> jsonObject . put ( WXMetaModule . WIDTH , " error " ) ; <nl> mMeta . setViewport ( jsonObject . toString ( ) ) ; <nl> - assertTrue ( mMeta . mWXSDKInstance . getViewPortWidth ( ) = = 320 ) ; <nl> + assertTrue ( mMeta . mWXSDKInstance . getInstanceViewPortWidth ( ) = = 320 ) ; <nl> <nl> <nl> mMeta . setViewport ( " ads " ) ; <nl> - assertTrue ( mMeta . mWXSDKInstance . getViewPortWidth ( ) = = 320 ) ; <nl> + assertTrue ( mMeta . mWXSDKInstance . getInstanceViewPortWidth ( ) = = 320 ) ; <nl> } <nl> <nl> } <nl> \ No newline at end of file <nl>
* [ android ] deprecated static viewport getter and setter
apache/incubator-weex
fcfb32b282a5fed07d83816c2566310993a135ba
2017-03-10T09:05:07Z
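The commit above deprecates the static viewport-width accessors in favour of per-instance ones, because a process-wide static value lets one page silently override another's viewport. A compact sketch of the difference, using an invented class rather than the Weex SDK:

```cpp
#include <iostream>

// Toy stand-in for an SDK instance. The deprecated pattern stored the
// viewport width in a static member shared by every instance.
class Instance {
    static int sharedViewPortWidth;   // old, process-wide value
    int instanceViewPortWidth = 750;  // new, per-instance value
public:
    void setViewPortWidthStatic(int w) { sharedViewPortWidth = w; }      // deprecated style
    static int getViewPortWidthStatic() { return sharedViewPortWidth; }  // deprecated style
    void setInstanceViewPortWidth(int w) { instanceViewPortWidth = w; }
    int getInstanceViewPortWidth() const { return instanceViewPortWidth; }
};
int Instance::sharedViewPortWidth = 750;

int main() {
    Instance pageA, pageB;
    pageA.setViewPortWidthStatic(640);   // the second page silently overrides the first
    pageB.setViewPortWidthStatic(320);
    std::cout << "static: " << Instance::getViewPortWidthStatic() << "\n";  // 320 for both pages

    pageA.setInstanceViewPortWidth(640); // per-instance values stay independent
    pageB.setInstanceViewPortWidth(320);
    std::cout << "per-instance: " << pageA.getInstanceViewPortWidth()
              << " / " << pageB.getInstanceViewPortWidth() << "\n";
    return 0;
}
```

Note that the diff keeps the static getter and setter but marks them @Deprecated, so existing callers keep compiling while new code migrates to the instance accessors.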
mmm a / tensorflow / contrib / lite / model . cc <nl> ppp b / tensorflow / contrib / lite / model . cc <nl> TfLiteStatus InterpreterBuilder : : ParseNodes ( <nl> } <nl> <nl> const TfLiteRegistration * registration = <nl> - flatbuffer_op_index_to_registration_ [ op - > opcode_index ( ) ] ; <nl> + flatbuffer_op_index_to_registration_ [ index ] ; <nl> if ( registration = = nullptr ) { <nl> error_reporter_ - > Report ( " Skipping op for opcode_index % d \ n " , index ) ; <nl> status = kTfLiteError ; <nl>
Remove an unnecessary op - > opcode_index ( ) operation .
tensorflow/tensorflow
92cc6352abaf2442c0d29755f87f6dbcd514a684
2018-06-29T04:37:43Z
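The one-line change above appears to reuse an index already held in a local instead of re-reading it through the operator accessor. A trivial sketch of that read-once-then-reuse pattern, with hypothetical types unrelated to TFLite:

```cpp
#include <iostream>

// Hypothetical flatbuffer-like operator record with a slightly costly accessor.
struct Operator {
    int opcode;
    int opcode_index() const { return opcode; }  // imagine this decodes from a buffer
};

int main() {
    Operator op{3};
    const char* names[4] = {"ADD", "MUL", "CONV2D", nullptr};

    // Read the opcode index once into a local and reuse it for both the table
    // lookup and the diagnostic, rather than calling the accessor repeatedly.
    int index = op.opcode_index();
    const char* registration = (index >= 0 && index < 4) ? names[index] : nullptr;
    if (registration == nullptr)
        std::cout << "Skipping op for opcode_index " << index << "\n";
    else
        std::cout << "op resolved to " << registration << "\n";
    return 0;
}
```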
mmm a / docs / api / crash - reporter . md <nl> ppp b / docs / api / crash - reporter . md <nl> The ` crashReporter ` module has the following methods : <nl> report . Only string properties are sent correctly , Nested objects are not <nl> supported . <nl> <nl> - You are required to call this method before using other ` crashReporter ` <nl> - APIs . <nl> - <nl> - * * Note : * * On macOS , Electron uses a new ` crashpad ` client , which is different <nl> - from ` breakpad ` on Windows and Linux . To enable the crash collection feature , <nl> - you are required to call the ` crashReporter . start ` API to initialize ` crashpad ` <nl> - in the main process and in each renderer process from which you wish to collect <nl> - crash reports . <nl> + You are required to call this method before using other ` crashReporter ` APIs <nl> + and in each process ( main / renderer ) from which you want to collect crash reports . <nl> + You can pass different options to ` crashReporter . start ` while calling from different processes . <nl> + <nl> + * * Note : * * On Windows and Linux , Electron uses ` breakpad ` for crash collection and reporting . <nl> + Crashes can be collected from the main and renderer process , but not from the child processes <nl> + created via ` child_process ` module . <nl> + <nl> + * * Note : * * On macOS , Electron uses a new ` crashpad ` client for crash collection and reporting . <nl> + Crashes can be collected from the main , renderer and any of the child processes created via the ` child_process ` module . <nl> + If you want to enable crash reporting , initializing ` crashpad ` from the main process using ` crashReporter . start ` is mandatory <nl> + regardless of which process you want to collect crashes from . Once initialized this way , the crashpad handler collects <nl> + crashes from all processes . You still have to call ` crashReporter . start ` from the renderer process , otherwise crashes from <nl> + renderer processes will get reported without ` companyName ` , ` productName ` or any of the ` extra ` information . <nl> <nl> # # # ` crashReporter . getLastCrashReport ( ) ` <nl> <nl>
Clarifying crash reporter behaviour on Mac
electron/electron
f8b738e6c23d2cd30184c31d847b75608dfc2f6c
2016-11-23T23:36:03Z
new file mode 100644 <nl> index 00000000000 . . 512703a52fb <nl> mmm / dev / null <nl> ppp b / src / operator / convolution_op - inl . h <nl> <nl> + / * ! <nl> + * Copyright ( c ) 2015 by Contributors <nl> + * \ file convolution_op - inl . h <nl> + * \ brief convolution op <nl> + * \ author Bing Xu <nl> + * / <nl> + # ifndef MXNET_CONVOLUTION_OP_INL_H_ <nl> + # define MXNET_CONVOLUTION_OP_INL_H_ <nl> + <nl> + # include < mxnet / operator . h > <nl> + # include " . / operator_common . h " <nl> + # include " . / param . h " <nl> + <nl> + namespace mxnet { <nl> + namespace op { <nl> + template < typename xpu > <nl> + class ConvolutionOp : public Operator { <nl> + public : <nl> + virtual std : : vector < ArgType > DescribeArgs ( ) const { <nl> + ArgType ret [ ] = { kDataArg , kWeightArg , kBiasArg } ; <nl> + if ( param_ . no_bias = = 0 ) { <nl> + return std : : vector < ArgType > ( ret , ret + 3 ) ; <nl> + } else { <nl> + return std : : vector < ArgType > ( ret , ret + 2 ) ; <nl> + } <nl> + } <nl> + virtual void SetParam ( const char * name , const char * val ) { <nl> + param_ . SetParam ( name , val ) ; <nl> + } <nl> + virtual void InferShape ( std : : vector < TShape > * in_shape , <nl> + std : : vector < TShape > * out_shape ) { <nl> + using namespace mshadow ; <nl> + if ( param_ . no_bias = = 0 ) { <nl> + CHECK ( in_shape - > size ( ) = = 3 ) < < " Input : [ data , weight , bias ] " ; <nl> + } else { <nl> + CHECK ( in_shape - > size ( ) = = 2 ) < < " Input : [ data , weight ] " ; <nl> + } <nl> + CHECK ( param_ . num_channel > 0 ) ; <nl> + const TShape & dshape = ( * in_shape ) [ 0 ] ; <nl> + CHECK ( dshape . ndim ( ) = = 4 ) < < \ <nl> + " Input data should be 4D in batch - channel - y - x " ; <nl> + ShapeAssignCheck ( ( * in_shape ) [ 1 ] , Shape4 ( param_ . num_channel , <nl> + dshape [ 1 ] , <nl> + param_ . kernel_y , <nl> + param_ . kernel_x ) ) ; <nl> + if ( param_ . no_bias = = 0 ) { <nl> + ShapeAssignCheck ( ( * in_shape ) [ 2 ] , Shape1 ( param_ . num_channel ) ) ; <nl> + } <nl> + out_shape - > clear ( ) ; <nl> + out_shape - > push_back ( dshape ) ; <nl> + const index_t ksize_y = static_cast < index_t > ( param_ . kernel_y ) ; <nl> + const index_t ksize_x = static_cast < index_t > ( param_ . kernel_x ) ; <nl> + const index_t kstride = static_cast < index_t > ( param_ . stride_y ) ; <nl> + / / todo : support dual stride <nl> + mshadow : : Shape < 4 > ishape = in_shape - > at ( 0 ) . get < 4 > ( ) ; <nl> + CHECK ( ishape [ 1 ] % param_ . num_group = = 0 ) < < \ <nl> + " input channels must divide group size " ; <nl> + CHECK ( param_ . num_channel % param_ . num_group = = 0 ) < < \ <nl> + " output channels must divide group size " ; <nl> + CHECK ( ksize_y > 0 & & ksize_x > 0 ) < < \ <nl> + " incorrect kernel size " ; <nl> + CHECK ( ksize_x < = ishape [ 3 ] & & ksize_y < = ishape [ 2 ] ) < < \ <nl> + " kernel size exceed input " ; <nl> + ( * out_shape ) [ 0 ] [ 1 ] = param_ . num_channel ; <nl> + ( * out_shape ) [ 0 ] [ 2 ] = ( ishape [ 2 ] + 2 * param_ . pad_y - ksize_y ) / kstride + 1 ; <nl> + ( * out_shape ) [ 0 ] [ 3 ] = ( ishape [ 3 ] + 2 * param_ . pad_x - ksize_x ) / kstride + 1 ; <nl> + } <nl> + virtual void Forward ( Option opt , <nl> + RunContext ctx , <nl> + const std : : vector < TBlob > & in_data , <nl> + const std : : vector < TBlob > & out_data ) { <nl> + using namespace mshadow ; <nl> + using namespace mshadow : : expr ; <nl> + size_t expected = param_ . no_bias = = 0 ? 3 : 2 ; <nl> + CHECK ( in_data . size ( ) = = expected ) ; <nl> + CHECK ( out_data . 
size ( ) = = 1 ) ; <nl> + / / weight shape with group <nl> + TShape ws ; <nl> + ShapeAssignCheck ( ws , Shape3 ( param_ . num_group , <nl> + param_ . num_channel / param_ . num_group , <nl> + param_ . num_input_channel / param_ . num_group * <nl> + param_ . kernel_y * param_ . kernel_x ) ) ; <nl> + Stream < xpu > * s = static_cast < Stream < xpu > * > ( ctx . stream ) ; <nl> + Tensor < xpu , 4 > data = in_data [ 0 ] . get < xpu , 4 , real_t > ( s ) ; <nl> + Tensor < xpu , 3 > wmat = in_data [ 1 ] . get_with_shape < xpu , 3 , real_t > ( ws , s ) ; <nl> + Tensor < xpu , 4 > out = out_data [ 0 ] . get < xpu , 4 , real_t > ( s ) ; <nl> + this - > InitTemp ( data . shape_ , out . shape_ ) ; <nl> + const index_t nbatch = data . size ( 0 ) ; <nl> + for ( index_t i = 0 ; i < nbatch ; i + = nstep_ ) { <nl> + / / resize , incase last batch is smaller <nl> + const index_t step = std : : min ( nstep_ , nbatch - i ) ; <nl> + temp_col_ . Resize ( mshadow : : Shape2 ( shape_colunit_ [ 0 ] , <nl> + shape_colunit_ [ 1 ] * step ) ) ; <nl> + temp_dst_ . Resize ( mshadow : : Shape3 ( shape_dstunit_ [ 0 ] , <nl> + shape_dstunit_ [ 1 ] , <nl> + shape_dstunit_ [ 2 ] * step ) ) ; <nl> + <nl> + if ( param_ . pad_x = = 0 & & param_ . pad_y = = 0 ) { <nl> + temp_col_ = unpack_patch2col ( data . Slice ( i , i + step ) , <nl> + param_ . kernel_y , <nl> + param_ . kernel_x , <nl> + param_ . stride_y ) ; <nl> + / / TODO : make mshadow support dual stride <nl> + } else { <nl> + temp_col_ = unpack_patch2col ( pad ( data . Slice ( i , i + step ) , <nl> + param_ . pad_y , param_ . pad_x ) , <nl> + param_ . kernel_y , <nl> + param_ . kernel_x , <nl> + param_ . stride_y ) ; <nl> + / / TODO : make mshadow support dual stride <nl> + } <nl> + const index_t gstride = temp_col_ . size ( 0 ) / param_ . num_group ; <nl> + for ( int gid = 0 ; gid < param_ . num_group ; + + gid ) { <nl> + mshadow : : Tensor < xpu , 2 > tmpc = temp_col_ . Slice ( gstride * gid , <nl> + gstride * ( gid + 1 ) ) ; <nl> + temp_dst_ [ gid ] = dot ( wmat [ gid ] , tmpc ) ; <nl> + } <nl> + out . Slice ( i , i + step ) = swapaxis < 1 , 0 > ( reshape ( temp_dst_ , <nl> + mshadow : : Shape4 ( param_ . num_channel , <nl> + step , <nl> + out . size ( 2 ) , <nl> + out . size ( 3 ) ) ) ) ; <nl> + } <nl> + if ( param_ . no_bias = = 0 ) { <nl> + / / add bias , broadcast bias to dim 1 : channel <nl> + Tensor < xpu , 1 > bias = in_data [ 2 ] . get < xpu , 1 , real_t > ( s ) ; <nl> + out + = broadcast < 1 > ( bias , out . shape_ ) ; <nl> + } <nl> + } <nl> + virtual void Backward ( RunContext ctx , <nl> + const std : : vector < TBlob > & grad_next , <nl> + const std : : vector < TBlob > & in_data , <nl> + const std : : vector < TBlob > & out_grad , <nl> + const std : : vector < GradReqType > & req ) { <nl> + using namespace mshadow ; <nl> + using namespace mshadow : : expr ; <nl> + CHECK ( grad_next . size ( ) = = 1 ) ; <nl> + size_t expected = param_ . no_bias = = 0 ? 3 : 2 ; <nl> + CHECK ( in_data . size ( ) = = expected & & out_grad . size ( ) = = expected ) ; <nl> + CHECK ( req . size ( ) = = expected ) ; <nl> + TShape ws ; <nl> + ShapeAssignCheck ( ws , Shape3 ( param_ . num_group , <nl> + param_ . num_channel / param_ . num_group , <nl> + param_ . num_input_channel / param_ . num_group * <nl> + param_ . kernel_y * param_ . kernel_x ) ) ; <nl> + Stream < xpu > * s = static_cast < Stream < xpu > * > ( ctx . stream ) ; <nl> + Tensor < xpu , 4 > data = in_data [ 0 ] . get < xpu , 4 , real_t > ( s ) ; <nl> + Tensor < xpu , 3 > wmat = in_data [ 1 ] . 
get_with_shape < xpu , 3 , real_t > ( ws , s ) ; <nl> + Tensor < xpu , 4 > grad = grad_next [ 0 ] . get < xpu , 4 , real_t > ( s ) ; <nl> + Tensor < xpu , 4 > gdata = out_grad [ 0 ] . get < xpu , 4 , real_t > ( s ) ; <nl> + Tensor < xpu , 3 > gwmat = out_grad [ 0 ] . get_with_shape < xpu , 3 , real_t > ( ws , s ) ; <nl> + this - > InitTemp ( data . shape_ , grad . shape_ ) ; <nl> + const index_t nbatch = data . size ( 0 ) ; <nl> + for ( index_t i = 0 ; i < nbatch ; i + = nstep_ ) { <nl> + const index_t step = std : : min ( nstep_ , nbatch - i ) ; <nl> + temp_col_ . Resize ( mshadow : : Shape2 ( shape_colunit_ [ 0 ] , <nl> + shape_colunit_ [ 1 ] * step ) ) ; <nl> + temp_dst_ . Resize ( mshadow : : Shape3 ( shape_dstunit_ [ 0 ] , <nl> + shape_dstunit_ [ 1 ] , <nl> + shape_dstunit_ [ 2 ] * step ) ) ; <nl> + temp_dst_ = reshape ( swapaxis < 1 , 0 > ( grad . Slice ( i , i + step ) ) , <nl> + temp_dst_ . shape_ ) ; <nl> + if ( param_ . pad_x = = 0 & & param_ . pad_y = = 0 ) { <nl> + temp_col_ = unpack_patch2col ( data . Slice ( i , i + step ) , <nl> + param_ . kernel_y , <nl> + param_ . kernel_x , <nl> + param_ . stride_y ) ; <nl> + / / TODO : dual stride <nl> + } else { <nl> + temp_col_ = unpack_patch2col ( pad ( data . Slice ( i , i + step ) , <nl> + param_ . pad_y , param_ . pad_x ) , <nl> + param_ . kernel_y , <nl> + param_ . kernel_x , <nl> + param_ . stride_y ) ; <nl> + / / TODO : dual stride <nl> + } <nl> + const index_t gstride = temp_col_ . size ( 0 ) / param_ . num_group ; <nl> + for ( int gid = 0 ; gid < param_ . num_group ; + + gid ) { <nl> + mshadow : : Tensor < xpu , 2 > tmpc = temp_col_ . Slice ( gstride * gid , <nl> + gstride * ( gid + 1 ) ) ; <nl> + gwmat [ gid ] + = dot ( temp_dst_ [ gid ] , tmpc . T ( ) ) ; <nl> + } <nl> + if ( req [ 0 ] ! = kNullOp ) { <nl> + for ( int gid = 0 ; gid < param_ . num_group ; + + gid ) { <nl> + mshadow : : Tensor < xpu , 2 > tmpc = temp_col_ . Slice ( gstride * gid , <nl> + gstride * ( gid + 1 ) ) ; <nl> + tmpc = dot ( wmat [ gid ] . T ( ) , temp_dst_ [ gid ] ) ; <nl> + } <nl> + <nl> + if ( param_ . pad_x = = 0 & & param_ . pad_y = = 0 ) { <nl> + Tensor < xpu , 4 > gdata_tmp = gdata . Slice ( i , i + step ) ; <nl> + Assign ( gdata_tmp , <nl> + req [ 0 ] , <nl> + pack_col2patch ( temp_col_ , <nl> + data . Slice ( i , i + step ) . shape_ , <nl> + param_ . kernel_y , <nl> + param_ . kernel_x , <nl> + param_ . stride_y ) ) ; <nl> + / / TODO : dual stride <nl> + } else { <nl> + mshadow : : Shape < 4 > pshape = data . Slice ( i , i + step ) . shape_ ; <nl> + pshape [ 2 ] + = 2 * param_ . pad_y ; pshape [ 3 ] + = 2 * param_ . pad_x ; <nl> + Tensor < xpu , 4 > gdata_tmp = gdata . Slice ( i , i + step ) ; <nl> + Assign ( gdata_tmp , <nl> + req [ 0 ] , <nl> + crop ( pack_col2patch ( temp_col_ , <nl> + pshape , <nl> + param_ . kernel_y , <nl> + param_ . kernel_x , <nl> + param_ . stride_y ) , <nl> + data [ i ] [ 0 ] . shape_ ) ) ; <nl> + / / TODO : dual stride <nl> + } <nl> + } <nl> + } <nl> + if ( param_ . no_bias = = 0 ) { <nl> + Tensor < xpu , 1 > gbias = out_grad [ 2 ] . get < xpu , 1 , real_t > ( s ) ; <nl> + Assign ( gbias , req [ 2 ] , sumall_except_dim < 1 > ( grad ) ) ; <nl> + } <nl> + } <nl> + private : <nl> + / * ! \ brief Alloc temp space for pack / unpack * / <nl> + inline void InitTemp ( mshadow : : Shape < 4 > ishape , mshadow : : Shape < 4 > oshape ) { <nl> + const index_t ksize_y = static_cast < index_t > ( param_ . kernel_y ) ; <nl> + const index_t ksize_x = static_cast < index_t > ( param_ . 
kernel_x ) ; <nl> + / / this is the unit size of each temp structure <nl> + shape_colunit_ = mshadow : : Shape2 ( ishape [ 1 ] * ksize_y * ksize_x , <nl> + oshape [ 2 ] * oshape [ 3 ] ) ; <nl> + shape_dstunit_ = mshadow : : Shape3 ( param_ . num_group , <nl> + param_ . num_channel / param_ . num_group , <nl> + oshape [ 2 ] * oshape [ 3 ] ) ; <nl> + nstep_ = std : : max ( std : : min ( ( index_t ) ( param_ . temp_col_max / <nl> + shape_colunit_ . Size ( ) ) , <nl> + ishape [ 0 ] ) , 1U ) ; <nl> + / / make nstep more balanced , <nl> + / / nstep will use exactly same number of operations to finish , <nl> + index_t nop = ( ishape [ 0 ] + nstep_ - 1 ) / nstep_ ; <nl> + nstep_ = ( ishape [ 0 ] + nop - 1 ) / nop ; <nl> + CHECK ( nstep_ > 0 ) ; <nl> + / / helper structure <nl> + temp_col_ . Resize ( mshadow : : Shape2 ( shape_colunit_ [ 0 ] , <nl> + shape_colunit_ [ 1 ] * nstep_ ) ) ; <nl> + temp_dst_ . Resize ( mshadow : : Shape3 ( shape_dstunit_ [ 0 ] , <nl> + shape_dstunit_ [ 1 ] , <nl> + shape_dstunit_ [ 2 ] * nstep_ ) ) ; <nl> + } <nl> + / * ! \ brief parameters that potentially be useful * / <nl> + Param param_ ; <nl> + / * ! \ brief temporary data structure to store patches * / <nl> + mshadow : : TensorContainer < xpu , 2 > temp_col_ ; <nl> + / * ! \ brief temporary data structure to store results * / <nl> + mshadow : : TensorContainer < xpu , 3 > temp_dst_ ; <nl> + / * ! \ brief shape of column unit * / <nl> + mshadow : : Shape < 2 > shape_colunit_ ; <nl> + / * ! \ brief shape of dst unit * / <nl> + mshadow : : Shape < 3 > shape_dstunit_ ; <nl> + / * ! \ brief how many number of batches to be unpacked together * / <nl> + mshadow : : index_t nstep_ ; <nl> + } ; / / class ConvolutionOp <nl> + } / / op <nl> + } / / namespace mxnet <nl> + # endif / / MXNET_CONVOLUTION_OP_INL_H_ <nl> mmm a / src / operator / fully_connect_op - inl . h <nl> ppp b / src / operator / fully_connect_op - inl . h <nl> class FullyConnectOp : public Operator { <nl> CHECK ( grad_next . size ( ) = = 1 ) ; <nl> size_t expected = param_ . no_bias = = 0 ? 3 : 2 ; <nl> CHECK ( in_data . size ( ) = = expected & & out_grad . size ( ) = = expected ) ; <nl> - CHECK ( req . size ( ) = = 3 ) ; <nl> + CHECK ( req . size ( ) = = expected ) ; <nl> Stream < xpu > * s = static_cast < Stream < xpu > * > ( ctx . stream ) ; <nl> Tensor < xpu , 2 > data = in_data [ 0 ] . FlatTo2D < xpu , real_t > ( s ) ; <nl> Tensor < xpu , 2 > wmat = in_data [ 1 ] . get < xpu , 2 , real_t > ( s ) ; <nl> mmm a / src / operator / operator - inl . h <nl> ppp b / src / operator / operator - inl . h <nl> <nl> # include " . / mshadow_op . h " <nl> # include " . / activation_op - inl . h " <nl> # include " . / fully_connect_op - inl . h " <nl> + # include " . / convolution_op - inl . h " <nl> + <nl> <nl> namespace mxnet { <nl> namespace op { <nl> / * ! <nl> - * \ brief device invariant function to create operators <nl> + * \ brief device invariant function to create operators <nl> * \ param type the type of operator <nl> * \ tparam xpu the device type we are at <nl> * / <nl> inline Operator * CreateOperator_ ( OpType type ) { <nl> return new ActivationOp < xpu , relu , relu_grad > ( ) ; <nl> case kFullc : <nl> return new FullyConnectOp < xpu > ( ) ; <nl> + case kConv : <nl> + return new ConvolutionOp < xpu > ( ) ; <nl> default : LOG ( FATAL ) < < " unknown OpType " ; <nl> } <nl> return NULL ; <nl>
conv op
apache/incubator-mxnet
1bd68582af813163ebeb0b3679b278fa84e15341
2015-06-22T04:16:03Z
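The convolution operator added in the commit above follows the classic im2col strategy: `unpack_patch2col` lays every kernel-sized patch of the (optionally padded) input out as a column of `temp_col_`, each group's weight matrix is multiplied against its slice of those columns with `dot`, and the result is reshaped and `swapaxis`-ed back into the output tensor. Below is a minimal, self-contained C++ sketch of the im2col step alone, for a single-channel image with stride 1 and no padding; the function name and the flat `std::vector` layout are illustrative assumptions, not the mshadow API used in the diff.

```cpp
#include <cstdio>
#include <vector>

// Illustrative im2col: lay out every ky*kx patch of a single-channel h*w image
// as one column of a (ky*kx) x (out_h*out_w) matrix, stride 1, no padding.
// This mirrors the role of unpack_patch2col in the diff, not its signature.
std::vector<float> im2col(const std::vector<float>& img, int h, int w, int ky, int kx) {
  const int out_h = h - ky + 1;
  const int out_w = w - kx + 1;
  std::vector<float> col(static_cast<size_t>(ky) * kx * out_h * out_w);
  for (int y = 0; y < out_h; ++y) {
    for (int x = 0; x < out_w; ++x) {
      const int column = y * out_w + x;            // which output pixel
      for (int dy = 0; dy < ky; ++dy) {
        for (int dx = 0; dx < kx; ++dx) {
          const int row = dy * kx + dx;            // position inside the patch
          col[static_cast<size_t>(row) * out_h * out_w + column] =
              img[static_cast<size_t>(y + dy) * w + (x + dx)];
        }
      }
    }
  }
  return col;
}

int main() {
  // 3x3 image, 2x2 kernel -> 4 columns of length 4.
  std::vector<float> img = {1, 2, 3, 4, 5, 6, 7, 8, 9};
  std::vector<float> col = im2col(img, 3, 3, 2, 2);
  for (size_t i = 0; i < col.size(); ++i)
    std::printf("%.0f%c", col[i], (i + 1) % 4 ? ' ' : '\n');
  return 0;
}
```

A full forward pass would then be one matrix product per group between that group's weight matrix and its slice of this column matrix, which is exactly the `temp_dst_[gid] = dot(wmat[gid], tmpc)` step in the diff, followed by adding the broadcast bias when `no_bias == 0`.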
mmm a / test / js - perf - test / ArraySort / sort - base . js <nl> ppp b / test / js - perf - test / ArraySort / sort - base . js <nl> function CreatePackedObjectArray ( ) { <nl> } <nl> <nl> function CreateHoleySmiArray ( ) { <nl> - array_to_sort = new Array ( kArraySize ) ; <nl> - for ( let i = 0 ; i < kArraySize ; + + i ) { <nl> - array_to_sort [ i ] = template_array [ i ] ; <nl> - } <nl> - <nl> + array_to_sort = Array . from ( template_array ) ; <nl> + delete array_to_sort [ 0 ] ; <nl> AssertHoleySmiElements ( ) ; <nl> } <nl> <nl> function cmp_smaller ( a , b ) { <nl> } <nl> <nl> function cmp_greater ( a , b ) { return cmp_smaller ( b , a ) ; } <nl> + <nl> + / / The counter is used in some benchmarks to trigger actions during sorting . <nl> + / / To keep benchmarks deterministic , the counter needs to be reset for each <nl> + / / iteration . <nl> + let counter = 0 ; <nl> + <nl> + / / Sorting benchmarks need to execute setup and tearDown for each iteration . <nl> + / / Otherwise the benchmarks would mainly measure sorting already sorted arrays <nl> + / / which , depending on the strategy , is either the worst - or best case . <nl> + function createSortSuite ( name , reference , run , setup , tearDown = ( ) = > { } ) { <nl> + let run_fn = ( ) = > { <nl> + counter = 0 ; <nl> + <nl> + setup ( ) ; <nl> + run ( ) ; <nl> + tearDown ( ) ; <nl> + } ; <nl> + <nl> + return createSuite ( name , reference , run_fn ) ; <nl> + } <nl> mmm a / test / js - perf - test / ArraySort / sort - cmpfn - kindchange . js <nl> ppp b / test / js - perf - test / ArraySort / sort - cmpfn - kindchange . js <nl> load ( ' sort - base . js ' ) ; <nl> / / after a set amount of comparisons . The transform function should cause the <nl> / / element kind of the array to change . <nl> function CreateCompareFn ( transformfn ) { <nl> - let counter = 0 ; <nl> return ( a , b ) = > { <nl> + + counter ; <nl> if ( counter = = kArraySize / 2 ) { <nl> let cmp_packed_smi_to_double = CreateCompareFn ( ( ) = > array_to_sort . push ( 0 . 1 ) ) ; <nl> let cmp_holey_smi_to_double = CreateCompareFn ( ( ) = > array_to_sort . push ( 0 . 1 ) ) ; <nl> let cmp_double_to_double = CreateCompareFn ( ( ) = > array_to_sort . length * = 2 ) ; <nl> <nl> - createSuite ( <nl> + createSortSuite ( <nl> ' PackedSmiToPackedDouble ' , 1000 , CreateSortFn ( [ cmp_packed_smi_to_double ] ) , <nl> CreatePackedSmiArray , AssertPackedDoubleElements ) ; <nl> - createSuite ( <nl> + createSortSuite ( <nl> ' HoleySmiToHoleyDouble ' , 1000 , CreateSortFn ( [ cmp_holey_smi_to_double ] ) , <nl> CreateHoleySmiArray , AssertHoleyDoubleElements ) ; <nl> - createSuite ( <nl> + createSortSuite ( <nl> ' PackedDoubleToHoleyDouble ' , 1000 , CreateSortFn ( [ cmp_double_to_double ] ) , <nl> CreatePackedDoubleArray , AssertHoleyDoubleElements ) ; <nl> <nl> let cmp_packed_to_dict = CreateCompareFn ( ( ) = > array_to_sort [ % MaxSmi ( ) ] = 42 ) ; <nl> let cmp_holey_to_dict = CreateCompareFn ( ( ) = > array_to_sort [ % MaxSmi ( ) ] = 42 ) ; <nl> <nl> - createSuite ( <nl> + createSortSuite ( <nl> ' PackedElementToDictionary ' , 1000 , CreateSortFn ( [ cmp_packed_to_dict ] ) , <nl> CreatePackedObjectArray , AssertDictionaryElements ) ; <nl> - createSuite ( <nl> + createSortSuite ( <nl> ' HoleyElementToDictionary ' , 1000 , CreateSortFn ( [ cmp_holey_to_dict ] ) , <nl> CreateHoleyObjectArray , AssertDictionaryElements ) ; <nl> mmm a / test / js - perf - test / ArraySort / sort - cmpfn . js <nl> ppp b / test / js - perf - test / ArraySort / sort - cmpfn . js <nl> load ( ' sort - base . 
js ' ) ; <nl> / / other sort benchmarks have monomorphic call sites . <nl> let sortfn = CreateSortFn ( [ cmp_smaller , cmp_greater ] ) ; <nl> <nl> - createSuite ( ' PackedSmi ' , 1000 , sortfn , CreatePackedSmiArray ) ; <nl> - createSuite ( ' PackedDouble ' , 1000 , sortfn , CreatePackedDoubleArray ) ; <nl> - createSuite ( ' PackedElement ' , 1000 , sortfn , CreatePackedObjectArray ) ; <nl> + createSortSuite ( ' PackedSmi ' , 1000 , sortfn , CreatePackedSmiArray ) ; <nl> + createSortSuite ( ' PackedDouble ' , 1000 , sortfn , CreatePackedDoubleArray ) ; <nl> + createSortSuite ( ' PackedElement ' , 1000 , sortfn , CreatePackedObjectArray ) ; <nl> <nl> - createSuite ( ' HoleySmi ' , 1000 , sortfn , CreateHoleySmiArray ) ; <nl> - createSuite ( ' HoleyDouble ' , 1000 , sortfn , CreateHoleyDoubleArray ) ; <nl> - createSuite ( ' HoleyElement ' , 1000 , sortfn , CreateHoleyObjectArray ) ; <nl> + createSortSuite ( ' HoleySmi ' , 1000 , sortfn , CreateHoleySmiArray ) ; <nl> + createSortSuite ( ' HoleyDouble ' , 1000 , sortfn , CreateHoleyDoubleArray ) ; <nl> + createSortSuite ( ' HoleyElement ' , 1000 , sortfn , CreateHoleyObjectArray ) ; <nl> <nl> - createSuite ( ' Dictionary ' , 1000 , sortfn , CreateDictionaryArray ) ; <nl> + createSortSuite ( ' Dictionary ' , 1000 , sortfn , CreateDictionaryArray ) ; <nl> mmm a / test / js - perf - test / ArraySort / sort - megamorphic . js <nl> ppp b / test / js - perf - test / ArraySort / sort - megamorphic . js <nl> function SetupMegamorphic ( ) { <nl> Array . prototype . sort . call ( { } ) ; <nl> } <nl> <nl> - createSuite ( ' Base ' , 1000 , Sort , SetupMegamorphic ) ; <nl> - createSuite ( ' MultipleCompareFns ' , 1000 , <nl> + createSortSuite ( ' Base ' , 1000 , Sort , SetupMegamorphic ) ; <nl> + createSortSuite ( ' MultipleCompareFns ' , 1000 , <nl> CreateSortFn ( [ cmp_smaller , cmp_greater ] ) , SetupMegamorphic ) ; <nl> mmm a / test / js - perf - test / ArraySort / sort - presorted . js <nl> ppp b / test / js - perf - test / ArraySort / sort - presorted . 
js <nl> let SetupUpUp = ( ) = > SetupPreSortedHalfs ( Up , Up ) ; <nl> let SetupDownDown = ( ) = > SetupPreSortedHalfs ( Down , Down ) ; <nl> let SetupDownUp = ( ) = > SetupPreSortedHalfs ( Down , Up ) ; <nl> <nl> - createSuite ( ' Up ' , 1000 , SortAsc , ( ) = > Up ( array_to_sort , kLength ) , TearDown ) ; <nl> - createSuite ( ' Down ' , 1000 , SortAsc , ( ) = > Down ( array_to_sort , kLength ) , TearDown ) ; <nl> - createSuite ( ' Saw1000 ' , 1000 , SortAsc , SetupSaw1000 , TearDown ) ; <nl> - createSuite ( ' Saw500 ' , 1000 , SortAsc , SetupSaw500 , TearDown ) ; <nl> - createSuite ( ' Saw200 ' , 1000 , SortAsc , SetupSaw200 , TearDown ) ; <nl> - createSuite ( ' Saw200Symmetric ' , 1000 , SortAsc , SetupSaw200Sym , TearDown ) ; <nl> - createSuite ( ' Saw200Down ' , 1000 , SortAsc , SetupSaw200Down , TearDown ) ; <nl> - createSuite ( ' UpDown ' , 1000 , SortAsc , SetupUpDown , TearDown ) ; <nl> - createSuite ( ' UpUp ' , 1000 , SortAsc , SetupUpUp , TearDown ) ; <nl> - createSuite ( ' DownDown ' , 1000 , SortAsc , SetupDownDown , TearDown ) ; <nl> - createSuite ( ' DownUp ' , 1000 , SortAsc , SetupDownUp , TearDown ) ; <nl> + createSortSuite ( <nl> + ' Up ' , 1000 , SortAsc , ( ) = > Up ( array_to_sort , kLength ) , TearDown ) ; <nl> + createSortSuite ( <nl> + ' Down ' , 1000 , SortAsc , ( ) = > Down ( array_to_sort , kLength ) , TearDown ) ; <nl> + createSortSuite ( ' Saw1000 ' , 1000 , SortAsc , SetupSaw1000 , TearDown ) ; <nl> + createSortSuite ( ' Saw500 ' , 1000 , SortAsc , SetupSaw500 , TearDown ) ; <nl> + createSortSuite ( ' Saw200 ' , 1000 , SortAsc , SetupSaw200 , TearDown ) ; <nl> + createSortSuite ( ' Saw200Symmetric ' , 1000 , SortAsc , SetupSaw200Sym , TearDown ) ; <nl> + createSortSuite ( ' Saw200Down ' , 1000 , SortAsc , SetupSaw200Down , TearDown ) ; <nl> + createSortSuite ( ' UpDown ' , 1000 , SortAsc , SetupUpDown , TearDown ) ; <nl> + createSortSuite ( ' UpUp ' , 1000 , SortAsc , SetupUpUp , TearDown ) ; <nl> + createSortSuite ( ' DownDown ' , 1000 , SortAsc , SetupDownDown , TearDown ) ; <nl> + createSortSuite ( ' DownUp ' , 1000 , SortAsc , SetupDownUp , TearDown ) ; <nl> mmm a / test / js - perf - test / ArraySort / sort . js <nl> ppp b / test / js - perf - test / ArraySort / sort . js <nl> <nl> <nl> load ( ' sort - base . js ' ) ; <nl> <nl> - createSuite ( ' PackedSmi ' , 1000 , Sort , CreatePackedSmiArray ) ; <nl> - createSuite ( ' PackedDouble ' , 1000 , Sort , CreatePackedDoubleArray ) ; <nl> - createSuite ( ' PackedElement ' , 1000 , Sort , CreatePackedObjectArray ) ; <nl> + createSortSuite ( ' PackedSmi ' , 1000 , Sort , CreatePackedSmiArray ) ; <nl> + createSortSuite ( ' PackedDouble ' , 1000 , Sort , CreatePackedDoubleArray ) ; <nl> + createSortSuite ( ' PackedElement ' , 1000 , Sort , CreatePackedObjectArray ) ; <nl> <nl> - createSuite ( ' HoleySmi ' , 1000 , Sort , CreateHoleySmiArray ) ; <nl> - createSuite ( ' HoleyDouble ' , 1000 , Sort , CreateHoleyDoubleArray ) ; <nl> - createSuite ( ' HoleyElement ' , 1000 , Sort , CreateHoleyObjectArray ) ; <nl> + createSortSuite ( ' HoleySmi ' , 1000 , Sort , CreateHoleySmiArray ) ; <nl> + createSortSuite ( ' HoleyDouble ' , 1000 , Sort , CreateHoleyDoubleArray ) ; <nl> + createSortSuite ( ' HoleyElement ' , 1000 , Sort , CreateHoleyObjectArray ) ; <nl> <nl> - createSuite ( ' Dictionary ' , 1000 , Sort , CreateDictionaryArray ) ; <nl> + createSortSuite ( ' Dictionary ' , 1000 , Sort , CreateDictionaryArray ) ; <nl>
[ jstests ] Change sorting benchmarks to run setup for each iteration
v8/v8
af9e4ba2ca0452bc5c2871ed9c76627dfab45689
2018-06-19T09:40:39Z
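The benchmark refactor above routes every suite through `createSortSuite`, whose run function resets the shared `counter` and re-executes `setup` and `tearDown` on every iteration, so later iterations are not simply re-sorting the already-sorted output of the previous run. The same pitfall and fix can be shown with a generic timing loop; the harness below is an illustrative plain-C++ sketch (fixed seed for determinism, fresh shuffled input per iteration), not the js-perf-test framework used by these files.

```cpp
#include <algorithm>
#include <chrono>
#include <cstdio>
#include <random>
#include <vector>

int main() {
  constexpr int kIterations = 100;
  constexpr int kArraySize = 4000;
  std::mt19937 rng(42);  // fixed seed keeps the benchmark deterministic

  double total_ms = 0.0;
  for (int i = 0; i < kIterations; ++i) {
    // Per-iteration "setup": rebuild a fresh shuffled array so each measured
    // sort sees unsorted data instead of the previous iteration's result.
    std::vector<int> data(kArraySize);
    for (int j = 0; j < kArraySize; ++j) data[j] = j;
    std::shuffle(data.begin(), data.end(), rng);

    auto start = std::chrono::steady_clock::now();
    std::sort(data.begin(), data.end());
    auto end = std::chrono::steady_clock::now();
    total_ms += std::chrono::duration<double, std::milli>(end - start).count();
  }
  std::printf("avg sort time: %.3f ms\n", total_ms / kIterations);
  return 0;
}
```

Without the per-iteration rebuild, every iteration after the first would measure sorting an already-sorted array, which, depending on the strategy, is the best or worst case rather than the intended workload; that is the same reasoning given in the comment added to sort-base.js.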
mmm a / src / mongo / db / db_raii . cpp <nl> ppp b / src / mongo / db / db_raii . cpp <nl> LockMode getLockModeForQuery ( OperationContext * opCtx ) { <nl> invariant ( opCtx ) ; <nl> <nl> / / Use IX locks for autocommit : false multi - statement transactions ; otherwise , use IS locks . <nl> - if ( opCtx - > getWriteUnitOfWork ( ) ) { <nl> - invariant ( OperationContextSession : : get ( opCtx ) ) ; <nl> - if ( ! OperationContextSession : : get ( opCtx ) - > getAutocommit ( ) ) { <nl> - return MODE_IX ; <nl> - } <nl> + auto session = OperationContextSession : : get ( opCtx ) ; <nl> + if ( session & & session - > inMultiDocumentTransaction ( ) ) { <nl> + return MODE_IX ; <nl> } <nl> - <nl> return MODE_IS ; <nl> } <nl> <nl> mmm a / src / mongo / db / ops / write_ops_exec . cpp <nl> ppp b / src / mongo / db / ops / write_ops_exec . cpp <nl> void finishCurOp ( OperationContext * opCtx , CurOp * curOp ) { <nl> } <nl> <nl> / / Do not profile individual statements in a write command if we are in a transaction . <nl> - if ( curOp - > shouldDBProfile ( shouldSample ) & & ! opCtx - > getWriteUnitOfWork ( ) ) { <nl> + auto session = OperationContextSession : : get ( opCtx ) ; <nl> + if ( curOp - > shouldDBProfile ( shouldSample ) & & <nl> + ! ( session & & session - > inSnapshotReadOrMultiDocumentTransaction ( ) ) ) { <nl> profile ( opCtx , CurOp : : get ( opCtx ) - > getNetworkOp ( ) ) ; <nl> } <nl> } catch ( const DBException & ex ) { <nl> bool handleError ( OperationContext * opCtx , <nl> throw ; / / These have always failed the whole batch . <nl> } <nl> <nl> - if ( opCtx - > getWriteUnitOfWork ( ) ) { <nl> + auto session = OperationContextSession : : get ( opCtx ) ; <nl> + if ( session & & session - > inSnapshotReadOrMultiDocumentTransaction ( ) ) { <nl> / / If we are in a transaction , we must fail the whole batch . <nl> throw ; <nl> } <nl> SingleWriteResult makeWriteResultForInsertOrDeleteRetry ( ) { <nl> } / / namespace <nl> <nl> WriteResult performInserts ( OperationContext * opCtx , const write_ops : : Insert & wholeOp ) { <nl> - / / Insert performs its own retries , so we should not be in a WriteUnitOfWork unless we are in a <nl> - / / transaction . <nl> - invariant ( ! opCtx - > lockState ( ) - > inAWriteUnitOfWork ( ) | | opCtx - > getWriteUnitOfWork ( ) ) ; <nl> + / / Insert performs its own retries , so we should only be within a WriteUnitOfWork when run under <nl> + / / snapshot read concern or in a transaction . <nl> + auto session = OperationContextSession : : get ( opCtx ) ; <nl> + invariant ( ! 
opCtx - > lockState ( ) - > inAWriteUnitOfWork ( ) | | <nl> + ( session & & session - > inSnapshotReadOrMultiDocumentTransaction ( ) ) ) ; <nl> auto & curOp = * CurOp : : get ( opCtx ) ; <nl> ON_BLOCK_EXIT ( [ & ] { <nl> / / This is the only part of finishCurOp we need to do for inserts because they reuse the <nl> WriteResult performInserts ( OperationContext * opCtx , const write_ops : : Insert & who <nl> const auto stmtId = getStmtIdForWriteOp ( opCtx , wholeOp , stmtIdIndex + + ) ; <nl> if ( opCtx - > getTxnNumber ( ) ) { <nl> auto session = OperationContextSession : : get ( opCtx ) ; <nl> + invariant ( session ) ; <nl> if ( session - > checkStatementExecutedNoOplogEntryFetch ( * opCtx - > getTxnNumber ( ) , <nl> stmtId ) ) { <nl> containsRetry = true ; <nl> static SingleWriteResult performSingleUpdateOp ( OperationContext * opCtx , <nl> } <nl> <nl> WriteResult performUpdates ( OperationContext * opCtx , const write_ops : : Update & wholeOp ) { <nl> - / / Update performs its own retries , so we should not be in a WriteUnitOfWork unless we are in a <nl> - / / transaction . <nl> - invariant ( ! opCtx - > lockState ( ) - > inAWriteUnitOfWork ( ) | | opCtx - > getWriteUnitOfWork ( ) ) ; <nl> + / / Update performs its own retries , so we should not be in a WriteUnitOfWork unless run under <nl> + / / snapshot read concern or in a transaction . <nl> + auto session = OperationContextSession : : get ( opCtx ) ; <nl> + invariant ( ! opCtx - > lockState ( ) - > inAWriteUnitOfWork ( ) | | <nl> + ( session & & session - > inSnapshotReadOrMultiDocumentTransaction ( ) ) ) ; <nl> uassertStatusOK ( userAllowedWriteNS ( wholeOp . getNamespace ( ) ) ) ; <nl> <nl> DisableDocumentValidationIfTrue docValidationDisabler ( <nl> static SingleWriteResult performSingleDeleteOp ( OperationContext * opCtx , <nl> WriteResult performDeletes ( OperationContext * opCtx , const write_ops : : Delete & wholeOp ) { <nl> / / Delete performs its own retries , so we should not be in a WriteUnitOfWork unless we are in a <nl> / / transaction . <nl> - invariant ( ! opCtx - > lockState ( ) - > inAWriteUnitOfWork ( ) | | opCtx - > getWriteUnitOfWork ( ) ) ; <nl> + auto session = OperationContextSession : : get ( opCtx ) ; <nl> + invariant ( ! opCtx - > lockState ( ) - > inAWriteUnitOfWork ( ) | | <nl> + ( session & & session - > inSnapshotReadOrMultiDocumentTransaction ( ) ) ) ; <nl> uassertStatusOK ( userAllowedWriteNS ( wholeOp . getNamespace ( ) ) ) ; <nl> <nl> DisableDocumentValidationIfTrue docValidationDisabler ( <nl> mmm a / src / mongo / db / repl / do_txn . cpp <nl> ppp b / src / mongo / db / repl / do_txn . cpp <nl> Status doTxn ( OperationContext * opCtx , <nl> uassert ( ErrorCodes : : InvalidOptions , " doTxn can only be run with a transaction ID . " , txnNumber ) ; <nl> auto * session = OperationContextSession : : get ( opCtx ) ; <nl> uassert ( ErrorCodes : : InvalidOptions , " doTxn must be run within a session " , session ) ; <nl> - invariant ( ! session - > getAutocommit ( ) ) ; <nl> + invariant ( session - > inMultiDocumentTransaction ( ) ) ; <nl> + invariant ( opCtx - > getWriteUnitOfWork ( ) ) ; <nl> uassert ( <nl> ErrorCodes : : InvalidOptions , " doTxn supports only CRUD opts . 
" , _areOpsCrudOnly ( doTxnCmd ) ) ; <nl> auto hasPrecondition = _hasPrecondition ( doTxnCmd ) ; <nl> Status doTxn ( OperationContext * opCtx , <nl> int numApplied = 0 ; <nl> <nl> try { <nl> - writeConflictRetry ( opCtx , " doTxn " , dbName , [ & ] { <nl> - BSONObjBuilder intermediateResult ; <nl> - / / The write unit of work guarantees snapshot isolation for precondition check and the <nl> - / / write . <nl> - WriteUnitOfWork wunit ( opCtx ) ; <nl> - <nl> - / / Check precondition in the same write unit of work so that they share the same <nl> - / / snapshot . <nl> - if ( hasPrecondition ) { <nl> - uassertStatusOK ( _checkPrecondition ( opCtx , doTxnCmd , result ) ) ; <nl> - } <nl> + BSONObjBuilder intermediateResult ; <nl> + <nl> + / / The transaction takes place in a global unit of work , so the precondition check <nl> + / / and the writes will share the same snapshot . <nl> + if ( hasPrecondition ) { <nl> + uassertStatusOK ( _checkPrecondition ( opCtx , doTxnCmd , result ) ) ; <nl> + } <nl> <nl> - numApplied = 0 ; <nl> - uassertStatusOK ( _doTxn ( opCtx , dbName , doTxnCmd , & intermediateResult , & numApplied ) ) ; <nl> - auto opObserver = getGlobalServiceContext ( ) - > getOpObserver ( ) ; <nl> - invariant ( opObserver ) ; <nl> - opObserver - > onTransactionCommit ( opCtx ) ; <nl> - wunit . commit ( ) ; <nl> - result - > appendElements ( intermediateResult . obj ( ) ) ; <nl> - } ) ; <nl> + numApplied = 0 ; <nl> + uassertStatusOK ( _doTxn ( opCtx , dbName , doTxnCmd , & intermediateResult , & numApplied ) ) ; <nl> + auto opObserver = getGlobalServiceContext ( ) - > getOpObserver ( ) ; <nl> + invariant ( opObserver ) ; <nl> + opObserver - > onTransactionCommit ( opCtx ) ; <nl> + result - > appendElements ( intermediateResult . obj ( ) ) ; <nl> <nl> / / Commit the global WUOW if the command succeeds . <nl> - if ( opCtx - > getWriteUnitOfWork ( ) ) { <nl> - opCtx - > getWriteUnitOfWork ( ) - > commit ( ) ; <nl> - } <nl> + opCtx - > getWriteUnitOfWork ( ) - > commit ( ) ; <nl> } catch ( const DBException & ex ) { <nl> BSONArrayBuilder ab ; <nl> + + numApplied ; <nl> mmm a / src / mongo / db / repl / do_txn_test . cpp <nl> ppp b / src / mongo / db / repl / do_txn_test . cpp <nl> void DoTxnTest : : setUp ( ) { <nl> _opCtx - > setLogicalSessionId ( makeLogicalSessionIdForTest ( ) ) ; <nl> _opCtx - > setTxnNumber ( 0 ) ; / / TxnNumber can always be 0 because we have a new session . <nl> _ocs . emplace ( _opCtx . get ( ) , true / * checkOutSession * / , false / * autocommit * / ) ; <nl> + _ocs - > unstashTransactionResources ( ) ; <nl> } <nl> <nl> void DoTxnTest : : tearDown ( ) { <nl> mmm a / src / mongo / db / service_entry_point_common . cpp <nl> ppp b / src / mongo / db / service_entry_point_common . cpp <nl> void execCommandDatabase ( OperationContext * opCtx , <nl> opCtx , invocation . get ( ) , request , replyBuilder , startOperationTime , behaviors ) ; <nl> <nl> if ( retval ) { <nl> - if ( opCtx - > getWriteUnitOfWork ( ) ) { <nl> - / / Snapshot readConcern is enabled and it must be used within a session . <nl> - auto session = sessionTxnState . get ( opCtx ) ; <nl> - invariant ( session ! = nullptr , <nl> - str : : stream ( ) <nl> - < < " Snapshot transaction must be run within a session . Command : " <nl> - < < ServiceEntryPointCommon : : getRedactedCopyForLogging ( command , <nl> - request . body ) ) ; <nl> - if ( opCtx - > hasStashedCursor ( ) | | session - > inMultiDocumentTransaction ( ) ) { <nl> - sessionTxnState . 
stashTransactionResources ( ) ; <nl> - } else { <nl> - / / If we are in an autocommit = true transaction and have no stashed cursor , <nl> - / / commit the transaction . <nl> - opCtx - > getWriteUnitOfWork ( ) - > commit ( ) ; <nl> - } <nl> - } <nl> + sessionTxnState . stashTransactionResources ( ) ; <nl> } else { <nl> command - > incrementCommandsFailed ( ) ; <nl> } <nl> mmm a / src / mongo / db / session . cpp <nl> ppp b / src / mongo / db / session . cpp <nl> <nl> <nl> # include " mongo / db / session . h " <nl> <nl> - # include < boost / utility / in_place_factory . hpp > <nl> - <nl> # include " mongo / db / catalog / index_catalog . h " <nl> # include " mongo / db / concurrency / lock_state . h " <nl> # include " mongo / db / concurrency / write_conflict_exception . h " <nl> Session : : TxnResources : : TxnResources ( OperationContext * opCtx ) { <nl> } <nl> <nl> Session : : TxnResources : : ~ TxnResources ( ) { <nl> - if ( ! _released ) { <nl> - _recoveryUnit - > abortUnitOfWork ( ) ; <nl> + if ( ! _released & & _recoveryUnit ) { <nl> + / / This should only be reached when aborting a transaction that isn ' t active , i . e . <nl> + / / when starting a new transaction before completing an old one . So we should <nl> + / / be at WUOW nesting level 1 ( only the top level WriteUnitOfWork ) . <nl> _locker - > endWriteUnitOfWork ( ) ; <nl> + invariant ( ! _locker - > inAWriteUnitOfWork ( ) ) ; <nl> + _recoveryUnit - > abortUnitOfWork ( ) ; <nl> } <nl> } <nl> <nl> void Session : : stashTransactionResources ( OperationContext * opCtx ) { <nl> / / effectively owns the Session . That is , a user might lock the Client to ensure it doesn ' t go <nl> / / away , and then lock the Session owned by that client . We rely on the fact that we are not <nl> / / using the DefaultLockerImpl to avoid deadlock . <nl> + <nl> invariant ( ! isMMAPV1 ( ) ) ; <nl> stdx : : lock_guard < Client > lk ( * opCtx - > getClient ( ) ) ; <nl> - stdx : : lock_guard < stdx : : mutex > lg ( _mutex ) ; <nl> - <nl> - invariant ( opCtx - > hasStashedCursor ( ) | | ! _autocommit ) ; <nl> + stdx : : unique_lock < stdx : : mutex > lg ( _mutex ) ; <nl> <nl> if ( * opCtx - > getTxnNumber ( ) ! = _activeTxnNumber ) { <nl> / / The session is checked out , so _activeTxnNumber cannot advance due to a user operation . <nl> void Session : : stashTransactionResources ( OperationContext * opCtx ) { <nl> < < _activeTxnNumber ) ; <nl> } <nl> <nl> + if ( _txnState ! = MultiDocumentTransactionState : : kInProgress & & <nl> + _txnState ! = MultiDocumentTransactionState : : kInSnapshotRead ) { <nl> + / / Not in a multi - document transaction or snapshot read : nothing to do . <nl> + return ; <nl> + } <nl> + <nl> + if ( _txnState = = MultiDocumentTransactionState : : kInSnapshotRead & & ! opCtx - > hasStashedCursor ( ) ) { <nl> + / / The snapshot read is complete . <nl> + invariant ( opCtx - > getWriteUnitOfWork ( ) ) ; <nl> + / / We cannot hold the session lock during the commit , or a deadlock results . <nl> + _txnState = MultiDocumentTransactionState : : kCommitting ; <nl> + lg . unlock ( ) ; <nl> + opCtx - > getWriteUnitOfWork ( ) - > commit ( ) ; <nl> + opCtx - > setWriteUnitOfWork ( nullptr ) ; <nl> + lg . lock ( ) ; <nl> + _txnState = MultiDocumentTransactionState : : kCommitted ; <nl> + return ; <nl> + } <nl> + <nl> invariant ( ! 
_txnResourceStash ) ; <nl> - _txnResourceStash = boost : : in_place ( opCtx ) ; <nl> + _txnResourceStash = TxnResources ( opCtx ) ; <nl> } <nl> <nl> void Session : : unstashTransactionResources ( OperationContext * opCtx ) { <nl> void Session : : unstashTransactionResources ( OperationContext * opCtx ) { <nl> } <nl> <nl> if ( _txnResourceStash ) { <nl> + invariant ( _txnState ! = MultiDocumentTransactionState : : kNone ) ; <nl> _txnResourceStash - > release ( opCtx ) ; <nl> _txnResourceStash = boost : : none ; <nl> } else { <nl> void Session : : unstashTransactionResources ( OperationContext * opCtx ) { <nl> if ( readConcernArgs . getLevel ( ) = = repl : : ReadConcernLevel : : kSnapshotReadConcern | | <nl> _txnState = = MultiDocumentTransactionState : : kInProgress ) { <nl> opCtx - > setWriteUnitOfWork ( std : : make_unique < WriteUnitOfWork > ( opCtx ) ) ; <nl> + if ( _txnState ! = MultiDocumentTransactionState : : kInProgress ) { <nl> + invariant ( _txnState = = MultiDocumentTransactionState : : kNone ) ; <nl> + _txnState = MultiDocumentTransactionState : : kInSnapshotRead ; <nl> + } <nl> } <nl> } <nl> } <nl> void Session : : _setActiveTxn ( WithLock , TxnNumber txnNumber ) { <nl> _activeTxnNumber = txnNumber ; <nl> _activeTxnCommittedStatements . clear ( ) ; <nl> _hasIncompleteHistory = false ; <nl> + _txnResourceStash = boost : : none ; <nl> } <nl> <nl> void Session : : addTransactionOperation ( OperationContext * opCtx , <nl> mmm a / src / mongo / db / session . h <nl> ppp b / src / mongo / db / session . h <nl> class Session { <nl> <nl> ~ TxnResources ( ) ; <nl> <nl> + / / Rule of 5 : because we have a class - defined destructor , we need to explictly specify <nl> + / / the move operator and move assignment operator . <nl> + TxnResources ( TxnResources & & ) = default ; <nl> + TxnResources & operator = ( TxnResources & & ) = default ; <nl> + <nl> / * * <nl> * Releases stashed transaction state onto ' opCtx ' . Must only be called once . <nl> * / <nl> class Session { <nl> return _txnState = = MultiDocumentTransactionState : : kInProgress ; <nl> } ; <nl> <nl> + / * * <nl> + * Returns whether we are in a read - only or multi - document transaction . <nl> + * / <nl> + bool inSnapshotReadOrMultiDocumentTransaction ( ) const { <nl> + stdx : : lock_guard < stdx : : mutex > lk ( _mutex ) ; <nl> + return _txnState = = MultiDocumentTransactionState : : kInProgress | | <nl> + _txnState = = MultiDocumentTransactionState : : kInSnapshotRead ; <nl> + } <nl> + <nl> / * * <nl> * Adds a stored operation to the list of stored operations for the current multi - document <nl> * ( non - autocommit ) transaction . It is illegal to add operations when no multi - document <nl> class Session { <nl> / / Holds transaction resources between network operations . <nl> boost : : optional < TxnResources > _txnResourceStash ; <nl> <nl> - / / Indicates the state of the current multi - document transaction , if any . <nl> - / / If the transaction is in any state but kInProgress , no more operations can <nl> - / / be collected . <nl> + / / Indicates the state of the current multi - document transaction or snapshot read , if any . If <nl> + / / the transaction is in any state but kInProgress , no more operations can be collected . <nl> enum class MultiDocumentTransactionState { <nl> kNone , <nl> + kInSnapshotRead , <nl> kInProgress , <nl> kCommitting , <nl> kCommitted , <nl> mmm a / src / mongo / db / session_catalog . cpp <nl> ppp b / src / mongo / db / session_catalog . 
cpp <nl> OperationContextSession : : ~ OperationContextSession ( ) { <nl> } <nl> <nl> void OperationContextSession : : stashTransactionResources ( ) { <nl> + if ( ! _opCtx - > getTxnNumber ( ) ) { <nl> + return ; <nl> + } <nl> + <nl> if ( auto & checkedOutSession = operationSessionDecoration ( _opCtx ) ) { <nl> if ( checkedOutSession - > checkOutNestingLevel = = 1 ) { <nl> if ( auto session = checkedOutSession - > scopedSession . get ( ) ) { <nl>
SERVER - 33551 Track snapshot read transactions with MultiDocumentTransactionState in Session
mongodb/mongo
388d8c1245e61c4fa906254f711aec8ee7fe7486
2018-03-15T01:14:25Z
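One small but necessary C++ detail in the Session change above: the stash is now filled by value with `_txnResourceStash = TxnResources(opCtx)`, and because `TxnResources` has a user-declared destructor its implicit move operations are suppressed, hence the explicitly defaulted move constructor and move assignment (the "Rule of 5" comment in session.h). The sketch below shows the same movable abort-unless-released guard pattern with hypothetical names and `puts` calls standing in for the real recovery-unit and locker work.

```cpp
#include <cstdio>
#include <optional>
#include <utility>

// Hypothetical sketch of a movable "abort unless released" guard: the
// destructor rolls back unless the resources were explicitly released, and a
// moved-from guard must not roll back again.
class TxnGuard {
 public:
  TxnGuard() { std::puts("begin unit of work"); }
  ~TxnGuard() {
    if (active_ && !released_) std::puts("abort unit of work");
  }
  // A user-declared destructor suppresses implicit moves, so spell them out.
  TxnGuard(TxnGuard&& other) noexcept
      : active_(std::exchange(other.active_, false)),
        released_(other.released_) {}
  TxnGuard& operator=(TxnGuard&& other) noexcept {
    if (this != &other) {
      if (active_ && !released_) std::puts("abort unit of work");  // drop held state
      active_ = std::exchange(other.active_, false);
      released_ = other.released_;
    }
    return *this;
  }
  void release() { released_ = true; std::puts("resources handed back to caller"); }

 private:
  bool active_ = true;     // false once moved-from
  bool released_ = false;  // true once ownership was transferred
};

int main() {
  std::optional<TxnGuard> stash;
  stash = TxnGuard();   // compiles only because the move operations exist
  stash->release();     // releasing prevents the abort in the destructor
  stash.reset();
  return 0;
}
```

Marking the moved-from guard inactive is what makes it safe to keep the guard in an optional member and later replace or reset it without a double rollback, which is the situation the updated `TxnResources` destructor comment describes.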
mmm a / tensorflow / python / keras / BUILD <nl> ppp b / tensorflow / python / keras / BUILD <nl> cuda_py_test ( <nl> " / / tensorflow / python : client_testlib " , <nl> ] , <nl> shard_count = 8 , <nl> - tags = [ <nl> - " manual " , # b / 124471597 <nl> - " notap " , # b / 124471597 <nl> - ] , <nl> xla_enable_strict_auto_jit = True , <nl> ) <nl> <nl>
Automated rollback of commit 082a349b260c3ceeab7c616700fbe19810191216
tensorflow/tensorflow
c715e350caa2f5de063c2e403ed56e8f387f7a75
2019-02-25T20:23:32Z
mmm a / src / api / hocrrenderer . cpp <nl> ppp b / src / api / hocrrenderer . cpp <nl> char * TessBaseAPI : : GetHOCRText ( ETEXT_DESC * monitor , int page_number ) { <nl> if ( tesseract_ = = nullptr | | ( page_res_ = = nullptr & & Recognize ( monitor ) < 0 ) ) <nl> return nullptr ; <nl> <nl> - int lcnt = 1 , bcnt = 1 , pcnt = 1 , wcnt = 1 , tcnt = 1 , gcnt = 1 ; <nl> + int lcnt = 1 , bcnt = 1 , pcnt = 1 , wcnt = 1 , scnt = 1 , tcnt = 1 , gcnt = 1 ; <nl> int page_id = page_number + 1 ; / / hOCR uses 1 - based page numbers . <nl> bool para_is_ltr = true ; / / Default direction is LTR <nl> const char * paragraph_lang = nullptr ; <nl> char * TessBaseAPI : : GetHOCRText ( ETEXT_DESC * monitor , int page_number ) { <nl> / / Now , process the word . . . <nl> std : : vector < std : : vector < std : : pair < const char * , float > > > * confidencemap = <nl> nullptr ; <nl> + std : : vector < std : : vector < std : : vector < std : : pair < const char * , float > > > > * <nl> + symbolMap = nullptr ; <nl> if ( tesseract_ - > lstm_choice_mode ) { <nl> confidencemap = res_it - > GetBestLSTMSymbolChoices ( ) ; <nl> + symbolMap = res_it - > GetBestSegmentedLSTMSymbolChoices ( ) ; <nl> } <nl> hocr_str < < " \ n < span class = ' ocrx_word ' " <nl> < < " id = ' " <nl> char * TessBaseAPI : : GetHOCRText ( ETEXT_DESC * monitor , int page_number ) { <nl> tcnt + + ; <nl> } <nl> } <nl> + } else if ( tesseract_ - > lstm_choice_mode = = 3 & & symbolMap ! = nullptr ) { <nl> + for ( size_t j = 0 ; j < symbolMap - > size ( ) ; j + + ) { <nl> + std : : vector < std : : vector < std : : pair < const char * , float > > > timesteps = <nl> + ( * symbolMap ) [ j ] ; <nl> + hocr_str < < " \ n < span class = ' ocr_symbol ' " <nl> + < < " id = ' " <nl> + < < " symbolstep_ " < < page_id < < " _ " < < wcnt < < " _ " < < scnt <nl> + < < " ' > " <nl> + < < timesteps [ 0 ] [ 0 ] . first ; <nl> + for ( size_t i = 1 ; i < timesteps . size ( ) ; i + + ) { <nl> + hocr_str < < " \ n < span class = ' ocrx_cinfo ' " <nl> + < < " id = ' " <nl> + < < " timestep_ " < < page_id < < " _ " < < wcnt < < " _ " < < tcnt <nl> + < < " ' " <nl> + < < " > " ; <nl> + std : : vector < std : : pair < const char * , float > > timestep = <nl> + timesteps [ i ] ; <nl> + for ( std : : pair < const char * , float > conf : timestep ) { <nl> + hocr_str < < " < span class = ' ocr_glyph ' " <nl> + < < " id = ' " <nl> + < < " choice_ " < < page_id < < " _ " < < wcnt < < " _ " < < gcnt <nl> + < < " ' " <nl> + < < " title = ' x_confs " < < int ( conf . second * 100 ) < < " ' > " <nl> + < < conf . first < < " < / span > " ; <nl> + gcnt + + ; <nl> + } <nl> + hocr_str < < " < / span > " ; <nl> + tcnt + + ; <nl> + } <nl> + hocr_str < < " < / span > " ; <nl> + scnt + + ; <nl> + } <nl> } <nl> hocr_str < < " < / span > " ; <nl> tcnt = 1 ; <nl> mmm a / src / ccmain / resultiterator . cpp <nl> ppp b / src / ccmain / resultiterator . cpp <nl> char * ResultIterator : : GetUTF8Text ( PageIteratorLevel level ) const { <nl> return result ; <nl> } <nl> <nl> - std : : vector < std : : vector < std : : pair < const char * , float > > > * ResultIterator : : GetBestLSTMSymbolChoices ( ) const { <nl> + std : : vector < std : : vector < std : : pair < const char * , float > > > * <nl> + ResultIterator : : GetBestLSTMSymbolChoices ( ) const { <nl> if ( it_ - > word ( ) ! 
= nullptr ) { <nl> return & it_ - > word ( ) - > timesteps ; <nl> } else { <nl> std : : vector < std : : vector < std : : pair < const char * , float > > > * ResultIterator : : GetBest <nl> } <nl> } <nl> <nl> + std : : vector < std : : vector < std : : vector < std : : pair < const char * , float > > > > * <nl> + ResultIterator : : GetBestSegmentedLSTMSymbolChoices ( ) const { <nl> + if ( it_ - > word ( ) ! = nullptr ) { <nl> + return & it_ - > word ( ) - > symbol_steps ; <nl> + } else { <nl> + return nullptr ; <nl> + } <nl> + } <nl> + <nl> void ResultIterator : : AppendUTF8WordText ( STRING * text ) const { <nl> if ( ! it_ - > word ( ) ) return ; <nl> ASSERT_HOST ( it_ - > word ( ) - > best_choice ! = nullptr ) ; <nl> mmm a / src / ccmain / resultiterator . h <nl> ppp b / src / ccmain / resultiterator . h <nl> class TESS_API ResultIterator : public LTRResultIterator { <nl> / * * <nl> * Returns the LSTM choices for every LSTM timestep for the current word . <nl> * / <nl> - virtual std : : vector < std : : vector < std : : pair < const char * , float > > > * GetBestLSTMSymbolChoices ( ) const ; <nl> + virtual std : : vector < std : : vector < std : : pair < const char * , float > > > * <nl> + GetBestLSTMSymbolChoices ( ) const ; <nl> + virtual std : : vector < std : : vector < std : : vector < std : : pair < const char * , float > > > > * <nl> + GetBestSegmentedLSTMSymbolChoices ( ) const ; <nl> <nl> / * * <nl> * Return whether the current paragraph ' s dominant reading direction <nl> mmm a / src / ccmain / tesseractclass . cpp <nl> ppp b / src / ccmain / tesseractclass . cpp <nl> Tesseract : : Tesseract ( ) <nl> " Allows to include alternative symbols choices in the hOCR output . " <nl> " Valid input values are 0 , 1 and 2 . 0 is the default value . " <nl> " With 1 the alternative symbol choices per timestep are included . " <nl> - " With 2 the alternative symbol choices are accumulated per character . " , <nl> + " With 2 the alternative symbol choices are accumulated per character . " <nl> + " With 3 the alternative symbol choices per timestep are included and " <nl> + " separated by the suggested segmentation of Tesseract " , <nl> this - > params ( ) ) , <nl> <nl> backup_config_file_ ( nullptr ) , <nl> mmm a / src / ccmain / tesseractclass . h <nl> ppp b / src / ccmain / tesseractclass . h <nl> class Tesseract : public Wordrec { <nl> " Allows to include alternative symbols choices in the hOCR output . " <nl> " Valid input values are 0 , 1 and 2 . 0 is the default value . " <nl> " With 1 the alternative symbol choices per timestep are included . " <nl> - " With 2 the alternative symbol choices are accumulated per character . " ) ; <nl> + " With 2 the alternative symbol choices are accumulated per character . " <nl> + " With 3 the alternative symbol choices per timestep are included and " <nl> + " separated by the suggested segmentation of Tesseract " ) ; <nl> <nl> / / / / ambigsrecog . cpp / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> FILE * init_recog_training ( const STRING & fname ) ; <nl> mmm a / src / ccstruct / pageres . h <nl> ppp b / src / ccstruct / pageres . 
h <nl> class WERD_RES : public ELIST_LINK { <nl> GenericVector < int > blob_gaps ; <nl> / / Stores the lstm choices of every timestep <nl> std : : vector < std : : vector < std : : pair < const char * , float > > > timesteps ; <nl> + std : : vector < std : : vector < std : : vector < std : : pair < const char * , float > > > > <nl> + symbol_steps ; <nl> / / Ratings matrix contains classifier choices for each classified combination <nl> / / of blobs . The dimension is the same as the number of blobs in chopped_word <nl> / / and the leading diagonal corresponds to classifier results of the blobs <nl> mmm a / src / lstm / recodebeam . cpp <nl> ppp b / src / lstm / recodebeam . cpp <nl> <nl> # include < deque > <nl> # include < map > <nl> # include < set > <nl> + # include < tuple > <nl> # include < vector > <nl> <nl> # include < algorithm > <nl> void RecodeBeamSearch : : ExtractBestPathAsWords ( const TBOX & line_box , <nl> GenericVector < int > xcoords ; <nl> GenericVector < const RecodeNode * > best_nodes ; <nl> GenericVector < const RecodeNode * > second_nodes ; <nl> - std : : deque < std : : pair < int , int > > best_choices ; <nl> + std : : deque < std : : tuple < int , int , double > > best_choices ; <nl> ExtractBestPaths ( & best_nodes , & second_nodes ) ; <nl> if ( debug ) { <nl> DebugPath ( unicharset , best_nodes ) ; <nl> void RecodeBeamSearch : : ExtractBestPathAsWords ( const TBOX & line_box , <nl> int timestepEnd = 0 ; <nl> / / if lstm choice mode is required in granularity level 2 it stores the x <nl> / / Coordinates of every chosen character to match the alternative choices to it <nl> - if ( lstm_choice_mode = = 2 ) { <nl> + if ( lstm_choice_mode = = 2 | | lstm_choice_mode = = 3 ) { <nl> ExtractPathAsUnicharIds ( best_nodes , & unichar_ids , & certs , & ratings , <nl> & xcoords , & best_choices ) ; <nl> if ( best_choices . size ( ) > 0 ) { <nl> - current_char = best_choices . front ( ) . first ; <nl> - timestepEnd = best_choices . front ( ) . second ; <nl> - best_choices . pop_front ( ) ; <nl> + current_char = std : : get < 0 > ( best_choices . front ( ) ) ; <nl> + timestepEnd = std : : get < 1 > ( best_choices . front ( ) ) ; <nl> + if ( lstm_choice_mode = = 2 ) <nl> + best_choices . pop_front ( ) ; <nl> } <nl> } else { <nl> ExtractPathAsUnicharIds ( best_nodes , & unichar_ids , & certs , & ratings , <nl> void RecodeBeamSearch : : ExtractBestPathAsWords ( const TBOX & line_box , <nl> choice_pairs . push_back ( choice ) ; <nl> } <nl> } <nl> - if ( ( best_choices . size ( ) > 0 & & i = = best_choices . front ( ) . second - 1 ) <nl> + if ( ( best_choices . size ( ) > 0 & & i = = std : : get < 1 > ( best_choices . front ( ) ) - 1 ) <nl> | | i = = xcoords [ word_end ] - 1 ) { <nl> std : : map < const char * , float > summed_propabilities ; <nl> for ( auto it = choice_pairs . begin ( ) ; it ! = choice_pairs . end ( ) ; + + it ) { <nl> void RecodeBeamSearch : : ExtractBestPathAsWords ( const TBOX & line_box , <nl> it - > second ) ) ; <nl> } <nl> if ( best_choices . size ( ) > 0 ) { <nl> - current_char = best_choices . front ( ) . first ; <nl> + current_char = std : : get < 0 > ( best_choices . front ( ) ) ; <nl> best_choices . pop_front ( ) ; <nl> } <nl> choice_pairs . 
clear ( ) ; <nl> void RecodeBeamSearch : : ExtractBestPathAsWords ( const TBOX & line_box , <nl> } <nl> } <nl> timestepEnd = xcoords [ word_end ] ; <nl> + } else if ( lstm_choice_mode = = 3 ) { <nl> + std : : vector < std : : vector < std : : pair < const char * , float > > > currentSymbol ; <nl> + for ( size_t i = timestepEnd ; i < xcoords [ word_end ] ; i + + ) { <nl> + if ( i = = std : : get < 1 > ( best_choices . front ( ) ) ) { <nl> + if ( currentSymbol . size ( ) > 0 ) { <nl> + word_res - > symbol_steps . push_back ( currentSymbol ) ; <nl> + currentSymbol . clear ( ) ; <nl> + } <nl> + std : : vector < std : : pair < const char * , float > > choice_Header ; <nl> + choice_Header . push_back ( std : : pair < const char * , float > ( <nl> + unicharset - > id_to_unichar_ext ( std : : get < 0 > ( best_choices . front ( ) ) ) , <nl> + 2 . 0 ) ) ; <nl> + currentSymbol . push_back ( choice_Header ) ; <nl> + if ( best_choices . size ( ) > 1 ) best_choices . pop_front ( ) ; <nl> + } <nl> + currentSymbol . push_back ( timesteps [ i ] ) ; <nl> + } <nl> + word_res - > symbol_steps . push_back ( currentSymbol ) ; <nl> + timestepEnd = xcoords [ word_end ] ; <nl> } <nl> for ( int i = word_start ; i < word_end ; + + i ) { <nl> BLOB_CHOICE_LIST * choices = new BLOB_CHOICE_LIST ; <nl> void RecodeBeamSearch : : ExtractPathAsUnicharIds ( <nl> const GenericVector < const RecodeNode * > & best_nodes , <nl> GenericVector < int > * unichar_ids , GenericVector < float > * certs , <nl> GenericVector < float > * ratings , GenericVector < int > * xcoords , <nl> - std : : deque < std : : pair < int , int > > * best_choices ) { <nl> + std : : deque < std : : tuple < int , int , double > > * best_choices ) { <nl> unichar_ids - > truncate ( 0 ) ; <nl> certs - > truncate ( 0 ) ; <nl> ratings - > truncate ( 0 ) ; <nl> void RecodeBeamSearch : : ExtractPathAsUnicharIds ( <nl> int t = 0 ; <nl> int width = best_nodes . size ( ) ; <nl> while ( t < width ) { <nl> + int id ; <nl> + int tposition ; <nl> double certainty = 0 . 0 ; <nl> double rating = 0 . 0 ; <nl> while ( t < width & & best_nodes [ t ] - > unichar_id = = INVALID_UNICHAR_ID ) { <nl> void RecodeBeamSearch : : ExtractPathAsUnicharIds ( <nl> unichar_ids - > push_back ( unichar_id ) ; <nl> xcoords - > push_back ( t ) ; <nl> if ( best_choices ! = nullptr ) { <nl> - best_choices - > push_back ( std : : pair < int , int > ( unichar_id , t ) ) ; <nl> + tposition = t ; <nl> + id = unichar_id ; <nl> } <nl> do { <nl> double cert = best_nodes [ t + + ] - > certainty ; <nl> void RecodeBeamSearch : : ExtractPathAsUnicharIds ( <nl> if ( certainty < certs - > back ( ) ) certs - > back ( ) = certainty ; <nl> ratings - > back ( ) + = rating ; <nl> } <nl> + if ( best_choices ! = nullptr ) { <nl> + best_choices - > push_back ( <nl> + std : : tuple < int , int , double > ( id , tposition , rating ) ) ; <nl> + } <nl> } <nl> xcoords - > push_back ( width ) ; <nl> } <nl> mmm a / src / lstm / recodebeam . h <nl> ppp b / src / lstm / recodebeam . h <nl> <nl> # include " unicharcompress . 
h " <nl> # include < deque > <nl> # include < set > <nl> + # include < tuple > <nl> # include < vector > <nl> <nl> namespace tesseract { <nl> class RecodeBeamSearch { <nl> const GenericVector < const RecodeNode * > & best_nodes , <nl> GenericVector < int > * unichar_ids , GenericVector < float > * certs , <nl> GenericVector < float > * ratings , GenericVector < int > * xcoords , <nl> - std : : deque < std : : pair < int , int > > * best_choices = nullptr ) ; <nl> + std : : deque < std : : tuple < int , int , double > > * best_choices = nullptr ) ; <nl> <nl> / / Sets up a word with the ratings matrix and fake blobs with boxes in the <nl> / / right places . <nl>
Added the option to get the timesteps separated by the suggested segmentation
tesseract-ocr/tesseract
754e38d2b48184f8dbaeca7acd7119f64acc7bf7
2019-03-11T09:50:56Z
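The new `lstm_choice_mode == 3` branch above cuts the flat list of per-timestep alternatives at the x-coordinates where the best path emitted a character, so each recognized symbol carries a header entry (the chosen glyph, tagged with the sentinel confidence 2.0) followed by the timesteps that produced it. Stripped of the Tesseract types, the grouping is a partition of a timestep sequence by boundary indices; the sketch below uses illustrative `std::pair`/`std::vector` stand-ins rather than the actual `WERD_RES` or `RecodeBeamSearch` structures.

```cpp
#include <cstdio>
#include <string>
#include <utility>
#include <vector>

// One alternative at one LSTM timestep: (glyph, confidence).
using Choice = std::pair<std::string, float>;
using Timestep = std::vector<Choice>;

// Illustrative only: split the flat timestep sequence at the positions where
// the best path emitted a symbol, so each symbol owns "its" timesteps.
std::vector<std::vector<Timestep>> GroupBySymbol(
    const std::vector<Timestep>& timesteps,
    const std::vector<size_t>& symbol_starts) {  // ascending, first element 0
  std::vector<std::vector<Timestep>> grouped;
  for (size_t s = 0; s < symbol_starts.size(); ++s) {
    const size_t begin = symbol_starts[s];
    const size_t end =
        (s + 1 < symbol_starts.size()) ? symbol_starts[s + 1] : timesteps.size();
    grouped.emplace_back(timesteps.begin() + begin, timesteps.begin() + end);
  }
  return grouped;
}

int main() {
  std::vector<Timestep> steps = {
      {{"H", 0.9f}}, {{"H", 0.6f}, {"h", 0.3f}},   // timesteps 0-1 -> 'H'
      {{"i", 0.8f}}, {{"i", 0.7f}}, {{"!", 0.2f}}  // timesteps 2-4 -> 'i'
  };
  auto grouped = GroupBySymbol(steps, {0, 2});
  for (size_t s = 0; s < grouped.size(); ++s)
    std::printf("symbol %zu owns %zu timesteps\n", s, grouped[s].size());
  return 0;
}
```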
mmm a / dbms / src / Interpreters / LogicalExpressionsOptimizer . cpp <nl> ppp b / dbms / src / Interpreters / LogicalExpressionsOptimizer . cpp <nl> bool LogicalExpressionsOptimizer : : mayOptimizeDisjunctiveEqualityChain ( const Disj <nl> / / / Проверяем , что правые части всех равенств имеют один и тот же тип . <nl> auto & first_operands = getFunctionOperands ( equality_functions [ 0 ] ) ; <nl> auto first_literal = static_cast < ASTLiteral * > ( & * first_operands [ 1 ] ) ; <nl> - for ( auto function : equality_functions ) <nl> + for ( size_t i = 1 ; i < equality_functions . size ( ) ; + + i ) <nl> { <nl> - auto & operands = getFunctionOperands ( function ) ; <nl> + auto & operands = getFunctionOperands ( equality_functions [ i ] ) ; <nl> auto literal = static_cast < ASTLiteral * > ( & * operands [ 1 ] ) ; <nl> <nl> if ( literal - > type ! = first_literal - > type ) <nl>
dbms : Server : simplified code [ # METR - 14875 ]
ClickHouse/ClickHouse
75dc809ad2c1e7378af0381d7890876d54a198d3
2015-02-20T14:27:05Z
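The simplification above starts the type check at index 1, since the first equality's literal is the reference the others are compared against; the previous loop over all functions merely compared the first literal with itself. In generic terms this is the "every remaining element matches the first" idiom, which can also be written with `std::all_of` over the tail, as in this small illustrative sketch with made-up type tags:

```cpp
#include <algorithm>
#include <cstdio>
#include <vector>

// Illustrative: verify every element after the first has the same "type tag"
// as the first one, mirroring the literal-type check in the optimizer.
bool AllSameTypeAsFirst(const std::vector<int>& type_tags) {
  if (type_tags.empty()) return true;
  return std::all_of(type_tags.begin() + 1, type_tags.end(),
                     [&](int t) { return t == type_tags.front(); });
}

int main() {
  std::printf("%d\n", AllSameTypeAsFirst({3, 3, 3}));  // prints 1
  std::printf("%d\n", AllSameTypeAsFirst({3, 3, 7}));  // prints 0
  return 0;
}
```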
mmm a / src / Storages / MergeTree / MergeTreeBlockReadUtils . cpp <nl> ppp b / src / Storages / MergeTree / MergeTreeBlockReadUtils . cpp <nl> MergeTreeReadTaskColumns getReadTaskColumns ( <nl> if ( prewhere_info ) <nl> { <nl> if ( prewhere_info - > alias_actions ) <nl> - pre_column_names = prewhere_info - > alias_actions - > getRequiredColumns ( ) . getNames ( ) ; <nl> + pre_column_names = prewhere_info - > alias_actions - > getRequiredColumns ( ) ; <nl> else <nl> - pre_column_names = prewhere_info - > prewhere_actions - > getRequiredColumns ( ) . getNames ( ) ; <nl> + pre_column_names = prewhere_info - > prewhere_actions - > getRequiredColumns ( ) ; <nl> <nl> if ( pre_column_names . empty ( ) ) <nl> pre_column_names . push_back ( column_names [ 0 ] ) ; <nl>
Fix build
ClickHouse/ClickHouse
4984fdc693c3c79a26137a4fccecf7e88b7ee65a
2020-11-03T19:28:50Z
mmm a / xbmc / addons / kodi - addon - dev - kit / doxygen / Modules / modules_python . dox <nl> ppp b / xbmc / addons / kodi - addon - dev - kit / doxygen / Modules / modules_python . dox <nl> web applications or frameworks for the Python programming language . <nl> http : / / mirrors . kodi . tv / docs / python - docs / 16 . x - jarvis / xbmc . html # RenderCapture - waitForCaptureStateChangeEvent , <nl> < b > xbmc . RenderCapture ( ) . waitForCaptureStateChangeEvent ( ) < / b > function was removed completely . <nl> } <nl> + \ python_removed_function { <nl> + disableSubtitles , <nl> + http : / / mirrors . kodi . tv / docs / python - docs / 16 . x - jarvis / xbmc . html # Player , <nl> + < b > xbmc . Player ( ) . disableSubtitles ( ) < / b > function was removed completely . <nl> + Use \ endhtmlonly \ link XBMCAddon : : xbmc : : Player : : showSubtitles ( ) xbmc . Player ( ) . showSubtitles ( . . . ) \ endlink \ htmlonly instead . <nl> + } <nl> * / <nl> / * ! <nl> @ page python_v16 Python API v16 <nl> web applications or frameworks for the Python programming language . <nl> * / <nl> / * ! <nl> @ page python_v12 Python API v12 <nl> + \ python_removed_function { <nl> + executehttpapi , <nl> + http : / / mirrors . kodi . tv / docs / python - docs / 12 . 2 - frodo / xbmc . html # - executehttpapi , <nl> + < b > xbmc . executehttpapi ( ) < / b > function was removed completely . <nl> + } <nl> * / <nl> - <nl> / * ! <nl> @ page python_revisions Python API Changes <nl> @ brief Overview of changes on Python API for Kodi <nl> mmm a / xbmc / interfaces / legacy / ListItem . h <nl> ppp b / xbmc / interfaces / legacy / ListItem . h <nl> namespace XBMCAddon <nl> const String & path = emptyString , <nl> bool offscreen = false ) ; <nl> <nl> - # ifndef SWIG <nl> + # if ! defined SWIG & & ! defined DOXYGEN_SHOULD_SKIP_THIS <nl> inline explicit ListItem ( CFileItemPtr pitem ) : <nl> item ( pitem ) , m_offscreen ( false ) <nl> { } <nl> namespace XBMCAddon <nl> / / / @ param number int - the number of the season . <nl> / / / @ param name string - the name of the season . Default " " . <nl> / / / <nl> + / / / <nl> / / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / / <nl> / / / @ python_v18 New function added . <nl> namespace XBMCAddon <nl> / / / @ param isgz [ opt ] bool ( use gzip to retrieve the image , default false ) <nl> / / / @ param season [ opt ] integer ( number of season in case of season thumb ) <nl> / / / <nl> + / / / <nl> / / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / / @ python_v18 New function added . <nl> / / / <nl> namespace XBMCAddon <nl> # ifdef DOXYGEN_SHOULD_USE_THIS <nl> / / / <nl> / / / \ ingroup python_xbmcgui_listitem <nl> - / / / @ brief \ python_func { addContextMenuItems ( [ ( label , action , ) * ] , replaceItems ) } <nl> + / / / @ brief \ python_func { addContextMenuItems ( [ ( label , action ) , * ] ) } <nl> / / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / / Adds item ( s ) to the context menu for media lists . <nl> / / / <nl> - / / / @ param items list - [ ( label , action , ) * ] A list of tuples consisting of label and action pairs . <nl> + / / / @ param items list - [ ( label , action ) , * ] A list of tuples consisting of label and action pairs . <nl> / / / - label string or unicode - item ' s label . <nl> - / / / - action string or unicode - any built - in function to perform . <nl> - / / / <nl> - / / / <nl> - / / / List of functions - http : / / kodi . 
wiki / view / List_of_Built_In_Functions <nl> + / / / - action string or unicode - any available \ link page_List_of_built_in_functions built - in function \ endlink . <nl> / / / <nl> / / / @ note You can use the above as keywords for arguments and skip certain optional arguments . \ n <nl> / / / Once you use a keyword , all following arguments require the keyword . <nl> / / / <nl> / / / <nl> / / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / / @ python_v17 Completely removed option * * replaceItems * * . <nl> + / / / @ python_v17 Completely removed previously available argument * * replaceItems * * . <nl> / / / <nl> / / / * * Example : * * <nl> / / / ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ { . py } <nl> / / / . . . <nl> - / / / listitem . addContextMenuItems ( [ ( ' Theater Showtimes ' , ' RunScript ( special : / / home / scripts / showtimes / default . py , Iron Man ) ' , ) ] ) <nl> + / / / listitem . addContextMenuItems ( [ ( ' Theater Showtimes ' , ' RunScript ( special : / / home / scripts / showtimes / default . py , Iron Man ) ' ) ] ) <nl> / / / . . . <nl> / / / ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ <nl> / / / <nl> namespace XBMCAddon <nl> / / / If disabled , HEAD requests to e . g determine mime type will not be sent . <nl> / / / <nl> / / / @ param enable bool to enable content lookup <nl> + / / / <nl> + / / / <nl> / / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / / @ python_v16 New function added . <nl> / / / <nl> namespace XBMCAddon <nl> / / / Returns the path of this listitem . <nl> / / / <nl> / / / @ return [ string ] filename <nl> + / / / <nl> + / / / <nl> / / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / / @ python_v17 New function added . <nl> / / / <nl> namespace XBMCAddon <nl> / / / Returns the VideoInfoTag for this item . <nl> / / / <nl> / / / @ return video info tag <nl> + / / / <nl> + / / / <nl> / / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / / @ python_v15 New function added . <nl> / / / <nl> namespace XBMCAddon <nl> / / / Returns the MusicInfoTag for this item . <nl> / / / <nl> / / / @ return music info tag <nl> + / / / <nl> + / / / <nl> / / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> / / / @ python_v15 New function added . <nl> / / / <nl> namespace XBMCAddon <nl> xbmc : : InfoTagMusic * getMusicInfoTag ( ) ; <nl> # endif <nl> <nl> - # ifdef DOXYGEN_SHOULD_USE_THIS <nl> - / / / <nl> - / / / \ ingroup python_xbmcgui_listitem <nl> - / / / @ brief \ python_func { addContextMenuItems ( ) } <nl> - / / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / / Adds item ( s ) to the context menu for media lists . <nl> - / / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / / @ python_v14 <nl> - / / / Function completely removed and replaced with context menu add - ons . <nl> - / / / <nl> - # endif <nl> - <nl> private : <nl> std : : vector < std : : string > getStringArray ( const InfoLabelValue & alt , const std : : string & tag , std : : string value = " " ) ; <nl> <nl> mmm a / xbmc / interfaces / legacy / ModuleXbmc . h <nl> ppp b / xbmc / interfaces / legacy / ModuleXbmc . 
h <nl> namespace XBMCAddon <nl> # else <nl> String convertLanguage ( const char * language , int format ) ; <nl> # endif <nl> - <nl> - # ifdef DOXYGEN_SHOULD_USE_THIS <nl> - / / / <nl> - / / / \ ingroup python_xbmc <nl> - / / / @ brief \ python_func { executehttpapi ( httpcommand ) } <nl> - / / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / / @ python_v12 Completely removed . <nl> - / / / <nl> - # endif <nl> / / @ } <nl> # ifndef DOXYGEN_SHOULD_SKIP_THIS <nl> - SWIG_CONSTANT_FROM_GETTER ( int , SERVER_WEBSERVER ) ; <nl> - SWIG_CONSTANT_FROM_GETTER ( int , SERVER_AIRPLAYSERVER ) ; <nl> - SWIG_CONSTANT_FROM_GETTER ( int , SERVER_UPNPSERVER ) ; <nl> - SWIG_CONSTANT_FROM_GETTER ( int , SERVER_UPNPRENDERER ) ; <nl> - SWIG_CONSTANT_FROM_GETTER ( int , SERVER_EVENTSERVER ) ; <nl> - SWIG_CONSTANT_FROM_GETTER ( int , SERVER_JSONRPCSERVER ) ; <nl> - SWIG_CONSTANT_FROM_GETTER ( int , SERVER_ZEROCONF ) ; <nl> - <nl> - SWIG_CONSTANT_FROM_GETTER ( int , PLAYLIST_MUSIC ) ; <nl> - SWIG_CONSTANT_FROM_GETTER ( int , PLAYLIST_VIDEO ) ; <nl> - SWIG_CONSTANT_FROM_GETTER ( int , TRAY_OPEN ) ; <nl> - SWIG_CONSTANT_FROM_GETTER ( int , DRIVE_NOT_READY ) ; <nl> - SWIG_CONSTANT_FROM_GETTER ( int , TRAY_CLOSED_NO_MEDIA ) ; <nl> - SWIG_CONSTANT_FROM_GETTER ( int , TRAY_CLOSED_MEDIA_PRESENT ) ; <nl> - SWIG_CONSTANT_FROM_GETTER ( int , LOGDEBUG ) ; <nl> - SWIG_CONSTANT_FROM_GETTER ( int , LOGINFO ) ; <nl> - SWIG_CONSTANT_FROM_GETTER ( int , LOGNOTICE ) ; <nl> - SWIG_CONSTANT_FROM_GETTER ( int , LOGWARNING ) ; <nl> - SWIG_CONSTANT_FROM_GETTER ( int , LOGERROR ) ; <nl> - SWIG_CONSTANT_FROM_GETTER ( int , LOGSEVERE ) ; <nl> - SWIG_CONSTANT_FROM_GETTER ( int , LOGFATAL ) ; <nl> - SWIG_CONSTANT_FROM_GETTER ( int , LOGNONE ) ; <nl> - <nl> - SWIG_CONSTANT_FROM_GETTER ( int , ISO_639_1 ) ; <nl> - SWIG_CONSTANT_FROM_GETTER ( int , ISO_639_2 ) ; <nl> - SWIG_CONSTANT_FROM_GETTER ( int , ENGLISH_NAME ) ; <nl> + SWIG_CONSTANT_FROM_GETTER ( int , SERVER_WEBSERVER ) ; <nl> + SWIG_CONSTANT_FROM_GETTER ( int , SERVER_AIRPLAYSERVER ) ; <nl> + SWIG_CONSTANT_FROM_GETTER ( int , SERVER_UPNPSERVER ) ; <nl> + SWIG_CONSTANT_FROM_GETTER ( int , SERVER_UPNPRENDERER ) ; <nl> + SWIG_CONSTANT_FROM_GETTER ( int , SERVER_EVENTSERVER ) ; <nl> + SWIG_CONSTANT_FROM_GETTER ( int , SERVER_JSONRPCSERVER ) ; <nl> + SWIG_CONSTANT_FROM_GETTER ( int , SERVER_ZEROCONF ) ; <nl> + <nl> + SWIG_CONSTANT_FROM_GETTER ( int , PLAYLIST_MUSIC ) ; <nl> + SWIG_CONSTANT_FROM_GETTER ( int , PLAYLIST_VIDEO ) ; <nl> + SWIG_CONSTANT_FROM_GETTER ( int , TRAY_OPEN ) ; <nl> + SWIG_CONSTANT_FROM_GETTER ( int , DRIVE_NOT_READY ) ; <nl> + SWIG_CONSTANT_FROM_GETTER ( int , TRAY_CLOSED_NO_MEDIA ) ; <nl> + SWIG_CONSTANT_FROM_GETTER ( int , TRAY_CLOSED_MEDIA_PRESENT ) ; <nl> + SWIG_CONSTANT_FROM_GETTER ( int , LOGDEBUG ) ; <nl> + SWIG_CONSTANT_FROM_GETTER ( int , LOGINFO ) ; <nl> + SWIG_CONSTANT_FROM_GETTER ( int , LOGNOTICE ) ; <nl> + SWIG_CONSTANT_FROM_GETTER ( int , LOGWARNING ) ; <nl> + SWIG_CONSTANT_FROM_GETTER ( int , LOGERROR ) ; <nl> + SWIG_CONSTANT_FROM_GETTER ( int , LOGSEVERE ) ; <nl> + SWIG_CONSTANT_FROM_GETTER ( int , LOGFATAL ) ; <nl> + SWIG_CONSTANT_FROM_GETTER ( int , LOGNONE ) ; <nl> + <nl> + SWIG_CONSTANT_FROM_GETTER ( int , ISO_639_1 ) ; <nl> + SWIG_CONSTANT_FROM_GETTER ( int , ISO_639_2 ) ; <nl> + SWIG_CONSTANT_FROM_GETTER ( int , ENGLISH_NAME ) ; <nl> # if 0 <nl> void registerMonitor ( Monitor * monitor ) ; <nl> void unregisterMonitor ( Monitor * monitor ) ; <nl> mmm a / xbmc / interfaces / legacy / ModuleXbmcgui . 
h <nl> ppp b / xbmc / interfaces / legacy / ModuleXbmcgui . h <nl> namespace XBMCAddon <nl> / / / @ } <nl> <nl> # ifndef DOXYGEN_SHOULD_SKIP_THIS <nl> - SWIG_CONSTANT2 ( int , ICON_OVERLAY_NONE , CGUIListItem : : ICON_OVERLAY_NONE ) ; <nl> - SWIG_CONSTANT2 ( int , ICON_OVERLAY_RAR , CGUIListItem : : ICON_OVERLAY_RAR ) ; <nl> - SWIG_CONSTANT2 ( int , ICON_OVERLAY_ZIP , CGUIListItem : : ICON_OVERLAY_ZIP ) ; <nl> - SWIG_CONSTANT2 ( int , ICON_OVERLAY_LOCKED , CGUIListItem : : ICON_OVERLAY_LOCKED ) ; <nl> - SWIG_CONSTANT2 ( int , ICON_OVERLAY_UNWATCHED , CGUIListItem : : ICON_OVERLAY_UNWATCHED ) ; <nl> - SWIG_CONSTANT2 ( int , ICON_OVERLAY_WATCHED , CGUIListItem : : ICON_OVERLAY_WATCHED ) ; <nl> - SWIG_CONSTANT2 ( int , ICON_OVERLAY_HD , CGUIListItem : : ICON_OVERLAY_HD ) ; <nl> - <nl> - SWIG_CONSTANT2 ( int , INPUT_TYPE_TEXT , CGUIEditControl : : INPUT_TYPE_TEXT ) ; <nl> - SWIG_CONSTANT2 ( int , INPUT_TYPE_NUMBER , CGUIEditControl : : INPUT_TYPE_NUMBER ) ; <nl> - SWIG_CONSTANT2 ( int , INPUT_TYPE_DATE , CGUIEditControl : : INPUT_TYPE_DATE ) ; <nl> - SWIG_CONSTANT2 ( int , INPUT_TYPE_TIME , CGUIEditControl : : INPUT_TYPE_TIME ) ; <nl> - SWIG_CONSTANT2 ( int , INPUT_TYPE_IPADDRESS , CGUIEditControl : : INPUT_TYPE_IPADDRESS ) ; <nl> - SWIG_CONSTANT2 ( int , INPUT_TYPE_PASSWORD , CGUIEditControl : : INPUT_TYPE_PASSWORD ) ; <nl> - SWIG_CONSTANT2 ( int , INPUT_TYPE_PASSWORD_MD5 , CGUIEditControl : : INPUT_TYPE_PASSWORD_MD5 ) ; <nl> - SWIG_CONSTANT2 ( int , INPUT_TYPE_SECONDS , CGUIEditControl : : INPUT_TYPE_SECONDS ) ; <nl> - <nl> - SWIG_CONSTANT_FROM_GETTER ( const char * , NOTIFICATION_INFO ) ; <nl> - SWIG_CONSTANT_FROM_GETTER ( const char * , NOTIFICATION_WARNING ) ; <nl> - SWIG_CONSTANT_FROM_GETTER ( const char * , NOTIFICATION_ERROR ) ; <nl> - <nl> - SWIG_CONSTANT ( int , INPUT_ALPHANUM ) ; <nl> - SWIG_CONSTANT ( int , INPUT_NUMERIC ) ; <nl> - SWIG_CONSTANT ( int , INPUT_DATE ) ; <nl> - SWIG_CONSTANT ( int , INPUT_TIME ) ; <nl> - SWIG_CONSTANT ( int , INPUT_IPADDRESS ) ; <nl> - SWIG_CONSTANT ( int , INPUT_PASSWORD ) ; <nl> + SWIG_CONSTANT2 ( int , ICON_OVERLAY_NONE , CGUIListItem : : ICON_OVERLAY_NONE ) ; <nl> + SWIG_CONSTANT2 ( int , ICON_OVERLAY_RAR , CGUIListItem : : ICON_OVERLAY_RAR ) ; <nl> + SWIG_CONSTANT2 ( int , ICON_OVERLAY_ZIP , CGUIListItem : : ICON_OVERLAY_ZIP ) ; <nl> + SWIG_CONSTANT2 ( int , ICON_OVERLAY_LOCKED , CGUIListItem : : ICON_OVERLAY_LOCKED ) ; <nl> + SWIG_CONSTANT2 ( int , ICON_OVERLAY_UNWATCHED , CGUIListItem : : ICON_OVERLAY_UNWATCHED ) ; <nl> + SWIG_CONSTANT2 ( int , ICON_OVERLAY_WATCHED , CGUIListItem : : ICON_OVERLAY_WATCHED ) ; <nl> + SWIG_CONSTANT2 ( int , ICON_OVERLAY_HD , CGUIListItem : : ICON_OVERLAY_HD ) ; <nl> + <nl> + SWIG_CONSTANT2 ( int , INPUT_TYPE_TEXT , CGUIEditControl : : INPUT_TYPE_TEXT ) ; <nl> + SWIG_CONSTANT2 ( int , INPUT_TYPE_NUMBER , CGUIEditControl : : INPUT_TYPE_NUMBER ) ; <nl> + SWIG_CONSTANT2 ( int , INPUT_TYPE_DATE , CGUIEditControl : : INPUT_TYPE_DATE ) ; <nl> + SWIG_CONSTANT2 ( int , INPUT_TYPE_TIME , CGUIEditControl : : INPUT_TYPE_TIME ) ; <nl> + SWIG_CONSTANT2 ( int , INPUT_TYPE_IPADDRESS , CGUIEditControl : : INPUT_TYPE_IPADDRESS ) ; <nl> + SWIG_CONSTANT2 ( int , INPUT_TYPE_PASSWORD , CGUIEditControl : : INPUT_TYPE_PASSWORD ) ; <nl> + SWIG_CONSTANT2 ( int , INPUT_TYPE_PASSWORD_MD5 , CGUIEditControl : : INPUT_TYPE_PASSWORD_MD5 ) ; <nl> + SWIG_CONSTANT2 ( int , INPUT_TYPE_SECONDS , CGUIEditControl : : INPUT_TYPE_SECONDS ) ; <nl> + <nl> + SWIG_CONSTANT_FROM_GETTER ( const char * , NOTIFICATION_INFO ) ; <nl> + SWIG_CONSTANT_FROM_GETTER ( 
const char * , NOTIFICATION_WARNING ) ; <nl> + SWIG_CONSTANT_FROM_GETTER ( const char * , NOTIFICATION_ERROR ) ; <nl> + <nl> + SWIG_CONSTANT ( int , INPUT_ALPHANUM ) ; <nl> + SWIG_CONSTANT ( int , INPUT_NUMERIC ) ; <nl> + SWIG_CONSTANT ( int , INPUT_DATE ) ; <nl> + SWIG_CONSTANT ( int , INPUT_TIME ) ; <nl> + SWIG_CONSTANT ( int , INPUT_IPADDRESS ) ; <nl> + SWIG_CONSTANT ( int , INPUT_PASSWORD ) ; <nl> <nl> SWIG_CONSTANT ( int , HORIZONTAL ) ; <nl> SWIG_CONSTANT ( int , VERTICAL ) ; <nl> <nl> - SWIG_CONSTANT ( int , PASSWORD_VERIFY ) ; <nl> - SWIG_CONSTANT ( int , ALPHANUM_HIDE_INPUT ) ; <nl> + SWIG_CONSTANT ( int , PASSWORD_VERIFY ) ; <nl> + SWIG_CONSTANT ( int , ALPHANUM_HIDE_INPUT ) ; <nl> <nl> } <nl> } <nl> mmm a / xbmc / interfaces / legacy / ModuleXbmcplugin . h <nl> ppp b / xbmc / interfaces / legacy / ModuleXbmcplugin . h <nl> namespace XBMCAddon <nl> # endif <nl> <nl> # ifndef DOXYGEN_SHOULD_SKIP_THIS <nl> - SWIG_CONSTANT ( int , SORT_METHOD_NONE ) ; <nl> - SWIG_CONSTANT ( int , SORT_METHOD_LABEL ) ; <nl> - SWIG_CONSTANT ( int , SORT_METHOD_LABEL_IGNORE_THE ) ; <nl> - SWIG_CONSTANT ( int , SORT_METHOD_DATE ) ; <nl> - SWIG_CONSTANT ( int , SORT_METHOD_SIZE ) ; <nl> - SWIG_CONSTANT ( int , SORT_METHOD_FILE ) ; <nl> - SWIG_CONSTANT ( int , SORT_METHOD_DRIVE_TYPE ) ; <nl> - SWIG_CONSTANT ( int , SORT_METHOD_TRACKNUM ) ; <nl> - SWIG_CONSTANT ( int , SORT_METHOD_DURATION ) ; <nl> - SWIG_CONSTANT ( int , SORT_METHOD_TITLE ) ; <nl> - SWIG_CONSTANT ( int , SORT_METHOD_TITLE_IGNORE_THE ) ; <nl> - SWIG_CONSTANT ( int , SORT_METHOD_ARTIST ) ; <nl> - SWIG_CONSTANT ( int , SORT_METHOD_ARTIST_IGNORE_THE ) ; <nl> - SWIG_CONSTANT ( int , SORT_METHOD_ALBUM ) ; <nl> - SWIG_CONSTANT ( int , SORT_METHOD_ALBUM_IGNORE_THE ) ; <nl> - SWIG_CONSTANT ( int , SORT_METHOD_GENRE ) ; <nl> - SWIG_CONSTANT2 ( int , SORT_METHOD_VIDEO_YEAR , SORT_METHOD_YEAR ) ; <nl> - SWIG_CONSTANT ( int , SORT_METHOD_VIDEO_RATING ) ; <nl> - SWIG_CONSTANT ( int , SORT_METHOD_PROGRAM_COUNT ) ; <nl> - SWIG_CONSTANT ( int , SORT_METHOD_PLAYLIST_ORDER ) ; <nl> - SWIG_CONSTANT ( int , SORT_METHOD_EPISODE ) ; <nl> - SWIG_CONSTANT ( int , SORT_METHOD_VIDEO_TITLE ) ; <nl> - SWIG_CONSTANT ( int , SORT_METHOD_VIDEO_SORT_TITLE ) ; <nl> - SWIG_CONSTANT ( int , SORT_METHOD_VIDEO_SORT_TITLE_IGNORE_THE ) ; <nl> - SWIG_CONSTANT ( int , SORT_METHOD_PRODUCTIONCODE ) ; <nl> - SWIG_CONSTANT ( int , SORT_METHOD_SONG_RATING ) ; <nl> - SWIG_CONSTANT ( int , SORT_METHOD_MPAA_RATING ) ; <nl> - SWIG_CONSTANT ( int , SORT_METHOD_VIDEO_RUNTIME ) ; <nl> - SWIG_CONSTANT ( int , SORT_METHOD_STUDIO ) ; <nl> - SWIG_CONSTANT ( int , SORT_METHOD_STUDIO_IGNORE_THE ) ; <nl> - SWIG_CONSTANT ( int , SORT_METHOD_UNSORTED ) ; <nl> - SWIG_CONSTANT ( int , SORT_METHOD_BITRATE ) ; <nl> - SWIG_CONSTANT ( int , SORT_METHOD_LISTENERS ) ; <nl> - SWIG_CONSTANT ( int , SORT_METHOD_COUNTRY ) ; <nl> - SWIG_CONSTANT ( int , SORT_METHOD_DATEADDED ) ; <nl> - SWIG_CONSTANT ( int , SORT_METHOD_FULLPATH ) ; <nl> - SWIG_CONSTANT ( int , SORT_METHOD_LABEL_IGNORE_FOLDERS ) ; <nl> - SWIG_CONSTANT ( int , SORT_METHOD_LASTPLAYED ) ; <nl> - SWIG_CONSTANT ( int , SORT_METHOD_PLAYCOUNT ) ; <nl> - SWIG_CONSTANT ( int , SORT_METHOD_CHANNEL ) ; <nl> - SWIG_CONSTANT ( int , SORT_METHOD_DATE_TAKEN ) ; <nl> - SWIG_CONSTANT ( int , SORT_METHOD_VIDEO_USER_RATING ) ; <nl> - SWIG_CONSTANT ( int , SORT_METHOD_SONG_USER_RATING ) ; <nl> + SWIG_CONSTANT ( int , SORT_METHOD_NONE ) ; <nl> + SWIG_CONSTANT ( int , SORT_METHOD_LABEL ) ; <nl> + SWIG_CONSTANT ( int , SORT_METHOD_LABEL_IGNORE_THE ) ; <nl> + 
SWIG_CONSTANT ( int , SORT_METHOD_DATE ) ; <nl> + SWIG_CONSTANT ( int , SORT_METHOD_SIZE ) ; <nl> + SWIG_CONSTANT ( int , SORT_METHOD_FILE ) ; <nl> + SWIG_CONSTANT ( int , SORT_METHOD_DRIVE_TYPE ) ; <nl> + SWIG_CONSTANT ( int , SORT_METHOD_TRACKNUM ) ; <nl> + SWIG_CONSTANT ( int , SORT_METHOD_DURATION ) ; <nl> + SWIG_CONSTANT ( int , SORT_METHOD_TITLE ) ; <nl> + SWIG_CONSTANT ( int , SORT_METHOD_TITLE_IGNORE_THE ) ; <nl> + SWIG_CONSTANT ( int , SORT_METHOD_ARTIST ) ; <nl> + SWIG_CONSTANT ( int , SORT_METHOD_ARTIST_IGNORE_THE ) ; <nl> + SWIG_CONSTANT ( int , SORT_METHOD_ALBUM ) ; <nl> + SWIG_CONSTANT ( int , SORT_METHOD_ALBUM_IGNORE_THE ) ; <nl> + SWIG_CONSTANT ( int , SORT_METHOD_GENRE ) ; <nl> + SWIG_CONSTANT2 ( int , SORT_METHOD_VIDEO_YEAR , SORT_METHOD_YEAR ) ; <nl> + SWIG_CONSTANT ( int , SORT_METHOD_VIDEO_RATING ) ; <nl> + SWIG_CONSTANT ( int , SORT_METHOD_PROGRAM_COUNT ) ; <nl> + SWIG_CONSTANT ( int , SORT_METHOD_PLAYLIST_ORDER ) ; <nl> + SWIG_CONSTANT ( int , SORT_METHOD_EPISODE ) ; <nl> + SWIG_CONSTANT ( int , SORT_METHOD_VIDEO_TITLE ) ; <nl> + SWIG_CONSTANT ( int , SORT_METHOD_VIDEO_SORT_TITLE ) ; <nl> + SWIG_CONSTANT ( int , SORT_METHOD_VIDEO_SORT_TITLE_IGNORE_THE ) ; <nl> + SWIG_CONSTANT ( int , SORT_METHOD_PRODUCTIONCODE ) ; <nl> + SWIG_CONSTANT ( int , SORT_METHOD_SONG_RATING ) ; <nl> + SWIG_CONSTANT ( int , SORT_METHOD_MPAA_RATING ) ; <nl> + SWIG_CONSTANT ( int , SORT_METHOD_VIDEO_RUNTIME ) ; <nl> + SWIG_CONSTANT ( int , SORT_METHOD_STUDIO ) ; <nl> + SWIG_CONSTANT ( int , SORT_METHOD_STUDIO_IGNORE_THE ) ; <nl> + SWIG_CONSTANT ( int , SORT_METHOD_UNSORTED ) ; <nl> + SWIG_CONSTANT ( int , SORT_METHOD_BITRATE ) ; <nl> + SWIG_CONSTANT ( int , SORT_METHOD_LISTENERS ) ; <nl> + SWIG_CONSTANT ( int , SORT_METHOD_COUNTRY ) ; <nl> + SWIG_CONSTANT ( int , SORT_METHOD_DATEADDED ) ; <nl> + SWIG_CONSTANT ( int , SORT_METHOD_FULLPATH ) ; <nl> + SWIG_CONSTANT ( int , SORT_METHOD_LABEL_IGNORE_FOLDERS ) ; <nl> + SWIG_CONSTANT ( int , SORT_METHOD_LASTPLAYED ) ; <nl> + SWIG_CONSTANT ( int , SORT_METHOD_PLAYCOUNT ) ; <nl> + SWIG_CONSTANT ( int , SORT_METHOD_CHANNEL ) ; <nl> + SWIG_CONSTANT ( int , SORT_METHOD_DATE_TAKEN ) ; <nl> + SWIG_CONSTANT ( int , SORT_METHOD_VIDEO_USER_RATING ) ; <nl> + SWIG_CONSTANT ( int , SORT_METHOD_SONG_USER_RATING ) ; <nl> } <nl> } <nl> # endif / * DOXYGEN_SHOULD_SKIP_THIS * / <nl> mmm a / xbmc / interfaces / legacy / Player . h <nl> ppp b / xbmc / interfaces / legacy / Player . h <nl> namespace XBMCAddon <nl> void showSubtitles ( bool bVisible ) ; <nl> # endif <nl> <nl> - # ifdef DOXYGEN_SHOULD_USE_THIS <nl> - / / / <nl> - / / / \ ingroup python_Player <nl> - / / / @ brief \ python_func { DisableSubtitles ( ) } <nl> - / / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / / @ python_v12 Deprecated . Use * * showSubtitles * * instead . <nl> - / / / @ python_v17 Completely removed function . <nl> - / / / <nl> - # endif <nl> - <nl> # ifdef DOXYGEN_SHOULD_USE_THIS <nl> / / / <nl> / / / \ ingroup python_Player <nl>
Merge pull request from enen92 / removedpythonfunctions
xbmc/xbmc
cd2c5e2fde12de6cc97d4835f9df25d42a394c45
2018-10-28T11:05:31Z
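The xbmc/xbmc commit above documents two Kodi Python API changes: `xbmc.Player().disableSubtitles()` is gone in favour of `showSubtitles(...)`, and `ListItem.addContextMenuItems()` now takes a plain list of `(label, action)` tuples with the old `replaceItems` argument removed. A minimal add-on sketch of the post-change calls, using only the signatures and the example script path shown in the diff itself:

```
import xbmc
import xbmcgui

# disableSubtitles() was removed; pass False to showSubtitles() instead.
xbmc.Player().showSubtitles(False)

# Context menu entries are (label, action) tuples; replaceItems no longer exists.
li = xbmcgui.ListItem('Iron Man')
li.addContextMenuItems(
    [('Theater Showtimes',
      'RunScript(special://home/scripts/showtimes/default.py, Iron Man)')])
```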
mmm a / src / mongo / db / views / view_catalog . cpp <nl> ppp b / src / mongo / db / views / view_catalog . cpp <nl> void ViewCatalog : : _requireValidCatalog ( WithLock ) { <nl> } <nl> <nl> void ViewCatalog : : iterate ( OperationContext * opCtx , ViewIteratorCallback callback ) { <nl> - Lock : : CollectionLock systemViewsLock ( <nl> - opCtx , <nl> - NamespaceString ( _durable - > getName ( ) , NamespaceString : : kSystemDotViewsCollectionName ) , <nl> - MODE_IS ) ; <nl> stdx : : lock_guard < Latch > lk ( _mutex ) ; <nl> _requireValidCatalog ( lk ) ; <nl> for ( auto & & view : _viewMap ) { <nl> std : : shared_ptr < ViewDefinition > ViewCatalog : : _lookup ( WithLock lk , <nl> } <nl> <nl> std : : shared_ptr < ViewDefinition > ViewCatalog : : lookup ( OperationContext * opCtx , StringData ns ) { <nl> - Lock : : CollectionLock systemViewsLock ( <nl> - opCtx , <nl> - NamespaceString ( _durable - > getName ( ) , NamespaceString : : kSystemDotViewsCollectionName ) , <nl> - MODE_IS ) ; <nl> stdx : : lock_guard < Latch > lk ( _mutex ) ; <nl> if ( ! _valid & & opCtx - > getClient ( ) - > isFromUserConnection ( ) ) { <nl> / / We want to avoid lookups on invalid collection names . <nl> std : : shared_ptr < ViewDefinition > ViewCatalog : : lookup ( OperationContext * opCtx , Str <nl> <nl> std : : shared_ptr < ViewDefinition > ViewCatalog : : lookupWithoutValidatingDurableViews ( <nl> OperationContext * opCtx , StringData ns ) { <nl> - Lock : : CollectionLock systemViewsLock ( <nl> - opCtx , <nl> - NamespaceString ( _durable - > getName ( ) , NamespaceString : : kSystemDotViewsCollectionName ) , <nl> - MODE_IS ) ; <nl> stdx : : lock_guard < Latch > lk ( _mutex ) ; <nl> return _lookup ( lk , opCtx , ns , ViewCatalogLookupBehavior : : kAllowInvalidDurableViews ) ; <nl> } <nl> <nl> StatusWith < ResolvedView > ViewCatalog : : resolveView ( OperationContext * opCtx , <nl> const NamespaceString & nss ) { <nl> - Lock : : CollectionLock systemViewsLock ( <nl> - opCtx , <nl> - NamespaceString ( _durable - > getName ( ) , NamespaceString : : kSystemDotViewsCollectionName ) , <nl> - MODE_IS ) ; <nl> stdx : : unique_lock < Latch > lock ( _mutex ) ; <nl> <nl> _requireValidCatalog ( lock ) ; <nl>
SERVER - 51320 Remove CollectionLock to system . views from functions in ViewCatalog .
mongodb/mongo
ded7b44e03b6aea749c764d0e4cc2556785b4eb4
2020-10-13T20:45:52Z
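The mongodb/mongo change above removes the `system.views` CollectionLock from read-only ViewCatalog paths because the catalog's own `_mutex` already serializes access to the in-memory `_viewMap`. As a language-neutral sketch of that pattern only (the class and names below are illustrative, not MongoDB code): the in-memory map is guarded by its own lock, and lookups take no storage-level lock at all.

```
import threading

class InMemoryCatalog:
    """Name -> definition map; reads need only the catalog's own mutex."""
    def __init__(self):
        self._mutex = threading.Lock()
        self._view_map = {}

    def lookup(self, ns):
        # No collection/storage lock here - the internal mutex is enough
        # to read the cached map consistently.
        with self._mutex:
            return self._view_map.get(ns)

    def register(self, ns, definition):
        with self._mutex:
            self._view_map[ns] = definition
```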
mmm a / README . rst <nl> ppp b / README . rst <nl> Bitcoin - qt : Qt4 based GUI replacement for Bitcoin <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> <nl> * * Warning * * * * Warning * * * * Warning * * <nl> - Pre - alpha stuff ! Use on testnet only ! <nl> + <nl> + Pre - alpha stuff ! I ' m using this client myself on the production network , and I haven ' t noticed any glitches , but remember : always backup your wallet ! Testing on the testnet is recommended . <nl> <nl> This has been implemented : <nl> <nl> then execute the following : <nl> qmake <nl> make <nl> <nl> - Alternatively , install Qt Creator and open the ` bitcoin . pro ` file . <nl> + Alternatively , install Qt Creator and open the ` bitcoin - qt . pro ` file . <nl> <nl> - An executable named ` bitcoin ` will be built . <nl> + An executable named ` bitcoin - qt ` will be built . <nl> <nl> similarity index 98 % <nl> rename from bitcoin . pro <nl> rename to bitcoin - qt . pro <nl> mmm a / bitcoin . pro <nl> ppp b / bitcoin - qt . pro <nl> FORMS + = \ <nl> src / qt / forms / aboutdialog . ui \ <nl> src / qt / forms / editaddressdialog . ui \ <nl> src / qt / forms / transactiondescdialog . ui <nl> + <nl> + CODECFORTR = UTF - 8 <nl> + TRANSLATIONS = src / qt / locale / bitcoin_nl . ts <nl>
prepare internationalization ; rename project to bitcoin - qt
bitcoin/bitcoin
ab2fe68fd8aa8137d1dc304a900ea811eac99af5
2011-06-12T12:32:36Z
mmm a / test / functional / rpc_net . py <nl> ppp b / test / functional / rpc_net . py <nl> def _test_getnettotals ( self ) : <nl> <nl> peer_info_after_ping = self . nodes [ 0 ] . getpeerinfo ( ) <nl> for before , after in zip ( peer_info , peer_info_after_ping ) : <nl> - assert_greater_than_or_equal ( after [ ' bytesrecv_per_msg ' ] [ ' pong ' ] , before [ ' bytesrecv_per_msg ' ] [ ' pong ' ] + 32 ) <nl> - assert_greater_than_or_equal ( after [ ' bytessent_per_msg ' ] [ ' ping ' ] , before [ ' bytessent_per_msg ' ] [ ' ping ' ] + 32 ) <nl> + assert_greater_than_or_equal ( after [ ' bytesrecv_per_msg ' ] . get ( ' pong ' , 0 ) , before [ ' bytesrecv_per_msg ' ] . get ( ' pong ' , 0 ) + 32 ) <nl> + assert_greater_than_or_equal ( after [ ' bytessent_per_msg ' ] . get ( ' ping ' , 0 ) , before [ ' bytessent_per_msg ' ] . get ( ' ping ' , 0 ) + 32 ) <nl> <nl> def _test_getnetworkinginfo ( self ) : <nl> assert_equal ( self . nodes [ 0 ] . getnetworkinfo ( ) [ ' networkactive ' ] , True ) <nl>
test : Fix rpc_net . py " pong " race condition
bitcoin/bitcoin
de23739b22fefd7a409f6cb0a4d6128c176fa229
2019-01-01T21:12:41Z
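The bitcoin/bitcoin fix above replaces direct indexing with `dict.get(key, 0)` so the assertion no longer raises `KeyError` when a peer has not yet exchanged a given message type — the race named in the commit title. The same pattern in isolation (the byte counts are made up for illustration):

```
bytesrecv_per_msg = {'version': 126, 'verack': 24}  # no 'pong' entry yet

# bytesrecv_per_msg['pong'] would raise KeyError here and fail the test
# spuriously; get() with a default of 0 keeps the comparison well-defined.
before = bytesrecv_per_msg.get('pong', 0)    # -> 0
after = {'pong': 40}.get('pong', 0)          # -> 40
assert after >= before + 32
```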
mmm a / test / distrib / cpp / run_distrib_test_cmake . sh <nl> ppp b / test / distrib / cpp / run_distrib_test_cmake . sh <nl> cd cmake / build <nl> cmake - DCMAKE_BUILD_TYPE = Release . . / . . <nl> make - j4 install <nl> cd . . / . . / . . / . . / . . <nl> + rm - rf third_party / cares / cares # wipe out to prevent influencing the grpc build <nl> <nl> # Install zlib <nl> cd third_party / zlib <nl> cd cmake / build <nl> cmake - DCMAKE_BUILD_TYPE = Release . . / . . <nl> make - j4 install <nl> cd . . / . . / . . / . . <nl> + rm - rf third_party / zlib # wipe out to prevent influencing the grpc build <nl> <nl> # Install protobuf <nl> cd third_party / protobuf <nl> cd cmake / build <nl> cmake - Dprotobuf_BUILD_TESTS = OFF - DCMAKE_BUILD_TYPE = Release . . <nl> make - j4 install <nl> cd . . / . . / . . / . . <nl> - <nl> - # TODO : Install boringssl <nl> + rm - rf third_party / protobuf # wipe out to prevent influencing the grpc build <nl> <nl> # Install gRPC <nl> mkdir - p cmake / build <nl>
prevent submodule headers from influencing the build
grpc/grpc
b2cf73e1f81a9c9eeff6dab8f8980c1711784796
2017-09-11T09:04:52Z
mmm a / include / v8 . h <nl> ppp b / include / v8 . h <nl> class V8EXPORT V8 { <nl> * Optional notification that the embedder is idle . <nl> * V8 uses the notification to reduce memory footprint . <nl> * This call can be used repeatedly if the embedder remains idle . <nl> - * \ param is_high_priority tells whether the embedder is high priority . <nl> * Returns true if the embedder should stop calling IdleNotification <nl> * until real work has been done . This indicates that V8 has done <nl> * as much cleanup as it will be able to do . <nl> * / <nl> - static bool IdleNotification ( bool is_high_priority ) ; <nl> + static bool IdleNotification ( ) ; <nl> <nl> / * * <nl> * Optional notification that the system is running low on memory . <nl> mmm a / src / api . cc <nl> ppp b / src / api . cc <nl> bool v8 : : V8 : : Dispose ( ) { <nl> } <nl> <nl> <nl> - bool v8 : : V8 : : IdleNotification ( bool is_high_priority ) { <nl> + bool v8 : : V8 : : IdleNotification ( ) { <nl> / / Returning true tells the caller that it need not <nl> / / continue to call IdleNotification . <nl> if ( ! i : : V8 : : IsRunning ( ) ) return true ; <nl> - return i : : V8 : : IdleNotification ( is_high_priority ) ; <nl> + return i : : V8 : : IdleNotification ( ) ; <nl> } <nl> <nl> <nl> mmm a / src / v8 . cc <nl> ppp b / src / v8 . cc <nl> uint32_t V8 : : Random ( ) { <nl> } <nl> <nl> <nl> - bool V8 : : IdleNotification ( bool is_high_priority ) { <nl> + bool V8 : : IdleNotification ( ) { <nl> / / Returning true tells the caller that there is no need to call <nl> / / IdleNotification again . <nl> if ( ! FLAG_use_idle_notification ) return true ; <nl> - / / Ignore high priority instances of V8 . <nl> - if ( is_high_priority ) return true ; <nl> <nl> / / Tell the heap that it may want to adjust . <nl> return Heap : : IdleNotification ( ) ; <nl> mmm a / src / v8 . h <nl> ppp b / src / v8 . h <nl> class V8 : public AllStatic { <nl> static Smi * RandomPositiveSmi ( ) ; <nl> <nl> / / Idle notification directly from the API . <nl> - static bool IdleNotification ( bool is_high_priority ) ; <nl> + static bool IdleNotification ( ) ; <nl> <nl> private : <nl> / / True if engine is currently running <nl> mmm a / test / cctest / test - api . cc <nl> ppp b / test / cctest / test - api . cc <nl> THREADED_TEST ( StackTrace ) { <nl> / / Test that idle notification can be handled when V8 has not yet been <nl> / / set up . <nl> THREADED_TEST ( IdleNotification ) { <nl> - for ( int i = 0 ; i < 100 ; i + + ) v8 : : V8 : : IdleNotification ( true ) ; <nl> - for ( int i = 0 ; i < 100 ; i + + ) v8 : : V8 : : IdleNotification ( false ) ; <nl> + for ( int i = 0 ; i < 100 ; i + + ) <nl> + CHECK ( v8 : : V8 : : IdleNotification ( ) ) ; <nl> } <nl> <nl> <nl>
Remove the high - priority flag from IdleNotification ( ) since
v8/v8
5191c81dff1fe7b1dd7be3c61d31728f95dd75ee
2009-10-06T00:06:17Z
mmm a / include / swift / SIL / Projection . h <nl> ppp b / include / swift / SIL / Projection . h <nl> inline bool isStrictSubSeqRelation ( SubSeqRelation_t Seq ) { <nl> bool getIntegerIndex ( SILValue IndexVal , unsigned & IndexConst ) ; <nl> <nl> / / / Given a SIL value , capture its element index and the value of the aggregate <nl> - / / / that immeditely contains it . <nl> + / / / that immediately contains it . <nl> / / / <nl> / / / This lightweight utility maps a SIL address projection to an index . <nl> struct ProjectionIndex { <nl>
Fix typo : immeditely → immediately
apple/swift
ebd87f3e3b4d6ca01836f8683d4078758109eccb
2015-12-13T23:11:52Z
mmm a / README . md <nl> ppp b / README . md <nl> the guidelines [ here ] ( CONTRIBUTING . md ) first . <nl> * ` findNumbers ` - finds numbers in text input . <nl> * ` PhoneNumberOfflineGeocoder ` - provides geographical information related to a phone number . <nl> * ` PhoneNumberToCarrierMapper ` - provides carrier information related to a phone number . <nl> + * ` PhoneNumberToTimeZonesMapper ` - provides timezone information related to a phone number . <nl> <nl> # Demo ( v8 . 4 . 2 ) <nl> [ Java ] ( http : / / libphonenumber . appspot . com / ) <nl> new file mode 100644 <nl> index 000000000 . . aae61a4e8 <nl> mmm / dev / null <nl> ppp b / resources / timezones / README . md <nl> <nl> + # Timezone Mapper <nl> + <nl> + The timezone mapper allows a likely timezone to be obtained for a given phone <nl> + number . The timezone returned is the canonical ID from [ CLDR ] ( <nl> + http : / / www . unicode . org / cldr / charts / latest / supplemental / zone_tzid . html ) , not a <nl> + localised name ( or any other identifier ) . For mobile phones which are associated <nl> + with particular area codes , it returns the timezone of the area code ; it does <nl> + not track the user ' s current location in any way . This could be used to work out <nl> + whether it is likely to be a good time to ring a user based on their provided <nl> + number . <nl> + <nl> + Code Location : <nl> + [ java / geocoder / src / com / google / i18n / phonenumbers / PhoneNumberToTimeZonesMapper . java ] ( https : / / github . com / googlei18n / libphonenumber / blob / master / java / geocoder / src / com / google / i18n / phonenumbers / PhoneNumberToTimeZonesMapper . java ) <nl> + <nl> + Example usage : <nl> + <nl> + ` ` ` <nl> + PhoneNumberToTimeZonesMapper timeZonesMapper = PhoneNumberToTimeZonesMapper . getInstance ( ) ; <nl> + <nl> + List < String > timezones = timeZonesMapper . getTimeZonesForNumber ( phoneNumber ) ; <nl> + ` ` ` <nl> + <nl> + # # Contributing to the timezone metadata <nl> + <nl> + The timezone metadata is auto - generated with few exceptions , so we cannot accept <nl> + pull requests . If we have an error please file an issue and we ' ll see if we can <nl> + make a generic fix . <nl> + <nl> + If making fixes in your own fork while you wait for this , build the metadata by <nl> + running this command from the root of the repository ( assuming you have ` ant ` <nl> + installed ) : <nl> + <nl> + ` ` ` <nl> + ant - f java / build . xml build - timezones - data <nl> + ` ` ` <nl> + <nl> + Note that , due to our using stable CLDR timezone IDs , we do not change the ID <nl> + for an existing timezone when the name of a region or subdivision changes . The <nl> + library returns the * ID * , which you may use to get the localised name from CLDR . <nl>
Add timezone mapper readme ( )
google/libphonenumber
ad0b3ba836909c2450a498c68edc73054fd3830f
2017-05-03T15:35:37Z
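The README added in the google/libphonenumber commit above already shows the Java entry point (`PhoneNumberToTimeZonesMapper`). For comparison, the separately maintained Python port exposes similar functionality; the snippet below assumes the third-party `phonenumbers` package and its `timezone` module, and the exact names are recalled from that port rather than stated anywhere in this commit, so treat them as an assumption.

```
# Assumes the third-party 'phonenumbers' Python port (pip install phonenumbers);
# module and function names are an assumption, not part of this commit.
import phonenumbers
from phonenumbers import timezone

number = phonenumbers.parse("+41446681800", None)
print(timezone.time_zones_for_number(number))  # e.g. ('Europe/Zurich',)
```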
mmm a / src / mips / assembler - mips . cc <nl> ppp b / src / mips / assembler - mips . cc <nl> void Assembler : : ceil_w_d ( FPURegister fd , FPURegister fs ) { <nl> } <nl> <nl> <nl> + void Assembler : : rint_s ( FPURegister fd , FPURegister fs ) { rint ( S , fd , fs ) ; } <nl> + <nl> + <nl> + void Assembler : : rint ( SecondaryField fmt , FPURegister fd , FPURegister fs ) { <nl> + DCHECK ( IsMipsArchVariant ( kMips32r6 ) ) ; <nl> + GenInstrRegister ( COP1 , fmt , f0 , fs , fd , RINT ) ; <nl> + } <nl> + <nl> + <nl> + void Assembler : : rint_d ( FPURegister fd , FPURegister fs ) { rint ( D , fd , fs ) ; } <nl> + <nl> + <nl> void Assembler : : cvt_l_s ( FPURegister fd , FPURegister fs ) { <nl> DCHECK ( IsMipsArchVariant ( kMips32r2 ) ) ; <nl> GenInstrRegister ( COP1 , S , f0 , fs , fd , CVT_L_S ) ; <nl> mmm a / src / mips / assembler - mips . h <nl> ppp b / src / mips / assembler - mips . h <nl> class Assembler : public AssemblerBase { <nl> void floor_w_d ( FPURegister fd , FPURegister fs ) ; <nl> void ceil_w_s ( FPURegister fd , FPURegister fs ) ; <nl> void ceil_w_d ( FPURegister fd , FPURegister fs ) ; <nl> + void rint_s ( FPURegister fd , FPURegister fs ) ; <nl> + void rint_d ( FPURegister fd , FPURegister fs ) ; <nl> + void rint ( SecondaryField fmt , FPURegister fd , FPURegister fs ) ; <nl> <nl> void cvt_l_s ( FPURegister fd , FPURegister fs ) ; <nl> void cvt_l_d ( FPURegister fd , FPURegister fs ) ; <nl> mmm a / src / mips / constants - mips . h <nl> ppp b / src / mips / constants - mips . h <nl> enum Opcode { <nl> <nl> enum SecondaryField { <nl> / / SPECIAL Encoding of Function Field . <nl> - SLL = ( ( 0 < < 3 ) + 0 ) , <nl> - MOVCI = ( ( 0 < < 3 ) + 1 ) , <nl> - SRL = ( ( 0 < < 3 ) + 2 ) , <nl> - SRA = ( ( 0 < < 3 ) + 3 ) , <nl> - SLLV = ( ( 0 < < 3 ) + 4 ) , <nl> - SRLV = ( ( 0 < < 3 ) + 6 ) , <nl> - SRAV = ( ( 0 < < 3 ) + 7 ) , <nl> - <nl> - JR = ( ( 1 < < 3 ) + 0 ) , <nl> - JALR = ( ( 1 < < 3 ) + 1 ) , <nl> - MOVZ = ( ( 1 < < 3 ) + 2 ) , <nl> - MOVN = ( ( 1 < < 3 ) + 3 ) , <nl> - BREAK = ( ( 1 < < 3 ) + 5 ) , <nl> - <nl> - MFHI = ( ( 2 < < 3 ) + 0 ) , <nl> - CLZ_R6 = ( ( 2 < < 3 ) + 0 ) , <nl> - CLO_R6 = ( ( 2 < < 3 ) + 1 ) , <nl> - MFLO = ( ( 2 < < 3 ) + 2 ) , <nl> - <nl> - MULT = ( ( 3 < < 3 ) + 0 ) , <nl> - MULTU = ( ( 3 < < 3 ) + 1 ) , <nl> - DIV = ( ( 3 < < 3 ) + 2 ) , <nl> - DIVU = ( ( 3 < < 3 ) + 3 ) , <nl> - <nl> - ADD = ( ( 4 < < 3 ) + 0 ) , <nl> - ADDU = ( ( 4 < < 3 ) + 1 ) , <nl> - SUB = ( ( 4 < < 3 ) + 2 ) , <nl> - SUBU = ( ( 4 < < 3 ) + 3 ) , <nl> - AND = ( ( 4 < < 3 ) + 4 ) , <nl> - OR = ( ( 4 < < 3 ) + 5 ) , <nl> - XOR = ( ( 4 < < 3 ) + 6 ) , <nl> - NOR = ( ( 4 < < 3 ) + 7 ) , <nl> - <nl> - SLT = ( ( 5 < < 3 ) + 2 ) , <nl> - SLTU = ( ( 5 < < 3 ) + 3 ) , <nl> - <nl> - TGE = ( ( 6 < < 3 ) + 0 ) , <nl> - TGEU = ( ( 6 < < 3 ) + 1 ) , <nl> - TLT = ( ( 6 < < 3 ) + 2 ) , <nl> - TLTU = ( ( 6 < < 3 ) + 3 ) , <nl> - TEQ = ( ( 6 < < 3 ) + 4 ) , <nl> - SELEQZ_S = ( ( 6 < < 3 ) + 5 ) , <nl> - TNE = ( ( 6 < < 3 ) + 6 ) , <nl> - SELNEZ_S = ( ( 6 < < 3 ) + 7 ) , <nl> + SLL = ( ( 0 < < 3 ) + 0 ) , <nl> + MOVCI = ( ( 0 < < 3 ) + 1 ) , <nl> + SRL = ( ( 0 < < 3 ) + 2 ) , <nl> + SRA = ( ( 0 < < 3 ) + 3 ) , <nl> + SLLV = ( ( 0 < < 3 ) + 4 ) , <nl> + SRLV = ( ( 0 < < 3 ) + 6 ) , <nl> + SRAV = ( ( 0 < < 3 ) + 7 ) , <nl> + <nl> + JR = ( ( 1 < < 3 ) + 0 ) , <nl> + JALR = ( ( 1 < < 3 ) + 1 ) , <nl> + MOVZ = ( ( 1 < < 3 ) + 2 ) , <nl> + MOVN = ( ( 1 < < 3 ) + 3 ) , <nl> + BREAK = ( ( 1 < < 3 ) + 5 ) , <nl> + <nl> + MFHI = ( ( 2 < < 3 ) + 0 ) , <nl> + CLZ_R6 = ( ( 2 < < 3 ) + 0 ) , <nl> + 
CLO_R6 = ( ( 2 < < 3 ) + 1 ) , <nl> + MFLO = ( ( 2 < < 3 ) + 2 ) , <nl> + <nl> + MULT = ( ( 3 < < 3 ) + 0 ) , <nl> + MULTU = ( ( 3 < < 3 ) + 1 ) , <nl> + DIV = ( ( 3 < < 3 ) + 2 ) , <nl> + DIVU = ( ( 3 < < 3 ) + 3 ) , <nl> + <nl> + ADD = ( ( 4 < < 3 ) + 0 ) , <nl> + ADDU = ( ( 4 < < 3 ) + 1 ) , <nl> + SUB = ( ( 4 < < 3 ) + 2 ) , <nl> + SUBU = ( ( 4 < < 3 ) + 3 ) , <nl> + AND = ( ( 4 < < 3 ) + 4 ) , <nl> + OR = ( ( 4 < < 3 ) + 5 ) , <nl> + XOR = ( ( 4 < < 3 ) + 6 ) , <nl> + NOR = ( ( 4 < < 3 ) + 7 ) , <nl> + <nl> + SLT = ( ( 5 < < 3 ) + 2 ) , <nl> + SLTU = ( ( 5 < < 3 ) + 3 ) , <nl> + <nl> + TGE = ( ( 6 < < 3 ) + 0 ) , <nl> + TGEU = ( ( 6 < < 3 ) + 1 ) , <nl> + TLT = ( ( 6 < < 3 ) + 2 ) , <nl> + TLTU = ( ( 6 < < 3 ) + 3 ) , <nl> + TEQ = ( ( 6 < < 3 ) + 4 ) , <nl> + SELEQZ_S = ( ( 6 < < 3 ) + 5 ) , <nl> + TNE = ( ( 6 < < 3 ) + 6 ) , <nl> + SELNEZ_S = ( ( 6 < < 3 ) + 7 ) , <nl> <nl> / / Multiply integers in r6 . <nl> - MUL_MUH = ( ( 3 < < 3 ) + 0 ) , / / MUL , MUH . <nl> - MUL_MUH_U = ( ( 3 < < 3 ) + 1 ) , / / MUL_U , MUH_U . <nl> + MUL_MUH = ( ( 3 < < 3 ) + 0 ) , / / MUL , MUH . <nl> + MUL_MUH_U = ( ( 3 < < 3 ) + 1 ) , / / MUL_U , MUH_U . <nl> + RINT = ( ( 3 < < 3 ) + 2 ) , <nl> <nl> - MUL_OP = ( ( 0 < < 3 ) + 2 ) , <nl> - MUH_OP = ( ( 0 < < 3 ) + 3 ) , <nl> - DIV_OP = ( ( 0 < < 3 ) + 2 ) , <nl> - MOD_OP = ( ( 0 < < 3 ) + 3 ) , <nl> + MUL_OP = ( ( 0 < < 3 ) + 2 ) , <nl> + MUH_OP = ( ( 0 < < 3 ) + 3 ) , <nl> + DIV_OP = ( ( 0 < < 3 ) + 2 ) , <nl> + MOD_OP = ( ( 0 < < 3 ) + 3 ) , <nl> <nl> - DIV_MOD = ( ( 3 < < 3 ) + 2 ) , <nl> - DIV_MOD_U = ( ( 3 < < 3 ) + 3 ) , <nl> + DIV_MOD = ( ( 3 < < 3 ) + 2 ) , <nl> + DIV_MOD_U = ( ( 3 < < 3 ) + 3 ) , <nl> <nl> / / SPECIAL2 Encoding of Function Field . <nl> - MUL = ( ( 0 < < 3 ) + 2 ) , <nl> - CLZ = ( ( 4 < < 3 ) + 0 ) , <nl> - CLO = ( ( 4 < < 3 ) + 1 ) , <nl> + MUL = ( ( 0 < < 3 ) + 2 ) , <nl> + CLZ = ( ( 4 < < 3 ) + 0 ) , <nl> + CLO = ( ( 4 < < 3 ) + 1 ) , <nl> <nl> / / SPECIAL3 Encoding of Function Field . <nl> - EXT = ( ( 0 < < 3 ) + 0 ) , <nl> - INS = ( ( 0 < < 3 ) + 4 ) , <nl> + EXT = ( ( 0 < < 3 ) + 0 ) , <nl> + INS = ( ( 0 < < 3 ) + 4 ) , <nl> <nl> / / REGIMM encoding of rt Field . <nl> - BLTZ = ( ( 0 < < 3 ) + 0 ) < < 16 , <nl> - BGEZ = ( ( 0 < < 3 ) + 1 ) < < 16 , <nl> - BLTZAL = ( ( 2 < < 3 ) + 0 ) < < 16 , <nl> - BGEZAL = ( ( 2 < < 3 ) + 1 ) < < 16 , <nl> - BGEZALL = ( ( 2 < < 3 ) + 3 ) < < 16 , <nl> + BLTZ = ( ( 0 < < 3 ) + 0 ) < < 16 , <nl> + BGEZ = ( ( 0 < < 3 ) + 1 ) < < 16 , <nl> + BLTZAL = ( ( 2 < < 3 ) + 0 ) < < 16 , <nl> + BGEZAL = ( ( 2 < < 3 ) + 1 ) < < 16 , <nl> + BGEZALL = ( ( 2 < < 3 ) + 3 ) < < 16 , <nl> <nl> / / COP1 Encoding of rs Field . 
<nl> - MFC1 = ( ( 0 < < 3 ) + 0 ) < < 21 , <nl> - CFC1 = ( ( 0 < < 3 ) + 2 ) < < 21 , <nl> - MFHC1 = ( ( 0 < < 3 ) + 3 ) < < 21 , <nl> - MTC1 = ( ( 0 < < 3 ) + 4 ) < < 21 , <nl> - CTC1 = ( ( 0 < < 3 ) + 6 ) < < 21 , <nl> - MTHC1 = ( ( 0 < < 3 ) + 7 ) < < 21 , <nl> - BC1 = ( ( 1 < < 3 ) + 0 ) < < 21 , <nl> - S = ( ( 2 < < 3 ) + 0 ) < < 21 , <nl> - D = ( ( 2 < < 3 ) + 1 ) < < 21 , <nl> - W = ( ( 2 < < 3 ) + 4 ) < < 21 , <nl> - L = ( ( 2 < < 3 ) + 5 ) < < 21 , <nl> - PS = ( ( 2 < < 3 ) + 6 ) < < 21 , <nl> + MFC1 = ( ( 0 < < 3 ) + 0 ) < < 21 , <nl> + CFC1 = ( ( 0 < < 3 ) + 2 ) < < 21 , <nl> + MFHC1 = ( ( 0 < < 3 ) + 3 ) < < 21 , <nl> + MTC1 = ( ( 0 < < 3 ) + 4 ) < < 21 , <nl> + CTC1 = ( ( 0 < < 3 ) + 6 ) < < 21 , <nl> + MTHC1 = ( ( 0 < < 3 ) + 7 ) < < 21 , <nl> + BC1 = ( ( 1 < < 3 ) + 0 ) < < 21 , <nl> + S = ( ( 2 < < 3 ) + 0 ) < < 21 , <nl> + D = ( ( 2 < < 3 ) + 1 ) < < 21 , <nl> + W = ( ( 2 < < 3 ) + 4 ) < < 21 , <nl> + L = ( ( 2 < < 3 ) + 5 ) < < 21 , <nl> + PS = ( ( 2 < < 3 ) + 6 ) < < 21 , <nl> / / COP1 Encoding of Function Field When rs = S . <nl> - ROUND_L_S = ( ( 1 < < 3 ) + 0 ) , <nl> - TRUNC_L_S = ( ( 1 < < 3 ) + 1 ) , <nl> - CEIL_L_S = ( ( 1 < < 3 ) + 2 ) , <nl> - FLOOR_L_S = ( ( 1 < < 3 ) + 3 ) , <nl> - ROUND_W_S = ( ( 1 < < 3 ) + 4 ) , <nl> - TRUNC_W_S = ( ( 1 < < 3 ) + 5 ) , <nl> - CEIL_W_S = ( ( 1 < < 3 ) + 6 ) , <nl> - FLOOR_W_S = ( ( 1 < < 3 ) + 7 ) , <nl> - CVT_D_S = ( ( 4 < < 3 ) + 1 ) , <nl> - CVT_W_S = ( ( 4 < < 3 ) + 4 ) , <nl> - CVT_L_S = ( ( 4 < < 3 ) + 5 ) , <nl> - CVT_PS_S = ( ( 4 < < 3 ) + 6 ) , <nl> + ROUND_L_S = ( ( 1 < < 3 ) + 0 ) , <nl> + TRUNC_L_S = ( ( 1 < < 3 ) + 1 ) , <nl> + CEIL_L_S = ( ( 1 < < 3 ) + 2 ) , <nl> + FLOOR_L_S = ( ( 1 < < 3 ) + 3 ) , <nl> + ROUND_W_S = ( ( 1 < < 3 ) + 4 ) , <nl> + TRUNC_W_S = ( ( 1 < < 3 ) + 5 ) , <nl> + CEIL_W_S = ( ( 1 < < 3 ) + 6 ) , <nl> + FLOOR_W_S = ( ( 1 < < 3 ) + 7 ) , <nl> + CVT_D_S = ( ( 4 < < 3 ) + 1 ) , <nl> + CVT_W_S = ( ( 4 < < 3 ) + 4 ) , <nl> + CVT_L_S = ( ( 4 < < 3 ) + 5 ) , <nl> + CVT_PS_S = ( ( 4 < < 3 ) + 6 ) , <nl> / / COP1 Encoding of Function Field When rs = D . 
<nl> - ADD_D = ( ( 0 < < 3 ) + 0 ) , <nl> - SUB_D = ( ( 0 < < 3 ) + 1 ) , <nl> - MUL_D = ( ( 0 < < 3 ) + 2 ) , <nl> - DIV_D = ( ( 0 < < 3 ) + 3 ) , <nl> - SQRT_D = ( ( 0 < < 3 ) + 4 ) , <nl> - ABS_D = ( ( 0 < < 3 ) + 5 ) , <nl> - MOV_D = ( ( 0 < < 3 ) + 6 ) , <nl> - NEG_D = ( ( 0 < < 3 ) + 7 ) , <nl> - ROUND_L_D = ( ( 1 < < 3 ) + 0 ) , <nl> - TRUNC_L_D = ( ( 1 < < 3 ) + 1 ) , <nl> - CEIL_L_D = ( ( 1 < < 3 ) + 2 ) , <nl> - FLOOR_L_D = ( ( 1 < < 3 ) + 3 ) , <nl> - ROUND_W_D = ( ( 1 < < 3 ) + 4 ) , <nl> - TRUNC_W_D = ( ( 1 < < 3 ) + 5 ) , <nl> - CEIL_W_D = ( ( 1 < < 3 ) + 6 ) , <nl> - FLOOR_W_D = ( ( 1 < < 3 ) + 7 ) , <nl> - MIN = ( ( 3 < < 3 ) + 4 ) , <nl> - MINA = ( ( 3 < < 3 ) + 5 ) , <nl> - MAX = ( ( 3 < < 3 ) + 6 ) , <nl> - MAXA = ( ( 3 < < 3 ) + 7 ) , <nl> - CVT_S_D = ( ( 4 < < 3 ) + 0 ) , <nl> - CVT_W_D = ( ( 4 < < 3 ) + 4 ) , <nl> - CVT_L_D = ( ( 4 < < 3 ) + 5 ) , <nl> - C_F_D = ( ( 6 < < 3 ) + 0 ) , <nl> - C_UN_D = ( ( 6 < < 3 ) + 1 ) , <nl> - C_EQ_D = ( ( 6 < < 3 ) + 2 ) , <nl> - C_UEQ_D = ( ( 6 < < 3 ) + 3 ) , <nl> - C_OLT_D = ( ( 6 < < 3 ) + 4 ) , <nl> - C_ULT_D = ( ( 6 < < 3 ) + 5 ) , <nl> - C_OLE_D = ( ( 6 < < 3 ) + 6 ) , <nl> - C_ULE_D = ( ( 6 < < 3 ) + 7 ) , <nl> + ADD_D = ( ( 0 < < 3 ) + 0 ) , <nl> + SUB_D = ( ( 0 < < 3 ) + 1 ) , <nl> + MUL_D = ( ( 0 < < 3 ) + 2 ) , <nl> + DIV_D = ( ( 0 < < 3 ) + 3 ) , <nl> + SQRT_D = ( ( 0 < < 3 ) + 4 ) , <nl> + ABS_D = ( ( 0 < < 3 ) + 5 ) , <nl> + MOV_D = ( ( 0 < < 3 ) + 6 ) , <nl> + NEG_D = ( ( 0 < < 3 ) + 7 ) , <nl> + ROUND_L_D = ( ( 1 < < 3 ) + 0 ) , <nl> + TRUNC_L_D = ( ( 1 < < 3 ) + 1 ) , <nl> + CEIL_L_D = ( ( 1 < < 3 ) + 2 ) , <nl> + FLOOR_L_D = ( ( 1 < < 3 ) + 3 ) , <nl> + ROUND_W_D = ( ( 1 < < 3 ) + 4 ) , <nl> + TRUNC_W_D = ( ( 1 < < 3 ) + 5 ) , <nl> + CEIL_W_D = ( ( 1 < < 3 ) + 6 ) , <nl> + FLOOR_W_D = ( ( 1 < < 3 ) + 7 ) , <nl> + MIN = ( ( 3 < < 3 ) + 4 ) , <nl> + MINA = ( ( 3 < < 3 ) + 5 ) , <nl> + MAX = ( ( 3 < < 3 ) + 6 ) , <nl> + MAXA = ( ( 3 < < 3 ) + 7 ) , <nl> + CVT_S_D = ( ( 4 < < 3 ) + 0 ) , <nl> + CVT_W_D = ( ( 4 < < 3 ) + 4 ) , <nl> + CVT_L_D = ( ( 4 < < 3 ) + 5 ) , <nl> + C_F_D = ( ( 6 < < 3 ) + 0 ) , <nl> + C_UN_D = ( ( 6 < < 3 ) + 1 ) , <nl> + C_EQ_D = ( ( 6 < < 3 ) + 2 ) , <nl> + C_UEQ_D = ( ( 6 < < 3 ) + 3 ) , <nl> + C_OLT_D = ( ( 6 < < 3 ) + 4 ) , <nl> + C_ULT_D = ( ( 6 < < 3 ) + 5 ) , <nl> + C_OLE_D = ( ( 6 < < 3 ) + 6 ) , <nl> + C_ULE_D = ( ( 6 < < 3 ) + 7 ) , <nl> / / COP1 Encoding of Function Field When rs = W or L . <nl> - CVT_S_W = ( ( 4 < < 3 ) + 0 ) , <nl> - CVT_D_W = ( ( 4 < < 3 ) + 1 ) , <nl> - CVT_S_L = ( ( 4 < < 3 ) + 0 ) , <nl> - CVT_D_L = ( ( 4 < < 3 ) + 1 ) , <nl> - BC1EQZ = ( ( 2 < < 2 ) + 1 ) < < 21 , <nl> - BC1NEZ = ( ( 3 < < 2 ) + 1 ) < < 21 , <nl> + CVT_S_W = ( ( 4 < < 3 ) + 0 ) , <nl> + CVT_D_W = ( ( 4 < < 3 ) + 1 ) , <nl> + CVT_S_L = ( ( 4 < < 3 ) + 0 ) , <nl> + CVT_D_L = ( ( 4 < < 3 ) + 1 ) , <nl> + BC1EQZ = ( ( 2 < < 2 ) + 1 ) < < 21 , <nl> + BC1NEZ = ( ( 3 < < 2 ) + 1 ) < < 21 , <nl> / / COP1 CMP positive predicates Bit 5 . . 4 = 00 . 
<nl> - CMP_AF = ( ( 0 < < 3 ) + 0 ) , <nl> - CMP_UN = ( ( 0 < < 3 ) + 1 ) , <nl> - CMP_EQ = ( ( 0 < < 3 ) + 2 ) , <nl> - CMP_UEQ = ( ( 0 < < 3 ) + 3 ) , <nl> - CMP_LT = ( ( 0 < < 3 ) + 4 ) , <nl> - CMP_ULT = ( ( 0 < < 3 ) + 5 ) , <nl> - CMP_LE = ( ( 0 < < 3 ) + 6 ) , <nl> - CMP_ULE = ( ( 0 < < 3 ) + 7 ) , <nl> - CMP_SAF = ( ( 1 < < 3 ) + 0 ) , <nl> - CMP_SUN = ( ( 1 < < 3 ) + 1 ) , <nl> - CMP_SEQ = ( ( 1 < < 3 ) + 2 ) , <nl> - CMP_SUEQ = ( ( 1 < < 3 ) + 3 ) , <nl> - CMP_SSLT = ( ( 1 < < 3 ) + 4 ) , <nl> - CMP_SSULT = ( ( 1 < < 3 ) + 5 ) , <nl> - CMP_SLE = ( ( 1 < < 3 ) + 6 ) , <nl> - CMP_SULE = ( ( 1 < < 3 ) + 7 ) , <nl> + CMP_AF = ( ( 0 < < 3 ) + 0 ) , <nl> + CMP_UN = ( ( 0 < < 3 ) + 1 ) , <nl> + CMP_EQ = ( ( 0 < < 3 ) + 2 ) , <nl> + CMP_UEQ = ( ( 0 < < 3 ) + 3 ) , <nl> + CMP_LT = ( ( 0 < < 3 ) + 4 ) , <nl> + CMP_ULT = ( ( 0 < < 3 ) + 5 ) , <nl> + CMP_LE = ( ( 0 < < 3 ) + 6 ) , <nl> + CMP_ULE = ( ( 0 < < 3 ) + 7 ) , <nl> + CMP_SAF = ( ( 1 < < 3 ) + 0 ) , <nl> + CMP_SUN = ( ( 1 < < 3 ) + 1 ) , <nl> + CMP_SEQ = ( ( 1 < < 3 ) + 2 ) , <nl> + CMP_SUEQ = ( ( 1 < < 3 ) + 3 ) , <nl> + CMP_SSLT = ( ( 1 < < 3 ) + 4 ) , <nl> + CMP_SSULT = ( ( 1 < < 3 ) + 5 ) , <nl> + CMP_SLE = ( ( 1 < < 3 ) + 6 ) , <nl> + CMP_SULE = ( ( 1 < < 3 ) + 7 ) , <nl> / / COP1 CMP negative predicates Bit 5 . . 4 = 01 . <nl> - CMP_AT = ( ( 2 < < 3 ) + 0 ) , / / Reserved , not implemented . <nl> - CMP_OR = ( ( 2 < < 3 ) + 1 ) , <nl> - CMP_UNE = ( ( 2 < < 3 ) + 2 ) , <nl> - CMP_NE = ( ( 2 < < 3 ) + 3 ) , <nl> - CMP_UGE = ( ( 2 < < 3 ) + 4 ) , / / Reserved , not implemented . <nl> - CMP_OGE = ( ( 2 < < 3 ) + 5 ) , / / Reserved , not implemented . <nl> - CMP_UGT = ( ( 2 < < 3 ) + 6 ) , / / Reserved , not implemented . <nl> - CMP_OGT = ( ( 2 < < 3 ) + 7 ) , / / Reserved , not implemented . <nl> - CMP_SAT = ( ( 3 < < 3 ) + 0 ) , / / Reserved , not implemented . <nl> - CMP_SOR = ( ( 3 < < 3 ) + 1 ) , <nl> - CMP_SUNE = ( ( 3 < < 3 ) + 2 ) , <nl> - CMP_SNE = ( ( 3 < < 3 ) + 3 ) , <nl> - CMP_SUGE = ( ( 3 < < 3 ) + 4 ) , / / Reserved , not implemented . <nl> - CMP_SOGE = ( ( 3 < < 3 ) + 5 ) , / / Reserved , not implemented . <nl> - CMP_SUGT = ( ( 3 < < 3 ) + 6 ) , / / Reserved , not implemented . <nl> - CMP_SOGT = ( ( 3 < < 3 ) + 7 ) , / / Reserved , not implemented . <nl> - <nl> - SEL = ( ( 2 < < 3 ) + 0 ) , <nl> - SELEQZ_C = ( ( 2 < < 3 ) + 4 ) , / / COP1 on FPR registers . <nl> - SELNEZ_C = ( ( 2 < < 3 ) + 7 ) , / / COP1 on FPR registers . <nl> + CMP_AT = ( ( 2 < < 3 ) + 0 ) , / / Reserved , not implemented . <nl> + CMP_OR = ( ( 2 < < 3 ) + 1 ) , <nl> + CMP_UNE = ( ( 2 < < 3 ) + 2 ) , <nl> + CMP_NE = ( ( 2 < < 3 ) + 3 ) , <nl> + CMP_UGE = ( ( 2 < < 3 ) + 4 ) , / / Reserved , not implemented . <nl> + CMP_OGE = ( ( 2 < < 3 ) + 5 ) , / / Reserved , not implemented . <nl> + CMP_UGT = ( ( 2 < < 3 ) + 6 ) , / / Reserved , not implemented . <nl> + CMP_OGT = ( ( 2 < < 3 ) + 7 ) , / / Reserved , not implemented . <nl> + CMP_SAT = ( ( 3 < < 3 ) + 0 ) , / / Reserved , not implemented . <nl> + CMP_SOR = ( ( 3 < < 3 ) + 1 ) , <nl> + CMP_SUNE = ( ( 3 < < 3 ) + 2 ) , <nl> + CMP_SNE = ( ( 3 < < 3 ) + 3 ) , <nl> + CMP_SUGE = ( ( 3 < < 3 ) + 4 ) , / / Reserved , not implemented . <nl> + CMP_SOGE = ( ( 3 < < 3 ) + 5 ) , / / Reserved , not implemented . <nl> + CMP_SUGT = ( ( 3 < < 3 ) + 6 ) , / / Reserved , not implemented . <nl> + CMP_SOGT = ( ( 3 < < 3 ) + 7 ) , / / Reserved , not implemented . <nl> + <nl> + SEL = ( ( 2 < < 3 ) + 0 ) , <nl> + SELEQZ_C = ( ( 2 < < 3 ) + 4 ) , / / COP1 on FPR registers . 
<nl> + SELNEZ_C = ( ( 2 < < 3 ) + 7 ) , / / COP1 on FPR registers . <nl> / / COP1 Encoding of Function Field When rs = PS . <nl> / / COP1X Encoding of Function Field . <nl> - MADD_D = ( ( 4 < < 3 ) + 1 ) , <nl> + MADD_D = ( ( 4 < < 3 ) + 1 ) , <nl> <nl> - NULLSF = 0 <nl> + NULLSF = 0 <nl> } ; <nl> <nl> <nl> mmm a / src / mips / disasm - mips . cc <nl> ppp b / src / mips / disasm - mips . cc <nl> void Decoder : : Unknown ( Instruction * instr ) { <nl> <nl> bool Decoder : : DecodeTypeRegisterRsType ( Instruction * instr ) { <nl> switch ( instr - > FunctionFieldRaw ( ) ) { <nl> + case RINT : <nl> + Format ( instr , " rint . ' t ' fd , ' fs " ) ; <nl> + break ; <nl> case MIN : <nl> Format ( instr , " min . ' t ' fd , ' fs , ' ft " ) ; <nl> break ; <nl> mmm a / src / mips / simulator - mips . cc <nl> ppp b / src / mips / simulator - mips . cc <nl> bool Simulator : : test_fcsr_bit ( uint32_t cc ) { <nl> } <nl> <nl> <nl> + void Simulator : : set_fcsr_rounding_mode ( FPURoundingMode mode ) { <nl> + FCSR_ | = mode & kFPURoundingModeMask ; <nl> + } <nl> + <nl> + <nl> + unsigned int Simulator : : get_fcsr_rounding_mode ( ) { <nl> + return FCSR_ & kFPURoundingModeMask ; <nl> + } <nl> + <nl> + <nl> / / Sets the rounding error codes in FCSR based on the result of the rounding . <nl> / / Returns true if the operation was invalid . <nl> bool Simulator : : set_fcsr_round_error ( double original , double rounded ) { <nl> bool Simulator : : set_fcsr_round_error ( double original , double rounded ) { <nl> } <nl> <nl> <nl> + void Simulator : : round_according_to_fcsr ( double toRound , double & rounded , <nl> + int32_t & rounded_int , double fs ) { <nl> + / / 0 RN ( round to nearest ) : Round a result to the nearest <nl> + / / representable value ; if the result is exactly halfway between <nl> + / / two representable values , round to zero . Behave like round_w_d . <nl> + <nl> + / / 1 RZ ( round toward zero ) : Round a result to the closest <nl> + / / representable value whose absolute value is less than or <nl> + / / equal to the infinitely accurate result . Behave like trunc_w_d . <nl> + <nl> + / / 2 RP ( round up , or toward infinity ) : Round a result to the <nl> + / / next representable value up . Behave like ceil_w_d . <nl> + <nl> + / / 3 RD ( round down , or toward − infinity ) : Round a result to <nl> + / / the next representable value down . Behave like floor_w_d . <nl> + switch ( get_fcsr_rounding_mode ( ) ) { <nl> + case kRoundToNearest : <nl> + rounded = std : : floor ( fs + 0 . 5 ) ; <nl> + rounded_int = static_cast < int32_t > ( rounded ) ; <nl> + if ( ( rounded_int & 1 ) ! = 0 & & rounded_int - fs = = 0 . 5 ) { <nl> + / / If the number is halfway between two integers , <nl> + / / round to the even one . <nl> + rounded_int - - ; <nl> + } <nl> + break ; <nl> + case kRoundToZero : <nl> + rounded = trunc ( fs ) ; <nl> + rounded_int = static_cast < int32_t > ( rounded ) ; <nl> + break ; <nl> + case kRoundToPlusInf : <nl> + rounded = std : : ceil ( fs ) ; <nl> + rounded_int = static_cast < int32_t > ( rounded ) ; <nl> + break ; <nl> + case kRoundToMinusInf : <nl> + rounded = std : : floor ( fs ) ; <nl> + rounded_int = static_cast < int32_t > ( rounded ) ; <nl> + break ; <nl> + } <nl> + } <nl> + <nl> + <nl> / / Raw access to the PC register . 
<nl> void Simulator : : set_pc ( int32_t value ) { <nl> pc_modified_ = true ; <nl> void Simulator : : DecodeTypeRegisterDRsType ( Instruction * instr , <nl> cc = instr - > FCccValue ( ) ; <nl> fcsr_cc = get_fcsr_condition_bit ( cc ) ; <nl> switch ( instr - > FunctionFieldRaw ( ) ) { <nl> + case RINT : { <nl> + DCHECK ( IsMipsArchVariant ( kMips32r6 ) ) ; <nl> + double result , temp , temp_result ; <nl> + double upper = std : : ceil ( fs ) ; <nl> + double lower = std : : floor ( fs ) ; <nl> + switch ( get_fcsr_rounding_mode ( ) ) { <nl> + case kRoundToNearest : <nl> + if ( upper - fs < fs - lower ) { <nl> + result = upper ; <nl> + } else if ( upper - fs > fs - lower ) { <nl> + result = lower ; <nl> + } else { <nl> + temp_result = upper / 2 ; <nl> + double reminder = modf ( temp_result , & temp ) ; <nl> + if ( reminder = = 0 ) { <nl> + result = upper ; <nl> + } else { <nl> + result = lower ; <nl> + } <nl> + } <nl> + break ; <nl> + case kRoundToZero : <nl> + result = ( fs > 0 ? lower : upper ) ; <nl> + break ; <nl> + case kRoundToPlusInf : <nl> + result = upper ; <nl> + break ; <nl> + case kRoundToMinusInf : <nl> + result = lower ; <nl> + break ; <nl> + } <nl> + set_fpu_register_double ( fd_reg , result ) ; <nl> + if ( result ! = fs ) { <nl> + set_fcsr_bit ( kFCSRInexactFlagBit , true ) ; <nl> + } <nl> + break ; <nl> + } <nl> case SEL : <nl> DCHECK ( IsMipsArchVariant ( kMips32r6 ) ) ; <nl> set_fpu_register_double ( fd_reg , ( fd_int & 0x1 ) = = 0 ? fs : ft ) ; <nl> void Simulator : : DecodeTypeRegisterDRsType ( Instruction * instr , <nl> case C_ULE_D : <nl> set_fcsr_bit ( fcsr_cc , ( fs < = ft ) | | ( std : : isnan ( fs ) | | std : : isnan ( ft ) ) ) ; <nl> break ; <nl> - case CVT_W_D : / / Convert double to word . <nl> - / / Rounding modes are not yet supported . <nl> - DCHECK ( ( FCSR_ & 3 ) = = 0 ) ; <nl> - / / In rounding mode 0 it should behave like ROUND . <nl> + case CVT_W_D : { / / Convert double to word . <nl> + double rounded ; <nl> + int32_t result ; <nl> + round_according_to_fcsr ( fs , rounded , result , fs ) ; <nl> + set_fpu_register_word ( fd_reg , result ) ; <nl> + if ( set_fcsr_round_error ( fs , rounded ) ) { <nl> + set_fpu_register_word ( fd_reg , kFPUInvalidResult ) ; <nl> + } <nl> + } break ; <nl> case ROUND_W_D : / / Round double to word ( round half to even ) . <nl> { <nl> double rounded = std : : floor ( fs + 0 . 5 ) ; <nl> mmm a / src / mips / simulator - mips . h <nl> ppp b / src / mips / simulator - mips . h <nl> class Simulator { <nl> double get_fpu_register_double ( int fpureg ) const ; <nl> void set_fcsr_bit ( uint32_t cc , bool value ) ; <nl> bool test_fcsr_bit ( uint32_t cc ) ; <nl> + void set_fcsr_rounding_mode ( FPURoundingMode mode ) ; <nl> + unsigned int get_fcsr_rounding_mode ( ) ; <nl> bool set_fcsr_round_error ( double original , double rounded ) ; <nl> - <nl> + void round_according_to_fcsr ( double toRound , double & rounded , <nl> + int32_t & rounded_int , double fs ) ; <nl> / / Special case of set_register and get_register to access the raw PC value . <nl> void set_pc ( int32_t value ) ; <nl> int32_t get_pc ( ) const ; <nl> mmm a / src / mips64 / assembler - mips64 . cc <nl> ppp b / src / mips64 / assembler - mips64 . 
cc <nl> void Assembler : : ceil_w_d ( FPURegister fd , FPURegister fs ) { <nl> } <nl> <nl> <nl> + void Assembler : : rint_s ( FPURegister fd , FPURegister fs ) { rint ( S , fd , fs ) ; } <nl> + <nl> + <nl> + void Assembler : : rint_d ( FPURegister fd , FPURegister fs ) { rint ( D , fd , fs ) ; } <nl> + <nl> + <nl> + void Assembler : : rint ( SecondaryField fmt , FPURegister fd , FPURegister fs ) { <nl> + DCHECK ( kArchVariant = = kMips64r6 ) ; <nl> + GenInstrRegister ( COP1 , D , f0 , fs , fd , RINT ) ; <nl> + } <nl> + <nl> + <nl> void Assembler : : cvt_l_s ( FPURegister fd , FPURegister fs ) { <nl> DCHECK ( kArchVariant = = kMips64r2 ) ; <nl> GenInstrRegister ( COP1 , S , f0 , fs , fd , CVT_L_S ) ; <nl> mmm a / src / mips64 / assembler - mips64 . h <nl> ppp b / src / mips64 / assembler - mips64 . h <nl> class Assembler : public AssemblerBase { <nl> void floor_w_d ( FPURegister fd , FPURegister fs ) ; <nl> void ceil_w_s ( FPURegister fd , FPURegister fs ) ; <nl> void ceil_w_d ( FPURegister fd , FPURegister fs ) ; <nl> + void rint_s ( FPURegister fd , FPURegister fs ) ; <nl> + void rint_d ( FPURegister fd , FPURegister fs ) ; <nl> + void rint ( SecondaryField fmt , FPURegister fd , FPURegister fs ) ; <nl> + <nl> <nl> void cvt_l_s ( FPURegister fd , FPURegister fs ) ; <nl> void cvt_l_d ( FPURegister fd , FPURegister fs ) ; <nl> mmm a / src / mips64 / constants - mips64 . h <nl> ppp b / src / mips64 / constants - mips64 . h <nl> enum Opcode { <nl> <nl> enum SecondaryField { <nl> / / SPECIAL Encoding of Function Field . <nl> - SLL = ( ( 0 < < 3 ) + 0 ) , <nl> - MOVCI = ( ( 0 < < 3 ) + 1 ) , <nl> - SRL = ( ( 0 < < 3 ) + 2 ) , <nl> - SRA = ( ( 0 < < 3 ) + 3 ) , <nl> - SLLV = ( ( 0 < < 3 ) + 4 ) , <nl> - SRLV = ( ( 0 < < 3 ) + 6 ) , <nl> - SRAV = ( ( 0 < < 3 ) + 7 ) , <nl> - <nl> - JR = ( ( 1 < < 3 ) + 0 ) , <nl> - JALR = ( ( 1 < < 3 ) + 1 ) , <nl> - MOVZ = ( ( 1 < < 3 ) + 2 ) , <nl> - MOVN = ( ( 1 < < 3 ) + 3 ) , <nl> - BREAK = ( ( 1 < < 3 ) + 5 ) , <nl> - <nl> - MFHI = ( ( 2 < < 3 ) + 0 ) , <nl> - CLZ_R6 = ( ( 2 < < 3 ) + 0 ) , <nl> - CLO_R6 = ( ( 2 < < 3 ) + 1 ) , <nl> - MFLO = ( ( 2 < < 3 ) + 2 ) , <nl> - DSLLV = ( ( 2 < < 3 ) + 4 ) , <nl> - DSRLV = ( ( 2 < < 3 ) + 6 ) , <nl> - DSRAV = ( ( 2 < < 3 ) + 7 ) , <nl> - <nl> - MULT = ( ( 3 < < 3 ) + 0 ) , <nl> - MULTU = ( ( 3 < < 3 ) + 1 ) , <nl> - DIV = ( ( 3 < < 3 ) + 2 ) , <nl> - DIVU = ( ( 3 < < 3 ) + 3 ) , <nl> - DMULT = ( ( 3 < < 3 ) + 4 ) , <nl> - DMULTU = ( ( 3 < < 3 ) + 5 ) , <nl> - DDIV = ( ( 3 < < 3 ) + 6 ) , <nl> - DDIVU = ( ( 3 < < 3 ) + 7 ) , <nl> - <nl> - ADD = ( ( 4 < < 3 ) + 0 ) , <nl> - ADDU = ( ( 4 < < 3 ) + 1 ) , <nl> - SUB = ( ( 4 < < 3 ) + 2 ) , <nl> - SUBU = ( ( 4 < < 3 ) + 3 ) , <nl> - AND = ( ( 4 < < 3 ) + 4 ) , <nl> - OR = ( ( 4 < < 3 ) + 5 ) , <nl> - XOR = ( ( 4 < < 3 ) + 6 ) , <nl> - NOR = ( ( 4 < < 3 ) + 7 ) , <nl> - <nl> - SLT = ( ( 5 < < 3 ) + 2 ) , <nl> - SLTU = ( ( 5 < < 3 ) + 3 ) , <nl> - DADD = ( ( 5 < < 3 ) + 4 ) , <nl> - DADDU = ( ( 5 < < 3 ) + 5 ) , <nl> - DSUB = ( ( 5 < < 3 ) + 6 ) , <nl> - DSUBU = ( ( 5 < < 3 ) + 7 ) , <nl> - <nl> - TGE = ( ( 6 < < 3 ) + 0 ) , <nl> - TGEU = ( ( 6 < < 3 ) + 1 ) , <nl> - TLT = ( ( 6 < < 3 ) + 2 ) , <nl> - TLTU = ( ( 6 < < 3 ) + 3 ) , <nl> - TEQ = ( ( 6 < < 3 ) + 4 ) , <nl> - SELEQZ_S = ( ( 6 < < 3 ) + 5 ) , <nl> - TNE = ( ( 6 < < 3 ) + 6 ) , <nl> - SELNEZ_S = ( ( 6 < < 3 ) + 7 ) , <nl> - <nl> - DSLL = ( ( 7 < < 3 ) + 0 ) , <nl> - DSRL = ( ( 7 < < 3 ) + 2 ) , <nl> - DSRA = ( ( 7 < < 3 ) + 3 ) , <nl> - DSLL32 = ( ( 7 < < 3 ) + 4 ) , <nl> - DSRL32 = ( ( 7 < < 3 ) + 6 ) , <nl> - 
DSRA32 = ( ( 7 < < 3 ) + 7 ) , <nl> + SLL = ( ( 0 < < 3 ) + 0 ) , <nl> + MOVCI = ( ( 0 < < 3 ) + 1 ) , <nl> + SRL = ( ( 0 < < 3 ) + 2 ) , <nl> + SRA = ( ( 0 < < 3 ) + 3 ) , <nl> + SLLV = ( ( 0 < < 3 ) + 4 ) , <nl> + SRLV = ( ( 0 < < 3 ) + 6 ) , <nl> + SRAV = ( ( 0 < < 3 ) + 7 ) , <nl> + <nl> + JR = ( ( 1 < < 3 ) + 0 ) , <nl> + JALR = ( ( 1 < < 3 ) + 1 ) , <nl> + MOVZ = ( ( 1 < < 3 ) + 2 ) , <nl> + MOVN = ( ( 1 < < 3 ) + 3 ) , <nl> + BREAK = ( ( 1 < < 3 ) + 5 ) , <nl> + <nl> + MFHI = ( ( 2 < < 3 ) + 0 ) , <nl> + CLZ_R6 = ( ( 2 < < 3 ) + 0 ) , <nl> + CLO_R6 = ( ( 2 < < 3 ) + 1 ) , <nl> + MFLO = ( ( 2 < < 3 ) + 2 ) , <nl> + DSLLV = ( ( 2 < < 3 ) + 4 ) , <nl> + DSRLV = ( ( 2 < < 3 ) + 6 ) , <nl> + DSRAV = ( ( 2 < < 3 ) + 7 ) , <nl> + <nl> + MULT = ( ( 3 < < 3 ) + 0 ) , <nl> + MULTU = ( ( 3 < < 3 ) + 1 ) , <nl> + DIV = ( ( 3 < < 3 ) + 2 ) , <nl> + DIVU = ( ( 3 < < 3 ) + 3 ) , <nl> + DMULT = ( ( 3 < < 3 ) + 4 ) , <nl> + DMULTU = ( ( 3 < < 3 ) + 5 ) , <nl> + DDIV = ( ( 3 < < 3 ) + 6 ) , <nl> + DDIVU = ( ( 3 < < 3 ) + 7 ) , <nl> + <nl> + ADD = ( ( 4 < < 3 ) + 0 ) , <nl> + ADDU = ( ( 4 < < 3 ) + 1 ) , <nl> + SUB = ( ( 4 < < 3 ) + 2 ) , <nl> + SUBU = ( ( 4 < < 3 ) + 3 ) , <nl> + AND = ( ( 4 < < 3 ) + 4 ) , <nl> + OR = ( ( 4 < < 3 ) + 5 ) , <nl> + XOR = ( ( 4 < < 3 ) + 6 ) , <nl> + NOR = ( ( 4 < < 3 ) + 7 ) , <nl> + <nl> + SLT = ( ( 5 < < 3 ) + 2 ) , <nl> + SLTU = ( ( 5 < < 3 ) + 3 ) , <nl> + DADD = ( ( 5 < < 3 ) + 4 ) , <nl> + DADDU = ( ( 5 < < 3 ) + 5 ) , <nl> + DSUB = ( ( 5 < < 3 ) + 6 ) , <nl> + DSUBU = ( ( 5 < < 3 ) + 7 ) , <nl> + <nl> + TGE = ( ( 6 < < 3 ) + 0 ) , <nl> + TGEU = ( ( 6 < < 3 ) + 1 ) , <nl> + TLT = ( ( 6 < < 3 ) + 2 ) , <nl> + TLTU = ( ( 6 < < 3 ) + 3 ) , <nl> + TEQ = ( ( 6 < < 3 ) + 4 ) , <nl> + SELEQZ_S = ( ( 6 < < 3 ) + 5 ) , <nl> + TNE = ( ( 6 < < 3 ) + 6 ) , <nl> + SELNEZ_S = ( ( 6 < < 3 ) + 7 ) , <nl> + <nl> + DSLL = ( ( 7 < < 3 ) + 0 ) , <nl> + DSRL = ( ( 7 < < 3 ) + 2 ) , <nl> + DSRA = ( ( 7 < < 3 ) + 3 ) , <nl> + DSLL32 = ( ( 7 < < 3 ) + 4 ) , <nl> + DSRL32 = ( ( 7 < < 3 ) + 6 ) , <nl> + DSRA32 = ( ( 7 < < 3 ) + 7 ) , <nl> <nl> / / Multiply integers in r6 . <nl> - MUL_MUH = ( ( 3 < < 3 ) + 0 ) , / / MUL , MUH . <nl> - MUL_MUH_U = ( ( 3 < < 3 ) + 1 ) , / / MUL_U , MUH_U . <nl> - D_MUL_MUH = ( ( 7 < < 2 ) + 0 ) , / / DMUL , DMUH . <nl> + MUL_MUH = ( ( 3 < < 3 ) + 0 ) , / / MUL , MUH . <nl> + MUL_MUH_U = ( ( 3 < < 3 ) + 1 ) , / / MUL_U , MUH_U . <nl> + D_MUL_MUH = ( ( 7 < < 2 ) + 0 ) , / / DMUL , DMUH . <nl> D_MUL_MUH_U = ( ( 7 < < 2 ) + 1 ) , / / DMUL_U , DMUH_U . <nl> + RINT = ( ( 3 < < 3 ) + 2 ) , <nl> <nl> - MUL_OP = ( ( 0 < < 3 ) + 2 ) , <nl> - MUH_OP = ( ( 0 < < 3 ) + 3 ) , <nl> - DIV_OP = ( ( 0 < < 3 ) + 2 ) , <nl> - MOD_OP = ( ( 0 < < 3 ) + 3 ) , <nl> + MUL_OP = ( ( 0 < < 3 ) + 2 ) , <nl> + MUH_OP = ( ( 0 < < 3 ) + 3 ) , <nl> + DIV_OP = ( ( 0 < < 3 ) + 2 ) , <nl> + MOD_OP = ( ( 0 < < 3 ) + 3 ) , <nl> <nl> - DIV_MOD = ( ( 3 < < 3 ) + 2 ) , <nl> - DIV_MOD_U = ( ( 3 < < 3 ) + 3 ) , <nl> - D_DIV_MOD = ( ( 3 < < 3 ) + 6 ) , <nl> + DIV_MOD = ( ( 3 < < 3 ) + 2 ) , <nl> + DIV_MOD_U = ( ( 3 < < 3 ) + 3 ) , <nl> + D_DIV_MOD = ( ( 3 < < 3 ) + 6 ) , <nl> D_DIV_MOD_U = ( ( 3 < < 3 ) + 7 ) , <nl> <nl> / / drotr in special4 ? <nl> <nl> / / SPECIAL2 Encoding of Function Field . <nl> - MUL = ( ( 0 < < 3 ) + 2 ) , <nl> - CLZ = ( ( 4 < < 3 ) + 0 ) , <nl> - CLO = ( ( 4 < < 3 ) + 1 ) , <nl> + MUL = ( ( 0 < < 3 ) + 2 ) , <nl> + CLZ = ( ( 4 < < 3 ) + 0 ) , <nl> + CLO = ( ( 4 < < 3 ) + 1 ) , <nl> <nl> / / SPECIAL3 Encoding of Function Field . 
<nl> - EXT = ( ( 0 < < 3 ) + 0 ) , <nl> - DEXTM = ( ( 0 < < 3 ) + 1 ) , <nl> - DEXTU = ( ( 0 < < 3 ) + 2 ) , <nl> - DEXT = ( ( 0 < < 3 ) + 3 ) , <nl> - INS = ( ( 0 < < 3 ) + 4 ) , <nl> - DINSM = ( ( 0 < < 3 ) + 5 ) , <nl> - DINSU = ( ( 0 < < 3 ) + 6 ) , <nl> - DINS = ( ( 0 < < 3 ) + 7 ) , <nl> + EXT = ( ( 0 < < 3 ) + 0 ) , <nl> + DEXTM = ( ( 0 < < 3 ) + 1 ) , <nl> + DEXTU = ( ( 0 < < 3 ) + 2 ) , <nl> + DEXT = ( ( 0 < < 3 ) + 3 ) , <nl> + INS = ( ( 0 < < 3 ) + 4 ) , <nl> + DINSM = ( ( 0 < < 3 ) + 5 ) , <nl> + DINSU = ( ( 0 < < 3 ) + 6 ) , <nl> + DINS = ( ( 0 < < 3 ) + 7 ) , <nl> <nl> - DSBH = ( ( 4 < < 3 ) + 4 ) , <nl> + DSBH = ( ( 4 < < 3 ) + 4 ) , <nl> <nl> / / REGIMM encoding of rt Field . <nl> - BLTZ = ( ( 0 < < 3 ) + 0 ) < < 16 , <nl> - BGEZ = ( ( 0 < < 3 ) + 1 ) < < 16 , <nl> - BLTZAL = ( ( 2 < < 3 ) + 0 ) < < 16 , <nl> - BGEZAL = ( ( 2 < < 3 ) + 1 ) < < 16 , <nl> - BGEZALL = ( ( 2 < < 3 ) + 3 ) < < 16 , <nl> - DAHI = ( ( 0 < < 3 ) + 6 ) < < 16 , <nl> - DATI = ( ( 3 < < 3 ) + 6 ) < < 16 , <nl> + BLTZ = ( ( 0 < < 3 ) + 0 ) < < 16 , <nl> + BGEZ = ( ( 0 < < 3 ) + 1 ) < < 16 , <nl> + BLTZAL = ( ( 2 < < 3 ) + 0 ) < < 16 , <nl> + BGEZAL = ( ( 2 < < 3 ) + 1 ) < < 16 , <nl> + BGEZALL = ( ( 2 < < 3 ) + 3 ) < < 16 , <nl> + DAHI = ( ( 0 < < 3 ) + 6 ) < < 16 , <nl> + DATI = ( ( 3 < < 3 ) + 6 ) < < 16 , <nl> <nl> / / COP1 Encoding of rs Field . <nl> - MFC1 = ( ( 0 < < 3 ) + 0 ) < < 21 , <nl> - DMFC1 = ( ( 0 < < 3 ) + 1 ) < < 21 , <nl> - CFC1 = ( ( 0 < < 3 ) + 2 ) < < 21 , <nl> - MFHC1 = ( ( 0 < < 3 ) + 3 ) < < 21 , <nl> - MTC1 = ( ( 0 < < 3 ) + 4 ) < < 21 , <nl> - DMTC1 = ( ( 0 < < 3 ) + 5 ) < < 21 , <nl> - CTC1 = ( ( 0 < < 3 ) + 6 ) < < 21 , <nl> - MTHC1 = ( ( 0 < < 3 ) + 7 ) < < 21 , <nl> - BC1 = ( ( 1 < < 3 ) + 0 ) < < 21 , <nl> - S = ( ( 2 < < 3 ) + 0 ) < < 21 , <nl> - D = ( ( 2 < < 3 ) + 1 ) < < 21 , <nl> - W = ( ( 2 < < 3 ) + 4 ) < < 21 , <nl> - L = ( ( 2 < < 3 ) + 5 ) < < 21 , <nl> - PS = ( ( 2 < < 3 ) + 6 ) < < 21 , <nl> + MFC1 = ( ( 0 < < 3 ) + 0 ) < < 21 , <nl> + DMFC1 = ( ( 0 < < 3 ) + 1 ) < < 21 , <nl> + CFC1 = ( ( 0 < < 3 ) + 2 ) < < 21 , <nl> + MFHC1 = ( ( 0 < < 3 ) + 3 ) < < 21 , <nl> + MTC1 = ( ( 0 < < 3 ) + 4 ) < < 21 , <nl> + DMTC1 = ( ( 0 < < 3 ) + 5 ) < < 21 , <nl> + CTC1 = ( ( 0 < < 3 ) + 6 ) < < 21 , <nl> + MTHC1 = ( ( 0 < < 3 ) + 7 ) < < 21 , <nl> + BC1 = ( ( 1 < < 3 ) + 0 ) < < 21 , <nl> + S = ( ( 2 < < 3 ) + 0 ) < < 21 , <nl> + D = ( ( 2 < < 3 ) + 1 ) < < 21 , <nl> + W = ( ( 2 < < 3 ) + 4 ) < < 21 , <nl> + L = ( ( 2 < < 3 ) + 5 ) < < 21 , <nl> + PS = ( ( 2 < < 3 ) + 6 ) < < 21 , <nl> / / COP1 Encoding of Function Field When rs = S . 
<nl> - ROUND_L_S = ( ( 1 < < 3 ) + 0 ) , <nl> - TRUNC_L_S = ( ( 1 < < 3 ) + 1 ) , <nl> - CEIL_L_S = ( ( 1 < < 3 ) + 2 ) , <nl> - FLOOR_L_S = ( ( 1 < < 3 ) + 3 ) , <nl> - ROUND_W_S = ( ( 1 < < 3 ) + 4 ) , <nl> - TRUNC_W_S = ( ( 1 < < 3 ) + 5 ) , <nl> - CEIL_W_S = ( ( 1 < < 3 ) + 6 ) , <nl> - FLOOR_W_S = ( ( 1 < < 3 ) + 7 ) , <nl> - CVT_D_S = ( ( 4 < < 3 ) + 1 ) , <nl> - CVT_W_S = ( ( 4 < < 3 ) + 4 ) , <nl> - CVT_L_S = ( ( 4 < < 3 ) + 5 ) , <nl> - CVT_PS_S = ( ( 4 < < 3 ) + 6 ) , <nl> + ROUND_L_S = ( ( 1 < < 3 ) + 0 ) , <nl> + TRUNC_L_S = ( ( 1 < < 3 ) + 1 ) , <nl> + CEIL_L_S = ( ( 1 < < 3 ) + 2 ) , <nl> + FLOOR_L_S = ( ( 1 < < 3 ) + 3 ) , <nl> + ROUND_W_S = ( ( 1 < < 3 ) + 4 ) , <nl> + TRUNC_W_S = ( ( 1 < < 3 ) + 5 ) , <nl> + CEIL_W_S = ( ( 1 < < 3 ) + 6 ) , <nl> + FLOOR_W_S = ( ( 1 < < 3 ) + 7 ) , <nl> + CVT_D_S = ( ( 4 < < 3 ) + 1 ) , <nl> + CVT_W_S = ( ( 4 < < 3 ) + 4 ) , <nl> + CVT_L_S = ( ( 4 < < 3 ) + 5 ) , <nl> + CVT_PS_S = ( ( 4 < < 3 ) + 6 ) , <nl> / / COP1 Encoding of Function Field When rs = D . <nl> - ADD_D = ( ( 0 < < 3 ) + 0 ) , <nl> - SUB_D = ( ( 0 < < 3 ) + 1 ) , <nl> - MUL_D = ( ( 0 < < 3 ) + 2 ) , <nl> - DIV_D = ( ( 0 < < 3 ) + 3 ) , <nl> - SQRT_D = ( ( 0 < < 3 ) + 4 ) , <nl> - ABS_D = ( ( 0 < < 3 ) + 5 ) , <nl> - MOV_D = ( ( 0 < < 3 ) + 6 ) , <nl> - NEG_D = ( ( 0 < < 3 ) + 7 ) , <nl> - ROUND_L_D = ( ( 1 < < 3 ) + 0 ) , <nl> - TRUNC_L_D = ( ( 1 < < 3 ) + 1 ) , <nl> - CEIL_L_D = ( ( 1 < < 3 ) + 2 ) , <nl> - FLOOR_L_D = ( ( 1 < < 3 ) + 3 ) , <nl> - ROUND_W_D = ( ( 1 < < 3 ) + 4 ) , <nl> - TRUNC_W_D = ( ( 1 < < 3 ) + 5 ) , <nl> - CEIL_W_D = ( ( 1 < < 3 ) + 6 ) , <nl> - FLOOR_W_D = ( ( 1 < < 3 ) + 7 ) , <nl> - MIN = ( ( 3 < < 3 ) + 4 ) , <nl> - MINA = ( ( 3 < < 3 ) + 5 ) , <nl> - MAX = ( ( 3 < < 3 ) + 6 ) , <nl> - MAXA = ( ( 3 < < 3 ) + 7 ) , <nl> - CVT_S_D = ( ( 4 < < 3 ) + 0 ) , <nl> - CVT_W_D = ( ( 4 < < 3 ) + 4 ) , <nl> - CVT_L_D = ( ( 4 < < 3 ) + 5 ) , <nl> - C_F_D = ( ( 6 < < 3 ) + 0 ) , <nl> - C_UN_D = ( ( 6 < < 3 ) + 1 ) , <nl> - C_EQ_D = ( ( 6 < < 3 ) + 2 ) , <nl> - C_UEQ_D = ( ( 6 < < 3 ) + 3 ) , <nl> - C_OLT_D = ( ( 6 < < 3 ) + 4 ) , <nl> - C_ULT_D = ( ( 6 < < 3 ) + 5 ) , <nl> - C_OLE_D = ( ( 6 < < 3 ) + 6 ) , <nl> - C_ULE_D = ( ( 6 < < 3 ) + 7 ) , <nl> + ADD_D = ( ( 0 < < 3 ) + 0 ) , <nl> + SUB_D = ( ( 0 < < 3 ) + 1 ) , <nl> + MUL_D = ( ( 0 < < 3 ) + 2 ) , <nl> + DIV_D = ( ( 0 < < 3 ) + 3 ) , <nl> + SQRT_D = ( ( 0 < < 3 ) + 4 ) , <nl> + ABS_D = ( ( 0 < < 3 ) + 5 ) , <nl> + MOV_D = ( ( 0 < < 3 ) + 6 ) , <nl> + NEG_D = ( ( 0 < < 3 ) + 7 ) , <nl> + ROUND_L_D = ( ( 1 < < 3 ) + 0 ) , <nl> + TRUNC_L_D = ( ( 1 < < 3 ) + 1 ) , <nl> + CEIL_L_D = ( ( 1 < < 3 ) + 2 ) , <nl> + FLOOR_L_D = ( ( 1 < < 3 ) + 3 ) , <nl> + ROUND_W_D = ( ( 1 < < 3 ) + 4 ) , <nl> + TRUNC_W_D = ( ( 1 < < 3 ) + 5 ) , <nl> + CEIL_W_D = ( ( 1 < < 3 ) + 6 ) , <nl> + FLOOR_W_D = ( ( 1 < < 3 ) + 7 ) , <nl> + MIN = ( ( 3 < < 3 ) + 4 ) , <nl> + MINA = ( ( 3 < < 3 ) + 5 ) , <nl> + MAX = ( ( 3 < < 3 ) + 6 ) , <nl> + MAXA = ( ( 3 < < 3 ) + 7 ) , <nl> + CVT_S_D = ( ( 4 < < 3 ) + 0 ) , <nl> + CVT_W_D = ( ( 4 < < 3 ) + 4 ) , <nl> + CVT_L_D = ( ( 4 < < 3 ) + 5 ) , <nl> + C_F_D = ( ( 6 < < 3 ) + 0 ) , <nl> + C_UN_D = ( ( 6 < < 3 ) + 1 ) , <nl> + C_EQ_D = ( ( 6 < < 3 ) + 2 ) , <nl> + C_UEQ_D = ( ( 6 < < 3 ) + 3 ) , <nl> + C_OLT_D = ( ( 6 < < 3 ) + 4 ) , <nl> + C_ULT_D = ( ( 6 < < 3 ) + 5 ) , <nl> + C_OLE_D = ( ( 6 < < 3 ) + 6 ) , <nl> + C_ULE_D = ( ( 6 < < 3 ) + 7 ) , <nl> / / COP1 Encoding of Function Field When rs = W or L . 
<nl> - CVT_S_W = ( ( 4 < < 3 ) + 0 ) , <nl> - CVT_D_W = ( ( 4 < < 3 ) + 1 ) , <nl> - CVT_S_L = ( ( 4 < < 3 ) + 0 ) , <nl> - CVT_D_L = ( ( 4 < < 3 ) + 1 ) , <nl> - BC1EQZ = ( ( 2 < < 2 ) + 1 ) < < 21 , <nl> - BC1NEZ = ( ( 3 < < 2 ) + 1 ) < < 21 , <nl> + CVT_S_W = ( ( 4 < < 3 ) + 0 ) , <nl> + CVT_D_W = ( ( 4 < < 3 ) + 1 ) , <nl> + CVT_S_L = ( ( 4 < < 3 ) + 0 ) , <nl> + CVT_D_L = ( ( 4 < < 3 ) + 1 ) , <nl> + BC1EQZ = ( ( 2 < < 2 ) + 1 ) < < 21 , <nl> + BC1NEZ = ( ( 3 < < 2 ) + 1 ) < < 21 , <nl> / / COP1 CMP positive predicates Bit 5 . . 4 = 00 . <nl> - CMP_AF = ( ( 0 < < 3 ) + 0 ) , <nl> - CMP_UN = ( ( 0 < < 3 ) + 1 ) , <nl> - CMP_EQ = ( ( 0 < < 3 ) + 2 ) , <nl> - CMP_UEQ = ( ( 0 < < 3 ) + 3 ) , <nl> - CMP_LT = ( ( 0 < < 3 ) + 4 ) , <nl> - CMP_ULT = ( ( 0 < < 3 ) + 5 ) , <nl> - CMP_LE = ( ( 0 < < 3 ) + 6 ) , <nl> - CMP_ULE = ( ( 0 < < 3 ) + 7 ) , <nl> - CMP_SAF = ( ( 1 < < 3 ) + 0 ) , <nl> - CMP_SUN = ( ( 1 < < 3 ) + 1 ) , <nl> - CMP_SEQ = ( ( 1 < < 3 ) + 2 ) , <nl> - CMP_SUEQ = ( ( 1 < < 3 ) + 3 ) , <nl> - CMP_SSLT = ( ( 1 < < 3 ) + 4 ) , <nl> - CMP_SSULT = ( ( 1 < < 3 ) + 5 ) , <nl> - CMP_SLE = ( ( 1 < < 3 ) + 6 ) , <nl> - CMP_SULE = ( ( 1 < < 3 ) + 7 ) , <nl> + CMP_AF = ( ( 0 < < 3 ) + 0 ) , <nl> + CMP_UN = ( ( 0 < < 3 ) + 1 ) , <nl> + CMP_EQ = ( ( 0 < < 3 ) + 2 ) , <nl> + CMP_UEQ = ( ( 0 < < 3 ) + 3 ) , <nl> + CMP_LT = ( ( 0 < < 3 ) + 4 ) , <nl> + CMP_ULT = ( ( 0 < < 3 ) + 5 ) , <nl> + CMP_LE = ( ( 0 < < 3 ) + 6 ) , <nl> + CMP_ULE = ( ( 0 < < 3 ) + 7 ) , <nl> + CMP_SAF = ( ( 1 < < 3 ) + 0 ) , <nl> + CMP_SUN = ( ( 1 < < 3 ) + 1 ) , <nl> + CMP_SEQ = ( ( 1 < < 3 ) + 2 ) , <nl> + CMP_SUEQ = ( ( 1 < < 3 ) + 3 ) , <nl> + CMP_SSLT = ( ( 1 < < 3 ) + 4 ) , <nl> + CMP_SSULT = ( ( 1 < < 3 ) + 5 ) , <nl> + CMP_SLE = ( ( 1 < < 3 ) + 6 ) , <nl> + CMP_SULE = ( ( 1 < < 3 ) + 7 ) , <nl> / / COP1 CMP negative predicates Bit 5 . . 4 = 01 . <nl> - CMP_AT = ( ( 2 < < 3 ) + 0 ) , / / Reserved , not implemented . <nl> - CMP_OR = ( ( 2 < < 3 ) + 1 ) , <nl> - CMP_UNE = ( ( 2 < < 3 ) + 2 ) , <nl> - CMP_NE = ( ( 2 < < 3 ) + 3 ) , <nl> - CMP_UGE = ( ( 2 < < 3 ) + 4 ) , / / Reserved , not implemented . <nl> - CMP_OGE = ( ( 2 < < 3 ) + 5 ) , / / Reserved , not implemented . <nl> - CMP_UGT = ( ( 2 < < 3 ) + 6 ) , / / Reserved , not implemented . <nl> - CMP_OGT = ( ( 2 < < 3 ) + 7 ) , / / Reserved , not implemented . <nl> - CMP_SAT = ( ( 3 < < 3 ) + 0 ) , / / Reserved , not implemented . <nl> - CMP_SOR = ( ( 3 < < 3 ) + 1 ) , <nl> - CMP_SUNE = ( ( 3 < < 3 ) + 2 ) , <nl> - CMP_SNE = ( ( 3 < < 3 ) + 3 ) , <nl> - CMP_SUGE = ( ( 3 < < 3 ) + 4 ) , / / Reserved , not implemented . <nl> - CMP_SOGE = ( ( 3 < < 3 ) + 5 ) , / / Reserved , not implemented . <nl> - CMP_SUGT = ( ( 3 < < 3 ) + 6 ) , / / Reserved , not implemented . <nl> - CMP_SOGT = ( ( 3 < < 3 ) + 7 ) , / / Reserved , not implemented . <nl> - <nl> - SEL = ( ( 2 < < 3 ) + 0 ) , <nl> - SELEQZ_C = ( ( 2 < < 3 ) + 4 ) , / / COP1 on FPR registers . <nl> - SELNEZ_C = ( ( 2 < < 3 ) + 7 ) , / / COP1 on FPR registers . <nl> + CMP_AT = ( ( 2 < < 3 ) + 0 ) , / / Reserved , not implemented . <nl> + CMP_OR = ( ( 2 < < 3 ) + 1 ) , <nl> + CMP_UNE = ( ( 2 < < 3 ) + 2 ) , <nl> + CMP_NE = ( ( 2 < < 3 ) + 3 ) , <nl> + CMP_UGE = ( ( 2 < < 3 ) + 4 ) , / / Reserved , not implemented . <nl> + CMP_OGE = ( ( 2 < < 3 ) + 5 ) , / / Reserved , not implemented . <nl> + CMP_UGT = ( ( 2 < < 3 ) + 6 ) , / / Reserved , not implemented . <nl> + CMP_OGT = ( ( 2 < < 3 ) + 7 ) , / / Reserved , not implemented . <nl> + CMP_SAT = ( ( 3 < < 3 ) + 0 ) , / / Reserved , not implemented . 
<nl> + CMP_SOR = ( ( 3 < < 3 ) + 1 ) , <nl> + CMP_SUNE = ( ( 3 < < 3 ) + 2 ) , <nl> + CMP_SNE = ( ( 3 < < 3 ) + 3 ) , <nl> + CMP_SUGE = ( ( 3 < < 3 ) + 4 ) , / / Reserved , not implemented . <nl> + CMP_SOGE = ( ( 3 < < 3 ) + 5 ) , / / Reserved , not implemented . <nl> + CMP_SUGT = ( ( 3 < < 3 ) + 6 ) , / / Reserved , not implemented . <nl> + CMP_SOGT = ( ( 3 < < 3 ) + 7 ) , / / Reserved , not implemented . <nl> + <nl> + SEL = ( ( 2 < < 3 ) + 0 ) , <nl> + SELEQZ_C = ( ( 2 < < 3 ) + 4 ) , / / COP1 on FPR registers . <nl> + SELNEZ_C = ( ( 2 < < 3 ) + 7 ) , / / COP1 on FPR registers . <nl> <nl> / / COP1 Encoding of Function Field When rs = PS . <nl> / / COP1X Encoding of Function Field . <nl> - MADD_D = ( ( 4 < < 3 ) + 1 ) , <nl> + MADD_D = ( ( 4 < < 3 ) + 1 ) , <nl> <nl> - NULLSF = 0 <nl> + NULLSF = 0 <nl> } ; <nl> <nl> <nl> mmm a / src / mips64 / disasm - mips64 . cc <nl> ppp b / src / mips64 / disasm - mips64 . cc <nl> int Decoder : : DecodeBreakInstr ( Instruction * instr ) { <nl> <nl> bool Decoder : : DecodeTypeRegisterRsType ( Instruction * instr ) { <nl> switch ( instr - > FunctionFieldRaw ( ) ) { <nl> + case RINT : <nl> + Format ( instr , " rint . ' t ' fd , ' fs " ) ; <nl> + break ; <nl> case SELEQZ_C : <nl> Format ( instr , " seleqz . ' t ' fd , ' fs , ' ft " ) ; <nl> break ; <nl> mmm a / src / mips64 / simulator - mips64 . cc <nl> ppp b / src / mips64 / simulator - mips64 . cc <nl> bool Simulator : : set_fcsr_round64_error ( double original , double rounded ) { <nl> } <nl> <nl> <nl> + / / for cvt instructions only <nl> + void Simulator : : round_according_to_fcsr ( double toRound , double & rounded , <nl> + int32_t & rounded_int , double fs ) { <nl> + / / 0 RN ( round to nearest ) : Round a result to the nearest <nl> + / / representable value ; if the result is exactly halfway between <nl> + / / two representable values , round to zero . Behave like round_w_d . <nl> + <nl> + / / 1 RZ ( round toward zero ) : Round a result to the closest <nl> + / / representable value whose absolute value is less than or <nl> + / / equal to the infinitely accurate result . Behave like trunc_w_d . <nl> + <nl> + / / 2 RP ( round up , or toward + infinity ) : Round a result to the <nl> + / / next representable value up . Behave like ceil_w_d . <nl> + <nl> + / / 3 RN ( round down , or toward − infinity ) : Round a result to <nl> + / / the next representable value down . Behave like floor_w_d . <nl> + switch ( FCSR_ & 3 ) { <nl> + case kRoundToNearest : <nl> + rounded = std : : floor ( fs + 0 . 5 ) ; <nl> + rounded_int = static_cast < int32_t > ( rounded ) ; <nl> + if ( ( rounded_int & 1 ) ! = 0 & & rounded_int - fs = = 0 . 5 ) { <nl> + / / If the number is halfway between two integers , <nl> + / / round to the even one . 
<nl> + rounded_int - - ; <nl> + } <nl> + break ; <nl> + case kRoundToZero : <nl> + rounded = trunc ( fs ) ; <nl> + rounded_int = static_cast < int32_t > ( rounded ) ; <nl> + break ; <nl> + case kRoundToPlusInf : <nl> + rounded = std : : ceil ( fs ) ; <nl> + rounded_int = static_cast < int32_t > ( rounded ) ; <nl> + break ; <nl> + case kRoundToMinusInf : <nl> + rounded = std : : floor ( fs ) ; <nl> + rounded_int = static_cast < int32_t > ( rounded ) ; <nl> + break ; <nl> + } <nl> + } <nl> + <nl> + <nl> + void Simulator : : round64_according_to_fcsr ( double toRound , double & rounded , <nl> + int64_t & rounded_int , double fs ) { <nl> + / / 0 RN ( round to nearest ) : Round a result to the nearest <nl> + / / representable value ; if the result is exactly halfway between <nl> + / / two representable values , round to zero . Behave like round_w_d . <nl> + <nl> + / / 1 RZ ( round toward zero ) : Round a result to the closest <nl> + / / representable value whose absolute value is less than or . <nl> + / / equal to the infinitely accurate result . Behave like trunc_w_d . <nl> + <nl> + / / 2 RP ( round up , or toward + infinity ) : Round a result to the <nl> + / / next representable value up . Behave like ceil_w_d . <nl> + <nl> + / / 3 RN ( round down , or toward − infinity ) : Round a result to <nl> + / / the next representable value down . Behave like floor_w_d . <nl> + switch ( FCSR_ & 3 ) { <nl> + case kRoundToNearest : <nl> + rounded = std : : floor ( fs + 0 . 5 ) ; <nl> + rounded_int = static_cast < int64_t > ( rounded ) ; <nl> + if ( ( rounded_int & 1 ) ! = 0 & & rounded_int - fs = = 0 . 5 ) { <nl> + / / If the number is halfway between two integers , <nl> + / / round to the even one . <nl> + rounded_int - - ; <nl> + } <nl> + break ; <nl> + case kRoundToZero : <nl> + rounded = trunc ( fs ) ; <nl> + rounded_int = static_cast < int64_t > ( rounded ) ; <nl> + break ; <nl> + case kRoundToPlusInf : <nl> + rounded = std : : ceil ( fs ) ; <nl> + rounded_int = static_cast < int64_t > ( rounded ) ; <nl> + break ; <nl> + case kRoundToMinusInf : <nl> + rounded = std : : floor ( fs ) ; <nl> + rounded_int = static_cast < int64_t > ( rounded ) ; <nl> + break ; <nl> + } <nl> + } <nl> + <nl> + <nl> / / Raw access to the PC register . <nl> void Simulator : : set_pc ( int64_t value ) { <nl> pc_modified_ = true ; <nl> void Simulator : : DecodeTypeRegisterDRsType ( Instruction * instr , <nl> int64_t ft_int = bit_cast < int64_t > ( ft ) ; <nl> int64_t fd_int = bit_cast < int64_t > ( fd ) ; <nl> switch ( instr - > FunctionFieldRaw ( ) ) { <nl> + case RINT : { <nl> + DCHECK ( kArchVariant = = kMips64r6 ) ; <nl> + double result , temp , temp_result ; <nl> + double upper = std : : ceil ( fs ) ; <nl> + double lower = std : : floor ( fs ) ; <nl> + switch ( FCSR_ & 0x3 ) { <nl> + case kRoundToNearest : <nl> + if ( upper - fs < fs - lower ) { <nl> + result = upper ; <nl> + } else if ( upper - fs > fs - lower ) { <nl> + result = lower ; <nl> + } else { <nl> + temp_result = upper / 2 ; <nl> + double reminder = modf ( temp_result , & temp ) ; <nl> + if ( reminder = = 0 ) { <nl> + result = upper ; <nl> + } else { <nl> + result = lower ; <nl> + } <nl> + } <nl> + break ; <nl> + case kRoundToZero : <nl> + result = ( fs > 0 ? lower : upper ) ; <nl> + break ; <nl> + case kRoundToPlusInf : <nl> + result = upper ; <nl> + break ; <nl> + case kRoundToMinusInf : <nl> + result = lower ; <nl> + break ; <nl> + } <nl> + set_fpu_register_double ( fd_reg , result ) ; <nl> + if ( result ! 
= fs ) { <nl> + set_fcsr_bit ( kFCSRInexactFlagBit , true ) ; <nl> + } <nl> + break ; <nl> + } <nl> case SEL : <nl> DCHECK ( kArchVariant = = kMips64r6 ) ; <nl> set_fpu_register_double ( fd_reg , ( fd_int & 0x1 ) = = 0 ? fs : ft ) ; <nl> void Simulator : : DecodeTypeRegisterDRsType ( Instruction * instr , <nl> case C_ULE_D : <nl> set_fcsr_bit ( fcsr_cc , ( fs < = ft ) | | ( std : : isnan ( fs ) | | std : : isnan ( ft ) ) ) ; <nl> break ; <nl> - case CVT_W_D : / / Convert double to word . <nl> - / / Rounding modes are not yet supported . <nl> - DCHECK ( ( FCSR_ & 3 ) = = 0 ) ; <nl> - / / In rounding mode 0 it should behave like ROUND . <nl> - / / No break . <nl> + case CVT_W_D : { / / Convert double to word . <nl> + double rounded ; <nl> + int32_t result ; <nl> + round_according_to_fcsr ( fs , rounded , result , fs ) ; <nl> + set_fpu_register_word ( fd_reg , result ) ; <nl> + if ( set_fcsr_round_error ( fs , rounded ) ) { <nl> + set_fpu_register_word ( fd_reg , kFPUInvalidResult ) ; <nl> + } <nl> + break ; <nl> + } <nl> case ROUND_W_D : / / Round double to word ( round half to even ) . <nl> { <nl> double rounded = std : : floor ( fs + 0 . 5 ) ; <nl> void Simulator : : DecodeTypeRegisterDRsType ( Instruction * instr , <nl> case CVT_S_D : / / Convert double to float ( single ) . <nl> set_fpu_register_float ( fd_reg , static_cast < float > ( fs ) ) ; <nl> break ; <nl> - case CVT_L_D : / / Mips64r2 : Truncate double to 64 - bit long - word . <nl> - / / Rounding modes are not yet supported . <nl> - DCHECK ( ( FCSR_ & 3 ) = = 0 ) ; <nl> - / / In rounding mode 0 it should behave like ROUND . <nl> - / / No break . <nl> + case CVT_L_D : { / / Mips64r2 : Truncate double to 64 - bit long - word . <nl> + double rounded ; <nl> + int64_t result ; <nl> + round64_according_to_fcsr ( fs , rounded , result , fs ) ; <nl> + set_fpu_register ( fd_reg , result ) ; <nl> + if ( set_fcsr_round64_error ( fs , rounded ) ) { <nl> + set_fpu_register ( fd_reg , kFPUInvalidResult ) ; <nl> + } <nl> + break ; <nl> + } <nl> case ROUND_L_D : { / / Mips64r2 instruction . <nl> / / check error cases <nl> double rounded = fs > 0 ? floor ( fs + 0 . 5 ) : ceil ( fs - 0 . 5 ) ; <nl> uintptr_t Simulator : : PopAddress ( ) { <nl> <nl> <nl> # undef UNSUPPORTED <nl> - <nl> } } / / namespace v8 : : internal <nl> <nl> # endif / / USE_SIMULATOR <nl> mmm a / src / mips64 / simulator - mips64 . h <nl> ppp b / src / mips64 / simulator - mips64 . h <nl> class Simulator { <nl> bool test_fcsr_bit ( uint32_t cc ) ; <nl> bool set_fcsr_round_error ( double original , double rounded ) ; <nl> bool set_fcsr_round64_error ( double original , double rounded ) ; <nl> + void round_according_to_fcsr ( double toRound , double & rounded , <nl> + int32_t & rounded_int , double fs ) ; <nl> + void round64_according_to_fcsr ( double toRound , double & rounded , <nl> + int64_t & rounded_int , double fs ) ; <nl> <nl> / / Special case of set_register and get_register to access the raw PC value . <nl> void set_pc ( int64_t value ) ; <nl> mmm a / test / cctest / test - assembler - mips . cc <nl> ppp b / test / cctest / test - assembler - mips . cc <nl> TEST ( MIPS7 ) { <nl> <nl> TEST ( MIPS8 ) { <nl> / / Test ROTR and ROTRV instructions . 
<nl> - CcTest : : InitializeVM ( ) ; <nl> - Isolate * isolate = CcTest : : i_isolate ( ) ; <nl> - HandleScope scope ( isolate ) ; <nl> + if ( IsMipsArchVariant ( kMips32r2 ) ) { <nl> + CcTest : : InitializeVM ( ) ; <nl> + Isolate * isolate = CcTest : : i_isolate ( ) ; <nl> + HandleScope scope ( isolate ) ; <nl> <nl> - typedef struct { <nl> - int32_t input ; <nl> - int32_t result_rotr_4 ; <nl> - int32_t result_rotr_8 ; <nl> - int32_t result_rotr_12 ; <nl> - int32_t result_rotr_16 ; <nl> - int32_t result_rotr_20 ; <nl> - int32_t result_rotr_24 ; <nl> - int32_t result_rotr_28 ; <nl> - int32_t result_rotrv_4 ; <nl> - int32_t result_rotrv_8 ; <nl> - int32_t result_rotrv_12 ; <nl> - int32_t result_rotrv_16 ; <nl> - int32_t result_rotrv_20 ; <nl> - int32_t result_rotrv_24 ; <nl> - int32_t result_rotrv_28 ; <nl> - } T ; <nl> - T t ; <nl> + typedef struct { <nl> + int32_t input ; <nl> + int32_t result_rotr_4 ; <nl> + int32_t result_rotr_8 ; <nl> + int32_t result_rotr_12 ; <nl> + int32_t result_rotr_16 ; <nl> + int32_t result_rotr_20 ; <nl> + int32_t result_rotr_24 ; <nl> + int32_t result_rotr_28 ; <nl> + int32_t result_rotrv_4 ; <nl> + int32_t result_rotrv_8 ; <nl> + int32_t result_rotrv_12 ; <nl> + int32_t result_rotrv_16 ; <nl> + int32_t result_rotrv_20 ; <nl> + int32_t result_rotrv_24 ; <nl> + int32_t result_rotrv_28 ; <nl> + } T ; <nl> + T t ; <nl> <nl> - MacroAssembler assm ( isolate , NULL , 0 ) ; <nl> + MacroAssembler assm ( isolate , NULL , 0 ) ; <nl> <nl> - / / Basic word load . <nl> - __ lw ( t0 , MemOperand ( a0 , OFFSET_OF ( T , input ) ) ) ; <nl> - <nl> - / / ROTR instruction ( called through the Ror macro ) . <nl> - __ Ror ( t1 , t0 , 0x0004 ) ; <nl> - __ Ror ( t2 , t0 , 0x0008 ) ; <nl> - __ Ror ( t3 , t0 , 0x000c ) ; <nl> - __ Ror ( t4 , t0 , 0x0010 ) ; <nl> - __ Ror ( t5 , t0 , 0x0014 ) ; <nl> - __ Ror ( t6 , t0 , 0x0018 ) ; <nl> - __ Ror ( t7 , t0 , 0x001c ) ; <nl> - <nl> - / / Basic word store . <nl> - __ sw ( t1 , MemOperand ( a0 , OFFSET_OF ( T , result_rotr_4 ) ) ) ; <nl> - __ sw ( t2 , MemOperand ( a0 , OFFSET_OF ( T , result_rotr_8 ) ) ) ; <nl> - __ sw ( t3 , MemOperand ( a0 , OFFSET_OF ( T , result_rotr_12 ) ) ) ; <nl> - __ sw ( t4 , MemOperand ( a0 , OFFSET_OF ( T , result_rotr_16 ) ) ) ; <nl> - __ sw ( t5 , MemOperand ( a0 , OFFSET_OF ( T , result_rotr_20 ) ) ) ; <nl> - __ sw ( t6 , MemOperand ( a0 , OFFSET_OF ( T , result_rotr_24 ) ) ) ; <nl> - __ sw ( t7 , MemOperand ( a0 , OFFSET_OF ( T , result_rotr_28 ) ) ) ; <nl> - <nl> - / / ROTRV instruction ( called through the Ror macro ) . <nl> - __ li ( t7 , 0x0004 ) ; <nl> - __ Ror ( t1 , t0 , t7 ) ; <nl> - __ li ( t7 , 0x0008 ) ; <nl> - __ Ror ( t2 , t0 , t7 ) ; <nl> - __ li ( t7 , 0x000C ) ; <nl> - __ Ror ( t3 , t0 , t7 ) ; <nl> - __ li ( t7 , 0x0010 ) ; <nl> - __ Ror ( t4 , t0 , t7 ) ; <nl> - __ li ( t7 , 0x0014 ) ; <nl> - __ Ror ( t5 , t0 , t7 ) ; <nl> - __ li ( t7 , 0x0018 ) ; <nl> - __ Ror ( t6 , t0 , t7 ) ; <nl> - __ li ( t7 , 0x001C ) ; <nl> - __ Ror ( t7 , t0 , t7 ) ; <nl> - <nl> - / / Basic word store . 
<nl> - __ sw ( t1 , MemOperand ( a0 , OFFSET_OF ( T , result_rotrv_4 ) ) ) ; <nl> - __ sw ( t2 , MemOperand ( a0 , OFFSET_OF ( T , result_rotrv_8 ) ) ) ; <nl> - __ sw ( t3 , MemOperand ( a0 , OFFSET_OF ( T , result_rotrv_12 ) ) ) ; <nl> - __ sw ( t4 , MemOperand ( a0 , OFFSET_OF ( T , result_rotrv_16 ) ) ) ; <nl> - __ sw ( t5 , MemOperand ( a0 , OFFSET_OF ( T , result_rotrv_20 ) ) ) ; <nl> - __ sw ( t6 , MemOperand ( a0 , OFFSET_OF ( T , result_rotrv_24 ) ) ) ; <nl> - __ sw ( t7 , MemOperand ( a0 , OFFSET_OF ( T , result_rotrv_28 ) ) ) ; <nl> + / / Basic word load . <nl> + __ lw ( t0 , MemOperand ( a0 , OFFSET_OF ( T , input ) ) ) ; <nl> + <nl> + / / ROTR instruction ( called through the Ror macro ) . <nl> + __ Ror ( t1 , t0 , 0x0004 ) ; <nl> + __ Ror ( t2 , t0 , 0x0008 ) ; <nl> + __ Ror ( t3 , t0 , 0x000c ) ; <nl> + __ Ror ( t4 , t0 , 0x0010 ) ; <nl> + __ Ror ( t5 , t0 , 0x0014 ) ; <nl> + __ Ror ( t6 , t0 , 0x0018 ) ; <nl> + __ Ror ( t7 , t0 , 0x001c ) ; <nl> + <nl> + / / Basic word store . <nl> + __ sw ( t1 , MemOperand ( a0 , OFFSET_OF ( T , result_rotr_4 ) ) ) ; <nl> + __ sw ( t2 , MemOperand ( a0 , OFFSET_OF ( T , result_rotr_8 ) ) ) ; <nl> + __ sw ( t3 , MemOperand ( a0 , OFFSET_OF ( T , result_rotr_12 ) ) ) ; <nl> + __ sw ( t4 , MemOperand ( a0 , OFFSET_OF ( T , result_rotr_16 ) ) ) ; <nl> + __ sw ( t5 , MemOperand ( a0 , OFFSET_OF ( T , result_rotr_20 ) ) ) ; <nl> + __ sw ( t6 , MemOperand ( a0 , OFFSET_OF ( T , result_rotr_24 ) ) ) ; <nl> + __ sw ( t7 , MemOperand ( a0 , OFFSET_OF ( T , result_rotr_28 ) ) ) ; <nl> + <nl> + / / ROTRV instruction ( called through the Ror macro ) . <nl> + __ li ( t7 , 0x0004 ) ; <nl> + __ Ror ( t1 , t0 , t7 ) ; <nl> + __ li ( t7 , 0x0008 ) ; <nl> + __ Ror ( t2 , t0 , t7 ) ; <nl> + __ li ( t7 , 0x000C ) ; <nl> + __ Ror ( t3 , t0 , t7 ) ; <nl> + __ li ( t7 , 0x0010 ) ; <nl> + __ Ror ( t4 , t0 , t7 ) ; <nl> + __ li ( t7 , 0x0014 ) ; <nl> + __ Ror ( t5 , t0 , t7 ) ; <nl> + __ li ( t7 , 0x0018 ) ; <nl> + __ Ror ( t6 , t0 , t7 ) ; <nl> + __ li ( t7 , 0x001C ) ; <nl> + __ Ror ( t7 , t0 , t7 ) ; <nl> + <nl> + / / Basic word store . <nl> + __ sw ( t1 , MemOperand ( a0 , OFFSET_OF ( T , result_rotrv_4 ) ) ) ; <nl> + __ sw ( t2 , MemOperand ( a0 , OFFSET_OF ( T , result_rotrv_8 ) ) ) ; <nl> + __ sw ( t3 , MemOperand ( a0 , OFFSET_OF ( T , result_rotrv_12 ) ) ) ; <nl> + __ sw ( t4 , MemOperand ( a0 , OFFSET_OF ( T , result_rotrv_16 ) ) ) ; <nl> + __ sw ( t5 , MemOperand ( a0 , OFFSET_OF ( T , result_rotrv_20 ) ) ) ; <nl> + __ sw ( t6 , MemOperand ( a0 , OFFSET_OF ( T , result_rotrv_24 ) ) ) ; <nl> + __ sw ( t7 , MemOperand ( a0 , OFFSET_OF ( T , result_rotrv_28 ) ) ) ; <nl> <nl> - __ jr ( ra ) ; <nl> - __ nop ( ) ; <nl> + __ jr ( ra ) ; <nl> + __ nop ( ) ; <nl> <nl> - CodeDesc desc ; <nl> - assm . GetCode ( & desc ) ; <nl> - Handle < Code > code = isolate - > factory ( ) - > NewCode ( <nl> - desc , Code : : ComputeFlags ( Code : : STUB ) , Handle < Code > ( ) ) ; <nl> - F3 f = FUNCTION_CAST < F3 > ( code - > entry ( ) ) ; <nl> - t . input = 0x12345678 ; <nl> - Object * dummy = CALL_GENERATED_CODE ( f , & t , 0x0 , 0 , 0 , 0 ) ; <nl> - USE ( dummy ) ; <nl> - CHECK_EQ ( static_cast < int32_t > ( 0x81234567 ) , t . result_rotr_4 ) ; <nl> - CHECK_EQ ( static_cast < int32_t > ( 0x78123456 ) , t . result_rotr_8 ) ; <nl> - CHECK_EQ ( static_cast < int32_t > ( 0x67812345 ) , t . result_rotr_12 ) ; <nl> - CHECK_EQ ( static_cast < int32_t > ( 0x56781234 ) , t . result_rotr_16 ) ; <nl> - CHECK_EQ ( static_cast < int32_t > ( 0x45678123 ) , t . 
result_rotr_20 ) ; <nl> - CHECK_EQ ( static_cast < int32_t > ( 0x34567812 ) , t . result_rotr_24 ) ; <nl> - CHECK_EQ ( static_cast < int32_t > ( 0x23456781 ) , t . result_rotr_28 ) ; <nl> - <nl> - CHECK_EQ ( static_cast < int32_t > ( 0x81234567 ) , t . result_rotrv_4 ) ; <nl> - CHECK_EQ ( static_cast < int32_t > ( 0x78123456 ) , t . result_rotrv_8 ) ; <nl> - CHECK_EQ ( static_cast < int32_t > ( 0x67812345 ) , t . result_rotrv_12 ) ; <nl> - CHECK_EQ ( static_cast < int32_t > ( 0x56781234 ) , t . result_rotrv_16 ) ; <nl> - CHECK_EQ ( static_cast < int32_t > ( 0x45678123 ) , t . result_rotrv_20 ) ; <nl> - CHECK_EQ ( static_cast < int32_t > ( 0x34567812 ) , t . result_rotrv_24 ) ; <nl> - CHECK_EQ ( static_cast < int32_t > ( 0x23456781 ) , t . result_rotrv_28 ) ; <nl> + CodeDesc desc ; <nl> + assm . GetCode ( & desc ) ; <nl> + Handle < Code > code = isolate - > factory ( ) - > NewCode ( <nl> + desc , Code : : ComputeFlags ( Code : : STUB ) , Handle < Code > ( ) ) ; <nl> + F3 f = FUNCTION_CAST < F3 > ( code - > entry ( ) ) ; <nl> + t . input = 0x12345678 ; <nl> + Object * dummy = CALL_GENERATED_CODE ( f , & t , 0x0 , 0 , 0 , 0 ) ; <nl> + USE ( dummy ) ; <nl> + CHECK_EQ ( static_cast < int32_t > ( 0x81234567 ) , t . result_rotr_4 ) ; <nl> + CHECK_EQ ( static_cast < int32_t > ( 0x78123456 ) , t . result_rotr_8 ) ; <nl> + CHECK_EQ ( static_cast < int32_t > ( 0x67812345 ) , t . result_rotr_12 ) ; <nl> + CHECK_EQ ( static_cast < int32_t > ( 0x56781234 ) , t . result_rotr_16 ) ; <nl> + CHECK_EQ ( static_cast < int32_t > ( 0x45678123 ) , t . result_rotr_20 ) ; <nl> + CHECK_EQ ( static_cast < int32_t > ( 0x34567812 ) , t . result_rotr_24 ) ; <nl> + CHECK_EQ ( static_cast < int32_t > ( 0x23456781 ) , t . result_rotr_28 ) ; <nl> + <nl> + CHECK_EQ ( static_cast < int32_t > ( 0x81234567 ) , t . result_rotrv_4 ) ; <nl> + CHECK_EQ ( static_cast < int32_t > ( 0x78123456 ) , t . result_rotrv_8 ) ; <nl> + CHECK_EQ ( static_cast < int32_t > ( 0x67812345 ) , t . result_rotrv_12 ) ; <nl> + CHECK_EQ ( static_cast < int32_t > ( 0x56781234 ) , t . result_rotrv_16 ) ; <nl> + CHECK_EQ ( static_cast < int32_t > ( 0x45678123 ) , t . result_rotrv_20 ) ; <nl> + CHECK_EQ ( static_cast < int32_t > ( 0x34567812 ) , t . result_rotrv_24 ) ; <nl> + CHECK_EQ ( static_cast < int32_t > ( 0x23456781 ) , t . result_rotrv_28 ) ; <nl> + } <nl> } <nl> <nl> <nl> TEST ( MIPS16 ) { <nl> 18446744073709551621 . 0 , - 18446744073709551621 . 0 } ; <nl> double tests [ test_size * 2 ] = { 2 . 8 , 2 . 9 , - 2 . 8 , - 2 . 9 , <nl> 18446744073709551616 . 0 , 18446744073709555712 . 0 } ; <nl> - for ( int j = 0 ; j < test_size ; j + = 2 ) { <nl> - for ( int i = 0 ; i < input_size ; i + + ) { <nl> + for ( int j = 0 ; j < test_size ; j + = 2 ) { <nl> + for ( int i = 0 ; i < input_size ; i + + ) { <nl> test . e = inputs [ i ] ; <nl> test . 
f = tests [ j ] ; <nl> ( CALL_GENERATED_CODE ( f , & test , 0 , 0 , 0 , 0 ) ) ; <nl> TEST ( MIPS17 ) { <nl> <nl> __ ldc1 ( f4 , MemOperand ( a0 , OFFSET_OF ( TestFloat , a ) ) ) ; <nl> __ ldc1 ( f8 , MemOperand ( a0 , OFFSET_OF ( TestFloat , b ) ) ) ; <nl> - __ min ( D , f10 , f8 , f4 ) ; <nl> - __ max ( D , f12 , f8 , f4 ) ; <nl> + __ min ( D , f10 , f4 , f8 ) ; <nl> + __ max ( D , f12 , f4 , f8 ) ; <nl> __ sdc1 ( f10 , MemOperand ( a0 , OFFSET_OF ( TestFloat , c ) ) ) ; <nl> __ sdc1 ( f12 , MemOperand ( a0 , OFFSET_OF ( TestFloat , d ) ) ) ; <nl> __ jr ( ra ) ; <nl> TEST ( MIPS17 ) { <nl> } <nl> <nl> <nl> + TEST ( MIPS18 ) { <nl> + if ( IsMipsArchVariant ( kMips32r6 ) ) { <nl> + const int tableLength = 30 ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + Isolate * isolate = CcTest : : i_isolate ( ) ; <nl> + HandleScope scope ( isolate ) ; <nl> + MacroAssembler assm ( isolate , NULL , 0 ) ; <nl> + <nl> + typedef struct test_float { <nl> + double a ; <nl> + double b ; <nl> + int fcsr ; <nl> + } TestFloat ; <nl> + <nl> + TestFloat test ; <nl> + double inputs [ tableLength ] = { 18446744073709551617 . 0 , <nl> + 4503599627370496 . 0 , - 4503599627370496 . 0 , <nl> + 1 . 26782468584154733584017312973E30 , 1 . 44860108245951772690707170478E147 , <nl> + 1 . 7976931348623157E + 308 , 6 . 27463370218383111104242366943E - 307 , <nl> + 309485009821345068724781056 . 89 , <nl> + 2 . 1 , 2 . 6 , 2 . 5 , 3 . 1 , 3 . 6 , 3 . 5 , <nl> + - 2 . 1 , - 2 . 6 , - 2 . 5 , - 3 . 1 , - 3 . 6 , - 3 . 5 , <nl> + 37778931862957161709568 . 0 , 37778931862957161709569 . 0 , <nl> + 37778931862957161709580 . 0 , 37778931862957161709581 . 0 , <nl> + 37778931862957161709582 . 0 , 37778931862957161709583 . 0 , <nl> + 37778931862957161709584 . 0 , 37778931862957161709585 . 0 , <nl> + 37778931862957161709586 . 0 , 37778931862957161709587 . 0 } ; <nl> + double outputs_RN [ tableLength ] = { 18446744073709551617 . 0 , <nl> + 4503599627370496 . 0 , - 4503599627370496 . 0 , <nl> + 1 . 26782468584154733584017312973E30 , 1 . 44860108245951772690707170478E147 , <nl> + 1 . 7976931348623157E308 , 0 , <nl> + 309485009821345068724781057 . 0 , <nl> + 2 . 0 , 3 . 0 , 2 . 0 , 3 . 0 , 4 . 0 , 4 . 0 , <nl> + - 2 . 0 , - 3 . 0 , - 2 . 0 , - 3 . 0 , - 4 . 0 , - 4 . 0 , <nl> + 37778931862957161709568 . 0 , 37778931862957161709569 . 0 , <nl> + 37778931862957161709580 . 0 , 37778931862957161709581 . 0 , <nl> + 37778931862957161709582 . 0 , 37778931862957161709583 . 0 , <nl> + 37778931862957161709584 . 0 , 37778931862957161709585 . 0 , <nl> + 37778931862957161709586 . 0 , 37778931862957161709587 . 0 } ; <nl> + double outputs_RZ [ tableLength ] = { 18446744073709551617 . 0 , <nl> + 4503599627370496 . 0 , - 4503599627370496 . 0 , <nl> + 1 . 26782468584154733584017312973E30 , 1 . 44860108245951772690707170478E147 , <nl> + 1 . 7976931348623157E308 , 0 , <nl> + 309485009821345068724781057 . 0 , <nl> + 2 . 0 , 2 . 0 , 2 . 0 , 3 . 0 , 3 . 0 , 3 . 0 , <nl> + - 2 . 0 , - 2 . 0 , - 2 . 0 , - 3 . 0 , - 3 . 0 , - 3 . 0 , <nl> + 37778931862957161709568 . 0 , 37778931862957161709569 . 0 , <nl> + 37778931862957161709580 . 0 , 37778931862957161709581 . 0 , <nl> + 37778931862957161709582 . 0 , 37778931862957161709583 . 0 , <nl> + 37778931862957161709584 . 0 , 37778931862957161709585 . 0 , <nl> + 37778931862957161709586 . 0 , 37778931862957161709587 . 0 } ; <nl> + double outputs_RP [ tableLength ] = { 18446744073709551617 . 0 , <nl> + 4503599627370496 . 0 , - 4503599627370496 . 0 , <nl> + 1 . 26782468584154733584017312973E30 , 1 . 
44860108245951772690707170478E147 , <nl> + 1 . 7976931348623157E308 , 1 , <nl> + 309485009821345068724781057 . 0 , <nl> + 3 . 0 , 3 . 0 , 3 . 0 , 4 . 0 , 4 . 0 , 4 . 0 , <nl> + - 2 . 0 , - 2 . 0 , - 2 . 0 , - 3 . 0 , - 3 . 0 , - 3 . 0 , <nl> + 37778931862957161709568 . 0 , 37778931862957161709569 . 0 , <nl> + 37778931862957161709580 . 0 , 37778931862957161709581 . 0 , <nl> + 37778931862957161709582 . 0 , 37778931862957161709583 . 0 , <nl> + 37778931862957161709584 . 0 , 37778931862957161709585 . 0 , <nl> + 37778931862957161709586 . 0 , 37778931862957161709587 . 0 } ; <nl> + double outputs_RM [ tableLength ] = { 18446744073709551617 . 0 , <nl> + 4503599627370496 . 0 , - 4503599627370496 . 0 , <nl> + 1 . 26782468584154733584017312973E30 , 1 . 44860108245951772690707170478E147 , <nl> + 1 . 7976931348623157E308 , 0 , <nl> + 309485009821345068724781057 . 0 , <nl> + 2 . 0 , 2 . 0 , 2 . 0 , 3 . 0 , 3 . 0 , 3 . 0 , <nl> + - 3 . 0 , - 3 . 0 , - 3 . 0 , - 4 . 0 , - 4 . 0 , - 4 . 0 , <nl> + 37778931862957161709568 . 0 , 37778931862957161709569 . 0 , <nl> + 37778931862957161709580 . 0 , 37778931862957161709581 . 0 , <nl> + 37778931862957161709582 . 0 , 37778931862957161709583 . 0 , <nl> + 37778931862957161709584 . 0 , 37778931862957161709585 . 0 , <nl> + 37778931862957161709586 . 0 , 37778931862957161709587 . 0 } ; <nl> + int fcsr_inputs [ 4 ] = <nl> + { kRoundToNearest , kRoundToZero , kRoundToPlusInf , kRoundToMinusInf } ; <nl> + double * outputs [ 4 ] = { outputs_RN , outputs_RZ , outputs_RP , outputs_RM } ; <nl> + __ ldc1 ( f4 , MemOperand ( a0 , OFFSET_OF ( TestFloat , a ) ) ) ; <nl> + __ lw ( t0 , MemOperand ( a0 , OFFSET_OF ( TestFloat , fcsr ) ) ) ; <nl> + __ cfc1 ( t1 , FCSR ) ; <nl> + __ ctc1 ( t0 , FCSR ) ; <nl> + __ rint_d ( f8 , f4 ) ; <nl> + __ sdc1 ( f8 , MemOperand ( a0 , OFFSET_OF ( TestFloat , b ) ) ) ; <nl> + __ ctc1 ( t1 , FCSR ) ; <nl> + __ jr ( ra ) ; <nl> + __ nop ( ) ; <nl> + <nl> + CodeDesc desc ; <nl> + assm . GetCode ( & desc ) ; <nl> + Handle < Code > code = isolate - > factory ( ) - > NewCode ( <nl> + desc , Code : : ComputeFlags ( Code : : STUB ) , Handle < Code > ( ) ) ; <nl> + F3 f = FUNCTION_CAST < F3 > ( code - > entry ( ) ) ; <nl> + <nl> + for ( int j = 0 ; j < 4 ; j + + ) { <nl> + test . fcsr = fcsr_inputs [ j ] ; <nl> + for ( int i = 0 ; i < tableLength ; i + + ) { <nl> + test . a = inputs [ i ] ; <nl> + std : : cout < < j < < " " < < i < < " \ n " ; <nl> + ( CALL_GENERATED_CODE ( f , & test , 0 , 0 , 0 , 0 ) ) ; <nl> + CHECK_EQ ( test . b , outputs [ j ] [ i ] ) ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + <nl> + <nl> + TEST ( MIPS19 ) { <nl> + CcTest : : InitializeVM ( ) ; <nl> + Isolate * isolate = CcTest : : i_isolate ( ) ; <nl> + HandleScope scope ( isolate ) ; <nl> + MacroAssembler assm ( isolate , NULL , 0 ) ; <nl> + <nl> + typedef struct test_float { <nl> + double a ; <nl> + int32_t b ; <nl> + int32_t fcsr ; <nl> + } Test ; <nl> + const int tableLength = 24 ; <nl> + double inputs [ tableLength ] = { <nl> + 2 . 1 , 2 . 6 , 2 . 5 , 3 . 1 , 3 . 6 , 3 . 5 , <nl> + - 2 . 1 , - 2 . 6 , - 2 . 5 , - 3 . 1 , - 3 . 6 , - 3 . 5 , <nl> + 2147483637 . 0 , 2147483638 . 0 , 2147483639 . 0 , <nl> + 2147483640 . 0 , 2147483641 . 0 , 2147483642 . 0 , <nl> + 2147483643 . 0 , 2147483644 . 0 , 2147483645 . 0 , <nl> + 2147483646 . 0 , 2147483647 . 0 , 2147483653 . 0 <nl> + } ; <nl> + double outputs_RN [ tableLength ] = { <nl> + 2 . 0 , 3 . 0 , 2 . 0 , 3 . 0 , 4 . 0 , 4 . 0 , <nl> + - 2 . 0 , - 3 . 0 , - 2 . 0 , - 3 . 0 , - 4 . 0 , - 4 . 0 , <nl> + 2147483637 . 0 , 2147483638 . 
0 , 2147483639 . 0 , <nl> + 2147483640 . 0 , 2147483641 . 0 , 2147483642 . 0 , <nl> + 2147483643 . 0 , 2147483644 . 0 , 2147483645 . 0 , <nl> + 2147483646 . 0 , 2147483647 . 0 , kFPUInvalidResult } ; <nl> + double outputs_RZ [ tableLength ] = { <nl> + 2 . 0 , 2 . 0 , 2 . 0 , 3 . 0 , 3 . 0 , 3 . 0 , <nl> + - 2 . 0 , - 2 . 0 , - 2 . 0 , - 3 . 0 , - 3 . 0 , - 3 . 0 , <nl> + 2147483637 . 0 , 2147483638 . 0 , 2147483639 . 0 , <nl> + 2147483640 . 0 , 2147483641 . 0 , 2147483642 . 0 , <nl> + 2147483643 . 0 , 2147483644 . 0 , 2147483645 . 0 , <nl> + 2147483646 . 0 , 2147483647 . 0 , kFPUInvalidResult } ; <nl> + double outputs_RP [ tableLength ] = { <nl> + 3 . 0 , 3 . 0 , 3 . 0 , 4 . 0 , 4 . 0 , 4 . 0 , <nl> + - 2 . 0 , - 2 . 0 , - 2 . 0 , - 3 . 0 , - 3 . 0 , - 3 . 0 , <nl> + 2147483637 . 0 , 2147483638 . 0 , 2147483639 . 0 , <nl> + 2147483640 . 0 , 2147483641 . 0 , 2147483642 . 0 , <nl> + 2147483643 . 0 , 2147483644 . 0 , 2147483645 . 0 , <nl> + 2147483646 . 0 , 2147483647 . 0 , kFPUInvalidResult } ; <nl> + double outputs_RM [ tableLength ] = { <nl> + 2 . 0 , 2 . 0 , 2 . 0 , 3 . 0 , 3 . 0 , 3 . 0 , <nl> + - 3 . 0 , - 3 . 0 , - 3 . 0 , - 4 . 0 , - 4 . 0 , - 4 . 0 , <nl> + 2147483637 . 0 , 2147483638 . 0 , 2147483639 . 0 , <nl> + 2147483640 . 0 , 2147483641 . 0 , 2147483642 . 0 , <nl> + 2147483643 . 0 , 2147483644 . 0 , 2147483645 . 0 , <nl> + 2147483646 . 0 , 2147483647 . 0 , kFPUInvalidResult } ; <nl> + int fcsr_inputs [ 4 ] = <nl> + { kRoundToNearest , kRoundToZero , kRoundToPlusInf , kRoundToMinusInf } ; <nl> + double * outputs [ 4 ] = { outputs_RN , outputs_RZ , outputs_RP , outputs_RM } ; <nl> + __ ldc1 ( f4 , MemOperand ( a0 , OFFSET_OF ( Test , a ) ) ) ; <nl> + __ lw ( t0 , MemOperand ( a0 , OFFSET_OF ( Test , fcsr ) ) ) ; <nl> + __ cfc1 ( t1 , FCSR ) ; <nl> + __ ctc1 ( t0 , FCSR ) ; <nl> + __ cvt_w_d ( f8 , f4 ) ; <nl> + __ swc1 ( f8 , MemOperand ( a0 , OFFSET_OF ( Test , b ) ) ) ; <nl> + __ ctc1 ( t1 , FCSR ) ; <nl> + __ jr ( ra ) ; <nl> + __ nop ( ) ; <nl> + Test test ; <nl> + CodeDesc desc ; <nl> + assm . GetCode ( & desc ) ; <nl> + Handle < Code > code = isolate - > factory ( ) - > NewCode ( <nl> + desc , Code : : ComputeFlags ( Code : : STUB ) , Handle < Code > ( ) ) ; <nl> + F3 f = FUNCTION_CAST < F3 > ( code - > entry ( ) ) ; <nl> + for ( int j = 0 ; j < 4 ; j + + ) { <nl> + test . fcsr = fcsr_inputs [ j ] ; <nl> + for ( int i = 0 ; i < tableLength ; i + + ) { <nl> + test . a = inputs [ i ] ; <nl> + std : : cout < < i < < " " < < j < < " \ n " ; <nl> + ( CALL_GENERATED_CODE ( f , & test , 0 , 0 , 0 , 0 ) ) ; <nl> + CHECK_EQ ( test . b , outputs [ j ] [ i ] ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + <nl> TEST ( jump_tables1 ) { <nl> / / Test jump tables with forward jumps . <nl> CcTest : : InitializeVM ( ) ; <nl> mmm a / test / cctest / test - assembler - mips64 . cc <nl> ppp b / test / cctest / test - assembler - mips64 . cc <nl> TEST ( MIPS7 ) { <nl> <nl> <nl> TEST ( MIPS8 ) { <nl> - / / Test ROTR and ROTRV instructions . <nl> - CcTest : : InitializeVM ( ) ; <nl> - Isolate * isolate = CcTest : : i_isolate ( ) ; <nl> - HandleScope scope ( isolate ) ; <nl> + if ( kArchVariant = = kMips64r2 ) { <nl> + / / Test ROTR and ROTRV instructions . 
<nl> + CcTest : : InitializeVM ( ) ; <nl> + Isolate * isolate = CcTest : : i_isolate ( ) ; <nl> + HandleScope scope ( isolate ) ; <nl> <nl> - typedef struct { <nl> - int32_t input ; <nl> - int32_t result_rotr_4 ; <nl> - int32_t result_rotr_8 ; <nl> - int32_t result_rotr_12 ; <nl> - int32_t result_rotr_16 ; <nl> - int32_t result_rotr_20 ; <nl> - int32_t result_rotr_24 ; <nl> - int32_t result_rotr_28 ; <nl> - int32_t result_rotrv_4 ; <nl> - int32_t result_rotrv_8 ; <nl> - int32_t result_rotrv_12 ; <nl> - int32_t result_rotrv_16 ; <nl> - int32_t result_rotrv_20 ; <nl> - int32_t result_rotrv_24 ; <nl> - int32_t result_rotrv_28 ; <nl> - } T ; <nl> - T t ; <nl> + typedef struct { <nl> + int32_t input ; <nl> + int32_t result_rotr_4 ; <nl> + int32_t result_rotr_8 ; <nl> + int32_t result_rotr_12 ; <nl> + int32_t result_rotr_16 ; <nl> + int32_t result_rotr_20 ; <nl> + int32_t result_rotr_24 ; <nl> + int32_t result_rotr_28 ; <nl> + int32_t result_rotrv_4 ; <nl> + int32_t result_rotrv_8 ; <nl> + int32_t result_rotrv_12 ; <nl> + int32_t result_rotrv_16 ; <nl> + int32_t result_rotrv_20 ; <nl> + int32_t result_rotrv_24 ; <nl> + int32_t result_rotrv_28 ; <nl> + } T ; <nl> + T t ; <nl> <nl> - MacroAssembler assm ( isolate , NULL , 0 ) ; <nl> + MacroAssembler assm ( isolate , NULL , 0 ) ; <nl> <nl> - / / Basic word load . <nl> - __ lw ( a4 , MemOperand ( a0 , OFFSET_OF ( T , input ) ) ) ; <nl> - <nl> - / / ROTR instruction ( called through the Ror macro ) . <nl> - __ Ror ( a5 , a4 , 0x0004 ) ; <nl> - __ Ror ( a6 , a4 , 0x0008 ) ; <nl> - __ Ror ( a7 , a4 , 0x000c ) ; <nl> - __ Ror ( t0 , a4 , 0x0010 ) ; <nl> - __ Ror ( t1 , a4 , 0x0014 ) ; <nl> - __ Ror ( t2 , a4 , 0x0018 ) ; <nl> - __ Ror ( t3 , a4 , 0x001c ) ; <nl> - <nl> - / / Basic word store . <nl> - __ sw ( a5 , MemOperand ( a0 , OFFSET_OF ( T , result_rotr_4 ) ) ) ; <nl> - __ sw ( a6 , MemOperand ( a0 , OFFSET_OF ( T , result_rotr_8 ) ) ) ; <nl> - __ sw ( a7 , MemOperand ( a0 , OFFSET_OF ( T , result_rotr_12 ) ) ) ; <nl> - __ sw ( t0 , MemOperand ( a0 , OFFSET_OF ( T , result_rotr_16 ) ) ) ; <nl> - __ sw ( t1 , MemOperand ( a0 , OFFSET_OF ( T , result_rotr_20 ) ) ) ; <nl> - __ sw ( t2 , MemOperand ( a0 , OFFSET_OF ( T , result_rotr_24 ) ) ) ; <nl> - __ sw ( t3 , MemOperand ( a0 , OFFSET_OF ( T , result_rotr_28 ) ) ) ; <nl> - <nl> - / / ROTRV instruction ( called through the Ror macro ) . <nl> - __ li ( t3 , 0x0004 ) ; <nl> - __ Ror ( a5 , a4 , t3 ) ; <nl> - __ li ( t3 , 0x0008 ) ; <nl> - __ Ror ( a6 , a4 , t3 ) ; <nl> - __ li ( t3 , 0x000C ) ; <nl> - __ Ror ( a7 , a4 , t3 ) ; <nl> - __ li ( t3 , 0x0010 ) ; <nl> - __ Ror ( t0 , a4 , t3 ) ; <nl> - __ li ( t3 , 0x0014 ) ; <nl> - __ Ror ( t1 , a4 , t3 ) ; <nl> - __ li ( t3 , 0x0018 ) ; <nl> - __ Ror ( t2 , a4 , t3 ) ; <nl> - __ li ( t3 , 0x001C ) ; <nl> - __ Ror ( t3 , a4 , t3 ) ; <nl> - <nl> - / / Basic word store . <nl> - __ sw ( a5 , MemOperand ( a0 , OFFSET_OF ( T , result_rotrv_4 ) ) ) ; <nl> - __ sw ( a6 , MemOperand ( a0 , OFFSET_OF ( T , result_rotrv_8 ) ) ) ; <nl> - __ sw ( a7 , MemOperand ( a0 , OFFSET_OF ( T , result_rotrv_12 ) ) ) ; <nl> - __ sw ( t0 , MemOperand ( a0 , OFFSET_OF ( T , result_rotrv_16 ) ) ) ; <nl> - __ sw ( t1 , MemOperand ( a0 , OFFSET_OF ( T , result_rotrv_20 ) ) ) ; <nl> - __ sw ( t2 , MemOperand ( a0 , OFFSET_OF ( T , result_rotrv_24 ) ) ) ; <nl> - __ sw ( t3 , MemOperand ( a0 , OFFSET_OF ( T , result_rotrv_28 ) ) ) ; <nl> + / / Basic word load . 
<nl> + __ lw ( a4 , MemOperand ( a0 , OFFSET_OF ( T , input ) ) ) ; <nl> + <nl> + / / ROTR instruction ( called through the Ror macro ) . <nl> + __ Ror ( a5 , a4 , 0x0004 ) ; <nl> + __ Ror ( a6 , a4 , 0x0008 ) ; <nl> + __ Ror ( a7 , a4 , 0x000c ) ; <nl> + __ Ror ( t0 , a4 , 0x0010 ) ; <nl> + __ Ror ( t1 , a4 , 0x0014 ) ; <nl> + __ Ror ( t2 , a4 , 0x0018 ) ; <nl> + __ Ror ( t3 , a4 , 0x001c ) ; <nl> + <nl> + / / Basic word store . <nl> + __ sw ( a5 , MemOperand ( a0 , OFFSET_OF ( T , result_rotr_4 ) ) ) ; <nl> + __ sw ( a6 , MemOperand ( a0 , OFFSET_OF ( T , result_rotr_8 ) ) ) ; <nl> + __ sw ( a7 , MemOperand ( a0 , OFFSET_OF ( T , result_rotr_12 ) ) ) ; <nl> + __ sw ( t0 , MemOperand ( a0 , OFFSET_OF ( T , result_rotr_16 ) ) ) ; <nl> + __ sw ( t1 , MemOperand ( a0 , OFFSET_OF ( T , result_rotr_20 ) ) ) ; <nl> + __ sw ( t2 , MemOperand ( a0 , OFFSET_OF ( T , result_rotr_24 ) ) ) ; <nl> + __ sw ( t3 , MemOperand ( a0 , OFFSET_OF ( T , result_rotr_28 ) ) ) ; <nl> + <nl> + / / ROTRV instruction ( called through the Ror macro ) . <nl> + __ li ( t3 , 0x0004 ) ; <nl> + __ Ror ( a5 , a4 , t3 ) ; <nl> + __ li ( t3 , 0x0008 ) ; <nl> + __ Ror ( a6 , a4 , t3 ) ; <nl> + __ li ( t3 , 0x000C ) ; <nl> + __ Ror ( a7 , a4 , t3 ) ; <nl> + __ li ( t3 , 0x0010 ) ; <nl> + __ Ror ( t0 , a4 , t3 ) ; <nl> + __ li ( t3 , 0x0014 ) ; <nl> + __ Ror ( t1 , a4 , t3 ) ; <nl> + __ li ( t3 , 0x0018 ) ; <nl> + __ Ror ( t2 , a4 , t3 ) ; <nl> + __ li ( t3 , 0x001C ) ; <nl> + __ Ror ( t3 , a4 , t3 ) ; <nl> + <nl> + / / Basic word store . <nl> + __ sw ( a5 , MemOperand ( a0 , OFFSET_OF ( T , result_rotrv_4 ) ) ) ; <nl> + __ sw ( a6 , MemOperand ( a0 , OFFSET_OF ( T , result_rotrv_8 ) ) ) ; <nl> + __ sw ( a7 , MemOperand ( a0 , OFFSET_OF ( T , result_rotrv_12 ) ) ) ; <nl> + __ sw ( t0 , MemOperand ( a0 , OFFSET_OF ( T , result_rotrv_16 ) ) ) ; <nl> + __ sw ( t1 , MemOperand ( a0 , OFFSET_OF ( T , result_rotrv_20 ) ) ) ; <nl> + __ sw ( t2 , MemOperand ( a0 , OFFSET_OF ( T , result_rotrv_24 ) ) ) ; <nl> + __ sw ( t3 , MemOperand ( a0 , OFFSET_OF ( T , result_rotrv_28 ) ) ) ; <nl> <nl> - __ jr ( ra ) ; <nl> - __ nop ( ) ; <nl> + __ jr ( ra ) ; <nl> + __ nop ( ) ; <nl> <nl> - CodeDesc desc ; <nl> - assm . GetCode ( & desc ) ; <nl> - Handle < Code > code = isolate - > factory ( ) - > NewCode ( <nl> - desc , Code : : ComputeFlags ( Code : : STUB ) , Handle < Code > ( ) ) ; <nl> - F3 f = FUNCTION_CAST < F3 > ( code - > entry ( ) ) ; <nl> - t . input = 0x12345678 ; <nl> - Object * dummy = CALL_GENERATED_CODE ( f , & t , 0x0 , 0 , 0 , 0 ) ; <nl> - USE ( dummy ) ; <nl> - CHECK_EQ ( static_cast < int32_t > ( 0x81234567 ) , t . result_rotr_4 ) ; <nl> - CHECK_EQ ( static_cast < int32_t > ( 0x78123456 ) , t . result_rotr_8 ) ; <nl> - CHECK_EQ ( static_cast < int32_t > ( 0x67812345 ) , t . result_rotr_12 ) ; <nl> - CHECK_EQ ( static_cast < int32_t > ( 0x56781234 ) , t . result_rotr_16 ) ; <nl> - CHECK_EQ ( static_cast < int32_t > ( 0x45678123 ) , t . result_rotr_20 ) ; <nl> - CHECK_EQ ( static_cast < int32_t > ( 0x34567812 ) , t . result_rotr_24 ) ; <nl> - CHECK_EQ ( static_cast < int32_t > ( 0x23456781 ) , t . result_rotr_28 ) ; <nl> - <nl> - CHECK_EQ ( static_cast < int32_t > ( 0x81234567 ) , t . result_rotrv_4 ) ; <nl> - CHECK_EQ ( static_cast < int32_t > ( 0x78123456 ) , t . result_rotrv_8 ) ; <nl> - CHECK_EQ ( static_cast < int32_t > ( 0x67812345 ) , t . result_rotrv_12 ) ; <nl> - CHECK_EQ ( static_cast < int32_t > ( 0x56781234 ) , t . result_rotrv_16 ) ; <nl> - CHECK_EQ ( static_cast < int32_t > ( 0x45678123 ) , t . 
result_rotrv_20 ) ; <nl> - CHECK_EQ ( static_cast < int32_t > ( 0x34567812 ) , t . result_rotrv_24 ) ; <nl> - CHECK_EQ ( static_cast < int32_t > ( 0x23456781 ) , t . result_rotrv_28 ) ; <nl> + CodeDesc desc ; <nl> + assm . GetCode ( & desc ) ; <nl> + Handle < Code > code = isolate - > factory ( ) - > NewCode ( <nl> + desc , Code : : ComputeFlags ( Code : : STUB ) , Handle < Code > ( ) ) ; <nl> + F3 f = FUNCTION_CAST < F3 > ( code - > entry ( ) ) ; <nl> + t . input = 0x12345678 ; <nl> + Object * dummy = CALL_GENERATED_CODE ( f , & t , 0x0 , 0 , 0 , 0 ) ; <nl> + USE ( dummy ) ; <nl> + CHECK_EQ ( static_cast < int32_t > ( 0x81234567 ) , t . result_rotr_4 ) ; <nl> + CHECK_EQ ( static_cast < int32_t > ( 0x78123456 ) , t . result_rotr_8 ) ; <nl> + CHECK_EQ ( static_cast < int32_t > ( 0x67812345 ) , t . result_rotr_12 ) ; <nl> + CHECK_EQ ( static_cast < int32_t > ( 0x56781234 ) , t . result_rotr_16 ) ; <nl> + CHECK_EQ ( static_cast < int32_t > ( 0x45678123 ) , t . result_rotr_20 ) ; <nl> + CHECK_EQ ( static_cast < int32_t > ( 0x34567812 ) , t . result_rotr_24 ) ; <nl> + CHECK_EQ ( static_cast < int32_t > ( 0x23456781 ) , t . result_rotr_28 ) ; <nl> + <nl> + CHECK_EQ ( static_cast < int32_t > ( 0x81234567 ) , t . result_rotrv_4 ) ; <nl> + CHECK_EQ ( static_cast < int32_t > ( 0x78123456 ) , t . result_rotrv_8 ) ; <nl> + CHECK_EQ ( static_cast < int32_t > ( 0x67812345 ) , t . result_rotrv_12 ) ; <nl> + CHECK_EQ ( static_cast < int32_t > ( 0x56781234 ) , t . result_rotrv_16 ) ; <nl> + CHECK_EQ ( static_cast < int32_t > ( 0x45678123 ) , t . result_rotrv_20 ) ; <nl> + CHECK_EQ ( static_cast < int32_t > ( 0x34567812 ) , t . result_rotrv_24 ) ; <nl> + CHECK_EQ ( static_cast < int32_t > ( 0x23456781 ) , t . result_rotrv_28 ) ; <nl> + } <nl> } <nl> <nl> <nl> TEST ( MIPS18 ) { <nl> <nl> __ ldc1 ( f4 , MemOperand ( a0 , OFFSET_OF ( TestFloat , a ) ) ) ; <nl> __ ldc1 ( f8 , MemOperand ( a0 , OFFSET_OF ( TestFloat , b ) ) ) ; <nl> - __ min ( D , f10 , f8 , f4 ) ; <nl> - __ max ( D , f12 , f8 , f4 ) ; <nl> + __ min ( D , f10 , f4 , f8 ) ; <nl> + __ max ( D , f12 , f4 , f8 ) ; <nl> __ sdc1 ( f10 , MemOperand ( a0 , OFFSET_OF ( TestFloat , c ) ) ) ; <nl> __ sdc1 ( f12 , MemOperand ( a0 , OFFSET_OF ( TestFloat , d ) ) ) ; <nl> __ jr ( ra ) ; <nl> TEST ( MIPS18 ) { <nl> } <nl> <nl> <nl> + TEST ( MIPS19 ) { <nl> + if ( kArchVariant = = kMips64r6 ) { <nl> + const int tableLength = 30 ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + Isolate * isolate = CcTest : : i_isolate ( ) ; <nl> + HandleScope scope ( isolate ) ; <nl> + MacroAssembler assm ( isolate , NULL , 0 ) ; <nl> + <nl> + typedef struct test_float { <nl> + double a ; <nl> + double b ; <nl> + int fcsr ; <nl> + } TestFloat ; <nl> + <nl> + TestFloat test ; <nl> + double inputs [ tableLength ] = { 18446744073709551617 . 0 , <nl> + 4503599627370496 . 0 , - 4503599627370496 . 0 , <nl> + 1 . 26782468584154733584017312973E30 , 1 . 44860108245951772690707170478E147 , <nl> + 1 . 7976931348623157E + 308 , 6 . 27463370218383111104242366943E - 307 , <nl> + 309485009821345068724781056 . 89 , <nl> + 2 . 1 , 2 . 6 , 2 . 5 , 3 . 1 , 3 . 6 , 3 . 5 , <nl> + - 2 . 1 , - 2 . 6 , - 2 . 5 , - 3 . 1 , - 3 . 6 , - 3 . 5 , <nl> + 37778931862957161709568 . 0 , 37778931862957161709569 . 0 , <nl> + 37778931862957161709580 . 0 , 37778931862957161709581 . 0 , <nl> + 37778931862957161709582 . 0 , 37778931862957161709583 . 0 , <nl> + 37778931862957161709584 . 0 , 37778931862957161709585 . 0 , <nl> + 37778931862957161709586 . 0 , 37778931862957161709587 . 
0 } ; <nl> + double outputs_RN [ tableLength ] = { 18446744073709551617 . 0 , <nl> + 4503599627370496 . 0 , - 4503599627370496 . 0 , <nl> + 1 . 26782468584154733584017312973E30 , 1 . 44860108245951772690707170478E147 , <nl> + 1 . 7976931348623157E308 , 0 , <nl> + 309485009821345068724781057 . 0 , <nl> + 2 . 0 , 3 . 0 , 2 . 0 , 3 . 0 , 4 . 0 , 4 . 0 , <nl> + - 2 . 0 , - 3 . 0 , - 2 . 0 , - 3 . 0 , - 4 . 0 , - 4 . 0 , <nl> + 37778931862957161709568 . 0 , 37778931862957161709569 . 0 , <nl> + 37778931862957161709580 . 0 , 37778931862957161709581 . 0 , <nl> + 37778931862957161709582 . 0 , 37778931862957161709583 . 0 , <nl> + 37778931862957161709584 . 0 , 37778931862957161709585 . 0 , <nl> + 37778931862957161709586 . 0 , 37778931862957161709587 . 0 } ; <nl> + double outputs_RZ [ tableLength ] = { 18446744073709551617 . 0 , <nl> + 4503599627370496 . 0 , - 4503599627370496 . 0 , <nl> + 1 . 26782468584154733584017312973E30 , 1 . 44860108245951772690707170478E147 , <nl> + 1 . 7976931348623157E308 , 0 , <nl> + 309485009821345068724781057 . 0 , <nl> + 2 . 0 , 2 . 0 , 2 . 0 , 3 . 0 , 3 . 0 , 3 . 0 , <nl> + - 2 . 0 , - 2 . 0 , - 2 . 0 , - 3 . 0 , - 3 . 0 , - 3 . 0 , <nl> + 37778931862957161709568 . 0 , 37778931862957161709569 . 0 , <nl> + 37778931862957161709580 . 0 , 37778931862957161709581 . 0 , <nl> + 37778931862957161709582 . 0 , 37778931862957161709583 . 0 , <nl> + 37778931862957161709584 . 0 , 37778931862957161709585 . 0 , <nl> + 37778931862957161709586 . 0 , 37778931862957161709587 . 0 } ; <nl> + double outputs_RP [ tableLength ] = { 18446744073709551617 . 0 , <nl> + 4503599627370496 . 0 , - 4503599627370496 . 0 , <nl> + 1 . 26782468584154733584017312973E30 , 1 . 44860108245951772690707170478E147 , <nl> + 1 . 7976931348623157E308 , 1 , <nl> + 309485009821345068724781057 . 0 , <nl> + 3 . 0 , 3 . 0 , 3 . 0 , 4 . 0 , 4 . 0 , 4 . 0 , <nl> + - 2 . 0 , - 2 . 0 , - 2 . 0 , - 3 . 0 , - 3 . 0 , - 3 . 0 , <nl> + 37778931862957161709568 . 0 , 37778931862957161709569 . 0 , <nl> + 37778931862957161709580 . 0 , 37778931862957161709581 . 0 , <nl> + 37778931862957161709582 . 0 , 37778931862957161709583 . 0 , <nl> + 37778931862957161709584 . 0 , 37778931862957161709585 . 0 , <nl> + 37778931862957161709586 . 0 , 37778931862957161709587 . 0 } ; <nl> + double outputs_RM [ tableLength ] = { 18446744073709551617 . 0 , <nl> + 4503599627370496 . 0 , - 4503599627370496 . 0 , <nl> + 1 . 26782468584154733584017312973E30 , 1 . 44860108245951772690707170478E147 , <nl> + 1 . 7976931348623157E308 , 0 , <nl> + 309485009821345068724781057 . 0 , <nl> + 2 . 0 , 2 . 0 , 2 . 0 , 3 . 0 , 3 . 0 , 3 . 0 , <nl> + - 3 . 0 , - 3 . 0 , - 3 . 0 , - 4 . 0 , - 4 . 0 , - 4 . 0 , <nl> + 37778931862957161709568 . 0 , 37778931862957161709569 . 0 , <nl> + 37778931862957161709580 . 0 , 37778931862957161709581 . 0 , <nl> + 37778931862957161709582 . 0 , 37778931862957161709583 . 0 , <nl> + 37778931862957161709584 . 0 , 37778931862957161709585 . 0 , <nl> + 37778931862957161709586 . 0 , 37778931862957161709587 . 
0 } ; <nl> + int fcsr_inputs [ 4 ] = <nl> + { kRoundToNearest , kRoundToZero , kRoundToPlusInf , kRoundToMinusInf } ; <nl> + double * outputs [ 4 ] = { outputs_RN , outputs_RZ , outputs_RP , outputs_RM } ; <nl> + __ ldc1 ( f4 , MemOperand ( a0 , OFFSET_OF ( TestFloat , a ) ) ) ; <nl> + __ lw ( t0 , MemOperand ( a0 , OFFSET_OF ( TestFloat , fcsr ) ) ) ; <nl> + __ ctc1 ( t0 , FCSR ) ; <nl> + __ rint_d ( f8 , f4 ) ; <nl> + __ sdc1 ( f8 , MemOperand ( a0 , OFFSET_OF ( TestFloat , b ) ) ) ; <nl> + __ jr ( ra ) ; <nl> + __ nop ( ) ; <nl> + <nl> + CodeDesc desc ; <nl> + assm . GetCode ( & desc ) ; <nl> + Handle < Code > code = isolate - > factory ( ) - > NewCode ( <nl> + desc , Code : : ComputeFlags ( Code : : STUB ) , Handle < Code > ( ) ) ; <nl> + F3 f = FUNCTION_CAST < F3 > ( code - > entry ( ) ) ; <nl> + <nl> + for ( int j = 0 ; j < 4 ; j + + ) { <nl> + test . fcsr = fcsr_inputs [ j ] ; <nl> + for ( int i = 0 ; i < tableLength ; i + + ) { <nl> + test . a = inputs [ i ] ; <nl> + std : : cout < < j < < " " < < i < < " \ n " ; <nl> + ( CALL_GENERATED_CODE ( f , & test , 0 , 0 , 0 , 0 ) ) ; <nl> + CHECK_EQ ( test . b , outputs [ j ] [ i ] ) ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + <nl> + <nl> + TEST ( MIPS20 ) { <nl> + CcTest : : InitializeVM ( ) ; <nl> + Isolate * isolate = CcTest : : i_isolate ( ) ; <nl> + HandleScope scope ( isolate ) ; <nl> + MacroAssembler assm ( isolate , NULL , 0 ) ; <nl> + <nl> + typedef struct test_float { <nl> + double a ; <nl> + int32_t b ; <nl> + int fcsr ; <nl> + } Test ; <nl> + const int tableLength = 24 ; <nl> + double inputs [ tableLength ] = { <nl> + 2 . 1 , 2 . 6 , 2 . 5 , 3 . 1 , 3 . 6 , 3 . 5 , <nl> + - 2 . 1 , - 2 . 6 , - 2 . 5 , - 3 . 1 , - 3 . 6 , - 3 . 5 , <nl> + 2147483637 . 0 , 2147483638 . 0 , 2147483639 . 0 , <nl> + 2147483640 . 0 , 2147483641 . 0 , 2147483642 . 0 , <nl> + 2147483643 . 0 , 2147483644 . 0 , 2147483645 . 0 , <nl> + 2147483646 . 0 , 2147483647 . 0 , 2147483653 . 0 <nl> + } ; <nl> + double outputs_RN [ tableLength ] = { <nl> + 2 . 0 , 3 . 0 , 2 . 0 , 3 . 0 , 4 . 0 , 4 . 0 , <nl> + - 2 . 0 , - 3 . 0 , - 2 . 0 , - 3 . 0 , - 4 . 0 , - 4 . 0 , <nl> + 2147483637 . 0 , 2147483638 . 0 , 2147483639 . 0 , <nl> + 2147483640 . 0 , 2147483641 . 0 , 2147483642 . 0 , <nl> + 2147483643 . 0 , 2147483644 . 0 , 2147483645 . 0 , <nl> + 2147483646 . 0 , 2147483647 . 0 , kFPUInvalidResult } ; <nl> + double outputs_RZ [ tableLength ] = { <nl> + 2 . 0 , 2 . 0 , 2 . 0 , 3 . 0 , 3 . 0 , 3 . 0 , <nl> + - 2 . 0 , - 2 . 0 , - 2 . 0 , - 3 . 0 , - 3 . 0 , - 3 . 0 , <nl> + 2147483637 . 0 , 2147483638 . 0 , 2147483639 . 0 , <nl> + 2147483640 . 0 , 2147483641 . 0 , 2147483642 . 0 , <nl> + 2147483643 . 0 , 2147483644 . 0 , 2147483645 . 0 , <nl> + 2147483646 . 0 , 2147483647 . 0 , kFPUInvalidResult } ; <nl> + double outputs_RP [ tableLength ] = { <nl> + 3 . 0 , 3 . 0 , 3 . 0 , 4 . 0 , 4 . 0 , 4 . 0 , <nl> + - 2 . 0 , - 2 . 0 , - 2 . 0 , - 3 . 0 , - 3 . 0 , - 3 . 0 , <nl> + 2147483637 . 0 , 2147483638 . 0 , 2147483639 . 0 , <nl> + 2147483640 . 0 , 2147483641 . 0 , 2147483642 . 0 , <nl> + 2147483643 . 0 , 2147483644 . 0 , 2147483645 . 0 , <nl> + 2147483646 . 0 , 2147483647 . 0 , kFPUInvalidResult } ; <nl> + double outputs_RM [ tableLength ] = { <nl> + 2 . 0 , 2 . 0 , 2 . 0 , 3 . 0 , 3 . 0 , 3 . 0 , <nl> + - 3 . 0 , - 3 . 0 , - 3 . 0 , - 4 . 0 , - 4 . 0 , - 4 . 0 , <nl> + 2147483637 . 0 , 2147483638 . 0 , 2147483639 . 0 , <nl> + 2147483640 . 0 , 2147483641 . 0 , 2147483642 . 0 , <nl> + 2147483643 . 0 , 2147483644 . 0 , 2147483645 . 0 , <nl> + 2147483646 . 0 , 2147483647 . 
0 , kFPUInvalidResult } ; <nl> + int fcsr_inputs [ 4 ] = <nl> + { kRoundToNearest , kRoundToZero , kRoundToPlusInf , kRoundToMinusInf } ; <nl> + double * outputs [ 4 ] = { outputs_RN , outputs_RZ , outputs_RP , outputs_RM } ; <nl> + __ ldc1 ( f4 , MemOperand ( a0 , OFFSET_OF ( Test , a ) ) ) ; <nl> + __ lw ( t0 , MemOperand ( a0 , OFFSET_OF ( Test , fcsr ) ) ) ; <nl> + __ cfc1 ( t1 , FCSR ) ; <nl> + __ ctc1 ( t0 , FCSR ) ; <nl> + __ cvt_w_d ( f8 , f4 ) ; <nl> + __ swc1 ( f8 , MemOperand ( a0 , OFFSET_OF ( Test , b ) ) ) ; <nl> + __ ctc1 ( t1 , FCSR ) ; <nl> + __ jr ( ra ) ; <nl> + __ nop ( ) ; <nl> + Test test ; <nl> + CodeDesc desc ; <nl> + assm . GetCode ( & desc ) ; <nl> + Handle < Code > code = isolate - > factory ( ) - > NewCode ( <nl> + desc , Code : : ComputeFlags ( Code : : STUB ) , Handle < Code > ( ) ) ; <nl> + F3 f = FUNCTION_CAST < F3 > ( code - > entry ( ) ) ; <nl> + for ( int j = 0 ; j < 4 ; j + + ) { <nl> + test . fcsr = fcsr_inputs [ j ] ; <nl> + for ( int i = 0 ; i < tableLength ; i + + ) { <nl> + test . a = inputs [ i ] ; <nl> + ( CALL_GENERATED_CODE ( f , & test , 0 , 0 , 0 , 0 ) ) ; <nl> + CHECK_EQ ( test . b , outputs [ j ] [ i ] ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + <nl> + TEST ( MIPS21 ) { <nl> + if ( kArchVariant = = kMips64r6 ) { <nl> + const int tableLength = 30 ; <nl> + CcTest : : InitializeVM ( ) ; <nl> + Isolate * isolate = CcTest : : i_isolate ( ) ; <nl> + HandleScope scope ( isolate ) ; <nl> + MacroAssembler assm ( isolate , NULL , 0 ) ; <nl> + <nl> + typedef struct test_float { <nl> + double a ; <nl> + double b ; <nl> + int fcsr ; <nl> + } TestFloat ; <nl> + <nl> + TestFloat test ; <nl> + double inputs [ tableLength ] = { 18446744073709551617 . 0 , <nl> + 4503599627370496 . 0 , - 4503599627370496 . 0 , <nl> + 1 . 26782468584154733584017312973E30 , 1 . 44860108245951772690707170478E147 , <nl> + 1 . 7976931348623157E308 , 6 . 27463370218383111104242366943E - 307 , <nl> + 309485009821345068724781056 . 89 , <nl> + 2 . 1 , 2 . 6 , 2 . 5 , 3 . 1 , 3 . 6 , 3 . 5 , <nl> + - 2 . 1 , - 2 . 6 , - 2 . 5 , - 3 . 1 , - 3 . 6 , - 3 . 5 , <nl> + 37778931862957161709568 . 0 , 37778931862957161709569 . 0 , <nl> + 37778931862957161709580 . 0 , 37778931862957161709581 . 0 , <nl> + 37778931862957161709582 . 0 , 37778931862957161709583 . 0 , <nl> + 37778931862957161709584 . 0 , 37778931862957161709585 . 0 , <nl> + 37778931862957161709586 . 0 , 37778931862957161709587 . 0 } ; <nl> + double outputs_RN [ tableLength ] = { 18446744073709551617 . 0 , <nl> + 4503599627370496 . 0 , - 4503599627370496 . 0 , <nl> + 1 . 26782468584154733584017312973E30 , 1 . 44860108245951772690707170478E147 , <nl> + 1 . 7976931348623157E308 , 0 , <nl> + 309485009821345068724781057 . 0 , <nl> + 2 . 0 , 3 . 0 , 2 . 0 , 3 . 0 , 4 . 0 , 4 . 0 , <nl> + - 2 . 0 , - 3 . 0 , - 2 . 0 , - 3 . 0 , - 4 . 0 , - 4 . 0 , <nl> + 37778931862957161709568 . 0 , 37778931862957161709569 . 0 , <nl> + 37778931862957161709580 . 0 , 37778931862957161709581 . 0 , <nl> + 37778931862957161709582 . 0 , 37778931862957161709583 . 0 , <nl> + 37778931862957161709584 . 0 , 37778931862957161709585 . 0 , <nl> + 37778931862957161709586 . 0 , 37778931862957161709587 . 0 } ; <nl> + double outputs_RZ [ tableLength ] = { 18446744073709551617 . 0 , <nl> + 4503599627370496 . 0 , - 4503599627370496 . 0 , <nl> + 1 . 26782468584154733584017312973E30 , 1 . 44860108245951772690707170478E147 , <nl> + 1 . 7976931348623157E308 , 0 , <nl> + 309485009821345068724781057 . 0 , <nl> + 2 . 0 , 2 . 0 , 2 . 0 , 3 . 0 , 3 . 0 , 3 . 0 , <nl> + - 2 . 
0 , - 2 . 0 , - 2 . 0 , - 3 . 0 , - 3 . 0 , - 3 . 0 , <nl> + 37778931862957161709568 . 0 , 37778931862957161709569 . 0 , <nl> + 37778931862957161709580 . 0 , 37778931862957161709581 . 0 , <nl> + 37778931862957161709582 . 0 , 37778931862957161709583 . 0 , <nl> + 37778931862957161709584 . 0 , 37778931862957161709585 . 0 , <nl> + 37778931862957161709586 . 0 , 37778931862957161709587 . 0 } ; <nl> + double outputs_RP [ tableLength ] = { 18446744073709551617 . 0 , <nl> + 4503599627370496 . 0 , - 4503599627370496 . 0 , <nl> + 1 . 26782468584154733584017312973E30 , 1 . 44860108245951772690707170478E147 , <nl> + 1 . 7976931348623157E308 , 1 , <nl> + 309485009821345068724781057 . 0 , <nl> + 3 . 0 , 3 . 0 , 3 . 0 , 4 . 0 , 4 . 0 , 4 . 0 , <nl> + - 2 . 0 , - 2 . 0 , - 2 . 0 , - 3 . 0 , - 3 . 0 , - 3 . 0 , <nl> + 37778931862957161709568 . 0 , 37778931862957161709569 . 0 , <nl> + 37778931862957161709580 . 0 , 37778931862957161709581 . 0 , <nl> + 37778931862957161709582 . 0 , 37778931862957161709583 . 0 , <nl> + 37778931862957161709584 . 0 , 37778931862957161709585 . 0 , <nl> + 37778931862957161709586 . 0 , 37778931862957161709587 . 0 } ; <nl> + double outputs_RM [ tableLength ] = { 18446744073709551617 . 0 , <nl> + 4503599627370496 . 0 , - 4503599627370496 . 0 , <nl> + 1 . 26782468584154733584017312973E30 , 1 . 44860108245951772690707170478E147 , <nl> + 1 . 7976931348623157E308 , 0 , <nl> + 309485009821345068724781057 . 0 , <nl> + 2 . 0 , 2 . 0 , 2 . 0 , 3 . 0 , 3 . 0 , 3 . 0 , <nl> + - 3 . 0 , - 3 . 0 , - 3 . 0 , - 4 . 0 , - 4 . 0 , - 4 . 0 , <nl> + 37778931862957161709568 . 0 , 37778931862957161709569 . 0 , <nl> + 37778931862957161709580 . 0 , 37778931862957161709581 . 0 , <nl> + 37778931862957161709582 . 0 , 37778931862957161709583 . 0 , <nl> + 37778931862957161709584 . 0 , 37778931862957161709585 . 0 , <nl> + 37778931862957161709586 . 0 , 37778931862957161709587 . 0 } ; <nl> + int fcsr_inputs [ 4 ] = <nl> + { kRoundToNearest , kRoundToZero , kRoundToPlusInf , kRoundToMinusInf } ; <nl> + double * outputs [ 4 ] = { outputs_RN , outputs_RZ , outputs_RP , outputs_RM } ; <nl> + __ ldc1 ( f4 , MemOperand ( a0 , OFFSET_OF ( TestFloat , a ) ) ) ; <nl> + __ lw ( t0 , MemOperand ( a0 , OFFSET_OF ( TestFloat , fcsr ) ) ) ; <nl> + __ cfc1 ( t1 , FCSR ) ; <nl> + __ ctc1 ( t0 , FCSR ) ; <nl> + __ rint_d ( f8 , f4 ) ; <nl> + __ sdc1 ( f8 , MemOperand ( a0 , OFFSET_OF ( TestFloat , b ) ) ) ; <nl> + __ ctc1 ( t1 , FCSR ) ; <nl> + __ jr ( ra ) ; <nl> + __ nop ( ) ; <nl> + <nl> + CodeDesc desc ; <nl> + assm . GetCode ( & desc ) ; <nl> + Handle < Code > code = isolate - > factory ( ) - > NewCode ( <nl> + desc , Code : : ComputeFlags ( Code : : STUB ) , Handle < Code > ( ) ) ; <nl> + F3 f = FUNCTION_CAST < F3 > ( code - > entry ( ) ) ; <nl> + for ( int j = 0 ; j < 4 ; j + + ) { <nl> + test . fcsr = fcsr_inputs [ j ] ; <nl> + for ( int i = 0 ; i < tableLength ; i + + ) { <nl> + test . a = inputs [ i ] ; <nl> + ( CALL_GENERATED_CODE ( f , & test , 0 , 0 , 0 , 0 ) ) ; <nl> + CHECK_EQ ( test . b , outputs [ j ] [ i ] ) ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + <nl> + <nl> + <nl> + <nl> TEST ( jump_tables1 ) { <nl> / / Test jump tables with forward jumps . <nl> CcTest : : InitializeVM ( ) ; <nl> mmm a / test / cctest / test - disasm - mips . cc <nl> ppp b / test / cctest / test - disasm - mips . cc <nl> TEST ( Type1 ) { <nl> <nl> COMPARE ( min ( D , f3 , f4 , f5 ) , " 462520dc min . d f3 , f4 , f5 " ) ; <nl> COMPARE ( max ( D , f3 , f4 , f5 ) , " 462520de max . 
d f3 , f4 , f5 " ) ; <nl> + COMPARE ( rint_d ( f8 , f6 ) , " 4620321a rint . d f8 , f6 " ) ; <nl> + <nl> VERIFY_RUN ( ) ; <nl> } <nl> } <nl> mmm a / test / cctest / test - disasm - mips64 . cc <nl> ppp b / test / cctest / test - disasm - mips64 . cc <nl> TEST ( Type1 ) { <nl> <nl> COMPARE ( min ( D , f3 , f4 , f5 ) , " 462520dc min . d f3 , f4 , f5 " ) ; <nl> COMPARE ( max ( D , f3 , f4 , f5 ) , " 462520de max . d f3 , f4 , f5 " ) ; <nl> + COMPARE ( rint_d ( f8 , f6 ) , " 4620321a rint . d f8 , f6 " ) ; <nl> VERIFY_RUN ( ) ; <nl> } <nl> } <nl>
MIPS : Add rounding support in simulator and RINT instruction .
v8/v8
9da34c56a153a0da7dbb16a24c9d0dfed1925336
2015-04-30T06:29:16Z
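The MIPS21 test above programs the FCSR with each of kRoundToNearest, kRoundToZero, kRoundToPlusInf and kRoundToMinusInf and compares the rint.d / cvt.w.d results against one expectation table per mode. The same four IEEE-754 modes can be exercised from portable C++ through <cfenv>; the sketch below only illustrates why the four tables differ on tie values such as 2.5 and -3.5, assuming a host toolchain whose nearbyint honours fesetround — it is not v8 simulator code.

    // Show how the current rounding mode changes nearbyint() on tie values.
    // Assumes C99/C++11 <cfenv> support on the host; unrelated to the v8 FCSR API.
    #include <cfenv>
    #include <cmath>
    #include <cstdio>

    int main() {
      const int modes[4]   = {FE_TONEAREST, FE_TOWARDZERO, FE_UPWARD, FE_DOWNWARD};
      const char* names[4] = {"RN", "RZ", "RP", "RM"};
      const double inputs[] = {2.5, 3.5, -2.5, -3.5};  // ties expose the differences
      for (int m = 0; m < 4; ++m) {
        std::fesetround(modes[m]);
        std::printf("%s:", names[m]);
        for (double x : inputs) std::printf(" %5.1f", std::nearbyint(x));
        std::printf("\n");
      }
      return 0;
    }

Round-to-nearest ties to even (2.5 -> 2, 3.5 -> 4), round-to-zero truncates, round-to-plus-inf moves toward +inf, and round-to-minus-inf toward -inf, which is the same pattern encoded in the outputs_RN / outputs_RZ / outputs_RP / outputs_RM tables.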
mmm a / CHANGELOG <nl> ppp b / CHANGELOG <nl> v2 . 5 . 0 ( XXXX - XX - XX ) <nl> behavior . <nl> <nl> <nl> + v2 . 4 . 3 ( 2015 - 02 - XX ) <nl> + mmmmmmmmmmmmmmmmmm - <nl> + <nl> + * fix multi - threading with openssl when running under Windows <nl> + <nl> + * fix timeout on socket operations when running under Windows <nl> + <nl> + * Fixed an error in Foxx routing which caused some apps that worked in 2 . 4 . 1 to fail with status 500 : ` undefined is not a function ` errors in 2 . 4 . 2 <nl> + This error was occurring due to seldom internal rerouting introduced by the malformed application handler . <nl> + <nl> + <nl> v2 . 4 . 2 ( 2015 - XX - XX ) <nl> mmmmmmmmmmmmmmmmmm - <nl> <nl>
update CHANGELOG with 2 . 4 changes
arangodb/arangodb
37f19da58324f04c6e45b9fb6e90a6929af5445b
2015-02-05T20:20:01Z
mmm a / vendor / init . bat <nl> ppp b / vendor / init . bat <nl> call " % cmder_root % \ vendor \ lib \ lib_console " <nl> call " % cmder_root % \ vendor \ lib \ lib_git " <nl> call " % cmder_root % \ vendor \ lib \ lib_profile " <nl> <nl> + <nl> : var_loop <nl> if " % ~ 1 " = = " " ( <nl> goto : start <nl> call " % cmder_root % \ vendor \ lib \ lib_profile " <nl> goto var_loop <nl> <nl> : start <nl> + % lib_base % cmder_shell <nl> % lib_console % debug_output init . bat " Env Var - CMDER_ROOT = % CMDER_ROOT % " <nl> % lib_console % debug_output init . bat " Env Var - debug_output = % debug_output % " <nl> <nl> if " % PROCESSOR_ARCHITECTURE % " = = " x86 " ( <nl> set architecture_bits = 64 <nl> ) <nl> <nl> - REM echo % comspec % | find / i " tcc . exe " > nul <nl> - REM if % errorlevel % = = 1 ( <nl> + if " % CMDER_SHELL % " neq " tcc . exe " ( <nl> : : Tell the user about the clink config files . . . <nl> if defined " % CMDER_USER_CONFIG % \ settings " if not exist " % CMDER_USER_CONFIG % \ settings " ( <nl> echo Generating clink initial settings in " % CMDER_USER_CONFIG % \ settings " <nl> REM if % errorlevel % = = 1 ( <nl> ) else ( <nl> " % CMDER_ROOT % \ vendor \ clink \ clink_x % architecture % . exe " inject - - quiet - - profile " % CMDER_ROOT % \ config " - - scripts " % CMDER_ROOT % \ vendor " <nl> ) <nl> - REM ) <nl> + ) <nl> <nl> : : Prepare for git - for - windows <nl> <nl> if not defined user_aliases ( <nl> ) <nl> <nl> <nl> - echo % comspec % | find / i " tcc . exe " > nul <nl> - if " % errorlevel % " = = " 1 " ( <nl> + if " % CMDER_SHELL % " neq " tcc . exe " ( <nl> REM The aliases environment variable is used by alias . bat to id <nl> REM the default file to store new aliases in . <nl> if not defined aliases ( <nl> mmm a / vendor / lib / lib_base . cmd <nl> ppp b / vendor / lib / lib_base . cmd <nl> exit / b <nl> <nl> pause <nl> exit / b <nl> + <nl> + : cmder_shell <nl> + : : : = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + : : : show_subs - shows all sub routines in a . bat / . cmd file with documentation <nl> + : : : . <nl> + : : : include : <nl> + : : : . <nl> + : : : call " lib_base . cmd " <nl> + : : : . <nl> + : : : usage : <nl> + : : : . <nl> + : : : % lib_base % is_cmd <nl> + : : : . <nl> + : : : options : <nl> + : : : . <nl> + : : : file < in > full path to file containing lib_routines to display <nl> + : : : . <nl> + : : : mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - <nl> + echo % comspec % | find / i " \ cmd . exe " > nul & & set " CMDER_SHELL = cmd . exe " <nl> + echo % comspec % | find / i " \ tcc . exe " > nul & & set " CMDER_SHELL = tcc . exe " <nl> + exit / b <nl>
add cmder_shell method
cmderdev/cmder
823e6fee6e042011166288c31116d1b7f70b67cf
2018-09-02T22:32:20Z
mmm a / tensorflow / compiler / tests / BUILD <nl> ppp b / tensorflow / compiler / tests / BUILD <nl> tf_xla_py_test ( <nl> ] , <nl> ) <nl> <nl> - tf_xla_py_test ( <nl> - name = " eig_op_test " , <nl> - size = " medium " , <nl> - srcs = [ " eig_op_test . py " ] , <nl> - tags = [ " optonly " ] , <nl> - deps = [ <nl> - " : xla_test " , <nl> - " / / tensorflow / python : array_ops " , <nl> - " / / tensorflow / python : framework " , <nl> - " / / tensorflow / python : map_fn " , <nl> - " / / tensorflow / python : math_ops " , <nl> - " / / tensorflow / python : platform_test " , <nl> - " / / tensorflow / python : training " , <nl> - " @ absl_py / / absl / testing : parameterized " , <nl> - ] , <nl> - ) <nl> - <nl> - <nl> tf_xla_py_test ( <nl> name = " self_adjoint_eig_op_test " , <nl> size = " medium " , <nl> mmm a / tensorflow / core / kernels / eig_op_complex128 . cc <nl> ppp b / tensorflow / core / kernels / eig_op_complex128 . cc <nl> <nl> - / * Copyright 2017 The TensorFlow Authors . All Rights Reserved . <nl> + / * Copyright 2019 The TensorFlow Authors . All Rights Reserved . <nl> <nl> Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> you may not use this file except in compliance with the License . <nl> mmm a / tensorflow / core / kernels / eig_op_impl . h <nl> ppp b / tensorflow / core / kernels / eig_op_impl . h <nl> <nl> - / * Copyright 2017 The TensorFlow Authors . All Rights Reserved . <nl> + / * Copyright 2019 The TensorFlow Authors . All Rights Reserved . <nl> <nl> Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> you may not use this file except in compliance with the License . <nl> mmm a / tensorflow / python / kernel_tests / eig_op_test . py <nl> ppp b / tensorflow / python / kernel_tests / eig_op_test . py <nl> <nl> - # Copyright 2016 The TensorFlow Authors . All Rights Reserved . <nl> + # Copyright 2019 The TensorFlow Authors . All Rights Reserved . <nl> # <nl> # Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> # you may not use this file except in compliance with the License . <nl>
Remove non - existing test file from build
tensorflow/tensorflow
7f7125e402d803ab3b291ff75bd3eab32ed0d731
2019-10-10T12:29:27Z
mmm a / src / google / protobuf / repeated_field . h <nl> ppp b / src / google / protobuf / repeated_field . h <nl> class RepeatedField PROTOBUF_FINAL { <nl> Element * e = & rep - > elements [ 0 ] ; <nl> Element * limit = & rep - > elements [ size ] ; <nl> for ( ; e < limit ; e + + ) { <nl> - e - > Element : : ~ Element ( ) ; <nl> + e - > ~ Element ( ) ; <nl> } <nl> if ( rep - > arena = = NULL ) { <nl> # if defined ( __GXX_DELETE_WITH_SIZE__ ) | | defined ( __cpp_sized_deallocation ) <nl>
Merge pull request from mda000 / issue2972
protocolbuffers/protobuf
594f810081623c87cc894820089c6a73f91d9252
2017-04-19T15:34:54Z
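The one-line patch above switches RepeatedField's element teardown from the qualified call e->Element::~Element() to the plain e->~Element(). Both forms run a destructor on an object that was constructed into raw storage; the unqualified spelling is the usual generic idiom for that pattern. A minimal stand-alone sketch of the same construct-by-hand / destroy-by-hand pattern, using invented names and std::string instead of protobuf types:

    // Placement-new elements into raw storage, then destroy them explicitly,
    // mirroring the element-destruction loop in the diff above.
    #include <new>
    #include <string>

    template <typename Element>
    void destroy_range(Element* first, Element* last) {
      for (Element* e = first; e < last; ++e) {
        e->~Element();  // unqualified destructor call, as in the patch above
      }
    }

    int main() {
      alignas(std::string) unsigned char raw[3 * sizeof(std::string)];
      std::string* elems = reinterpret_cast<std::string*>(raw);
      for (int i = 0; i < 3; ++i) new (elems + i) std::string("element");
      destroy_range(elems, elems + 3);  // run destructors without freeing raw
      return 0;
    }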
mmm a / src / symbol / graph_executor . cc <nl> ppp b / src / symbol / graph_executor . cc <nl> inline std : : vector < std : : pair < T , T > > GraphExecutor : : GetInplaceOption ( <nl> std : : vector < std : : pair < T , T > > remap ( remap_index . size ( ) ) ; <nl> for ( size_t i = 0 ; i < remap_index . size ( ) ; + + i ) { <nl> if ( args_array [ remap_index [ i ] . first ] = = nullptr ) { <nl> - LOG ( FATAL ) < < <nl> - " BackwardInplaceOption uses input that is returned by DeclareBackwardDependency " ; <nl> + LOG ( FATAL ) < < " BackwardInplaceOption not consistent with DeclareBackwardDependency " ; <nl> } <nl> remap [ i ] . first = * args_array [ remap_index [ i ] . first ] ; <nl> remap [ i ] . second = * static_cast < T * > ( remap_index [ i ] . second ) ; <nl>
Update graph_executor . cc
apache/incubator-mxnet
d5d6177b961b75c54f67d8b8747ebab9438092c3
2015-09-18T10:22:46Z
mmm a / hphp / hack / src / server / ideEnv . ml <nl> ppp b / hphp / hack / src / server / ideEnv . ml <nl> type t = { <nl> tcopt : TypecheckerOptions . t ; <nl> ( * Persistent client talking JSON protocol * ) <nl> client : ( in_channel * out_channel ) option ; <nl> - ( * Non - persistent clients awaiting to receive Hello message and send their <nl> - * single ServerCommand request * ) <nl> - requests : ( Timeout . in_channel * out_channel ) list ; <nl> ( * Whether typechecker has finished full initialization . In the future , we <nl> * can have more granularity here , allowing some queries to run sooner . * ) <nl> typechecker_init_done : bool ; <nl> let build_env typechecker tcopt = { <nl> typechecker = typechecker ; <nl> tcopt = tcopt ; <nl> client = None ; <nl> - requests = [ ] ; <nl> typechecker_init_done = false ; <nl> files_info = Relative_path . Map . empty ; <nl> errorl = [ ] ; <nl> mmm a / hphp / hack / src / server / ideMain . ml <nl> ppp b / hphp / hack / src / server / ideMain . ml <nl> <nl> * <nl> * ) <nl> <nl> - open Core <nl> open IdeEnv <nl> open IdeJson <nl> <nl> - type job = { <nl> - priority : int ; <nl> - run : IdeEnv . t - > IdeEnv . t ; <nl> - } <nl> + module IdeScheduler = IdeScheduler . Make ( struct type t = IdeEnv . t end ) <nl> <nl> - type wait_handle = <nl> - ( * Job that should be run when file descriptor is ready * ) <nl> - | Channel of Unix . file_descr * job <nl> - ( * Job that should be run if provided function tells us that there is <nl> - * something to do * ) <nl> - | Fun of ( IdeEnv . t - > job list ) <nl> - <nl> - let get_ready env wait_handles = <nl> - let funs , channels = List . partition_map wait_handles ~ f : begin function <nl> - | Fun x - > ` Fst x <nl> - | Channel ( x , y ) - > ` Snd ( x , y ) <nl> - end in <nl> - let ready_funs = List . concat_map funs ~ f : ( fun f - > f env ) in <nl> - let wait_time = if ready_funs = [ ] then 1 . 0 else 0 . 0 in <nl> - let fds = List . map channels ~ f : fst in <nl> - let readable , _ , _ = Unix . select fds [ ] [ ] wait_time in <nl> - let ready_channels = List . filter_map channels ~ f : begin fun ( fd , job ) - > <nl> - Option . map ( List . find readable ~ f : ( fun x - > x = fd ) ) ~ f : ( fun _ - > job ) <nl> - end in <nl> - ready_funs @ ready_channels <nl> + module Priorities = struct <nl> + let persistent_client_request = 0 <nl> + let hh_client_request = 1 <nl> + let new_client = 2 <nl> + let typechecker_message = 3 <nl> + let idle = 4 <nl> + end <nl> <nl> ( * Wrapper to ensure flushing and type safety of sent type * ) <nl> let write_string_to_channel ( s : string ) oc = <nl> let handle_already_has_client oc = <nl> write_string_to_channel response oc ; <nl> close_out oc <nl> <nl> - let handle_new_client parent_in_fd env = <nl> - let socket = Libancillary . ancil_recv_fd parent_in_fd in <nl> - let ic , oc = <nl> - ( Unix . in_channel_of_descr socket ) , ( Unix . out_channel_of_descr socket ) in <nl> - let client_type = <nl> - Marshal_tools . from_fd_with_preamble ( Unix . descr_of_in_channel ic ) in <nl> + ( * This is similar to ServerCommand . handle , except that it handles only SEARCH <nl> + * queries which cannot be handled there anymore * ) <nl> + let handle_waiting_hh_client_request ( ic , oc ) env = <nl> + Hh_logger . log " Handling hh_client request " ; <nl> + ( try <nl> + ServerCommand . say_hello oc ; <nl> + let msg = ServerCommand . read_client_msg ic in <nl> + <nl> + match msg with <nl> + | ServerCommand . Rpc ServerRpc . SEARCH ( query , type_ ) - > <nl> + let response = ServerSearch . 
go query type_ in <nl> + ServerCommand . send_response_to_client ( ic , oc ) response ; <nl> + | _ - > assert false <nl> <nl> - match client_type with <nl> - | ServerMonitorUtils . Persistent - > <nl> - begin match env . client with <nl> - | None - > <nl> - Hh_logger . log " Connected new persistent client " ; <nl> - { env with client = Some ( ic , oc ) } <nl> - | Some _ - > <nl> - Hh_logger . log " Rejected a client " ; <nl> - handle_already_has_client oc ; env <nl> - end <nl> - | ServerMonitorUtils . Request - > <nl> - Hh_logger . log " Connected new single request client " ; <nl> - let ic , oc = Timeout . in_channel_of_descr socket , oc in <nl> - { env with requests = ( ic , oc ) : : env . requests } <nl> + with <nl> + | Sys_error ( " Broken pipe " ) <nl> + | ServerCommand . Read_command_timeout - > <nl> + ServerUtils . shutdown_client ( ic , oc ) ) ; <nl> + env <nl> <nl> let send_call_to_typecheker env id = function <nl> | IdeServerCall . Find_refs_call action - > <nl> let get_call_response env id call = <nl> | IdeServerCall . Server_busy - > <nl> Some ( IdeJsonUtils . json_string_of_server_busy id ) <nl> <nl> - let handle_gone_client env = <nl> + let handle_gone_client ic env = <nl> Hh_logger . log " Client went away " ; <nl> + let fd = Unix . descr_of_in_channel ic in <nl> + IdeScheduler . stop_waiting_for_channel fd ; <nl> { env with client = None } <nl> <nl> let handle_client_request ( ic , oc ) env = <nl> let handle_client_request ( ic , oc ) env = <nl> | End_of_file <nl> | Sys_error _ - > <nl> ( * client went away in the meantime * ) <nl> - handle_gone_client env <nl> + handle_gone_client ic env <nl> <nl> - let handle_typechecker_message typechecker_process env = <nl> - match IdeProcessPipe . recv typechecker_process with <nl> + let handle_new_client parent_in_fd env = <nl> + let socket = Libancillary . ancil_recv_fd parent_in_fd in <nl> + let ic , oc = <nl> + ( Unix . in_channel_of_descr socket ) , ( Unix . out_channel_of_descr socket ) in <nl> + let client_type = <nl> + Marshal_tools . from_fd_with_preamble ( Unix . descr_of_in_channel ic ) in <nl> + <nl> + match client_type with <nl> + | ServerMonitorUtils . Persistent - > <nl> + begin match env . client with <nl> + | None - > <nl> + Hh_logger . log " Connected new persistent client " ; <nl> + IdeScheduler . wait_for_channel <nl> + ( Unix . descr_of_in_channel ic ) <nl> + ( handle_client_request ( ic , oc ) ) <nl> + ~ priority : Priorities . persistent_client_request ; <nl> + { env with client = Some ( ic , oc ) } <nl> + | Some _ - > <nl> + Hh_logger . log " Rejected a client " ; <nl> + handle_already_has_client oc ; env <nl> + end <nl> + | ServerMonitorUtils . Request - > <nl> + Hh_logger . log " Connected new single request client " ; <nl> + let ic , oc = Timeout . in_channel_of_descr socket , oc in <nl> + IdeScheduler . wait_for_fun <nl> + ( fun env - > <nl> + env . typechecker_init_done & & <nl> + ( not ( HackSearchService . IdeProcessApi . updates_pending ( ) ) ) <nl> + ) <nl> + ( handle_waiting_hh_client_request ( ic , oc ) ) <nl> + ~ once : true <nl> + ~ priority : Priorities . hh_client_request ; <nl> + env <nl> + <nl> + let handle_typechecker_message env = <nl> + match IdeProcessPipe . recv env . typechecker with <nl> | IdeProcessMessage . Typechecker_init_done - > <nl> { env with typechecker_init_done = true } <nl> | IdeProcessMessage . Sync_file_info updated_files_info - > <nl> let handle_server_idle env = <nl> ignore ( SharedMem . try_lock_hashtable ~ do_ : ( fun ( ) - > IdeIdle . 
go ( ) ) ) ; <nl> env <nl> <nl> - ( * This is similar to ServerCommand . handle , except that it handles only SEARCH <nl> - * queries which cannot be handled there anymore * ) <nl> - let handle_waiting_hh_client_requests env = <nl> - List . iter env . requests begin fun ( ic , oc ) - > <nl> - try <nl> - ServerCommand . say_hello oc ; <nl> - let msg = ServerCommand . read_client_msg ic in <nl> - <nl> - match msg with <nl> - | ServerCommand . Rpc ServerRpc . SEARCH ( query , type_ ) - > <nl> - let response = ServerSearch . go query type_ in <nl> - ServerCommand . send_response_to_client ( ic , oc ) response ; <nl> - | _ - > assert false <nl> - <nl> - with <nl> - | Sys_error ( " Broken pipe " ) <nl> - | ServerCommand . Read_command_timeout - > <nl> - ServerUtils . shutdown_client ( ic , oc ) <nl> - end ; <nl> - { env with requests = [ ] } <nl> - <nl> - let get_jobs typechecker parent_in_fd = <nl> - let idle_handle = Fun ( <nl> - fun _ - > if IdeIdle . has_tasks ( ) then [ { <nl> - priority = 4 ; <nl> - run = handle_server_idle <nl> - } ] else [ ] <nl> - ) in <nl> - let typechecker_handle = Channel ( <nl> - typechecker . IdeProcessPipe . in_fd , { <nl> - priority = 2 ; <nl> - run = handle_typechecker_message typechecker <nl> - } <nl> - ) in <nl> - let monitor_handle = Channel ( <nl> - parent_in_fd , { <nl> - priority = 1 ; <nl> - run = handle_new_client parent_in_fd <nl> - } <nl> - ) in <nl> - let hh_clients_handle = Fun ( <nl> - fun env - > if <nl> - ( not ( List . is_empty env . requests ) ) & & <nl> - env . typechecker_init_done & & <nl> - ( not ( HackSearchService . IdeProcessApi . updates_pending ( ) ) ) <nl> - then [ { <nl> - priority = 3 ; <nl> - run = handle_waiting_hh_client_requests ; <nl> - } ] else [ ] <nl> - ) in <nl> - [ idle_handle ; typechecker_handle ; monitor_handle ; hh_clients_handle ] <nl> - <nl> - let get_client_job ( ( client_ic , _ ) as client ) = <nl> - Channel ( <nl> - Unix . descr_of_in_channel client_ic , { <nl> - priority = 0 ; <nl> - run = handle_client_request client <nl> - } <nl> - ) <nl> + let init_scheduler monitor_in_fd typechecker_in_fd = <nl> + IdeScheduler . wait_for_channel <nl> + monitor_in_fd <nl> + ( handle_new_client monitor_in_fd ) <nl> + ~ priority : Priorities . new_client ; <nl> + IdeScheduler . wait_for_channel <nl> + typechecker_in_fd <nl> + handle_typechecker_message <nl> + ~ priority : Priorities . typechecker_message ; <nl> + IdeScheduler . wait_for_fun <nl> + ( fun _ - > IdeIdle . has_tasks ( ) ) <nl> + handle_server_idle <nl> + ~ priority : Priorities . idle <nl> <nl> let daemon_main options ( parent_ic , _parent_oc ) = <nl> Printexc . record_backtrace true ; <nl> let daemon_main options ( parent_ic , _parent_oc ) = <nl> let env = ref ( build_env typechecker_process tcopt ) in <nl> <nl> IdeIdle . init ( ) ; <nl> + init_scheduler parent_in_fd typechecker_process . IdeProcessPipe . in_fd ; <nl> while true do <nl> ServerMonitorUtils . exit_if_parent_dead ( ) ; <nl> let new_env = try <nl> - let jobs = get_jobs typechecker_process parent_in_fd in <nl> - let jobs = match ! env . client with <nl> - | Some client - > ( get_client_job client ) : : jobs <nl> - | None - > jobs <nl> - in <nl> - let ready_jobs = get_ready ! env jobs in <nl> - let sorted_ready_jobs = List . sort ready_jobs ~ cmp : begin fun x y - > <nl> - x . priority - y . priority <nl> - end in <nl> - List . fold_right sorted_ready_jobs <nl> - ~ init : ! env <nl> - ~ f : ( fun job env - > job . run env ) <nl> + IdeScheduler . wait_and_run_ready ! env <nl> with <nl> | IdeProcessPipe . 
IDE_process_pipe_broken - > <nl> Hh_logger . log " Typechecker has died , exiting too . " ; <nl> new file mode 100644 <nl> index 00000000000 . . ac4dbfff3e5 <nl> mmm / dev / null <nl> ppp b / hphp / hack / src / server / ideScheduler . ml <nl> <nl> + ( * * <nl> + * Copyright ( c ) 2015 , Facebook , Inc . <nl> + * All rights reserved . <nl> + * <nl> + * This source code is licensed under the BSD - style license found in the <nl> + * LICENSE file in the " hack " directory of this source tree . An additional grant <nl> + * of patent rights can be found in the PATENTS file in the same directory . <nl> + * <nl> + * ) <nl> + open Core <nl> + <nl> + module Make = functor ( EnvType : sig type t end ) - > struct <nl> + type t = EnvType . t <nl> + <nl> + type job = { <nl> + priority : int ; <nl> + run : t - > t ; <nl> + } <nl> + <nl> + type wait_handle = <nl> + ( * Job that should be run if provided function tells us that there is <nl> + * something to do * ) <nl> + | Fun of ( t - > bool ) * job <nl> + ( * Job that should be run when file descriptor is ready * ) <nl> + | Channel of Unix . file_descr * job <nl> + <nl> + type env = { <nl> + waiting_jobs : wait_handle list ; <nl> + ready_jobs : job list ; <nl> + } <nl> + <nl> + let empty ( ) = { <nl> + waiting_jobs = [ ] ; <nl> + ready_jobs = [ ] ; <nl> + } <nl> + <nl> + let env = ref ( empty ( ) ) <nl> + <nl> + let reset ( ) = <nl> + env : = empty ( ) <nl> + <nl> + let rec wait_for_fun ? ( once = false ) ~ priority is_ready f = <nl> + let f ' = if once then f else begin fun job_env - > <nl> + wait_for_fun ~ priority is_ready f ; <nl> + let job_env = f job_env in <nl> + job_env <nl> + end in <nl> + let wait_handle = Fun ( is_ready , { priority ; run = f ' } ) in <nl> + env : = { ! env with waiting_jobs = wait_handle : : ! env . waiting_jobs } <nl> + <nl> + let rec wait_for_channel ~ priority fd f = <nl> + let f ' = begin fun env - > <nl> + let env = f env in <nl> + wait_for_channel ~ priority fd f ; <nl> + env <nl> + end in <nl> + let wait_handle = Channel ( fd , { priority ; run = f ' } ) in <nl> + env : = { ! env with waiting_jobs = wait_handle : : ! env . waiting_jobs } <nl> + <nl> + let stop_waiting_for_channel fd = <nl> + let waiting_jobs = List . filter ! env . waiting_jobs begin function <nl> + | Channel ( x , _ ) - > x < > fd <nl> + | _ - > true <nl> + end in <nl> + env : = { ! env with waiting_jobs } <nl> + <nl> + let wait_for_ready_jobs job_env = <nl> + let funs , channels = List . partition_map ! env . waiting_jobs ~ f : begin function <nl> + | Fun ( x , y ) - > ` Fst ( x , y ) <nl> + | Channel ( fd , f ) - > ` Snd ( fd , f ) <nl> + end in <nl> + <nl> + let ready_funs , waiting_funs = List . partition_map funs <nl> + ~ f : begin fun ( is_ready , job ) - > <nl> + if is_ready job_env then ` Fst job <nl> + else ` Snd ( Fun ( is_ready , job ) ) end in <nl> + <nl> + let wait_time = <nl> + if ready_funs = [ ] & & ! env . ready_jobs = [ ] then 1 . 0 else 0 . 0 in <nl> + <nl> + let fds = List . map channels ~ f : fst in <nl> + let readable , _ , _ = Unix . select fds [ ] [ ] wait_time in <nl> + <nl> + let ready_channels , waiting_channels = List . partition_map channels <nl> + ~ f : begin fun ( fd , job ) - > <nl> + if List . exists readable ~ f : ( fun x - > x = fd ) <nl> + then ` Fst job else ` Snd ( Channel ( fd , job ) ) end in <nl> + <nl> + let ready_jobs = ready_funs @ ready_channels @ ! env . ready_jobs in <nl> + let ready_jobs = List . sort <nl> + ready_jobs <nl> + ~ cmp : ( fun x y - > x . priority - y . 
priority ) in <nl> + <nl> + env : = { <nl> + ready_jobs ; <nl> + waiting_jobs = waiting_funs @ waiting_channels ; <nl> + } <nl> + <nl> + let rec wait_and_run_ready job_env = <nl> + wait_for_ready_jobs job_env ; <nl> + let job = match ! env . ready_jobs with <nl> + | h : : t - > env : = { ! env with ready_jobs = t } ; Some h <nl> + | [ ] - > None <nl> + in <nl> + match job with <nl> + | Some job - > <nl> + let job_env = job . run job_env in <nl> + wait_and_run_ready job_env <nl> + | None - > job_env <nl> + <nl> + end <nl> new file mode 100644 <nl> index 00000000000 . . cd1f316c4ef <nl> mmm / dev / null <nl> ppp b / hphp / hack / src / server / ideScheduler . mli <nl> <nl> + ( * * <nl> + * Copyright ( c ) 2015 , Facebook , Inc . <nl> + * All rights reserved . <nl> + * <nl> + * This source code is licensed under the BSD - style license found in the <nl> + * LICENSE file in the " hack " directory of this source tree . An additional grant <nl> + * of patent rights can be found in the PATENTS file in the same directory . <nl> + * <nl> + * ) <nl> + <nl> + module Make : functor ( EnvType : sig type t end ) - > sig <nl> + type t = EnvType . t <nl> + <nl> + ( * Remove all scheduled jobs * ) <nl> + val reset : unit - > unit <nl> + <nl> + val wait_for_fun : <nl> + ? once : bool - > ( * Should the job be removed after it ' s executed * ) <nl> + priority : int - > <nl> + ( t - > bool ) - > ( * The job can run when this function return true * ) <nl> + ( t - > t ) - > ( * The job to run * ) <nl> + unit <nl> + <nl> + val wait_for_channel : <nl> + priority : int - > <nl> + Unix . file_descr - > ( * The job can run when this fd is readable * ) <nl> + ( t - > t ) - > ( * The job to run * ) <nl> + unit <nl> + <nl> + val stop_waiting_for_channel : <nl> + Unix . file_descr - > ( * fd that was passed to wait_for_channel before * ) <nl> + unit <nl> + <nl> + val wait_and_run_ready : t - > t <nl> + end <nl> new file mode 100644 <nl> index 00000000000 . . 58780e2f001 <nl> mmm / dev / null <nl> ppp b / hphp / hack / test / unit / ide / ide_scheduler_test . ml <nl> <nl> + ( * * <nl> + * Copyright ( c ) 2015 , Facebook , Inc . <nl> + * All rights reserved . <nl> + * <nl> + * This source code is licensed under the BSD - style license found in the <nl> + * LICENSE file in the " hack " directory of this source tree . An additional grant <nl> + * of patent rights can be found in the PATENTS file in the same directory . <nl> + * <nl> + * ) <nl> + <nl> + open Core <nl> + <nl> + type test_env = { <nl> + ( * What callbacks were called and when * ) <nl> + callbacks_trace : ( string * float ) list ; <nl> + } <nl> + <nl> + module TestScheduler = IdeScheduler . Make ( struct type t = test_env end ) <nl> + <nl> + let empty_test_env ( ) = { <nl> + callbacks_trace = [ ] ; <nl> + } <nl> + <nl> + let record_trace name = <nl> + ( fun env - > { <nl> + callbacks_trace = ( name , Unix . gettimeofday ( ) ) : : env . callbacks_trace <nl> + } ) <nl> + <nl> + ( * Keep running the function until ~ steps callbacks have executed * ) <nl> + let schedule_run_until name ~ steps ~ priority = <nl> + TestScheduler . wait_for_fun <nl> + ~ priority <nl> + ( fun env - > ( List . length env . callbacks_trace < steps ) ) <nl> + ( record_trace name ) <nl> + <nl> + let schedule_wait_for_channel name fd ~ priority = <nl> + TestScheduler . wait_for_channel <nl> + fd <nl> + ( fun env - > <nl> + let payload = Marshal_tools . 
from_fd_with_preamble fd in <nl> + record_trace ( name ^ " : " ^ payload ) env <nl> + ) <nl> + ~ priority <nl> + <nl> + let run_and_expect_trace env trace = <nl> + let env = TestScheduler . wait_and_run_ready env in <nl> + List . iter2_exn env . callbacks_trace trace <nl> + ~ f : ( fun ( x , _ ) y - > if x < > y then raise Exit ) ; <nl> + env <nl> + <nl> + let test_fun_wait env = <nl> + schedule_run_until " fun " ~ steps : 3 ~ priority : 0 ; <nl> + ignore ( run_and_expect_trace env [ " fun " ; " fun " ; " fun " ] ) ; <nl> + true <nl> + <nl> + let test_fun_wait_once env = <nl> + TestScheduler . wait_for_fun <nl> + ( fun _ - > true ) <nl> + ( record_trace " fun " ) <nl> + ~ once : true <nl> + ~ priority : 0 ; <nl> + ignore ( run_and_expect_trace env [ " fun " ] ) ; <nl> + true <nl> + <nl> + let test_channel_wait env = <nl> + let fd_in , fd_out = Unix . pipe ( ) in <nl> + <nl> + schedule_wait_for_channel " channel " fd_in ~ priority : 0 ; <nl> + let env = run_and_expect_trace env [ ] in <nl> + Marshal_tools . to_fd_with_preamble fd_out " msg1 " ; <nl> + let env = run_and_expect_trace env [ " channel : msg1 " ] in <nl> + Marshal_tools . to_fd_with_preamble fd_out " msg2 " ; <nl> + let env = run_and_expect_trace env [ " channel : msg2 " ; " channel : msg1 " ] in <nl> + Marshal_tools . to_fd_with_preamble fd_out " msg3 " ; <nl> + TestScheduler . stop_waiting_for_channel fd_in ; <nl> + ignore ( run_and_expect_trace env [ " channel : msg2 " ; " channel : msg1 " ] ) ; <nl> + true <nl> + <nl> + let test_priorities env = <nl> + schedule_run_until " fun1 " ~ steps : 1 ~ priority : 1 ; <nl> + schedule_run_until " fun2 " ~ steps : 1 ~ priority : 0 ; <nl> + schedule_run_until " fun3 " ~ steps : 1 ~ priority : 2 ; <nl> + ignore ( run_and_expect_trace env [ " fun3 " ; " fun1 " ; " fun2 " ] ) ; <nl> + true <nl> + <nl> + let test_fun_and_channel env = <nl> + let fd_in , _ = Unix . pipe ( ) in <nl> + <nl> + schedule_run_until " fun " ~ steps : 1 ~ priority : 1 ; <nl> + schedule_wait_for_channel " channel " fd_in ~ priority : 0 ; <nl> + <nl> + let t = Unix . gettimeofday ( ) in <nl> + let env = TestScheduler . wait_and_run_ready env in <nl> + ( match env . callbacks_trace with <nl> + ( * Check that function that was immediately ready did not block <nl> + * because of the channel * ) <nl> + | [ " fun " , when_ ] - > when_ - . t < 0 . 1 <nl> + | _ - > false ) <nl> + <nl> + let tests = List . map [ <nl> + " test_fun_wait " , test_fun_wait ; <nl> + " test_fun_wait_once " , test_fun_wait_once ; <nl> + " test_channel_wait " , test_channel_wait ; <nl> + " test_priorities " , test_priorities ; <nl> + " test_fun_and_channel " , test_fun_and_channel <nl> + ] begin fun ( name , f ) - > <nl> + let f ' = begin fun ( ) - > <nl> + TestScheduler . reset ( ) ; <nl> + let env = ( empty_test_env ( ) ) in <nl> + f env <nl> + end in <nl> + ( name , f ' ) <nl> + end <nl> + <nl> + let ( ) = <nl> + Unit_test . run_all tests <nl>
Factor out waiting for stuff to separate scheduler module , add tests
facebook/hhvm
a8df3c3e69e21582894e99b543dcf52be3e3b8ed
2016-03-22T03:00:33Z
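The new IdeScheduler functor above keeps two kinds of wait handles — Channel (a file descriptor plus a job) and Fun (a readiness predicate plus a job) — selects over the descriptors with a one-second timeout only when nothing is already runnable, and then runs the accumulated jobs sorted by priority, lowest number first. A rough, much-simplified C++ analogue of that single wait-then-run step, assuming POSIX select(2) and with all names invented, might look like:

    // One iteration of a "wait for fds / predicates, then run ready jobs by
    // priority" loop, loosely following the OCaml wait_and_run_ready above.
    #include <sys/select.h>
    #include <algorithm>
    #include <functional>
    #include <vector>

    struct Env { /* server state lives here */ };
    struct Job { int priority; std::function<Env(Env)> run; };
    struct FdHandle  { int fd; Job job; };
    struct FunHandle { std::function<bool(const Env&)> ready; Job job; };

    Env run_ready_once(Env env,
                       const std::vector<FdHandle>& channels,
                       const std::vector<FunHandle>& funs) {
      std::vector<Job> ready;
      for (const auto& f : funs)
        if (f.ready(env)) ready.push_back(f.job);
      // Block up to one second only when no predicate job is already runnable.
      timeval tv = {ready.empty() ? 1 : 0, 0};
      fd_set set;
      FD_ZERO(&set);
      int maxfd = -1;
      for (const auto& c : channels) { FD_SET(c.fd, &set); maxfd = std::max(maxfd, c.fd); }
      if (select(maxfd + 1, &set, nullptr, nullptr, &tv) > 0)
        for (const auto& c : channels)
          if (FD_ISSET(c.fd, &set)) ready.push_back(c.job);
      std::sort(ready.begin(), ready.end(),
                [](const Job& a, const Job& b) { return a.priority < b.priority; });
      for (const auto& job : ready) env = job.run(env);  // lowest priority value first
      return env;
    }

The real module also re-arms non-once handles after running them and lets stop_waiting_for_channel drop a descriptor; those details are omitted here.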
mmm a / tensorflow / core / graph / collective_order . cc <nl> ppp b / tensorflow / core / graph / collective_order . cc <nl> Status CreateControlDependencies ( <nl> <nl> / / Insert control dependencies defined by ` dependency_edges ` in ` graph ` . If <nl> / / ` order_type ` is ` kEdges ` , insert explicit control edges , else if ` order_type ` <nl> - / / is ` kAttrs ` , encode depdencies as an attribute on collective node . <nl> + / / is ` kAttrs ` , encode dependencies as an attribute on collective node . <nl> Status InsertControlDependencies ( <nl> Graph * graph , GraphCollectiveOrder order_type , <nl> const absl : : flat_hash_map < Node * , absl : : flat_hash_set < Node * > > & <nl>
Fixed typo
tensorflow/tensorflow
e827f0d5571f7fd4963e0adb5a1e0f99ade45924
2019-03-10T14:09:49Z
mmm a / scene / gui / text_edit . cpp <nl> ppp b / scene / gui / text_edit . cpp <nl> void TextEdit : : _notification ( int p_what ) { <nl> <nl> if ( OS : : get_singleton ( ) - > has_virtual_keyboard ( ) ) <nl> OS : : get_singleton ( ) - > show_virtual_keyboard ( get_text ( ) , get_global_rect ( ) ) ; <nl> - if ( raised_from_completion ) { <nl> - VisualServer : : get_singleton ( ) - > canvas_item_set_z_index ( get_canvas_item ( ) , 1 ) ; <nl> - } <nl> } break ; <nl> case NOTIFICATION_FOCUS_EXIT : { <nl> <nl> void TextEdit : : _notification ( int p_what ) { <nl> <nl> if ( OS : : get_singleton ( ) - > has_virtual_keyboard ( ) ) <nl> OS : : get_singleton ( ) - > hide_virtual_keyboard ( ) ; <nl> - if ( raised_from_completion ) { <nl> - VisualServer : : get_singleton ( ) - > canvas_item_set_z_index ( get_canvas_item ( ) , 0 ) ; <nl> - } <nl> } break ; <nl> } <nl> } <nl> void TextEdit : : _confirm_completion ( ) { <nl> <nl> void TextEdit : : _cancel_code_hint ( ) { <nl> <nl> - VisualServer : : get_singleton ( ) - > canvas_item_set_z_index ( get_canvas_item ( ) , 0 ) ; <nl> - raised_from_completion = false ; <nl> completion_hint = " " ; <nl> update ( ) ; <nl> } <nl> <nl> void TextEdit : : _cancel_completion ( ) { <nl> <nl> - VisualServer : : get_singleton ( ) - > canvas_item_set_z_index ( get_canvas_item ( ) , 0 ) ; <nl> - raised_from_completion = false ; <nl> if ( ! completion_active ) <nl> return ; <nl> <nl> void TextEdit : : query_code_comple ( ) { <nl> <nl> void TextEdit : : set_code_hint ( const String & p_hint ) { <nl> <nl> - VisualServer : : get_singleton ( ) - > canvas_item_set_z_index ( get_canvas_item ( ) , 1 ) ; <nl> - raised_from_completion = true ; <nl> completion_hint = p_hint ; <nl> completion_hint_offset = - 0xFFFF ; <nl> update ( ) ; <nl> void TextEdit : : set_code_hint ( const String & p_hint ) { <nl> <nl> void TextEdit : : code_complete ( const Vector < String > & p_strings , bool p_forced ) { <nl> <nl> - VisualServer : : get_singleton ( ) - > canvas_item_set_z_index ( get_canvas_item ( ) , 1 ) ; <nl> - raised_from_completion = true ; <nl> completion_strings = p_strings ; <nl> completion_active = true ; <nl> completion_forced = p_forced ; <nl> TextEdit : : TextEdit ( ) { <nl> target_v_scroll = 0 ; <nl> v_scroll_speed = 80 ; <nl> <nl> - raised_from_completion = false ; <nl> - <nl> context_menu_enabled = true ; <nl> menu = memnew ( PopupMenu ) ; <nl> add_child ( menu ) ; <nl> mmm a / scene / gui / text_edit . h <nl> ppp b / scene / gui / text_edit . h <nl> class TextEdit : public Control { <nl> float target_v_scroll ; <nl> float v_scroll_speed ; <nl> <nl> - bool raised_from_completion ; <nl> - <nl> String highlighted_word ; <nl> <nl> uint64_t last_dblclk ; <nl>
Merge pull request from zhagsenkk / master
godotengine/godot
b947e758237ac43be52523bad69f94b5d8136e1a
2018-10-29T09:25:49Z
deleted file mode 100644 <nl> index 576466a312 . . 0000000000 <nl> mmm a / change / react - native - windows - 2020 - 04 - 03 - 09 - 14 - 08 - dependabot - npm_and_yarn - microsoft - api - documenter - 7 . 7 . 17 . json <nl> ppp / dev / null <nl> <nl> - { <nl> - " type " : " none " , <nl> - " comment " : " Update api docs " , <nl> - " packageName " : " react - native - windows " , <nl> - " email " : " acoates @ microsoft . com " , <nl> - " dependentChangeType " : " none " , <nl> - " date " : " 2020 - 04 - 03T16 : 14 : 08 . 305Z " <nl> - } <nl> \ No newline at end of file <nl> mmm a / vnext / CHANGELOG . json <nl> ppp b / vnext / CHANGELOG . json <nl> <nl> { <nl> " name " : " react - native - windows " , <nl> " entries " : [ <nl> + { <nl> + " date " : " Fri , 03 Apr 2020 17 : 20 : 20 GMT " , <nl> + " tag " : " react - native - windows_v0 . 0 . 0 - master . 13 " , <nl> + " version " : " 0 . 0 . 0 - master . 13 " , <nl> + " comments " : { <nl> + " none " : [ <nl> + { <nl> + " comment " : " Update api docs " , <nl> + " author " : " acoates @ microsoft . com " , <nl> + " commit " : " 407c0834ada43cd9d42c24cb6ddfe7c91ddf960a " , <nl> + " package " : " react - native - windows " <nl> + } <nl> + ] <nl> + } <nl> + } , <nl> { <nl> " date " : " Fri , 03 Apr 2020 16 : 06 : 49 GMT " , <nl> " tag " : " react - native - windows_v0 . 0 . 0 - master . 13 " , <nl>
applying package updates * * * NO_CI * * *
microsoft/react-native-windows
118a2a7c72431f5e42996679a225256b3d076369
2020-04-03T17:20:20Z
mmm a / src / json . hpp <nl> ppp b / src / json . hpp <nl> class basic_json <nl> <nl> @ since version 1 . 0 . 0 <nl> * / <nl> - template < class CompatibleObjectType , typename <nl> - std : : enable_if < <nl> - std : : is_constructible < typename object_t : : key_type , typename CompatibleObjectType : : key_type > : : value and <nl> - std : : is_constructible < basic_json , typename CompatibleObjectType : : mapped_type > : : value , int > : : type <nl> - = 0 > <nl> + template < class CompatibleObjectType , typename std : : enable_if < <nl> + std : : is_constructible < typename object_t : : key_type , typename CompatibleObjectType : : key_type > : : value and <nl> + std : : is_constructible < basic_json , typename CompatibleObjectType : : mapped_type > : : value , int > : : type <nl> + = 0 > <nl> basic_json ( const CompatibleObjectType & val ) <nl> : m_type ( value_t : : object ) <nl> { <nl> class basic_json <nl> <nl> @ since version 1 . 0 . 0 <nl> * / <nl> - template < class CompatibleArrayType , typename <nl> - std : : enable_if < <nl> - not std : : is_same < CompatibleArrayType , typename basic_json_t : : iterator > : : value and <nl> - not std : : is_same < CompatibleArrayType , typename basic_json_t : : const_iterator > : : value and <nl> - not std : : is_same < CompatibleArrayType , typename basic_json_t : : reverse_iterator > : : value and <nl> - not std : : is_same < CompatibleArrayType , typename basic_json_t : : const_reverse_iterator > : : value and <nl> - not std : : is_same < CompatibleArrayType , typename array_t : : iterator > : : value and <nl> - not std : : is_same < CompatibleArrayType , typename array_t : : const_iterator > : : value and <nl> - std : : is_constructible < basic_json , typename CompatibleArrayType : : value_type > : : value , int > : : type <nl> - = 0 > <nl> + template < class CompatibleArrayType , typename std : : enable_if < <nl> + not std : : is_same < CompatibleArrayType , typename basic_json_t : : iterator > : : value and <nl> + not std : : is_same < CompatibleArrayType , typename basic_json_t : : const_iterator > : : value and <nl> + not std : : is_same < CompatibleArrayType , typename basic_json_t : : reverse_iterator > : : value and <nl> + not std : : is_same < CompatibleArrayType , typename basic_json_t : : const_reverse_iterator > : : value and <nl> + not std : : is_same < CompatibleArrayType , typename array_t : : iterator > : : value and <nl> + not std : : is_same < CompatibleArrayType , typename array_t : : const_iterator > : : value and <nl> + std : : is_constructible < basic_json , typename CompatibleArrayType : : value_type > : : value , int > : : type <nl> + = 0 > <nl> basic_json ( const CompatibleArrayType & val ) <nl> : m_type ( value_t : : array ) <nl> { <nl> class basic_json <nl> <nl> @ since version 1 . 0 . 0 <nl> * / <nl> - template < class CompatibleStringType , typename <nl> - std : : enable_if < <nl> - std : : is_constructible < string_t , CompatibleStringType > : : value , int > : : type <nl> - = 0 > <nl> + template < class CompatibleStringType , typename std : : enable_if < <nl> + std : : is_constructible < string_t , CompatibleStringType > : : value , int > : : type <nl> + = 0 > <nl> basic_json ( const CompatibleStringType & val ) <nl> : basic_json ( string_t ( val ) ) <nl> { <nl> class basic_json <nl> <nl> @ since version 1 . 0 . 
0 <nl> * / <nl> - template < typename T , <nl> - typename std : : enable_if < <nl> + template < typename T , typename std : : enable_if < <nl> not ( std : : is_same < T , int > : : value ) <nl> and std : : is_same < T , number_integer_t > : : value <nl> , int > : : type <nl> class basic_json <nl> <nl> @ since version 2 . 0 . 0 <nl> * / <nl> - template < typename T , <nl> - typename std : : enable_if < <nl> + template < typename T , typename std : : enable_if < <nl> not ( std : : is_same < T , int > : : value ) <nl> and std : : is_same < T , number_unsigned_t > : : value <nl> , int > : : type <nl> class basic_json <nl> <nl> @ since version 2 . 0 . 0 <nl> * / <nl> - template < typename CompatibleNumberUnsignedType , typename <nl> - std : : enable_if < <nl> - std : : is_constructible < number_unsigned_t , CompatibleNumberUnsignedType > : : value and <nl> - std : : numeric_limits < CompatibleNumberUnsignedType > : : is_integer and <nl> - not std : : numeric_limits < CompatibleNumberUnsignedType > : : is_signed , <nl> - CompatibleNumberUnsignedType > : : type <nl> - = 0 > <nl> + template < typename CompatibleNumberUnsignedType , typename std : : enable_if < <nl> + std : : is_constructible < number_unsigned_t , CompatibleNumberUnsignedType > : : value and <nl> + std : : numeric_limits < CompatibleNumberUnsignedType > : : is_integer and <nl> + not std : : numeric_limits < CompatibleNumberUnsignedType > : : is_signed , CompatibleNumberUnsignedType > : : type <nl> + = 0 > <nl> basic_json ( const CompatibleNumberUnsignedType val ) noexcept <nl> : m_type ( value_t : : number_unsigned ) , <nl> m_value ( static_cast < number_unsigned_t > ( val ) ) <nl> class basic_json <nl> <nl> @ since version 1 . 0 . 0 <nl> * / <nl> - template < typename CompatibleNumberFloatType , typename = typename <nl> - std : : enable_if < <nl> + template < typename CompatibleNumberFloatType , typename = typename std : : enable_if < <nl> std : : is_constructible < number_float_t , CompatibleNumberFloatType > : : value and <nl> - std : : is_floating_point < CompatibleNumberFloatType > : : value > : : type <nl> - > <nl> + std : : is_floating_point < CompatibleNumberFloatType > : : value > : : type > <nl> basic_json ( const CompatibleNumberFloatType val ) noexcept <nl> : basic_json ( number_float_t ( val ) ) <nl> { <nl> class basic_json <nl> @ since version 1 . 0 . 0 <nl> * / <nl> basic_json ( std : : initializer_list < basic_json > init , <nl> - bool type_deduction = true , <nl> - value_t manual_type = value_t : : array ) <nl> + const bool type_deduction = true , <nl> + const value_t manual_type = value_t : : array ) <nl> { <nl> / / check if each element is an array with two elements whose first <nl> / / element is a string <nl> class basic_json <nl> <nl> @ since version 1 . 0 . 0 <nl> * / <nl> - template < class InputIT , typename <nl> - std : : enable_if < <nl> - std : : is_same < InputIT , typename basic_json_t : : iterator > : : value or <nl> - std : : is_same < InputIT , typename basic_json_t : : const_iterator > : : value <nl> - , int > : : type <nl> - = 0 > <nl> + template < class InputIT , typename = typename std : : enable_if < <nl> + std : : is_same < InputIT , typename basic_json_t : : iterator > : : value or <nl> + std : : is_same < InputIT , typename basic_json_t : : const_iterator > : : value > : : type > <nl> basic_json ( InputIT first , InputIT last ) <nl> { <nl> assert ( first . m_object ! 
= nullptr ) ; <nl> class basic_json <nl> / / / / / / / / / / / / / / / / / / <nl> <nl> / / / get an object ( explicit ) <nl> - template < class T , typename <nl> - std : : enable_if < <nl> - std : : is_convertible < typename object_t : : key_type , typename T : : key_type > : : value and <nl> - std : : is_convertible < basic_json_t , typename T : : mapped_type > : : value <nl> - , int > : : type = 0 > <nl> + template < class T , typename std : : enable_if < <nl> + std : : is_convertible < typename object_t : : key_type , typename T : : key_type > : : value and <nl> + std : : is_convertible < basic_json_t , typename T : : mapped_type > : : value <nl> + , int > : : type = 0 > <nl> T get_impl ( T * ) const <nl> { <nl> if ( is_object ( ) ) <nl> class basic_json <nl> } <nl> <nl> / / / get an array ( explicit ) <nl> - template < class T , typename <nl> - std : : enable_if < <nl> - std : : is_convertible < basic_json_t , typename T : : value_type > : : value and <nl> - not std : : is_same < basic_json_t , typename T : : value_type > : : value and <nl> - not std : : is_arithmetic < T > : : value and <nl> - not std : : is_convertible < std : : string , T > : : value and <nl> - not has_mapped_type < T > : : value <nl> - , int > : : type = 0 > <nl> + template < class T , typename std : : enable_if < <nl> + std : : is_convertible < basic_json_t , typename T : : value_type > : : value and <nl> + not std : : is_same < basic_json_t , typename T : : value_type > : : value and <nl> + not std : : is_arithmetic < T > : : value and <nl> + not std : : is_convertible < std : : string , T > : : value and <nl> + not has_mapped_type < T > : : value <nl> + , int > : : type = 0 > <nl> T get_impl ( T * ) const <nl> { <nl> if ( is_array ( ) ) <nl> class basic_json <nl> } <nl> <nl> / / / get an array ( explicit ) <nl> - template < class T , typename <nl> - std : : enable_if < <nl> - std : : is_convertible < basic_json_t , T > : : value and <nl> - not std : : is_same < basic_json_t , T > : : value <nl> - , int > : : type = 0 > <nl> + template < class T , typename std : : enable_if < <nl> + std : : is_convertible < basic_json_t , T > : : value and <nl> + not std : : is_same < basic_json_t , T > : : value <nl> + , int > : : type = 0 > <nl> std : : vector < T > get_impl ( std : : vector < T > * ) const <nl> { <nl> if ( is_array ( ) ) <nl> class basic_json <nl> } <nl> <nl> / / / get an array ( explicit ) <nl> - template < class T , typename <nl> - std : : enable_if < <nl> - std : : is_same < basic_json , typename T : : value_type > : : value and <nl> - not has_mapped_type < T > : : value <nl> - , int > : : type = 0 > <nl> + template < class T , typename std : : enable_if < <nl> + std : : is_same < basic_json , typename T : : value_type > : : value and <nl> + not has_mapped_type < T > : : value <nl> + , int > : : type = 0 > <nl> T get_impl ( T * ) const <nl> { <nl> if ( is_array ( ) ) <nl> class basic_json <nl> } <nl> <nl> / / / get a string ( explicit ) <nl> - template < typename T , typename <nl> - std : : enable_if < <nl> - std : : is_convertible < string_t , T > : : value <nl> - , int > : : type = 0 > <nl> + template < typename T , typename std : : enable_if < <nl> + std : : is_convertible < string_t , T > : : value <nl> + , int > : : type = 0 > <nl> T get_impl ( T * ) const <nl> { <nl> if ( is_string ( ) ) <nl> class basic_json <nl> } <nl> <nl> / / / get a number ( explicit ) <nl> - template < typename T , typename <nl> - std : : enable_if < <nl> - std : : is_arithmetic < T > : : value <nl> - , int > : : type = 0 > <nl> 
+ template < typename T , typename std : : enable_if < <nl> + std : : is_arithmetic < T > : : value , int > : : type = 0 > <nl> T get_impl ( T * ) const <nl> { <nl> switch ( m_type ) <nl> class basic_json <nl> <nl> @ since version 1 . 0 . 0 <nl> * / <nl> - template < typename ValueType , typename <nl> - std : : enable_if < <nl> - not std : : is_pointer < ValueType > : : value <nl> - , int > : : type = 0 > <nl> + template < typename ValueType , typename = typename std : : enable_if < <nl> + not std : : is_pointer < ValueType > : : value > : : type > <nl> ValueType get ( ) const <nl> { <nl> return get_impl ( static_cast < ValueType * > ( nullptr ) ) ; <nl> class basic_json <nl> <nl> @ since version 1 . 0 . 0 <nl> * / <nl> - template < typename PointerType , typename <nl> - std : : enable_if < <nl> - std : : is_pointer < PointerType > : : value <nl> - , int > : : type = 0 > <nl> + template < typename PointerType , typename = typename std : : enable_if < <nl> + std : : is_pointer < PointerType > : : value > : : type > <nl> PointerType get ( ) noexcept <nl> { <nl> / / delegate the call to get_ptr <nl> class basic_json <nl> @ brief get a pointer value ( explicit ) <nl> @ copydoc get ( ) <nl> * / <nl> - template < typename PointerType , typename <nl> - std : : enable_if < <nl> - std : : is_pointer < PointerType > : : value <nl> - , int > : : type = 0 > <nl> + template < typename PointerType , typename = typename std : : enable_if < <nl> + std : : is_pointer < PointerType > : : value > : : type > <nl> constexpr const PointerType get ( ) const noexcept <nl> { <nl> / / delegate the call to get_ptr <nl> class basic_json <nl> <nl> @ since version 1 . 0 . 0 <nl> * / <nl> - template < typename PointerType , typename <nl> - std : : enable_if < <nl> - std : : is_pointer < PointerType > : : value <nl> - , int > : : type = 0 > <nl> + template < typename PointerType , typename = typename std : : enable_if < <nl> + std : : is_pointer < PointerType > : : value > : : type > <nl> PointerType get_ptr ( ) noexcept <nl> { <nl> / / get the type of the PointerType ( remove pointer and const ) <nl> class basic_json <nl> @ brief get a pointer value ( implicit ) <nl> @ copydoc get_ptr ( ) <nl> * / <nl> - template < typename PointerType , typename <nl> - std : : enable_if < <nl> + template < typename PointerType , typename = typename std : : enable_if < <nl> std : : is_pointer < PointerType > : : value <nl> - and std : : is_const < typename std : : remove_pointer < PointerType > : : type > : : value <nl> - , int > : : type = 0 > <nl> + and std : : is_const < typename std : : remove_pointer < PointerType > : : type > : : value > : : type > <nl> constexpr const PointerType get_ptr ( ) const noexcept <nl> { <nl> / / get the type of the PointerType ( remove pointer and const ) <nl> class basic_json <nl> <nl> @ since version 1 . 1 . 
0 <nl> * / <nl> - template < typename ReferenceType , typename <nl> - std : : enable_if < <nl> - std : : is_reference < ReferenceType > : : value <nl> - , int > : : type = 0 > <nl> + template < typename ReferenceType , typename = typename std : : enable_if < <nl> + std : : is_reference < ReferenceType > : : value > : : type > <nl> ReferenceType get_ref ( ) <nl> { <nl> / / delegate call to get_ref_impl <nl> class basic_json <nl> @ brief get a reference value ( implicit ) <nl> @ copydoc get_ref ( ) <nl> * / <nl> - template < typename ReferenceType , typename <nl> - std : : enable_if < <nl> + template < typename ReferenceType , typename = typename std : : enable_if < <nl> std : : is_reference < ReferenceType > : : value <nl> - and std : : is_const < typename std : : remove_reference < ReferenceType > : : type > : : value <nl> - , int > : : type = 0 > <nl> + and std : : is_const < typename std : : remove_reference < ReferenceType > : : type > : : value > : : type > <nl> ReferenceType get_ref ( ) const <nl> { <nl> / / delegate call to get_ref_impl <nl> class basic_json <nl> <nl> @ since version 1 . 0 . 0 <nl> * / <nl> - template < typename ValueType , typename <nl> - std : : enable_if < <nl> + template < typename ValueType , typename = typename std : : enable_if < <nl> not std : : is_pointer < ValueType > : : value <nl> and not std : : is_same < ValueType , typename string_t : : value_type > : : value <nl> # ifndef _MSC_VER / / Fix for issue # 167 operator < < abiguity under VS2015 <nl> and not std : : is_same < ValueType , std : : initializer_list < typename string_t : : value_type > > : : value <nl> # endif <nl> - , int > : : type = 0 > <nl> + > : : type > <nl> operator ValueType ( ) const <nl> { <nl> / / delegate the call to get < > ( ) const <nl> class basic_json <nl> <nl> @ since version 1 . 0 . 0 <nl> * / <nl> - template < class ValueType , typename <nl> - std : : enable_if < <nl> - std : : is_convertible < basic_json_t , ValueType > : : value <nl> - , int > : : type = 0 > <nl> + template < class ValueType , typename = typename std : : enable_if < <nl> + std : : is_convertible < basic_json_t , ValueType > : : value > : : type > <nl> ValueType value ( const typename object_t : : key_type & key , ValueType default_value ) const <nl> { <nl> / / at only works for objects <nl> class basic_json <nl> <nl> @ since version 2 . 0 . 2 <nl> * / <nl> - template < class ValueType , typename <nl> - std : : enable_if < <nl> - std : : is_convertible < basic_json_t , ValueType > : : value <nl> - , int > : : type = 0 > <nl> + template < class ValueType , typename = typename std : : enable_if < <nl> + std : : is_convertible < basic_json_t , ValueType > : : value > : : type > <nl> ValueType value ( const json_pointer & ptr , ValueType default_value ) const <nl> { <nl> / / at only works for objects <nl> class basic_json <nl> <nl> @ since version 1 . 0 . 
0 <nl> * / <nl> - template < class InteratorType , typename <nl> - std : : enable_if < <nl> - std : : is_same < InteratorType , typename basic_json_t : : iterator > : : value or <nl> - std : : is_same < InteratorType , typename basic_json_t : : const_iterator > : : value <nl> - , int > : : type <nl> - = 0 > <nl> + template < class InteratorType , typename = typename std : : enable_if < <nl> + std : : is_same < InteratorType , typename basic_json_t : : iterator > : : value or <nl> + std : : is_same < InteratorType , typename basic_json_t : : const_iterator > : : value > : : type > <nl> InteratorType erase ( InteratorType pos ) <nl> { <nl> / / make sure iterator fits the current value <nl> class basic_json <nl> <nl> @ since version 1 . 0 . 0 <nl> * / <nl> - template < class InteratorType , typename <nl> - std : : enable_if < <nl> - std : : is_same < InteratorType , typename basic_json_t : : iterator > : : value or <nl> - std : : is_same < InteratorType , typename basic_json_t : : const_iterator > : : value <nl> - , int > : : type <nl> - = 0 > <nl> + template < class InteratorType , typename = typename std : : enable_if < <nl> + std : : is_same < InteratorType , typename basic_json_t : : iterator > : : value or <nl> + std : : is_same < InteratorType , typename basic_json_t : : const_iterator > : : value > : : type > <nl> InteratorType erase ( InteratorType first , InteratorType last ) <nl> { <nl> / / make sure iterator fits the current value <nl> class basic_json <nl> const unsigned int current_indent = 0 ) const <nl> { <nl> / / variable to hold indentation for recursive calls <nl> - unsigned int new_indent = current_indent ; <nl> + auto new_indent = current_indent ; <nl> <nl> switch ( m_type ) <nl> { <nl> class basic_json <nl> const std : : size_t codepoint2 = 0 ) <nl> { <nl> / / calculate the code point from the given code points <nl> - std : : size_t codepoint = codepoint1 ; <nl> + auto codepoint = codepoint1 ; <nl> <nl> / / check if codepoint1 is a high surrogate <nl> if ( codepoint1 > = 0xD800 and codepoint1 < = 0xDBFF ) <nl> class basic_json <nl> auto reference_token = reference_string . substr ( start , slash - start ) ; <nl> <nl> / / check reference tokens are properly escaped <nl> - for ( size_t pos = reference_token . find_first_of ( " ~ " ) ; <nl> + for ( auto pos = reference_token . find_first_of ( " ~ " ) ; <nl> pos ! = std : : string : : npos ; <nl> pos = reference_token . find_first_of ( " ~ " , pos + 1 ) ) <nl> { <nl> class basic_json <nl> assert ( not f . empty ( ) ) ; <nl> <nl> for ( <nl> - size_t pos = s . find ( f ) ; / / find first occurrence of f <nl> + auto pos = s . find ( f ) ; / / find first occurrence of f <nl> pos ! = std : : string : : npos ; / / make sure f was found <nl> s . replace ( pos , f . size ( ) , t ) , / / replace with t <nl> pos = s . find ( f , pos + t . size ( ) ) / / find next occurrence of f <nl> class basic_json <nl> else <nl> { <nl> / / make sure the top element of the pointer exists <nl> - json_pointer top_pointer = ptr . top ( ) ; <nl> + auto top_pointer = ptr . top ( ) ; <nl> if ( top_pointer ! = ptr ) <nl> { <nl> basic_json & x = result . at ( top_pointer ) ; <nl> namespace std <nl> <nl> @ since version 1 . 0 . 
0 <nl> * / <nl> - template < > <nl> + template < > <nl> inline void swap ( nlohmann : : json & j1 , <nl> nlohmann : : json & j2 ) noexcept ( <nl> is_nothrow_move_constructible < nlohmann : : json > : : value and <nl> inline void swap ( nlohmann : : json & j1 , <nl> } <nl> <nl> / / / hash value for JSON objects <nl> - template < > <nl> + template < > <nl> struct hash < nlohmann : : json > <nl> { <nl> / * ! <nl> mmm a / src / json . hpp . re2c <nl> ppp b / src / json . hpp . re2c <nl> class basic_json <nl> <nl> @ since version 1 . 0 . 0 <nl> * / <nl> - template < class CompatibleObjectType , typename <nl> - std : : enable_if < <nl> - std : : is_constructible < typename object_t : : key_type , typename CompatibleObjectType : : key_type > : : value and <nl> - std : : is_constructible < basic_json , typename CompatibleObjectType : : mapped_type > : : value , int > : : type <nl> - = 0 > <nl> + template < class CompatibleObjectType , typename std : : enable_if < <nl> + std : : is_constructible < typename object_t : : key_type , typename CompatibleObjectType : : key_type > : : value and <nl> + std : : is_constructible < basic_json , typename CompatibleObjectType : : mapped_type > : : value , int > : : type <nl> + = 0 > <nl> basic_json ( const CompatibleObjectType & val ) <nl> : m_type ( value_t : : object ) <nl> { <nl> class basic_json <nl> <nl> @ since version 1 . 0 . 0 <nl> * / <nl> - template < class CompatibleArrayType , typename <nl> - std : : enable_if < <nl> - not std : : is_same < CompatibleArrayType , typename basic_json_t : : iterator > : : value and <nl> - not std : : is_same < CompatibleArrayType , typename basic_json_t : : const_iterator > : : value and <nl> - not std : : is_same < CompatibleArrayType , typename basic_json_t : : reverse_iterator > : : value and <nl> - not std : : is_same < CompatibleArrayType , typename basic_json_t : : const_reverse_iterator > : : value and <nl> - not std : : is_same < CompatibleArrayType , typename array_t : : iterator > : : value and <nl> - not std : : is_same < CompatibleArrayType , typename array_t : : const_iterator > : : value and <nl> - std : : is_constructible < basic_json , typename CompatibleArrayType : : value_type > : : value , int > : : type <nl> - = 0 > <nl> + template < class CompatibleArrayType , typename std : : enable_if < <nl> + not std : : is_same < CompatibleArrayType , typename basic_json_t : : iterator > : : value and <nl> + not std : : is_same < CompatibleArrayType , typename basic_json_t : : const_iterator > : : value and <nl> + not std : : is_same < CompatibleArrayType , typename basic_json_t : : reverse_iterator > : : value and <nl> + not std : : is_same < CompatibleArrayType , typename basic_json_t : : const_reverse_iterator > : : value and <nl> + not std : : is_same < CompatibleArrayType , typename array_t : : iterator > : : value and <nl> + not std : : is_same < CompatibleArrayType , typename array_t : : const_iterator > : : value and <nl> + std : : is_constructible < basic_json , typename CompatibleArrayType : : value_type > : : value , int > : : type <nl> + = 0 > <nl> basic_json ( const CompatibleArrayType & val ) <nl> : m_type ( value_t : : array ) <nl> { <nl> class basic_json <nl> <nl> @ since version 1 . 0 . 
0 <nl> * / <nl> - template < class CompatibleStringType , typename <nl> - std : : enable_if < <nl> - std : : is_constructible < string_t , CompatibleStringType > : : value , int > : : type <nl> - = 0 > <nl> + template < class CompatibleStringType , typename std : : enable_if < <nl> + std : : is_constructible < string_t , CompatibleStringType > : : value , int > : : type <nl> + = 0 > <nl> basic_json ( const CompatibleStringType & val ) <nl> : basic_json ( string_t ( val ) ) <nl> { <nl> class basic_json <nl> <nl> @ since version 1 . 0 . 0 <nl> * / <nl> - template < typename T , <nl> - typename std : : enable_if < <nl> + template < typename T , typename std : : enable_if < <nl> not ( std : : is_same < T , int > : : value ) <nl> and std : : is_same < T , number_integer_t > : : value <nl> , int > : : type <nl> class basic_json <nl> <nl> @ since version 2 . 0 . 0 <nl> * / <nl> - template < typename T , <nl> - typename std : : enable_if < <nl> + template < typename T , typename std : : enable_if < <nl> not ( std : : is_same < T , int > : : value ) <nl> and std : : is_same < T , number_unsigned_t > : : value <nl> , int > : : type <nl> class basic_json <nl> <nl> @ since version 2 . 0 . 0 <nl> * / <nl> - template < typename CompatibleNumberUnsignedType , typename <nl> - std : : enable_if < <nl> - std : : is_constructible < number_unsigned_t , CompatibleNumberUnsignedType > : : value and <nl> - std : : numeric_limits < CompatibleNumberUnsignedType > : : is_integer and <nl> - not std : : numeric_limits < CompatibleNumberUnsignedType > : : is_signed , <nl> - CompatibleNumberUnsignedType > : : type <nl> - = 0 > <nl> + template < typename CompatibleNumberUnsignedType , typename std : : enable_if < <nl> + std : : is_constructible < number_unsigned_t , CompatibleNumberUnsignedType > : : value and <nl> + std : : numeric_limits < CompatibleNumberUnsignedType > : : is_integer and <nl> + not std : : numeric_limits < CompatibleNumberUnsignedType > : : is_signed , CompatibleNumberUnsignedType > : : type <nl> + = 0 > <nl> basic_json ( const CompatibleNumberUnsignedType val ) noexcept <nl> : m_type ( value_t : : number_unsigned ) , <nl> m_value ( static_cast < number_unsigned_t > ( val ) ) <nl> class basic_json <nl> <nl> @ since version 1 . 0 . 0 <nl> * / <nl> - template < typename CompatibleNumberFloatType , typename = typename <nl> - std : : enable_if < <nl> + template < typename CompatibleNumberFloatType , typename = typename std : : enable_if < <nl> std : : is_constructible < number_float_t , CompatibleNumberFloatType > : : value and <nl> - std : : is_floating_point < CompatibleNumberFloatType > : : value > : : type <nl> - > <nl> + std : : is_floating_point < CompatibleNumberFloatType > : : value > : : type > <nl> basic_json ( const CompatibleNumberFloatType val ) noexcept <nl> : basic_json ( number_float_t ( val ) ) <nl> { <nl> class basic_json <nl> @ since version 1 . 0 . 0 <nl> * / <nl> basic_json ( std : : initializer_list < basic_json > init , <nl> - bool type_deduction = true , <nl> - value_t manual_type = value_t : : array ) <nl> + const bool type_deduction = true , <nl> + const value_t manual_type = value_t : : array ) <nl> { <nl> / / check if each element is an array with two elements whose first <nl> / / element is a string <nl> class basic_json <nl> <nl> @ since version 1 . 0 . 
0 <nl> * / <nl> - template < class InputIT , typename <nl> - std : : enable_if < <nl> - std : : is_same < InputIT , typename basic_json_t : : iterator > : : value or <nl> - std : : is_same < InputIT , typename basic_json_t : : const_iterator > : : value <nl> - , int > : : type <nl> - = 0 > <nl> + template < class InputIT , typename = typename std : : enable_if < <nl> + std : : is_same < InputIT , typename basic_json_t : : iterator > : : value or <nl> + std : : is_same < InputIT , typename basic_json_t : : const_iterator > : : value > : : type > <nl> basic_json ( InputIT first , InputIT last ) <nl> { <nl> assert ( first . m_object ! = nullptr ) ; <nl> class basic_json <nl> / / / / / / / / / / / / / / / / / / <nl> <nl> / / / get an object ( explicit ) <nl> - template < class T , typename <nl> - std : : enable_if < <nl> - std : : is_convertible < typename object_t : : key_type , typename T : : key_type > : : value and <nl> - std : : is_convertible < basic_json_t , typename T : : mapped_type > : : value <nl> - , int > : : type = 0 > <nl> + template < class T , typename std : : enable_if < <nl> + std : : is_convertible < typename object_t : : key_type , typename T : : key_type > : : value and <nl> + std : : is_convertible < basic_json_t , typename T : : mapped_type > : : value <nl> + , int > : : type = 0 > <nl> T get_impl ( T * ) const <nl> { <nl> if ( is_object ( ) ) <nl> class basic_json <nl> } <nl> <nl> / / / get an array ( explicit ) <nl> - template < class T , typename <nl> - std : : enable_if < <nl> - std : : is_convertible < basic_json_t , typename T : : value_type > : : value and <nl> - not std : : is_same < basic_json_t , typename T : : value_type > : : value and <nl> - not std : : is_arithmetic < T > : : value and <nl> - not std : : is_convertible < std : : string , T > : : value and <nl> - not has_mapped_type < T > : : value <nl> - , int > : : type = 0 > <nl> + template < class T , typename std : : enable_if < <nl> + std : : is_convertible < basic_json_t , typename T : : value_type > : : value and <nl> + not std : : is_same < basic_json_t , typename T : : value_type > : : value and <nl> + not std : : is_arithmetic < T > : : value and <nl> + not std : : is_convertible < std : : string , T > : : value and <nl> + not has_mapped_type < T > : : value <nl> + , int > : : type = 0 > <nl> T get_impl ( T * ) const <nl> { <nl> if ( is_array ( ) ) <nl> class basic_json <nl> } <nl> <nl> / / / get an array ( explicit ) <nl> - template < class T , typename <nl> - std : : enable_if < <nl> - std : : is_convertible < basic_json_t , T > : : value and <nl> - not std : : is_same < basic_json_t , T > : : value <nl> - , int > : : type = 0 > <nl> + template < class T , typename std : : enable_if < <nl> + std : : is_convertible < basic_json_t , T > : : value and <nl> + not std : : is_same < basic_json_t , T > : : value <nl> + , int > : : type = 0 > <nl> std : : vector < T > get_impl ( std : : vector < T > * ) const <nl> { <nl> if ( is_array ( ) ) <nl> class basic_json <nl> } <nl> <nl> / / / get an array ( explicit ) <nl> - template < class T , typename <nl> - std : : enable_if < <nl> - std : : is_same < basic_json , typename T : : value_type > : : value and <nl> - not has_mapped_type < T > : : value <nl> - , int > : : type = 0 > <nl> + template < class T , typename std : : enable_if < <nl> + std : : is_same < basic_json , typename T : : value_type > : : value and <nl> + not has_mapped_type < T > : : value <nl> + , int > : : type = 0 > <nl> T get_impl ( T * ) const <nl> { <nl> if ( is_array ( ) ) <nl> class 
basic_json <nl> } <nl> <nl> / / / get a string ( explicit ) <nl> - template < typename T , typename <nl> - std : : enable_if < <nl> - std : : is_convertible < string_t , T > : : value <nl> - , int > : : type = 0 > <nl> + template < typename T , typename std : : enable_if < <nl> + std : : is_convertible < string_t , T > : : value <nl> + , int > : : type = 0 > <nl> T get_impl ( T * ) const <nl> { <nl> if ( is_string ( ) ) <nl> class basic_json <nl> } <nl> <nl> / / / get a number ( explicit ) <nl> - template < typename T , typename <nl> - std : : enable_if < <nl> - std : : is_arithmetic < T > : : value <nl> - , int > : : type = 0 > <nl> + template < typename T , typename std : : enable_if < <nl> + std : : is_arithmetic < T > : : value , int > : : type = 0 > <nl> T get_impl ( T * ) const <nl> { <nl> switch ( m_type ) <nl> class basic_json <nl> <nl> @ since version 1 . 0 . 0 <nl> * / <nl> - template < typename ValueType , typename <nl> - std : : enable_if < <nl> - not std : : is_pointer < ValueType > : : value <nl> - , int > : : type = 0 > <nl> + template < typename ValueType , typename = typename std : : enable_if < <nl> + not std : : is_pointer < ValueType > : : value > : : type > <nl> ValueType get ( ) const <nl> { <nl> return get_impl ( static_cast < ValueType * > ( nullptr ) ) ; <nl> class basic_json <nl> <nl> @ since version 1 . 0 . 0 <nl> * / <nl> - template < typename PointerType , typename <nl> - std : : enable_if < <nl> - std : : is_pointer < PointerType > : : value <nl> - , int > : : type = 0 > <nl> + template < typename PointerType , typename = typename std : : enable_if < <nl> + std : : is_pointer < PointerType > : : value > : : type > <nl> PointerType get ( ) noexcept <nl> { <nl> / / delegate the call to get_ptr <nl> class basic_json <nl> @ brief get a pointer value ( explicit ) <nl> @ copydoc get ( ) <nl> * / <nl> - template < typename PointerType , typename <nl> - std : : enable_if < <nl> - std : : is_pointer < PointerType > : : value <nl> - , int > : : type = 0 > <nl> + template < typename PointerType , typename = typename std : : enable_if < <nl> + std : : is_pointer < PointerType > : : value > : : type > <nl> constexpr const PointerType get ( ) const noexcept <nl> { <nl> / / delegate the call to get_ptr <nl> class basic_json <nl> <nl> @ since version 1 . 0 . 0 <nl> * / <nl> - template < typename PointerType , typename <nl> - std : : enable_if < <nl> - std : : is_pointer < PointerType > : : value <nl> - , int > : : type = 0 > <nl> + template < typename PointerType , typename = typename std : : enable_if < <nl> + std : : is_pointer < PointerType > : : value > : : type > <nl> PointerType get_ptr ( ) noexcept <nl> { <nl> / / get the type of the PointerType ( remove pointer and const ) <nl> class basic_json <nl> @ brief get a pointer value ( implicit ) <nl> @ copydoc get_ptr ( ) <nl> * / <nl> - template < typename PointerType , typename <nl> - std : : enable_if < <nl> + template < typename PointerType , typename = typename std : : enable_if < <nl> std : : is_pointer < PointerType > : : value <nl> - and std : : is_const < typename std : : remove_pointer < PointerType > : : type > : : value <nl> - , int > : : type = 0 > <nl> + and std : : is_const < typename std : : remove_pointer < PointerType > : : type > : : value > : : type > <nl> constexpr const PointerType get_ptr ( ) const noexcept <nl> { <nl> / / get the type of the PointerType ( remove pointer and const ) <nl> class basic_json <nl> <nl> @ since version 1 . 1 . 
0 <nl> * / <nl> - template < typename ReferenceType , typename <nl> - std : : enable_if < <nl> - std : : is_reference < ReferenceType > : : value <nl> - , int > : : type = 0 > <nl> + template < typename ReferenceType , typename = typename std : : enable_if < <nl> + std : : is_reference < ReferenceType > : : value > : : type > <nl> ReferenceType get_ref ( ) <nl> { <nl> / / delegate call to get_ref_impl <nl> class basic_json <nl> @ brief get a reference value ( implicit ) <nl> @ copydoc get_ref ( ) <nl> * / <nl> - template < typename ReferenceType , typename <nl> - std : : enable_if < <nl> + template < typename ReferenceType , typename = typename std : : enable_if < <nl> std : : is_reference < ReferenceType > : : value <nl> - and std : : is_const < typename std : : remove_reference < ReferenceType > : : type > : : value <nl> - , int > : : type = 0 > <nl> + and std : : is_const < typename std : : remove_reference < ReferenceType > : : type > : : value > : : type > <nl> ReferenceType get_ref ( ) const <nl> { <nl> / / delegate call to get_ref_impl <nl> class basic_json <nl> <nl> @ since version 1 . 0 . 0 <nl> * / <nl> - template < typename ValueType , typename <nl> - std : : enable_if < <nl> + template < typename ValueType , typename = typename std : : enable_if < <nl> not std : : is_pointer < ValueType > : : value <nl> and not std : : is_same < ValueType , typename string_t : : value_type > : : value <nl> # ifndef _MSC_VER / / Fix for issue # 167 operator < < abiguity under VS2015 <nl> and not std : : is_same < ValueType , std : : initializer_list < typename string_t : : value_type > > : : value <nl> # endif <nl> - , int > : : type = 0 > <nl> + > : : type > <nl> operator ValueType ( ) const <nl> { <nl> / / delegate the call to get < > ( ) const <nl> class basic_json <nl> <nl> @ since version 1 . 0 . 0 <nl> * / <nl> - template < class ValueType , typename <nl> - std : : enable_if < <nl> - std : : is_convertible < basic_json_t , ValueType > : : value <nl> - , int > : : type = 0 > <nl> + template < class ValueType , typename = typename std : : enable_if < <nl> + std : : is_convertible < basic_json_t , ValueType > : : value > : : type > <nl> ValueType value ( const typename object_t : : key_type & key , ValueType default_value ) const <nl> { <nl> / / at only works for objects <nl> class basic_json <nl> <nl> @ since version 2 . 0 . 2 <nl> * / <nl> - template < class ValueType , typename <nl> - std : : enable_if < <nl> - std : : is_convertible < basic_json_t , ValueType > : : value <nl> - , int > : : type = 0 > <nl> + template < class ValueType , typename = typename std : : enable_if < <nl> + std : : is_convertible < basic_json_t , ValueType > : : value > : : type > <nl> ValueType value ( const json_pointer & ptr , ValueType default_value ) const <nl> { <nl> / / at only works for objects <nl> class basic_json <nl> <nl> @ since version 1 . 0 . 
0 <nl> * / <nl> - template < class InteratorType , typename <nl> - std : : enable_if < <nl> - std : : is_same < InteratorType , typename basic_json_t : : iterator > : : value or <nl> - std : : is_same < InteratorType , typename basic_json_t : : const_iterator > : : value <nl> - , int > : : type <nl> - = 0 > <nl> + template < class InteratorType , typename = typename std : : enable_if < <nl> + std : : is_same < InteratorType , typename basic_json_t : : iterator > : : value or <nl> + std : : is_same < InteratorType , typename basic_json_t : : const_iterator > : : value > : : type > <nl> InteratorType erase ( InteratorType pos ) <nl> { <nl> / / make sure iterator fits the current value <nl> class basic_json <nl> <nl> @ since version 1 . 0 . 0 <nl> * / <nl> - template < class InteratorType , typename <nl> - std : : enable_if < <nl> - std : : is_same < InteratorType , typename basic_json_t : : iterator > : : value or <nl> - std : : is_same < InteratorType , typename basic_json_t : : const_iterator > : : value <nl> - , int > : : type <nl> - = 0 > <nl> + template < class InteratorType , typename = typename std : : enable_if < <nl> + std : : is_same < InteratorType , typename basic_json_t : : iterator > : : value or <nl> + std : : is_same < InteratorType , typename basic_json_t : : const_iterator > : : value > : : type > <nl> InteratorType erase ( InteratorType first , InteratorType last ) <nl> { <nl> / / make sure iterator fits the current value <nl> class basic_json <nl> const unsigned int current_indent = 0 ) const <nl> { <nl> / / variable to hold indentation for recursive calls <nl> - unsigned int new_indent = current_indent ; <nl> + auto new_indent = current_indent ; <nl> <nl> switch ( m_type ) <nl> { <nl> class basic_json <nl> const std : : size_t codepoint2 = 0 ) <nl> { <nl> / / calculate the code point from the given code points <nl> - std : : size_t codepoint = codepoint1 ; <nl> + auto codepoint = codepoint1 ; <nl> <nl> / / check if codepoint1 is a high surrogate <nl> if ( codepoint1 > = 0xD800 and codepoint1 < = 0xDBFF ) <nl> class basic_json <nl> auto reference_token = reference_string . substr ( start , slash - start ) ; <nl> <nl> / / check reference tokens are properly escaped <nl> - for ( size_t pos = reference_token . find_first_of ( " ~ " ) ; <nl> + for ( auto pos = reference_token . find_first_of ( " ~ " ) ; <nl> pos ! = std : : string : : npos ; <nl> pos = reference_token . find_first_of ( " ~ " , pos + 1 ) ) <nl> { <nl> class basic_json <nl> assert ( not f . empty ( ) ) ; <nl> <nl> for ( <nl> - size_t pos = s . find ( f ) ; / / find first occurrence of f <nl> + auto pos = s . find ( f ) ; / / find first occurrence of f <nl> pos ! = std : : string : : npos ; / / make sure f was found <nl> s . replace ( pos , f . size ( ) , t ) , / / replace with t <nl> pos = s . find ( f , pos + t . size ( ) ) / / find next occurrence of f <nl> class basic_json <nl> else <nl> { <nl> / / make sure the top element of the pointer exists <nl> - json_pointer top_pointer = ptr . top ( ) ; <nl> + auto top_pointer = ptr . top ( ) ; <nl> if ( top_pointer ! = ptr ) <nl> { <nl> basic_json & x = result . at ( top_pointer ) ; <nl> namespace std <nl> <nl> @ since version 1 . 0 . 
0 <nl> * / <nl> - template < > <nl> + template < > <nl> inline void swap ( nlohmann : : json & j1 , <nl> nlohmann : : json & j2 ) noexcept ( <nl> is_nothrow_move_constructible < nlohmann : : json > : : value and <nl> inline void swap ( nlohmann : : json & j1 , <nl> } <nl> <nl> / / / hash value for JSON objects <nl> - template < > <nl> + template < > <nl> struct hash < nlohmann : : json > <nl> { <nl> / * ! <nl> mmm a / test / src / unit - class_const_iterator . cpp <nl> ppp b / test / src / unit - class_const_iterator . cpp <nl> TEST_CASE ( " const_iterator class " ) <nl> json : : const_iterator it2 ( & j ) ; <nl> it2 = it ; <nl> } <nl> + <nl> + SECTION ( " copy constructor from non - const iterator " ) <nl> + { <nl> + SECTION ( " create from uninitialized iterator " ) <nl> + { <nl> + const json : : iterator it { } ; <nl> + json : : const_iterator cit ( it ) ; <nl> + } <nl> + <nl> + SECTION ( " create from initialized iterator " ) <nl> + { <nl> + json j ; <nl> + const json : : iterator it = j . begin ( ) ; <nl> + json : : const_iterator cit ( it ) ; <nl> + } <nl> + } <nl> } <nl> <nl> SECTION ( " initialization " ) <nl> mmm a / test / src / unit - deserialization . cpp <nl> ppp b / test / src / unit - deserialization . cpp <nl> using nlohmann : : json ; <nl> <nl> TEST_CASE ( " deserialization " ) <nl> { <nl> - SECTION ( " stream " ) <nl> + SECTION ( " successful deserialization " ) <nl> { <nl> - std : : stringstream ss ; <nl> - ss < < " [ \ " foo \ " , 1 , 2 , 3 , false , { \ " one \ " : 1 } ] " ; <nl> - json j = json : : parse ( ss ) ; <nl> - CHECK ( j = = json ( { " foo " , 1 , 2 , 3 , false , { { " one " , 1 } } } ) ) ; <nl> - } <nl> + SECTION ( " stream " ) <nl> + { <nl> + std : : stringstream ss ; <nl> + ss < < " [ \ " foo \ " , 1 , 2 , 3 , false , { \ " one \ " : 1 } ] " ; <nl> + json j = json : : parse ( ss ) ; <nl> + CHECK ( j = = json ( { " foo " , 1 , 2 , 3 , false , { { " one " , 1 } } } ) ) ; <nl> + } <nl> <nl> - SECTION ( " string " ) <nl> - { <nl> - auto s = " [ \ " foo \ " , 1 , 2 , 3 , false , { \ " one \ " : 1 } ] " ; <nl> - json j = json : : parse ( s ) ; <nl> - CHECK ( j = = json ( { " foo " , 1 , 2 , 3 , false , { { " one " , 1 } } } ) ) ; <nl> - } <nl> + SECTION ( " string " ) <nl> + { <nl> + json : : string_t s = " [ \ " foo \ " , 1 , 2 , 3 , false , { \ " one \ " : 1 } ] " ; <nl> + json j = json : : parse ( s ) ; <nl> + CHECK ( j = = json ( { " foo " , 1 , 2 , 3 , false , { { " one " , 1 } } } ) ) ; <nl> + } <nl> <nl> - SECTION ( " operator < < " ) <nl> - { <nl> - std : : stringstream ss ; <nl> - ss < < " [ \ " foo \ " , 1 , 2 , 3 , false , { \ " one \ " : 1 } ] " ; <nl> - json j ; <nl> - j < < ss ; <nl> - CHECK ( j = = json ( { " foo " , 1 , 2 , 3 , false , { { " one " , 1 } } } ) ) ; <nl> - } <nl> + SECTION ( " operator < < " ) <nl> + { <nl> + std : : stringstream ss ; <nl> + ss < < " [ \ " foo \ " , 1 , 2 , 3 , false , { \ " one \ " : 1 } ] " ; <nl> + json j ; <nl> + j < < ss ; <nl> + CHECK ( j = = json ( { " foo " , 1 , 2 , 3 , false , { { " one " , 1 } } } ) ) ; <nl> + } <nl> <nl> - SECTION ( " operator > > " ) <nl> - { <nl> - std : : stringstream ss ; <nl> - ss < < " [ \ " foo \ " , 1 , 2 , 3 , false , { \ " one \ " : 1 } ] " ; <nl> - json j ; <nl> - ss > > j ; <nl> - CHECK ( j = = json ( { " foo " , 1 , 2 , 3 , false , { { " one " , 1 } } } ) ) ; <nl> + SECTION ( " operator > > " ) <nl> + { <nl> + std : : stringstream ss ; <nl> + ss < < " [ \ " foo \ " , 1 , 2 , 3 , false , { \ " one \ " : 1 } ] " ; <nl> + json j ; <nl> + ss > > j ; <nl> + CHECK ( j = = json ( { 
" foo " , 1 , 2 , 3 , false , { { " one " , 1 } } } ) ) ; <nl> + } <nl> + <nl> + SECTION ( " user - defined string literal " ) <nl> + { <nl> + CHECK ( " [ \ " foo \ " , 1 , 2 , 3 , false , { \ " one \ " : 1 } ] " _json = = json ( { " foo " , 1 , 2 , 3 , false , { { " one " , 1 } } } ) ) ; <nl> + } <nl> } <nl> <nl> - SECTION ( " user - defined string literal " ) <nl> + SECTION ( " successful deserialization " ) <nl> { <nl> - CHECK ( " [ \ " foo \ " , 1 , 2 , 3 , false , { \ " one \ " : 1 } ] " _json = = json ( { " foo " , 1 , 2 , 3 , false , { { " one " , 1 } } } ) ) ; <nl> + SECTION ( " stream " ) <nl> + { <nl> + std : : stringstream ss ; <nl> + ss < < " [ \ " foo \ " , 1 , 2 , 3 , false , { \ " one \ " : 1 } " ; <nl> + CHECK_THROWS_AS ( json : : parse ( ss ) , std : : invalid_argument ) ; <nl> + CHECK_THROWS_WITH ( json : : parse ( ss ) , " parse error - unexpected end of input " ) ; <nl> + } <nl> + <nl> + SECTION ( " string " ) <nl> + { <nl> + json : : string_t s = " [ \ " foo \ " , 1 , 2 , 3 , false , { \ " one \ " : 1 } " ; <nl> + CHECK_THROWS_AS ( json : : parse ( s ) , std : : invalid_argument ) ; <nl> + CHECK_THROWS_WITH ( json : : parse ( s ) , " parse error - unexpected end of input ; expected ' ] ' " ) ; <nl> + } <nl> + <nl> + SECTION ( " operator < < " ) <nl> + { <nl> + std : : stringstream ss ; <nl> + ss < < " [ \ " foo \ " , 1 , 2 , 3 , false , { \ " one \ " : 1 } " ; <nl> + json j ; <nl> + CHECK_THROWS_AS ( j < < ss , std : : invalid_argument ) ; <nl> + CHECK_THROWS_WITH ( j < < ss , " parse error - unexpected end of input " ) ; <nl> + } <nl> + <nl> + SECTION ( " operator > > " ) <nl> + { <nl> + std : : stringstream ss ; <nl> + ss < < " [ \ " foo \ " , 1 , 2 , 3 , false , { \ " one \ " : 1 } " ; <nl> + json j ; <nl> + CHECK_THROWS_AS ( ss > > j , std : : invalid_argument ) ; <nl> + CHECK_THROWS_WITH ( ss > > j , " parse error - unexpected end of input " ) ; <nl> + } <nl> + <nl> + SECTION ( " user - defined string literal " ) <nl> + { <nl> + CHECK_THROWS_AS ( " [ \ " foo \ " , 1 , 2 , 3 , false , { \ " one \ " : 1 } " _json , std : : invalid_argument ) ; <nl> + CHECK_THROWS_WITH ( " [ \ " foo \ " , 1 , 2 , 3 , false , { \ " one \ " : 1 } " _json , <nl> + " parse error - unexpected end of input ; expected ' ] ' " ) ; <nl> + } <nl> } <nl> } <nl>
cleanup and improvement of branch coverage
nlohmann/json
a485aa8d272f3656c1fb5840ce100f3288f7e2da
2016-08-30T21:44:15Z
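The nlohmann/json hunks above only reflow the existing std::enable_if constraints and tighten a few tests; the SFINAE pattern itself is what carries the weight. Below is a minimal sketch of the same constructor-constraint technique, assuming nothing about basic_json internals: the Value class and its members are invented for the illustration and are not part of the library.

#include <iostream>
#include <string>
#include <type_traits>

class Value
{
  public:
    // enabled only for types from which a std::string can be constructed
    template<class CompatibleStringType, typename std::enable_if<
                 std::is_constructible<std::string, CompatibleStringType>::value, int>::type = 0>
    Value(const CompatibleStringType& val)
        : kind("string"), str(val)
    {}

    // enabled only for genuine floating-point types
    template<typename CompatibleNumberFloatType, typename = typename std::enable_if<
                 std::is_floating_point<CompatibleNumberFloatType>::value>::type>
    Value(const CompatibleNumberFloatType val)
        : kind("number"), num(static_cast<double>(val))
    {}

    std::string kind;
    std::string str;
    double num = 0.0;
};

int main()
{
    Value a("hello");   // string literal picks the first overload
    Value b(3.14);      // double picks the second overload
    std::cout << a.kind << ' ' << b.kind << '\n';   // prints: string number
}

The two constructors show both spellings that appear in the diff, a dummy int = 0 non-type parameter and a defaulted typename parameter; either way the overload simply drops out of resolution when the trait is false.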
mmm a / doc / classes / CanvasItem . xml <nl> ppp b / doc / classes / CanvasItem . xml <nl> <nl> < argument index = " 7 " name = " antialiased " type = " bool " default = " false " > <nl> < / argument > <nl> < description > <nl> + Draws an arc between the given angles . The larger the value of [ code ] point_count [ / code ] , the smoother the curve . <nl> < / description > <nl> < / method > <nl> < method name = " draw_char " > <nl> mmm a / doc / classes / Control . xml <nl> ppp b / doc / classes / Control . xml <nl> <nl> < description > <nl> Virtual method to be implemented by the user . Returns whether the given [ code ] point [ / code ] is inside this control . <nl> If not overridden , default behavior is checking if the point is within control ' s Rect . <nl> - [ b ] Node : [ / b ] If you want to check if a point is inside the control , you can use [ code ] get_rect ( ) . has_point ( point ) [ / code ] . <nl> + [ b ] Note : [ / b ] If you want to check if a point is inside the control , you can use [ code ] get_rect ( ) . has_point ( point ) [ / code ] . <nl> < / description > <nl> < / method > <nl> < method name = " has_shader_override " qualifiers = " const " > <nl> mmm a / doc / classes / Font . xml <nl> ppp b / doc / classes / Font . xml <nl> <nl> < return type = " bool " > <nl> < / return > <nl> < description > <nl> + Returns [ code ] true [ / code ] if the font has an outline . <nl> < / description > <nl> < / method > <nl> < method name = " is_distance_field_hint " qualifiers = " const " > <nl> mmm a / doc / classes / InputMap . xml <nl> ppp b / doc / classes / InputMap . xml <nl> <nl> < argument index = " 1 " name = " deadzone " type = " float " > <nl> < / argument > <nl> < description > <nl> + Sets a deadzone value for the action . <nl> < / description > <nl> < / method > <nl> < method name = " add_action " > <nl> mmm a / doc / classes / MainLoop . xml <nl> ppp b / doc / classes / MainLoop . xml <nl> <nl> < argument index = " 1 " name = " granted " type = " bool " > <nl> < / argument > <nl> < description > <nl> - Emitted when an user responds to permission request . <nl> + Emitted when a user responds to a permission request . <nl> < / description > <nl> < / signal > <nl> < / signals > <nl> mmm a / doc / classes / NinePatchRect . xml <nl> ppp b / doc / classes / NinePatchRect . xml <nl> <nl> < signals > <nl> < signal name = " texture_changed " > <nl> < description > <nl> - Fired when the node ' s texture changes . <nl> + Emitted when the node ' s texture changes . <nl> < / description > <nl> < / signal > <nl> < / signals > <nl>
[DOCS] Corrections and clarifications to classref
godotengine/godot
378c4895ae8ad3a5c2926d4eef2deae0af4197f4
2020-01-10T18:46:41Z
mmm a / test / lit . cfg <nl> ppp b / test / lit . cfg <nl> config . filecheck = inferSwiftBinary ( ' FileCheck ' ) <nl> config . llvm_dwarfdump = inferSwiftBinary ( ' llvm - dwarfdump ' ) <nl> config . llvm_readelf = inferSwiftBinary ( ' llvm - readelf ' ) <nl> config . llvm_dis = inferSwiftBinary ( ' llvm - dis ' ) <nl> + config . llvm_nm = inferSwiftBinary ( ' llvm - nm ' ) <nl> config . sourcekitd_test = inferSwiftBinary ( ' sourcekitd - test ' ) <nl> config . complete_test = inferSwiftBinary ( ' complete - test ' ) <nl> config . swift_api_digester = inferSwiftBinary ( ' swift - api - digester ' ) <nl> config . substitutions . append ( ( ' % swift - llvm - opt ' , config . swift_llvm_opt ) ) <nl> config . substitutions . append ( ( ' % llvm - dwarfdump ' , config . llvm_dwarfdump ) ) <nl> config . substitutions . append ( ( ' % llvm - readelf ' , config . llvm_readelf ) ) <nl> config . substitutions . append ( ( ' % llvm - dis ' , config . llvm_dis ) ) <nl> + config . substitutions . append ( ( ' % llvm - nm ' , config . llvm_nm ) ) <nl> config . substitutions . append ( ( ' % swift - demangle - yamldump ' , config . swift_demangle_yamldump ) ) <nl> config . substitutions . append ( ( ' % Benchmark_O ' , config . benchmark_o ) ) <nl> config . substitutions . append ( ( ' % Benchmark_Driver ' , config . benchmark_driver ) ) <nl> new file mode 100644 <nl> index 000000000000 . . 709cb0f6fbae <nl> mmm / dev / null <nl> ppp b / test / stdlib / llvm - support - odr - violation . test - sh <nl> <nl> + / / RUN : % llvm - nm - - defined - only - C % platform - module - dir / % target - library - name ( swiftCore ) | % FileCheck % s <nl> + / / CHECK - NOT : [ ^ : ] llvm : : <nl>
test: ensure that we do not regress the standard library isolation
apple/swift
a92894c53012ebf77716c7a5dbb6ddacb6b1ad0a
2020-05-18T21:08:16Z
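The new lit substitution and test above pipe llvm-nm --defined-only output for swiftCore through FileCheck and assert that no symbol lives in the top-level llvm:: namespace; the CHECK-NOT pattern deliberately allows llvm:: when it is preceded by a colon, that is, when it is nested inside some other namespace. One common way to get that effect for a vendored copy of LLVM code is to wrap it in an outer, possibly inline, namespace. The sketch below shows only that idea; the __vendor name is invented here and is not claimed to be the mechanism the Swift runtime actually uses.

// Sketch only: wrapping vendored code so its mangled names no longer start
// with llvm:: and therefore cannot ODR-collide with the host toolchain.
inline namespace __vendor {        // 'inline' keeps llvm::popcount spellable by callers
namespace llvm {

int popcount(unsigned x) {         // demangles as __vendor::llvm::popcount(unsigned int)
  int n = 0;
  for (; x != 0; x &= x - 1) ++n;  // clear the lowest set bit each iteration
  return n;
}

} // namespace llvm
} // namespace __vendor

int main() { return llvm::popcount(0xF0u) == 4 ? 0 : 1; }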
mmm a / lib / ClangImporter / ImportDecl . cpp <nl> ppp b / lib / ClangImporter / ImportDecl . cpp <nl> <nl> # include " clang / AST / ASTContext . h " <nl> # include " clang / AST / Attr . h " <nl> # include " clang / AST / DeclVisitor . h " <nl> + # include " clang / Basic / CharInfo . h " <nl> # include " clang / Lex / Preprocessor . h " <nl> <nl> # include " llvm / ADT / SmallString . h " <nl> static bool isNSDictionaryMethod ( const clang : : ObjCMethodDecl * MD , <nl> / / / This is used to derive the common prefix of enum constants so we can elide <nl> / / / it from the Swift interface . <nl> static StringRef getCommonWordPrefix ( StringRef a , StringRef b ) { <nl> + / / Ensure that ' b ' is the longer string . <nl> + if ( a . size ( ) > b . size ( ) ) <nl> + std : : swap ( a , b ) ; <nl> + <nl> unsigned prefixLength = 0 ; <nl> - unsigned commonSize = std : : min ( a . size ( ) , b . size ( ) ) ; <nl> + unsigned commonSize = a . size ( ) ; <nl> for ( size_t i = 0 ; i < commonSize ; + + i ) { <nl> / / If this is a camel - case word boundary , advance the prefix length . <nl> - if ( isupper ( a [ i ] ) & & isupper ( b [ i ] ) ) <nl> + if ( clang : : isUppercase ( a [ i ] ) & & clang : : isUppercase ( b [ i ] ) ) <nl> prefixLength = i ; <nl> <nl> if ( a [ i ] ! = b [ i ] ) <nl> return a . slice ( 0 , prefixLength ) ; <nl> } <nl> - return a . slice ( 0 , commonSize ) ; <nl> + if ( b . size ( ) = = commonSize | | clang : : isIdentifierHead ( b [ commonSize ] ) ) <nl> + prefixLength = commonSize ; <nl> + return a . slice ( 0 , prefixLength ) ; <nl> } <nl> <nl> namespace { <nl> mmm a / test / Inputs / clang - importer - sdk / usr / include / Foundation . h <nl> ppp b / test / Inputs / clang - importer - sdk / usr / include / Foundation . h <nl> typedef NS_ENUM ( unsigned char , NSAliasesEnum ) { <nl> NSAliasesByName = NSAliasesOriginal , <nl> } ; <nl> <nl> + typedef NS_ENUM ( NSUInteger , NSNumberFormatterBehavior ) { <nl> + NSNumberFormatterBehaviorDefault = 0 , <nl> + NSNumberFormatterBehavior10_0 = 1000 , <nl> + NSNumberFormatterBehavior10_4 = 1040 , <nl> + } ; <nl> + <nl> + <nl> / / / Aaa . NSRuncingOptions . Bbb . <nl> typedef NS_OPTIONS ( NSUInteger , NSRuncingOptions ) { <nl> NSRuncingEnableMince = 1 , <nl>
[ClangImporter] Revise enum splitting to not stop right before a number.
apple/swift
fdbfd2439aa736510b42c6809e3d8d61403a0e0f
2014-04-01T00:13:25Z
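The getCommonWordPrefix revision above has a subtle goal: when one name is a plain prefix of another, the shared prefix may only extend to the end of the shorter name if the longer name continues with a character that could begin an identifier; otherwise stripping the prefix would leave an enum case that starts with a digit, such as 10_0. A stand-alone version of the revised logic, using <cctype> as an approximation of clang::isUppercase and clang::isIdentifierHead:

#include <cctype>
#include <iostream>
#include <string>

static std::string commonWordPrefix(std::string a, std::string b) {
  if (a.size() > b.size())
    std::swap(a, b);                       // ensure 'b' is the longer string

  size_t prefixLength = 0;
  const size_t commonSize = a.size();
  for (size_t i = 0; i < commonSize; ++i) {
    if (std::isupper((unsigned char)a[i]) && std::isupper((unsigned char)b[i]))
      prefixLength = i;                    // camel-case word boundary
    if (a[i] != b[i])
      return a.substr(0, prefixLength);
  }
  // 'a' is a prefix of 'b': keep all of it only if what follows in 'b' could
  // still start an identifier, so the stripped name never begins with a digit.
  if (b.size() == commonSize ||
      std::isalpha((unsigned char)b[commonSize]) || b[commonSize] == '_')
    prefixLength = commonSize;
  return a.substr(0, prefixLength);
}

int main() {
  std::cout << commonWordPrefix("NSNumberFormatterBehavior",
                                "NSNumberFormatterBehavior10_0") << '\n';
  // prints NSNumberFormatter; the old tail logic would have returned the full
  // NSNumberFormatterBehavior, leaving 10_0 as a case name.
}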
mmm a / fdbserver / RestoreApplier . actor . cpp <nl> ppp b / fdbserver / RestoreApplier . actor . cpp <nl> void handleUpdateRateRequest ( RestoreUpdateRateRequest req , Reference < RestoreAppl <nl> } <nl> <nl> ACTOR static Future < Void > traceRate ( const char * context , Reference < ApplierBatchData > batchData , int batchIndex , <nl> - UID nodeID , NotifiedVersion * finishedVB ) { <nl> + UID nodeID , NotifiedVersion * finishedVB , bool once = false ) { <nl> loop { <nl> if ( ( finishedVB - > get ( ) ! = batchIndex - 1 ) | | ! batchData . isValid ( ) ) { <nl> break ; <nl> ACTOR static Future < Void > traceRate ( const char * context , Reference < ApplierBatchD <nl> . detail ( " TargetBytesMB " , batchData - > targetWriteRateMB ) <nl> . detail ( " InflightBytesMB " , batchData - > applyingDataBytes ) <nl> . detail ( " ReceivedBytes " , batchData - > receivedBytes ) ; <nl> + if ( once ) { <nl> + break ; <nl> + } <nl> wait ( delay ( 5 . 0 ) ) ; <nl> } <nl> <nl> ACTOR static Future < Void > handleApplyToDBRequest ( RestoreVersionBatchRequest req , <nl> / / Multiple actors can wait on req . batchIndex - 1 ; <nl> / / Avoid setting finishedBatch when finishedBatch > req . batchIndex <nl> if ( self - > finishedBatch . get ( ) = = req . batchIndex - 1 ) { <nl> - batchData - > rateTracer = traceRate ( " FastRestoreApplierTransactionRateControlDone " , batchData , req . batchIndex , <nl> - self - > id ( ) , & self - > finishedBatch ) ; / / Track the last rate info <nl> + batchData - > rateTracer = <nl> + traceRate ( " FastRestoreApplierTransactionRateControlDone " , batchData , req . batchIndex , self - > id ( ) , <nl> + & self - > finishedBatch , true / * print once * / ) ; / / Track the last rate info <nl> self - > finishedBatch . set ( req . batchIndex ) ; <nl> / / self - > batch [ req . batchIndex ] - > vbState = ApplierVersionBatchState : : DONE ; <nl> / / Free memory for the version batch <nl> mmm a / fdbserver / RestoreController . actor . cpp <nl> ppp b / fdbserver / RestoreController . actor . cpp <nl> ACTOR Future < Void > startRestoreController ( Reference < RestoreWorkerData > controlle <nl> / / recruitRestoreRoles must come after controllerWorker has finished collectWorkerInterface <nl> wait ( recruitRestoreRoles ( controllerWorker , self ) ) ; <nl> <nl> - self - > addActor . send ( updateHeartbeatTime ( self ) ) ; <nl> + / / self - > addActor . send ( updateHeartbeatTime ( self ) ) ; <nl> self - > addActor . send ( checkRolesLiveness ( self ) ) ; <nl> self - > addActor . send ( updateProcessMetrics ( self ) ) ; <nl> self - > addActor . send ( traceProcessMetrics ( self , " RestoreController " ) ) ; <nl>
FastRestore: Fix segmentation fault
apple/foundationdb
cd89606fd96df186a111367a97572938dab815dc
2020-09-29T23:10:40Z
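The fix above stops the per-batch rate tracer from waking up every five seconds once a version batch has finished: the final trace is requested with once set, so the loop emits one line and exits instead of repeatedly touching state that is about to be released. A plain-C++ sketch of just that control-flow change, with Flow's ACTOR, NotifiedVersion and TraceEvent machinery replaced by standard-library stand-ins:

#include <chrono>
#include <functional>
#include <iostream>
#include <string>
#include <thread>

// Periodic reporter; when 'once' is set it logs a single final line and returns.
void traceRate(const std::string& context,
               const std::function<bool()>& batchStillValid,
               const std::function<double()>& appliedRateMB,
               bool once = false) {
  while (batchStillValid()) {
    std::cout << context << " rateMB=" << appliedRateMB() << '\n';
    if (once)
      break;                                   // last report for a finished batch
    std::this_thread::sleep_for(std::chrono::seconds(5));
  }
}

int main() {
  traceRate("ApplierTransactionRateControlDone",
            [] { return true; }, [] { return 12.5; }, /*once=*/true);
}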
mmm a / src / arm / macro - assembler - arm . cc <nl> ppp b / src / arm / macro - assembler - arm . cc <nl> void MacroAssembler : : CompareInstanceType ( Register map , <nl> } <nl> <nl> <nl> + void MacroAssembler : : CompareRoot ( Register obj , <nl> + Heap : : RootListIndex index ) { <nl> + ASSERT ( ! obj . is ( ip ) ) ; <nl> + LoadRoot ( ip , index ) ; <nl> + cmp ( obj , ip ) ; <nl> + } <nl> + <nl> + <nl> void MacroAssembler : : CheckMap ( Register obj , <nl> Register scratch , <nl> Handle < Map > map , <nl> void MacroAssembler : : AbortIfNotString ( Register object ) { <nl> void MacroAssembler : : AbortIfNotRootValue ( Register src , <nl> Heap : : RootListIndex root_value_index , <nl> const char * message ) { <nl> - ASSERT ( ! src . is ( ip ) ) ; <nl> - LoadRoot ( ip , root_value_index ) ; <nl> - cmp ( src , ip ) ; <nl> + CompareRoot ( src , root_value_index ) ; <nl> Assert ( eq , message ) ; <nl> } <nl> <nl> mmm a / src / arm / macro - assembler - arm . h <nl> ppp b / src / arm / macro - assembler - arm . h <nl> class MacroAssembler : public Assembler { <nl> bool is_heap_object ) ; <nl> <nl> <nl> + / / Compare the object in a register to a value from the root list . <nl> + / / Uses the ip register as scratch . <nl> + void CompareRoot ( Register obj , Heap : : RootListIndex index ) ; <nl> + <nl> + <nl> / / Load and check the instance type of an object for being a string . <nl> / / Loads the type into the second argument register . <nl> / / Returns a condition that will be enabled if the object was a string . <nl>
Add new ARM macro assembler function CompareRoot left out of previous commit.
v8/v8
5a40de9b21a123008bd538008ab4a6bfd2d7f4e3
2011-03-03T12:21:37Z
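The V8 change above is a classic macro-assembler refactoring: the repeated load-root-into-ip-then-compare sequence becomes a named CompareRoot helper whose scratch-register requirement is asserted in one place, and callers such as AbortIfNotRootValue shrink to a single call plus the condition check. A toy illustration of the shape of that refactoring; the Register, RootIndex and MacroAssembler types below are stand-ins, not V8's real classes.

#include <cassert>
#include <cstdio>

enum Register { r0, r1, ip };
enum RootIndex { kUndefinedValueRootIndex, kTrueValueRootIndex };

struct MacroAssembler {
  void LoadRoot(Register dst, RootIndex index) {
    std::printf("ldr r%d, [roots + %d]\n", (int)dst, (int)index);
  }
  void cmp(Register a, Register b) { std::printf("cmp r%d, r%d\n", (int)a, (int)b); }

  // New helper: compare a register against a root-list value, clobbering ip.
  void CompareRoot(Register obj, RootIndex index) {
    assert(obj != ip && "CompareRoot uses ip as scratch");
    LoadRoot(ip, index);
    cmp(obj, ip);
  }
};

int main() {
  MacroAssembler masm;
  masm.CompareRoot(r0, kUndefinedValueRootIndex);   // emits ldr + cmp
}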
mmm a / dbms / src / Databases / DatabaseLazy . cpp <nl> ppp b / dbms / src / Databases / DatabaseLazy . cpp <nl> void DatabaseLazy : : createTable ( <nl> if ( ! endsWith ( table - > getName ( ) , " Log " ) ) <nl> throw Exception ( " Lazy engine can be used only with * Log tables . " , ErrorCodes : : UNSUPPORTED_METHOD ) ; <nl> DatabaseOnDisk : : createTable ( * this , context , table_name , table , query ) ; <nl> + <nl> + / / / DatabaseOnDisk : : createTable renames file , so we need to get new metadata_modification_time . <nl> + std : : lock_guard lock ( tables_mutex ) ; <nl> + auto it = tables_cache . find ( table_name ) ; <nl> + if ( it ! = tables_cache . end ( ) ) <nl> + it - > second . metadata_modification_time = DatabaseOnDisk : : getTableMetadataModificationTime ( * this , table_name ) ; <nl> } <nl> <nl> <nl> void DatabaseLazy : : attachTable ( const String & table_name , const StoragePtr & tab <nl> <nl> auto [ it , inserted ] = tables_cache . emplace ( std : : piecewise_construct , <nl> std : : forward_as_tuple ( table_name ) , <nl> - std : : forward_as_tuple ( table , <nl> - current_time , <nl> - DatabaseOnDisk : : getTableMetadataModificationTime ( * this , table_name ) ) ) ; <nl> + std : : forward_as_tuple ( table , current_time , DatabaseOnDisk : : getTableMetadataModificationTime ( * this , table_name ) ) ) ; <nl> if ( ! inserted ) <nl> throw Exception ( " Table " + getDatabaseName ( ) + " . " + table_name + " already exists . " , ErrorCodes : : TABLE_ALREADY_EXISTS ) ; <nl> <nl> mmm a / dbms / src / Storages / System / StorageSystemTables . cpp <nl> ppp b / dbms / src / Storages / System / StorageSystemTables . cpp <nl> static bool needLockStructure ( const DatabasePtr & database , const Block & header ) <nl> if ( database - > getEngineName ( ) ! = " Lazy " ) <nl> return true ; <nl> <nl> - static std : : set < std : : string > columns_without_lock = { " database " , " name " , " metadata_modification_time " } ; <nl> + static const std : : set < std : : string > columns_without_lock = { " database " , " name " , " metadata_modification_time " } ; <nl> for ( const auto & column : header . getColumnsWithTypeAndName ( ) ) { <nl> - if ( columns_without_lock . find ( column . name ) ! = columns_without_lock . end ( ) ) { <nl> + if ( columns_without_lock . find ( column . name ) = = columns_without_lock . end ( ) ) { <nl> return true ; <nl> } <nl> } <nl> mmm a / dbms / tests / queries / 0_stateless / 01014_lazy_database_basic . reference <nl> ppp b / dbms / tests / queries / 0_stateless / 01014_lazy_database_basic . reference <nl> <nl> - testlazy log Log 0 [ ' / var / lib / clickhouse / data / testlazy / log / ' ] / var / lib / clickhouse / metadata / testlazy / log . sql 0000 - 00 - 00 00 : 00 : 00 [ ] [ ] CREATE TABLE testlazy . log ( ` a ` UInt64 , ` b ` UInt64 ) ENGINE = Log Log <nl> - testlazy slog StripeLog 0 [ ' / var / lib / clickhouse / data / testlazy / slog / ' ] / var / lib / clickhouse / metadata / testlazy / slog . sql 0000 - 00 - 00 00 : 00 : 00 [ ] [ ] CREATE TABLE testlazy . slog ( ` a ` UInt64 , ` b ` UInt64 ) ENGINE = StripeLog StripeLog <nl> - testlazy tlog TinyLog 0 [ ' / var / lib / clickhouse / data / testlazy / tlog / ' ] / var / lib / clickhouse / metadata / testlazy / tlog . sql 0000 - 00 - 00 00 : 00 : 00 [ ] [ ] CREATE TABLE testlazy . 
tlog ( ` a ` UInt64 , ` b ` UInt64 ) ENGINE = TinyLog TinyLog <nl> - testlazy log 0000 - 00 - 00 00 : 00 : 00 <nl> - testlazy slog 0000 - 00 - 00 00 : 00 : 00 <nl> - testlazy tlog 0000 - 00 - 00 00 : 00 : 00 <nl> - testlazy log2 0000 - 00 - 00 00 : 00 : 00 <nl> - testlazy slog 0000 - 00 - 00 00 : 00 : 00 <nl> - testlazy tlog 0000 - 00 - 00 00 : 00 : 00 <nl> - testlazy log2 0000 - 00 - 00 00 : 00 : 00 <nl> - testlazy slog 0000 - 00 - 00 00 : 00 : 00 <nl> - testlazy tlog 0000 - 00 - 00 00 : 00 : 00 <nl> + testlazy log CREATE TABLE testlazy . log ( ` a ` UInt64 , ` b ` UInt64 ) ENGINE = Log <nl> + testlazy slog CREATE TABLE testlazy . slog ( ` a ` UInt64 , ` b ` UInt64 ) ENGINE = StripeLog <nl> + testlazy tlog CREATE TABLE testlazy . tlog ( ` a ` UInt64 , ` b ` UInt64 ) ENGINE = TinyLog <nl> + testlazy log <nl> + testlazy slog <nl> + testlazy tlog <nl> + testlazy log2 <nl> + testlazy slog <nl> + testlazy tlog <nl> + testlazy log2 <nl> + testlazy slog <nl> + testlazy tlog <nl> 1 1 <nl> 2 2 <nl> 3 3 <nl> mmm a / dbms / tests / queries / 0_stateless / 01014_lazy_database_basic . sh <nl> ppp b / dbms / tests / queries / 0_stateless / 01014_lazy_database_basic . sh <nl> $ { CLICKHOUSE_CLIENT } - n - q " <nl> sleep 1 . 5 <nl> <nl> $ { CLICKHOUSE_CLIENT } - q " <nl> - SELECT * FROM system . tables WHERE database = ' testlazy ' ; <nl> + SELECT database , name , create_table_query FROM system . tables WHERE database = ' testlazy ' ; <nl> " <nl> <nl> sleep 1 . 5 <nl> <nl> $ { CLICKHOUSE_CLIENT } - q " <nl> - SELECT database , name , metadata_modification_time FROM system . tables WHERE database = ' testlazy ' ; <nl> + SELECT database , name FROM system . tables WHERE database = ' testlazy ' ; <nl> " <nl> <nl> sleep 1 . 5 <nl> sleep 1 . 5 <nl> $ { CLICKHOUSE_CLIENT } - n - q " <nl> SELECT * FROM testlazy . log LIMIT 0 ; - - drop testlazy . log from cache <nl> RENAME TABLE testlazy . log TO testlazy . log2 ; <nl> - SELECT database , name , metadata_modification_time FROM system . tables WHERE database = ' testlazy ' ; <nl> + SELECT database , name FROM system . tables WHERE database = ' testlazy ' ; <nl> " <nl> <nl> sleep 1 . 5 <nl> <nl> $ { CLICKHOUSE_CLIENT } - q " <nl> - SELECT database , name , metadata_modification_time FROM system . tables WHERE database = ' testlazy ' ; <nl> + SELECT database , name FROM system . tables WHERE database = ' testlazy ' ; <nl> " <nl> <nl> sleep 1 . 5 <nl>
fix metadata time
ClickHouse/ClickHouse
d6490892a7b81c9c0b105e8e1c8e33024ce16e23
2019-10-03T14:18:17Z
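Two separate problems are fixed above: DatabaseLazy::createTable now refreshes the cached metadata_modification_time under tables_mutex after DatabaseOnDisk::createTable has renamed the metadata file, and needLockStructure had its set-membership test inverted, so a structure lock must be taken as soon as any requested column falls outside the small lock-free set. A stand-alone version of the corrected predicate, with the column names copied from the diff and everything else simplified:

#include <set>
#include <string>
#include <vector>

bool needLockStructure(const std::vector<std::string>& requested_columns) {
  static const std::set<std::string> columns_without_lock = {
      "database", "name", "metadata_modification_time"};
  for (const auto& column : requested_columns)
    if (columns_without_lock.find(column) == columns_without_lock.end())
      return true;             // this column needs the table's structure lock
  return false;                // every column can be served from metadata alone
}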
new file mode 100644 <nl> index 000000000000 . . e3f14b666436 <nl> mmm / dev / null <nl> ppp b / include / swift / ABI / ValueWitness . def <nl> <nl> + / / = = = mmm ValueWitness . def - Value witness x - macros mmmmmmmmmmmm - - * - C + + - * - = = = / / <nl> + / / <nl> + / / This source file is part of the Swift . org open source project <nl> + / / <nl> + / / Copyright ( c ) 2014 - 2017 Apple Inc . and the Swift project authors <nl> + / / Licensed under Apache License v2 . 0 with Runtime Library Exception <nl> + / / <nl> + / / See https : / / swift . org / LICENSE . txt for license information <nl> + / / See https : / / swift . org / CONTRIBUTORS . txt for the list of Swift project authors <nl> + / / <nl> + / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> + / / <nl> + / / X - macro definition file for value witness tables . <nl> + / / <nl> + / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> + <nl> + / / This file is " parameterized " in the sense that exactly one of the <nl> + / / following macros * must * be defined : <nl> + <nl> + / / / WANT_ALL_VALUE_WITNESSES <nl> + / / / Define this to expand all value witnesses , not just the ones from <nl> + / / / a specific category . <nl> + # if defined ( WANT_ALL_VALUE_WITNESSES ) <nl> + # undef WANT_ALL_VALUE_WITNESSES <nl> + # define WANT_REQUIRED_VALUE_WITNESSES 1 <nl> + # define WANT_EXTRA_INHABITANT_VALUE_WITNESSES 1 <nl> + # define WANT_ENUM_VALUE_WITNESSES 1 <nl> + <nl> + / / / WANT_ONLY_REQUIRED_VALUE_WITNESSES <nl> + / / / Define this to expand only the required value witnesses . <nl> + # elif defined ( WANT_ONLY_REQUIRED_VALUE_WITNESSES ) <nl> + # undef WANT_ONLY_REQUIRED_VALUE_WITNESSES <nl> + # define WANT_REQUIRED_VALUE_WITNESSES 1 <nl> + # define WANT_EXTRA_INHABITANT_VALUE_WITNESSES 0 <nl> + # define WANT_ENUM_VALUE_WITNESSES 0 <nl> + <nl> + / / / WANT_ONLY_EXTRA_INHABITANT_VALUE_WITNESSES <nl> + / / / Define this to expand only the extra - inhabitant value witnesses . <nl> + # elif defined ( WANT_ONLY_EXTRA_INHABITANT_VALUE_WITNESSES ) <nl> + # undef WANT_ONLY_EXTRA_INHABITANT_VALUE_WITNESSES <nl> + # define WANT_REQUIRED_VALUE_WITNESSES 0 <nl> + # define WANT_EXTRA_INHABITANT_VALUE_WITNESSES 1 <nl> + # define WANT_ENUM_VALUE_WITNESSES 0 <nl> + <nl> + / / / WANT_ONLY_ENUM_VALUE_WITNESSES <nl> + / / / Define this to expand only the enum value witnesses . <nl> + # elif defined ( WANT_ONLY_ENUM_VALUE_WITNESSES ) <nl> + # undef WANT_ONLY_ENUM_VALUE_WITNESSES <nl> + # define WANT_REQUIRED_VALUE_WITNESSES 0 <nl> + # define WANT_EXTRA_INHABITANT_VALUE_WITNESSES 0 <nl> + # define WANT_ENUM_VALUE_WITNESSES 1 <nl> + <nl> + / / / WANT_REQUIRED_VALUE_WITNESSES <nl> + / / / WANT_EXTRA_INHABITANT_VALUE_WITNESSES <nl> + / / / WANT_ENUM_VALUE_WITNESSES <nl> + / / / Define all of these to control exactly what to expand . <nl> + # else <nl> + # if ! defined ( WANT_REQUIRED_VALUE_WITNESSES ) | | ! defined ( WANT_EXTRA_INHABITANT_VALUE_WITNESSES ) | | ! defined ( WANT_ENUM_VALUE_WITNESSES ) <nl> + # error failed to define a WANT macro ; possible typo ? <nl> + # endif <nl> + # endif <nl> + <nl> + / / / VALUE_WITNESS ( lowerId , upperId ) <nl> + / / / A fallback called for value witnesses if either of DATA_VALUE_WITNESS or <nl> + / / / FUNCTION_VALUE_WITNESS is not defined . <nl> + <nl> + / / / FUNCTION_VALUE_WITNESS ( lowerId , upperId , returnType , paramTypeList ) <nl> + / / / A function value witness . 
Types will be defined in terms of the <nl> + / / / following macros : <nl> + / / / MUTABLE_VALUE_TYPE - a pointer to a mutable opaque value <nl> + / / / IMMUTABLE_VALUE_TYPE - a pointer to an immutable opaque value <nl> + / / / MUTABLE_BUFFER_TYPE - a pointer to a fixed - size value buffer <nl> + / / / IMMUTABLE_BUFFER_TYPE - a pointer to an immutable fixed - size buffer <nl> + / / / TYPE_TYPE - a pointer to type metadata <nl> + / / / SIZE_TYPE - size_t <nl> + / / / INT_TYPE - int <nl> + / / / VOID_TYPE - void <nl> + / / / Defaults to VALUE_WITNESS . <nl> + / / / FIXME : The ' copy ' witnesses should be using immutable types but aren ' t . <nl> + # ifndef FUNCTION_VALUE_WITNESS <nl> + # define FUNCTION_VALUE_WITNESS ( lowerId , upperId , returnType , paramTypes ) \ <nl> + VALUE_WITNESS ( lowerId , upperId ) <nl> + # endif <nl> + <nl> + / / / DATA_VALUE_WITNESS ( lowerId , upperId , type ) <nl> + / / / A non - function value witness . Types are specified as for <nl> + / / / FUNCTION_VALUE_WITNESS <nl> + / / / Defaults to VALUE_WITNESS . <nl> + # ifndef DATA_VALUE_WITNESS <nl> + # define DATA_VALUE_WITNESS ( lowerId , upperId , type ) \ <nl> + VALUE_WITNESS ( lowerId , upperId ) <nl> + # endif <nl> + <nl> + / / / Begin a range of value witnesses . This will be expanded immediately <nl> + / / / after the first value in the range , whose ID will be upperId . <nl> + / / / Range expansions do not interact well with the use of WANT_ONLY_ * . <nl> + # ifndef BEGIN_VALUE_WITNESS_RANGE <nl> + # define BEGIN_VALUE_WITNESS_RANGE ( rangeId , upperId ) <nl> + # endif <nl> + <nl> + / / / End a range of value witnesses . This will be expanded immediately <nl> + / / / after the last value in the range , whose ID will be upperId . <nl> + / / / Range expansions do not interact well with the use of WANT_ONLY_ * . <nl> + # ifndef END_VALUE_WITNESS_RANGE <nl> + # define END_VALUE_WITNESS_RANGE ( rangeId , upperId ) <nl> + # endif <nl> + <nl> + # if WANT_REQUIRED_VALUE_WITNESSES <nl> + <nl> + / / / T * ( * initializeBufferWithCopyOfBuffer ) ( B * dest , B * src , M * self ) ; <nl> + / / / Given an invalid buffer , initialize it as a copy of the <nl> + / / / object in the source buffer . This can be decomposed as : <nl> + / / / initializeWithCopy ( self - > allocateBuffer ( dest , self ) , self - > projectBuffer ( src ) , self ) <nl> + FUNCTION_VALUE_WITNESS ( initializeBufferWithCopyOfBuffer , <nl> + InitializeBufferWithCopyOfBuffer , <nl> + MUTABLE_VALUE_TYPE , <nl> + ( MUTABLE_BUFFER_TYPE , MUTABLE_BUFFER_TYPE , TYPE_TYPE ) ) <nl> + <nl> + BEGIN_VALUE_WITNESS_RANGE ( ValueWitness , <nl> + InitializeBufferWithCopyOfBuffer ) <nl> + BEGIN_VALUE_WITNESS_RANGE ( RequiredValueWitness , <nl> + InitializeBufferWithCopyOfBuffer ) <nl> + BEGIN_VALUE_WITNESS_RANGE ( RequiredValueWitnessFunction , <nl> + InitializeBufferWithCopyOfBuffer ) <nl> + <nl> + / / / void ( * destroy ) ( T * object , witness_t * self ) ; <nl> + / / / <nl> + / / / Given a valid object of this type , destroy it , leaving it as an <nl> + / / / invalid object . This is useful when generically destroying <nl> + / / / an object which has been allocated in - line , such as an array , <nl> + / / / struct , or tuple element . <nl> + FUNCTION_VALUE_WITNESS ( destroy , <nl> + Destroy , <nl> + VOID_TYPE , <nl> + ( MUTABLE_VALUE_TYPE , TYPE_TYPE ) ) <nl> + <nl> + / / / T * ( * initializeWithCopy ) ( T * dest , T * src , M * self ) ; <nl> + / / / <nl> + / / / Given an invalid object of this type , initialize it as a copy of <nl> + / / / the source object . 
Returns the dest object . <nl> + FUNCTION_VALUE_WITNESS ( initializeWithCopy , <nl> + InitializeWithCopy , <nl> + MUTABLE_VALUE_TYPE , <nl> + ( MUTABLE_VALUE_TYPE , MUTABLE_VALUE_TYPE , TYPE_TYPE ) ) <nl> + <nl> + / / / T * ( * assignWithCopy ) ( T * dest , T * src , M * self ) ; <nl> + / / / <nl> + / / / Given a valid object of this type , change it to be a copy of the <nl> + / / / source object . Returns the dest object . <nl> + FUNCTION_VALUE_WITNESS ( assignWithCopy , <nl> + AssignWithCopy , <nl> + MUTABLE_VALUE_TYPE , <nl> + ( MUTABLE_VALUE_TYPE , MUTABLE_VALUE_TYPE , TYPE_TYPE ) ) <nl> + <nl> + / / / T * ( * initializeWithTake ) ( T * dest , T * src , M * self ) ; <nl> + / / / <nl> + / / / Given an invalid object of this type , initialize it by taking <nl> + / / / the value of the source object . The source object becomes <nl> + / / / invalid . Returns the dest object . <nl> + FUNCTION_VALUE_WITNESS ( initializeWithTake , <nl> + InitializeWithTake , <nl> + MUTABLE_VALUE_TYPE , <nl> + ( MUTABLE_VALUE_TYPE , MUTABLE_VALUE_TYPE , TYPE_TYPE ) ) <nl> + <nl> + / / / T * ( * assignWithTake ) ( T * dest , T * src , M * self ) ; <nl> + / / / <nl> + / / / Given a valid object of this type , change it to be a copy of the <nl> + / / / source object . The source object becomes invalid . Returns the <nl> + / / / dest object . <nl> + FUNCTION_VALUE_WITNESS ( assignWithTake , <nl> + AssignWithTake , <nl> + MUTABLE_VALUE_TYPE , <nl> + ( MUTABLE_VALUE_TYPE , MUTABLE_VALUE_TYPE , TYPE_TYPE ) ) <nl> + <nl> + / / / T * ( * initializeBufferWithTakeOfBuffer ) ( B * dest , B * src , M * self ) ; <nl> + / / / Given an invalid buffer , initialize it by taking the value out of <nl> + / / / the source buffer . This can be ( inefficiently ) decomposed as : <nl> + / / / initializeWithTake ( self - > allocateBuffer ( dest , self ) , self - > projectBuffer ( src ) , self ) <nl> + / / / deallocateBuffer ( src , self ) <nl> + FUNCTION_VALUE_WITNESS ( initializeBufferWithTakeOfBuffer , <nl> + InitializeBufferWithTakeOfBuffer , <nl> + MUTABLE_VALUE_TYPE , <nl> + ( MUTABLE_BUFFER_TYPE , MUTABLE_BUFFER_TYPE , TYPE_TYPE ) ) <nl> + <nl> + / / / void ( * destroyArray ) ( T * object , size_t n , witness_t * self ) ; <nl> + / / / <nl> + / / / Given a valid array of n objects of this type , destroy the object , leaving <nl> + / / / the array invalid . This is useful when generically destroying an array of <nl> + / / / objects to avoid calling the scalar ' destroy ' witness in a loop . <nl> + FUNCTION_VALUE_WITNESS ( destroyArray , <nl> + DestroyArray , <nl> + VOID_TYPE , <nl> + ( MUTABLE_VALUE_TYPE , SIZE_TYPE , TYPE_TYPE ) ) <nl> + <nl> + / / / T * ( * initializeArrayWithCopy ) ( T * dest , T * src , size_t n , M * self ) ; <nl> + / / / <nl> + / / / Given an invalid array of n objects of this type , initialize the objects <nl> + / / / as a copy of the source array . Returns the dest array . <nl> + FUNCTION_VALUE_WITNESS ( initializeArrayWithCopy , <nl> + InitializeArrayWithCopy , <nl> + MUTABLE_VALUE_TYPE , <nl> + ( MUTABLE_VALUE_TYPE , MUTABLE_VALUE_TYPE , <nl> + SIZE_TYPE , TYPE_TYPE ) ) <nl> + <nl> + / / / T * ( * initializeArrayWithTakeFrontToBack ) ( T * dest , T * src , size_t n , M * self ) ; <nl> + / / / <nl> + / / / Given an invalid array of n objects of this type , initialize the objects <nl> + / / / by taking them from the source array in front - to - back order . <nl> + / / / The source array becomes invalid . <nl> + / / / Returns the dest array . 
<nl> + FUNCTION_VALUE_WITNESS ( initializeArrayWithTakeFrontToBack , <nl> + InitializeArrayWithTakeFrontToBack , <nl> + MUTABLE_VALUE_TYPE , <nl> + ( MUTABLE_VALUE_TYPE , MUTABLE_VALUE_TYPE , <nl> + SIZE_TYPE , TYPE_TYPE ) ) <nl> + <nl> + / / / T * ( * initializeArrayWithTakeBackToFront ) ( T * dest , T * src , size_t n , M * self ) ; <nl> + / / / <nl> + / / / Given an invalid array of n objects of this type , initialize the objects <nl> + / / / by taking them from the source array in back - to - front order . <nl> + / / / The source array becomes invalid . <nl> + / / / Returns the dest array . <nl> + FUNCTION_VALUE_WITNESS ( initializeArrayWithTakeBackToFront , <nl> + InitializeArrayWithTakeBackToFront , <nl> + MUTABLE_VALUE_TYPE , <nl> + ( MUTABLE_VALUE_TYPE , MUTABLE_VALUE_TYPE , <nl> + SIZE_TYPE , TYPE_TYPE ) ) <nl> + <nl> + END_VALUE_WITNESS_RANGE ( RequiredValueWitnessFunction , <nl> + InitializeArrayWithTakeBackToFront ) <nl> + <nl> + / / / size_t size ; <nl> + / / / <nl> + / / / The required storage size of a single object of this type . <nl> + DATA_VALUE_WITNESS ( size , <nl> + Size , <nl> + SIZE_TYPE ) <nl> + <nl> + BEGIN_VALUE_WITNESS_RANGE ( TypeLayoutWitness , <nl> + Size ) <nl> + <nl> + BEGIN_VALUE_WITNESS_RANGE ( RequiredTypeLayoutWitness , <nl> + Size ) <nl> + <nl> + / / / size_t flags ; <nl> + / / / <nl> + / / / The ValueWitnessAlignmentMask bits represent the required <nl> + / / / alignment of the first byte of an object of this type , expressed <nl> + / / / as a mask of the low bits that must not be set in the pointer . <nl> + / / / This representation can be easily converted to the ' alignof ' <nl> + / / / result by merely adding 1 , but it is more directly useful for <nl> + / / / performing dynamic structure layouts , and it grants an <nl> + / / / additional bit of precision in a compact field without needing <nl> + / / / to switch to an exponent representation . <nl> + / / / <nl> + / / / The ValueWitnessIsNonPOD bit is set if the type is not POD . <nl> + / / / <nl> + / / / The ValueWitnessIsNonInline bit is set if the type cannot be <nl> + / / / represented in a fixed - size buffer . <nl> + / / / <nl> + / / / The Enum_HasExtraInhabitants bit is set if the type ' s binary <nl> + / / / representation has " extra inhabitants " that do not form valid values of <nl> + / / / the type , and the value witness table contains the ExtraInhabitantWitness <nl> + / / / entries . <nl> + / / / <nl> + / / / The Enum_HasSpareBits bit is set if the type ' s binary representation <nl> + / / / has unused bits . <nl> + / / / <nl> + / / / The HasEnumWitnesses bit is set if the type is an enum type . <nl> + DATA_VALUE_WITNESS ( flags , <nl> + Flags , <nl> + SIZE_TYPE ) <nl> + <nl> + / / / size_t stride ; <nl> + / / / <nl> + / / / The required size per element of an array of this type . It is at least <nl> + / / / one , even for zero - sized types , like the empty tuple . <nl> + DATA_VALUE_WITNESS ( stride , <nl> + Stride , <nl> + SIZE_TYPE ) <nl> + <nl> + END_VALUE_WITNESS_RANGE ( RequiredTypeLayoutWitness , <nl> + Stride ) <nl> + <nl> + END_VALUE_WITNESS_RANGE ( RequiredValueWitness , <nl> + Stride ) <nl> + <nl> + # endif / * WANT_REQUIRED_VALUE_WITNESSES * / <nl> + <nl> + # if WANT_EXTRA_INHABITANT_VALUE_WITNESSES <nl> + <nl> + / / The following value witnesses are conditionally present based on <nl> + / / the Enum_HasExtraInhabitants bit of the flags . 
<nl> + <nl> + / / / size_t extraInhabitantFlags ; <nl> + / / / <nl> + / / / These bits are always present if the extra inhabitants witnesses are : <nl> + / / / <nl> + / / / - The NumExtraInhabitantsMask bits contain the number of extra <nl> + / / / inhabitants of the type representation . <nl> + / / / <nl> + / / / If the Enum_HasSpareBits flag is set in the value witness flags , these <nl> + / / / additional flags are available : <nl> + / / / <nl> + / / / - The NumSpareBitsMask bits contain the number of ( host - endian ) contiguous <nl> + / / / spare bits in the type representation . <nl> + / / / - The SpareBitsShiftMask bits contain the ( host - endian ) bit offset of the <nl> + / / / lowest spare bit . <nl> + DATA_VALUE_WITNESS ( extraInhabitantFlags , <nl> + ExtraInhabitantFlags , <nl> + SIZE_TYPE ) <nl> + <nl> + BEGIN_VALUE_WITNESS_RANGE ( ExtraInhabitantValueWitness , <nl> + ExtraInhabitantFlags ) <nl> + <nl> + END_VALUE_WITNESS_RANGE ( TypeLayoutWitness , <nl> + ExtraInhabitantFlags ) <nl> + <nl> + / / / void ( * storeExtraInhabitant ) ( T * obj , int index , M * self ) ; <nl> + / / / <nl> + / / / Given an invalid object of this type , store the representation of an <nl> + / / / extra inhabitant of the type . The object will remain invalid , because <nl> + / / / an extra inhabitant is by definition an invalid representation of the <nl> + / / / type . index must be less than numExtraInhabitants . <nl> + FUNCTION_VALUE_WITNESS ( storeExtraInhabitant , <nl> + StoreExtraInhabitant , <nl> + VOID_TYPE , <nl> + ( MUTABLE_VALUE_TYPE , INT_TYPE , TYPE_TYPE ) ) <nl> + <nl> + BEGIN_VALUE_WITNESS_RANGE ( ExtraInhabitantValueWitnessFunction , <nl> + StoreExtraInhabitant ) <nl> + <nl> + / / / int ( * getExtraInhabitantIndex ) ( T * obj , M * self ) ; <nl> + / / / <nl> + / / / Given an invalid object of this type with an extra inhabitant <nl> + / / / representation , returns the index of the extra inhabitant representation . <nl> + / / / Returns - 1 if the object is a valid value of the type . If non - negative , <nl> + / / / the return value is the same index that can be passed to <nl> + / / / storeExtraInhabitant to reproduce the representation . <nl> + FUNCTION_VALUE_WITNESS ( getExtraInhabitantIndex , <nl> + GetExtraInhabitantIndex , <nl> + INT_TYPE , <nl> + ( IMMUTABLE_VALUE_TYPE , TYPE_TYPE ) ) <nl> + <nl> + END_VALUE_WITNESS_RANGE ( ExtraInhabitantValueWitnessFunction , <nl> + GetExtraInhabitantIndex ) <nl> + <nl> + END_VALUE_WITNESS_RANGE ( ExtraInhabitantValueWitness , <nl> + GetExtraInhabitantIndex ) <nl> + <nl> + # endif / * WANT_EXTRA_INHABITANT_VALUE_WITNESSES * / <nl> + <nl> + # if WANT_ENUM_VALUE_WITNESSES <nl> + <nl> + / / The following value witnesses are conditionally present if the witnessed <nl> + / / type is an enum . <nl> + <nl> + / / / int ( * getEnumTag ) ( T * obj , M * self ) ; <nl> + / / / <nl> + / / / Given a valid object of this enum type , extracts the tag value indicating <nl> + / / / which case of the enum is inhabited . Returned values are in the range <nl> + / / / [ - ElementsWithPayload . . ElementsWithNoPayload - 1 ] . <nl> + FUNCTION_VALUE_WITNESS ( getEnumTag , <nl> + GetEnumTag , <nl> + INT_TYPE , <nl> + ( IMMUTABLE_VALUE_TYPE , TYPE_TYPE ) ) <nl> + <nl> + BEGIN_VALUE_WITNESS_RANGE ( EnumValueWitness , <nl> + GetEnumTag ) <nl> + <nl> + / / / void ( * destructiveProjectEnumData ) ( T * obj , M * self ) ; <nl> + / / / Given a valid object of this enum type , destructively extracts the <nl> + / / / associated payload . 
<nl> + FUNCTION_VALUE_WITNESS ( destructiveProjectEnumData , <nl> + DestructiveProjectEnumData , <nl> + VOID_TYPE , <nl> + ( MUTABLE_VALUE_TYPE , TYPE_TYPE ) ) <nl> + <nl> + / / / void ( * destructiveInjectEnumTag ) ( T * obj , int tag , M * self ) ; <nl> + / / / Given an enum case tag and a valid object of case ' s payload type , <nl> + / / / destructively inserts the tag into the payload . The given tag value <nl> + / / / must be in the range [ - ElementsWithPayload . . ElementsWithNoPayload - 1 ] . <nl> + FUNCTION_VALUE_WITNESS ( destructiveInjectEnumTag , <nl> + DestructiveInjectEnumTag , <nl> + VOID_TYPE , <nl> + ( MUTABLE_VALUE_TYPE , INT_TYPE , TYPE_TYPE ) ) <nl> + <nl> + END_VALUE_WITNESS_RANGE ( EnumValueWitness , <nl> + DestructiveInjectEnumTag ) <nl> + <nl> + END_VALUE_WITNESS_RANGE ( ValueWitness , <nl> + DestructiveInjectEnumTag ) <nl> + <nl> + # endif / * WANT_ENUM_VALUE_WITNESSES * / <nl> + <nl> + # undef MUTABLE_VALUE_TYPE <nl> + # undef IMMUTABLE_VALUE_TYPE <nl> + # undef MUTABLE_BUFFER_TYPE <nl> + # undef IMMUTABLE_BUFFER_TYPE <nl> + # undef TYPE_TYPE <nl> + # undef SIZE_TYPE <nl> + # undef INT_TYPE <nl> + # undef VOID_TYPE <nl> + <nl> + # undef END_VALUE_WITNESS_RANGE <nl> + # undef BEGIN_VALUE_WITNESS_RANGE <nl> + # undef DATA_VALUE_WITNESS <nl> + # undef FUNCTION_VALUE_WITNESS <nl> + # undef VALUE_WITNESS <nl> + # undef ENUM_VALUE_WITNESS <nl> + # undef EXTRA_INHABITANT_VALUE_WITNESS <nl> + # undef NON_REQUIRED_VALUE_WITNESS <nl> + # undef REQUIRED_VALUE_WITNESS <nl> + # undef WANT_ENUM_VALUE_WITNESSES <nl> + # undef WANT_EXTRA_INHABITANT_VALUE_WITNESSES <nl> + # undef WANT_REQUIRED_VALUE_WITNESSES <nl> mmm a / include / swift / IRGen / ValueWitness . h <nl> ppp b / include / swift / IRGen / ValueWitness . h <nl> namespace irgen { <nl> / / / This leaves us with 12 data operations , to which we add the <nl> / / / meta - operation ' sizeAndAlign ' for a total of 13 . <nl> enum class ValueWitness : unsigned { <nl> - / / Candidates that are likely to see use in existential code are then grouped <nl> - / / together for cache - locality reasons . <nl> - <nl> - / / / T * ( * initializeBufferWithCopyOfBuffer ) ( B * dest , B * src , M * self ) ; <nl> - / / / Given an invalid buffer , initialize it as a copy of the <nl> - / / / object in the source buffer . This can be decomposed as : <nl> - / / / initializeWithCopy ( self - > allocateBuffer ( dest , self ) , self - > projectBuffer ( src ) , self ) <nl> - InitializeBufferWithCopyOfBuffer , <nl> - <nl> - / / / void ( * destroy ) ( T * object , witness_t * self ) ; <nl> - / / / <nl> - / / / Given a valid object of this type , destroy it , leaving it as an <nl> - / / / invalid object . This is useful when generically destroying <nl> - / / / an object which has been allocated in - line , such as an array , <nl> - / / / struct , or tuple element . <nl> - Destroy , <nl> - <nl> - / / / T * ( * initializeWithCopy ) ( T * dest , T * src , M * self ) ; <nl> - / / / <nl> - / / / Given an invalid object of this type , initialize it as a copy of <nl> - / / / the source object . Returns the dest object . <nl> - InitializeWithCopy , <nl> - <nl> - / / / T * ( * assignWithCopy ) ( T * dest , T * src , M * self ) ; <nl> - / / / <nl> - / / / Given a valid object of this type , change it to be a copy of the <nl> - / / / source object . Returns the dest object . 
<nl> - AssignWithCopy , <nl> - <nl> - / / / T * ( * initializeWithTake ) ( T * dest , T * src , M * self ) ; <nl> - / / / <nl> - / / / Given an invalid object of this type , initialize it by taking <nl> - / / / the value of the source object . The source object becomes <nl> - / / / invalid . Returns the dest object . <nl> - InitializeWithTake , <nl> - <nl> - / / / T * ( * assignWithTake ) ( T * dest , T * src , M * self ) ; <nl> - / / / <nl> - / / / Given a valid object of this type , change it to be a copy of the <nl> - / / / source object . The source object becomes invalid . Returns the <nl> - / / / dest object . <nl> - AssignWithTake , <nl> - <nl> - / / / T * ( * initializeBufferWithTakeOfBuffer ) ( B * dest , B * src , M * self ) ; <nl> - / / / Given an invalid buffer , initialize it by taking the value out of <nl> - / / / the source buffer . This can be ( inefficiently ) decomposed as : <nl> - / / / initializeWithTake ( self - > allocateBuffer ( dest , self ) , self - > projectBuffer ( src ) , self ) <nl> - / / / deallocateBuffer ( src , self ) <nl> - InitializeBufferWithTakeOfBuffer , <nl> - <nl> - / / / void ( * destroyArray ) ( T * object , size_t n , witness_t * self ) ; <nl> - / / / <nl> - / / / Given a valid array of n objects of this type , destroy the object , leaving <nl> - / / / the array invalid . This is useful when generically destroying an array of <nl> - / / / objects to avoid calling the scalar ' destroy ' witness in a loop . <nl> - DestroyArray , <nl> - <nl> - / / / T * ( * initializeArrayWithCopy ) ( T * dest , T * src , size_t n , M * self ) ; <nl> - / / / <nl> - / / / Given an invalid array of n objects of this type , initialize the objects <nl> - / / / as a copy of the source array . Returns the dest array . <nl> - InitializeArrayWithCopy , <nl> - <nl> - / / / T * ( * initializeArrayWithTakeFrontToBack ) ( T * dest , T * src , size_t n , M * self ) ; <nl> - / / / <nl> - / / / Given an invalid array of n objects of this type , initialize the objects <nl> - / / / by taking them from the source array in front - to - back order . <nl> - / / / The source array becomes invalid . <nl> - / / / Returns the dest array . <nl> - InitializeArrayWithTakeFrontToBack , <nl> - <nl> - / / / T * ( * initializeArrayWithTakeBackToFront ) ( T * dest , T * src , size_t n , M * self ) ; <nl> - / / / <nl> - / / / Given an invalid array of n objects of this type , initialize the objects <nl> - / / / by taking them from the source array in back - to - front order . <nl> - / / / The source array becomes invalid . <nl> - / / / Returns the dest array . <nl> - InitializeArrayWithTakeBackToFront , <nl> - <nl> - Last_RequiredValueWitnessFunction = InitializeArrayWithTakeBackToFront , <nl> - <nl> - / / / The offset at which type layout witnesses begin . <nl> - First_TypeLayoutWitness , <nl> - <nl> - / / / size_t size ; <nl> - / / / <nl> - / / / The required storage size of a single object of this type . <nl> - Size = First_TypeLayoutWitness , <nl> - <nl> - / / / size_t flags ; <nl> - / / / <nl> - / / / The ValueWitnessAlignmentMask bits represent the required <nl> - / / / alignment of the first byte of an object of this type , expressed <nl> - / / / as a mask of the low bits that must not be set in the pointer . 
<nl> - / / / This representation can be easily converted to the ' alignof ' <nl> - / / / result by merely adding 1 , but it is more directly useful for <nl> - / / / performing dynamic structure layouts , and it grants an <nl> - / / / additional bit of precision in a compact field without needing <nl> - / / / to switch to an exponent representation . <nl> - / / / <nl> - / / / The ValueWitnessIsNonPOD bit is set if the type is not POD . <nl> - / / / <nl> - / / / The ValueWitnessIsNonInline bit is set if the type cannot be <nl> - / / / represented in a fixed - size buffer . <nl> - / / / <nl> - / / / The Enum_HasExtraInhabitants bit is set if the type ' s binary <nl> - / / / representation has " extra inhabitants " that do not form valid values of <nl> - / / / the type , and the value witness table contains the ExtraInhabitantWitness <nl> - / / / entries . <nl> - / / / <nl> - / / / The Enum_HasSpareBits bit is set if the type ' s binary representation <nl> - / / / has unused bits . <nl> - / / / <nl> - / / / The HasEnumWitnesses bit is set if the type is an enum type . <nl> - Flags , <nl> - <nl> - / / / size_t stride ; <nl> - / / / <nl> - / / / The required size per element of an array of this type . It is at least <nl> - / / / one , even for zero - sized types , like the empty tuple . <nl> - Stride , <nl> - <nl> - Last_RequiredValueWitness = Stride , <nl> - Last_RequiredTypeLayoutWitness = Last_RequiredValueWitness , <nl> - <nl> - / / / The following value witnesses are conditionally present based on <nl> - / / / the Enum_HasExtraInhabitants bit of the flags . <nl> - First_ExtraInhabitantValueWitness , <nl> - <nl> - / / / size_t extraInhabitantFlags ; <nl> - / / / <nl> - / / / These bits are always present if the extra inhabitants witnesses are : <nl> - / / / <nl> - / / / - The NumExtraInhabitantsMask bits contain the number of extra <nl> - / / / inhabitants of the type representation . <nl> - / / / <nl> - / / / If the Enum_HasSpareBits flag is set in the value witness flags , these <nl> - / / / additional flags are available : <nl> - / / / <nl> - / / / - The NumSpareBitsMask bits contain the number of ( host - endian ) contiguous <nl> - / / / spare bits in the type representation . <nl> - / / / - The SpareBitsShiftMask bits contain the ( host - endian ) bit offset of the <nl> - / / / lowest spare bit . <nl> - ExtraInhabitantFlags = First_ExtraInhabitantValueWitness , <nl> - <nl> - Last_TypeLayoutWitness = ExtraInhabitantFlags , <nl> - <nl> - First_ExtraInhabitantValueWitnessFunction , <nl> - <nl> - / / / void ( * storeExtraInhabitant ) ( T * obj , unsigned index , M * self ) ; <nl> - / / / <nl> - / / / Given an invalid object of this type , store the representation of an <nl> - / / / extra inhabitant of the type . The object will remain invalid , because <nl> - / / / an extra inhabitant is by definition an invalid representation of the <nl> - / / / type . index must be less than numExtraInhabitants . <nl> - StoreExtraInhabitant = First_ExtraInhabitantValueWitnessFunction , <nl> - <nl> - / / / int ( * getExtraInhabitantIndex ) ( T * obj , M * self ) ; <nl> - / / / <nl> - / / / Given an invalid object of this type with an extra inhabitant <nl> - / / / representation , returns the index of the extra inhabitant representation . <nl> - / / / Returns - 1 if the object is a valid value of the type . If non - negative , <nl> - / / / the return value is the same index that can be passed to <nl> - / / / storeExtraInhabitant to reproduce the representation . 
<nl> - GetExtraInhabitantIndex , <nl> - <nl> - Last_ExtraInhabitantValueWitnessFunction = GetExtraInhabitantIndex , <nl> - Last_ExtraInhabitantValueWitness = Last_ExtraInhabitantValueWitnessFunction , <nl> - <nl> - / / / The following value witnesses are conditionally present if the witnessed <nl> - / / / type is an enum . <nl> - First_EnumValueWitness , <nl> - <nl> - / / / int ( * getEnumTag ) ( T * obj , M * self ) ; <nl> - / / / Given a valid object of this enum type , extracts the tag value indicating <nl> - / / / which case of the enum is inhabited . Returned values are in the range <nl> - / / / [ - ElementsWithPayload . . ElementsWithNoPayload - 1 ] . <nl> - GetEnumTag = First_EnumValueWitness , <nl> - / / / void ( * destructiveProjectEnumData ) ( T * obj , M * self ) ; <nl> - / / / Given a valid object of this enum type , destructively extracts the <nl> - / / / associated payload . <nl> - DestructiveProjectEnumData , <nl> - / / / void ( * destructiveInjectEnumTag ) ( T * obj , int tag , M * self ) ; <nl> - / / / Given an enum case tag and a valid object of case ' s payload type , <nl> - / / / destructively inserts the tag into the payload . The given tag value <nl> - / / / must be in the range [ - ElementsWithPayload . . ElementsWithNoPayload - 1 ] . <nl> - DestructiveInjectEnumTag , <nl> - <nl> - Last_EnumValueWitness = DestructiveInjectEnumTag , <nl> - <nl> - Last_ValueWitness = Last_EnumValueWitness , <nl> + # define WANT_ALL_VALUE_WITNESSES <nl> + # define VALUE_WITNESS ( lowerId , upperId ) upperId , <nl> + # define BEGIN_VALUE_WITNESS_RANGE ( rangeId , upperId ) First_ # # rangeId = upperId , <nl> + # define END_VALUE_WITNESS_RANGE ( rangeId , upperId ) Last_ # # rangeId = upperId , <nl> + # include " swift / ABI / ValueWitness . def " <nl> } ; <nl> <nl> / / The namespaces here are to force the enumerators to be scoped . We don ' t <nl> mmm a / include / swift / Runtime / Metadata . h <nl> ppp b / include / swift / Runtime / Metadata . h <nl> class ExtraInhabitantFlags { <nl> <nl> namespace value_witness_types { <nl> <nl> - <nl> - / / / Given an unallocated buffer , initialize it as a copy of the <nl> - / / / object in the source buffer . This can be decomposed as : <nl> - / / / <nl> - / / / self - > initializeBufferWithCopy ( dest , self - > projectBuffer ( src ) , self ) <nl> - / / / <nl> - / / / This operation does not need to be safe against ' dest ' and ' src ' aliasing . <nl> - / / / <nl> - / / / Preconditions : <nl> - / / / ' dest ' is an unallocated buffer <nl> - / / / Postconditions : <nl> - / / / ' dest ' is an initialized buffer <nl> - / / / Invariants : <nl> - / / / ' src ' is an initialized buffer <nl> - typedef OpaqueValue * initializeBufferWithCopyOfBuffer ( ValueBuffer * dest , <nl> - ValueBuffer * src , <nl> - const Metadata * self ) ; <nl> - <nl> - / / / Given an initialized object , destroy it . <nl> - / / / <nl> - / / / Preconditions : <nl> - / / / ' object ' is an initialized object <nl> - / / / Postconditions : <nl> - / / / ' object ' is an uninitialized object <nl> - typedef void destroy ( OpaqueValue * object , <nl> - const Metadata * self ) ; <nl> - <nl> - / / / Given an uninitialized object and an initialized object , copy <nl> - / / / the value . <nl> - / / / <nl> - / / / This operation does not need to be safe against ' dest ' and ' src ' aliasing . <nl> - / / / <nl> - / / / Returns the dest object . 
<nl> - / / / <nl> - / / / Preconditions : <nl> - / / / ' dest ' is an uninitialized object <nl> - / / / Postconditions : <nl> - / / / ' dest ' is an initialized object <nl> - / / / Invariants : <nl> - / / / ' src ' is an initialized object <nl> - typedef OpaqueValue * initializeWithCopy ( OpaqueValue * dest , <nl> - OpaqueValue * src , <nl> - const Metadata * self ) ; <nl> - <nl> - / / / Given two initialized objects , copy the value from one to the <nl> - / / / other . <nl> - / / / <nl> - / / / This operation must be safe against ' dest ' and ' src ' aliasing . <nl> - / / / <nl> - / / / Returns the dest object . <nl> - / / / <nl> - / / / Invariants : <nl> - / / / ' dest ' is an initialized object <nl> - / / / ' src ' is an initialized object <nl> - typedef OpaqueValue * assignWithCopy ( OpaqueValue * dest , <nl> - OpaqueValue * src , <nl> - const Metadata * self ) ; <nl> - <nl> - / / / Given an uninitialized object and an initialized object , move <nl> - / / / the value from one to the other , leaving the source object <nl> - / / / uninitialized . <nl> - / / / <nl> - / / / There is no need for an initializeBufferWithTakeOfBuffer , because that <nl> - / / / can simply be a pointer - aligned memcpy of sizeof ( ValueBuffer ) <nl> - / / / bytes . <nl> - / / / <nl> - / / / This operation does not need to be safe against ' dest ' and ' src ' aliasing . <nl> - / / / <nl> - / / / Returns the dest object . <nl> - / / / <nl> - / / / Preconditions : <nl> - / / / ' dest ' is an uninitialized object <nl> - / / / ' src ' is an initialized object <nl> - / / / Postconditions : <nl> - / / / ' dest ' is an initialized object <nl> - / / / ' src ' is an uninitialized object <nl> - typedef OpaqueValue * initializeWithTake ( OpaqueValue * dest , <nl> - OpaqueValue * src , <nl> - const Metadata * self ) ; <nl> - <nl> - / / / Given an initialized object and an initialized object , move <nl> - / / / the value from one to the other , leaving the source object <nl> - / / / uninitialized . <nl> - / / / <nl> - / / / This operation does not need to be safe against ' dest ' and ' src ' aliasing . <nl> - / / / Therefore this can be decomposed as : <nl> - / / / <nl> - / / / self - > destroy ( dest , self ) ; <nl> - / / / self - > initializeWithTake ( dest , src , self ) ; <nl> - / / / <nl> - / / / Returns the dest object . <nl> - / / / <nl> - / / / Preconditions : <nl> - / / / ' src ' is an initialized object <nl> - / / / Postconditions : <nl> - / / / ' src ' is an uninitialized object <nl> - / / / Invariants : <nl> - / / / ' dest ' is an initialized object <nl> - typedef OpaqueValue * assignWithTake ( OpaqueValue * dest , <nl> - OpaqueValue * src , <nl> - const Metadata * self ) ; <nl> - <nl> - / / / Given an unallocated buffer and an initialized buffer , move the <nl> - / / / value from one buffer to the other , leaving the source buffer <nl> - / / / unallocated . <nl> - / / / <nl> - / / / This operation does not need to be safe against ' dest ' and ' src ' aliasing . <nl> - / / / Therefore this can be decomposed as : <nl> - / / / <nl> - / / / self - > initializeBufferWithTake ( dest , self - > projectBuffer ( src ) , self ) <nl> - / / / self - > deallocateBuffer ( src , self ) <nl> - / / / <nl> - / / / However , it may be more efficient because values stored out - of - line <nl> - / / / may be moved by simply moving the buffer . <nl> - / / / <nl> - / / / If the value is bitwise - takable or stored out of line , this is <nl> - / / / equivalent to a memcpy of the buffers . 
<nl> - / / / <nl> - / / / Returns the dest object . <nl> - / / / <nl> - / / / Preconditions : <nl> - / / / ' dest ' is an unallocated buffer <nl> - / / / ' src ' is an initialized buffer <nl> - / / / Postconditions : <nl> - / / / ' dest ' is an initialized buffer <nl> - / / / ' src ' is an unallocated buffer <nl> - typedef OpaqueValue * initializeBufferWithTakeOfBuffer ( ValueBuffer * dest , <nl> - ValueBuffer * src , <nl> - const Metadata * self ) ; <nl> - <nl> - / / / Given an initialized array of objects , destroy it . <nl> - / / / <nl> - / / / Preconditions : <nl> - / / / ' object ' is an initialized array of n objects <nl> - / / / Postconditions : <nl> - / / / ' object ' is an uninitialized array of n objects <nl> - typedef void destroyArray ( OpaqueValue * array , size_t n , <nl> - const Metadata * self ) ; <nl> - <nl> - / / / Given an uninitialized array and an initialized array , copy <nl> - / / / the value . <nl> - / / / <nl> - / / / This operation does not need to be safe against ' dest ' and ' src ' aliasing . <nl> - / / / <nl> - / / / Returns the dest object . <nl> - / / / <nl> - / / / Preconditions : <nl> - / / / ' dest ' is an uninitialized array of n objects <nl> - / / / Postconditions : <nl> - / / / ' dest ' is an initialized array of n objects <nl> - / / / Invariants : <nl> - / / / ' src ' is an initialized array of n objects <nl> - typedef OpaqueValue * initializeArrayWithCopy ( OpaqueValue * dest , <nl> - OpaqueValue * src , <nl> - size_t n , <nl> - const Metadata * self ) ; <nl> - <nl> - / / / Given an uninitialized array and an initialized array , move <nl> - / / / the values from one to the other , leaving the source array <nl> - / / / uninitialized . <nl> - / / / <nl> - / / / This operation does not need to be safe against ' dest ' and ' src ' fully <nl> - / / / overlapping . ' dest ' may partially overlap the head of ' src ' , because the <nl> - / / / values are taken as if in front - to - back order . <nl> - / / / <nl> - / / / Returns the dest object . <nl> - / / / <nl> - / / / Preconditions : <nl> - / / / ' dest ' is an uninitialized array of n objects <nl> - / / / ' src ' is an initialized array of n objects <nl> - / / / Postconditions : <nl> - / / / ' dest ' is an initialized array of n objects <nl> - / / / ' src ' is an uninitialized array of n objects <nl> - typedef OpaqueValue * initializeArrayWithTakeFrontToBack ( OpaqueValue * dest , <nl> - OpaqueValue * src , <nl> - size_t n , <nl> - const Metadata * self ) ; <nl> - <nl> - / / / Given an uninitialized array and an initialized array , move <nl> - / / / the values from one to the other , leaving the source array <nl> - / / / uninitialized . <nl> - / / / <nl> - / / / This operation does not need to be safe against ' dest ' and ' src ' fully <nl> - / / / overlapping . ' dest ' may partially overlap the tail of ' src ' , because the <nl> - / / / values are taken as if in back - to - front order . <nl> - / / / <nl> - / / / Returns the dest object . <nl> - / / / <nl> - / / / Preconditions : <nl> - / / / ' dest ' is an uninitialized array of n objects <nl> - / / / ' src ' is an initialized array of n objects <nl> - / / / Postconditions : <nl> - / / / ' dest ' is an initialized array of n objects <nl> - / / / ' src ' is an uninitialized array of n objects <nl> - typedef OpaqueValue * initializeArrayWithTakeBackToFront ( OpaqueValue * dest , <nl> - OpaqueValue * src , <nl> - size_t n , <nl> - const Metadata * self ) ; <nl> - <nl> - / / / The number of bytes required to store an object of this type . 
<nl> - / / / This value may be zero . This value is not necessarily a <nl> - / / / multiple of the alignment . <nl> - typedef size_t size ; <nl> - <nl> - / / / Flags which apply to the type here . <nl> - typedef ValueWitnessFlags flags ; <nl> - <nl> - / / / When allocating an array of objects of this type , the number of bytes <nl> - / / / between array elements . This value may be zero . This value is always <nl> - / / / a multiple of the alignment . <nl> - typedef size_t stride ; <nl> - <nl> - / / / Flags which describe extra inhabitants . <nl> - typedef ExtraInhabitantFlags extraInhabitantFlags ; <nl> - <nl> - / / / Store an extra inhabitant , named by a unique positive or zero index , <nl> - / / / into the given uninitialized storage for the type . <nl> - typedef void storeExtraInhabitant ( OpaqueValue * dest , <nl> - int index , <nl> - const Metadata * self ) ; <nl> - <nl> - / / / Get the extra inhabitant index for the bit pattern stored at the given <nl> - / / / address , or return - 1 if there is a valid value at the address . <nl> - typedef int getExtraInhabitantIndex ( const OpaqueValue * src , <nl> - const Metadata * self ) ; <nl> - <nl> - / / / Given a valid object of this enum type , extracts the tag value indicating <nl> - / / / which case of the enum is inhabited . Returned values are in the range <nl> - / / / [ - ElementsWithPayload . . ElementsWithNoPayload - 1 ] . <nl> - / / / <nl> - / / / The tag value can be used to index into the array returned by the <nl> - / / / NominalTypeDescriptor ' s GetCaseTypes function to get the payload type <nl> - / / / and check if the payload is indirect . <nl> - typedef int getEnumTag ( const OpaqueValue * src , <nl> - const Metadata * self ) ; <nl> - <nl> - / / / Given a valid object of this enum type , destructively strips the tag <nl> - / / / bits , leaving behind a value of the inhabited case payload type . <nl> - / / / If the case is indirect , the payload can then be projected from the box <nl> - / / / with swift_projectBox ( ) . <nl> - typedef void destructiveProjectEnumData ( OpaqueValue * src , <nl> - const Metadata * self ) ; <nl> - <nl> - / / / Given a valid object of an enum case payload ' s type , destructively add <nl> - / / / the tag bits for the given case , leaving behind a fully - formed value of <nl> - / / / the enum type . If the enum case does not have a payload , the initial <nl> - / / / state of the value can be undefined . The given tag value must be in <nl> - / / / the range [ - ElementsWithPayload . . ElementsWithNoPayload - 1 ] . <nl> - typedef void destructiveInjectEnumTag ( OpaqueValue * src , <nl> - int tag , <nl> - const Metadata * self ) ; <nl> + / / Note that , for now , we aren ' t strict about ' const ' . <nl> + # define WANT_ALL_VALUE_WITNESSES <nl> + # define DATA_VALUE_WITNESS ( lowerId , upperId , type ) <nl> + # define FUNCTION_VALUE_WITNESS ( lowerId , upperId , returnType , paramTypes ) \ <nl> + typedef returnType ( * lowerId ) paramTypes ; <nl> + # define MUTABLE_VALUE_TYPE OpaqueValue * <nl> + # define IMMUTABLE_VALUE_TYPE const OpaqueValue * <nl> + # define MUTABLE_BUFFER_TYPE ValueBuffer * <nl> + # define IMMUTABLE_BUFFER_TYPE const ValueBuffer * <nl> + # define TYPE_TYPE const Metadata * <nl> + # define SIZE_TYPE size_t <nl> + # define INT_TYPE int <nl> + # define VOID_TYPE void <nl> + # include " swift / ABI / ValueWitness . def " <nl> + <nl> + / / Handle the data witnesses explicitly so we can use more specific <nl> + / / types for the flags enums . 
<nl> + typedef size_t size ; <nl> + typedef ValueWitnessFlags flags ; <nl> + typedef size_t stride ; <nl> + typedef ExtraInhabitantFlags extraInhabitantFlags ; <nl> <nl> } / / end namespace value_witness_types <nl> <nl> OpaqueValue * swift_copyPOD ( OpaqueValue * dest , <nl> OpaqueValue * src , <nl> const Metadata * self ) ; <nl> <nl> - # define FOR_ALL_FUNCTION_VALUE_WITNESSES ( MACRO ) \ <nl> - MACRO ( initializeBufferWithCopyOfBuffer ) \ <nl> - MACRO ( destroy ) \ <nl> - MACRO ( initializeWithCopy ) \ <nl> - MACRO ( assignWithCopy ) \ <nl> - MACRO ( initializeWithTake ) \ <nl> - MACRO ( assignWithTake ) \ <nl> - MACRO ( initializeBufferWithTakeOfBuffer ) \ <nl> - MACRO ( destroyArray ) \ <nl> - MACRO ( initializeArrayWithCopy ) \ <nl> - MACRO ( initializeArrayWithTakeFrontToBack ) \ <nl> - MACRO ( initializeArrayWithTakeBackToFront ) <nl> - <nl> struct TypeLayout ; <nl> <nl> / / / A value - witness table . A value witness table is built around <nl> struct ValueWitnessTable { <nl> / / For the meaning of all of these witnesses , consult the comments <nl> / / on their associated typedefs , above . <nl> <nl> - # define DECLARE_WITNESS ( NAME ) \ <nl> - value_witness_types : : NAME * NAME ; <nl> - FOR_ALL_FUNCTION_VALUE_WITNESSES ( DECLARE_WITNESS ) <nl> - # undef DECLARE_WITNESS <nl> - <nl> - value_witness_types : : size size ; <nl> - value_witness_types : : flags flags ; <nl> - value_witness_types : : stride stride ; <nl> + # define WANT_ONLY_REQUIRED_VALUE_WITNESSES <nl> + # define VALUE_WITNESS ( LOWER_ID , UPPER_ID ) \ <nl> + value_witness_types : : LOWER_ID LOWER_ID ; <nl> + # include " swift / ABI / ValueWitness . def " <nl> <nl> / / / Would values of a type with the given layout requirements be <nl> / / / allocated inline ? <nl> struct ValueWitnessTable { <nl> / / / These entry points are available only if the HasExtraInhabitants flag bit is <nl> / / / set in the ' flags ' field . <nl> struct ExtraInhabitantsValueWitnessTable : ValueWitnessTable { <nl> - value_witness_types : : extraInhabitantFlags extraInhabitantFlags ; <nl> - value_witness_types : : storeExtraInhabitant * storeExtraInhabitant ; <nl> - value_witness_types : : getExtraInhabitantIndex * getExtraInhabitantIndex ; <nl> + # define WANT_ONLY_EXTRA_INHABITANT_VALUE_WITNESSES <nl> + # define VALUE_WITNESS ( LOWER_ID , UPPER_ID ) \ <nl> + value_witness_types : : LOWER_ID LOWER_ID ; <nl> + # include " swift / ABI / ValueWitness . def " <nl> <nl> # define SET_WITNESS ( NAME ) base . NAME , <nl> <nl> struct ExtraInhabitantsValueWitnessTable : ValueWitnessTable { <nl> constexpr ExtraInhabitantsValueWitnessTable ( <nl> const ValueWitnessTable & base , <nl> value_witness_types : : extraInhabitantFlags eif , <nl> - value_witness_types : : storeExtraInhabitant * sei , <nl> - value_witness_types : : getExtraInhabitantIndex * geii ) <nl> - : ValueWitnessTable { <nl> - FOR_ALL_FUNCTION_VALUE_WITNESSES ( SET_WITNESS ) <nl> - base . size , <nl> - base . flags , <nl> - base . stride <nl> - } , extraInhabitantFlags ( eif ) , <nl> + value_witness_types : : storeExtraInhabitant sei , <nl> + value_witness_types : : getExtraInhabitantIndex geii ) <nl> + : ValueWitnessTable ( base ) , <nl> + extraInhabitantFlags ( eif ) , <nl> storeExtraInhabitant ( sei ) , <nl> getExtraInhabitantIndex ( geii ) { } <nl> - # undef SET_WITNESS <nl> <nl> static bool classof ( const ValueWitnessTable * table ) { <nl> return table - > flags . 
hasExtraInhabitants ( ) ; <nl> struct ExtraInhabitantsValueWitnessTable : ValueWitnessTable { <nl> / / / These entry points are available only if the HasEnumWitnesses flag bit is <nl> / / / set in the ' flags ' field . <nl> struct EnumValueWitnessTable : ExtraInhabitantsValueWitnessTable { <nl> - value_witness_types : : getEnumTag * getEnumTag ; <nl> - value_witness_types : : destructiveProjectEnumData * destructiveProjectEnumData ; <nl> - value_witness_types : : destructiveInjectEnumTag * destructiveInjectEnumTag ; <nl> + # define WANT_ONLY_ENUM_VALUE_WITNESSES <nl> + # define VALUE_WITNESS ( LOWER_ID , UPPER_ID ) \ <nl> + value_witness_types : : LOWER_ID LOWER_ID ; <nl> + # include " swift / ABI / ValueWitness . def " <nl> <nl> constexpr EnumValueWitnessTable ( ) <nl> : ExtraInhabitantsValueWitnessTable ( ) , <nl> struct EnumValueWitnessTable : ExtraInhabitantsValueWitnessTable { <nl> destructiveInjectEnumTag ( nullptr ) { } <nl> constexpr EnumValueWitnessTable ( <nl> const ExtraInhabitantsValueWitnessTable & base , <nl> - value_witness_types : : getEnumTag * getEnumTag , <nl> - value_witness_types : : destructiveProjectEnumData * destructiveProjectEnumData , <nl> - value_witness_types : : destructiveInjectEnumTag * destructiveInjectEnumTag ) <nl> + value_witness_types : : getEnumTag getEnumTag , <nl> + value_witness_types : : destructiveProjectEnumData destructiveProjectEnumData , <nl> + value_witness_types : : destructiveInjectEnumTag destructiveInjectEnumTag ) <nl> : ExtraInhabitantsValueWitnessTable ( base ) , <nl> getEnumTag ( getEnumTag ) , <nl> destructiveProjectEnumData ( destructiveProjectEnumData ) , <nl> namespace { <nl> template < typename T > struct _ResultOf ; <nl> <nl> template < typename R , typename . . . A > <nl> - struct _ResultOf < R ( A . . . ) > { <nl> + struct _ResultOf < R ( * ) ( A . . . ) > { <nl> using type = R ; <nl> } ; <nl> } <nl> struct TargetMetadata { <nl> <nl> / / Define forwarders for value witnesses . These invoke this metadata ' s value <nl> / / witness table with itself as the ' self ' parameter . <nl> - # define FORWARD_WITNESS ( WITNESS ) \ <nl> + # define WANT_ONLY_REQUIRED_VALUE_WITNESSES <nl> + # define FUNCTION_VALUE_WITNESS ( WITNESS , UPPER , RET_TYPE , PARAM_TYPES ) \ <nl> template < typename . . . A > \ <nl> _ResultOf < value_witness_types : : WITNESS > : : type \ <nl> vw_ # # WITNESS ( A & & . . . args ) const { \ <nl> return getValueWitnesses ( ) - > WITNESS ( std : : forward < A > ( args ) . . . , this ) ; \ <nl> } <nl> - FOR_ALL_FUNCTION_VALUE_WITNESSES ( FORWARD_WITNESS ) <nl> - # undef FORWARD_WITNESS <nl> + # define DATA_VALUE_WITNESS ( LOWER , UPPER , TYPE ) <nl> + # include " swift / ABI / ValueWitness . def " <nl> <nl> int vw_getExtraInhabitantIndex ( const OpaqueValue * value ) const { <nl> return getValueWitnesses ( ) - > _asXIVWT ( ) - > getExtraInhabitantIndex ( value , this ) ; <nl> struct TargetMetadata { <nl> } <nl> <nl> int vw_getEnumTag ( const OpaqueValue * value ) const { <nl> - return getValueWitnesses ( ) - > _asEVWT ( ) - > getEnumTag ( value , this ) ; <nl> + return getValueWitnesses ( ) - > _asEVWT ( ) - > getEnumTag ( const_cast < OpaqueValue * > ( value ) , this ) ; <nl> } <nl> void vw_destructiveProjectEnumData ( OpaqueValue * value ) const { <nl> getValueWitnesses ( ) - > _asEVWT ( ) - > destructiveProjectEnumData ( value , this ) ; <nl> mmm a / stdlib / public / runtime / Enum . cpp <nl> ppp b / stdlib / public / runtime / Enum . 
cpp <nl> swift : : swift_initEnumValueWitnessTableSinglePayload ( ValueWitnessTable * vwtable , <nl> | | payloadVWT = = & VALUE_WITNESS_SYM ( BO ) <nl> # endif <nl> ) ) { <nl> - # define COPY_PAYLOAD_WITNESS ( NAME ) vwtable - > NAME = payloadVWT - > NAME ; <nl> - FOR_ALL_FUNCTION_VALUE_WITNESSES ( COPY_PAYLOAD_WITNESS ) <nl> - # undef COPY_PAYLOAD_WITNESS <nl> + # define WANT_ONLY_REQUIRED_VALUE_WITNESSES <nl> + # define VALUE_WITNESS ( LOWER_ID , UPPER_ID ) \ <nl> + vwtable - > LOWER_ID = payloadVWT - > LOWER_ID ; <nl> + # define DATA_VALUE_WITNESS ( LOWER_ID , UPPER_ID , TYPE ) <nl> + # include " swift / ABI / ValueWitness . def " <nl> } else { <nl> # endif <nl> installCommonValueWitnesses ( vwtable ) ; <nl> mmm a / stdlib / public / runtime / Metadata . cpp <nl> ppp b / stdlib / public / runtime / Metadata . cpp <nl> static void tuple_destroyArray ( OpaqueValue * array , size_t n , <nl> <nl> / / The operation doesn ' t have to be initializeWithCopy , but they all <nl> / / have basically the same type . <nl> - typedef value_witness_types : : initializeWithCopy * <nl> + typedef value_witness_types : : initializeWithCopy <nl> ValueWitnessTable : : * forEachOperation ; <nl> <nl> / / / Perform an operation for each field of two tuples . <nl> static int tuple_getExtraInhabitantIndex ( const OpaqueValue * tuple , <nl> <nl> / / / Various standard witness table for tuples . <nl> static const ValueWitnessTable tuple_witnesses_pod_inline = { <nl> - # define TUPLE_WITNESS ( NAME ) & tuple_ # # NAME < true , true > , <nl> - FOR_ALL_FUNCTION_VALUE_WITNESSES ( TUPLE_WITNESS ) <nl> - # undef TUPLE_WITNESS <nl> + # define WANT_ONLY_REQUIRED_VALUE_WITNESSES <nl> + # define VALUE_WITNESS ( LOWER_ID , UPPER_ID ) & tuple_ # # LOWER_ID < true , true > , <nl> + # define DATA_VALUE_WITNESS ( LOWER_ID , UPPER_ID , TYPE ) <nl> + # include " swift / ABI / ValueWitness . def " <nl> 0 , <nl> ValueWitnessFlags ( ) , <nl> 0 <nl> } ; <nl> static const ValueWitnessTable tuple_witnesses_nonpod_inline = { <nl> - # define TUPLE_WITNESS ( NAME ) & tuple_ # # NAME < false , true > , <nl> - FOR_ALL_FUNCTION_VALUE_WITNESSES ( TUPLE_WITNESS ) <nl> - # undef TUPLE_WITNESS <nl> + # define WANT_ONLY_REQUIRED_VALUE_WITNESSES <nl> + # define VALUE_WITNESS ( LOWER_ID , UPPER_ID ) & tuple_ # # LOWER_ID < false , true > , <nl> + # define DATA_VALUE_WITNESS ( LOWER_ID , UPPER_ID , TYPE ) <nl> + # include " swift / ABI / ValueWitness . def " <nl> 0 , <nl> ValueWitnessFlags ( ) , <nl> 0 <nl> } ; <nl> static const ValueWitnessTable tuple_witnesses_pod_noninline = { <nl> - # define TUPLE_WITNESS ( NAME ) & tuple_ # # NAME < true , false > , <nl> - FOR_ALL_FUNCTION_VALUE_WITNESSES ( TUPLE_WITNESS ) <nl> - # undef TUPLE_WITNESS <nl> + # define WANT_ONLY_REQUIRED_VALUE_WITNESSES <nl> + # define VALUE_WITNESS ( LOWER_ID , UPPER_ID ) & tuple_ # # LOWER_ID < true , false > , <nl> + # define DATA_VALUE_WITNESS ( LOWER_ID , UPPER_ID , TYPE ) <nl> + # include " swift / ABI / ValueWitness . def " <nl> 0 , <nl> ValueWitnessFlags ( ) , <nl> 0 <nl> } ; <nl> static const ValueWitnessTable tuple_witnesses_nonpod_noninline = { <nl> - # define TUPLE_WITNESS ( NAME ) & tuple_ # # NAME < false , false > , <nl> - FOR_ALL_FUNCTION_VALUE_WITNESSES ( TUPLE_WITNESS ) <nl> - # undef TUPLE_WITNESS <nl> + # define WANT_ONLY_REQUIRED_VALUE_WITNESSES <nl> + # define VALUE_WITNESS ( LOWER_ID , UPPER_ID ) & tuple_ # # LOWER_ID < false , false > , <nl> + # define DATA_VALUE_WITNESS ( LOWER_ID , UPPER_ID , TYPE ) <nl> + # include " swift / ABI / ValueWitness . 
def " <nl> 0 , <nl> ValueWitnessFlags ( ) , <nl> 0 <nl> TupleCacheEntry : : TupleCacheEntry ( const Key & key , <nl> proposedWitnesses = & tuple_witnesses_nonpod_noninline ; <nl> } <nl> } <nl> - # define ASSIGN_TUPLE_WITNESS ( NAME ) \ <nl> - Witnesses . NAME = proposedWitnesses - > NAME ; <nl> - FOR_ALL_FUNCTION_VALUE_WITNESSES ( ASSIGN_TUPLE_WITNESS ) <nl> - # undef ASSIGN_TUPLE_WITNESS <nl> + # define WANT_ONLY_REQUIRED_VALUE_WITNESSES <nl> + # define VALUE_WITNESS ( LOWER_ID , UPPER_ID ) \ <nl> + Witnesses . LOWER_ID = proposedWitnesses - > LOWER_ID ; <nl> + # define DATA_VALUE_WITNESS ( LOWER_ID , UPPER_ID , TYPE ) <nl> + # include " swift / ABI / ValueWitness . def " <nl> <nl> / / We have extra inhabitants if the first element does . <nl> / / FIXME : generalize this . <nl> namespace { <nl> struct pointer_function_cast_impl ; <nl> <nl> template < typename OutRet , typename . . . OutArgs > <nl> - struct pointer_function_cast_impl < OutRet * ( OutArgs * . . . ) > { <nl> + struct pointer_function_cast_impl < OutRet * ( * ) ( OutArgs * . . . ) > { <nl> template < typename InRet , typename . . . InArgs > <nl> static constexpr auto perform ( InRet * ( * function ) ( InArgs * . . . ) ) <nl> - > OutRet * ( * ) ( OutArgs * . . . ) <nl> namespace { <nl> } ; <nl> <nl> template < typename . . . OutArgs > <nl> - struct pointer_function_cast_impl < void ( OutArgs * . . . ) > { <nl> + struct pointer_function_cast_impl < void ( * ) ( OutArgs * . . . ) > { <nl> template < typename . . . InArgs > <nl> static constexpr auto perform ( void ( * function ) ( InArgs * . . . ) ) <nl> - > void ( * ) ( OutArgs * . . . ) <nl> namespace { <nl> / / / In any reasonable calling convention the input and output function types <nl> / / / should be ABI - compatible . <nl> template < typename Out , typename In > <nl> - static constexpr Out * pointer_function_cast ( In * function ) { <nl> + static constexpr Out pointer_function_cast ( In * function ) { <nl> return pointer_function_cast_impl < Out > : : perform ( function ) ; <nl> } <nl> <nl> void swift : : installCommonValueWitnesses ( ValueWitnessTable * vwtable ) { <nl> / / For uncommon layouts , use value witnesses that work with an arbitrary <nl> / / size and alignment . <nl> if ( flags . isInlineStorage ( ) ) { <nl> - # define INSTALL_POD_DIRECT_WITNESS ( NAME ) vwtable - > NAME = pod_direct_ # # NAME ; <nl> - FOR_ALL_FUNCTION_VALUE_WITNESSES ( INSTALL_POD_DIRECT_WITNESS ) <nl> - # undef INSTALL_POD_DIRECT_WITNESS <nl> + # define WANT_ONLY_REQUIRED_VALUE_WITNESSES <nl> + # define VALUE_WITNESS ( LOWER_ID , UPPER_ID ) \ <nl> + vwtable - > LOWER_ID = pod_direct_ # # LOWER_ID ; <nl> + # define DATA_VALUE_WITNESS ( LOWER_ID , UPPER_ID , TYPE ) <nl> + # include " swift / ABI / ValueWitness . def " <nl> } else { <nl> - # define INSTALL_POD_INDIRECT_WITNESS ( NAME ) vwtable - > NAME = pod_indirect_ # # NAME ; <nl> - FOR_ALL_FUNCTION_VALUE_WITNESSES ( INSTALL_POD_INDIRECT_WITNESS ) <nl> - # undef INSTALL_POD_INDIRECT_WITNESS <nl> + # define WANT_ONLY_REQUIRED_VALUE_WITNESSES <nl> + # define VALUE_WITNESS ( LOWER_ID , UPPER_ID ) \ <nl> + vwtable - > LOWER_ID = pod_indirect_ # # LOWER_ID ; <nl> + # define DATA_VALUE_WITNESS ( LOWER_ID , UPPER_ID , TYPE ) <nl> + # include " swift / ABI / ValueWitness . 
def " <nl> } <nl> return ; <nl> <nl> void swift : : installCommonValueWitnesses ( ValueWitnessTable * vwtable ) { <nl> commonVWT = & VALUE_WITNESS_SYM ( Bi512_ ) ; <nl> break ; <nl> } <nl> - <nl> - # define INSTALL_POD_COMMON_WITNESS ( NAME ) vwtable - > NAME = commonVWT - > NAME ; <nl> - FOR_ALL_FUNCTION_VALUE_WITNESSES ( INSTALL_POD_COMMON_WITNESS ) <nl> - # undef INSTALL_POD_COMMON_WITNESS <nl> + <nl> + # define WANT_ONLY_REQUIRED_VALUE_WITNESSES <nl> + # define VALUE_WITNESS ( LOWER_ID , UPPER_ID ) \ <nl> + vwtable - > LOWER_ID = commonVWT - > LOWER_ID ; <nl> + # define DATA_VALUE_WITNESS ( LOWER_ID , UPPER_ID , TYPE ) <nl> + # include " swift / ABI / ValueWitness . def " <nl> <nl> return ; <nl> } <nl> ExistentialMetatypeValueWitnessTableCacheEntry ( unsigned numWitnessTables ) { <nl> using Box = NonFixedExistentialMetatypeBox ; <nl> using Witnesses = NonFixedValueWitnesses < Box , / * known allocated * / true > ; <nl> <nl> - # define STORE_VAR_EXISTENTIAL_METATYPE_WITNESS ( WITNESS ) \ <nl> - Data . WITNESS = Witnesses : : WITNESS ; <nl> - FOR_ALL_FUNCTION_VALUE_WITNESSES ( STORE_VAR_EXISTENTIAL_METATYPE_WITNESS ) <nl> - STORE_VAR_EXISTENTIAL_METATYPE_WITNESS ( storeExtraInhabitant ) <nl> - STORE_VAR_EXISTENTIAL_METATYPE_WITNESS ( getExtraInhabitantIndex ) <nl> - # undef STORE_VAR_EXISTENTIAL_METATYPE_WITNESS <nl> + # define WANT_REQUIRED_VALUE_WITNESSES 1 <nl> + # define WANT_EXTRA_INHABITANT_VALUE_WITNESSES 1 <nl> + # define WANT_ENUM_VALUE_WITNESSES 0 <nl> + # define VALUE_WITNESS ( LOWER_ID , UPPER_ID ) \ <nl> + Data . LOWER_ID = Witnesses : : LOWER_ID ; <nl> + # define DATA_VALUE_WITNESS ( LOWER_ID , UPPER_ID , TYPE ) <nl> + # include " swift / ABI / ValueWitness . def " <nl> <nl> Data . size = Box : : Container : : getSize ( numWitnessTables ) ; <nl> Data . flags = ValueWitnessFlags ( ) <nl> OpaqueExistentialValueWitnessTableCacheEntry ( unsigned numWitnessTables ) { <nl> using Witnesses = NonFixedValueWitnesses < Box , / * known allocated * / true > ; <nl> static_assert ( ! Witnesses : : hasExtraInhabitants , " no extra inhabitants " ) ; <nl> <nl> - # define STORE_VAR_OPAQUE_EXISTENTIAL_WITNESS ( WITNESS ) \ <nl> - Data . WITNESS = Witnesses : : WITNESS ; <nl> - FOR_ALL_FUNCTION_VALUE_WITNESSES ( STORE_VAR_OPAQUE_EXISTENTIAL_WITNESS ) <nl> - # undef STORE_VAR_OPAQUE_EXISTENTIAL_WITNESS <nl> + # define WANT_ONLY_REQUIRED_VALUE_WITNESSES <nl> + # define VALUE_WITNESS ( LOWER_ID , UPPER_ID ) \ <nl> + Data . LOWER_ID = Witnesses : : LOWER_ID ; <nl> + # define DATA_VALUE_WITNESS ( LOWER_ID , UPPER_ID , TYPE ) <nl> + # include " swift / ABI / ValueWitness . def " <nl> <nl> Data . size = Box : : Container : : getSize ( numWitnessTables ) ; <nl> Data . flags = ValueWitnessFlags ( ) <nl> ClassExistentialValueWitnessTableCacheEntry ( unsigned numWitnessTables ) { <nl> using Box = NonFixedClassExistentialBox ; <nl> using Witnesses = NonFixedValueWitnesses < Box , / * known allocated * / true > ; <nl> <nl> - # define STORE_VAR_CLASS_EXISTENTIAL_WITNESS ( WITNESS ) \ <nl> - Data . 
WITNESS = Witnesses : : WITNESS ; <nl> - FOR_ALL_FUNCTION_VALUE_WITNESSES ( STORE_VAR_CLASS_EXISTENTIAL_WITNESS ) <nl> - STORE_VAR_CLASS_EXISTENTIAL_WITNESS ( storeExtraInhabitant ) <nl> - STORE_VAR_CLASS_EXISTENTIAL_WITNESS ( getExtraInhabitantIndex ) <nl> - # undef STORE_VAR_CLASS_EXISTENTIAL_WITNESS <nl> + # define WANT_REQUIRED_VALUE_WITNESSES 1 <nl> + # define WANT_EXTRA_INHABITANT_VALUE_WITNESSES 1 <nl> + # define WANT_ENUM_VALUE_WITNESSES 0 <nl> + # define VALUE_WITNESS ( LOWER_ID , UPPER_ID ) \ <nl> + Data . LOWER_ID = Witnesses : : LOWER_ID ; <nl> + # define DATA_VALUE_WITNESS ( LOWER_ID , UPPER_ID , TYPE ) <nl> + # include " swift / ABI / ValueWitness . def " <nl> <nl> Data . size = Box : : Container : : getSize ( numWitnessTables ) ; <nl> Data . flags = ValueWitnessFlags ( ) <nl> mmm a / stdlib / public / runtime / MetadataImpl . h <nl> ppp b / stdlib / public / runtime / MetadataImpl . h <nl> struct ValueWitnessTableGenerator ; <nl> <nl> template < class Witnesses > struct ValueWitnessTableGenerator < Witnesses , false > { <nl> static constexpr const ValueWitnessTable table = { <nl> - # define EACH_WITNESS ( ID ) Witnesses : : ID , <nl> - FOR_ALL_FUNCTION_VALUE_WITNESSES ( EACH_WITNESS ) <nl> - # undef EACH_WITNESS <nl> - Witnesses : : size , <nl> - Witnesses : : flags , <nl> - Witnesses : : stride , <nl> + # define WANT_ONLY_REQUIRED_VALUE_WITNESSES <nl> + # define VALUE_WITNESS ( LOWER_ID , UPPER_ID ) Witnesses : : LOWER_ID , <nl> + # include " swift / ABI / ValueWitness . def " <nl> } ; <nl> } ; <nl> <nl> template < class Witnesses > struct ValueWitnessTableGenerator < Witnesses , false > { <nl> template < class Witnesses > struct ValueWitnessTableGenerator < Witnesses , true > { <nl> static constexpr const ExtraInhabitantsValueWitnessTable table = { <nl> { <nl> - # define EACH_WITNESS ( ID ) Witnesses : : ID , <nl> - FOR_ALL_FUNCTION_VALUE_WITNESSES ( EACH_WITNESS ) <nl> - # undef EACH_WITNESS <nl> - Witnesses : : size , <nl> - Witnesses : : flags , <nl> - Witnesses : : stride , <nl> + # define WANT_ONLY_REQUIRED_VALUE_WITNESSES <nl> + # define VALUE_WITNESS ( LOWER_ID , UPPER_ID ) Witnesses : : LOWER_ID , <nl> + # include " swift / ABI / ValueWitness . def " <nl> } , <nl> - Witnesses : : extraInhabitantFlags , <nl> - Witnesses : : storeExtraInhabitant , <nl> - Witnesses : : getExtraInhabitantIndex , <nl> + # define WANT_ONLY_EXTRA_INHABITANT_VALUE_WITNESSES <nl> + # define VALUE_WITNESS ( LOWER_ID , UPPER_ID ) Witnesses : : LOWER_ID , <nl> + # include " swift / ABI / ValueWitness . def " <nl> } ; <nl> } ; <nl> <nl> mmm a / unittests / runtime / Enum . cpp <nl> ppp b / unittests / runtime / Enum . cpp <nl> ExtraInhabitantsValueWitnessTable Int8WithExtraInhabitantValueWitness <nl> = { <nl> / / ValueWitnessTable <nl> ValueWitnessTable { <nl> - # define STEAL_INT8_WITNESS ( witness ) VALUE_WITNESS_SYM ( Bi8_ ) . witness , <nl> - FOR_ALL_FUNCTION_VALUE_WITNESSES ( STEAL_INT8_WITNESS ) <nl> - # undef STEAL_INT8_WITNESS <nl> + # define WANT_ONLY_REQUIRED_VALUE_WITNESSES <nl> + # define VALUE_WITNESS ( LOWER_ID , UPPER_ID ) VALUE_WITNESS_SYM ( Bi8_ ) . LOWER_ID , <nl> + # define DATA_VALUE_WITNESS ( LOWER_ID , UPPER_ID , TYPE ) <nl> + # include " swift / ABI / ValueWitness . def " <nl> VALUE_WITNESS_SYM ( Bi8_ ) . size , <nl> VALUE_WITNESS_SYM ( Bi8_ ) . flags . withExtraInhabitants ( true ) , <nl> VALUE_WITNESS_SYM ( Bi8_ ) . stride <nl>
Create a central x-macro database of value witnesses. NFC.
apple/swift
9f8093f3764ceb299c2dbe556afcae4f2d4df230
2017-08-22T00:17:02Z
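The commit above replaces hand-maintained lists of value witnesses with a single x-macro .def file that every consumer re-includes with its own expansion of the entry macro. Below is a minimal, self-contained C++ sketch of that pattern; the WITNESS_LIST macro, the table/enum names, and the example witnesses are hypothetical stand-ins, not the actual ValueWitness.def contents (a real .def file would be a separate header that is #include'd repeatedly, inlined here as a list macro so the sketch compiles on its own).

// Hypothetical entry list; each consumer supplies its own expansion of X.
#define WITNESS_LIST(X)                          \
  X(destroy, Destroy)                            \
  X(initializeWithCopy, InitializeWithCopy)      \
  X(assignWithCopy, AssignWithCopy)

// Consumer 1: an enum of the upper-case identifiers.
enum class Witness {
#define WITNESS(lowerId, upperId) upperId,
  WITNESS_LIST(WITNESS)
#undef WITNESS
};

// Consumer 2: a table with one function pointer per lower-case identifier.
struct WitnessTable {
#define WITNESS(lowerId, upperId) void (*lowerId)(void *obj);
  WITNESS_LIST(WITNESS)
#undef WITNESS
};

// Consumer 3: copy every member from one table to another -- the same shape
// the commit uses when cloning a payload type's value witnesses.
inline void copyAllWitnesses(WitnessTable &dst, const WitnessTable &src) {
#define WITNESS(lowerId, upperId) dst.lowerId = src.lowerId;
  WITNESS_LIST(WITNESS)
#undef WITNESS
}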
mmm a / src / serializer / log / log_serializer . cc <nl> ppp b / src / serializer / log / log_serializer . cc <nl> bool filepath_file_opener_t : : open_serializer_file ( const std : : string & path , int e <nl> } <nl> <nl> bool filepath_file_opener_t : : open_serializer_file_create_temporary ( scoped_ptr_t < file_t > * file_out ) { <nl> + / / TODO ( 84 ) : Make this a mutex_guarantee_t . <nl> + mutex_assertion_t : : acq_t acq ( & reentrance_mutex_ ) ; <nl> bool success = open_serializer_file ( temporary_file_name ( ) , linux_file_t : : mode_create | linux_file_t : : mode_truncate , file_out ) ; <nl> if ( success ) { <nl> / / TODO ( 84 ) : More rigorous temporary file state management . <nl> void filepath_file_opener_t : : do_move_serializer_file_to_permanent_location ( bool <nl> temporary_file_name ( ) . c_str ( ) , file_name ( ) . c_str ( ) ) ; <nl> } <nl> <nl> - / / TODO ( 84 ) : More rigorous temporary file state management . <nl> opened_temporary_ = false ; <nl> * success_out = true ; <nl> } <nl> <nl> bool filepath_file_opener_t : : move_serializer_file_to_permanent_location ( ) { <nl> + mutex_assertion_t : : acq_t acq ( & reentrance_mutex_ ) ; <nl> bool success ; <nl> / / TODO ( 84 ) : Should we drop the pretense of returning a bool ? What about for the other functions ? <nl> thread_pool_t : : run_in_blocker_pool ( boost : : bind ( & filepath_file_opener_t : : do_move_serializer_file_to_permanent_location , this , & success ) ) ; <nl> bool filepath_file_opener_t : : move_serializer_file_to_permanent_location ( ) { <nl> } <nl> <nl> bool filepath_file_opener_t : : open_serializer_file_existing ( scoped_ptr_t < file_t > * file_out ) { <nl> + mutex_assertion_t : : acq_t acq ( & reentrance_mutex_ ) ; <nl> return open_serializer_file ( current_file_name ( ) , 0 , file_out ) ; <nl> } <nl> <nl> - bool filepath_file_opener_t : : unlink_serializer_file ( ) { <nl> + void filepath_file_opener_t : : do_unlink_serializer_file ( bool * success_out ) { <nl> const int res = : : unlink ( current_file_name ( ) . c_str ( ) ) ; <nl> guarantee_err ( res = = 0 , " unlink ( ) falied " ) ; <nl> - return res = = 0 ; <nl> + * success_out = ( res = = 0 ) ; <nl> + } <nl> + <nl> + bool filepath_file_opener_t : : unlink_serializer_file ( ) { <nl> + mutex_assertion_t : : acq_t acq ( & reentrance_mutex_ ) ; <nl> + bool success ; <nl> + thread_pool_t : : run_in_blocker_pool ( boost : : bind ( & filepath_file_opener_t : : do_unlink_serializer_file , this , & success ) ) ; <nl> + return success ; <nl> } <nl> <nl> # ifdef SEMANTIC_SERIALIZER_CHECK <nl> mmm a / src / serializer / log / log_serializer . hpp <nl> ppp b / src / serializer / log / log_serializer . hpp <nl> <nl> # include " serializer / log / config . hpp " <nl> # include " utils . hpp " <nl> # include " concurrency / mutex . hpp " <nl> + # include " concurrency / mutex_assertion . hpp " <nl> <nl> # include " serializer / log / metablock_manager . hpp " <nl> # include " serializer / log / extent_manager . hpp " <nl> class filepath_file_opener_t : public serializer_file_opener_t { <nl> private : <nl> MUST_USE bool open_serializer_file ( const std : : string & path , int extra_flags , scoped_ptr_t < file_t > * file_out ) ; <nl> <nl> + / / Functions to be run in the blocker pool . <nl> void do_move_serializer_file_to_permanent_location ( bool * success_out ) ; <nl> + void do_unlink_serializer_file ( bool * success_out ) ; <nl> <nl> / / The path of the temporary file . This is file_name ( ) with some suffix appended . 
<nl> std : : string temporary_file_name ( ) const ; <nl> class filepath_file_opener_t : public serializer_file_opener_t { <nl> const std : : string filepath_ ; <nl> io_backender_t * const backender_ ; <nl> <nl> + / / Makes sure that only one member function gets called at a time . Some of them are blocking , <nl> + / / and we don ' t want to have to worry about stuff like what the value of opened_temporary_ <nl> + / / should be during the blocking call to move_serializer_file_to_permanent_location ( ) . <nl> + mutex_assertion_t reentrance_mutex_ ; <nl> + <nl> / / This begins false . It becomes true when open_serializer_file_create_temporary is called . It <nl> / / becomes false again when move_serializer_file_to_permanent_location is called . It is used by <nl> / / open_serializer_file_existing to know whether it should use the temporary or permanent path . <nl> - / / TODO ( 84 ) : Have more rigorous management of file open state . <nl> bool opened_temporary_ ; <nl> <nl> DISABLE_COPYING ( filepath_file_opener_t ) ; <nl>
Made filepath_file_opener_t have reentrance_mutex_, made unlink_serializer_file run in blocker pool.
rethinkdb/rethinkdb
2f6ebf18c65f96d9f750e6685831e65edc1b7aaf
2013-01-28T22:42:34Z
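This commit combines two ideas: a debug-style reentrance check acquired at the top of each public member function, and blocking filesystem calls pushed onto a blocker pool so the calling thread is never stalled. The C++ sketch below illustrates both, using simplified stand-ins for RethinkDB's mutex_assertion_t and thread_pool_t::run_in_blocker_pool rather than their real implementations; all class and function names here are invented for the example.

#include <atomic>
#include <cassert>
#include <cstdio>
#include <functional>

// Stand-in for mutex_assertion_t: asserts that two acquirers never overlap,
// but never actually blocks.
class reentrance_check_t {
    std::atomic<bool> held_{false};
public:
    class acq_t {
        reentrance_check_t *parent_;
    public:
        explicit acq_t(reentrance_check_t *parent) : parent_(parent) {
            bool was_held = parent_->held_.exchange(true);
            assert(!was_held && "illegal reentrant call into file opener");
            (void)was_held;
        }
        ~acq_t() { parent_->held_.store(false); }
    };
};

// Stand-in for handing a blocking syscall to a dedicated blocker thread.
void run_in_blocker_pool(const std::function<void()> &fn) {
    fn();  // a real pool would run this on a thread allowed to block
}

class file_opener_t {
    reentrance_check_t reentrance_mutex_;
public:
    bool unlink_file(const char *path) {
        reentrance_check_t::acq_t acq(&reentrance_mutex_);  // one caller at a time
        bool success = false;
        run_in_blocker_pool([&] {
            success = (std::remove(path) == 0);  // blocking filesystem call
        });
        return success;
    }
};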
new file mode 100644 <nl> index 00000000000 . . 52c40b37c39 <nl> mmm / dev / null <nl> ppp b / test / rql_test / src / datum / binary . yaml <nl> <nl> + desc : Tests of converstion to and from the RQL binary type <nl> + tests : <nl> + <nl> + # Short binary data from 0 to 12 characters <nl> + # Not fully implemented for JS as comparing Buffer objects is non - trivial <nl> + - def : s = " " <nl> + - def : <nl> + rb : s . force_encoding ( ' BINARY ' ) <nl> + js : s = Buffer ( s , ' binary ' ) <nl> + py : s = bytes ( s ) <nl> + - py : r . binary ( s ) <nl> + rb : r . binary ( ' ' ) <nl> + ot : s <nl> + - cd : r . binary ( s ) . count ( ) <nl> + ot : 0 <nl> + <nl> + - def : s = " \ x00 " <nl> + - def : <nl> + rb : s . force_encoding ( ' BINARY ' ) <nl> + js : s = Buffer ( s , ' binary ' ) <nl> + py : s = bytes ( s ) <nl> + - py : r . binary ( s ) <nl> + rb : r . binary ( s ) <nl> + ot : s <nl> + - cd : r . binary ( s ) . count ( ) <nl> + ot : 1 <nl> + <nl> + - def : s = " \ x00 \ x42 " <nl> + - def : <nl> + rb : s . force_encoding ( ' BINARY ' ) <nl> + js : s = Buffer ( s , ' binary ' ) <nl> + py : s = bytes ( s ) <nl> + - py : r . binary ( s ) <nl> + rb : r . binary ( s ) <nl> + ot : s <nl> + - cd : r . binary ( s ) . count ( ) <nl> + ot : 2 <nl> + <nl> + - def : s = " \ x00 \ xfe \ x7a " <nl> + - def : <nl> + rb : s . force_encoding ( ' BINARY ' ) <nl> + js : s = Buffer ( s , ' binary ' ) <nl> + py : s = bytes ( s ) <nl> + - py : r . binary ( s ) <nl> + rb : r . binary ( s ) <nl> + ot : s <nl> + - cd : r . binary ( s ) . count ( ) <nl> + ot : 3 <nl> + <nl> + - def : s = " \ xed \ xfe \ x00 \ xba " <nl> + - def : <nl> + rb : s . force_encoding ( ' BINARY ' ) <nl> + js : s = Buffer ( s , ' binary ' ) <nl> + py : s = bytes ( s ) <nl> + - py : r . binary ( s ) <nl> + rb : r . binary ( s ) <nl> + ot : s <nl> + - cd : r . binary ( s ) . count ( ) <nl> + ot : 4 <nl> + <nl> + - def : s = " \ x50 \ xf9 \ x00 \ x77 \ xf9 " <nl> + - def : <nl> + rb : s . force_encoding ( ' BINARY ' ) <nl> + js : s = Buffer ( s , ' binary ' ) <nl> + py : s = bytes ( s ) <nl> + - py : r . binary ( s ) <nl> + rb : r . binary ( s ) <nl> + ot : s <nl> + - cd : r . binary ( s ) . count ( ) <nl> + ot : 5 <nl> + <nl> + - def : s = " \ x2f \ xe3 \ xb5 \ x57 \ x00 \ x92 " <nl> + - def : <nl> + rb : s . force_encoding ( ' BINARY ' ) <nl> + js : s = Buffer ( s , ' binary ' ) <nl> + py : s = bytes ( s ) <nl> + - py : r . binary ( s ) <nl> + rb : r . binary ( s ) <nl> + ot : s <nl> + - cd : r . binary ( s ) . count ( ) <nl> + ot : 6 <nl> + <nl> + - def : s = " \ xa9 \ x43 \ x54 \ xe9 \ x00 \ xf8 \ xfb " <nl> + - def : <nl> + rb : s . force_encoding ( ' BINARY ' ) <nl> + js : s = Buffer ( s , ' binary ' ) <nl> + py : s = bytes ( s ) <nl> + - py : r . binary ( s ) <nl> + rb : r . binary ( s ) <nl> + ot : s <nl> + - cd : r . binary ( s ) . count ( ) <nl> + ot : 7 <nl> + <nl> + - def : s = " \ x57 \ xbb \ xe5 \ x82 \ x8b \ xd3 \ x00 \ xf9 " <nl> + - def : <nl> + rb : s . force_encoding ( ' BINARY ' ) <nl> + js : s = Buffer ( s , ' binary ' ) <nl> + py : s = bytes ( s ) <nl> + - py : r . binary ( s ) <nl> + rb : r . binary ( s ) <nl> + ot : s <nl> + - cd : r . binary ( s ) . count ( ) <nl> + ot : 8 <nl> + <nl> + - def : s = " \ x44 \ x1b \ x3e \ x00 \ x13 \ x19 \ x29 \ x2a \ xbf " <nl> + - def : <nl> + rb : s . force_encoding ( ' BINARY ' ) <nl> + js : s = Buffer ( s , ' binary ' ) <nl> + py : s = bytes ( s ) <nl> + - py : r . binary ( s ) <nl> + rb : r . binary ( s ) <nl> + ot : s <nl> + - cd : r . binary ( s ) . 
count ( ) <nl> + ot : 9 <nl> + <nl> + - def : s = " \ x8a \ x1d \ x09 \ x00 \ x5d \ x60 \ x6b \ x2e \ x70 \ xd9 " <nl> + - def : <nl> + rb : s . force_encoding ( ' BINARY ' ) <nl> + js : s = Buffer ( s , ' binary ' ) <nl> + py : s = bytes ( s ) <nl> + - py : r . binary ( s ) <nl> + rb : r . binary ( s ) <nl> + ot : s <nl> + - cd : r . binary ( s ) . count ( ) <nl> + ot : 10 <nl> + <nl> + - def : s = " \ x00 \ xaf \ x47 \ x4b \ x38 \ x99 \ x14 \ x8d \ x8f \ x10 \ x51 " <nl> + - def : <nl> + rb : s . force_encoding ( ' BINARY ' ) <nl> + js : s = Buffer ( s , ' binary ' ) <nl> + py : s = bytes ( s ) <nl> + - py : r . binary ( s ) <nl> + rb : r . binary ( s ) <nl> + ot : s <nl> + - cd : r . binary ( s ) . count ( ) <nl> + ot : 11 <nl> + <nl> + - def : s = " \ x45 \ x39 \ x00 \ xf7 \ xc2 \ x37 \ xfd \ xe0 \ x38 \ x82 \ x40 \ xa9 " <nl> + - def : <nl> + rb : s . force_encoding ( ' BINARY ' ) <nl> + js : s = Buffer ( s , ' binary ' ) <nl> + py : s = bytes ( s ) <nl> + - py : r . binary ( s ) <nl> + rb : r . binary ( s ) <nl> + ot : s <nl> + - cd : r . binary ( s ) . count ( ) <nl> + ot : 12 <nl> + <nl> + # Test comparisons <nl> + # Binary objects to use , in order of increasing value <nl> + - def : a = " \ x00 " <nl> + - def : b = " \ x00 \ x01 " <nl> + - def : c = " \ x01 " <nl> + - def : d = " \ x70 \ x22 " <nl> + - def : e = " \ x80 " <nl> + - def : f = " \ xFE " <nl> + - def : <nl> + rb : a . force_encoding ( ' BINARY ' ) <nl> + js : a = Buffer ( a , ' binary ' ) <nl> + py : a = bytes ( a ) <nl> + - def : <nl> + rb : b . force_encoding ( ' BINARY ' ) <nl> + js : b = Buffer ( b , ' binary ' ) <nl> + py : b = bytes ( b ) <nl> + - def : <nl> + rb : c . force_encoding ( ' BINARY ' ) <nl> + js : c = Buffer ( c , ' binary ' ) <nl> + py : c = bytes ( c ) <nl> + - def : <nl> + rb : d . force_encoding ( ' BINARY ' ) <nl> + js : d = Buffer ( d , ' binary ' ) <nl> + py : d = bytes ( d ) <nl> + - def : <nl> + rb : e . force_encoding ( ' BINARY ' ) <nl> + js : e = Buffer ( e , ' binary ' ) <nl> + py : e = bytes ( e ) <nl> + - def : <nl> + rb : f . force_encoding ( ' BINARY ' ) <nl> + js : f = Buffer ( f , ' binary ' ) <nl> + py : f = bytes ( f ) <nl> + <nl> + # a - > a <nl> + - cd : r . binary ( a ) . eq ( r . binary ( a ) ) <nl> + ot : true <nl> + - cd : r . binary ( a ) . le ( r . binary ( a ) ) <nl> + ot : true <nl> + - cd : r . binary ( a ) . ge ( r . binary ( a ) ) <nl> + ot : true <nl> + - cd : r . binary ( a ) . ne ( r . binary ( a ) ) <nl> + ot : false <nl> + - cd : r . binary ( a ) . lt ( r . binary ( a ) ) <nl> + ot : false <nl> + - cd : r . binary ( a ) . gt ( r . binary ( a ) ) <nl> + ot : false <nl> + <nl> + # a - > b <nl> + - cd : r . binary ( a ) . ne ( r . binary ( b ) ) <nl> + ot : true <nl> + - cd : r . binary ( a ) . lt ( r . binary ( b ) ) <nl> + ot : true <nl> + - cd : r . binary ( a ) . le ( r . binary ( b ) ) <nl> + ot : true <nl> + - cd : r . binary ( a ) . ge ( r . binary ( b ) ) <nl> + ot : false <nl> + - cd : r . binary ( a ) . gt ( r . binary ( b ) ) <nl> + ot : false <nl> + - cd : r . binary ( a ) . eq ( r . binary ( b ) ) <nl> + ot : false <nl> + <nl> + # b - > c <nl> + - cd : r . binary ( b ) . ne ( r . binary ( c ) ) <nl> + ot : true <nl> + - cd : r . binary ( b ) . lt ( r . binary ( c ) ) <nl> + ot : true <nl> + - cd : r . binary ( b ) . le ( r . binary ( c ) ) <nl> + ot : true <nl> + - cd : r . binary ( b ) . ge ( r . binary ( c ) ) <nl> + ot : false <nl> + - cd : r . binary ( b ) . gt ( r . binary ( c ) ) <nl> + ot : false <nl> + - cd : r . binary ( b ) . eq ( r . 
binary ( c ) ) <nl> + ot : false <nl> + <nl> + # c - > d <nl> + - cd : r . binary ( c ) . ne ( r . binary ( d ) ) <nl> + ot : true <nl> + - cd : r . binary ( c ) . lt ( r . binary ( d ) ) <nl> + ot : true <nl> + - cd : r . binary ( c ) . le ( r . binary ( d ) ) <nl> + ot : true <nl> + - cd : r . binary ( c ) . ge ( r . binary ( d ) ) <nl> + ot : false <nl> + - cd : r . binary ( c ) . gt ( r . binary ( d ) ) <nl> + ot : false <nl> + - cd : r . binary ( c ) . eq ( r . binary ( d ) ) <nl> + ot : false <nl> + <nl> + # d - > e <nl> + - cd : r . binary ( d ) . ne ( r . binary ( e ) ) <nl> + ot : true <nl> + - cd : r . binary ( d ) . lt ( r . binary ( e ) ) <nl> + ot : true <nl> + - cd : r . binary ( d ) . le ( r . binary ( e ) ) <nl> + ot : true <nl> + - cd : r . binary ( d ) . ge ( r . binary ( e ) ) <nl> + ot : false <nl> + - cd : r . binary ( d ) . gt ( r . binary ( e ) ) <nl> + ot : false <nl> + - cd : r . binary ( d ) . eq ( r . binary ( e ) ) <nl> + ot : false <nl> + <nl> + # e - > f <nl> + - cd : r . binary ( e ) . ne ( r . binary ( f ) ) <nl> + ot : true <nl> + - cd : r . binary ( e ) . lt ( r . binary ( f ) ) <nl> + ot : true <nl> + - cd : r . binary ( e ) . le ( r . binary ( f ) ) <nl> + ot : true <nl> + - cd : r . binary ( e ) . ge ( r . binary ( f ) ) <nl> + ot : false <nl> + - cd : r . binary ( e ) . gt ( r . binary ( f ) ) <nl> + ot : false <nl> + - cd : r . binary ( e ) . eq ( r . binary ( f ) ) <nl> + ot : false <nl> + <nl> + # f - > f <nl> + - cd : r . binary ( f ) . eq ( r . binary ( f ) ) <nl> + ot : true <nl> + - cd : r . binary ( f ) . le ( r . binary ( f ) ) <nl> + ot : true <nl> + - cd : r . binary ( f ) . ge ( r . binary ( f ) ) <nl> + ot : true <nl> + - cd : r . binary ( f ) . ne ( r . binary ( f ) ) <nl> + ot : false <nl> + - cd : r . binary ( f ) . lt ( r . binary ( f ) ) <nl> + ot : false <nl> + - cd : r . binary ( f ) . gt ( r . binary ( f ) ) <nl> + ot : false <nl> + <nl> + # Test encodings <nl> + - py : r . binary ( u ' イロハニホヘト チリヌルヲ ワカヨタレソ ツネナラム ' . encode ( ' utf - 8 ' ) ) <nl> + ot : u ' イロハニホヘト チリヌルヲ ワカヨタレソ ツネナラム ' . encode ( ' utf - 8 ' ) <nl> + - py : r . binary ( u ' ƀƁƂƃƄƅƆƇƈƉƊƋƌƍƎƏ ' . encode ( ' utf - 16 ' ) ) <nl> + ot : u ' ƀƁƂƃƄƅƆƇƈƉƊƋƌƍƎƏ ' . encode ( ' utf - 16 ' ) <nl> + - py : r . binary ( u ' lorem ipsum ' . encode ( ' ascii ' ) ) <nl> + ot : u ' lorem ipsum ' . encode ( ' ascii ' ) <nl> + <nl> + # Test coercions <nl> + - py : r . binary ( bytes ( ' foo ' ) ) . coerce_to ( ' string ' ) <nl> + ot : bytes ( ' foo ' ) <nl> + - py : r . binary ( u ' イロハニホヘト チリヌルヲ ワカヨタレソ ツネナラム ' . encode ( ' utf - 8 ' ) ) . coerce_to ( ' string ' ) <nl> + ot : u ' イロハニホヘト チリヌルヲ ワカヨタレソ ツネナラム ' <nl> + - py : r . binary ( u ' lorem ipsum ' . encode ( ' ascii ' ) ) . coerce_to ( ' string ' ) <nl> + ot : u ' lorem ipsum ' <nl> + <nl> + - py : r . expr ( ' foo ' ) . coerce_to ( ' binary ' ) <nl> + ot : bytes ( ' foo ' ) <nl> + <nl> + - py : r . binary ( bytes ( ' foo ' ) ) . coerce_to ( ' binary ' ) <nl> + ot : bytes ( ' foo ' ) <nl> + <nl> + # Test slice <nl> + - py : r . binary ( bytes ( ' abcdefg ' ) ) . slice ( - 3 , - 1 ) <nl> + ot : bytes ( ' ef ' ) <nl> + - py : r . binary ( bytes ( ' abcdefg ' ) ) . slice ( 0 , 2 ) <nl> + ot : bytes ( ' ab ' ) <nl> + - py : r . binary ( bytes ( ' abcdefg ' ) ) . slice ( 3 , - 1 ) <nl> + ot : bytes ( ' def ' ) <nl> + - py : r . binary ( bytes ( ' abcdefg ' ) ) . slice ( - 5 , 5 ) <nl> + ot : bytes ( ' cde ' ) <nl> + - py : r . binary ( bytes ( ' abcdefg ' ) ) . slice ( - 8 , 2 ) <nl> + ot : bytes ( ' ab ' ) <nl> + - py : r . 
binary ( bytes ( ' abcdefg ' ) ) . slice ( 5 , 7 ) <nl> + ot : bytes ( ' fg ' ) <nl> + <nl> + # Left side out - of - bound should clamp to index 0 <nl> + - py : r . binary ( bytes ( ' abcdefg ' ) ) . slice ( - 9 , 2 ) <nl> + ot : bytes ( ' ab ' ) <nl> + <nl> + # Right side out - of - bound should return the valid subset of the range <nl> + - py : r . binary ( bytes ( ' abcdefg ' ) ) . slice ( 5 , 9 ) <nl> + ot : bytes ( ' fg ' ) <nl> + <nl> + # Test binary keys <nl> + # Binary keys should sort between array and bool <nl> + - cd : r . db ( ' test ' ) . table_create ( ' datum_binary ' ) <nl> + def : tbl = r . table ( ' datum_binary ' ) <nl> + <nl> + - def : <nl> + py : trows = [ { ' id ' : [ 0 ] } , { ' id ' : r . binary ( a ) } , { ' id ' : r . binary ( c ) } , { ' id ' : r . binary ( d ) } , { ' id ' : r . binary ( f ) } , { ' id ' : False } , { ' id ' : True } , { ' id ' : - 500 } , { ' id ' : 500 } , { ' id ' : ' ' } , { ' id ' : ' str ' } ] <nl> + rb : trows = [ { : id = > [ 0 ] } , { : id = > r . binary ( a ) } , { : id = > r . binary ( c ) } , { : id = > r . binary ( d ) } , { : id = > r . binary ( f ) } , { : id = > false } , { : id = > true } , { : id = > - 500 } , { : id = > 500 } , { : id = > ' ' } , { : id = > ' str ' } ] <nl> + - def : <nl> + py : trows_raw = [ { ' id ' : [ 0 ] } , { ' id ' : a } , { ' id ' : c } , { ' id ' : d } , { ' id ' : f } , { ' id ' : False } , { ' id ' : True } , { ' id ' : - 500 } , { ' id ' : 500 } , { ' id ' : ' ' } , { ' id ' : ' str ' } ] <nl> + rb : trows_raw = [ { : id = > [ 0 ] } , { : id = > a } , { : id = > c } , { : id = > d } , { : id = > f } , { : id = > false } , { : id = > true } , { : id = > - 500 } , { : id = > 500 } , { : id = > ' ' } , { : id = > ' str ' } ] <nl> + - cd : tbl . insert ( trows ) [ ' inserted ' ] <nl> + js : tbl . insert ( trows ) ( ' inserted ' ) <nl> + ot : 11 <nl> + <nl> + - cd : tbl . order_by ( ' id ' ) <nl> + ot : trows_raw <nl> + <nl> + # Test errors <nl> + # Missing ' data ' field <nl> + - py : r . expr ( { ' $ reql_type $ ' : ' BINARY ' } ) <nl> + ot : err ( ' RqlRuntimeError ' , ' Invalid binary pseudotype : ' + ' lacking ` data ` key . ' , [ ] ) <nl> + # Invalid base64 format <nl> + - py : r . expr ( { ' $ reql_type $ ' : ' BINARY ' , ' data ' : ' ABCDEFGH = = AA ' } ) <nl> + ot : err ( ' RqlRuntimeError ' , ' Invalid base64 format , data found after padding character \ ' = \ ' . ' , [ ] ) <nl> + - py : r . expr ( { ' $ reql_type $ ' : ' BINARY ' , ' data ' : ' ABCDEF = = $ ' } ) <nl> + ot : err ( ' RqlRuntimeError ' , ' Invalid base64 format , data found after padding character \ ' = \ ' . ' , [ ] ) <nl> + - py : r . expr ( { ' $ reql_type $ ' : ' BINARY ' , ' data ' : ' A ^ CDEFGH ' } ) <nl> + ot : err ( ' RqlRuntimeError ' , ' Invalid base64 character found : ' + ' \ ' ^ \ ' . ' , [ ] ) <nl> + - py : r . expr ( { ' $ reql_type $ ' : ' BINARY ' , ' data ' : ' ABCDE ' } ) <nl> + ot : err ( ' RqlRuntimeError ' , ' Invalid base64 length : ' + ' 1 character remaining , cannot decode a full byte . ' , [ ] ) <nl> + # Invalid arity <nl> + - rb : r . binary ( ' abc ' , ' def ' ) <nl> + py : r . binary ( bytes ( ' abc ' ) , bytes ( ' def ' ) ) <nl> + js : r . binary ( Buffer ( 0 ) , Buffer ( 0 ) ) <nl> + ot : <nl> + py : err ( ' TypeError ' , ' binary ( ) takes exactly 1 argument ( 2 given ) ' ) <nl> + rb : err ( ' ArgumentError ' , ' wrong number of arguments ( 2 for 1 ) ' , [ ] ) <nl> + js : err ( ' RqlDriverError ' , ' Expected 1 argument but found 2 . ' , [ ] ) <nl> + # Invalid coercions <nl> + - rb : r . 
binary ( ' a ' ) . coerce_to ( ' array ' ) <nl> + ot : err ( ' RqlRuntimeError ' , ' Cannot coerce BINARY to ARRAY . ' , [ ] ) <nl> + - rb : r . binary ( ' a ' ) . coerce_to ( ' object ' ) <nl> + ot : err ( ' RqlRuntimeError ' , ' Cannot coerce BINARY to OBJECT . ' , [ ] ) <nl> + - rb : r . binary ( ' a ' ) . coerce_to ( ' bool ' ) <nl> + ot : err ( ' RqlRuntimeError ' , ' Cannot coerce BINARY to BOOL . ' , [ ] ) <nl> + - rb : r . binary ( ' a ' ) . coerce_to ( ' number ' ) <nl> + ot : err ( ' RqlRuntimeError ' , ' Cannot coerce BINARY to NUMBER . ' , [ ] ) <nl> + - rb : r . binary ( ' a ' ) . coerce_to ( ' nu ' + ' ll ' ) <nl> + ot : err ( ' RqlRuntimeError ' , ' Cannot coerce BINARY to NULL . ' , [ ] ) <nl> mmm a / test / rql_test / src / math_logic / comparison . yaml <nl> ppp b / test / rql_test / src / math_logic / comparison . yaml <nl> tests : <nl> # Comparisons across types <nl> # RQL primtive types compare as if mapped to the following numbers <nl> # ARRAY : 0 <nl> - # BOOLEAN : 1 <nl> - # NULL : 2 <nl> - # NUMBER : 3 <nl> - # OBJECT : 4 <nl> - # STRING : 5 <nl> + # BINARY : 1 <nl> + # BOOLEAN : 2 <nl> + # NULL : 3 <nl> + # NUMBER : 4 <nl> + # OBJECT : 5 <nl> + # STRING : 6 <nl> <nl> - py : r . expr ( [ ] ) < True <nl> js : r ( [ ] ) . lt ( true ) <nl> tests : <nl> - py : r . expr ( False ) < [ ] <nl> js : r ( false ) . lt ( [ ] ) <nl> rb : r ( false ) < [ ] <nl> + <nl> + - py : r . expr ( [ ] ) < r . binary ( " \ xAE " ) <nl> + js : r ( [ ] ) . lt ( r . binary ( Buffer ( " \ x00 " ) ) ) <nl> + rb : r ( [ ] ) < r . binary ( " " ) <nl> + ot : true <nl> + <nl> + - py : r . expr ( [ 1 , 2 ] ) < r . binary ( " \ xAE " ) <nl> + js : r ( [ 1 , 2 ] ) . lt ( r . binary ( Buffer ( " \ x00 " ) ) ) <nl> + rb : r ( [ 1 , 2 ] ) < r . binary ( " " ) <nl> + ot : true <nl> + <nl> + - py : r . expr ( r . binary ( " \ xAE " ) ) < [ ] <nl> + js : r ( r . binary ( Buffer ( " \ x00 " ) ) ) . lt ( [ ] ) <nl> + rb : r ( r . binary ( " " ) ) < [ ] <nl> + ot : false <nl> + <nl> + - py : r . binary ( " 0xAE " ) < True <nl> + js : r . binary ( Buffer ( " 0x00 " ) ) . lt ( true ) <nl> + rb : r . binary ( " " ) < true <nl> + ot : true <nl> + <nl> + - py : r . binary ( " 0xAE " ) > False <nl> + js : r . binary ( Buffer ( " 0x00 " ) ) . gt ( false ) <nl> + rb : r . binary ( " " ) > false <nl> ot : false <nl> <nl> - py : True < r . expr ( None ) <nl>
adding r . binary reql tests
rethinkdb/rethinkdb
9f7247bd9d2116d64c4dc86438cf6a139b3ce11e
2014-07-25T01:10:53Z
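The comparison.yaml hunk in the record above inserts BINARY into RethinkDB's cross-type ordering, between ARRAY and BOOLEAN. As a rough illustration of that ordering rule (a sketch only, not RethinkDB source), the ranks from the updated comment can be expressed as a small C++ comparator; the enum and function names below are invented for this note.

// Sketch: cross-type comparison by rank, using the rank numbers from the
// comparison.yaml comment (ARRAY 0, BINARY 1, BOOLEAN 2, NULL 3, NUMBER 4,
// OBJECT 5, STRING 6). Same-type values would need a type-specific compare.
#include <cassert>

enum class ReqlType { Array = 0, Binary = 1, Boolean = 2, Null = 3,
                      Number = 4, Object = 5, String = 6 };

bool rankLess(ReqlType lhs, ReqlType rhs) {
    return static_cast<int>(lhs) < static_cast<int>(rhs);
}

int main() {
    // Mirrors the added tests: [] < binary, binary < true, binary > false is false.
    assert(rankLess(ReqlType::Array, ReqlType::Binary));
    assert(rankLess(ReqlType::Binary, ReqlType::Boolean));
    assert(!rankLess(ReqlType::Boolean, ReqlType::Binary));
    return 0;
}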
mmm a / tensorflow / stream_executor / cuda / cuda_dnn . cc <nl> ppp b / tensorflow / stream_executor / cuda / cuda_dnn . cc <nl> class CudnnRnnDescriptor : public dnn : : RnnDescriptor { <nl> / / We can only reasonably expect the user to handle the subsequent failure <nl> / / in profile mode , which is run with algorithms returned from <nl> / / GetRnnAlgorithms ( ) ( which are non - default and explicitly set whether to <nl> - / / use tensor ops ) . <nl> - if ( RnnTensorOpMathEnabled ( ) & & algorithm_config . algorithm ( ) . has_value ( ) ) { <nl> - cudnnMathType_t math_type = <nl> - algorithm_config . algorithm ( ) - > tensor_ops_enabled ( ) <nl> - ? CUDNN_TENSOR_OP_MATH <nl> - : CUDNN_DEFAULT_MATH ; <nl> + / / use tensor ops ) . CuDNN 7 . 2 . 1 fixed this issue <nl> + if ( RnnTensorOpMathEnabled ( ) ) { <nl> + cudnnMathType_t math_type ; <nl> + if ( algorithm_config . algorithm ( ) . has_value ( ) ) { <nl> + math_type = algorithm_config . algorithm ( ) - > tensor_ops_enabled ( ) <nl> + ? CUDNN_TENSOR_OP_MATH <nl> + : CUDNN_DEFAULT_MATH ; <nl> + } else { <nl> + # if CUDNN_VERSION > = 7201 <nl> + math_type = CUDNN_TENSOR_OP_MATH ; <nl> + # else <nl> + math_type = CUDNN_DEFAULT_MATH ; <nl> + # endif / / CUDNN_VERSION > = 7201 <nl> + } <nl> CHECK_CUDNN_OK ( cudnnSetRNNMatrixMathType ( rnn_desc . get ( ) , math_type ) ) ; <nl> } <nl> - # endif <nl> + # endif / / CUDNN_VERSION > = 7000 <nl> <nl> return CudnnRnnDescriptor ( cudnn , std : : move ( rnn_desc ) , std : : move ( rnn_plan ) , <nl> num_layers , hidden_size , input_size , batch_size , <nl>
Merge pull request from houtoms : pr_fix_rnn_tensor_core_use
tensorflow/tensorflow
40275ad3aa6ce8c59b2a494150d1c47e0ef57bd3
2019-05-30T19:06:08Z
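The cuda_dnn.cc hunk above chooses the RNN math mode from the cuDNN version when no algorithm has been configured, while still honouring an explicit configuration. A minimal sketch of that selection pattern follows; MY_CUDNN_VERSION, MathType and ChooseRnnMathType are stand-ins invented for this note, not the real cuDNN or StreamExecutor API.

// Sketch: an explicit configuration wins; otherwise fall back to a
// compile-time version gate, as in the diff above.
#include <cstdio>
#include <optional>

#define MY_CUDNN_VERSION 7201  // assumed version for this sketch

enum class MathType { Default, TensorOp };

MathType ChooseRnnMathType(std::optional<bool> tensor_ops_requested) {
    if (tensor_ops_requested.has_value()) {
        return *tensor_ops_requested ? MathType::TensorOp : MathType::Default;
    }
#if MY_CUDNN_VERSION >= 7201
    return MathType::TensorOp;  // newer cuDNN tolerates the unset case
#else
    return MathType::Default;
#endif
}

int main() {
    std::printf("default pick: %d\n", static_cast<int>(ChooseRnnMathType(std::nullopt)));
    std::printf("explicit off: %d\n", static_cast<int>(ChooseRnnMathType(false)));
    return 0;
}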
mmm a / src / test / fuzz / prevector . cpp <nl> ppp b / src / test / fuzz / prevector . cpp <nl> void test_one_input ( const std : : vector < uint8_t > & buffer ) <nl> prevector_tester < 8 , int > test ; <nl> <nl> while ( prov . remaining_bytes ( ) ) { <nl> - switch ( prov . ConsumeIntegralInRange < int > ( 0 , 14 + 3 * ( test . size ( ) > 0 ) ) ) { <nl> + switch ( prov . ConsumeIntegralInRange < int > ( 0 , 13 + 3 * ( test . size ( ) > 0 ) ) ) { <nl> case 0 : <nl> test . insert ( prov . ConsumeIntegralInRange < size_t > ( 0 , test . size ( ) ) , prov . ConsumeIntegral < int > ( ) ) ; <nl> break ; <nl> void test_one_input ( const std : : vector < uint8_t > & buffer ) <nl> test . insert ( prov . ConsumeIntegralInRange < size_t > ( 0 , test . size ( ) ) , 1 + prov . ConsumeBool ( ) , prov . ConsumeIntegral < int > ( ) ) ; <nl> break ; <nl> case 3 : { <nl> - int del = std : : min < int > ( test . size ( ) , 1 + prov . ConsumeBool ( ) ) ; <nl> + int del = prov . ConsumeIntegralInRange < int > ( 0 , test . size ( ) ) ; <nl> int beg = prov . ConsumeIntegralInRange < int > ( 0 , test . size ( ) - del ) ; <nl> test . erase ( beg , beg + del ) ; <nl> break ; <nl> void test_one_input ( const std : : vector < uint8_t > & buffer ) <nl> test . insert_range ( prov . ConsumeIntegralInRange < size_t > ( 0 , test . size ( ) ) , values , values + num ) ; <nl> break ; <nl> } <nl> - case 6 : { <nl> - int del = std : : min < int > ( test . size ( ) , 1 + prov . ConsumeIntegralInRange < int > ( 0 , 3 ) ) ; <nl> - int beg = prov . ConsumeIntegralInRange < int > ( 0 , test . size ( ) - del ) ; <nl> - test . erase ( beg , beg + del ) ; <nl> - break ; <nl> - } <nl> case 7 : <nl> test . reserve ( prov . ConsumeIntegralInRange < size_t > ( 0 , 32767 ) ) ; <nl> break ; <nl> case 8 : <nl> test . shrink_to_fit ( ) ; <nl> break ; <nl> - case 17 : <nl> + case 14 : <nl> test . update ( prov . ConsumeIntegralInRange < size_t > ( 0 , test . size ( ) - 1 ) , prov . ConsumeIntegral < int > ( ) ) ; <nl> break ; <nl> case 9 : <nl> void test_one_input ( const std : : vector < uint8_t > & buffer ) <nl> case 13 : <nl> test . move ( ) ; <nl> break ; <nl> - case 14 : { <nl> + case 6 : { <nl> int num = 1 + prov . ConsumeIntegralInRange < int > ( 0 , 15 ) ; <nl> std : : vector < int > values ( num ) ; <nl> for ( auto & v : values ) { <nl>
Merge and generalize case 3 and case 6
bitcoin/bitcoin
c2ccadc26a04358b11539097c1aadb8d11b85c21
2020-04-06T21:39:42Z
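The prevector fuzz change above folds two erase cases into one by drawing the deletion count from the whole valid range [0, size] rather than a small fixed set. The sketch below shows the same bounds logic against std::vector; TinyProvider is an invented stand-in for the FuzzedDataProvider used by the real harness.

// Sketch: erase a provider-chosen range [beg, beg + del) whose bounds are
// always valid for the current container size.
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

struct TinyProvider {                                      // hypothetical fuzz-input source
    std::uint32_t state = 0x12345678u;
    std::size_t inRange(std::size_t lo, std::size_t hi) {  // value in [lo, hi]
        state = state * 1664525u + 1013904223u;
        return hi > lo ? lo + state % (hi - lo + 1) : lo;
    }
};

int main() {
    std::vector<int> v{0, 1, 2, 3, 4, 5, 6, 7};
    TinyProvider prov;
    const std::size_t del = prov.inRange(0, v.size());        // how many to erase
    const std::size_t beg = prov.inRange(0, v.size() - del);  // where to start
    v.erase(v.begin() + static_cast<std::ptrdiff_t>(beg),
            v.begin() + static_cast<std::ptrdiff_t>(beg + del));
    std::cout << "erased " << del << " at " << beg << ", size now " << v.size() << "\n";
    return 0;
}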
mmm a / Telegram / SourceFiles / calls / calls_call . cpp <nl> ppp b / Telegram / SourceFiles / calls / calls_call . cpp <nl> namespace { <nl> constexpr auto kMinLayer = 65 ; <nl> constexpr auto kMaxLayer = 75 ; <nl> constexpr auto kHangupTimeoutMs = 5000 ; <nl> + constexpr auto kSha256Size = 32 ; <nl> <nl> using tgvoip : : Endpoint ; <nl> <nl> void Call : : start ( bytes : : const_span random ) { <nl> void Call : : startOutgoing ( ) { <nl> Expects ( _type = = Type : : Outgoing ) ; <nl> Expects ( _state = = State : : Requesting ) ; <nl> + Expects ( _gaHash . size ( ) = = kSha256Size ) ; <nl> <nl> request ( MTPphone_RequestCall ( <nl> _user - > inputUser , <nl> bool Call : : handleUpdate ( const MTPPhoneCall & call ) { <nl> _id = data . vid . v ; <nl> _accessHash = data . vaccess_hash . v ; <nl> auto gaHashBytes = bytes : : make_span ( data . vg_a_hash . v ) ; <nl> - if ( gaHashBytes . size ( ) ! = _gaHash . size ( ) ) { <nl> - LOG ( ( " Call Error : Wrong g_a_hash size % 1 , expected % 2 . " ) . arg ( gaHashBytes . size ( ) ) . arg ( _gaHash . size ( ) ) ) ; <nl> + if ( gaHashBytes . size ( ) ! = kSha256Size ) { <nl> + LOG ( ( " Call Error : Wrong g_a_hash size % 1 , expected % 2 . " ) . arg ( gaHashBytes . size ( ) ) . arg ( kSha256Size ) ) ; <nl> finish ( FinishType : : Failed ) ; <nl> return true ; <nl> } <nl> - bytes : : copy ( _gaHash , gaHashBytes ) ; <nl> + _gaHash = bytes : : make_vector ( gaHashBytes ) ; <nl> } return true ; <nl> <nl> case mtpc_phoneCallEmpty : { <nl>
Fix incoming calls .
telegramdesktop/tdesktop
d8a4ede4b5599f2b14810eea29b7f00d2943fa05
2018-06-06T10:28:43Z
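The tdesktop hunk above names the expected hash length (kSha256Size), checks the incoming g_a_hash against it, and only then stores a copy. A generic validate-then-copy sketch follows, using plain STL types rather than tdesktop's bytes:: helpers; acceptGaHash is an invented name.

// Sketch: reject a hash of the wrong length instead of copying it into a
// fixed-size buffer; on success, store the bytes by constructing a vector.
#include <cstdint>
#include <iostream>
#include <optional>
#include <vector>

constexpr std::size_t kSha256Size = 32;

std::optional<std::vector<std::uint8_t>> acceptGaHash(
        const std::vector<std::uint8_t>& received) {
    if (received.size() != kSha256Size) {
        std::cerr << "Wrong g_a_hash size " << received.size()
                  << ", expected " << kSha256Size << "\n";
        return std::nullopt;  // the caller would mark the call as failed
    }
    return std::vector<std::uint8_t>(received.begin(), received.end());
}

int main() {
    std::cout << (acceptGaHash(std::vector<std::uint8_t>(kSha256Size, 0xAB)) ? "ok" : "rejected") << "\n";
    std::cout << (acceptGaHash(std::vector<std::uint8_t>(16, 0xCD)) ? "ok" : "rejected") << "\n";
    return 0;
}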
mmm a / tensorflow / python / eager / benchmarks_test . py <nl> ppp b / tensorflow / python / eager / benchmarks_test . py <nl> def benchmark_tf_random_uniform_2_by_2_default_setting_GPU ( self ) : <nl> self . _run ( func , num_iters = self . _num_iters_2_by_2 ) <nl> <nl> def _benchmark_tf_dropout_2_by_2 ( self , <nl> - rate = 0 . 5 , <nl> is_rate_tensor = True , <nl> noise_shape = None , <nl> device = CPU ) : <nl> if is_rate_tensor : <nl> - rate = constant_op . constant ( rate , dtype = dtypes . float32 ) <nl> + rate = constant_op . constant ( 0 . 5 , dtype = dtypes . float32 ) <nl> + else : <nl> + rate = 0 . 5 <nl> with context . device ( device ) : <nl> <nl> def func ( ) : <nl> def benchmark_tf_dropout_2_by_2_CPU ( self ) : <nl> def benchmark_tf_dropout_2_by_2_GPU ( self ) : <nl> self . _benchmark_tf_dropout_2_by_2 ( device = GPU ) <nl> <nl> - def benchmark_tf_dropout_scalar_rate_2_by_2_CPU_rate_0 ( self ) : <nl> - self . _benchmark_tf_dropout_2_by_2 ( rate = 0 , is_rate_tensor = False ) <nl> - <nl> - def benchmark_tf_dropout_scalar_rate_2_by_2_GPU_rate_0 ( self ) : <nl> - self . _benchmark_tf_dropout_2_by_2 ( rate = 0 . 0 , <nl> - is_rate_tensor = False , device = GPU ) <nl> - <nl> - def benchmark_tf_dropout_2_by_2_CPU_rate_0 ( self ) : <nl> - self . _benchmark_tf_dropout_2_by_2 ( rate = 0 . 0 ) <nl> - <nl> - def benchmark_tf_dropout_2_by_2_GPU_rate_0 ( self ) : <nl> - self . _benchmark_tf_dropout_2_by_2 ( rate = 0 , device = GPU ) <nl> - <nl> def _benchmark_transpose ( self , <nl> m , <nl> num_iters , <nl> mmm a / tensorflow / python / ops / nn_ops . py <nl> ppp b / tensorflow / python / ops / nn_ops . py <nl> def dropout_v2 ( x , rate , noise_shape = None , seed = None , name = None ) : <nl> if not x_dtype . is_floating : <nl> raise ValueError ( " x has to be a floating point tensor since it ' s going " <nl> " to be scaled . Got a % s tensor instead . " % x_dtype ) <nl> - if is_rate_number and rate = = 0 : <nl> - # Fast - path : Return the input immediately if rate is non - tensor & is ` 0 ` . <nl> - # We trigger this after all error checking <nl> - # and after ` x ` has been converted to a tensor , to prevent inconsistent <nl> - # tensor conversions / error raising if rate is changed to / from 0 . <nl> - return x <nl> - <nl> is_executing_eagerly = context . executing_eagerly ( ) <nl> if not tensor_util . is_tensor ( rate ) : <nl> if is_rate_number : <nl>
Internal change
tensorflow/tensorflow
59721aa8f7c29a8dd9da9cb6d8ec4cd2b301a176
2020-09-10T23:20:08Z
mmm a / ports / muparser / CONTROL <nl> ppp b / ports / muparser / CONTROL <nl> <nl> Source : muparser <nl> - Version : 6cf2746 <nl> + Version : 2 . 2 . 6 . 1 <nl> Description : Fast math parser library <nl> mmm a / ports / muparser / portfile . cmake <nl> ppp b / ports / muparser / portfile . cmake <nl> include ( vcpkg_common_functions ) <nl> vcpkg_from_github ( <nl> OUT_SOURCE_PATH SOURCE_PATH <nl> REPO beltoforion / muparser <nl> - REF 6cf2746f7ce3ecbe0fd91098a3c2123e5253bb0e <nl> - SHA512 a44720507806beb577fee9480102dbdcbf8b95612e8e51e1c57688c27e69f5fec0261beb03d034471519d8a4430954d74fdb626f63d21000160eeaa081a83861 <nl> + REF v2 . 2 . 6 . 1 <nl> + SHA512 01bfc8cc48158c8413ae5e1da2ddbac1c9f0b9075470b1ab75853587d641dd195ebea268e1060a340098fd8015bc5f77d8e9cde5f81cffeade2f157c5f295496 <nl> HEAD_REF master <nl> ) <nl> <nl>
update muParser to v . 2 . 2 . 6 . 1 ( )
microsoft/vcpkg
af069e7d4e0caa63cb361a9d1bbf5154f2116f78
2019-04-04T03:26:54Z
mmm a / tensorflow / lite / experimental / micro / examples / micro_vision / BUILD <nl> ppp b / tensorflow / lite / experimental / micro / examples / micro_vision / BUILD <nl> tflite_micro_cc_test ( <nl> ] , <nl> ) <nl> <nl> + cc_library ( <nl> + name = " detection_responder " , <nl> + srcs = [ <nl> + " detection_responder . cc " , <nl> + ] , <nl> + hdrs = [ <nl> + " detection_responder . h " , <nl> + ] , <nl> + deps = [ <nl> + " / / tensorflow / lite / c : c_api_internal " , <nl> + " / / tensorflow / lite / experimental / micro : micro_framework " , <nl> + ] , <nl> + ) <nl> + <nl> + tflite_micro_cc_test ( <nl> + name = " detection_responder_test " , <nl> + srcs = [ <nl> + " detection_responder_test . cc " , <nl> + ] , <nl> + deps = [ <nl> + " : detection_responder " , <nl> + " / / tensorflow / lite / experimental / micro / testing : micro_test " , <nl> + ] , <nl> + ) <nl> + <nl> cc_binary ( <nl> name = " micro_vision " , <nl> srcs = [ <nl> " main . cc " , <nl> ] , <nl> deps = [ <nl> + " : detection_responder " , <nl> " : image_provider " , <nl> " : model_settings " , <nl> " : person_detect_model_data " , <nl> mmm a / tensorflow / lite / experimental / micro / examples / micro_vision / Makefile . inc <nl> ppp b / tensorflow / lite / experimental / micro / examples / micro_vision / Makefile . inc <nl> IMAGE_PROVIDER_TEST_HDRS : = \ <nl> tensorflow / lite / experimental / micro / examples / micro_vision / image_provider . h \ <nl> tensorflow / lite / experimental / micro / examples / micro_vision / model_settings . h <nl> <nl> + DETECTION_RESPONDER_TEST_SRCS : = \ <nl> + tensorflow / lite / experimental / micro / examples / micro_vision / detection_responder . cc \ <nl> + tensorflow / lite / experimental / micro / examples / micro_vision / detection_responder_test . cc <nl> + <nl> + DETECTION_RESPONDER_TEST_HDRS : = \ <nl> + tensorflow / lite / experimental / micro / examples / micro_vision / detection_responder . h <nl> + <nl> MICRO_VISION_SRCS : = \ <nl> + tensorflow / lite / experimental / micro / examples / micro_vision / detection_responder . cc \ <nl> tensorflow / lite / experimental / micro / examples / micro_vision / image_provider . cc \ <nl> tensorflow / lite / experimental / micro / examples / micro_vision / main . cc \ <nl> $ ( MICRO_VISION_MODEL_SRCS ) <nl> <nl> MICRO_VISION_HDRS : = \ <nl> + tensorflow / lite / experimental / micro / examples / micro_vision / detection_responder . h \ <nl> tensorflow / lite / experimental / micro / examples / micro_vision / image_provider . h \ <nl> $ ( MICRO_VISION_MODEL_HDRS ) <nl> <nl> $ ( MICRO_VISION_TEST_SRCS ) , $ ( MICRO_VISION_TEST_HDRS ) ) ) <nl> $ ( eval $ ( call microlite_test , image_provider_test , \ <nl> $ ( IMAGE_PROVIDER_TEST_SRCS ) , $ ( IMAGE_PROVIDER_TEST_HDRS ) ) ) <nl> <nl> + # Tests the detection responder module . <nl> + $ ( eval $ ( call microlite_test , detection_responder_test , \ <nl> + $ ( DETECTION_RESPONDER_TEST_SRCS ) , $ ( DETECTION_RESPONDER_TEST_HDRS ) ) ) <nl> + <nl> # Builds a standalone object recognition binary . <nl> $ ( eval $ ( call microlite_test , micro_vision , \ <nl> $ ( MICRO_VISION_SRCS ) , $ ( MICRO_VISION_HDRS ) ) ) <nl> new file mode 100644 <nl> index 0000000000000 . . e2ac98fecab21 <nl> mmm / dev / null <nl> ppp b / tensorflow / lite / experimental / micro / examples / micro_vision / detection_responder . cc <nl> <nl> + / * Copyright 2019 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 
0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + # include " tensorflow / lite / experimental / micro / examples / micro_vision / detection_responder . h " <nl> + <nl> + / / This dummy implementation writes person and no person scores to the error <nl> + / / console . Real applications will want to take some custom action instead , and <nl> + / / should implement their own versions of this function . <nl> + void RespondToDetection ( tflite : : ErrorReporter * error_reporter , <nl> + uint8_t person_score , uint8_t no_person_score ) { <nl> + error_reporter - > Report ( " person score : % d no person score % d " , person_score , <nl> + no_person_score ) ; <nl> + } <nl> new file mode 100644 <nl> index 0000000000000 . . a1aca63cf3c43 <nl> mmm / dev / null <nl> ppp b / tensorflow / lite / experimental / micro / examples / micro_vision / detection_responder . h <nl> <nl> + / * Copyright 2019 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + / / Provides an interface to take an action based on the output from the person <nl> + / / detection model . <nl> + <nl> + # ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_VISION_DETECTION_RESPONDER_H_ <nl> + # define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_VISION_DETECTION_RESPONDER_H_ <nl> + <nl> + # include " tensorflow / lite / c / c_api_internal . h " <nl> + # include " tensorflow / lite / experimental / micro / micro_error_reporter . h " <nl> + <nl> + / / Called every time the results of a person detection run are available . The <nl> + / / ` person_score ` has the numerical confidence that the captured image contains <nl> + / / a person , and ` no_person_score ` has the numerical confidence that the image <nl> + / / does not contain a person . Typically if person_score > no person score , the <nl> + / / image is considered to contain a person . This threshold may be adjusted for <nl> + / / particular applications . 
<nl> + void RespondToDetection ( tflite : : ErrorReporter * error_reporter , <nl> + uint8_t person_score , uint8_t no_person_score ) ; <nl> + <nl> + # endif / / TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_MICRO_VISION_DETECTION_RESPONDER_H_ <nl> new file mode 100644 <nl> index 0000000000000 . . ec25533e82cd0 <nl> mmm / dev / null <nl> ppp b / tensorflow / lite / experimental / micro / examples / micro_vision / detection_responder_test . cc <nl> <nl> + / * Copyright 2019 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + # include " tensorflow / lite / experimental / micro / examples / micro_vision / detection_responder . h " <nl> + <nl> + # include " tensorflow / lite / experimental / micro / testing / micro_test . h " <nl> + # include " tensorflow / lite / experimental / micro / testing / test_utils . h " <nl> + <nl> + TF_LITE_MICRO_TESTS_BEGIN <nl> + <nl> + TF_LITE_MICRO_TEST ( TestCallability ) { <nl> + tflite : : MicroErrorReporter micro_error_reporter ; <nl> + tflite : : ErrorReporter * error_reporter = & micro_error_reporter ; <nl> + <nl> + / / This will have external side - effects ( like printing to the debug console <nl> + / / or lighting an LED ) that are hard to observe , so the most we can do is <nl> + / / make sure the call doesn ' t crash . <nl> + RespondToDetection ( error_reporter , 100 , 200 ) ; <nl> + RespondToDetection ( error_reporter , 200 , 100 ) ; <nl> + } <nl> + <nl> + TF_LITE_MICRO_TESTS_END <nl> mmm a / tensorflow / lite / experimental / micro / examples / micro_vision / main . cc <nl> ppp b / tensorflow / lite / experimental / micro / examples / micro_vision / main . cc <nl> See the License for the specific language governing permissions and <nl> limitations under the License . <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> <nl> + # include " tensorflow / lite / experimental / micro / examples / micro_vision / detection_responder . h " <nl> # include " tensorflow / lite / experimental / micro / examples / micro_vision / image_provider . h " <nl> # include " tensorflow / lite / experimental / micro / examples / micro_vision / model_settings . h " <nl> # include " tensorflow / lite / experimental / micro / examples / micro_vision / person_detect_model_data . h " <nl> int main ( int argc , char * argv [ ] ) { <nl> <nl> TfLiteTensor * output = interpreter . output ( 0 ) ; <nl> <nl> - / / Log the person score and no person score . <nl> + / / Process the inference results . <nl> uint8_t person_score = output - > data . uint8 [ kPersonIndex ] ; <nl> uint8_t no_person_score = output - > data . 
uint8 [ kNotAPersonIndex ] ; <nl> - error_reporter - > Report ( <nl> - " person data . person score : % d , no person score : % d \ n " , person_score , <nl> - no_person_score ) ; <nl> + RespondToDetection ( error_reporter , person_score , no_person_score ) ; <nl> } <nl> <nl> return 0 ; <nl> new file mode 100644 <nl> index 0000000000000 . . 43425b76e6885 <nl> mmm / dev / null <nl> ppp b / tensorflow / lite / experimental / micro / examples / micro_vision / sparkfun_edge / detection_responder . cc <nl> <nl> + / * Copyright 2019 The TensorFlow Authors . All Rights Reserved . <nl> + <nl> + Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + you may not use this file except in compliance with the License . <nl> + You may obtain a copy of the License at <nl> + <nl> + http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + <nl> + Unless required by applicable law or agreed to in writing , software <nl> + distributed under the License is distributed on an " AS IS " BASIS , <nl> + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + See the License for the specific language governing permissions and <nl> + limitations under the License . <nl> + = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = * / <nl> + <nl> + # include " tensorflow / lite / experimental / micro / examples / micro_vision / detection_responder . h " <nl> + <nl> + # include " am_bsp . h " / / NOLINT <nl> + <nl> + / / This implementation will light up LEDs on the board in response to the <nl> + / / inference results . <nl> + void RespondToDetection ( tflite : : ErrorReporter * error_reporter , <nl> + uint8_t person_score , uint8_t no_person_score ) { <nl> + static bool is_initialized = false ; <nl> + if ( ! is_initialized ) { <nl> + / / Setup LED ' s as outputs . Leave red LED alone since that ' s an error <nl> + / / indicator for sparkfun_edge in image_provider . <nl> + am_hal_gpio_pinconfig ( AM_BSP_GPIO_LED_BLUE , g_AM_HAL_GPIO_OUTPUT_12 ) ; <nl> + am_hal_gpio_pinconfig ( AM_BSP_GPIO_LED_GREEN , g_AM_HAL_GPIO_OUTPUT_12 ) ; <nl> + am_hal_gpio_pinconfig ( AM_BSP_GPIO_LED_YELLOW , g_AM_HAL_GPIO_OUTPUT_12 ) ; <nl> + is_initialized = true ; <nl> + } <nl> + <nl> + / / Toggle the blue LED every time an inference is performed . <nl> + static int count = 0 ; <nl> + if ( + + count & 1 ) { <nl> + am_hal_gpio_output_set ( AM_BSP_GPIO_LED_BLUE ) ; <nl> + } else { <nl> + am_hal_gpio_output_clear ( AM_BSP_GPIO_LED_BLUE ) ; <nl> + } <nl> + <nl> + / / Turn on the green LED if a person was detected . Turn on the yellow LED <nl> + / / otherwise . <nl> + am_hal_gpio_output_clear ( AM_BSP_GPIO_LED_YELLOW ) ; <nl> + am_hal_gpio_output_clear ( AM_BSP_GPIO_LED_GREEN ) ; <nl> + if ( person_score > no_person_score ) { <nl> + am_hal_gpio_output_set ( AM_BSP_GPIO_LED_GREEN ) ; <nl> + } else { <nl> + am_hal_gpio_output_set ( AM_BSP_GPIO_LED_YELLOW ) ; <nl> + } <nl> + <nl> + error_reporter - > Report ( " person score : % d no person score % d " , person_score , <nl> + no_person_score ) ; <nl> + } <nl>
Add detection_responder which allows each platform to process the person detection output in its own way . For example , sparkfun_edge lights up the yellow LED for no person and the green LED for person , and toggles the blue LED on each run .
tensorflow/tensorflow
3526f05b16ed8ab00f4287b62b8b49589fbf7971
2019-07-11T17:58:43Z
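The commit above splits detection handling into a RespondToDetection hook with a logging reference implementation and a SparkFun Edge implementation that drives LEDs. The sketch below shows the same one-declaration, per-platform-definition pattern in isolation; it uses std::printf instead of tflite::ErrorReporter and is not the TensorFlow Lite Micro API.

// Sketch: the application loop depends only on the hook's declaration; each
// platform links its own definition (log scores here, toggle LEDs on a board).
#include <cstdint>
#include <cstdio>

void RespondToDetection(std::uint8_t person_score, std::uint8_t no_person_score);

// Reference definition: report the scores. A board-specific build would
// replace this with one that, for example, lights a green LED when
// person_score > no_person_score and a yellow LED otherwise.
void RespondToDetection(std::uint8_t person_score, std::uint8_t no_person_score) {
    std::printf("person score: %d no person score %d\n",
                static_cast<int>(person_score), static_cast<int>(no_person_score));
}

int main() {
    RespondToDetection(200, 100);  // "person" case
    RespondToDetection(100, 200);  // "no person" case
    return 0;
}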
mmm a / xbmc / cores / VideoPlayer / VideoPlayer . cpp <nl> ppp b / xbmc / cores / VideoPlayer / VideoPlayer . cpp <nl> void CVideoPlayer : : HandlePlaySpeed ( ) <nl> if ( m_CurrentVideo . starttime ! = DVD_NOPTS_VALUE & & ( m_CurrentVideo . packets > 0 ) ) <nl> { <nl> if ( m_CurrentVideo . starttime - m_CurrentVideo . cachetotal < clock ) <nl> + { <nl> clock = m_CurrentVideo . starttime - m_CurrentVideo . cachetotal ; <nl> - else if ( m_CurrentVideo . starttime > m_CurrentAudio . starttime ) <nl> + } <nl> + else if ( m_CurrentVideo . starttime > m_CurrentAudio . starttime & & <nl> + ! m_pInputStream - > IsRealtime ( ) ) <nl> { <nl> int audioLevel = m_VideoPlayerAudio - > GetLevel ( ) ; <nl> / / @ todo hardcoded 8 seconds in message queue <nl>
VideoPlayer : fix starttime of realtime streams
xbmc/xbmc
0f845a341b414bb5d0d42a5db99135fa04c43c8d
2018-07-18T10:37:49Z
mmm a / src / heap / spaces . cc <nl> ppp b / src / heap / spaces . cc <nl> Page * Page : : ConvertNewToOld ( Page * old_page ) { <nl> return new_page ; <nl> } <nl> <nl> - / / Commit MemoryChunk area to the requested size . <nl> - bool MemoryChunk : : CommitArea ( size_t requested ) { <nl> - size_t guard_size = <nl> - IsFlagSet ( IS_EXECUTABLE ) ? MemoryAllocator : : CodePageGuardSize ( ) : 0 ; <nl> - size_t header_size = area_start ( ) - address ( ) - guard_size ; <nl> - size_t commit_size = <nl> - : : RoundUp ( header_size + requested , MemoryAllocator : : GetCommitPageSize ( ) ) ; <nl> - size_t committed_size = : : RoundUp ( header_size + ( area_end ( ) - area_start ( ) ) , <nl> - MemoryAllocator : : GetCommitPageSize ( ) ) ; <nl> - <nl> - if ( commit_size > committed_size ) { <nl> - / / Commit size should be less or equal than the reserved size . <nl> - DCHECK ( commit_size < = size ( ) - 2 * guard_size ) ; <nl> - / / Append the committed area . <nl> - Address start = address ( ) + committed_size + guard_size ; <nl> - size_t length = commit_size - committed_size ; <nl> - if ( reservation_ . IsReserved ( ) ) { <nl> - Executability executable = <nl> - IsFlagSet ( IS_EXECUTABLE ) ? EXECUTABLE : NOT_EXECUTABLE ; <nl> - if ( ! heap ( ) - > memory_allocator ( ) - > CommitMemory ( start , length , <nl> - executable ) ) { <nl> - return false ; <nl> - } <nl> - } else { <nl> - CodeRange * code_range = heap_ - > memory_allocator ( ) - > code_range ( ) ; <nl> - DCHECK ( code_range - > valid ( ) & & IsFlagSet ( IS_EXECUTABLE ) ) ; <nl> - if ( ! code_range - > CommitRawMemory ( start , length ) ) return false ; <nl> - } <nl> - <nl> - if ( Heap : : ShouldZapGarbage ( ) ) { <nl> - heap_ - > memory_allocator ( ) - > ZapBlock ( start , length ) ; <nl> - } <nl> - } else if ( commit_size < committed_size ) { <nl> - DCHECK_LT ( 0 , commit_size ) ; <nl> - / / Shrink the committed area . <nl> - size_t length = committed_size - commit_size ; <nl> - Address start = address ( ) + committed_size + guard_size - length ; <nl> - if ( reservation_ . IsReserved ( ) ) { <nl> - if ( ! reservation_ . Uncommit ( start , length ) ) return false ; <nl> - } else { <nl> - CodeRange * code_range = heap_ - > memory_allocator ( ) - > code_range ( ) ; <nl> - DCHECK ( code_range - > valid ( ) & & IsFlagSet ( IS_EXECUTABLE ) ) ; <nl> - if ( ! code_range - > UncommitRawMemory ( start , length ) ) return false ; <nl> - } <nl> - } <nl> - <nl> - area_end_ = area_start_ + requested ; <nl> - return true ; <nl> - } <nl> - <nl> size_t MemoryChunk : : CommittedPhysicalMemory ( ) { <nl> if ( ! base : : OS : : HasLazyCommits ( ) | | owner ( ) - > identity ( ) = = LO_SPACE ) <nl> return size ( ) ; <nl> mmm a / src / heap / spaces . h <nl> ppp b / src / heap / spaces . h <nl> class MemoryChunk { <nl> Address area_end ( ) { return area_end_ ; } <nl> size_t area_size ( ) { return static_cast < size_t > ( area_end ( ) - area_start ( ) ) ; } <nl> <nl> - bool CommitArea ( size_t requested ) ; <nl> - <nl> / / Approximate amount of physical memory committed for this chunk . <nl> size_t CommittedPhysicalMemory ( ) ; <nl> <nl> mmm a / test / cctest / heap / test - spaces . cc <nl> ppp b / test / cctest / heap / test - spaces . 
cc <nl> static void VerifyMemoryChunk ( Isolate * isolate , <nl> CodeRange * code_range , <nl> size_t reserve_area_size , <nl> size_t commit_area_size , <nl> - size_t second_commit_area_size , <nl> Executability executable ) { <nl> MemoryAllocator * memory_allocator = new MemoryAllocator ( isolate ) ; <nl> CHECK ( memory_allocator - > SetUp ( heap - > MaxReserved ( ) , 0 ) ) ; <nl> static void VerifyMemoryChunk ( Isolate * isolate , <nl> memory_chunk - > address ( ) + memory_chunk - > size ( ) ) ; <nl> CHECK ( static_cast < size_t > ( memory_chunk - > area_size ( ) ) = = commit_area_size ) ; <nl> <nl> - Address area_start = memory_chunk - > area_start ( ) ; <nl> - <nl> - memory_chunk - > CommitArea ( second_commit_area_size ) ; <nl> - CHECK ( area_start = = memory_chunk - > area_start ( ) ) ; <nl> - CHECK ( memory_chunk - > area_start ( ) < <nl> - memory_chunk - > address ( ) + memory_chunk - > size ( ) ) ; <nl> - CHECK ( memory_chunk - > area_end ( ) < = <nl> - memory_chunk - > address ( ) + memory_chunk - > size ( ) ) ; <nl> - CHECK ( static_cast < size_t > ( memory_chunk - > area_size ( ) ) = = <nl> - second_commit_area_size ) ; <nl> - <nl> memory_allocator - > Free < MemoryAllocator : : kFull > ( memory_chunk ) ; <nl> } <nl> memory_allocator - > TearDown ( ) ; <nl> TEST ( MemoryChunk ) { <nl> Heap * heap = isolate - > heap ( ) ; <nl> <nl> size_t reserve_area_size = 1 * MB ; <nl> - size_t initial_commit_area_size , second_commit_area_size ; <nl> + size_t initial_commit_area_size ; <nl> <nl> for ( int i = 0 ; i < 100 ; i + + ) { <nl> initial_commit_area_size = PseudorandomAreaSize ( ) ; <nl> - second_commit_area_size = PseudorandomAreaSize ( ) ; <nl> <nl> / / With CodeRange . <nl> CodeRange * code_range = new CodeRange ( isolate ) ; <nl> TEST ( MemoryChunk ) { <nl> code_range , <nl> reserve_area_size , <nl> initial_commit_area_size , <nl> - second_commit_area_size , <nl> EXECUTABLE ) ; <nl> <nl> VerifyMemoryChunk ( isolate , <nl> TEST ( MemoryChunk ) { <nl> code_range , <nl> reserve_area_size , <nl> initial_commit_area_size , <nl> - second_commit_area_size , <nl> NOT_EXECUTABLE ) ; <nl> delete code_range ; <nl> <nl> TEST ( MemoryChunk ) { <nl> code_range , <nl> reserve_area_size , <nl> initial_commit_area_size , <nl> - second_commit_area_size , <nl> EXECUTABLE ) ; <nl> <nl> VerifyMemoryChunk ( isolate , <nl> TEST ( MemoryChunk ) { <nl> code_range , <nl> reserve_area_size , <nl> initial_commit_area_size , <nl> - second_commit_area_size , <nl> NOT_EXECUTABLE ) ; <nl> delete code_range ; <nl> } <nl>
[ heap ] Remove dead { MemoryChunk : : CommitArea } method .
v8/v8
d6f0db8b7bfdd9ed7f8f95971887b23bec3beb8c
2017-11-10T09:53:23Z
new file mode 100644 <nl> index 00000000000 . . 57ad0683099 <nl> mmm / dev / null <nl> ppp b / modules / imgproc / test / test_fitellipse . cpp <nl> <nl> + / / This file is part of OpenCV project . <nl> + / / It is subject to the license terms in the LICENSE file found in the top - level directory <nl> + / / of this distribution and at http : / / opencv . org / license . html . <nl> + / / <nl> + / / Copyright ( C ) 2016 , Itseez , Inc , all rights reserved . <nl> + <nl> + # include " test_precomp . hpp " <nl> + # include < vector > <nl> + # include < cmath > <nl> + <nl> + using namespace cv ; <nl> + using namespace std ; <nl> + <nl> + / / return true if point lies inside ellipse <nl> + static bool check_pt_in_ellipse ( const Point2f & pt , const RotatedRect & el ) { <nl> + Point2f to_pt = pt - el . center ; <nl> + double pt_angle = atan2 ( to_pt . y , to_pt . x ) ; <nl> + double el_angle = el . angle * CV_PI / 180 ; <nl> + double x_dist = 0 . 5 * el . size . width * cos ( pt_angle + el_angle ) ; <nl> + double y_dist = 0 . 5 * el . size . height * sin ( pt_angle + el_angle ) ; <nl> + double el_dist = sqrt ( x_dist * x_dist + y_dist * y_dist ) ; <nl> + return norm ( to_pt ) < el_dist ; <nl> + } <nl> + <nl> + / / Return true if mass center of fitted points lies inside ellipse <nl> + static bool fit_and_check_ellipse ( const vector < Point2f > & pts ) { <nl> + RotatedRect ellipse = fitEllipse ( pts ) ; <nl> + <nl> + Point2f mass_center ; <nl> + for ( size_t i = 0 ; i < pts . size ( ) ; i + + ) { <nl> + mass_center + = pts [ i ] ; <nl> + } <nl> + mass_center / = ( float ) pts . size ( ) ; <nl> + <nl> + return check_pt_in_ellipse ( mass_center , ellipse ) ; <nl> + } <nl> + <nl> + TEST ( Imgproc_FitEllipse_Issue_4515 , DISABLED_accuracy ) { <nl> + vector < Point2f > pts ; <nl> + pts . push_back ( Point2f ( 327 , 317 ) ) ; <nl> + pts . push_back ( Point2f ( 328 , 316 ) ) ; <nl> + pts . push_back ( Point2f ( 329 , 315 ) ) ; <nl> + pts . push_back ( Point2f ( 330 , 314 ) ) ; <nl> + pts . push_back ( Point2f ( 331 , 314 ) ) ; <nl> + pts . push_back ( Point2f ( 332 , 314 ) ) ; <nl> + pts . push_back ( Point2f ( 333 , 315 ) ) ; <nl> + pts . push_back ( Point2f ( 333 , 316 ) ) ; <nl> + pts . push_back ( Point2f ( 333 , 317 ) ) ; <nl> + pts . push_back ( Point2f ( 333 , 318 ) ) ; <nl> + pts . push_back ( Point2f ( 333 , 319 ) ) ; <nl> + pts . push_back ( Point2f ( 333 , 320 ) ) ; <nl> + <nl> + EXPECT_TRUE ( fit_and_check_ellipse ( pts ) ) ; <nl> + } <nl> + <nl> + TEST ( Imgproc_FitEllipse_Issue_6544 , DISABLED_accuracy ) { <nl> + vector < Point2f > pts ; <nl> + pts . push_back ( Point2f ( 924 . 784f , 764 . 160f ) ) ; <nl> + pts . push_back ( Point2f ( 928 . 388f , 615 . 903f ) ) ; <nl> + pts . push_back ( Point2f ( 847 . 4f , 888 . 014f ) ) ; <nl> + pts . push_back ( Point2f ( 929 . 406f , 741 . 675f ) ) ; <nl> + pts . push_back ( Point2f ( 904 . 564f , 825 . 605f ) ) ; <nl> + pts . push_back ( Point2f ( 926 . 742f , 760 . 746f ) ) ; <nl> + pts . push_back ( Point2f ( 863 . 479f , 873 . 406f ) ) ; <nl> + pts . push_back ( Point2f ( 910 . 987f , 808 . 863f ) ) ; <nl> + pts . push_back ( Point2f ( 929 . 145f , 744 . 976f ) ) ; <nl> + pts . push_back ( Point2f ( 917 . 474f , 791 . 823f ) ) ; <nl> + <nl> + EXPECT_TRUE ( fit_and_check_ellipse ( pts ) ) ; <nl> + } <nl>
Added tests for issues # , 6544
opencv/opencv
2deda0e868d87e0e5a5be3eccd8539073881b0c6
2016-07-18T07:01:13Z
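The new OpenCV test above passes when the mass centre of the fitted points falls inside the returned ellipse. For reference, the standard point-in-rotated-ellipse check in canonical form is sketched below; Point2f and Ellipse here are small stand-ins defined for this note, not cv::Point2f or cv::RotatedRect, and the helper is not the one used in the test file.

// Sketch: a point lies inside a rotated ellipse when, expressed in the
// ellipse's own axes, (x'/a)^2 + (y'/b)^2 <= 1.
#include <cmath>
#include <iostream>

struct Point2f { float x, y; };
struct Ellipse { Point2f center; float width, height, angle_deg; };

bool pointInsideEllipse(const Point2f& pt, const Ellipse& el) {
    const double kPi = 3.14159265358979323846;
    const double a = 0.5 * el.width;                 // semi-axis along ellipse x
    const double b = 0.5 * el.height;                // semi-axis along ellipse y
    const double theta = el.angle_deg * kPi / 180.0;
    const double dx = pt.x - el.center.x;
    const double dy = pt.y - el.center.y;
    const double xp =  dx * std::cos(theta) + dy * std::sin(theta);  // rotate into
    const double yp = -dx * std::sin(theta) + dy * std::cos(theta);  // ellipse frame
    return (xp * xp) / (a * a) + (yp * yp) / (b * b) <= 1.0;
}

int main() {
    const Ellipse el{{0.f, 0.f}, 10.f, 4.f, 30.f};
    std::cout << pointInsideEllipse({1.f, 0.f}, el) << "\n";   // expect 1 (inside)
    std::cout << pointInsideEllipse({0.f, 10.f}, el) << "\n";  // expect 0 (outside)
    return 0;
}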
mmm a / aten / src / ATen / InferSize . h <nl> ppp b / aten / src / ATen / InferSize . h <nl> <nl> # pragma once <nl> <nl> - # include < ATen / optional . h > <nl> # include < ATen / ScalarType . h > <nl> + # include < c10 / util / Optional . h > <nl> # include < sstream > <nl> # include < vector > <nl> <nl> mmm a / aten / src / ATen / core / TensorOptions . h <nl> ppp b / aten / src / ATen / core / TensorOptions . h <nl> struct CAFFE2_API TensorOptions { <nl> <nl> / / / Return a copy of ` TensorOptions ` with ` device ` set to the given one , or <nl> / / / cleared if ` device ` is ` nullopt ` . <nl> - C10_NODISCARD TensorOptions device ( optional < Device > device ) const noexcept { <nl> + C10_NODISCARD TensorOptions device ( c10 : : optional < Device > device ) const <nl> + noexcept { <nl> TensorOptions r = * this ; <nl> r . set_device ( device ) ; <nl> return r ; <nl> struct CAFFE2_API TensorOptions { <nl> } <nl> <nl> / / / Return a copy of ` TensorOptions ` with ` dtype ` set to the given one . <nl> - C10_NODISCARD TensorOptions dtype ( optional < ScalarType > dtype ) const noexcept { <nl> + C10_NODISCARD TensorOptions dtype ( c10 : : optional < ScalarType > dtype ) const <nl> + noexcept { <nl> TensorOptions r = * this ; <nl> r . set_dtype ( dtype ) ; <nl> return r ; <nl> struct CAFFE2_API TensorOptions { <nl> } <nl> <nl> / / / Sets the layout of the ` TensorOptions ` . <nl> - C10_NODISCARD TensorOptions layout ( optional < Layout > layout ) const noexcept { <nl> + C10_NODISCARD TensorOptions layout ( c10 : : optional < Layout > layout ) const <nl> + noexcept { <nl> TensorOptions r = * this ; <nl> r . set_layout ( layout ) ; <nl> return r ; <nl> } <nl> <nl> / / / Sets the ` requires_grad ` property of the ` TensorOptions ` . <nl> - C10_NODISCARD TensorOptions requires_grad ( optional < bool > requires_grad ) const noexcept { <nl> + C10_NODISCARD TensorOptions <nl> + requires_grad ( c10 : : optional < bool > requires_grad ) const noexcept { <nl> TensorOptions r = * this ; <nl> r . set_requires_grad ( requires_grad ) ; <nl> return r ; <nl> } <nl> <nl> / / / Sets the ` is_variable ` property on the ` TensorOptions ` . <nl> - C10_NODISCARD TensorOptions is_variable ( optional < bool > is_variable ) const noexcept { <nl> + C10_NODISCARD TensorOptions is_variable ( c10 : : optional < bool > is_variable ) const <nl> + noexcept { <nl> TensorOptions r = * this ; <nl> r . set_is_variable ( is_variable ) ; <nl> return r ; <nl> struct CAFFE2_API TensorOptions { <nl> <nl> / / / Returns the device of the ` TensorOptions ` , or ` c10 : : nullopt ` if <nl> / / / device is not specified . <nl> - optional < Device > device_opt ( ) const noexcept { <nl> + c10 : : optional < Device > device_opt ( ) const noexcept { <nl> return has_device_ ? c10 : : make_optional ( device_ ) : c10 : : nullopt ; <nl> } <nl> <nl> struct CAFFE2_API TensorOptions { <nl> <nl> / / / Returns the dtype of the ` TensorOptions ` , or ` c10 : : nullopt ` if <nl> / / / device is not specified . <nl> - optional < ScalarType > dtype_opt ( ) const noexcept { <nl> + c10 : : optional < ScalarType > dtype_opt ( ) const noexcept { <nl> return has_dtype_ ? c10 : : make_optional ( dtype_ ) : c10 : : nullopt ; <nl> } <nl> <nl> struct CAFFE2_API TensorOptions { <nl> <nl> / / / Returns the layout of the ` TensorOptions ` , or ` c10 : : nullopt ` if <nl> / / / layout is not specified . <nl> - optional < Layout > layout_opt ( ) const noexcept { <nl> + c10 : : optional < Layout > layout_opt ( ) const noexcept { <nl> return has_layout_ ? 
c10 : : make_optional ( layout_ ) : c10 : : nullopt ; <nl> } <nl> <nl> struct CAFFE2_API TensorOptions { <nl> <nl> / / / Returns the ` requires_grad ` property of the ` TensorOptions ` , or <nl> / / / ` c10 : : nullopt ` if ` requires_grad ` is not specified . <nl> - optional < bool > requires_grad_opt ( ) const noexcept { <nl> + c10 : : optional < bool > requires_grad_opt ( ) const noexcept { <nl> return has_requires_grad_ ? c10 : : make_optional ( requires_grad_ ) <nl> : c10 : : nullopt ; <nl> } <nl> struct CAFFE2_API TensorOptions { <nl> <nl> / / / Returns the ` is_variable ` property of the ` TensorOptions ` , or <nl> / / / ` c10 : : nullopt ` if ` is_variable ` is not specified . <nl> - optional < bool > is_variable_opt ( ) const noexcept { <nl> + c10 : : optional < bool > is_variable_opt ( ) const noexcept { <nl> return has_is_variable_ ? c10 : : make_optional ( is_variable_ ) : c10 : : nullopt ; <nl> } <nl> <nl> struct CAFFE2_API TensorOptions { <nl> / / on temporaries . ) <nl> <nl> / / / Mutably set the device of ` TensorOptions ` . <nl> - void set_device ( optional < Device > device ) & noexcept { <nl> + void set_device ( c10 : : optional < Device > device ) & noexcept { <nl> if ( device ) { <nl> device_ = * device ; <nl> has_device_ = true ; <nl> struct CAFFE2_API TensorOptions { <nl> } <nl> <nl> / / / Mutably set the dtype of ` TensorOptions ` . <nl> - void set_dtype ( optional < ScalarType > dtype ) & noexcept { <nl> + void set_dtype ( c10 : : optional < ScalarType > dtype ) & noexcept { <nl> if ( dtype ) { <nl> dtype_ = * dtype ; <nl> has_dtype_ = true ; <nl> struct CAFFE2_API TensorOptions { <nl> } <nl> <nl> / / / Mutably set the layout of ` TensorOptions ` . <nl> - void set_layout ( optional < Layout > layout ) & noexcept { <nl> + void set_layout ( c10 : : optional < Layout > layout ) & noexcept { <nl> if ( layout ) { <nl> layout_ = * layout ; <nl> has_layout_ = true ; <nl> struct CAFFE2_API TensorOptions { <nl> } <nl> <nl> / / / Mutably set the ` requires_grad ` property of ` TensorOptions ` . <nl> - void set_requires_grad ( optional < bool > requires_grad ) & noexcept { <nl> + void set_requires_grad ( c10 : : optional < bool > requires_grad ) & noexcept { <nl> if ( requires_grad ) { <nl> requires_grad_ = * requires_grad ; <nl> has_requires_grad_ = true ; <nl> struct CAFFE2_API TensorOptions { <nl> } <nl> <nl> / / / Mutably set the ` is_variable ` property of ` TensorOptions ` . <nl> - void set_is_variable ( optional < bool > is_variable ) & noexcept { <nl> + void set_is_variable ( c10 : : optional < bool > is_variable ) & noexcept { <nl> if ( is_variable ) { <nl> is_variable_ = * is_variable ; <nl> has_is_variable_ = true ; <nl> mmm a / aten / src / ATen / core / Type . h <nl> ppp b / aten / src / ATen / core / Type . h <nl> struct CAFFE2_API Type { <nl> return backendToDeviceType ( backend ( ) ) ; <nl> } <nl> <nl> - virtual Tensor copy ( const Tensor & src , bool non_blocking = false , optional < Device > to_device = { } ) const = 0 ; <nl> + virtual Tensor copy ( <nl> + const Tensor & src , <nl> + bool non_blocking = false , <nl> + c10 : : optional < Device > to_device = { } ) const = 0 ; <nl> virtual Tensor & copy_ ( Tensor & self , const Tensor & src , bool non_blocking = false ) const = 0 ; <nl> virtual Tensor & s_copy_ ( Tensor & self , const Tensor & src , bool non_blocking ) const = 0 ; <nl> virtual Tensor & _s_copy_from ( const Tensor & self , Tensor & dst , bool non_blocking ) const = 0 ; <nl> mmm a / aten / src / ATen / native / ReduceOps . 
cpp <nl> ppp b / aten / src / ATen / native / ReduceOps . cpp <nl> DEFINE_DISPATCH ( sum_kernel ) ; <nl> DEFINE_DISPATCH ( prod_kernel ) ; <nl> DEFINE_DISPATCH ( norm_kernel ) ; <nl> <nl> - static inline Tensor integer_upcast ( const Tensor & self , optional < ScalarType > dtype ) { <nl> + static inline Tensor integer_upcast ( <nl> + const Tensor & self , <nl> + c10 : : optional < ScalarType > dtype ) { <nl> ScalarType scalarType = self . type ( ) . scalarType ( ) ; <nl> ScalarType upcast_scalarType = dtype . value_or ( at : : isIntegralType ( scalarType ) ? ScalarType : : Long : scalarType ) ; <nl> return self . toType ( upcast_scalarType ) ; <nl> } <nl> <nl> - static inline Tensor cumsum ( const Tensor & self , int64_t dim , optional < ScalarType > dtype ) { <nl> + static inline Tensor cumsum ( <nl> + const Tensor & self , <nl> + int64_t dim , <nl> + c10 : : optional < ScalarType > dtype ) { <nl> return at : : _cumsum ( integer_upcast ( self , dtype ) , dim ) ; <nl> } <nl> <nl> Tensor cumsum ( const Tensor & self , int64_t dim , ScalarType dtype ) { <nl> - return at : : native : : cumsum ( self , dim , optional < ScalarType > ( dtype ) ) ; <nl> + return at : : native : : cumsum ( self , dim , c10 : : optional < ScalarType > ( dtype ) ) ; <nl> } <nl> <nl> Tensor cumsum ( const Tensor & self , int64_t dim ) { <nl> return at : : native : : cumsum ( self , dim , c10 : : nullopt ) ; <nl> } <nl> <nl> - static inline Tensor & cumsum_out ( Tensor & result , const Tensor & self , int64_t dim , optional < ScalarType > dtype ) { <nl> + static inline Tensor & cumsum_out ( <nl> + Tensor & result , <nl> + const Tensor & self , <nl> + int64_t dim , <nl> + c10 : : optional < ScalarType > dtype ) { <nl> / / result type is favored over dtype ; check that they match if provided ( NumPy doesn ' t check ) <nl> AT_CHECK ( <nl> ! dtype . has_value ( ) | | ( result . type ( ) . scalarType ( ) = = dtype . 
value ( ) ) , <nl> static inline Tensor & cumsum_out ( Tensor & result , const Tensor & self , int64_t dim <nl> } <nl> <nl> Tensor & cumsum_out ( Tensor & result , const Tensor & self , int64_t dim , ScalarType dtype ) { <nl> - return at : : native : : cumsum_out ( result , self , dim , optional < ScalarType > ( dtype ) ) ; <nl> + return at : : native : : cumsum_out ( <nl> + result , self , dim , c10 : : optional < ScalarType > ( dtype ) ) ; <nl> } <nl> <nl> Tensor & cumsum_out ( Tensor & result , const Tensor & self , int64_t dim ) { <nl> return at : : native : : cumsum_out ( result , self , dim , c10 : : nullopt ) ; <nl> } <nl> <nl> - static inline Tensor cumprod ( const Tensor & self , int64_t dim , optional < ScalarType > dtype ) { <nl> + static inline Tensor cumprod ( <nl> + const Tensor & self , <nl> + int64_t dim , <nl> + c10 : : optional < ScalarType > dtype ) { <nl> return at : : _cumprod ( integer_upcast ( self , dtype ) , dim ) ; <nl> } <nl> <nl> Tensor cumprod ( const Tensor & self , int64_t dim , ScalarType dtype ) { <nl> - return at : : native : : cumprod ( self , dim , optional < ScalarType > ( dtype ) ) ; <nl> + return at : : native : : cumprod ( self , dim , c10 : : optional < ScalarType > ( dtype ) ) ; <nl> } <nl> <nl> Tensor cumprod ( const Tensor & self , int64_t dim ) { <nl> return at : : native : : cumprod ( self , dim , c10 : : nullopt ) ; <nl> } <nl> <nl> - static inline Tensor & cumprod_out ( Tensor & result , const Tensor & self , int64_t dim , optional < ScalarType > dtype ) { <nl> + static inline Tensor & cumprod_out ( <nl> + Tensor & result , <nl> + const Tensor & self , <nl> + int64_t dim , <nl> + c10 : : optional < ScalarType > dtype ) { <nl> / / result type is favored over dtype ; check that they match if provided ( NumPy doesn ' t check ) <nl> AT_CHECK ( <nl> ! dtype . has_value ( ) | | ( result . type ( ) . scalarType ( ) = = dtype . value ( ) ) , <nl> static inline Tensor & cumprod_out ( Tensor & result , const Tensor & self , int64_t di <nl> } <nl> <nl> Tensor & cumprod_out ( Tensor & result , const Tensor & self , int64_t dim , ScalarType dtype ) { <nl> - return at : : native : : cumprod_out ( result , self , dim , optional < ScalarType > ( dtype ) ) ; <nl> + return at : : native : : cumprod_out ( <nl> + result , self , dim , c10 : : optional < ScalarType > ( dtype ) ) ; <nl> } <nl> <nl> Tensor & cumprod_out ( Tensor & result , const Tensor & self , int64_t dim ) { <nl> Tensor & cumprod_out ( Tensor & result , const Tensor & self , int64_t dim ) { <nl> <nl> / / ALL REDUCE # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> <nl> - static inline Tensor mean ( const Tensor & self , optional < ScalarType > dtype ) { <nl> + static inline Tensor mean ( const Tensor & self , c10 : : optional < ScalarType > dtype ) { <nl> ScalarType scalarType = self . type ( ) . 
scalarType ( ) ; <nl> AT_CHECK ( <nl> at : : isFloatingType ( scalarType ) , <nl> static inline Tensor mean ( const Tensor & self , optional < ScalarType > dtype ) { <nl> } <nl> <nl> Tensor mean ( const Tensor & self , ScalarType dtype ) { <nl> - return at : : native : : mean ( self , optional < ScalarType > ( dtype ) ) ; <nl> + return at : : native : : mean ( self , c10 : : optional < ScalarType > ( dtype ) ) ; <nl> } <nl> <nl> Tensor mean ( const Tensor & self ) { <nl> return at : : native : : mean ( self , c10 : : nullopt ) ; <nl> } <nl> <nl> - static inline Tensor sum ( const Tensor & self , optional < ScalarType > dtype ) { <nl> + static inline Tensor sum ( const Tensor & self , c10 : : optional < ScalarType > dtype ) { <nl> return at : : _sum ( integer_upcast ( self , dtype ) ) ; <nl> } <nl> <nl> Tensor sum ( const Tensor & self , ScalarType dtype ) { <nl> - return at : : native : : sum ( self , optional < ScalarType > ( dtype ) ) ; <nl> + return at : : native : : sum ( self , c10 : : optional < ScalarType > ( dtype ) ) ; <nl> } <nl> <nl> Tensor sum ( const Tensor & self ) { <nl> Tensor _sum_cpu ( const Tensor & self ) { <nl> return at : : _sumall ( self ) ; <nl> } <nl> <nl> - static inline Tensor prod ( const Tensor & self , optional < ScalarType > dtype ) { <nl> + static inline Tensor prod ( const Tensor & self , c10 : : optional < ScalarType > dtype ) { <nl> return at : : _prod ( integer_upcast ( self , dtype ) ) ; <nl> } <nl> <nl> Tensor prod ( const Tensor & self , ScalarType dtype ) { <nl> - return at : : native : : prod ( self , optional < ScalarType > ( dtype ) ) ; <nl> + return at : : native : : prod ( self , c10 : : optional < ScalarType > ( dtype ) ) ; <nl> } <nl> <nl> Tensor prod ( const Tensor & self ) { <nl> Tensor _prod_cpu ( const Tensor & self ) { <nl> <nl> / / DIM REDUCE # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # <nl> <nl> - static inline Tensor & mean_out ( Tensor & result , const Tensor & self , int64_t dim , <nl> - bool keepdim , optional < ScalarType > dtype ) { <nl> + static inline Tensor & mean_out ( <nl> + Tensor & result , <nl> + const Tensor & self , <nl> + int64_t dim , <nl> + bool keepdim , <nl> + c10 : : optional < ScalarType > dtype ) { <nl> ScalarType scalarType = result . type ( ) . scalarType ( ) ; <nl> AT_CHECK ( <nl> at : : isFloatingType ( scalarType ) , <nl> Tensor & mean_out ( Tensor & result , const Tensor & self , int64_t dim , ScalarType dty <nl> return at : : native : : mean_out ( result , self , dim , false , dtype ) ; <nl> } <nl> <nl> - static inline Tensor & sum_out ( Tensor & result , const Tensor & self , IntList dim , <nl> - bool keepdim , optional < ScalarType > dtype ) { <nl> + static inline Tensor & sum_out ( <nl> + Tensor & result , <nl> + const Tensor & self , <nl> + IntList dim , <nl> + bool keepdim , <nl> + c10 : : optional < ScalarType > dtype ) { <nl> / / result type is favored over dtype ; check that they match if provided ( NumPy doesn ' t check ) <nl> AT_CHECK ( <nl> ! dtype . has_value ( ) | | ( result . type ( ) . scalarType ( ) = = dtype . 
value ( ) ) , <nl> Tensor & _sum_out_cpu ( Tensor & result , const Tensor & self , int64_t dim_ , <nl> return at : : _th_sum_out ( result , self , dim , keepdim ) ; <nl> } <nl> <nl> - static inline Tensor & prod_out ( Tensor & result , const Tensor & self , int64_t dim , <nl> - bool keepdim , optional < ScalarType > dtype ) { <nl> + static inline Tensor & prod_out ( <nl> + Tensor & result , <nl> + const Tensor & self , <nl> + int64_t dim , <nl> + bool keepdim , <nl> + c10 : : optional < ScalarType > dtype ) { <nl> / / result type is favored over dtype ; check that they match if provided ( NumPy doesn ' t check ) <nl> AT_CHECK ( <nl> ! dtype . has_value ( ) | | ( result . type ( ) . scalarType ( ) = = dtype . value ( ) ) , <nl> Tensor & _prod_out_cpu ( Tensor & result , const Tensor & self , int64_t dim_ , <nl> return at : : _th_prod_out ( result , self , dim , keepdim ) ; <nl> } <nl> <nl> - static inline Tensor mean ( const Tensor & self , int64_t dim , bool keepdim , optional < ScalarType > dtype ) { <nl> + static inline Tensor mean ( <nl> + const Tensor & self , <nl> + int64_t dim , <nl> + bool keepdim , <nl> + c10 : : optional < ScalarType > dtype ) { <nl> ScalarType scalarType = self . type ( ) . scalarType ( ) ; <nl> AT_CHECK ( <nl> at : : isFloatingType ( scalarType ) , <nl> Tensor mean ( const Tensor & self , int64_t dim , ScalarType dtype ) { <nl> return at : : native : : mean ( self , dim , false , dtype ) ; <nl> } <nl> <nl> - static inline Tensor sum ( const Tensor & self , IntList dim_ , bool keepdim , optional < ScalarType > dtype ) { <nl> + static inline Tensor sum ( <nl> + const Tensor & self , <nl> + IntList dim_ , <nl> + bool keepdim , <nl> + c10 : : optional < ScalarType > dtype ) { <nl> return at : : _sum ( integer_upcast ( self , dtype ) , dim_ , keepdim ) ; <nl> } <nl> <nl> Tensor _sum ( const Tensor & self , int64_t dim_ , bool keepdim ) { <nl> return at : : _sum_out ( result , self , dim , keepdim ) ; <nl> } <nl> <nl> - static inline Tensor prod ( const Tensor & self , int64_t dim_ , bool keepdim , optional < ScalarType > dtype ) { <nl> + static inline Tensor prod ( <nl> + const Tensor & self , <nl> + int64_t dim_ , <nl> + bool keepdim , <nl> + c10 : : optional < ScalarType > dtype ) { <nl> return at : : _prod ( integer_upcast ( self , dtype ) , dim_ , keepdim ) ; <nl> } <nl> <nl> mmm a / aten / src / ATen / native / cpu / TensorCompareKernel . cpp <nl> ppp b / aten / src / ATen / native / cpu / TensorCompareKernel . cpp <nl> <nl> <nl> # include " ATen / Dispatch . h " <nl> # include " ATen / Parallel . h " <nl> - # include " ATen / optional . h " <nl> + # include " c10 / util / Optional . h " <nl> <nl> namespace at { namespace native { namespace { <nl> <nl> mmm a / aten / src / ATen / native / cpu / TensorCompareKernel . h <nl> ppp b / aten / src / ATen / native / cpu / TensorCompareKernel . h <nl> <nl> <nl> # include < ATen / ATen . h > <nl> # include < ATen / native / DispatchStub . h > <nl> - # include < ATen / optional . h > <nl> + # include < c10 / util / Optional . h > <nl> <nl> namespace at { namespace native { <nl> <nl> mmm a / aten / src / ATen / native / native_functions . yaml <nl> ppp b / aten / src / ATen / native / native_functions . yaml <nl> <nl> dispatch : <nl> CUDA : cudnn_grid_sampler_backward <nl> <nl> - # FIXME : These could be combined as optional < ScalarType > but for https : / / github . com / pytorch / pytorch / issues / 6593 . <nl> + # FIXME : These could be combined as c10 : : optional < ScalarType > but for https : / / github . 
com / pytorch / pytorch / issues / 6593 . <nl> - func : cumsum ( Tensor self , int64_t dim , * , ScalarType dtype ) - > Tensor <nl> variants : function , method <nl> <nl> <nl> <nl> - func : cumsum_out ( Tensor result , Tensor self , int64_t dim ) - > Tensor <nl> <nl> - # FIXME : These could be combined as optional < ScalarType > but for https : / / github . com / pytorch / pytorch / issues / 6593 . <nl> + # FIXME : These could be combined as c10 : : optional < ScalarType > but for https : / / github . com / pytorch / pytorch / issues / 6593 . <nl> - func : cumprod ( Tensor self , int64_t dim , * , ScalarType dtype ) - > Tensor <nl> variants : function , method <nl> <nl> <nl> <nl> - func : logspace_out ( Tensor result , Scalar start , Scalar end , int64_t steps ) - > Tensor <nl> <nl> - # FIXME : These could be combined as optional < ScalarType > but for https : / / github . com / pytorch / pytorch / issues / 6593 . <nl> + # FIXME : These could be combined as c10 : : optional < ScalarType > but for https : / / github . com / pytorch / pytorch / issues / 6593 . <nl> - func : log_softmax ( Tensor self , int64_t dim , ScalarType dtype ) - > Tensor <nl> variants : function , method <nl> <nl> <nl> <nl> - func : max_pool3d ( Tensor self , IntList [ 1 ] kernel_size , IntList [ 1 ] stride = { } , IntList [ 1 ] padding = 0 , IntList [ 1 ] dilation = 1 , bool ceil_mode = false ) - > Tensor <nl> <nl> - # FIXME : These could be combined as optional < ScalarType > but for https : / / github . com / pytorch / pytorch / issues / 6593 . <nl> + # FIXME : These could be combined as c10 : : optional < ScalarType > but for https : / / github . com / pytorch / pytorch / issues / 6593 . <nl> - func : mean ( Tensor self , * , ScalarType dtype ) - > Tensor <nl> variants : function , method <nl> <nl> <nl> - func : smm ( Tensor self , Tensor mat2 ) - > Tensor <nl> variants : function , method <nl> <nl> - # FIXME : These could be combined as optional < ScalarType > but for https : / / github . com / pytorch / pytorch / issues / 6593 . <nl> + # FIXME : These could be combined as c10 : : optional < ScalarType > but for https : / / github . com / pytorch / pytorch / issues / 6593 . <nl> - func : softmax ( Tensor self , int64_t dim , ScalarType dtype ) - > Tensor <nl> variants : function , method <nl> <nl> <nl> variants : function , method <nl> device_guard : false <nl> <nl> - # FIXME : These could be combined as optional < ScalarType > but for https : / / github . com / pytorch / pytorch / issues / 6593 . <nl> + # FIXME : These could be combined as c10 : : optional < ScalarType > but for https : / / github . com / pytorch / pytorch / issues / 6593 . <nl> - func : sum ( Tensor self , * , ScalarType dtype ) - > Tensor <nl> variants : function , method <nl> <nl> <nl> <nl> - func : std_out ( Tensor result , Tensor self , int64_t dim , bool unbiased = true , bool keepdim = false ) - > Tensor <nl> <nl> - # FIXME : These could be combined as optional < ScalarType > but for https : / / github . com / pytorch / pytorch / issues / 6593 . <nl> + # FIXME : These could be combined as c10 : : optional < ScalarType > but for https : / / github . com / pytorch / pytorch / issues / 6593 . <nl> - func : prod ( Tensor self , * , ScalarType dtype ) - > Tensor <nl> variants : function , method <nl> <nl> deleted file mode 100644 <nl> index f1a639e94004 . . 000000000000 <nl> mmm a / aten / src / ATen / optional . h <nl> ppp / dev / null <nl> @ @ - 1 + 0 , 0 @ @ <nl> - # include " c10 / util / Optional . 
h " <nl> mmm a / aten / src / ATen / templates / Type . h <nl> ppp b / aten / src / ATen / templates / Type . h <nl> struct CAFFE2_API Type { <nl> return backendToDeviceType ( backend ( ) ) ; <nl> } <nl> <nl> - virtual Tensor copy ( const Tensor & src , bool non_blocking = false , optional < Device > to_device = { } ) const = 0 ; <nl> + virtual Tensor copy ( <nl> + const Tensor & src , <nl> + bool non_blocking = false , <nl> + c10 : : optional < Device > to_device = { } ) const = 0 ; <nl> virtual Tensor & copy_ ( Tensor & self , const Tensor & src , bool non_blocking = false ) const = 0 ; <nl> virtual Tensor & s_copy_ ( Tensor & self , const Tensor & src , bool non_blocking ) const = 0 ; <nl> virtual Tensor & _s_copy_from ( const Tensor & self , Tensor & dst , bool non_blocking ) const = 0 ; <nl> mmm a / aten / src / ATen / templates / TypeDefault . cpp <nl> ppp b / aten / src / ATen / templates / TypeDefault . cpp <nl> Tensor & TypeDefault : : copy_ ( Tensor & self , const Tensor & src , bool non_blocking <nl> return s_copy_ ( self , b_src , non_blocking ) ; <nl> } <nl> <nl> - Tensor TypeDefault : : copy ( const Tensor & src , bool non_blocking , optional < Device > to_device ) const { <nl> + Tensor TypeDefault : : copy ( <nl> + const Tensor & src , <nl> + bool non_blocking , <nl> + c10 : : optional < Device > to_device ) const { <nl> DeviceGuard device_guard ; <nl> if ( to_device . has_value ( ) ) { <nl> device_guard . set_index ( to_device . value ( ) . index ( ) ) ; <nl> mmm a / aten / src / ATen / templates / TypeDefault . h <nl> ppp b / aten / src / ATen / templates / TypeDefault . h <nl> struct CAFFE2_API TypeDefault : public TypeExtendedInterface { <nl> Type & toBackend ( Backend b ) const override ; <nl> Type & toScalarType ( ScalarType s ) const override ; <nl> <nl> - Tensor copy ( const Tensor & src , bool non_blocking = false , optional < Device > to_device = { } ) const override ; <nl> + Tensor copy ( <nl> + const Tensor & src , <nl> + bool non_blocking = false , <nl> + c10 : : optional < Device > to_device = { } ) const override ; <nl> Tensor & copy_ ( Tensor & self , const Tensor & src , bool non_blocking = false ) const override ; <nl> <nl> void backward ( <nl> mmm a / aten / src / ATen / test / cuda_optional_test . cu <nl> ppp b / aten / src / ATen / test / cuda_optional_test . cu <nl> <nl> # include " gtest / gtest . h " <nl> <nl> # include " ATen / ATen . h " <nl> - # include " ATen / optional . h " <nl> + # include " c10 / util / Optional . h " <nl> <nl> # include < assert . h > <nl> <nl> mmm a / c10 / util / Optional . h <nl> ppp b / c10 / util / Optional . h <nl> struct hash < c10 : : optional < T & > > { <nl> } ; <nl> } / / namespace std <nl> <nl> - / / TODO : remove at : : optional when done moving files <nl> - namespace at { <nl> - template < class T > <nl> - using optional = c10 : : optional < T > ; <nl> - } <nl> - <nl> # undef TR2_OPTIONAL_REQUIRES <nl> # undef TR2_OPTIONAL_ASSERTED_EXPRESSION <nl> <nl> mmm a / test / cpp / api / memory . cpp <nl> ppp b / test / cpp / api / memory . cpp <nl> <nl> <nl> # include < torch / csrc / utils / memory . h > <nl> <nl> - # include < ATen / optional . h > <nl> + # include " c10 / util / Optional . h " <nl> <nl> struct TestValue { <nl> explicit TestValue ( const int & x ) : lvalue_ ( x ) { } <nl> mmm a / torch / csrc / api / include / torch / data / samplers / random . h <nl> ppp b / torch / csrc / api / include / torch / data / samplers / random . h <nl> <nl> # include < torch / data / samplers / base . 
h > <nl> # include < torch / tensor . h > <nl> <nl> - # include < ATen / optional . h > <nl> + # include " c10 / util / Optional . h " <nl> <nl> # include < algorithm > <nl> # include < cstddef > <nl> mmm a / torch / csrc / api / include / torch / data / samplers / sequential . h <nl> ppp b / torch / csrc / api / include / torch / data / samplers / sequential . h <nl> <nl> # include < torch / data / samplers / base . h > <nl> # include < torch / tensor . h > <nl> <nl> - # include < ATen / optional . h > <nl> + # include " c10 / util / Optional . h " <nl> <nl> # include < algorithm > <nl> # include < cstddef > <nl> mmm a / torch / csrc / autograd / VariableTypeManual . cpp <nl> ppp b / torch / csrc / autograd / VariableTypeManual . cpp <nl> <nl> + # include " c10 / util / Optional . h " <nl> # include " torch / csrc / autograd / VariableTypeUtils . h " <nl> <nl> using namespace at ; <nl> std : : vector < at : : Tensor > VariableType : : unpack ( at : : TensorList tl , const char * name <nl> return ret ; <nl> } <nl> <nl> - void VariableType : : backward ( Tensor & self , at : : optional < Tensor > gradient , bool keep_graph , bool create_graph ) const { <nl> + void VariableType : : backward ( <nl> + Tensor & self , <nl> + c10 : : optional < Tensor > gradient , <nl> + bool keep_graph , <nl> + bool create_graph ) const { <nl> as_variable_ref ( self ) . backward ( gradient , keep_graph , create_graph ) ; <nl> } <nl> <nl> mmm a / torch / csrc / cuda / nccl . h <nl> ppp b / torch / csrc / cuda / nccl . h <nl> void reduce ( <nl> std : : vector < at : : Tensor > & outputs , <nl> int32_t root = 0 , <nl> int32_t op = ncclSum , <nl> - at : : optional < std : : vector < at : : cuda : : CUDAStream > > streams = c10 : : nullopt , <nl> - at : : optional < std : : vector < ncclComm_t > > user_comms = c10 : : nullopt ) ; <nl> + c10 : : optional < std : : vector < at : : cuda : : CUDAStream > > streams = c10 : : nullopt , <nl> + c10 : : optional < std : : vector < ncclComm_t > > user_comms = c10 : : nullopt ) ; <nl> <nl> void reduce ( <nl> std : : vector < at : : Tensor > & inputs , <nl> mmm a / torch / csrc / distributed / c10d / ddp . h <nl> ppp b / torch / csrc / distributed / c10d / ddp . h <nl> <nl> # include < c10d / ProcessGroup . hpp > <nl> <nl> # include < ATen / ATen . h > <nl> - # include < ATen / optional . h > <nl> + # include " c10 / util / Optional . h " <nl> <nl> # include < cstddef > <nl> # include < memory > <nl> mmm a / torch / csrc / jit / script / module . h <nl> ppp b / torch / csrc / jit / script / module . h <nl> struct Module { <nl> void save ( const std : : string & filename ) ; <nl> <nl> private : <nl> - void to_impl ( <nl> - at : : optional < at : : Device > device , <nl> - at : : optional < at : : ScalarType > dtype , <nl> - bool non_blocking ) ; <nl> + void to_impl ( <nl> + c10 : : optional < at : : Device > device , <nl> + c10 : : optional < at : : ScalarType > dtype , <nl> + bool non_blocking ) ; <nl> <nl> / / invariant : to ensure member_inputs of Methods stay valid , <nl> / / it is only legal to _add_ new modules and parameters . <nl>
Remove at : : Optional ( )
pytorch/pytorch
d401dc4374bd5532b3481f389dbe69f6835d5f3a
2018-10-23T07:03:20Z
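The record above drops the transitional at::optional alias in favor of c10::optional across ATen. As a minimal, self-contained sketch of the dtype-defaulting pattern those reduction signatures rely on, using std::optional as a stand-in for c10::optional and an invented resolve_dtype helper that is not part of ATen:

#include <iostream>
#include <optional>

// Stand-in scalar type enum; the real ScalarType lives in ATen/c10.
enum class ScalarType { Float, Double, Long };

// Hypothetical helper (not an ATen function): prefer the caller-supplied dtype,
// otherwise fall back to a default, mirroring the optional<ScalarType> parameters above.
ScalarType resolve_dtype(std::optional<ScalarType> dtype, ScalarType fallback) {
    return dtype.has_value() ? dtype.value() : fallback;
}

int main() {
    ScalarType a = resolve_dtype(std::nullopt, ScalarType::Float);      // falls back to Float
    ScalarType b = resolve_dtype(ScalarType::Long, ScalarType::Float);  // keeps Long
    std::cout << (a == ScalarType::Float) << " " << (b == ScalarType::Long) << "\n";  // prints "1 1"
    return 0;
}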
mmm a / tests / cpp - tests / Classes / LabelTest / LabelTestNew . cpp <nl> ppp b / tests / cpp - tests / Classes / LabelTest / LabelTestNew . cpp <nl> std : : string LabelIssue13846Test : : title ( ) const <nl> <nl> std : : string LabelIssue13846Test : : subtitle ( ) const <nl> { <nl> - return " Test hide label ' s letter , the label should display ‘ 12 45 ’ as expected " ; <nl> + return " Test hide label ' s letter , the label should display ' 12 45 ' as expected " ; <nl> } <nl> <nl> / / <nl>
use ' ' correctly ( )
cocos2d/cocos2d-x
5fec861e27f96417f630c14b378956dbd310da3c
2017-03-20T03:45:17Z
mmm a / arangod / V8Server / v8 - collection . cpp <nl> ppp b / arangod / V8Server / v8 - collection . cpp <nl> static void JS_SaveVocbase ( v8 : : FunctionCallbackInfo < v8 : : Value > const & args ) { <nl> } <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief inserts a document , using a VPack <nl> + / / / @ brief inserts a document , using VPack <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> static void JS_InsertVocbaseVPack ( <nl> static void JS_InsertVocbaseVPack ( <nl> VPackOptions vpackOptions = VPackOptions : : Defaults ; <nl> vpackOptions . attributeExcludeHandler = basics : : VelocyPackHelper : : getExcludeHandler ( ) ; <nl> VPackBuilder builder ( & vpackOptions ) ; <nl> - v8 : : Handle < v8 : : Value > payload = args [ 0 ] ; <nl> + v8 : : Handle < v8 : : Value > payload = args [ docIdx ] ; <nl> <nl> auto doOneDocument = [ & ] ( v8 : : Handle < v8 : : Value > obj ) - > void { <nl> int res = TRI_V8ToVPack ( isolate , builder , obj , true ) ; <nl> - <nl> + <nl> if ( res ! = TRI_ERROR_NO_ERROR ) { <nl> TRI_V8_THROW_EXCEPTION ( res ) ; <nl> } <nl> <nl> if ( isEdgeCollection ) { <nl> / / Just insert from and to . Check is done later . <nl> - std : : string tmpId = ExtractIdString ( isolate , obj ) ; <nl> + std : : string tmpId = ExtractIdString ( isolate , args [ 0 ] ) ; <nl> if ( tmpId . empty ( ) ) { <nl> TRI_V8_THROW_EXCEPTION ( TRI_ERROR_ARANGO_DOCUMENT_HANDLE_BAD ) ; <nl> } <nl>
allow creating edges
arangodb/arangodb
fc44d4af6f6f7953042685ff8234b821a748a932
2016-03-07T20:05:35Z
mmm a / stdlib / public / runtime / CMakeLists . txt <nl> ppp b / stdlib / public / runtime / CMakeLists . txt <nl> if ( SWIFT_BUILD_STATIC_STDLIB AND " $ { sdk } " STREQUAL " LINUX " ) <nl> set_target_properties ( swiftImageInspectionShared PROPERTIES <nl> ARCHIVE_OUTPUT_DIRECTORY " $ { SWIFTSTATICLIB_DIR } / $ { lowercase_sdk } " ) <nl> <nl> + swift_install_in_component ( stdlib <nl> + TARGETS swiftImageInspectionShared <nl> + DESTINATION " lib / swift_static / $ { lowercase_sdk } " ) <nl> + <nl> # Generate the static - executable - args . lnk file used for ELF systems ( eg linux ) <nl> set ( linkfile " $ { lowercase_sdk } / static - executable - args . lnk " ) <nl> add_custom_command_target ( swift_static_binary_ $ { sdk } _args <nl>
Merge pull request from bob - wilson / sr7038
apple/swift
b124a51570c070c11c143391af9b5a8cdfc66d7c
2018-03-01T02:38:44Z
mmm a / tests / test_browser . py <nl> ppp b / tests / test_browser . py <nl> def no_firefox ( note = ' firefox is not supported ' ) : <nl> <nl> <nl> def no_swiftshader ( f ) : <nl> + assert callable ( f ) <nl> + <nl> def decorated ( self ) : <nl> if is_chrome ( ) and ' - - use - gl = swiftshader ' in EMTEST_BROWSER : <nl> self . skipTest ( ' not compatible with swiftshader ' ) <nl> def decorated ( self ) : <nl> <nl> <nl> def requires_threads ( f ) : <nl> + assert callable ( f ) <nl> + <nl> def decorated ( self , * args , * * kwargs ) : <nl> if os . environ . get ( ' EMTEST_LACKS_THREAD_SUPPORT ' ) : <nl> self . skipTest ( ' EMTEST_LACKS_THREAD_SUPPORT is set ' ) <nl> def decorated ( self , * args , * * kwargs ) : <nl> <nl> <nl> def requires_asmfs ( f ) : <nl> + assert callable ( f ) <nl> + <nl> def decorated ( self , * args , * * kwargs ) : <nl> # https : / / github . com / emscripten - core / emscripten / issues / 9534 <nl> self . skipTest ( ' ASMFS is looking for a maintainer ' ) <nl> def test_doublestart_bug ( self ) : <nl> <nl> self . btest ( ' doublestart . c ' , args = [ ' - - pre - js ' , ' pre . js ' , ' - o ' , ' test . html ' ] , expected = ' 1 ' ) <nl> <nl> + @ parameterized ( { <nl> + ' ' : ( [ ] , ) , <nl> + ' closure ' : ( [ ' - O2 ' , ' - g1 ' , ' - - closure ' , ' 1 ' , ' - s ' , ' HTML5_SUPPORT_DEFERRING_USER_SENSITIVE_REQUESTS = 0 ' ] , ) , <nl> + ' pthread ' : ( [ ' - s ' , ' USE_PTHREADS = 1 ' , ' - s ' , ' PROXY_TO_PTHREAD = 1 ' ] , ) , <nl> + ' legacy ' : ( [ ' - s ' , ' MIN_FIREFOX_VERSION = 0 ' , ' - s ' , ' MIN_SAFARI_VERSION = 0 ' , ' - s ' , ' MIN_IE_VERSION = 0 ' , ' - s ' , ' MIN_EDGE_VERSION = 0 ' , ' - s ' , ' MIN_CHROME_VERSION = 0 ' ] , ) <nl> + } ) <nl> @ requires_threads <nl> - def test_html5 ( self ) : <nl> - for opts in [ [ ] , [ ' - O2 ' , ' - g1 ' , ' - - closure ' , ' 1 ' , ' - s ' , ' HTML5_SUPPORT_DEFERRING_USER_SENSITIVE_REQUESTS = 0 ' ] , [ ' - s ' , ' USE_PTHREADS = 1 ' , ' - s ' , ' PROXY_TO_PTHREAD = 1 ' ] , [ ' - s ' , ' MIN_FIREFOX_VERSION = 0 ' , ' - s ' , ' MIN_SAFARI_VERSION = 0 ' , ' - s ' , ' MIN_IE_VERSION = 0 ' , ' - s ' , ' MIN_EDGE_VERSION = 0 ' , ' - s ' , ' MIN_CHROME_VERSION = 0 ' ] ] : <nl> - print ( opts ) <nl> - self . btest ( path_from_root ( ' tests ' , ' test_html5 . c ' ) , args = [ ] + opts , expected = ' 0 ' ) <nl> + def test_html5_core ( self , opts ) : <nl> + self . btest ( path_from_root ( ' tests ' , ' test_html5_core . c ' ) , args = opts , expected = ' 0 ' ) <nl> <nl> @ requires_threads <nl> def test_html5_gamepad ( self ) : <nl> similarity index 100 % <nl> rename from tests / test_html5 . c <nl> rename to tests / test_html5_core . c <nl> mmm a / tests / test_other . py <nl> ppp b / tests / test_other . py <nl> def test_output_to_nowhere ( self ) : <nl> # I . e . - s MIN_X_VERSION = - 1 is equal to - s MIN_X_VERSION = Infinity <nl> def test_drop_support_for_browser ( self ) : <nl> # Test that - 1 means " not supported " <nl> - self . run_process ( [ EMCC , path_from_root ( ' tests ' , ' test_html5 . c ' ) , ' - s ' , ' MIN_IE_VERSION = - 1 ' ] ) <nl> + self . run_process ( [ EMCC , path_from_root ( ' tests ' , ' test_html5_core . c ' ) , ' - s ' , ' MIN_IE_VERSION = - 1 ' ] ) <nl> self . assertContained ( ' allowsDeferredCalls : true ' , open ( ' a . out . js ' ) . read ( ) ) <nl> self . assertNotContained ( ' allowsDeferredCalls : JSEvents . isInternetExplorer ( ) ' , open ( ' a . out . js ' ) . read ( ) ) <nl> <nl>
Parameterize browser . test_html5 ( )
emscripten-core/emscripten
72b0caf40cb56b06a008da57c9b72ce52ba3def5
2020-11-24T16:42:15Z
mmm a / tensorflow / examples / tutorials / estimators / abalone . py <nl> ppp b / tensorflow / examples / tutorials / estimators / abalone . py <nl> <nl> <nl> tf . logging . set_verbosity ( tf . logging . INFO ) <nl> <nl> - FEATURES = [ " len " , " diam " , " height " , " whole_weight " , " shucked_weight " , <nl> - " viscera_weight " , " shell_weight " ] <nl> - <nl> # Learning rate for the model <nl> LEARNING_RATE = 0 . 001 <nl> <nl> <nl> def maybe_download ( ) : <nl> - " " " May be downloads training data and returns train and test file names . " " " <nl> + " " " Maybe downloads training data and returns train and test file names . " " " <nl> if FLAGS . train_data : <nl> train_file_name = FLAGS . train_data <nl> else : <nl>
Removing unused constant in code .
tensorflow/tensorflow
c296310b869518ef28cdcb0736241630001fe7b3
2016-09-21T16:03:07Z
mmm a / lib / Sema / ITCDecl . cpp <nl> ppp b / lib / Sema / ITCDecl . cpp <nl> decomposeInheritedClauseDecl ( <nl> } <nl> } <nl> } <nl> - <nl> - if ( ! isa < EnumDecl > ( typeDecl ) ) { <nl> - options | = TR_NonEnumInheritanceClauseOuterLayer ; <nl> - } <nl> } else { <nl> auto ext = decl . get < ExtensionDecl * > ( ) ; <nl> inheritanceClause = ext - > getInherited ( ) ; <nl> void IterativeTypeChecker : : processInheritedProtocols ( <nl> / / FIXME : We ' d prefer to keep what the user wrote here . <nl> if ( inherited . getType ( ) - > isExistentialType ( ) ) { <nl> auto layout = inherited . getType ( ) - > getExistentialLayout ( ) ; <nl> - assert ( ! layout . superclass & & " Need to redo inheritance clause " <nl> - " typechecking " ) ; <nl> for ( auto inheritedProtocolTy : layout . getProtocols ( ) ) { <nl> auto * inheritedProtocol = inheritedProtocolTy - > getDecl ( ) ; <nl> <nl> mmm a / lib / Sema / TypeCheckType . cpp <nl> ppp b / lib / Sema / TypeCheckType . cpp <nl> Type TypeChecker : : applyGenericArguments ( Type type , TypeDecl * decl , <nl> for ( auto tyR : genericArgs ) <nl> args . push_back ( tyR ) ; <nl> <nl> - auto argumentOptions = options - TR_NonEnumInheritanceClauseOuterLayer ; <nl> auto result = applyUnboundGenericArguments ( unboundType , genericDecl , loc , <nl> - dc , args , argumentOptions , <nl> + dc , args , options , <nl> resolver , unsatisfiedDependency ) ; <nl> if ( ! result ) <nl> return result ; <nl> resolveTopLevelIdentTypeComponent ( TypeChecker & TC , DeclContext * DC , <nl> if ( type - > is < ErrorType > ( ) ) <nl> return type ; <nl> <nl> - if ( options & TR_NonEnumInheritanceClauseOuterLayer ) { <nl> - auto protocolOrClass = <nl> - ( type - > is < ProtocolType > ( ) | | <nl> - type - > is < ClassType > ( ) | | <nl> - type - > isAnyObject ( ) ) ; <nl> - if ( ! protocolOrClass ) { <nl> - TC . diagnose ( comp - > getIdLoc ( ) , <nl> - diag : : inheritance_from_non_protocol_or_class , <nl> - type ) ; <nl> - return ErrorType : : get ( type ) ; <nl> - } <nl> - } <nl> - <nl> / / If this is the first result we found , record it . <nl> if ( current . isNull ( ) ) { <nl> current = type ; <nl> static Type resolveNestedIdentTypeComponent ( <nl> } <nl> } <nl> <nl> - if ( options & TR_NonEnumInheritanceClauseOuterLayer ) { <nl> - auto protocolOrClass = <nl> - memberType - > is < ProtocolType > ( ) | | <nl> - memberType - > is < ClassType > ( ) | | <nl> - memberType - > isAnyObject ( ) ; <nl> - if ( ! protocolOrClass ) { <nl> - TC . diagnose ( comp - > getIdLoc ( ) , <nl> - diag : : inheritance_from_non_protocol_or_class , memberType ) ; <nl> - return ErrorType : : get ( memberType ) ; <nl> - } <nl> - } <nl> - <nl> / / If there are generic arguments , apply them now . <nl> if ( auto genComp = dyn_cast < GenericIdentTypeRepr > ( comp ) ) { <nl> memberType = TC . applyGenericArguments ( <nl> static Type resolveIdentTypeComponent ( <nl> <nl> / / All remaining components use qualified lookup . <nl> <nl> - auto parentOptions = options - TR_NonEnumInheritanceClauseOuterLayer ; <nl> / / Resolve the parent type . <nl> - Type parentTy = resolveIdentTypeComponent ( TC , DC , parentComps , parentOptions , <nl> + Type parentTy = resolveIdentTypeComponent ( TC , DC , parentComps , options , <nl> diagnoseErrors , resolver , <nl> unsatisfiedDependency ) ; <nl> if ( ! parentTy | | parentTy - > hasError ( ) ) return parentTy ; <nl> mmm a / lib / Sema / TypeChecker . h <nl> ppp b / lib / Sema / TypeChecker . 
h <nl> enum TypeResolutionFlags : unsigned { <nl> / / / Whether we are checking the outermost type of a computed property setter ' s newValue <nl> TR_ImmediateSetterNewValue = 0x1000000 , <nl> <nl> - / / / Whether we are checking the outermost layer of types in an inheritance <nl> - / / / clause on something other than an enum ( i . e . V , but not U or W , in class <nl> - / / / T : U . V < W > ) <nl> - TR_NonEnumInheritanceClauseOuterLayer = 0x2000000 , <nl> - <nl> / / / Whether we are checking the underlying type of a typealias . <nl> - TR_TypeAliasUnderlyingType = 0x4000000 , <nl> + TR_TypeAliasUnderlyingType = 0x2000000 , <nl> <nl> / / / Whether we are checking the parameter list of a subscript . <nl> - TR_SubscriptParameters = 0x8000000 , <nl> + TR_SubscriptParameters = 0x4000000 , <nl> } ; <nl> <nl> / / / Option set describing how type resolution should work . <nl> mmm a / test / decl / nested / protocol . swift <nl> ppp b / test / decl / nested / protocol . swift <nl> class OuterClass { <nl> } <nl> <nl> class OtherGenericClass < T > { <nl> - / / FIXME : The diagnostic is misleading - - OuterClass is in fact a class type <nl> protocol InnerProtocol : OtherGenericClass { } <nl> - / / expected - error @ - 1 { { inheritance from non - protocol , non - class type ' OtherGenericClass < T > ' } } <nl> - / / expected - error @ - 2 { { protocol ' InnerProtocol ' cannot be nested inside another declaration } } <nl> + / / expected - error @ - 1 { { protocol ' InnerProtocol ' cannot be nested inside another declaration } } <nl> } <nl> mmm a / test / decl / protocol / protocols . swift <nl> ppp b / test / decl / protocol / protocols . swift <nl> func test1 ( ) { <nl> v1 . creator = " Me " / / expected - error { { cannot assign to property : ' creator ' is a get - only property } } <nl> } <nl> <nl> - protocol Bogus : Int { } / / expected - error { { inheritance from non - protocol , non - class type ' Int ' } } <nl> + protocol Bogus : Int { } <nl> + / / expected - error @ - 1 { { inheritance from non - protocol type ' Int ' } } <nl> + / / expected - error @ - 2 { { type ' Self ' constrained to non - protocol type ' Int ' } } <nl> <nl> / / Explicit conformance checks ( successful ) . <nl> <nl> struct NotFormattedPrintable : FormattedPrintable { / / expected - error { { type ' Not <nl> func print ( format : TestFormat ) { } / / expected - note { { candidate has non - matching type ' ( TestFormat ) - > ( ) ' } } <nl> } <nl> <nl> + / / Protocol compositions in inheritance clauses <nl> + protocol Left { <nl> + func l ( ) / / expected - note { { protocol requires function ' l ( ) ' with type ' ( ) - > ( ) ' ; do you want to add a stub ? } } <nl> + } <nl> + protocol Right { <nl> + func r ( ) / / expected - note { { protocol requires function ' r ( ) ' with type ' ( ) - > ( ) ' ; do you want to add a stub ? } } <nl> + } <nl> + typealias Both = Left & Right <nl> + <nl> + protocol Up : Both { <nl> + func u ( ) <nl> + } <nl> + <nl> + struct DoesNotConform : Up { <nl> + / / expected - error @ - 1 { { type ' DoesNotConform ' does not conform to protocol ' Left ' } } <nl> + / / expected - error @ - 2 { { type ' DoesNotConform ' does not conform to protocol ' Right ' } } <nl> + func u ( ) { } <nl> + } <nl> + <nl> / / Circular protocols <nl> <nl> protocol CircleMiddle : CircleStart { func circle_middle ( ) } / / expected - error 2 { { circular protocol inheritance CircleMiddle } } <nl> mmm a / test / type / subclass_composition . swift <nl> ppp b / test / type / subclass_composition . 
swift <nl> func conformsTo < T1 : P2 , T2 : Base < Int > & P2 > ( <nl> protocol ProtoConstraintsSelfToClass where Self : Base < Int > { } <nl> <nl> protocol ProtoRefinesClass : Base < Int > { } / / FIXME expected - error { { } } <nl> - protocol ProtoRefinesClassAndProtocolAlias : BaseIntAndP2 { } / / FIXME expected - error { { } } <nl> + protocol ProtoRefinesClassAndProtocolAlias : BaseIntAndP2 { } <nl> protocol ProtoRefinesClassAndProtocolDirect : Base < Int > & P2 { } / / FIXME expected - error 2 { { } } <nl> protocol ProtoRefinesClassAndProtocolExpanded : Base < Int > , P2 { } / / FIXME expected - error { { } } <nl> <nl> class ClassConformsToClassProtocolBad1 : ProtoConstraintsSelfToClass { } <nl> / / expected - note @ - 2 { { requirement specified as ' Self ' : ' Base < Int > ' [ with Self = ClassConformsToClassProtocolBad1 ] } } <nl> class ClassConformsToClassProtocolGood1 : Derived , ProtoConstraintsSelfToClass { } <nl> <nl> - class ClassConformsToClassProtocolBad2 : ProtoRefinesClass { } / / FIXME <nl> + class ClassConformsToClassProtocolBad2 : ProtoRefinesClass { } <nl> + / / expected - error @ - 1 { { ' ProtoRefinesClass ' requires that ' ClassConformsToClassProtocolBad2 ' inherit from ' Base < Int > ' } } <nl> + / / expected - note @ - 2 { { requirement specified as ' Self ' : ' Base < Int > ' [ with Self = ClassConformsToClassProtocolBad2 ] } } <nl> class ClassConformsToClassProtocolGood2 : Derived , ProtoRefinesClass { } <nl> <nl> / / Subclass existentials inside inheritance clauses <nl> class CompositionInClassInheritanceClauseDirect : Base < Int > & P2 { <nl> <nl> protocol CompositionInAssociatedTypeInheritanceClause { <nl> associatedtype A : BaseIntAndP2 <nl> - / / FIXME expected - error @ - 1 { { } } <nl> } <nl> <nl> / / Members of metatypes and existential metatypes <nl>
Sema : Remove TR_NonEnumInheritanceClauseOuterLayer
apple/swift
6548ad709b353e749ecd56e24cdda0c8049939ef
2017-05-11T05:12:25Z
mmm a / arangod / Aql / AqlItemBlock . cpp <nl> ppp b / arangod / Aql / AqlItemBlock . cpp <nl> void AqlItemBlock : : destroy ( ) { <nl> it . erase ( ) ; <nl> } <nl> } <nl> + <nl> + _valueCount . clear ( ) ; <nl> } <nl> <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> mmm a / arangod / Aql / AqlItemBlock . h <nl> ppp b / arangod / Aql / AqlItemBlock . h <nl> namespace triagens { <nl> / / First update the reference count , if this fails , the value is empty <nl> if ( value . requiresDestruction ( ) ) { <nl> auto it = _valueCount . find ( value ) ; <nl> + <nl> if ( it = = _valueCount . end ( ) ) { <nl> TRI_IF_FAILURE ( " AqlItemBlock : : setValue " ) { <nl> THROW_ARANGO_EXCEPTION ( TRI_ERROR_DEBUG ) ; <nl>
always perform proper block cleanup
arangodb/arangodb
ed24ee798ae7be705e77abf6b72ec69350a26ed9
2015-04-27T21:28:42Z
mmm a / include / swift / SILOptimizer / Analysis / ARCAnalysis . h <nl> ppp b / include / swift / SILOptimizer / Analysis / ARCAnalysis . h <nl> class ConsumedArgToEpilogueReleaseMatcher { <nl> return false ; <nl> } <nl> <nl> - / / / Return true if we ' ve found some epilgoue releases for the argument <nl> + / / / Return true if we ' ve found some epilogue releases for the argument <nl> / / / but not all . <nl> bool hasSomeReleasesForArgument ( SILArgument * Arg ) { <nl> return FoundSomeReleases . find ( Arg ) ! = FoundSomeReleases . end ( ) ; <nl> mmm a / lib / IRGen / GenMeta . cpp <nl> ppp b / lib / IRGen / GenMeta . cpp <nl> createInPlaceMetadataInitializationFunction ( IRGenModule & IGM , <nl> IGM . DebugInfo - > emitArtificialFunction ( IGF , fn ) ; <nl> <nl> / / Skip instrumentation when building for TSan to avoid false positives . <nl> - / / The syncronization for this happens in the Runtime and we do not see it . <nl> + / / The synchronization for this happens in the Runtime and we do not see it . <nl> if ( IGM . IRGen . Opts . Sanitize = = SanitizerKind : : Thread ) <nl> fn - > removeFnAttr ( llvm : : Attribute : : SanitizeThread ) ; <nl> <nl> namespace { <nl> IRGenFunction IGF ( IGM , f ) ; <nl> <nl> / / Skip instrumentation when building for TSan to avoid false positives . <nl> - / / The syncronization for this happens in the Runtime and we do not see it . <nl> + / / The synchronization for this happens in the Runtime and we do not see it . <nl> if ( IGM . IRGen . Opts . Sanitize = = SanitizerKind : : Thread ) <nl> f - > removeFnAttr ( llvm : : Attribute : : SanitizeThread ) ; <nl> <nl> mmm a / lib / IRGen / GenProto . cpp <nl> ppp b / lib / IRGen / GenProto . cpp <nl> void IRGenModule : : emitSILWitnessTable ( SILWitnessTable * wt ) { <nl> return ; <nl> <nl> / / Don ' t emit a witness table that is available externally . <nl> - / / It can end up in having duplicate sumbols for generated associated type <nl> + / / It can end up in having duplicate symbols for generated associated type <nl> / / metadata access functions . <nl> / / Also , it is not a big benefit for LLVM to emit such witness tables . <nl> if ( isAvailableExternally ( wt - > getLinkage ( ) ) ) <nl> mmm a / lib / SILOptimizer / Mandatory / DefiniteInitialization . cpp <nl> ppp b / lib / SILOptimizer / Mandatory / DefiniteInitialization . cpp <nl> int LifetimeChecker : : getAnyUninitializedMemberAtInst ( SILInstruction * Inst , <nl> / / Determine the liveness states of the elements that we care about . <nl> auto Liveness = getLivenessAtInst ( Inst , FirstElt , NumElts ) ; <nl> <nl> - / / Find unintialized member . <nl> + / / Find uninitialized member . <nl> for ( unsigned i = FirstElt , e = i + NumElts ; i ! = e ; + + i ) <nl> if ( Liveness . get ( i ) ! = DIKind : : Yes ) <nl> return i ; <nl> mmm a / lib / SILOptimizer / Transforms / FunctionSignatureOpts . cpp <nl> ppp b / lib / SILOptimizer / Transforms / FunctionSignatureOpts . cpp <nl> using SILParameterInfoList = llvm : : SmallVector < SILParameterInfo , 8 > ; <nl> using ArgumentIndexMap = llvm : : SmallDenseMap < int , int > ; <nl> <nl> / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> - / / Utilties <nl> + / / Utilities <nl> / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> <nl> / / / Return the single apply found in this function . <nl> class FunctionSignatureTransform { <nl> / / Keep tracks to argument mapping . 
<nl> ArgumentIndexMap & AIM ; <nl> <nl> - / / Self arument is modified . <nl> + / / Self argument is modified . <nl> bool shouldModifySelfArgument ; <nl> <nl> - / / / Keep a " view " of precompiled information on argumentis that we use <nl> + / / / Keep a " view " of precompiled information on arguments that we use <nl> / / / during our optimization . <nl> llvm : : SmallVector < ArgumentDescriptor , 4 > & ArgumentDescList ; <nl> <nl> class FunctionSignatureTransform { <nl> return Params | | Result ; <nl> } <nl> <nl> - / / / Do the actual owned to guaranteeed transformations . <nl> + / / / Do the actual owned to guaranteed transformations . <nl> void OwnedToGuaranteedTransform ( ) { <nl> OwnedToGuaranteedTransformFunctionResults ( ) ; <nl> OwnedToGuaranteedTransformFunctionParameters ( ) ; <nl> std : : string FunctionSignatureTransform : : createOptimizedSILFunctionName ( ) { <nl> <nl> / / / Compute what the function interface will look like based on the <nl> / / / optimization we are doing on the given argument descriptor . Default <nl> - / / / implemenation simply passes it through . <nl> + / / / implementation simply passes it through . <nl> void <nl> FunctionSignatureTransform : : <nl> computeOptimizedArgInterface ( ArgumentDescriptor & AD , SILParameterInfoList & Out ) { <nl> mmm a / lib / Sema / TypeCheckExpr . cpp <nl> ppp b / lib / Sema / TypeCheckExpr . cpp <nl> void TypeChecker : : computeCaptures ( AnyFunctionRef AFR ) { <nl> <nl> if ( inoutCount > 0 ) { <nl> if ( auto e = AFR . getAbstractFunctionDecl ( ) ) { <nl> - for ( auto returnOccurance : getEscapingFunctionAsReturnValue ( e ) ) { <nl> - diagnose ( returnOccurance - > getReturnLoc ( ) , <nl> + for ( auto returnOccurrence : getEscapingFunctionAsReturnValue ( e ) ) { <nl> + diagnose ( returnOccurrence - > getReturnLoc ( ) , <nl> diag : : nested_function_escaping_inout_capture ) ; <nl> } <nl> - auto occurances = getEscapingFunctionAsArgument ( e ) ; <nl> - for ( auto occurance : occurances ) { <nl> - diagnose ( occurance - > getLoc ( ) , <nl> + auto occurrences = getEscapingFunctionAsArgument ( e ) ; <nl> + for ( auto occurrence : occurrences ) { <nl> + diagnose ( occurrence - > getLoc ( ) , <nl> diag : : nested_function_with_implicit_capture_argument , <nl> inoutCount > 1 ) ; <nl> } <nl> mmm a / lib / Sema / TypeChecker . h <nl> ppp b / lib / Sema / TypeChecker . h <nl> class TypeChecker final : public LazyResolver { <nl> llvm : : DenseMap < AbstractFunctionDecl * , llvm : : DenseSet < ReturnStmt * > > <nl> FunctionAsReturnValue ; <nl> <nl> - / / / Function apply expressions with a certain function as an arugment . <nl> + / / / Function apply expressions with a certain function as an argument . <nl> llvm : : DenseMap < AbstractFunctionDecl * , llvm : : DenseSet < ApplyExpr * > > <nl> FunctionAsEscapingArg ; <nl> <nl> public : <nl> - / / / Record an occurence of a function that captures inout values as an <nl> + / / / Record an occurrence of a function that captures inout values as an <nl> / / / argument . <nl> / / / <nl> - / / / \ param decl the function that occurs as an arugment . <nl> + / / / \ param decl the function that occurs as an argument . <nl> / / / <nl> / / / \ param apply the expression in which the function appears . <nl> void addEscapingFunctionAsArgument ( AbstractFunctionDecl * decl , <nl> class TypeChecker final : public LazyResolver { <nl> FunctionAsEscapingArg [ decl ] . insert ( apply ) ; <nl> } <nl> <nl> - / / / Find occurences of a function that captures inout values as arguments . 
<nl> + / / / Find occurrences of a function that captures inout values as arguments . <nl> / / / <nl> - / / / \ param decl the function that occurs as an arugment . <nl> + / / / \ param decl the function that occurs as an argument . <nl> / / / <nl> / / / \ returns Expressions in which the function appears as arguments . <nl> llvm : : DenseSet < ApplyExpr * > & <nl> class TypeChecker final : public LazyResolver { <nl> return FunctionAsEscapingArg [ decl ] ; <nl> } <nl> <nl> - / / / Record an occurence of a function that captures inout values as a return <nl> + / / / Record an occurrence of a function that captures inout values as a return <nl> / / / value <nl> / / / <nl> / / / \ param decl the function that occurs as a return value . <nl> class TypeChecker final : public LazyResolver { <nl> FunctionAsReturnValue [ decl ] . insert ( stmt ) ; <nl> } <nl> <nl> - / / / Find occurences of a function that captures inout values as return <nl> + / / / Find occurrences of a function that captures inout values as return <nl> / / / values . <nl> / / / <nl> / / / \ param decl the function that occurs as a return value . <nl> mmm a / stdlib / public / Platform / CMakeLists . txt <nl> ppp b / stdlib / public / Platform / CMakeLists . txt <nl> foreach ( sdk $ { SWIFT_SDKS } ) <nl> <nl> # Determine the location of glibc headers based on the target . <nl> set ( GLIBC_SYSROOT_RELATIVE_INCLUDE_PATH " / usr / include " ) <nl> - set ( GLIBC_SYSROOT_REALTIVE_ARCH_INCLUDE_PATH $ { GLIBC_SYSROOT_RELATIVE_INCLUDE_PATH } ) <nl> + set ( GLIBC_SYSROOT_RELATIVE_ARCH_INCLUDE_PATH $ { GLIBC_SYSROOT_RELATIVE_INCLUDE_PATH } ) <nl> <nl> # Some SDKs place their headers in architecture - specific subfolders . <nl> if ( ( $ { sdk } STREQUAL " LINUX " OR $ { sdk } STREQUAL " FREEBSD " ) AND CMAKE_LIBRARY_ARCHITECTURE ) <nl> - set ( GLIBC_SYSROOT_REALTIVE_ARCH_INCLUDE_PATH " $ { GLIBC_SYSROOT_REALTIVE_ARCH_INCLUDE_PATH } / $ { CMAKE_LIBRARY_ARCHITECTURE } " ) <nl> + set ( GLIBC_SYSROOT_RELATIVE_ARCH_INCLUDE_PATH " $ { GLIBC_SYSROOT_RELATIVE_ARCH_INCLUDE_PATH } / $ { CMAKE_LIBRARY_ARCHITECTURE } " ) <nl> endif ( ) <nl> <nl> set ( GLIBC_INCLUDE_PATH " $ { SWIFT_SDK_ $ { sdk } _PATH } / $ { GLIBC_SYSROOT_RELATIVE_INCLUDE_PATH } " ) <nl> - set ( GLIBC_ARCH_INCLUDE_PATH " $ { SWIFT_SDK_ $ { sdk } _PATH } / $ { GLIBC_SYSROOT_REALTIVE_ARCH_INCLUDE_PATH } " ) <nl> + set ( GLIBC_ARCH_INCLUDE_PATH " $ { SWIFT_SDK_ $ { sdk } _PATH } / $ { GLIBC_SYSROOT_RELATIVE_ARCH_INCLUDE_PATH } " ) <nl> <nl> set ( glibc_modulemap_source " glibc . modulemap . gyb " ) <nl> set ( glibc_modulemap_out " $ { module_dir } / glibc . modulemap " ) <nl> foreach ( sdk $ { SWIFT_SDKS } ) <nl> FLAGS <nl> " - DCMAKE_SDK = $ { sdk } " <nl> " - DGLIBC_INCLUDE_PATH = $ { GLIBC_SYSROOT_RELATIVE_INCLUDE_PATH } " <nl> - " - DGLIBC_ARCH_INCLUDE_PATH = $ { GLIBC_SYSROOT_REALTIVE_ARCH_INCLUDE_PATH } " ) <nl> + " - DGLIBC_ARCH_INCLUDE_PATH = $ { GLIBC_SYSROOT_RELATIVE_ARCH_INCLUDE_PATH } " ) <nl> <nl> list ( APPEND glibc_modulemap_target_list $ { glibc_modulemap_native_target } ) <nl> set ( glibc_modulemap_out $ { glibc_sysroot_relative_modulemap_out } ) <nl> mmm a / stdlib / public / core / StringLegacy . swift <nl> ppp b / stdlib / public / core / StringLegacy . swift <nl> extension String { <nl> / / / specified prefix . <nl> / / / <nl> / / / The comparison is both case sensitive and Unicode safe . 
The <nl> - / / / case - sensitive comparision will only match strings whose corresponding <nl> + / / / case - sensitive comparison will only match strings whose corresponding <nl> / / / characters have the same case . <nl> / / / <nl> / / / let cafe = " Café du Monde " <nl> extension String { <nl> / / / specified suffix . <nl> / / / <nl> / / / The comparison is both case sensitive and Unicode safe . The <nl> - / / / case - sensitive comparision will only match strings whose corresponding <nl> + / / / case - sensitive comparison will only match strings whose corresponding <nl> / / / characters have the same case . <nl> / / / <nl> / / / let plans = " Let ' s meet at the café " <nl> mmm a / stdlib / public / core / StringRangeReplaceableCollection . swift . gyb <nl> ppp b / stdlib / public / core / StringRangeReplaceableCollection . swift . gyb <nl> extension String { <nl> / / / let str = " The rain in Spain stays mainly in the plain . " <nl> / / / <nl> / / / let vowels : Set < Character > = [ " a " , " e " , " i " , " o " , " u " ] <nl> - / / / let disemvowelled = String ( str . characters . lazy . filter { ! vowels . contains ( $ 0 ) } ) <nl> + / / / let disemvoweled = String ( str . characters . lazy . filter { ! vowels . contains ( $ 0 ) } ) <nl> / / / <nl> - / / / print ( disemvowelled ) <nl> + / / / print ( disemvoweled ) <nl> / / / / / Prints " Th rn n Spn stys mnly n th pln . " <nl> / / / <nl> / / / - Parameter characters : A sequence of characters . <nl> mmm a / stdlib / public / core / UnfoldSequence . swift <nl> ppp b / stdlib / public / core / UnfoldSequence . swift <nl> public func sequence < T > ( first : T , next : ( T ) - > T ? ) - > UnfoldFirstSequence < T > { <nl> / / / Returns a sequence formed from repeated lazy applications of ` next ` to a <nl> / / / mutable ` state ` . <nl> / / / <nl> - / / / The elements of the sequence are obtaned by invoking ` next ` with a mutable <nl> + / / / The elements of the sequence are obtained by invoking ` next ` with a mutable <nl> / / / state . The same state is passed to all invocations of ` next ` , so subsequent <nl> / / / calls will see any mutations made by previous calls . The sequence ends when <nl> / / / ` next ` returns ` nil ` . If ` next ` never returns ` nil ` , the sequence is <nl> mmm a / stdlib / public / core / Unicode . swift <nl> ppp b / stdlib / public / core / Unicode . swift <nl> extension UTF16 { <nl> } <nl> <nl> / / / Returns the high - surrogate code unit of the surrogate pair representing <nl> - / / / the specifed Unicode scalar . <nl> + / / / the specified Unicode scalar . <nl> / / / <nl> / / / Because a Unicode scalar value can require up to 21 bits to store its <nl> / / / value , some Unicode scalars are represented in UTF - 16 by a pair of <nl> extension UTF16 { <nl> } <nl> <nl> / / / Returns the low - surrogate code unit of the surrogate pair representing <nl> - / / / the specifed Unicode scalar . <nl> + / / / the specified Unicode scalar . <nl> / / / <nl> / / / Because a Unicode scalar value can require up to 21 bits to store its <nl> / / / value , some Unicode scalars are represented in UTF - 16 by a pair of <nl> mmm a / test / Prototypes / Algorithms . swift <nl> ppp b / test / Prototypes / Algorithms . 
swift <nl> public protocol MutableCollectionAlgorithms : MutableCollection { <nl> ) - > Index <nl> } <nl> <nl> - / / In the stdlib , this conformace wouldn ' t be needed <nl> + / / In the stdlib , this conformance wouldn ' t be needed <nl> extension Array : MutableCollectionAlgorithms { } <nl> <nl> / / / In the stdlib , this would simply be MutableCollection <nl> mmm a / test / Sanitizers / tsan - type - metadata . swift <nl> ppp b / test / Sanitizers / tsan - type - metadata . swift <nl> <nl> <nl> / / We expect not to report any races on this testcase . <nl> <nl> - / / This test excercises accesses to type metadata , which uses lockless <nl> - / / syncronization in the runtime that is relied upon by the direct accesses in the IR . <nl> - / / We have to make sure TSan does not see the acesses to the metadata from the IR . <nl> + / / This test exercises accesses to type metadata , which uses lockless <nl> + / / synchronization in the runtime that is relied upon by the direct accesses in the IR . <nl> + / / We have to make sure TSan does not see the accesses to the metadata from the IR . <nl> / / Otherwise , it will report a race . <nl> <nl> / / Generic classes . <nl> mmm a / test / Sanitizers / witness_table_lookup . swift <nl> ppp b / test / Sanitizers / witness_table_lookup . swift <nl> <nl> / / REQUIRES : OS = macosx <nl> / / REQUIRES : tsan_runtime <nl> <nl> - / / Check taht TSan does not report spurious races in witness table lookup . <nl> + / / Check that TSan does not report spurious races in witness table lookup . <nl> <nl> func consume ( _ x : Any ) { } <nl> protocol Q { <nl> mmm a / utils / build - script <nl> ppp b / utils / build - script <nl> details of the setups of other systems or automated environments . " " " ) <nl> if args . show_sdks : <nl> swift_build_support . debug . print_xcodebuild_versions ( ) <nl> <nl> - # Clean build direcotry if requested . <nl> + # Clean build directory if requested . <nl> if args . clean : <nl> shell . rmtree ( workspace . build_root ) <nl> <nl> mmm a / utils / build - script - impl <nl> ppp b / utils / build - script - impl <nl> KNOWN_SETTINGS = ( <nl> only - execute " all " " Only execute the named action ( see implementation ) " <nl> ) <nl> <nl> - # Centalized access point for traced command invocation . <nl> + # Centralized access point for traced command invocation . <nl> # Every operation that might mutates file system should be called via <nl> # these functions . <nl> <nl> mmm a / utils / swift_build_support / swift_build_support / products / ninja . py <nl> ppp b / utils / swift_build_support / swift_build_support / products / ninja . py <nl> <nl> - # swift_build_support / producsts / ninja . py mmmmmmmmmmmmmmmmmmmmmmmm * - python - * - <nl> + # swift_build_support / products / ninja . py mmmmmmmmmmmmmmmmmmmmmmmm - * - python - * - <nl> # <nl> # This source file is part of the Swift . org open source project <nl> # <nl>
Merge pull request from practicalswift / typo - fixes - 20160605
apple/swift
f92e90a54e18766b3a7d012223b8fe4a54dfeb1e
2016-06-05T23:30:03Z
mmm a / drivers / gles2 / rasterizer_scene_gles2 . cpp <nl> ppp b / drivers / gles2 / rasterizer_scene_gles2 . cpp <nl> void RasterizerSceneGLES2 : : _draw_sky ( RasterizerStorageGLES2 : : Sky * p_sky , const C <nl> glEnableVertexAttribArray ( VS : : ARRAY_VERTEX ) ; <nl> glEnableVertexAttribArray ( VS : : ARRAY_TEX_UV ) ; <nl> <nl> + storage - > shaders . copy . set_conditional ( CopyShaderGLES2 : : USE_ASYM_PANO , asymmetrical ) ; <nl> + storage - > shaders . copy . set_conditional ( CopyShaderGLES2 : : USE_PANORAMA , ! asymmetrical ) ; <nl> storage - > shaders . copy . set_conditional ( CopyShaderGLES2 : : USE_MULTIPLIER , true ) ; <nl> storage - > shaders . copy . set_conditional ( CopyShaderGLES2 : : USE_CUBEMAP , false ) ; <nl> - storage - > shaders . copy . set_conditional ( CopyShaderGLES2 : : USE_PANORAMA , true ) ; <nl> storage - > shaders . copy . set_conditional ( CopyShaderGLES2 : : USE_COPY_SECTION , false ) ; <nl> storage - > shaders . copy . set_conditional ( CopyShaderGLES2 : : USE_CUSTOM_ALPHA , false ) ; <nl> storage - > shaders . copy . bind ( ) ; <nl> storage - > shaders . copy . set_uniform ( CopyShaderGLES2 : : MULTIPLIER , p_energy ) ; <nl> + if ( asymmetrical ) { <nl> + / / pack the bits we need from our projection matrix <nl> + storage - > shaders . copy . set_uniform ( CopyShaderGLES2 : : ASYM_PROJ , camera . matrix [ 2 ] [ 0 ] , camera . matrix [ 0 ] [ 0 ] , camera . matrix [ 2 ] [ 1 ] , camera . matrix [ 1 ] [ 1 ] ) ; <nl> + / / / @ TODO I couldn ' t get mat3 + p_transform . basis to work , that would be better here . <nl> + storage - > shaders . copy . set_uniform ( CopyShaderGLES2 : : PANO_TRANSFORM , p_transform ) ; <nl> + } <nl> <nl> glDrawArrays ( GL_TRIANGLE_FAN , 0 , 4 ) ; <nl> <nl> void RasterizerSceneGLES2 : : _draw_sky ( RasterizerStorageGLES2 : : Sky * p_sky , const C <nl> glDisableVertexAttribArray ( VS : : ARRAY_TEX_UV ) ; <nl> glBindBuffer ( GL_ARRAY_BUFFER , 0 ) ; <nl> <nl> + storage - > shaders . copy . set_conditional ( CopyShaderGLES2 : : USE_ASYM_PANO , false ) ; <nl> + storage - > shaders . copy . set_conditional ( CopyShaderGLES2 : : USE_PANORAMA , false ) ; <nl> storage - > shaders . copy . set_conditional ( CopyShaderGLES2 : : USE_MULTIPLIER , false ) ; <nl> storage - > shaders . copy . set_conditional ( CopyShaderGLES2 : : USE_CUBEMAP , false ) ; <nl> } <nl> mmm a / drivers / gles2 / shaders / copy . glsl <nl> ppp b / drivers / gles2 / shaders / copy . glsl <nl> void main ( ) { <nl> <nl> # if defined ( USE_CUBEMAP ) | | defined ( USE_PANORAMA ) <nl> cube_interp = cube_in ; <nl> + # elif defined ( USE_ASYM_PANO ) <nl> + uv_interp = vertex_attrib . xy ; <nl> # else <nl> uv_interp = uv_in ; <nl> # endif <nl> varying vec2 uv_interp ; <nl> # endif <nl> / * clang - format on * / <nl> <nl> + # ifdef USE_ASYM_PANO <nl> + uniform highp mat4 pano_transform ; <nl> + uniform highp vec4 asym_proj ; <nl> + # endif <nl> + <nl> # ifdef USE_CUBEMAP <nl> uniform samplerCube source_cube ; / / texunit : 0 <nl> # else <nl> void main ( ) { <nl> <nl> vec4 color = texturePanorama ( source , normalize ( cube_interp ) ) ; <nl> <nl> + # elif defined ( USE_ASYM_PANO ) <nl> + <nl> + / / When an asymmetrical projection matrix is used ( applicable for stereoscopic rendering i . e . VR ) we need to do this calculation per fragment to get a perspective correct result . <nl> + / / Note that we ' re ignoring the x - offset for IPD , with Z sufficiently in the distance it becomes neglectible , as a result we could probably just set cube_normal . z to - 1 . 
<nl> + / / The Matrix [ 2 ] [ 0 ] ( = asym_proj . x ) and Matrix [ 2 ] [ 1 ] ( = asym_proj . z ) values are what provide the right shift in the image . <nl> + <nl> + vec3 cube_normal ; <nl> + cube_normal . z = - 1000000 . 0 ; <nl> + cube_normal . x = ( cube_normal . z * ( - uv_interp . x - asym_proj . x ) ) / asym_proj . y ; <nl> + cube_normal . y = ( cube_normal . z * ( - uv_interp . y - asym_proj . z ) ) / asym_proj . a ; <nl> + cube_normal = mat3 ( pano_transform ) * cube_normal ; <nl> + cube_normal . z = - cube_normal . z ; <nl> + <nl> + vec4 color = texturePanorama ( source , normalize ( cube_normal . xyz ) ) ; <nl> + <nl> # elif defined ( USE_CUBEMAP ) <nl> vec4 color = textureCube ( source_cube , normalize ( cube_interp ) ) ; <nl> # else <nl>
Fixed stereoscopic ( VR ) sky in GLES2
godotengine/godot
ef06079ad196adcee17adcabbd6d53366ef85c3f
2018-10-21T00:36:01Z
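The shader change packs camera.matrix[2][0], [0][0], [2][1] and [1][1] into asym_proj and reconstructs a perspective-correct view ray per fragment. The same arithmetic transcribed to CPU-side C++ purely for illustration; Vec3 and reconstruct_view_ray are ad-hoc names, not engine API, and the final rotation by pano_transform is left to the caller as in the shader:

#include <array>
#include <cmath>

// Ad-hoc vector type for illustration only (not Godot's math types).
struct Vec3 { float x, y, z; };

// asym_proj packs camera.matrix[2][0], [0][0], [2][1], [1][1], exactly as the
// C++ side of the patch does before handing the values to the copy shader.
Vec3 reconstruct_view_ray(float u, float v, const std::array<float, 4>& asym_proj) {
    Vec3 n;
    n.z = -1000000.0f;                                  // push the ray far along -Z
    n.x = (n.z * (-u - asym_proj[0])) / asym_proj[1];   // undo the horizontal asymmetric offset
    n.y = (n.z * (-v - asym_proj[2])) / asym_proj[3];   // undo the vertical asymmetric offset
    float len = std::sqrt(n.x * n.x + n.y * n.y + n.z * n.z);
    return Vec3{n.x / len, n.y / len, n.z / len};       // shader then rotates by pano_transform and flips z
}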
mmm a / lib / V8 / v8 - utils . cpp <nl> ppp b / lib / V8 / v8 - utils . cpp <nl> static v8 : : Handle < v8 : : Value > JS_RemoveRecursiveDirectory ( v8 : : Arguments const & a <nl> } <nl> <nl> const string path ( * name ) ; <nl> + # ifdef _WIN32 <nl> + / / windows paths are case - insensitive <nl> + if ( ! TRI_CaseEqualString2 ( path . c_str ( ) , tempPath , strlen ( tempPath ) ) ) { <nl> + # else <nl> if ( ! TRI_EqualString2 ( path . c_str ( ) , tempPath , strlen ( tempPath ) ) ) { <nl> + # endif <nl> TRI_FreeString ( TRI_CORE_MEM_ZONE , tempPath ) ; <nl> <nl> TRI_V8_EXCEPTION_PARAMETER ( scope , " directory to be removed is outside of temporary path " ) ; <nl>
fix Windows path name comparison
arangodb/arangodb
60526230abbb606201da0f641a6973a31647c9bf
2013-11-11T19:06:50Z
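The patch switches the temp-path guard to a case-insensitive comparison on Windows, where filesystem paths compare without regard to case. A minimal sketch of the same guard using plain CRT calls instead of the TRI_* string helpers:

#include <cstring>
#include <string>

// Prefix check that mirrors the #ifdef in the patch: case-insensitive on
// Windows (_strnicmp), case-sensitive elsewhere (strncmp).
bool isUnderTempPath(const std::string& path, const std::string& tempPath) {
#ifdef _WIN32
    return _strnicmp(path.c_str(), tempPath.c_str(), tempPath.size()) == 0;
#else
    return std::strncmp(path.c_str(), tempPath.c_str(), tempPath.size()) == 0;
#endif
}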
mmm a / third_party / wasm - polyfill / wasmator . py <nl> ppp b / third_party / wasm - polyfill / wasmator . py <nl> def path_in_polyfill ( * pathelems ) : <nl> emscripten . safe_copy ( jsfile , ' before . js ' ) <nl> <nl> print ' process input ' <nl> - Popen ( [ PYTHON , path_from_root ( ' tools ' , ' distill_asm . py ' ) , jsfile , tempfile ] ) . communicate ( ) <nl> + check_call ( [ PYTHON , path_from_root ( ' tools ' , ' distill_asm . py ' ) , jsfile , tempfile ] ) <nl> + <nl> module = open ( tempfile ) . read ( ) <nl> if ' use asm ' not in module : <nl> print > > sys . stderr , ' no asm . js module to wasm - ify ' <nl>
check distill_asm return code in wasmator
emscripten-core/emscripten
36aeee8b07a569820927e382f412c22c91740bc4
2015-07-27T19:58:44Z
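Replacing Popen(...).communicate() with check_call(...) above makes the script fail as soon as distill_asm exits non-zero instead of silently continuing with a bad intermediate file. A rough C++ analogue of that behaviour; run_or_throw is a made-up helper, and since std::system's return value is implementation-defined, treating any non-zero status as failure is a simplification:

#include <cstdlib>
#include <stdexcept>
#include <string>

// Run a shell command and throw if it does not report success, rather than
// ignoring the child's exit status.
void run_or_throw(const std::string& command) {
    int status = std::system(command.c_str());
    if (status != 0) {
        throw std::runtime_error("command failed: " + command);
    }
}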
mmm a / proton <nl> ppp b / proton <nl> <nl> <nl> # script to launch Wine with the correct environment <nl> <nl> - from __future__ import print_function <nl> - <nl> import fcntl <nl> import array <nl> import filecmp <nl> if __name__ = = " __main__ " : <nl> <nl> g_session . init_session ( ) <nl> <nl> - if sys . version_info [ 0 ] = = 2 : <nl> - binary_stdout = sys . stdout <nl> - elif sys . version_info [ 0 ] = = 3 : <nl> - binary_stdout = sys . stdout . buffer <nl> - else : <nl> - raise Exception ( " Unsupported python version " ) <nl> - <nl> # determine mode <nl> if sys . argv [ 1 ] = = " run " : <nl> # start target app <nl> if __name__ = = " __main__ " : <nl> elif sys . argv [ 1 ] = = " getcompatpath " : <nl> # linux - > windows path <nl> path = subprocess . check_output ( [ g_proton . wine_bin , " winepath " , " - w " , sys . argv [ 2 ] ] , env = g_session . env , stderr = g_session . log_file ) <nl> - binary_stdout . write ( path ) <nl> + sys . stdout . buffer . write ( path ) <nl> elif sys . argv [ 1 ] = = " getnativepath " : <nl> # windows - > linux path <nl> path = subprocess . check_output ( [ g_proton . wine_bin , " winepath " , sys . argv [ 2 ] ] , env = g_session . env , stderr = g_session . log_file ) <nl> - binary_stdout . write ( path ) <nl> + sys . stdout . buffer . write ( path ) <nl> else : <nl> log ( " Need a verb . " ) <nl> sys . exit ( 1 ) <nl>
proton : Remove python2 case
ValveSoftware/Proton
4338fb517aedcfe1f8217377e598172d04eb9788
2020-01-13T14:25:10Z
mmm a / modules / dnn / include / opencv2 / dnn / dnn . hpp <nl> ppp b / modules / dnn / include / opencv2 / dnn / dnn . hpp <nl> CV__DNN_EXPERIMENTAL_NS_BEGIN <nl> * / <nl> virtual void copyToHost ( ) = 0 ; <nl> <nl> + / * * <nl> + * @ brief Indicate that an actual data is on CPU . <nl> + * / <nl> + virtual void setHostDirty ( ) = 0 ; <nl> + <nl> int backendId ; / / ! < Backend identifier . <nl> int targetId ; / / ! < Target identifier . <nl> } ; <nl> mmm a / modules / dnn / src / dnn . cpp <nl> ppp b / modules / dnn / src / dnn . cpp <nl> struct LayerPin <nl> } <nl> } ; <nl> <nl> - / / Objects of this class manages wrappers . For every CPU memory pointer and shape <nl> - / / one and only wrapper . Now it support wrapping for single backend and target . <nl> - class BackendWrapManager <nl> - { <nl> - public : <nl> - Ptr < BackendWrapper > wrap ( const Mat & m , int backendId , int targetId ) <nl> - { <nl> - CV_TRACE_FUNCTION ( ) ; <nl> - <nl> - CV_Assert ( backendId ! = DNN_BACKEND_DEFAULT ) ; <nl> - <nl> - std : : map < void * , Ptr < BackendWrapper > > : : iterator hostsIt ; <nl> - / / Check that the same CPU memory was previously wrapped . <nl> - hostsIt = hostWrappers . find ( m . data ) ; <nl> - if ( hostsIt = = hostWrappers . end ( ) ) <nl> - { <nl> - / / If not wrapped before . <nl> - return ( hostWrappers [ m . data ] = wrapHost ( m , backendId , targetId ) ) ; <nl> - } <nl> - else <nl> - { <nl> - / / Find if wrapper of this host and shape was created before . <nl> - std : : map < std : : pair < void * , MatSize > , Ptr < BackendWrapper > > : : iterator it ; <nl> - std : : pair < void * , MatSize > key ( m . data , m . size ) ; <nl> - it = extraWrappers . find ( key ) ; <nl> - if ( it = = extraWrappers . end ( ) ) <nl> - { <nl> - MatShape shape ( m . dims ) ; <nl> - for ( int i = 0 ; i < m . dims ; + + i ) <nl> - shape [ i ] = m . size . p [ i ] ; <nl> - return ( extraWrappers [ key ] = wrapUser ( hostsIt - > second , shape ) ) ; <nl> - } <nl> - else <nl> - return it - > second ; <nl> - } <nl> - } <nl> - <nl> - std : : vector < Ptr < BackendWrapper > > wrap ( const std : : vector < Mat * > & mats , <nl> - int backendId , int targetId ) <nl> - { <nl> - const int num = mats . size ( ) ; <nl> - std : : vector < Ptr < BackendWrapper > > dst ( num ) ; <nl> - for ( int i = 0 ; i < num ; + + i ) <nl> - { <nl> - dst [ i ] = wrap ( * mats [ i ] , backendId , targetId ) ; <nl> - } <nl> - return dst ; <nl> - } <nl> - <nl> - std : : vector < Ptr < BackendWrapper > > wrap ( const std : : vector < Mat > & mats , <nl> - int backendId , int targetId ) <nl> - { <nl> - const int num = mats . size ( ) ; <nl> - std : : vector < Ptr < BackendWrapper > > dst ( num ) ; <nl> - for ( int i = 0 ; i < num ; + + i ) <nl> - { <nl> - dst [ i ] = wrap ( mats [ i ] , backendId , targetId ) ; <nl> - } <nl> - return dst ; <nl> - } <nl> - <nl> - void reset ( ) <nl> - { <nl> - CV_TRACE_FUNCTION ( ) ; <nl> - <nl> - hostWrappers . clear ( ) ; <nl> - extraWrappers . clear ( ) ; <nl> - } <nl> - <nl> - private : <nl> - / / Backend - specific wrapping function . 
<nl> - Ptr < BackendWrapper > wrapHost ( const Mat & m , int backendId , int targetId ) <nl> - { <nl> - if ( backendId = = DNN_BACKEND_DEFAULT ) <nl> - { <nl> - return Ptr < BackendWrapper > ( ) ; <nl> - } <nl> - else if ( backendId = = DNN_BACKEND_HALIDE ) <nl> - { <nl> - CV_Assert ( haveHalide ( ) ) ; <nl> - # ifdef HAVE_HALIDE <nl> - return Ptr < BackendWrapper > ( new HalideBackendWrapper ( targetId , m ) ) ; <nl> - # endif / / HAVE_HALIDE <nl> - } <nl> - else <nl> - { <nl> - CV_Error ( Error : : StsNotImplemented , " Unknown backend identifier " ) ; <nl> - } <nl> - return Ptr < BackendWrapper > ( ) ; <nl> - } <nl> - <nl> - / / Backend - specific wrapping function . <nl> - Ptr < BackendWrapper > wrapUser ( const Ptr < BackendWrapper > & host , const MatShape & shape ) <nl> - { <nl> - int backendId = host - > backendId ; <nl> - if ( backendId = = DNN_BACKEND_DEFAULT ) <nl> - { <nl> - return Ptr < BackendWrapper > ( ) ; <nl> - } <nl> - else if ( backendId = = DNN_BACKEND_HALIDE ) <nl> - { <nl> - CV_Assert ( haveHalide ( ) ) ; <nl> - # ifdef HAVE_HALIDE <nl> - return Ptr < BackendWrapper > ( new HalideBackendWrapper ( host , shape ) ) ; <nl> - # endif / / HAVE_HALIDE <nl> - } <nl> - else <nl> - { <nl> - CV_Error ( Error : : StsNotImplemented , " Unknown backend identifier " ) ; <nl> - } <nl> - return Ptr < BackendWrapper > ( ) ; <nl> - } <nl> - <nl> - / / Wrappers that initialized for memory hosts ( first wrapping of CPU data ) . <nl> - std : : map < void * , Ptr < BackendWrapper > > hostWrappers ; <nl> - / / The rest of wrappers . They initialized for non - host cv : : Mat . <nl> - std : : map < std : : pair < void * , MatSize > , Ptr < BackendWrapper > > extraWrappers ; <nl> - } ; <nl> - <nl> struct LayerData <nl> { <nl> LayerData ( ) : id ( - 1 ) , flag ( 0 ) { } <nl> struct LayerData <nl> std : : set < int > inputLayersId ; <nl> std : : set < int > requiredOutputs ; <nl> std : : vector < LayerPin > consumers ; <nl> + std : : vector < Ptr < BackendWrapper > > outputBlobsWrappers ; <nl> + std : : vector < Ptr < BackendWrapper > > inputBlobsWrappers ; <nl> <nl> Ptr < Layer > layerInstance ; <nl> std : : vector < Mat > outputBlobs ; <nl> struct BlobManager <nl> std : : map < LayerPin , Mat > memHosts ; <nl> } ; <nl> <nl> + static Ptr < BackendWrapper > wrapMat ( int backendId , int targetId , const cv : : Mat & m ) <nl> + { <nl> + if ( backendId = = DNN_BACKEND_DEFAULT ) <nl> + { <nl> + return Ptr < BackendWrapper > ( ) ; <nl> + } <nl> + else if ( backendId = = DNN_BACKEND_HALIDE ) <nl> + { <nl> + CV_Assert ( haveHalide ( ) ) ; <nl> + # ifdef HAVE_HALIDE <nl> + return Ptr < BackendWrapper > ( new HalideBackendWrapper ( targetId , m ) ) ; <nl> + # endif / / HAVE_HALIDE <nl> + } <nl> + else <nl> + CV_Error ( Error : : StsNotImplemented , " Unknown backend identifier " ) ; <nl> + return Ptr < BackendWrapper > ( ) ; <nl> + } <nl> + <nl> struct Net : : Impl <nl> { <nl> typedef std : : map < int , LayerShapes > LayersShapesMap ; <nl> struct Net : : Impl <nl> int preferableBackend ; <nl> int preferableTarget ; <nl> String halideConfigFile ; <nl> - / / Backend - specific wrapping manager . <nl> - BackendWrapManager backendWrapper ; <nl> + / / Map host data to backend specific wrapper . 
<nl> + std : : map < void * , Ptr < BackendWrapper > > backendWrappers ; <nl> <nl> int lastLayerId ; <nl> <nl> struct Net : : Impl <nl> bool fusion ; <nl> std : : vector < int64 > layersTimings ; <nl> <nl> + Ptr < BackendWrapper > wrap ( const Mat & host ) <nl> + { <nl> + if ( preferableBackend = = DNN_BACKEND_DEFAULT ) <nl> + return Ptr < BackendWrapper > ( ) ; <nl> + <nl> + MatShape shape ( host . dims ) ; <nl> + for ( int i = 0 ; i < host . dims ; + + i ) <nl> + shape [ i ] = host . size [ i ] ; <nl> + <nl> + void * data = host . data ; <nl> + if ( backendWrappers . find ( data ) ! = backendWrappers . end ( ) ) <nl> + { <nl> + Ptr < BackendWrapper > baseBuffer = backendWrappers [ data ] ; <nl> + if ( preferableBackend = = DNN_BACKEND_HALIDE ) <nl> + { <nl> + CV_Assert ( haveHalide ( ) ) ; <nl> + # ifdef HAVE_HALIDE <nl> + return Ptr < BackendWrapper > ( new HalideBackendWrapper ( baseBuffer , shape ) ) ; <nl> + # endif / / HAVE_HALIDE <nl> + } <nl> + else <nl> + CV_Error ( Error : : StsNotImplemented , " Unknown backend identifier " ) ; <nl> + } <nl> + <nl> + Ptr < BackendWrapper > wrapper = wrapMat ( preferableBackend , preferableTarget , host ) ; <nl> + backendWrappers [ data ] = wrapper ; <nl> + return wrapper ; <nl> + } <nl> + <nl> + class HalideCompiler : public ParallelLoopBody <nl> + { <nl> + public : <nl> + HalideCompiler ( const MapIdToLayerData & layers_ , int preferableTarget_ ) <nl> + : layers ( & layers_ ) , preferableTarget ( preferableTarget_ ) { } <nl> + <nl> + void operator ( ) ( const Range & r ) const <nl> + { <nl> + MapIdToLayerData : : const_iterator it = layers - > begin ( ) ; <nl> + for ( int i = 0 ; i < r . start & & it ! = layers - > end ( ) ; + + i , + + it ) { } <nl> + for ( int i = r . start ; i < r . end & & it ! = layers - > end ( ) ; + + i , + + it ) <nl> + { <nl> + const LayerData & ld = it - > second ; <nl> + Ptr < Layer > layer = ld . layerInstance ; <nl> + bool skip = ld . skipFlags . find ( DNN_BACKEND_HALIDE ) - > second ; <nl> + if ( layer - > supportBackend ( DNN_BACKEND_HALIDE ) & & ! skip ) <nl> + { <nl> + Ptr < BackendNode > node = ld . backendNodes . find ( DNN_BACKEND_HALIDE ) - > second ; <nl> + dnn : : compileHalide ( ld . outputBlobs , node , preferableTarget ) ; <nl> + } <nl> + } <nl> + } <nl> + private : <nl> + const MapIdToLayerData * layers ; <nl> + int preferableTarget ; <nl> + } ; <nl> + <nl> void compileHalide ( ) <nl> { <nl> CV_TRACE_FUNCTION ( ) ; <nl> struct Net : : Impl <nl> ld . inputBlobs , ld . outputBlobs , <nl> preferableTarget ) ; <nl> } <nl> - dnn : : compileHalide ( ld . outputBlobs , ld . backendNodes [ DNN_BACKEND_HALIDE ] , <nl> - preferableTarget ) ; <nl> } <nl> } <nl> + parallel_for_ ( Range ( 0 , layers . size ( ) ) , HalideCompiler ( layers , preferableTarget ) ) ; <nl> } <nl> <nl> void clear ( ) <nl> struct Net : : Impl <nl> { <nl> CV_TRACE_FUNCTION ( ) ; <nl> <nl> - backendWrapper . reset ( ) ; <nl> if ( preferableBackend = = DNN_BACKEND_DEFAULT ) <nl> { <nl> CV_Assert ( preferableTarget = = DNN_TARGET_CPU ) ; <nl> struct Net : : Impl <nl> } <nl> / / No layers fusion . <nl> ldTop . skipFlags [ preferableBackend ] = false ; <nl> - std : : vector < Ptr < BackendWrapper > > inputs = <nl> - backendWrapper . wrap ( ldTop . inputBlobs , preferableBackend , <nl> - preferableTarget ) ; <nl> if ( preferableBackend = = DNN_BACKEND_HALIDE ) <nl> { <nl> - ldTop . backendNodes [ DNN_BACKEND_HALIDE ] = layerTop - > initHalide ( inputs ) ; <nl> + ldTop . 
backendNodes [ DNN_BACKEND_HALIDE ] = <nl> + layerTop - > initHalide ( ldTop . inputBlobsWrappers ) ; <nl> baseIt = it ; <nl> } <nl> else <nl> struct Net : : Impl <nl> <nl> / / bind inputs <nl> ld . inputBlobs . resize ( ninputs ) ; <nl> + ld . inputBlobsWrappers . resize ( ninputs ) ; <nl> for ( size_t i = 0 ; i < ninputs ; i + + ) <nl> { <nl> LayerPin from = ld . inputBlobsId [ i ] ; <nl> CV_Assert ( from . valid ( ) ) ; <nl> CV_DbgAssert ( layers . count ( from . lid ) & & ( int ) layers [ from . lid ] . outputBlobs . size ( ) > from . oid ) ; <nl> ld . inputBlobs [ i ] = & layers [ from . lid ] . outputBlobs [ from . oid ] ; <nl> + ld . inputBlobsWrappers [ i ] = layers [ from . lid ] . outputBlobsWrappers [ from . oid ] ; <nl> } <nl> <nl> LayersShapesMap : : const_iterator layerShapesIt = layersShapes . find ( lid ) ; <nl> struct Net : : Impl <nl> std : : vector < LayerPin > pinsForInternalBlobs ; <nl> bool maximizeReuse = preferableBackend = = DNN_BACKEND_HALIDE ; <nl> blobManager . allocateBlobsForLayer ( ld , layerShapesIt - > second , pinsForInternalBlobs , maximizeReuse ) ; <nl> + ld . outputBlobsWrappers . resize ( ld . outputBlobs . size ( ) ) ; <nl> + for ( int i = 0 ; i < ld . outputBlobs . size ( ) ; + + i ) <nl> + { <nl> + ld . outputBlobsWrappers [ i ] = wrap ( ld . outputBlobs [ i ] ) ; <nl> + } <nl> <nl> Ptr < Layer > layerPtr = ld . getLayerInstance ( ) ; <nl> { <nl> struct Net : : Impl <nl> getLayersShapes ( inputShapes , layersShapes ) ; <nl> <nl> blobManager . reset ( ) ; <nl> + backendWrappers . clear ( ) ; <nl> + blobManager . addReference ( LayerPin ( 0 , 0 ) ) ; <nl> for ( it = layers . begin ( ) ; it ! = layers . end ( ) ; + + it ) <nl> { <nl> const LayerData & ld = it - > second ; <nl> struct Net : : Impl <nl> ! layer - > supportBackend ( preferableBackend ) ) <nl> { <nl> if ( ! ld . skipFlags [ DNN_BACKEND_DEFAULT ] ) <nl> + { <nl> + for ( int i = 0 , n = ld . inputBlobsWrappers . size ( ) ; i < n ; + + i ) <nl> + { <nl> + if ( ! ld . inputBlobsWrappers [ i ] . empty ( ) ) <nl> + ld . inputBlobsWrappers [ i ] - > copyToHost ( ) ; <nl> + } <nl> layer - > forward ( ld . inputBlobs , ld . outputBlobs , ld . internals ) ; <nl> + for ( int i = 0 , n = ld . outputBlobsWrappers . size ( ) ; i < n ; + + i ) <nl> + { <nl> + if ( ! ld . outputBlobsWrappers [ i ] . empty ( ) ) <nl> + ld . outputBlobsWrappers [ i ] - > setHostDirty ( ) ; <nl> + } <nl> + } <nl> else <nl> tm . reset ( ) ; <nl> } <nl> else if ( ! ld . skipFlags [ preferableBackend ] ) <nl> { <nl> - std : : vector < Ptr < BackendWrapper > > outputs = <nl> - backendWrapper . wrap ( ld . outputBlobs , preferableBackend , preferableTarget ) ; <nl> Ptr < BackendNode > node = ld . backendNodes [ preferableBackend ] ; <nl> if ( preferableBackend = = DNN_BACKEND_HALIDE ) <nl> { <nl> - forwardHalide ( outputs , node ) ; <nl> + forwardHalide ( ld . outputBlobsWrappers , node ) ; <nl> } <nl> else <nl> { <nl> struct Net : : Impl <nl> CV_Error ( Error : : StsOutOfRange , " Layer \ " " + ld . name + " \ " produce only " + toString ( ld . outputBlobs . size ( ) ) + <nl> " outputs , the # " + toString ( pin . oid ) + " was requsted " ) ; <nl> } <nl> - if ( preferableBackend ! = DNN_BACKEND_DEFAULT ) <nl> + if ( preferableBackend ! = DNN_TARGET_CPU ) <nl> { <nl> / / Transfer data to CPU if it ' s require . <nl> - backendWrapper . wrap ( ld . outputBlobs [ pin . oid ] , preferableBackend , <nl> - preferableTarget ) - > copyToHost ( ) ; <nl> + ld . outputBlobsWrappers [ pin . 
oid ] - > copyToHost ( ) ; <nl> } <nl> else <nl> { <nl> void Net : : setInput ( const Mat & blob_ , const String & name ) <nl> <nl> LayerData & ld = impl - > layers [ pin . lid ] ; <nl> ld . outputBlobs . resize ( std : : max ( pin . oid + 1 , ( int ) ld . requiredOutputs . size ( ) ) ) ; <nl> + ld . outputBlobsWrappers . resize ( ld . outputBlobs . size ( ) ) ; <nl> MatShape prevShape = shape ( ld . outputBlobs [ pin . oid ] ) ; <nl> bool oldShape = prevShape = = shape ( blob_ ) ; <nl> if ( oldShape ) <nl> void Net : : setInput ( const Mat & blob_ , const String & name ) <nl> else <nl> ld . outputBlobs [ pin . oid ] = blob_ . clone ( ) ; <nl> <nl> + if ( ! ld . outputBlobsWrappers [ pin . oid ] . empty ( ) ) <nl> + { <nl> + ld . outputBlobsWrappers [ pin . oid ] - > setHostDirty ( ) ; <nl> + } <nl> impl - > netWasAllocated = impl - > netWasAllocated & & oldShape ; <nl> } <nl> <nl> mmm a / modules / dnn / src / op_halide . cpp <nl> ppp b / modules / dnn / src / op_halide . cpp <nl> namespace dnn <nl> { <nl> <nl> # ifdef HAVE_HALIDE <nl> + static MatShape getBufferShape ( const MatShape & shape ) <nl> + { <nl> + if ( shape . size ( ) = = 2 | | shape . size ( ) = = 4 ) <nl> + { <nl> + int w , h , c , n ; <nl> + getCanonicalSize ( shape , & w , & h , & c , & n ) ; <nl> + return { w , h , c , n } ; <nl> + } <nl> + else <nl> + { <nl> + MatShape bufferShape ( shape ) ; <nl> + std : : reverse ( bufferShape . begin ( ) , bufferShape . end ( ) ) ; <nl> + return bufferShape ; <nl> + } <nl> + } <nl> + <nl> + static MatShape getBufferShape ( const MatSize & size ) <nl> + { <nl> + return getBufferShape ( MatShape ( size . p , size . p + size [ - 1 ] ) ) ; <nl> + } <nl> + <nl> Halide : : Buffer < float > wrapToHalideBuffer ( const Mat & mat ) <nl> { <nl> - int n , c , w , h ; <nl> - getCanonicalSize ( mat . size , & w , & h , & c , & n ) ; <nl> - return wrapToHalideBuffer ( mat , { w , h , c , n } ) ; <nl> + return wrapToHalideBuffer ( mat , getBufferShape ( mat . size ) ) ; <nl> } <nl> <nl> Halide : : Buffer < float > wrapToHalideBuffer ( const Mat & mat , <nl> HalideBackendWrapper : : HalideBackendWrapper ( const Ptr < BackendWrapper > & base , <nl> : BackendWrapper ( DNN_BACKEND_HALIDE , base - > targetId ) <nl> { <nl> managesDevMemory = false ; <nl> - int w , h , c , n ; <nl> - getCanonicalSize ( shape , & w , & h , & c , & n ) ; <nl> Halide : : Buffer < float > baseBuffer = halideBuffer ( base ) ; <nl> buffer = Halide : : Buffer < float > ( ( float * ) baseBuffer . raw_buffer ( ) - > host , <nl> - { w , h , c , n } ) ; <nl> + getBufferShape ( shape ) ) ; <nl> if ( baseBuffer . has_device_allocation ( ) ) <nl> { <nl> buffer . raw_buffer ( ) - > device = baseBuffer . raw_buffer ( ) - > device ; <nl> HalideBackendWrapper : : ~ HalideBackendWrapper ( ) <nl> <nl> void HalideBackendWrapper : : copyToHost ( ) <nl> { <nl> - CV_Assert ( targetId = = DNN_TARGET_CPU | | buffer . device_dirty ( ) ) ; <nl> if ( buffer . device_dirty ( ) ) <nl> { <nl> buffer . device_sync ( ) ; <nl> buffer . copy_to_host ( ) ; <nl> } <nl> } <nl> + <nl> + void HalideBackendWrapper : : setHostDirty ( ) <nl> + { <nl> + buffer . set_device_dirty ( false ) ; <nl> + buffer . set_host_dirty ( ) ; <nl> + } <nl> # endif / / HAVE_HALIDE <nl> <nl> - void getCanonicalSize ( const MatSize & size , int * width , int * height , <nl> - int * channels , int * batch ) <nl> + void getCanonicalSize ( const MatSize & size , int * w , int * h , int * c , int * n ) <nl> { <nl> - const int dims = size . 
p [ - 1 ] ; <nl> - CV_Assert ( dims = = 2 | | dims = = 4 ) ; <nl> - * batch = size [ 0 ] ; <nl> - * channels = size [ 1 ] ; <nl> - if ( dims = = 4 ) <nl> - { <nl> - * width = size [ 3 ] ; <nl> - * height = size [ 2 ] ; <nl> - } <nl> - else <nl> - { <nl> - * width = 1 ; <nl> - * height = 1 ; <nl> - } <nl> + getCanonicalSize ( MatShape ( size . p , size . p + size [ - 1 ] ) , w , h , c , n ) ; <nl> } <nl> <nl> void getCanonicalSize ( const MatShape & shape , int * width , int * height , <nl> void getCanonicalSize ( const MatShape & shape , int * width , int * height , <nl> } <nl> } <nl> <nl> - void compileHalide ( std : : vector < Mat > & outputs , Ptr < BackendNode > & node , int targetId ) <nl> + void compileHalide ( const std : : vector < Mat > & outputs , Ptr < BackendNode > & node , int targetId ) <nl> { <nl> # ifdef HAVE_HALIDE <nl> CV_Assert ( ! node . empty ( ) ) ; <nl> mmm a / modules / dnn / src / op_halide . hpp <nl> ppp b / modules / dnn / src / op_halide . hpp <nl> namespace dnn <nl> <nl> virtual void copyToHost ( ) ; <nl> <nl> + virtual void setHostDirty ( ) ; <nl> + <nl> Halide : : Buffer < float > buffer ; <nl> <nl> private : <nl> namespace dnn <nl> const Ptr < BackendNode > & node ) ; <nl> <nl> / / Compile Halide pipeline to specific target . Use outputs to set bounds of functions . <nl> - void compileHalide ( std : : vector < Mat > & outputs , Ptr < BackendNode > & node , int targetId ) ; <nl> + void compileHalide ( const std : : vector < Mat > & outputs , Ptr < BackendNode > & node , int targetId ) ; <nl> <nl> bool haveHalide ( ) ; <nl> } / / namespace dnn <nl> mmm a / modules / dnn / test / test_halide_layers . cpp <nl> ppp b / modules / dnn / test / test_halide_layers . cpp <nl> INSTANTIATE_TEST_CASE_P ( Layer_Test_Halide , Eltwise , Combine ( <nl> / * num convs * / Values ( 1 , 2 , 3 ) , <nl> / * weighted ( for sum only ) * / Bool ( ) <nl> ) ) ; <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / Mixed backends <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + TEST ( MixedBackends_Halide_Default_Halide , Accuracy ) <nl> + { <nl> + / / Just a layer that supports Halide backend . <nl> + LayerParams lrn ; <nl> + lrn . type = " LRN " ; <nl> + lrn . name = " testLRN " ; <nl> + <nl> + / / Some of layers that doesn ' t supports Halide backend yet . <nl> + LayerParams mvn ; <nl> + mvn . type = " MVN " ; <nl> + mvn . name = " testMVN " ; <nl> + <nl> + / / Halide layer again . <nl> + LayerParams lrn2 ; <nl> + lrn2 . type = " LRN " ; <nl> + lrn2 . name = " testLRN2 " ; <nl> + <nl> + Net net ; <nl> + int lrnId = net . addLayer ( lrn . name , lrn . type , lrn ) ; <nl> + net . connect ( 0 , 0 , lrnId , 0 ) ; <nl> + net . addLayerToPrev ( mvn . name , mvn . type , mvn ) ; <nl> + net . addLayerToPrev ( lrn2 . name , lrn2 . type , lrn2 ) ; <nl> + <nl> + Mat input ( { 4 , 3 , 5 , 6 } , CV_32F ) ; <nl> + randu ( input , - 1 . 0f , 1 . 0f ) ; <nl> + net . setInput ( input ) ; <nl> + Mat outputDefault = net . forward ( ) . clone ( ) ; <nl> + <nl> + net . setPreferableBackend ( DNN_BACKEND_HALIDE ) ; <nl> + net . setInput ( input ) ; <nl> + Mat outputHalide = net . forward ( ) . clone ( ) ; <nl> + normAssert ( outputDefault , outputHalide ) ; <nl> + <nl> + net . setPreferableTarget ( DNN_TARGET_OPENCL ) ; <nl> + net . 
setInput ( input ) ; <nl> + outputHalide = net . forward ( ) . clone ( ) ; <nl> + normAssert ( outputDefault , outputHalide ) ; <nl> + } <nl> # endif / / HAVE_HALIDE <nl> <nl> } / / namespace cvtest <nl> mmm a / modules / dnn / test / test_halide_nets . cpp <nl> ppp b / modules / dnn / test / test_halide_nets . cpp <nl> static void test ( const std : : string & weights , const std : : string & proto , <nl> netHalide . setInput ( blobFromImage ( input . clone ( ) , 1 . 0 , Size ( ) , Scalar ( ) , false ) ) ; <nl> <nl> normAssert ( outputDefault , outputHalide , " Second run " , l1 , lInf ) ; <nl> + std : : cout < < " . " < < std : : endl ; <nl> <nl> / / Swap backends . <nl> netHalide . setPreferableBackend ( DNN_BACKEND_DEFAULT ) ; <nl> static void test ( const std : : string & weights , const std : : string & proto , <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / CPU target <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + TEST ( Reproducibility_MobileNetSSD_Halide , Accuracy ) <nl> + { <nl> + test ( findDataFile ( " dnn / MobileNetSSD_deploy . caffemodel " , false ) , <nl> + findDataFile ( " dnn / MobileNetSSD_deploy . prototxt " , false ) , <nl> + " " , 300 , 300 , " detection_out " , " caffe " , DNN_TARGET_CPU ) ; <nl> + } ; <nl> + <nl> + TEST ( Reproducibility_SSD_Halide , Accuracy ) <nl> + { <nl> + test ( findDataFile ( " dnn / VGG_ILSVRC2016_SSD_300x300_iter_440000 . caffemodel " , false ) , <nl> + findDataFile ( " dnn / ssd_vgg16 . prototxt " , false ) , <nl> + " " , 300 , 300 , " detection_out " , " caffe " , DNN_TARGET_CPU ) ; <nl> + } ; <nl> + <nl> TEST ( Reproducibility_GoogLeNet_Halide , Accuracy ) <nl> { <nl> test ( findDataFile ( " dnn / bvlc_googlenet . caffemodel " , false ) , <nl> TEST ( Reproducibility_ENet_Halide , Accuracy ) <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / OpenCL target <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + TEST ( Reproducibility_MobileNetSSD_Halide_opencl , Accuracy ) <nl> + { <nl> + test ( findDataFile ( " dnn / MobileNetSSD_deploy . caffemodel " , false ) , <nl> + findDataFile ( " dnn / MobileNetSSD_deploy . prototxt " , false ) , <nl> + " " , 300 , 300 , " detection_out " , " caffe " , DNN_TARGET_OPENCL ) ; <nl> + } ; <nl> + <nl> + TEST ( Reproducibility_SSD_Halide_opencl , Accuracy ) <nl> + { <nl> + test ( findDataFile ( " dnn / VGG_ILSVRC2016_SSD_300x300_iter_440000 . caffemodel " , false ) , <nl> + findDataFile ( " dnn / ssd_vgg16 . prototxt " , false ) , <nl> + " " , 300 , 300 , " detection_out " , " caffe " , DNN_TARGET_OPENCL ) ; <nl> + } ; <nl> + <nl> TEST ( Reproducibility_GoogLeNet_Halide_opencl , Accuracy ) <nl> { <nl> test ( findDataFile ( " dnn / bvlc_googlenet . caffemodel " , false ) , <nl>
MobileNet-SSD and VGG-SSD topologies in Halide
opencv/opencv
cad7c4d51d3ba0b4e6df3f1439e45625cc95f20f
2017-09-08T06:55:53Z
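The record above adds the mixed Halide/CPU execution path: each host cv::Mat gets a cached BackendWrapper keyed by its data pointer, and copyToHost()/setHostDirty() keep host and device copies in sync when a layer without Halide support (MVN in the new test) sits between Halide layers. Below is a minimal usage sketch of that path through OpenCV's public API; the model files, input, and output-layer name are taken from the tests in the diff but act as placeholders here, and selecting DNN_BACKEND_HALIDE assumes an OpenCV build with Halide enabled (HAVE_HALIDE).

# Sketch only: assumes an OpenCV build with Halide support; file names must exist locally.
import cv2 as cv
import numpy as np

net = cv.dnn.readNetFromCaffe("MobileNetSSD_deploy.prototxt",
                              "MobileNetSSD_deploy.caffemodel")

# Request the Halide backend; layers without Halide support fall back to the
# default CPU path, with the wrappers added above handling the data transfers.
net.setPreferableBackend(cv.dnn.DNN_BACKEND_HALIDE)
net.setPreferableTarget(cv.dnn.DNN_TARGET_OPENCL)  # or cv.dnn.DNN_TARGET_CPU

frame = np.random.randint(0, 256, (300, 300, 3), dtype=np.uint8)  # stand-in input
blob = cv.dnn.blobFromImage(frame, 1.0, (300, 300))
net.setInput(blob)
detections = net.forward("detection_out")

A second forward() call reuses the cached wrappers, which is what the new backendWrappers map keyed on host data pointers provides.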
mmm a / imgui . cpp <nl> ppp b / imgui . cpp <nl> <nl> - MISSION STATEMENT <nl> - END - USER GUIDE <nl> - PROGRAMMER GUIDE <nl> - - TROUBLESHOOTING & FREQUENTLY ASKED QUESTIONS <nl> - API BREAKING CHANGES <nl> + - TROUBLESHOOTING & FREQUENTLY ASKED QUESTIONS <nl> - ISSUES & TODO - LIST <nl> - CODE <nl> - SAMPLE CODE <nl> <nl> / / swap video buffer , etc . <nl> } <nl> <nl> + API BREAKING CHANGES <nl> + = = = = = = = = = = = = = = = = = = = = <nl> + <nl> + Occasionally introducing changes that are breaking the API . The breakage are generally minor and easy to fix . <nl> + Here is a change - log of API breaking changes , if you are using one of the functions listed , expect to have to fix some code . <nl> + <nl> + - 2014 / 11 / 26 ( 1 . 17 ) retired IMGUI_ONCE_UPON_A_FRAME helper macro in favor of ImGuiOnceUponAFrame type that works on all compilers . <nl> + - 2014 / 11 / 07 ( 1 . 15 ) renamed IsHovered ( ) to IsItemHovered ( ) <nl> + - 2014 / 10 / 02 ( 1 . 14 ) renamed IMGUI_INCLUDE_IMGUI_USER_CPP to IMGUI_INCLUDE_IMGUI_USER_INL and imgui_user . cpp to imgui_user . inl ( more IDE friendly ) <nl> + - 2014 / 09 / 25 ( 1 . 13 ) removed ' text_end ' parameter from IO . SetClipboardTextFn ( the string is now always zero - terminated for simplicity ) <nl> + - 2014 / 09 / 24 ( 1 . 12 ) renamed SetFontScale ( ) to SetWindowFontScale ( ) <nl> + - 2014 / 09 / 24 ( 1 . 12 ) moved IM_MALLOC / IM_REALLOC / IM_FREE preprocessor defines to IO . MemAllocFn / IO . MemReallocFn / IO . MemFreeFn <nl> + - 2014 / 08 / 30 ( 1 . 09 ) removed IO . FontHeight ( now computed automatically ) <nl> + - 2014 / 08 / 30 ( 1 . 09 ) moved IMGUI_FONT_TEX_UV_FOR_WHITE preprocessor define to IO . FontTexUvForWhite <nl> + - 2014 / 08 / 28 ( 1 . 09 ) changed the behavior of IO . PixelCenterOffset following various rendering fixes <nl> + <nl> TROUBLESHOOTING & FREQUENTLY ASKED QUESTIONS <nl> = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> <nl> <nl> - tip : you can create widgets without a Begin ( ) / End ( ) block , they will go in an implicit window called " Debug " <nl> - tip : read the ShowTestWindow ( ) code for more example of how to use ImGui ! <nl> <nl> - API BREAKING CHANGES <nl> - = = = = = = = = = = = = = = = = = = = = <nl> - <nl> - - 2014 / 11 / 07 ( 1 . 15 ) renamed IsHovered ( ) to IsItemHovered ( ) <nl> - - 2014 / 10 / 02 ( 1 . 14 ) renamed IMGUI_INCLUDE_IMGUI_USER_CPP to IMGUI_INCLUDE_IMGUI_USER_INL and imgui_user . cpp to imgui_user . inl ( more IDE friendly ) <nl> - - 2014 / 09 / 25 ( 1 . 13 ) removed ' text_end ' parameter from IO . SetClipboardTextFn ( the string is now always zero - terminated for simplicity ) <nl> - - 2014 / 09 / 24 ( 1 . 12 ) renamed SetFontScale ( ) to SetWindowFontScale ( ) <nl> - - 2014 / 09 / 24 ( 1 . 12 ) moved IM_MALLOC / IM_REALLOC / IM_FREE preprocessor defines to IO . MemAllocFn / IO . MemReallocFn / IO . MemFreeFn <nl> - - 2014 / 08 / 30 ( 1 . 09 ) removed IO . FontHeight ( now computed automatically ) <nl> - - 2014 / 08 / 30 ( 1 . 09 ) moved IMGUI_FONT_TEX_UV_FOR_WHITE preprocessor define to IO . FontTexUvForWhite <nl> - - 2014 / 08 / 28 ( 1 . 09 ) changed the behavior of IO . PixelCenterOffset following various rendering fixes <nl> - <nl> ISSUES & TODO - LIST <nl> = = = = = = = = = = = = = = = = = = <nl> <nl>
Moved API Breaking Changes section of the documentation above the programmer's FAQ.
ocornut/imgui
abe45e9976eaf05028c9e1971d08f32bb7816d7c
2014-11-26T22:27:48Z
mmm a / tensorflow / python / keras / benchmarks / BUILD <nl> ppp b / tensorflow / python / keras / benchmarks / BUILD <nl> cuda_py_test ( <nl> ] , <nl> ) <nl> <nl> - py_test ( <nl> + cuda_py_test ( <nl> name = " text_classification_transformer_benchmark_test " , <nl> srcs = [ " keras_examples_benchmarks / text_classification_transformer_benchmark_test . py " ] , <nl> python_version = " PY3 " , <nl> cuda_py_test ( <nl> ] , <nl> ) <nl> <nl> - py_test ( <nl> + cuda_py_test ( <nl> name = " mnist_conv_benchmark_test " , <nl> srcs = [ " keras_examples_benchmarks / mnist_conv_benchmark_test . py " ] , <nl> python_version = " PY3 " , <nl> py_test ( <nl> ] , <nl> ) <nl> <nl> - py_test ( <nl> + cuda_py_test ( <nl> name = " mnist_hierarchical_rnn_benchmark_test " , <nl> srcs = [ " keras_examples_benchmarks / mnist_hierarchical_rnn_benchmark_test . py " ] , <nl> python_version = " PY3 " , <nl> py_test ( <nl> ] , <nl> ) <nl> <nl> - py_test ( <nl> + cuda_py_test ( <nl> name = " mnist_irnn_benchmark_test " , <nl> srcs = [ " keras_examples_benchmarks / mnist_irnn_benchmark_test . py " ] , <nl> python_version = " PY3 " , <nl> py_test ( <nl> ] , <nl> ) <nl> <nl> - py_test ( <nl> + cuda_py_test ( <nl> name = " reuters_mlp_benchmark_test " , <nl> srcs = [ " keras_examples_benchmarks / reuters_mlp_benchmark_test . py " ] , <nl> python_version = " PY3 " , <nl> mmm a / tensorflow / python / keras / benchmarks / keras_examples_benchmarks / mnist_conv_benchmark_test . py <nl> ppp b / tensorflow / python / keras / benchmarks / keras_examples_benchmarks / mnist_conv_benchmark_test . py <nl> def benchmark_conv_mnist_bs_256 ( self ) : <nl> self . report_benchmark ( <nl> iters = run_iters , wall_time = wall_time , metrics = metrics , extras = extras ) <nl> <nl> + def benchmark_conv_mnist_bs_256_gpu_2 ( self ) : <nl> + " " " Measure performance with batch_size = 256 , run_iters = 3 , gpu = 2 and <nl> + <nl> + distribution_strategy = ' mirrored ' <nl> + " " " <nl> + batch_size = 256 <nl> + run_iters = 3 <nl> + metrics , wall_time , extras = benchmark_util . measure_performance ( <nl> + self . _build_model , <nl> + x = self . x_train , <nl> + y = self . y_train , <nl> + batch_size = batch_size , <nl> + run_iters = run_iters , <nl> + num_gpus = 2 , <nl> + distribution_strategy = ' mirrored ' , <nl> + epochs = self . epochs , <nl> + optimizer = ' adam ' , <nl> + loss = ' categorical_crossentropy ' , <nl> + metrics = [ ' accuracy ' ] ) <nl> + <nl> + self . report_benchmark ( <nl> + iters = run_iters , wall_time = wall_time , metrics = metrics , extras = extras ) <nl> + <nl> def benchmark_conv_mnist_bs_512 ( self ) : <nl> " " " Measure performance with batch_size = 512 and run_iters = 3 . " " " <nl> batch_size = 512 <nl> mmm a / tensorflow / python / keras / benchmarks / keras_examples_benchmarks / mnist_hierarchical_rnn_benchmark_test . py <nl> ppp b / tensorflow / python / keras / benchmarks / keras_examples_benchmarks / mnist_hierarchical_rnn_benchmark_test . py <nl> def benchmark_hrnn_mnist_bs_256 ( self ) : <nl> self . report_benchmark ( <nl> iters = run_iters , wall_time = wall_time , metrics = metrics , extras = extras ) <nl> <nl> + def benchmark_hrnn_mnist_bs_256_gpu_2 ( self ) : <nl> + " " " Measure performance with batch_size = 256 , run_iters = 4 , gpu = 2 and <nl> + <nl> + distribution_strategy = ' mirrored ' <nl> + " " " <nl> + batch_size = 256 <nl> + run_iters = 4 <nl> + metrics , wall_time , extras = benchmark_util . measure_performance ( <nl> + self . _build_model , <nl> + x = self . 
x_train , <nl> + y = self . y_train , <nl> + batch_size = batch_size , <nl> + run_iters = run_iters , <nl> + num_gpus = 2 , <nl> + distribution_strategy = ' mirrored ' , <nl> + optimizer = ' rmsprop ' , <nl> + loss = ' categorical_crossentropy ' , <nl> + metrics = [ ' accuracy ' ] ) <nl> + <nl> + self . report_benchmark ( <nl> + iters = run_iters , wall_time = wall_time , metrics = metrics , extras = extras ) <nl> + <nl> def benchmark_hrnn_mnist_bs_512 ( self ) : <nl> " " " Measure performance with batch_size = 512 and run_iters = 5 . " " " <nl> batch_size = 512 <nl> mmm a / tensorflow / python / keras / benchmarks / keras_examples_benchmarks / mnist_irnn_benchmark_test . py <nl> ppp b / tensorflow / python / keras / benchmarks / keras_examples_benchmarks / mnist_irnn_benchmark_test . py <nl> def benchmark_irnn_mnist_bs_1024 ( self ) : <nl> self . report_benchmark ( <nl> iters = run_iters , wall_time = wall_time , metrics = metrics , extras = extras ) <nl> <nl> + def benchmark_irnn_mnist_bs_1024_gpu_3 ( self ) : <nl> + " " " Measure performance with batch_size = 1024 , run_iters = 3 , gpu = 3 and <nl> + <nl> + distribution_strategy = ' mirrored ' <nl> + " " " <nl> + batch_size = 1024 <nl> + run_iters = 3 <nl> + metrics , wall_time , extras = benchmark_util . measure_performance ( <nl> + self . _build_model , <nl> + x = self . x_train , <nl> + y = self . y_train , <nl> + batch_size = batch_size , <nl> + run_iters = run_iters , <nl> + num_gpus = 3 , <nl> + distribution_strategy = ' mirrored ' , <nl> + optimizer = tf . keras . optimizers . RMSprop ( learning_rate = self . learning_rate ) , <nl> + loss = ' categorical_crossentropy ' , <nl> + metrics = [ ' accuracy ' ] ) <nl> + <nl> + self . report_benchmark ( <nl> + iters = run_iters , wall_time = wall_time , metrics = metrics , extras = extras ) <nl> + <nl> <nl> if __name__ = = ' __main__ ' : <nl> tf . test . main ( ) <nl> mmm a / tensorflow / python / keras / benchmarks / keras_examples_benchmarks / reuters_mlp_benchmark_test . py <nl> ppp b / tensorflow / python / keras / benchmarks / keras_examples_benchmarks / reuters_mlp_benchmark_test . py <nl> def benchmark_mlp_reuters_bs_128 ( self ) : <nl> self . report_benchmark ( <nl> iters = run_iters , wall_time = wall_time , metrics = metrics , extras = extras ) <nl> <nl> + def benchmark_mlp_reuters_bs_128_gpu_3 ( self ) : <nl> + " " " Measure performance with batch_size = 128 , run_iters = 2 , gpu = 3 and <nl> + <nl> + distribution_strategy = ' mirrored ' <nl> + " " " <nl> + batch_size = 128 <nl> + run_iters = 2 <nl> + metrics , wall_time , extras = benchmark_util . measure_performance ( <nl> + self . _build_model , <nl> + x = self . x_train , <nl> + y = self . y_train , <nl> + batch_size = batch_size , <nl> + run_iters = run_iters , <nl> + num_gpus = 3 , <nl> + distribution_strategy = ' mirrored ' , <nl> + epochs = self . epochs , <nl> + optimizer = ' adam ' , <nl> + loss = ' categorical_crossentropy ' , <nl> + metrics = [ ' accuracy ' ] ) <nl> + <nl> + self . report_benchmark ( <nl> + iters = run_iters , wall_time = wall_time , metrics = metrics , extras = extras ) <nl> + <nl> def benchmark_mlp_reuters_bs_256 ( self ) : <nl> " " " Measure performance with batch_size = 256 and run_iters = 3 . " " " <nl> batch_size = 256 <nl> mmm a / tensorflow / python / keras / benchmarks / keras_examples_benchmarks / text_classification_transformer_benchmark_test . py <nl> ppp b / tensorflow / python / keras / benchmarks / keras_examples_benchmarks / text_classification_transformer_benchmark_test . 
py <nl> def benchmark_text_classification_bs_128 ( self ) : <nl> self . report_benchmark ( <nl> iters = run_iters , wall_time = wall_time , metrics = metrics , extras = extras ) <nl> <nl> + def benchmark_text_classification_bs_256 ( self ) : <nl> + " " " Measure performance with batch_size = 256 and run_iters = 3 . " " " <nl> + batch_size = 256 <nl> + run_iters = 3 <nl> + metrics , wall_time , extras = benchmark_util . measure_performance ( <nl> + self . _build_model , <nl> + x = self . imdb_x , <nl> + y = self . imdb_y , <nl> + batch_size = batch_size , <nl> + run_iters = run_iters , <nl> + optimizer = ' adam ' , <nl> + loss = ' sparse_categorical_crossentropy ' , <nl> + metrics = [ ' accuracy ' ] ) <nl> + <nl> + self . report_benchmark ( <nl> + iters = run_iters , wall_time = wall_time , metrics = metrics , extras = extras ) <nl> + <nl> def benchmark_text_classification_bs_512 ( self ) : <nl> " " " Measure performance with batch_size = 512 and run_iters = 4 . " " " <nl> batch_size = 512 <nl> def benchmark_text_classification_bs_512 ( self ) : <nl> self . report_benchmark ( <nl> iters = run_iters , wall_time = wall_time , metrics = metrics , extras = extras ) <nl> <nl> - def benchmark_text_classification_bs_256 ( self ) : <nl> - " " " Measure performance with batch_size = 256 and run_iters = 3 . " " " <nl> - batch_size = 256 <nl> - run_iters = 3 <nl> + def benchmark_text_classification_bs_512_gpu_2 ( self ) : <nl> + " " " Measure performance with batch_size = 512 , run_iters = 4 , gpu = 1 and <nl> + <nl> + distribution_strategy = ' mirrored ' <nl> + " " " <nl> + batch_size = 512 <nl> + run_iters = 4 <nl> metrics , wall_time , extras = benchmark_util . measure_performance ( <nl> self . _build_model , <nl> x = self . imdb_x , <nl> y = self . imdb_y , <nl> batch_size = batch_size , <nl> run_iters = run_iters , <nl> + num_gpus = 2 , <nl> + distribution_strategy = ' mirrored ' , <nl> optimizer = ' adam ' , <nl> loss = ' sparse_categorical_crossentropy ' , <nl> metrics = [ ' accuracy ' ] ) <nl>
Merge pull request from xingyu-long:xingyu-add-other-gpu-usage
tensorflow/tensorflow
2e0f7039ef9e42cf94ac4be49c690fd9351969bb
2020-07-28T14:59:04Z
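The benchmark methods added above route num_gpus and distribution_strategy='mirrored' through benchmark_util.measure_performance. As a hedged sketch of the pattern those options imply (not the helper's actual internals), the model is built and compiled inside a tf.distribute.MirroredStrategy scope spanning the requested GPUs; the function name and device count below are illustrative assumptions.

# Hedged sketch of the mirrored-strategy pattern; measure_performance's real
# implementation may differ. Assumes the requested GPUs are actually present.
import tensorflow as tf

def fit_with_mirrored_strategy(build_model, x, y, batch_size, epochs, num_gpus=2):
    devices = ["/gpu:%d" % i for i in range(num_gpus)]
    strategy = tf.distribute.MirroredStrategy(devices=devices)
    with strategy.scope():
        model = build_model()
        model.compile(optimizer="adam",
                      loss="categorical_crossentropy",
                      metrics=["accuracy"])
    return model.fit(x, y, batch_size=batch_size, epochs=epochs, verbose=0)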
mmm a / src / rest . cpp <nl> ppp b / src / rest . cpp <nl> static bool rest_block_notxdetails ( const util : : Ref & context , HTTPRequest * req , c <nl> } <nl> <nl> / / A bit of a hack - dependency on a function defined in rpc / blockchain . cpp <nl> - UniValue getblockchaininfo ( const JSONRPCRequest & request ) ; <nl> + RPCHelpMan getblockchaininfo ( ) ; <nl> <nl> static bool rest_chaininfo ( const util : : Ref & context , HTTPRequest * req , const std : : string & strURIPart ) <nl> { <nl> static bool rest_chaininfo ( const util : : Ref & context , HTTPRequest * req , const std <nl> case RetFormat : : JSON : { <nl> JSONRPCRequest jsonRequest ( context ) ; <nl> jsonRequest . params = UniValue ( UniValue : : VARR ) ; <nl> - UniValue chainInfoObject = getblockchaininfo ( jsonRequest ) ; <nl> + UniValue chainInfoObject = getblockchaininfo ( ) . HandleRequest ( jsonRequest ) ; <nl> std : : string strJSON = chainInfoObject . write ( ) + " \ n " ; <nl> req - > WriteHeader ( " Content - Type " , " application / json " ) ; <nl> req - > WriteReply ( HTTP_OK , strJSON ) ; <nl> mmm a / src / rpc / blockchain . cpp <nl> ppp b / src / rpc / blockchain . cpp <nl> UniValue blockToJSON ( const CBlock & block , const CBlockIndex * tip , const CBlockIn <nl> return result ; <nl> } <nl> <nl> - static UniValue getblockcount ( const JSONRPCRequest & request ) <nl> + static RPCHelpMan getblockcount ( ) <nl> { <nl> - RPCHelpMan { " getblockcount " , <nl> + return RPCHelpMan { " getblockcount " , <nl> " \ nReturns the height of the most - work fully - validated chain . \ n " <nl> " The genesis block has height 0 . \ n " , <nl> { } , <nl> static UniValue getblockcount ( const JSONRPCRequest & request ) <nl> HelpExampleCli ( " getblockcount " , " " ) <nl> + HelpExampleRpc ( " getblockcount " , " " ) <nl> } , <nl> - } . Check ( request ) ; <nl> - <nl> + [ & ] ( const RPCHelpMan & self , const JSONRPCRequest & request ) - > UniValue <nl> + { <nl> LOCK ( cs_main ) ; <nl> return : : ChainActive ( ) . Height ( ) ; <nl> + } , <nl> + } ; <nl> } <nl> <nl> - static UniValue getbestblockhash ( const JSONRPCRequest & request ) <nl> + static RPCHelpMan getbestblockhash ( ) <nl> { <nl> - RPCHelpMan { " getbestblockhash " , <nl> + return RPCHelpMan { " getbestblockhash " , <nl> " \ nReturns the hash of the best ( tip ) block in the most - work fully - validated chain . \ n " , <nl> { } , <nl> RPCResult { <nl> static UniValue getbestblockhash ( const JSONRPCRequest & request ) <nl> HelpExampleCli ( " getbestblockhash " , " " ) <nl> + HelpExampleRpc ( " getbestblockhash " , " " ) <nl> } , <nl> - } . Check ( request ) ; <nl> - <nl> + [ & ] ( const RPCHelpMan & self , const JSONRPCRequest & request ) - > UniValue <nl> + { <nl> LOCK ( cs_main ) ; <nl> return : : ChainActive ( ) . Tip ( ) - > GetBlockHash ( ) . GetHex ( ) ; <nl> + } , <nl> + } ; <nl> } <nl> <nl> void RPCNotifyBlockChange ( const CBlockIndex * pindex ) <nl> void RPCNotifyBlockChange ( const CBlockIndex * pindex ) <nl> cond_blockchange . notify_all ( ) ; <nl> } <nl> <nl> - static UniValue waitfornewblock ( const JSONRPCRequest & request ) <nl> + static RPCHelpMan waitfornewblock ( ) <nl> { <nl> - RPCHelpMan { " waitfornewblock " , <nl> + return RPCHelpMan { " waitfornewblock " , <nl> " \ nWaits for a specific new block and returns useful info about it . \ n " <nl> " \ nReturns the current block on timeout or exit . 
\ n " , <nl> { <nl> static UniValue waitfornewblock ( const JSONRPCRequest & request ) <nl> HelpExampleCli ( " waitfornewblock " , " 1000 " ) <nl> + HelpExampleRpc ( " waitfornewblock " , " 1000 " ) <nl> } , <nl> - } . Check ( request ) ; <nl> + [ & ] ( const RPCHelpMan & self , const JSONRPCRequest & request ) - > UniValue <nl> + { <nl> int timeout = 0 ; <nl> if ( ! request . params [ 0 ] . isNull ( ) ) <nl> timeout = request . params [ 0 ] . get_int ( ) ; <nl> static UniValue waitfornewblock ( const JSONRPCRequest & request ) <nl> ret . pushKV ( " hash " , block . hash . GetHex ( ) ) ; <nl> ret . pushKV ( " height " , block . height ) ; <nl> return ret ; <nl> + } , <nl> + } ; <nl> } <nl> <nl> - static UniValue waitforblock ( const JSONRPCRequest & request ) <nl> + static RPCHelpMan waitforblock ( ) <nl> { <nl> - RPCHelpMan { " waitforblock " , <nl> + return RPCHelpMan { " waitforblock " , <nl> " \ nWaits for a specific new block and returns useful info about it . \ n " <nl> " \ nReturns the current block on timeout or exit . \ n " , <nl> { <nl> static UniValue waitforblock ( const JSONRPCRequest & request ) <nl> HelpExampleCli ( " waitforblock " , " \ " 0000000000079f8ef3d2c688c244eb7a4570b24c9ed7b4a8c619eb02596f8862 \ " 1000 " ) <nl> + HelpExampleRpc ( " waitforblock " , " \ " 0000000000079f8ef3d2c688c244eb7a4570b24c9ed7b4a8c619eb02596f8862 \ " , 1000 " ) <nl> } , <nl> - } . Check ( request ) ; <nl> + [ & ] ( const RPCHelpMan & self , const JSONRPCRequest & request ) - > UniValue <nl> + { <nl> int timeout = 0 ; <nl> <nl> uint256 hash ( ParseHashV ( request . params [ 0 ] , " blockhash " ) ) ; <nl> static UniValue waitforblock ( const JSONRPCRequest & request ) <nl> ret . pushKV ( " hash " , block . hash . GetHex ( ) ) ; <nl> ret . pushKV ( " height " , block . height ) ; <nl> return ret ; <nl> + } , <nl> + } ; <nl> } <nl> <nl> - static UniValue waitforblockheight ( const JSONRPCRequest & request ) <nl> + static RPCHelpMan waitforblockheight ( ) <nl> { <nl> - RPCHelpMan { " waitforblockheight " , <nl> + return RPCHelpMan { " waitforblockheight " , <nl> " \ nWaits for ( at least ) block height and returns the height and hash \ n " <nl> " of the current tip . \ n " <nl> " \ nReturns the current block on timeout or exit . \ n " , <nl> static UniValue waitforblockheight ( const JSONRPCRequest & request ) <nl> HelpExampleCli ( " waitforblockheight " , " 100 1000 " ) <nl> + HelpExampleRpc ( " waitforblockheight " , " 100 , 1000 " ) <nl> } , <nl> - } . Check ( request ) ; <nl> + [ & ] ( const RPCHelpMan & self , const JSONRPCRequest & request ) - > UniValue <nl> + { <nl> int timeout = 0 ; <nl> <nl> int height = request . params [ 0 ] . get_int ( ) ; <nl> static UniValue waitforblockheight ( const JSONRPCRequest & request ) <nl> ret . pushKV ( " hash " , block . hash . GetHex ( ) ) ; <nl> ret . pushKV ( " height " , block . height ) ; <nl> return ret ; <nl> + } , <nl> + } ; <nl> } <nl> <nl> - static UniValue syncwithvalidationinterfacequeue ( const JSONRPCRequest & request ) <nl> + static RPCHelpMan syncwithvalidationinterfacequeue ( ) <nl> { <nl> - RPCHelpMan { " syncwithvalidationinterfacequeue " , <nl> + return RPCHelpMan { " syncwithvalidationinterfacequeue " , <nl> " \ nWaits for the validation interface queue to catch up on everything that was there when we entered this function . 
\ n " , <nl> { } , <nl> RPCResult { RPCResult : : Type : : NONE , " " , " " } , <nl> static UniValue syncwithvalidationinterfacequeue ( const JSONRPCRequest & request ) <nl> HelpExampleCli ( " syncwithvalidationinterfacequeue " , " " ) <nl> + HelpExampleRpc ( " syncwithvalidationinterfacequeue " , " " ) <nl> } , <nl> - } . Check ( request ) ; <nl> - <nl> + [ & ] ( const RPCHelpMan & self , const JSONRPCRequest & request ) - > UniValue <nl> + { <nl> SyncWithValidationInterfaceQueue ( ) ; <nl> return NullUniValue ; <nl> + } , <nl> + } ; <nl> } <nl> <nl> - static UniValue getdifficulty ( const JSONRPCRequest & request ) <nl> + static RPCHelpMan getdifficulty ( ) <nl> { <nl> - RPCHelpMan { " getdifficulty " , <nl> + return RPCHelpMan { " getdifficulty " , <nl> " \ nReturns the proof - of - work difficulty as a multiple of the minimum difficulty . \ n " , <nl> { } , <nl> RPCResult { <nl> static UniValue getdifficulty ( const JSONRPCRequest & request ) <nl> HelpExampleCli ( " getdifficulty " , " " ) <nl> + HelpExampleRpc ( " getdifficulty " , " " ) <nl> } , <nl> - } . Check ( request ) ; <nl> - <nl> + [ & ] ( const RPCHelpMan & self , const JSONRPCRequest & request ) - > UniValue <nl> + { <nl> LOCK ( cs_main ) ; <nl> return GetDifficulty ( : : ChainActive ( ) . Tip ( ) ) ; <nl> + } , <nl> + } ; <nl> } <nl> <nl> static std : : vector < RPCResult > MempoolEntryDescription ( ) { return { <nl> UniValue MempoolToJSON ( const CTxMemPool & pool , bool verbose ) <nl> } <nl> } <nl> <nl> - static UniValue getrawmempool ( const JSONRPCRequest & request ) <nl> + static RPCHelpMan getrawmempool ( ) <nl> { <nl> - RPCHelpMan { " getrawmempool " , <nl> + return RPCHelpMan { " getrawmempool " , <nl> " \ nReturns all transaction ids in memory pool as a json array of string transaction ids . \ n " <nl> " \ nHint : use getmempoolentry to fetch a specific transaction from the mempool . \ n " , <nl> { <nl> static UniValue getrawmempool ( const JSONRPCRequest & request ) <nl> HelpExampleCli ( " getrawmempool " , " true " ) <nl> + HelpExampleRpc ( " getrawmempool " , " true " ) <nl> } , <nl> - } . Check ( request ) ; <nl> - <nl> + [ & ] ( const RPCHelpMan & self , const JSONRPCRequest & request ) - > UniValue <nl> + { <nl> bool fVerbose = false ; <nl> if ( ! request . params [ 0 ] . isNull ( ) ) <nl> fVerbose = request . params [ 0 ] . get_bool ( ) ; <nl> <nl> return MempoolToJSON ( EnsureMemPool ( request . context ) , fVerbose ) ; <nl> + } , <nl> + } ; <nl> } <nl> <nl> - static UniValue getmempoolancestors ( const JSONRPCRequest & request ) <nl> + static RPCHelpMan getmempoolancestors ( ) <nl> { <nl> - RPCHelpMan { " getmempoolancestors " , <nl> + return RPCHelpMan { " getmempoolancestors " , <nl> " \ nIf txid is in the mempool , returns all in - mempool ancestors . \ n " , <nl> { <nl> { " txid " , RPCArg : : Type : : STR_HEX , RPCArg : : Optional : : NO , " The transaction id ( must be in mempool ) " } , <nl> static UniValue getmempoolancestors ( const JSONRPCRequest & request ) <nl> HelpExampleCli ( " getmempoolancestors " , " \ " mytxid \ " " ) <nl> + HelpExampleRpc ( " getmempoolancestors " , " \ " mytxid \ " " ) <nl> } , <nl> - } . Check ( request ) ; <nl> - <nl> + [ & ] ( const RPCHelpMan & self , const JSONRPCRequest & request ) - > UniValue <nl> + { <nl> bool fVerbose = false ; <nl> if ( ! request . params [ 1 ] . isNull ( ) ) <nl> fVerbose = request . params [ 1 ] . 
get_bool ( ) ; <nl> static UniValue getmempoolancestors ( const JSONRPCRequest & request ) <nl> } <nl> return o ; <nl> } <nl> + } , <nl> + } ; <nl> } <nl> <nl> - static UniValue getmempooldescendants ( const JSONRPCRequest & request ) <nl> + static RPCHelpMan getmempooldescendants ( ) <nl> { <nl> - RPCHelpMan { " getmempooldescendants " , <nl> + return RPCHelpMan { " getmempooldescendants " , <nl> " \ nIf txid is in the mempool , returns all in - mempool descendants . \ n " , <nl> { <nl> { " txid " , RPCArg : : Type : : STR_HEX , RPCArg : : Optional : : NO , " The transaction id ( must be in mempool ) " } , <nl> static UniValue getmempooldescendants ( const JSONRPCRequest & request ) <nl> HelpExampleCli ( " getmempooldescendants " , " \ " mytxid \ " " ) <nl> + HelpExampleRpc ( " getmempooldescendants " , " \ " mytxid \ " " ) <nl> } , <nl> - } . Check ( request ) ; <nl> - <nl> + [ & ] ( const RPCHelpMan & self , const JSONRPCRequest & request ) - > UniValue <nl> + { <nl> bool fVerbose = false ; <nl> if ( ! request . params [ 1 ] . isNull ( ) ) <nl> fVerbose = request . params [ 1 ] . get_bool ( ) ; <nl> static UniValue getmempooldescendants ( const JSONRPCRequest & request ) <nl> } <nl> return o ; <nl> } <nl> + } , <nl> + } ; <nl> } <nl> <nl> - static UniValue getmempoolentry ( const JSONRPCRequest & request ) <nl> + static RPCHelpMan getmempoolentry ( ) <nl> { <nl> - RPCHelpMan { " getmempoolentry " , <nl> + return RPCHelpMan { " getmempoolentry " , <nl> " \ nReturns mempool data for given transaction \ n " , <nl> { <nl> { " txid " , RPCArg : : Type : : STR_HEX , RPCArg : : Optional : : NO , " The transaction id ( must be in mempool ) " } , <nl> static UniValue getmempoolentry ( const JSONRPCRequest & request ) <nl> HelpExampleCli ( " getmempoolentry " , " \ " mytxid \ " " ) <nl> + HelpExampleRpc ( " getmempoolentry " , " \ " mytxid \ " " ) <nl> } , <nl> - } . Check ( request ) ; <nl> - <nl> + [ & ] ( const RPCHelpMan & self , const JSONRPCRequest & request ) - > UniValue <nl> + { <nl> uint256 hash = ParseHashV ( request . params [ 0 ] , " parameter 1 " ) ; <nl> <nl> const CTxMemPool & mempool = EnsureMemPool ( request . context ) ; <nl> static UniValue getmempoolentry ( const JSONRPCRequest & request ) <nl> UniValue info ( UniValue : : VOBJ ) ; <nl> entryToJSON ( mempool , info , e ) ; <nl> return info ; <nl> + } , <nl> + } ; <nl> } <nl> <nl> - static UniValue getblockhash ( const JSONRPCRequest & request ) <nl> + static RPCHelpMan getblockhash ( ) <nl> { <nl> - RPCHelpMan { " getblockhash " , <nl> + return RPCHelpMan { " getblockhash " , <nl> " \ nReturns hash of block in best - block - chain at height provided . \ n " , <nl> { <nl> { " height " , RPCArg : : Type : : NUM , RPCArg : : Optional : : NO , " The height index " } , <nl> static UniValue getblockhash ( const JSONRPCRequest & request ) <nl> HelpExampleCli ( " getblockhash " , " 1000 " ) <nl> + HelpExampleRpc ( " getblockhash " , " 1000 " ) <nl> } , <nl> - } . Check ( request ) ; <nl> - <nl> + [ & ] ( const RPCHelpMan & self , const JSONRPCRequest & request ) - > UniValue <nl> + { <nl> LOCK ( cs_main ) ; <nl> <nl> int nHeight = request . params [ 0 ] . get_int ( ) ; <nl> static UniValue getblockhash ( const JSONRPCRequest & request ) <nl> <nl> CBlockIndex * pblockindex = : : ChainActive ( ) [ nHeight ] ; <nl> return pblockindex - > GetBlockHash ( ) . 
GetHex ( ) ; <nl> + } , <nl> + } ; <nl> } <nl> <nl> - static UniValue getblockheader ( const JSONRPCRequest & request ) <nl> + static RPCHelpMan getblockheader ( ) <nl> { <nl> - RPCHelpMan { " getblockheader " , <nl> + return RPCHelpMan { " getblockheader " , <nl> " \ nIf verbose is false , returns a string that is serialized , hex - encoded data for blockheader ' hash ' . \ n " <nl> " If verbose is true , returns an Object with information about blockheader < hash > . \ n " , <nl> { <nl> static UniValue getblockheader ( const JSONRPCRequest & request ) <nl> HelpExampleCli ( " getblockheader " , " \ " 00000000c937983704a73af28acdec37b049d214adbda81d7e2a3dd146f6ed09 \ " " ) <nl> + HelpExampleRpc ( " getblockheader " , " \ " 00000000c937983704a73af28acdec37b049d214adbda81d7e2a3dd146f6ed09 \ " " ) <nl> } , <nl> - } . Check ( request ) ; <nl> - <nl> + [ & ] ( const RPCHelpMan & self , const JSONRPCRequest & request ) - > UniValue <nl> + { <nl> uint256 hash ( ParseHashV ( request . params [ 0 ] , " hash " ) ) ; <nl> <nl> bool fVerbose = true ; <nl> static UniValue getblockheader ( const JSONRPCRequest & request ) <nl> } <nl> <nl> return blockheaderToJSON ( tip , pblockindex ) ; <nl> + } , <nl> + } ; <nl> } <nl> <nl> static CBlock GetBlockChecked ( const CBlockIndex * pblockindex ) <nl> static CBlockUndo GetUndoChecked ( const CBlockIndex * pblockindex ) <nl> return blockUndo ; <nl> } <nl> <nl> - static UniValue getblock ( const JSONRPCRequest & request ) <nl> + static RPCHelpMan getblock ( ) <nl> { <nl> - RPCHelpMan { " getblock " , <nl> + return RPCHelpMan { " getblock " , <nl> " \ nIf verbosity is 0 , returns a string that is serialized , hex - encoded data for block ' hash ' . \ n " <nl> " If verbosity is 1 , returns an Object with information about block < hash > . \ n " <nl> " If verbosity is 2 , returns an Object with information about block < hash > and information about each transaction . \ n " , <nl> static UniValue getblock ( const JSONRPCRequest & request ) <nl> HelpExampleCli ( " getblock " , " \ " 00000000c937983704a73af28acdec37b049d214adbda81d7e2a3dd146f6ed09 \ " " ) <nl> + HelpExampleRpc ( " getblock " , " \ " 00000000c937983704a73af28acdec37b049d214adbda81d7e2a3dd146f6ed09 \ " " ) <nl> } , <nl> - } . Check ( request ) ; <nl> - <nl> + [ & ] ( const RPCHelpMan & self , const JSONRPCRequest & request ) - > UniValue <nl> + { <nl> uint256 hash ( ParseHashV ( request . params [ 0 ] , " blockhash " ) ) ; <nl> <nl> int verbosity = 1 ; <nl> static UniValue getblock ( const JSONRPCRequest & request ) <nl> } <nl> <nl> return blockToJSON ( block , tip , pblockindex , verbosity > = 2 ) ; <nl> + } , <nl> + } ; <nl> } <nl> <nl> - static UniValue pruneblockchain ( const JSONRPCRequest & request ) <nl> + static RPCHelpMan pruneblockchain ( ) <nl> { <nl> - RPCHelpMan { " pruneblockchain " , " " , <nl> + return RPCHelpMan { " pruneblockchain " , " " , <nl> { <nl> { " height " , RPCArg : : Type : : NUM , RPCArg : : Optional : : NO , " The block height to prune up to . May be set to a discrete height , or to a " + UNIX_EPOCH_TIME + " \ n " <nl> " to prune blocks whose block time is at least 2 hours older than the provided timestamp . " } , <nl> static UniValue pruneblockchain ( const JSONRPCRequest & request ) <nl> HelpExampleCli ( " pruneblockchain " , " 1000 " ) <nl> + HelpExampleRpc ( " pruneblockchain " , " 1000 " ) <nl> } , <nl> - } . Check ( request ) ; <nl> - <nl> + [ & ] ( const RPCHelpMan & self , const JSONRPCRequest & request ) - > UniValue <nl> + { <nl> if ( ! 
fPruneMode ) <nl> throw JSONRPCError ( RPC_MISC_ERROR , " Cannot prune blocks because node is not in prune mode . " ) ; <nl> <nl> static UniValue pruneblockchain ( const JSONRPCRequest & request ) <nl> block = block - > pprev ; <nl> } <nl> return uint64_t ( block - > nHeight ) ; <nl> + } , <nl> + } ; <nl> } <nl> <nl> - static UniValue gettxoutsetinfo ( const JSONRPCRequest & request ) <nl> + static RPCHelpMan gettxoutsetinfo ( ) <nl> { <nl> - RPCHelpMan { " gettxoutsetinfo " , <nl> + return RPCHelpMan { " gettxoutsetinfo " , <nl> " \ nReturns statistics about the unspent transaction output set . \ n " <nl> " Note this call may take some time . \ n " , <nl> { <nl> static UniValue gettxoutsetinfo ( const JSONRPCRequest & request ) <nl> HelpExampleCli ( " gettxoutsetinfo " , " " ) <nl> + HelpExampleRpc ( " gettxoutsetinfo " , " " ) <nl> } , <nl> - } . Check ( request ) ; <nl> - <nl> + [ & ] ( const RPCHelpMan & self , const JSONRPCRequest & request ) - > UniValue <nl> + { <nl> UniValue ret ( UniValue : : VOBJ ) ; <nl> <nl> CCoinsStats stats ; <nl> static UniValue gettxoutsetinfo ( const JSONRPCRequest & request ) <nl> throw JSONRPCError ( RPC_INTERNAL_ERROR , " Unable to read UTXO set " ) ; <nl> } <nl> return ret ; <nl> + } , <nl> + } ; <nl> } <nl> <nl> - UniValue gettxout ( const JSONRPCRequest & request ) <nl> + static RPCHelpMan gettxout ( ) <nl> { <nl> - RPCHelpMan { " gettxout " , <nl> + return RPCHelpMan { " gettxout " , <nl> " \ nReturns details about an unspent transaction output . \ n " , <nl> { <nl> { " txid " , RPCArg : : Type : : STR , RPCArg : : Optional : : NO , " The transaction id " } , <nl> UniValue gettxout ( const JSONRPCRequest & request ) <nl> " \ nAs a JSON - RPC call \ n " <nl> + HelpExampleRpc ( " gettxout " , " \ " txid \ " , 1 " ) <nl> } , <nl> - } . Check ( request ) ; <nl> - <nl> + [ & ] ( const RPCHelpMan & self , const JSONRPCRequest & request ) - > UniValue <nl> + { <nl> LOCK ( cs_main ) ; <nl> <nl> UniValue ret ( UniValue : : VOBJ ) ; <nl> UniValue gettxout ( const JSONRPCRequest & request ) <nl> ret . pushKV ( " coinbase " , ( bool ) coin . fCoinBase ) ; <nl> <nl> return ret ; <nl> + } , <nl> + } ; <nl> } <nl> <nl> - static UniValue verifychain ( const JSONRPCRequest & request ) <nl> + static RPCHelpMan verifychain ( ) <nl> { <nl> - RPCHelpMan { " verifychain " , <nl> + return RPCHelpMan { " verifychain " , <nl> " \ nVerifies blockchain database . \ n " , <nl> { <nl> { " checklevel " , RPCArg : : Type : : NUM , / * default * / strprintf ( " % d , range = 0 - 4 " , DEFAULT_CHECKLEVEL ) , <nl> static UniValue verifychain ( const JSONRPCRequest & request ) <nl> HelpExampleCli ( " verifychain " , " " ) <nl> + HelpExampleRpc ( " verifychain " , " " ) <nl> } , <nl> - } . Check ( request ) ; <nl> - <nl> + [ & ] ( const RPCHelpMan & self , const JSONRPCRequest & request ) - > UniValue <nl> + { <nl> const int check_level ( request . params [ 0 ] . isNull ( ) ? DEFAULT_CHECKLEVEL : request . params [ 0 ] . get_int ( ) ) ; <nl> const int check_depth { request . params [ 1 ] . isNull ( ) ? DEFAULT_CHECKBLOCKS : request . params [ 1 ] . get_int ( ) } ; <nl> <nl> LOCK ( cs_main ) ; <nl> <nl> return CVerifyDB ( ) . VerifyDB ( Params ( ) , & : : ChainstateActive ( ) . 
CoinsTip ( ) , check_level , check_depth ) ; <nl> + } , <nl> + } ; <nl> } <nl> <nl> static void BuriedForkDescPushBack ( UniValue & softforks , const std : : string & name , int height ) EXCLUSIVE_LOCKS_REQUIRED ( cs_main ) <nl> static void BIP9SoftForkDescPushBack ( UniValue & softforks , const std : : string & nam <nl> softforks . pushKV ( name , rv ) ; <nl> } <nl> <nl> - UniValue getblockchaininfo ( const JSONRPCRequest & request ) <nl> + RPCHelpMan getblockchaininfo ( ) <nl> { <nl> - RPCHelpMan { " getblockchaininfo " , <nl> + return RPCHelpMan { " getblockchaininfo " , <nl> " Returns an object containing various state info regarding blockchain processing . \ n " , <nl> { } , <nl> RPCResult { <nl> UniValue getblockchaininfo ( const JSONRPCRequest & request ) <nl> HelpExampleCli ( " getblockchaininfo " , " " ) <nl> + HelpExampleRpc ( " getblockchaininfo " , " " ) <nl> } , <nl> - } . Check ( request ) ; <nl> - <nl> + [ & ] ( const RPCHelpMan & self , const JSONRPCRequest & request ) - > UniValue <nl> + { <nl> LOCK ( cs_main ) ; <nl> <nl> const CBlockIndex * tip = : : ChainActive ( ) . Tip ( ) ; <nl> UniValue getblockchaininfo ( const JSONRPCRequest & request ) <nl> <nl> obj . pushKV ( " warnings " , GetWarnings ( false ) . original ) ; <nl> return obj ; <nl> + } , <nl> + } ; <nl> } <nl> <nl> / * * Comparison function for sorting the getchaintips heads . * / <nl> struct CompareBlocksByHeight <nl> } <nl> } ; <nl> <nl> - static UniValue getchaintips ( const JSONRPCRequest & request ) <nl> + static RPCHelpMan getchaintips ( ) <nl> { <nl> - RPCHelpMan { " getchaintips " , <nl> + return RPCHelpMan { " getchaintips " , <nl> " Return information about all known tips in the block tree , " <nl> " including the main chain as well as orphaned branches . \ n " , <nl> { } , <nl> static UniValue getchaintips ( const JSONRPCRequest & request ) <nl> HelpExampleCli ( " getchaintips " , " " ) <nl> + HelpExampleRpc ( " getchaintips " , " " ) <nl> } , <nl> - } . Check ( request ) ; <nl> - <nl> + [ & ] ( const RPCHelpMan & self , const JSONRPCRequest & request ) - > UniValue <nl> + { <nl> ChainstateManager & chainman = EnsureChainman ( request . context ) ; <nl> LOCK ( cs_main ) ; <nl> <nl> static UniValue getchaintips ( const JSONRPCRequest & request ) <nl> } <nl> <nl> return res ; <nl> + } , <nl> + } ; <nl> } <nl> <nl> UniValue MempoolInfoToJSON ( const CTxMemPool & pool ) <nl> UniValue MempoolInfoToJSON ( const CTxMemPool & pool ) <nl> return ret ; <nl> } <nl> <nl> - static UniValue getmempoolinfo ( const JSONRPCRequest & request ) <nl> + static RPCHelpMan getmempoolinfo ( ) <nl> { <nl> - RPCHelpMan { " getmempoolinfo " , <nl> + return RPCHelpMan { " getmempoolinfo " , <nl> " \ nReturns details on the active state of the TX memory pool . \ n " , <nl> { } , <nl> RPCResult { <nl> static UniValue getmempoolinfo ( const JSONRPCRequest & request ) <nl> HelpExampleCli ( " getmempoolinfo " , " " ) <nl> + HelpExampleRpc ( " getmempoolinfo " , " " ) <nl> } , <nl> - } . Check ( request ) ; <nl> - <nl> + [ & ] ( const RPCHelpMan & self , const JSONRPCRequest & request ) - > UniValue <nl> + { <nl> return MempoolInfoToJSON ( EnsureMemPool ( request . context ) ) ; <nl> + } , <nl> + } ; <nl> } <nl> <nl> - static UniValue preciousblock ( const JSONRPCRequest & request ) <nl> + static RPCHelpMan preciousblock ( ) <nl> { <nl> - RPCHelpMan { " preciousblock " , <nl> + return RPCHelpMan { " preciousblock " , <nl> " \ nTreats a block as if it were received before others with the same work . 
\ n " <nl> " \ nA later preciousblock call can override the effect of an earlier one . \ n " <nl> " \ nThe effects of preciousblock are not retained across restarts . \ n " , <nl> static UniValue preciousblock ( const JSONRPCRequest & request ) <nl> HelpExampleCli ( " preciousblock " , " \ " blockhash \ " " ) <nl> + HelpExampleRpc ( " preciousblock " , " \ " blockhash \ " " ) <nl> } , <nl> - } . Check ( request ) ; <nl> - <nl> + [ & ] ( const RPCHelpMan & self , const JSONRPCRequest & request ) - > UniValue <nl> + { <nl> uint256 hash ( ParseHashV ( request . params [ 0 ] , " blockhash " ) ) ; <nl> CBlockIndex * pblockindex ; <nl> <nl> static UniValue preciousblock ( const JSONRPCRequest & request ) <nl> } <nl> <nl> return NullUniValue ; <nl> + } , <nl> + } ; <nl> } <nl> <nl> - static UniValue invalidateblock ( const JSONRPCRequest & request ) <nl> + static RPCHelpMan invalidateblock ( ) <nl> { <nl> - RPCHelpMan { " invalidateblock " , <nl> + return RPCHelpMan { " invalidateblock " , <nl> " \ nPermanently marks a block as invalid , as if it violated a consensus rule . \ n " , <nl> { <nl> { " blockhash " , RPCArg : : Type : : STR_HEX , RPCArg : : Optional : : NO , " the hash of the block to mark as invalid " } , <nl> static UniValue invalidateblock ( const JSONRPCRequest & request ) <nl> HelpExampleCli ( " invalidateblock " , " \ " blockhash \ " " ) <nl> + HelpExampleRpc ( " invalidateblock " , " \ " blockhash \ " " ) <nl> } , <nl> - } . Check ( request ) ; <nl> - <nl> + [ & ] ( const RPCHelpMan & self , const JSONRPCRequest & request ) - > UniValue <nl> + { <nl> uint256 hash ( ParseHashV ( request . params [ 0 ] , " blockhash " ) ) ; <nl> BlockValidationState state ; <nl> <nl> static UniValue invalidateblock ( const JSONRPCRequest & request ) <nl> } <nl> <nl> return NullUniValue ; <nl> + } , <nl> + } ; <nl> } <nl> <nl> - static UniValue reconsiderblock ( const JSONRPCRequest & request ) <nl> + static RPCHelpMan reconsiderblock ( ) <nl> { <nl> - RPCHelpMan { " reconsiderblock " , <nl> + return RPCHelpMan { " reconsiderblock " , <nl> " \ nRemoves invalidity status of a block , its ancestors and its descendants , reconsider them for activation . \ n " <nl> " This can be used to undo the effects of invalidateblock . \ n " , <nl> { <nl> static UniValue reconsiderblock ( const JSONRPCRequest & request ) <nl> HelpExampleCli ( " reconsiderblock " , " \ " blockhash \ " " ) <nl> + HelpExampleRpc ( " reconsiderblock " , " \ " blockhash \ " " ) <nl> } , <nl> - } . Check ( request ) ; <nl> - <nl> + [ & ] ( const RPCHelpMan & self , const JSONRPCRequest & request ) - > UniValue <nl> + { <nl> uint256 hash ( ParseHashV ( request . params [ 0 ] , " blockhash " ) ) ; <nl> <nl> { <nl> static UniValue reconsiderblock ( const JSONRPCRequest & request ) <nl> } <nl> <nl> return NullUniValue ; <nl> + } , <nl> + } ; <nl> } <nl> <nl> - static UniValue getchaintxstats ( const JSONRPCRequest & request ) <nl> + static RPCHelpMan getchaintxstats ( ) <nl> { <nl> - RPCHelpMan { " getchaintxstats " , <nl> + return RPCHelpMan { " getchaintxstats " , <nl> " \ nCompute statistics about the total number and rate of transactions in the chain . \ n " , <nl> { <nl> { " nblocks " , RPCArg : : Type : : NUM , / * default * / " one month " , " Size of the window in number of blocks " } , <nl> static UniValue getchaintxstats ( const JSONRPCRequest & request ) <nl> HelpExampleCli ( " getchaintxstats " , " " ) <nl> + HelpExampleRpc ( " getchaintxstats " , " 2016 " ) <nl> } , <nl> - } . 
Check ( request ) ; <nl> - <nl> + [ & ] ( const RPCHelpMan & self , const JSONRPCRequest & request ) - > UniValue <nl> + { <nl> const CBlockIndex * pindex ; <nl> int blockcount = 30 * 24 * 60 * 60 / Params ( ) . GetConsensus ( ) . nPowTargetSpacing ; / / By default : 1 month <nl> <nl> static UniValue getchaintxstats ( const JSONRPCRequest & request ) <nl> } <nl> <nl> return ret ; <nl> + } , <nl> + } ; <nl> } <nl> <nl> template < typename T > <nl> static inline bool SetHasKeys ( const std : : set < T > & set , const Tk & key , const Args & <nl> / / outpoint ( needed for the utxo index ) + nHeight + fCoinBase <nl> static constexpr size_t PER_UTXO_OVERHEAD = sizeof ( COutPoint ) + sizeof ( uint32_t ) + sizeof ( bool ) ; <nl> <nl> - static UniValue getblockstats ( const JSONRPCRequest & request ) <nl> + static RPCHelpMan getblockstats ( ) <nl> { <nl> - RPCHelpMan { " getblockstats " , <nl> + return RPCHelpMan { " getblockstats " , <nl> " \ nCompute per block statistics for a given window . All amounts are in satoshis . \ n " <nl> " It won ' t work for some heights with pruning . \ n " , <nl> { <nl> static UniValue getblockstats ( const JSONRPCRequest & request ) <nl> HelpExampleRpc ( " getblockstats " , R " ( " 00000000c937983704a73af28acdec37b049d214adbda81d7e2a3dd146f6ed09 " , [ " minfeerate " , " avgfeerate " ] ) " ) + <nl> HelpExampleRpc ( " getblockstats " , R " ( 1000 , [ " minfeerate " , " avgfeerate " ] ) " ) <nl> } , <nl> - } . Check ( request ) ; <nl> - <nl> + [ & ] ( const RPCHelpMan & self , const JSONRPCRequest & request ) - > UniValue <nl> + { <nl> LOCK ( cs_main ) ; <nl> <nl> CBlockIndex * pindex ; <nl> static UniValue getblockstats ( const JSONRPCRequest & request ) <nl> ret . pushKV ( stat , value ) ; <nl> } <nl> return ret ; <nl> + } , <nl> + } ; <nl> } <nl> <nl> - static UniValue savemempool ( const JSONRPCRequest & request ) <nl> + static RPCHelpMan savemempool ( ) <nl> { <nl> - RPCHelpMan { " savemempool " , <nl> + return RPCHelpMan { " savemempool " , <nl> " \ nDumps the mempool to disk . It will fail until the previous dump is fully loaded . \ n " , <nl> { } , <nl> RPCResult { RPCResult : : Type : : NONE , " " , " " } , <nl> static UniValue savemempool ( const JSONRPCRequest & request ) <nl> HelpExampleCli ( " savemempool " , " " ) <nl> + HelpExampleRpc ( " savemempool " , " " ) <nl> } , <nl> - } . Check ( request ) ; <nl> - <nl> + [ & ] ( const RPCHelpMan & self , const JSONRPCRequest & request ) - > UniValue <nl> + { <nl> const CTxMemPool & mempool = EnsureMemPool ( request . context ) ; <nl> <nl> if ( ! mempool . IsLoaded ( ) ) { <nl> static UniValue savemempool ( const JSONRPCRequest & request ) <nl> } <nl> <nl> return NullUniValue ; <nl> + } , <nl> + } ; <nl> } <nl> <nl> namespace { <nl> class CoinsViewScanReserver <nl> } <nl> } ; <nl> <nl> - UniValue scantxoutset ( const JSONRPCRequest & request ) <nl> + static RPCHelpMan scantxoutset ( ) <nl> { <nl> - RPCHelpMan { " scantxoutset " , <nl> + return RPCHelpMan { " scantxoutset " , <nl> " \ nEXPERIMENTAL warning : this call may be removed or changed in future releases . \ n " <nl> " \ nScans the unspent transaction output set for entries that match certain output descriptors . \ n " <nl> " Examples of output descriptors are : \ n " <nl> UniValue scantxoutset ( const JSONRPCRequest & request ) <nl> { RPCResult : : Type : : STR_AMOUNT , " total_amount " , " The total amount of all found unspent outputs in " + CURRENCY_UNIT } , <nl> } } , <nl> RPCExamples { " " } , <nl> - } . 
Check ( request ) ; <nl> - <nl> + [ & ] ( const RPCHelpMan & self , const JSONRPCRequest & request ) - > UniValue <nl> + { <nl> RPCTypeCheck ( request . params , { UniValue : : VSTR , UniValue : : VARR } ) ; <nl> <nl> UniValue result ( UniValue : : VOBJ ) ; <nl> UniValue scantxoutset ( const JSONRPCRequest & request ) <nl> throw JSONRPCError ( RPC_INVALID_PARAMETER , " Invalid command " ) ; <nl> } <nl> return result ; <nl> + } , <nl> + } ; <nl> } <nl> <nl> - static UniValue getblockfilter ( const JSONRPCRequest & request ) <nl> + static RPCHelpMan getblockfilter ( ) <nl> { <nl> - RPCHelpMan { " getblockfilter " , <nl> + return RPCHelpMan { " getblockfilter " , <nl> " \ nRetrieve a BIP 157 content filter for a particular block . \ n " , <nl> { <nl> { " blockhash " , RPCArg : : Type : : STR_HEX , RPCArg : : Optional : : NO , " The hash of the block " } , <nl> static UniValue getblockfilter ( const JSONRPCRequest & request ) <nl> RPCExamples { <nl> HelpExampleCli ( " getblockfilter " , " \ " 00000000c937983704a73af28acdec37b049d214adbda81d7e2a3dd146f6ed09 \ " \ " basic \ " " ) + <nl> HelpExampleRpc ( " getblockfilter " , " \ " 00000000c937983704a73af28acdec37b049d214adbda81d7e2a3dd146f6ed09 \ " , \ " basic \ " " ) <nl> - } <nl> - } . Check ( request ) ; <nl> - <nl> + } , <nl> + [ & ] ( const RPCHelpMan & self , const JSONRPCRequest & request ) - > UniValue <nl> + { <nl> uint256 block_hash = ParseHashV ( request . params [ 0 ] , " blockhash " ) ; <nl> std : : string filtertype_name = " basic " ; <nl> if ( ! request . params [ 1 ] . isNull ( ) ) { <nl> static UniValue getblockfilter ( const JSONRPCRequest & request ) <nl> ret . pushKV ( " filter " , HexStr ( filter . GetEncodedFilter ( ) ) ) ; <nl> ret . pushKV ( " header " , filter_header . GetHex ( ) ) ; <nl> return ret ; <nl> + } , <nl> + } ; <nl> } <nl> <nl> / * * <nl> static UniValue getblockfilter ( const JSONRPCRequest & request ) <nl> * <nl> * @ see SnapshotMetadata <nl> * / <nl> - UniValue dumptxoutset ( const JSONRPCRequest & request ) <nl> + static RPCHelpMan dumptxoutset ( ) <nl> { <nl> - RPCHelpMan { <nl> + return RPCHelpMan { <nl> " dumptxoutset " , <nl> " \ nWrite the serialized UTXO set to disk . \ n " , <nl> { <nl> UniValue dumptxoutset ( const JSONRPCRequest & request ) <nl> } , <nl> RPCExamples { <nl> HelpExampleCli ( " dumptxoutset " , " utxo . dat " ) <nl> - } <nl> - } . Check ( request ) ; <nl> - <nl> + } , <nl> + [ & ] ( const RPCHelpMan & self , const JSONRPCRequest & request ) - > UniValue <nl> + { <nl> fs : : path path = fs : : absolute ( request . params [ 0 ] . get_str ( ) , GetDataDir ( ) ) ; <nl> / / Write to a temporary path and then move into ` path ` on completion <nl> / / to avoid confusion due to an interruption . <nl> UniValue dumptxoutset ( const JSONRPCRequest & request ) <nl> result . pushKV ( " base_height " , tip - > nHeight ) ; <nl> result . pushKV ( " path " , path . string ( ) ) ; <nl> return result ; <nl> + } , <nl> + } ; <nl> } <nl> <nl> void RegisterBlockchainRPCCommands ( CRPCTable & t ) <nl> mmm a / src / rpc / rawtransaction . cpp <nl> ppp b / src / rpc / rawtransaction . cpp <nl> static void TxToJSON ( const CTransaction & tx , const uint256 hashBlock , UniValue & <nl> } <nl> } <nl> <nl> - static UniValue getrawtransaction ( const JSONRPCRequest & request ) <nl> + static RPCHelpMan getrawtransaction ( ) <nl> { <nl> - RPCHelpMan { <nl> + return RPCHelpMan { <nl> " getrawtransaction " , <nl> " \ nReturn the raw transaction data . 
\ n " <nl> <nl> static UniValue getrawtransaction ( const JSONRPCRequest & request ) <nl> + HelpExampleCli ( " getrawtransaction " , " \ " mytxid \ " false \ " myblockhash \ " " ) <nl> + HelpExampleCli ( " getrawtransaction " , " \ " mytxid \ " true \ " myblockhash \ " " ) <nl> } , <nl> - } . Check ( request ) ; <nl> - <nl> + [ & ] ( const RPCHelpMan & self , const JSONRPCRequest & request ) - > UniValue <nl> + { <nl> const NodeContext & node = EnsureNodeContext ( request . context ) ; <nl> <nl> bool in_active_chain = true ; <nl> static UniValue getrawtransaction ( const JSONRPCRequest & request ) <nl> if ( blockindex ) result . pushKV ( " in_active_chain " , in_active_chain ) ; <nl> TxToJSON ( * tx , hash_block , result ) ; <nl> return result ; <nl> + } , <nl> + } ; <nl> } <nl> <nl> - static UniValue gettxoutproof ( const JSONRPCRequest & request ) <nl> + static RPCHelpMan gettxoutproof ( ) <nl> { <nl> - RPCHelpMan { " gettxoutproof " , <nl> + return RPCHelpMan { " gettxoutproof " , <nl> " \ nReturns a hex - encoded proof that \ " txid \ " was included in a block . \ n " <nl> " \ nNOTE : By default this function only works sometimes . This is when there is an \ n " <nl> " unspent output in the utxo for this transaction . To make it always work , \ n " <nl> static UniValue gettxoutproof ( const JSONRPCRequest & request ) <nl> RPCResult : : Type : : STR , " data " , " A string that is a serialized , hex - encoded data for the proof . " <nl> } , <nl> RPCExamples { " " } , <nl> - } . Check ( request ) ; <nl> - <nl> + [ & ] ( const RPCHelpMan & self , const JSONRPCRequest & request ) - > UniValue <nl> + { <nl> std : : set < uint256 > setTxids ; <nl> uint256 oneTxid ; <nl> UniValue txids = request . params [ 0 ] . get_array ( ) ; <nl> static UniValue gettxoutproof ( const JSONRPCRequest & request ) <nl> ssMB < < mb ; <nl> std : : string strHex = HexStr ( ssMB ) ; <nl> return strHex ; <nl> + } , <nl> + } ; <nl> } <nl> <nl> - static UniValue verifytxoutproof ( const JSONRPCRequest & request ) <nl> + static RPCHelpMan verifytxoutproof ( ) <nl> { <nl> - RPCHelpMan { " verifytxoutproof " , <nl> + return RPCHelpMan { " verifytxoutproof " , <nl> " \ nVerifies that a proof points to a transaction in a block , returning the transaction it commits to \ n " <nl> " and throwing an RPC error if the block is not in our best chain \ n " , <nl> { <nl> static UniValue verifytxoutproof ( const JSONRPCRequest & request ) <nl> } <nl> } , <nl> RPCExamples { " " } , <nl> - } . Check ( request ) ; <nl> - <nl> + [ & ] ( const RPCHelpMan & self , const JSONRPCRequest & request ) - > UniValue <nl> + { <nl> CDataStream ssMB ( ParseHexV ( request . params [ 0 ] , " proof " ) , SER_NETWORK , PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS ) ; <nl> CMerkleBlock merkleBlock ; <nl> ssMB > > merkleBlock ; <nl> static UniValue verifytxoutproof ( const JSONRPCRequest & request ) <nl> } <nl> <nl> return res ; <nl> + } , <nl> + } ; <nl> } <nl> <nl> - static UniValue createrawtransaction ( const JSONRPCRequest & request ) <nl> + static RPCHelpMan createrawtransaction ( ) <nl> { <nl> - RPCHelpMan { " createrawtransaction " , <nl> + return RPCHelpMan { " createrawtransaction " , <nl> " \ nCreate a transaction spending the given inputs and creating new outputs . \ n " <nl> " Outputs can be addresses or data . \ n " <nl> " Returns hex - encoded raw transaction . 
\ n " <nl> static UniValue createrawtransaction ( const JSONRPCRequest & request ) <nl> + HelpExampleRpc ( " createrawtransaction " , " \ " [ { \ \ \ " txid \ \ \ " : \ \ \ " myid \ \ \ " , \ \ \ " vout \ \ \ " : 0 } ] \ " , \ " [ { \ \ \ " address \ \ \ " : 0 . 01 } ] \ " " ) <nl> + HelpExampleRpc ( " createrawtransaction " , " \ " [ { \ \ \ " txid \ \ \ " : \ \ \ " myid \ \ \ " , \ \ \ " vout \ \ \ " : 0 } ] \ " , \ " [ { \ \ \ " data \ \ \ " : \ \ \ " 00010203 \ \ \ " } ] \ " " ) <nl> } , <nl> - } . Check ( request ) ; <nl> - <nl> + [ & ] ( const RPCHelpMan & self , const JSONRPCRequest & request ) - > UniValue <nl> + { <nl> RPCTypeCheck ( request . params , { <nl> UniValue : : VARR , <nl> UniValueType ( ) , / / ARR or OBJ , checked later <nl> static UniValue createrawtransaction ( const JSONRPCRequest & request ) <nl> CMutableTransaction rawTx = ConstructTransaction ( request . params [ 0 ] , request . params [ 1 ] , request . params [ 2 ] , rbf ) ; <nl> <nl> return EncodeHexTx ( CTransaction ( rawTx ) ) ; <nl> + } , <nl> + } ; <nl> } <nl> <nl> - static UniValue decoderawtransaction ( const JSONRPCRequest & request ) <nl> + static RPCHelpMan decoderawtransaction ( ) <nl> { <nl> - RPCHelpMan { " decoderawtransaction " , <nl> + return RPCHelpMan { " decoderawtransaction " , <nl> " \ nReturn a JSON object representing the serialized , hex - encoded transaction . \ n " , <nl> { <nl> { " hexstring " , RPCArg : : Type : : STR_HEX , RPCArg : : Optional : : NO , " The transaction hex string " } , <nl> static UniValue decoderawtransaction ( const JSONRPCRequest & request ) <nl> HelpExampleCli ( " decoderawtransaction " , " \ " hexstring \ " " ) <nl> + HelpExampleRpc ( " decoderawtransaction " , " \ " hexstring \ " " ) <nl> } , <nl> - } . Check ( request ) ; <nl> - <nl> + [ & ] ( const RPCHelpMan & self , const JSONRPCRequest & request ) - > UniValue <nl> + { <nl> RPCTypeCheck ( request . params , { UniValue : : VSTR , UniValue : : VBOOL } ) ; <nl> <nl> CMutableTransaction mtx ; <nl> static UniValue decoderawtransaction ( const JSONRPCRequest & request ) <nl> TxToUniv ( CTransaction ( std : : move ( mtx ) ) , uint256 ( ) , result , false ) ; <nl> <nl> return result ; <nl> + } , <nl> + } ; <nl> } <nl> <nl> static std : : string GetAllOutputTypes ( ) <nl> static std : : string GetAllOutputTypes ( ) <nl> return Join ( ret , " , " ) ; <nl> } <nl> <nl> - static UniValue decodescript ( const JSONRPCRequest & request ) <nl> + static RPCHelpMan decodescript ( ) <nl> { <nl> - RPCHelpMan { " decodescript " , <nl> + return RPCHelpMan { " decodescript " , <nl> " \ nDecode a hex - encoded script . \ n " , <nl> { <nl> { " hexstring " , RPCArg : : Type : : STR_HEX , RPCArg : : Optional : : NO , " the hex - encoded script " } , <nl> static UniValue decodescript ( const JSONRPCRequest & request ) <nl> HelpExampleCli ( " decodescript " , " \ " hexstring \ " " ) <nl> + HelpExampleRpc ( " decodescript " , " \ " hexstring \ " " ) <nl> } , <nl> - } . Check ( request ) ; <nl> - <nl> + [ & ] ( const RPCHelpMan & self , const JSONRPCRequest & request ) - > UniValue <nl> + { <nl> RPCTypeCheck ( request . 
params , { UniValue : : VSTR } ) ; <nl> <nl> UniValue r ( UniValue : : VOBJ ) ; <nl> static UniValue decodescript ( const JSONRPCRequest & request ) <nl> } <nl> <nl> return r ; <nl> + } , <nl> + } ; <nl> } <nl> <nl> - static UniValue combinerawtransaction ( const JSONRPCRequest & request ) <nl> + static RPCHelpMan combinerawtransaction ( ) <nl> { <nl> - RPCHelpMan { " combinerawtransaction " , <nl> + return RPCHelpMan { " combinerawtransaction " , <nl> " \ nCombine multiple partially signed transactions into one transaction . \ n " <nl> " The combined transaction may be another partially signed transaction or a \ n " <nl> " fully signed transaction . " , <nl> static UniValue combinerawtransaction ( const JSONRPCRequest & request ) <nl> RPCExamples { <nl> HelpExampleCli ( " combinerawtransaction " , R " ( ' [ " myhex1 " , " myhex2 " , " myhex3 " ] ' ) " ) <nl> } , <nl> - } . Check ( request ) ; <nl> - <nl> + [ & ] ( const RPCHelpMan & self , const JSONRPCRequest & request ) - > UniValue <nl> + { <nl> <nl> UniValue txs = request . params [ 0 ] . get_array ( ) ; <nl> std : : vector < CMutableTransaction > txVariants ( txs . size ( ) ) ; <nl> static UniValue combinerawtransaction ( const JSONRPCRequest & request ) <nl> } <nl> <nl> return EncodeHexTx ( CTransaction ( mergedTx ) ) ; <nl> + } , <nl> + } ; <nl> } <nl> <nl> - static UniValue signrawtransactionwithkey ( const JSONRPCRequest & request ) <nl> + static RPCHelpMan signrawtransactionwithkey ( ) <nl> { <nl> - RPCHelpMan { " signrawtransactionwithkey " , <nl> + return RPCHelpMan { " signrawtransactionwithkey " , <nl> " \ nSign inputs for raw transaction ( serialized , hex - encoded ) . \ n " <nl> " The second argument is an array of base58 - encoded private \ n " <nl> " keys that will be the only keys used to sign the transaction . \ n " <nl> static UniValue signrawtransactionwithkey ( const JSONRPCRequest & request ) <nl> HelpExampleCli ( " signrawtransactionwithkey " , " \ " myhex \ " \ " [ \ \ \ " key1 \ \ \ " , \ \ \ " key2 \ \ \ " ] \ " " ) <nl> + HelpExampleRpc ( " signrawtransactionwithkey " , " \ " myhex \ " , \ " [ \ \ \ " key1 \ \ \ " , \ \ \ " key2 \ \ \ " ] \ " " ) <nl> } , <nl> - } . Check ( request ) ; <nl> - <nl> + [ & ] ( const RPCHelpMan & self , const JSONRPCRequest & request ) - > UniValue <nl> + { <nl> RPCTypeCheck ( request . params , { UniValue : : VSTR , UniValue : : VARR , UniValue : : VARR , UniValue : : VSTR } , true ) ; <nl> <nl> CMutableTransaction mtx ; <nl> static UniValue signrawtransactionwithkey ( const JSONRPCRequest & request ) <nl> UniValue result ( UniValue : : VOBJ ) ; <nl> SignTransaction ( mtx , & keystore , coins , request . params [ 3 ] , result ) ; <nl> return result ; <nl> + } , <nl> + } ; <nl> } <nl> <nl> - static UniValue sendrawtransaction ( const JSONRPCRequest & request ) <nl> + static RPCHelpMan sendrawtransaction ( ) <nl> { <nl> - RPCHelpMan { " sendrawtransaction " , <nl> + return RPCHelpMan { " sendrawtransaction " , <nl> " \ nSubmit a raw transaction ( serialized , hex - encoded ) to local node and network . \ n " <nl> " \ nNote that the transaction will be sent unconditionally to all peers , so using this \ n " <nl> " for manual rebroadcast may degrade privacy by leaking the transaction ' s origin , as \ n " <nl> static UniValue sendrawtransaction ( const JSONRPCRequest & request ) <nl> " \ nAs a JSON - RPC call \ n " <nl> + HelpExampleRpc ( " sendrawtransaction " , " \ " signedhex \ " " ) <nl> } , <nl> - } . 
Check ( request ) ; <nl> - <nl> + [ & ] ( const RPCHelpMan & self , const JSONRPCRequest & request ) - > UniValue <nl> + { <nl> RPCTypeCheck ( request . params , { <nl> UniValue : : VSTR , <nl> UniValueType ( ) , / / VNUM or VSTR , checked inside AmountFromValue ( ) <nl> static UniValue sendrawtransaction ( const JSONRPCRequest & request ) <nl> } <nl> <nl> return tx - > GetHash ( ) . GetHex ( ) ; <nl> + } , <nl> + } ; <nl> } <nl> <nl> - static UniValue testmempoolaccept ( const JSONRPCRequest & request ) <nl> + static RPCHelpMan testmempoolaccept ( ) <nl> { <nl> - RPCHelpMan { " testmempoolaccept " , <nl> + return RPCHelpMan { " testmempoolaccept " , <nl> " \ nReturns result of mempool acceptance tests indicating if raw transaction ( serialized , hex - encoded ) would be accepted by mempool . \ n " <nl> " \ nThis checks if the transaction violates the consensus or policy rules . \ n " <nl> " \ nSee sendrawtransaction call . \ n " , <nl> static UniValue testmempoolaccept ( const JSONRPCRequest & request ) <nl> " \ nAs a JSON - RPC call \ n " <nl> + HelpExampleRpc ( " testmempoolaccept " , " [ \ " signedhex \ " ] " ) <nl> } , <nl> - } . Check ( request ) ; <nl> - <nl> + [ & ] ( const RPCHelpMan & self , const JSONRPCRequest & request ) - > UniValue <nl> + { <nl> RPCTypeCheck ( request . params , { <nl> UniValue : : VARR , <nl> UniValueType ( ) , / / VNUM or VSTR , checked inside AmountFromValue ( ) <nl> static UniValue testmempoolaccept ( const JSONRPCRequest & request ) <nl> <nl> result . push_back ( std : : move ( result_0 ) ) ; <nl> return result ; <nl> + } , <nl> + } ; <nl> } <nl> <nl> - UniValue decodepsbt ( const JSONRPCRequest & request ) <nl> + static RPCHelpMan decodepsbt ( ) <nl> { <nl> - RPCHelpMan { " decodepsbt " , <nl> + return RPCHelpMan { " decodepsbt " , <nl> " \ nReturn a JSON object representing the serialized , base64 - encoded partially signed Bitcoin transaction . \ n " , <nl> { <nl> { " psbt " , RPCArg : : Type : : STR , RPCArg : : Optional : : NO , " The PSBT base64 string " } , <nl> UniValue decodepsbt ( const JSONRPCRequest & request ) <nl> RPCExamples { <nl> HelpExampleCli ( " decodepsbt " , " \ " psbt \ " " ) <nl> } , <nl> - } . Check ( request ) ; <nl> - <nl> + [ & ] ( const RPCHelpMan & self , const JSONRPCRequest & request ) - > UniValue <nl> + { <nl> RPCTypeCheck ( request . params , { UniValue : : VSTR } ) ; <nl> <nl> / / Unserialize the transactions <nl> UniValue decodepsbt ( const JSONRPCRequest & request ) <nl> } <nl> <nl> return result ; <nl> + } , <nl> + } ; <nl> } <nl> <nl> - UniValue combinepsbt ( const JSONRPCRequest & request ) <nl> + static RPCHelpMan combinepsbt ( ) <nl> { <nl> - RPCHelpMan { " combinepsbt " , <nl> + return RPCHelpMan { " combinepsbt " , <nl> " \ nCombine multiple partially signed Bitcoin transactions into one transaction . \ n " <nl> " Implements the Combiner role . \ n " , <nl> { <nl> UniValue combinepsbt ( const JSONRPCRequest & request ) <nl> RPCExamples { <nl> HelpExampleCli ( " combinepsbt " , R " ( ' [ " mybase64_1 " , " mybase64_2 " , " mybase64_3 " ] ' ) " ) <nl> } , <nl> - } . Check ( request ) ; <nl> - <nl> + [ & ] ( const RPCHelpMan & self , const JSONRPCRequest & request ) - > UniValue <nl> + { <nl> RPCTypeCheck ( request . 
params , { UniValue : : VARR } , true ) ; <nl> <nl> / / Unserialize the transactions <nl> UniValue combinepsbt ( const JSONRPCRequest & request ) <nl> CDataStream ssTx ( SER_NETWORK , PROTOCOL_VERSION ) ; <nl> ssTx < < merged_psbt ; <nl> return EncodeBase64 ( MakeUCharSpan ( ssTx ) ) ; <nl> + } , <nl> + } ; <nl> } <nl> <nl> - UniValue finalizepsbt ( const JSONRPCRequest & request ) <nl> + static RPCHelpMan finalizepsbt ( ) <nl> { <nl> - RPCHelpMan { " finalizepsbt " , <nl> + return RPCHelpMan { " finalizepsbt " , <nl> " Finalize the inputs of a PSBT . If the transaction is fully signed , it will produce a \ n " <nl> " network serialized transaction which can be broadcast with sendrawtransaction . Otherwise a PSBT will be \ n " <nl> " created which has the final_scriptSig and final_scriptWitness fields filled for inputs that are complete . \ n " <nl> UniValue finalizepsbt ( const JSONRPCRequest & request ) <nl> RPCExamples { <nl> HelpExampleCli ( " finalizepsbt " , " \ " psbt \ " " ) <nl> } , <nl> - } . Check ( request ) ; <nl> - <nl> + [ & ] ( const RPCHelpMan & self , const JSONRPCRequest & request ) - > UniValue <nl> + { <nl> RPCTypeCheck ( request . params , { UniValue : : VSTR , UniValue : : VBOOL } , true ) ; <nl> <nl> / / Unserialize the transactions <nl> UniValue finalizepsbt ( const JSONRPCRequest & request ) <nl> result . pushKV ( " complete " , complete ) ; <nl> <nl> return result ; <nl> + } , <nl> + } ; <nl> } <nl> <nl> - UniValue createpsbt ( const JSONRPCRequest & request ) <nl> + static RPCHelpMan createpsbt ( ) <nl> { <nl> - RPCHelpMan { " createpsbt " , <nl> + return RPCHelpMan { " createpsbt " , <nl> " \ nCreates a transaction in the Partially Signed Transaction format . \ n " <nl> " Implements the Creator role . \ n " , <nl> { <nl> UniValue createpsbt ( const JSONRPCRequest & request ) <nl> RPCExamples { <nl> HelpExampleCli ( " createpsbt " , " \ " [ { \ \ \ " txid \ \ \ " : \ \ \ " myid \ \ \ " , \ \ \ " vout \ \ \ " : 0 } ] \ " \ " [ { \ \ \ " data \ \ \ " : \ \ \ " 00010203 \ \ \ " } ] \ " " ) <nl> } , <nl> - } . Check ( request ) ; <nl> - <nl> + [ & ] ( const RPCHelpMan & self , const JSONRPCRequest & request ) - > UniValue <nl> + { <nl> <nl> RPCTypeCheck ( request . params , { <nl> UniValue : : VARR , <nl> UniValue createpsbt ( const JSONRPCRequest & request ) <nl> ssTx < < psbtx ; <nl> <nl> return EncodeBase64 ( MakeUCharSpan ( ssTx ) ) ; <nl> + } , <nl> + } ; <nl> } <nl> <nl> - UniValue converttopsbt ( const JSONRPCRequest & request ) <nl> + static RPCHelpMan converttopsbt ( ) <nl> { <nl> - RPCHelpMan { " converttopsbt " , <nl> + return RPCHelpMan { " converttopsbt " , <nl> " \ nConverts a network serialized transaction to a PSBT . This should be used only with createrawtransaction and fundrawtransaction \ n " <nl> " createpsbt and walletcreatefundedpsbt should be used for new applications . \ n " , <nl> { <nl> UniValue converttopsbt ( const JSONRPCRequest & request ) <nl> " \ nConvert the transaction to a PSBT \ n " <nl> + HelpExampleCli ( " converttopsbt " , " \ " rawtransaction \ " " ) <nl> } , <nl> - } . Check ( request ) ; <nl> - <nl> + [ & ] ( const RPCHelpMan & self , const JSONRPCRequest & request ) - > UniValue <nl> + { <nl> RPCTypeCheck ( request . 
params , { UniValue : : VSTR , UniValue : : VBOOL , UniValue : : VBOOL } , true ) ; <nl> <nl> / / parse hex string from parameter <nl> UniValue converttopsbt ( const JSONRPCRequest & request ) <nl> ssTx < < psbtx ; <nl> <nl> return EncodeBase64 ( MakeUCharSpan ( ssTx ) ) ; <nl> + } , <nl> + } ; <nl> } <nl> <nl> - UniValue utxoupdatepsbt ( const JSONRPCRequest & request ) <nl> + static RPCHelpMan utxoupdatepsbt ( ) <nl> { <nl> - RPCHelpMan { " utxoupdatepsbt " , <nl> + return RPCHelpMan { " utxoupdatepsbt " , <nl> " \ nUpdates all segwit inputs and outputs in a PSBT with data from output descriptors , the UTXO set or the mempool . \ n " , <nl> { <nl> { " psbt " , RPCArg : : Type : : STR , RPCArg : : Optional : : NO , " A base64 string of a PSBT " } , <nl> UniValue utxoupdatepsbt ( const JSONRPCRequest & request ) <nl> } , <nl> RPCExamples { <nl> HelpExampleCli ( " utxoupdatepsbt " , " \ " psbt \ " " ) <nl> - } } . Check ( request ) ; <nl> - <nl> + } , <nl> + [ & ] ( const RPCHelpMan & self , const JSONRPCRequest & request ) - > UniValue <nl> + { <nl> RPCTypeCheck ( request . params , { UniValue : : VSTR , UniValue : : VARR } , true ) ; <nl> <nl> / / Unserialize the transactions <nl> UniValue utxoupdatepsbt ( const JSONRPCRequest & request ) <nl> CDataStream ssTx ( SER_NETWORK , PROTOCOL_VERSION ) ; <nl> ssTx < < psbtx ; <nl> return EncodeBase64 ( MakeUCharSpan ( ssTx ) ) ; <nl> + } , <nl> + } ; <nl> } <nl> <nl> - UniValue joinpsbts ( const JSONRPCRequest & request ) <nl> + static RPCHelpMan joinpsbts ( ) <nl> { <nl> - RPCHelpMan { " joinpsbts " , <nl> + return RPCHelpMan { " joinpsbts " , <nl> " \ nJoins multiple distinct PSBTs with different inputs and outputs into one PSBT with inputs and outputs from all of the PSBTs \ n " <nl> " No input in any of the PSBTs can be in more than one of the PSBTs . \ n " , <nl> { <nl> UniValue joinpsbts ( const JSONRPCRequest & request ) <nl> } , <nl> RPCExamples { <nl> HelpExampleCli ( " joinpsbts " , " \ " psbt \ " " ) <nl> - } } . Check ( request ) ; <nl> - <nl> + } , <nl> + [ & ] ( const RPCHelpMan & self , const JSONRPCRequest & request ) - > UniValue <nl> + { <nl> RPCTypeCheck ( request . params , { UniValue : : VARR } , true ) ; <nl> <nl> / / Unserialize the transactions <nl> UniValue joinpsbts ( const JSONRPCRequest & request ) <nl> CDataStream ssTx ( SER_NETWORK , PROTOCOL_VERSION ) ; <nl> ssTx < < shuffled_psbt ; <nl> return EncodeBase64 ( MakeUCharSpan ( ssTx ) ) ; <nl> + } , <nl> + } ; <nl> } <nl> <nl> - UniValue analyzepsbt ( const JSONRPCRequest & request ) <nl> + static RPCHelpMan analyzepsbt ( ) <nl> { <nl> - RPCHelpMan { " analyzepsbt " , <nl> + return RPCHelpMan { " analyzepsbt " , <nl> " \ nAnalyzes and provides information about the current status of a PSBT and its inputs \ n " , <nl> { <nl> { " psbt " , RPCArg : : Type : : STR , RPCArg : : Optional : : NO , " A base64 string of a PSBT " } <nl> UniValue analyzepsbt ( const JSONRPCRequest & request ) <nl> } , <nl> RPCExamples { <nl> HelpExampleCli ( " analyzepsbt " , " \ " psbt \ " " ) <nl> - } } . Check ( request ) ; <nl> - <nl> + } , <nl> + [ & ] ( const RPCHelpMan & self , const JSONRPCRequest & request ) - > UniValue <nl> + { <nl> RPCTypeCheck ( request . params , { UniValue : : VSTR } ) ; <nl> <nl> / / Unserialize the transaction <nl> UniValue analyzepsbt ( const JSONRPCRequest & request ) <nl> } <nl> <nl> return result ; <nl> + } , <nl> + } ; <nl> } <nl> <nl> void RegisterRawTransactionRPCCommands ( CRPCTable & t ) <nl>
Merge : Assert that RPCArg names are equal to CRPCCommand ones ( blockchain , rawtransaction )
bitcoin/bitcoin
d692d192cda37fda6359ad0736b85de20383db73
2020-09-22T15:08:08Z
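The record above converts each RPC from a free function returning UniValue (which built an RPCHelpMan, called Check(request), and then ran the handler body inline) into a factory returning an RPCHelpMan that carries the handler as a lambda next to its metadata, so the dispatch table can later compare declared argument names against the command table. The following is a minimal, self-contained sketch of that "metadata and handler travel together" pattern; MiniHelpMan, MiniRequest and echoparam are illustrative stand-ins, not Bitcoin Core's actual RPCHelpMan/JSONRPCRequest API.

// Minimal illustration of the refactor pattern in the diff above.
#include <functional>
#include <iostream>
#include <string>
#include <vector>

struct MiniRequest {
    std::vector<std::string> params;
};

struct MiniHelpMan {
    std::string name;
    std::string description;
    std::function<std::string(const MiniHelpMan&, const MiniRequest&)> handler;

    // Dispatch helper: a real implementation would also validate the declared
    // arguments against the request before invoking the handler.
    std::string HandleRequest(const MiniRequest& req) const {
        return handler(*this, req);
    }
};

// Factory in the "new" style: the help metadata and the handler body are
// returned together instead of the body living in a separate free function.
static MiniHelpMan echoparam() {
    return MiniHelpMan{
        "echoparam",
        "Returns its first parameter unchanged.",
        [](const MiniHelpMan& self, const MiniRequest& req) -> std::string {
            if (req.params.empty()) return "(no params)";
            return req.params[0];
        },
    };
}

int main() {
    MiniRequest req{{"hello"}};
    std::cout << echoparam().HandleRequest(req) << "\n";  // prints "hello"
    return 0;
}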
mmm a / docs / en / sql - reference / aggregate - functions / combinators . md <nl> ppp b / docs / en / sql - reference / aggregate - functions / combinators . md <nl> Example 2 : ` uniqArray ( arr ) ` – Counts the number of unique elements in all ‘ a <nl> <nl> - If and - Array can be combined . However , ‘ Array ’ must come first , then ‘ If ’ . Examples : ` uniqArrayIf ( arr , cond ) ` , ` quantilesTimingArrayIf ( level1 , level2 ) ( arr , cond ) ` . Due to this order , the ‘ cond ’ argument won ’ t be an array . <nl> <nl> + # # - SimpleState { # agg - functions - combinator - simplestate } <nl> + <nl> + If you apply this combinator , the aggregate function returns the same value but with a different type . This is an ` SimpleAggregateFunction ( . . . ) ` that can be stored in a table to work with [ AggregatingMergeTree ] ( . . / . . / engines / table - engines / mergetree - family / aggregatingmergetree . md ) table engines . <nl> + <nl> # # - State { # agg - functions - combinator - state } <nl> <nl> If you apply this combinator , the aggregate function doesn ’ t return the resulting value ( such as the number of unique values for the [ uniq ] ( . . / . . / sql - reference / aggregate - functions / reference / uniq . md # agg_function - uniq ) function ) , but an intermediate state of the aggregation ( for ` uniq ` , this is the hash table for calculating the number of unique values ) . This is an ` AggregateFunction ( . . . ) ` that can be used for further processing or stored in a table to finish aggregating later . <nl> new file mode 100644 <nl> index 00000000000 . . 8a5a71d6806 <nl> mmm / dev / null <nl> ppp b / src / AggregateFunctions / AggregateFunctionSimpleState . cpp <nl> <nl> + # include < AggregateFunctions / AggregateFunctionCombinatorFactory . h > <nl> + # include < AggregateFunctions / AggregateFunctionSimpleState . h > <nl> + <nl> + namespace DB <nl> + { <nl> + namespace <nl> + { <nl> + class AggregateFunctionCombinatorSimpleState final : public IAggregateFunctionCombinator <nl> + { <nl> + public : <nl> + String getName ( ) const override { return " SimpleState " ; } <nl> + <nl> + DataTypes transformArguments ( const DataTypes & arguments ) const override { return arguments ; } <nl> + <nl> + AggregateFunctionPtr transformAggregateFunction ( <nl> + const AggregateFunctionPtr & nested_function , <nl> + const AggregateFunctionProperties & , <nl> + const DataTypes & arguments , <nl> + const Array & params ) const override <nl> + { <nl> + return std : : make_shared < AggregateFunctionSimpleState > ( nested_function , arguments , params ) ; <nl> + } <nl> + } ; <nl> + <nl> + } <nl> + <nl> + void registerAggregateFunctionCombinatorSimpleState ( AggregateFunctionCombinatorFactory & factory ) <nl> + { <nl> + factory . registerCombinator ( std : : make_shared < AggregateFunctionCombinatorSimpleState > ( ) ) ; <nl> + } <nl> + <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . 34509c9756e <nl> mmm / dev / null <nl> ppp b / src / AggregateFunctions / AggregateFunctionSimpleState . h <nl> <nl> + # pragma once <nl> + <nl> + # include < AggregateFunctions / IAggregateFunction . h > <nl> + # include < DataTypes / DataTypeCustomSimpleAggregateFunction . h > <nl> + # include < DataTypes / DataTypeFactory . h > <nl> + <nl> + namespace DB <nl> + { <nl> + <nl> + / * * Not an aggregate function , but an adapter of aggregate functions . 
<nl> + * Aggregate functions with the ` SimpleState ` suffix is almost identical to the corresponding ones , <nl> + * except the return type becomes DataTypeCustomSimpleAggregateFunction . <nl> + * / <nl> + class AggregateFunctionSimpleState final : public IAggregateFunctionHelper < AggregateFunctionSimpleState > <nl> + { <nl> + private : <nl> + AggregateFunctionPtr nested_func ; <nl> + DataTypes arguments ; <nl> + Array params ; <nl> + <nl> + public : <nl> + AggregateFunctionSimpleState ( AggregateFunctionPtr nested_ , const DataTypes & arguments_ , const Array & params_ ) <nl> + : IAggregateFunctionHelper < AggregateFunctionSimpleState > ( arguments_ , params_ ) <nl> + , nested_func ( nested_ ) <nl> + , arguments ( arguments_ ) <nl> + , params ( params_ ) <nl> + { <nl> + } <nl> + <nl> + String getName ( ) const override { return nested_func - > getName ( ) + " SimpleState " ; } <nl> + <nl> + DataTypePtr getReturnType ( ) const override <nl> + { <nl> + DataTypeCustomSimpleAggregateFunction : : checkSupportedFunctions ( nested_func ) ; <nl> + / / Need to make a clone because it ' ll be customized . <nl> + auto storage_type = DataTypeFactory : : instance ( ) . get ( nested_func - > getReturnType ( ) - > getName ( ) ) ; <nl> + DataTypeCustomNamePtr custom_name = std : : make_unique < DataTypeCustomSimpleAggregateFunction > ( nested_func , arguments , params ) ; <nl> + storage_type - > setCustomization ( std : : make_unique < DataTypeCustomDesc > ( std : : move ( custom_name ) , nullptr ) ) ; <nl> + return storage_type ; <nl> + } <nl> + <nl> + void create ( AggregateDataPtr place ) const override { nested_func - > create ( place ) ; } <nl> + <nl> + void destroy ( AggregateDataPtr place ) const noexcept override { nested_func - > destroy ( place ) ; } <nl> + <nl> + bool hasTrivialDestructor ( ) const override { return nested_func - > hasTrivialDestructor ( ) ; } <nl> + <nl> + size_t sizeOfData ( ) const override { return nested_func - > sizeOfData ( ) ; } <nl> + <nl> + size_t alignOfData ( ) const override { return nested_func - > alignOfData ( ) ; } <nl> + <nl> + void add ( AggregateDataPtr place , const IColumn * * columns , size_t row_num , Arena * arena ) const override <nl> + { <nl> + nested_func - > add ( place , columns , row_num , arena ) ; <nl> + } <nl> + <nl> + void merge ( AggregateDataPtr place , ConstAggregateDataPtr rhs , Arena * arena ) const override { nested_func - > merge ( place , rhs , arena ) ; } <nl> + <nl> + void serialize ( ConstAggregateDataPtr place , WriteBuffer & buf ) const override { nested_func - > serialize ( place , buf ) ; } <nl> + <nl> + void deserialize ( AggregateDataPtr place , ReadBuffer & buf , Arena * arena ) const override <nl> + { <nl> + nested_func - > deserialize ( place , buf , arena ) ; <nl> + } <nl> + <nl> + void insertResultInto ( AggregateDataPtr place , IColumn & to , Arena * arena ) const override <nl> + { <nl> + nested_func - > insertResultInto ( place , to , arena ) ; <nl> + } <nl> + <nl> + bool allocatesMemoryInArena ( ) const override { return nested_func - > allocatesMemoryInArena ( ) ; } <nl> + <nl> + AggregateFunctionPtr getNestedFunction ( ) const { return nested_func ; } <nl> + } ; <nl> + <nl> + } <nl> mmm a / src / AggregateFunctions / registerAggregateFunctions . cpp <nl> ppp b / src / AggregateFunctions / registerAggregateFunctions . 
cpp <nl> class AggregateFunctionCombinatorFactory ; <nl> void registerAggregateFunctionCombinatorIf ( AggregateFunctionCombinatorFactory & ) ; <nl> void registerAggregateFunctionCombinatorArray ( AggregateFunctionCombinatorFactory & ) ; <nl> void registerAggregateFunctionCombinatorForEach ( AggregateFunctionCombinatorFactory & ) ; <nl> + void registerAggregateFunctionCombinatorSimpleState ( AggregateFunctionCombinatorFactory & ) ; <nl> void registerAggregateFunctionCombinatorState ( AggregateFunctionCombinatorFactory & ) ; <nl> void registerAggregateFunctionCombinatorMerge ( AggregateFunctionCombinatorFactory & ) ; <nl> void registerAggregateFunctionCombinatorNull ( AggregateFunctionCombinatorFactory & ) ; <nl> void registerAggregateFunctions ( ) <nl> registerAggregateFunctionCombinatorIf ( factory ) ; <nl> registerAggregateFunctionCombinatorArray ( factory ) ; <nl> registerAggregateFunctionCombinatorForEach ( factory ) ; <nl> + registerAggregateFunctionCombinatorSimpleState ( factory ) ; <nl> registerAggregateFunctionCombinatorState ( factory ) ; <nl> registerAggregateFunctionCombinatorMerge ( factory ) ; <nl> registerAggregateFunctionCombinatorNull ( factory ) ; <nl> mmm a / src / AggregateFunctions / ya . make <nl> ppp b / src / AggregateFunctions / ya . make <nl> SRCS ( <nl> AggregateFunctionRetention . cpp <nl> AggregateFunctionSequenceMatch . cpp <nl> AggregateFunctionSimpleLinearRegression . cpp <nl> + AggregateFunctionSimpleState . cpp <nl> AggregateFunctionState . cpp <nl> AggregateFunctionStatistics . cpp <nl> AggregateFunctionStatisticsSimple . cpp <nl> mmm a / src / DataTypes / DataTypeCustomSimpleAggregateFunction . cpp <nl> ppp b / src / DataTypes / DataTypeCustomSimpleAggregateFunction . cpp <nl> namespace ErrorCodes <nl> extern const int LOGICAL_ERROR ; <nl> } <nl> <nl> - static const std : : vector < String > supported_functions { " any " , " anyLast " , " min " , <nl> - " max " , " sum " , " sumWithOverflow " , " groupBitAnd " , " groupBitOr " , " groupBitXor " , <nl> - " sumMap " , " minMap " , " maxMap " , " groupArrayArray " , " groupUniqArrayArray " } ; <nl> + void DataTypeCustomSimpleAggregateFunction : : checkSupportedFunctions ( const AggregateFunctionPtr & function ) <nl> + { <nl> + static const std : : vector < String > supported_functions { " any " , " anyLast " , " min " , <nl> + " max " , " sum " , " sumWithOverflow " , " groupBitAnd " , " groupBitOr " , " groupBitXor " , <nl> + " sumMap " , " minMap " , " maxMap " , " groupArrayArray " , " groupUniqArrayArray " } ; <nl> <nl> + / / check function <nl> + if ( std : : find ( std : : begin ( supported_functions ) , std : : end ( supported_functions ) , function - > getName ( ) ) = = std : : end ( supported_functions ) ) <nl> + { <nl> + throw Exception ( " Unsupported aggregate function " + function - > getName ( ) + " , supported functions are " + boost : : algorithm : : join ( supported_functions , " , " ) , <nl> + ErrorCodes : : BAD_ARGUMENTS ) ; <nl> + } <nl> + } <nl> <nl> String DataTypeCustomSimpleAggregateFunction : : getName ( ) const <nl> { <nl> static std : : pair < DataTypePtr , DataTypeCustomDescPtr > create ( const ASTPtr & argum <nl> AggregateFunctionProperties properties ; <nl> function = AggregateFunctionFactory : : instance ( ) . 
get ( function_name , argument_types , params_row , properties ) ; <nl> <nl> - / / check function <nl> - if ( std : : find ( std : : begin ( supported_functions ) , std : : end ( supported_functions ) , function - > getName ( ) ) = = std : : end ( supported_functions ) ) <nl> - { <nl> - throw Exception ( " Unsupported aggregate function " + function - > getName ( ) + " , supported functions are " + boost : : algorithm : : join ( supported_functions , " , " ) , <nl> - ErrorCodes : : BAD_ARGUMENTS ) ; <nl> - } <nl> + DataTypeCustomSimpleAggregateFunction : : checkSupportedFunctions ( function ) ; <nl> <nl> DataTypePtr storage_type = DataTypeFactory : : instance ( ) . get ( argument_types [ 0 ] - > getName ( ) ) ; <nl> <nl> mmm a / src / DataTypes / DataTypeCustomSimpleAggregateFunction . h <nl> ppp b / src / DataTypes / DataTypeCustomSimpleAggregateFunction . h <nl> class DataTypeCustomSimpleAggregateFunction : public IDataTypeCustomName <nl> <nl> const AggregateFunctionPtr getFunction ( ) const { return function ; } <nl> String getName ( ) const override ; <nl> + static void checkSupportedFunctions ( const AggregateFunctionPtr & function ) ; <nl> } ; <nl> <nl> } <nl> mmm a / src / DataTypes / IDataType . h <nl> ppp b / src / DataTypes / IDataType . h <nl> class IDataType : private boost : : noncopyable <nl> static bool isSpecialCompressionAllowed ( const SubstreamPath & path ) ; <nl> private : <nl> friend class DataTypeFactory ; <nl> + friend class AggregateFunctionSimpleState ; <nl> / / / Customize this DataType <nl> void setCustomization ( DataTypeCustomDescPtr custom_desc_ ) const ; <nl> <nl> new file mode 100644 <nl> index 00000000000 . . 1c7908bf830 <nl> mmm / dev / null <nl> ppp b / tests / queries / 0_stateless / 01570_aggregator_combinator_simple_state . reference <nl> <nl> + SimpleAggregateFunction ( any , UInt64 ) 0 <nl> + SimpleAggregateFunction ( anyLast , UInt64 ) 0 <nl> + SimpleAggregateFunction ( min , UInt64 ) 0 <nl> + SimpleAggregateFunction ( max , UInt64 ) 0 <nl> + SimpleAggregateFunction ( sum , UInt64 ) 0 <nl> + SimpleAggregateFunction ( sumWithOverflow , UInt64 ) 0 <nl> + SimpleAggregateFunction ( groupBitAnd , UInt64 ) 0 <nl> + SimpleAggregateFunction ( groupBitOr , UInt64 ) 0 <nl> + SimpleAggregateFunction ( groupBitXor , UInt64 ) 0 <nl> + SimpleAggregateFunction ( sumMap , Tuple ( Array ( UInt64 ) , Array ( UInt64 ) ) ) ( [ ] , [ ] ) <nl> + SimpleAggregateFunction ( minMap , Tuple ( Array ( UInt64 ) , Array ( UInt64 ) ) ) ( [ 0 ] , [ 0 ] ) <nl> + SimpleAggregateFunction ( maxMap , Tuple ( Array ( UInt64 ) , Array ( UInt64 ) ) ) ( [ 0 ] , [ 0 ] ) <nl> + SimpleAggregateFunction ( groupArrayArray , Array ( UInt64 ) ) [ 0 ] <nl> + SimpleAggregateFunction ( groupUniqArrayArray , Array ( UInt64 ) ) [ 0 ] <nl> new file mode 100644 <nl> index 00000000000 . . 00a12a69d16 <nl> mmm / dev / null <nl> ppp b / tests / queries / 0_stateless / 01570_aggregator_combinator_simple_state . 
sql <nl> <nl> + with anySimpleState ( number ) as c select toTypeName ( c ) , c from numbers ( 1 ) ; <nl> + with anyLastSimpleState ( number ) as c select toTypeName ( c ) , c from numbers ( 1 ) ; <nl> + with minSimpleState ( number ) as c select toTypeName ( c ) , c from numbers ( 1 ) ; <nl> + with maxSimpleState ( number ) as c select toTypeName ( c ) , c from numbers ( 1 ) ; <nl> + with sumSimpleState ( number ) as c select toTypeName ( c ) , c from numbers ( 1 ) ; <nl> + with sumWithOverflowSimpleState ( number ) as c select toTypeName ( c ) , c from numbers ( 1 ) ; <nl> + with groupBitAndSimpleState ( number ) as c select toTypeName ( c ) , c from numbers ( 1 ) ; <nl> + with groupBitOrSimpleState ( number ) as c select toTypeName ( c ) , c from numbers ( 1 ) ; <nl> + with groupBitXorSimpleState ( number ) as c select toTypeName ( c ) , c from numbers ( 1 ) ; <nl> + with sumMapSimpleState ( ( [ number ] , [ number ] ) ) as c select toTypeName ( c ) , c from numbers ( 1 ) ; <nl> + with minMapSimpleState ( ( [ number ] , [ number ] ) ) as c select toTypeName ( c ) , c from numbers ( 1 ) ; <nl> + with maxMapSimpleState ( ( [ number ] , [ number ] ) ) as c select toTypeName ( c ) , c from numbers ( 1 ) ; <nl> + with groupArrayArraySimpleState ( [ number ] ) as c select toTypeName ( c ) , c from numbers ( 1 ) ; <nl> + with groupUniqArrayArraySimpleState ( [ number ] ) as c select toTypeName ( c ) , c from numbers ( 1 ) ; <nl> + <nl> + - - non - SimpleAggregateFunction <nl> + with countSimpleState ( number ) as c select toTypeName ( c ) , c from numbers ( 1 ) ; - - { serverError 36 } <nl>
Add - SimpleState combinator
ClickHouse/ClickHouse
5b1e5679b4a292e33ee5e60c0ba9cefa1e8388bd
2020-12-11T03:43:56Z
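The -SimpleState combinator added above is essentially a decorator: AggregateFunctionSimpleState forwards every state operation (create, add, merge, serialize, insertResultInto) to the nested aggregate function and only changes the declared return type to SimpleAggregateFunction(...), so the value can be stored directly in an AggregatingMergeTree column. Below is a minimal, self-contained C++ sketch of that delegation pattern; IAggFunc, MaxFunc and SimpleStateWrapper are simplified stand-ins, not ClickHouse's IAggregateFunction interface.

// Delegation pattern behind the -SimpleState combinator: same values, new type.
#include <iostream>
#include <memory>
#include <string>

struct IAggFunc {
    virtual ~IAggFunc() = default;
    virtual std::string name() const = 0;
    virtual std::string returnTypeName() const = 0;
    virtual void add(long value) = 0;
    virtual long result() const = 0;
};

struct MaxFunc : IAggFunc {
    long current = 0;
    std::string name() const override { return "max"; }
    std::string returnTypeName() const override { return "UInt64"; }
    void add(long value) override { if (value > current) current = value; }
    long result() const override { return current; }
};

// Analogue of AggregateFunctionSimpleState: pure delegation except for the
// reported name and return type.
struct SimpleStateWrapper : IAggFunc {
    std::shared_ptr<IAggFunc> nested;
    explicit SimpleStateWrapper(std::shared_ptr<IAggFunc> n) : nested(std::move(n)) {}
    std::string name() const override { return nested->name() + "SimpleState"; }
    std::string returnTypeName() const override {
        return "SimpleAggregateFunction(" + nested->name() + ", " + nested->returnTypeName() + ")";
    }
    void add(long value) override { nested->add(value); }        // delegated
    long result() const override { return nested->result(); }    // delegated
};

int main() {
    SimpleStateWrapper f(std::make_shared<MaxFunc>());
    for (long v : {3L, 7L, 5L}) f.add(v);
    std::cout << f.name() << " -> " << f.returnTypeName()
              << " = " << f.result() << "\n";
    // prints: maxSimpleState -> SimpleAggregateFunction(max, UInt64) = 7
    return 0;
}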
mmm a / hphp / runtime / ext / session / ext_session . cpp <nl> ppp b / hphp / runtime / ext / session / ext_session . cpp <nl> struct SessionRequestData final : Session { <nl> void destroy ( ) { <nl> id . reset ( ) ; <nl> session_status = Session : : None ; <nl> - ps_session_handler = nullptr ; <nl> + / / Note : we should not destroy user save handler here <nl> + / / ( if the session is restarted during request , the handler <nl> + / / should be alive ) , it ' s destroyed only in the request shutdown . <nl> } <nl> <nl> void requestShutdownImpl ( ) ; <nl> static bool ini_on_update_save_dir ( const std : : string & value ) { <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - static int php_session_destroy ( ) { <nl> - int retval = true ; <nl> + static bool php_session_destroy ( ) { <nl> + bool retval = true ; <nl> <nl> if ( s_session - > session_status ! = Session : : Active ) { <nl> raise_warning ( " Trying to destroy uninitialized session " ) ; <nl> static int php_session_destroy ( ) { <nl> raise_warning ( " Session object destruction failed " ) ; <nl> } <nl> <nl> - s_session - > requestShutdownImpl ( ) ; <nl> + if ( mod_is_open ( ) ) { <nl> + s_session - > mod - > close ( ) ; <nl> + } <nl> + <nl> s_session - > destroy ( ) ; <nl> <nl> return retval ; <nl> static bool HHVM_FUNCTION ( session_start ) { <nl> } <nl> <nl> static bool HHVM_FUNCTION ( session_destroy ) { <nl> - bool retval = true ; <nl> - <nl> - if ( s_session - > session_status ! = Session : : Active ) { <nl> - raise_warning ( " Trying to destroy uninitialized session " ) ; <nl> - return false ; <nl> - } <nl> - <nl> - if ( ! s_session - > mod - > destroy ( s_session - > id . data ( ) ) ) { <nl> - retval = false ; <nl> - raise_warning ( " Session object destruction failed " ) ; <nl> - } <nl> - <nl> - s_session - > requestShutdownImpl ( ) ; <nl> - s_session - > destroy ( ) ; <nl> - <nl> - return retval ; <nl> + return php_session_destroy ( ) ; <nl> } <nl> <nl> static void HHVM_FUNCTION ( session_unset ) { <nl> new file mode 100644 <nl> index 00000000000 . . 37f9e363d71 <nl> mmm / dev / null <nl> ppp b / hphp / test / slow / ext_session / keep_save_handler . php <nl> <nl> + < ? php <nl> + <nl> + ob_start ( ) ; <nl> + <nl> + function s_open ( $ path , $ name ) { return true ; } <nl> + function s_close ( ) { return true ; } <nl> + function s_read ( $ id ) { return ' ' ; } <nl> + function s_write ( $ id , $ data ) { <nl> + var_dump ( ' s_write ' ) ; <nl> + var_dump ( $ id ) ; <nl> + var_dump ( $ data ) ; <nl> + return true ; <nl> + } <nl> + function s_destroy ( $ id ) { return true ; } <nl> + function s_gc ( $ t ) { return true ; } <nl> + <nl> + session_set_save_handler ( <nl> + ' s_open ' , <nl> + ' s_close ' , <nl> + ' s_read ' , <nl> + ' s_write ' , <nl> + ' s_destroy ' , <nl> + ' s_gc ' <nl> + ) ; <nl> + <nl> + session_start ( ) ; <nl> + <nl> + $ _SESSION [ ' foo ' ] = 10 ; <nl> + $ _SESSION [ ' bar ' ] = 20 ; <nl> + <nl> + var_dump ( $ _SESSION ) ; <nl> + session_write_close ( ) ; <nl> + <nl> + session_start ( ) ; <nl> + session_destroy ( ) ; <nl> + <nl> + session_start ( ) ; <nl> new file mode 100644 <nl> index 00000000000 . . b8d31b6542a <nl> mmm / dev / null <nl> ppp b / hphp / test / slow / ext_session / keep_save_handler . php . 
expectf <nl> <nl> + array ( 2 ) { <nl> + [ " foo " ] = > <nl> + int ( 10 ) <nl> + [ " bar " ] = > <nl> + int ( 20 ) <nl> + } <nl> + string ( 7 ) " s_write " <nl> + string ( 32 ) " % s " <nl> + string ( 18 ) " foo | i : 10 ; bar | i : 20 ; " <nl> + string ( 7 ) " s_write " <nl> + string ( 32 ) " % s " <nl> + string ( 0 ) " " <nl>
Session : Preserve user save handler on session_destroy
facebook/hhvm
adc98c20d9c051c260faa2e0eedb8649e533f874
2015-04-06T20:00:30Z
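The HHVM change above narrows what SessionRequestData::destroy() tears down: the storage module is closed and the session state reset, but the user-supplied save handler is deliberately kept so that a session restarted later in the same request still reaches it; only request shutdown releases the handler. A minimal, self-contained sketch of that lifecycle follows; Session, start(), destroy() and requestShutdown() are simplified stand-ins, not HHVM's actual types.

// Lifecycle sketch: mid-request destroy keeps the user handler installed.
#include <functional>
#include <iostream>
#include <string>

struct Session {
    std::function<void(const std::string&)> user_write_handler;  // installed by the script
    bool active = false;

    void start() { active = true; }

    // session_destroy(): drop the session itself, but deliberately keep the
    // user save handler so a later start() in the same request still has it.
    void destroy() { active = false; }

    // Request shutdown is the only place the handler is released.
    void requestShutdown() {
        user_write_handler = nullptr;
        active = false;
    }
};

int main() {
    Session s;
    s.user_write_handler = [](const std::string& data) {
        std::cout << "write: \"" << data << "\"\n";
    };
    s.start();
    s.destroy();   // mid-request destroy: handler survives
    s.start();     // restarted session can still call the same handler
    if (s.user_write_handler) s.user_write_handler("foo|i:10;");
    s.requestShutdown();
    std::cout << (s.user_write_handler ? "handler still set\n" : "handler released\n");
    return 0;
}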
mmm a / bindings / python / cntk / ops / tests / userfunction_complex_test . py <nl> ppp b / bindings / python / cntk / ops / tests / userfunction_complex_test . py <nl> def print_training_progress ( trainer , mb , frequency ) : <nl> return mb , training_loss , eval_error <nl> <nl> <nl> - def train ( nonlinearity , num_hidden_layers , device_id , <nl> - minibatch_size = 10 , num_samples = 100000 ) : <nl> + def train ( nonlinearity , num_hidden_layers , device_id , <nl> + minibatch_size = 10 , num_samples = 1000 ) : <nl> from cntk . cntk_py import always_allow_setting_default_device <nl> always_allow_setting_default_device ( ) <nl> try_set_default_device ( cntk_device ( device_id ) ) <nl> def train ( nonlinearity , num_hidden_layers , device_id , <nl> <nl> training_progress_output_freq = 20 <nl> <nl> - # Preallocate so that we don ' t measure the memory incrase <nl> - losses = np . zeros ( num_minibatches_to_train ) <nl> - errors = np . zeros ( num_minibatches_to_train ) <nl> + losses = [ ] <nl> + errors = [ ] <nl> + <nl> + for i in range ( num_minibatches_to_train ) : <nl> + features , labels = generate_random_data_sample ( minibatch_size , <nl> + input_dim , <nl> + num_output_classes ) <nl> + <nl> + # Specify the input variables mapping in the model to actual minibatch <nl> + # data for training . <nl> + trainer . train_minibatch ( { inp : features , label : labels } , <nl> + device = cntk_device ( device_id ) ) <nl> + <nl> + batchsize , loss , error = print_training_progress ( trainer , i , <nl> + training_progress_output_freq ) <nl> + <nl> + if not ( loss = = " NA " or error = = " NA " ) : <nl> + losses . append ( loss ) <nl> + errors . append ( error ) <nl> + <nl> + return losses , errors <nl> + <nl> + <nl> + def mem_leak_check ( nonlinearity , num_hidden_layers , device_id , <nl> + minibatch_size = 1 , num_samples = 10000 ) : <nl> + from cntk . cntk_py import always_allow_setting_default_device <nl> + always_allow_setting_default_device ( ) <nl> + try_set_default_device ( cntk_device ( device_id ) ) <nl> + np . random . seed ( 0 ) <nl> + <nl> + learning_rate = 0 . 5 <nl> + lr_schedule = learning_rate_schedule ( learning_rate , UnitType . minibatch ) <nl> + <nl> + hidden_layers_dim = 50 <nl> + <nl> + inp = input ( ( input_dim ) , np . float32 ) <nl> + label = input ( ( num_output_classes ) , np . float32 ) <nl> + <nl> + z = fully_connected_classifier_net ( inp , num_output_classes , hidden_layers_dim , <nl> + num_hidden_layers , nonlinearity ) <nl> + <nl> + loss = cross_entropy_with_softmax ( z , label ) <nl> + eval_error = classification_error ( z , label ) <nl> + <nl> + learner = sgd ( z . parameters , lr_schedule ) <nl> + trainer = Trainer ( z , ( loss , eval_error ) , [ learner ] ) <nl> + <nl> + num_minibatches_to_train = int ( num_samples / minibatch_size ) <nl> <nl> mem = np . zeros ( num_minibatches_to_train ) <nl> <nl> def train ( nonlinearity , num_hidden_layers , device_id , <nl> input_dim , <nl> num_output_classes ) <nl> <nl> - # Set a maximum number of training runs , in which the memory is allowed to <nl> + # Set a maximum fraction of iterations , in which the memory is allowed to <nl> # increase . Most likely these will be the first training runs . <nl> - MEM_INCREASE_COUNT_TOLERANCE = 40 <nl> + # Long - term this test needs to be run in a separate process over a longer <nl> + # period of time . <nl> + MEM_INCREASE_FRACTION_TOLERANCE = 0 . 
01 <nl> <nl> dev = cntk_device ( device_id ) <nl> i = 0 <nl> def train ( nonlinearity , num_hidden_layers , device_id , <nl> # data for training . <nl> trainer . train_minibatch ( { inp : features , label : labels } , <nl> device = dev ) <nl> - <nl> - batchsize , loss , error = print_training_progress ( trainer , i , <nl> - training_progress_output_freq ) <nl> - <nl> - if loss = = " NA " or error = = " NA " : <nl> - loss = error = np . nan <nl> - <nl> - losses [ i ] = loss <nl> - errors [ i ] = error <nl> - <nl> i + = 1 <nl> <nl> mem_deltas = np . diff ( mem ) <nl> - <nl> - if ( mem_deltas > 0 ) . sum ( ) > MEM_INCREASE_COUNT_TOLERANCE : <nl> - raise ValueError ( ' Potential Memory leak detected with % s : % s ' % <nl> - ( nonlinearity , mem_deltas [ mem_deltas ! = 0 ] ) ) <nl> - <nl> - losses = losses [ ~ np . isnan ( losses ) ] <nl> - errors = errors [ ~ np . isnan ( errors ) ] <nl> + iterations_with_mem_increase = ( mem_deltas > 0 ) . sum ( ) <nl> + mem_inc_fraction = iterations_with_mem_increase / num_minibatches_to_train <nl> + rough_mem_diff = mem [ - 1 ] - mem [ 10 ] <nl> <nl> - return losses , errors <nl> + if mem_inc_fraction > MEM_INCREASE_FRACTION_TOLERANCE and rough_mem_diff > 0 : <nl> + # For the rough leak estimation we take the memory footprint after the <nl> + # dust of the first train_minibatch runs has settled . <nl> + mem_changes = mem_deltas [ mem_deltas ! = 0 ] <nl> + raise ValueError ( ' Potential memory leak of ~ % i KB ( % i % % of MBs ' <nl> + ' increased memory usage ) detected with % s : \ n % s ' % <nl> + ( int ( rough_mem_diff / 1024 ) , int ( mem_inc_fraction * 100 ) , <nl> + nonlinearity , mem_changes ) ) <nl> <nl> <nl> class MySigmoid ( UserFunction ) : <nl> def infer_outputs ( self ) : <nl> <nl> <nl> def test_ext_user_sigmoid ( device_id ) : <nl> - np . random . seed ( 0 ) <nl> - act_losses , act_errors = train ( MySigmoid , 4 , device_id ) <nl> - np . random . seed ( 0 ) <nl> exp_losses , exp_errors = train ( sigmoid , 4 , device_id ) <nl> + act_losses , act_errors = train ( MySigmoid , 4 , device_id ) <nl> assert np . allclose ( exp_losses , act_losses ) <nl> assert np . allclose ( exp_errors , act_errors ) <nl> <nl> <nl> + def test_mem_leak ( device_id ) : <nl> + mem_leak_check ( sigmoid , 4 , device_id ) <nl> + mem_leak_check ( MySigmoid , 4 , device_id ) <nl> + <nl> + <nl> def measure_runtime ( device_id ) : <nl> import timeit <nl> np . random . seed ( 0 ) <nl>
Refactoring mem leak test
microsoft/CNTK
b184b00a7759f3c1257191afee748a670b1c069e
2017-05-10T14:29:45Z
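The CNTK refactor above splits training from leak detection and replaces a fixed count of allowed memory increases with two combined conditions: the fraction of minibatches whose memory footprint grew must exceed a tolerance, and the rough difference between the final footprint and the footprint measured after the first few iterations have settled must be positive. The sketch below restates that heuristic in self-contained C++ (the original test is Python/numpy); the function name, tolerance and sample data are illustrative only.

// Leak heuristic: flag only if many iterations grew AND the settled-vs-final diff is positive.
#include <cstdio>
#include <vector>

bool potential_leak(const std::vector<long>& mem_kb,
                    double increase_fraction_tolerance = 0.01,
                    size_t settle_iterations = 10) {
    if (mem_kb.size() <= settle_iterations) return false;
    size_t increases = 0;
    for (size_t i = 1; i < mem_kb.size(); ++i)
        if (mem_kb[i] > mem_kb[i - 1]) ++increases;
    double fraction = double(increases) / double(mem_kb.size() - 1);
    long rough_diff = mem_kb.back() - mem_kb[settle_iterations];
    return fraction > increase_fraction_tolerance && rough_diff > 0;
}

int main() {
    std::vector<long> steady(1000, 50000);                                 // flat footprint
    std::vector<long> leaking;
    for (int i = 0; i < 1000; ++i) leaking.push_back(50000 + i / 20);      // slow growth
    std::printf("steady:  %s\n", potential_leak(steady) ? "potential leak" : "ok");
    std::printf("leaking: %s\n", potential_leak(leaking) ? "potential leak" : "ok");
    return 0;
}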
mmm a / google / protobuf <nl> ppp b / google / protobuf <nl> @ @ - 1 + 1 @ @ <nl> - Subproject commit bd8a476510d17d3841ff2509fbd67b7f4b543c1c <nl> + Subproject commit 0906f5d18a2548024b511eadcbb4cfc0ca56cd67 <nl> mmm a / tensorflow / core / BUILD <nl> ppp b / tensorflow / core / BUILD <nl> <nl> # " : all_kernels " - The cpu - specific kernels , plus " : gpu_kernels " if <nl> # built with Cuda <nl> # " : tensorflow_opensource " - The complete open - source package , including <nl> - # " : kernels " , " : core " , and a Session implementation . <nl> + # " : all_kernels " , " : core " , and a Session implementation . <nl> # " : tensorflow " - " tensorflow_opensource " plus some Google - internal libraries . <nl> # " : testlib " - TensorFlow - specific test support , e . g . utilities for testing <nl> # kernels . <nl> tf_cuda_library ( <nl> ] , <nl> ) <nl> <nl> - # DEPRECATED : Use either " : all_kernels " and / or " : kernel_lib " instead . <nl> - # We need to get rid of this library before we can make the kernels <nl> - # directory its own package , due to name conflicts . <nl> - tf_cuda_library ( <nl> - name = " kernels " , <nl> - hdrs = glob ( [ " kernels / * * / * . h " ] ) , <nl> - deprecation = " use ' : all_kernels ' or ' : kernel_lib ' instead " , <nl> - visibility = [ " / / visibility : public " ] , <nl> - deps = [ <nl> - " : kernel_lib " , <nl> - ] , <nl> - ) <nl> - <nl> tf_cuda_library ( <nl> name = " tensorflow_opensource " , <nl> copts = tf_copts ( ) , <nl> tf_cc_tests ( <nl> # TODO ( opensource ) : fix <nl> " common_runtime / gpu / * _test . cc " , <nl> # Run by tests below <nl> - " common_runtime / gpu / gpu_region_allocator_test . cc " , <nl> + " common_runtime / gpu / gpu_allocator_retry_test . cc " , <nl> " common_runtime / gpu / gpu_bfc_allocator_test . cc " , <nl> + " common_runtime / gpu / gpu_region_allocator_test . cc " , <nl> ] , <nl> ) , <nl> deps = [ <nl> tf_cc_tests ( <nl> " user_ops / * * / * _test . cc " , <nl> " common_runtime / gpu / * _test . cc " , <nl> ] , <nl> + exclude = [ <nl> + # Run by tests below <nl> + " common_runtime / gpu / gpu_allocator_retry_test . cc " , <nl> + ] , <nl> ) , <nl> deps = [ <nl> " : all_kernels " , <nl> tf_cc_tests ( <nl> ] , <nl> ) <nl> <nl> + tf_cc_tests ( <nl> + linkstatic = tf_kernel_tests_linkstatic ( ) , <nl> + tags = tf_cuda_tests_tags ( ) + [ " nomac " ] , <nl> + tests = [ " common_runtime / gpu / gpu_allocator_retry_test . cc " ] , <nl> + deps = [ <nl> + " : all_kernels " , <nl> + " : core_cpu " , <nl> + " : core_cpu_internal " , <nl> + " : direct_session " , <nl> + " : framework " , <nl> + " : framework_internal " , <nl> + " : gpu_runtime " , <nl> + " : kernel_lib " , <nl> + " : lib " , <nl> + " : lib_internal " , <nl> + " : protos_all_cc " , <nl> + " : test " , <nl> + " : test_main " , <nl> + " : testlib " , <nl> + " / / tensorflow / cc : cc_ops " , <nl> + ] , <nl> + ) <nl> + <nl> # Test data <nl> filegroup ( <nl> name = " image_testdata " , <nl> mmm a / tensorflow / core / framework / allocator . h <nl> ppp b / tensorflow / core / framework / allocator . h <nl> class Allocator { <nl> / / TracksAlloctionSizes is overridden to return true . <nl> virtual bool TracksAllocationSizes ( ) { return false ; } <nl> <nl> + / / Returns true if this allocator requires tensors with 0 elements <nl> + / / to allocate buffers . This is false for most allocators , but may <nl> + / / be used by special - case allocators that want to track tensor <nl> + / / usage . 
<nl> + virtual bool ShouldAllocateEmptyTensors ( ) { return false ; } <nl> + <nl> / / Returns the user - requested size of the data allocated at <nl> / / ' ptr ' . Note that the actual buffer allocated might be larger <nl> / / than requested , but this function returns the size requested by <nl> mmm a / tensorflow / core / framework / tensor . cc <nl> ppp b / tensorflow / core / framework / tensor . cc <nl> void Tensor : : CopyFromInternal ( const Tensor & other , const TensorShape & shape ) { <nl> Tensor : : Tensor ( Allocator * a , DataType type , const TensorShape & shape ) <nl> : type_ ( type ) , shape_ ( shape ) , buf_ ( nullptr ) { <nl> CHECK_NOTNULL ( a ) ; <nl> - if ( shape_ . num_elements ( ) > 0 ) { <nl> + if ( shape_ . num_elements ( ) > 0 | | a - > ShouldAllocateEmptyTensors ( ) ) { <nl> CASES ( type , buf_ = new Buffer < T > ( a , shape . num_elements ( ) ) ) ; <nl> } <nl> } <nl> Tensor : : Tensor ( Allocator * a , DataType type , const TensorShape & shape , <nl> const AllocationAttributes & allocation_attr ) <nl> : type_ ( type ) , shape_ ( shape ) , buf_ ( nullptr ) { <nl> CHECK_NOTNULL ( a ) ; <nl> - if ( shape_ . num_elements ( ) > 0 ) { <nl> + if ( shape_ . num_elements ( ) > 0 | | a - > ShouldAllocateEmptyTensors ( ) ) { <nl> CASES ( type , buf_ = new Buffer < T > ( a , shape . num_elements ( ) , allocation_attr ) ) ; <nl> } <nl> } <nl> mmm a / tensorflow / core / kernels / reshape_op . h <nl> ppp b / tensorflow / core / kernels / reshape_op . h <nl> class ReshapeOp : public OpKernel { <nl> if ( unknown_index ! = - 1 ) { <nl> OP_REQUIRES ( <nl> context , product > 0 , <nl> - errors : : InvalidArgument ( " cannot infer the missing input size for " <nl> - " an empty tensor unless all specified " <nl> + errors : : InvalidArgument ( " Reshape cannot infer the missing input size " <nl> + " for an empty tensor unless all specified " <nl> " input sizes are non - zero " ) ) ; <nl> const int32 missing = input . NumElements ( ) / product ; <nl> - OP_REQUIRES ( context , product * missing = = input . NumElements ( ) , <nl> - errors : : InvalidArgument ( " Input has " , input . NumElements ( ) , <nl> - " values , which isn ' t divisible by " , <nl> - product ) ) ; <nl> + OP_REQUIRES ( <nl> + context , product * missing = = input . NumElements ( ) , <nl> + errors : : InvalidArgument ( <nl> + " Input to reshape is a tensor with " , input . NumElements ( ) , <nl> + " values , but the requested shape requires a multiple of " , <nl> + product ) ) ; <nl> shape . set_dim ( unknown_index , missing ) ; <nl> } <nl> OP_REQUIRES ( context , shape . num_elements ( ) = = input . NumElements ( ) , <nl> - errors : : InvalidArgument ( " Input has " , input . NumElements ( ) , <nl> - " values , which isn ' t the same as " , <nl> + errors : : InvalidArgument ( " Input to reshape is a tensor with " , <nl> + input . NumElements ( ) , <nl> + " values , but the requested shape has " , <nl> shape . num_elements ( ) ) ) ; <nl> <nl> / / Actually produce the reshaped output . <nl> mmm a / tensorflow / core / kernels / tensor_array . h <nl> ppp b / tensorflow / core / kernels / tensor_array . h <nl> class TensorArray : public ResourceBase { <nl> mutex_lock l ( mu_ ) ; <nl> values - > clear ( ) ; <nl> values - > resize ( tensors_ . size ( ) ) ; <nl> - for ( int32 i = 0 ; i < tensors_ . size ( ) ; + + i ) { <nl> + for ( std : : size_t i = 0 ; i < tensors_ . 
size ( ) ; + + i ) { <nl> TF_RETURN_IF_ERROR ( LockedRead ( i , & ( * values ) [ i ] ) ) ; <nl> } <nl> return Status : : OK ( ) ; <nl> mmm a / tensorflow / core / kernels / topk_op . cc <nl> ppp b / tensorflow / core / kernels / topk_op . cc <nl> class TopK : public OpKernel { <nl> explicit TopK ( OpKernelConstruction * context ) : OpKernel ( context ) { <nl> OP_REQUIRES_OK ( context , context - > GetAttr ( " sorted " , & sorted_ ) ) ; <nl> if ( num_inputs ( ) < 2 ) { / / k is an attr ( TopK ) . <nl> + OP_DEPRECATED ( context , 7 , " Use TopKV2 instead " ) ; <nl> OP_REQUIRES_OK ( context , context - > GetAttr ( " k " , & k_ ) ) ; <nl> } else { / / k is an input ( TopKV2 ) , so we won ' t know it until Compute . <nl> k_ = - 1 ; <nl> mmm a / tensorflow / core / public / version . h <nl> ppp b / tensorflow / core / public / version . h <nl> limitations under the License . <nl> / / 111635679 , 7jan2016 ) . <nl> / / 5 . Graphs are wholly - validated during Session : : Create ( ) ( 7jan2016 ) . <nl> / / 6 . TensorFlow is scalar strict within Google ( 27jan2016 ) . <nl> + / / 7 . Remove TopK in favor of TopKV2 ( 5feb2016 ) . <nl> # define TF_GRAPH_DEF_VERSION_MIN_PRODUCER 0 <nl> # define TF_GRAPH_DEF_VERSION_MIN_CONSUMER 0 <nl> - # define TF_GRAPH_DEF_VERSION 6 <nl> + # define TF_GRAPH_DEF_VERSION 7 <nl> <nl> # endif / / TENSORFLOW_CORE_PUBLIC_VERSION_H_ <nl> mmm a / tensorflow / models / image / cifar10 / cifar10 . py <nl> ppp b / tensorflow / models / image / cifar10 / cifar10 . py <nl> def loss ( logits , labels ) : <nl> Returns : <nl> Loss tensor of type float . <nl> " " " <nl> - # Reshape the labels into a dense Tensor of <nl> - # shape [ batch_size , NUM_CLASSES ] . <nl> - sparse_labels = tf . reshape ( labels , [ FLAGS . batch_size , 1 ] ) <nl> - indices = tf . reshape ( tf . range ( FLAGS . batch_size ) , [ FLAGS . batch_size , 1 ] ) <nl> - concated = tf . concat ( 1 , [ indices , sparse_labels ] ) <nl> - dense_labels = tf . sparse_to_dense ( concated , <nl> - [ FLAGS . batch_size , NUM_CLASSES ] , <nl> - 1 . 0 , 0 . 0 ) <nl> - <nl> # Calculate the average cross entropy loss across the batch . <nl> - cross_entropy = tf . nn . softmax_cross_entropy_with_logits ( <nl> - logits , dense_labels , name = ' cross_entropy_per_example ' ) <nl> + labels = tf . cast ( labels , tf . int64 ) <nl> + cross_entropy = tf . nn . sparse_softmax_cross_entropy_with_logits ( <nl> + logits , labels , name = ' cross_entropy_per_example ' ) <nl> cross_entropy_mean = tf . reduce_mean ( cross_entropy , name = ' cross_entropy ' ) <nl> tf . add_to_collection ( ' losses ' , cross_entropy_mean ) <nl> <nl> mmm a / tensorflow / models / image / mnist / convolutional . py <nl> ppp b / tensorflow / models / image / mnist / convolutional . py <nl> def extract_data ( filename , num_images ) : <nl> <nl> <nl> def extract_labels ( filename , num_images ) : <nl> - " " " Extract the labels into a 1 - hot matrix [ image index , label index ] . " " " <nl> + " " " Extract the labels into a vector of int64 label IDs . " " " <nl> print ( ' Extracting ' , filename ) <nl> with gzip . open ( filename ) as bytestream : <nl> bytestream . read ( 8 ) <nl> buf = bytestream . read ( 1 * num_images ) <nl> - labels = numpy . frombuffer ( buf , dtype = numpy . uint8 ) <nl> - # Convert to dense 1 - hot representation . <nl> - return ( numpy . arange ( NUM_LABELS ) = = labels [ : , None ] ) . astype ( numpy . float32 ) <nl> + labels = numpy . frombuffer ( buf , dtype = numpy . uint8 ) . astype ( numpy . 
int64 ) <nl> + return labels <nl> <nl> <nl> def fake_data ( num_images ) : <nl> def fake_data ( num_images ) : <nl> data = numpy . ndarray ( <nl> shape = ( num_images , IMAGE_SIZE , IMAGE_SIZE , NUM_CHANNELS ) , <nl> dtype = numpy . float32 ) <nl> - labels = numpy . zeros ( shape = ( num_images , NUM_LABELS ) , dtype = numpy . float32 ) <nl> + labels = numpy . zeros ( shape = ( num_images , ) , dtype = numpy . int64 ) <nl> for image in xrange ( num_images ) : <nl> label = image % 2 <nl> data [ image , : , : , 0 ] = label - 0 . 5 <nl> - labels [ image , label ] = 1 . 0 <nl> + labels [ image ] = label <nl> return data , labels <nl> <nl> <nl> def error_rate ( predictions , labels ) : <nl> - " " " Return the error rate based on dense predictions and 1 - hot labels . " " " <nl> + " " " Return the error rate based on dense predictions and sparse labels . " " " <nl> return 100 . 0 - ( <nl> 100 . 0 * <nl> - numpy . sum ( numpy . argmax ( predictions , 1 ) = = numpy . argmax ( labels , 1 ) ) / <nl> + numpy . sum ( numpy . argmax ( predictions , 1 ) = = labels ) / <nl> predictions . shape [ 0 ] ) <nl> <nl> <nl> def main ( argv = None ) : # pylint : disable = unused - argument <nl> train_data_node = tf . placeholder ( <nl> tf . float32 , <nl> shape = ( BATCH_SIZE , IMAGE_SIZE , IMAGE_SIZE , NUM_CHANNELS ) ) <nl> - train_labels_node = tf . placeholder ( tf . float32 , <nl> - shape = ( BATCH_SIZE , NUM_LABELS ) ) <nl> + train_labels_node = tf . placeholder ( tf . int64 , shape = ( BATCH_SIZE , ) ) <nl> eval_data = tf . placeholder ( <nl> tf . float32 , <nl> shape = ( EVAL_BATCH_SIZE , IMAGE_SIZE , IMAGE_SIZE , NUM_CHANNELS ) ) <nl> def model ( data , train = False ) : <nl> <nl> # Training computation : logits + cross - entropy loss . <nl> logits = model ( train_data_node , True ) <nl> - loss = tf . reduce_mean ( tf . nn . softmax_cross_entropy_with_logits ( <nl> + loss = tf . reduce_mean ( tf . nn . sparse_softmax_cross_entropy_with_logits ( <nl> logits , train_labels_node ) ) <nl> <nl> # L2 regularization for the fully connected parameters . <nl> mmm a / tensorflow / python / ops / image_ops . py <nl> ppp b / tensorflow / python / ops / image_ops . py <nl> def resize_images ( images , <nl> <nl> _ , height , width , depth = _ImageDimensions ( images ) <nl> <nl> - if width = = new_width and height = = new_height : <nl> + # Handle tensor - valued sizes as well as Python integers . <nl> + try : <nl> + new_width = ops . convert_to_tensor ( new_width , dtypes . int32 , <nl> + name = ' new_width ' ) <nl> + new_width . get_shape ( ) . assert_has_rank ( 0 ) <nl> + except ( TypeError , ValueError ) : <nl> + raise ValueError ( ' new_width must be a scalar integer ' ) <nl> + try : <nl> + new_height = ops . convert_to_tensor ( new_height , dtypes . int32 , <nl> + name = ' new_height ' ) <nl> + new_height . get_shape ( ) . assert_has_rank ( 0 ) <nl> + except ( TypeError , ValueError ) : <nl> + raise ValueError ( ' new_height must be a scalar integer ' ) <nl> + <nl> + new_width_const = tensor_util . constant_value ( new_width ) <nl> + new_height_const = tensor_util . constant_value ( new_height ) <nl> + <nl> + if width = = new_width_const and height = = new_height_const : <nl> if not is_batch : <nl> images = array_ops . squeeze ( images , squeeze_dims = [ 0 ] ) <nl> return images <nl> <nl> + new_size = array_ops . pack ( [ new_height , new_width ] ) <nl> + <nl> if method = = ResizeMethod . BILINEAR : <nl> images = gen_image_ops . 
resize_bilinear ( images , <nl> - [ new_height , new_width ] , <nl> + new_size , <nl> align_corners = align_corners ) <nl> elif method = = ResizeMethod . NEAREST_NEIGHBOR : <nl> images = gen_image_ops . resize_nearest_neighbor ( images , <nl> - [ new_height , new_width ] , <nl> + new_size , <nl> align_corners = align_corners ) <nl> elif method = = ResizeMethod . BICUBIC : <nl> images = gen_image_ops . resize_bicubic ( images , <nl> - [ new_height , new_width ] , <nl> + new_size , <nl> align_corners = align_corners ) <nl> elif method = = ResizeMethod . AREA : <nl> images = gen_image_ops . resize_area ( images , <nl> - [ new_height , new_width ] , <nl> + new_size , <nl> align_corners = align_corners ) <nl> else : <nl> raise ValueError ( ' Resize method is not implemented . ' ) <nl> <nl> + # NOTE ( mrry ) : The shape functions for the resize ops cannot unpack <nl> + # the packed values in ` new_size ` , so set the shape here . <nl> + images . set_shape ( [ None , new_height_const , new_width_const , None ] ) <nl> + <nl> if not is_batch : <nl> images = array_ops . squeeze ( images , squeeze_dims = [ 0 ] ) <nl> return images <nl> def adjust_contrast ( images , contrast_factor ) : <nl> def _ResizeShape ( op ) : <nl> " " " Shape function for the resize_bilinear and resize_nearest_neighbor ops . " " " <nl> input_shape = op . inputs [ 0 ] . get_shape ( ) . with_rank ( 4 ) <nl> + unused_size_shape = op . inputs [ 1 ] . get_shape ( ) . merge_with ( [ 2 ] ) <nl> size = tensor_util . constant_value ( op . inputs [ 1 ] ) <nl> if size is not None : <nl> height = size [ 0 ] <nl> mmm a / tensorflow / python / ops / image_ops_test . py <nl> ppp b / tensorflow / python / ops / image_ops_test . py <nl> def testNoOp ( self ) : <nl> newshape = yshape . eval ( ) <nl> self . assertAllEqual ( single_shape , newshape ) <nl> <nl> + def testTensorArguments ( self ) : <nl> + img_shape = [ 1 , 6 , 4 , 1 ] <nl> + single_shape = [ 6 , 4 , 1 ] <nl> + # This test is also conducted with int8 , so 127 is the maximum <nl> + # value that can be used . <nl> + data = [ 127 , 127 , 64 , 64 , <nl> + 127 , 127 , 64 , 64 , <nl> + 64 , 64 , 127 , 127 , <nl> + 64 , 64 , 127 , 127 , <nl> + 50 , 50 , 100 , 100 , <nl> + 50 , 50 , 100 , 100 ] <nl> + target_height = array_ops . placeholder ( dtypes . int32 ) <nl> + target_width = array_ops . placeholder ( dtypes . int32 ) <nl> + <nl> + img_np = np . array ( data , dtype = np . uint8 ) . reshape ( img_shape ) <nl> + <nl> + for opt in self . OPTIONS : <nl> + with self . test_session ( ) as sess : <nl> + image = constant_op . constant ( img_np , shape = img_shape ) <nl> + y = image_ops . resize_images ( image , target_height , target_width , opt ) <nl> + yshape = array_ops . shape ( y ) <nl> + resized , newshape = sess . run ( [ y , yshape ] , { target_height : 6 , <nl> + target_width : 4 } ) <nl> + self . assertAllEqual ( img_shape , newshape ) <nl> + self . assertAllClose ( resized , img_np , atol = 1e - 5 ) <nl> + <nl> + # Resizing with a single image must leave the shape unchanged also . <nl> + with self . test_session ( ) : <nl> + img_single = img_np . reshape ( single_shape ) <nl> + image = constant_op . constant ( img_single , shape = single_shape ) <nl> + y = image_ops . resize_images ( image , target_height , target_width , <nl> + self . OPTIONS [ 0 ] ) <nl> + yshape = array_ops . shape ( y ) <nl> + newshape = yshape . eval ( feed_dict = { target_height : 6 , target_width : 4 } ) <nl> + self . assertAllEqual ( single_shape , newshape ) <nl> + <nl> + # Incorrect shape . 
<nl> + with self . assertRaises ( ValueError ) : <nl> + _ = image_ops . resize_images ( <nl> + image , [ 12 , 32 ] , 4 , image_ops . ResizeMethod . BILINEAR ) <nl> + with self . assertRaises ( ValueError ) : <nl> + _ = image_ops . resize_images ( <nl> + image , 6 , [ 12 , 32 ] , image_ops . ResizeMethod . BILINEAR ) <nl> + <nl> + # Incorrect dtypes . <nl> + with self . assertRaises ( ValueError ) : <nl> + _ = image_ops . resize_images ( <nl> + image , 6 . 0 , 4 , image_ops . ResizeMethod . BILINEAR ) <nl> + with self . assertRaises ( ValueError ) : <nl> + _ = image_ops . resize_images ( <nl> + image , 6 , 4 . 0 , image_ops . ResizeMethod . BILINEAR ) <nl> + <nl> def testResizeDown ( self ) : <nl> # This test is also conducted with int8 , so 127 is the maximum <nl> # value that can be used . <nl> mmm a / tensorflow / python / ops / nn_ops . py <nl> ppp b / tensorflow / python / ops / nn_ops . py <nl> def top_k ( input , k = 1 , sorted = True , name = None ) : <nl> values : The ` k ` largest elements along each last dimensional slice . <nl> indices : The indices of ` values ` within the last dimension of ` input ` . <nl> " " " <nl> - # TODO ( irving ) : Always use v2 once the GraphDef mechanism is unstuck . <nl> - if isinstance ( k , ops . Tensor ) : <nl> - op = gen_nn_ops . _top_kv2 <nl> - else : <nl> - op = gen_nn_ops . _top_k <nl> - return op ( input , k = k , sorted = sorted , name = name ) <nl> + return gen_nn_ops . _top_kv2 ( input , k = k , sorted = sorted , name = name ) <nl> <nl> <nl> # pylint : enable = invalid - name <nl> mmm a / tensorflow / stream_executor / cuda / cuda_gpu_executor . cc <nl> ppp b / tensorflow / stream_executor / cuda / cuda_gpu_executor . cc <nl> static string GetBinaryDir ( bool strip_exe ) { <nl> return exe_path ; <nl> } <nl> <nl> - / / Returns the location of the runfiles directory . <nl> - / / This is the directory which " bazel run " sets as the current working directory <nl> - / / before the program starts . <nl> - / / N . B . This doesn ' t have to be running under " bazel run " in order to get the <nl> - / / appropriate runfiles directory . <nl> - static string GetRunfilesDir ( ) { <nl> - return port : : StrCat ( GetBinaryDir ( false ) , " . runfiles " ) ; <nl> - } <nl> - <nl> bool CUDAExecutor : : GetKernel ( const MultiKernelLoaderSpec & spec , <nl> KernelBase * kernel ) { <nl> CUDAKernel * cuda_kernel = AsCUDAKernel ( kernel ) ; <nl> mmm a / tensorflow / tensorflow . bzl <nl> ppp b / tensorflow / tensorflow . bzl <nl> def _py_wrap_cc_impl ( ctx ) : <nl> ctx . action ( executable = ctx . executable . swig_binary , <nl> arguments = args , <nl> mnemonic = " PythonSwig " , <nl> - inputs = list ( set ( [ src ] ) + cc_includes + ctx . files . swig_includes + <nl> + inputs = sorted ( set ( [ src ] ) + cc_includes + ctx . files . swig_includes + <nl> ctx . attr . swig_deps . files ) , <nl> outputs = outputs , <nl> progress_message = " SWIGing { input } " . format ( input = src . 
path ) ) <nl> mmm a / third_party / eigen3 / Eigen / Cholesky <nl> ppp b / third_party / eigen3 / Eigen / Cholesky <nl> @ @ - 1 + 1 @ @ <nl> - # include " external / eigen_archive / eigen - eigen - 8cd7c2c6e9e1 / Eigen / Cholesky " <nl> + # include " eigen - eigen - 8cd7c2c6e9e1 / Eigen / Cholesky " <nl> \ No newline at end of file <nl> mmm a / third_party / eigen3 / Eigen / Core <nl> ppp b / third_party / eigen3 / Eigen / Core <nl> @ @ - 1 + 1 @ @ <nl> - # include " external / eigen_archive / eigen - eigen - 8cd7c2c6e9e1 / Eigen / Core " <nl> + # include " eigen - eigen - 8cd7c2c6e9e1 / Eigen / Core " <nl> mmm a / third_party / eigen3 / Eigen / Eigenvalues <nl> ppp b / third_party / eigen3 / Eigen / Eigenvalues <nl> @ @ - 1 + 1 @ @ <nl> - # include " external / eigen_archive / eigen - eigen - 8cd7c2c6e9e1 / Eigen / Eigenvalues " <nl> + # include " eigen - eigen - 8cd7c2c6e9e1 / Eigen / Eigenvalues " <nl> mmm a / third_party / eigen3 / Eigen / LU <nl> ppp b / third_party / eigen3 / Eigen / LU <nl> @ @ - 1 + 1 @ @ <nl> - # include " external / eigen_archive / eigen - eigen - 8cd7c2c6e9e1 / Eigen / LU " <nl> + # include " eigen - eigen - 8cd7c2c6e9e1 / Eigen / LU " <nl> mmm a / third_party / eigen3 / Eigen / QR <nl> ppp b / third_party / eigen3 / Eigen / QR <nl> @ @ - 1 + 1 @ @ <nl> - # include " external / eigen_archive / eigen - eigen - 8cd7c2c6e9e1 / Eigen / QR " <nl> + # include " eigen - eigen - 8cd7c2c6e9e1 / Eigen / QR " <nl> mmm a / third_party / eigen3 / unsupported / Eigen / CXX11 / Tensor <nl> ppp b / third_party / eigen3 / unsupported / Eigen / CXX11 / Tensor <nl> @ @ - 1 + 1 , 2 @ @ <nl> - # include " external / eigen_archive / eigen - eigen - 8cd7c2c6e9e1 / unsupported / Eigen / CXX11 / Tensor " <nl> + <nl> + # include " eigen - eigen - 8cd7c2c6e9e1 / unsupported / Eigen / CXX11 / Tensor " <nl> mmm a / third_party / gpus / crosstool / CROSSTOOL <nl> ppp b / third_party / gpus / crosstool / CROSSTOOL <nl> toolchain { <nl> compiler_flag : " - fdata - sections " <nl> linker_flag : " - Wl , - - gc - sections " <nl> } <nl> + linking_mode_flags { mode : DYNAMIC } <nl> } <nl>
Merge commit for internal changes
tensorflow/tensorflow
4754990b30f3096dbe34a19ea645e2db4e065ad8
2016-02-06T01:08:02Z
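The MNIST and CIFAR-10 changes in the TensorFlow commit above swap dense one-hot label matrices (fed to softmax_cross_entropy_with_logits) for plain integer class IDs (fed to sparse_softmax_cross_entropy_with_logits). A minimal NumPy sketch of what that label-format change means, using made-up batch values rather than anything taken from the recorded diff:

# Illustrative only: the same labels in the dense one-hot form used before the
# commit and the sparse integer form used after it, plus the error-rate
# computation written both ways.
import numpy as np

NUM_LABELS = 10
labels_sparse = np.array([3, 0, 7, 3], dtype=np.int64)                                # shape (batch,)
labels_dense = (np.arange(NUM_LABELS) == labels_sparse[:, None]).astype(np.float32)   # shape (batch, NUM_LABELS)

# Hypothetical model outputs for the same batch.
predictions = np.random.rand(4, NUM_LABELS).astype(np.float32)

# Error rate computed against dense labels (old style) and sparse labels (new style).
err_dense = 100.0 - 100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels_dense, 1)) / predictions.shape[0]
err_sparse = 100.0 - 100.0 * np.sum(np.argmax(predictions, 1) == labels_sparse) / predictions.shape[0]
assert err_dense == err_sparse

Both encodings carry the same information; the sparse form simply avoids materialising the (batch, NUM_LABELS) matrix that the old code built with sparse_to_dense.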
mmm a / lib / Basics / Thread . cpp <nl> ppp b / lib / Basics / Thread . cpp <nl> bool Thread : : start ( ConditionVariable * finishedCondition ) { <nl> <nl> void Thread : : stop ( ) { <nl> if ( _running ! = 0 ) { <nl> - LOGGER_TRACE ( " trying to cancel ( aka stop ) the thread " < < _name ) ; <nl> + LOGGER_TRACE ( " trying to cancel ( aka stop ) the thread ' " < < _name < < " ' " ) ; <nl> TRI_StopThread ( & _thread ) ; <nl> } <nl> } <nl> void Thread : : shutdown ( ) { <nl> } <nl> <nl> if ( _running ! = 0 ) { <nl> - LOGGER_TRACE ( " trying to cancel ( aka stop ) the thread " < < _name ) ; <nl> + LOGGER_TRACE ( " trying to cancel ( aka stop ) the thread ' " < < _name < < " ' " ) ; <nl> TRI_StopThread ( & _thread ) ; <nl> } <nl> <nl> void Thread : : allowAsynchronousCancelation ( ) { <nl> if ( _started ) { <nl> if ( _running ) { <nl> if ( TRI_IsSelfThread ( & _thread ) ) { <nl> - LOGGER_DEBUG ( " set asynchronous cancelation for " < < _name ) ; <nl> + LOGGER_DEBUG ( " set asynchronous cancelation for thread ' " < < _name < < " ' " ) ; <nl> TRI_AllowCancelation ( ) ; <nl> } <nl> else { <nl> void Thread : : allowAsynchronousCancelation ( ) { <nl> <nl> void Thread : : runMe ( ) { <nl> if ( _asynchronousCancelation ) { <nl> - LOGGER_DEBUG ( " set asynchronous cancelation for " < < _name ) ; <nl> + LOGGER_DEBUG ( " set asynchronous cancelation for thread ' " < < _name < < " ' " ) ; <nl> TRI_AllowCancelation ( ) ; <nl> } <nl> <nl> void Thread : : runMe ( ) { <nl> } <nl> catch ( . . . ) { <nl> _running = 0 ; <nl> + LOGGER_WARNING ( " exception caught in thread ' " < < _name < < " ' " ) ; <nl> throw ; <nl> } <nl> <nl>
add warning on exception
arangodb/arangodb
0216883e36d91a9e7c74b4101d2081a82eb6e39a
2013-03-14T16:28:13Z
Binary files a / docs / images / register_lb . png and b / docs / images / register_lb . png differ <nl>
replace register_lb . png
apache/incubator-brpc
b1e23a19a5e330444c1673db88ed3e78b5991363
2018-03-22T07:29:43Z
mmm a / hphp / runtime / base / array - data . h <nl> ppp b / hphp / runtime / base / array - data . h <nl> struct ArrayData { <nl> <nl> private : <nl> void serializeImpl ( VariableSerializer * serializer ) const ; <nl> + friend size_t getMemSize ( const ArrayData * ) ; <nl> static void compileTimeAssertions ( ) { <nl> static_assert ( offsetof ( ArrayData , m_count ) = = FAST_REFCOUNT_OFFSET , " " ) ; <nl> } <nl> struct ArrayData { <nl> } ; <nl> uint64_t m_posAndCount ; / / be careful , m_pos is signed <nl> } ; <nl> - <nl> - private : <nl> - friend size_t getMemSize ( const ArrayData * ) ; <nl> } ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl>
Move getMemSize ( ) out of the ArrayData private member - data section
facebook/hhvm
12e73f7d3a67138f89bd07a3714fcbe01586231c
2014-05-23T23:08:56Z
mmm a / src / core / hle / kernel / address_arbiter . cpp <nl> ppp b / src / core / hle / kernel / address_arbiter . cpp <nl> <nl> # include " core / core_cpu . h " <nl> # include " core / hle / kernel / address_arbiter . h " <nl> # include " core / hle / kernel / errors . h " <nl> - # include " core / hle / kernel / object . h " <nl> - # include " core / hle / kernel / process . h " <nl> # include " core / hle / kernel / scheduler . h " <nl> # include " core / hle / kernel / thread . h " <nl> # include " core / hle / result . h " <nl> mmm a / src / core / hle / kernel / address_arbiter . h <nl> ppp b / src / core / hle / kernel / address_arbiter . h <nl> <nl> <nl> # pragma once <nl> <nl> + # include < memory > <nl> # include < vector > <nl> <nl> # include " common / common_types . h " <nl> - # include " core / hle / kernel / object . h " <nl> <nl> union ResultCode ; <nl> <nl> mmm a / src / core / hle / kernel / client_port . cpp <nl> ppp b / src / core / hle / kernel / client_port . cpp <nl> <nl> # include " core / hle / kernel / hle_ipc . h " <nl> # include " core / hle / kernel / object . h " <nl> # include " core / hle / kernel / server_port . h " <nl> - # include " core / hle / kernel / server_session . h " <nl> # include " core / hle / kernel / session . h " <nl> <nl> namespace Kernel { <nl> mmm a / src / core / hle / kernel / client_port . h <nl> ppp b / src / core / hle / kernel / client_port . h <nl> <nl> <nl> # pragma once <nl> <nl> + # include < memory > <nl> # include < string > <nl> + <nl> # include " common / common_types . h " <nl> # include " core / hle / kernel / object . h " <nl> # include " core / hle / result . h " <nl> mmm a / src / core / hle / kernel / handle_table . h <nl> ppp b / src / core / hle / kernel / handle_table . h <nl> <nl> <nl> # include < array > <nl> # include < cstddef > <nl> + # include < memory > <nl> + <nl> # include " common / common_types . h " <nl> # include " core / hle / kernel / object . h " <nl> # include " core / hle / result . h " <nl> mmm a / src / core / hle / kernel / kernel . cpp <nl> ppp b / src / core / hle / kernel / kernel . cpp <nl> <nl> # include " core / core . h " <nl> # include " core / core_timing . h " <nl> # include " core / core_timing_util . h " <nl> - # include " core / hle / kernel / address_arbiter . h " <nl> # include " core / hle / kernel / client_port . h " <nl> # include " core / hle / kernel / errors . h " <nl> # include " core / hle / kernel / handle_table . h " <nl> mmm a / src / core / hle / kernel / kernel . h <nl> ppp b / src / core / hle / kernel / kernel . h <nl> <nl> <nl> # pragma once <nl> <nl> + # include < memory > <nl> # include < string > <nl> # include < unordered_map > <nl> # include < vector > <nl> mmm a / src / core / hle / kernel / mutex . cpp <nl> ppp b / src / core / hle / kernel / mutex . cpp <nl> <nl> / / Licensed under GPLv2 or any later version <nl> / / Refer to the license . txt file included . <nl> <nl> + # include < memory > <nl> # include < utility > <nl> # include < vector > <nl> <nl> mmm a / src / core / hle / kernel / resource_limit . h <nl> ppp b / src / core / hle / kernel / resource_limit . h <nl> <nl> # pragma once <nl> <nl> # include < array > <nl> + # include < memory > <nl> + <nl> # include " common / common_types . h " <nl> # include " core / hle / kernel / object . h " <nl> <nl> mmm a / src / core / hle / kernel / scheduler . h <nl> ppp b / src / core / hle / kernel / scheduler . 
h <nl> <nl> <nl> # pragma once <nl> <nl> - # include < mutex > <nl> + # include < atomic > <nl> + # include < memory > <nl> # include < vector > <nl> + <nl> # include " common / common_types . h " <nl> # include " common / multi_level_queue . h " <nl> - # include " core / hle / kernel / object . h " <nl> # include " core / hle / kernel / thread . h " <nl> <nl> namespace Core { <nl> mmm a / src / core / hle / kernel / session . h <nl> ppp b / src / core / hle / kernel / session . h <nl> <nl> <nl> # include < memory > <nl> # include < string > <nl> + # include < utility > <nl> <nl> # include " core / hle / kernel / wait_object . h " <nl> - # include " core / hle / result . h " <nl> <nl> namespace Kernel { <nl> <nl> mmm a / src / core / hle / kernel / shared_memory . h <nl> ppp b / src / core / hle / kernel / shared_memory . h <nl> <nl> <nl> # include < memory > <nl> # include < string > <nl> - # include < vector > <nl> <nl> # include " common / common_types . h " <nl> # include " core / hle / kernel / object . h " <nl> mmm a / src / core / hle / kernel / transfer_memory . h <nl> ppp b / src / core / hle / kernel / transfer_memory . h <nl> <nl> # pragma once <nl> <nl> # include < memory > <nl> - # include < vector > <nl> <nl> # include " core / hle / kernel / object . h " <nl> # include " core / hle / kernel / physical_memory . h " <nl> mmm a / src / core / hle / kernel / wait_object . h <nl> ppp b / src / core / hle / kernel / wait_object . h <nl> <nl> <nl> # pragma once <nl> <nl> + # include < memory > <nl> # include < vector > <nl> - # include < boost / smart_ptr / intrusive_ptr . hpp > <nl> + <nl> # include " core / hle / kernel / object . h " <nl> <nl> namespace Kernel { <nl> mmm a / src / core / hle / kernel / writable_event . h <nl> ppp b / src / core / hle / kernel / writable_event . h <nl> <nl> <nl> # pragma once <nl> <nl> + # include < memory > <nl> + <nl> # include " core / hle / kernel / object . h " <nl> <nl> namespace Kernel { <nl>
kernel : Remove unnecessary includes
yuzu-emu/yuzu
c3e43c7e812be54ee33d559cbe5283bd445897dc
2019-12-08T03:37:05Z
mmm a / src / hydrogen . cc <nl> ppp b / src / hydrogen . cc <nl> void HGraphBuilder : : IfBuilder : : CaptureContinuation ( <nl> void HGraphBuilder : : IfBuilder : : JoinContinuation ( HIfContinuation * continuation ) { <nl> ASSERT ( ! finished_ ) ; <nl> ASSERT ( ! captured_ ) ; <nl> + ASSERT ( did_then_ ) ; <nl> + if ( ! did_else_ ) Else ( ) ; <nl> HBasicBlock * true_block = last_true_block_ = = NULL <nl> ? first_true_block_ <nl> : last_true_block_ ; <nl> - HBasicBlock * false_block = did_else_ & & ( first_false_block_ ! = NULL ) <nl> - ? builder_ - > current_block ( ) <nl> - : first_false_block_ ; <nl> + HBasicBlock * false_block = builder_ - > current_block ( ) ; <nl> if ( true_block ! = NULL & & ! true_block - > IsFinished ( ) ) { <nl> ASSERT ( continuation - > IsTrueReachable ( ) ) ; <nl> builder_ - > GotoNoSimulate ( true_block , continuation - > true_branch ( ) ) ; <nl>
Do Else ( ) first for JoinContinuation ( ) in IfBuilder .
v8/v8
537100336c67e81515f9be7bbf4a7fb8289ee399
2013-11-05T08:56:48Z
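The V8 fix above makes HGraphBuilder::IfBuilder::JoinContinuation() require a prior Then() and call Else() itself when no else-branch was built, so the false block is always the builder's current block. A toy Python analogue of that builder behaviour, with invented names and none of the actual Hydrogen C++ API:

# Toy sketch: Join() materialises an empty else-branch first, so callers that
# never wrote an explicit Else() still get a well-defined false block.
class IfBuilder:
    def __init__(self):
        self.did_then = False
        self.did_else = False
        self.true_block = None
        self.false_block = None

    def Then(self, block):
        self.did_then = True
        self.true_block = block
        return self

    def Else(self, block=None):
        self.did_else = True
        self.false_block = block if block is not None else "empty-else"
        return self

    def Join(self):
        assert self.did_then
        if not self.did_else:   # the change: always create the else branch before joining
            self.Else()
        return (self.true_block, self.false_block)

print(IfBuilder().Then("then-block").Join())   # ('then-block', 'empty-else')

The point of the change is just that Join() no longer special-cases a missing else branch.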
mmm a / stdlib / public / runtime / ObjCRuntimeGetImageNameFromClass . cpp <nl> ppp b / stdlib / public / runtime / ObjCRuntimeGetImageNameFromClass . cpp <nl> namespace { <nl> typedef struct segment_command macho_segment_command ; <nl> # endif <nl> <nl> - struct patch_t { <nl> - const char * name ; <nl> - const void * fn ; <nl> - <nl> - template < typename T > <nl> - patch_t ( const char * newName , const T * newFn ) <nl> - : name ( newName ) , fn ( ( const void * ) newFn ) { <nl> - } <nl> - } ; <nl> } / / end anonymous namespace <nl> <nl> / / / Overwrite a cross - image symbol reference by directly editing symbol tables <nl> namespace { <nl> / / / <nl> / / / Also , if the symbol being patched has references within the image where it <nl> / / / was originaly defined , those references will \ e not be patched . <nl> - static void patchLazyPointers ( const mach_header * mh , patch_t patch ) { <nl> + static void patchLazyPointers ( const mach_header * mh , const char * symbolName , <nl> + const void * newValue ) { <nl> / / Get linkEditBase <nl> const uint32_t cmd_count = mh - > ncmds ; <nl> const load_command * const cmds = <nl> static void patchLazyPointers ( const mach_header * mh , patch_t patch ) { <nl> / / Found symbol for this lazy pointer , now lookup address . <nl> const char * lazyTargetName = <nl> & stringTable [ symbolTable [ symbolIndex ] . n_un . n_strx ] ; <nl> - if ( strcmp ( patch . name , lazyTargetName ) = = 0 ) { <nl> + if ( strcmp ( symbolName , lazyTargetName ) = = 0 ) { <nl> / / Can ' t use the value currently stored here because it may <nl> / / be a dyld stub binder that will undo our patch if called . <nl> - symbolPointers [ lazyIndex ] = ( uintptr_t ) patch . fn ; <nl> + symbolPointers [ lazyIndex ] = ( uintptr_t ) newValue ; <nl> } <nl> } <nl> } <nl> static const char * patchedGetImageNameFromClassForOldOSs ( Class _Nullable cls ) { <nl> static void patchGetImageNameInImage ( const struct mach_header * mh , <nl> intptr_t vmaddr_slide ) { <nl> ( void ) vmaddr_slide ; <nl> - patchLazyPointers ( mh , patch_t ( " _class_getImageName " , <nl> - & patchedGetImageNameFromClassForOldOSs ) ) ; <nl> + const void * newImplementationAddr = <nl> + reinterpret_cast < const void * > ( & patchedGetImageNameFromClassForOldOSs ) ; <nl> + patchLazyPointers ( mh , " _class_getImageName " , newImplementationAddr ) ; <nl> } <nl> <nl> / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / <nl>
[ runtime ] Tidy up symbol table patching to satisfy upstream Clang
apple/swift
37e81db5695678569003c3b2ef84d9bbf778e40f
2018-07-19T18:22:44Z
mmm a / dbms / src / Interpreters / ExpressionAnalyzer . cpp <nl> ppp b / dbms / src / Interpreters / ExpressionAnalyzer . cpp <nl> bool ExpressionAnalyzer : : appendPrewhere ( ExpressionActionsChain & chain , bool onl <nl> <nl> Names required_sample_columns ; <nl> if ( sampling_expression ) <nl> - required_sample_columns = ExpressionAnalyzer ( sampling_expression , context , nullptr , source_columns ) . getRequiredSourceColumns ( ) ; <nl> + required_sample_columns = ExpressionAnalyzer ( sampling_expression , context , storage ) . getRequiredSourceColumns ( ) ; <nl> <nl> initChain ( chain , source_columns ) ; <nl> auto & step = chain . getLastStep ( ) ; <nl> bool ExpressionAnalyzer : : appendPrewhere ( ExpressionActionsChain & chain , bool onl <nl> step . required_output . push_back ( prewhere_column_name ) ; <nl> step . can_remove_required_output . push_back ( true ) ; <nl> <nl> - / / / Add required columns for sample expression to required output in order not to remove them after <nl> - / / / prewhere execution because sampling is executed after prewhere . <nl> - / / / TODO : add sampling execution to common chain . <nl> - for ( const auto & column : required_sample_columns ) <nl> - { <nl> - step . required_output . push_back ( column ) ; <nl> - step . can_remove_required_output . push_back ( true ) ; <nl> - } <nl> - <nl> { <nl> / / / Remove unused source_columns from prewhere actions . <nl> auto tmp_actions = std : : make_shared < ExpressionActions > ( source_columns , context ) ; <nl> bool ExpressionAnalyzer : : appendPrewhere ( ExpressionActionsChain & chain , bool onl <nl> auto required_columns = tmp_actions - > getRequiredColumns ( ) ; <nl> NameSet required_source_columns ( required_columns . begin ( ) , required_columns . end ( ) ) ; <nl> <nl> + / / / Add required columns for sample expression to required output in order not to remove them after <nl> + / / / prewhere execution because sampling is executed after prewhere . <nl> + / / / TODO : add sampling execution to common chain . <nl> + for ( const auto & column : required_sample_columns ) <nl> + { <nl> + if ( required_source_columns . count ( column ) ) <nl> + { <nl> + step . required_output . push_back ( column ) ; <nl> + step . can_remove_required_output . push_back ( true ) ; <nl> + } <nl> + } <nl> + <nl> auto names = step . actions - > getSampleBlock ( ) . getNames ( ) ; <nl> NameSet name_set ( names . begin ( ) , names . end ( ) ) ; <nl> <nl> mmm a / dbms / tests / queries / 0_stateless / 00712_prewhere_with_sampling . reference <nl> ppp b / dbms / tests / queries / 0_stateless / 00712_prewhere_with_sampling . reference <nl> @ @ - 1 + 1 , 2 @ @ <nl> 1 <nl> + 0 <nl> mmm a / dbms / tests / queries / 0_stateless / 00712_prewhere_with_sampling . sql <nl> ppp b / dbms / tests / queries / 0_stateless / 00712_prewhere_with_sampling . sql <nl> insert into test . tab values ( 1 , 2 ) , ( 1 , 4 ) ; <nl> select a from test . tab sample 1 / 2 prewhere b = 2 ; <nl> drop table if exists test . tab ; <nl> <nl> + DROP TABLE IF EXISTS test . sample_prewhere ; <nl> + CREATE TABLE test . sample_prewhere ( CounterID UInt32 , UserID UInt64 ) ENGINE = MergeTree ORDER BY UserID SAMPLE BY UserID ; <nl> + SELECT count ( ) FROM test . sample_prewhere SAMPLE 1 / 2 PREWHERE CounterID = 1 ; <nl> + DROP TABLE test . sample_prewhere ; <nl>
Merge pull request from yandex / fix_prewhere_with_sampling_2
ClickHouse/ClickHouse
1d28a9c510120b07f0719b2f33ccbc21be1e339d
2018-09-10T10:43:07Z
mmm a / src / mongo / db / repl / replica_set_config . cpp <nl> ppp b / src / mongo / db / repl / replica_set_config . cpp <nl> namespace repl { <nl> # endif <nl> <nl> const Seconds ReplicaSetConfig : : kDefaultHeartbeatTimeoutPeriod ( 10 ) ; <nl> + const std : : string ReplicaSetConfig : : kIdFieldName = " _id " ; <nl> + const std : : string ReplicaSetConfig : : kVersionFieldName = " version " ; <nl> + const std : : string ReplicaSetConfig : : kMembersFieldName = " members " ; <nl> + const std : : string ReplicaSetConfig : : kSettingsFieldName = " settings " ; <nl> <nl> namespace { <nl> <nl> - const std : : string kIdFieldName = " _id " ; <nl> - const std : : string kVersionFieldName = " version " ; <nl> - const std : : string kMembersFieldName = " members " ; <nl> - const std : : string kSettingsFieldName = " settings " ; <nl> - <nl> const std : : string kLegalConfigTopFieldNames [ ] = { <nl> - kIdFieldName , <nl> - kVersionFieldName , <nl> - kMembersFieldName , <nl> - kSettingsFieldName <nl> + ReplicaSetConfig : : kIdFieldName , <nl> + ReplicaSetConfig : : kVersionFieldName , <nl> + ReplicaSetConfig : : kMembersFieldName , <nl> + ReplicaSetConfig : : kSettingsFieldName <nl> } ; <nl> <nl> const std : : string kHeartbeatTimeoutFieldName = " heartbeatTimeoutSecs " ; <nl> namespace { <nl> <nl> } / / namespace <nl> <nl> - ReplicaSetConfig : : ReplicaSetConfig ( ) : _heartbeatTimeoutPeriod ( 0 ) { } <nl> + ReplicaSetConfig : : ReplicaSetConfig ( ) : _isInitialized ( false ) , _heartbeatTimeoutPeriod ( 0 ) { } <nl> <nl> Status ReplicaSetConfig : : initialize ( const BSONObj & cfg ) { <nl> + _isInitialized = false ; <nl> _members . clear ( ) ; <nl> Status status = bsonCheckOnlyHasFields ( <nl> " replica set configuration " , cfg , kLegalConfigTopFieldNames ) ; <nl> namespace { <nl> return status ; <nl> <nl> _calculateMajorityNumber ( ) ; <nl> + _isInitialized = true ; <nl> return Status : : OK ( ) ; <nl> } <nl> <nl> mmm a / src / mongo / db / repl / replica_set_config . h <nl> ppp b / src / mongo / db / repl / replica_set_config . h <nl> namespace repl { <nl> public : <nl> typedef std : : vector < MemberConfig > : : const_iterator MemberIterator ; <nl> <nl> + static const std : : string kIdFieldName ; <nl> + static const std : : string kVersionFieldName ; <nl> + static const std : : string kMembersFieldName ; <nl> + static const std : : string kSettingsFieldName ; <nl> + <nl> static const size_t kMaxMembers = 12 ; <nl> static const size_t kMaxVotingMembers = 7 ; <nl> static const Seconds kDefaultHeartbeatTimeoutPeriod ; <nl> namespace repl { <nl> * / <nl> Status initialize ( const BSONObj & cfg ) ; <nl> <nl> + / * * <nl> + * Returns true if this object has been successfully initialized or copied from <nl> + * an initialized object . <nl> + * / <nl> + bool isInitialized ( ) const { return _isInitialized ; } <nl> + <nl> / * * <nl> * Performs basic consistency checks on the replica set configuration . <nl> * / <nl> namespace repl { <nl> * / <nl> void _calculateMajorityNumber ( ) ; <nl> <nl> + bool _isInitialized ; <nl> long long _version ; <nl> std : : string _replSetName ; <nl> std : : vector < MemberConfig > _members ; <nl>
SERVER - 14624 Add isInitialized ( ) concept to ReplicaSetConfig ; expose some field name constants .
mongodb/mongo
fcab456c204a1c5eccfc3d700337cb5bff0621fc
2014-07-21T17:41:21Z
mmm a / dbms / src / Interpreters / PredicateExpressionsOptimizer . cpp <nl> ppp b / dbms / src / Interpreters / PredicateExpressionsOptimizer . cpp <nl> ASTs PredicateExpressionsOptimizer : : getSelectQueryProjectionColumns ( ASTPtr & ast <nl> <nl> TranslateQualifiedNamesVisitor : : Data qn_visitor_data { { } , tables } ; <nl> TranslateQualifiedNamesVisitor ( qn_visitor_data ) . visit ( ast ) ; <nl> + <nl> QueryAliasesVisitor : : Data query_aliases_data { aliases } ; <nl> QueryAliasesVisitor ( query_aliases_data ) . visit ( ast ) ; <nl> - QueryNormalizer ( ast , aliases , settings ) . perform ( ) ; <nl> + <nl> + QueryNormalizer : : Data normalizer_data ( aliases , settings ) ; <nl> + QueryNormalizer ( normalizer_data ) . visit ( ast ) ; <nl> <nl> for ( const auto & projection_column : select_query - > select_expression_list - > children ) <nl> { <nl> mmm a / dbms / src / Interpreters / QueryNormalizer . cpp <nl> ppp b / dbms / src / Interpreters / QueryNormalizer . cpp <nl> namespace ErrorCodes <nl> extern const int CYCLIC_ALIASES ; <nl> } <nl> <nl> - <nl> - QueryNormalizer : : QueryNormalizer ( ASTPtr & query_ , const QueryNormalizer : : Aliases & aliases_ , ExtractedSettings & & settings_ , <nl> - std : : vector < TableWithColumnNames > & & tables_with_columns_ ) <nl> - : query ( query_ ) , aliases ( aliases_ ) , settings ( settings_ ) , tables_with_columns ( tables_with_columns_ ) <nl> - { } <nl> - <nl> - void QueryNormalizer : : perform ( ) <nl> + class CheckASTDepth <nl> { <nl> - SetOfASTs tmp_set ; <nl> - MapOfASTs tmp_map ; <nl> - performImpl ( query , tmp_map , tmp_set , " " , 0 ) ; <nl> + public : <nl> + CheckASTDepth ( QueryNormalizer : : Data & data_ ) <nl> + : data ( data_ ) <nl> + { <nl> + if ( data . level > data . settings . max_ast_depth ) <nl> + throw Exception ( " Normalized AST is too deep . Maximum : " + toString ( data . settings . max_ast_depth ) , ErrorCodes : : TOO_DEEP_AST ) ; <nl> + + + data . level ; <nl> + } <nl> <nl> - try <nl> + ~ CheckASTDepth ( ) <nl> { <nl> - query - > checkSize ( settings . max_expanded_ast_elements ) ; <nl> + - - data . level ; <nl> } <nl> - catch ( Exception & e ) <nl> + <nl> + private : <nl> + QueryNormalizer : : Data & data ; <nl> + } ; <nl> + <nl> + <nl> + class RestoreAliasOnExitScope <nl> + { <nl> + public : <nl> + RestoreAliasOnExitScope ( String & alias_ ) <nl> + : alias ( alias_ ) <nl> + , copy ( alias_ ) <nl> + { } <nl> + <nl> + ~ RestoreAliasOnExitScope ( ) <nl> { <nl> - e . addMessage ( " ( after expansion of aliases ) " ) ; <nl> - throw ; <nl> + alias = copy ; <nl> } <nl> - } <nl> <nl> - / / / finished_asts - already processed vertices ( and by what they replaced ) <nl> - / / / current_asts - vertices in the current call stack of this method <nl> - / / / current_alias - the alias referencing to the ancestor of ast ( the deepest ancestor with aliases ) <nl> - void QueryNormalizer : : performImpl ( ASTPtr & ast , MapOfASTs & finished_asts , SetOfASTs & current_asts , std : : string current_alias , size_t level ) <nl> + private : <nl> + String & alias ; <nl> + const String copy ; <nl> + } ; <nl> + <nl> + <nl> + void QueryNormalizer : : visit ( ASTPtr & ast , Data & data ) <nl> { <nl> - if ( level > settings . max_ast_depth ) <nl> - throw Exception ( " Normalized AST is too deep . Maximum : " + toString ( settings . max_ast_depth ) , ErrorCodes : : TOO_DEEP_AST ) ; <nl> + CheckASTDepth scope1 ( data ) ; <nl> + RestoreAliasOnExitScope scope2 ( data . current_alias ) ; <nl> + <nl> + auto & aliases = data . 
aliases ; <nl> + auto & tables_with_columns = data . tables_with_columns ; <nl> + auto & finished_asts = data . finished_asts ; <nl> + auto & current_asts = data . current_asts ; <nl> + String & current_alias = data . current_alias ; <nl> <nl> if ( finished_asts . count ( ast ) ) <nl> { <nl> void QueryNormalizer : : performImpl ( ASTPtr & ast , MapOfASTs & finished_asts , SetOf <nl> / / / will be sent to remote servers during distributed query execution , <nl> / / / and on all remote servers , function implementation will be same . <nl> if ( endsWith ( func_node - > name , " Distinct " ) & & func_name_lowercase = = " countdistinct " ) <nl> - func_node - > name = settings . count_distinct_implementation ; <nl> + func_node - > name = data . settings . count_distinct_implementation ; <nl> <nl> / / / As special case , treat count ( * ) as count ( ) , not as count ( list of all columns ) . <nl> if ( func_name_lowercase = = " count " & & func_node - > arguments - > children . size ( ) = = 1 <nl> void QueryNormalizer : : performImpl ( ASTPtr & ast , MapOfASTs & finished_asts , SetOf <nl> / / / Replace * , alias . * , database . table . * with a list of columns . <nl> <nl> ASTs old_children ; <nl> - if ( processAsterisks ( ) ) <nl> + if ( data . processAsterisks ( ) ) <nl> { <nl> bool has_asterisk = false ; <nl> for ( const auto & child : expr_list - > children ) <nl> void QueryNormalizer : : performImpl ( ASTPtr & ast , MapOfASTs & finished_asts , SetOf <nl> / / / If we replace the root of the subtree , we will be called again for the new root , in case the alias is replaced by an alias . <nl> if ( replaced ) <nl> { <nl> - performImpl ( ast , finished_asts , current_asts , current_alias , level + 1 ) ; <nl> + visit ( ast , data ) ; <nl> current_asts . erase ( initial_ast . get ( ) ) ; <nl> current_asts . erase ( ast . get ( ) ) ; <nl> finished_asts [ initial_ast ] = ast ; <nl> void QueryNormalizer : : performImpl ( ASTPtr & ast , MapOfASTs & finished_asts , SetOf <nl> if ( typeid_cast < const ASTSelectQuery * > ( child . get ( ) ) | | typeid_cast < const ASTTableExpression * > ( child . get ( ) ) ) <nl> continue ; <nl> <nl> - performImpl ( child , finished_asts , current_asts , current_alias , level + 1 ) ; <nl> + visit ( child , data ) ; <nl> } <nl> } <nl> else if ( identifier_node ) <nl> void QueryNormalizer : : performImpl ( ASTPtr & ast , MapOfASTs & finished_asts , SetOf <nl> if ( typeid_cast < const ASTSelectQuery * > ( child . get ( ) ) | | typeid_cast < const ASTTableExpression * > ( child . get ( ) ) ) <nl> continue ; <nl> <nl> - performImpl ( child , finished_asts , current_asts , current_alias , level + 1 ) ; <nl> + visit ( child , data ) ; <nl> } <nl> } <nl> <nl> void QueryNormalizer : : performImpl ( ASTPtr & ast , MapOfASTs & finished_asts , SetOf <nl> if ( ASTSelectQuery * select = typeid_cast < ASTSelectQuery * > ( ast . 
get ( ) ) ) <nl> { <nl> if ( select - > prewhere_expression ) <nl> - performImpl ( select - > prewhere_expression , finished_asts , current_asts , current_alias , level + 1 ) ; <nl> + visit ( select - > prewhere_expression , data ) ; <nl> if ( select - > where_expression ) <nl> - performImpl ( select - > where_expression , finished_asts , current_asts , current_alias , level + 1 ) ; <nl> + visit ( select - > where_expression , data ) ; <nl> if ( select - > having_expression ) <nl> - performImpl ( select - > having_expression , finished_asts , current_asts , current_alias , level + 1 ) ; <nl> + visit ( select - > having_expression , data ) ; <nl> } <nl> <nl> current_asts . erase ( initial_ast . get ( ) ) ; <nl> current_asts . erase ( ast . get ( ) ) ; <nl> finished_asts [ initial_ast ] = ast ; <nl> + <nl> + / / / @ note can not place it in CheckASTDepth dror cause of throw . <nl> + if ( data . level = = 1 ) <nl> + { <nl> + try <nl> + { <nl> + ast - > checkSize ( data . settings . max_expanded_ast_elements ) ; <nl> + } <nl> + catch ( Exception & e ) <nl> + { <nl> + e . addMessage ( " ( after expansion of aliases ) " ) ; <nl> + throw ; <nl> + } <nl> + } <nl> } <nl> <nl> } <nl> mmm a / dbms / src / Interpreters / QueryNormalizer . h <nl> ppp b / dbms / src / Interpreters / QueryNormalizer . h <nl> class QueryNormalizer <nl> using Aliases = std : : unordered_map < String , ASTPtr > ; <nl> using TableWithColumnNames = std : : pair < DatabaseAndTableWithAlias , Names > ; <nl> <nl> - QueryNormalizer ( ASTPtr & query , const Aliases & aliases , ExtractedSettings & & settings , <nl> - std : : vector < TableWithColumnNames > & & tables_with_columns = { } ) ; <nl> + struct Data <nl> + { <nl> + using SetOfASTs = std : : set < const IAST * > ; <nl> + using MapOfASTs = std : : map < ASTPtr , ASTPtr > ; <nl> <nl> - void perform ( ) ; <nl> + const Aliases & aliases ; <nl> + const ExtractedSettings settings ; <nl> + const std : : vector < TableWithColumnNames > tables_with_columns ; <nl> <nl> - private : <nl> - using SetOfASTs = std : : set < const IAST * > ; <nl> - using MapOfASTs = std : : map < ASTPtr , ASTPtr > ; <nl> + / / / tmp data <nl> + size_t level ; <nl> + MapOfASTs finished_asts ; / / / already processed vertices ( and by what they replaced ) <nl> + SetOfASTs current_asts ; / / / vertices in the current call stack of this method <nl> + std : : string current_alias ; / / / the alias referencing to the ancestor of ast ( the deepest ancestor with aliases ) <nl> <nl> - ASTPtr & query ; <nl> - const Aliases & aliases ; <nl> - const ExtractedSettings settings ; <nl> - const std : : vector < TableWithColumnNames > tables_with_columns ; <nl> + Data ( const Aliases & aliases_ , ExtractedSettings & & settings_ , std : : vector < TableWithColumnNames > & & tables_with_columns_ = { } ) <nl> + : aliases ( aliases_ ) <nl> + , settings ( settings_ ) <nl> + , tables_with_columns ( tables_with_columns_ ) <nl> + , level ( 0 ) <nl> + { } <nl> + <nl> + bool processAsterisks ( ) const { return ! tables_with_columns . empty ( ) ; } <nl> + } ; <nl> <nl> - bool processAsterisks ( ) const { return ! tables_with_columns . 
empty ( ) ; } <nl> + QueryNormalizer ( Data & data ) <nl> + : visitor_data ( data ) <nl> + { } <nl> + <nl> + void visit ( ASTPtr & ast ) <nl> + { <nl> + visit ( ast , visitor_data ) ; <nl> + } <nl> + <nl> + private : <nl> + Data & visitor_data ; <nl> <nl> - void performImpl ( ASTPtr & ast , MapOfASTs & finished_asts , SetOfASTs & current_asts , std : : string current_alias , size_t level ) ; <nl> + void visit ( ASTPtr & query , Data & data ) ; <nl> } ; <nl> <nl> } <nl> mmm a / dbms / src / Interpreters / SyntaxAnalyzer . cpp <nl> ppp b / dbms / src / Interpreters / SyntaxAnalyzer . cpp <nl> void normalizeTree ( <nl> else <nl> table_with_columns . emplace_back ( DatabaseAndTableWithAlias { } , std : : move ( all_columns_name ) ) ; <nl> <nl> - QueryNormalizer ( query , result . aliases , context . getSettingsRef ( ) , std : : move ( table_with_columns ) ) . perform ( ) ; <nl> + QueryNormalizer : : Data normalizer_data ( result . aliases , context . getSettingsRef ( ) , std : : move ( table_with_columns ) ) ; <nl> + QueryNormalizer ( normalizer_data ) . visit ( query ) ; <nl> } <nl> <nl> bool hasArrayJoin ( const ASTPtr & ast ) <nl>
QueryNormalizer with visitor interface
ClickHouse/ClickHouse
c53854125f275a7867070a3154cf24732bf5ff15
2019-01-11T14:09:23Z
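The QueryNormalizer rewrite above turns perform()/performImpl() into a visit() that keeps per-call state (recursion level, current alias) in a Data struct and restores it with small RAII helpers (CheckASTDepth, RestoreAliasOnExitScope). A rough Python analogue of that save-and-restore-on-exit pattern, using context managers and invented node/field names rather than ClickHouse's AST classes:

# Sketch of the scope-guard idea: the depth counter is checked and bumped on
# entry and unwound on exit, and the current alias is restored, even if the
# visit raises.
from contextlib import contextmanager

MAX_DEPTH = 1000

class Data:
    def __init__(self):
        self.level = 0
        self.current_alias = ""

@contextmanager
def check_depth(data):
    if data.level > MAX_DEPTH:
        raise RuntimeError("normalized AST is too deep")
    data.level += 1
    try:
        yield
    finally:
        data.level -= 1

@contextmanager
def restore_alias(data):
    saved = data.current_alias
    try:
        yield
    finally:
        data.current_alias = saved

def visit(node, data):
    with check_depth(data), restore_alias(data):
        alias = getattr(node, "alias", None)
        if alias:
            data.current_alias = alias
        for child in getattr(node, "children", []):
            visit(child, data)

The guards guarantee that the counter and alias are unwound on every exit path, which is what the C++ destructors in the commit provide.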
mmm a / CMakeLists . txt <nl> ppp b / CMakeLists . txt <nl> set ( CXX_STANDARD_REQUIRED ON ) <nl> <nl> set ( VERSION_MAJOR 1 ) <nl> set ( VERSION_MINOR 4 ) <nl> - set ( VERSION_PATCH 0 ) <nl> + set ( VERSION_PATCH 1 ) <nl> <nl> set ( CLI_CLIENT_EXECUTABLE_NAME cleos ) <nl> set ( NODE_EXECUTABLE_NAME nodeos ) <nl> mmm a / Docker / README . md <nl> ppp b / Docker / README . md <nl> cd eos / Docker <nl> docker build . - t eosio / eos <nl> ` ` ` <nl> <nl> - The above will build off the most recent commit to the master branch by default . If you would like to target a specific branch / tag , you may use a build argument . For example , if you wished to generate a docker image based off of the v1 . 4 . 0 tag , you could do the following : <nl> + The above will build off the most recent commit to the master branch by default . If you would like to target a specific branch / tag , you may use a build argument . For example , if you wished to generate a docker image based off of the v1 . 4 . 1 tag , you could do the following : <nl> <nl> ` ` ` bash <nl> - docker build - t eosio / eos : v1 . 4 . 0 - - build - arg branch = v1 . 4 . 0 . <nl> + docker build - t eosio / eos : v1 . 4 . 1 - - build - arg branch = v1 . 4 . 1 . <nl> ` ` ` <nl> <nl> By default , the symbol in eosio . system is set to SYS . You can override this using the symbol argument while building the docker image . <nl>
Merge pull request from EOSIO / feature / bump - version - to - 1 . 4 . 1
EOSIO/eos
85755ce4ce3b188d4dc03dd497f7004c4156eabc
2018-10-17T18:06:09Z
new file mode 100644 <nl> index 00000000000 . . 38fb68f7da9 <nl> mmm / dev / null <nl> ppp b / Marlin / src / config / examples / Creality / CR - 10S / _Bootscreen . h <nl> <nl> + / * * <nl> + * Marlin 3D Printer Firmware <nl> + * Copyright ( C ) 2016 MarlinFirmware [ https : / / github . com / MarlinFirmware / Marlin ] <nl> + * <nl> + * Based on Sprinter and grbl . <nl> + * Copyright ( C ) 2011 Camiel Gubbels / Erik van der Zalm <nl> + * <nl> + * This program is free software : you can redistribute it and / or modify <nl> + * it under the terms of the GNU General Public License as published by <nl> + * the Free Software Foundation , either version 3 of the License , or <nl> + * ( at your option ) any later version . <nl> + * <nl> + * This program is distributed in the hope that it will be useful , <nl> + * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + * GNU General Public License for more details . <nl> + * <nl> + * You should have received a copy of the GNU General Public License <nl> + * along with this program . If not , see < http : / / www . gnu . org / licenses / > . <nl> + * <nl> + * / <nl> + <nl> + / * * <nl> + * Custom Boot Screen bitmap <nl> + * <nl> + * Place this file in the root with your configuration files <nl> + * and enable SHOW_CUSTOM_BOOTSCREEN in Configuration . h . <nl> + * <nl> + * Use the Marlin Bitmap Converter to make your own : <nl> + * http : / / marlinfw . org / tools / u8glib / converter . html <nl> + * / <nl> + <nl> + # define CUSTOM_BOOTSCREEN_TIMEOUT 1000 <nl> + # define CUSTOM_BOOTSCREEN_BMPWIDTH 128 <nl> + <nl> + const unsigned char custom_start_bmp [ ] PROGMEM = { <nl> + B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B01100000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B11111100 , B00000000 , B00000000 , <nl> + B00001111 , B11110000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000111 , B11100000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000001 , B10000110 , B00011111 , B11000000 , <nl> + B00011000 , B01110000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B01100000 , B00111100 , B00001100 , B00000000 , B00000000 , B00000001 , B10000011 , B00001100 , B01100000 , <nl> + B00010000 , B00110000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B01100000 , B00111100 , B00001100 , B00000000 , B00000000 , B00000001 , B10000011 , B00001100 , B00110000 , <nl> + B00110000 , B00110000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B01100000 , B00000000 , B00001100 , B00000000 , B00000000 , B00000000 , B00000011 , B00001100 , B00011000 , <nl> + B00110000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B01100000 , B00000000 , B00111111 , B00001111 , B00111100 , B00000000 , B00000011 , B00001100 , B00001100 , <nl> + B01100000 , B00000001 , B11011111 , B00001111 , B11100000 , B11111110 , B00000000 , B01100000 , B00011100 , B00011100 , B00000110 , B00011000 , B00000000 , B00000110 , B00001100 , B00001100 , <nl> + B01100000 , B00000000 , B11110011 , B00011000 , B00110001 , B10000011 , B00000000 , B01100000 , B00001100 , B00001100 , B00000011 , B00011000 , B00000000 , B00011110 , B00001100 , B00001100 , <nl> + B01100000 , B00000000 , B11100000 , B00110000 , B00111001 , B10000011 , B00000000 , B01100000 , B00001100 , B00001100 , B00000011 , B00110000 , B00000000 , B00000011 , B00001100 , B00001100 , <nl> + B01100000 , B00000000 , B11000000 
, B00110000 , B00111000 , B00001111 , B00000000 , B01100000 , B00001100 , B00001100 , B00000011 , B00110000 , B00000000 , B00000001 , B10001100 , B00001100 , <nl> + B01100000 , B00000000 , B11000000 , B00111111 , B11111000 , B11111011 , B00000000 , B01100000 , B00001100 , B00001100 , B00000011 , B00110000 , B00000000 , B00000001 , B10001100 , B00001100 , <nl> + B01100000 , B00110000 , B11000000 , B00110000 , B00000001 , B10000011 , B00000000 , B01100000 , B00001100 , B00001100 , B00000001 , B11110000 , B00000001 , B10000001 , B10001100 , B00001100 , <nl> + B01100000 , B00110000 , B11000000 , B00110000 , B00000001 , B10000011 , B00000000 , B01100000 , B00001100 , B00001100 , B00000000 , B11100000 , B00000001 , B10000001 , B10001100 , B00011000 , <nl> + B00110000 , B00110000 , B11000000 , B00011000 , B00110001 , B10000011 , B00000000 , B01100000 , B00001100 , B00001100 , B01000000 , B11100000 , B00000001 , B10000011 , B10001100 , B00110000 , <nl> + B00011000 , B01100000 , B11000000 , B00001100 , B01100001 , B10000111 , B11000000 , B11100000 , B00011100 , B00001100 , B11000000 , B01100000 , B00000000 , B11000011 , B00001100 , B01100000 , <nl> + B00001111 , B11000011 , B11110000 , B00000111 , B11000000 , B11111111 , B11000111 , B11111100 , B01111111 , B00000111 , B10000001 , B11000000 , B00000000 , B01111110 , B00011111 , B11000000 , <nl> + B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000111 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , <nl> + B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000111 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 <nl> + } ; <nl> new file mode 100644 <nl> index 00000000000 . . 2cb789e80dd <nl> mmm / dev / null <nl> ppp b / Marlin / src / config / examples / Creality / CR - 10mini / Configuration . h <nl> <nl> + / * * <nl> + * Marlin 3D Printer Firmware <nl> + * Copyright ( C ) 2016 MarlinFirmware [ https : / / github . com / MarlinFirmware / Marlin ] <nl> + * <nl> + * Based on Sprinter and grbl . <nl> + * Copyright ( C ) 2011 Camiel Gubbels / Erik van der Zalm <nl> + * <nl> + * This program is free software : you can redistribute it and / or modify <nl> + * it under the terms of the GNU General Public License as published by <nl> + * the Free Software Foundation , either version 3 of the License , or <nl> + * ( at your option ) any later version . <nl> + * <nl> + * This program is distributed in the hope that it will be useful , <nl> + * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + * GNU General Public License for more details . <nl> + * <nl> + * You should have received a copy of the GNU General Public License <nl> + * along with this program . If not , see < http : / / www . gnu . org / licenses / > . <nl> + * <nl> + * / <nl> + <nl> + / * * <nl> + * Configuration . h <nl> + * <nl> + * Basic settings such as : <nl> + * <nl> + * - Type of electronics <nl> + * - Type of temperature sensor <nl> + * - Printer geometry <nl> + * - Endstop configuration <nl> + * - LCD controller <nl> + * - Extra features <nl> + * <nl> + * Advanced settings can be found in Configuration_adv . 
h <nl> + * <nl> + * / <nl> + <nl> + / * * <nl> + * Creality CR - 10 Mini <nl> + * X = 300mm Y = 220mm Z = 300mm <nl> + * E3DV6 Hotend <nl> + * Titan Extruder <nl> + * CR10_STOCKDISPLAY ( RAMPS - compatible with single 10 - pin plug ) <nl> + * / <nl> + <nl> + # ifndef CONFIGURATION_H <nl> + # define CONFIGURATION_H <nl> + # define CONFIGURATION_H_VERSION 020000 <nl> + <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = Getting Started = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + <nl> + / * * <nl> + * Here are some standard links for getting your machine calibrated : <nl> + * <nl> + * http : / / reprap . org / wiki / Calibration <nl> + * http : / / youtu . be / wAL9d7FgInk <nl> + * http : / / calculator . josefprusa . cz <nl> + * http : / / reprap . org / wiki / Triffid_Hunter % 27s_Calibration_Guide <nl> + * http : / / www . thingiverse . com / thing : 5573 <nl> + * https : / / sites . google . com / site / repraplogphase / calibration - of - your - reprap <nl> + * http : / / www . thingiverse . com / thing : 298812 <nl> + * / <nl> + <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = DELTA Printer = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / For a Delta printer start with one of the configuration files in the <nl> + / / config / examples / delta directory and customize for your machine . <nl> + / / <nl> + <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = SCARA Printer = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / For a SCARA printer start with the configuration files in <nl> + / / config / examples / SCARA and customize for your machine . <nl> + / / <nl> + <nl> + / / @ section info <nl> + <nl> + / / User - specified version info of this build to display in [ Pronterface , etc ] terminal window during <nl> + / / startup . Implementation of an idea by Prof Braino to inform user that any changes made to this <nl> + / / build by the user have been successfully uploaded into firmware . <nl> + # define STRING_CONFIG_H_AUTHOR " ( none , default config ) " / / Who made the changes . 
<nl> + # define SHOW_BOOTSCREEN <nl> + # define STRING_SPLASH_LINE1 SHORT_BUILD_VERSION / / will be shown during bootup in line 1 <nl> + # define STRING_SPLASH_LINE2 WEBSITE_URL / / will be shown during bootup in line 2 <nl> + <nl> + / / <nl> + / / * * * VENDORS PLEASE READ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * <nl> + / / <nl> + / / Marlin now allow you to have a vendor boot image to be displayed on machine <nl> + / / start . When SHOW_CUSTOM_BOOTSCREEN is defined Marlin will first show your <nl> + / / custom boot image and then the default Marlin boot image is shown . <nl> + / / <nl> + / / We suggest for you to take advantage of this new feature and keep the Marlin <nl> + / / boot image unmodified . For an example have a look at the bq Hephestos 2 <nl> + / / example configuration folder . <nl> + / / <nl> + # define SHOW_CUSTOM_BOOTSCREEN <nl> + <nl> + / / Enable to show the bitmap in Marlin / _Statusscreen . h on the status screen . <nl> + # define CUSTOM_STATUS_SCREEN_IMAGE <nl> + <nl> + / / @ section machine <nl> + <nl> + / * * <nl> + * Select the serial port on the board to use for communication with the host . <nl> + * This allows the connection of wireless adapters ( for instance ) to non - default port pins . <nl> + * Note : The first serial port ( - 1 or 0 ) will always be used by the Arduino bootloader . <nl> + * <nl> + * : [ - 1 , 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 ] <nl> + * / <nl> + # define SERIAL_PORT 0 <nl> + <nl> + / * * <nl> + * Select a secondary serial port on the board to use for communication with the host . <nl> + * This allows the connection of wireless adapters ( for instance ) to non - default port pins . <nl> + * Serial port - 1 is the USB emulated serial port , if available . <nl> + * <nl> + * : [ - 1 , 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 ] <nl> + * / <nl> + # define SERIAL_PORT_2 - 1 <nl> + <nl> + / * * <nl> + * This setting determines the communication speed of the printer . <nl> + * <nl> + * 250000 works in most cases , but you might try a lower speed if <nl> + * you commonly experience drop - outs during host printing . <nl> + * You may try up to 1000000 to speed up SD file transfer . <nl> + * <nl> + * : [ 2400 , 9600 , 19200 , 38400 , 57600 , 115200 , 250000 , 500000 , 1000000 ] <nl> + * / <nl> + # define BAUDRATE 115200 <nl> + <nl> + / / Enable the Bluetooth serial interface on AT90USB devices <nl> + / / # define BLUETOOTH <nl> + <nl> + / / The following define selects which electronics board you have . <nl> + / / Please choose the name from boards . h that matches your setup <nl> + # ifndef MOTHERBOARD <nl> + # define MOTHERBOARD BOARD_MELZI_CREALITY <nl> + # endif <nl> + <nl> + / / Optional custom name for your RepStrap or other custom machine <nl> + / / Displayed in the LCD " Ready " message <nl> + # define CUSTOM_MACHINE_NAME " CR - 10 Mini " <nl> + <nl> + / / Define this to set a unique identifier for this printer , ( Used by some programs to differentiate between machines ) <nl> + / / You can use an online service to generate a random UUID . ( eg http : / / www . uuidgenerator . net / version4 ) <nl> + / / # define MACHINE_UUID " 00000000 - 0000 - 0000 - 0000 - 000000000000 " <nl> + <nl> + / / @ section extruder <nl> + <nl> + / / This defines the number of extruders <nl> + / / : [ 1 , 2 , 3 , 4 , 5 ] <nl> + # define EXTRUDERS 1 <nl> + <nl> + / / Generally expected filament diameter ( 1 . 75 , 2 . 85 , 3 . 0 , . . . ) . Used for Volumetric , Filament Width Sensor , etc . 
<nl> + # define DEFAULT_NOMINAL_FILAMENT_DIA 1 . 75 <nl> + <nl> + / / For Cyclops or any " multi - extruder " that shares a single nozzle . <nl> + / / # define SINGLENOZZLE <nl> + <nl> + / * * <nl> + * Průša MK2 Single Nozzle Multi - Material Multiplexer , and variants . <nl> + * <nl> + * This device allows one stepper driver on a control board to drive <nl> + * two to eight stepper motors , one at a time , in a manner suitable <nl> + * for extruders . <nl> + * <nl> + * This option only allows the multiplexer to switch on tool - change . <nl> + * Additional options to configure custom E moves are pending . <nl> + * / <nl> + / / # define MK2_MULTIPLEXER <nl> + # if ENABLED ( MK2_MULTIPLEXER ) <nl> + / / Override the default DIO selector pins here , if needed . <nl> + / / Some pins files may provide defaults for these pins . <nl> + / / # define E_MUX0_PIN 40 / / Always Required <nl> + / / # define E_MUX1_PIN 42 / / Needed for 3 to 8 steppers <nl> + / / # define E_MUX2_PIN 44 / / Needed for 5 to 8 steppers <nl> + # endif <nl> + <nl> + / / A dual extruder that uses a single stepper motor <nl> + / / # define SWITCHING_EXTRUDER <nl> + # if ENABLED ( SWITCHING_EXTRUDER ) <nl> + # define SWITCHING_EXTRUDER_SERVO_NR 0 <nl> + # define SWITCHING_EXTRUDER_SERVO_ANGLES { 0 , 90 } / / Angles for E0 , E1 [ , E2 , E3 ] <nl> + # if EXTRUDERS > 3 <nl> + # define SWITCHING_EXTRUDER_E23_SERVO_NR 1 <nl> + # endif <nl> + # endif <nl> + <nl> + / / A dual - nozzle that uses a servomotor to raise / lower one of the nozzles <nl> + / / # define SWITCHING_NOZZLE <nl> + # if ENABLED ( SWITCHING_NOZZLE ) <nl> + # define SWITCHING_NOZZLE_SERVO_NR 0 <nl> + # define SWITCHING_NOZZLE_SERVO_ANGLES { 0 , 90 } / / Angles for E0 , E1 <nl> + / / # define HOTEND_OFFSET_Z { 0 . 0 , 0 . 0 } <nl> + # endif <nl> + <nl> + / * * <nl> + * Two separate X - carriages with extruders that connect to a moving part <nl> + * via a magnetic docking mechanism . Requires SOL1_PIN and SOL2_PIN . <nl> + * / <nl> + / / # define PARKING_EXTRUDER <nl> + # if ENABLED ( PARKING_EXTRUDER ) <nl> + # define PARKING_EXTRUDER_SOLENOIDS_INVERT / / If enabled , the solenoid is NOT magnetized with applied voltage <nl> + # define PARKING_EXTRUDER_SOLENOIDS_PINS_ACTIVE LOW / / LOW or HIGH pin signal energizes the coil <nl> + # define PARKING_EXTRUDER_SOLENOIDS_DELAY 250 / / Delay ( ms ) for magnetic field . No delay if 0 or not defined . <nl> + # define PARKING_EXTRUDER_PARKING_X { - 78 , 184 } / / X positions for parking the extruders <nl> + # define PARKING_EXTRUDER_GRAB_DISTANCE 1 / / mm to move beyond the parking point to grab the extruder <nl> + # define PARKING_EXTRUDER_SECURITY_RAISE 5 / / Z - raise before parking <nl> + # define HOTEND_OFFSET_Z { 0 . 0 , 1 . 3 } / / Z - offsets of the two hotends . The first must be 0 . <nl> + # endif <nl> + <nl> + / * * <nl> + * " Mixing Extruder " <nl> + * - Adds a new code , M165 , to set the current mix factors . <nl> + * - Extends the stepping routines to move multiple steppers in proportion to the mix . <nl> + * - Optional support for Repetier Firmware M163 , M164 , and virtual extruder . <nl> + * - This implementation supports only a single extruder . 
<nl> + * - Enable DIRECT_MIXING_IN_G1 for Pia Taubert ' s reference implementation <nl> + * / <nl> + / / # define MIXING_EXTRUDER <nl> + # if ENABLED ( MIXING_EXTRUDER ) <nl> + # define MIXING_STEPPERS 2 / / Number of steppers in your mixing extruder <nl> + # define MIXING_VIRTUAL_TOOLS 16 / / Use the Virtual Tool method with M163 and M164 <nl> + / / # define DIRECT_MIXING_IN_G1 / / Allow ABCDHI mix factors in G1 movement commands <nl> + # endif <nl> + <nl> + / / Offset of the extruders ( uncomment if using more than one and relying on firmware to position when changing ) . <nl> + / / The offset has to be X = 0 , Y = 0 for the extruder 0 hotend ( default extruder ) . <nl> + / / For the other hotends it is their distance from the extruder 0 hotend . <nl> + / / # define HOTEND_OFFSET_X { 0 . 0 , 20 . 00 } / / ( in mm ) for each extruder , offset of the hotend on the X axis <nl> + / / # define HOTEND_OFFSET_Y { 0 . 0 , 5 . 00 } / / ( in mm ) for each extruder , offset of the hotend on the Y axis <nl> + <nl> + / / @ section machine <nl> + <nl> + / * * <nl> + * Select your power supply here . Use 0 if you haven ' t connected the PS_ON_PIN <nl> + * <nl> + * 0 = No Power Switch <nl> + * 1 = ATX <nl> + * 2 = X - Box 360 203Watts ( the blue wire connected to PS_ON and the red wire to VCC ) <nl> + * <nl> + * : { 0 : ' No power switch ' , 1 : ' ATX ' , 2 : ' X - Box 360 ' } <nl> + * / <nl> + # define POWER_SUPPLY 0 <nl> + <nl> + # if POWER_SUPPLY > 0 <nl> + / / Enable this option to leave the PSU off at startup . <nl> + / / Power to steppers and heaters will need to be turned on with M80 . <nl> + / / # define PS_DEFAULT_OFF <nl> + <nl> + / / # define AUTO_POWER_CONTROL / / Enable automatic control of the PS_ON pin <nl> + # if ENABLED ( AUTO_POWER_CONTROL ) <nl> + # define AUTO_POWER_FANS / / Turn on PSU if fans need power <nl> + # define AUTO_POWER_E_FANS <nl> + # define AUTO_POWER_CONTROLLERFAN <nl> + # define POWER_TIMEOUT 30 <nl> + # endif <nl> + <nl> + # endif <nl> + <nl> + / / @ section temperature <nl> + <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = Thermal Settings = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + <nl> + / * * <nl> + * - - NORMAL IS 4 . 7kohm PULLUP ! - - 1kohm pullup can be used on hotend sensor , using correct resistor and table <nl> + * <nl> + * Temperature sensors available : <nl> + * <nl> + * - 3 : thermocouple with MAX31855 ( only for sensor 0 ) <nl> + * - 2 : thermocouple with MAX6675 ( only for sensor 0 ) <nl> + * - 1 : thermocouple with AD595 <nl> + * 0 : not used <nl> + * 1 : 100k thermistor - best choice for EPCOS 100k ( 4 . 7k pullup ) <nl> + * 2 : 200k thermistor - ATC Semitec 204GT - 2 ( 4 . 7k pullup ) <nl> + * 3 : Mendel - parts thermistor ( 4 . 7k pullup ) <nl> + * 4 : 10k thermistor ! ! do not use it for a hotend . It gives bad resolution at high temp . ! ! <nl> + * 5 : 100K thermistor - ATC Semitec 104GT - 2 / 104NT - 4 - R025H42G ( Used in ParCan & J - Head ) ( 4 . 7k pullup ) <nl> + * 6 : 100k EPCOS - Not as accurate as table 1 ( created using a fluke thermocouple ) ( 4 . 7k pullup ) <nl> + * 7 : 100k Honeywell thermistor 135 - 104LAG - J01 ( 4 . 
7k pullup ) <nl> + * 71 : 100k Honeywell thermistor 135 - 104LAF - J01 ( 4 . 7k pullup ) <nl> + * 8 : 100k 0603 SMD Vishay NTCS0603E3104FXT ( 4 . 7k pullup ) <nl> + * 9 : 100k GE Sensing AL03006 - 58 . 2K - 97 - G1 ( 4 . 7k pullup ) <nl> + * 10 : 100k RS thermistor 198 - 961 ( 4 . 7k pullup ) <nl> + * 11 : 100k beta 3950 1 % thermistor ( 4 . 7k pullup ) <nl> + * 12 : 100k 0603 SMD Vishay NTCS0603E3104FXT ( 4 . 7k pullup ) ( calibrated for Makibox hot bed ) <nl> + * 13 : 100k Hisens 3950 1 % up to 300 ° C for hotend " Simple ONE " & " Hotend " All In ONE " <nl> + * 15 : 100k thermistor calibration for JGAurora A5 hotend <nl> + * 20 : the PT100 circuit found in the Ultimainboard V2 . x <nl> + * 60 : 100k Maker ' s Tool Works Kapton Bed Thermistor beta = 3950 <nl> + * 66 : 4 . 7M High Temperature thermistor from Dyze Design <nl> + * 70 : the 100K thermistor found in the bq Hephestos 2 <nl> + * 75 : 100k Generic Silicon Heat Pad with NTC 100K MGB18 - 104F39050L32 thermistor <nl> + * <nl> + * 1k ohm pullup tables - This is atypical , and requires changing out the 4 . 7k pullup for 1k . <nl> + * ( but gives greater accuracy and more stable PID ) <nl> + * 51 : 100k thermistor - EPCOS ( 1k pullup ) <nl> + * 52 : 200k thermistor - ATC Semitec 204GT - 2 ( 1k pullup ) <nl> + * 55 : 100k thermistor - ATC Semitec 104GT - 2 ( Used in ParCan & J - Head ) ( 1k pullup ) <nl> + * <nl> + * 1047 : Pt1000 with 4k7 pullup <nl> + * 1010 : Pt1000 with 1k pullup ( non standard ) <nl> + * 147 : Pt100 with 4k7 pullup <nl> + * 110 : Pt100 with 1k pullup ( non standard ) <nl> + * <nl> + * Use these for Testing or Development purposes . NEVER for production machine . <nl> + * 998 : Dummy Table that ALWAYS reads 25 ° C or the temperature defined below . <nl> + * 999 : Dummy Table that ALWAYS reads 100 ° C or the temperature defined below . <nl> + * <nl> + * : { ' 0 ' : " Not used " , ' 1 ' : " 100k / 4 . 7k - EPCOS " , ' 2 ' : " 200k / 4 . 7k - ATC Semitec 204GT - 2 " , ' 3 ' : " Mendel - parts / 4 . 7k " , ' 4 ' : " 10k ! ! do not use for a hotend . Bad resolution at high temp . ! ! " , ' 5 ' : " 100K / 4 . 7k - ATC Semitec 104GT - 2 ( Used in ParCan & J - Head ) " , ' 6 ' : " 100k / 4 . 7k EPCOS - Not as accurate as Table 1 " , ' 7 ' : " 100k / 4 . 7k Honeywell 135 - 104LAG - J01 " , ' 8 ' : " 100k / 4 . 7k 0603 SMD Vishay NTCS0603E3104FXT " , ' 9 ' : " 100k / 4 . 7k GE Sensing AL03006 - 58 . 2K - 97 - G1 " , ' 10 ' : " 100k / 4 . 7k RS 198 - 961 " , ' 11 ' : " 100k / 4 . 7k beta 3950 1 % " , ' 12 ' : " 100k / 4 . 7k 0603 SMD Vishay NTCS0603E3104FXT ( calibrated for Makibox hot bed ) " , ' 13 ' : " 100k Hisens 3950 1 % up to 300 ° C for hotend ' Simple ONE ' & hotend ' All In ONE ' " , ' 20 ' : " PT100 ( Ultimainboard V2 . x ) " , ' 51 ' : " 100k / 1k - EPCOS " , ' 52 ' : " 200k / 1k - ATC Semitec 204GT - 2 " , ' 55 ' : " 100k / 1k - ATC Semitec 104GT - 2 ( Used in ParCan & J - Head ) " , ' 60 ' : " 100k Maker ' s Tool Works Kapton Bed Thermistor beta = 3950 " , ' 66 ' : " Dyze Design 4 . 7M High Temperature thermistor " , ' 70 ' : " the 100K thermistor found in the bq Hephestos 2 " , ' 71 ' : " 100k / 4 . 7k Honeywell 135 - 104LAF - J01 " , ' 147 ' : " Pt100 / 4 . 7k " , ' 1047 ' : " Pt1000 / 4 . 
7k " , ' 110 ' : " Pt100 / 1k ( non - standard ) " , ' 1010 ' : " Pt1000 / 1k ( non standard ) " , ' - 3 ' : " Thermocouple + MAX31855 ( only for sensor 0 ) " , ' - 2 ' : " Thermocouple + MAX6675 ( only for sensor 0 ) " , ' - 1 ' : " Thermocouple + AD595 " , ' 998 ' : " Dummy 1 " , ' 999 ' : " Dummy 2 " } <nl> + * / <nl> + # define TEMP_SENSOR_0 1 <nl> + # define TEMP_SENSOR_1 0 <nl> + # define TEMP_SENSOR_2 0 <nl> + # define TEMP_SENSOR_3 0 <nl> + # define TEMP_SENSOR_4 0 <nl> + # define TEMP_SENSOR_BED 5 <nl> + <nl> + / / Dummy thermistor constant temperature readings , for use with 998 and 999 <nl> + # define DUMMY_THERMISTOR_998_VALUE 25 <nl> + # define DUMMY_THERMISTOR_999_VALUE 100 <nl> + <nl> + / / Use temp sensor 1 as a redundant sensor with sensor 0 . If the readings <nl> + / / from the two sensors differ too much the print will be aborted . <nl> + / / # define TEMP_SENSOR_1_AS_REDUNDANT <nl> + # define MAX_REDUNDANT_TEMP_SENSOR_DIFF 10 <nl> + <nl> + / / Extruder temperature must be close to target for this long before M109 returns success <nl> + # define TEMP_RESIDENCY_TIME 10 / / ( seconds ) <nl> + # define TEMP_HYSTERESIS 3 / / ( degC ) range of + / - temperatures considered " close " to the target one <nl> + # define TEMP_WINDOW 1 / / ( degC ) Window around target to start the residency timer x degC early . <nl> + <nl> + / / Bed temperature must be close to target for this long before M190 returns success <nl> + # define TEMP_BED_RESIDENCY_TIME 10 / / ( seconds ) <nl> + # define TEMP_BED_HYSTERESIS 3 / / ( degC ) range of + / - temperatures considered " close " to the target one <nl> + # define TEMP_BED_WINDOW 1 / / ( degC ) Window around target to start the residency timer x degC early . <nl> + <nl> + / / The minimal temperature defines the temperature below which the heater will not be enabled It is used <nl> + / / to check that the wiring to the thermistor is not broken . <nl> + / / Otherwise this would lead to the heater being powered on all the time . <nl> + # define HEATER_0_MINTEMP 5 <nl> + # define HEATER_1_MINTEMP 5 <nl> + # define HEATER_2_MINTEMP 5 <nl> + # define HEATER_3_MINTEMP 5 <nl> + # define HEATER_4_MINTEMP 5 <nl> + # define BED_MINTEMP 5 <nl> + <nl> + / / When temperature exceeds max temp , your heater will be switched off . <nl> + / / This feature exists to protect your hotend from overheating accidentally , but * NOT * from thermistor short / failure ! <nl> + / / You should use MINTEMP for thermistor short / failure protection . <nl> + # define HEATER_0_MAXTEMP 275 <nl> + # define HEATER_1_MAXTEMP 275 <nl> + # define HEATER_2_MAXTEMP 275 <nl> + # define HEATER_3_MAXTEMP 275 <nl> + # define HEATER_4_MAXTEMP 275 <nl> + # define BED_MAXTEMP 120 <nl> + <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = PID Settings = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / PID Tuning Guide here : http : / / reprap . org / wiki / PID_Tuning <nl> + <nl> + / / Comment the following line to disable PID and enable bang - bang . 
<nl> + # define PIDTEMP <nl> + # define BANG_MAX 255 / / Limits current to nozzle while in bang - bang mode ; 255 = full current <nl> + # define PID_MAX BANG_MAX / / Limits current to nozzle while PID is active ( see PID_FUNCTIONAL_RANGE below ) ; 255 = full current <nl> + # define PID_K1 0 . 95 / / Smoothing factor within any PID loop <nl> + # if ENABLED ( PIDTEMP ) <nl> + # define PID_AUTOTUNE_MENU / / Add PID Autotune to the LCD " Temperature " menu to run M303 and apply the result . <nl> + / / # define PID_DEBUG / / Sends debug data to the serial port . <nl> + / / # define PID_OPENLOOP 1 / / Puts PID in open loop . M104 / M140 sets the output power from 0 to PID_MAX <nl> + / / # define SLOW_PWM_HEATERS / / PWM with very low frequency ( roughly 0 . 125Hz = 8s ) and minimum state time of approximately 1s useful for heaters driven by a relay <nl> + / / # define PID_PARAMS_PER_HOTEND / / Uses separate PID parameters for each extruder ( useful for mismatched extruders ) <nl> + / / Set / get with gcode : M301 E [ extruder number , 0 - 2 ] <nl> + # define PID_FUNCTIONAL_RANGE 10 / / If the temperature difference between the target temperature and the actual temperature <nl> + / / is more than PID_FUNCTIONAL_RANGE then the PID will be shut off and the heater will be set to min / max . <nl> + <nl> + / / If you are using a pre - configured hotend then you can use one of the value sets below by uncommenting it <nl> + <nl> + / / Stock CR - 10 tuned for 70C <nl> + # define DEFAULT_Kp 22 . 57 <nl> + # define DEFAULT_Ki 1 . 72 <nl> + # define DEFAULT_Kd 73 . 96 <nl> + <nl> + / / Ultimaker <nl> + / / # define DEFAULT_Kp 22 . 2 <nl> + / / # define DEFAULT_Ki 1 . 08 <nl> + / / # define DEFAULT_Kd 114 <nl> + <nl> + / / MakerGear <nl> + / / # define DEFAULT_Kp 7 . 0 <nl> + / / # define DEFAULT_Ki 0 . 1 <nl> + / / # define DEFAULT_Kd 12 <nl> + <nl> + / / Mendel Parts V9 on 12V <nl> + / / # define DEFAULT_Kp 63 . 0 <nl> + / / # define DEFAULT_Ki 2 . 25 <nl> + / / # define DEFAULT_Kd 440 <nl> + <nl> + # endif / / PIDTEMP <nl> + <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = PID > Bed Temperature Control = = = = = = = = = = = = = = = <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / Select PID or bang - bang with PIDTEMPBED . If bang - bang , BED_LIMIT_SWITCHING will enable hysteresis <nl> + / / <nl> + / / Uncomment this to enable PID on the bed . It uses the same frequency PWM as the extruder . <nl> + / / If your PID_dT is the default , and correct for your hardware / configuration , that means 7 . 689Hz , <nl> + / / which is fine for driving a square wave into a resistive load and does not significantly impact your FET heating . <nl> + / / This also works fine on a Fotek SSR - 10DA Solid State Relay into a 250W heater . <nl> + / / If your configuration is significantly different from this and you don ' t understand the issues involved , you probably <nl> + / / shouldn ' t use bed PID until someone else verifies your hardware works . <nl> + / / If this is enabled , find your own PID constants below . <nl> + # define PIDTEMPBED <nl> + <nl> + / / # define BED_LIMIT_SWITCHING <nl> + <nl> + / / This sets the max power delivered to the bed , and replaces the HEATER_BED_DUTY_CYCLE_DIVIDER option .
<nl> + / / all forms of bed control obey this ( PID , bang - bang , bang - bang with hysteresis ) <nl> + / / setting this to anything other than 255 enables a form of PWM to the bed just like HEATER_BED_DUTY_CYCLE_DIVIDER did , <nl> + / / so you shouldn ' t use it unless you are OK with PWM on your bed . ( see the comment on enabling PIDTEMPBED ) <nl> + # define MAX_BED_POWER 255 / / limits duty cycle to bed ; 255 = full current <nl> + <nl> + # if ENABLED ( PIDTEMPBED ) <nl> + <nl> + / / # define PID_BED_DEBUG / / Sends debug data to the serial port . <nl> + <nl> + / / Stock CR - 10 Bed Tuned for 70C <nl> + # define DEFAULT_bedKp 426 . 68 <nl> + # define DEFAULT_bedKi 78 . 92 <nl> + # define DEFAULT_bedKd 576 . 71 <nl> + <nl> + / / 120V 250W silicone heater into 4mm borosilicate ( MendelMax 1 . 5 + ) <nl> + / / from FOPDT model - kp = . 39 Tp = 405 Tdead = 66 , Tc set to 79 . 2 , aggressive factor of . 15 ( vs . 1 , 1 , 10 ) <nl> + / / # define DEFAULT_bedKp 10 . 00 <nl> + / / # define DEFAULT_bedKi . 023 <nl> + / / # define DEFAULT_bedKd 305 . 4 <nl> + <nl> + / / 120V 250W silicone heater into 4mm borosilicate ( MendelMax 1 . 5 + ) <nl> + / / from pidautotune <nl> + / / # define DEFAULT_bedKp 97 . 1 <nl> + / / # define DEFAULT_bedKi 1 . 41 <nl> + / / # define DEFAULT_bedKd 1675 . 16 <nl> + <nl> + / / FIND YOUR OWN : " M303 E - 1 C8 S90 " to run autotune on the bed at 90 degrees C for 8 cycles . <nl> + # endif / / PIDTEMPBED <nl> + <nl> + / / @ section extruder <nl> + <nl> + / / This option prevents extrusion if the temperature is below EXTRUDE_MINTEMP . <nl> + / / It also enables the M302 command to set the minimum extrusion temperature <nl> + / / or to allow moving the extruder regardless of the hotend temperature . <nl> + / / * * * IT IS HIGHLY RECOMMENDED TO LEAVE THIS OPTION ENABLED ! * * * <nl> + # define PREVENT_COLD_EXTRUSION <nl> + # define EXTRUDE_MINTEMP 170 <nl> + <nl> + / / This option prevents a single extrusion longer than EXTRUDE_MAXLENGTH . <nl> + / / Note that for Bowden Extruders a too - small value here may prevent loading . <nl> + # define PREVENT_LENGTHY_EXTRUDE <nl> + # define EXTRUDE_MAXLENGTH 1000 <nl> + <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = Thermal Runaway Protection = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + <nl> + / * * <nl> + * Thermal Protection provides additional protection to your printer from damage <nl> + * and fire . Marlin always includes safe min and max temperature ranges which <nl> + * protect against a broken or disconnected thermistor wire . <nl> + * <nl> + * The issue : If a thermistor falls out , it will report the much lower <nl> + * temperature of the air in the room , and the firmware will keep <nl> + * the heater on . <nl> + * <nl> + * If you get " Thermal Runaway " or " Heating failed " errors the <nl> + * details can be tuned in Configuration_adv .
h <nl> + * / <nl> + <nl> + # define THERMAL_PROTECTION_HOTENDS / / Enable thermal protection for all extruders <nl> + # define THERMAL_PROTECTION_BED / / Enable thermal protection for the heated bed <nl> + <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = Mechanical Settings = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + <nl> + / / @ section machine <nl> + <nl> + / / Uncomment one of these options to enable CoreXY , CoreXZ , or CoreYZ kinematics <nl> + / / either in the usual order or reversed <nl> + / / # define COREXY <nl> + / / # define COREXZ <nl> + / / # define COREYZ <nl> + / / # define COREYX <nl> + / / # define COREZX <nl> + / / # define COREZY <nl> + <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = Endstop Settings = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + <nl> + / / @ section homing <nl> + <nl> + / / Specify here all the endstop connectors that are connected to any endstop or probe . <nl> + / / Almost all printers will be using one per axis . Probes will use one or more of the <nl> + / / extra connectors . Leave undefined any used for non - endstop and non - probe purposes . <nl> + # define USE_XMIN_PLUG <nl> + # define USE_YMIN_PLUG <nl> + # define USE_ZMIN_PLUG <nl> + / / # define USE_XMAX_PLUG <nl> + / / # define USE_YMAX_PLUG <nl> + / / # define USE_ZMAX_PLUG <nl> + <nl> + / / Enable pullup for all endstops to prevent a floating state <nl> + / / # define ENDSTOPPULLUPS <nl> + # if DISABLED ( ENDSTOPPULLUPS ) <nl> + / / Disable ENDSTOPPULLUPS to set pullups individually <nl> + / / # define ENDSTOPPULLUP_XMAX <nl> + / / # define ENDSTOPPULLUP_YMAX <nl> + / / # define ENDSTOPPULLUP_ZMAX <nl> + / / # define ENDSTOPPULLUP_XMIN <nl> + / / # define ENDSTOPPULLUP_YMIN <nl> + / / # define ENDSTOPPULLUP_ZMIN <nl> + / / # define ENDSTOPPULLUP_ZMIN_PROBE <nl> + # endif <nl> + <nl> + / / Enable pulldown for all endstops to prevent a floating state <nl> + / / # define ENDSTOPPULLDOWNS <nl> + # if DISABLED ( ENDSTOPPULLDOWNS ) <nl> + / / Disable ENDSTOPPULLDOWNS to set pulldowns individually <nl> + / / # define ENDSTOPPULLDOWN_XMAX <nl> + / / # define ENDSTOPPULLDOWN_YMAX <nl> + / / # define ENDSTOPPULLDOWN_ZMAX <nl> + / / # define ENDSTOPPULLDOWN_XMIN <nl> + / / # define ENDSTOPPULLDOWN_YMIN <nl> + / / # define ENDSTOPPULLDOWN_ZMIN <nl> + / / # define ENDSTOPPULLDOWN_ZMIN_PROBE <nl> + # endif <nl> + <nl> + / / Mechanical endstop with COM to ground and NC to Signal uses " false " here ( most common setup ) . <nl> + # define X_MIN_ENDSTOP_INVERTING false / / set to true to invert the logic of the endstop . <nl> + # define Y_MIN_ENDSTOP_INVERTING false / / set to true to invert the logic of the endstop . <nl> + # define Z_MIN_ENDSTOP_INVERTING false / / set to true to invert the logic of the endstop . 
<nl> + # define X_MAX_ENDSTOP_INVERTING false / / set to true to invert the logic of the endstop . <nl> + # define Y_MAX_ENDSTOP_INVERTING false / / set to true to invert the logic of the endstop . <nl> + # define Z_MAX_ENDSTOP_INVERTING false / / set to true to invert the logic of the endstop . <nl> + # define Z_MIN_PROBE_ENDSTOP_INVERTING false / / set to true to invert the logic of the probe . <nl> + <nl> + / / Enable this feature if all enabled endstop pins are interrupt - capable . <nl> + / / This will remove the need to poll the interrupt pins , saving many CPU cycles . <nl> + / / # define ENDSTOP_INTERRUPTS_FEATURE <nl> + <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = Movement Settings = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / @ section motion <nl> + <nl> + / * * <nl> + * Default Settings <nl> + * <nl> + * These settings can be reset by M502 <nl> + * <nl> + * Note that if EEPROM is enabled , saved values will override these . <nl> + * / <nl> + <nl> + / * * <nl> + * With this option each E stepper can have its own factors for the <nl> + * following movement settings . If fewer factors are given than the <nl> + * total number of extruders , the last value applies to the rest . <nl> + * / <nl> + / / # define DISTINCT_E_FACTORS <nl> + <nl> + / * * <nl> + * Default Axis Steps Per Unit ( steps / mm ) <nl> + * Override with M92 <nl> + * X , Y , Z , E0 [ , E1 [ , E2 [ , E3 [ , E4 ] ] ] ] <nl> + * / <nl> + # define DEFAULT_AXIS_STEPS_PER_UNIT { 79 . 60 , 80 , 400 , 229 . 4 } <nl> + <nl> + / * * <nl> + * Default Max Feed Rate ( mm / s ) <nl> + * Override with M203 <nl> + * X , Y , Z , E0 [ , E1 [ , E2 [ , E3 [ , E4 ] ] ] ] <nl> + * / <nl> + # define DEFAULT_MAX_FEEDRATE { 500 , 500 , 15 , 25 } <nl> + <nl> + / * * <nl> + * Default Max Acceleration ( change / s ) change = mm / s <nl> + * ( Maximum start speed for accelerated moves ) <nl> + * Override with M201 <nl> + * X , Y , Z , E0 [ , E1 [ , E2 [ , E3 [ , E4 ] ] ] ] <nl> + * / <nl> + # define DEFAULT_MAX_ACCELERATION { 3000 , 3000 , 100 , 5000 } <nl> + <nl> + / * * <nl> + * Default Acceleration ( change / s ) change = mm / s <nl> + * Override with M204 <nl> + * <nl> + * M204 P Acceleration <nl> + * M204 R Retract Acceleration <nl> + * M204 T Travel Acceleration <nl> + * / <nl> + # define DEFAULT_ACCELERATION 600 / / X , Y , Z and E acceleration for printing moves <nl> + # define DEFAULT_RETRACT_ACCELERATION 1000 / / E acceleration for retracts <nl> + # define DEFAULT_TRAVEL_ACCELERATION 1000 / / X , Y , Z acceleration for travel ( non printing ) moves <nl> + <nl> + / * * <nl> + * Default Jerk ( mm / s ) <nl> + * Override with M205 X Y Z E <nl> + * <nl> + * " Jerk " specifies the minimum speed change that requires acceleration . <nl> + * When changing speed and direction , if the difference is less than the <nl> + * value set here , it may happen instantaneously . <nl> + * / <nl> + # define DEFAULT_XJERK 10 . 0 <nl> + # define DEFAULT_YJERK 10 . 0 <nl> + # define DEFAULT_ZJERK 0 . 3 <nl> + # define DEFAULT_EJERK 5 . 
0 <nl> + <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = Z Probe Options = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / @ section probes <nl> + <nl> + / / <nl> + / / See http : / / marlinfw . org / docs / configuration / probes . html <nl> + / / <nl> + <nl> + / * * <nl> + * Z_MIN_PROBE_USES_Z_MIN_ENDSTOP_PIN <nl> + * <nl> + * Enable this option for a probe connected to the Z Min endstop pin . <nl> + * / <nl> + # define Z_MIN_PROBE_USES_Z_MIN_ENDSTOP_PIN <nl> + <nl> + / * * <nl> + * Z_MIN_PROBE_ENDSTOP <nl> + * <nl> + * Enable this option for a probe connected to any pin except Z - Min . <nl> + * ( By default Marlin assumes the Z - Max endstop pin . ) <nl> + * To use a custom Z Probe pin , set Z_MIN_PROBE_PIN below . <nl> + * <nl> + * - The simplest option is to use a free endstop connector . <nl> + * - Use 5V for powered ( usually inductive ) sensors . <nl> + * <nl> + * - RAMPS 1 . 3 / 1 . 4 boards may use the 5V , GND , and Aux4 - > D32 pin : <nl> + * - For simple switches connect . . . <nl> + * - normally - closed switches to GND and D32 . <nl> + * - normally - open switches to 5V and D32 . <nl> + * <nl> + * WARNING : Setting the wrong pin may have unexpected and potentially <nl> + * disastrous consequences . Use with caution and do your homework . <nl> + * <nl> + * / <nl> + / / # define Z_MIN_PROBE_ENDSTOP <nl> + <nl> + / * * <nl> + * Probe Type <nl> + * <nl> + * Allen Key Probes , Servo Probes , Z - Sled Probes , FIX_MOUNTED_PROBE , etc . <nl> + * Activate one of these to use Auto Bed Leveling below . <nl> + * / <nl> + <nl> + / * * <nl> + * The " Manual Probe " provides a means to do " Auto " Bed Leveling without a probe . <nl> + * Use G29 repeatedly , adjusting the Z height at each point with movement commands <nl> + * or ( with LCD_BED_LEVELING ) the LCD controller . <nl> + * / <nl> + / / # define PROBE_MANUALLY <nl> + <nl> + / * * <nl> + * A Fix - Mounted Probe either doesn ' t deploy or needs manual deployment . <nl> + * ( e . g . , an inductive probe or a nozzle - based probe - switch . ) <nl> + * / <nl> + / / # define FIX_MOUNTED_PROBE <nl> + <nl> + / * * <nl> + * Z Servo Probe , such as an endstop switch on a rotating arm . <nl> + * / <nl> + / / # define Z_ENDSTOP_SERVO_NR 0 / / Defaults to SERVO 0 connector . <nl> + / / # define Z_SERVO_ANGLES { 70 , 0 } / / Z Servo Deploy and Stow angles <nl> + <nl> + / * * <nl> + * The BLTouch probe uses a Hall effect sensor and emulates a servo . <nl> + * / <nl> + / / # define BLTOUCH <nl> + # if ENABLED ( BLTOUCH ) <nl> + / / # define BLTOUCH_DELAY 375 / / ( ms ) Enable and increase if needed <nl> + # endif <nl> + <nl> + / * * <nl> + * Enable one or more of the following if probing seems unreliable . <nl> + * Heaters and / or fans can be disabled during probing to minimize electrical <nl> + * noise . A delay can also be added to allow noise and vibration to settle . <nl> + * These options are most useful for the BLTouch probe , but may also improve <nl> + * readings with inductive probes and piezo sensors . 
<nl> + * / <nl> + / / # define PROBING_HEATERS_OFF / / Turn heaters off when probing <nl> + / / # define PROBING_FANS_OFF / / Turn fans off when probing <nl> + / / # define DELAY_BEFORE_PROBING 200 / / ( ms ) To prevent vibrations from triggering piezo sensors <nl> + <nl> + / / A probe that is deployed and stowed with a solenoid pin ( SOL1_PIN ) <nl> + / / # define SOLENOID_PROBE <nl> + <nl> + / / A sled - mounted probe like those designed by Charles Bell . <nl> + / / # define Z_PROBE_SLED <nl> + / / # define SLED_DOCKING_OFFSET 5 / / The extra distance the X axis must travel to pickup the sled . 0 should be fine but you can push it further if you ' d like . <nl> + <nl> + / / <nl> + / / For Z_PROBE_ALLEN_KEY see the Delta example configurations . <nl> + / / <nl> + <nl> + / * * <nl> + * Z Probe to nozzle ( X , Y ) offset , relative to ( 0 , 0 ) . <nl> + * X and Y offsets must be integers . <nl> + * <nl> + * In the following example the X and Y offsets are both positive : <nl> + * # define X_PROBE_OFFSET_FROM_EXTRUDER 10 <nl> + * # define Y_PROBE_OFFSET_FROM_EXTRUDER 10 <nl> + * <nl> + * + - - BACK mmm + <nl> + * | | <nl> + * L | ( + ) P | R < - - probe ( 20 , 20 ) <nl> + * E | | I <nl> + * F | ( - ) N ( + ) | G < - - nozzle ( 10 , 10 ) <nl> + * T | | H <nl> + * | ( - ) | T <nl> + * | | <nl> + * O - - FRONT - - + <nl> + * ( 0 , 0 ) <nl> + * / <nl> + # define X_PROBE_OFFSET_FROM_EXTRUDER 10 / / X offset : - left + right [ of the nozzle ] <nl> + # define Y_PROBE_OFFSET_FROM_EXTRUDER 10 / / Y offset : - front + behind [ the nozzle ] <nl> + # define Z_PROBE_OFFSET_FROM_EXTRUDER 0 / / Z offset : - below + above [ the nozzle ] <nl> + <nl> + / / X and Y axis travel speed ( mm / m ) between probes <nl> + # define XY_PROBE_SPEED 8000 <nl> + <nl> + / / Speed for the first approach when double - probing ( MULTIPLE_PROBING = = 2 ) <nl> + # define Z_PROBE_SPEED_FAST HOMING_FEEDRATE_Z <nl> + <nl> + / / Speed for the " accurate " probe of each point <nl> + # define Z_PROBE_SPEED_SLOW ( Z_PROBE_SPEED_FAST / 2 ) <nl> + <nl> + / / The number of probes to perform at each point . <nl> + / / Set to 2 for a fast / slow probe , using the second probe result . <nl> + / / Set to 3 or more for slow probes , averaging the results . <nl> + / / # define MULTIPLE_PROBING 2 <nl> + <nl> + / * * <nl> + * Z probes require clearance when deploying , stowing , and moving between <nl> + * probe points to avoid hitting the bed and other hardware . <nl> + * Servo - mounted probes require extra space for the arm to rotate . <nl> + * Inductive probes need space to keep from triggering early . <nl> + * <nl> + * Use these settings to specify the distance ( mm ) to raise the probe ( or <nl> + * lower the bed ) . The values set here apply over and above any ( negative ) <nl> + * probe Z Offset set with Z_PROBE_OFFSET_FROM_EXTRUDER , M851 , or the LCD . <nl> + * Only integer values > = 1 are valid here . <nl> + * <nl> + * Example : ` M851 Z - 5 ` with a CLEARANCE of 4 = > 9mm from bed to nozzle . <nl> + * But : ` M851 Z + 1 ` with a CLEARANCE of 2 = > 2mm from bed to nozzle . 
<nl> + * / <nl> + # define Z_CLEARANCE_DEPLOY_PROBE 10 / / Z Clearance for Deploy / Stow <nl> + # define Z_CLEARANCE_BETWEEN_PROBES 5 / / Z Clearance between probe points <nl> + <nl> + / / For M851 give a range for adjusting the Z probe offset <nl> + # define Z_PROBE_OFFSET_RANGE_MIN - 20 <nl> + # define Z_PROBE_OFFSET_RANGE_MAX 20 <nl> + <nl> + / / Enable the M48 repeatability test to test probe accuracy <nl> + / / # define Z_MIN_PROBE_REPEATABILITY_TEST <nl> + <nl> + / / For Inverting Stepper Enable Pins ( Active Low ) use 0 , Non Inverting ( Active High ) use 1 <nl> + / / : { 0 : ' Low ' , 1 : ' High ' } <nl> + # define X_ENABLE_ON 0 <nl> + # define Y_ENABLE_ON 0 <nl> + # define Z_ENABLE_ON 0 <nl> + # define E_ENABLE_ON 0 / / For all extruders <nl> + <nl> + / / Disables axis stepper immediately when it ' s not being used . <nl> + / / WARNING : When motors turn off there is a chance of losing position accuracy ! <nl> + # define DISABLE_X false <nl> + # define DISABLE_Y false <nl> + # define DISABLE_Z false <nl> + / / Warn on display about possibly reduced accuracy <nl> + / / # define DISABLE_REDUCED_ACCURACY_WARNING <nl> + <nl> + / / @ section extruder <nl> + <nl> + # define DISABLE_E false / / For all extruders <nl> + # define DISABLE_INACTIVE_EXTRUDER true / / Keep only the active extruder enabled . <nl> + <nl> + / / @ section machine <nl> + <nl> + / / Invert the stepper direction . Change ( or reverse the motor connector ) if an axis goes the wrong way . <nl> + # define INVERT_X_DIR true <nl> + # define INVERT_Y_DIR true <nl> + # define INVERT_Z_DIR false <nl> + <nl> + / / Enable this option for Toshiba stepper drivers <nl> + / / # define CONFIG_STEPPERS_TOSHIBA <nl> + <nl> + / / @ section extruder <nl> + <nl> + / / For direct drive extruder v9 set to true , for geared extruder set to false . <nl> + # define INVERT_E0_DIR true <nl> + # define INVERT_E1_DIR false <nl> + # define INVERT_E2_DIR false <nl> + # define INVERT_E3_DIR false <nl> + # define INVERT_E4_DIR false <nl> + <nl> + / / @ section homing <nl> + <nl> + / / # define NO_MOTION_BEFORE_HOMING / / Inhibit movement until all axes have been homed <nl> + <nl> + / / # define Z_HOMING_HEIGHT 5 / / ( in mm ) Minimal z height before homing ( G28 ) for Z clearance above the bed , clamps , . . . <nl> + / / Be sure you have this distance over your Z_MAX_POS in case . <nl> + <nl> + / / Direction of endstops when homing ; 1 = MAX , - 1 = MIN <nl> + / / : [ - 1 , 1 ] <nl> + # define X_HOME_DIR - 1 <nl> + # define Y_HOME_DIR - 1 <nl> + # define Z_HOME_DIR - 1 <nl> + <nl> + / / @ section machine <nl> + <nl> + / / The size of the print bed <nl> + # define X_BED_SIZE 300 <nl> + # define Y_BED_SIZE 220 <nl> + <nl> + / / Travel limits ( mm ) after homing , corresponding to endstop positions . <nl> + # define X_MIN_POS 0 <nl> + # define Y_MIN_POS 0 <nl> + # define Z_MIN_POS 0 <nl> + # define X_MAX_POS X_BED_SIZE <nl> + # define Y_MAX_POS Y_BED_SIZE <nl> + # define Z_MAX_POS 300 <nl> + <nl> + / * * <nl> + * Software Endstops <nl> + * <nl> + * - Prevent moves outside the set machine bounds . <nl> + * - Individual axes can be disabled , if desired . <nl> + * - X and Y only apply to Cartesian robots . 
<nl> + * - Use ' M211 ' to set software endstops on / off or report current state <nl> + * / <nl> + <nl> + / / Min software endstops constrain movement within minimum coordinate bounds <nl> + # define MIN_SOFTWARE_ENDSTOPS <nl> + # if ENABLED ( MIN_SOFTWARE_ENDSTOPS ) <nl> + # define MIN_SOFTWARE_ENDSTOP_X <nl> + # define MIN_SOFTWARE_ENDSTOP_Y <nl> + # define MIN_SOFTWARE_ENDSTOP_Z <nl> + # endif <nl> + <nl> + / / Max software endstops constrain movement within maximum coordinate bounds <nl> + # define MAX_SOFTWARE_ENDSTOPS <nl> + # if ENABLED ( MAX_SOFTWARE_ENDSTOPS ) <nl> + # define MAX_SOFTWARE_ENDSTOP_X <nl> + # define MAX_SOFTWARE_ENDSTOP_Y <nl> + # define MAX_SOFTWARE_ENDSTOP_Z <nl> + # endif <nl> + <nl> + / * * <nl> + * Filament Runout Sensors <nl> + * Mechanical or opto endstops are used to check for the presence of filament . <nl> + * <nl> + * RAMPS - based boards use SERVO3_PIN for the first runout sensor . <nl> + * For other boards you may need to define FIL_RUNOUT_PIN , FIL_RUNOUT2_PIN , etc . <nl> + * By default the firmware assumes HIGH = FILAMENT PRESENT . <nl> + * / <nl> + / / # define FILAMENT_RUNOUT_SENSOR <nl> + # if ENABLED ( FILAMENT_RUNOUT_SENSOR ) <nl> + # define NUM_RUNOUT_SENSORS 1 / / Number of sensors , up to one per extruder . Define a FIL_RUNOUT # _PIN for each . <nl> + # define FIL_RUNOUT_INVERTING false / / set to true to invert the logic of the sensor . <nl> + # define FIL_RUNOUT_PULLUP / / Use internal pullup for filament runout pins . <nl> + / / # define FIL_RUNOUT_PULLDOWN / / Use internal pulldown for filament runout pins . <nl> + # define FILAMENT_RUNOUT_SCRIPT " M600 " <nl> + # endif <nl> + <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = Bed Leveling = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / @ section calibrate <nl> + <nl> + / * * <nl> + * Choose one of the options below to enable G29 Bed Leveling . The parameters <nl> + * and behavior of G29 will change depending on your selection . <nl> + * <nl> + * If using a Probe for Z Homing , enable Z_SAFE_HOMING also ! <nl> + * <nl> + * - AUTO_BED_LEVELING_3POINT <nl> + * Probe 3 arbitrary points on the bed ( that aren ' t collinear ) <nl> + * You specify the XY coordinates of all 3 points . <nl> + * The result is a single tilted plane . Best for a flat bed . <nl> + * <nl> + * - AUTO_BED_LEVELING_LINEAR <nl> + * Probe several points in a grid . <nl> + * You specify the rectangle and the density of sample points . <nl> + * The result is a single tilted plane . Best for a flat bed . <nl> + * <nl> + * - AUTO_BED_LEVELING_BILINEAR <nl> + * Probe several points in a grid . <nl> + * You specify the rectangle and the density of sample points . <nl> + * The result is a mesh , best for large or uneven beds . <nl> + * <nl> + * - AUTO_BED_LEVELING_UBL ( Unified Bed Leveling ) <nl> + * A comprehensive bed leveling system combining the features and benefits <nl> + * of other systems . UBL also includes integrated Mesh Generation , Mesh <nl> + * Validation and Mesh Editing systems . <nl> + * <nl> + * - MESH_BED_LEVELING <nl> + * Probe a grid manually <nl> + * The result is a mesh , suitable for large or uneven beds . ( See BILINEAR . 
) <nl> + * For machines without a probe , Mesh Bed Leveling provides a method to perform <nl> + * leveling in steps so you can manually adjust the Z height at each grid - point . <nl> + * With an LCD controller the process is guided step - by - step . <nl> + * / <nl> + / / # define AUTO_BED_LEVELING_3POINT <nl> + / / # define AUTO_BED_LEVELING_LINEAR <nl> + / / # define AUTO_BED_LEVELING_BILINEAR <nl> + / / # define AUTO_BED_LEVELING_UBL <nl> + / / # define MESH_BED_LEVELING <nl> + <nl> + / * * <nl> + * Enable detailed logging of G28 , G29 , M48 , etc . <nl> + * Turn on with the command ' M111 S32 ' . <nl> + * NOTE : Requires a lot of PROGMEM ! <nl> + * / <nl> + / / # define DEBUG_LEVELING_FEATURE <nl> + <nl> + # if ENABLED ( MESH_BED_LEVELING ) | | ENABLED ( AUTO_BED_LEVELING_BILINEAR ) | | ENABLED ( AUTO_BED_LEVELING_UBL ) <nl> + / / Gradually reduce leveling correction until a set height is reached , <nl> + / / at which point movement will be level to the machine ' s XY plane . <nl> + / / The height can be set with M420 Z < height > <nl> + # define ENABLE_LEVELING_FADE_HEIGHT <nl> + <nl> + / / For Cartesian machines , instead of dividing moves on mesh boundaries , <nl> + / / split up moves into short segments like a Delta . This follows the <nl> + / / contours of the bed more closely than edge - to - edge straight moves . <nl> + # define SEGMENT_LEVELED_MOVES <nl> + # define LEVELED_SEGMENT_LENGTH 5 . 0 / / ( mm ) Length of all segments ( except the last one ) <nl> + <nl> + / * * <nl> + * Enable the G26 Mesh Validation Pattern tool . <nl> + * / <nl> + / / # define G26_MESH_VALIDATION <nl> + # if ENABLED ( G26_MESH_VALIDATION ) <nl> + # define MESH_TEST_NOZZLE_SIZE 0 . 4 / / ( mm ) Diameter of primary nozzle . <nl> + # define MESH_TEST_LAYER_HEIGHT 0 . 2 / / ( mm ) Default layer height for the G26 Mesh Validation Tool . <nl> + # define MESH_TEST_HOTEND_TEMP 205 . 0 / / ( ° C ) Default nozzle temperature for the G26 Mesh Validation Tool . <nl> + # define MESH_TEST_BED_TEMP 60 . 0 / / ( ° C ) Default bed temperature for the G26 Mesh Validation Tool . <nl> + # endif <nl> + <nl> + # endif <nl> + <nl> + # if ENABLED ( AUTO_BED_LEVELING_LINEAR ) | | ENABLED ( AUTO_BED_LEVELING_BILINEAR ) <nl> + <nl> + / / Set the number of grid points per dimension . <nl> + # define GRID_MAX_POINTS_X 3 <nl> + # define GRID_MAX_POINTS_Y GRID_MAX_POINTS_X <nl> + <nl> + / / The Z probe minimum outer margin ( to validate G29 parameters ) . <nl> + # define MIN_PROBE_EDGE 10 <nl> + <nl> + / / Set the boundaries for probing ( where the probe can reach ) . <nl> + # define LEFT_PROBE_BED_POSITION 15 <nl> + # define RIGHT_PROBE_BED_POSITION ( X_BED_SIZE - 15 ) <nl> + # define FRONT_PROBE_BED_POSITION 15 <nl> + # define BACK_PROBE_BED_POSITION ( Y_BED_SIZE - 15 ) <nl> + <nl> + / / Probe along the Y axis , advancing X after each column <nl> + / / # define PROBE_Y_FIRST <nl> + <nl> + # if ENABLED ( AUTO_BED_LEVELING_BILINEAR ) <nl> + <nl> + / / Beyond the probed grid , continue the implied tilt ? <nl> + / / Default is to maintain the height of the nearest edge . <nl> + / / # define EXTRAPOLATE_BEYOND_GRID <nl> + <nl> + / / <nl> + / / Experimental Subdivision of the grid by Catmull - Rom method . <nl> + / / Synthesizes intermediate points to produce a more detailed mesh . 
<nl> + / / <nl> + / / # define ABL_BILINEAR_SUBDIVISION <nl> + # if ENABLED ( ABL_BILINEAR_SUBDIVISION ) <nl> + / / Number of subdivisions between probe points <nl> + # define BILINEAR_SUBDIVISIONS 3 <nl> + # endif <nl> + <nl> + # endif <nl> + <nl> + # elif ENABLED ( AUTO_BED_LEVELING_3POINT ) <nl> + <nl> + / / 3 arbitrary points to probe . <nl> + / / A simple cross - product is used to estimate the plane of the bed . <nl> + # define ABL_PROBE_PT_1_X 15 <nl> + # define ABL_PROBE_PT_1_Y 180 <nl> + # define ABL_PROBE_PT_2_X 15 <nl> + # define ABL_PROBE_PT_2_Y 20 <nl> + # define ABL_PROBE_PT_3_X 170 <nl> + # define ABL_PROBE_PT_3_Y 20 <nl> + <nl> + # elif ENABLED ( AUTO_BED_LEVELING_UBL ) <nl> + <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = Unified Bed Leveling = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + <nl> + / / # define MESH_EDIT_GFX_OVERLAY / / Display a graphics overlay while editing the mesh <nl> + <nl> + # define MESH_INSET 1 / / Mesh inset margin on print area <nl> + # define GRID_MAX_POINTS_X 10 / / Don ' t use more than 15 points per axis , implementation limited . <nl> + # define GRID_MAX_POINTS_Y GRID_MAX_POINTS_X <nl> + <nl> + # define UBL_PROBE_PT_1_X 39 / / Probing points for 3 - Point leveling of the mesh <nl> + # define UBL_PROBE_PT_1_Y 180 <nl> + # define UBL_PROBE_PT_2_X 39 <nl> + # define UBL_PROBE_PT_2_Y 20 <nl> + # define UBL_PROBE_PT_3_X 180 <nl> + # define UBL_PROBE_PT_3_Y 20 <nl> + <nl> + # define UBL_MESH_EDIT_MOVES_Z / / Sophisticated users prefer no movement of nozzle <nl> + # define UBL_SAVE_ACTIVE_ON_M500 / / Save the currently active mesh in the current slot on M500 <nl> + <nl> + / / # define UBL_Z_RAISE_WHEN_OFF_MESH 2 . 5 / / When the nozzle is off the mesh , this value is used <nl> + / / as the Z - Height correction value . <nl> + <nl> + # elif ENABLED ( MESH_BED_LEVELING ) <nl> + <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = Mesh = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + <nl> + # define MESH_INSET 10 / / Mesh inset margin on print area <nl> + # define GRID_MAX_POINTS_X 3 / / Don ' t use more than 7 points per axis , implementation limited . <nl> + # define GRID_MAX_POINTS_Y GRID_MAX_POINTS_X <nl> + <nl> + / / # define MESH_G28_REST_ORIGIN / / After homing all axes ( ' G28 ' or ' G28 XYZ ' ) rest Z at Z_MIN_POS <nl> + <nl> + # endif / / BED_LEVELING <nl> + <nl> + / * * <nl> + * Use the LCD controller for bed leveling <nl> + * Requires MESH_BED_LEVELING or PROBE_MANUALLY <nl> + * / <nl> + / / # define LCD_BED_LEVELING <nl> + <nl> + # if ENABLED ( LCD_BED_LEVELING ) <nl> + # define MBL_Z_STEP 0 . 025 / / Step size while manually probing Z axis . 
<nl> + # define LCD_PROBE_Z_RANGE 4 / / Z Range centered on Z_MIN_POS for LCD Z adjustment <nl> + # endif <nl> + <nl> + / / Add a menu item to move between bed corners for manual bed adjustment <nl> + / / # define LEVEL_BED_CORNERS <nl> + <nl> + / * * <nl> + * Commands to execute at the end of G29 probing . <nl> + * Useful to retract or move the Z probe out of the way . <nl> + * / <nl> + / / # define Z_PROBE_END_SCRIPT " G1 Z10 F12000 \ nG1 X15 Y330 \ nG1 Z0 . 5 \ nG1 Z10 " <nl> + <nl> + <nl> + / / @ section homing <nl> + <nl> + / / The center of the bed is at ( X = 0 , Y = 0 ) <nl> + / / # define BED_CENTER_AT_0_0 <nl> + <nl> + / / Manually set the home position . Leave these undefined for automatic settings . <nl> + / / For DELTA this is the top - center of the Cartesian print volume . <nl> + / / # define MANUAL_X_HOME_POS 0 <nl> + / / # define MANUAL_Y_HOME_POS 0 <nl> + / / # define MANUAL_Z_HOME_POS 0 <nl> + <nl> + / / Use " Z Safe Homing " to avoid homing with a Z probe outside the bed area . <nl> + / / <nl> + / / With this feature enabled : <nl> + / / <nl> + / / - Allow Z homing only after X and Y homing AND stepper drivers still enabled . <nl> + / / - If stepper drivers time out , it will need X and Y homing again before Z homing . <nl> + / / - Move the Z probe ( or nozzle ) to a defined XY point before Z Homing when homing all axes ( G28 ) . <nl> + / / - Prevent Z homing when the Z probe is outside bed area . <nl> + / / <nl> + / / # define Z_SAFE_HOMING <nl> + <nl> + # if ENABLED ( Z_SAFE_HOMING ) <nl> + # define Z_SAFE_HOMING_X_POINT ( ( X_BED_SIZE ) / 2 ) / / X point for Z homing when homing all axes ( G28 ) . <nl> + # define Z_SAFE_HOMING_Y_POINT ( ( Y_BED_SIZE ) / 2 ) / / Y point for Z homing when homing all axes ( G28 ) . <nl> + # endif <nl> + <nl> + / / Homing speeds ( mm / m ) <nl> + # define HOMING_FEEDRATE_XY ( 50 * 60 ) <nl> + # define HOMING_FEEDRATE_Z ( 4 * 60 ) <nl> + <nl> + / / @ section calibrate <nl> + <nl> + / * * <nl> + * Bed Skew Compensation <nl> + * <nl> + * This feature corrects for misalignment in the XYZ axes . <nl> + * <nl> + * Take the following steps to get the bed skew in the XY plane : <nl> + * 1 . Print a test square ( e . g . , https : / / www . thingiverse . com / thing : 2563185 ) <nl> + * 2 . For XY_DIAG_AC measure the diagonal A to C <nl> + * 3 . For XY_DIAG_BD measure the diagonal B to D <nl> + * 4 . For XY_SIDE_AD measure the edge A to D <nl> + * <nl> + * Marlin automatically computes skew factors from these measurements . <nl> + * Skew factors may also be computed and set manually : <nl> + * <nl> + * - Compute AB : SQRT ( 2 * AC * AC + 2 * BD * BD - 4 * AD * AD ) / 2 <nl> + * - XY_SKEW_FACTOR : TAN ( PI / 2 - ACOS ( ( AC * AC - AB * AB - AD * AD ) / ( 2 * AB * AD ) ) ) <nl> + * <nl> + * If desired , follow the same procedure for XZ and YZ . <nl> + * Use these diagrams for reference : <nl> + * <nl> + * Y Z Z <nl> + * ^ Bmmmmmm - C ^ Bmmmmmm - C ^ Bmmmmmm - C <nl> + * | / / | / / | / / <nl> + * | / / | / / | / / <nl> + * | Ammmmmm - D | Ammmmmm - D | Ammmmmm - D <nl> + * + mmmmmmmmmmmm - - > X + mmmmmmmmmmmm - - > X + mmmmmmmmmmmm - - > Y <nl> + * XY_SKEW_FACTOR XZ_SKEW_FACTOR YZ_SKEW_FACTOR <nl> + * / <nl> + / / # define SKEW_CORRECTION <nl> + <nl> + # if ENABLED ( SKEW_CORRECTION ) <nl> + / / Input all length measurements here : <nl> + # define XY_DIAG_AC 282 . 8427124746 <nl> + # define XY_DIAG_BD 282 . 
8427124746 <nl> + # define XY_SIDE_AD 200 <nl> + <nl> + / / Or , set the default skew factors directly here <nl> + / / to override the above measurements : <nl> + # define XY_SKEW_FACTOR 0 . 0 <nl> + <nl> + / / # define SKEW_CORRECTION_FOR_Z <nl> + # if ENABLED ( SKEW_CORRECTION_FOR_Z ) <nl> + # define XZ_DIAG_AC 282 . 8427124746 <nl> + # define XZ_DIAG_BD 282 . 8427124746 <nl> + # define YZ_DIAG_AC 282 . 8427124746 <nl> + # define YZ_DIAG_BD 282 . 8427124746 <nl> + # define YZ_SIDE_AD 200 <nl> + # define XZ_SKEW_FACTOR 0 . 0 <nl> + # define YZ_SKEW_FACTOR 0 . 0 <nl> + # endif <nl> + <nl> + / / Enable this option for M852 to set skew at runtime <nl> + / / # define SKEW_CORRECTION_GCODE <nl> + # endif <nl> + <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = Additional Features = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + <nl> + / / @ section extras <nl> + <nl> + / / <nl> + / / EEPROM <nl> + / / <nl> + / / The microcontroller can store settings in the EEPROM , e . g . max velocity . . . <nl> + / / M500 - stores parameters in EEPROM <nl> + / / M501 - reads parameters from EEPROM ( if you need reset them after you changed them temporarily ) . <nl> + / / M502 - reverts to the default " factory settings " . You still need to store them in EEPROM afterwards if you want to . <nl> + / / <nl> + # define EEPROM_SETTINGS / / Enable for M500 and M501 commands <nl> + / / # define DISABLE_M503 / / Saves ~ 2700 bytes of PROGMEM . Disable for release ! <nl> + # define EEPROM_CHITCHAT / / Give feedback on EEPROM commands . Disable to save PROGMEM . <nl> + <nl> + / / <nl> + / / Host Keepalive <nl> + / / <nl> + / / When enabled Marlin will send a busy status message to the host <nl> + / / every couple of seconds when it can ' t accept commands . <nl> + / / <nl> + # define HOST_KEEPALIVE_FEATURE / / Disable this if your host doesn ' t like keepalive messages <nl> + # define DEFAULT_KEEPALIVE_INTERVAL 2 / / Number of seconds between " busy " messages . Set with M113 . <nl> + # define BUSY_WHILE_HEATING / / Some hosts require " busy " messages even during heating <nl> + <nl> + / / <nl> + / / M100 Free Memory Watcher <nl> + / / <nl> + / / # define M100_FREE_MEMORY_WATCHER / / Add M100 ( Free Memory Watcher ) to debug memory usage <nl> + <nl> + / / <nl> + / / G20 / G21 Inch mode support <nl> + / / <nl> + / / # define INCH_MODE_SUPPORT <nl> + <nl> + / / <nl> + / / M149 Set temperature units support <nl> + / / <nl> + / / # define TEMPERATURE_UNITS_SUPPORT <nl> + <nl> + / / @ section temperature <nl> + <nl> + / / Preheat Constants <nl> + # define PREHEAT_1_TEMP_HOTEND 200 <nl> + # define PREHEAT_1_TEMP_BED 70 <nl> + # define PREHEAT_1_FAN_SPEED 0 / / Value from 0 to 255 <nl> + <nl> + # define PREHEAT_2_TEMP_HOTEND 240 <nl> + # define PREHEAT_2_TEMP_BED 110 <nl> + # define PREHEAT_2_FAN_SPEED 0 / / Value from 0 to 255 <nl> + <nl> + / * * <nl> + * Nozzle Park <nl> + * <nl> + * Park the nozzle at the given XYZ position on idle or G27 . <nl> + * <nl> + * The " P " parameter controls the action applied to the Z axis : <nl> + * <nl> + * P0 ( Default ) If Z is below park Z raise the nozzle . <nl> + * P1 Raise the nozzle always to Z - park height . 
<nl> + * P2 Raise the nozzle by Z - park amount , limited to Z_MAX_POS . <nl> + * / <nl> + / / # define NOZZLE_PARK_FEATURE <nl> + <nl> + # if ENABLED ( NOZZLE_PARK_FEATURE ) <nl> + / / Specify a park position as { X , Y , Z } <nl> + # define NOZZLE_PARK_POINT { ( X_MIN_POS + 10 ) , ( Y_MAX_POS - 10 ) , 20 } <nl> + # define NOZZLE_PARK_XY_FEEDRATE 100 / / X and Y axes feedrate in mm / s ( also used for delta printers Z axis ) <nl> + # define NOZZLE_PARK_Z_FEEDRATE 5 / / Z axis feedrate in mm / s ( not used for delta printers ) <nl> + # endif <nl> + <nl> + / * * <nl> + * Clean Nozzle Feature - - EXPERIMENTAL <nl> + * <nl> + * Adds the G12 command to perform a nozzle cleaning process . <nl> + * <nl> + * Parameters : <nl> + * P Pattern <nl> + * S Strokes / Repetitions <nl> + * T Triangles ( P1 only ) <nl> + * <nl> + * Patterns : <nl> + * P0 Straight line ( default ) . This process requires a sponge type material <nl> + * at a fixed bed location . " S " specifies strokes ( i . e . back - forth motions ) <nl> + * between the start / end points . <nl> + * <nl> + * P1 Zig - zag pattern between ( X0 , Y0 ) and ( X1 , Y1 ) , " T " specifies the <nl> + * number of zig - zag triangles to do . " S " defines the number of strokes . <nl> + * Zig - zags are done in whichever is the narrower dimension . <nl> + * For example , " G12 P1 S1 T3 " will execute : <nl> + * <nl> + * - - <nl> + * | ( X0 , Y1 ) | / \ / \ / \ | ( X1 , Y1 ) <nl> + * | | / \ / \ / \ | <nl> + * A | | / \ / \ / \ | <nl> + * | | / \ / \ / \ | <nl> + * | ( X0 , Y0 ) | / \ / \ / \ | ( X1 , Y0 ) <nl> + * - - + mmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - + <nl> + * | ________ | _________ | _________ | <nl> + * T1 T2 T3 <nl> + * <nl> + * P2 Circular pattern with middle at NOZZLE_CLEAN_CIRCLE_MIDDLE . <nl> + * " R " specifies the radius . " S " specifies the stroke count . <nl> + * Before starting , the nozzle moves to NOZZLE_CLEAN_START_POINT . <nl> + * <nl> + * Caveats : The ending Z should be the same as starting Z . <nl> + * Attention : EXPERIMENTAL . G - code arguments may change . <nl> + * <nl> + * / <nl> + / / # define NOZZLE_CLEAN_FEATURE <nl> + <nl> + # if ENABLED ( NOZZLE_CLEAN_FEATURE ) <nl> + / / Default number of pattern repetitions <nl> + # define NOZZLE_CLEAN_STROKES 12 <nl> + <nl> + / / Default number of triangles <nl> + # define NOZZLE_CLEAN_TRIANGLES 3 <nl> + <nl> + / / Specify positions as { X , Y , Z } <nl> + # define NOZZLE_CLEAN_START_POINT { 30 , 30 , ( Z_MIN_POS + 1 ) } <nl> + # define NOZZLE_CLEAN_END_POINT { 100 , 60 , ( Z_MIN_POS + 1 ) } <nl> + <nl> + / / Circular pattern radius <nl> + # define NOZZLE_CLEAN_CIRCLE_RADIUS 6 . 5 <nl> + / / Circular pattern circle fragments number <nl> + # define NOZZLE_CLEAN_CIRCLE_FN 10 <nl> + / / Middle point of circle <nl> + # define NOZZLE_CLEAN_CIRCLE_MIDDLE NOZZLE_CLEAN_START_POINT <nl> + <nl> + / / Moves the nozzle to the initial position <nl> + # define NOZZLE_CLEAN_GOBACK <nl> + # endif <nl> + <nl> + / * * <nl> + * Print Job Timer <nl> + * <nl> + * Automatically start and stop the print job timer on M104 / M109 / M190 . 
<nl> + * <nl> + * M104 ( hotend , no wait ) - high temp = none , low temp = stop timer <nl> + * M109 ( hotend , wait ) - high temp = start timer , low temp = stop timer <nl> + * M190 ( bed , wait ) - high temp = start timer , low temp = none <nl> + * <nl> + * The timer can also be controlled with the following commands : <nl> + * <nl> + * M75 - Start the print job timer <nl> + * M76 - Pause the print job timer <nl> + * M77 - Stop the print job timer <nl> + * / <nl> + # define PRINTJOB_TIMER_AUTOSTART <nl> + <nl> + / * * <nl> + * Print Counter <nl> + * <nl> + * Track statistical data such as : <nl> + * <nl> + * - Total print jobs <nl> + * - Total successful print jobs <nl> + * - Total failed print jobs <nl> + * - Total time printing <nl> + * <nl> + * View the current statistics with M78 . <nl> + * / <nl> + / / # define PRINTCOUNTER <nl> + <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = LCD and SD support = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + <nl> + / / @ section lcd <nl> + <nl> + / * * <nl> + * LCD LANGUAGE <nl> + * <nl> + * Select the language to display on the LCD . These languages are available : <nl> + * <nl> + * en , an , bg , ca , cn , cz , cz_utf8 , de , el , el - gr , es , eu , fi , fr , fr_utf8 , gl , <nl> + * hr , it , kana , kana_utf8 , nl , pl , pt , pt_utf8 , pt - br , pt - br_utf8 , ru , sk_utf8 , <nl> + * tr , uk , zh_CN , zh_TW , test <nl> + * <nl> + * : { ' en ' : ' English ' , ' an ' : ' Aragonese ' , ' bg ' : ' Bulgarian ' , ' ca ' : ' Catalan ' , ' cn ' : ' Chinese ' , ' cz ' : ' Czech ' , ' cz_utf8 ' : ' Czech ( UTF8 ) ' , ' de ' : ' German ' , ' el ' : ' Greek ' , ' el - gr ' : ' Greek ( Greece ) ' , ' es ' : ' Spanish ' , ' eu ' : ' Basque - Euskera ' , ' fi ' : ' Finnish ' , ' fr ' : ' French ' , ' fr_utf8 ' : ' French ( UTF8 ) ' , ' gl ' : ' Galician ' , ' hr ' : ' Croatian ' , ' it ' : ' Italian ' , ' kana ' : ' Japanese ' , ' kana_utf8 ' : ' Japanese ( UTF8 ) ' , ' nl ' : ' Dutch ' , ' pl ' : ' Polish ' , ' pt ' : ' Portuguese ' , ' pt - br ' : ' Portuguese ( Brazilian ) ' , ' pt - br_utf8 ' : ' Portuguese ( Brazilian UTF8 ) ' , ' pt_utf8 ' : ' Portuguese ( UTF8 ) ' , ' ru ' : ' Russian ' , ' sk_utf8 ' : ' Slovak ( UTF8 ) ' , ' tr ' : ' Turkish ' , ' uk ' : ' Ukrainian ' , ' zh_CN ' : ' Chinese ( Simplified ) ' , ' zh_TW ' : ' Chinese ( Taiwan ) ' , test ' : ' TEST ' } <nl> + * / <nl> + # define LCD_LANGUAGE en <nl> + <nl> + / * * <nl> + * LCD Character Set <nl> + * <nl> + * Note : This option is NOT applicable to Graphical Displays . <nl> + * <nl> + * All character - based LCDs provide ASCII plus one of these <nl> + * language extensions : <nl> + * <nl> + * - JAPANESE . . . the most common <nl> + * - WESTERN . . . with more accented characters <nl> + * - CYRILLIC . . . for the Russian language <nl> + * <nl> + * To determine the language extension installed on your controller : <nl> + * <nl> + * - Compile and upload with LCD_LANGUAGE set to ' test ' <nl> + * - Click the controller to view the LCD menu <nl> + * - The LCD will display Japanese , Western , or Cyrillic text <nl> + * <nl> + * See http : / / marlinfw . org / docs / development / lcd_language . 
html <nl> + * <nl> + * : [ ' JAPANESE ' , ' WESTERN ' , ' CYRILLIC ' ] <nl> + * / <nl> + # define DISPLAY_CHARSET_HD44780 WESTERN <nl> + <nl> + / * * <nl> + * LCD TYPE <nl> + * <nl> + * Enable ULTRA_LCD for a 16x2 , 16x4 , 20x2 , or 20x4 character - based LCD . <nl> + * Enable DOGLCD for a 128x64 ( ST7565R ) Full Graphical Display . <nl> + * ( These options will be enabled automatically for most displays . ) <nl> + * <nl> + * IMPORTANT : The U8glib library is required for Full Graphic Display ! <nl> + * https : / / github . com / olikraus / U8glib_Arduino <nl> + * / <nl> + / / # define ULTRA_LCD / / Character based <nl> + / / # define DOGLCD / / Full graphics display <nl> + <nl> + / * * <nl> + * SD CARD <nl> + * <nl> + * SD Card support is disabled by default . If your controller has an SD slot , <nl> + * you must uncomment the following option or it won ' t work . <nl> + * <nl> + * / <nl> + # define SDSUPPORT <nl> + <nl> + / * * <nl> + * SD CARD : SPI SPEED <nl> + * <nl> + * Enable one of the following items for a slower SPI transfer speed . <nl> + * This may be required to resolve " volume init " errors . <nl> + * / <nl> + / / # define SPI_SPEED SPI_HALF_SPEED <nl> + / / # define SPI_SPEED SPI_QUARTER_SPEED <nl> + / / # define SPI_SPEED SPI_EIGHTH_SPEED <nl> + <nl> + / * * <nl> + * SD CARD : ENABLE CRC <nl> + * <nl> + * Use CRC checks and retries on the SD communication . <nl> + * / <nl> + / / # define SD_CHECK_AND_RETRY <nl> + <nl> + / / <nl> + / / ENCODER SETTINGS <nl> + / / <nl> + / / This option overrides the default number of encoder pulses needed to <nl> + / / produce one step . Should be increased for high - resolution encoders . <nl> + / / <nl> + # define ENCODER_PULSES_PER_STEP 4 <nl> + <nl> + / / <nl> + / / Use this option to override the number of step signals required to <nl> + / / move between next / prev menu items . <nl> + / / <nl> + # define ENCODER_STEPS_PER_MENU_ITEM 1 <nl> + <nl> + / * * <nl> + * Encoder Direction Options <nl> + * <nl> + * Test your encoder ' s behavior first with both options disabled . <nl> + * <nl> + * Reversed Value Edit and Menu Nav ? Enable REVERSE_ENCODER_DIRECTION . <nl> + * Reversed Menu Navigation only ? Enable REVERSE_MENU_DIRECTION . <nl> + * Reversed Value Editing only ? Enable BOTH options . <nl> + * / <nl> + <nl> + / / <nl> + / / This option reverses the encoder direction everywhere . <nl> + / / <nl> + / / Set this option if CLOCKWISE causes values to DECREASE <nl> + / / <nl> + / / # define REVERSE_ENCODER_DIRECTION <nl> + <nl> + / / <nl> + / / This option reverses the encoder direction for navigating LCD menus . <nl> + / / <nl> + / / If CLOCKWISE normally moves DOWN this makes it go UP . <nl> + / / If CLOCKWISE normally moves UP this makes it go DOWN . <nl> + / / <nl> + / / # define REVERSE_MENU_DIRECTION <nl> + <nl> + / / <nl> + / / Individual Axis Homing <nl> + / / <nl> + / / Add individual axis homing items ( Home X , Home Y , and Home Z ) to the LCD menu . <nl> + / / <nl> + / / # define INDIVIDUAL_AXIS_HOMING_MENU <nl> + <nl> + / / <nl> + / / SPEAKER / BUZZER <nl> + / / <nl> + / / If you have a speaker that can produce tones , enable it here . <nl> + / / By default Marlin assumes you have a buzzer with a fixed frequency . <nl> + / / <nl> + # define SPEAKER <nl> + <nl> + / / <nl> + / / The duration and frequency for the UI feedback sound . <nl> + / / Set these to 0 to disable audio feedback in the LCD menus . 
<nl> + / / <nl> + / / Note : Test audio output with the G - Code : <nl> + / / M300 S < frequency Hz > P < duration ms > <nl> + / / <nl> + / / # define LCD_FEEDBACK_FREQUENCY_DURATION_MS 2 <nl> + / / # define LCD_FEEDBACK_FREQUENCY_HZ 5000 <nl> + <nl> + / / <nl> + / / CONTROLLER TYPE : Standard <nl> + / / <nl> + / / Marlin supports a wide variety of controllers . <nl> + / / Enable one of the following options to specify your controller . <nl> + / / <nl> + <nl> + / / <nl> + / / Original RADDS LCD Display + Encoder + SDCardReader <nl> + / / http : / / doku . radds . org / dokumentation / lcd - display / <nl> + / / <nl> + / / # define RADDS_DISPLAY <nl> + <nl> + / / <nl> + / / ULTIMAKER Controller . <nl> + / / <nl> + / / # define ULTIMAKERCONTROLLER <nl> + <nl> + / / <nl> + / / ULTIPANEL as seen on Thingiverse . <nl> + / / <nl> + / / # define ULTIPANEL <nl> + <nl> + / / <nl> + / / PanelOne from T3P3 ( via RAMPS 1 . 4 AUX2 / AUX3 ) <nl> + / / http : / / reprap . org / wiki / PanelOne <nl> + / / <nl> + / / # define PANEL_ONE <nl> + <nl> + / / <nl> + / / MaKr3d Makr - Panel with graphic controller and SD support . <nl> + / / http : / / reprap . org / wiki / MaKr3d_MaKrPanel <nl> + / / <nl> + / / # define MAKRPANEL <nl> + <nl> + / / <nl> + / / ReprapWorld Graphical LCD <nl> + / / https : / / reprapworld . com / ? products_details & products_id / 1218 <nl> + / / <nl> + / / # define REPRAPWORLD_GRAPHICAL_LCD <nl> + <nl> + / / <nl> + / / Activate one of these if you have a Panucatt Devices <nl> + / / Viki 2 . 0 or mini Viki with Graphic LCD <nl> + / / http : / / panucatt . com <nl> + / / <nl> + / / # define VIKI2 <nl> + / / # define miniVIKI <nl> + <nl> + / / <nl> + / / Adafruit ST7565 Full Graphic Controller . <nl> + / / https : / / github . com / eboston / Adafruit - ST7565 - Full - Graphic - Controller / <nl> + / / <nl> + / / # define ELB_FULL_GRAPHIC_CONTROLLER <nl> + <nl> + / / <nl> + / / RepRapDiscount Smart Controller . <nl> + / / http : / / reprap . org / wiki / RepRapDiscount_Smart_Controller <nl> + / / <nl> + / / Note : Usually sold with a white PCB . <nl> + / / <nl> + / / # define REPRAP_DISCOUNT_SMART_CONTROLLER <nl> + <nl> + / / <nl> + / / GADGETS3D G3D LCD / SD Controller <nl> + / / http : / / reprap . org / wiki / RAMPS_1 . 3 / 1 . 4_GADGETS3D_Shield_with_Panel <nl> + / / <nl> + / / Note : Usually sold with a blue PCB . <nl> + / / <nl> + / / # define G3D_PANEL <nl> + <nl> + / / <nl> + / / RepRapDiscount FULL GRAPHIC Smart Controller <nl> + / / http : / / reprap . org / wiki / RepRapDiscount_Full_Graphic_Smart_Controller <nl> + / / <nl> + / / # define REPRAP_DISCOUNT_FULL_GRAPHIC_SMART_CONTROLLER <nl> + <nl> + / / <nl> + / / MakerLab Mini Panel with graphic <nl> + / / controller and SD support - http : / / reprap . org / wiki / Mini_panel <nl> + / / <nl> + / / # define MINIPANEL <nl> + <nl> + / / <nl> + / / RepRapWorld REPRAPWORLD_KEYPAD v1 . 1 <nl> + / / http : / / reprapworld . com / ? products_details & products_id = 202 & cPath = 1591_1626 <nl> + / / <nl> + / / REPRAPWORLD_KEYPAD_MOVE_STEP sets how much should the robot move when a key <nl> + / / is pressed , a value of 10 . 0 means 10mm per click . <nl> + / / <nl> + / / # define REPRAPWORLD_KEYPAD <nl> + / / # define REPRAPWORLD_KEYPAD_MOVE_STEP 1 . 0 <nl> + <nl> + / / <nl> + / / RigidBot Panel V1 . 0 <nl> + / / http : / / www . inventapart . com / <nl> + / / <nl> + / / # define RIGIDBOT_PANEL <nl> + <nl> + / / <nl> + / / BQ LCD Smart Controller shipped by <nl> + / / default with the BQ Hephestos 2 and Witbox 2 . 
<nl> + / / <nl> + / / # define BQ_LCD_SMART_CONTROLLER <nl> + <nl> + / / <nl> + / / Cartesio UI <nl> + / / http : / / mauk . cc / webshop / cartesio - shop / electronics / user - interface <nl> + / / <nl> + / / # define CARTESIO_UI <nl> + <nl> + / / <nl> + / / ANET and Tronxy Controller supported displays . <nl> + / / <nl> + / / # define ZONESTAR_LCD / / Requires ADC_KEYPAD_PIN to be assigned to an analog pin . <nl> + / / This LCD is known to be susceptible to electrical interference <nl> + / / which scrambles the display . Pressing any button clears it up . <nl> + / / This is a LCD2004 display with 5 analog buttons . <nl> + <nl> + / / # define ANET_FULL_GRAPHICS_LCD / / Anet 128x64 full graphics lcd with rotary encoder as used on Anet A6 <nl> + / / A clone of the RepRapDiscount full graphics display but with <nl> + / / different pins / wiring ( see pins_ANET_10 . h ) . <nl> + <nl> + / / <nl> + / / LCD for Melzi Card with Graphical LCD <nl> + / / <nl> + / / # define LCD_FOR_MELZI <nl> + <nl> + / / <nl> + / / LCD for Malyan M200 printers . <nl> + / / This requires SDSUPPORT to be enabled <nl> + / / <nl> + / / # define MALYAN_LCD <nl> + <nl> + / / <nl> + / / CONTROLLER TYPE : I2C <nl> + / / <nl> + / / Note : These controllers require the installation of Arduino ' s LiquidCrystal_I2C <nl> + / / library . For more info : https : / / github . com / kiyoshigawa / LiquidCrystal_I2C <nl> + / / <nl> + <nl> + / / <nl> + / / Elefu RA Board Control Panel <nl> + / / http : / / www . elefu . com / index . php ? route = product / product & product_id = 53 <nl> + / / <nl> + / / # define RA_CONTROL_PANEL <nl> + <nl> + / / <nl> + / / Sainsmart ( YwRobot ) LCD Displays <nl> + / / <nl> + / / These require F . Malpartida ' s LiquidCrystal_I2C library <nl> + / / https : / / bitbucket . org / fmalpartida / new - liquidcrystal / wiki / Home <nl> + / / <nl> + / / # define LCD_SAINSMART_I2C_1602 <nl> + / / # define LCD_SAINSMART_I2C_2004 <nl> + <nl> + / / <nl> + / / Generic LCM1602 LCD adapter <nl> + / / <nl> + / / # define LCM1602 <nl> + <nl> + / / <nl> + / / PANELOLU2 LCD with status LEDs , <nl> + / / separate encoder and click inputs . <nl> + / / <nl> + / / Note : This controller requires Arduino ' s LiquidTWI2 library v1 . 2 . 3 or later . <nl> + / / For more info : https : / / github . com / lincomatic / LiquidTWI2 <nl> + / / <nl> + / / Note : The PANELOLU2 encoder click input can either be directly connected to <nl> + / / a pin ( if BTN_ENC defined to ! = - 1 ) or read through I2C ( when BTN_ENC = = - 1 ) . <nl> + / / <nl> + / / # define LCD_I2C_PANELOLU2 <nl> + <nl> + / / <nl> + / / Panucatt VIKI LCD with status LEDs , <nl> + / / integrated click & L / R / U / D buttons , separate encoder inputs . <nl> + / / <nl> + / / # define LCD_I2C_VIKI <nl> + <nl> + / / <nl> + / / SSD1306 OLED full graphics generic display <nl> + / / <nl> + / / # define U8GLIB_SSD1306 <nl> + <nl> + / / <nl> + / / SAV OLEd LCD module support using either SSD1306 or SH1106 based LCD modules <nl> + / / <nl> + / / # define SAV_3DGLCD <nl> + # if ENABLED ( SAV_3DGLCD ) <nl> + / / # define U8GLIB_SSD1306 <nl> + # define U8GLIB_SH1106 <nl> + # endif <nl> + <nl> + / / <nl> + / / Original Ulticontroller from Ultimaker 2 printer with SSD1309 I2C display and encoder <nl> + / / https : / / github . 
com / Ultimaker / Ultimaker2 / tree / master / 1249_Ulticontroller_Board_ ( x1 ) <nl> + / / <nl> + / / # define ULTI_CONTROLLER <nl> + <nl> + / / <nl> + / / CONTROLLER TYPE : Shift register panels <nl> + / / <nl> + / / 2 wire Non - latching LCD SR from https : / / goo . gl / aJJ4sH <nl> + / / LCD configuration : http : / / reprap . org / wiki / SAV_3D_LCD <nl> + / / <nl> + / / # define SAV_3DLCD <nl> + <nl> + / / <nl> + / / TinyBoy2 128x64 OLED / Encoder Panel <nl> + / / <nl> + / / # define OLED_PANEL_TINYBOY2 <nl> + <nl> + / / <nl> + / / Makeboard 3D Printer Parts 3D Printer Mini Display 1602 Mini Controller <nl> + / / https : / / www . aliexpress . com / item / Micromake - Makeboard - 3D - Printer - Parts - 3D - Printer - Mini - Display - 1602 - Mini - Controller - Compatible - with - Ramps - 1 / 32765887917 . html <nl> + / / <nl> + / / # define MAKEBOARD_MINI_2_LINE_DISPLAY_1602 <nl> + <nl> + / / <nl> + / / MKS MINI12864 with graphic controller and SD support <nl> + / / http : / / reprap . org / wiki / MKS_MINI_12864 <nl> + / / <nl> + / / # define MKS_MINI_12864 <nl> + <nl> + / / <nl> + / / Factory display for Creality CR - 10 <nl> + / / https : / / www . aliexpress . com / item / Universal - LCD - 12864 - 3D - Printer - Display - Screen - With - Encoder - For - CR - 10 - CR - 7 - Model / 32833148327 . html <nl> + / / <nl> + / / This is RAMPS - compatible using a single 10 - pin connector . <nl> + / / ( For CR - 10 owners who want to replace the Melzi Creality board but retain the display ) <nl> + / / <nl> + # define CR10_STOCKDISPLAY <nl> + <nl> + / / <nl> + / / MKS OLED 1 . 3 " 128 × 64 FULL GRAPHICS CONTROLLER <nl> + / / http : / / reprap . org / wiki / MKS_12864OLED <nl> + / / <nl> + / / Tiny , but very sharp OLED display <nl> + / / <nl> + / / # define MKS_12864OLED / / Uses the SH1106 controller ( default ) <nl> + / / # define MKS_12864OLED_SSD1306 / / Uses the SSD1306 controller <nl> + <nl> + / / <nl> + / / AZSMZ 12864 LCD with SD <nl> + / / https : / / www . aliexpress . com / store / product / 3D - printer - smart - controller - SMART - RAMPS - OR - RAMPS - 1 - 4 - LCD - 12864 - LCD - control - panel - green / 2179173_32213636460 . html <nl> + / / <nl> + / / # define AZSMZ_12864 <nl> + <nl> + / / Silvergate GLCD controller <nl> + / / http : / / github . com / android444 / Silvergate <nl> + / / <nl> + / / # define SILVER_GATE_GLCD_CONTROLLER <nl> + <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = Extra Features = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + <nl> + / / @ section extras <nl> + <nl> + / / Increase the FAN PWM frequency . Removes the PWM noise but increases heating in the FET / Arduino <nl> + / / # define FAST_PWM_FAN <nl> + <nl> + / / Use software PWM to drive the fan , as for the heaters . This uses a very low frequency <nl> + / / which is not as annoying as with the hardware PWM . On the other hand , if this frequency <nl> + / / is too low , you should also increment SOFT_PWM_SCALE . <nl> + / / # define FAN_SOFT_PWM <nl> + <nl> + / / Incrementing this by 1 will double the software PWM frequency , <nl> + / / affecting heaters , and the fan if FAN_SOFT_PWM is enabled . 
<nl> + / / However , control resolution will be halved for each increment ; <nl> + / / at zero value , there are 128 effective control positions . <nl> + # define SOFT_PWM_SCALE 0 <nl> + <nl> + / / If SOFT_PWM_SCALE is set to a value higher than 0 , dithering can <nl> + / / be used to mitigate the associated resolution loss . If enabled , <nl> + / / some of the PWM cycles are stretched so on average the desired <nl> + / / duty cycle is attained . <nl> + / / # define SOFT_PWM_DITHER <nl> + <nl> + / / Temperature status LEDs that display the hotend and bed temperature . <nl> + / / If all hotends , bed temperature , and target temperature are under 54C <nl> + / / then the BLUE led is on . Otherwise the RED led is on . ( 1C hysteresis ) <nl> + / / # define TEMP_STAT_LEDS <nl> + <nl> + / / M240 Triggers a camera by emulating a Canon RC - 1 Remote <nl> + / / Data from : http : / / www . doc - diy . net / photo / rc - 1_hacked / <nl> + / / # define PHOTOGRAPH_PIN 23 <nl> + <nl> + / / SkeinForge sends the wrong arc g - codes when using Arc Point as fillet procedure <nl> + / / # define SF_ARC_FIX <nl> + <nl> + / / Support for the BariCUDA Paste Extruder <nl> + / / # define BARICUDA <nl> + <nl> + / / Support for BlinkM / CyzRgb <nl> + / / # define BLINKM <nl> + <nl> + / / Support for PCA9632 PWM LED driver <nl> + / / # define PCA9632 <nl> + <nl> + / * * <nl> + * RGB LED / LED Strip Control <nl> + * <nl> + * Enable support for an RGB LED connected to 5V digital pins , or <nl> + * an RGB Strip connected to MOSFETs controlled by digital pins . <nl> + * <nl> + * Adds the M150 command to set the LED ( or LED strip ) color . <nl> + * If pins are PWM capable ( e . g . , 4 , 5 , 6 , 11 ) then a range of <nl> + * luminance values can be set from 0 to 255 . <nl> + * For Neopixel LED an overall brightness parameter is also available . <nl> + * <nl> + * * * * CAUTION * * * <nl> + * LED Strips require a MOFSET Chip between PWM lines and LEDs , <nl> + * as the Arduino cannot handle the current the LEDs will require . <nl> + * Failure to follow this precaution can destroy your Arduino ! <nl> + * NOTE : A separate 5V power supply is required ! The Neopixel LED needs <nl> + * more current than the Arduino 5V linear regulator can produce . <nl> + * * * * CAUTION * * * <nl> + * <nl> + * LED Type . Enable only one of the following two options . <nl> + * <nl> + * / <nl> + / / # define RGB_LED <nl> + / / # define RGBW_LED <nl> + <nl> + # if ENABLED ( RGB_LED ) | | ENABLED ( RGBW_LED ) <nl> + # define RGB_LED_R_PIN 34 <nl> + # define RGB_LED_G_PIN 43 <nl> + # define RGB_LED_B_PIN 35 <nl> + # define RGB_LED_W_PIN - 1 <nl> + # endif <nl> + <nl> + / / Support for Adafruit Neopixel LED driver <nl> + / / # define NEOPIXEL_LED <nl> + # if ENABLED ( NEOPIXEL_LED ) <nl> + # define NEOPIXEL_TYPE NEO_GRBW / / NEO_GRBW / NEO_GRB - four / three channel driver type ( defined in Adafruit_NeoPixel . h ) <nl> + # define NEOPIXEL_PIN 4 / / LED driving pin on motherboard 4 = > D4 ( EXP2 - 5 on Printrboard ) / 30 = > PC7 ( EXP3 - 13 on Rumba ) <nl> + # define NEOPIXEL_PIXELS 30 / / Number of LEDs in the strip <nl> + # define NEOPIXEL_IS_SEQUENTIAL / / Sequential display for temperature change - LED by LED . Disable to change all LEDs at once . 
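// A hedged, illustrative sketch (not lines from the configuration above): once one of
// RGB_LED, RGBW_LED, or NEOPIXEL_LED is enabled, the M150 command described earlier sets
// the LED color. The parameter letters follow the usual Marlin convention (R = red,
// U = green, B = blue, W = white) and should be verified against your firmware version:
//   M150 R255 U127 B0   ; warm orange
//   M150 W255           ; white channel only (RGBW or Neopixel strips)
//   M150                ; no parameters -> all channels 0, LEDs off
// With NEOPIXEL_LED an overall brightness parameter is also accepted, per the note above
// (assumed to be P in contemporary releases).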
<nl> + # define NEOPIXEL_BRIGHTNESS 127 / / Initial brightness ( 0 - 255 ) <nl> + / / # define NEOPIXEL_STARTUP_TEST / / Cycle through colors at startup <nl> + # endif <nl> + <nl> + / * * <nl> + * Printer Event LEDs <nl> + * <nl> + * During printing , the LEDs will reflect the printer status : <nl> + * <nl> + * - Gradually change from blue to violet as the heated bed gets to target temp <nl> + * - Gradually change from violet to red as the hotend gets to temperature <nl> + * - Change to white to illuminate work surface <nl> + * - Change to green once print has finished <nl> + * - Turn off after the print has finished and the user has pushed a button <nl> + * / <nl> + # if ENABLED ( BLINKM ) | | ENABLED ( RGB_LED ) | | ENABLED ( RGBW_LED ) | | ENABLED ( PCA9632 ) | | ENABLED ( NEOPIXEL_LED ) <nl> + # define PRINTER_EVENT_LEDS <nl> + # endif <nl> + <nl> + / * * <nl> + * R / C SERVO support <nl> + * Sponsored by TrinityLabs , Reworked by codexmas <nl> + * / <nl> + <nl> + / * * <nl> + * Number of servos <nl> + * <nl> + * For some servo - related options NUM_SERVOS will be set automatically . <nl> + * Set this manually if there are extra servos needing manual control . <nl> + * Leave undefined or set to 0 to entirely disable the servo subsystem . <nl> + * / <nl> + / / # define NUM_SERVOS 3 / / Servo index starts with 0 for M280 command <nl> + <nl> + / / Delay ( in milliseconds ) before the next move will start , to give the servo time to reach its target angle . <nl> + / / 300ms is a good value but you can try less delay . <nl> + / / If the servo can ' t reach the requested position , increase it . <nl> + # define SERVO_DELAY { 300 } <nl> + <nl> + / / Servo deactivation <nl> + / / <nl> + / / With this option servos are powered only during movement , then turned off to prevent jitter . <nl> + / / # define DEACTIVATE_SERVOS_AFTER_MOVE <nl> + <nl> + # endif / / CONFIGURATION_H <nl> new file mode 100644 <nl> index 00000000000 . . d443d2dc412 <nl> mmm / dev / null <nl> ppp b / Marlin / src / config / examples / Creality / CR - 10mini / Configuration_adv . h <nl> <nl> + / * * <nl> + * Marlin 3D Printer Firmware <nl> + * Copyright ( C ) 2016 MarlinFirmware [ https : / / github . com / MarlinFirmware / Marlin ] <nl> + * <nl> + * Based on Sprinter and grbl . <nl> + * Copyright ( C ) 2011 Camiel Gubbels / Erik van der Zalm <nl> + * <nl> + * This program is free software : you can redistribute it and / or modify <nl> + * it under the terms of the GNU General Public License as published by <nl> + * the Free Software Foundation , either version 3 of the License , or <nl> + * ( at your option ) any later version . <nl> + * <nl> + * This program is distributed in the hope that it will be useful , <nl> + * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + * GNU General Public License for more details . <nl> + * <nl> + * You should have received a copy of the GNU General Public License <nl> + * along with this program . If not , see < http : / / www . gnu . org / licenses / > . <nl> + * <nl> + * / <nl> + <nl> + / * * <nl> + * Configuration_adv . h <nl> + * <nl> + * Advanced settings . <nl> + * Only change these if you know exactly what you ' re doing . <nl> + * Some of these settings can damage your printer if improperly set ! <nl> + * <nl> + * Basic settings can be found in Configuration . 
h <nl> + * <nl> + * / <nl> + # ifndef CONFIGURATION_ADV_H <nl> + # define CONFIGURATION_ADV_H <nl> + # define CONFIGURATION_ADV_H_VERSION 020000 <nl> + <nl> + / / @ section temperature <nl> + <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = Thermal Settings = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + <nl> + # if DISABLED ( PIDTEMPBED ) <nl> + # define BED_CHECK_INTERVAL 5000 / / ms between checks in bang - bang control <nl> + # if ENABLED ( BED_LIMIT_SWITCHING ) <nl> + # define BED_HYSTERESIS 2 / / Only disable heating if T > target + BED_HYSTERESIS and enable heating if T > target - BED_HYSTERESIS <nl> + # endif <nl> + # endif <nl> + <nl> + / * * <nl> + * Thermal Protection provides additional protection to your printer from damage <nl> + * and fire . Marlin always includes safe min and max temperature ranges which <nl> + * protect against a broken or disconnected thermistor wire . <nl> + * <nl> + * The issue : If a thermistor falls out , it will report the much lower <nl> + * temperature of the air in the room , and the the firmware will keep <nl> + * the heater on . <nl> + * <nl> + * The solution : Once the temperature reaches the target , start observing . <nl> + * If the temperature stays too far below the target ( hysteresis ) for too <nl> + * long ( period ) , the firmware will halt the machine as a safety precaution . <nl> + * <nl> + * If you get false positives for " Thermal Runaway " , increase <nl> + * THERMAL_PROTECTION_HYSTERESIS and / or THERMAL_PROTECTION_PERIOD <nl> + * / <nl> + # if ENABLED ( THERMAL_PROTECTION_HOTENDS ) <nl> + # define THERMAL_PROTECTION_PERIOD 40 / / Seconds <nl> + # define THERMAL_PROTECTION_HYSTERESIS 4 / / Degrees Celsius <nl> + <nl> + / * * <nl> + * Whenever an M104 , M109 , or M303 increases the target temperature , the <nl> + * firmware will wait for the WATCH_TEMP_PERIOD to expire . If the temperature <nl> + * hasn ' t increased by WATCH_TEMP_INCREASE degrees , the machine is halted and <nl> + * requires a hard reset . This test restarts with any M104 / M109 / M303 , but only <nl> + * if the current temperature is far enough below the target for a reliable <nl> + * test . <nl> + * <nl> + * If you get false positives for " Heating failed " , increase WATCH_TEMP_PERIOD <nl> + * and / or decrease WATCH_TEMP_INCREASE . WATCH_TEMP_INCREASE should not be set <nl> + * below 2 . <nl> + * / <nl> + # define WATCH_TEMP_PERIOD 20 / / Seconds <nl> + # define WATCH_TEMP_INCREASE 2 / / Degrees Celsius <nl> + # endif <nl> + <nl> + / * * <nl> + * Thermal Protection parameters for the bed are just as above for hotends . <nl> + * / <nl> + # if ENABLED ( THERMAL_PROTECTION_BED ) <nl> + # define THERMAL_PROTECTION_BED_PERIOD 20 / / Seconds <nl> + # define THERMAL_PROTECTION_BED_HYSTERESIS 2 / / Degrees Celsius <nl> + <nl> + / * * <nl> + * As described above , except for the bed ( M140 / M190 / M303 ) . <nl> + * / <nl> + # define WATCH_BED_TEMP_PERIOD 60 / / Seconds <nl> + # define WATCH_BED_TEMP_INCREASE 2 / / Degrees Celsius <nl> + # endif <nl> + <nl> + # if ENABLED ( PIDTEMP ) <nl> + / / this adds an experimental additional term to the heating power , proportional to the extrusion speed . 
<nl> + / / if Kc is chosen well , the additional required power due to increased melting should be compensated . <nl> + / / # define PID_EXTRUSION_SCALING <nl> + # if ENABLED ( PID_EXTRUSION_SCALING ) <nl> + # define DEFAULT_Kc ( 100 ) / / heating power = Kc * ( e_speed ) <nl> + # define LPQ_MAX_LEN 50 <nl> + # endif <nl> + # endif <nl> + <nl> + / * * <nl> + * Automatic Temperature : <nl> + * The hotend target temperature is calculated by all the buffered lines of gcode . <nl> + * The maximum buffered steps / sec of the extruder motor is called " se " . <nl> + * Start autotemp mode with M109 S < mintemp > B < maxtemp > F < factor > <nl> + * The target temperature is set to mintemp + factor * se [ steps / sec ] and is limited by <nl> + * mintemp and maxtemp . Turn this off by executing M109 without F * <nl> + * Also , if the temperature is set to a value below mintemp , it will not be changed by autotemp . <nl> + * On an Ultimaker , some initial testing worked with M109 S215 B260 F1 in the start . gcode <nl> + * / <nl> + # define AUTOTEMP <nl> + # if ENABLED ( AUTOTEMP ) <nl> + # define AUTOTEMP_OLDWEIGHT 0 . 98 <nl> + # endif <nl> + <nl> + / / Show extra position information in M114 <nl> + / / # define M114_DETAIL <nl> + <nl> + / / Show Temperature ADC value <nl> + / / Enable for M105 to include ADC values read from temperature sensors . <nl> + / / # define SHOW_TEMP_ADC_VALUES <nl> + <nl> + / * * <nl> + * High Temperature Thermistor Support <nl> + * <nl> + * Thermistors able to support high temperature tend to have a hard time getting <nl> + * good readings at room and lower temperatures . This means HEATER_X_RAW_LO_TEMP <nl> + * will probably be caught when the heating element first turns on during the <nl> + * preheating process , which will trigger a min_temp_error as a safety measure <nl> + * and force stop everything . <nl> + * To circumvent this limitation , we allow for a preheat time ( during which , <nl> + * min_temp_error won ' t be triggered ) and add a min_temp buffer to handle <nl> + * aberrant readings . <nl> + * <nl> + * If you want to enable this feature for your hotend thermistor ( s ) <nl> + * uncomment and set values > 0 in the constants below <nl> + * / <nl> + <nl> + / / The number of consecutive low temperature errors that can occur <nl> + / / before a min_temp_error is triggered . ( Shouldn ' t be more than 10 . ) <nl> + / / # define MAX_CONSECUTIVE_LOW_TEMPERATURE_ERROR_ALLOWED 0 <nl> + <nl> + / / The number of milliseconds a hotend will preheat before starting to check <nl> + / / the temperature . This value should NOT be set to the time it takes the <nl> + / / hot end to reach the target temperature , but the time it takes to reach <nl> + / / the minimum temperature your thermistor can read . The lower the better / safer . <nl> + / / This shouldn ' t need to be more than 30 seconds ( 30000 ) <nl> + / / # define MILLISECONDS_PREHEAT_TIME 0 <nl> + <nl> + / / @ section extruder <nl> + <nl> + / / Extruder runout prevention . <nl> + / / If the machine is idle and the temperature over MINTEMP <nl> + / / then extrude some filament every couple of SECONDS . 
<nl> + / / # define EXTRUDER_RUNOUT_PREVENT <nl> + # if ENABLED ( EXTRUDER_RUNOUT_PREVENT ) <nl> + # define EXTRUDER_RUNOUT_MINTEMP 190 <nl> + # define EXTRUDER_RUNOUT_SECONDS 30 <nl> + # define EXTRUDER_RUNOUT_SPEED 1500 / / mm / m <nl> + # define EXTRUDER_RUNOUT_EXTRUDE 5 / / mm <nl> + # endif <nl> + <nl> + / / @ section temperature <nl> + <nl> + / / These defines help to calibrate the AD595 sensor in case you get wrong temperature measurements . <nl> + / / The measured temperature is defined as " actualTemp = ( measuredTemp * TEMP_SENSOR_AD595_GAIN ) + TEMP_SENSOR_AD595_OFFSET " <nl> + # define TEMP_SENSOR_AD595_OFFSET 0 . 0 <nl> + # define TEMP_SENSOR_AD595_GAIN 1 . 0 <nl> + <nl> + / * * <nl> + * Controller Fan <nl> + * To cool down the stepper drivers and MOSFETs . <nl> + * <nl> + * The fan will turn on automatically whenever any stepper is enabled <nl> + * and turn off after a set period after all steppers are turned off . <nl> + * / <nl> + / / # define USE_CONTROLLER_FAN <nl> + # if ENABLED ( USE_CONTROLLER_FAN ) <nl> + / / # define CONTROLLER_FAN_PIN - 1 / / Set a custom pin for the controller fan <nl> + # define CONTROLLERFAN_SECS 60 / / Duration in seconds for the fan to run after all motors are disabled <nl> + # define CONTROLLERFAN_SPEED 255 / / 255 = = full speed <nl> + # endif <nl> + <nl> + / / When first starting the main fan , run it at full speed for the <nl> + / / given number of milliseconds . This gets the fan spinning reliably <nl> + / / before setting a PWM value . ( Does not work with software PWM for fan on Sanguinololu ) <nl> + / / # define FAN_KICKSTART_TIME 100 <nl> + <nl> + / / This defines the minimal speed for the main fan , run in PWM mode <nl> + / / to enable uncomment and set minimal PWM speed for reliable running ( 1 - 255 ) <nl> + / / if fan speed is [ 1 - ( FAN_MIN_PWM - 1 ) ] it is set to FAN_MIN_PWM <nl> + / / # define FAN_MIN_PWM 50 <nl> + <nl> + / / @ section extruder <nl> + <nl> + / * * <nl> + * Extruder cooling fans <nl> + * <nl> + * Extruder auto fans automatically turn on when their extruders ' <nl> + * temperatures go above EXTRUDER_AUTO_FAN_TEMPERATURE . <nl> + * <nl> + * Your board ' s pins file specifies the recommended pins . Override those here <nl> + * or set to - 1 to disable completely . <nl> + * <nl> + * Multiple extruders can be assigned to the same pin in which case <nl> + * the fan will turn on when any selected extruder is above the threshold . <nl> + * / <nl> + # define E0_AUTO_FAN_PIN - 1 <nl> + # define E1_AUTO_FAN_PIN - 1 <nl> + # define E2_AUTO_FAN_PIN - 1 <nl> + # define E3_AUTO_FAN_PIN - 1 <nl> + # define E4_AUTO_FAN_PIN - 1 <nl> + # define EXTRUDER_AUTO_FAN_TEMPERATURE 50 <nl> + # define EXTRUDER_AUTO_FAN_SPEED 255 / / = = full speed <nl> + <nl> + / * * <nl> + * Part - Cooling Fan Multiplexer <nl> + * <nl> + * This feature allows you to digitally multiplex the fan output . <nl> + * The multiplexer is automatically switched at tool - change . <nl> + * Set FANMUX [ 012 ] _PINs below for up to 2 , 4 , or 8 multiplexed fans . 
<nl> + * / <nl> + # define FANMUX0_PIN - 1 <nl> + # define FANMUX1_PIN - 1 <nl> + # define FANMUX2_PIN - 1 <nl> + <nl> + / * * <nl> + * M355 Case Light on - off / brightness <nl> + * / <nl> + / / # define CASE_LIGHT_ENABLE <nl> + # if ENABLED ( CASE_LIGHT_ENABLE ) <nl> + / / # define CASE_LIGHT_PIN 4 / / Override the default pin if needed <nl> + # define INVERT_CASE_LIGHT false / / Set true if Case Light is ON when pin is LOW <nl> + # define CASE_LIGHT_DEFAULT_ON true / / Set default power - up state on <nl> + # define CASE_LIGHT_DEFAULT_BRIGHTNESS 105 / / Set default power - up brightness ( 0 - 255 , requires PWM pin ) <nl> + / / # define MENU_ITEM_CASE_LIGHT / / Add a Case Light option to the LCD main menu <nl> + / / # define CASE_LIGHT_USE_NEOPIXEL / / Use Neopixel LED as case light , requires NEOPIXEL_LED . <nl> + # if ENABLED ( CASE_LIGHT_USE_NEOPIXEL ) <nl> + # define CASE_LIGHT_NEOPIXEL_COLOR { 255 , 255 , 255 , 255 } / / { Red , Green , Blue , White } <nl> + # endif <nl> + # endif <nl> + <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = Mechanical Settings = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + <nl> + / / @ section homing <nl> + <nl> + / / If you want endstops to stay on ( by default ) even when not homing <nl> + / / enable this option . Override at any time with M120 , M121 . <nl> + / / # define ENDSTOPS_ALWAYS_ON_DEFAULT <nl> + <nl> + / / @ section extras <nl> + <nl> + / / # define Z_LATE_ENABLE / / Enable Z the last moment . Needed if your Z driver overheats . <nl> + <nl> + / * * <nl> + * Dual Steppers / Dual Endstops <nl> + * <nl> + * This section will allow you to use extra E drivers to drive a second motor for X , Y , or Z axes . <nl> + * <nl> + * For example , set X_DUAL_STEPPER_DRIVERS setting to use a second motor . If the motors need to <nl> + * spin in opposite directions set INVERT_X2_VS_X_DIR . If the second motor needs its own endstop <nl> + * set X_DUAL_ENDSTOPS . This can adjust for " racking . " Use X2_USE_ENDSTOP to set the endstop plug <nl> + * that should be used for the second endstop . Extra endstops will appear in the output of ' M119 ' . <nl> + * <nl> + * Use X_DUAL_ENDSTOP_ADJUSTMENT to adjust for mechanical imperfection . After homing both motors <nl> + * this offset is applied to the X2 motor . To find the offset home the X axis , and measure the error <nl> + * in X2 . Dual endstop offsets can be set at runtime with ' M666 X < offset > Y < offset > Z < offset > ' . 
<nl> + * / <nl> + <nl> + / / # define X_DUAL_STEPPER_DRIVERS <nl> + # if ENABLED ( X_DUAL_STEPPER_DRIVERS ) <nl> + # define INVERT_X2_VS_X_DIR true / / Set ' true ' if X motors should rotate in opposite directions <nl> + / / # define X_DUAL_ENDSTOPS <nl> + # if ENABLED ( X_DUAL_ENDSTOPS ) <nl> + # define X2_USE_ENDSTOP _XMAX_ <nl> + # define X_DUAL_ENDSTOPS_ADJUSTMENT 0 <nl> + # endif <nl> + # endif <nl> + <nl> + / / # define Y_DUAL_STEPPER_DRIVERS <nl> + # if ENABLED ( Y_DUAL_STEPPER_DRIVERS ) <nl> + # define INVERT_Y2_VS_Y_DIR true / / Set ' true ' if Y motors should rotate in opposite directions <nl> + / / # define Y_DUAL_ENDSTOPS <nl> + # if ENABLED ( Y_DUAL_ENDSTOPS ) <nl> + # define Y2_USE_ENDSTOP _YMAX_ <nl> + # define Y_DUAL_ENDSTOPS_ADJUSTMENT 0 <nl> + # endif <nl> + # endif <nl> + <nl> + / / # define Z_DUAL_STEPPER_DRIVERS <nl> + # if ENABLED ( Z_DUAL_STEPPER_DRIVERS ) <nl> + / / # define Z_DUAL_ENDSTOPS <nl> + # if ENABLED ( Z_DUAL_ENDSTOPS ) <nl> + # define Z2_USE_ENDSTOP _XMAX_ <nl> + # define Z_DUAL_ENDSTOPS_ADJUSTMENT 0 <nl> + # endif <nl> + # endif <nl> + <nl> + / / Enable this for dual x - carriage printers . <nl> + / / A dual x - carriage design has the advantage that the inactive extruder can be parked which <nl> + / / prevents hot - end ooze contaminating the print . It also reduces the weight of each x - carriage <nl> + / / allowing faster printing speeds . Connect your X2 stepper to the first unused E plug . <nl> + / / # define DUAL_X_CARRIAGE <nl> + # if ENABLED ( DUAL_X_CARRIAGE ) <nl> + / / Configuration for second X - carriage <nl> + / / Note : the first x - carriage is defined as the x - carriage which homes to the minimum endstop ; <nl> + / / the second x - carriage always homes to the maximum endstop . <nl> + # define X2_MIN_POS 80 / / set minimum to ensure second x - carriage doesn ' t hit the parked first X - carriage <nl> + # define X2_MAX_POS 353 / / set maximum to the distance between toolheads when both heads are homed <nl> + # define X2_HOME_DIR 1 / / the second X - carriage always homes to the maximum endstop position <nl> + # define X2_HOME_POS X2_MAX_POS / / default home position is the maximum carriage position <nl> + / / However : In this mode the HOTEND_OFFSET_X value for the second extruder provides a software <nl> + / / override for X2_HOME_POS . This also allow recalibration of the distance between the two endstops <nl> + / / without modifying the firmware ( through the " M218 T1 X ? ? ? " command ) . <nl> + / / Remember : you should set the second extruder x - offset to 0 in your slicer . <nl> + <nl> + / / There are a few selectable movement modes for dual x - carriages using M605 S < mode > <nl> + / / Mode 0 ( DXC_FULL_CONTROL_MODE ) : Full control . The slicer has full control over both x - carriages and can achieve optimal travel results <nl> + / / as long as it supports dual x - carriages . ( M605 S0 ) <nl> + / / Mode 1 ( DXC_AUTO_PARK_MODE ) : Auto - park mode . The firmware will automatically park and unpark the x - carriages on tool changes so <nl> + / / that additional slicer support is not required . ( M605 S1 ) <nl> + / / Mode 2 ( DXC_DUPLICATION_MODE ) : Duplication mode . The firmware will transparently make the second x - carriage and extruder copy all <nl> + / / actions of the first x - carriage . This allows the printer to print 2 arbitrary items at <nl> + / / once . 
( 2nd extruder x offset and temp offset are set using : M605 S2 [ Xnnn ] [ Rmmm ] ) <nl> + <nl> + / / This is the default power - up mode which can be later using M605 . <nl> + # define DEFAULT_DUAL_X_CARRIAGE_MODE DXC_FULL_CONTROL_MODE <nl> + <nl> + / / Default settings in " Auto - park Mode " <nl> + # define TOOLCHANGE_PARK_ZLIFT 0 . 2 / / the distance to raise Z axis when parking an extruder <nl> + # define TOOLCHANGE_UNPARK_ZLIFT 1 / / the distance to raise Z axis when unparking an extruder <nl> + <nl> + / / Default x offset in duplication mode ( typically set to half print bed width ) <nl> + # define DEFAULT_DUPLICATION_X_OFFSET 100 <nl> + <nl> + # endif / / DUAL_X_CARRIAGE <nl> + <nl> + / / Activate a solenoid on the active extruder with M380 . Disable all with M381 . <nl> + / / Define SOL0_PIN , SOL1_PIN , etc . , for each extruder that has a solenoid . <nl> + / / # define EXT_SOLENOID <nl> + <nl> + / / @ section homing <nl> + <nl> + / / Homing hits each endstop , retracts by these distances , then does a slower bump . <nl> + # define X_HOME_BUMP_MM 5 <nl> + # define Y_HOME_BUMP_MM 5 <nl> + # define Z_HOME_BUMP_MM 2 <nl> + # define HOMING_BUMP_DIVISOR { 2 , 2 , 4 } / / Re - Bump Speed Divisor ( Divides the Homing Feedrate ) <nl> + # define QUICK_HOME / / If homing includes X and Y , do a diagonal move initially <nl> + <nl> + / / When G28 is called , this option will make Y home before X <nl> + / / # define HOME_Y_BEFORE_X <nl> + <nl> + / / Enable this if X or Y can ' t home without homing the other axis first . <nl> + / / # define CODEPENDENT_XY_HOMING <nl> + <nl> + / / @ section machine <nl> + <nl> + # define AXIS_RELATIVE_MODES { false , false , false , false } <nl> + <nl> + / / Allow duplication mode with a basic dual - nozzle extruder <nl> + / / # define DUAL_NOZZLE_DUPLICATION_MODE <nl> + <nl> + / / By default pololu step drivers require an active high signal . However , some high power drivers require an active low signal as step . <nl> + # define INVERT_X_STEP_PIN false <nl> + # define INVERT_Y_STEP_PIN false <nl> + # define INVERT_Z_STEP_PIN false <nl> + # define INVERT_E_STEP_PIN false <nl> + <nl> + / / Default stepper release if idle . Set to 0 to deactivate . <nl> + / / Steppers will shut down DEFAULT_STEPPER_DEACTIVE_TIME seconds after the last move when DISABLE_INACTIVE_ ? is true . <nl> + / / Time can be set by M18 and M84 . <nl> + # define DEFAULT_STEPPER_DEACTIVE_TIME 120 <nl> + # define DISABLE_INACTIVE_X true <nl> + # define DISABLE_INACTIVE_Y true <nl> + # define DISABLE_INACTIVE_Z true / / set to false if the nozzle will fall down on your printed part when print has finished . <nl> + # define DISABLE_INACTIVE_E true <nl> + <nl> + # define DEFAULT_MINIMUMFEEDRATE 0 . 0 / / minimum feedrate <nl> + # define DEFAULT_MINTRAVELFEEDRATE 0 . 0 <nl> + <nl> + / / # define HOME_AFTER_DEACTIVATE / / Require rehoming after steppers are deactivated <nl> + <nl> + / / @ section lcd <nl> + <nl> + # if ENABLED ( ULTIPANEL ) <nl> + # define MANUAL_FEEDRATE { 50 * 60 , 50 * 60 , 4 * 60 , 60 } / / Feedrates for manual moves along X , Y , Z , E from panel <nl> + # define ULTIPANEL_FEEDMULTIPLY / / Comment to disable setting feedrate multiplier via encoder <nl> + # endif <nl> + <nl> + / / @ section extras <nl> + <nl> + / / minimum time in microseconds that a movement needs to take if the buffer is emptied . 
<nl> + # define DEFAULT_MINSEGMENTTIME 20000 <nl> + <nl> + / / If defined the movements slow down when the look ahead buffer is only half full <nl> + # define SLOWDOWN <nl> + <nl> + / / Frequency limit <nl> + / / See nophead ' s blog for more info <nl> + / / Not working O <nl> + / / # define XY_FREQUENCY_LIMIT 15 <nl> + <nl> + / / Minimum planner junction speed . Sets the default minimum speed the planner plans for at the end <nl> + / / of the buffer and all stops . This should not be much greater than zero and should only be changed <nl> + / / if unwanted behavior is observed on a user ' s machine when running at very slow speeds . <nl> + # define MINIMUM_PLANNER_SPEED 0 . 05 / / ( mm / sec ) <nl> + <nl> + / / Microstep setting ( Only functional when stepper driver microstep pins are connected to MCU . <nl> + # define MICROSTEP_MODES { 16 , 16 , 16 , 16 , 16 } / / [ 1 , 2 , 4 , 8 , 16 ] <nl> + <nl> + / * * <nl> + * @ section stepper motor current <nl> + * <nl> + * Some boards have a means of setting the stepper motor current via firmware . <nl> + * <nl> + * The power on motor currents are set by : <nl> + * PWM_MOTOR_CURRENT - used by MINIRAMBO & ULTIMAIN_2 <nl> + * known compatible chips : A4982 <nl> + * DIGIPOT_MOTOR_CURRENT - used by BQ_ZUM_MEGA_3D , RAMBO & SCOOVO_X9H <nl> + * known compatible chips : AD5206 <nl> + * DAC_MOTOR_CURRENT_DEFAULT - used by PRINTRBOARD_REVF & RIGIDBOARD_V2 <nl> + * known compatible chips : MCP4728 <nl> + * DIGIPOT_I2C_MOTOR_CURRENTS - used by 5DPRINT , AZTEEG_X3_PRO , MIGHTYBOARD_REVE <nl> + * known compatible chips : MCP4451 , MCP4018 <nl> + * <nl> + * Motor currents can also be set by M907 - M910 and by the LCD . <nl> + * M907 - applies to all . <nl> + * M908 - BQ_ZUM_MEGA_3D , RAMBO , PRINTRBOARD_REVF , RIGIDBOARD_V2 & SCOOVO_X9H <nl> + * M909 , M910 & LCD - only PRINTRBOARD_REVF & RIGIDBOARD_V2 <nl> + * / <nl> + / / # define PWM_MOTOR_CURRENT { 1300 , 1300 , 1250 } / / Values in milliamps <nl> + / / # define DIGIPOT_MOTOR_CURRENT { 135 , 135 , 135 , 135 , 135 } / / Values 0 - 255 ( RAMBO 135 = ~ 0 . 75A , 185 = ~ 1A ) <nl> + / / # define DAC_MOTOR_CURRENT_DEFAULT { 70 , 80 , 90 , 80 } / / Default drive percent - X , Y , Z , E axis <nl> + <nl> + / / Use an I2C based DIGIPOT ( e . g . , Azteeg X3 Pro ) <nl> + / / # define DIGIPOT_I2C <nl> + # if ENABLED ( DIGIPOT_I2C ) & & ! defined ( DIGIPOT_I2C_ADDRESS_A ) <nl> + / * * <nl> + * Common slave addresses : <nl> + * <nl> + * A ( A shifted ) B ( B shifted ) IC <nl> + * Smoothie 0x2C ( 0x58 ) 0x2D ( 0x5A ) MCP4451 <nl> + * AZTEEG_X3_PRO 0x2C ( 0x58 ) 0x2E ( 0x5C ) MCP4451 <nl> + * MIGHTYBOARD_REVE 0x2F ( 0x5E ) MCP4018 <nl> + * / <nl> + # define DIGIPOT_I2C_ADDRESS_A 0x2C / / unshifted slave address for first DIGIPOT <nl> + # define DIGIPOT_I2C_ADDRESS_B 0x2D / / unshifted slave address for second DIGIPOT <nl> + # endif <nl> + <nl> + / / # define DIGIPOT_MCP4018 / / Requires library from https : / / github . com / stawel / SlowSoftI2CMaster <nl> + # define DIGIPOT_I2C_NUM_CHANNELS 8 / / 5DPRINT : 4 AZTEEG_X3_PRO : 8 <nl> + / / Actual motor currents in Amps . The number of entries must match DIGIPOT_I2C_NUM_CHANNELS . <nl> + / / These correspond to the physical drivers , so be mindful if the order is changed . <nl> + # define DIGIPOT_I2C_MOTOR_CURRENTS { 1 . 0 , 1 . 0 , 1 . 0 , 1 . 0 , 1 . 0 , 1 . 0 , 1 . 0 , 1 . 
0 } / / AZTEEG_X3_PRO <nl> + <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = Additional Features = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + <nl> + # define ENCODER_RATE_MULTIPLIER / / If defined , certain menu edit operations automatically multiply the steps when the encoder is moved quickly <nl> + # define ENCODER_10X_STEPS_PER_SEC 75 / / If the encoder steps per sec exceeds this value , multiply steps moved x10 to quickly advance the value <nl> + # define ENCODER_100X_STEPS_PER_SEC 160 / / If the encoder steps per sec exceeds this value , multiply steps moved x100 to really quickly advance the value <nl> + <nl> + / / # define CHDK 4 / / Pin for triggering CHDK to take a picture see how to use it here http : / / captain - slow . dk / 2014 / 03 / 09 / 3d - printing - timelapses / <nl> + # define CHDK_DELAY 50 / / How long in ms the pin should stay HIGH before going LOW again <nl> + <nl> + / / @ section lcd <nl> + <nl> + / / Include a page of printer information in the LCD Main Menu <nl> + # define LCD_INFO_MENU <nl> + <nl> + / / Leave out seldom - used LCD menu items to recover some Program Memory <nl> + / / # define SLIM_LCD_MENUS <nl> + <nl> + / / Scroll a longer status message into view <nl> + # define STATUS_MESSAGE_SCROLLING <nl> + <nl> + / / On the Info Screen , display XY with one decimal place when possible <nl> + / / # define LCD_DECIMAL_SMALL_XY <nl> + <nl> + / / The timeout ( in ms ) to return to the status screen from sub - menus <nl> + / / # define LCD_TIMEOUT_TO_STATUS 15000 <nl> + <nl> + / / Add an ' M73 ' G - code to set the current percentage <nl> + / / # define LCD_SET_PROGRESS_MANUALLY <nl> + <nl> + / * * <nl> + * LED Control Menu <nl> + * Enable this feature to add LED Control to the LCD menu <nl> + * / <nl> + / / # define LED_CONTROL_MENU <nl> + # if ENABLED ( LED_CONTROL_MENU ) <nl> + # define LED_COLOR_PRESETS / / Enable the Preset Color menu option <nl> + # if ENABLED ( LED_COLOR_PRESETS ) <nl> + # define LED_USER_PRESET_RED 255 / / User defined RED value <nl> + # define LED_USER_PRESET_GREEN 128 / / User defined GREEN value <nl> + # define LED_USER_PRESET_BLUE 0 / / User defined BLUE value <nl> + # define LED_USER_PRESET_WHITE 255 / / User defined WHITE value <nl> + # define LED_USER_PRESET_BRIGHTNESS 255 / / User defined intensity <nl> + / / # define LED_USER_PRESET_STARTUP / / Have the printer display the user preset color on startup <nl> + # endif <nl> + # endif / / LED_CONTROL_MENU <nl> + <nl> + # if ENABLED ( SDSUPPORT ) <nl> + <nl> + / / Some RAMPS and other boards don ' t detect when an SD card is inserted . You can work <nl> + / / around this by connecting a push button or single throw switch to the pin defined <nl> + / / as SD_DETECT_PIN in your board ' s pins definitions . <nl> + / / This setting should be disabled unless you are using a push button , pulling the pin to ground . <nl> + / / Note : This is always disabled for ULTIPANEL ( except ELB_FULL_GRAPHIC_CONTROLLER ) . 
<nl> + # define SD_DETECT_INVERTED <nl> + <nl> + # define SD_FINISHED_STEPPERRELEASE true / / Disable steppers when SD Print is finished <nl> + # define SD_FINISHED_RELEASECOMMAND " M84 X Y Z E " / / You might want to keep the z enabled so your bed stays in place . <nl> + <nl> + / / Reverse SD sort to show " more recent " files first , according to the card ' s FAT . <nl> + / / Since the FAT gets out of order with usage , SDCARD_SORT_ALPHA is recommended . <nl> + # define SDCARD_RATHERRECENTFIRST <nl> + <nl> + / / Add an option in the menu to run all auto # . g files <nl> + / / # define MENU_ADDAUTOSTART <nl> + <nl> + / * * <nl> + * Sort SD file listings in alphabetical order . <nl> + * <nl> + * With this option enabled , items on SD cards will be sorted <nl> + * by name for easier navigation . <nl> + * <nl> + * By default . . . <nl> + * <nl> + * - Use the slowest - but safest - method for sorting . <nl> + * - Folders are sorted to the top . <nl> + * - The sort key is statically allocated . <nl> + * - No added G - code ( M34 ) support . <nl> + * - 40 item sorting limit . ( Items after the first 40 are unsorted . ) <nl> + * <nl> + * SD sorting uses static allocation ( as set by SDSORT_LIMIT ) , allowing the <nl> + * compiler to calculate the worst - case usage and throw an error if the SRAM <nl> + * limit is exceeded . <nl> + * <nl> + * - SDSORT_USES_RAM provides faster sorting via a static directory buffer . <nl> + * - SDSORT_USES_STACK does the same , but uses a local stack - based buffer . <nl> + * - SDSORT_CACHE_NAMES will retain the sorted file listing in RAM . ( Expensive ! ) <nl> + * - SDSORT_DYNAMIC_RAM only uses RAM when the SD menu is visible . ( Use with caution ! ) <nl> + * / <nl> + / / # define SDCARD_SORT_ALPHA <nl> + <nl> + / / SD Card Sorting options <nl> + # if ENABLED ( SDCARD_SORT_ALPHA ) <nl> + # define SDSORT_LIMIT 40 / / Maximum number of sorted items ( 10 - 256 ) . Costs 27 bytes each . <nl> + # define FOLDER_SORTING - 1 / / - 1 = above 0 = none 1 = below <nl> + # define SDSORT_GCODE false / / Allow turning sorting on / off with LCD and M34 g - code . <nl> + # define SDSORT_USES_RAM false / / Pre - allocate a static array for faster pre - sorting . <nl> + # define SDSORT_USES_STACK false / / Prefer the stack for pre - sorting to give back some SRAM . ( Negated by next 2 options . ) <nl> + # define SDSORT_CACHE_NAMES false / / Keep sorted items in RAM longer for speedy performance . Most expensive option . <nl> + # define SDSORT_DYNAMIC_RAM false / / Use dynamic allocation ( within SD menus ) . Least expensive option . Set SDSORT_LIMIT before use ! <nl> + # define SDSORT_CACHE_VFATS 2 / / Maximum number of 13 - byte VFAT entries to use for sorting . <nl> + / / Note : Only affects SCROLL_LONG_FILENAMES with SDSORT_CACHE_NAMES but not SDSORT_DYNAMIC_RAM . 
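// A hedged, illustrative sketch (not lines from the configuration above) of the memory
// trade-off noted for SDCARD_SORT_ALPHA: at roughly 27 bytes per sorted item, the default
//   #define SDSORT_LIMIT 40    // ~40 * 27 = 1080 bytes of SRAM for the static sort table
// and a smaller or larger limit scales accordingly, e.g.
//   #define SDSORT_LIMIT 20    // ~20 * 27 =  540 bytes, safer on RAM-tight 8-bit boards
//   #define SDSORT_LIMIT 64    // ~64 * 27 = 1728 bytes, only with ample free SRAM
// SDSORT_USES_STACK instead borrows stack space temporarily during pre-sorting, giving
// that SRAM back between sorts, as the options above describe.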
<nl> + # endif <nl> + <nl> + / / Show a progress bar on HD44780 LCDs for SD printing <nl> + / / # define LCD_PROGRESS_BAR <nl> + <nl> + # if ENABLED ( LCD_PROGRESS_BAR ) <nl> + / / Amount of time ( ms ) to show the bar <nl> + # define PROGRESS_BAR_BAR_TIME 2000 <nl> + / / Amount of time ( ms ) to show the status message <nl> + # define PROGRESS_BAR_MSG_TIME 3000 <nl> + / / Amount of time ( ms ) to retain the status message ( 0 = forever ) <nl> + # define PROGRESS_MSG_EXPIRE 0 <nl> + / / Enable this to show messages for MSG_TIME then hide them <nl> + / / # define PROGRESS_MSG_ONCE <nl> + / / Add a menu item to test the progress bar : <nl> + / / # define LCD_PROGRESS_BAR_TEST <nl> + # endif <nl> + <nl> + / / This allows hosts to request long names for files and folders with M33 <nl> + / / # define LONG_FILENAME_HOST_SUPPORT <nl> + <nl> + / / Enable this option to scroll long filenames in the SD card menu <nl> + # define SCROLL_LONG_FILENAMES <nl> + <nl> + / * * <nl> + * This option allows you to abort SD printing when any endstop is triggered . <nl> + * This feature must be enabled with " M540 S1 " or from the LCD menu . <nl> + * To have any effect , endstops must be enabled during SD printing . <nl> + * / <nl> + / / # define ABORT_ON_ENDSTOP_HIT_FEATURE_ENABLED <nl> + <nl> + / * * <nl> + * This option makes it easier to print the same SD Card file again . <nl> + * On print completion the LCD Menu will open with the file selected . <nl> + * You can just click to start the print , or navigate elsewhere . <nl> + * / <nl> + / / # define SD_REPRINT_LAST_SELECTED_FILE <nl> + <nl> + # endif / / SDSUPPORT <nl> + <nl> + / * * <nl> + * Additional options for Graphical Displays <nl> + * <nl> + * Use the optimizations here to improve printing performance , <nl> + * which can be adversely affected by graphical display drawing , <nl> + * especially when doing several short moves , and when printing <nl> + * on DELTA and SCARA machines . <nl> + * <nl> + * Some of these options may result in the display lagging behind <nl> + * controller events , as there is a trade - off between reliable <nl> + * printing performance versus fast display updates . <nl> + * / <nl> + # if ENABLED ( DOGLCD ) <nl> + / / Show SD percentage next to the progress bar <nl> + / / # define DOGM_SD_PERCENT <nl> + <nl> + / / Enable to save many cycles by drawing a hollow frame on the Info Screen <nl> + # define XYZ_HOLLOW_FRAME <nl> + <nl> + / / Enable to save many cycles by drawing a hollow frame on Menu Screens <nl> + # define MENU_HOLLOW_FRAME <nl> + <nl> + / / A bigger font is available for edit items . Costs 3120 bytes of PROGMEM . <nl> + / / Western only . Not available for Cyrillic , Kana , Turkish , Greek , or Chinese . <nl> + / / # define USE_BIG_EDIT_FONT <nl> + <nl> + / / A smaller font may be used on the Info Screen . Costs 2300 bytes of PROGMEM . <nl> + / / Western only . Not available for Cyrillic , Kana , Turkish , Greek , or Chinese . <nl> + / / # define USE_SMALL_INFOFONT <nl> + <nl> + / / Enable this option and reduce the value to optimize screen updates . <nl> + / / The normal delay is 10µs . Use the lowest value that still gives a reliable display . 
<nl> + / / # define DOGM_SPI_DELAY_US 5 <nl> + <nl> + / / Swap the CW / CCW indicators in the graphics overlay <nl> + / / # define OVERLAY_GFX_REVERSE <nl> + <nl> + # endif / / DOGLCD <nl> + <nl> + / / @ section safety <nl> + <nl> + / / The hardware watchdog should reset the microcontroller disabling all outputs , <nl> + / / in case the firmware gets stuck and doesn ' t do temperature regulation . <nl> + # define USE_WATCHDOG <nl> + <nl> + # if ENABLED ( USE_WATCHDOG ) <nl> + / / If you have a watchdog reboot in an ArduinoMega2560 then the device will hang forever , as a watchdog reset will leave the watchdog on . <nl> + / / The " WATCHDOG_RESET_MANUAL " goes around this by not using the hardware reset . <nl> + / / However , THIS FEATURE IS UNSAFE ! , as it will only work if interrupts are disabled . And the code could hang in an interrupt routine with interrupts disabled . <nl> + / / # define WATCHDOG_RESET_MANUAL <nl> + # endif <nl> + <nl> + / / @ section lcd <nl> + <nl> + / * * <nl> + * Babystepping enables movement of the axes by tiny increments without changing <nl> + * the current position values . This feature is used primarily to adjust the Z <nl> + * axis in the first layer of a print in real - time . <nl> + * <nl> + * Warning : Does not respect endstops ! <nl> + * / <nl> + # define BABYSTEPPING <nl> + # if ENABLED ( BABYSTEPPING ) <nl> + / / # define BABYSTEP_XY / / Also enable X / Y Babystepping . Not supported on DELTA ! <nl> + # define BABYSTEP_INVERT_Z false / / Change if Z babysteps should go the other way <nl> + # define BABYSTEP_MULTIPLICATOR 1 / / Babysteps are very small . Increase for faster motion . <nl> + / / # define BABYSTEP_ZPROBE_OFFSET / / Enable to combine M851 and Babystepping <nl> + # define DOUBLECLICK_FOR_Z_BABYSTEPPING / / Double - click on the Status Screen for Z Babystepping . <nl> + # define DOUBLECLICK_MAX_INTERVAL 1250 / / Maximum interval between clicks , in milliseconds . <nl> + / / Note : Extra time may be added to mitigate controller latency . <nl> + / / # define BABYSTEP_ZPROBE_GFX_OVERLAY / / Enable graphical overlay on Z - offset editor <nl> + # endif <nl> + <nl> + / / @ section extruder <nl> + <nl> + / * * <nl> + * Implementation of linear pressure control <nl> + * <nl> + * Assumption : advance = k * ( delta velocity ) <nl> + * K = 0 means advance disabled . <nl> + * See Marlin documentation for calibration instructions . <nl> + * / <nl> + / / # define LIN_ADVANCE <nl> + <nl> + # if ENABLED ( LIN_ADVANCE ) <nl> + # define LIN_ADVANCE_K 75 <nl> + <nl> + / * * <nl> + * Some Slicers produce Gcode with randomly jumping extrusion widths occasionally . <nl> + * For example within a 0 . 4mm perimeter it may produce a single segment of 0 . 05mm width . <nl> + * While this is harmless for normal printing ( the fluid nature of the filament will <nl> + * close this very , very tiny gap ) , it throws off the LIN_ADVANCE pressure adaption . <nl> + * <nl> + * For this case LIN_ADVANCE_E_D_RATIO can be used to set the extrusion : distance ratio <nl> + * to a fixed value . Note that using a fixed ratio will lead to wrong nozzle pressures <nl> + * if the slicer is using variable widths or layer heights within one print ! <nl> + * <nl> + * This option sets the default E : D ratio at startup . Use ` M900 ` to override this value . <nl> + * <nl> + * Example : ` M900 W0 . 4 H0 . 2 D1 . 75 ` , where : <nl> + * - W is the extrusion width in mm <nl> + * - H is the layer height in mm <nl> + * - D is the filament diameter in mm <nl> + * <nl> + * Example : ` M900 R0 . 
0458 ` to set the ratio directly . <nl> + * <nl> + * Set to 0 to auto - detect the ratio based on given Gcode G1 print moves . <nl> + * <nl> + * Slic3r ( including Průša Control ) produces Gcode compatible with the automatic mode . <nl> + * Cura ( as of this writing ) may produce Gcode incompatible with the automatic mode . <nl> + * / <nl> + # define LIN_ADVANCE_E_D_RATIO 0 / / The calculated ratio ( or 0 ) according to the formula W * H / ( ( D / 2 ) ^ 2 * PI ) <nl> + / / Example : 0 . 4 * 0 . 2 / ( ( 1 . 75 / 2 ) ^ 2 * PI ) = 0 . 033260135 <nl> + # endif <nl> + <nl> + / / @ section leveling <nl> + <nl> + # if ENABLED ( DELTA ) & & ! defined ( DELTA_PROBEABLE_RADIUS ) <nl> + # define DELTA_PROBEABLE_RADIUS DELTA_PRINTABLE_RADIUS <nl> + # elif IS_SCARA & & ! defined ( SCARA_PRINTABLE_RADIUS ) <nl> + # define SCARA_PRINTABLE_RADIUS ( SCARA_LINKAGE_1 + SCARA_LINKAGE_2 ) <nl> + # endif <nl> + <nl> + # if ENABLED ( MESH_BED_LEVELING ) | | ENABLED ( AUTO_BED_LEVELING_UBL ) <nl> + / / Override the mesh area if the automatic ( max ) area is too large <nl> + / / # define MESH_MIN_X MESH_INSET <nl> + / / # define MESH_MIN_Y MESH_INSET <nl> + / / # define MESH_MAX_X X_BED_SIZE - ( MESH_INSET ) <nl> + / / # define MESH_MAX_Y Y_BED_SIZE - ( MESH_INSET ) <nl> + # endif <nl> + <nl> + / / @ section extras <nl> + <nl> + / / <nl> + / / G2 / G3 Arc Support <nl> + / / <nl> + # define ARC_SUPPORT / / Disable this feature to save ~ 3226 bytes <nl> + # if ENABLED ( ARC_SUPPORT ) <nl> + # define MM_PER_ARC_SEGMENT 1 / / Length of each arc segment <nl> + # define N_ARC_CORRECTION 25 / / Number of intertpolated segments between corrections <nl> + / / # define ARC_P_CIRCLES / / Enable the ' P ' parameter to specify complete circles <nl> + / / # define CNC_WORKSPACE_PLANES / / Allow G2 / G3 to operate in XY , ZX , or YZ planes <nl> + # endif <nl> + <nl> + / / Support for G5 with XYZE destination and IJPQ offsets . Requires ~ 2666 bytes . <nl> + / / # define BEZIER_CURVE_SUPPORT <nl> + <nl> + / / G38 . 2 and G38 . 3 Probe Target <nl> + / / Set MULTIPLE_PROBING if you want G38 to double touch <nl> + / / # define G38_PROBE_TARGET <nl> + # if ENABLED ( G38_PROBE_TARGET ) <nl> + # define G38_MINIMUM_MOVE 0 . 0275 / / minimum distance in mm that will produce a move ( determined using the print statement in check_move ) <nl> + # endif <nl> + <nl> + / / Moves ( or segments ) with fewer steps than this will be joined with the next move <nl> + # define MIN_STEPS_PER_SEGMENT 6 <nl> + <nl> + / / The minimum pulse width ( in µs ) for stepping a stepper . <nl> + / / Set this if you find stepping unreliable , or if using a very fast CPU . <nl> + # define MINIMUM_STEPPER_PULSE 0 / / ( µs ) The smallest stepper pulse allowed <nl> + <nl> + / / @ section temperature <nl> + <nl> + / / Control heater 0 and heater 1 in parallel . <nl> + / / # define HEATERS_PARALLEL <nl> + <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = Buffers = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + <nl> + / / @ section hidden <nl> + <nl> + / / The number of linear motions that can be in the plan at any give time . <nl> + / / THE BLOCK_BUFFER_SIZE NEEDS TO BE A POWER OF 2 ( e . g . 
8 , 16 , 32 ) because shifts and ors are used to do the ring - buffering . <nl> + # if ENABLED ( SDSUPPORT ) <nl> + # define BLOCK_BUFFER_SIZE 16 / / SD , LCD , Buttons take more memory , block buffer needs to be smaller <nl> + # else <nl> + # define BLOCK_BUFFER_SIZE 16 / / maximize block buffer <nl> + # endif <nl> + <nl> + / / @ section serial <nl> + <nl> + / / The ASCII buffer for serial input <nl> + # define MAX_CMD_SIZE 96 <nl> + # define BUFSIZE 4 <nl> + <nl> + / / Transmission to Host Buffer Size <nl> + / / To save 386 bytes of PROGMEM ( and TX_BUFFER_SIZE + 3 bytes of RAM ) set to 0 . <nl> + / / To buffer a simple " ok " you need 4 bytes . <nl> + / / For ADVANCED_OK ( M105 ) you need 32 bytes . <nl> + / / For debug - echo : 128 bytes for the optimal speed . <nl> + / / Other output doesn ' t need to be that speedy . <nl> + / / : [ 0 , 2 , 4 , 8 , 16 , 32 , 64 , 128 , 256 ] <nl> + # define TX_BUFFER_SIZE 0 <nl> + <nl> + / / Host Receive Buffer Size <nl> + / / Without XON / XOFF flow control ( see SERIAL_XON_XOFF below ) 32 bytes should be enough . <nl> + / / To use flow control , set this buffer size to at least 1024 bytes . <nl> + / / : [ 0 , 2 , 4 , 8 , 16 , 32 , 64 , 128 , 256 , 512 , 1024 , 2048 ] <nl> + / / # define RX_BUFFER_SIZE 1024 <nl> + <nl> + # if RX_BUFFER_SIZE > = 1024 <nl> + / / Enable to have the controller send XON / XOFF control characters to <nl> + / / the host to signal the RX buffer is becoming full . <nl> + / / # define SERIAL_XON_XOFF <nl> + # endif <nl> + <nl> + # if ENABLED ( SDSUPPORT ) <nl> + / / Enable this option to collect and display the maximum <nl> + / / RX queue usage after transferring a file to SD . <nl> + / / # define SERIAL_STATS_MAX_RX_QUEUED <nl> + <nl> + / / Enable this option to collect and display the number <nl> + / / of dropped bytes after a file transfer to SD . <nl> + / / # define SERIAL_STATS_DROPPED_RX <nl> + # endif <nl> + <nl> + / / Enable an emergency - command parser to intercept certain commands as they <nl> + / / enter the serial receive buffer , so they cannot be blocked . <nl> + / / Currently handles M108 , M112 , M410 <nl> + / / Does not work on boards using AT90USB ( USBCON ) processors ! <nl> + / / # define EMERGENCY_PARSER <nl> + <nl> + / / Bad Serial - connections can miss a received command by sending an ' ok ' <nl> + / / Therefore some clients abort after 30 seconds in a timeout . <nl> + / / Some other clients start sending commands while receiving a ' wait ' . <nl> + / / This " wait " is only sent when the buffer is empty . 1 second is a good value here . <nl> + / / # define NO_TIMEOUTS 1000 / / Milliseconds <nl> + <nl> + / / Some clients will have this feature soon . This could make the NO_TIMEOUTS unnecessary . <nl> + / / # define ADVANCED_OK <nl> + <nl> + / / @ section extras <nl> + <nl> + / * * <nl> + * Firmware - based and LCD - controlled retract <nl> + * <nl> + * Add G10 / G11 commands for automatic firmware - based retract / recover . <nl> + * Use M207 and M208 to define parameters for retract / recover . <nl> + * <nl> + * Use M209 to enable or disable auto - retract . <nl> + * With auto - retract enabled , all G1 E moves within the set range <nl> + * will be converted to firmware - based retract / recover moves . <nl> + * <nl> + * Be sure to turn off auto - retract during filament change . <nl> + * <nl> + * Note that M207 / M208 / M209 settings are saved to EEPROM . <nl> + * <nl> + * / <nl> + / / # define FWRETRACT / / ONLY PARTIALLY TESTED <nl> + # if ENABLED ( FWRETRACT ) <nl> + # define MIN_AUTORETRACT 0 . 
1 / / When auto - retract is on , convert E moves of this length and over <nl> + # define MAX_AUTORETRACT 10 . 0 / / Upper limit for auto - retract conversion <nl> + # define RETRACT_LENGTH 3 / / Default retract length ( positive mm ) <nl> + # define RETRACT_LENGTH_SWAP 13 / / Default swap retract length ( positive mm ) , for extruder change <nl> + # define RETRACT_FEEDRATE 45 / / Default feedrate for retracting ( mm / s ) <nl> + # define RETRACT_ZLIFT 0 / / Default retract Z - lift <nl> + # define RETRACT_RECOVER_LENGTH 0 / / Default additional recover length ( mm , added to retract length when recovering ) <nl> + # define RETRACT_RECOVER_LENGTH_SWAP 0 / / Default additional swap recover length ( mm , added to retract length when recovering from extruder change ) <nl> + # define RETRACT_RECOVER_FEEDRATE 8 / / Default feedrate for recovering from retraction ( mm / s ) <nl> + # define RETRACT_RECOVER_FEEDRATE_SWAP 8 / / Default feedrate for recovering from swap retraction ( mm / s ) <nl> + # endif <nl> + <nl> + / * * <nl> + * Extra Fan Speed <nl> + * Adds a secondary fan speed for each print - cooling fan . <nl> + * ' M106 P < fan > T3 - 255 ' : Set a secondary speed for < fan > <nl> + * ' M106 P < fan > T2 ' : Use the set secondary speed <nl> + * ' M106 P < fan > T1 ' : Restore the previous fan speed <nl> + * / <nl> + / / # define EXTRA_FAN_SPEED <nl> + <nl> + / * * <nl> + * Advanced Pause <nl> + * Experimental feature for filament change support and for parking the nozzle when paused . <nl> + * Adds the GCode M600 for initiating filament change . <nl> + * If PARK_HEAD_ON_PAUSE enabled , adds the GCode M125 to pause printing and park the nozzle . <nl> + * <nl> + * Requires an LCD display . <nl> + * Requires NOZZLE_PARK_FEATURE . <nl> + * This feature is required for the default FILAMENT_RUNOUT_SCRIPT . <nl> + * / <nl> + / / # define ADVANCED_PAUSE_FEATURE <nl> + # if ENABLED ( ADVANCED_PAUSE_FEATURE ) <nl> + # define PAUSE_PARK_RETRACT_FEEDRATE 60 / / ( mm / s ) Initial retract feedrate . <nl> + # define PAUSE_PARK_RETRACT_LENGTH 4 / / ( mm ) Initial retract . <nl> + / / This short retract is done immediately , before parking the nozzle . <nl> + # define FILAMENT_CHANGE_UNLOAD_FEEDRATE 10 / / ( mm / s ) Unload filament feedrate . This can be pretty fast . <nl> + # define FILAMENT_CHANGE_UNLOAD_LENGTH 420 / / ( mm ) The length of filament for a complete unload . <nl> + / / For Bowden , the full length of the tube and nozzle . <nl> + / / For direct drive , the full length of the nozzle . <nl> + / / Set to 0 for manual unloading . <nl> + # define FILAMENT_CHANGE_LOAD_FEEDRATE 8 / / ( mm / s ) Load filament feedrate . This can be pretty fast . <nl> + # define FILAMENT_CHANGE_LOAD_LENGTH 0 / / ( mm ) Load length of filament , from extruder gear to nozzle . <nl> + / / For Bowden , the full length of the tube and nozzle . <nl> + / / For direct drive , the full length of the nozzle . <nl> + # define ADVANCED_PAUSE_EXTRUDE_FEEDRATE 3 / / ( mm / s ) Extrude feedrate ( after loading ) . Should be slower than load feedrate . <nl> + # define ADVANCED_PAUSE_EXTRUDE_LENGTH 50 / / ( mm ) Length to extrude after loading . <nl> + / / Set to 0 for manual extrusion . <nl> + / / Filament can be extruded repeatedly from the Filament Change menu <nl> + / / until extrusion is consistent , and to purge old filament . <nl> + <nl> + / / Filament Unload does a Retract , Delay , and Purge first : <nl> + # define FILAMENT_UNLOAD_RETRACT_LENGTH 13 / / ( mm ) Unload initial retract length . 
<nl> + # define FILAMENT_UNLOAD_DELAY 5000 / / ( ms ) Delay for the filament to cool after retract . <nl> + # define FILAMENT_UNLOAD_PURGE_LENGTH 8 / / ( mm ) An unretract is done , then this length is purged . <nl> + <nl> + # define PAUSE_PARK_NOZZLE_TIMEOUT 120 / / ( seconds ) Time limit before the nozzle is turned off for safety . <nl> + # define FILAMENT_CHANGE_ALERT_BEEPS 6 / / Number of alert beeps to play when a response is needed . <nl> + # define PAUSE_PARK_NO_STEPPER_TIMEOUT / / Enable for XYZ steppers to stay powered on during filament change . <nl> + <nl> + # define PARK_HEAD_ON_PAUSE / / Park the nozzle during pause and filament change . <nl> + # define HOME_BEFORE_FILAMENT_CHANGE / / Ensure homing has been completed prior to parking for filament change <nl> + <nl> + / / # define FILAMENT_LOAD_UNLOAD_GCODES / / Add M701 / M702 Load / Unload G - codes , plus Load / Unload in the LCD Prepare menu . <nl> + / / # define FILAMENT_UNLOAD_ALL_EXTRUDERS / / Allow M702 to unload all extruders above a minimum target temp ( as set by M302 ) <nl> + # endif <nl> + <nl> + / / @ section tmc <nl> + <nl> + / * * <nl> + * Enable this section if you have TMC26X motor drivers . <nl> + * You will need to import the TMC26XStepper library into the Arduino IDE for this <nl> + * ( https : / / github . com / trinamic / TMC26XStepper . git ) <nl> + * / <nl> + / / # define HAVE_TMCDRIVER <nl> + <nl> + # if ENABLED ( HAVE_TMCDRIVER ) <nl> + <nl> + / / # define X_IS_TMC <nl> + / / # define X2_IS_TMC <nl> + / / # define Y_IS_TMC <nl> + / / # define Y2_IS_TMC <nl> + / / # define Z_IS_TMC <nl> + / / # define Z2_IS_TMC <nl> + / / # define E0_IS_TMC <nl> + / / # define E1_IS_TMC <nl> + / / # define E2_IS_TMC <nl> + / / # define E3_IS_TMC <nl> + / / # define E4_IS_TMC <nl> + <nl> + # define X_MAX_CURRENT 1000 / / in mA <nl> + # define X_SENSE_RESISTOR 91 / / in mOhms <nl> + # define X_MICROSTEPS 16 / / number of microsteps <nl> + <nl> + # define X2_MAX_CURRENT 1000 <nl> + # define X2_SENSE_RESISTOR 91 <nl> + # define X2_MICROSTEPS 16 <nl> + <nl> + # define Y_MAX_CURRENT 1000 <nl> + # define Y_SENSE_RESISTOR 91 <nl> + # define Y_MICROSTEPS 16 <nl> + <nl> + # define Y2_MAX_CURRENT 1000 <nl> + # define Y2_SENSE_RESISTOR 91 <nl> + # define Y2_MICROSTEPS 16 <nl> + <nl> + # define Z_MAX_CURRENT 1000 <nl> + # define Z_SENSE_RESISTOR 91 <nl> + # define Z_MICROSTEPS 16 <nl> + <nl> + # define Z2_MAX_CURRENT 1000 <nl> + # define Z2_SENSE_RESISTOR 91 <nl> + # define Z2_MICROSTEPS 16 <nl> + <nl> + # define E0_MAX_CURRENT 1000 <nl> + # define E0_SENSE_RESISTOR 91 <nl> + # define E0_MICROSTEPS 16 <nl> + <nl> + # define E1_MAX_CURRENT 1000 <nl> + # define E1_SENSE_RESISTOR 91 <nl> + # define E1_MICROSTEPS 16 <nl> + <nl> + # define E2_MAX_CURRENT 1000 <nl> + # define E2_SENSE_RESISTOR 91 <nl> + # define E2_MICROSTEPS 16 <nl> + <nl> + # define E3_MAX_CURRENT 1000 <nl> + # define E3_SENSE_RESISTOR 91 <nl> + # define E3_MICROSTEPS 16 <nl> + <nl> + # define E4_MAX_CURRENT 1000 <nl> + # define E4_SENSE_RESISTOR 91 <nl> + # define E4_MICROSTEPS 16 <nl> + <nl> + # endif <nl> + <nl> + / / @ section TMC2130 , TMC2208 <nl> + <nl> + / * * <nl> + * Enable this for SilentStepStick Trinamic TMC2130 SPI - configurable stepper drivers . <nl> + * <nl> + * You ' ll also need the TMC2130Stepper Arduino library <nl> + * ( https : / / github . com / teemuatlut / TMC2130Stepper ) . 
<nl> + * <nl> + * To use TMC2130 stepper drivers in SPI mode connect your SPI pins to <nl> + * the hardware SPI interface on your board and define the required CS pins <nl> + * in your ` pins_MYBOARD . h ` file . ( e . g . , RAMPS 1 . 4 uses AUX3 pins ` X_CS_PIN 53 ` , ` Y_CS_PIN 49 ` , etc . ) . <nl> + * You may also use software SPI if you wish to use general purpose IO pins . <nl> + * / <nl> + / / # define HAVE_TMC2130 <nl> + <nl> + / * * <nl> + * Enable this for SilentStepStick Trinamic TMC2208 UART - configurable stepper drivers . <nl> + * Connect # _SERIAL_TX_PIN to the driver side PDN_UART pin with a 1K resistor . <nl> + * To use the reading capabilities , also connect # _SERIAL_RX_PIN <nl> + * to PDN_UART without a resistor . <nl> + * The drivers can also be used with hardware serial . <nl> + * <nl> + * You ' ll also need the TMC2208Stepper Arduino library <nl> + * ( https : / / github . com / teemuatlut / TMC2208Stepper ) . <nl> + * / <nl> + / / # define HAVE_TMC2208 <nl> + <nl> + # if ENABLED ( HAVE_TMC2130 ) | | ENABLED ( HAVE_TMC2208 ) <nl> + <nl> + / / CHOOSE YOUR MOTORS HERE , THIS IS MANDATORY <nl> + / / # define X_IS_TMC2130 <nl> + / / # define X2_IS_TMC2130 <nl> + / / # define Y_IS_TMC2130 <nl> + / / # define Y2_IS_TMC2130 <nl> + / / # define Z_IS_TMC2130 <nl> + / / # define Z2_IS_TMC2130 <nl> + / / # define E0_IS_TMC2130 <nl> + / / # define E1_IS_TMC2130 <nl> + / / # define E2_IS_TMC2130 <nl> + / / # define E3_IS_TMC2130 <nl> + / / # define E4_IS_TMC2130 <nl> + <nl> + / / # define X_IS_TMC2208 <nl> + / / # define X2_IS_TMC2208 <nl> + / / # define Y_IS_TMC2208 <nl> + / / # define Y2_IS_TMC2208 <nl> + / / # define Z_IS_TMC2208 <nl> + / / # define Z2_IS_TMC2208 <nl> + / / # define E0_IS_TMC2208 <nl> + / / # define E1_IS_TMC2208 <nl> + / / # define E2_IS_TMC2208 <nl> + / / # define E3_IS_TMC2208 <nl> + / / # define E4_IS_TMC2208 <nl> + <nl> + / * * <nl> + * Stepper driver settings <nl> + * / <nl> + <nl> + # define R_SENSE 0 . 11 / / R_sense resistor for SilentStepStick2130 <nl> + # define HOLD_MULTIPLIER 0 . 5 / / Scales down the holding current from run current <nl> + # define INTERPOLATE true / / Interpolate X / Y / Z_MICROSTEPS to 256 <nl> + <nl> + # define X_CURRENT 800 / / rms current in mA . Multiply by 1 . 41 for peak current . <nl> + # define X_MICROSTEPS 16 / / 0 . . 256 <nl> + <nl> + # define Y_CURRENT 800 <nl> + # define Y_MICROSTEPS 16 <nl> + <nl> + # define Z_CURRENT 800 <nl> + # define Z_MICROSTEPS 16 <nl> + <nl> + # define X2_CURRENT 800 <nl> + # define X2_MICROSTEPS 16 <nl> + <nl> + # define Y2_CURRENT 800 <nl> + # define Y2_MICROSTEPS 16 <nl> + <nl> + # define Z2_CURRENT 800 <nl> + # define Z2_MICROSTEPS 16 <nl> + <nl> + # define E0_CURRENT 800 <nl> + # define E0_MICROSTEPS 16 <nl> + <nl> + # define E1_CURRENT 800 <nl> + # define E1_MICROSTEPS 16 <nl> + <nl> + # define E2_CURRENT 800 <nl> + # define E2_MICROSTEPS 16 <nl> + <nl> + # define E3_CURRENT 800 <nl> + # define E3_MICROSTEPS 16 <nl> + <nl> + # define E4_CURRENT 800 <nl> + # define E4_MICROSTEPS 16 <nl> + <nl> + / * * <nl> + * Use software SPI for TMC2130 . <nl> + * The default SW SPI pins are defined the respective pins files , <nl> + * but you can override or define them here . <nl> + * / <nl> + / / # define TMC_USE_SW_SPI <nl> + / / # define TMC_SW_MOSI - 1 <nl> + / / # define TMC_SW_MISO - 1 <nl> + / / # define TMC_SW_SCK - 1 <nl> + <nl> + / * * <nl> + * Use Trinamic ' s ultra quiet stepping mode . <nl> + * When disabled , Marlin will use spreadCycle stepping mode . 
<nl> + * / <nl> + # define STEALTHCHOP <nl> + <nl> + / * * <nl> + * Monitor Trinamic TMC2130 and TMC2208 drivers for error conditions , <nl> + * like overtemperature and short to ground . TMC2208 requires hardware serial . <nl> + * In the case of overtemperature Marlin can decrease the driver current until the error condition clears . <nl> + * Other detected conditions can be used to stop the current print . <nl> + * Relevant g - codes : <nl> + * M906 - Set or get motor current in milliamps using axis codes X , Y , Z , E . Report values if no axis codes given . <nl> + * M911 - Report stepper driver overtemperature pre - warn condition . <nl> + * M912 - Clear stepper driver overtemperature pre - warn condition flag . <nl> + * M122 S0 / 1 - Report driver parameters ( Requires TMC_DEBUG ) <nl> + * / <nl> + / / # define MONITOR_DRIVER_STATUS <nl> + <nl> + # if ENABLED ( MONITOR_DRIVER_STATUS ) <nl> + # define CURRENT_STEP_DOWN 50 / / [ mA ] <nl> + # define REPORT_CURRENT_CHANGE <nl> + # define STOP_ON_ERROR <nl> + # endif <nl> + <nl> + / * * <nl> + * The driver will switch to spreadCycle when stepper speed is over HYBRID_THRESHOLD . <nl> + * This mode allows for faster movements at the expense of higher noise levels . <nl> + * STEALTHCHOP needs to be enabled . <nl> + * M913 X / Y / Z / E to live tune the setting <nl> + * / <nl> + / / # define HYBRID_THRESHOLD <nl> + <nl> + # define X_HYBRID_THRESHOLD 100 / / [ mm / s ] <nl> + # define X2_HYBRID_THRESHOLD 100 <nl> + # define Y_HYBRID_THRESHOLD 100 <nl> + # define Y2_HYBRID_THRESHOLD 100 <nl> + # define Z_HYBRID_THRESHOLD 3 <nl> + # define Z2_HYBRID_THRESHOLD 3 <nl> + # define E0_HYBRID_THRESHOLD 30 <nl> + # define E1_HYBRID_THRESHOLD 30 <nl> + # define E2_HYBRID_THRESHOLD 30 <nl> + # define E3_HYBRID_THRESHOLD 30 <nl> + # define E4_HYBRID_THRESHOLD 30 <nl> + <nl> + / * * <nl> + * Use stallGuard2 to sense an obstacle and trigger an endstop . <nl> + * You need to place a wire from the driver ' s DIAG1 pin to the X / Y endstop pin . <nl> + * X , Y , and Z homing will always be done in spreadCycle mode . <nl> + * <nl> + * X / Y / Z_HOMING_SENSITIVITY is used for tuning the trigger sensitivity . <nl> + * Higher values make the system LESS sensitive . <nl> + * Lower values make the system MORE sensitive . <nl> + * Too low values can lead to false positives , while too high values will collide the axis without triggering . <nl> + * It is advised to set X / Y / Z_HOME_BUMP_MM to 0 . <nl> + * M914 X / Y / Z to live tune the setting <nl> + * / <nl> + / / # define SENSORLESS_HOMING / / TMC2130 only <nl> + <nl> + # if ENABLED ( SENSORLESS_HOMING ) <nl> + # define X_HOMING_SENSITIVITY 8 <nl> + # define Y_HOMING_SENSITIVITY 8 <nl> + # define Z_HOMING_SENSITIVITY 8 <nl> + # endif <nl> + <nl> + / * * <nl> + * Enable M122 debugging command for TMC stepper drivers . <nl> + * M122 S0 / 1 will enable continuous reporting . <nl> + * / <nl> + / / # define TMC_DEBUG <nl> + <nl> + / * * <nl> + * M915 Z Axis Calibration <nl> + * <nl> + * - Adjust Z stepper current , <nl> + * - Drive the Z axis to its physical maximum , and <nl> + * - Home Z to account for the lost steps . <nl> + * <nl> + * Use M915 Snn to specify the current . <nl> + * Use M925 Znn to add extra Z height to Z_MAX_POS . <nl> + * / <nl> + / / # define TMC_Z_CALIBRATION <nl> + # if ENABLED ( TMC_Z_CALIBRATION ) <nl> + # define CALIBRATION_CURRENT 250 <nl> + # define CALIBRATION_EXTRA_HEIGHT 10 <nl> + # endif <nl> + <nl> + / * * <nl> + * You can set your own advanced settings by filling in predefined functions .
<nl> + * A list of available functions can be found on the library github page <nl> + * https : / / github . com / teemuatlut / TMC2130Stepper <nl> + * https : / / github . com / teemuatlut / TMC2208Stepper <nl> + * <nl> + * Example : <nl> + * # define TMC_ADV ( ) { \ <nl> + * stepperX . diag0_temp_prewarn ( 1 ) ; \ <nl> + * stepperY . interpolate ( 0 ) ; \ <nl> + * } <nl> + * / <nl> + # define TMC_ADV ( ) { } <nl> + <nl> + # endif / / TMC2130 | | TMC2208 <nl> + <nl> + / / @ section L6470 <nl> + <nl> + / * * <nl> + * Enable this section if you have L6470 motor drivers . <nl> + * You need to import the L6470 library into the Arduino IDE for this . <nl> + * ( https : / / github . com / ameyer / Arduino - L6470 ) <nl> + * / <nl> + <nl> + / / # define HAVE_L6470DRIVER <nl> + # if ENABLED ( HAVE_L6470DRIVER ) <nl> + <nl> + / / # define X_IS_L6470 <nl> + / / # define X2_IS_L6470 <nl> + / / # define Y_IS_L6470 <nl> + / / # define Y2_IS_L6470 <nl> + / / # define Z_IS_L6470 <nl> + / / # define Z2_IS_L6470 <nl> + / / # define E0_IS_L6470 <nl> + / / # define E1_IS_L6470 <nl> + / / # define E2_IS_L6470 <nl> + / / # define E3_IS_L6470 <nl> + / / # define E4_IS_L6470 <nl> + <nl> + # define X_MICROSTEPS 16 / / number of microsteps <nl> + # define X_OVERCURRENT 2000 / / max current in mA . If the current goes over this value , the driver will switch off <nl> + # define X_STALLCURRENT 1500 / / current in mA where the driver will detect a stall <nl> + <nl> + # define X2_MICROSTEPS 16 <nl> + # define X2_OVERCURRENT 2000 <nl> + # define X2_STALLCURRENT 1500 <nl> + <nl> + # define Y_MICROSTEPS 16 <nl> + # define Y_OVERCURRENT 2000 <nl> + # define Y_STALLCURRENT 1500 <nl> + <nl> + # define Y2_MICROSTEPS 16 <nl> + # define Y2_OVERCURRENT 2000 <nl> + # define Y2_STALLCURRENT 1500 <nl> + <nl> + # define Z_MICROSTEPS 16 <nl> + # define Z_OVERCURRENT 2000 <nl> + # define Z_STALLCURRENT 1500 <nl> + <nl> + # define Z2_MICROSTEPS 16 <nl> + # define Z2_OVERCURRENT 2000 <nl> + # define Z2_STALLCURRENT 1500 <nl> + <nl> + # define E0_MICROSTEPS 16 <nl> + # define E0_OVERCURRENT 2000 <nl> + # define E0_STALLCURRENT 1500 <nl> + <nl> + # define E1_MICROSTEPS 16 <nl> + # define E1_OVERCURRENT 2000 <nl> + # define E1_STALLCURRENT 1500 <nl> + <nl> + # define E2_MICROSTEPS 16 <nl> + # define E2_OVERCURRENT 2000 <nl> + # define E2_STALLCURRENT 1500 <nl> + <nl> + # define E3_MICROSTEPS 16 <nl> + # define E3_OVERCURRENT 2000 <nl> + # define E3_STALLCURRENT 1500 <nl> + <nl> + # define E4_MICROSTEPS 16 <nl> + # define E4_OVERCURRENT 2000 <nl> + # define E4_STALLCURRENT 1500 <nl> + <nl> + # endif <nl> + <nl> + / * * <nl> + * TWI / I2C BUS <nl> + * <nl> + * This feature is an EXPERIMENTAL feature so it shall not be used on production <nl> + * machines . Enabling this will allow you to send and receive I2C data from slave <nl> + * devices on the bus .
<nl> + * <nl> + * ; Example # 1 <nl> + * ; This macro send the string " Marlin " to the slave device with address 0x63 ( 99 ) <nl> + * ; It uses multiple M260 commands with one B < base 10 > arg <nl> + * M260 A99 ; Target slave address <nl> + * M260 B77 ; M <nl> + * M260 B97 ; a <nl> + * M260 B114 ; r <nl> + * M260 B108 ; l <nl> + * M260 B105 ; i <nl> + * M260 B110 ; n <nl> + * M260 S1 ; Send the current buffer <nl> + * <nl> + * ; Example # 2 <nl> + * ; Request 6 bytes from slave device with address 0x63 ( 99 ) <nl> + * M261 A99 B5 <nl> + * <nl> + * ; Example # 3 <nl> + * ; Example serial output of a M261 request <nl> + * echo : i2c - reply : from : 99 bytes : 5 data : hello <nl> + * / <nl> + <nl> + / / @ section i2cbus <nl> + <nl> + / / # define EXPERIMENTAL_I2CBUS <nl> + # define I2C_SLAVE_ADDRESS 0 / / Set a value from 8 to 127 to act as a slave <nl> + <nl> + / / @ section extras <nl> + <nl> + / * * <nl> + * Spindle & Laser control <nl> + * <nl> + * Add the M3 , M4 , and M5 commands to turn the spindle / laser on and off , and <nl> + * to set spindle speed , spindle direction , and laser power . <nl> + * <nl> + * SuperPid is a router / spindle speed controller used in the CNC milling community . <nl> + * Marlin can be used to turn the spindle on and off . It can also be used to set <nl> + * the spindle speed from 5 , 000 to 30 , 000 RPM . <nl> + * <nl> + * You ' ll need to select a pin for the ON / OFF function and optionally choose a 0 - 5V <nl> + * hardware PWM pin for the speed control and a pin for the rotation direction . <nl> + * <nl> + * See http : / / marlinfw . org / docs / configuration / laser_spindle . html for more config details . <nl> + * / <nl> + / / # define SPINDLE_LASER_ENABLE <nl> + # if ENABLED ( SPINDLE_LASER_ENABLE ) <nl> + <nl> + # define SPINDLE_LASER_ENABLE_INVERT false / / set to " true " if the on / off function is reversed <nl> + # define SPINDLE_LASER_PWM true / / set to true if your controller supports setting the speed / power <nl> + # define SPINDLE_LASER_PWM_INVERT true / / set to " true " if the speed / power goes up when you want it to go slower <nl> + # define SPINDLE_LASER_POWERUP_DELAY 5000 / / delay in milliseconds to allow the spindle / laser to come up to speed / power <nl> + # define SPINDLE_LASER_POWERDOWN_DELAY 5000 / / delay in milliseconds to allow the spindle to stop <nl> + # define SPINDLE_DIR_CHANGE true / / set to true if your spindle controller supports changing spindle direction <nl> + # define SPINDLE_INVERT_DIR false <nl> + # define SPINDLE_STOP_ON_DIR_CHANGE true / / set to true if Marlin should stop the spindle before changing rotation direction <nl> + <nl> + / * * <nl> + * The M3 & M4 commands use the following equation to convert PWM duty cycle to speed / power <nl> + * <nl> + * SPEED / POWER = PWM duty cycle * SPEED_POWER_SLOPE + SPEED_POWER_INTERCEPT <nl> + * where PWM duty cycle varies from 0 to 255 <nl> + * <nl> + * set the following for your controller ( ALL MUST BE SET ) <nl> + * / <nl> + <nl> + # define SPEED_POWER_SLOPE 118 . 4 <nl> + # define SPEED_POWER_INTERCEPT 0 <nl> + # define SPEED_POWER_MIN 5000 <nl> + # define SPEED_POWER_MAX 30000 / / SuperPID router controller 0 - 30 , 000 RPM <nl> + <nl> + / / # define SPEED_POWER_SLOPE 0 . 
3922 <nl> + / / # define SPEED_POWER_INTERCEPT 0 <nl> + / / # define SPEED_POWER_MIN 10 <nl> + / / # define SPEED_POWER_MAX 100 / / 0 - 100 % <nl> + # endif <nl> + <nl> + / * * <nl> + * Filament Width Sensor <nl> + * <nl> + * Measures the filament width in real - time and adjusts <nl> + * flow rate to compensate for any irregularities . <nl> + * <nl> + * Also allows the measured filament diameter to set the <nl> + * extrusion rate , so the slicer only has to specify the <nl> + * volume . <nl> + * <nl> + * Only a single extruder is supported at this time . <nl> + * <nl> + * 34 RAMPS_14 : Analog input 5 on the AUX2 connector <nl> + * 81 PRINTRBOARD : Analog input 2 on the Exp1 connector ( version B , C , D , E ) <nl> + * 301 RAMBO : Analog input 3 <nl> + * <nl> + * Note : May require analog pins to be defined for other boards . <nl> + * / <nl> + / / # define FILAMENT_WIDTH_SENSOR <nl> + <nl> + # if ENABLED ( FILAMENT_WIDTH_SENSOR ) <nl> + # define FILAMENT_SENSOR_EXTRUDER_NUM 0 / / Index of the extruder that has the filament sensor . : [ 0 , 1 , 2 , 3 , 4 ] <nl> + # define MEASUREMENT_DELAY_CM 14 / / ( cm ) The distance from the filament sensor to the melting chamber <nl> + <nl> + # define FILWIDTH_ERROR_MARGIN 1 . 0 / / ( mm ) If a measurement differs too much from nominal width ignore it <nl> + # define MAX_MEASUREMENT_DELAY 20 / / ( bytes ) Buffer size for stored measurements ( 1 byte per cm ) . Must be larger than MEASUREMENT_DELAY_CM . <nl> + <nl> + # define DEFAULT_MEASURED_FILAMENT_DIA DEFAULT_NOMINAL_FILAMENT_DIA / / Set measured to nominal initially <nl> + <nl> + / / Display filament width on the LCD status line . Status messages will expire after 5 seconds . <nl> + / / # define FILAMENT_LCD_DISPLAY <nl> + # endif <nl> + <nl> + / * * <nl> + * CNC Coordinate Systems <nl> + * <nl> + * Enables G53 and G54 - G59 . 3 commands to select coordinate systems <nl> + * and G92 . 1 to reset the workspace to native machine space . <nl> + * / <nl> + / / # define CNC_COORDINATE_SYSTEMS <nl> + <nl> + / * * <nl> + * M43 - display pin status , watch pins for changes , watch endstops & toggle LED , Z servo probe test , toggle pins <nl> + * / <nl> + / / # define PINS_DEBUGGING <nl> + <nl> + / * * <nl> + * Auto - report temperatures with M155 S < seconds > <nl> + * / <nl> + # define AUTO_REPORT_TEMPERATURES <nl> + <nl> + / * * <nl> + * Include capabilities in M115 output <nl> + * / <nl> + # define EXTENDED_CAPABILITIES_REPORT <nl> + <nl> + / * * <nl> + * Disable all Volumetric extrusion options <nl> + * / <nl> + / / # define NO_VOLUMETRICS <nl> + <nl> + # if DISABLED ( NO_VOLUMETRICS ) <nl> + / * * <nl> + * Volumetric extrusion default state <nl> + * Activate to make volumetric extrusion the default method , <nl> + * with DEFAULT_NOMINAL_FILAMENT_DIA as the default diameter . <nl> + * <nl> + * M200 D0 to disable , M200 Dn to set a new diameter . <nl> + * / <nl> + / / # define VOLUMETRIC_DEFAULT_ON <nl> + # endif <nl> + <nl> + / * * <nl> + * Enable this option for a leaner build of Marlin that removes all <nl> + * workspace offsets , simplifying coordinate transformations , leveling , etc . <nl> + * <nl> + * - M206 and M428 are disabled . <nl> + * - G92 will revert to its behavior from Marlin 1 . 0 . <nl> + * / <nl> + / / # define NO_WORKSPACE_OFFSETS <nl> + <nl> + / * * <nl> + * Set the number of proportional font spaces required to fill up a typical character space . <nl> + * This can help to better align the output of commands like ` G29 O ` Mesh Output . 
<nl> + * <nl> + * For clients that use a fixed - width font ( like OctoPrint ) , leave this set to 1 . 0 . <nl> + * Otherwise , adjust according to your client and font . <nl> + * / <nl> + # define PROPORTIONAL_FONT_RATIO 1 . 0 <nl> + <nl> + / * * <nl> + * Spend 28 bytes of SRAM to optimize the GCode parser <nl> + * / <nl> + # define FASTER_GCODE_PARSER <nl> + <nl> + / * * <nl> + * User - defined menu items that execute custom GCode <nl> + * / <nl> + / / # define CUSTOM_USER_MENUS <nl> + # if ENABLED ( CUSTOM_USER_MENUS ) <nl> + # define USER_SCRIPT_DONE " M117 User Script Done " <nl> + # define USER_SCRIPT_AUDIBLE_FEEDBACK <nl> + / / # define USER_SCRIPT_RETURN / / Return to status screen after a script <nl> + <nl> + # define USER_DESC_1 " Home & UBL Info " <nl> + # define USER_GCODE_1 " G28 \ nG29 W " <nl> + <nl> + # define USER_DESC_2 " Preheat for PLA " <nl> + # define USER_GCODE_2 " M140 S " STRINGIFY ( PREHEAT_1_TEMP_BED ) " \ nM104 S " STRINGIFY ( PREHEAT_1_TEMP_HOTEND ) <nl> + <nl> + # define USER_DESC_3 " Preheat for ABS " <nl> + # define USER_GCODE_3 " M140 S " STRINGIFY ( PREHEAT_2_TEMP_BED ) " \ nM104 S " STRINGIFY ( PREHEAT_2_TEMP_HOTEND ) <nl> + <nl> + # define USER_DESC_4 " Heat Bed / Home / Level " <nl> + # define USER_GCODE_4 " M140 S " STRINGIFY ( PREHEAT_2_TEMP_BED ) " \ nG28 \ nG29 " <nl> + <nl> + # define USER_DESC_5 " Home & Info " <nl> + # define USER_GCODE_5 " G28 \ nM503 " <nl> + # endif <nl> + <nl> + / * * <nl> + * Specify an action command to send to the host when the printer is killed . <nl> + * Will be sent in the form ' / / action : ACTION_ON_KILL ' , e . g . ' / / action : poweroff ' . <nl> + * The host must be configured to handle the action command . <nl> + * / <nl> + / / # define ACTION_ON_KILL " poweroff " <nl> + <nl> + / * * <nl> + * Specify an action command to send to the host on pause and resume . <nl> + * Will be sent in the form ' / / action : ACTION_ON_PAUSE ' , e . g . ' / / action : pause ' . <nl> + * The host must be configured to handle the action command . <nl> + * / <nl> + / / # define ACTION_ON_PAUSE " pause " <nl> + / / # define ACTION_ON_RESUME " resume " <nl> + <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + / / = = = = = = = = = = = = = = = = = = = = = = I2C Position Encoder Settings = = = = = = = = = = = = = = = = = = = = = = <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + <nl> + / * * <nl> + * I2C position encoders for closed loop control . <nl> + * Developed by Chris Barr at Aus3D . <nl> + * <nl> + * Wiki : http : / / wiki . aus3d . com . au / Magnetic_Encoder <nl> + * Github : https : / / github . com / Aus3D / MagneticEncoder <nl> + * <nl> + * Supplier : http : / / aus3d . com . au / magnetic - encoder - module <nl> + * Alternative Supplier : http : / / reliabuild3d . com / <nl> + * <nl> + * Reilabuild encoders have been modified to improve reliability . <nl> + * / <nl> + <nl> + / / # define I2C_POSITION_ENCODERS <nl> + # if ENABLED ( I2C_POSITION_ENCODERS ) <nl> + <nl> + # define I2CPE_ENCODER_CNT 1 / / The number of encoders installed ; max of 5 <nl> + / / encoders supported currently . <nl> + <nl> + # define I2CPE_ENC_1_ADDR I2CPE_PRESET_ADDR_X / / I2C address of the encoder . 30 - 200 . <nl> + # define I2CPE_ENC_1_AXIS X_AXIS / / Axis the encoder module is installed on . 
< X | Y | Z | E > _AXIS . <nl> + # define I2CPE_ENC_1_TYPE I2CPE_ENC_TYPE_LINEAR / / Type of encoder : I2CPE_ENC_TYPE_LINEAR - or - <nl> + / / I2CPE_ENC_TYPE_ROTARY . <nl> + # define I2CPE_ENC_1_TICKS_UNIT 2048 / / 1024 for magnetic strips with 2mm poles ; 2048 for <nl> + / / 1mm poles . For linear encoders this is ticks / mm , <nl> + / / for rotary encoders this is ticks / revolution . <nl> + / / # define I2CPE_ENC_1_TICKS_REV ( 16 * 200 ) / / Only needed for rotary encoders ; number of stepper <nl> + / / steps per full revolution ( motor steps / rev * microstepping ) <nl> + / / # define I2CPE_ENC_1_INVERT / / Invert the direction of axis travel . <nl> + # define I2CPE_ENC_1_EC_METHOD I2CPE_ECM_MICROSTEP / / Type of error correction . <nl> + # define I2CPE_ENC_1_EC_THRESH 0 . 10 / / Threshold size for error ( in mm ) above which the <nl> + / / printer will attempt to correct the error ; errors <nl> + / / smaller than this are ignored to minimize effects of <nl> + / / measurement noise / latency ( filter ) . <nl> + <nl> + # define I2CPE_ENC_2_ADDR I2CPE_PRESET_ADDR_Y / / Same as above , but for encoder 2 . <nl> + # define I2CPE_ENC_2_AXIS Y_AXIS <nl> + # define I2CPE_ENC_2_TYPE I2CPE_ENC_TYPE_LINEAR <nl> + # define I2CPE_ENC_2_TICKS_UNIT 2048 <nl> + / / # define I2CPE_ENC_2_TICKS_REV ( 16 * 200 ) <nl> + / / # define I2CPE_ENC_2_INVERT <nl> + # define I2CPE_ENC_2_EC_METHOD I2CPE_ECM_MICROSTEP <nl> + # define I2CPE_ENC_2_EC_THRESH 0 . 10 <nl> + <nl> + # define I2CPE_ENC_3_ADDR I2CPE_PRESET_ADDR_Z / / Encoder 3 . Add additional configuration options <nl> + # define I2CPE_ENC_3_AXIS Z_AXIS / / as above , or use defaults below . <nl> + <nl> + # define I2CPE_ENC_4_ADDR I2CPE_PRESET_ADDR_E / / Encoder 4 . <nl> + # define I2CPE_ENC_4_AXIS E_AXIS <nl> + <nl> + # define I2CPE_ENC_5_ADDR 34 / / Encoder 5 . <nl> + # define I2CPE_ENC_5_AXIS E_AXIS <nl> + <nl> + / / Default settings for encoders which are enabled , but without settings configured above . <nl> + # define I2CPE_DEF_TYPE I2CPE_ENC_TYPE_LINEAR <nl> + # define I2CPE_DEF_ENC_TICKS_UNIT 2048 <nl> + # define I2CPE_DEF_TICKS_REV ( 16 * 200 ) <nl> + # define I2CPE_DEF_EC_METHOD I2CPE_ECM_NONE <nl> + # define I2CPE_DEF_EC_THRESH 0 . 1 <nl> + <nl> + / / # define I2CPE_ERR_THRESH_ABORT 100 . 0 / / Threshold size for error ( in mm ) on any given <nl> + / / axis after which the printer will abort . Comment out to <nl> + / / disable abort behaviour . <nl> + <nl> + # define I2CPE_TIME_TRUSTED 10000 / / After an encoder fault , there must be no further fault <nl> + / / for this amount of time ( in ms ) before the encoder <nl> + / / is trusted again . <nl> + <nl> + / * * <nl> + * Position is checked every time a new command is executed from the buffer but during long moves , <nl> + * this setting determines the minimum update time between checks . A value of 100 works well with <nl> + * error rolling average when attempting to correct only for skips and not for vibration . <nl> + * / <nl> + # define I2CPE_MIN_UPD_TIME_MS 4 / / ( ms ) Minimum time between encoder checks . <nl> + <nl> + / / Use a rolling average to identify persistent errors that indicate skips , as opposed to vibration and noise . <nl> + # define I2CPE_ERR_ROLLING_AVERAGE <nl> + <nl> + # endif / / I2C_POSITION_ENCODERS <nl> + <nl> + / * * <nl> + * MAX7219 Debug Matrix <nl> + * <nl> + * Add support for a low - cost 8x8 LED Matrix based on the Max7219 chip , which can be used as a status <nl> + * display . Requires 3 signal wires .
Some useful debug options are included to demonstrate its usage . <nl> + * <nl> + * Fully assembled MAX7219 boards can be found on the internet for under $ 2 ( US ) . <nl> + * For example , see https : / / www . ebay . com / sch / i . html ? _nkw = 332349290049 <nl> + * / <nl> + / / # define MAX7219_DEBUG <nl> + # if ENABLED ( MAX7219_DEBUG ) <nl> + # define MAX7219_CLK_PIN 64 / / 77 on Re - ARM / / Configuration of the 3 pins to control the display <nl> + # define MAX7219_DIN_PIN 57 / / 78 on Re - ARM <nl> + # define MAX7219_LOAD_PIN 44 / / 79 on Re - ARM <nl> + <nl> + / * * <nl> + * Sample debug features <nl> + * If you add more debug displays , be careful to avoid conflicts ! <nl> + * / <nl> + # define MAX7219_DEBUG_PRINTER_ALIVE / / Blink corner LED of 8x8 matrix to show that the firmware is functioning <nl> + # define MAX7219_DEBUG_STEPPER_HEAD 3 / / Show the stepper queue head position on this and the next LED matrix row <nl> + # define MAX7219_DEBUG_STEPPER_TAIL 5 / / Show the stepper queue tail position on this and the next LED matrix row <nl> + <nl> + # define MAX7219_DEBUG_STEPPER_QUEUE 0 / / Show the current stepper queue depth on this and the next LED matrix row <nl> + / / If you experience stuttering , reboots , etc . this option can reveal how <nl> + / / tweaks made to the configuration are affecting the printer in real - time . <nl> + # endif <nl> + <nl> + / * * <nl> + * NanoDLP Sync support <nl> + * <nl> + * Add support for Synchronized Z moves when using with NanoDLP . G0 / G1 axis moves will output " Z_move_comp " <nl> + * string to enable synchronization with DLP projector exposure . This change will allow to use <nl> + * [ [ WaitForDoneMessage ] ] instead of populating your gcode with M400 commands <nl> + * / <nl> + / / # define NANODLP_Z_SYNC <nl> + # if ENABLED ( NANODLP_Z_SYNC ) <nl> + / / # define NANODLP_ALL_AXIS / / Enables " Z_move_comp " output on any axis move . <nl> + / / Default behaviour is limited to Z axis only . <nl> + # endif <nl> + <nl> + # endif / / CONFIGURATION_ADV_H <nl> new file mode 100644 <nl> index 00000000000 . . 38fb68f7da9 <nl> mmm / dev / null <nl> ppp b / Marlin / src / config / examples / Creality / CR - 10mini / _Bootscreen . h <nl> <nl> + / * * <nl> + * Marlin 3D Printer Firmware <nl> + * Copyright ( C ) 2016 MarlinFirmware [ https : / / github . com / MarlinFirmware / Marlin ] <nl> + * <nl> + * Based on Sprinter and grbl . <nl> + * Copyright ( C ) 2011 Camiel Gubbels / Erik van der Zalm <nl> + * <nl> + * This program is free software : you can redistribute it and / or modify <nl> + * it under the terms of the GNU General Public License as published by <nl> + * the Free Software Foundation , either version 3 of the License , or <nl> + * ( at your option ) any later version . <nl> + * <nl> + * This program is distributed in the hope that it will be useful , <nl> + * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + * GNU General Public License for more details . <nl> + * <nl> + * You should have received a copy of the GNU General Public License <nl> + * along with this program . If not , see < http : / / www . gnu . org / licenses / > . <nl> + * <nl> + * / <nl> + <nl> + / * * <nl> + * Custom Boot Screen bitmap <nl> + * <nl> + * Place this file in the root with your configuration files <nl> + * and enable SHOW_CUSTOM_BOOTSCREEN in Configuration . h . <nl> + * <nl> + * Use the Marlin Bitmap Converter to make your own : <nl> + * http : / / marlinfw . 
org / tools / u8glib / converter . html <nl> + * / <nl> + <nl> + # define CUSTOM_BOOTSCREEN_TIMEOUT 1000 <nl> + # define CUSTOM_BOOTSCREEN_BMPWIDTH 128 <nl> + <nl> + const unsigned char custom_start_bmp [ ] PROGMEM = { <nl> + B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B01100000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B11111100 , B00000000 , B00000000 , <nl> + B00001111 , B11110000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000111 , B11100000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000001 , B10000110 , B00011111 , B11000000 , <nl> + B00011000 , B01110000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B01100000 , B00111100 , B00001100 , B00000000 , B00000000 , B00000001 , B10000011 , B00001100 , B01100000 , <nl> + B00010000 , B00110000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B01100000 , B00111100 , B00001100 , B00000000 , B00000000 , B00000001 , B10000011 , B00001100 , B00110000 , <nl> + B00110000 , B00110000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B01100000 , B00000000 , B00001100 , B00000000 , B00000000 , B00000000 , B00000011 , B00001100 , B00011000 , <nl> + B00110000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B01100000 , B00000000 , B00111111 , B00001111 , B00111100 , B00000000 , B00000011 , B00001100 , B00001100 , <nl> + B01100000 , B00000001 , B11011111 , B00001111 , B11100000 , B11111110 , B00000000 , B01100000 , B00011100 , B00011100 , B00000110 , B00011000 , B00000000 , B00000110 , B00001100 , B00001100 , <nl> + B01100000 , B00000000 , B11110011 , B00011000 , B00110001 , B10000011 , B00000000 , B01100000 , B00001100 , B00001100 , B00000011 , B00011000 , B00000000 , B00011110 , B00001100 , B00001100 , <nl> + B01100000 , B00000000 , B11100000 , B00110000 , B00111001 , B10000011 , B00000000 , B01100000 , B00001100 , B00001100 , B00000011 , B00110000 , B00000000 , B00000011 , B00001100 , B00001100 , <nl> + B01100000 , B00000000 , B11000000 , B00110000 , B00111000 , B00001111 , B00000000 , B01100000 , B00001100 , B00001100 , B00000011 , B00110000 , B00000000 , B00000001 , B10001100 , B00001100 , <nl> + B01100000 , B00000000 , B11000000 , B00111111 , B11111000 , B11111011 , B00000000 , B01100000 , B00001100 , B00001100 , B00000011 , B00110000 , B00000000 , B00000001 , B10001100 , B00001100 , <nl> + B01100000 , B00110000 , B11000000 , B00110000 , B00000001 , B10000011 , B00000000 , B01100000 , B00001100 , B00001100 , B00000001 , B11110000 , B00000001 , B10000001 , B10001100 , B00001100 , <nl> + B01100000 , B00110000 , B11000000 , B00110000 , B00000001 , B10000011 , B00000000 , B01100000 , B00001100 , B00001100 , B00000000 , B11100000 , B00000001 , B10000001 , B10001100 , B00011000 , <nl> + B00110000 , B00110000 , B11000000 , B00011000 , B00110001 , B10000011 , B00000000 , B01100000 , B00001100 , B00001100 , B01000000 , B11100000 , B00000001 , B10000011 , B10001100 , B00110000 , <nl> + B00011000 , B01100000 , B11000000 , B00001100 , B01100001 , B10000111 , B11000000 , B11100000 , B00011100 , B00001100 , B11000000 , B01100000 , B00000000 , B11000011 , B00001100 , B01100000 , <nl> + B00001111 , B11000011 , B11110000 , B00000111 , B11000000 , B11111111 , B11000111 , B11111100 , B01111111 , B00000111 , B10000001 , B11000000 , B00000000 , B01111110 , B00011111 , B11000000 , <nl> + B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000111 , B00000000 , 
B00000000 , B00000000 , B00000000 , B00000000 , <nl> + B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000111 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 <nl> + } ; <nl> new file mode 100644 <nl> index 00000000000 . . f4fd17696ae <nl> mmm / dev / null <nl> ppp b / Marlin / src / config / examples / Creality / CR - 10mini / _Statusscreen . h <nl> <nl> + / * * <nl> + * Marlin 3D Printer Firmware <nl> + * Copyright ( C ) 2016 MarlinFirmware [ https : / / github . com / MarlinFirmware / Marlin ] <nl> + * <nl> + * Based on Sprinter and grbl . <nl> + * Copyright ( C ) 2011 Camiel Gubbels / Erik van der Zalm <nl> + * <nl> + * This program is free software : you can redistribute it and / or modify <nl> + * it under the terms of the GNU General Public License as published by <nl> + * the Free Software Foundation , either version 3 of the License , or <nl> + * ( at your option ) any later version . <nl> + * <nl> + * This program is distributed in the hope that it will be useful , <nl> + * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + * GNU General Public License for more details . <nl> + * <nl> + * You should have received a copy of the GNU General Public License <nl> + * along with this program . If not , see < http : / / www . gnu . org / licenses / > . <nl> + * <nl> + * / <nl> + <nl> + / * * <nl> + * Custom Status Screen bitmap <nl> + * <nl> + * Place this file in the root with your configuration files <nl> + * and enable CUSTOM_STATUS_SCREEN_IMAGE in Configuration . h . <nl> + * <nl> + * Use the Marlin Bitmap Converter to make your own : <nl> + * http : / / marlinfw . org / tools / u8glib / converter . 
html <nl> + * / <nl> + <nl> + # define STATUS_SCREENWIDTH 128 <nl> + # define STATUS_SCREEN_HOTEND_TEXT_X ( E ) 38 <nl> + # define STATUS_SCREEN_BED_TEXT_X 73 <nl> + <nl> + / / = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = <nl> + <nl> + const unsigned char status_screen0_bmp [ ] PROGMEM = { <nl> + B00000111 , B11001111 , B10000000 , B00110001 , B11100000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00111111 , B11111111 , B11110000 , <nl> + B00001111 , B11001111 , B11000000 , B01110011 , B11110000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00111000 , B00000000 , B01110000 , <nl> + B00001100 , B00001100 , B01000000 , B01110011 , B00110000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00110000 , B11111100 , B00110000 , <nl> + B00001100 , B00001100 , B11000000 , B00110011 , B00110000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00100000 , B11111100 , B00010000 , <nl> + B00001100 , B00001111 , B11001111 , B00110011 , B00110000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00100000 , B01111000 , B00010000 , <nl> + B00001100 , B00001101 , B10001111 , B00110011 , B00110000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00100000 , B00110000 , B00010000 , <nl> + B00001100 , B00001100 , B11000000 , B00110011 , B00110000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00101100 , B00000000 , B11010000 , <nl> + B00001111 , B11001100 , B11000000 , B00110011 , B11110000 , B00011111 , B11100000 , B00000000 , B00000000 , B00001000 , B00100000 , B10000000 , B00000000 , B00101110 , B00110001 , B11010000 , <nl> + B00000111 , B11001100 , B11000000 , B00110001 , B11100000 , B00111111 , B11110000 , B00000000 , B00000000 , B00000100 , B00010000 , B01000000 , B00000000 , B00101111 , B01111011 , B11010000 , <nl> + B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00111111 , B11110000 , B00000000 , B00000000 , B00000100 , B00010000 , B01000000 , B00000000 , B00101111 , B01111011 , B11010000 , <nl> + B00000001 , B10000011 , B00110000 , B00000011 , B00000000 , B00111111 , B11110000 , B00000000 , B00000000 , B00001000 , B00100000 , B10000000 , B00000000 , B00101110 , B00110001 , B11010000 , <nl> + B00000001 , B10000011 , B00110010 , B00000011 , B00000000 , B00011111 , B11100000 , B00000000 , B00000000 , B00010000 , B01000001 , B00000000 , B00000000 , B00101100 , B00000000 , B11010000 , <nl> + B00000001 , B11000111 , B00000010 , B11100000 , B00000000 , B00011111 , B11100000 , B00000000 , B00000000 , B00100000 , B10000010 , B00000000 , B00000000 , B00100000 , B00110000 , B00010000 , <nl> + B00000001 , B11000111 , B00110011 , B11110011 , B00000000 , B00111111 , B11110000 , B00000000 , B00000000 , B00100000 , B10000010 , B00000000 , B00000000 , B00100000 , B01111000 , B00010000 , <nl> + B00000001 , B11101111 , B00110011 , B00110011 , B00000000 , B00111111 , B11110000 , B00000000 , B00000000 , B00010000 , B01000001 , B00000000 , B00000000 , B00100000 , B11111100 , B00010000 , <nl> + B00000001 , B10111011 , B00110011 , B00110011 , B00000000 , B00111111 , B11110000 , B00000000 , B00000000 , B00001000 , B00100000 , B10000000 , B00000000 , B00110000 , B11111100 , B00110000 , <nl> + B00000001 , B10010011 , 
B00110011 , B00110011 , B00000000 , B00001111 , B11000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00111000 , B00000000 , B01110000 , <nl> + B00000001 , B10010011 , B00110011 , B00110011 , B00000000 , B00000111 , B10000000 , B00000000 , B00000000 , B11111111 , B11111111 , B11000000 , B00000000 , B00111111 , B11111111 , B11110000 , <nl> + B00000001 , B10000011 , B00110011 , B00110011 , B00000000 , B00000011 , B00000000 , B00000000 , B00000000 , B11111111 , B11111111 , B11000000 , B00000000 , B00000000 , B00000000 , B00000000 <nl> + } ; <nl> + const unsigned char status_screen1_bmp [ ] PROGMEM = { <nl> + B00000111 , B11001111 , B10000000 , B00110001 , B11100000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00111111 , B11111111 , B11110000 , <nl> + B00001111 , B11001111 , B11000000 , B01110011 , B11110000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00111000 , B00000000 , B01110000 , <nl> + B00001100 , B00001100 , B01000000 , B01110011 , B00110000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00110011 , B10000111 , B00110000 , <nl> + B00001100 , B00001100 , B11000000 , B00110011 , B00110000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00100111 , B10000111 , B10010000 , <nl> + B00001100 , B00001111 , B11001111 , B00110011 , B00110000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00101111 , B10000111 , B11010000 , <nl> + B00001100 , B00001101 , B10001111 , B00110011 , B00110000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00101111 , B10000111 , B11010000 , <nl> + B00001100 , B00001100 , B11000000 , B00110011 , B00110000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00101111 , B00000011 , B11010000 , <nl> + B00001111 , B11001100 , B11000000 , B00110011 , B11110000 , B00011111 , B11100000 , B00000000 , B00000000 , B00001000 , B00100000 , B10000000 , B00000000 , B00100000 , B00110000 , B00010000 , <nl> + B00000111 , B11001100 , B11000000 , B00110001 , B11100000 , B00111111 , B11110000 , B00000000 , B00000000 , B00000100 , B00010000 , B01000000 , B00000000 , B00100000 , B01111000 , B00010000 , <nl> + B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00111111 , B11110000 , B00000000 , B00000000 , B00000100 , B00010000 , B01000000 , B00000000 , B00100000 , B01111000 , B00010000 , <nl> + B00000001 , B10000011 , B00110000 , B00000011 , B00000000 , B00111111 , B11110000 , B00000000 , B00000000 , B00001000 , B00100000 , B10000000 , B00000000 , B00100000 , B00110000 , B00010000 , <nl> + B00000001 , B10000011 , B00110010 , B00000011 , B00000000 , B00011111 , B11100000 , B00000000 , B00000000 , B00010000 , B01000001 , B00000000 , B00000000 , B00101111 , B00000011 , B11010000 , <nl> + B00000001 , B11000111 , B00000010 , B11100000 , B00000000 , B00011111 , B11100000 , B00000000 , B00000000 , B00100000 , B10000010 , B00000000 , B00000000 , B00101111 , B10000111 , B11010000 , <nl> + B00000001 , B11000111 , B00110011 , B11110011 , B00000000 , B00111111 , B11110000 , B00000000 , B00000000 , B00100000 , B10000010 , B00000000 , B00000000 , B00101111 , B10000111 , B11010000 , <nl> + B00000001 , B11101111 , B00110011 , B00110011 , B00000000 , B00111111 , B11110000 , B00000000 , B00000000 , B00010000 , 
B01000001 , B00000000 , B00000000 , B00100111 , B10000111 , B10010000 , <nl> + B00000001 , B10111011 , B00110011 , B00110011 , B00000000 , B00111111 , B11110000 , B00000000 , B00000000 , B00001000 , B00100000 , B10000000 , B00000000 , B00110011 , B10000111 , B00110000 , <nl> + B00000001 , B10010011 , B00110011 , B00110011 , B00000000 , B00001111 , B11000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00000000 , B00111000 , B00000000 , B01110000 , <nl> + B00000001 , B10010011 , B00110011 , B00110011 , B00000000 , B00000111 , B10000000 , B00000000 , B00000000 , B11111111 , B11111111 , B11000000 , B00000000 , B00111111 , B11111111 , B11110000 , <nl> + B00000001 , B10000011 , B00110011 , B00110011 , B00000000 , B00000011 , B00000000 , B00000000 , B00000000 , B11111111 , B11111111 , B11000000 , B00000000 , B00000000 , B00000000 , B00000000 <nl> + } ; <nl>
Merge pull request from ldkraemer / BF2CR - 10Bootscr
MarlinFirmware/Marlin
8c1a82c32765dda9a6cc6f204c713e3e81310f73
2018-02-22T02:29:36Z
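The SPINDLE_LASER_ENABLE block in the Configuration_adv.h diff above models spindle speed as a linear function of PWM duty ( SPEED = duty * SPEED_POWER_SLOPE + SPEED_POWER_INTERCEPT , with duty in the range 0 to 255 ). The stand-alone C++ sketch below is not Marlin source; it only illustrates how that relation can be inverted to pick a duty value for a requested RPM, assuming the SuperPID example constants quoted in the diff. The helper name rpm_to_pwm_duty is hypothetical and does not exist in Marlin.

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Constants mirror the SuperPID example values quoted in the diff above.
constexpr float SPEED_POWER_SLOPE     = 118.4f;   // RPM gained per PWM count
constexpr float SPEED_POWER_INTERCEPT = 0.0f;     // RPM at duty 0
constexpr float SPEED_POWER_MIN       = 5000.0f;  // lowest usable RPM
constexpr float SPEED_POWER_MAX       = 30000.0f; // highest usable RPM

// Hypothetical helper (not a Marlin function): clamp the requested speed to the
// controller's range, then invert SPEED = duty * slope + intercept and clamp to 0..255.
uint8_t rpm_to_pwm_duty(float rpm) {
  const float speed = std::max(SPEED_POWER_MIN, std::min(rpm, SPEED_POWER_MAX));
  const float duty  = (speed - SPEED_POWER_INTERCEPT) / SPEED_POWER_SLOPE;
  return static_cast<uint8_t>(std::max(0.0f, std::min(duty, 255.0f)));
}

int main() {
  std::printf("10000 RPM -> duty %u\n", static_cast<unsigned>(rpm_to_pwm_duty(10000.0f)));
  std::printf("30000 RPM -> duty %u\n", static_cast<unsigned>(rpm_to_pwm_duty(30000.0f)));
  return 0;
}

With these constants a request of 10000 RPM maps to a duty of about 84 and the 30000 RPM maximum to about 253, so the quoted slope keeps full scale just inside the 8-bit PWM range.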
mmm a / stdlib / public / core / Algorithm . swift <nl> ppp b / stdlib / public / core / Algorithm . swift <nl> <nl> <nl> / / / Returns the minimum element in ` elements ` . <nl> / / / <nl> - / / / Requires : ` elements ` is non - empty . O ( elements . count ( ) ) <nl> + / / / - Requires : ` elements ` is non - empty . O ( ` elements . count ( ) ` ) . <nl> @ availability ( * , unavailable , message = " call the ' minElement ( ) ' method on the sequence " ) <nl> public func minElement < <nl> R : SequenceType <nl> public func minElement < <nl> <nl> / / / Returns the maximum element in ` elements ` . <nl> / / / <nl> - / / / Requires : ` elements ` is non - empty . O ( elements . count ( ) ) <nl> + / / / - Requires : ` elements ` is non - empty . O ( ` elements . count ( ) ` ) . <nl> @ availability ( * , unavailable , message = " call the ' maxElement ( ) ' method on the sequence " ) <nl> public func maxElement < <nl> R : SequenceType <nl> public func startsWith < <nl> / / / Return true iff ` s ` begins with elements equivalent to those of <nl> / / / ` prefix ` , using ` isEquivalent ` as the equivalence test . <nl> / / / <nl> - / / / Requires : ` isEquivalent ` is an [ equivalence relation ] ( http : / / en . wikipedia . org / wiki / Equivalence_relation ) <nl> + / / / - Requires : ` isEquivalent ` is an [ equivalence relation ] ( http : / / en . wikipedia . org / wiki / Equivalence_relation ) . <nl> @ availability ( * , unavailable , message = " call the ' startsWith ( ) ' method on the sequence " ) <nl> public func startsWith < <nl> S0 : SequenceType , S1 : SequenceType <nl> public struct EnumerateGenerator < <nl> / / / Advance to the next element and return it , or ` nil ` if no next <nl> / / / element exists . <nl> / / / <nl> - / / / Requires : no preceding call to ` self . next ( ) ` has returned ` nil ` . <nl> + / / / - Requires : No preceding call to ` self . next ( ) ` has returned ` nil ` . <nl> public mutating func next ( ) - > Element ? { <nl> let b = base . next ( ) <nl> if b = = nil { return . None } <nl> public func equal < <nl> / / / Return true iff ` a1 ` and ` a2 ` contain equivalent elements , using <nl> / / / ` isEquivalent ` as the equivalence test . <nl> / / / <nl> - / / / Requires : ` isEquivalent ` is an [ equivalence relation ] ( http : / / en . wikipedia . org / wiki / Equivalence_relation ) <nl> + / / / - Requires : ` isEquivalent ` is an [ equivalence relation ] ( http : / / en . wikipedia . org / wiki / Equivalence_relation ) . <nl> @ availability ( * , unavailable , message = " call the ' equal ( ) ' method on the sequence " ) <nl> public func equal < <nl> S1 : SequenceType , S2 : SequenceType <nl> public func lexicographicalCompare < <nl> / / / Return true iff ` a1 ` precedes ` a2 ` in a lexicographical ( " dictionary " ) <nl> / / / ordering , using ` isOrderedBefore ` as the comparison between elements . <nl> / / / <nl> - / / / Requires : ` isOrderedBefore ` is a <nl> + / / / - Requires : ` isOrderedBefore ` is a <nl> / / / [ strict weak ordering ] ( http : / / en . wikipedia . org / wiki / Strict_weak_order # Strict_weak_orderings ) <nl> / / / over the elements of ` a1 ` and ` a2 ` . <nl> @ availability ( * , unavailable , message = " call the ' lexicographicalCompare ( ) ' method on the sequence " ) <nl> mmm a / stdlib / public / core / ArrayBuffer . swift <nl> ppp b / stdlib / public / core / ArrayBuffer . swift <nl> public struct _ArrayBuffer < T > : _ArrayBufferType { <nl> <nl> / / / Returns an ` _ArrayBuffer < U > ` containing the same elements . 
<nl> / / / <nl> - / / / Requires : the elements actually have dynamic type ` U ` , and ` U ` <nl> - / / / is a class or ` @ objc ` existential . <nl> + / / / - Requires : The elements actually have dynamic type ` U ` , and ` U ` <nl> + / / / is a class or ` @ objc ` existential . <nl> func castToBufferOf < U > ( _ : U . Type ) - > _ArrayBuffer < U > { <nl> _sanityCheck ( _isClassOrObjCExistential ( T . self ) ) <nl> _sanityCheck ( _isClassOrObjCExistential ( U . self ) ) <nl> public struct _ArrayBuffer < T > : _ArrayBufferType { <nl> / / / Returns an ` _ArrayBuffer < U > ` containing the same elements , <nl> / / / deffering checking each element ' s ` U ` - ness until it is accessed . <nl> / / / <nl> - / / / Requires : ` U ` is a class or ` @ objc ` existential derived from ` T ` . <nl> + / / / - Requires : ` U ` is a class or ` @ objc ` existential derived from ` T ` . <nl> func downcastToBufferWithDeferredTypeCheckOf < U > ( <nl> _ : U . Type <nl> ) - > _ArrayBuffer < U > { <nl> extension _ArrayBuffer { <nl> / / / Replace the given ` subRange ` with the first ` newCount ` elements of <nl> / / / the given collection . <nl> / / / <nl> - / / / Requires : this buffer is backed by a uniquely - referenced <nl> - / / / _ContiguousArrayBuffer <nl> + / / / - Requires : This buffer is backed by a uniquely - referenced <nl> + / / / ` _ContiguousArrayBuffer ` . <nl> public mutating func replace < <nl> C : CollectionType where C . Generator . Element = = Element <nl> > ( <nl> extension _ArrayBuffer { <nl> / / / Call ` body ( p ) ` , where ` p ` is an ` UnsafeMutableBufferPointer ` <nl> / / / over the underlying contiguous storage . <nl> / / / <nl> - / / / Requires : such contiguous storage exists or the buffer is empty <nl> + / / / - Requires : Such contiguous storage exists or the buffer is empty . <nl> public mutating func withUnsafeMutableBufferPointer < R > ( <nl> @ noescape body : ( UnsafeMutableBufferPointer < T > ) - > R <nl> ) - > R { <nl> extension _ArrayBuffer { <nl> <nl> / / / An object that keeps the elements stored in this buffer alive <nl> / / / <nl> - / / / Requires : this buffer is backed by a _ContiguousArrayBuffer <nl> + / / / - Requires : This buffer is backed by a ` _ContiguousArrayBuffer ` . <nl> public var nativeOwner : AnyObject { <nl> _sanityCheck ( _isNative , " Expect a native array " ) <nl> return _native . _storage <nl> extension _ArrayBuffer { <nl> <nl> / / / Our native representation . <nl> / / / <nl> - / / / Requires : ` _isNative ` <nl> + / / / - Requires : ` _isNative ` . <nl> var _native : NativeBuffer { <nl> return NativeBuffer ( <nl> _isClassOrObjCExistential ( T . self ) <nl> extension _ArrayBuffer { <nl> <nl> / / / Fast access to the native representation . <nl> / / / <nl> - / / / Requires : ` _isNativeNoTypeCheck ` <nl> + / / / - Requires : ` _isNativeNoTypeCheck ` . <nl> var _nativeNoTypeCheck : NativeBuffer { <nl> return NativeBuffer ( _storage . nativeInstance_noSpareBits ) <nl> } <nl> mmm a / stdlib / public / core / ArrayBufferType . swift <nl> ppp b / stdlib / public / core / ArrayBufferType . swift <nl> public protocol _ArrayBufferType : MutableCollectionType { <nl> / / / Replace the given ` subRange ` with the first ` newCount ` elements of <nl> / / / the given collection . <nl> / / / <nl> - / / / Requires : this buffer is backed by a uniquely - referenced <nl> - / / / _ContiguousArrayBuffer <nl> + / / / - Requires : This buffer is backed by a uniquely - referenced <nl> + / / / ` _ContiguousArrayBuffer ` . 
<nl> mutating func replace < C : CollectionType where C . Generator . Element = = Element > ( <nl> subRange subRange : Range < Int > , with newCount : Int , elementsOf newValues : C <nl> ) <nl> public protocol _ArrayBufferType : MutableCollectionType { <nl> / / / Call ` body ( p ) ` , where ` p ` is an ` UnsafeMutableBufferPointer ` <nl> / / / over the underlying contiguous storage . <nl> / / / <nl> - / / / Requires : such contiguous storage exists or the buffer is empty <nl> + / / / - Requires : Such contiguous storage exists or the buffer is empty . <nl> mutating func withUnsafeMutableBufferPointer < R > ( <nl> @ noescape body : ( UnsafeMutableBufferPointer < Element > ) - > R <nl> ) - > R <nl> mmm a / stdlib / public / core / ArrayCast . swift <nl> ppp b / stdlib / public / core / ArrayCast . swift <nl> case Verbatim , Explicit <nl> <nl> / / / Implements ` source as [ TargetElement ] ` . <nl> / / / <nl> - / / / Requires : At least one of ` SourceElement ` and ` TargetElement ` is a <nl> + / / / - Requires : At least one of ` SourceElement ` and ` TargetElement ` is a <nl> / / / class type or ObjC existential . May trap for other " valid " inputs <nl> / / / when ` TargetElement ` is not bridged verbatim , if an element can ' t <nl> / / / be converted . <nl> ElementwiseBridging : <nl> / / / ` source ` to a ` TargetElement ` and return the resulting array , or <nl> / / / return ` nil ` if any element fails to convert . <nl> / / / <nl> - / / / Requires : ` SourceElement ` is a class or ObjC existential type <nl> + / / / - Requires : ` SourceElement ` is a class or ObjC existential type . <nl> / / / O ( n ) , because each element must be checked . <nl> public func _arrayConditionalCast < SourceElement , TargetElement > ( <nl> source : [ SourceElement ] <nl> mmm a / stdlib / public / core / ArrayType . swift <nl> ppp b / stdlib / public / core / ArrayType . swift <nl> protocol _ArrayType <nl> <nl> / / / Remove an element from the end of the Array in O ( 1 ) . <nl> / / / <nl> - / / / - Returns : the removed element . <nl> + / / / - returns : The removed element . <nl> / / / <nl> - / / / Requires : count > 0 <nl> + / / / - Requires : ` count > 0 ` . <nl> mutating func removeLast ( ) - > Generator . Element <nl> <nl> / / / Insert ` newElement ` at index ` i ` . <nl> protocol _ArrayType <nl> / / / <nl> / / / - Complexity : O ( ` self . count ( ) ` ) . <nl> / / / <nl> - / / / Requires : ` atIndex ` < = ` count ` <nl> + / / / - Requires : ` atIndex < = count ` . <nl> mutating func insert ( newElement : Generator . Element , atIndex i : Int ) <nl> <nl> / / / Remove and return the element at the given index . <nl> / / / <nl> - / / / - Returns : the removed element . <nl> + / / / - returns : The removed element . <nl> / / / <nl> / / / - Complexity : Worst case O ( N ) . <nl> / / / <nl> - / / / Requires : ` count ` > ` index ` <nl> + / / / - Requires : ` count > index ` . <nl> mutating func removeAtIndex ( index : Int ) - > Generator . Element <nl> <nl> / / / Erase all the elements . If ` keepCapacity ` is ` true ` , ` capacity ` <nl> protocol _ArrayType <nl> <nl> / / / Sort ` self ` in - place according to ` isOrderedBefore ` . <nl> / / / <nl> - / / / Requires : ` isOrderedBefore ` induces a [ strict weak ordering ] ( http : / / en . wikipedia . org / wiki / Strict_weak_order # Strict_weak_orderings ) <nl> - / / / over the elements . <nl> + / / / - Requires : ` isOrderedBefore ` induces a <nl> + / / / [ strict weak ordering ] ( http : / / en . wikipedia . 
org / wiki / Strict_weak_order # Strict_weak_orderings ) <nl> + / / / over the elements . <nl> mutating func sort ( <nl> isOrderedBefore : ( Generator . Element , Generator . Element ) - > Bool <nl> ) <nl> mmm a / stdlib / public / core / Arrays . swift . gyb <nl> ppp b / stdlib / public / core / Arrays . swift . gyb <nl> public struct $ { Self } < T > <nl> return _buffer . capacity <nl> } <nl> <nl> - / / Requires : the array has a native buffer <nl> + / / / - Requires : The array has a native buffer . <nl> @ _semantics ( " array . owner " ) <nl> func _getOwner ( ) - > Builtin . NativeObject { <nl> return Builtin . castToNativeObject ( _buffer . nativeOwner ) <nl> extension $ { Self } : _ArrayType { <nl> } <nl> <nl> / / / Remove an element from the end of the $ { Self } in O ( 1 ) . <nl> - / / / Requires : count > 0 <nl> + / / / <nl> + / / / - Requires : ` count > 0 ` . <nl> public mutating func removeLast ( ) - > T { <nl> _precondition ( count > 0 , " can ' t removeLast from an empty $ { Self } " ) <nl> let c = count <nl> extension $ { Self } : _ArrayType { <nl> <nl> / / / Insert ` newElement ` at index ` i ` . <nl> / / / <nl> - / / / Requires : ` i < = count ` <nl> + / / / - Requires : ` i < = count ` . <nl> / / / <nl> / / / - Complexity : O ( ` count ` ) . <nl> public mutating func insert ( newElement : T , atIndex i : Int ) { <nl> extension $ { Self } : _ArrayType { <nl> return Swift . join ( self , elements ) <nl> } <nl> <nl> - / / / Sort ` self ` in - place according to ` isOrderedBefore ` . Requires : <nl> - / / / ` isOrderedBefore ` induces a [ strict weak ordering ] ( http : / / en . wikipedia . org / wiki / Strict_weak_order # Strict_weak_orderings ) <nl> + / / / Sort ` self ` in - place according to ` isOrderedBefore ` . <nl> + / / / <nl> + / / / - Requires : ` isOrderedBefore ` induces a [ strict weak ordering ] ( http : / / en . wikipedia . org / wiki / Strict_weak_order # Strict_weak_orderings ) <nl> / / / over the elements . <nl> public mutating func sort ( isOrderedBefore : ( T , T ) - > Bool ) { <nl> return withUnsafeMutableBufferPointer { <nl> extension $ { Self } : _ArrayType { <nl> } <nl> <nl> / / / Return a copy of ` self ` that has been sorted according to <nl> - / / / ` isOrderedBefore ` . Requires : ` isOrderedBefore ` induces a <nl> + / / / ` isOrderedBefore ` . <nl> + / / / <nl> + / / / - Requires : ` isOrderedBefore ` induces a <nl> / / / [ strict weak ordering ] ( http : / / en . wikipedia . org / wiki / Strict_weak_order # Strict_weak_orderings ) <nl> / / / over the elements . <nl> public func sorted ( isOrderedBefore : ( T , T ) - > Bool ) - > $ { Self } { <nl> public func ! = < T : Equatable > ( lhs : $ { Self } < T > , rhs : $ { Self } < T > ) - > Bool { <nl> % end <nl> <nl> # if _runtime ( _ObjC ) <nl> - / / / Returns an Array < Base > containing the same elements as a in <nl> - / / / O ( 1 ) . Requires : Base is a base class or base @ objc protocol ( such <nl> - / / / as AnyObject ) of Derived . <nl> - / / / FIXME : Dynamic casting is currently not possible without the objc runtime : <nl> - / / / rdar : / / problem / 18801510 <nl> + / / / Returns an ` Array < Base > ` containing the same elements as ` a ` in <nl> + / / / O ( 1 ) . <nl> + / / / <nl> + / / / - Requires : ` Base ` is a base class or base ` @ objc ` protocol ( such <nl> + / / / as ` AnyObject ` ) of ` Derived ` . 
<nl> public func _arrayUpCast < Derived , Base > ( a : Array < Derived > ) - > Array < Base > { <nl> + / / FIXME : Dynamic casting is currently not possible without the objc runtime : <nl> + / / rdar : / / problem / 18801510 <nl> return Array ( a . _buffer . castToBufferOf ( Base . self ) ) <nl> } <nl> # endif <nl> mmm a / stdlib / public / core / Bit . swift <nl> ppp b / stdlib / public / core / Bit . swift <nl> public enum Bit : Int , Comparable , RandomAccessIndexType , Reflectable { <nl> <nl> / / / Returns the next consecutive value after ` self ` . <nl> / / / <nl> - / / / Requires : ` self = = . Zero ` . <nl> + / / / - Requires : ` self = = . Zero ` . <nl> public func successor ( ) - > Bit { <nl> _precondition ( self = = . Zero , " Can ' t increment past one " ) <nl> return . One <nl> public enum Bit : Int , Comparable , RandomAccessIndexType , Reflectable { <nl> <nl> / / / Returns the previous consecutive value before ` self ` . <nl> / / / <nl> - / / / Requires : ` self ! = . Zero ` . <nl> + / / / - Requires : ` self ! = . Zero ` . <nl> public func predecessor ( ) - > Bit { <nl> _precondition ( self = = . One , " Can ' t decrement past zero " ) <nl> return . Zero <nl> mmm a / stdlib / public / core / BridgeObjectiveC . swift <nl> ppp b / stdlib / public / core / BridgeObjectiveC . swift <nl> public struct AutoreleasingUnsafeMutablePointer < T / * TODO : class * / > <nl> / / / Access the ` i ` th element of the raw array pointed to by <nl> / / / ` self ` . <nl> / / / <nl> - / / / Requires : ` self ! = nil ` <nl> + / / / - Requires : ` self ! = nil ` . <nl> public subscript ( i : Int ) - > T { <nl> @ transparent <nl> get { <nl> mmm a / stdlib / public / core / Builtin . swift <nl> ppp b / stdlib / public / core / Builtin . swift <nl> public func unsafeAddressOf ( object : AnyObject ) - > UnsafePointer < Void > { <nl> return UnsafePointer ( Builtin . bridgeToRawPointer ( object ) ) <nl> } <nl> <nl> - / / / - Returns : ` x as T ` <nl> + / / / - returns : ` x as T ` . <nl> / / / <nl> - / / / Requires : ` x is T ` . In particular , in - O builds , no test is <nl> - / / / performed to ensure that ` x ` actually has dynamic type ` T ` . <nl> + / / / - Requires : ` x is T ` . In particular , in - O builds , no test is <nl> + / / / performed to ensure that ` x ` actually has dynamic type ` T ` . <nl> / / / <nl> - / / / Danger : trades safety for performance . Use ` unsafeDowncast ` <nl> - / / / only when ` x as T ` has proven to be a performance problem and you <nl> - / / / are confident that , always , ` x is T ` . It is better than an <nl> - / / / ` unsafeBitCast ` because it ' s more restrictive , and because <nl> - / / / checking is still performed in debug builds . <nl> + / / / - warning : Trades safety for performance . Use ` unsafeDowncast ` <nl> + / / / only when ` x as T ` has proven to be a performance problem and you <nl> + / / / are confident that , always , ` x is T ` . It is better than an <nl> + / / / ` unsafeBitCast ` because it ' s more restrictive , and because <nl> + / / / checking is still performed in debug builds . <nl> @ transparent <nl> public func unsafeDowncast < T : AnyObject > ( x : AnyObject ) - > T { <nl> _debugPrecondition ( x is T , " invalid unsafeDowncast " ) <nl> public func unsafeDowncast < T : AnyObject > ( x : AnyObject ) - > T { <nl> <nl> / / / - Returns : ` nonEmpty ! ` <nl> / / / <nl> - / / / Requires : ` nonEmpty ! = nil ` . In particular , in - O builds , no test <nl> - / / / is performed to ensure that ` nonEmpty ` actually is non - nil . 
<nl> + / / / - Requires : ` nonEmpty ! = nil ` . In particular , in - O builds , no test <nl> + / / / is performed to ensure that ` nonEmpty ` actually is non - nil . <nl> / / / <nl> - / / / Danger : trades safety for performance . Use ` unsafeUnwrap ` <nl> - / / / only when ` nonEmpty ! ` has proven to be a performance problem and <nl> - / / / you are confident that , always , ` nonEmpty ! = nil ` . It is better <nl> - / / / than an ` unsafeBitCast ` because it ' s more restrictive , and <nl> - / / / because checking is still performed in debug builds . <nl> + / / / - warning : Trades safety for performance . Use ` unsafeUnwrap ` <nl> + / / / only when ` nonEmpty ! ` has proven to be a performance problem and <nl> + / / / you are confident that , always , ` nonEmpty ! = nil ` . It is better <nl> + / / / than an ` unsafeBitCast ` because it ' s more restrictive , and <nl> + / / / because checking is still performed in debug builds . <nl> @ inline ( __always ) <nl> public func unsafeUnwrap < T > ( nonEmpty : T ? ) - > T { <nl> if let x = nonEmpty { <nl> internal func _isObjCTaggedPointer ( x : AnyObject ) - > Bool { <nl> } <nl> <nl> / / / Create a ` BridgeObject ` around the given ` nativeObject ` with the <nl> - / / / given spare bits . Reference - counting and other operations on this <nl> + / / / given spare bits . <nl> + / / / <nl> + / / / Reference - counting and other operations on this <nl> / / / object will have access to the knowledge that it is native . <nl> / / / <nl> - / / / Requires : ` bits & _objectPointerIsObjCBit = = 0 ` , ` bits & <nl> - / / / _objectPointerSpareBits = = bits ` <nl> + / / / - Requires : ` bits & _objectPointerIsObjCBit = = 0 ` , <nl> + / / / ` bits & _objectPointerSpareBits = = bits ` . <nl> @ inline ( __always ) <nl> internal func _makeNativeBridgeObject ( <nl> nativeObject : AnyObject , _ bits : UInt <nl> internal func _makeObjCBridgeObject ( <nl> / / / Create a ` BridgeObject ` around the given ` object ` with the <nl> / / / given spare bits . <nl> / / / <nl> - / / / Requires : <nl> + / / / - Requires : <nl> / / / <nl> - / / / 1 . ` bits & _objectPointerSpareBits = = bits ` <nl> - / / / 2 . if ` object ` is a tagged pointer , ` bits = = 0 ` . Otherwise , <nl> - / / / ` object ` is either a native object , or ` bits = = <nl> - / / / _objectPointerIsObjCBit ` . <nl> + / / / 1 . ` bits & _objectPointerSpareBits = = bits ` <nl> + / / / 2 . if ` object ` is a tagged pointer , ` bits = = 0 ` . Otherwise , <nl> + / / / ` object ` is either a native object , or ` bits = = <nl> + / / / _objectPointerIsObjCBit ` . <nl> @ inline ( __always ) <nl> internal func _makeBridgeObject ( <nl> object : AnyObject , _ bits : UInt <nl> mmm a / stdlib / public / core / Character . swift <nl> ppp b / stdlib / public / core / Character . swift <nl> public struct Character : <nl> <nl> / / / Create an instance from a single - character ` String ` . <nl> / / / <nl> - / / / Requires : ` s ` contains exactly one extended grapheme cluster . <nl> + / / / - Requires : ` s ` contains exactly one extended grapheme cluster . <nl> public init ( _ s : String ) { <nl> / / The small representation can accept up to 8 code units as long <nl> / / as the last one is a continuation . Since the high bit of the <nl> public struct Character : <nl> <nl> / / / Access the code unit at ` position ` . <nl> / / / <nl> - / / / Requires : ` position ` is a valid position in ` self ` and <nl> - / / / ` position ! = endIndex ` . 
<nl> + / / / - Requires : ` position ` is a valid position in ` self ` and <nl> + / / / ` position ! = endIndex ` . <nl> subscript ( position : Int ) - > UTF8 . CodeUnit { <nl> _sanityCheck ( position > = 0 ) <nl> _sanityCheck ( position < Int ( count ) ) <nl> public struct Character : <nl> <nl> / / / Access the code unit at ` position ` . <nl> / / / <nl> - / / / Requires : ` position ` is a valid position in ` self ` and <nl> - / / / ` position ! = endIndex ` . <nl> + / / / - Requires : ` position ` is a valid position in ` self ` and <nl> + / / / ` position ! = endIndex ` . <nl> subscript ( position : Int ) - > UTF16 . CodeUnit { <nl> _sanityCheck ( position > = 0 ) <nl> _sanityCheck ( position < Int ( count ) ) <nl> mmm a / stdlib / public / core / Collection . swift <nl> ppp b / stdlib / public / core / Collection . swift <nl> public protocol CollectionType <nl> <nl> / / / Access the element indicated by ` position ` . <nl> / / / <nl> - / / / Requires : ` position ` indicates a valid position in ` self ` and <nl> - / / / ` position ! = endIndex ` . <nl> + / / / - Requires : ` position ` indicates a valid position in ` self ` and <nl> + / / / ` position ! = endIndex ` . <nl> subscript ( position : Index ) - > Generator . Element { get } <nl> <nl> / / / The * collection * type that represents a sub - range of elements . <nl> public func last < C : CollectionType where C . Index : BidirectionalIndexType > ( <nl> public protocol MutableCollectionType : CollectionType { <nl> / / / Access the element at ` position ` . <nl> / / / <nl> - / / / Requires : ` position ` indicates a valid position in ` self ` and <nl> - / / / ` position ! = endIndex ` . <nl> + / / / - Requires : ` position ` indicates a valid position in ` self ` and <nl> + / / / ` position ! = endIndex ` . <nl> subscript ( position : Index ) - > Generator . Element { get set } <nl> } <nl> <nl> public struct IndexingGenerator < <nl> / / / Advance to the next element and return it , or ` nil ` if no next <nl> / / / element exists . <nl> / / / <nl> - / / / Requires : no preceding call to ` self . next ( ) ` has returned ` nil ` . <nl> + / / / - Requires : No preceding call to ` self . next ( ) ` has returned ` nil ` . <nl> public mutating func next ( ) - > C . _Element ? { <nl> return _position = = _elements . endIndex <nl> ? . None : . Some ( _elements [ _position + + ] ) <nl> public struct PermutationGenerator < <nl> / / / Advance to the next element and return it , or ` nil ` if no next <nl> / / / element exists . <nl> / / / <nl> - / / / Requires : no preceding call to ` self . next ( ) ` has returned ` nil ` . <nl> + / / / - Requires : No preceding call to ` self . next ( ) ` has returned ` nil ` . <nl> public mutating func next ( ) - > Element ? { <nl> let result = indices . next ( ) <nl> return result ! = nil ? seq [ result ! ] : . None <nl> public struct PermutationGenerator < <nl> / / / Construct a * generator * over a permutation of ` elements ` given <nl> / / / by ` indices ` . <nl> / / / <nl> - / / / Requires : ` elements [ i ] ` is valid for every ` i ` in ` indices ` . <nl> + / / / - Requires : ` elements [ i ] ` is valid for every ` i ` in ` indices ` . <nl> public init ( elements : C , indices : Indices ) { <nl> self . seq = elements <nl> self . indices = indices . generate ( ) <nl> public protocol MutableSliceable : Sliceable , MutableCollectionType { <nl> <nl> / / / Return a slice containing all but the first element of ` s ` . <nl> / / / <nl> - / / / Requires : ` s ` is non - empty . 
<nl> + / / / - Requires : ` s ` is non - empty . <nl> public func dropFirst < Seq : Sliceable > ( s : Seq ) - > Seq . SubSlice { <nl> return s [ s . startIndex . successor ( ) . . < s . endIndex ] <nl> } <nl> <nl> / / / Return a slice containing all but the last element of ` s ` . <nl> / / / <nl> - / / / Requires : ` s ` is non - empty . <nl> + / / / - Requires : ` s ` is non - empty . <nl> public func dropLast < <nl> S : Sliceable <nl> where S . Index : BidirectionalIndexType <nl> mmm a / stdlib / public / core / CollectionAlgorithms . swift . gyb <nl> ppp b / stdlib / public / core / CollectionAlgorithms . swift . gyb <nl> partitionDocComment = " " " \ <nl> / / / Only returns ` range . endIndex ` when ` self ` is empty . " " " <nl> <nl> orderingRequirementForPredicate = " " " \ <nl> - / / / Requires : ` isOrderedBefore ` is a ` strict weak ordering <nl> - / / / < http : / / en . wikipedia . org / wiki / Strict_weak_order # Strict_weak_orderings > ` __ <nl> - / / / over ` self ` . " " " <nl> + / / / - Requires : ` isOrderedBefore ` is a <nl> + / / / [ strict weak ordering ] ( http : / / en . wikipedia . org / wiki / Strict_weak_order # Strict_weak_orderings ) <nl> + / / / over ` self ` . " " " <nl> <nl> orderingRequirementForComparable = " " " \ <nl> - / / / Requires : The less - than operator ( ` func < ` ) defined in the ` Comparable ` <nl> - / / / conformance is a ` strict weak ordering <nl> - / / / < http : / / en . wikipedia . org / wiki / Strict_weak_order # Strict_weak_orderings > ` __ <nl> - / / / over ` self ` . " " " <nl> + / / / - Requires : The less - than operator ( ` func < ` ) defined in <nl> + / / / the ` Comparable ` conformance is a <nl> + / / / [ strict weak ordering ] ( http : / / en . wikipedia . org / wiki / Strict_weak_order # Strict_weak_orderings ) <nl> + / / / over ` self ` . " " " <nl> <nl> } % <nl> <nl> mmm a / stdlib / public / core / CollectionOfOne . swift <nl> ppp b / stdlib / public / core / CollectionOfOne . swift <nl> public struct GeneratorOfOne < T > : GeneratorType , SequenceType { <nl> / / / Advance to the next element and return it , or ` nil ` if no next <nl> / / / element exists . <nl> / / / <nl> - / / / Requires : ` next ( ) ` has not been applied to a copy of ` self ` <nl> - / / / since the copy was made , and no preceding call to ` self . next ( ) ` <nl> - / / / has returned ` nil ` . <nl> + / / / - Requires : ` next ( ) ` has not been applied to a copy of ` self ` <nl> + / / / since the copy was made , and no preceding call to ` self . next ( ) ` <nl> + / / / has returned ` nil ` . <nl> public mutating func next ( ) - > T ? { <nl> let result = elements <nl> elements = . None <nl> public struct CollectionOfOne < T > : CollectionType { <nl> <nl> / / / Access the element at ` position ` . <nl> / / / <nl> - / / / Requires : ` position = = . Zero ` <nl> + / / / - Requires : ` position = = . Zero ` . <nl> public subscript ( position : Index ) - > T { <nl> _precondition ( position = = . Zero , " Index out of range " ) <nl> return element <nl> mmm a / stdlib / public / core / Concatenate . swift . gyb <nl> ppp b / stdlib / public / core / Concatenate . swift . gyb <nl> struct _ConcatenateSequenceGenerator < <nl> / / / Advance to the next element and return it , or ` nil ` if no next <nl> / / / element exists . <nl> / / / <nl> - / / / Requires : ` next ( ) ` has not been applied to a copy of ` self ` <nl> - / / / since the copy was made , and no preceding call to ` self . next ( ) ` <nl> - / / / has returned ` nil ` . 
<nl> + / / / - Requires : ` next ( ) ` has not been applied to a copy of ` self ` <nl> + / / / since the copy was made , and no preceding call to ` self . next ( ) ` <nl> + / / / has returned ` nil ` . <nl> public / / @ testable <nl> mutating func next ( ) - > Outer . Element . Generator . Element ? { <nl> repeat { <nl> struct $ { Index } < <nl> <nl> / / / Returns the next consecutive value after ` self ` . <nl> / / / <nl> - / / / Requires : the next value is representable . <nl> + / / / - Requires : the next value is representable . <nl> public / / @ testable <nl> func successor ( ) - > $ { Index } { <nl> return $ { Index } . adjustForward ( _data , _outer , _inner ! . successor ( ) ) <nl> struct $ { Index } < <nl> % if traversal = = ' Bidirectional ' : <nl> / / / Returns the previous consecutive value before ` self ` . <nl> / / / <nl> - / / / Requires : the previous value is representable . <nl> + / / / - Requires : The previous value is representable . <nl> public / / @ testable <nl> func predecessor ( ) - > $ { Index } { <nl> <nl> struct $ { View } < <nl> <nl> / / / Access the element at ` position ` . <nl> / / / <nl> - / / / Requires : ` position ` is a valid position in ` self ` and <nl> - / / / ` position ! = endIndex ` . <nl> + / / / - Requires : ` position ` is a valid position in ` self ` and <nl> + / / / ` position ! = endIndex ` . <nl> public / / @ testable <nl> subscript ( position : Index ) - > C . Generator . Element . Generator . Element { <nl> return _base [ position . _outer ] [ position . _inner ! ] <nl> mmm a / stdlib / public / core / ContiguousArrayBuffer . swift <nl> ppp b / stdlib / public / core / ContiguousArrayBuffer . swift <nl> public struct _ContiguousArrayBuffer < T > : _ArrayBufferType { <nl> / / / Replace the given subRange with the first newCount elements of <nl> / / / the given collection . <nl> / / / <nl> - / / / Requires : this buffer is backed by a uniquely - referenced <nl> - / / / _ContiguousArrayBuffer <nl> + / / / - Requires : This buffer is backed by a uniquely - referenced <nl> + / / / ` _ContiguousArrayBuffer ` . <nl> public mutating func replace < <nl> C : CollectionType where C . Generator . Element = = Element <nl> > ( <nl> public struct _ContiguousArrayBuffer < T > : _ArrayBufferType { <nl> } <nl> <nl> / / / Return true if the buffer stores only elements of type ` U ` . <nl> - / / / Requires : ` U ` is a class or ` @ objc ` existential . O ( N ) <nl> + / / / <nl> + / / / - Requires : ` U ` is a class or ` @ objc ` existential . O ( N ) . <nl> func storesOnlyElementsOfType < U > ( <nl> _ : U . Type <nl> ) - > Bool { <nl> mmm a / stdlib / public / core / ExistentialCollection . swift . gyb <nl> ppp b / stdlib / public / core / ExistentialCollection . swift . gyb <nl> public struct $ { Self } : $ { Traversal } IndexType { <nl> / / / Return the next consecutive value in a discrete sequence of <nl> / / / ` $ { Self } ` values . <nl> / / / <nl> - / / / Requires : ` self ` has a well - defined successor . <nl> + / / / - Requires : ` self ` has a well - defined successor . <nl> public func successor ( ) - > $ { Self } { <nl> return $ { Self } ( _box . successor ( ) ) <nl> } <nl> public struct $ { Self } : $ { Traversal } IndexType { <nl> / / / Return the previous consecutive value in a discrete sequence of <nl> / / / ` $ { Self } ` values . <nl> / / / <nl> - / / / Requires : ` self ` has a well - defined predecessor . <nl> + / / / - Requires : ` self ` has a well - defined predecessor . 
<nl> public func predecessor ( ) - > $ { Self } { <nl> return $ { Self } ( _box . predecessor ( ) ) <nl> } <nl> public struct $ { Self } : $ { Traversal } IndexType { <nl> / / / Return the minimum number of applications of ` successor ` or <nl> / / / ` predecessor ` required to reach ` other ` from ` self ` . <nl> / / / <nl> - / / / Requires : ` self ` and ` other ` wrap instances of the same type . <nl> + / / / - Requires : ` self ` and ` other ` wrap instances of the same type . <nl> public func distanceTo ( other : $ { Self } ) - > Distance { <nl> return _box . _distanceTo ( other . _box ) <nl> } <nl> public func ~ > ( <nl> / / / Return true iff ` lhs ` and ` rhs ` wrap equal underlying <nl> / / / ` $ { Self } ` s . <nl> / / / <nl> - / / / Requires : the types of indices wrapped by ` lhs ` and ` rhs ` are <nl> - / / / identical . <nl> + / / / - Requires : The types of indices wrapped by ` lhs ` and ` rhs ` are <nl> + / / / identical . <nl> public func = = ( lhs : $ { Self } , rhs : $ { Self } ) - > Bool { <nl> precondition ( lhs . _typeID = = rhs . _typeID , " base index types differ . " ) <nl> return lhs . _box . equals ( rhs . _box ) <nl> public struct Any $ { Traversal } Collection < Element > : AnyCollectionType { <nl> <nl> / / / Access the element indicated by ` position ` . <nl> / / / <nl> - / / / Requires : ` position ` indicates a valid position in ` self ` and <nl> - / / / ` position ! = endIndex ` . <nl> + / / / - Requires : ` position ` indicates a valid position in ` self ` and <nl> + / / / ` position ! = endIndex ` . <nl> public subscript ( position : Any $ { Traversal } Index ) - > Element { <nl> return _box [ position . _box ] <nl> } <nl> mmm a / stdlib / public / core / Filter . swift . gyb <nl> ppp b / stdlib / public / core / Filter . swift . gyb <nl> public struct FilterGenerator < <nl> / / / Advance to the next element and return it , or ` nil ` if no next <nl> / / / element exists . <nl> / / / <nl> - / / / Requires : ` next ( ) ` has not been applied to a copy of ` self ` <nl> - / / / since the copy was made , and no preceding call to ` self . next ( ) ` <nl> - / / / has returned ` nil ` . <nl> + / / / - Requires : ` next ( ) ` has not been applied to a copy of ` self ` <nl> + / / / since the copy was made , and no preceding call to ` self . next ( ) ` <nl> + / / / has returned ` nil ` . <nl> public mutating func next ( ) - > Base . Element ? { <nl> var n : Base . Element ? <nl> for / * ever * / ; ; { <nl> public struct FilterCollectionViewIndex < <nl> > : ForwardIndexType { <nl> / / / Returns the next consecutive value after ` self ` . <nl> / / / <nl> - / / / Requires : the next value is representable . <nl> + / / / - Requires : The next value is representable . <nl> public func successor ( ) - > FilterCollectionViewIndex { <nl> for nextPos in _pos . successor ( ) . . < _end { <nl> if _include ( _base [ nextPos ] ) { <nl> public struct FilterCollectionView < Base : CollectionType > : CollectionType { <nl> <nl> / / / Access the element at ` position ` . <nl> / / / <nl> - / / / Requires : ` position ` is a valid position in ` self ` and <nl> + / / / - Requires : ` position ` is a valid position in ` self ` and <nl> / / / ` position ! = endIndex ` . <nl> public subscript ( position : Index ) - > Base . Generator . Element { <nl> return _base [ position . _pos ] <nl> mmm a / stdlib / public / core / FixedPoint . swift . gyb <nl> ppp b / stdlib / public / core / FixedPoint . swift . 
gyb <nl> extension $ { Self } : RandomAccessIndexType { <nl> <nl> / / / Returns the next consecutive value after ` self ` . <nl> / / / <nl> - / / / Requires : the next value is representable . <nl> + / / / - Requires : The next value is representable . <nl> @ transparent public <nl> func successor ( ) - > $ { Self } { <nl> return self & + 1 <nl> } <nl> / / / Returns the previous consecutive value before ` self ` . <nl> / / / <nl> - / / / Requires : the previous value is representable . <nl> + / / / - Requires : The previous value is representable . <nl> @ transparent public <nl> func predecessor ( ) - > $ { Self } { <nl> return self & - 1 <nl> mmm a / stdlib / public / core / HashedCollections . swift . gyb <nl> ppp b / stdlib / public / core / HashedCollections . swift . gyb <nl> public struct Set < T : Hashable > : <nl> _variantStorage . removeAll ( keepCapacity : keepCapacity ) <nl> } <nl> <nl> - / / / Remove a member from the set and return it . Requires : ` count > 0 ` . <nl> + / / / Remove a member from the set and return it . <nl> + / / / <nl> + / / / - Requires : ` count > 0 ` . <nl> public mutating func removeFirst ( ) - > T { <nl> _precondition ( count > 0 , " can ' t removeFirst from an empty Set " ) <nl> let member = first ! <nl> internal func _stdlib_NSSet_allObjects ( nss : _NSSetType ) - > <nl> # if _runtime ( _ObjC ) <nl> / / / Perform a non - bridged upcast that always succeeds . <nl> / / / <nl> - / / / Requires : ` BaseValue ` is a base class or base @ objc <nl> - / / / protocol ( such as ` AnyObject ` ) of ` DerivedValue ` . <nl> + / / / - Requires : ` BaseValue ` is a base class or base ` @ objc ` <nl> + / / / protocol ( such as ` AnyObject ` ) of ` DerivedValue ` . <nl> public func _setUpCast < DerivedValue , BaseValue > ( source : Set < DerivedValue > ) <nl> - > Set < BaseValue > { <nl> _sanityCheck ( _isClassOrObjCExistential ( BaseValue . self ) ) <nl> internal func _stdlib_NSDictionary_allKeys ( nsd : _NSDictionaryType ) <nl> # if _runtime ( _ObjC ) <nl> / / / Perform a non - bridged upcast that always succeeds . <nl> / / / <nl> - / / / Requires : ` BaseKey ` and ` BaseValue ` are base classes or base @ objc <nl> - / / / protocols ( such as ` AnyObject ` ) of ` DerivedKey ` and ` DerivedValue ` , <nl> - / / / respectively . <nl> + / / / - Requires : ` BaseKey ` and ` BaseValue ` are base classes or base ` @ objc ` <nl> + / / / protocols ( such as ` AnyObject ` ) of ` DerivedKey ` and ` DerivedValue ` , <nl> + / / / respectively . <nl> public func _dictionaryUpCast < DerivedKey , DerivedValue , BaseKey , BaseValue > ( <nl> source : Dictionary < DerivedKey , DerivedValue > <nl> ) - > Dictionary < BaseKey , BaseValue > { <nl> internal struct _Native $ { Self } Index < $ { TypeParametersDecl } > : <nl> <nl> / / / Returns the next consecutive value after ` self ` . <nl> / / / <nl> - / / / Requires : the next value is representable . <nl> + / / / - Requires : The next value is representable . <nl> internal func successor ( ) - > NativeIndex { <nl> var i = offset + 1 <nl> / / FIXME : Can ' t write the simple code pending <nl> internal struct _Cocoa $ { Self } Index : ForwardIndexType , Comparable { <nl> <nl> / / / Returns the next consecutive value after ` self ` . <nl> / / / <nl> - / / / Requires : the next value is representable . <nl> + / / / - Requires : The next value is representable . <nl> internal func successor ( ) - > _Cocoa $ { Self } Index { <nl> _precondition ( <nl> currentKeyIndex < allKeys . 
value , " can not increment endIndex " ) <nl> public struct $ { Self } Index < $ { TypeParametersDecl } > : <nl> <nl> / / / Returns the next consecutive value after ` self ` . <nl> / / / <nl> - / / / Requires : the next value is representable . <nl> + / / / - Requires : The next value is representable . <nl> public func successor ( ) - > Index { <nl> if _fastPath ( _guaranteedNative ) { <nl> return . _Native ( _nativeIndex . successor ( ) ) <nl> public struct $ { Self } Generator < $ { TypeParametersDecl } > : GeneratorType { <nl> / / / Advance to the next element and return it , or ` nil ` if no next <nl> / / / element exists . <nl> / / / <nl> - / / / Requires : no preceding call to ` self . next ( ) ` has returned ` nil ` . <nl> + / / / - Requires : No preceding call to ` self . next ( ) ` has returned ` nil ` . <nl> public mutating func next ( ) - > $ { SequenceType } ? { <nl> if _fastPath ( _guaranteedNative ) { <nl> return _nativeNext ( ) <nl> mmm a / stdlib / public / core / Index . swift <nl> ppp b / stdlib / public / core / Index . swift <nl> public protocol _IncrementableDefaultsType { <nl> / / / Return the next consecutive value in a discrete sequence of <nl> / / / ` Self ` values <nl> / / / <nl> - / / / Requires : ` self ` has a well - defined successor . <nl> + / / / - Requires : ` self ` has a well - defined successor . <nl> func successor ( ) - > Self <nl> } <nl> <nl> public protocol _BidirectionalIndexDefaultsType : _ForwardIndexType { <nl> / / / well - defined predecessor , ` self . predecessor ( ) . successor ( ) = = <nl> / / / self ` . <nl> / / / <nl> - / / / Requires : ` self ` has a well - defined predecessor . <nl> + / / / - Requires : ` self ` has a well - defined predecessor . <nl> func predecessor ( ) - > Self <nl> } <nl> <nl> mmm a / stdlib / public / core / Interval . swift . gyb <nl> ppp b / stdlib / public / core / Interval . swift . gyb <nl> public struct $ { Self } < T : Comparable > <nl> self = x <nl> } <nl> <nl> - / / / Construct an interval with the given bounds . Requires : ` start ` <nl> - / / / < = ` end ` . <nl> + / / / Construct an interval with the given bounds . <nl> + / / / <nl> + / / / - Requires : ` start < = end ` . <nl> public init ( _ start : T , _ end : T ) { <nl> _precondition ( end > = start , " Invalid $ { Self } bounds ( end < start ) " ) <nl> _start = start <nl> mmm a / stdlib / public / core / LazyCollection . swift . gyb <nl> ppp b / stdlib / public / core / LazyCollection . swift . gyb <nl> public struct $ { Self } < S : CollectionType $ { whereClause } > : CollectionType { <nl> <nl> / / / Access the element at ` position ` . <nl> / / / <nl> - / / / Requires : ` position ` is a valid position in ` self ` and <nl> - / / / ` position ! = endIndex ` . <nl> + / / / - Requires : ` position ` is a valid position in ` self ` and <nl> + / / / ` position ! = endIndex ` . <nl> public <nl> subscript ( position : S . Index ) - > S . Generator . Element { <nl> return _base [ position ] <nl> mmm a / stdlib / public / core / ManagedBuffer . swift <nl> ppp b / stdlib / public / core / ManagedBuffer . swift <nl> public struct ManagedBufferPointer < Value , Element > : Equatable { <nl> / / / object and a function that can be called on it to get the actual <nl> / / / number of allocated elements . <nl> / / / <nl> - / / / Requires : minimumCapacity > = 0 , and the type indicated by <nl> - / / / ` bufferClass ` is a non - ` @ objc ` class with no declared stored <nl> - / / / properties . 
The ` deinit ` of ` bufferClass ` must destroy its <nl> - / / / stored ` Value ` and any constructed ` Element ` s . <nl> + / / / - Requires : ` minimumCapacity > = 0 ` , and the type indicated by <nl> + / / / ` bufferClass ` is a non - ` @ objc ` class with no declared stored <nl> + / / / properties . The ` deinit ` of ` bufferClass ` must destroy its <nl> + / / / stored ` Value ` and any constructed ` Element ` s . <nl> public init ( <nl> bufferClass : AnyClass , <nl> minimumCapacity : Int , <nl> public struct ManagedBufferPointer < Value , Element > : Equatable { <nl> <nl> / / / Manage the given ` buffer ` . <nl> / / / <nl> - / / / Requires : ` buffer ` is an instance of a non - ` @ objc ` class whose <nl> - / / / ` deinit ` destroys its stored ` Value ` and any constructed <nl> - / / / ` Element ` s . <nl> + / / / - Requires : ` buffer ` is an instance of a non - ` @ objc ` class whose <nl> + / / / ` deinit ` destroys its stored ` Value ` and any constructed <nl> + / / / ` Element ` s . <nl> public init ( unsafeBufferObject buffer : AnyObject ) { <nl> ManagedBufferPointer . _checkValidBufferClass ( buffer . dynamicType ) <nl> <nl> public struct ManagedBufferPointer < Value , Element > : Equatable { <nl> / / / - parameter minimumCapacity : the minimum number of ` Element ` s that <nl> / / / must be able to be stored in the new buffer . <nl> / / / <nl> - / / / Requires : minimumCapacity > = 0 , and the type indicated by <nl> - / / / ` bufferClass ` is a non - ` @ objc ` class with no declared stored <nl> - / / / properties . The ` deinit ` of ` bufferClass ` must destroy its <nl> - / / / stored ` Value ` and any constructed ` Element ` s . <nl> + / / / - Requires : ` minimumCapacity > = 0 ` , and the type indicated by <nl> + / / / ` bufferClass ` is a non - ` @ objc ` class with no declared stored <nl> + / / / properties . The ` deinit ` of ` bufferClass ` must destroy its <nl> + / / / stored ` Value ` and any constructed ` Element ` s . <nl> internal init ( <nl> bufferClass : AnyClass , <nl> minimumCapacity : Int <nl> mmm a / stdlib / public / core / Map . swift . gyb <nl> ppp b / stdlib / public / core / Map . swift . gyb <nl> public struct MapSequenceGenerator < <nl> / / / Advance to the next element and return it , or ` nil ` if no next <nl> / / / element exists . <nl> / / / <nl> - / / / Requires : ` next ( ) ` has not been applied to a copy of ` self ` <nl> - / / / since the copy was made , and no preceding call to ` self . next ( ) ` <nl> - / / / has returned ` nil ` . <nl> + / / / - Requires : ` next ( ) ` has not been applied to a copy of ` self ` <nl> + / / / since the copy was made , and no preceding call to ` self . next ( ) ` <nl> + / / / has returned ` nil ` . <nl> public mutating func next ( ) - > T ? { <nl> let x = _base . next ( ) <nl> if x ! = nil { <nl> public struct MapCollectionView < Base : CollectionType , T > : CollectionType { <nl> <nl> / / / Access the element at ` position ` . <nl> / / / <nl> - / / / Requires : ` position ` is a valid position in ` self ` and <nl> - / / / ` position ! = endIndex ` . <nl> + / / / - Requires : ` position ` is a valid position in ` self ` and <nl> + / / / ` position ! = endIndex ` . <nl> public subscript ( position : Base . Index ) - > T { <nl> return _transform ( _base [ position ] ) <nl> } <nl> mmm a / stdlib / public / core / Mirror . swift <nl> ppp b / stdlib / public / core / Mirror . swift <nl> extension DictionaryLiteral : CollectionType { <nl> <nl> / / / Access the element indicated by ` position ` . 
<nl> / / / <nl> - / / / Requires : ` position > = 0 & & position < endIndex ` . <nl> + / / / - Requires : ` position > = 0 & & position < endIndex ` . <nl> / / / <nl> - / / / - Complexity : O ( 1 ) <nl> + / / / - complexity : O ( 1 ) . <nl> public subscript ( position : Int ) - > Element { <nl> return elements [ position ] <nl> } <nl> mmm a / stdlib / public / core / Range . swift <nl> ppp b / stdlib / public / core / Range . swift <nl> public struct Range < <nl> <nl> / / / Access the element at ` position ` . <nl> / / / <nl> - / / / Requires : ` position ` is a valid position in ` self ` and <nl> - / / / ` position ! = endIndex ` . <nl> + / / / - Requires : ` position ` is a valid position in ` self ` and <nl> + / / / ` position ! = endIndex ` . <nl> public subscript ( position : T ) - > T { <nl> _debugPrecondition ( position ! = endIndex , " Index out of range " ) <nl> return position <nl> public func . . . < Pos : ForwardIndexType > ( <nl> <nl> / / / Forms a half - open range that contains ` start ` , but not ` end ` . <nl> / / / <nl> - / / / Requires : ` start < = end ` <nl> + / / / - Requires : ` start < = end ` . <nl> @ transparent <nl> public func . . < < Pos : ForwardIndexType where Pos : Comparable > ( <nl> start : Pos , end : Pos <nl> mmm a / stdlib / public / core / RangeReplaceableCollectionType . swift <nl> ppp b / stdlib / public / core / RangeReplaceableCollectionType . swift <nl> public func extend < <nl> } <nl> <nl> / / / Remove an element from the end of ` x ` in O ( 1 ) . <nl> - / / / Requires : ` x ` is nonempty <nl> + / / / <nl> + / / / - Requires : ` x ` is nonempty . <nl> public func removeLast < <nl> C : RangeReplaceableCollectionType where C . Index : BidirectionalIndexType <nl> > ( <nl> mmm a / stdlib / public / core / Repeat . swift <nl> ppp b / stdlib / public / core / Repeat . swift <nl> public struct Repeat < T > : CollectionType { <nl> <nl> / / / Access the element at ` position ` . <nl> / / / <nl> - / / / Requires : ` position ` is a valid position in ` self ` and <nl> - / / / ` position ! = endIndex ` . <nl> + / / / - Requires : ` position ` is a valid position in ` self ` and <nl> + / / / ` position ! = endIndex ` . <nl> public subscript ( position : Int ) - > T { <nl> _precondition ( position > = 0 & & position < count , " Index out of range " ) <nl> return repeatedValue <nl> mmm a / stdlib / public / core / Reverse . swift . gyb <nl> ppp b / stdlib / public / core / Reverse . swift . gyb <nl> public struct $ { Index } < I : $ { IndexProtocol } > : $ { IndexProtocol } { <nl> <nl> / / / Returns the next consecutive value after ` self ` . <nl> / / / <nl> - / / / Requires : the next value is representable . <nl> + / / / - Requires : The next value is representable . <nl> public func successor ( ) - > $ { Index } { <nl> return $ { Index } ( _base . predecessor ( ) ) <nl> } <nl> <nl> / / / Returns the previous consecutive value before ` self ` . <nl> / / / <nl> - / / / Requires : the previous value is representable . <nl> + / / / - Requires : The previous value is representable . <nl> public func predecessor ( ) - > $ { Index } { <nl> return $ { Index } ( _base . successor ( ) ) <nl> } <nl> public struct $ { View } < <nl> <nl> / / / Access the element at ` position ` . <nl> / / / <nl> - / / / Requires : ` position ` is a valid position in ` self ` and <nl> - / / / ` position ! = endIndex ` . <nl> + / / / - Requires : ` position ` is a valid position in ` self ` and <nl> + / / / ` position ! = endIndex ` . <nl> public subscript ( position : Index ) - > T . 
Generator . Element { <nl> return _base [ position . _base . predecessor ( ) ] <nl> } <nl> mmm a / stdlib / public / core / Sequence . swift <nl> ppp b / stdlib / public / core / Sequence . swift <nl> public protocol GeneratorType { <nl> / / / Advance to the next element and return it , or ` nil ` if no next <nl> / / / element exists . <nl> / / / <nl> - / / / Requires : ` next ( ) ` has not been applied to a copy of ` self ` <nl> - / / / since the copy was made , and no preceding call to ` self . next ( ) ` <nl> - / / / has returned ` nil ` . Specific implementations of this protocol <nl> - / / / are encouraged to respond to violations of this requirement by <nl> - / / / calling ` preconditionFailure ( " . . . " ) ` . <nl> + / / / - Requires : ` next ( ) ` has not been applied to a copy of ` self ` <nl> + / / / since the copy was made , and no preceding call to ` self . next ( ) ` <nl> + / / / has returned ` nil ` . Specific implementations of this protocol <nl> + / / / are encouraged to respond to violations of this requirement by <nl> + / / / calling ` preconditionFailure ( " . . . " ) ` . <nl> mutating func next ( ) - > Element ? <nl> } <nl> <nl> public struct GeneratorSequence < <nl> / / / Advance to the next element and return it , or ` nil ` if no next <nl> / / / element exists . <nl> / / / <nl> - / / / Requires : ` next ( ) ` has not been applied to a copy of ` self ` <nl> - / / / since the copy was made , and no preceding call to ` self . next ( ) ` <nl> - / / / has returned ` nil ` . <nl> + / / / - Requires : ` next ( ) ` has not been applied to a copy of ` self ` <nl> + / / / since the copy was made , and no preceding call to ` self . next ( ) ` <nl> + / / / has returned ` nil ` . <nl> public mutating func next ( ) - > G . Element ? { <nl> return _base . next ( ) <nl> } <nl> mmm a / stdlib / public / core / SequenceAlgorithms . swift . gyb <nl> ppp b / stdlib / public / core / SequenceAlgorithms . swift . gyb <nl> extension SequenceType { <nl> % { <nl> if preds : <nl> orderingRequirement = " " " <nl> - / / / Requires : ` isOrderedBefore ` is a [ strict weak ordering ] ( http : / / en . wikipedia . org / wiki / Strict_weak_order # Strict_weak_orderings ) . <nl> - / / / over ` self ` . " " " <nl> + / / / - Requires : ` isOrderedBefore ` is a <nl> + / / / [ strict weak ordering ] ( http : / / en . wikipedia . org / wiki / Strict_weak_order # Strict_weak_orderings ) . <nl> + / / / over ` self ` . " " " <nl> else : <nl> orderingRequirement = " " <nl> } % <nl> if preds : <nl> / / / ` other ` , using ` isEquivalent ` as the equivalence test . Return true if <nl> / / / ` other ` is empty . <nl> / / / <nl> - / / / Requires : ` isEquivalent ` is an [ equivalence relation ] ( http : / / en . wikipedia . org / wiki / Equivalence_relation ) " " " <nl> + / / / - Requires : ` isEquivalent ` is an <nl> + / / / [ equivalence relation ] ( http : / / en . wikipedia . org / wiki / Equivalence_relation ) . " " " <nl> else : <nl> comment = " " " <nl> / / / Return true iff the the initial elements of ` self ` are equal to ` prefix ` . <nl> if preds : <nl> / / / Return true iff ` self ` and ` other ` contain equivalent elements , using <nl> / / / ` isEquivalent ` as the equivalence test . <nl> / / / <nl> - / / / Requires : ` isEquivalent ` is an [ equivalence relation ] ( http : / / en . wikipedia . org / wiki / Equivalence_relation ) " " " <nl> + / / / - Requires : ` isEquivalent ` is an <nl> + / / / [ equivalence relation ] ( http : / / en . wikipedia . org / wiki / Equivalence_relation ) . 
" " " <nl> else : <nl> comment = " " " <nl> / / / Return ` true ` iff ` self ` and ` other ` contain the same elements in the <nl> if preds : <nl> / / / to present to the end - user , you should use ` String ` APIs that perform <nl> / / / localized comparison . <nl> / / / <nl> - / / / Requires : ` isOrderedBefore ` is a [ strict weak ordering ] ( http : / / en . wikipedia . org / wiki / Strict_weak_order # Strict_weak_orderings ) <nl> - / / / over the elements of ` self ` and ` other ` . " " " <nl> + / / / - Requires : ` isOrderedBefore ` is a <nl> + / / / [ strict weak ordering ] ( http : / / en . wikipedia . org / wiki / Strict_weak_order # Strict_weak_orderings ) <nl> + / / / over the elements of ` self ` and ` other ` . " " " <nl> else : <nl> comment = " " " <nl> / / / Return true iff ` self ` precedes ` other ` in a lexicographical ( " dictionary " ) <nl> mmm a / stdlib / public / core / SliceBuffer . swift <nl> ppp b / stdlib / public / core / SliceBuffer . swift <nl> struct _SliceBuffer < T > : _ArrayBufferType { <nl> / / / Replace the given subRange with the first newCount elements of <nl> / / / the given collection . <nl> / / / <nl> - / / / Requires : this buffer is backed by a uniquely - referenced <nl> - / / / _ContiguousArrayBuffer , <nl> - / / / <nl> - / / / Requires : insertCount < = numericCast ( newValues . count ( ) ) <nl> - / / / <nl> + / / / - Requires : This buffer is backed by a uniquely - referenced <nl> + / / / ` _ContiguousArrayBuffer ` and <nl> + / / / ` insertCount < = numericCast ( newValues . count ( ) ) ` . <nl> public <nl> mutating func replace < C : CollectionType where C . Generator . Element = = T > ( <nl> subRange subRange : Range < Int > , <nl> struct _SliceBuffer < T > : _ArrayBufferType { <nl> <nl> / / / Access the element at ` position ` . <nl> / / / <nl> - / / / Requires : ` position ` is a valid position in ` self ` and <nl> - / / / ` position ! = endIndex ` . <nl> + / / / - Requires : ` position ` is a valid position in ` self ` and <nl> + / / / ` position ! = endIndex ` . <nl> public subscript ( position : Int ) - > T { <nl> get { <nl> return getElement ( position , hoistedIsNativeNoTypeCheckBuffer : true ) <nl> mmm a / stdlib / public / core / Sort . swift . gyb <nl> ppp b / stdlib / public / core / Sort . swift . gyb <nl> def cmp ( a , b , p ) : <nl> % { <nl> if p : <nl> orderingRequirement = " " " \ <nl> - / / / Requires : ` isOrderedBefore ` is a [ strict weak ordering ] ( http : / / en . wikipedia . org / wiki / Strict_weak_order # Strict_weak_orderings ) <nl> - / / / over ` elements ` . " " " <nl> + / / / - Requires : ` isOrderedBefore ` is a <nl> + / / / [ strict weak ordering ] ( http : / / en . wikipedia . org / wiki / Strict_weak_order # Strict_weak_orderings ) <nl> + / / / over ` elements ` . " " " <nl> according = " according to ` isOrderedBefore ` " <nl> else : <nl> orderingRequirement = " " " \ <nl> - / / / Requires : The less - than operator ( ` func < ` ) defined in the ` Comparable ` <nl> - / / / conformance is a [ strict weak ordering ] ( http : / / en . wikipedia . org / wiki / Strict_weak_order # Strict_weak_orderings ) <nl> - / / / over ` elements ` . " " " <nl> + / / / - Requires : The less - than operator ( ` func < ` ) defined in the ` Comparable ` <nl> + / / / conformance is a [ strict weak ordering ] ( http : / / en . wikipedia . org / wiki / Strict_weak_order # Strict_weak_orderings ) <nl> + / / / over ` elements ` . " " " <nl> according = " " <nl> } % <nl> <nl> mmm a / stdlib / public / core / StaticString . 
swift <nl> ppp b / stdlib / public / core / StaticString . swift <nl> public struct StaticString <nl> <nl> / / / A pointer to the beginning of UTF - 8 code units <nl> / / / <nl> - / / / Requires : ` self ` stores a pointer to either ASCII or UTF - 8 code <nl> - / / / units . <nl> + / / / - Requires : ` self ` stores a pointer to either ASCII or UTF - 8 code <nl> + / / / units . <nl> @ transparent <nl> public var utf8Start : UnsafePointer < UInt8 > { <nl> _precondition ( <nl> public struct StaticString <nl> <nl> / / / The stored Unicode scalar value <nl> / / / <nl> - / / / Requires : ` self ` stores a single Unicode scalar value . <nl> + / / / - Requires : ` self ` stores a single Unicode scalar value . <nl> @ transparent <nl> public var unicodeScalar : UnicodeScalar { <nl> _precondition ( <nl> mmm a / stdlib / public / core / String . swift <nl> ppp b / stdlib / public / core / String . swift <nl> extension String { <nl> <nl> / / / Access the ` Character ` at ` position ` . <nl> / / / <nl> - / / / Requires : ` position ` is a valid position in ` self . characters ` <nl> - / / / and ` position ! = endIndex ` . <nl> + / / / - Requires : ` position ` is a valid position in ` self . characters ` <nl> + / / / and ` position ! = endIndex ` . <nl> public subscript ( i : Index ) - > Character { return characters [ i ] } <nl> <nl> @ availability ( * , unavailable , message = " cannot subscript String with an Int " ) <nl> extension String . Index { <nl> / / / Construct the position in ` characters ` that corresponds exactly to <nl> / / / ` unicodeScalarIndex ` . If no such position exists , the result is ` nil ` . <nl> / / / <nl> - / / / Requires : ` unicodeScalarIndex ` is an element of <nl> - / / / ` characters . unicodeScalars . indices ` . <nl> + / / / - Requires : ` unicodeScalarIndex ` is an element of <nl> + / / / ` characters . unicodeScalars . indices ` . <nl> public init ? ( <nl> _ unicodeScalarIndex : String . UnicodeScalarIndex , <nl> within characters : String <nl> extension String . Index { <nl> / / / Construct the position in ` characters ` that corresponds exactly to <nl> / / / ` utf16Index ` . If no such position exists , the result is ` nil ` . <nl> / / / <nl> - / / / Requires : ` utf16Index ` is an element of <nl> - / / / ` characters . utf16 . indices ` . <nl> + / / / - Requires : ` utf16Index ` is an element of <nl> + / / / ` characters . utf16 . indices ` . <nl> public init ? ( <nl> _ utf16Index : String . UTF16Index , <nl> within characters : String <nl> extension String . Index { <nl> / / / Construct the position in ` characters ` that corresponds exactly to <nl> / / / ` utf8Index ` . If no such position exists , the result is ` nil ` . <nl> / / / <nl> - / / / Requires : ` utf8Index ` is an element of <nl> - / / / ` characters . utf8 . indices ` . <nl> + / / / - Requires : ` utf8Index ` is an element of <nl> + / / / ` characters . utf8 . indices ` . <nl> public init ? ( <nl> _ utf8Index : String . UTF8Index , <nl> within characters : String <nl> extension String . Index { <nl> / / / Return the position in ` utf8 ` that corresponds exactly <nl> / / / to ` self ` . <nl> / / / <nl> - / / / Requires : ` self ` is an element of ` String ( utf8 ) . indices ` . <nl> + / / / - Requires : ` self ` is an element of ` String ( utf8 ) . indices ` . <nl> public func samePositionIn ( <nl> utf8 : String . UTF8View <nl> ) - > String . UTF8View . Index { <nl> extension String . Index { <nl> / / / Return the position in ` utf16 ` that corresponds exactly <nl> / / / to ` self ` . 
<nl> / / / <nl> - / / / Requires : ` self ` is an element of ` String ( utf16 ) . indices ` . <nl> + / / / - Requires : ` self ` is an element of ` String ( utf16 ) . indices ` . <nl> public func samePositionIn ( <nl> utf16 : String . UTF16View <nl> ) - > String . UTF16View . Index { <nl> extension String . Index { <nl> / / / Return the position in ` unicodeScalars ` that corresponds exactly <nl> / / / to ` self ` . <nl> / / / <nl> - / / / Requires : ` self ` is an element of ` String ( unicodeScalars ) . indices ` . <nl> + / / / - Requires : ` self ` is an element of ` String ( unicodeScalars ) . indices ` . <nl> public func samePositionIn ( <nl> unicodeScalars : String . UnicodeScalarView <nl> ) - > String . UnicodeScalarView . Index { <nl> mmm a / stdlib / public / core / StringCharacterView . swift <nl> ppp b / stdlib / public / core / StringCharacterView . swift <nl> extension String . CharacterView : CollectionType { <nl> <nl> / / / Returns the next consecutive value after ` self ` . <nl> / / / <nl> - / / / Requires : the next value is representable . <nl> + / / / - Requires : The next value is representable . <nl> public func successor ( ) - > Index { <nl> _precondition ( _base ! = _base . _viewEndIndex , " can not increment endIndex " ) <nl> return Index ( _base : _endBase ) <nl> extension String . CharacterView : CollectionType { <nl> <nl> / / / Returns the previous consecutive value before ` self ` . <nl> / / / <nl> - / / / Requires : the previous value is representable . <nl> + / / / - Requires : The previous value is representable . <nl> public func predecessor ( ) - > Index { <nl> _precondition ( _base ! = _base . _viewStartIndex , <nl> " can not decrement startIndex " ) <nl> extension String . CharacterView : CollectionType { <nl> <nl> / / / Access the ` Character ` at ` position ` . <nl> / / / <nl> - / / / Requires : ` position ` is a valid position in ` self ` and <nl> - / / / ` position ! = endIndex ` . <nl> + / / / - Requires : ` position ` is a valid position in ` self ` and <nl> + / / / ` position ! = endIndex ` . <nl> public subscript ( i : Index ) - > Character { <nl> return Character ( String ( unicodeScalars [ i . _base . . < i . _endBase ] ) ) <nl> } <nl> mmm a / stdlib / public / core / StringUTF16 . swift <nl> ppp b / stdlib / public / core / StringUTF16 . swift <nl> extension String { <nl> <nl> / / / Access the element at ` position ` . <nl> / / / <nl> - / / / Requires : ` position ` is a valid position in ` self ` and <nl> - / / / ` position ! = endIndex ` . <nl> + / / / - Requires : ` position ` is a valid position in ` self ` and <nl> + / / / ` position ! = endIndex ` . <nl> public subscript ( i : Index ) - > UTF16 . CodeUnit { <nl> let position = i . _offset <nl> _precondition ( position > = 0 & & position < _length , <nl> extension String . UTF16View . Index { <nl> / / / Construct the position in ` utf16 ` that corresponds exactly to <nl> / / / ` utf8Index ` . If no such position exists , the result is ` nil ` . <nl> / / / <nl> - / / / Requires : ` utf8Index ` is an element of <nl> - / / / ` String ( utf16 ) ! . utf8 . indices ` . <nl> + / / / - Requires : ` utf8Index ` is an element of <nl> + / / / ` String ( utf16 ) ! . utf8 . indices ` . <nl> public init ? ( <nl> _ utf8Index : String . UTF8Index , within utf16 : String . UTF16View <nl> ) { <nl> extension String . UTF16View . Index { <nl> / / / Construct the position in ` utf16 ` that corresponds exactly to <nl> / / / ` unicodeScalarIndex ` . 
<nl> / / / <nl> - / / / Requires : ` unicodeScalarIndex ` is an element of <nl> - / / / ` String ( utf16 ) ! . unicodeScalars . indices ` . <nl> + / / / - Requires : ` unicodeScalarIndex ` is an element of <nl> + / / / ` String ( utf16 ) ! . unicodeScalars . indices ` . <nl> public init ( <nl> _ unicodeScalarIndex : String . UnicodeScalarIndex , <nl> within utf16 : String . UTF16View ) { <nl> extension String . UTF16View . Index { <nl> / / / Construct the position in ` utf16 ` that corresponds exactly to <nl> / / / ` characterIndex ` . <nl> / / / <nl> - / / / Requires : ` characterIndex ` is an element of <nl> - / / / ` String ( utf16 ) ! . indices ` . <nl> + / / / - Requires : ` characterIndex ` is an element of <nl> + / / / ` String ( utf16 ) ! . indices ` . <nl> public init ( _ characterIndex : String . Index , within utf16 : String . UTF16View ) { <nl> _offset = characterIndex . _utf16Index <nl> } <nl> extension String . UTF16View . Index { <nl> / / / Return the position in ` utf8 ` that corresponds exactly <nl> / / / to ` self ` , or if no such position exists , ` nil ` . <nl> / / / <nl> - / / / Requires : ` self ` is an element of <nl> - / / / ` String ( utf8 ) ! . utf16 . indices ` . <nl> + / / / - Requires : ` self ` is an element of <nl> + / / / ` String ( utf8 ) ! . utf16 . indices ` . <nl> public func samePositionIn ( <nl> utf8 : String . UTF8View <nl> ) - > String . UTF8View . Index ? { <nl> extension String . UTF16View . Index { <nl> / / / Return the position in ` unicodeScalars ` that corresponds exactly <nl> / / / to ` self ` , or if no such position exists , ` nil ` . <nl> / / / <nl> - / / / Requires : ` self ` is an element of <nl> - / / / ` String ( unicodeScalars ) . utf16 . indices ` . <nl> + / / / - Requires : ` self ` is an element of <nl> + / / / ` String ( unicodeScalars ) . utf16 . indices ` . <nl> public func samePositionIn ( <nl> unicodeScalars : String . UnicodeScalarView <nl> ) - > String . UnicodeScalarIndex ? { <nl> extension String . UTF16View . Index { <nl> / / / Return the position in ` characters ` that corresponds exactly <nl> / / / to ` self ` , or if no such position exists , ` nil ` . <nl> / / / <nl> - / / / Requires : ` self ` is an element of ` characters . utf16 . indices ` . <nl> + / / / - Requires : ` self ` is an element of ` characters . utf16 . indices ` . <nl> public func samePositionIn ( <nl> characters : String <nl> ) - > String . Index ? { <nl> mmm a / stdlib / public / core / StringUTF8 . swift <nl> ppp b / stdlib / public / core / StringUTF8 . swift <nl> extension String { <nl> <nl> / / / Returns the next consecutive value after ` self ` . <nl> / / / <nl> - / / / Requires : the next value is representable . <nl> + / / / - Requires : The next value is representable . <nl> public func successor ( ) - > Index { <nl> let currentUnit = UTF8 . CodeUnit ( truncatingBitPattern : _buffer ) <nl> let hiNibble = currentUnit > > 4 <nl> extension String { <nl> <nl> / / / Access the element at ` position ` . <nl> / / / <nl> - / / / Requires : ` position ` is a valid position in ` self ` and <nl> - / / / ` position ! = endIndex ` . <nl> + / / / - Requires : ` position ` is a valid position in ` self ` and <nl> + / / / ` position ! = endIndex ` . <nl> public subscript ( position : Index ) - > UTF8 . CodeUnit { <nl> let result : UTF8 . CodeUnit = numericCast ( position . _buffer & 0xFF ) <nl> _precondition ( result ! = 0xFF , " can not subscript using endIndex " ) <nl> extension String . UTF8View . 
Index { <nl> / / / Construct the position in ` utf8 ` that corresponds exactly to <nl> / / / ` utf16Index ` . If no such position exists , the result is ` nil ` . <nl> / / / <nl> - / / / Requires : ` utf8Index ` is an element of <nl> - / / / ` String ( utf16 ) ! . utf8 . indices ` . <nl> + / / / - Requires : ` utf8Index ` is an element of <nl> + / / / ` String ( utf16 ) ! . utf8 . indices ` . <nl> public init ? ( _ utf16Index : String . UTF16Index , within utf8 : String . UTF8View ) { <nl> let utf16 = String . UTF16View ( utf8 . _core ) <nl> <nl> extension String . UTF8View . Index { <nl> / / / Construct the position in ` utf8 ` that corresponds exactly to <nl> / / / ` unicodeScalarIndex ` . <nl> / / / <nl> - / / / Requires : ` unicodeScalarIndex ` is an element of <nl> - / / / ` String ( utf8 ) ! . unicodeScalars . indices ` . <nl> + / / / - Requires : ` unicodeScalarIndex ` is an element of <nl> + / / / ` String ( utf8 ) ! . unicodeScalars . indices ` . <nl> public init ( <nl> _ unicodeScalarIndex : String . UnicodeScalarIndex , <nl> within utf8 : String . UTF8View <nl> extension String . UTF8View . Index { <nl> / / / Construct the position in ` utf8 ` that corresponds exactly to <nl> / / / ` characterIndex ` . <nl> / / / <nl> - / / / Requires : ` characterIndex ` is an element of <nl> - / / / ` String ( utf8 ) ! . indices ` . <nl> + / / / - Requires : ` characterIndex ` is an element of <nl> + / / / ` String ( utf8 ) ! . indices ` . <nl> public init ( _ characterIndex : String . Index , within utf8 : String . UTF8View ) { <nl> self . init ( utf8 . _core , _utf16Offset : characterIndex . _base . _position ) <nl> } <nl> extension String . UTF8View . Index { <nl> / / / Return the position in ` utf16 ` that corresponds exactly <nl> / / / to ` self ` , or if no such position exists , ` nil ` . <nl> / / / <nl> - / / / Requires : ` self ` is an element of ` String ( utf16 ) ! . utf8 . indices ` . <nl> + / / / - Requires : ` self ` is an element of ` String ( utf16 ) ! . utf8 . indices ` . <nl> public func samePositionIn ( <nl> utf16 : String . UTF16View <nl> ) - > String . UTF16View . Index ? { <nl> extension String . UTF8View . Index { <nl> / / / Return the position in ` unicodeScalars ` that corresponds exactly <nl> / / / to ` self ` , or if no such position exists , ` nil ` . <nl> / / / <nl> - / / / Requires : ` self ` is an element of <nl> - / / / ` String ( unicodeScalars ) . utf8 . indices ` . <nl> + / / / - Requires : ` self ` is an element of <nl> + / / / ` String ( unicodeScalars ) . utf8 . indices ` . <nl> public func samePositionIn ( <nl> unicodeScalars : String . UnicodeScalarView <nl> ) - > String . UnicodeScalarIndex ? { <nl> extension String . UTF8View . Index { <nl> / / / Return the position in ` characters ` that corresponds exactly <nl> / / / to ` self ` , or if no such position exists , ` nil ` . <nl> / / / <nl> - / / / Requires : ` self ` is an element of ` characters . utf8 . indices ` . <nl> + / / / - Requires : ` self ` is an element of ` characters . utf8 . indices ` . <nl> public func samePositionIn ( <nl> characters : String <nl> ) - > String . Index ? { <nl> mmm a / stdlib / public / core / StringUnicodeScalarView . swift <nl> ppp b / stdlib / public / core / StringUnicodeScalarView . swift <nl> extension String { <nl> <nl> / / / Returns the next consecutive value after ` self ` . <nl> / / / <nl> - / / / Requires : the next value is representable . <nl> + / / / - Requires : The next value is representable . 
<nl> public func successor ( ) - > Index { <nl> var scratch = _ScratchGenerator ( _core , _position ) <nl> var decoder = UTF16 ( ) <nl> extension String { <nl> <nl> / / / Returns the previous consecutive value before ` self ` . <nl> / / / <nl> - / / / Requires : the previous value is representable . <nl> + / / / - Requires : The previous value is representable . <nl> public func predecessor ( ) - > Index { <nl> var i = _position <nl> let codeUnit = _core [ - - i ] <nl> extension String { <nl> <nl> / / / Access the element at ` position ` . <nl> / / / <nl> - / / / Requires : ` position ` is a valid position in ` self ` and <nl> - / / / ` position ! = endIndex ` . <nl> + / / / - Requires : ` position ` is a valid position in ` self ` and <nl> + / / / ` position ! = endIndex ` . <nl> public subscript ( position : Index ) - > UnicodeScalar { <nl> var scratch = _ScratchGenerator ( _core , position . _position ) <nl> var decoder = UTF16 ( ) <nl> extension String { <nl> / / / Advance to the next element and return it , or ` nil ` if no next <nl> / / / element exists . <nl> / / / <nl> - / / / Requires : no preceding call to ` self . next ( ) ` has returned <nl> - / / / ` nil ` . <nl> + / / / - Requires : No preceding call to ` self . next ( ) ` has returned <nl> + / / / ` nil ` . <nl> public mutating func next ( ) - > UnicodeScalar ? { <nl> var result : UnicodeDecodingResult <nl> if _baseSet { <nl> extension String . UnicodeScalarIndex { <nl> / / / Construct the position in ` unicodeScalars ` that corresponds exactly to <nl> / / / ` utf16Index ` . If no such position exists , the result is ` nil ` . <nl> / / / <nl> - / / / Requires : ` utf16Index ` is an element of <nl> - / / / ` String ( unicodeScalars ) . utf16 . indices ` . <nl> + / / / - Requires : ` utf16Index ` is an element of <nl> + / / / ` String ( unicodeScalars ) . utf16 . indices ` . <nl> public init ? ( <nl> _ utf16Index : String . UTF16Index , <nl> within unicodeScalars : String . UnicodeScalarView <nl> extension String . UnicodeScalarIndex { <nl> / / / Construct the position in ` unicodeScalars ` that corresponds exactly to <nl> / / / ` utf8Index ` . If no such position exists , the result is ` nil ` . <nl> / / / <nl> - / / / Requires : ` utf8Index ` is an element of <nl> - / / / ` String ( unicodeScalars ) . utf8 . indices ` . <nl> + / / / - Requires : ` utf8Index ` is an element of <nl> + / / / ` String ( unicodeScalars ) . utf8 . indices ` . <nl> public init ? ( <nl> _ utf8Index : String . UTF8Index , <nl> within unicodeScalars : String . UnicodeScalarView <nl> extension String . UnicodeScalarIndex { <nl> / / / Construct the position in ` unicodeScalars ` that corresponds <nl> / / / exactly to ` characterIndex ` . <nl> / / / <nl> - / / / Requires : ` characterIndex ` is an element of <nl> - / / / ` String ( unicodeScalars ) . indices ` . <nl> + / / / - Requires : ` characterIndex ` is an element of <nl> + / / / ` String ( unicodeScalars ) . indices ` . <nl> public init ( <nl> _ characterIndex : String . Index , <nl> within unicodeScalars : String . UnicodeScalarView <nl> extension String . UnicodeScalarIndex { <nl> / / / Return the position in ` utf8 ` that corresponds exactly <nl> / / / to ` self ` . <nl> / / / <nl> - / / / Requires : ` self ` is an element of ` String ( utf8 ) ! . indices ` . <nl> + / / / - Requires : ` self ` is an element of ` String ( utf8 ) ! . indices ` . <nl> public func samePositionIn ( utf8 : String . UTF8View ) - > String . UTF8View . Index { <nl> return String . UTF8View . 
Index ( self , within : utf8 ) <nl> } <nl> extension String . UnicodeScalarIndex { <nl> / / / Return the position in ` utf16 ` that corresponds exactly <nl> / / / to ` self ` . <nl> / / / <nl> - / / / Requires : ` self ` is an element of ` String ( utf16 ) ! . indices ` . <nl> + / / / - Requires : ` self ` is an element of ` String ( utf16 ) ! . indices ` . <nl> public func samePositionIn ( <nl> utf16 : String . UTF16View <nl> ) - > String . UTF16View . Index { <nl> extension String . UnicodeScalarIndex { <nl> / / / Return the position in ` characters ` that corresponds exactly <nl> / / / to ` self ` , or if no such position exists , ` nil ` . <nl> / / / <nl> - / / / Requires : ` self ` is an element of ` characters . unicodeScalars . indices ` . <nl> + / / / - Requires : ` self ` is an element of ` characters . unicodeScalars . indices ` . <nl> public func samePositionIn ( characters : String ) - > String . Index ? { <nl> return String . Index ( self , within : characters ) <nl> } <nl> mmm a / stdlib / public / core / Unicode . swift <nl> ppp b / stdlib / public / core / Unicode . swift <nl> extension UTF16 { <nl> / / / Return the high surrogate code unit of a [ surrogate pair ] ( http : / / www . unicode . org / glossary / # surrogate_pair ) representing <nl> / / / ` x ` . <nl> / / / <nl> - / / / Requires : ` width ( x ) = = 2 ` <nl> + / / / - Requires : ` width ( x ) = = 2 ` . <nl> public static func leadSurrogate ( x : UnicodeScalar ) - > UTF16 . CodeUnit { <nl> _precondition ( width ( x ) = = 2 ) <nl> return UTF16 . CodeUnit ( ( x . value - 0x1_0000 ) > > ( 10 as UInt32 ) ) + 0xD800 <nl> extension UTF16 { <nl> / / / Return the low surrogate code unit of a [ surrogate pair ] ( http : / / www . unicode . org / glossary / # surrogate_pair ) representing <nl> / / / ` x ` . <nl> / / / <nl> - / / / Requires : ` width ( x ) = = 2 ` <nl> + / / / - Requires : ` width ( x ) = = 2 ` . <nl> public static func trailSurrogate ( x : UnicodeScalar ) - > UTF16 . CodeUnit { <nl> _precondition ( width ( x ) = = 2 ) <nl> return UTF16 . CodeUnit ( <nl> mmm a / stdlib / public / core / UnicodeScalar . swift <nl> ppp b / stdlib / public / core / UnicodeScalar . swift <nl> public struct UnicodeScalar : <nl> <nl> / / / Create an instance with numeric value ` v ` . <nl> / / / <nl> - / / / Requires : ` v ` is a valid Unicode scalar value . <nl> + / / / - Requires : ` v ` is a valid Unicode scalar value . <nl> public init ( _ v : UInt32 ) { <nl> / / Unicode 6 . 3 . 0 : <nl> / / <nl> public struct UnicodeScalar : <nl> <nl> / / / Create an instance with numeric value ` v ` . <nl> / / / <nl> - / / / Requires : ` v ` is a valid Unicode scalar value . <nl> + / / / - Requires : ` v ` is a valid Unicode scalar value . <nl> public init ( _ v : UInt16 ) { <nl> self = UnicodeScalar ( UInt32 ( v ) ) <nl> } <nl> extension UnicodeScalar : Hashable { <nl> extension UnicodeScalar { <nl> / / / Construct with value ` v ` . <nl> / / / <nl> - / / / Requires : ` v ` is a valid unicode scalar value . <nl> + / / / - Requires : ` v ` is a valid unicode scalar value . <nl> public init ( _ v : Int ) { <nl> self = UnicodeScalar ( UInt32 ( v ) ) <nl> } <nl> extension UnicodeScalar { <nl> extension UInt8 { <nl> / / / Construct with value ` v . value ` . <nl> / / / <nl> - / / / Requires : ` v . value ` can be represented as ASCII ( 0 . . < 128 ) . <nl> + / / / - Requires : ` v . value ` can be represented as ASCII ( 0 . . < 128 ) . <nl> public init ( ascii v : UnicodeScalar ) { <nl> _precondition ( v . 
value < 128 , <nl> " Code point value does not fit into ASCII " ) <nl> extension UInt8 { <nl> extension UInt32 { <nl> / / / Construct with value ` v . value ` . <nl> / / / <nl> - / / / Requires : ` v . value ` can be represented as UInt32 . <nl> + / / / - Requires : ` v . value ` can be represented as UInt32 . <nl> public init ( _ v : UnicodeScalar ) { <nl> self = v . value <nl> } <nl> extension UInt32 { <nl> extension UInt64 { <nl> / / / Construct with value ` v . value ` . <nl> / / / <nl> - / / / Requires : ` v . value ` can be represented as UInt64 . <nl> + / / / - Requires : ` v . value ` can be represented as UInt64 . <nl> public init ( _ v : UnicodeScalar ) { <nl> self = UInt64 ( v . value ) <nl> } <nl> extension UnicodeScalar . UTF16View : CollectionType { <nl> <nl> / / / Access the code unit at ` position ` . <nl> / / / <nl> - / / / Requires : ` position ` is a valid position in ` self ` and <nl> - / / / ` position ! = endIndex ` . <nl> + / / / - Requires : ` position ` is a valid position in ` self ` and <nl> + / / / ` position ! = endIndex ` . <nl> subscript ( position : Int ) - > UTF16 . CodeUnit { <nl> return position = = 0 ? ( <nl> endIndex = = 1 ? UTF16 . CodeUnit ( value . value ) : UTF16 . leadSurrogate ( value ) <nl> mmm a / stdlib / public / core / UnitTestArrayBuffer . swift <nl> ppp b / stdlib / public / core / UnitTestArrayBuffer . swift <nl> public struct _UnitTestArrayBuffer < T > : _ArrayBufferType { <nl> / / / Replace the given subRange with the first newCount elements of <nl> / / / the given collection . <nl> / / / <nl> - / / / Requires : this buffer is backed by a uniquely - referenced <nl> - / / / _UnitTestArrayBuffer <nl> + / / / - Requires : This buffer is backed by a uniquely - referenced <nl> + / / / ` _UnitTestArrayBuffer ` . <nl> public mutating func replace < <nl> C : CollectionType where C . Generator . Element = = Element <nl> > ( <nl> public struct _UnitTestArrayBuffer < T > : _ArrayBufferType { <nl> } <nl> <nl> / / / Return true if the buffer stores only elements of type ` U ` . <nl> - / / / Requires : ` U ` is a class or ` @ objc ` existential . O ( N ) <nl> + / / / - Requires : ` U ` is a class or ` @ objc ` existential . O ( N ) . <nl> func storesOnlyElementsOfType < U > ( <nl> _ : U . Type <nl> ) - > Bool { <nl> mmm a / stdlib / public / core / UnsafePointer . swift . gyb <nl> ppp b / stdlib / public / core / UnsafePointer . swift . gyb <nl> public struct $ { Self } < T > <nl> / / / Use this for assigning ranges into later memory that may overlap <nl> / / / with the source range . <nl> / / / <nl> - / / / Requires : either ` source ` precedes ` self ` or follows ` self + count ` . <nl> + / / / - Requires : Either ` source ` precedes ` self ` or follows ` self + count ` . <nl> public func assignBackwardFrom ( source : $ { Self } , count : Int ) { <nl> _debugPrecondition ( <nl> count > = 0 , " $ { Self } . assignBackwardFrom with negative count " ) <nl> public struct $ { Self } < T > <nl> / / / the last value to the first . Use this for copying ranges into <nl> / / / later memory that may overlap with the source range . <nl> / / / <nl> - / / / Requires : either ` source ` precedes ` self ` or follows ` self + count ` . <nl> + / / / - Requires : Either ` source ` precedes ` self ` or follows ` self + count ` . <nl> public func moveInitializeBackwardFrom ( source : $ { Self } , count : Int ) { <nl> _debugPrecondition ( <nl> count > = 0 , " $ { Self } . 
moveInitializeBackwardFrom with negative count " ) <nl> public struct $ { Self } < T > <nl> / / / <nl> / / / - Precondition : The memory is not initialized . <nl> / / / <nl> - / / / Requires : ` self ` and ` source ` may not overlap . <nl> + / / / - Requires : ` self ` and ` source ` may not overlap . <nl> public func initializeFrom ( source : $ { Self } , count : Int ) { <nl> _debugPrecondition ( <nl> count > = 0 , " $ { Self } . initializeFrom with negative count " ) <nl> public struct $ { Self } < T > <nl> / / / Assign from ` count ` values beginning at ` source ` into initialized <nl> / / / memory , transforming the source values into raw memory . <nl> / / / <nl> - / / / Requires : the ` self ` and ` source ` ranges may not overlap . <nl> + / / / - Requires : The ` self ` and ` source ` ranges may not overlap . <nl> public func moveAssignFrom ( source : $ { Self } , count : Int ) { <nl> _debugPrecondition ( <nl> count > = 0 , " moveAssignFrom with negative count " ) <nl> public struct $ { Self } < T > <nl> } <nl> / / / Returns the next consecutive value after ` self ` . <nl> / / / <nl> - / / / Requires : the next value is representable . <nl> + / / / - Requires : The next value is representable . <nl> public func successor ( ) - > $ { Self } { <nl> return self + 1 <nl> } <nl> / / / Returns the previous consecutive value before ` self ` . <nl> / / / <nl> - / / / Requires : the previous value is representable . <nl> + / / / - Requires : The previous value is representable . <nl> public func predecessor ( ) - > $ { Self } { <nl> return self - 1 <nl> } <nl> mmm a / stdlib / public / core / Zip . swift <nl> ppp b / stdlib / public / core / Zip . swift <nl> public struct ZipGenerator2 < <nl> / / / Advance to the next element and return it , or ` nil ` if no next <nl> / / / element exists . <nl> / / / <nl> - / / / Requires : ` next ( ) ` has not been applied to a copy of ` self ` <nl> - / / / since the copy was made , and no preceding call to ` self . next ( ) ` <nl> - / / / has returned ` nil ` . <nl> + / / / - Requires : ` next ( ) ` has not been applied to a copy of ` self ` <nl> + / / / since the copy was made , and no preceding call to ` self . next ( ) ` <nl> + / / / has returned ` nil ` . <nl> public mutating func next ( ) - > Element ? { <nl> / / The next ( ) function needs to track if it has reached the end . If we <nl> / / didn ' t , and the first sequence is shorter than the second , then , when we <nl>
stdlib : Convert comments to use ' - requires : ' instead of ' Requires : ' .
apple/swift
68ef59e37afb417872024a8a259054391498a2e3
2015-05-12T17:47:11Z
mmm a / python / google / protobuf / internal / message_test . py <nl> ppp b / python / google / protobuf / internal / message_test . py <nl> def testGoldenMessage ( self , message_module ) : <nl> def testGoldenPackedMessage ( self , message_module ) : <nl> golden_data = test_util . GoldenFileData ( ' golden_packed_fields_message ' ) <nl> golden_message = message_module . TestPackedTypes ( ) <nl> - golden_message . ParseFromString ( golden_data ) <nl> + parsed_bytes = golden_message . ParseFromString ( golden_data ) <nl> all_set = message_module . TestPackedTypes ( ) <nl> test_util . SetAllPackedFields ( all_set ) <nl> + self . assertEqual ( parsed_bytes , len ( golden_data ) ) <nl> self . assertEqual ( all_set , golden_message ) <nl> self . assertEqual ( golden_data , all_set . SerializeToString ( ) ) <nl> golden_copy = copy . deepcopy ( golden_message ) <nl> mmm a / python / google / protobuf / message . py <nl> ppp b / python / google / protobuf / message . py <nl> def MergeFromString ( self , serialized ) : <nl> def ParseFromString ( self , serialized ) : <nl> " " " Parse serialized protocol buffer data into this message . <nl> <nl> - Like MergeFromString ( ) , except we clear the object first and <nl> - do not return the value that MergeFromString returns . <nl> + Like MergeFromString ( ) , except we clear the object first . <nl> " " " <nl> self . Clear ( ) <nl> - self . MergeFromString ( serialized ) <nl> + return self . MergeFromString ( serialized ) <nl> <nl> def SerializeToString ( self , * * kwargs ) : <nl> " " " Serializes the protocol message to a binary string . <nl>
ParseFromString returns bytes parsed ( )
protocolbuffers/protobuf
8d6f8df1ec57faf2c354e6bf1cdcd1f1d1e8ee68
2018-10-05T18:07:55Z
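The protobuf change above makes the Python `ParseFromString` propagate the return value of `MergeFromString`, i.e. the number of bytes consumed, instead of discarding it. A minimal sketch of exercising that return value, assuming a hypothetical generated module `my_pb2` with a message `MyMessage` that has a string field `name` (any protoc-generated message behaves the same way):

# Sketch only: my_pb2 / MyMessage / name are hypothetical stand-ins for a
# protoc-generated module; the return-value behavior is what the diff adds.
from my_pb2 import MyMessage

original = MyMessage()
original.name = "example"
data = original.SerializeToString()

parsed = MyMessage()
# After this change ParseFromString returns the number of bytes parsed
# (previously it returned None), matching MergeFromString.
parsed_bytes = parsed.ParseFromString(data)
assert parsed_bytes == len(data)
assert parsed == original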
mmm a / core / object . cpp <nl> ppp b / core / object . cpp <nl> Variant Object : : call ( const StringName & p_method , const Variant * * p_args , int p_a <nl> MethodBind * method = ClassDB : : get_method ( get_class_name ( ) , p_method ) ; <nl> <nl> if ( method ) { <nl> - <nl> ret = method - > call ( this , p_args , p_argcount , r_error ) ; <nl> } else { <nl> r_error . error = Variant : : CallError : : CALL_ERROR_INVALID_METHOD ; <nl> mmm a / editor / editor_visual_profiler . cpp <nl> ppp b / editor / editor_visual_profiler . cpp <nl> void EditorVisualProfiler : : _graph_tex_input ( const Ref < InputEvent > & p_ev ) { <nl> <nl> if ( activate - > is_pressed ( ) ) { <nl> if ( ! seeking ) { <nl> - / / probably not need to break request , can just stop profiling <nl> - / / emit_signal ( " break_request " ) ; <nl> + / / Break request is not required , just stop profiling <nl> } <nl> } <nl> <nl> void EditorVisualProfiler : : _bind_methods ( ) { <nl> <nl> ClassDB : : bind_method ( D_METHOD ( " _item_selected " ) , & EditorVisualProfiler : : _item_selected ) ; <nl> ADD_SIGNAL ( MethodInfo ( " enable_profiling " , PropertyInfo ( Variant : : BOOL , " enable " ) ) ) ; <nl> - ADD_SIGNAL ( MethodInfo ( " break_request " ) ) ; <nl> } <nl> <nl> void EditorVisualProfiler : : set_enabled ( bool p_enable ) { <nl> mmm a / editor / plugins / canvas_item_editor_plugin . cpp <nl> ppp b / editor / plugins / canvas_item_editor_plugin . cpp <nl> CanvasItemEditor : : CanvasItemEditor ( EditorNode * p_editor ) { <nl> editor_selection - > connect ( " selection_changed " , this , " update " ) ; <nl> editor_selection - > connect ( " selection_changed " , this , " _selection_changed " ) ; <nl> <nl> - editor - > call_deferred ( " connect " , " play_pressed " , this , " _update_override_camera_button " , make_binds ( true ) ) ; <nl> - editor - > call_deferred ( " connect " , " stop_pressed " , this , " _update_override_camera_button " , make_binds ( false ) ) ; <nl> + editor - > call_deferred ( " connect " , make_binds ( " play_pressed " , this , " _update_override_camera_button " , true ) ) ; <nl> + editor - > call_deferred ( " connect " , make_binds ( " stop_pressed " , this , " _update_override_camera_button " , false ) ) ; <nl> <nl> hb = memnew ( HBoxContainer ) ; <nl> add_child ( hb ) ; <nl> mmm a / editor / script_editor_debugger . cpp <nl> ppp b / editor / script_editor_debugger . cpp <nl> ScriptEditorDebugger : : ScriptEditorDebugger ( EditorNode * p_editor ) { <nl> visual_profiler - > set_name ( TTR ( " Visual Profiler " ) ) ; <nl> tabs - > add_child ( visual_profiler ) ; <nl> visual_profiler - > connect ( " enable_profiling " , this , " _visual_profiler_activate " ) ; <nl> - visual_profiler - > connect ( " break_request " , this , " _profiler_seeked " ) ; <nl> } <nl> <nl> { / / network profiler <nl> ScriptEditorDebugger : : ScriptEditorDebugger ( EditorNode * p_editor ) { <nl> network_profiler - > set_name ( TTR ( " Network Profiler " ) ) ; <nl> tabs - > add_child ( network_profiler ) ; <nl> network_profiler - > connect ( " enable_profiling " , this , " _network_profiler_activate " ) ; <nl> - network_profiler - > connect ( " break_request " , this , " _profiler_seeked " ) ; <nl> } <nl> <nl> { / / monitors <nl>
Fix two signal errors
godotengine/godot
a31bc1b0ba608ef340068676f6709621a37f2140
2020-02-19T21:22:34Z
new file mode 100644 <nl> index 000000000000 . . e6d8e0b70cde <nl> mmm / dev / null <nl> ppp b / doc / release - notes - 14282 . md <nl> <nl> + Low - level RPC changes <nl> + mmmmmmmmmmmmmmmmmmmmm - <nl> + <nl> + ` - usehd ` was removed in version 0 . 16 . From that version onwards , all new <nl> + wallets created are hierarchical deterministic wallets . Version 0 . 18 makes <nl> + specifying ` - usehd ` invalid config . <nl>
[ docs ] Add release notes for removing ` - usehd `
bitcoin/bitcoin
7ac911afe7aee0d3ac742a20d0091c0b75e4535e
2018-09-26T21:32:39Z
mmm a / tensorflow / core / grappler / utils . cc <nl> ppp b / tensorflow / core / grappler / utils . cc <nl> namespace grappler { <nl> NodeMap : : NodeMap ( GraphDef * graph ) : graph_ ( graph ) { <nl> for ( int i = 0 ; i < graph_ - > node_size ( ) ; i + + ) { <nl> auto node = graph_ - > mutable_node ( i ) ; <nl> - nodes_ . insert ( std : : make_pair ( node - > name ( ) , node ) ) ; <nl> + auto rslt = nodes_ . insert ( std : : make_pair ( node - > name ( ) , node ) ) ; <nl> + / / Check that the graph doesn ' t contain multiple nodes with the same name . <nl> + CHECK ( rslt . second ) ; <nl> for ( const auto & input : node - > input ( ) ) { <nl> outputs_ [ NodeName ( input ) ] . insert ( nodes_ [ node - > name ( ) ] ) ; <nl> } <nl>
Added a sanity check to make it easier to debug graph corruptions
tensorflow/tensorflow
bda0611e6251d1d065a3bb36f36f70fa6083b28a
2017-08-28T22:55:56Z
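The grappler change above turns a silent duplicate-name overwrite in NodeMap into a CHECK failure, so corrupted graphs fail fast at construction time. A rough Python analogue of the invariant being enforced, assuming TensorFlow's generated graph_pb2 protos are importable (the real check lives in the C++ NodeMap constructor, not in Python):

# Sketch only: mirrors the invariant behind `CHECK(rslt.second)`, not the C++ API.
from tensorflow.core.framework import graph_pb2

def assert_unique_node_names(graph_def: graph_pb2.GraphDef) -> None:
    seen = set()
    for node in graph_def.node:
        # unordered_map::insert returning .second == false corresponds to the
        # name already being present, i.e. a corrupted graph.
        if node.name in seen:
            raise ValueError("duplicate node name in graph: %r" % node.name)
        seen.add(node.name)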
mmm a / stdlib / core / ArrayBody . swift <nl> ppp b / stdlib / core / ArrayBody . swift <nl> <nl> / / <nl> / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> <nl> - import SwiftShims <nl> - <nl> internal struct _ArrayBody { <nl> - var _storage : _SwiftArrayBodyStorage <nl> - <nl> init ( count : Int , capacity : Int , elementTypeIsBridgedVerbatim : Bool = false ) { <nl> _sanityCheck ( count > = 0 ) <nl> _sanityCheck ( capacity > = 0 ) <nl> - <nl> - _storage = _SwiftArrayBodyStorage ( <nl> - count : count , <nl> - _capacityAndFlags : <nl> - ( UInt ( capacity ) < < 1 ) | ( elementTypeIsBridgedVerbatim ? 1 : 0 ) ) <nl> + self . count = count <nl> + self . _capacityAndFlags <nl> + = ( UInt ( capacity ) < < 1 ) | ( elementTypeIsBridgedVerbatim ? 1 : 0 ) <nl> } <nl> <nl> / / / In principle CountAndCapacity shouldn ' t need to be default <nl> internal struct _ArrayBody { <nl> / / / capacity after a new buffer is allocated , it ' s typical to want <nl> / / / to update it immediately after construction . <nl> init ( ) { <nl> - _storage = _SwiftArrayBodyStorage ( count : 0 , _capacityAndFlags : 0 ) <nl> + self . count = 0 <nl> + self . _capacityAndFlags = 0 <nl> } <nl> <nl> / / / The number of elements stored in this Array <nl> - var count : Int { <nl> - get { <nl> - return _storage . count <nl> - } <nl> - set ( newCount ) { <nl> - _storage . count = newCount <nl> - } <nl> - } <nl> + var count : Int <nl> <nl> / / / The number of elements that can be stored in this Array without <nl> / / / reallocation . <nl> internal struct _ArrayBody { <nl> <nl> / / / Storage optimization : compresses capacity and <nl> / / / elementTypeIsBridgedVerbatim together . <nl> - var _capacityAndFlags : UInt { <nl> - get { <nl> - return _storage . _capacityAndFlags <nl> - } <nl> - set { <nl> - _storage . _capacityAndFlags = newValue <nl> - } <nl> - } <nl> + var _capacityAndFlags : UInt <nl> } <nl> <nl> mmm a / stdlib / core / ArrayBuffer . swift <nl> ppp b / stdlib / core / ArrayBuffer . swift <nl> public struct _ArrayBuffer < T > : _ArrayBufferType { <nl> public <nl> init ( ) { <nl> storage = ! _isClassOrObjCExistential ( T . self ) <nl> - ? Builtin . castToNativeObject ( _emptyArrayStorage ) <nl> - : Builtin . castToNativeObject ( <nl> + ? nil : Builtin . castToNativeObject ( <nl> _IndirectArrayBuffer ( <nl> nativeBuffer : _ContiguousArrayBuffer < T > ( ) , <nl> isMutable : false , <nl> extension _ArrayBuffer { <nl> / / / If this buffer is backed by a _ContiguousArrayBuffer , return it . <nl> / / / Otherwise , return nil . Note : the result ' s baseAddress may <nl> / / / not match ours , if we are a _SliceBuffer . <nl> - public func requestNativeBuffer ( ) - > NativeBuffer ? { <nl> - if ! _isClassOrObjCExistential ( T . self ) { <nl> - return _native <nl> - } <nl> - else { <nl> - let i = indirect <nl> - return _fastPath ( ! i . isCocoa ) <nl> - ? i . getNativeBufferOf ( T . self ) <nl> - : nil <nl> - } <nl> + public <nl> + func requestNativeBuffer ( ) - > NativeBuffer ? { <nl> + let result = self . _native <nl> + if result . hasStorage { return result } <nl> + return nil <nl> } <nl> <nl> / / / Replace the given subRange with the first newCount elements of <nl> mmm a / stdlib / core / ContiguousArrayBuffer . swift <nl> ppp b / stdlib / core / ContiguousArrayBuffer . swift <nl> <nl> <nl> import SwiftShims <nl> <nl> - / / / Class used whose sole instance is used as storage for empty <nl> - / / / arrays . 
The instance is defined in the runtime and statically <nl> - / / / initialized . See stdlib / runtime / GlobalObjects . cpp for details . <nl> - internal final class _EmptyArrayStorage <nl> - : _ContiguousArrayStorageBase { <nl> - <nl> - init ( _DoNotCallMe : ( ) ) { <nl> - _sanityCheckFailure ( " creating instance of _EmptyArrayStorage " ) <nl> - } <nl> - <nl> - var countAndCapacity : _ArrayBody <nl> - <nl> - override func _tryGetVerbatimBridgedUnsafeBuffer ( <nl> - dummy : Void <nl> - ) - > UnsafeBufferPointer < AnyObject > { <nl> - return UnsafeBufferPointer ( start : . null ( ) , count : 0 ) <nl> - } <nl> - <nl> - override func _getNonVerbatimBridgedCount ( dummy : Void ) - > Int { <nl> - return 0 <nl> - } <nl> - <nl> - override func _getNonVerbatimBridgedHeapBuffer ( <nl> - dummy : Void <nl> - ) - > HeapBuffer < Int , AnyObject > { <nl> - return HeapBuffer < Int , AnyObject > ( <nl> - HeapBufferStorage < Int , AnyObject > . self , 0 , 0 ) <nl> - } <nl> - <nl> - override func canStoreElementsOfDynamicType ( _ : Any . Type ) - > Bool { <nl> - return false <nl> - } <nl> - <nl> - / / / A type that every element in the array is . <nl> - override var staticElementType : Any . Type { <nl> - return Void . self <nl> - } <nl> - } <nl> - <nl> / / The empty array prototype . We use the same object for all empty <nl> / / [ Native ] Array < T > s . <nl> - var _emptyArrayStorage : _EmptyArrayStorage { <nl> - return Builtin . bridgeFromRawPointer ( <nl> - Builtin . addressof ( & _swiftEmptyArrayStorage ) ) <nl> - } <nl> + let _emptyContiguousArrayStorageBase = unsafeBitCast ( <nl> + _ContiguousArrayBuffer < Int > ( count : 0 , minimumCapacity : 0 ) , <nl> + _ContiguousArrayStorageBase . self <nl> + ) <nl> <nl> / / The class that implements the storage for a ContiguousArray < T > <nl> final class _ContiguousArrayStorage < T > : _ContiguousArrayStorageBase { <nl> public struct _ContiguousArrayBuffer < T > : _ArrayBufferType { <nl> / / / result ' s . baseAddress or set the result ' s . count to zero . <nl> public init ( count : Int , minimumCapacity : Int ) <nl> { <nl> - let realMinimumCapacity = max ( count , minimumCapacity ) <nl> - if realMinimumCapacity = = 0 { <nl> - self = _ContiguousArrayBuffer < T > ( ) <nl> + _base = HeapBuffer ( <nl> + _ContiguousArrayStorage < T > . self , <nl> + _ArrayBody ( ) , <nl> + max ( count , minimumCapacity ) ) <nl> + <nl> + var bridged = false <nl> + if _canBeClass ( T . self ) ! = 0 { <nl> + bridged = _isBridgedVerbatimToObjectiveC ( T . self ) <nl> } <nl> - else { <nl> - _base = HeapBuffer ( <nl> - _ContiguousArrayStorage < T > . self , <nl> - _ArrayBody ( ) , <nl> - realMinimumCapacity ) <nl> - <nl> - var bridged = false <nl> - if _canBeClass ( T . self ) ! = 0 { <nl> - bridged = _isBridgedVerbatimToObjectiveC ( T . self ) <nl> - } <nl> <nl> - _base . value = _ArrayBody ( <nl> - count : count , capacity : _base . _capacity ( ) , <nl> - elementTypeIsBridgedVerbatim : bridged ) <nl> - } <nl> + _base . value = _ArrayBody ( <nl> + count : count , capacity : _base . _capacity ( ) , <nl> + elementTypeIsBridgedVerbatim : bridged ) <nl> } <nl> <nl> init ( _ storage : _ContiguousArrayStorage < T > ? ) { <nl> public struct _ContiguousArrayBuffer < T > : _ArrayBufferType { <nl> return ret <nl> } <nl> <nl> + public mutating func take ( ) - > _ContiguousArrayBuffer { <nl> + if ! _base . hasStorage { <nl> + return _ContiguousArrayBuffer ( ) <nl> + } <nl> + _sanityCheck ( _base . 
isUniquelyReferenced ( ) , " Can ' t \ " take \ " a shared array buffer " ) <nl> + let result = self <nl> + _base = _Base ( ) <nl> + return result <nl> + } <nl> + <nl> / / = = = mmm _ArrayBufferType conformance mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - = = = / / <nl> / / / The type of elements stored in the buffer <nl> public typealias Element = T <nl> <nl> / / / create an empty buffer <nl> public init ( ) { <nl> - _base = unsafeBitCast ( _emptyArrayStorage , HeapBuffer < _ArrayBody , T > . self ) <nl> + _base = HeapBuffer ( ) <nl> } <nl> <nl> / / / Adopt the storage of x <nl> public struct _ContiguousArrayBuffer < T > : _ArrayBufferType { <nl> " Array element type is not bridged to ObjectiveC " ) <nl> if count = = 0 { <nl> return _NSSwiftArray ( <nl> - _nativeStorage : _emptyArrayStorage ) <nl> + _nativeStorage : _emptyContiguousArrayStorageBase ) <nl> } <nl> return _NSSwiftArray ( _nativeStorage : _storage ! ) <nl> } <nl> public struct _ContiguousArrayBuffer < T > : _ArrayBufferType { <nl> _ : U . Type <nl> ) - > Bool { <nl> _sanityCheck ( _isClassOrObjCExistential ( U . self ) ) <nl> - <nl> - / / Start with the base class so that optimizations based on <nl> - / / ' final ' don ' t bypass dynamic type check . <nl> - let s : _ContiguousArrayStorageBase ? = _storage <nl> - <nl> + let s = _storage <nl> if _fastPath ( s ! = nil ) { <nl> if _fastPath ( s ! . staticElementType is U . Type ) { <nl> / / Done in O ( 1 ) <nl> public func ~ > < <nl> for x in GeneratorSequence ( source . generate ( ) ) { <nl> result + = x <nl> } <nl> - return result <nl> + return result . take ( ) <nl> } <nl> <nl> public func ~ > < <nl> mmm a / stdlib / core / SliceBuffer . swift <nl> ppp b / stdlib / core / SliceBuffer . swift <nl> struct _SliceBuffer < T > : _ArrayBufferType { <nl> <nl> func _invariantCheck ( ) { <nl> let isNative = _hasNativeBuffer <nl> - let isNativeStorage : Bool = ( owner as ? _ContiguousArrayStorageBase ) ! = nil <nl> + let isNativeStorage : Bool = ( owner as ? NativeStorage ) ! = nil <nl> _sanityCheck ( <nl> isNativeStorage = = isNative <nl> ) <nl> mmm a / stdlib / runtime / CMakeLists . txt <nl> ppp b / stdlib / runtime / CMakeLists . txt <nl> endif ( ) <nl> add_swift_library ( swiftRuntime <nl> Casting . cpp <nl> Demangle . cpp <nl> - GlobalObjects . cpp <nl> HeapObject . cpp <nl> KnownMetadata . cpp <nl> Metadata . cpp <nl> deleted file mode 100644 <nl> index 398d3ffe7a18 . . 000000000000 <nl> mmm a / stdlib / runtime / GlobalObjects . cpp <nl> ppp / dev / null <nl> <nl> - / / = = = mmm GlobalObjects . cpp - Statically - initialized objects mmmmmmmmmmmmmmm = = = / / <nl> - / / <nl> - / / This source file is part of the Swift . org open source project <nl> - / / <nl> - / / Copyright ( c ) 2014 - 2015 Apple Inc . and the Swift project authors <nl> - / / Licensed under Apache License v2 . 0 with Runtime Library Exception <nl> - / / <nl> - / / See http : / / swift . org / LICENSE . txt for license information <nl> - / / See http : / / swift . org / CONTRIBUTORS . txt for the list of Swift project authors <nl> - / / <nl> - / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> - / / <nl> - / / Objects that are allocated at global scope instead of on the heap , <nl> - / / and statically initialized to avoid synchronization costs , are <nl> - / / defined here . <nl> - / / <nl> - / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> - # include " . . / shims / GlobalObjects . 
h " <nl> - # include " swift / Runtime / FastEntryPoints . h " <nl> - # include " swift / Runtime / Metadata . h " <nl> - <nl> - namespace swift { <nl> - <nl> - / / _direct type metadata for Swift . _EmptyArrayStorage <nl> - extern " C " FullMetadata < ClassMetadata > _TMdCSs18_EmptyArrayStorage ; <nl> - <nl> - extern " C " _SwiftEmptyArrayStorage _swiftEmptyArrayStorage = { <nl> - / / HeapObject header ; <nl> - { <nl> - & _TMdCSs18_EmptyArrayStorage , / / is - a pointer <nl> - RC_INTERVAL , / / refcount <nl> - WRC_INTERVAL / / weak refcount <nl> - } , <nl> - <nl> - / / _SwiftArrayBodyStorage body ; <nl> - { <nl> - 0 , / / int count ; <nl> - 1 / / unsigned int _capacityAndFlags ; 1 means elementTypeIsBridgedVerbatim <nl> - } <nl> - } ; <nl> - <nl> - } <nl> mmm a / stdlib / shims / CMakeLists . txt <nl> ppp b / stdlib / shims / CMakeLists . txt <nl> <nl> - set ( sources shims . h GlobalObjects . h HeapObject . h RefCount . h module . map ) <nl> + set ( sources shims . h HeapObject . h RefCount . h module . map ) <nl> set ( output_dir " $ { SWIFTLIB_DIR } / shims " ) <nl> <nl> set ( commands ) <nl> deleted file mode 100644 <nl> index e9ef06fd1e00 . . 000000000000 <nl> mmm a / stdlib / shims / GlobalObjects . h <nl> ppp / dev / null <nl> <nl> - / / = = = mmm GlobalObjects . h - Statically - initialized objects mmmmmmmmmmmmmmm - - = = = / / <nl> - / / <nl> - / / This source file is part of the Swift . org open source project <nl> - / / <nl> - / / Copyright ( c ) 2014 - 2015 Apple Inc . and the Swift project authors <nl> - / / Licensed under Apache License v2 . 0 with Runtime Library Exception <nl> - / / <nl> - / / See http : / / swift . org / LICENSE . txt for license information <nl> - / / See http : / / swift . org / CONTRIBUTORS . txt for the list of Swift project authors <nl> - / / <nl> - / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> - / / <nl> - / / Objects that are allocated at global scope instead of on the heap , <nl> - / / and statically initialized to avoid synchronization costs , are <nl> - / / defined here . <nl> - / / <nl> - / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> - # ifndef SWIFT_STDLIB_SHIMS_GLOBALOBJECTS_H_ <nl> - # define SWIFT_STDLIB_SHIMS_GLOBALOBJECTS_H_ <nl> - # include " HeapObject . h " <nl> - # include < stdint . h > <nl> - <nl> - # ifdef __cplusplus <nl> - namespace swift { extern " C " { <nl> - # endif <nl> - <nl> - struct _SwiftArrayBodyStorage { <nl> - intptr_t count ; <nl> - uintptr_t _capacityAndFlags ; <nl> - } ; <nl> - <nl> - struct _SwiftEmptyArrayStorage { <nl> - struct HeapObject header ; <nl> - struct _SwiftArrayBodyStorage body ; <nl> - } ; <nl> - <nl> - extern struct _SwiftEmptyArrayStorage _swiftEmptyArrayStorage ; <nl> - <nl> - # ifdef __cplusplus <nl> - } } / / extern " C " , namespace swift <nl> - # endif <nl> - <nl> - # endif <nl> mmm a / stdlib / shims / module . map <nl> ppp b / stdlib / shims / module . map <nl> module SwiftShims { <nl> header " shims . h " <nl> header " HeapObject . h " <nl> header " RefCount . h " <nl> - header " GlobalObjects . h " <nl> export * <nl> } <nl> mmm a / test / stdlib / ArrayNew . swift . gyb <nl> ppp b / test / stdlib / ArrayNew . swift . gyb <nl> ArrayTestSuite . test ( " $ { array_type } / Sliceable / Enums " ) { <nl> <nl> ArrayTestSuite . 
test ( " $ { array_type } / emptyAllocation " ) { <nl> <nl> - let arr0 = $ { array_type } < Int > ( ) <nl> - let arr1 = $ { array_type } < TestValueTy > ( count : 0 , repeatedValue : TestValueTy ( 0 ) ) <nl> - / / Empty arrays all use the same buffer <nl> - expectEqual ( arr0 . _buffer . identity , arr1 . _buffer . identity ) <nl> - <nl> - / / Only non - literal empty Slices have nil buffers <nl> - let hasNilBuffer = arr1 . identity = = 0 <nl> - expect $ { array_type = = ' Slice ' } ( hasNilBuffer ) <nl> - <nl> - let arr2 : $ { array_type } < TestValueTy > = [ ] <nl> - let emptyLiteralsShareBuffer = arr0 . _buffer . identity = = arr2 . _buffer . identity <nl> - expect $ { array_type ! = ' Slice ' } ( emptyLiteralsShareBuffer ) <nl> + / / Test if an empty array does not allocate a buffer <nl> + let arr = $ { array_type } < TestValueTy > ( count : 0 , repeatedValue : TestValueTy ( 0 ) ) <nl> + expectEqual ( 0 , arr . _buffer . identity ) <nl> } <nl> <nl> % end <nl> <nl> + ArrayTestSuite . test ( " Array / emptyLiteral " ) { <nl> + <nl> + / / Test if an empty array literal does not allocate a buffer <nl> + let arr : [ TestValueTy ] = [ ] <nl> + expectEqual ( 0 , arr . _buffer . identity ) <nl> + } <nl> + <nl> / / = = = mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - = = = / / <nl> / / COW tests <nl> / / FIXME : incomplete . <nl>
Revert " [ stdlib ] Use a universal empty array buffer "
apple/swift
b740ad0df9f3ee6a955c164ce61bc1a3389c0fad
2014-10-03T18:52:11Z
mmm a / src / code - stub - assembler . cc <nl> ppp b / src / code - stub - assembler . cc <nl> <nl> # include " src / code - factory . h " <nl> # include " src / frames - inl . h " <nl> # include " src / frames . h " <nl> + # include " src / ic / handler - configuration . h " <nl> # include " src / ic / stub - cache . h " <nl> <nl> namespace v8 { <nl> void CodeStubAssembler : : TryProbeStubCache ( <nl> } <nl> } <nl> <nl> - void CodeStubAssembler : : HandleLoadICHandlerCase ( const LoadICParameters * p , <nl> - Node * handler , Label * miss ) { <nl> + / / | is_jsarray | should be non - zero for JSArrays . <nl> + void CodeStubAssembler : : EmitBoundsCheck ( Node * object , Node * elements , <nl> + Node * intptr_key , Node * is_jsarray , <nl> + Label * miss ) { <nl> + Variable var_length ( this , MachineRepresentation : : kTagged ) ; <nl> + Label if_array ( this ) , length_loaded ( this , & var_length ) ; <nl> + GotoUnless ( WordEqual ( is_jsarray , IntPtrConstant ( 0 ) ) , & if_array ) ; <nl> + { <nl> + var_length . Bind ( SmiUntag ( LoadFixedArrayBaseLength ( elements ) ) ) ; <nl> + Goto ( & length_loaded ) ; <nl> + } <nl> + Bind ( & if_array ) ; <nl> + { <nl> + var_length . Bind ( SmiUntag ( LoadObjectField ( object , JSArray : : kLengthOffset ) ) ) ; <nl> + Goto ( & length_loaded ) ; <nl> + } <nl> + Bind ( & length_loaded ) ; <nl> + GotoUnless ( UintPtrLessThan ( intptr_key , var_length . value ( ) ) , miss ) ; <nl> + } <nl> + <nl> + / / | key | should be untagged ( int32 ) . <nl> + void CodeStubAssembler : : EmitElementLoad ( Node * object , Node * elements , <nl> + Node * elements_kind , Node * key , <nl> + Label * if_hole , Label * rebox_double , <nl> + Variable * var_double_value , <nl> + Label * miss ) { <nl> + Label if_typed_array ( this ) , if_fast_packed ( this ) , if_fast_holey ( this ) , <nl> + if_fast_double ( this ) , if_fast_holey_double ( this ) , <nl> + unimplemented_elements_kind ( this ) ; <nl> + STATIC_ASSERT ( LAST_ELEMENTS_KIND = = LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND ) ; <nl> + GotoIf ( <nl> + IntPtrGreaterThanOrEqual ( <nl> + elements_kind , IntPtrConstant ( FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND ) ) , <nl> + & if_typed_array ) ; <nl> + <nl> + int32_t kinds [ ] = { / / Handled by if_fast_packed . <nl> + FAST_SMI_ELEMENTS , FAST_ELEMENTS , <nl> + / / Handled by if_fast_holey . <nl> + FAST_HOLEY_SMI_ELEMENTS , FAST_HOLEY_ELEMENTS , <nl> + / / Handled by if_fast_double . <nl> + FAST_DOUBLE_ELEMENTS , <nl> + / / Handled by if_fast_holey_double . <nl> + FAST_HOLEY_DOUBLE_ELEMENTS } ; <nl> + Label * labels [ ] = { / / FAST_ { SMI , } _ELEMENTS <nl> + & if_fast_packed , & if_fast_packed , <nl> + / / FAST_HOLEY_ { SMI , } _ELEMENTS <nl> + & if_fast_holey , & if_fast_holey , <nl> + / / FAST_DOUBLE_ELEMENTS <nl> + & if_fast_double , <nl> + / / FAST_HOLEY_DOUBLE_ELEMENTS <nl> + & if_fast_holey_double } ; <nl> + Switch ( elements_kind , & unimplemented_elements_kind , kinds , labels , <nl> + arraysize ( kinds ) ) ; <nl> + Bind ( & unimplemented_elements_kind ) ; <nl> + { <nl> + / / Crash if we get here . <nl> + DebugBreak ( ) ; <nl> + Goto ( miss ) ; <nl> + } <nl> + <nl> + Bind ( & if_fast_packed ) ; <nl> + { <nl> + Comment ( " fast packed elements " ) ; <nl> + / / TODO ( jkummerow ) : The Load * Element helpers add movsxlq instructions <nl> + / / on x64 which we don ' t need here , because | key | is an IntPtr already . <nl> + / / Do something about that . 
<nl> + Return ( LoadFixedArrayElement ( elements , key ) ) ; <nl> + } <nl> + <nl> + Bind ( & if_fast_holey ) ; <nl> + { <nl> + Comment ( " fast holey elements " ) ; <nl> + Node * element = LoadFixedArrayElement ( elements , key ) ; <nl> + GotoIf ( WordEqual ( element , TheHoleConstant ( ) ) , if_hole ) ; <nl> + Return ( element ) ; <nl> + } <nl> + <nl> + Bind ( & if_fast_double ) ; <nl> + { <nl> + Comment ( " packed double elements " ) ; <nl> + var_double_value - > Bind ( <nl> + LoadFixedDoubleArrayElement ( elements , key , MachineType : : Float64 ( ) ) ) ; <nl> + Goto ( rebox_double ) ; <nl> + } <nl> + <nl> + Bind ( & if_fast_holey_double ) ; <nl> + { <nl> + Comment ( " holey double elements " ) ; <nl> + if ( kPointerSize = = kDoubleSize ) { <nl> + Node * raw_element = <nl> + LoadFixedDoubleArrayElement ( elements , key , MachineType : : Uint64 ( ) ) ; <nl> + Node * the_hole = Int64Constant ( kHoleNanInt64 ) ; <nl> + GotoIf ( Word64Equal ( raw_element , the_hole ) , if_hole ) ; <nl> + } else { <nl> + Node * element_upper = LoadFixedDoubleArrayElement ( <nl> + elements , key , MachineType : : Uint32 ( ) , kIeeeDoubleExponentWordOffset ) ; <nl> + GotoIf ( Word32Equal ( element_upper , Int32Constant ( kHoleNanUpper32 ) ) , <nl> + if_hole ) ; <nl> + } <nl> + var_double_value - > Bind ( <nl> + LoadFixedDoubleArrayElement ( elements , key , MachineType : : Float64 ( ) ) ) ; <nl> + Goto ( rebox_double ) ; <nl> + } <nl> + <nl> + Bind ( & if_typed_array ) ; <nl> + { <nl> + Comment ( " typed elements " ) ; <nl> + / / Check if buffer has been neutered . <nl> + Node * buffer = LoadObjectField ( object , JSArrayBufferView : : kBufferOffset ) ; <nl> + Node * bitfield = LoadObjectField ( buffer , JSArrayBuffer : : kBitFieldOffset , <nl> + MachineType : : Uint32 ( ) ) ; <nl> + Node * neutered_bit = <nl> + Word32And ( bitfield , Int32Constant ( JSArrayBuffer : : WasNeutered : : kMask ) ) ; <nl> + GotoUnless ( Word32Equal ( neutered_bit , Int32Constant ( 0 ) ) , miss ) ; <nl> + / / Backing store = external_pointer + base_pointer . 
<nl> + Node * external_pointer = <nl> + LoadObjectField ( elements , FixedTypedArrayBase : : kExternalPointerOffset , <nl> + MachineType : : Pointer ( ) ) ; <nl> + Node * base_pointer = <nl> + LoadObjectField ( elements , FixedTypedArrayBase : : kBasePointerOffset ) ; <nl> + Node * backing_store = IntPtrAdd ( external_pointer , base_pointer ) ; <nl> + <nl> + const int kTypedElementsKindCount = LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND - <nl> + FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND + <nl> + 1 ; <nl> + Label * elements_kind_labels [ kTypedElementsKindCount ] ; <nl> + int32_t elements_kinds [ kTypedElementsKindCount ] ; <nl> + for ( int i = 0 ; i < kTypedElementsKindCount ; i + + ) { <nl> + elements_kinds [ i ] = i + FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND ; <nl> + elements_kind_labels [ i ] = new Label ( this ) ; <nl> + } <nl> + Switch ( elements_kind , miss , elements_kinds , elements_kind_labels , <nl> + static_cast < size_t > ( kTypedElementsKindCount ) ) ; <nl> + <nl> + for ( int i = 0 ; i < kTypedElementsKindCount ; i + + ) { <nl> + ElementsKind kind = static_cast < ElementsKind > ( elements_kinds [ i ] ) ; <nl> + Bind ( elements_kind_labels [ i ] ) ; <nl> + Comment ( ElementsKindToString ( kind ) ) ; <nl> + switch ( kind ) { <nl> + case UINT8_ELEMENTS : <nl> + case UINT8_CLAMPED_ELEMENTS : <nl> + Return ( SmiTag ( Load ( MachineType : : Uint8 ( ) , backing_store , key ) ) ) ; <nl> + break ; <nl> + case INT8_ELEMENTS : <nl> + Return ( SmiTag ( Load ( MachineType : : Int8 ( ) , backing_store , key ) ) ) ; <nl> + break ; <nl> + case UINT16_ELEMENTS : { <nl> + Node * index = WordShl ( key , IntPtrConstant ( 1 ) ) ; <nl> + Return ( SmiTag ( Load ( MachineType : : Uint16 ( ) , backing_store , index ) ) ) ; <nl> + break ; <nl> + } <nl> + case INT16_ELEMENTS : { <nl> + Node * index = WordShl ( key , IntPtrConstant ( 1 ) ) ; <nl> + Return ( SmiTag ( Load ( MachineType : : Int16 ( ) , backing_store , index ) ) ) ; <nl> + break ; <nl> + } <nl> + case UINT32_ELEMENTS : { <nl> + Node * index = WordShl ( key , IntPtrConstant ( 2 ) ) ; <nl> + Node * element = Load ( MachineType : : Uint32 ( ) , backing_store , index ) ; <nl> + Return ( ChangeUint32ToTagged ( element ) ) ; <nl> + break ; <nl> + } <nl> + case INT32_ELEMENTS : { <nl> + Node * index = WordShl ( key , IntPtrConstant ( 2 ) ) ; <nl> + Node * element = Load ( MachineType : : Int32 ( ) , backing_store , index ) ; <nl> + Return ( ChangeInt32ToTagged ( element ) ) ; <nl> + break ; <nl> + } <nl> + case FLOAT32_ELEMENTS : { <nl> + Node * index = WordShl ( key , IntPtrConstant ( 2 ) ) ; <nl> + Node * element = Load ( MachineType : : Float32 ( ) , backing_store , index ) ; <nl> + var_double_value - > Bind ( ChangeFloat32ToFloat64 ( element ) ) ; <nl> + Goto ( rebox_double ) ; <nl> + break ; <nl> + } <nl> + case FLOAT64_ELEMENTS : { <nl> + Node * index = WordShl ( key , IntPtrConstant ( 3 ) ) ; <nl> + Node * element = Load ( MachineType : : Float64 ( ) , backing_store , index ) ; <nl> + var_double_value - > Bind ( element ) ; <nl> + Goto ( rebox_double ) ; <nl> + break ; <nl> + } <nl> + default : <nl> + UNREACHABLE ( ) ; <nl> + } <nl> + / / Don ' t forget to clean up . 
<nl> + delete elements_kind_labels [ i ] ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + void CodeStubAssembler : : HandleLoadICHandlerCase ( <nl> + const LoadICParameters * p , Node * handler , Label * miss , <nl> + ElementSupport support_elements ) { <nl> Comment ( " have_handler " ) ; <nl> Label call_handler ( this ) ; <nl> GotoUnless ( WordIsSmi ( handler ) , & call_handler ) ; <nl> <nl> - / / | handler | is a Smi . It encodes a field index as obtained by <nl> - / / FieldIndex . GetLoadByFieldOffset ( ) . <nl> - / / TODO ( jkummerow ) : For KeyedLoadICs , extend this scheme to encode <nl> - / / fast * element * loads . <nl> + / / | handler | is a Smi , encoding what to do . See handler - configuration . h <nl> + / / for the encoding format . <nl> { <nl> Variable var_double_value ( this , MachineRepresentation : : kFloat64 ) ; <nl> Label rebox_double ( this , & var_double_value ) ; <nl> <nl> Node * handler_word = SmiUntag ( handler ) ; <nl> + if ( support_elements = = kSupportElements ) { <nl> + Label property ( this ) ; <nl> + Node * handler_type = <nl> + WordAnd ( handler_word , IntPtrConstant ( LoadHandlerTypeBit : : kMask ) ) ; <nl> + GotoUnless ( <nl> + WordEqual ( handler_type , IntPtrConstant ( kLoadICHandlerForElements ) ) , <nl> + & property ) ; <nl> + <nl> + Comment ( " element_load " ) ; <nl> + GotoUnless ( WordIsSmi ( p - > name ) , miss ) ; <nl> + Node * key = SmiUntag ( p - > name ) ; <nl> + Node * elements = LoadElements ( p - > receiver ) ; <nl> + Node * is_jsarray = <nl> + WordAnd ( handler_word , IntPtrConstant ( KeyedLoadIsJsArray : : kMask ) ) ; <nl> + EmitBoundsCheck ( p - > receiver , elements , key , is_jsarray , miss ) ; <nl> + Label if_hole ( this ) ; <nl> + <nl> + Node * elements_kind = BitFieldDecode < KeyedLoadElementsKind > ( handler_word ) ; <nl> + <nl> + EmitElementLoad ( p - > receiver , elements , elements_kind , key , & if_hole , <nl> + & rebox_double , & var_double_value , miss ) ; <nl> + <nl> + Bind ( & if_hole ) ; <nl> + { <nl> + Comment ( " convert hole " ) ; <nl> + Node * convert_hole = <nl> + WordAnd ( handler_word , IntPtrConstant ( KeyedLoadConvertHole : : kMask ) ) ; <nl> + GotoIf ( WordEqual ( convert_hole , IntPtrConstant ( 0 ) ) , miss ) ; <nl> + Node * protector_cell = LoadRoot ( Heap : : kArrayProtectorRootIndex ) ; <nl> + GotoUnless ( <nl> + WordEqual ( LoadObjectField ( protector_cell , Cell : : kValueOffset ) , <nl> + SmiConstant ( Smi : : FromInt ( Isolate : : kArrayProtectorValid ) ) ) , <nl> + miss ) ; <nl> + Return ( UndefinedConstant ( ) ) ; <nl> + } <nl> + <nl> + Bind ( & property ) ; <nl> + Comment ( " property_load " ) ; <nl> + } <nl> + <nl> / / | handler_word | is a field index as obtained by <nl> / / FieldIndex . 
GetLoadByFieldOffset ( ) : <nl> Label inobject_double ( this ) , out_of_object ( this ) , <nl> out_of_object_double ( this ) ; <nl> - Node * inobject_bit = WordAnd ( <nl> - handler_word , IntPtrConstant ( FieldIndex : : FieldOffsetIsInobject : : kMask ) ) ; <nl> - Node * double_bit = WordAnd ( <nl> - handler_word , IntPtrConstant ( FieldIndex : : FieldOffsetIsDouble : : kMask ) ) ; <nl> - Node * offset = WordSar ( <nl> - handler_word , IntPtrConstant ( FieldIndex : : FieldOffsetOffset : : kShift ) ) ; <nl> + Node * inobject_bit = <nl> + WordAnd ( handler_word , IntPtrConstant ( FieldOffsetIsInobject : : kMask ) ) ; <nl> + Node * double_bit = <nl> + WordAnd ( handler_word , IntPtrConstant ( FieldOffsetIsDouble : : kMask ) ) ; <nl> + Node * offset = <nl> + WordSar ( handler_word , IntPtrConstant ( FieldOffsetOffset : : kShift ) ) ; <nl> <nl> GotoIf ( WordEqual ( inobject_bit , IntPtrConstant ( 0 ) ) , & out_of_object ) ; <nl> <nl> void CodeStubAssembler : : KeyedLoadIC ( const LoadICParameters * p ) { <nl> & var_handler , & try_polymorphic ) ; <nl> Bind ( & if_handler ) ; <nl> { <nl> - HandleLoadICHandlerCase ( p , var_handler . value ( ) , & miss ) ; <nl> + HandleLoadICHandlerCase ( p , var_handler . value ( ) , & miss , kSupportElements ) ; <nl> } <nl> <nl> Bind ( & try_polymorphic ) ; <nl> mmm a / src / code - stub - assembler . h <nl> ppp b / src / code - stub - assembler . h <nl> class CodeStubAssembler : public compiler : : CodeAssembler { <nl> Variable * var_handler , Label * if_miss , <nl> int unroll_count ) ; <nl> <nl> - void HandleLoadICHandlerCase ( const LoadICParameters * p , <nl> - compiler : : Node * handler , Label * miss ) ; <nl> - <nl> compiler : : Node * StubCachePrimaryOffset ( compiler : : Node * name , <nl> compiler : : Node * map ) ; <nl> <nl> class CodeStubAssembler : public compiler : : CodeAssembler { <nl> compiler : : Node * value ) ; <nl> <nl> private : <nl> + enum ElementSupport { kOnlyProperties , kSupportElements } ; <nl> + <nl> + void HandleLoadICHandlerCase ( <nl> + const LoadICParameters * p , compiler : : Node * handler , Label * miss , <nl> + ElementSupport support_elements = kOnlyProperties ) ; <nl> + void EmitBoundsCheck ( compiler : : Node * object , compiler : : Node * elements , <nl> + compiler : : Node * intptr_key , compiler : : Node * is_jsarray , <nl> + Label * miss ) ; <nl> + void EmitElementLoad ( compiler : : Node * object , compiler : : Node * elements , <nl> + compiler : : Node * elements_kind , compiler : : Node * key , <nl> + Label * if_hole , Label * rebox_double , <nl> + Variable * var_double_value , Label * miss ) ; <nl> + <nl> compiler : : Node * ElementOffsetFromIndex ( compiler : : Node * index , <nl> ElementsKind kind , ParameterMode mode , <nl> int base_size = 0 ) ; <nl> mmm a / src / field - index - inl . h <nl> ppp b / src / field - index - inl . h <nl> <nl> # define V8_FIELD_INDEX_INL_H_ <nl> <nl> # include " src / field - index . h " <nl> + # include " src / ic / handler - configuration . h " <nl> <nl> namespace v8 { <nl> namespace internal { <nl> inline int FieldIndex : : GetLoadByFieldIndex ( ) const { <nl> / / FieldIndex object from it . <nl> / / static <nl> inline FieldIndex FieldIndex : : ForLoadByFieldOffset ( Map * map , int offset ) { <nl> - DCHECK ( offset & 1 ) ; / / Property marker ( as opposed to element ) . 
<nl> + DCHECK ( LoadHandlerTypeBit : : decode ( offset ) = = kLoadICHandlerForProperties ) ; <nl> bool is_inobject = FieldOffsetIsInobject : : decode ( offset ) ; <nl> bool is_double = FieldOffsetIsDouble : : decode ( offset ) ; <nl> int field_index = FieldOffsetOffset : : decode ( offset ) > > kPointerSizeLog2 ; <nl> inline int FieldIndex : : GetLoadByFieldOffset ( ) const { <nl> return FieldOffsetIsInobject : : encode ( is_inobject ( ) ) | <nl> FieldOffsetIsDouble : : encode ( is_double ( ) ) | <nl> FieldOffsetOffset : : encode ( index ( ) < < kPointerSizeLog2 ) | <nl> - 1 ; / / Property marker ( as opposed to element ) . <nl> + LoadHandlerTypeBit : : encode ( kLoadICHandlerForProperties ) ; <nl> } <nl> <nl> inline FieldIndex FieldIndex : : ForDescriptor ( Map * map , int descriptor_index ) { <nl> mmm a / src / field - index . h <nl> ppp b / src / field - index . h <nl> class FieldIndex final { <nl> } <nl> bool operator ! = ( FieldIndex const & other ) const { return ! ( * this = = other ) ; } <nl> <nl> - / / For GetLoadByFieldOffset . <nl> - class FieldOffsetIsInobject : public BitField < bool , 1 , 1 > { } ; <nl> - class FieldOffsetIsDouble : public BitField < bool , 2 , 1 > { } ; <nl> - class FieldOffsetOffset : public BitField < int , 3 , 27 > { } ; <nl> - / / Make sure we don ' t overflow into the sign bit . <nl> - STATIC_ASSERT ( FieldOffsetOffset : : kNext < = kSmiValueSize - 1 ) ; <nl> - <nl> private : <nl> FieldIndex ( bool is_inobject , int local_index , bool is_double , <nl> int inobject_properties , int first_inobject_property_offset , <nl> mmm a / src / ic / handler - compiler . cc <nl> ppp b / src / ic / handler - compiler . cc <nl> <nl> <nl> # include " src / field - type . h " <nl> # include " src / ic / call - optimization . h " <nl> + # include " src / ic / handler - configuration . h " <nl> # include " src / ic / ic - inl . h " <nl> # include " src / ic / ic . h " <nl> # include " src / isolate - inl . h " <nl> Handle < Code > NamedStoreHandlerCompiler : : CompileStoreCallback ( <nl> # undef __ <nl> <nl> / / static <nl> - Handle < Code > ElementHandlerCompiler : : GetKeyedLoadHandler ( <nl> + Handle < Object > ElementHandlerCompiler : : GetKeyedLoadHandler ( <nl> Handle < Map > receiver_map , Isolate * isolate ) { <nl> if ( receiver_map - > has_indexed_interceptor ( ) & & <nl> ! receiver_map - > GetIndexedInterceptor ( ) - > getter ( ) - > IsUndefined ( isolate ) & & <nl> Handle < Code > ElementHandlerCompiler : : GetKeyedLoadHandler ( <nl> bool convert_hole_to_undefined = <nl> is_js_array & & elements_kind = = FAST_HOLEY_ELEMENTS & & <nl> * receiver_map = = isolate - > get_initial_js_array_map ( elements_kind ) ; <nl> - TRACE_HANDLER_STATS ( isolate , KeyedLoadIC_LoadFastElementStub ) ; <nl> - return LoadFastElementStub ( isolate , is_js_array , elements_kind , <nl> - convert_hole_to_undefined ) <nl> - . GetCode ( ) ; <nl> + if ( FLAG_tf_load_ic_stub ) { <nl> + int config = KeyedLoadElementsKind : : encode ( elements_kind ) | <nl> + KeyedLoadConvertHole : : encode ( convert_hole_to_undefined ) | <nl> + KeyedLoadIsJsArray : : encode ( is_js_array ) | <nl> + LoadHandlerTypeBit : : encode ( kLoadICHandlerForElements ) ; <nl> + return handle ( Smi : : FromInt ( config ) , isolate ) ; <nl> + } else { <nl> + TRACE_HANDLER_STATS ( isolate , KeyedLoadIC_LoadFastElementStub ) ; <nl> + return LoadFastElementStub ( isolate , is_js_array , elements_kind , <nl> + convert_hole_to_undefined ) <nl> + . 
GetCode ( ) ; <nl> + } <nl> } <nl> <nl> void ElementHandlerCompiler : : CompileElementHandlers ( <nl> mmm a / src / ic / handler - compiler . h <nl> ppp b / src / ic / handler - compiler . h <nl> class ElementHandlerCompiler : public PropertyHandlerCompiler { <nl> <nl> virtual ~ ElementHandlerCompiler ( ) { } <nl> <nl> - static Handle < Code > GetKeyedLoadHandler ( Handle < Map > receiver_map , <nl> - Isolate * isolate ) ; <nl> + static Handle < Object > GetKeyedLoadHandler ( Handle < Map > receiver_map , <nl> + Isolate * isolate ) ; <nl> void CompileElementHandlers ( MapHandleList * receiver_maps , <nl> List < Handle < Object > > * handlers ) ; <nl> <nl> new file mode 100644 <nl> index 00000000000 . . bf7c4770b91 <nl> mmm / dev / null <nl> ppp b / src / ic / handler - configuration . h <nl> <nl> + / / Copyright 2016 the V8 project authors . All rights reserved . <nl> + / / Use of this source code is governed by a BSD - style license that can be <nl> + / / found in the LICENSE file . <nl> + <nl> + # ifndef V8_IC_HANDLER_CONFIGURATION_H_ <nl> + # define V8_IC_HANDLER_CONFIGURATION_H_ <nl> + <nl> + # include " src / elements - kind . h " <nl> + # include " src / globals . h " <nl> + # include " src / utils . h " <nl> + <nl> + namespace v8 { <nl> + namespace internal { <nl> + <nl> + enum LoadHandlerType { <nl> + kLoadICHandlerForElements = 0 , <nl> + kLoadICHandlerForProperties = 1 <nl> + } ; <nl> + <nl> + class LoadHandlerTypeBit : public BitField < bool , 0 , 1 > { } ; <nl> + <nl> + / / Encoding for configuration Smis for property loads : <nl> + class FieldOffsetIsInobject <nl> + : public BitField < bool , LoadHandlerTypeBit : : kNext , 1 > { } ; <nl> + class FieldOffsetIsDouble <nl> + : public BitField < bool , FieldOffsetIsInobject : : kNext , 1 > { } ; <nl> + class FieldOffsetOffset : public BitField < int , FieldOffsetIsDouble : : kNext , 27 > { <nl> + } ; <nl> + / / Make sure we don ' t overflow into the sign bit . <nl> + STATIC_ASSERT ( FieldOffsetOffset : : kNext < = kSmiValueSize - 1 ) ; <nl> + <nl> + / / Encoding for configuration Smis for elements loads : <nl> + class KeyedLoadIsJsArray : public BitField < bool , LoadHandlerTypeBit : : kNext , 1 > { <nl> + } ; <nl> + class KeyedLoadConvertHole <nl> + : public BitField < bool , KeyedLoadIsJsArray : : kNext , 1 > { } ; <nl> + class KeyedLoadElementsKind <nl> + : public BitField < ElementsKind , KeyedLoadConvertHole : : kNext , 8 > { } ; <nl> + / / Make sure we don ' t overflow into the sign bit . <nl> + STATIC_ASSERT ( KeyedLoadElementsKind : : kNext < = kSmiValueSize - 1 ) ; <nl> + <nl> + } / / namespace internal <nl> + } / / namespace v8 <nl> + <nl> + # endif / / V8_IC_HANDLER_CONFIGURATION_H_ <nl> mmm a / src / ic / ic . cc <nl> ppp b / src / ic / ic . cc <nl> void KeyedLoadIC : : UpdateLoadElement ( Handle < HeapObject > receiver ) { <nl> TargetMaps ( & target_receiver_maps ) ; <nl> <nl> if ( target_receiver_maps . length ( ) = = 0 ) { <nl> - Handle < Code > handler = <nl> + Handle < Object > handler = <nl> ElementHandlerCompiler : : GetKeyedLoadHandler ( receiver_map , isolate ( ) ) ; <nl> return ConfigureVectorState ( Handle < Name > ( ) , receiver_map , handler ) ; <nl> } <nl> void KeyedLoadIC : : UpdateLoadElement ( Handle < HeapObject > receiver ) { <nl> IsMoreGeneralElementsKindTransition ( <nl> target_receiver_maps . 
at ( 0 ) - > elements_kind ( ) , <nl> Handle < JSObject > : : cast ( receiver ) - > GetElementsKind ( ) ) ) { <nl> - Handle < Code > handler = <nl> + Handle < Object > handler = <nl> ElementHandlerCompiler : : GetKeyedLoadHandler ( receiver_map , isolate ( ) ) ; <nl> return ConfigureVectorState ( Handle < Name > ( ) , receiver_map , handler ) ; <nl> } <nl>
[KeyedLoadIC] Support Smi "handlers" for element loads
v8/v8
c9308147b341596de2733039223918a6202afa5f
2016-08-05T12:11:02Z
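The v8 commit above replaces code-object handlers for keyed element loads with a Smi whose payload is OR-ed together from BitField encodings (handler type, is-JS-array, convert-hole, elements kind), as declared in the new handler-configuration.h. The standalone C++ sketch below mirrors that packing with a simplified BitField template (a stand-in for V8's real one, not its actual utils.h class) and illustrative ElementsKind values; treat it as a sketch of the encode/decode round trip rather than V8 code.

// Standalone sketch: a simplified BitField template with the field layout
// from handler-configuration.h; field names match, the template itself and
// the ElementsKind values are illustrative stand-ins.
#include <cassert>
#include <cstdint>

template <typename T, int kShift, int kSize>
struct BitField {
  static constexpr int kNext = kShift + kSize;
  static constexpr uint32_t kMask = ((1u << kSize) - 1u) << kShift;
  static constexpr uint32_t encode(T value) {
    return (static_cast<uint32_t>(value) << kShift) & kMask;
  }
  static constexpr T decode(uint32_t packed) {
    return static_cast<T>((packed & kMask) >> kShift);
  }
};

enum LoadHandlerType { kLoadICHandlerForElements = 0, kLoadICHandlerForProperties = 1 };
enum ElementsKind : uint8_t { FAST_ELEMENTS = 2, FAST_HOLEY_ELEMENTS = 3 };  // illustrative values

using LoadHandlerTypeBit    = BitField<bool, 0, 1>;
using KeyedLoadIsJsArray    = BitField<bool, LoadHandlerTypeBit::kNext, 1>;
using KeyedLoadConvertHole  = BitField<bool, KeyedLoadIsJsArray::kNext, 1>;
using KeyedLoadElementsKind = BitField<ElementsKind, KeyedLoadConvertHole::kNext, 8>;

int main() {
  // Encode an element-load configuration the way GetKeyedLoadHandler does.
  uint32_t config = KeyedLoadElementsKind::encode(FAST_HOLEY_ELEMENTS) |
                    KeyedLoadConvertHole::encode(true) |
                    KeyedLoadIsJsArray::encode(true) |
                    LoadHandlerTypeBit::encode(kLoadICHandlerForElements);
  // The consumer decodes each field back out of the packed payload.
  assert(KeyedLoadElementsKind::decode(config) == FAST_HOLEY_ELEMENTS);
  assert(KeyedLoadConvertHole::decode(config));
  assert(KeyedLoadIsJsArray::decode(config));
  assert(LoadHandlerTypeBit::decode(config) == kLoadICHandlerForElements);
  return 0;
}

The one-bit LoadHandlerTypeBit at position 0 is what lets a single Smi represent either a property-load or an element-load handler, which is why the property encoding moved out of FieldIndex into the shared header.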
mmm a / src / csharp / Grpc . Core / CallOptions . cs <nl> ppp b / src / csharp / Grpc . Core / CallOptions . cs <nl> public class CallOptions <nl> readonly DateTime deadline ; <nl> readonly CancellationToken cancellationToken ; <nl> readonly WriteOptions writeOptions ; <nl> + readonly ContextPropagationToken propagationToken ; <nl> <nl> / / / < summary > <nl> / / / Creates a new instance of < c > CallOptions < / c > . <nl> public class CallOptions <nl> / / / < param name = " deadline " > Deadline for the call to finish . null means no deadline . < / param > <nl> / / / < param name = " cancellationToken " > Can be used to request cancellation of the call . < / param > <nl> / / / < param name = " writeOptions " > Write options that will be used for this call . < / param > <nl> - public CallOptions ( Metadata headers = null , DateTime ? deadline = null , CancellationToken cancellationToken = default ( CancellationToken ) , WriteOptions writeOptions = null ) <nl> + / / / < param name = " propagationToken " > Context propagation token obtained from < see cref = " ServerCallContext " / > . < / param > <nl> + public CallOptions ( Metadata headers = null , DateTime ? deadline = null , CancellationToken ? cancellationToken = null , <nl> + WriteOptions writeOptions = null , ContextPropagationToken propagationToken = null ) <nl> { <nl> / / TODO ( jtattermusch ) : consider only creating metadata object once it ' s really needed . <nl> - this . headers = headers ! = null ? headers : new Metadata ( ) ; <nl> - / / TODO ( jtattermusch ) : allow null value of deadline ? <nl> - this . deadline = deadline . HasValue ? deadline . Value : DateTime . MaxValue ; <nl> - this . cancellationToken = cancellationToken ; <nl> + this . headers = headers ? ? new Metadata ( ) ; <nl> + this . deadline = deadline ? ? ( propagationToken ! = null ? propagationToken . Deadline : DateTime . MaxValue ) ; <nl> + this . cancellationToken = cancellationToken ? ? ( propagationToken ! = null ? propagationToken . CancellationToken : CancellationToken . None ) ; <nl> this . writeOptions = writeOptions ; <nl> + this . propagationToken = propagationToken ; <nl> } <nl> <nl> / / / < summary > <nl> public WriteOptions WriteOptions <nl> return this . writeOptions ; <nl> } <nl> } <nl> + <nl> + / / / < summary > <nl> + / / / Token for propagating parent call context . <nl> + / / / < / summary > <nl> + public ContextPropagationToken PropagationToken <nl> + { <nl> + get <nl> + { <nl> + return this . propagationToken ; <nl> + } <nl> + } <nl> } <nl> } <nl> new file mode 100644 <nl> index 00000000000 . . e7659477662 <nl> mmm / dev / null <nl> ppp b / src / csharp / Grpc . Core / ContextPropagationToken . cs <nl> <nl> + # region Copyright notice and license <nl> + <nl> + / / Copyright 2015 , Google Inc . <nl> + / / All rights reserved . <nl> + / / <nl> + / / Redistribution and use in source and binary forms , with or without <nl> + / / modification , are permitted provided that the following conditions are <nl> + / / met : <nl> + / / <nl> + / / * Redistributions of source code must retain the above copyright <nl> + / / notice , this list of conditions and the following disclaimer . <nl> + / / * Redistributions in binary form must reproduce the above <nl> + / / copyright notice , this list of conditions and the following disclaimer <nl> + / / in the documentation and / or other materials provided with the <nl> + / / distribution . <nl> + / / * Neither the name of Google Inc . 
nor the names of its <nl> + / / contributors may be used to endorse or promote products derived from <nl> + / / this software without specific prior written permission . <nl> + / / <nl> + / / THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS <nl> + / / " AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT <nl> + / / LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR <nl> + / / A PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT <nl> + / / OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL , <nl> + / / SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT <nl> + / / LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , <nl> + / / DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY <nl> + / / THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT <nl> + / / ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> + / / OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> + <nl> + # endregion <nl> + <nl> + using System ; <nl> + using System . Threading ; <nl> + <nl> + using Grpc . Core . Internal ; <nl> + using Grpc . Core . Utils ; <nl> + <nl> + namespace Grpc . Core <nl> + { <nl> + / / / < summary > <nl> + / / / Token for propagating context of server side handlers to child calls . <nl> + / / / In situations when a backend is making calls to another backend , <nl> + / / / it makes sense to propagate properties like deadline and cancellation <nl> + / / / token of the server call to the child call . <nl> + / / / C core provides some other contexts ( like tracing context ) that <nl> + / / / are not accessible to C # layer , but this token still allows propagating them . <nl> + / / / < / summary > <nl> + public class ContextPropagationToken <nl> + { <nl> + / / / < summary > <nl> + / / / Default propagation mask used by C core . <nl> + / / / < / summary > <nl> + const ContextPropagationFlags DefaultCoreMask = ( ContextPropagationFlags ) 0xffff ; <nl> + <nl> + / / / < summary > <nl> + / / / Default propagation mask used by C # - we want to propagate deadline <nl> + / / / and cancellation token by our own means . <nl> + / / / < / summary > <nl> + internal const ContextPropagationFlags DefaultMask = DefaultCoreMask <nl> + & ~ ContextPropagationFlags . Deadline & ~ ContextPropagationFlags . Cancellation ; <nl> + <nl> + readonly CallSafeHandle parentCall ; <nl> + readonly DateTime deadline ; <nl> + readonly CancellationToken cancellationToken ; <nl> + readonly ContextPropagationOptions options ; <nl> + <nl> + internal ContextPropagationToken ( CallSafeHandle parentCall , DateTime deadline , CancellationToken cancellationToken , ContextPropagationOptions options ) <nl> + { <nl> + this . parentCall = Preconditions . CheckNotNull ( parentCall ) ; <nl> + this . deadline = deadline ; <nl> + this . cancellationToken = cancellationToken ; <nl> + this . options = options ? ? ContextPropagationOptions . Default ; <nl> + } <nl> + <nl> + internal CallSafeHandle ParentCall <nl> + { <nl> + get <nl> + { <nl> + return this . parentCall ; <nl> + } <nl> + } <nl> + <nl> + internal DateTime Deadline <nl> + { <nl> + get <nl> + { <nl> + return this . deadline ; <nl> + } <nl> + } <nl> + <nl> + internal CancellationToken CancellationToken <nl> + { <nl> + get <nl> + { <nl> + return this . 
cancellationToken ; <nl> + } <nl> + } <nl> + <nl> + internal ContextPropagationOptions Options <nl> + { <nl> + get <nl> + { <nl> + return this . options ; <nl> + } <nl> + } <nl> + <nl> + internal bool IsPropagateDeadline <nl> + { <nl> + get { return false ; } <nl> + } <nl> + <nl> + internal bool IsPropagateCancellation <nl> + { <nl> + get { return false ; } <nl> + } <nl> + } <nl> + <nl> + / / / < summary > <nl> + / / / Options for < see cref = " ContextPropagationToken " / > . <nl> + / / / < / summary > <nl> + public class ContextPropagationOptions <nl> + { <nl> + public static readonly ContextPropagationOptions Default = new ContextPropagationOptions ( ) ; <nl> + } <nl> + <nl> + / / / < summary > <nl> + / / / Context propagation flags from grpc / grpc . h . <nl> + / / / < / summary > <nl> + [ Flags ] <nl> + internal enum ContextPropagationFlags <nl> + { <nl> + Deadline = 1 , <nl> + CensusStatsContext = 2 , <nl> + CensusTracingContext = 4 , <nl> + Cancellation = 8 <nl> + } <nl> + } <nl> mmm a / src / csharp / Grpc . Core / Grpc . Core . csproj <nl> ppp b / src / csharp / Grpc . Core / Grpc . Core . csproj <nl> <nl> < Compile Include = " CallOptions . cs " / > <nl> < Compile Include = " CompressionLevel . cs " / > <nl> < Compile Include = " WriteOptions . cs " / > <nl> + < Compile Include = " ContextPropagationToken . cs " / > <nl> < / ItemGroup > <nl> < ItemGroup > <nl> < None Include = " Grpc . Core . nuspec " / > <nl> mmm a / src / csharp / Grpc . Core / Internal / AsyncCall . cs <nl> ppp b / src / csharp / Grpc . Core / Internal / AsyncCall . cs <nl> protected override void OnReleaseResources ( ) <nl> <nl> private void Initialize ( CompletionQueueSafeHandle cq ) <nl> { <nl> - var call = details . Channel . Handle . CreateCall ( details . Channel . Environment . CompletionRegistry , cq , <nl> + var propagationToken = details . Options . PropagationToken ; <nl> + var parentCall = propagationToken ! = null ? propagationToken . ParentCall : CallSafeHandle . NullInstance ; <nl> + <nl> + var call = details . Channel . Handle . CreateCall ( details . Channel . Environment . CompletionRegistry , <nl> + parentCall , ContextPropagationToken . DefaultMask , cq , <nl> details . Method , details . Host , Timespec . FromDateTime ( details . Options . Deadline ) ) ; <nl> details . Channel . Environment . DebugStats . ActiveClientCalls . Increment ( ) ; <nl> InitializeInternal ( call ) ; <nl> mmm a / src / csharp / Grpc . Core / Internal / CallSafeHandle . cs <nl> ppp b / src / csharp / Grpc . Core / Internal / CallSafeHandle . cs <nl> namespace Grpc . Core . Internal <nl> / / / < / summary > <nl> internal class CallSafeHandle : SafeHandleZeroIsInvalid <nl> { <nl> + public static readonly CallSafeHandle NullInstance = new CallSafeHandle ( ) ; <nl> + <nl> const uint GRPC_WRITE_BUFFER_HINT = 1 ; <nl> CompletionRegistry completionRegistry ; <nl> <nl> mmm a / src / csharp / Grpc . Core / Internal / ChannelSafeHandle . cs <nl> ppp b / src / csharp / Grpc . Core / Internal / ChannelSafeHandle . cs <nl> internal class ChannelSafeHandle : SafeHandleZeroIsInvalid <nl> static extern ChannelSafeHandle grpcsharp_secure_channel_create ( CredentialsSafeHandle credentials , string target , ChannelArgsSafeHandle channelArgs ) ; <nl> <nl> [ DllImport ( " grpc_csharp_ext . 
dll " ) ] <nl> - static extern CallSafeHandle grpcsharp_channel_create_call ( ChannelSafeHandle channel , CompletionQueueSafeHandle cq , string method , string host , Timespec deadline ) ; <nl> + static extern CallSafeHandle grpcsharp_channel_create_call ( ChannelSafeHandle channel , CallSafeHandle parentCall , ContextPropagationFlags propagationMask , CompletionQueueSafeHandle cq , string method , string host , Timespec deadline ) ; <nl> <nl> [ DllImport ( " grpc_csharp_ext . dll " ) ] <nl> static extern ChannelState grpcsharp_channel_check_connectivity_state ( ChannelSafeHandle channel , int tryToConnect ) ; <nl> public static ChannelSafeHandle CreateSecure ( CredentialsSafeHandle credentials , <nl> return grpcsharp_secure_channel_create ( credentials , target , channelArgs ) ; <nl> } <nl> <nl> - public CallSafeHandle CreateCall ( CompletionRegistry registry , CompletionQueueSafeHandle cq , string method , string host , Timespec deadline ) <nl> + public CallSafeHandle CreateCall ( CompletionRegistry registry , CallSafeHandle parentCall , ContextPropagationFlags propagationMask , CompletionQueueSafeHandle cq , string method , string host , Timespec deadline ) <nl> { <nl> - var result = grpcsharp_channel_create_call ( this , cq , method , host , deadline ) ; <nl> + var result = grpcsharp_channel_create_call ( this , parentCall , propagationMask , cq , method , host , deadline ) ; <nl> result . SetCompletionRegistry ( registry ) ; <nl> return result ; <nl> } <nl> mmm a / src / csharp / Grpc . Core / Internal / ServerCallHandler . cs <nl> ppp b / src / csharp / Grpc . Core / Internal / ServerCallHandler . cs <nl> public static Status StatusFromException ( Exception e ) <nl> { <nl> DateTime realtimeDeadline = newRpc . Deadline . ToClockType ( GPRClockType . Realtime ) . ToDateTime ( ) ; <nl> <nl> - return new ServerCallContext ( <nl> - newRpc . Method , newRpc . Host , peer , realtimeDeadline , <nl> + return new ServerCallContext ( newRpc . Call , newRpc . Method , newRpc . Host , peer , realtimeDeadline , <nl> newRpc . RequestMetadata , cancellationToken , serverResponseStream . WriteResponseHeadersAsync , serverResponseStream ) ; <nl> } <nl> } <nl> mmm a / src / csharp / Grpc . Core / ServerCallContext . cs <nl> ppp b / src / csharp / Grpc . Core / ServerCallContext . cs <nl> namespace Grpc . Core <nl> / / / < / summary > <nl> public class ServerCallContext <nl> { <nl> + private readonly CallSafeHandle callHandle ; <nl> private readonly string method ; <nl> private readonly string host ; <nl> private readonly string peer ; <nl> public class ServerCallContext <nl> private Func < Metadata , Task > writeHeadersFunc ; <nl> private IHasWriteOptions writeOptionsHolder ; <nl> <nl> - public ServerCallContext ( string method , string host , string peer , DateTime deadline , Metadata requestHeaders , CancellationToken cancellationToken , <nl> + internal ServerCallContext ( CallSafeHandle callHandle , string method , string host , string peer , DateTime deadline , Metadata requestHeaders , CancellationToken cancellationToken , <nl> Func < Metadata , Task > writeHeadersFunc , IHasWriteOptions writeOptionsHolder ) <nl> { <nl> + this . callHandle = callHandle ; <nl> this . method = method ; <nl> this . host = host ; <nl> this . peer = peer ; <nl> public Task WriteResponseHeadersAsync ( Metadata responseHeaders ) <nl> { <nl> return writeHeadersFunc ( responseHeaders ) ; <nl> } <nl> + <nl> + / / / < summary > <nl> + / / / Creates a propagation token to be used to propagate call context to a child call . 
<nl> + / / / < / summary > <nl> + public ContextPropagationToken CreatePropagationToken ( ContextPropagationOptions options = null ) <nl> + { <nl> + return new ContextPropagationToken ( callHandle , deadline , cancellationToken , options ) ; <nl> + } <nl> <nl> / / / < summary > Name of method called in this RPC . < / summary > <nl> public string Method <nl> mmm a / src / csharp / ext / grpc_csharp_ext . c <nl> ppp b / src / csharp / ext / grpc_csharp_ext . c <nl> GPR_EXPORT void GPR_CALLTYPE grpcsharp_channel_destroy ( grpc_channel * channel ) { <nl> } <nl> <nl> GPR_EXPORT grpc_call * GPR_CALLTYPE <nl> - grpcsharp_channel_create_call ( grpc_channel * channel , grpc_completion_queue * cq , <nl> + grpcsharp_channel_create_call ( grpc_channel * channel , grpc_call * parent_call , <nl> + gpr_uint32 propagation_mask , <nl> + grpc_completion_queue * cq , <nl> const char * method , const char * host , <nl> gpr_timespec deadline ) { <nl> - return grpc_channel_create_call ( channel , NULL , GRPC_PROPAGATE_DEFAULTS , cq , <nl> + return grpc_channel_create_call ( channel , parent_call , propagation_mask , cq , <nl> method , host , deadline ) ; <nl> } <nl> <nl>
context propagation API
grpc/grpc
392fae26d2d47f4197b0fd376ff6ea13546d6448
2015-08-09T05:21:57Z
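The gRPC commit above makes CallOptions fall back to the parent call's ContextPropagationToken for deadline and cancellation when no explicit value is supplied. The C++ sketch below reproduces only that precedence rule (explicit option, else the token's value, else a default) with hypothetical stand-in types; it is not the gRPC C++ API.

// Hypothetical stand-in types (not gRPC's) showing the fallback order used in
// the C# CallOptions constructor.
#include <chrono>
#include <iostream>
#include <optional>

using Deadline = std::chrono::system_clock::time_point;

struct ContextPropagationToken {   // context captured from the server call
  Deadline deadline;
  bool cancellation_requested;
};

struct CallOptions {
  Deadline deadline;
  bool cancellation_requested;

  CallOptions(std::optional<Deadline> explicit_deadline,
              std::optional<bool> explicit_cancellation,
              const ContextPropagationToken* propagation_token) {
    // Mirrors: deadline ?? (propagationToken != null ? propagationToken.Deadline : DateTime.MaxValue)
    deadline = explicit_deadline ? *explicit_deadline
               : propagation_token ? propagation_token->deadline
                                   : Deadline::max();
    // Mirrors: cancellationToken ?? (propagationToken != null ? ... : CancellationToken.None)
    cancellation_requested = explicit_cancellation ? *explicit_cancellation
                             : propagation_token ? propagation_token->cancellation_requested
                                                 : false;
  }
};

int main() {
  ContextPropagationToken parent{
      std::chrono::system_clock::now() + std::chrono::seconds(5), false};
  CallOptions child({}, {}, &parent);      // no explicit values: inherit from parent
  std::cout << std::boolalpha << (child.deadline == parent.deadline) << "\n";  // true
  CallOptions detached({}, {}, nullptr);   // no token: fall back to the defaults
  std::cout << (detached.deadline == Deadline::max()) << "\n";                 // true
}

Propagating deadline and cancellation in the managed layer is also why the commit masks those flags out of the core propagation mask (DefaultMask strips Deadline and Cancellation).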
mmm a / src / ia32 / codegen - ia32 . cc <nl> ppp b / src / ia32 / codegen - ia32 . cc <nl> Result CodeGenerator : : EmitNamedStore ( Handle < String > name , bool is_contextual ) { <nl> <nl> / / Allocate scratch register for write barrier . <nl> Result scratch = allocator ( ) - > Allocate ( ) ; <nl> - ASSERT ( scratch . is_valid ( ) & & <nl> - result . is_valid ( ) & & <nl> - receiver . is_valid ( ) & & <nl> - value . is_valid ( ) ) ; <nl> + ASSERT ( scratch . is_valid ( ) ) ; <nl> <nl> / / The write barrier clobbers all input registers , so spill the <nl> / / receiver and the value . <nl> frame_ - > Spill ( receiver . reg ( ) ) ; <nl> frame_ - > Spill ( value . reg ( ) ) ; <nl> <nl> + / / If the receiver and the value share a register allocate a new <nl> + / / register for the receiver . <nl> + if ( receiver . reg ( ) . is ( value . reg ( ) ) ) { <nl> + receiver = allocator ( ) - > Allocate ( ) ; <nl> + ASSERT ( receiver . is_valid ( ) ) ; <nl> + __ mov ( receiver . reg ( ) , Operand ( value . reg ( ) ) ) ; <nl> + } <nl> + <nl> / / Update the write barrier . To save instructions in the inlined <nl> / / version we do not filter smis . <nl> Label skip_write_barrier ; <nl> mmm a / src / x64 / codegen - x64 . cc <nl> ppp b / src / x64 / codegen - x64 . cc <nl> Result CodeGenerator : : EmitNamedStore ( Handle < String > name , bool is_contextual ) { <nl> <nl> / / Allocate scratch register for write barrier . <nl> Result scratch = allocator ( ) - > Allocate ( ) ; <nl> - ASSERT ( scratch . is_valid ( ) & & <nl> - result . is_valid ( ) & & <nl> - receiver . is_valid ( ) & & <nl> - value . is_valid ( ) ) ; <nl> + ASSERT ( scratch . is_valid ( ) ) ; <nl> <nl> / / The write barrier clobbers all input registers , so spill the <nl> / / receiver and the value . <nl> frame_ - > Spill ( receiver . reg ( ) ) ; <nl> frame_ - > Spill ( value . reg ( ) ) ; <nl> <nl> + / / If the receiver and the value share a register allocate a new <nl> + / / register for the receiver . <nl> + if ( receiver . reg ( ) . is ( value . reg ( ) ) ) { <nl> + receiver = allocator ( ) - > Allocate ( ) ; <nl> + ASSERT ( receiver . is_valid ( ) ) ; <nl> + __ movq ( receiver . reg ( ) , value . reg ( ) ) ; <nl> + } <nl> + <nl> / / Update the write barrier . To save instructions in the inlined <nl> / / version we do not filter smis . <nl> Label skip_write_barrier ; <nl>
Fix aliasing problem in inlined stores on x64 and ia32. The receiver
v8/v8
79e332010a4ad190043c55b2dd8cb4ba66cececf
2010-07-23T11:55:03Z
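The fix above guards the inlined named store against the case where the register allocator handed the receiver and the value the same register: the write barrier spills and clobbers its inputs, so the receiver is first given a fresh register. A minimal sketch of that aliasing check, using made-up Register, Allocator, and EmitMove types rather than V8's codegen classes:

// Made-up types, just to show the shape of the aliasing check.
#include <cassert>

struct Register { int code; };
inline bool SameRegister(Register a, Register b) { return a.code == b.code; }

struct Allocator {
  int next_free;
  Register Allocate() { return Register{next_free++}; }
};

// Placeholder for the emitted "__ mov(dst, src)" / "__ movq(dst, src)".
inline void EmitMove(Register /*dst*/, Register /*src*/) {}

void PrepareNamedStore(Register& receiver, Register value, Allocator& allocator) {
  // Mirrors the check added to CodeGenerator::EmitNamedStore: if the receiver
  // and the value share a register, move the object into a fresh register so
  // later clobbering of the value's register cannot corrupt the receiver.
  if (SameRegister(receiver, value)) {
    receiver = allocator.Allocate();
    EmitMove(receiver, value);
  }
}

int main() {
  Allocator allocator{5};          // pretend registers 0-4 are already in use
  Register shared{3};
  Register receiver = shared;
  Register value = shared;         // the allocator aliased the two operands
  PrepareNamedStore(receiver, value, allocator);
  assert(!SameRegister(receiver, value));   // the aliasing has been broken
  return 0;
}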
mmm a / tests / cpp - tests / Resources / configs / config - example . plist <nl> ppp b / tests / cpp - tests / Resources / configs / config - example . plist <nl> <nl> < key > cocos2d . x . texture . pvrv2_has_alpha_premultiplied < / key > <nl> < false / > <nl> < key > cocos2d . x . testcpp . autorun < / key > <nl> - < true / > <nl> + < false / > <nl> < / dict > <nl> < key > metadata < / key > <nl> < dict > <nl>
change default auto-run config to false
cocos2d/cocos2d-x
8cc224dcef96af20e43497f354969e448a30d80e
2014-05-13T06:17:43Z
mmm a / README . md <nl> ppp b / README . md <nl> <nl> The implementations are for learning purpose . They may be less efficient than the implementation in the standard library . <nl> <nl> # # # Contribute Guidelines <nl> - Read our [ Contribution Guidelines ] ( https : / / github . com / TheAlgorithms / C - Plus - Plus / blob / master / README . md ) before you contribute <nl> - <nl> - How you can contribute ? See this small guide . <nl> - <nl> + Read our [ Contribution Guidelines ] ( https : / / github . com / TheAlgorithms / C - Plus - Plus / blob / master / README . md ) before you contribute . <nl>
docs: update README.md
TheAlgorithms/C-Plus-Plus
b0a97adb4a3224baceae3946d7952024f53912d9
2019-11-14T11:23:55Z
mmm a / include / swift / Sema / CSFix . h <nl> ppp b / include / swift / Sema / CSFix . h <nl> class UseRawValue final : public ConstraintFix { <nl> Type expectedType , ConstraintLocator * locator ) ; <nl> } ; <nl> <nl> - / / / Replace a coercion ( ' as ' ) with a forced checked cast ( ' as ! ' ) . <nl> + / / / Replace a coercion ( ' as ' ) with runtime checked cast ( ' as ! ' or ' as ? ' ) . <nl> class CoerceToCheckedCast final : public ContextualMismatch { <nl> CoerceToCheckedCast ( ConstraintSystem & cs , Type fromType , Type toType , <nl> - ConstraintLocator * locator ) <nl> + bool useConditionalCast , ConstraintLocator * locator ) <nl> : ContextualMismatch ( cs , FixKind : : CoerceToCheckedCast , fromType , toType , <nl> - locator ) { } <nl> + locator ) , <nl> + UseConditionalCast ( useConditionalCast ) { } <nl> + bool UseConditionalCast = false ; <nl> <nl> public : <nl> - std : : string getName ( ) const override { return " as to as ! " ; } <nl> + std : : string getName ( ) const override { <nl> + return UseConditionalCast ? " as to as ? " : " as to as ! " ; <nl> + } <nl> <nl> bool diagnose ( const Solution & solution , bool asNote = false ) const override ; <nl> <nl> static CoerceToCheckedCast * attempt ( ConstraintSystem & cs , Type fromType , <nl> - Type toType , ConstraintLocator * locator ) ; <nl> + Type toType , bool useConditionalCast , <nl> + ConstraintLocator * locator ) ; <nl> } ; <nl> <nl> class RemoveInvalidCall final : public ConstraintFix { <nl> mmm a / lib / Sema / CSDiagnostics . cpp <nl> ppp b / lib / Sema / CSDiagnostics . cpp <nl> bool NoEscapeFuncToTypeConversionFailure : : diagnoseParameterUse ( ) const { <nl> return true ; <nl> } <nl> <nl> - ASTNode MissingForcedDowncastFailure : : getAnchor ( ) const { <nl> + ASTNode InvalidCoercionFailure : : getAnchor ( ) const { <nl> auto anchor = FailureDiagnostic : : getAnchor ( ) ; <nl> if ( auto * assignExpr = getAsExpr < AssignExpr > ( anchor ) ) <nl> return assignExpr - > getSrc ( ) ; <nl> return anchor ; <nl> } <nl> <nl> - bool MissingForcedDowncastFailure : : diagnoseAsError ( ) { <nl> + bool InvalidCoercionFailure : : diagnoseAsError ( ) { <nl> auto fromType = getFromType ( ) ; <nl> auto toType = getToType ( ) ; <nl> <nl> emitDiagnostic ( diag : : cannot_coerce_to_type , fromType , toType ) ; <nl> <nl> - auto & solution = getSolution ( ) ; <nl> - auto restriction = solution . ConstraintRestrictions . find ( <nl> - { toType - > getCanonicalType ( ) , <nl> - OptionalType : : get ( toType ) - > getCanonicalType ( ) } ) ; <nl> - / / If the type has an value to optional conversion we can instead suggest <nl> - / / the conditional downcast as it is safer in situations like conditional <nl> - / / binding . <nl> - if ( restriction ! = solution . ConstraintRestrictions . end ( ) & & <nl> - restriction - > getSecond ( ) = = ConversionRestrictionKind : : ValueToOptional ) { <nl> + if ( UseConditionalCast ) { <nl> emitDiagnostic ( diag : : missing_optional_downcast ) <nl> . highlight ( getSourceRange ( ) ) <nl> . fixItReplace ( getLoc ( ) , " as ? " ) ; <nl> mmm a / lib / Sema / CSDiagnostics . h <nl> ppp b / lib / Sema / CSDiagnostics . h <nl> class ArgumentMismatchFailure : public ContextualFailure { <nl> bool diagnoseMisplacedMissingArgument ( ) const ; <nl> } ; <nl> <nl> - / / / Replace a coercion ( ' as ' ) with a forced checked cast ( ' as ! ' ) . <nl> - class MissingForcedDowncastFailure final : public ContextualFailure { <nl> + / / / Replace a coercion ( ' as ' ) with a runtime checked cast ( ' as ! ' or ' as ? ' ) . 
<nl> + class InvalidCoercionFailure final : public ContextualFailure { <nl> + bool UseConditionalCast ; <nl> + <nl> public : <nl> - MissingForcedDowncastFailure ( const Solution & solution , Type fromType , <nl> - Type toType , ConstraintLocator * locator ) <nl> - : ContextualFailure ( solution , fromType , toType , locator ) { } <nl> + InvalidCoercionFailure ( const Solution & solution , Type fromType , Type toType , <nl> + bool useConditionalCast , ConstraintLocator * locator ) <nl> + : ContextualFailure ( solution , fromType , toType , locator ) , <nl> + UseConditionalCast ( useConditionalCast ) { } <nl> <nl> ASTNode getAnchor ( ) const override ; <nl> <nl> mmm a / lib / Sema / CSFix . cpp <nl> ppp b / lib / Sema / CSFix . cpp <nl> TreatRValueAsLValue * TreatRValueAsLValue : : create ( ConstraintSystem & cs , <nl> <nl> bool CoerceToCheckedCast : : diagnose ( const Solution & solution , <nl> bool asNote ) const { <nl> - MissingForcedDowncastFailure failure ( solution , getFromType ( ) , getToType ( ) , <nl> - getLocator ( ) ) ; <nl> + InvalidCoercionFailure failure ( solution , getFromType ( ) , getToType ( ) , <nl> + UseConditionalCast , getLocator ( ) ) ; <nl> return failure . diagnose ( asNote ) ; <nl> } <nl> <nl> CoerceToCheckedCast * CoerceToCheckedCast : : attempt ( ConstraintSystem & cs , <nl> Type fromType , Type toType , <nl> + bool useConditionalCast , <nl> ConstraintLocator * locator ) { <nl> / / If any of the types has a type variable , don ' t add the fix . <nl> if ( fromType - > hasTypeVariable ( ) | | toType - > hasTypeVariable ( ) ) <nl> CoerceToCheckedCast * CoerceToCheckedCast : : attempt ( ConstraintSystem & cs , <nl> return nullptr ; <nl> <nl> return new ( cs . getAllocator ( ) ) <nl> - CoerceToCheckedCast ( cs , fromType , toType , locator ) ; <nl> + CoerceToCheckedCast ( cs , fromType , toType , useConditionalCast , locator ) ; <nl> } <nl> <nl> bool TreatArrayLiteralAsDictionary : : diagnose ( const Solution & solution , <nl> mmm a / lib / Sema / CSSimplify . cpp <nl> ppp b / lib / Sema / CSSimplify . cpp <nl> bool ConstraintSystem : : repairFailures ( <nl> getConstraintLocator ( coercion - > getSubExpr ( ) ) ) ) <nl> return true ; <nl> <nl> - / / Repair a coercion ( ' as ' ) with a forced checked cast ( ' as ! ' ) . <nl> - if ( auto * coerceToCheckCastFix = CoerceToCheckedCast : : attempt ( <nl> - * this , lhs , rhs , getConstraintLocator ( locator ) ) ) { <nl> + / / If the result type of the coercion has an value to optional conversion <nl> + / / we can instead suggest the conditional downcast as it is safer in <nl> + / / situations like conditional binding . <nl> + auto useConditionalCast = llvm : : any_of ( <nl> + ConstraintRestrictions , <nl> + [ & ] ( std : : tuple < Type , Type , ConversionRestrictionKind > restriction ) { <nl> + ConversionRestrictionKind restrictionKind ; <nl> + Type type1 , type2 ; <nl> + std : : tie ( type1 , type2 , restrictionKind ) = restriction ; <nl> + <nl> + if ( restrictionKind ! = ConversionRestrictionKind : : ValueToOptional ) <nl> + return false ; <nl> + <nl> + return rhs - > isEqual ( type1 ) ; <nl> + } ) ; <nl> + <nl> + / / Repair a coercion ( ' as ' ) with a runtime checked cast ( ' as ! ' or ' as ? ' ) . <nl> + if ( auto * coerceToCheckCastFix = <nl> + CoerceToCheckedCast : : attempt ( * this , lhs , rhs , useConditionalCast , <nl> + getConstraintLocator ( locator ) ) ) { <nl> conversionsOrFixes . 
push_back ( coerceToCheckCastFix ) ; <nl> return true ; <nl> } <nl> ConstraintSystem : : simplifyRestrictedConstraintImpl ( <nl> loc - > isLastElement < LocatorPathElt : : ApplyArgToParam > ( ) | | <nl> loc - > isForOptionalTry ( ) ) { <nl> if ( restriction = = ConversionRestrictionKind : : Superclass ) { <nl> - if ( auto * fix = <nl> - CoerceToCheckedCast : : attempt ( * this , fromType , toType , loc ) ) <nl> + if ( auto * fix = CoerceToCheckedCast : : attempt ( <nl> + * this , fromType , toType , / * useConditionalCast * / false , loc ) ) <nl> return ! recordFix ( fix , impact ) ; <nl> } <nl> <nl>
[Sema] Detect if we should use a conditional binding when recording the CoerceToCheckedCast fix
apple/swift
ac65e6fcc464d08b7707327a3bfb2166ef273092
2020-12-01T12:03:17Z
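The Swift compiler commit above decides between suggesting 'as!' and 'as?' by scanning the recorded conversion restrictions for a ValueToOptional restriction whose value type matches the coercion's result type. The sketch below models that scan with plain std:: types standing in for the compiler's Type and ConversionRestrictionKind, and std::any_of in place of llvm::any_of; it is an illustration of the detection logic, not the constraint solver's code.

// Stand-in types modelling the useConditionalCast check from repairFailures.
#include <algorithm>
#include <iostream>
#include <string>
#include <tuple>
#include <vector>

enum class RestrictionKind { Superclass, ValueToOptional, Existential };

// (type1, type2, kind): a recorded conversion restriction, e.g. Int -> Int?.
using Restriction = std::tuple<std::string, std::string, RestrictionKind>;

// Should the coercion fix suggest 'as?' rather than 'as!'? It should when the
// coercion's result type is the value side of a recorded value-to-optional
// conversion, because a conditional cast is safer there (e.g. conditional binding).
bool UseConditionalCast(const std::vector<Restriction>& restrictions,
                        const std::string& coercion_result_type) {
  return std::any_of(restrictions.begin(), restrictions.end(),
                     [&](const Restriction& r) {
                       if (std::get<2>(r) != RestrictionKind::ValueToOptional)
                         return false;
                       return std::get<0>(r) == coercion_result_type;
                     });
}

int main() {
  std::vector<Restriction> restrictions = {
      {"Derived", "Base", RestrictionKind::Superclass},
      {"Int", "Int?", RestrictionKind::ValueToOptional},
  };
  bool conditional = UseConditionalCast(restrictions, "Int");
  // The fix then names itself "as to as?" or "as to as!" accordingly.
  std::cout << (conditional ? "as to as?" : "as to as!") << "\n";
  return 0;
}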